diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index d2f61c3b3..9ed7c9ef7 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -54,7 +54,7 @@ jobs: # Disabled due to issues with cross-rs #x86_64-pc-windows-gnu, ] - channel: [1.80.0, nightly] + channel: [1.84.0, nightly] include: - os: macos-latest target: aarch64-apple-darwin @@ -64,10 +64,10 @@ jobs: channel: nightly - os: macos-latest target: aarch64-apple-darwin - channel: 1.80.0 + channel: 1.84.0 - os: windows-latest target: x86_64-pc-windows-msvc - channel: 1.80.0 + channel: 1.84.0 - os: ubuntu-latest target: x86_64-unknown-linux-gnu channel: beta diff --git a/Cargo.toml b/Cargo.toml index faa009581..69f0d8bd9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,7 +10,29 @@ keywords = ["hash", "no_std", "hashmap", "swisstable"] categories = ["data-structures", "no-std"] exclude = [".github", "/ci/*"] edition = "2021" -rust-version = "1.65.0" +rust-version = "1.84.0" + +[lints.rust] +missing_docs = "warn" +unreachable_pub = "warn" +unsafe_op_in_unsafe_fn = "warn" + +# rust_2018_idioms +bare_trait_objects = "warn" +elided_lifetimes_in_paths = "warn" +ellipsis_inclusive_range_patterns = "warn" +explicit_outlives_requirements = "warn" +unused_extern_crates = "warn" + +[lints.clippy] +doc_markdown = "allow" +manual_map = "allow" +missing_errors_doc = "allow" +missing_safety_doc = "allow" +module_name_repetitions = "allow" +must_use_candidate = "allow" +option_if_let_else = "allow" +redundant_else = "allow" [dependencies] # For the default hasher @@ -26,7 +48,7 @@ alloc = { version = "1.0.0", optional = true, package = "rustc-std-workspace-all # Support for allocators that use allocator-api2 allocator-api2 = { version = "0.2.9", optional = true, default-features = false, features = [ - "alloc", + "alloc", ] } # Equivalent trait which can be shared with other hash table implementations. @@ -50,7 +72,13 @@ bumpalo = { version = "3.13.0", features = ["allocator-api2"] } libc = "0.2.155" [features] -default = ["default-hasher", "inline-more", "allocator-api2", "equivalent", "raw-entry"] +default = [ + "default-hasher", + "inline-more", + "allocator-api2", + "equivalent", + "raw-entry", +] # Enables use of nightly features. This is only guaranteed to work on the latest # version of nightly Rust. @@ -60,12 +88,7 @@ nightly = ["foldhash?/nightly", "bumpalo/allocator_api"] rustc-internal-api = [] # Internal feature used when building as part of the standard library. -rustc-dep-of-std = [ - "nightly", - "core", - "alloc", - "rustc-internal-api", -] +rustc-dep-of-std = ["nightly", "core", "alloc", "rustc-internal-api"] # Enables serde support. serde = ["dep:serde_core", "dep:serde"] diff --git a/benches/bench.rs b/benches/bench.rs index ce3aee5ce..14bf1aeb9 100644 --- a/benches/bench.rs +++ b/benches/bench.rs @@ -1,7 +1,7 @@ -// This benchmark suite contains some benchmarks along a set of dimensions: -// Hasher: std default (SipHash) and crate default (foldhash). -// Int key distribution: low bit heavy, top bit heavy, and random. -// Task: basic functionality: insert, insert_erase, lookup, lookup_fail, iter +//! This benchmark suite contains some benchmarks along a set of dimensions: +//! * Hasher: std default (SipHash) and crate default (foldhash). +//! * Int key distribution: low bit heavy, top bit heavy, and random. +//! 
* Task: basic functionality: insert, insert_erase, lookup, lookup_fail, iter #![feature(test)] extern crate test; diff --git a/benches/with_capacity.rs b/benches/with_capacity.rs index eeb85b59a..6689dac79 100644 --- a/benches/with_capacity.rs +++ b/benches/with_capacity.rs @@ -1,3 +1,4 @@ +#![expect(missing_docs)] // https://github.com/rust-lang/rust/issues/137561 #![feature(test)] extern crate test; diff --git a/ci/tools.sh b/ci/tools.sh index b23e2d71c..1b4d55c8d 100644 --- a/ci/tools.sh +++ b/ci/tools.sh @@ -30,7 +30,7 @@ if retry rustup component add rustfmt ; then fi if retry rustup component add clippy ; then - cargo clippy --all --tests --features serde,rayon -- -D clippy::all + cargo clippy --all --tests --features serde,rayon -- -D warnings fi if command -v shellcheck ; then diff --git a/clippy.toml b/clippy.toml index d98bf2c09..10719990a 100644 --- a/clippy.toml +++ b/clippy.toml @@ -1 +1 @@ -doc-valid-idents = [ "CppCon", "SwissTable", "SipHash", "HashDoS" ] +doc-valid-idents = ["CppCon", "SwissTable", "SipHash", "HashDoS"] diff --git a/src/control/bitmask.rs b/src/control/bitmask.rs index cfacfce67..15f97a18f 100644 --- a/src/control/bitmask.rs +++ b/src/control/bitmask.rs @@ -1,6 +1,4 @@ -use super::group::{ - BitMaskWord, NonZeroBitMaskWord, BITMASK_ITER_MASK, BITMASK_MASK, BITMASK_STRIDE, -}; +use super::group::{BitMaskWord, NonZeroBitMaskWord, BITMASK_ITER_MASK, BITMASK_STRIDE}; /// A bit mask which contains the result of a `Match` operation on a `Group` and /// allows iterating through them. @@ -21,16 +19,8 @@ use super::group::{ #[derive(Copy, Clone)] pub(crate) struct BitMask(pub(crate) BitMaskWord); -#[allow(clippy::use_self)] +#[expect(clippy::use_self)] impl BitMask { - /// Returns a new `BitMask` with all bits inverted. - #[inline] - #[must_use] - #[allow(dead_code)] - pub(crate) fn invert(self) -> Self { - BitMask(self.0 ^ BITMASK_MASK) - } - /// Returns a new `BitMask` with the lowest bit removed. #[inline] #[must_use] diff --git a/src/control/group/generic.rs b/src/control/group/generic.rs index ecb81d278..3af8229ca 100644 --- a/src/control/group/generic.rs +++ b/src/control/group/generic.rs @@ -24,8 +24,8 @@ pub(crate) type BitMaskWord = GroupWord; pub(crate) type NonZeroBitMaskWord = NonZeroGroupWord; pub(crate) const BITMASK_STRIDE: usize = 8; // We only care about the highest bit of each tag for the mask. -#[allow(clippy::cast_possible_truncation, clippy::unnecessary_cast)] -pub(crate) const BITMASK_MASK: BitMaskWord = u64::from_ne_bytes([Tag::DELETED.0; 8]) as GroupWord; +#[expect(clippy::cast_possible_truncation, clippy::unnecessary_cast)] +const BITMASK_MASK: BitMaskWord = u64::from_ne_bytes([Tag::DELETED.0; 8]) as GroupWord; pub(crate) const BITMASK_ITER_MASK: BitMaskWord = !0; /// Helper function to replicate a tag across a `GroupWord`. @@ -45,7 +45,7 @@ pub(crate) struct Group(GroupWord); // little-endian just before creating a BitMask. The can potentially // enable the compiler to eliminate unnecessary byte swaps if we are // only checking whether a BitMask is empty. -#[allow(clippy::use_self)] +#[expect(clippy::use_self)] impl Group { /// Number of bytes in the group. pub(crate) const WIDTH: usize = mem::size_of::(); @@ -70,27 +70,29 @@ impl Group { /// Loads a group of tags starting at the given address. 
#[inline] - #[allow(clippy::cast_ptr_alignment)] // unaligned load + #[expect(clippy::cast_ptr_alignment)] // unaligned load pub(crate) unsafe fn load(ptr: *const Tag) -> Self { - Group(ptr::read_unaligned(ptr.cast())) + unsafe { Group(ptr::read_unaligned(ptr.cast())) } } /// Loads a group of tags starting at the given address, which must be /// aligned to `mem::align_of::()`. #[inline] - #[allow(clippy::cast_ptr_alignment)] + #[expect(clippy::cast_ptr_alignment)] pub(crate) unsafe fn load_aligned(ptr: *const Tag) -> Self { debug_assert_eq!(ptr.align_offset(mem::align_of::()), 0); - Group(ptr::read(ptr.cast())) + unsafe { Group(ptr::read(ptr.cast())) } } /// Stores the group of tags to the given address, which must be /// aligned to `mem::align_of::()`. #[inline] - #[allow(clippy::cast_ptr_alignment)] + #[expect(clippy::cast_ptr_alignment)] pub(crate) unsafe fn store_aligned(self, ptr: *mut Tag) { debug_assert_eq!(ptr.align_offset(mem::align_of::()), 0); - ptr::write(ptr.cast(), self.0); + unsafe { + ptr::write(ptr.cast(), self.0); + } } /// Returns a `BitMask` indicating all tags in the group which *may* @@ -132,7 +134,7 @@ impl Group { /// Returns a `BitMask` indicating all tags in the group which are full. #[inline] pub(crate) fn match_full(self) -> BitMask { - self.match_empty_or_deleted().invert() + BitMask(self.match_empty_or_deleted().0 ^ BITMASK_MASK) } /// Performs the following transformation on all tags in the group: diff --git a/src/control/group/lsx.rs b/src/control/group/lsx.rs index dcac09a0c..842c410af 100644 --- a/src/control/group/lsx.rs +++ b/src/control/group/lsx.rs @@ -7,7 +7,6 @@ use core::arch::loongarch64::*; pub(crate) type BitMaskWord = u16; pub(crate) type NonZeroBitMaskWord = NonZeroU16; pub(crate) const BITMASK_STRIDE: usize = 1; -pub(crate) const BITMASK_MASK: BitMaskWord = 0xffff; pub(crate) const BITMASK_ITER_MASK: BitMaskWord = !0; /// Abstraction over a group of control tags which can be scanned in @@ -18,7 +17,7 @@ pub(crate) const BITMASK_ITER_MASK: BitMaskWord = !0; pub(crate) struct Group(m128i); // FIXME: https://github.com/rust-lang/rust-clippy/issues/3859 -#[allow(clippy::use_self)] +#[expect(clippy::use_self)] impl Group { /// Number of bytes in the group. pub(crate) const WIDTH: usize = mem::size_of::(); @@ -28,7 +27,7 @@ impl Group { /// /// This is guaranteed to be aligned to the group size. #[inline] - #[allow(clippy::items_after_statements)] + #[expect(clippy::items_after_statements)] pub(crate) const fn static_empty() -> &'static [Tag; Group::WIDTH] { #[repr(C)] struct AlignedTags { @@ -44,27 +43,29 @@ impl Group { /// Loads a group of tags starting at the given address. #[inline] - #[allow(clippy::cast_ptr_alignment)] // unaligned load + #[expect(clippy::cast_ptr_alignment)] // unaligned load pub(crate) unsafe fn load(ptr: *const Tag) -> Self { - Group(lsx_vld::<0>(ptr.cast())) + unsafe { Group(lsx_vld::<0>(ptr.cast())) } } /// Loads a group of tags starting at the given address, which must be /// aligned to `mem::align_of::()`. #[inline] - #[allow(clippy::cast_ptr_alignment)] + #[expect(clippy::cast_ptr_alignment)] pub(crate) unsafe fn load_aligned(ptr: *const Tag) -> Self { debug_assert_eq!(ptr.align_offset(mem::align_of::()), 0); - Group(lsx_vld::<0>(ptr.cast())) + unsafe { Group(lsx_vld::<0>(ptr.cast())) } } /// Stores the group of tags to the given address, which must be /// aligned to `mem::align_of::()`. 
#[inline] - #[allow(clippy::cast_ptr_alignment)] + #[expect(clippy::cast_ptr_alignment)] pub(crate) unsafe fn store_aligned(self, ptr: *mut Tag) { debug_assert_eq!(ptr.align_offset(mem::align_of::()), 0); - lsx_vst::<0>(self.0, ptr.cast()); + unsafe { + lsx_vst::<0>(self.0, ptr.cast()); + } } /// Returns a `BitMask` indicating all tags in the group which have diff --git a/src/control/group/mod.rs b/src/control/group/mod.rs index fe2d77483..063f912af 100644 --- a/src/control/group/mod.rs +++ b/src/control/group/mod.rs @@ -38,6 +38,4 @@ cfg_if! { } } pub(crate) use self::imp::Group; -pub(super) use self::imp::{ - BitMaskWord, NonZeroBitMaskWord, BITMASK_ITER_MASK, BITMASK_MASK, BITMASK_STRIDE, -}; +pub(super) use self::imp::{BitMaskWord, NonZeroBitMaskWord, BITMASK_ITER_MASK, BITMASK_STRIDE}; diff --git a/src/control/group/neon.rs b/src/control/group/neon.rs index 9374cb388..6c5327e2a 100644 --- a/src/control/group/neon.rs +++ b/src/control/group/neon.rs @@ -6,7 +6,6 @@ use core::num::NonZeroU64; pub(crate) type BitMaskWord = u64; pub(crate) type NonZeroBitMaskWord = NonZeroU64; pub(crate) const BITMASK_STRIDE: usize = 8; -pub(crate) const BITMASK_MASK: BitMaskWord = !0; pub(crate) const BITMASK_ITER_MASK: BitMaskWord = 0x8080_8080_8080_8080; /// Abstraction over a group of control tags which can be scanned in @@ -16,7 +15,7 @@ pub(crate) const BITMASK_ITER_MASK: BitMaskWord = 0x8080_8080_8080_8080; #[derive(Copy, Clone)] pub(crate) struct Group(neon::uint8x8_t); -#[allow(clippy::use_self)] +#[expect(clippy::use_self)] impl Group { /// Number of bytes in the group. pub(crate) const WIDTH: usize = mem::size_of::(); @@ -41,27 +40,29 @@ impl Group { /// Loads a group of tags starting at the given address. #[inline] - #[allow(clippy::cast_ptr_alignment)] // unaligned load + #[expect(clippy::cast_ptr_alignment)] // unaligned load pub(crate) unsafe fn load(ptr: *const Tag) -> Self { - Group(neon::vld1_u8(ptr.cast())) + unsafe { Group(neon::vld1_u8(ptr.cast())) } } /// Loads a group of tags starting at the given address, which must be /// aligned to `mem::align_of::()`. #[inline] - #[allow(clippy::cast_ptr_alignment)] + #[expect(clippy::cast_ptr_alignment)] pub(crate) unsafe fn load_aligned(ptr: *const Tag) -> Self { debug_assert_eq!(ptr.align_offset(mem::align_of::()), 0); - Group(neon::vld1_u8(ptr.cast())) + unsafe { Group(neon::vld1_u8(ptr.cast())) } } /// Stores the group of tags to the given address, which must be /// aligned to `mem::align_of::()`. 
#[inline] - #[allow(clippy::cast_ptr_alignment)] + #[expect(clippy::cast_ptr_alignment)] pub(crate) unsafe fn store_aligned(self, ptr: *mut Tag) { debug_assert_eq!(ptr.align_offset(mem::align_of::()), 0); - neon::vst1_u8(ptr.cast(), self.0); + unsafe { + neon::vst1_u8(ptr.cast(), self.0); + } } /// Returns a `BitMask` indicating all tags in the group which *may* diff --git a/src/control/group/sse2.rs b/src/control/group/sse2.rs index 0d4b10822..2b12c0104 100644 --- a/src/control/group/sse2.rs +++ b/src/control/group/sse2.rs @@ -10,7 +10,6 @@ use core::arch::x86_64 as x86; pub(crate) type BitMaskWord = u16; pub(crate) type NonZeroBitMaskWord = NonZeroU16; pub(crate) const BITMASK_STRIDE: usize = 1; -pub(crate) const BITMASK_MASK: BitMaskWord = 0xffff; pub(crate) const BITMASK_ITER_MASK: BitMaskWord = !0; /// Abstraction over a group of control tags which can be scanned in @@ -21,7 +20,7 @@ pub(crate) const BITMASK_ITER_MASK: BitMaskWord = !0; pub(crate) struct Group(x86::__m128i); // FIXME: https://github.com/rust-lang/rust-clippy/issues/3859 -#[allow(clippy::use_self)] +#[expect(clippy::use_self)] impl Group { /// Number of bytes in the group. pub(crate) const WIDTH: usize = mem::size_of::(); @@ -31,7 +30,6 @@ impl Group { /// /// This is guaranteed to be aligned to the group size. #[inline] - #[allow(clippy::items_after_statements)] pub(crate) const fn static_empty() -> &'static [Tag; Group::WIDTH] { #[repr(C)] struct AlignedTags { @@ -47,34 +45,33 @@ impl Group { /// Loads a group of tags starting at the given address. #[inline] - #[allow(clippy::cast_ptr_alignment)] // unaligned load pub(crate) unsafe fn load(ptr: *const Tag) -> Self { - Group(x86::_mm_loadu_si128(ptr.cast())) + unsafe { Group(x86::_mm_loadu_si128(ptr.cast())) } } /// Loads a group of tags starting at the given address, which must be /// aligned to `mem::align_of::()`. #[inline] - #[allow(clippy::cast_ptr_alignment)] pub(crate) unsafe fn load_aligned(ptr: *const Tag) -> Self { debug_assert_eq!(ptr.align_offset(mem::align_of::()), 0); - Group(x86::_mm_load_si128(ptr.cast())) + unsafe { Group(x86::_mm_load_si128(ptr.cast())) } } /// Stores the group of tags to the given address, which must be /// aligned to `mem::align_of::()`. #[inline] - #[allow(clippy::cast_ptr_alignment)] pub(crate) unsafe fn store_aligned(self, ptr: *mut Tag) { debug_assert_eq!(ptr.align_offset(mem::align_of::()), 0); - x86::_mm_store_si128(ptr.cast(), self.0); + unsafe { + x86::_mm_store_si128(ptr.cast(), self.0); + } } /// Returns a `BitMask` indicating all tags in the group which have /// the given value. #[inline] pub(crate) fn match_tag(self, tag: Tag) -> BitMask { - #[allow( + #[expect( clippy::cast_possible_wrap, // tag.0: Tag as i8 // tag: i32 as u16 // note: _mm_movemask_epi8 returns a 16-bit mask in a i32, the @@ -99,7 +96,7 @@ impl Group { /// `EMPTY` or `DELETED`. #[inline] pub(crate) fn match_empty_or_deleted(self) -> BitMask { - #[allow( + #[expect( // tag: i32 as u16 // note: _mm_movemask_epi8 returns a 16-bit mask in a i32, the // upper 16-bits of the i32 are zeroed: @@ -115,7 +112,7 @@ impl Group { /// Returns a `BitMask` indicating all tags in the group which are full. 
#[inline] pub(crate) fn match_full(&self) -> BitMask { - self.match_empty_or_deleted().invert() + BitMask(!self.match_empty_or_deleted().0) } /// Performs the following transformation on all tags in the group: @@ -131,7 +128,7 @@ impl Group { // let special = 0 > tag = 1111_1111 (true) or 0000_0000 (false) // 1111_1111 | 1000_0000 = 1111_1111 // 0000_0000 | 1000_0000 = 1000_0000 - #[allow( + #[expect( clippy::cast_possible_wrap, // tag: Tag::DELETED.0 as i8 )] unsafe { diff --git a/src/control/tag.rs b/src/control/tag.rs index 817dd55cd..95c7640cb 100644 --- a/src/control/tag.rs +++ b/src/control/tag.rs @@ -32,7 +32,6 @@ impl Tag { /// Creates a control tag representing a full bucket with the given hash. #[inline] - #[allow(clippy::cast_possible_truncation)] pub(crate) const fn full(hash: u64) -> Tag { // Constant for function that grabs the top 7 bits of the hash. const MIN_HASH_LEN: usize = if mem::size_of::() < mem::size_of::() { diff --git a/src/external_trait_impls/rayon/helpers.rs b/src/external_trait_impls/rayon/helpers.rs index 4420af3a2..d934264c0 100644 --- a/src/external_trait_impls/rayon/helpers.rs +++ b/src/external_trait_impls/rayon/helpers.rs @@ -4,7 +4,7 @@ use alloc::vec::Vec; use rayon::iter::{IntoParallelIterator, ParallelIterator}; /// Helper for collecting parallel iterators to an intermediary -#[allow(clippy::linkedlist)] // yes, we need linked list here for efficient appending! +#[expect(clippy::linkedlist)] // yes, we need linked list here for efficient appending! pub(super) fn collect(iter: I) -> (LinkedList>, usize) { let list = iter.into_par_iter().collect_vec_list(); diff --git a/src/external_trait_impls/rayon/map.rs b/src/external_trait_impls/rayon/map.rs index 9623ca747..e12080d0d 100644 --- a/src/external_trait_impls/rayon/map.rs +++ b/src/external_trait_impls/rayon/map.rs @@ -346,7 +346,7 @@ where self.len() == other.len() && self .into_par_iter() - .all(|(key, value)| other.get(key).map_or(false, |v| *value == *v)) + .all(|(key, value)| other.get(key).is_some_and(|v| *value == *v)) } } @@ -455,7 +455,7 @@ where // Reserve the entire length if the map is empty. // Otherwise reserve half the length (rounded up), so the map // will only resize twice in the worst case. - let reserve = if map.is_empty() { len } else { (len + 1) / 2 }; + let reserve = if map.is_empty() { len } else { len.div_ceil(2) }; map.reserve(reserve); for vec in list { map.extend(vec); diff --git a/src/external_trait_impls/rayon/raw.rs b/src/external_trait_impls/rayon/raw.rs index dbe1cdef7..1e2a5cc7f 100644 --- a/src/external_trait_impls/rayon/raw.rs +++ b/src/external_trait_impls/rayon/raw.rs @@ -82,7 +82,7 @@ pub(crate) struct RawIntoParIter { impl RawIntoParIter { #[cfg_attr(feature = "inline-more", inline)] pub(super) unsafe fn par_iter(&self) -> RawParIter { - self.table.par_iter() + unsafe { self.table.par_iter() } } } @@ -120,7 +120,7 @@ unsafe impl Send for RawParDrain<'_, T, A> {} impl RawParDrain<'_, T, A> { #[cfg_attr(feature = "inline-more", inline)] pub(super) unsafe fn par_iter(&self) -> RawParIter { - self.table.as_ref().par_iter() + unsafe { self.table.as_ref().par_iter() } } } @@ -207,8 +207,10 @@ impl RawTable { /// Returns a parallel iterator over the elements in a `RawTable`. 
#[cfg_attr(feature = "inline-more", inline)] pub(crate) unsafe fn par_iter(&self) -> RawParIter { - RawParIter { - iter: self.iter().iter, + unsafe { + RawParIter { + iter: self.iter().iter, + } } } diff --git a/src/external_trait_impls/rayon/set.rs b/src/external_trait_impls/rayon/set.rs index 3de98fccb..8a3bfa481 100644 --- a/src/external_trait_impls/rayon/set.rs +++ b/src/external_trait_impls/rayon/set.rs @@ -384,7 +384,7 @@ where // Reserve the entire length if the set is empty. // Otherwise reserve half the length (rounded up), so the set // will only resize twice in the worst case. - let reserve = if set.is_empty() { len } else { (len + 1) / 2 }; + let reserve = if set.is_empty() { len } else { len.div_ceil(2) }; set.reserve(reserve); for vec in list { set.extend(vec); diff --git a/src/external_trait_impls/serde.rs b/src/external_trait_impls/serde.rs index 85b6170b8..92614fe04 100644 --- a/src/external_trait_impls/serde.rs +++ b/src/external_trait_impls/serde.rs @@ -177,7 +177,6 @@ mod set { deserializer.deserialize_seq(visitor) } - #[allow(clippy::missing_errors_doc)] fn deserialize_in_place(deserializer: D, place: &mut Self) -> Result<(), D::Error> where D: Deserializer<'de>, diff --git a/src/lib.rs b/src/lib.rs index c5faec43a..2b3539cdb 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -25,24 +25,9 @@ strict_provenance_lints ) )] -#![cfg_attr(feature = "rustc-dep-of-std", feature(rustc_attrs))] -#![allow( - clippy::doc_markdown, - clippy::module_name_repetitions, - clippy::must_use_candidate, - clippy::option_if_let_else, - clippy::redundant_else, - clippy::manual_map, - clippy::missing_safety_doc, - clippy::missing_errors_doc -)] -#![warn(missing_docs)] -#![warn(rust_2018_idioms)] #![cfg_attr(feature = "nightly", warn(fuzzy_provenance_casts))] -#![cfg_attr( - feature = "nightly", - allow(clippy::incompatible_msrv, internal_features) -)] +#![cfg_attr(feature = "rustc-dep-of-std", feature(rustc_attrs))] +#![cfg_attr(feature = "nightly", expect(internal_features))] #![cfg_attr( all(feature = "nightly", target_arch = "loongarch64"), feature(stdarch_loongarch) diff --git a/src/macros.rs b/src/macros.rs index eaba6bed1..7177d2212 100644 --- a/src/macros.rs +++ b/src/macros.rs @@ -1,5 +1,5 @@ // See the cfg-if crate. -#[allow(unused_macro_rules)] +#[expect(unused_macro_rules)] macro_rules! 
cfg_if { // match if/else chains with a final `else` ($( diff --git a/src/map.rs b/src/map.rs index f37bafa02..76c5c2b59 100644 --- a/src/map.rs +++ b/src/map.rs @@ -228,7 +228,7 @@ where /// Ensures that a single closure type across uses of this which, in turn prevents multiple /// instances of any functions like `RawTable::reserve` from being generated #[cfg_attr(feature = "inline-more", inline)] -#[allow(dead_code)] +#[cfg(feature = "raw-entry")] pub(crate) fn equivalent(k: &Q) -> impl Fn(&K) -> bool + '_ where Q: Equivalent + ?Sized, @@ -236,20 +236,6 @@ where move |x| k.equivalent(x) } -#[cfg(not(feature = "nightly"))] -#[cfg_attr(feature = "inline-more", inline)] -pub(crate) fn make_hash(hash_builder: &S, val: &Q) -> u64 -where - Q: Hash + ?Sized, - S: BuildHasher, -{ - use core::hash::Hasher; - let mut state = hash_builder.build_hasher(); - val.hash(&mut state); - state.finish() -} - -#[cfg(feature = "nightly")] #[cfg_attr(feature = "inline-more", inline)] pub(crate) fn make_hash(hash_builder: &S, val: &Q) -> u64 where @@ -1608,8 +1594,10 @@ where where Q: Hash + Equivalent + ?Sized, { - self.get_disjoint_unchecked_mut_inner(ks) - .map(|res| res.map(|(_, v)| v)) + unsafe { + self.get_disjoint_unchecked_mut_inner(ks) + .map(|res| res.map(|(_, v)| v)) + } } /// Attempts to get mutable references to `N` values in the map at once, without validating that @@ -1622,7 +1610,7 @@ where where Q: Hash + Equivalent + ?Sized, { - self.get_disjoint_unchecked_mut(ks) + unsafe { self.get_disjoint_unchecked_mut(ks) } } /// Attempts to get mutable references to `N` values in the map at once, with immutable @@ -1760,8 +1748,10 @@ where where Q: Hash + Equivalent + ?Sized, { - self.get_disjoint_unchecked_mut_inner(ks) - .map(|res| res.map(|(k, v)| (&*k, v))) + unsafe { + self.get_disjoint_unchecked_mut_inner(ks) + .map(|res| res.map(|(k, v)| (&*k, v))) + } } /// Attempts to get mutable references to `N` values in the map at once, with immutable @@ -1774,7 +1764,7 @@ where where Q: Hash + Equivalent + ?Sized, { - self.get_disjoint_key_value_unchecked_mut(ks) + unsafe { self.get_disjoint_key_value_unchecked_mut(ks) } } fn get_disjoint_mut_inner( @@ -1796,9 +1786,11 @@ where where Q: Hash + Equivalent + ?Sized, { - let hashes = self.build_hashes_inner(ks); - self.table - .get_disjoint_unchecked_mut(hashes, |i, (k, _)| ks[i].equivalent(k)) + unsafe { + let hashes = self.build_hashes_inner(ks); + self.table + .get_disjoint_unchecked_mut(hashes, |i, (k, _)| ks[i].equivalent(k)) + } } fn build_hashes_inner(&self, ks: [&Q; N]) -> [u64; N] @@ -2074,7 +2066,7 @@ where } self.iter() - .all(|(key, value)| other.get(key).map_or(false, |v| *value == *v)) + .all(|(key, value)| other.get(key).is_some_and(|v| *value == *v)) } } @@ -4732,7 +4724,7 @@ where let reserve = if self.is_empty() { iter.size_hint().0 } else { - (iter.size_hint().0 + 1) / 2 + iter.size_hint().0.div_ceil(2) }; self.reserve(reserve); iter.for_each(move |(k, v)| { @@ -4756,7 +4748,7 @@ where let reserve = if self.is_empty() { additional } else { - (additional + 1) / 2 + additional.div_ceil(2) }; self.reserve(reserve); } @@ -4887,7 +4879,7 @@ where } } -#[allow(dead_code)] +#[expect(dead_code)] fn assert_covariance() { fn map_key<'new>(v: HashMap<&'static str, u8>) -> HashMap<&'new str, u8> { v @@ -5009,7 +5001,6 @@ mod test_map { assert_eq!(m.len(), 1); assert!(m.insert(2, 4).is_none()); assert_eq!(m.len(), 2); - #[allow(clippy::redundant_clone)] let m2 = m.clone(); assert_eq!(*m2.get(&1).unwrap(), 2); assert_eq!(*m2.get(&2).unwrap(), 4); @@ 
-5808,8 +5799,8 @@ mod test_map { #[test] fn test_entry_take_doesnt_corrupt() { - #![allow(deprecated)] //rand - // Test for #19292 + #![expect(deprecated)] //rand + // Test for #19292 fn check(m: &HashMap) { for k in m.keys() { assert!(m.contains_key(k), "{k} is in keys() but not in the map?"); @@ -5844,8 +5835,8 @@ mod test_map { #[test] fn test_entry_ref_take_doesnt_corrupt() { - #![allow(deprecated)] //rand - // Test for #19292 + #![expect(deprecated)] //rand + // Test for #19292 fn check(m: &HashMap) { for k in m.keys() { assert!(m.contains_key(k), "{k} is in keys() but not in the map?"); @@ -5897,7 +5888,6 @@ mod test_map { } #[test] - #[allow(clippy::needless_borrow)] fn test_extend_ref_kv_tuple() { use std::ops::AddAssign; let mut a = HashMap::new(); @@ -6119,8 +6109,8 @@ mod test_map { #[test] fn test_replace_entry_with_doesnt_corrupt() { - #![allow(deprecated)] //rand - // Test for #19292 + #![expect(deprecated)] //rand + // Test for #19292 fn check(m: &HashMap) { for k in m.keys() { assert!(m.contains_key(k), "{k} is in keys() but not in the map?"); @@ -6389,8 +6379,10 @@ mod test_map { } unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { - let g = Global; - g.deallocate(ptr, layout) + unsafe { + let g = Global; + g.deallocate(ptr, layout) + } } } @@ -6844,10 +6836,12 @@ mod test_map_with_mmap_allocations { } unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { - // If they allocated it with this layout, it must round correctly. - let size = self.fit_to_page_size(layout.size()).unwrap(); - let _result = libc::munmap(ptr.as_ptr().cast(), size); - debug_assert_eq!(0, _result) + unsafe { + // If they allocated it with this layout, it must round correctly. + let size = self.fit_to_page_size(layout.size()).unwrap(); + let _result = libc::munmap(ptr.as_ptr().cast(), size); + debug_assert_eq!(0, _result) + } } } diff --git a/src/raw/alloc.rs b/src/raw/alloc.rs index 936d13b7c..9bb45a5dc 100644 --- a/src/raw/alloc.rs +++ b/src/raw/alloc.rs @@ -9,12 +9,11 @@ pub(crate) use self::inner::{do_alloc, Allocator, Global}; #[cfg(feature = "nightly")] mod inner { #[cfg(test)] - pub use crate::alloc::alloc::AllocError; + pub(crate) use crate::alloc::alloc::AllocError; use crate::alloc::alloc::Layout; pub(crate) use crate::alloc::alloc::{Allocator, Global}; use core::ptr::NonNull; - #[allow(clippy::map_err_ignore)] pub(crate) fn do_alloc(alloc: &A, layout: Layout) -> Result, ()> { match alloc.allocate(layout) { Ok(ptr) => Ok(ptr), @@ -33,11 +32,10 @@ mod inner { mod inner { use crate::alloc::alloc::Layout; #[cfg(test)] - pub use allocator_api2::alloc::AllocError; + pub(crate) use allocator_api2::alloc::AllocError; pub(crate) use allocator_api2::alloc::{Allocator, Global}; use core::ptr::NonNull; - #[allow(clippy::map_err_ignore)] pub(crate) fn do_alloc(alloc: &A, layout: Layout) -> Result, ()> { match alloc.allocate(layout) { Ok(ptr) => Ok(ptr), @@ -59,7 +57,7 @@ mod inner { use crate::alloc::alloc::{alloc, dealloc, Layout}; use core::ptr::NonNull; - #[allow(clippy::missing_safety_doc)] // not exposed outside of this crate + #[expect(clippy::missing_safety_doc)] // not exposed outside of this crate pub unsafe trait Allocator { fn allocate(&self, layout: Layout) -> Result, ()>; unsafe fn deallocate(&self, ptr: NonNull, layout: Layout); @@ -86,7 +84,9 @@ mod inner { } #[inline] unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { - dealloc(ptr.as_ptr(), layout); + unsafe { + dealloc(ptr.as_ptr(), layout); + } } } diff --git a/src/raw/mod.rs b/src/raw/mod.rs index 
1f21feca5..0b35670af 100644 --- a/src/raw/mod.rs +++ b/src/raw/mod.rs @@ -18,7 +18,7 @@ pub(crate) use self::alloc::{do_alloc, Allocator, Global}; #[inline] unsafe fn offset_from(to: *const T, from: *const T) -> usize { - to.offset_from(from) as usize + unsafe { to.offset_from(from) as usize } } /// Whether memory allocation errors should return an error or abort. @@ -57,7 +57,7 @@ impl SizedTypeProperties for T {} /// Primary hash function, used to select the initial bucket to probe from. #[inline] -#[allow(clippy::cast_possible_truncation)] +#[expect(clippy::cast_possible_truncation)] fn h1(hash: u64) -> usize { // On 32-bit platforms we simply ignore the higher hash bits. hash as usize @@ -242,7 +242,7 @@ impl TableLayout { /// This is usually just a pointer to the element itself. However if the element /// is a ZST, then we instead track the index of the element in the table so /// that `erase` works properly. -pub struct Bucket { +pub(crate) struct Bucket { // Actually it is pointer to next element than element itself // this is needed to maintain pointer arithmetic invariants // keeping direct pointer to element introduces difficulty. @@ -336,10 +336,10 @@ impl Bucket { // (see TableLayout::calculate_layout_for method) invalid_mut(index + 1) } else { - base.as_ptr().sub(index) + unsafe { base.as_ptr().sub(index) } }; Self { - ptr: NonNull::new_unchecked(ptr), + ptr: unsafe { NonNull::new_unchecked(ptr) }, } } @@ -408,7 +408,7 @@ impl Bucket { // this can not be UB self.ptr.as_ptr() as usize - 1 } else { - offset_from(base.as_ptr(), self.ptr.as_ptr()) + unsafe { offset_from(base.as_ptr(), self.ptr.as_ptr()) } } } @@ -492,10 +492,10 @@ impl Bucket { // invalid pointer is good enough for ZST invalid_mut(self.ptr.as_ptr() as usize + offset) } else { - self.ptr.as_ptr().sub(offset) + unsafe { self.ptr.as_ptr().sub(offset) } }; Self { - ptr: NonNull::new_unchecked(ptr), + ptr: unsafe { NonNull::new_unchecked(ptr) }, } } @@ -516,7 +516,9 @@ impl Bucket { /// [`RawTable::erase`]: crate::raw::RawTable::erase #[cfg_attr(feature = "inline-more", inline)] pub(crate) unsafe fn drop(&self) { - self.as_ptr().drop_in_place(); + unsafe { + self.as_ptr().drop_in_place(); + } } /// Reads the `value` from `self` without moving it. This leaves the @@ -537,7 +539,7 @@ impl Bucket { /// [`RawTable::remove`]: crate::raw::RawTable::remove #[inline] pub(crate) unsafe fn read(&self) -> T { - self.as_ptr().read() + unsafe { self.as_ptr().read() } } /// Overwrites a memory location with the given `value` without reading @@ -559,7 +561,9 @@ impl Bucket { /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html #[inline] pub(crate) unsafe fn write(&self, val: T) { - self.as_ptr().write(val); + unsafe { + self.as_ptr().write(val); + } } /// Returns a shared immutable reference to the `value`. @@ -571,7 +575,7 @@ impl Bucket { /// [`NonNull::as_ref`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_ref #[inline] pub(crate) unsafe fn as_ref<'a>(&self) -> &'a T { - &*self.as_ptr() + unsafe { &*self.as_ptr() } } /// Returns a unique mutable reference to the `value`. 
@@ -592,7 +596,7 @@ impl Bucket { /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html #[inline] pub(crate) unsafe fn as_mut<'a>(&self) -> &'a mut T { - &mut *self.as_ptr() + unsafe { &mut *self.as_ptr() } } } @@ -676,12 +680,9 @@ impl RawTable { debug_assert!(buckets.is_power_of_two()); Ok(Self { - table: RawTableInner::new_uninitialized( - &alloc, - Self::TABLE_LAYOUT, - buckets, - fallibility, - )?, + table: unsafe { + RawTableInner::new_uninitialized(&alloc, Self::TABLE_LAYOUT, buckets, fallibility) + }?, alloc, marker: PhantomData, }) @@ -736,7 +737,7 @@ impl RawTable { #[inline] #[cfg(feature = "nightly")] pub(crate) unsafe fn data_start(&self) -> NonNull { - NonNull::new_unchecked(self.data_end().as_ptr().wrapping_sub(self.num_buckets())) + unsafe { NonNull::new_unchecked(self.data_end().as_ptr().wrapping_sub(self.num_buckets())) } } /// Returns the total amount of memory allocated internally by the hash @@ -754,7 +755,7 @@ impl RawTable { /// Returns the index of a bucket from a `Bucket`. #[inline] pub(crate) unsafe fn bucket_index(&self, bucket: &Bucket) -> usize { - bucket.to_base_index(self.data_end()) + unsafe { bucket.to_base_index(self.data_end()) } } /// Returns a pointer to an element in the table. @@ -809,33 +810,39 @@ impl RawTable { // of buckets is a power of two, and `self.table.bucket_mask = self.num_buckets() - 1`. debug_assert_ne!(self.table.bucket_mask, 0); debug_assert!(index < self.num_buckets()); - Bucket::from_base_index(self.data_end(), index) + unsafe { Bucket::from_base_index(self.data_end(), index) } } /// Erases an element from the table without dropping it. #[cfg_attr(feature = "inline-more", inline)] unsafe fn erase_no_drop(&mut self, item: &Bucket) { - let index = self.bucket_index(item); - self.table.erase(index); + unsafe { + let index = self.bucket_index(item); + self.table.erase(index); + } } /// Erases an element from the table, dropping it in place. #[cfg_attr(feature = "inline-more", inline)] - #[allow(clippy::needless_pass_by_value)] + #[expect(clippy::needless_pass_by_value)] pub(crate) unsafe fn erase(&mut self, item: Bucket) { - // Erase the element from the table first since drop might panic. - self.erase_no_drop(&item); - item.drop(); + unsafe { + // Erase the element from the table first since drop might panic. + self.erase_no_drop(&item); + item.drop(); + } } /// Removes an element from the table, returning it. /// /// This also returns an index to the newly free bucket. #[cfg_attr(feature = "inline-more", inline)] - #[allow(clippy::needless_pass_by_value)] + #[expect(clippy::needless_pass_by_value)] pub(crate) unsafe fn remove(&mut self, item: Bucket) -> (T, usize) { - self.erase_no_drop(&item); - (item.read(), self.bucket_index(&item)) + unsafe { + self.erase_no_drop(&item); + (item.read(), self.bucket_index(&item)) + } } /// Removes an element from the table, returning it. @@ -843,12 +850,14 @@ impl RawTable { /// This also returns an index to the newly free bucket /// and the former `Tag` for that bucket. 
#[cfg_attr(feature = "inline-more", inline)] - #[allow(clippy::needless_pass_by_value)] + #[expect(clippy::needless_pass_by_value)] pub(crate) unsafe fn remove_tagged(&mut self, item: Bucket) -> (T, usize, Tag) { - let index = self.bucket_index(&item); - let tag = *self.table.ctrl(index); - self.table.erase(index); - (item.read(), index, tag) + unsafe { + let index = self.bucket_index(&item); + let tag = *self.table.ctrl(index); + self.table.erase(index); + (item.read(), index, tag) + } } /// Finds and removes an element from the table, returning it. @@ -1058,13 +1067,15 @@ impl RawTable { // [`TableLayout`] that were used to allocate this table. // 3. The caller ensures that the control bytes of the `RawTableInner` // are already initialized. - self.table.resize_inner( - &self.alloc, - capacity, - &|table, index| hasher(table.bucket::(index).as_ref()), - fallibility, - Self::TABLE_LAYOUT, - ) + unsafe { + self.table.resize_inner( + &self.alloc, + capacity, + &|table, index| hasher(table.bucket::(index).as_ref()), + fallibility, + Self::TABLE_LAYOUT, + ) + } } /// Inserts a new element into the table, and returns its raw bucket. @@ -1117,16 +1128,18 @@ impl RawTable { #[cfg_attr(feature = "inline-more", inline)] #[cfg(feature = "rustc-internal-api")] pub(crate) unsafe fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket { - let (index, old_ctrl) = self.table.prepare_insert_index(hash); - let bucket = self.table.bucket(index); + unsafe { + let (index, old_ctrl) = self.table.prepare_insert_index(hash); + let bucket = self.table.bucket(index); - // If we are replacing a DELETED entry then we don't need to update - // the load counter. - self.table.growth_left -= old_ctrl.special_is_empty() as usize; + // If we are replacing a DELETED entry then we don't need to update + // the load counter. 
+ self.table.growth_left -= old_ctrl.special_is_empty() as usize; - bucket.write(value); - self.table.items += 1; - bucket + bucket.write(value); + self.table.items += 1; + bucket + } } /// Temporarily removes a bucket, applying the given function to the removed @@ -1140,19 +1153,21 @@ impl RawTable { where F: FnOnce(T) -> Option, { - let index = self.bucket_index(&bucket); - let old_ctrl = *self.table.ctrl(index); - debug_assert!(self.is_bucket_full(index)); - let old_growth_left = self.table.growth_left; - let item = self.remove(bucket).0; - if let Some(new_item) = f(item) { - self.table.growth_left = old_growth_left; - self.table.set_ctrl(index, old_ctrl); - self.table.items += 1; - self.bucket(index).write(new_item); - None - } else { - Some(old_ctrl) + unsafe { + let index = self.bucket_index(&bucket); + let old_ctrl = *self.table.ctrl(index); + debug_assert!(self.is_bucket_full(index)); + let old_growth_left = self.table.growth_left; + let item = self.remove(bucket).0; + if let Some(new_item) = f(item) { + self.table.growth_left = old_growth_left; + self.table.set_ctrl(index, old_ctrl); + self.table.items += 1; + self.bucket(index).write(new_item); + None + } else { + Some(old_ctrl) + } } } @@ -1205,7 +1220,7 @@ impl RawTable { index: usize, value: T, ) -> Bucket { - self.insert_tagged_at_index(Tag::full(hash), index, value) + unsafe { self.insert_tagged_at_index(Tag::full(hash), index, value) } } /// Inserts a new element into the table at the given index with the given tag, @@ -1223,12 +1238,14 @@ impl RawTable { index: usize, value: T, ) -> Bucket { - let old_ctrl = *self.table.ctrl(index); - self.table.record_item_insert_at(index, old_ctrl, tag); + unsafe { + let old_ctrl = *self.table.ctrl(index); + self.table.record_item_insert_at(index, old_ctrl, tag); - let bucket = self.bucket(index); - bucket.write(value); - bucket + let bucket = self.bucket(index); + bucket.write(value); + bucket + } } /// Searches for an element in the table. @@ -1345,8 +1362,8 @@ impl RawTable { hashes: [u64; N], eq: impl FnMut(usize, &T) -> bool, ) -> [Option<&'_ mut T>; N] { - let ptrs = self.get_disjoint_mut_pointers(hashes, eq); - ptrs.map(|ptr| ptr.map(|mut ptr| ptr.as_mut())) + let ptrs = unsafe { self.get_disjoint_mut_pointers(hashes, eq) }; + ptrs.map(|ptr| ptr.map(|mut ptr| unsafe { ptr.as_mut() })) } unsafe fn get_disjoint_mut_pointers( @@ -1394,7 +1411,7 @@ impl RawTable { /// The caller must ensure `index` is less than the number of buckets. #[inline] pub(crate) unsafe fn is_bucket_full(&self, index: usize) -> bool { - self.table.is_bucket_full(index) + unsafe { self.table.is_bucket_full(index) } } /// Returns an iterator over every element in the table. It is up to @@ -1407,7 +1424,7 @@ impl RawTable { // 1. The caller must uphold the safety contract for `iter` method. // 2. The [`RawTableInner`] must already have properly initialized control bytes since // we will never expose RawTable::new_uninitialized in a public API. - self.table.iter() + unsafe { self.table.iter() } } /// Returns an iterator over occupied buckets that could match a given hash. @@ -1421,7 +1438,7 @@ impl RawTable { /// `RawIterHash` struct, we have to make the `iter_hash` method unsafe. #[cfg_attr(feature = "inline-more", inline)] pub(crate) unsafe fn iter_hash(&self, hash: u64) -> RawIterHash { - RawIterHash::new(self, hash) + unsafe { RawIterHash::new(self, hash) } } /// Returns an iterator over occupied bucket indices that could match a given hash. 
@@ -1435,7 +1452,7 @@ impl RawTable { /// `RawIterHashIndices` struct, we have to make the `iter_hash_buckets` method unsafe. #[cfg_attr(feature = "inline-more", inline)] pub(crate) unsafe fn iter_hash_buckets(&self, hash: u64) -> RawIterHashIndices { - RawIterHashIndices::new(&self.table, hash) + unsafe { RawIterHashIndices::new(&self.table, hash) } } /// Returns an iterator over full buckets indices in the table. @@ -1443,7 +1460,7 @@ impl RawTable { /// See [`RawTableInner::full_buckets_indices`] for safety conditions. #[inline(always)] pub(crate) unsafe fn full_buckets_indices(&self) -> FullBucketsIndices { - self.table.full_buckets_indices() + unsafe { self.table.full_buckets_indices() } } /// Returns an iterator which removes all elements from the table without @@ -1646,7 +1663,7 @@ impl RawTableInner { }; // SAFETY: null pointer will be caught in above check - let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset)); + let ctrl = unsafe { NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset)) }; Ok(Self { ctrl, bucket_mask: buckets - 1, @@ -1759,7 +1776,7 @@ impl RawTableInner { #[inline] unsafe fn fix_insert_index(&self, mut index: usize) -> usize { // SAFETY: The caller of this function ensures that `index` is in the range `0..=self.bucket_mask`. - if unlikely(self.is_bucket_full(index)) { + if unlikely(unsafe { self.is_bucket_full(index) }) { debug_assert!(self.bucket_mask < Group::WIDTH); // SAFETY: // @@ -1778,10 +1795,12 @@ impl RawTableInner { // the range of the table are filled with EMPTY bytes (and we know for sure that there // is at least one FULL bucket), so this second scan either finds an empty slot (due to // the load factor) or hits the trailing control bytes (containing EMPTY). - index = Group::load_aligned(self.ctrl(0)) - .match_empty_or_deleted() - .lowest_set_bit() - .unwrap_unchecked(); + index = unsafe { + Group::load_aligned(self.ctrl(0)) + .match_empty_or_deleted() + .lowest_set_bit() + .unwrap_unchecked() + }; } index } @@ -1958,17 +1977,19 @@ impl RawTableInner { /// [`RawTableInner::find_insert_index`]: RawTableInner::find_insert_index #[inline] unsafe fn prepare_insert_index(&mut self, hash: u64) -> (usize, Tag) { - // SAFETY: Caller of this function ensures that the control bytes are properly initialized. - let index: usize = self.find_insert_index(hash); - // SAFETY: - // 1. The `find_insert_index` function either returns an `index` less than or - // equal to `self.num_buckets() = self.bucket_mask + 1` of the table, or never - // returns if it cannot find an empty or deleted slot. - // 2. The caller of this function guarantees that the table has already been - // allocated - let old_ctrl = *self.ctrl(index); - self.set_ctrl_hash(index, hash); - (index, old_ctrl) + unsafe { + // SAFETY: Caller of this function ensures that the control bytes are properly initialized. + let index: usize = self.find_insert_index(hash); + // SAFETY: + // 1. The `find_insert_index` function either returns an `index` less than or + // equal to `self.num_buckets() = self.bucket_mask + 1` of the table, or never + // returns if it cannot find an empty or deleted slot. + // 2. 
The caller of this function guarantees that the table has already been + // allocated + let old_ctrl = *self.ctrl(index); + self.set_ctrl_hash(index, hash); + (index, old_ctrl) + } } /// Searches for an empty or deleted bucket which is suitable for inserting @@ -2128,7 +2149,6 @@ impl RawTableInner { /// /// [`Bucket::as_ptr`]: Bucket::as_ptr /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html - #[allow(clippy::mut_mut)] #[inline] unsafe fn prepare_rehash_in_place(&mut self) { // Bulk convert all full control bytes to DELETED, and all DELETED control bytes to EMPTY. @@ -2141,29 +2161,32 @@ impl RawTableInner { // 3. The caller of this function guarantees that [`RawTableInner`] has already been allocated; // 4. We can use `Group::load_aligned` and `Group::store_aligned` here since we start from 0 // and go to the end with a step equal to `Group::WIDTH` (see TableLayout::calculate_layout_for). - for i in (0..self.num_buckets()).step_by(Group::WIDTH) { - let group = Group::load_aligned(self.ctrl(i)); - let group = group.convert_special_to_empty_and_full_to_deleted(); - group.store_aligned(self.ctrl(i)); + unsafe { + for i in (0..self.num_buckets()).step_by(Group::WIDTH) { + let group = Group::load_aligned(self.ctrl(i)); + let group = group.convert_special_to_empty_and_full_to_deleted(); + group.store_aligned(self.ctrl(i)); + } } // Fix up the trailing control bytes. See the comments in set_ctrl // for the handling of tables smaller than the group width. - // - // SAFETY: The caller of this function guarantees that [`RawTableInner`] - // has already been allocated if unlikely(self.num_buckets() < Group::WIDTH) { // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of control bytes, // so copying `self.num_buckets() == self.bucket_mask + 1` bytes with offset equal to // `Group::WIDTH` is safe - self.ctrl(0) - .copy_to(self.ctrl(Group::WIDTH), self.num_buckets()); + unsafe { + self.ctrl(0) + .copy_to(self.ctrl(Group::WIDTH), self.num_buckets()); + } } else { // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of // control bytes,so copying `Group::WIDTH` bytes with offset equal // to `self.num_buckets() == self.bucket_mask + 1` is safe - self.ctrl(0) - .copy_to(self.ctrl(self.num_buckets()), Group::WIDTH); + unsafe { + self.ctrl(0) + .copy_to(self.ctrl(self.num_buckets()), Group::WIDTH); + } } } @@ -2213,11 +2236,13 @@ impl RawTableInner { // // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.num_buckets()` because the number // of buckets is a power of two, and `self.bucket_mask = self.num_buckets() - 1`. - let data = Bucket::from_base_index(self.data_end(), 0); - RawIter { - // SAFETY: See explanation above - iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.num_buckets()), - items: self.items, + unsafe { + let data = Bucket::from_base_index(self.data_end(), 0); + RawIter { + // SAFETY: See explanation above + iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.num_buckets()), + items: self.items, + } } } @@ -2264,10 +2289,12 @@ impl RawTableInner { // SAFETY: We know for sure that RawTableInner will outlive the // returned `RawIter` iterator, and the caller of this function // must uphold the safety contract for `drop_elements` method. - for item in self.iter::() { - // SAFETY: The caller must uphold the safety contract for - // `drop_elements` method. 
- item.drop(); + unsafe { + for item in self.iter::() { + // SAFETY: The caller must uphold the safety contract for + // `drop_elements` method. + item.drop(); + } } } } @@ -2320,12 +2347,14 @@ impl RawTableInner { /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html unsafe fn drop_inner_table(&mut self, alloc: &A, table_layout: TableLayout) { if !self.is_empty_singleton() { + // SAFETY: The caller must uphold the safety contract for `drop_inner_table` method. unsafe { - // SAFETY: The caller must uphold the safety contract for `drop_inner_table` method. self.drop_elements::(); - // SAFETY: - // 1. We have checked that our table is allocated. - // 2. The caller must uphold the safety contract for `drop_inner_table` method. + } + // SAFETY: + // 1. We have checked that our table is allocated. + // 2. The caller must uphold the safety contract for `drop_inner_table` method. + unsafe { self.free_buckets(alloc, table_layout); } } @@ -2392,7 +2421,7 @@ impl RawTableInner { unsafe fn bucket(&self, index: usize) -> Bucket { debug_assert_ne!(self.bucket_mask, 0); debug_assert!(index < self.num_buckets()); - Bucket::from_base_index(self.data_end(), index) + unsafe { Bucket::from_base_index(self.data_end(), index) } } /// Returns a raw `*mut u8` pointer to the start of the `data` element in the table @@ -2445,8 +2474,10 @@ impl RawTableInner { unsafe fn bucket_ptr(&self, index: usize, size_of: usize) -> *mut u8 { debug_assert_ne!(self.bucket_mask, 0); debug_assert!(index < self.num_buckets()); - let base: *mut u8 = self.data_end().as_ptr(); - base.sub((index + 1) * size_of) + unsafe { + let base: *mut u8 = self.data_end().as_ptr(); + base.sub((index + 1) * size_of) + } } /// Returns pointer to one past last `data` element in the table as viewed from @@ -2507,7 +2538,9 @@ impl RawTableInner { #[inline] unsafe fn record_item_insert_at(&mut self, index: usize, old_ctrl: Tag, new_ctrl: Tag) { self.growth_left -= usize::from(old_ctrl.special_is_empty()); - self.set_ctrl(index, new_ctrl); + unsafe { + self.set_ctrl(index, new_ctrl); + } self.items += 1; } @@ -2548,8 +2581,10 @@ impl RawTableInner { /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] unsafe fn set_ctrl_hash(&mut self, index: usize, hash: u64) { - // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl_hash`] - self.set_ctrl(index, Tag::full(hash)); + unsafe { + // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl_hash`] + self.set_ctrl(index, Tag::full(hash)); + } } /// Replaces the hash in the control byte at the given index with the provided one, @@ -2582,10 +2617,12 @@ impl RawTableInner { /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] unsafe fn replace_ctrl_hash(&mut self, index: usize, hash: u64) -> Tag { - // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::replace_ctrl_hash`] - let prev_ctrl = *self.ctrl(index); - self.set_ctrl_hash(index, hash); - prev_ctrl + unsafe { + // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::replace_ctrl_hash`] + let prev_ctrl = *self.ctrl(index); + self.set_ctrl_hash(index, hash); + prev_ctrl + } } /// Sets a control byte, and possibly also the replicated control byte at @@ -2641,8 +2678,10 @@ impl RawTableInner { let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH; // SAFETY: The caller must uphold 
the safety rules for the [`RawTableInner::set_ctrl`] - *self.ctrl(index) = ctrl; - *self.ctrl(index2) = ctrl; + unsafe { + *self.ctrl(index) = ctrl; + *self.ctrl(index2) = ctrl; + } } /// Returns a pointer to a control byte. @@ -2672,7 +2711,7 @@ impl RawTableInner { unsafe fn ctrl(&self, index: usize) -> *mut Tag { debug_assert!(index < self.num_ctrl_bytes()); // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::ctrl`] - self.ctrl.as_ptr().add(index).cast() + unsafe { self.ctrl.as_ptr().add(index).cast() } } /// Gets the slice of all control bytes. @@ -2694,7 +2733,7 @@ impl RawTableInner { #[inline] unsafe fn is_bucket_full(&self, index: usize) -> bool { debug_assert!(index < self.num_buckets()); - (*self.ctrl(index)).is_full() + unsafe { (*self.ctrl(index)).is_full() } } #[inline] @@ -2729,7 +2768,6 @@ impl RawTableInner { /// by this function results in [`undefined behavior`]. /// /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html - #[allow(clippy::mut_mut)] #[inline] fn prepare_resize<'a, A>( &self, @@ -2786,7 +2824,7 @@ impl RawTableInner { /// * The [`RawTableInner`] must have properly initialized control bytes. /// /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html - #[allow(clippy::inline_always)] + #[expect(clippy::inline_always)] #[inline(always)] unsafe fn reserve_rehash_inner( &mut self, @@ -2819,7 +2857,9 @@ impl RawTableInner { // used to allocate this table. // 4. The caller ensures that the control bytes of the `RawTableInner` // are already initialized. - self.rehash_in_place(hasher, layout.size, drop); + unsafe { + self.rehash_in_place(hasher, layout.size, drop); + } Ok(()) } else { // Otherwise, conservatively resize to at least the next size up @@ -2831,13 +2871,15 @@ impl RawTableInner { // [`TableLayout`] that were used to allocate this table. // 3. The caller ensures that the control bytes of the `RawTableInner` // are already initialized. - self.resize_inner( - alloc, - usize::max(new_items, full_capacity + 1), - hasher, - fallibility, - layout, - ) + unsafe { + self.resize_inner( + alloc, + usize::max(new_items, full_capacity + 1), + hasher, + fallibility, + layout, + ) + } } } @@ -2874,17 +2916,19 @@ impl RawTableInner { // // where: T0...T_n - our stored data; // CT0...CT_n - control bytes or metadata for `data`. - let ctrl = NonNull::new_unchecked(self.ctrl(0).cast::()); + unsafe { + let ctrl = NonNull::new_unchecked(self.ctrl(0).cast::()); - FullBucketsIndices { - // Load the first group - // SAFETY: See explanation above. - current_group: Group::load_aligned(ctrl.as_ptr().cast()) - .match_full() - .into_iter(), - group_first_index: 0, - ctrl, - items: self.items, + FullBucketsIndices { + // Load the first group + // SAFETY: See explanation above. 
+ current_group: Group::load_aligned(ctrl.as_ptr().cast()) + .match_full() + .into_iter(), + group_first_index: 0, + ctrl, + items: self.items, + } } } @@ -2928,7 +2972,7 @@ impl RawTableInner { /// /// [`RawTableInner::find_insert_index`]: RawTableInner::find_insert_index /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html - #[allow(clippy::inline_always)] + #[expect(clippy::inline_always)] #[inline(always)] unsafe fn resize_inner( &mut self, @@ -2948,42 +2992,44 @@ impl RawTableInner { // SAFETY: We know for sure that RawTableInner will outlive the // returned `FullBucketsIndices` iterator, and the caller of this // function ensures that the control bytes are properly initialized. - for full_byte_index in self.full_buckets_indices() { - // This may panic. - let hash = hasher(self, full_byte_index); + unsafe { + for full_byte_index in self.full_buckets_indices() { + // This may panic. + let hash = hasher(self, full_byte_index); - // SAFETY: - // We can use a simpler version of insert() here since: - // 1. There are no DELETED entries. - // 2. We know there is enough space in the table. - // 3. All elements are unique. - // 4. The caller of this function guarantees that `capacity > 0` - // so `new_table` must already have some allocated memory. - // 5. We set `growth_left` and `items` fields of the new table - // after the loop. - // 6. We insert into the table, at the returned index, the data - // matching the given hash immediately after calling this function. - let (new_index, _) = new_table.prepare_insert_index(hash); + // SAFETY: + // We can use a simpler version of insert() here since: + // 1. There are no DELETED entries. + // 2. We know there is enough space in the table. + // 3. All elements are unique. + // 4. The caller of this function guarantees that `capacity > 0` + // so `new_table` must already have some allocated memory. + // 5. We set `growth_left` and `items` fields of the new table + // after the loop. + // 6. We insert into the table, at the returned index, the data + // matching the given hash immediately after calling this function. + let (new_index, _) = new_table.prepare_insert_index(hash); - // SAFETY: - // - // * `src` is valid for reads of `layout.size` bytes, since the - // table is alive and the `full_byte_index` is guaranteed to be - // within bounds (see `FullBucketsIndices::next_impl`); - // - // * `dst` is valid for writes of `layout.size` bytes, since the - // caller ensures that `table_layout` matches the [`TableLayout`] - // that was used to allocate old table and we have the `new_index` - // returned by `prepare_insert_index`. - // - // * Both `src` and `dst` are properly aligned. - // - // * Both `src` and `dst` point to different region of memory. - ptr::copy_nonoverlapping( - self.bucket_ptr(full_byte_index, layout.size), - new_table.bucket_ptr(new_index, layout.size), - layout.size, - ); + // SAFETY: + // + // * `src` is valid for reads of `layout.size` bytes, since the + // table is alive and the `full_byte_index` is guaranteed to be + // within bounds (see `FullBucketsIndices::next_impl`); + // + // * `dst` is valid for writes of `layout.size` bytes, since the + // caller ensures that `table_layout` matches the [`TableLayout`] + // that was used to allocate old table and we have the `new_index` + // returned by `prepare_insert_index`. + // + // * Both `src` and `dst` are properly aligned. + // + // * Both `src` and `dst` point to different region of memory. 
+ ptr::copy_nonoverlapping( + self.bucket_ptr(full_byte_index, layout.size), + new_table.bucket_ptr(new_index, layout.size), + layout.size, + ); + } } // The hash function didn't panic, so we can safely set the @@ -3024,7 +3070,7 @@ impl RawTableInner { /// * The [`RawTableInner`] must have properly initialized control bytes. /// /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html - #[allow(clippy::inline_always)] + #[cfg_attr(feature = "inline-more", expect(clippy::inline_always))] #[cfg_attr(feature = "inline-more", inline(always))] #[cfg_attr(not(feature = "inline-more"), inline)] unsafe fn rehash_in_place( @@ -3037,15 +3083,19 @@ impl RawTableInner { // that we haven't rehashed yet. We unfortunately can't preserve the // element since we lost their hash and have no way of recovering it // without risking another panic. - self.prepare_rehash_in_place(); + unsafe { + self.prepare_rehash_in_place(); + } let mut guard = guard(self, move |self_| { if let Some(drop) = drop { for i in 0..self_.num_buckets() { - if *self_.ctrl(i) == Tag::DELETED { - self_.set_ctrl(i, Tag::EMPTY); - drop(self_.bucket_ptr(i, size_of)); - self_.items -= 1; + unsafe { + if *self_.ctrl(i) == Tag::DELETED { + self_.set_ctrl(i, Tag::EMPTY); + drop(self_.bucket_ptr(i, size_of)); + self_.items -= 1; + } } } } @@ -3056,11 +3106,13 @@ impl RawTableInner { // rehashed yet. Find them and re-insert them at their ideal // position. 'outer: for i in 0..guard.num_buckets() { - if *guard.ctrl(i) != Tag::DELETED { - continue; + unsafe { + if *guard.ctrl(i) != Tag::DELETED { + continue; + } } - let i_p = guard.bucket_ptr(i, size_of); + let i_p = unsafe { guard.bucket_ptr(i, size_of) }; 'inner: loop { // Hash the current item @@ -3070,7 +3122,7 @@ impl RawTableInner { // // SAFETY: Caller of this function ensures that the control bytes // are properly initialized. - let new_i = guard.find_insert_index(hash); + let new_i = unsafe { guard.find_insert_index(hash) }; // Probing works by scanning through all of the control // bytes in groups, which may not be aligned to the group @@ -3078,28 +3130,32 @@ impl RawTableInner { // same unaligned group, then there is no benefit in moving // it and we can just continue to the next item. if likely(guard.is_in_same_group(i, new_i, hash)) { - guard.set_ctrl_hash(i, hash); + unsafe { guard.set_ctrl_hash(i, hash) }; continue 'outer; } - let new_i_p = guard.bucket_ptr(new_i, size_of); + let new_i_p = unsafe { guard.bucket_ptr(new_i, size_of) }; // We are moving the current item to a new position. Write // our H2 to the control byte of the new position. - let prev_ctrl = guard.replace_ctrl_hash(new_i, hash); + let prev_ctrl = unsafe { guard.replace_ctrl_hash(new_i, hash) }; if prev_ctrl == Tag::EMPTY { - guard.set_ctrl(i, Tag::EMPTY); + unsafe { guard.set_ctrl(i, Tag::EMPTY) }; // If the target slot is empty, simply move the current // element into the new slot and clear the old control // byte. - ptr::copy_nonoverlapping(i_p, new_i_p, size_of); + unsafe { + ptr::copy_nonoverlapping(i_p, new_i_p, size_of); + } continue 'outer; } else { // If the target slot is occupied, swap the two elements // and then continue processing the element that we just // swapped into the old slot. 
debug_assert_eq!(prev_ctrl, Tag::DELETED); - ptr::swap_nonoverlapping(i_p, new_i_p, size_of); + unsafe { + ptr::swap_nonoverlapping(i_p, new_i_p, size_of); + } continue 'inner; } } @@ -3141,10 +3197,12 @@ impl RawTableInner { where A: Allocator, { - // SAFETY: The caller must uphold the safety contract for `free_buckets` - // method. - let (ptr, layout) = self.allocation_info(table_layout); - alloc.deallocate(ptr, layout); + unsafe { + // SAFETY: The caller must uphold the safety contract for `free_buckets` + // method. + let (ptr, layout) = self.allocation_info(table_layout); + alloc.deallocate(ptr, layout); + } } /// Returns a pointer to the allocated memory and the layout that was used to @@ -3257,7 +3315,9 @@ impl RawTableInner { /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] unsafe fn erase(&mut self, index: usize) { - debug_assert!(self.is_bucket_full(index)); + unsafe { + debug_assert!(self.is_bucket_full(index)); + } // This is the same as `index.wrapping_sub(Group::WIDTH) % self.num_buckets()` because // the number of buckets is a power of two, and `self.bucket_mask = self.num_buckets() - 1`. @@ -3265,8 +3325,12 @@ impl RawTableInner { // SAFETY: // - The caller must uphold the safety contract for `erase` method; // - `index_before` is guaranteed to be in range due to masking with `self.bucket_mask` - let empty_before = Group::load(self.ctrl(index_before)).match_empty(); - let empty_after = Group::load(self.ctrl(index)).match_empty(); + let (empty_before, empty_after) = unsafe { + ( + Group::load(self.ctrl(index_before)).match_empty(), + Group::load(self.ctrl(index)).match_empty(), + ) + }; // Inserting and searching in the map is performed by two key functions: // @@ -3311,7 +3375,9 @@ impl RawTableInner { Tag::EMPTY }; // SAFETY: the caller must uphold the safety contract for `erase` method. - self.set_ctrl(index, ctrl); + unsafe { + self.set_ctrl(index, ctrl); + } self.items -= 1; } } @@ -3425,7 +3491,9 @@ impl RawTableClone for RawTable { default_fn! { #[cfg_attr(feature = "inline-more", inline)] unsafe fn clone_from_spec(&mut self, source: &Self) { - self.clone_from_impl(source); + unsafe { + self.clone_from_impl(source); + } } } } @@ -3433,14 +3501,16 @@ impl RawTableClone for RawTable { impl RawTableClone for RawTable { #[cfg_attr(feature = "inline-more", inline)] unsafe fn clone_from_spec(&mut self, source: &Self) { - source - .table - .ctrl(0) - .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes()); - source - .data_start() - .as_ptr() - .copy_to_nonoverlapping(self.data_start().as_ptr(), self.table.num_buckets()); + unsafe { + source + .table + .ctrl(0) + .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes()); + source + .data_start() + .as_ptr() + .copy_to_nonoverlapping(self.data_start().as_ptr(), self.table.num_buckets()); + } self.table.items = source.table.items; self.table.growth_left = source.table.growth_left; @@ -3455,10 +3525,12 @@ impl RawTable { #[cfg_attr(feature = "inline-more", inline)] unsafe fn clone_from_impl(&mut self, source: &Self) { // Copy the control bytes unchanged. 
We do this in a single pass - source - .table - .ctrl(0) - .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes()); + unsafe { + source + .table + .ctrl(0) + .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes()); + } // The cloning of elements may panic, in which case we need // to make sure we drop only the elements that have been @@ -3466,20 +3538,24 @@ impl RawTable { let mut guard = guard((0, &mut *self), |(index, self_)| { if T::NEEDS_DROP { for i in 0..*index { - if self_.is_bucket_full(i) { - self_.bucket(i).drop(); + unsafe { + if self_.is_bucket_full(i) { + self_.bucket(i).drop(); + } } } } }); - for from in source.iter() { - let index = source.bucket_index(&from); - let to = guard.1.bucket(index); - to.write(from.as_ref().clone()); + unsafe { + for from in source.iter() { + let index = source.bucket_index(&from); + let to = guard.1.bucket(index); + to.write(from.as_ref().clone()); - // Update the index in case we need to unwind. - guard.0 = index + 1; + // Update the index in case we need to unwind. + guard.0 = index + 1; + } } // Successfully cloned all items, no need to clean up. @@ -3501,14 +3577,14 @@ impl Default for RawTable { unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawTable { #[cfg_attr(feature = "inline-more", inline)] fn drop(&mut self) { + // SAFETY: + // 1. We call the function only once; + // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`] + // and [`TableLayout`] that were used to allocate this table. + // 3. If the drop function of any elements fails, then only a memory leak will occur, + // and we don't care because we are inside the `Drop` function of the `RawTable`, + // so there won't be any table left in an inconsistent state. unsafe { - // SAFETY: - // 1. We call the function only once; - // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`] - // and [`TableLayout`] that were used to allocate this table. - // 3. If the drop function of any elements fails, then only a memory leak will occur, - // and we don't care because we are inside the `Drop` function of the `RawTable`, - // so there won't be any table left in an inconsistent state. self.table .drop_inner_table::(&self.alloc, Self::TABLE_LAYOUT); } @@ -3518,14 +3594,14 @@ unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawTable { impl Drop for RawTable { #[cfg_attr(feature = "inline-more", inline)] fn drop(&mut self) { + // SAFETY: + // 1. We call the function only once; + // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`] + // and [`TableLayout`] that were used to allocate this table. + // 3. If the drop function of any elements fails, then only a memory leak will occur, + // and we don't care because we are inside the `Drop` function of the `RawTable`, + // so there won't be any table left in an inconsistent state. unsafe { - // SAFETY: - // 1. We call the function only once; - // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`] - // and [`TableLayout`] that were used to allocate this table. - // 3. If the drop function of any elements fails, then only a memory leak will occur, - // and we don't care because we are inside the `Drop` function of the `RawTable`, - // so there won't be any table left in an inconsistent state. 
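// Illustrative sketch, not part of this patch: the `guard((0, &mut *self), ..)` call in
// `clone_from_impl` above is a scope guard that keeps the table sound if an element's
// `clone()` panics — on unwind it drops only the `0..index` elements that were already
// written. A minimal, hypothetical guard type showing the same idea (`CloneGuard` is not
// hashbrown's API; the crate uses its own `guard` helper):

struct CloneGuard<F: FnMut()> {
    on_unwind: F,
    disarmed: bool,
}

impl<F: FnMut()> Drop for CloneGuard<F> {
    fn drop(&mut self) {
        // Runs when the guard goes out of scope, including during unwinding,
        // unless the happy path disarmed it first.
        if !self.disarmed {
            (self.on_unwind)();
        }
    }
}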
self.table .drop_inner_table::(&self.alloc, Self::TABLE_LAYOUT); } @@ -3595,12 +3671,16 @@ impl RawIterRange { debug_assert_ne!(len, 0); debug_assert_eq!(ctrl as usize % Group::WIDTH, 0); // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`] - let end = ctrl.add(len); + let end = unsafe { ctrl.add(len) }; // Load the first group and advance ctrl to point to the next group // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`] - let current_group = Group::load_aligned(ctrl.cast()).match_full(); - let next_ctrl = ctrl.add(Group::WIDTH); + let (current_group, next_ctrl) = unsafe { + ( + Group::load_aligned(ctrl.cast()).match_full(), + ctrl.add(Group::WIDTH), + ) + }; Self { current_group: current_group.into_iter(), @@ -3661,7 +3741,7 @@ impl RawIterRange { unsafe fn next_impl(&mut self) -> Option> { loop { if let Some(index) = self.current_group.next() { - return Some(self.data.next_n(index)); + return Some(unsafe { self.data.next_n(index) }); } if DO_CHECK_PTR_RANGE && self.next_ctrl >= self.end { @@ -3673,11 +3753,13 @@ impl RawIterRange { // than the group size where the trailing control bytes are all // EMPTY. On larger tables self.end is guaranteed to be aligned // to the group size (since tables are power-of-two sized). - self.current_group = Group::load_aligned(self.next_ctrl.cast()) - .match_full() - .into_iter(); - self.data = self.data.next_n(Group::WIDTH); - self.next_ctrl = self.next_ctrl.add(Group::WIDTH); + unsafe { + self.current_group = Group::load_aligned(self.next_ctrl.cast()) + .match_full() + .into_iter(); + self.data = self.data.next_n(Group::WIDTH); + self.next_ctrl = self.next_ctrl.add(Group::WIDTH); + } } } @@ -3706,7 +3788,7 @@ impl RawIterRange { /// in the table. /// /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html - #[allow(clippy::while_let_on_iterator)] + #[expect(clippy::while_let_on_iterator)] #[cfg_attr(feature = "inline-more", inline)] unsafe fn fold_impl(mut self, mut n: usize, mut acc: B, mut f: F) -> B where @@ -3717,7 +3799,7 @@ impl RawIterRange { // The returned `index` will always be in the range `0..Group::WIDTH`, // so that calling `self.data.next_n(index)` is safe (see detailed explanation below). debug_assert!(n != 0); - let bucket = self.data.next_n(index); + let bucket = unsafe { self.data.next_n(index) }; acc = f(acc, bucket); n -= 1; } @@ -3754,11 +3836,13 @@ impl RawIterRange { // The last `self.next_ctrl`, whose index would be `self.num_buckets()`, will never // actually be read, since we should have already yielded all the elements of // the table. 
- self.current_group = Group::load_aligned(self.next_ctrl.cast()) - .match_full() - .into_iter(); - self.data = self.data.next_n(Group::WIDTH); - self.next_ctrl = self.next_ctrl.add(Group::WIDTH); + unsafe { + self.current_group = Group::load_aligned(self.next_ctrl.cast()) + .match_full() + .into_iter(); + self.data = self.data.next_n(Group::WIDTH); + self.next_ctrl = self.next_ctrl.add(Group::WIDTH); + } } } } @@ -3826,9 +3910,11 @@ pub(crate) struct RawIter { impl RawIter { unsafe fn drop_elements(&mut self) { - if T::NEEDS_DROP && self.items != 0 { - for item in self { - item.drop(); + unsafe { + if T::NEEDS_DROP && self.items != 0 { + for item in self { + item.drop(); + } } } } @@ -3973,13 +4059,17 @@ impl FullBucketsIndices { // will always contains indices within the range `0..Group::WIDTH`, // and subsequent `self.group_first_index + index` will always return a // number less than `self.num_buckets()`. - self.ctrl = NonNull::new_unchecked(self.ctrl.as_ptr().add(Group::WIDTH)); + unsafe { + self.ctrl = NonNull::new_unchecked(self.ctrl.as_ptr().add(Group::WIDTH)); + } // SAFETY: See explanation above. - self.current_group = Group::load_aligned(self.ctrl.as_ptr().cast()) - .match_full() - .into_iter(); - self.group_first_index += Group::WIDTH; + unsafe { + self.current_group = Group::load_aligned(self.ctrl.as_ptr().cast()) + .match_full() + .into_iter(); + self.group_first_index += Group::WIDTH; + } } } } @@ -3997,12 +4087,10 @@ impl Iterator for FullBucketsIndices { return None; } - let nxt = unsafe { - // SAFETY: - // 1. We check number of items to yield using `items` field. - // 2. The caller ensures that the table is alive and has not moved. - self.next_impl() - }; + // SAFETY: + // 1. We check number of items to yield using `items` field. + // 2. The caller ensures that the table is alive and has not moved. 
+ let nxt = unsafe { self.next_impl() }; debug_assert!(nxt.is_some()); self.items -= 1; @@ -4222,7 +4310,7 @@ impl RawIterHash { #[cfg_attr(feature = "inline-more", inline)] unsafe fn new(table: &RawTable, hash: u64) -> Self { RawIterHash { - inner: RawIterHashIndices::new(&table.table, hash), + inner: unsafe { RawIterHashIndices::new(&table.table, hash) }, _marker: PhantomData, } } @@ -4261,7 +4349,7 @@ impl RawIterHashIndices { unsafe fn new(table: &RawTableInner, hash: u64) -> Self { let tag_hash = Tag::full(hash); let probe_seq = table.probe_seq(hash); - let group = Group::load(table.ctrl(probe_seq.pos)); + let group = unsafe { Group::load(table.ctrl(probe_seq.pos)) }; let bitmask = group.match_tag(tag_hash).into_iter(); RawIterHashIndices { @@ -4517,8 +4605,10 @@ mod test_map { } unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { - let g = Global; - g.deallocate(ptr, layout) + unsafe { + let g = Global; + g.deallocate(ptr, layout) + } } } @@ -4561,7 +4651,7 @@ mod test_map { }), }); - for (idx, panic_in_clone) in core::iter::repeat(DISARMED).take(7).enumerate() { + for (idx, panic_in_clone) in core::iter::repeat_n(DISARMED, 7).enumerate() { let idx = idx as u64; table.insert( idx, diff --git a/src/raw_entry.rs b/src/raw_entry.rs index 20623a83b..d8dfdc550 100644 --- a/src/raw_entry.rs +++ b/src/raw_entry.rs @@ -521,7 +521,6 @@ impl<'a, K, V, S, A: Allocator> RawEntryBuilderMut<'a, K, V, S, A> { /// assert_eq!(map[&"a"], 100); /// ``` #[cfg_attr(feature = "inline-more", inline)] - #[allow(clippy::wrong_self_convention)] pub fn from_key(self, k: &Q) -> RawEntryMut<'a, K, V, S, A> where S: BuildHasher, @@ -554,7 +553,6 @@ impl<'a, K, V, S, A: Allocator> RawEntryBuilderMut<'a, K, V, S, A> { /// assert_eq!(map[&"a"], 100); /// ``` #[inline] - #[allow(clippy::wrong_self_convention)] pub fn from_key_hashed_nocheck(self, hash: u64, k: &Q) -> RawEntryMut<'a, K, V, S, A> where Q: Equivalent + ?Sized, @@ -587,7 +585,6 @@ impl<'a, K, V, S, A: Allocator> RawEntryBuilderMut<'a, K, V, S, A> { /// assert_eq!(map[&"a"], 100); /// ``` #[cfg_attr(feature = "inline-more", inline)] - #[allow(clippy::wrong_self_convention)] pub fn from_hash(self, hash: u64, is_match: F) -> RawEntryMut<'a, K, V, S, A> where for<'b> F: FnMut(&'b K) -> bool, @@ -627,7 +624,6 @@ impl<'a, K, V, S, A: Allocator> RawEntryBuilder<'a, K, V, S, A> { /// assert_eq!(map.raw_entry().from_key(&key), Some((&"a", &100))); /// ``` #[cfg_attr(feature = "inline-more", inline)] - #[allow(clippy::wrong_self_convention)] pub fn from_key(self, k: &Q) -> Option<(&'a K, &'a V)> where S: BuildHasher, @@ -658,7 +654,6 @@ impl<'a, K, V, S, A: Allocator> RawEntryBuilder<'a, K, V, S, A> { /// assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash, &key), Some((&"a", &100))); /// ``` #[cfg_attr(feature = "inline-more", inline)] - #[allow(clippy::wrong_self_convention)] pub fn from_key_hashed_nocheck(self, hash: u64, k: &Q) -> Option<(&'a K, &'a V)> where Q: Equivalent + ?Sized, @@ -698,7 +693,6 @@ impl<'a, K, V, S, A: Allocator> RawEntryBuilder<'a, K, V, S, A> { /// assert_eq!(map.raw_entry().from_hash(hash, |k| k == &key), Some((&"a", &100))); /// ``` #[cfg_attr(feature = "inline-more", inline)] - #[allow(clippy::wrong_self_convention)] pub fn from_hash(self, hash: u64, is_match: F) -> Option<(&'a K, &'a V)> where F: FnMut(&K) -> bool, @@ -1359,7 +1353,6 @@ impl<'a, K, V, S, A: Allocator> RawVacantEntryMut<'a, K, V, S, A> { /// assert_eq!(map[&"c"], 300); /// ``` #[cfg_attr(feature = "inline-more", inline)] - 
#[allow(clippy::shadow_unrelated)] pub fn insert_hashed_nocheck(self, hash: u64, key: K, value: V) -> (&'a mut K, &'a mut V) where K: Hash, diff --git a/src/set.rs b/src/set.rs index 36fb60a36..5ece27f77 100644 --- a/src/set.rs +++ b/src/set.rs @@ -1114,7 +1114,7 @@ where /// correctly, and would cause unsoundness as a result. #[cfg_attr(feature = "inline-more", inline)] pub unsafe fn insert_unique_unchecked(&mut self, value: T) -> &T { - self.map.insert_unique_unchecked(value, ()).0 + unsafe { self.map.insert_unique_unchecked(value, ()).0 } } /// Adds a value to the set, replacing the existing value, if any, that is equal to the given @@ -2530,7 +2530,7 @@ impl<'a, T, S, A: Allocator> VacantEntry<'a, T, S, A> { } } -#[allow(dead_code)] +#[expect(dead_code)] fn assert_covariance() { fn set<'new>(v: HashSet<&'static str>) -> HashSet<&'new str> { v @@ -2918,7 +2918,7 @@ mod test_set { use core::hash; #[derive(Debug)] - #[allow(dead_code)] + #[expect(dead_code)] struct Foo(&'static str, i32); impl PartialEq for Foo { @@ -2947,7 +2947,6 @@ mod test_set { } #[test] - #[allow(clippy::needless_borrow)] fn test_extend_ref() { let mut a = HashSet::new(); a.insert(1); diff --git a/src/table.rs b/src/table.rs index a890e29c2..82fc6ffae 100644 --- a/src/table.rs +++ b/src/table.rs @@ -512,7 +512,7 @@ where #[inline] pub unsafe fn get_bucket_entry_unchecked(&mut self, index: usize) -> OccupiedEntry<'_, T, A> { OccupiedEntry { - bucket: self.raw.bucket(index), + bucket: unsafe { self.raw.bucket(index) }, table: self, } } @@ -588,7 +588,7 @@ where /// ``` #[inline] pub unsafe fn get_bucket_unchecked(&self, index: usize) -> &T { - self.raw.bucket(index).as_ref() + unsafe { self.raw.bucket(index).as_ref() } } /// Gets a mutable reference to an entry in the table at the given bucket index, @@ -666,7 +666,7 @@ where /// ``` #[inline] pub unsafe fn get_bucket_unchecked_mut(&mut self, index: usize) -> &mut T { - self.raw.bucket(index).as_mut() + unsafe { self.raw.bucket(index).as_mut() } } /// Inserts an element into the `HashTable` with the given hash value, but @@ -1547,7 +1547,7 @@ where hashes: [u64; N], eq: impl FnMut(usize, &T) -> bool, ) -> [Option<&'_ mut T>; N] { - self.raw.get_disjoint_unchecked_mut(hashes, eq) + unsafe { self.raw.get_disjoint_unchecked_mut(hashes, eq) } } /// Attempts to get mutable references to `N` values in the map at once, without validating that @@ -1558,7 +1558,7 @@ where hashes: [u64; N], eq: impl FnMut(usize, &T) -> bool, ) -> [Option<&'_ mut T>; N] { - self.raw.get_disjoint_unchecked_mut(hashes, eq) + unsafe { self.raw.get_disjoint_unchecked_mut(hashes, eq) } } /// Returns the total amount of memory allocated internally by the hash diff --git a/src/util.rs b/src/util.rs index 90a8df311..8667f34dc 100644 --- a/src/util.rs +++ b/src/util.rs @@ -32,7 +32,6 @@ pub(crate) fn unlikely(b: bool) -> bool { // FIXME: use strict provenance functions once they are stable. // Implement it with a transmute for now. 
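// Illustrative sketch, not part of this patch: the FIXME above refers to the
// strict-provenance pointer APIs. Now that `core::ptr::without_provenance_mut` is
// stable (Rust 1.84), one possible follow-up shape for this helper — a suggestion
// only, not something this diff does — would avoid the transmute entirely:

#[inline(always)]
pub(crate) fn invalid_mut<T>(addr: usize) -> *mut T {
    // Creates a pointer with the given address and no provenance, which is
    // exactly what the transmute below emulates.
    core::ptr::without_provenance_mut(addr)
}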
#[inline(always)] -#[allow(clippy::useless_transmute)] // clippy is wrong, cast and transmute are different here pub(crate) fn invalid_mut(addr: usize) -> *mut T { unsafe { core::mem::transmute(addr) } } diff --git a/tests/equivalent_trait.rs b/tests/equivalent_trait.rs index 713dddd53..31f0a63e2 100644 --- a/tests/equivalent_trait.rs +++ b/tests/equivalent_trait.rs @@ -1,3 +1,5 @@ +#![expect(missing_docs)] // https://github.com/rust-lang/rust/issues/137561 + use hashbrown::Equivalent; use hashbrown::HashMap; diff --git a/tests/rayon.rs b/tests/rayon.rs index d55e5a980..e9ba040a3 100644 --- a/tests/rayon.rs +++ b/tests/rayon.rs @@ -1,3 +1,4 @@ +#![expect(missing_docs)] // https://github.com/rust-lang/rust/issues/137561 #![cfg(feature = "rayon")] #[macro_use] diff --git a/tests/serde.rs b/tests/serde.rs index a642348b3..ef91d76b7 100644 --- a/tests/serde.rs +++ b/tests/serde.rs @@ -1,3 +1,4 @@ +#![expect(missing_docs)] // https://github.com/rust-lang/rust/issues/137561 #![cfg(feature = "serde")] use core::hash::BuildHasherDefault; diff --git a/tests/set.rs b/tests/set.rs index d25f3d459..cf9204e79 100644 --- a/tests/set.rs +++ b/tests/set.rs @@ -1,3 +1,4 @@ +#![expect(missing_docs)] // https://github.com/rust-lang/rust/issues/137561 #![cfg(not(miri))] // FIXME: takes too long use hashbrown::HashSet; @@ -20,7 +21,7 @@ fn test_hashset_insert_remove() { .collect(); // more readable with explicit `true` / `false` - #[allow(clippy::bool_assert_comparison)] + #[expect(clippy::bool_assert_comparison)] for _ in 0..32 { for x in &tx { assert_eq!(m.contains(x), false);
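// Illustrative sketch, not part of this patch: the `allow` -> `expect` swaps throughout
// this diff (including the crate-level `#![expect(missing_docs)]` headers in the test
// files) use lint expectations, which behave like `allow` but additionally emit an
// `unfulfilled_lint_expectations` warning if the suppressed lint never fires, so stale
// suppressions are caught automatically. A minimal example (`unused_helper` is a
// made-up item):

#![expect(missing_docs)] // crate-level expectation, as in the test files above

#[expect(dead_code)] // warns again the moment `unused_helper` gains a caller
fn unused_helper() {}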