10 changes: 10 additions & 0 deletions compiler/rustc_data_structures/src/sharded.rs
@@ -207,6 +207,16 @@ impl<K: Eq + Hash, V> ShardedHashMap<K, V> {
}
}
}

#[inline]
pub fn remove(&self, key: &K) {
let hash = make_hash(key);
let mut shard = self.lock_shard_by_hash(hash);

if let Entry::Occupied(e) = table_entry(&mut shard, hash, key) {
e.remove();
}
}
}
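For context on the pattern the new `remove` follows: the key is hashed once, only the shard owning that hash is locked, and the entry is dropped if present, so the other shards stay available to concurrent readers and writers. A minimal stand-alone sketch of the same shard-by-hash locking using std types (the shard count, hasher, and map API are simplified placeholders, not the real `ShardedHashMap` internals):

use std::collections::HashMap;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::sync::Mutex;

const SHARDS: usize = 8; // illustrative; the real type derives its shard count from parallelism

struct ShardedMap<K, V> {
    shards: [Mutex<HashMap<K, V>>; SHARDS],
}

impl<K: Eq + Hash, V> ShardedMap<K, V> {
    fn new() -> Self {
        Self { shards: std::array::from_fn(|_| Mutex::new(HashMap::new())) }
    }

    fn shard_for(&self, key: &K) -> &Mutex<HashMap<K, V>> {
        let mut h = DefaultHasher::new();
        key.hash(&mut h);
        // Pick a shard from the hash (the real code derives the index from the hash bits).
        &self.shards[h.finish() as usize % SHARDS]
    }

    fn remove(&self, key: &K) {
        // Lock only the owning shard, mirroring `lock_shard_by_hash` above.
        let mut shard = self.shard_for(key).lock().unwrap();
        shard.remove(key);
    }
}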

impl<K: Eq + Hash + Copy> ShardedHashMap<K, ()> {
51 changes: 51 additions & 0 deletions compiler/rustc_data_structures/src/vec_cache.rs
@@ -206,6 +206,24 @@ impl SlotIndex {

index_and_lock.store(extra.checked_add(2).unwrap(), Ordering::Release);
}

#[inline]
unsafe fn remove<V>(&self, buckets: &[AtomicPtr<Slot<V>>; 21]) {
let bucket = &buckets[self.bucket_idx];
let ptr = self.bucket_ptr(bucket);

debug_assert!(self.index_in_bucket < self.bucket_idx.capacity());

// SAFETY: `bucket` was allocated (so <= isize in total bytes) to hold `entries`, so this
// must be inbounds.
let slot = unsafe { ptr.add(self.index_in_bucket) };

// SAFETY: initialized bucket has zeroed all memory within the bucket, so we are valid for
// AtomicU32 access.
let index_and_lock = unsafe { &(*slot).index_and_lock };

index_and_lock.store(0, Ordering::Release);
}
}
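For readers new to `vec_cache`, the per-slot `index_and_lock` word is what makes this removal a single store: 0 means the slot is vacant, 1 means it is locked for initialization, and any value `n + 2` means it is initialized and carries the extra value `n` (see the `extra.checked_add(2)` store in `put_unique` above). The new `remove` therefore just release-stores 0 so later lookups treat the slot as empty. A small stand-alone sketch of that encoding (the state names are mine, not from the source):

use std::sync::atomic::{AtomicU32, Ordering};

// Illustrative names for the three states packed into `index_and_lock`.
enum SlotState {
    Empty,                      // word == 0
    Locked,                     // word == 1
    Initialized { extra: u32 }, // word == extra + 2
}

fn decode(word: u32) -> SlotState {
    match word {
        0 => SlotState::Empty,
        1 => SlotState::Locked,
        n => SlotState::Initialized { extra: n - 2 },
    }
}

fn publish(slot: &AtomicU32, extra: u32) {
    // Mirrors `put_unique`: adding 2 keeps 0 and 1 reserved for the empty/locked states.
    slot.store(extra.checked_add(2).unwrap(), Ordering::Release);
}

fn remove(slot: &AtomicU32) {
    // Mirrors the new `remove`: a release store of 0 marks the slot vacant again.
    slot.store(0, Ordering::Release);
}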

/// In-memory cache for queries whose keys are densely-numbered IDs
@@ -339,6 +357,39 @@ where
pub fn len(&self) -> usize {
self.len.load(Ordering::Acquire)
}

pub fn remove(&self, key: &K) {
let key = u32::try_from(key.index()).unwrap();
let slot_idx = SlotIndex::from_index(key);

unsafe { slot_idx.remove(&self.buckets) };
}

pub fn invalidate(&self, selector: impl Fn(K) -> bool) {
let mut to_remove = vec![];
let mut remaining = vec![];

self.for_each(&mut |key, _, _| {
if selector(*key) {
to_remove.push(*key);
} else {
remaining.push(*key);
}
});

for key in to_remove {
self.remove(&key);
}

for (index, key) in remaining.iter().enumerate() {
let slot = SlotIndex::from_index(u32::try_from(index).unwrap());
let key = u32::try_from(key.index()).unwrap();

unsafe { slot.put_unique(&self.present, (), key) };
}

self.len.store(remaining.len(), Ordering::Release);
}
}
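`invalidate` is the higher-level operation built on the pieces above: it walks every live entry, splits the keys with the selector, clears the matched slots via `remove`, re-publishes the surviving keys at densely packed slot indices, and finally release-stores the new length. A plain-`Vec` analogue of that keep-and-repack step, to show the shape without the atomics (the types here are illustrative only):

// Keep the survivors, renumber them densely from 0, and report the new length.
fn invalidate_dense(entries: &mut Vec<u32>, selector: impl Fn(u32) -> bool) -> usize {
    entries.retain(|&key| !selector(key));
    // After `retain`, survivors occupy indices 0..entries.len(), mirroring how
    // `invalidate` re-publishes the remaining keys at compacted slot indices
    // before storing the new `len`.
    entries.len()
}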

/// Index into an array of buckets.
62 changes: 39 additions & 23 deletions compiler/rustc_hir/src/definitions.rs
@@ -8,6 +8,7 @@ use std::fmt::{self, Write};
use std::hash::Hash;

use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::indexmap::IndexSet;
use rustc_data_structures::stable_hash::StableHasher;
use rustc_hashes::Hash64;
use rustc_index::IndexVec;
@@ -31,6 +32,7 @@ pub struct DefPathTable {
// We do only store the local hash, as all the definitions are from the current crate.
def_path_hashes: IndexVec<DefIndex, Hash64>,
def_path_hash_to_index: DefPathHashMap,
allow_overwrite: IndexSet<DefIndex>,
}

impl DefPathTable {
@@ -40,38 +42,45 @@ impl DefPathTable {
index_to_key: Default::default(),
def_path_hashes: Default::default(),
def_path_hash_to_index: Default::default(),
allow_overwrite: Default::default(),
}
}

fn allocate(&mut self, key: DefKey, def_path_hash: DefPathHash) -> DefIndex {
fn allocate(&mut self, key: DefKey, def_path_hash: DefPathHash, in_sandbox: bool) -> DefIndex {
// Assert that all DefPathHashes correctly contain the local crate's StableCrateId.
debug_assert_eq!(self.stable_crate_id, def_path_hash.stable_crate_id());
let local_hash = def_path_hash.local_hash();

let index = self.index_to_key.push(key);
debug!("DefPathTable::insert() - {key:?} <-> {index:?}");

self.def_path_hashes.push(local_hash);
debug_assert!(self.def_path_hashes.len() == self.index_to_key.len());
let index = self.index_to_key.next_index();

// Check for hash collisions of DefPathHashes. These should be
// exceedingly rare.
if let Some(existing) = self.def_path_hash_to_index.insert(&local_hash, &index) {
let def_path1 = DefPath::make(LOCAL_CRATE, existing, |idx| self.def_key(idx));
let def_path2 = DefPath::make(LOCAL_CRATE, index, |idx| self.def_key(idx));

// Continuing with colliding DefPathHashes can lead to correctness
// issues. We must abort compilation.
//
// The likelihood of such a collision is very small, so actually
// running into one could be indicative of a poor hash function
// being used.
//
// See the documentation for DefPathHash for more information.
panic!(
"found DefPathHash collision between {def_path1:#?} and {def_path2:#?}. \
if !in_sandbox && self.allow_overwrite.swap_remove(&existing) {
self.def_path_hash_to_index.insert(&local_hash, &existing);
return existing;
} else {
let def_path1 = DefPath::make(LOCAL_CRATE, existing, |idx| self.def_key(idx));
let def_path2 = DefPath::make(LOCAL_CRATE, index, |idx| self.def_key(idx));

// Continuing with colliding DefPathHashes can lead to correctness
// issues. We must abort compilation.
//
// The likelihood of such a collision is very small, so actually
// running into one could be indicative of a poor hash function
// being used.
//
// See the documentation for DefPathHash for more information.
panic!(
"found DefPathHash collision between {def_path1:#?} and {def_path2:#?}. \
Compilation cannot continue."
);
);
}
} else {
self.index_to_key.push(key);
debug!("DefPathTable::insert() - {key:?} <-> {index:?}");

self.def_path_hashes.push(local_hash);
debug_assert!(self.def_path_hashes.len() == self.index_to_key.len());
}

index
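The reshaped `allocate` now checks the hash before committing the new key: if the hash already maps to an index that was previously marked overwritable (and we are not currently inside a sandbox), that index is reused instead of aborting; a genuine `DefPathHash` collision still panics, and only on the no-collision path are the key and hash actually pushed. A compact sketch of that control flow using std collections in place of the interned tables (the field names and string key are placeholders, and the sandbox bookkeeping done by `create_def` below is folded in here for brevity):

use std::collections::{HashMap, HashSet};

struct DefTable {
    keys: Vec<String>,                  // stand-in for index_to_key
    hash_to_index: HashMap<u64, usize>, // stand-in for def_path_hash_to_index
    allow_overwrite: HashSet<usize>,    // indices created in a sandbox, reusable later
}

impl DefTable {
    fn allocate(&mut self, key: String, hash: u64, in_sandbox: bool) -> usize {
        let index = self.keys.len(); // the index this key would receive
        if let Some(&existing) = self.hash_to_index.get(&hash) {
            if !in_sandbox && self.allow_overwrite.remove(&existing) {
                // A sandbox-created definition with the same hash already exists:
                // reuse its index rather than treating this as a collision.
                return existing;
            }
            panic!("DefPathHash collision; compilation cannot continue");
        }
        self.hash_to_index.insert(hash, index);
        self.keys.push(key);
        if in_sandbox {
            // Mark the fresh definition as overwritable by a later non-sandbox one.
            self.allow_overwrite.insert(index);
        }
        index
    }
}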
@@ -377,7 +386,7 @@ impl Definitions {

// Create the root definition.
let mut table = DefPathTable::new(stable_crate_id);
let root = LocalDefId { local_def_index: table.allocate(key, def_path_hash) };
let root = LocalDefId { local_def_index: table.allocate(key, def_path_hash, false) };
assert_eq!(root.local_def_index, CRATE_DEF_INDEX);

Definitions { table }
@@ -393,6 +402,7 @@ impl Definitions {
parent: LocalDefId,
data: DefPathData,
disambiguator: &mut PerParentDisambiguatorState,
in_sandbox: bool,
) -> LocalDefId {
// We can't use `Debug` implementation for `LocalDefId` here, since it tries to acquire a
// reference to `Definitions` and we're already holding a mutable reference.
@@ -428,8 +438,14 @@

debug!("create_def: after disambiguation, key = {:?}", key);

let local_def_index = self.table.allocate(key, def_path_hash, in_sandbox);

if in_sandbox {
assert_eq!(self.table.allow_overwrite.insert(local_def_index), true);
}

// Create the definition.
LocalDefId { local_def_index: self.table.allocate(key, def_path_hash) }
LocalDefId { local_def_index }
}

#[inline(always)]
16 changes: 13 additions & 3 deletions compiler/rustc_hir/src/intravisit.rs
@@ -221,6 +221,11 @@ pub trait Visitor<'v>: Sized {
/// or `ControlFlow<T>`.
type Result: VisitorResult = ();

#[inline]
fn visit_if_delayed(&self, _: LocalDefId) -> bool {
true
}

/// If `type NestedFilter` is set to visit nested items, this method
/// must also be overridden to provide a map to retrieve nested items.
fn maybe_tcx(&mut self) -> Self::MaybeTyCtxt {
@@ -239,18 +244,23 @@ pub trait Visitor<'v>: Sized {
/// this method is if you want a nested pattern but cannot supply a
/// `TyCtxt`; see `maybe_tcx` for advice.
fn visit_nested_item(&mut self, id: ItemId) -> Self::Result {
if Self::NestedFilter::INTER {
if self.should_visit_maybe_delayed_inter(id.owner_id.def_id) {
let item = self.maybe_tcx().hir_item(id);
try_visit!(self.visit_item(item));
}
Self::Result::output()
}

// Currently, delayed owners can only be delegations, which are items, trait items, or impl items.
fn should_visit_maybe_delayed_inter(&mut self, id: LocalDefId) -> bool {
Self::NestedFilter::INTER && self.visit_if_delayed(id)
}

/// Like `visit_nested_item()`, but for trait items. See
/// `visit_nested_item()` for advice on when to override this
/// method.
fn visit_nested_trait_item(&mut self, id: TraitItemId) -> Self::Result {
if Self::NestedFilter::INTER {
if self.should_visit_maybe_delayed_inter(id.owner_id.def_id) {
let item = self.maybe_tcx().hir_trait_item(id);
try_visit!(self.visit_trait_item(item));
}
Expand All @@ -261,7 +271,7 @@ pub trait Visitor<'v>: Sized {
/// `visit_nested_item()` for advice on when to override this
/// method.
fn visit_nested_impl_item(&mut self, id: ImplItemId) -> Self::Result {
if Self::NestedFilter::INTER {
if self.should_visit_maybe_delayed_inter(id.owner_id.def_id) {
let item = self.maybe_tcx().hir_impl_item(id);
try_visit!(self.visit_impl_item(item));
}
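The `visit_if_delayed` hook gives visitors a way to opt out of nested owners that were created late (per the comment above, currently only delegation-generated items, trait items, and impl items); the default implementation returns `true`, so existing visitors keep their behaviour, and the check is always combined with the compile-time `NestedFilter::INTER` gate. A self-contained model of the pattern — the `delayed_owners` set and how it gets populated are assumptions for illustration, not part of this diff:

use std::collections::HashSet;

trait MiniVisitor {
    const VISIT_NESTED: bool; // stands in for NestedFilter::INTER

    fn visit_if_delayed(&self, _id: u32) -> bool {
        true // default: delayed owners are visited like everything else
    }

    fn should_visit_nested(&self, id: u32) -> bool {
        // Same shape as `should_visit_maybe_delayed_inter`.
        Self::VISIT_NESTED && self.visit_if_delayed(id)
    }
}

struct SkipDelayed {
    delayed_owners: HashSet<u32>, // hypothetical: ids of sandbox-created owners
}

impl MiniVisitor for SkipDelayed {
    const VISIT_NESTED: bool = true;

    fn visit_if_delayed(&self, id: u32) -> bool {
        // Skip owners that were created as delayed definitions.
        !self.delayed_owners.contains(&id)
    }
}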
10 changes: 9 additions & 1 deletion compiler/rustc_macros/src/query.rs
@@ -138,6 +138,7 @@ struct QueryModifiers {
// tidy-alphabetical-start
arena_cache: Option<Ident>,
cache_on_disk: Option<Ident>,
callfront: Option<Ident>,
depth_limit: Option<Ident>,
desc: Desc,
eval_always: Option<Ident>,
@@ -153,6 +154,7 @@ fn parse_query_modifiers(input: ParseStream<'_>) -> Result<QueryModifiers> {
// tidy-alphabetical-start
let mut arena_cache = None;
let mut cache_on_disk = None;
let mut callfront = None;
let mut depth_limit = None;
let mut desc = None;
let mut eval_always = None;
@@ -179,6 +181,8 @@ fn parse_query_modifiers(input: ParseStream<'_>) -> Result<QueryModifiers> {
try_insert!(arena_cache = modifier);
} else if modifier == "cache_on_disk" {
try_insert!(cache_on_disk = modifier);
} else if modifier == "callfront" {
try_insert!(callfront = modifier);
} else if modifier == "depth_limit" {
try_insert!(depth_limit = modifier);
} else if modifier == "desc" {
@@ -211,6 +215,7 @@ fn parse_query_modifiers(input: ParseStream<'_>) -> Result<QueryModifiers> {
// tidy-alphabetical-start
arena_cache,
cache_on_disk,
callfront,
depth_limit,
desc,
eval_always,
@@ -247,6 +252,7 @@ fn make_modifiers_stream(query: &Query) -> proc_macro2::TokenStream {
// tidy-alphabetical-start
arena_cache,
cache_on_disk,
callfront,
depth_limit,
desc,
eval_always,
@@ -261,6 +267,7 @@ fn make_modifiers_stream(query: &Query) -> proc_macro2::TokenStream {
// tidy-alphabetical-start
let arena_cache = arena_cache.is_some();
let cache_on_disk = cache_on_disk.is_some();
let callfront = callfront.is_some();
let depth_limit = depth_limit.is_some();
let desc = {
// Put a description closure in the `desc` modifier.
@@ -284,7 +291,7 @@ fn make_modifiers_stream(query: &Query) -> proc_macro2::TokenStream {
// tidy-alphabetical-end

// Giving an input span to the modifier names in the modifier list seems
// to give slightly more helpful errors when one of the callback macros
// to give slightly more helpful errors when one of the callfront macros
// fails to parse the modifier list.
let query_name_span = query.name.span();
quote_spanned! {
@@ -293,6 +300,7 @@ fn make_modifiers_stream(query: &Query) -> proc_macro2::TokenStream {
// tidy-alphabetical-start
arena_cache: #arena_cache,
cache_on_disk: #cache_on_disk,
callfront: #callfront,
depth_limit: #depth_limit,
desc: #desc,
eval_always: #eval_always,
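On the macro side, `callfront` is threaded through exactly like the other boolean modifiers (`arena_cache`, `depth_limit`, `eval_always`, …): parsed into an `Option<Ident>`, collapsed to a `bool`, and emitted into the generated modifier struct. How a query would actually opt in is not shown in this diff; a purely hypothetical declaration following the existing modifier syntax might look like:

// Hypothetical query using the new modifier; the query name, types, and semantics
// are invented for illustration and do not appear in this diff.
query some_sandbox_aware_query(key: DefId) -> SomeResult<'tcx> {
    desc { |tcx| "computing a sandbox-aware result for `{}`", tcx.def_path_str(key) }
    callfront
}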