Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 15 additions & 9 deletions compiler/rustc_data_structures/src/sharded.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
use std::borrow::Borrow;
use std::hash::{Hash, Hasher};
use std::{iter, mem};
use std::iter;

use either::Either;
use hashbrown::hash_table::{self, Entry, HashTable};
Expand Down Expand Up @@ -183,19 +183,25 @@ impl<K: Eq + Hash, V> ShardedHashMap<K, V> {
}
}

/// Inserts a value into the [`ShardedHashMap`]; the key must not already be present.
///
/// Uniqueness is checked (panicking on a duplicate key) only when
/// `debug_assertions` are enabled; otherwise the caller must uphold it.
#[inline]
pub fn insert(&self, key: K, value: V) -> Option<V> {
pub fn insert_unique(&self, key: K, value: V) {
let hash = make_hash(&key);
let mut shard = self.lock_shard_by_hash(hash);

match table_entry(&mut shard, hash, &key) {
Entry::Occupied(e) => {
let previous = mem::replace(&mut e.into_mut().1, value);
Some(previous)
cfg_select! {
debug_assertions => match table_entry(&mut shard, hash, &key) {
Entry::Occupied(_) => {
panic!("tried to insert key that's already present");
}
Entry::Vacant(e) => {
e.insert((key, value));
}
}
Entry::Vacant(e) => {
e.insert((key, value));
None
_ => {
shard.insert_unique(hash, (key, value), |(k, _)| make_hash(k));
}
}
}
Expand Down
1 change: 1 addition & 0 deletions compiler/rustc_data_structures/src/vec_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ use rustc_index::Idx;
#[cfg(test)]
mod tests;

#[repr(packed(4))]
struct Slot<V> {
// We never construct &Slot<V> so it's fine for this to not be in an UnsafeCell.
value: V,
Expand Down
19 changes: 7 additions & 12 deletions compiler/rustc_middle/src/mir/interpret/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -473,9 +473,8 @@ impl<'tcx> TyCtxt<'tcx> {
}
let id = self.alloc_map.reserve();
debug!("creating alloc {:?} with id {id:?}", alloc_salt.0);
let had_previous = self.alloc_map.to_alloc.insert(id, alloc_salt.0.clone()).is_some();
// We just reserved, so should always be unique.
assert!(!had_previous);
self.alloc_map.to_alloc.insert_unique(id, alloc_salt.0.clone());
dedup.insert(alloc_salt, id);
id
}
Expand Down Expand Up @@ -548,21 +547,17 @@ impl<'tcx> TyCtxt<'tcx> {
}

/// Freezes an `AllocId` created with `reserve` by pointing it at an `Allocation`. Trying to
/// call this function twice, even with the same `Allocation` will ICE the compiler.
/// call this function twice, even with the same `Allocation`, will ICE the compiler if
/// `debug_assertions` are enabled.
pub fn set_alloc_id_memory(self, id: AllocId, mem: ConstAllocation<'tcx>) {
if let Some(old) = self.alloc_map.to_alloc.insert(id, GlobalAlloc::Memory(mem)) {
bug!("tried to set allocation ID {id:?}, but it was already existing as {old:#?}");
}
self.alloc_map.to_alloc.insert_unique(id, GlobalAlloc::Memory(mem))
}

/// Freezes an `AllocId` created with `reserve` by pointing it at a static item. Trying to
/// call this function twice, even with the same `DefId` will ICE the compiler.
/// call this function twice, even with the same `DefId`, will ICE the compiler if
/// `debug_assertions` are enabled.
pub fn set_nested_alloc_id_static(self, id: AllocId, def_id: LocalDefId) {
if let Some(old) =
self.alloc_map.to_alloc.insert(id, GlobalAlloc::Static(def_id.to_def_id()))
{
bug!("tried to set allocation ID {id:?}, but it was already existing as {old:#?}");
}
self.alloc_map.to_alloc.insert_unique(id, GlobalAlloc::Static(def_id.to_def_id()))
}
}

Expand Down
51 changes: 41 additions & 10 deletions compiler/rustc_middle/src/query/caches.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,9 @@
use std::hash::{Hash, Hasher as _};
use std::sync::OnceLock;

use rustc_data_structures::sharded::ShardedHashMap;
use rustc_data_structures::fx::FxHasher;
use rustc_data_structures::hash_table::HashTable;
use rustc_data_structures::sharded::Sharded;
pub use rustc_data_structures::vec_cache::VecCache;
use rustc_hir::def_id::LOCAL_CRATE;
use rustc_index::Idx;
Expand Down Expand Up @@ -38,10 +41,25 @@ pub trait QueryCache: Sized {
fn len(&self) -> usize;
}

#[inline]
fn make_hash<K: Hash + ?Sized>(val: &K) -> u64 {
let mut state = FxHasher::default();
val.hash(&mut state);
state.finish()
}

#[repr(packed(4))]
#[derive(Clone, Copy)]
struct PackedCacheEntry<K, V> {
key: K,
value: V,
index: DepNodeIndex,
}

/// In-memory cache for queries whose keys aren't suitable for any of the
/// more specialized kinds of cache. Backed by a sharded hashmap.
pub struct DefaultCache<K, V> {
cache: ShardedHashMap<K, (V, DepNodeIndex)>,
cache: Sharded<HashTable<PackedCacheEntry<K, V>>>,
}

impl<K, V> Default for DefaultCache<K, V> {
Expand All @@ -58,28 +76,41 @@ where
type Key = K;
type Value = V;

#[inline(always)]
#[inline]
fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
self.cache.get(key)
let hash = make_hash(key);
let shard = self.cache.lock_shard_by_hash(hash);
shard.find(hash, |ent| { ent.key } == *key).map(|ent| (ent.value, ent.index))
}

#[inline]
fn complete(&self, key: K, value: V, index: DepNodeIndex) {
// We may be overwriting another value. This is all right, since the dep-graph
// will check that the value fingerprint matches.
self.cache.insert(key, (value, index));
let hash = make_hash(&key);
let mut shard = self.cache.lock_shard_by_hash(hash);
cfg_select! {
debug_assertions => {
use rustc_data_structures::hash_table::Entry::*;
match shard.entry(hash, |ent| { ent.key } == key, |ent| make_hash(&{ ent.key })) {
Occupied(_) => panic!("trying to complete query twice"),
Vacant(entry) => entry.insert(PackedCacheEntry { key, value, index }),
};
}
_ => {
shard.insert_unique(hash, PackedCacheEntry { key, value, index }, |ent| make_hash(&{ ent.key }));
}
}
}

fn for_each(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
for shard in self.cache.lock_shards() {
for (k, v) in shard.iter() {
f(k, &v.0, v.1);
for PackedCacheEntry { key, value, index } in shard.iter().copied() {
f(&key, &value, index);
}
}
}

fn len(&self) -> usize {
self.cache.len()
self.cache.lock_shards().map(|shard| shard.len()).sum()
}
}

Expand Down
Loading