diff --git a/Cargo.lock b/Cargo.lock
index ad01ef5e41f16..f689f88b86a7f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3983,6 +3983,10 @@ dependencies = [
  "winapi",
 ]
 
+[[package]]
+name = "rustc_erase"
+version = "0.0.0"
+
 [[package]]
 name = "rustc_error_codes"
 version = "0.0.0"
@@ -4373,6 +4377,7 @@ dependencies = [
  "rustc_ast",
  "rustc_attr",
  "rustc_data_structures",
+ "rustc_erase",
  "rustc_error_messages",
  "rustc_errors",
  "rustc_feature",
@@ -4571,6 +4576,7 @@ dependencies = [
  "rustc-rayon-core",
  "rustc_ast",
  "rustc_data_structures",
+ "rustc_erase",
  "rustc_errors",
  "rustc_hir",
  "rustc_index",
diff --git a/compiler/rustc_data_structures/src/lib.rs b/compiler/rustc_data_structures/src/lib.rs
index 7fab8954cb19f..fe468fc6f474e 100644
--- a/compiler/rustc_data_structures/src/lib.rs
+++ b/compiler/rustc_data_structures/src/lib.rs
@@ -72,6 +72,7 @@ pub mod stable_hasher;
 mod atomic_ref;
 pub mod fingerprint;
 pub mod profiling;
+pub mod remap;
 pub mod sharded;
 pub mod stack;
 pub mod sync;
diff --git a/compiler/rustc_data_structures/src/remap.rs b/compiler/rustc_data_structures/src/remap.rs
new file mode 100644
index 0000000000000..4b72a4dde06ea
--- /dev/null
+++ b/compiler/rustc_data_structures/src/remap.rs
@@ -0,0 +1,28 @@
+/// Remaps the type with a different lifetime for `'tcx` if applicable.
+pub trait Remap {
+    type Remap<'a>;
+}
+
+impl Remap for u32 {
+    type Remap<'a> = u32;
+}
+
+impl<T: Remap> Remap for Option<T> {
+    type Remap<'a> = Option<T::Remap<'a>>;
+}
+
+impl Remap for () {
+    type Remap<'a> = ();
+}
+
+impl<T0: Remap, T1: Remap> Remap for (T0, T1) {
+    type Remap<'a> = (T0::Remap<'a>, T1::Remap<'a>);
+}
+
+impl<T0: Remap, T1: Remap, T2: Remap> Remap for (T0, T1, T2) {
+    type Remap<'a> = (T0::Remap<'a>, T1::Remap<'a>, T2::Remap<'a>);
+}
+
+impl<T0: Remap, T1: Remap, T2: Remap, T3: Remap> Remap for (T0, T1, T2, T3) {
+    type Remap<'a> = (T0::Remap<'a>, T1::Remap<'a>, T2::Remap<'a>, T3::Remap<'a>);
+}
diff --git a/compiler/rustc_data_structures/src/sharded.rs b/compiler/rustc_data_structures/src/sharded.rs
index 01d292dde8d13..8fe0823fe5d11 100644
--- a/compiler/rustc_data_structures/src/sharded.rs
+++ b/compiler/rustc_data_structures/src/sharded.rs
@@ -1,14 +1,10 @@
 use crate::fx::{FxHashMap, FxHasher};
-use crate::sync::{Lock, LockGuard};
+use crate::sync::{CacheAligned, Lock, LockGuard};
 use std::borrow::Borrow;
 use std::collections::hash_map::RawEntryMut;
 use std::hash::{Hash, Hasher};
 use std::mem;
 
-#[derive(Clone, Default)]
-#[cfg_attr(parallel_compiler, repr(align(64)))]
-struct CacheAligned<T>(T);
-
 #[cfg(parallel_compiler)]
 // 32 shards is sufficient to reduce contention on an 8-core Ryzen 7 1700,
 // but this should be tested on higher core count CPUs. How the `Sharded` type gets used
diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs
index ad71dcdf9d953..42e267ef12af0 100644
--- a/compiler/rustc_data_structures/src/sync.rs
+++ b/compiler/rustc_data_structures/src/sync.rs
@@ -23,6 +23,9 @@ use std::hash::{BuildHasher, Hash};
 use std::ops::{Deref, DerefMut};
 use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};
 
+mod worker_local;
+pub use worker_local::{Registry, WorkerLocal};
+
 pub use std::sync::atomic::Ordering;
 pub use std::sync::atomic::Ordering::SeqCst;
 
@@ -178,33 +181,6 @@ cfg_if! {
 
         use std::cell::Cell;
 
-        #[derive(Debug)]
-        pub struct WorkerLocal<T>(OneThread<T>);
-
-        impl<T> WorkerLocal<T> {
-            /// Creates a new worker local where the `initial` closure computes the
-            /// value this worker local should take for each thread in the thread pool.
-            #[inline]
-            pub fn new<F: FnMut(usize) -> T>(mut f: F) -> WorkerLocal<T> {
-                WorkerLocal(OneThread::new(f(0)))
-            }
-
-            /// Returns the worker-local value for each thread
-            #[inline]
-            pub fn into_inner(self) -> Vec<T> {
-                vec![OneThread::into_inner(self.0)]
-            }
-        }
-
-        impl<T> Deref for WorkerLocal<T> {
-            type Target = T;
-
-            #[inline(always)]
-            fn deref(&self) -> &T {
-                &self.0
-            }
-        }
-
         pub type MTRef<'a, T> = &'a mut T;
 
         #[derive(Debug, Default)]
@@ -324,8 +300,6 @@ cfg_if! {
             };
         }
 
-        pub use rayon_core::WorkerLocal;
-
         pub use rayon::iter::ParallelIterator;
         use rayon::iter::IntoParallelIterator;
 
@@ -365,6 +339,10 @@ pub fn assert_send<T: ?Sized + Send>() {}
 pub fn assert_send_val<T: ?Sized + Send>(_t: &T) {}
 pub fn assert_send_sync_val<T: ?Sized + Sync + Send>(_t: &T) {}
 
+#[derive(Clone, Default)]
+#[cfg_attr(parallel_compiler, repr(align(64)))]
+pub struct CacheAligned<T>(pub T);
+
 pub trait HashMapExt<K, V> {
     /// Same as HashMap::insert, but it may panic if there's already an
     /// entry for `key` with a value not equal to `value`
diff --git a/compiler/rustc_data_structures/src/sync/worker_local.rs b/compiler/rustc_data_structures/src/sync/worker_local.rs
new file mode 100644
index 0000000000000..e3202c147be23
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sync/worker_local.rs
@@ -0,0 +1,189 @@
+use crate::sync::Lock;
+use std::cell::Cell;
+use std::cell::OnceCell;
+use std::ops::Deref;
+use std::sync::Arc;
+
+#[cfg(parallel_compiler)]
+use {crate::cold_path, crate::sync::CacheAligned};
+
+/// A pointer to the `RegistryData` which uniquely identifies a registry.
+/// This identifier can be reused if the registry gets freed.
+#[derive(Clone, Copy, Eq, PartialEq)]
+struct RegistryId(usize);
+
+impl RegistryId {
+    #[inline(always)]
+    /// Verifies that the current thread is associated with the registry and returns its unique
+    /// index within the registry. This panics if the current thread is not associated with this
+    /// registry.
+    ///
+    /// Note that there's a race possible where the identifier in `THREAD_DATA` could be reused
+    /// so this can succeed from a different registry.
+    #[cfg(parallel_compiler)]
+    fn verify(self) -> usize {
+        let (id, index) = THREAD_DATA.with(|data| (data.registry_id.get(), data.index.get()));
+
+        if id == self {
+            index
+        } else {
+            cold_path(|| panic!("Unable to verify registry association"))
+        }
+    }
+}
+
+struct RegistryData {
+    thread_limit: usize,
+    threads: Lock<usize>,
+}
+
+/// Represents a list of threads which can access worker locals.
+#[derive(Clone)]
+pub struct Registry(Arc<RegistryData>);
+
+thread_local! {
+    /// The registry associated with the thread.
+    /// This allows the `WorkerLocal` type to clone the registry in its constructor.
+    static REGISTRY: OnceCell<Registry> = OnceCell::new();
+}
+
+struct ThreadData {
+    registry_id: Cell<RegistryId>,
+    index: Cell<usize>,
+}
+
+thread_local! {
+    /// A thread local which contains the identifier of `REGISTRY` but allows for faster access.
+    /// It also holds the index of the current thread.
+    static THREAD_DATA: ThreadData = const { ThreadData {
+        registry_id: Cell::new(RegistryId(0)),
+        index: Cell::new(0),
+    }};
+}
+
+impl Registry {
+    /// Creates a registry which can hold up to `thread_limit` threads.
+    pub fn new(thread_limit: usize) -> Self {
+        Registry(Arc::new(RegistryData { thread_limit, threads: Lock::new(0) }))
+    }
+
+    /// Gets the registry associated with the current thread. Panics if there's no such registry.
+    pub fn current() -> Self {
+        REGISTRY.with(|registry| registry.get().cloned().expect("No associated registry"))
+    }
+
+    /// Registers the current thread with the registry so worker locals can be used on it.
+    /// Panics if the thread limit is hit or if the thread already has an associated registry.
+    pub fn register(&self) {
+        let mut threads = self.0.threads.lock();
+        if *threads < self.0.thread_limit {
+            REGISTRY.with(|registry| {
+                if registry.get().is_some() {
+                    drop(threads);
+                    panic!("Thread already has a registry");
+                }
+                registry.set(self.clone()).ok();
+                THREAD_DATA.with(|data| {
+                    data.registry_id.set(self.id());
+                    data.index.set(*threads);
+                });
+                *threads += 1;
+            });
+        } else {
+            drop(threads);
+            panic!("Thread limit reached");
+        }
+    }
+
+    /// Gets the identifier of this registry.
+    fn id(&self) -> RegistryId {
+        RegistryId(&*self.0 as *const RegistryData as usize)
+    }
+}
+
+/// Holds worker local values for each possible thread in a registry. You can only access the
+/// worker local value through the `Deref` impl on the registry associated with the thread it was
+/// created on. It will panic otherwise.
+pub struct WorkerLocal<T> {
+    #[cfg(not(parallel_compiler))]
+    local: T,
+    #[cfg(parallel_compiler)]
+    locals: Box<[CacheAligned<T>]>,
+    #[cfg(parallel_compiler)]
+    registry: Registry,
+}
+
+// This is safe because the `deref` call will return a reference to a `T` unique to each thread
+// or it will panic for threads without an associated local. So there isn't a need for `T` to do
+// its own synchronization. The `verify` method on `RegistryId` has an issue where the id
+// can be reused, but `WorkerLocal` has a reference to `Registry` which will prevent any reuse.
+#[cfg(parallel_compiler)]
+unsafe impl<T: Send> Sync for WorkerLocal<T> {}
+
+impl<T> WorkerLocal<T> {
+    /// Creates a new worker local where the `initial` closure computes the
+    /// value this worker local should take for each thread in the registry.
+    #[inline]
+    pub fn new<F: FnMut(usize) -> T>(mut initial: F) -> WorkerLocal<T> {
+        #[cfg(parallel_compiler)]
+        {
+            let registry = Registry::current();
+            WorkerLocal {
+                locals: {
+                    let locals: Vec<_> =
+                        (0..registry.0.thread_limit).map(|i| CacheAligned(initial(i))).collect();
+                    locals.into_boxed_slice()
+                },
+                registry,
+            }
+        }
+        #[cfg(not(parallel_compiler))]
+        {
+            WorkerLocal { local: initial(0) }
+        }
+    }
+
+    /// Returns the worker-local value for each thread
+    #[inline]
+    pub fn into_inner(self) -> Vec<T> {
+        #[cfg(parallel_compiler)]
+        {
+            self.locals.into_vec().into_iter().map(|local| local.0).collect()
+        }
+        #[cfg(not(parallel_compiler))]
+        {
+            vec![self.local]
+        }
+    }
+}
+
+impl<T> WorkerLocal<Vec<T>> {
+    /// Joins the elements of all the worker locals into one Vec
+    pub fn join(self) -> Vec<T> {
+        self.into_inner().into_iter().flat_map(|v| v).collect()
+    }
+}
+
+impl<T: Default> Default for WorkerLocal<T> {
+    fn default() -> Self {
+        WorkerLocal::new(|_| T::default())
+    }
+}
+
+impl<T> Deref for WorkerLocal<T> {
+    type Target = T;
+
+    #[inline(always)]
+    #[cfg(not(parallel_compiler))]
+    fn deref(&self) -> &T {
+        &self.local
+    }
+
+    #[inline(always)]
+    #[cfg(parallel_compiler)]
+    fn deref(&self) -> &T {
+        // This is safe because `verify` will only return values less than
+        // `self.registry.thread_limit` which is the size of the `self.locals` array.
+        unsafe { &self.locals.get_unchecked(self.registry.id().verify()).0 }
+    }
+}
diff --git a/compiler/rustc_erase/Cargo.toml b/compiler/rustc_erase/Cargo.toml
new file mode 100644
index 0000000000000..d4cb756716b1c
--- /dev/null
+++ b/compiler/rustc_erase/Cargo.toml
@@ -0,0 +1,6 @@
+[package]
+name = "rustc_erase"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
\ No newline at end of file
diff --git a/compiler/rustc_erase/src/lib.rs b/compiler/rustc_erase/src/lib.rs
new file mode 100644
index 0000000000000..fb7e778e66804
--- /dev/null
+++ b/compiler/rustc_erase/src/lib.rs
@@ -0,0 +1,47 @@
+// This is a separate crate so that we can `allow(incomplete_features)` for just `generic_const_exprs`
+#![feature(generic_const_exprs)]
+#![allow(incomplete_features)]
+#![feature(core_intrinsics)]
+
+#[cfg(debug_assertions)]
+use std::intrinsics::type_name;
+use std::{
+    fmt,
+    mem::{size_of, transmute_copy, MaybeUninit},
+};
+
+#[derive(Copy, Clone)]
+pub struct Erased<const N: usize> {
+    data: MaybeUninit<[u8; N]>,
+    #[cfg(debug_assertions)]
+    type_id: &'static str,
+}
+
+impl<const N: usize> fmt::Debug for Erased<N> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "Erased<{}>", N)
+    }
+}
+
+pub type Erase<T> = Erased<{ size_of::<T>() }>;
+
+#[inline(always)]
+pub fn erase<T: Copy>(src: T) -> Erased<{ size_of::<T>() }> {
+    Erased {
+        // SAFETY: It is safe to transmute `src` to a `MaybeUninit` byte array of the same size.
+        data: unsafe { transmute_copy(&src) },
+        #[cfg(debug_assertions)]
+        type_id: type_name::<T>(),
+    }
+}
+
+/// Restores an erased value.
+///
+/// This is only safe if `value` is a valid instance of `T`.
+/// For example if `T` was erased with `erase` previously.
+#[inline(always)]
+pub unsafe fn restore<T: Copy>(value: Erased<{ size_of::<T>() }>) -> T {
+    #[cfg(debug_assertions)]
+    assert_eq!(value.type_id, type_name::<T>());
+    unsafe { transmute_copy(&value.data) }
+}
diff --git a/compiler/rustc_hir/src/hir_id.rs b/compiler/rustc_hir/src/hir_id.rs
index 404abe2b068cb..c2f150cab524e 100644
--- a/compiler/rustc_hir/src/hir_id.rs
+++ b/compiler/rustc_hir/src/hir_id.rs
@@ -1,5 +1,8 @@
 use crate::def_id::{DefId, DefIndex, LocalDefId, CRATE_DEF_ID};
-use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableOrd, ToStableHashKey};
+use rustc_data_structures::{
+    remap::Remap,
+    stable_hasher::{HashStable, StableHasher, StableOrd, ToStableHashKey},
+};
 use rustc_span::{def_id::DefPathHash, HashStableContext};
 use std::fmt::{self, Debug};
 
@@ -9,6 +12,10 @@ pub struct OwnerId {
     pub def_id: LocalDefId,
 }
 
+impl Remap for OwnerId {
+    type Remap<'a> = OwnerId;
+}
+
 impl Debug for OwnerId {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         // Example: DefId(0:1 ~ aa[7697]::{use#0})
@@ -75,6 +82,10 @@ pub struct HirId {
     pub local_id: ItemLocalId,
 }
 
+impl Remap for HirId {
+    type Remap<'a> = HirId;
+}
+
 impl Debug for HirId {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         // Example: HirId(DefId(0:1 ~ aa[7697]::{use#0}).10)
diff --git a/compiler/rustc_interface/src/interface.rs b/compiler/rustc_interface/src/interface.rs
index 7a5e45ada3f6a..5b67cecf60ab6 100644
--- a/compiler/rustc_interface/src/interface.rs
+++ b/compiler/rustc_interface/src/interface.rs
@@ -20,6 +20,7 @@ use rustc_session::Session;
 use rustc_session::{early_error, CompilerIO};
 use rustc_span::source_map::{FileLoader, FileName};
 use rustc_span::symbol::sym;
+use std::cell::OnceCell;
 use std::path::PathBuf;
 use std::result;
 
@@ -59,9 +60,25 @@ impl Compiler {
     }
 }
 
+fn registry_setup() {
+    thread_local!
{ + static ONCE: OnceCell<()> = OnceCell::new(); + } + + // Create a dummy registry to allow `WorkerLocal` construction. + // We use `OnceCell` so we only register one dummy registry per thread. + ONCE.with(|once| { + once.get_or_init(|| { + rustc_data_structures::sync::Registry::new(1).register(); + }); + }); +} + /// Converts strings provided as `--cfg [cfgspec]` into a `crate_cfg`. pub fn parse_cfgspecs(cfgspecs: Vec) -> FxHashSet<(String, Option)> { rustc_span::create_default_session_if_not_set_then(move |_| { + registry_setup(); + let cfg = cfgspecs .into_iter() .map(|s| { @@ -121,6 +138,8 @@ pub fn parse_cfgspecs(cfgspecs: Vec) -> FxHashSet<(String, Option) -> CheckCfg { rustc_span::create_default_session_if_not_set_then(move |_| { + registry_setup(); + let mut cfg = CheckCfg::default(); 'specs: for s in specs { diff --git a/compiler/rustc_interface/src/passes.rs b/compiler/rustc_interface/src/passes.rs index 2a373ebc1324d..c009fc799ca66 100644 --- a/compiler/rustc_interface/src/passes.rs +++ b/compiler/rustc_interface/src/passes.rs @@ -781,9 +781,7 @@ pub fn create_global_ctxt<'tcx>( callback(sess, &mut local_providers, &mut extern_providers); } - let queries = queries.get_or_init(|| { - TcxQueries::new(local_providers, extern_providers, query_result_on_disk_cache) - }); + let queries = queries.get_or_init(|| TcxQueries::new(query_result_on_disk_cache)); let gcx = sess.time("setup_global_ctxt", || { global_ctxt.get_or_init(move || { @@ -795,6 +793,8 @@ pub fn create_global_ctxt<'tcx>( untracked, dep_graph, queries.on_disk_cache.as_ref().map(OnDiskCache::as_dyn), + local_providers, + extern_providers, queries.as_dyn(), rustc_query_impl::query_callbacks(arena), ) diff --git a/compiler/rustc_interface/src/util.rs b/compiler/rustc_interface/src/util.rs index e4b4d5375e64a..55e61f49da9dc 100644 --- a/compiler/rustc_interface/src/util.rs +++ b/compiler/rustc_interface/src/util.rs @@ -4,6 +4,8 @@ use libloading::Library; use rustc_ast as ast; use rustc_codegen_ssa::traits::CodegenBackend; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +#[cfg(parallel_compiler)] +use rustc_data_structures::sync; use rustc_errors::registry::Registry; use rustc_parse::validate_attr; use rustc_session as session; @@ -165,6 +167,7 @@ pub(crate) fn run_in_thread_pool_with_globals R + Send, R: Send>( use rustc_middle::ty::tls; use rustc_query_impl::{deadlock, QueryContext, QueryCtxt}; + let registry = sync::Registry::new(threads); let mut builder = rayon::ThreadPoolBuilder::new() .thread_name(|_| "rustc".to_string()) .acquire_thread_handler(jobserver::acquire_thread) @@ -195,6 +198,9 @@ pub(crate) fn run_in_thread_pool_with_globals R + Send, R: Send>( .build_scoped( // Initialize each new worker thread when created. move |thread: rayon::ThreadBuilder| { + // Register the thread for use with the `WorkerLocal` type. + registry.register(); + rustc_span::set_session_globals_then(session_globals, || thread.run()) }, // Run `f` on the first thread in the thread pool. diff --git a/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs b/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs index 07cc84ab95368..b9c5f01d0010e 100644 --- a/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs +++ b/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs @@ -114,7 +114,7 @@ macro_rules! 
provide_one { fn $name<'tcx>( $tcx: TyCtxt<'tcx>, def_id_arg: ty::query::query_keys::$name<'tcx>, - ) -> ty::query::query_values::$name<'tcx> { + ) -> ty::query::query_provided::$name<'tcx> { let _prof_timer = $tcx.prof.generic_activity(concat!("metadata_decode_entry_", stringify!($name))); diff --git a/compiler/rustc_middle/Cargo.toml b/compiler/rustc_middle/Cargo.toml index 543bd56a20c18..9f7ed5b5a0927 100644 --- a/compiler/rustc_middle/Cargo.toml +++ b/compiler/rustc_middle/Cargo.toml @@ -17,6 +17,7 @@ rustc_arena = { path = "../rustc_arena" } rustc_ast = { path = "../rustc_ast" } rustc_attr = { path = "../rustc_attr" } rustc_data_structures = { path = "../rustc_data_structures" } +rustc_erase = { path = "../rustc_erase" } rustc_errors = { path = "../rustc_errors" } # Used for intra-doc links rustc_error_messages = { path = "../rustc_error_messages" } diff --git a/compiler/rustc_middle/src/query/keys.rs b/compiler/rustc_middle/src/query/keys.rs index e4bb3ce3d5a99..f54a24831d375 100644 --- a/compiler/rustc_middle/src/query/keys.rs +++ b/compiler/rustc_middle/src/query/keys.rs @@ -3,18 +3,21 @@ use crate::infer::canonical::Canonical; use crate::mir; use crate::traits; +use crate::traits::ChalkEnvironmentAndGoal; use crate::ty::fast_reject::SimplifiedType; use crate::ty::subst::{GenericArg, SubstsRef}; use crate::ty::{self, layout::TyAndLayout, Ty, TyCtxt}; +use rustc_data_structures::remap::Remap; use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, LOCAL_CRATE}; use rustc_hir::hir_id::{HirId, OwnerId}; +pub use rustc_middle::traits::query::type_op; use rustc_query_system::query::{DefaultCacheSelector, VecCacheSelector}; use rustc_span::symbol::{Ident, Symbol}; use rustc_span::{Span, DUMMY_SP}; /// The `Key` trait controls what types can legally be used as the key /// for a query. -pub trait Key: Sized { +pub trait Key: Sized + Remap { // N.B. Most of the keys down below have `type CacheSelector = DefaultCacheSelector;`, // it would be reasonable to use associated type defaults, to remove the duplication... // @@ -588,7 +591,7 @@ impl Key for Option { /// Canonical query goals correspond to abstract trait operations that /// are not tied to any crate in particular. 
-impl<'tcx, T> Key for Canonical<'tcx, T> { +impl<'tcx, T: Remap> Key for Canonical<'tcx, T> { type CacheSelector = DefaultCacheSelector; #[inline(always)] @@ -696,3 +699,132 @@ impl Key for HirId { None } } + +// Remap implementations + +impl<'tcx, T: Remap> Remap for ty::ParamEnvAnd<'tcx, T> { + type Remap<'a> = ty::ParamEnvAnd<'a, T::Remap<'a>>; +} + +impl<'tcx, T: Remap> Remap for ty::Binder<'tcx, T> { + type Remap<'a> = ty::Binder<'a, T::Remap<'a>>; +} + +impl<'tcx, T: Remap> Remap for Canonical<'tcx, T> { + type Remap<'a> = Canonical<'a, T::Remap<'a>>; +} + +impl Remap for type_op::Normalize { + type Remap<'a> = type_op::Normalize>; +} + +impl<'tcx> Remap for type_op::AscribeUserType<'tcx> { + type Remap<'a> = type_op::AscribeUserType<'a>; +} + +impl<'tcx> Remap for type_op::Subtype<'tcx> { + type Remap<'a> = type_op::Subtype<'a>; +} + +impl<'tcx> Remap for type_op::Eq<'tcx> { + type Remap<'a> = type_op::Eq<'a>; +} + +impl<'tcx> Remap for type_op::ProvePredicate<'tcx> { + type Remap<'a> = type_op::ProvePredicate<'a>; +} + +impl<'tcx> Remap for ty::FnSig<'tcx> { + type Remap<'a> = ty::FnSig<'a>; +} + +impl<'tcx> Remap for ty::AliasTy<'tcx> { + type Remap<'a> = ty::AliasTy<'a>; +} + +impl<'tcx> Remap for Ty<'tcx> { + type Remap<'a> = Ty<'a>; +} + +impl<'tcx> Remap for ty::Predicate<'tcx> { + type Remap<'a> = ty::Predicate<'a>; +} + +impl<'tcx> Remap for ChalkEnvironmentAndGoal<'tcx> { + type Remap<'a> = ChalkEnvironmentAndGoal<'a>; +} + +impl<'tcx> Remap for ty::Instance<'tcx> { + type Remap<'a> = ty::Instance<'a>; +} + +impl<'tcx> Remap for ty::InstanceDef<'tcx> { + type Remap<'a> = ty::InstanceDef<'a>; +} + +impl Remap for ty::WithOptConstParam { + type Remap<'a> = ty::WithOptConstParam>; +} + +impl Remap for SimplifiedType { + type Remap<'a> = SimplifiedType; +} + +impl<'tcx> Remap for mir::interpret::GlobalId<'tcx> { + type Remap<'a> = mir::interpret::GlobalId<'a>; +} + +impl<'tcx> Remap for mir::interpret::LitToConstInput<'tcx> { + type Remap<'a> = mir::interpret::LitToConstInput<'a>; +} + +impl<'tcx> Remap for mir::interpret::ConstAlloc<'tcx> { + type Remap<'a> = mir::interpret::ConstAlloc<'a>; +} + +impl<'tcx> Remap for mir::ConstantKind<'tcx> { + type Remap<'a> = mir::ConstantKind<'a>; +} + +impl Remap for mir::Field { + type Remap<'a> = mir::Field; +} + +impl<'tcx> Remap for ty::ValTree<'tcx> { + type Remap<'a> = ty::ValTree<'a>; +} + +impl<'tcx> Remap for ty::ParamEnv<'tcx> { + type Remap<'a> = ty::ParamEnv<'a>; +} + +impl<'tcx> Remap for ty::GenericArg<'tcx> { + type Remap<'a> = ty::GenericArg<'a>; +} + +impl<'tcx, T: Remap> Remap for &'tcx ty::List +where + for<'a> ::Remap<'a>: 'a, +{ + type Remap<'a> = &'a ty::List>; +} + +impl<'tcx> Remap for ty::ExistentialTraitRef<'tcx> { + type Remap<'a> = ty::ExistentialTraitRef<'a>; +} + +impl<'tcx> Remap for ty::Const<'tcx> { + type Remap<'a> = ty::Const<'a>; +} + +impl<'tcx> Remap for ty::TraitRef<'tcx> { + type Remap<'a> = ty::TraitRef<'a>; +} + +impl<'tcx> Remap for ty::UnevaluatedConst<'tcx> { + type Remap<'a> = ty::UnevaluatedConst<'a>; +} + +impl Remap for traits::WellFormedLoc { + type Remap<'a> = traits::WellFormedLoc; +} diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs index 9205a8a0ffed8..bf91ca21359d9 100644 --- a/compiler/rustc_middle/src/ty/context.rs +++ b/compiler/rustc_middle/src/ty/context.rs @@ -18,6 +18,8 @@ use crate::mir::{ use crate::thir::Thir; use crate::traits; use crate::traits::solve::{ExternalConstraints, ExternalConstraintsData}; +use 
crate::ty::query::ExternProviders; +use crate::ty::query::Providers; use crate::ty::query::{self, TyCtxtAt}; use crate::ty::{ self, AdtDef, AdtDefData, AdtKind, Binder, Const, ConstData, DefIdTree, FloatTy, FloatVar, @@ -445,7 +447,7 @@ pub struct GlobalCtxt<'tcx> { pub on_disk_cache: Option<&'tcx dyn OnDiskCache<'tcx>>, pub queries: &'tcx dyn query::QueryEngine<'tcx>, - pub query_caches: query::QueryCaches<'tcx>, + pub query_system: query::QuerySystem<'tcx>, pub(crate) query_kinds: &'tcx [DepKindStruct<'tcx>], // Internal caches for metadata decoding. No need to track deps on this. @@ -593,6 +595,8 @@ impl<'tcx> TyCtxt<'tcx> { untracked: Untracked, dep_graph: DepGraph, on_disk_cache: Option<&'tcx dyn OnDiskCache<'tcx>>, + local_providers: Providers, + extern_providers: ExternProviders, queries: &'tcx dyn query::QueryEngine<'tcx>, query_kinds: &'tcx [DepKindStruct<'tcx>], ) -> GlobalCtxt<'tcx> { @@ -618,7 +622,7 @@ impl<'tcx> TyCtxt<'tcx> { untracked, on_disk_cache, queries, - query_caches: query::QueryCaches::default(), + query_system: query::QuerySystem::new(local_providers, extern_providers), query_kinds, ty_rcache: Default::default(), pred_rcache: Default::default(), diff --git a/compiler/rustc_middle/src/ty/query.rs b/compiler/rustc_middle/src/ty/query.rs index 933aaadd62e1d..85a13cdf2810f 100644 --- a/compiler/rustc_middle/src/ty/query.rs +++ b/compiler/rustc_middle/src/ty/query.rs @@ -1,3 +1,5 @@ +#![allow(unused_parens)] + use crate::dep_graph; use crate::infer::canonical::{self, Canonical}; use crate::lint::LintExpectation; @@ -35,6 +37,7 @@ use crate::ty::subst::{GenericArg, SubstsRef}; use crate::ty::util::AlwaysRequiresDrop; use crate::ty::GeneratorDiagnosticData; use crate::ty::{self, CrateInherentImpls, ParamEnvAnd, Ty, TyCtxt, UnusedGenericParams}; +use rustc_arena::TypedArena; use rustc_ast as ast; use rustc_ast::expand::allocator::AllocatorKind; use rustc_attr as attr; @@ -42,7 +45,9 @@ use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap, FxIndexSet}; use rustc_data_structures::steal::Steal; use rustc_data_structures::svh::Svh; use rustc_data_structures::sync::Lrc; +use rustc_data_structures::sync::WorkerLocal; use rustc_data_structures::unord::UnordSet; +use rustc_erase::{erase, restore, Erase}; use rustc_errors::ErrorGuaranteed; use rustc_hir as hir; use rustc_hir::def::DefKind; @@ -51,6 +56,8 @@ use rustc_hir::hir_id::OwnerId; use rustc_hir::lang_items::{LangItem, LanguageItems}; use rustc_hir::{Crate, ItemLocalId, TraitCandidate}; use rustc_index::vec::IndexVec; +pub(crate) use rustc_query_system::query::QueryJobId; +use rustc_query_system::query::*; use rustc_session::config::{EntryFnType, OptLevel, OutputFilenames, SymbolManglingVersion}; use rustc_session::cstore::{CrateDepKind, CrateSource}; use rustc_session::cstore::{ExternCrate, ForeignModule, LinkagePreference, NativeLib}; @@ -60,12 +67,31 @@ use rustc_span::symbol::Symbol; use rustc_span::{Span, DUMMY_SP}; use rustc_target::abi; use rustc_target::spec::PanicStrategy; +use std::marker::PhantomData; use std::ops::Deref; use std::path::PathBuf; use std::sync::Arc; -pub(crate) use rustc_query_system::query::QueryJobId; -use rustc_query_system::query::*; +pub struct QuerySystem<'tcx> { + pub local_providers: Box, + pub extern_providers: Box, + pub arenas: QueryArenas<'tcx>, + pub caches: QueryCaches<'tcx>, + // Since we erase query value types we tell the typesystem about them with `PhantomData`. 
+ _phantom_values: QueryPhantomValues<'tcx>, +} + +impl<'tcx> QuerySystem<'tcx> { + pub fn new(local_providers: Providers, extern_providers: ExternProviders) -> Self { + QuerySystem { + local_providers: Box::new(local_providers), + extern_providers: Box::new(extern_providers), + arenas: Default::default(), + caches: Default::default(), + _phantom_values: Default::default(), + } + } +} #[derive(Copy, Clone)] pub struct TyCtxtAt<'tcx> { @@ -106,6 +132,55 @@ impl<'tcx> TyCtxt<'tcx> { } } +#[inline(always)] +fn query_get_at<'tcx, Cache, K>( + tcx: TyCtxt<'tcx>, + execute_query: fn( + &'tcx dyn QueryEngine<'tcx>, + TyCtxt<'tcx>, + Span, + Cache::Key, + QueryMode, + ) -> Option, + query_cache: &Cache, + span: Span, + key: K, +) -> Cache::Value +where + K: IntoQueryParam, + Cache: QueryCache, +{ + let key = key.into_query_param(); + + match try_get_cached(tcx, query_cache, &key) { + Some(value) => value, + None => execute_query(tcx.queries, tcx, span, key, QueryMode::Get).unwrap(), + } +} + +#[inline(always)] +fn query_ensure<'tcx, Cache, K>( + tcx: TyCtxt<'tcx>, + execute_query: fn( + &'tcx dyn QueryEngine<'tcx>, + TyCtxt<'tcx>, + Span, + Cache::Key, + QueryMode, + ) -> Option, + query_cache: &Cache, + key: K, +) where + K: IntoQueryParam, + Cache::Value: Copy, + Cache: QueryCache, +{ + let key = key.into_query_param(); + if try_get_cached(tcx, query_cache, &key).is_none() { + execute_query(tcx.queries, tcx, DUMMY_SP, key, QueryMode::Ensure); + } +} + macro_rules! query_helper_param_ty { (DefId) => { impl IntoQueryParam }; (LocalDefId) => { impl IntoQueryParam }; @@ -113,10 +188,10 @@ macro_rules! query_helper_param_ty { } macro_rules! query_if_arena { - ([] $arena:ty, $no_arena:ty) => { + ([] $arena:tt $no_arena:tt) => { $no_arena }; - ([(arena_cache) $($rest:tt)*] $arena:ty, $no_arena:ty) => { + ([(arena_cache) $($rest:tt)*] $arena:tt $no_arena:tt) => { $arena }; ([$other:tt $($modifiers:tt)*]$($args:tt)*) => { @@ -132,7 +207,7 @@ macro_rules! separate_provide_extern_decl { for<'tcx> fn( TyCtxt<'tcx>, query_keys::$name<'tcx>, - ) -> query_values::$name<'tcx> + ) -> query_provided::$name<'tcx> }; ([$other:tt $($modifiers:tt)*][$($args:tt)*]) => { separate_provide_extern_decl!([$($modifiers)*][$($args)*]) @@ -158,9 +233,9 @@ macro_rules! separate_provide_extern_default { } macro_rules! opt_remap_env_constness { - ([][$name:ident]) => {}; + ([][$name:ident]) => { $name }; ([(remap_env_constness) $($rest:tt)*][$name:ident]) => { - let $name = $name.without_const(); + $name.without_const() }; ([$other:tt $($modifiers:tt)*][$name:ident]) => { opt_remap_env_constness!([$($modifiers)*][$name]) @@ -184,30 +259,57 @@ macro_rules! 
define_callbacks { $(pub type $name<'tcx> = $($K)*;)* } - #[allow(nonstandard_style, unused_lifetimes, unused_parens)] + #[allow(nonstandard_style, unused_lifetimes)] pub mod query_values { use super::*; - $(pub type $name<'tcx> = query_if_arena!([$($modifiers)*] <$V as Deref>::Target, $V);)* + $(pub type $name<'tcx> = $V;)* } - #[allow(nonstandard_style, unused_lifetimes, unused_parens)] - pub mod query_storage { + #[allow(nonstandard_style, unused_lifetimes)] + pub mod query_provided { use super::*; $( - pub type $name<'tcx> = query_if_arena!([$($modifiers)*] - <<$($K)* as Key>::CacheSelector - as CacheSelector<'tcx, <$V as Deref>::Target>>::ArenaCache, - <<$($K)* as Key>::CacheSelector as CacheSelector<'tcx, $V>>::Cache - ); + pub type $name<'tcx> = query_if_arena!([$($modifiers)*] (<$V as Deref>::Target) ($V)); )* } + #[allow(nonstandard_style, unused_lifetimes)] + pub mod query_provided_to_value { + use super::*; + $( + #[inline] + pub fn $name<'tcx>( + _tcx: TyCtxt<'tcx>, + value: query_provided::$name<'tcx>, + ) -> Erase> { + erase(query_if_arena!([$($modifiers)*] + (&*_tcx.query_system.arenas.$name.alloc(value)) + (value) + )) + } + )* + } #[allow(nonstandard_style, unused_lifetimes)] - pub mod query_stored { + pub mod query_storage { use super::*; - $(pub type $name<'tcx> = $V;)* + $( + pub type $name<'tcx> = <<$($K)* as Key>::CacheSelector as CacheSelector<'tcx, Erase<$V>>>::Cache; + )* + } + + #[derive(Default)] + pub struct QueryArenas<'tcx> { + $($(#[$attr])* pub $name: query_if_arena!([$($modifiers)*] + (WorkerLocal::Target>>) + () + ),)* + } + + #[derive(Default)] + pub struct QueryPhantomValues<'tcx> { + $($(#[$attr])* pub $name: PhantomData>,)* } #[derive(Default)] @@ -219,13 +321,12 @@ macro_rules! define_callbacks { $($(#[$attr])* #[inline(always)] pub fn $name(self, key: query_helper_param_ty!($($K)*)) { - let key = key.into_query_param(); - opt_remap_env_constness!([$($modifiers)*][key]); - - match try_get_cached(self.tcx, &self.tcx.query_caches.$name, &key) { - Some(_) => return, - None => self.tcx.queries.$name(self.tcx, DUMMY_SP, key, QueryMode::Ensure), - }; + query_ensure( + self.tcx, + QueryEngine::$name, + &self.tcx.query_system.caches.$name, + opt_remap_env_constness!([$($modifiers)*][key]), + ); })* } @@ -235,7 +336,15 @@ macro_rules! define_callbacks { #[must_use] pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> $V { - self.at(DUMMY_SP).$name(key) + unsafe { + restore::<$V>(query_get_at( + self, + QueryEngine::$name, + &self.query_system.caches.$name, + DUMMY_SP, + opt_remap_env_constness!([$($modifiers)*][key]), + )) + } })* } @@ -244,12 +353,14 @@ macro_rules! define_callbacks { #[inline(always)] pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> $V { - let key = key.into_query_param(); - opt_remap_env_constness!([$($modifiers)*][key]); - - match try_get_cached(self.tcx, &self.tcx.query_caches.$name, &key) { - Some(value) => value, - None => self.tcx.queries.$name(self.tcx, self.span, key, QueryMode::Get).unwrap(), + unsafe { + restore::<$V>(query_get_at( + self.tcx, + QueryEngine::$name, + &self.tcx.query_system.caches.$name, + self.span, + opt_remap_env_constness!([$($modifiers)*][key]), + )) } })* } @@ -258,7 +369,7 @@ macro_rules! define_callbacks { $(pub $name: for<'tcx> fn( TyCtxt<'tcx>, query_keys::$name<'tcx>, - ) -> query_values::$name<'tcx>,)* + ) -> query_provided::$name<'tcx>,)* } pub struct ExternProviders { @@ -313,7 +424,7 @@ macro_rules! 
define_callbacks { span: Span, key: query_keys::$name<'tcx>, mode: QueryMode, - ) -> Option<$V>;)* + ) -> Option>;)* } }; } @@ -335,12 +446,14 @@ macro_rules! define_feedable { $(impl<'tcx, K: IntoQueryParam<$($K)*> + Copy> TyCtxtFeed<'tcx, K> { $(#[$attr])* #[inline(always)] - pub fn $name(self, value: query_values::$name<'tcx>) -> $V { + pub fn $name(self, value: query_provided::$name<'tcx>) -> $V { let key = self.key().into_query_param(); - opt_remap_env_constness!([$($modifiers)*][key]); + let key = opt_remap_env_constness!([$($modifiers)*][key]); let tcx = self.tcx; - let cache = &tcx.query_caches.$name; + let erased = query_provided_to_value::$name(tcx, value); + let value = unsafe { restore::<$V>(erased) }; + let cache = &tcx.query_system.caches.$name; match try_get_cached(tcx, cache, &key) { Some(old) => { @@ -358,7 +471,8 @@ macro_rules! define_feedable { &value, hash_result!([$($modifiers)*]), ); - cache.complete(key, value, dep_node_index) + cache.complete(key, erased, dep_node_index); + value } } } diff --git a/compiler/rustc_query_impl/Cargo.toml b/compiler/rustc_query_impl/Cargo.toml index 21732d260354d..30e64810b160c 100644 --- a/compiler/rustc_query_impl/Cargo.toml +++ b/compiler/rustc_query_impl/Cargo.toml @@ -10,6 +10,7 @@ edition = "2021" measureme = "10.0.0" rustc_ast = { path = "../rustc_ast" } rustc_data_structures = { path = "../rustc_data_structures" } +rustc_erase = { path = "../rustc_erase" } rustc_errors = { path = "../rustc_errors" } rustc_hir = { path = "../rustc_hir" } rustc_index = { path = "../rustc_index" } diff --git a/compiler/rustc_query_impl/src/lib.rs b/compiler/rustc_query_impl/src/lib.rs index 2d243e13cc212..c20832fdf63a8 100644 --- a/compiler/rustc_query_impl/src/lib.rs +++ b/compiler/rustc_query_impl/src/lib.rs @@ -17,14 +17,22 @@ extern crate rustc_macros; #[macro_use] extern crate rustc_middle; +use rustc_data_structures::fingerprint::Fingerprint; +use rustc_data_structures::stable_hasher::HashStable; use rustc_data_structures::sync::AtomicU64; +use rustc_erase::{erase, restore, Erase}; use rustc_middle::arena::Arena; use rustc_middle::dep_graph::{self, DepKindStruct}; use rustc_middle::query::Key; -use rustc_middle::ty::query::{query_keys, query_storage, query_stored, query_values}; -use rustc_middle::ty::query::{ExternProviders, Providers, QueryEngine}; +use rustc_middle::ty::query::QueryEngine; +use rustc_middle::ty::query::{ + query_keys, query_provided, query_provided_to_value, query_storage, query_values, +}; use rustc_middle::ty::TyCtxt; +use rustc_query_system::dep_graph::DepNodeParams; +use rustc_query_system::ich::StableHashingContext; use rustc_span::Span; +use std::fmt; #[macro_use] mod plumbing; @@ -34,6 +42,7 @@ use rustc_query_system::query::*; pub use rustc_query_system::query::{deadlock, QueryContext}; pub use rustc_query_system::query::QueryConfig; +use rustc_query_system::HandleCycleError; mod on_disk_cache; pub use on_disk_cache::OnDiskCache; @@ -41,6 +50,168 @@ pub use on_disk_cache::OnDiskCache; mod profiling_support; pub use self::profiling_support::alloc_self_profile_query_strings; +struct DynamicQuery { + name: &'static str, + query_state: for<'a, 'tcx> fn( + tcx: &'a QueryCtxt<'tcx>, + ) -> &'a QueryState< + <::Remap<'tcx> as QueryCache>::Key, + rustc_middle::dep_graph::DepKind, + >, + query_cache: + for<'a, 'tcx> fn(tcx: &'a QueryCtxt<'tcx>) -> &'a ::Remap<'tcx>, + cache_on_disk: for<'tcx> fn( + tcx: TyCtxt<'tcx>, + key: &<::Remap<'tcx> as QueryCache>::Key, + ) -> bool, + execute_query: for<'tcx> fn( + tcx: 
TyCtxt<'tcx>, + k: <::Remap<'tcx> as QueryCache>::Key, + ) -> <::Remap<'tcx> as QueryCache>::Value, + compute: for<'tcx> fn( + tcx: TyCtxt<'tcx>, + key: <::Remap<'tcx> as QueryCache>::Key, + ) -> <::Remap<'tcx> as QueryCache>::Value, + try_load_from_disk: for<'tcx> fn( + qcx: QueryCtxt<'tcx>, + idx: &<::Remap<'tcx> as QueryCache>::Key, + ) -> TryLoadFromDisk< + QueryCtxt<'tcx>, + <::Remap<'tcx> as QueryCache>::Value, + >, + anon: bool, + eval_always: bool, + depth_limit: bool, + feedable: bool, + dep_kind: rustc_middle::dep_graph::DepKind, + handle_cycle_error: HandleCycleError, + hash_result: Option< + fn( + &mut StableHashingContext<'_>, + &<::Remap<'_> as QueryCache>::Value, + ) -> Fingerprint, + >, +} + +struct ErasedQuery { + dynamic: &'static DynamicQuery, +} + +impl Copy for ErasedQuery {} +impl Clone for ErasedQuery { + fn clone(&self) -> Self { + ErasedQuery { dynamic: self.dynamic } + } +} + +impl fmt::Debug for ErasedQuery { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "ErasedQuery<{}>", self.dynamic.name) + } +} + +impl<'tcx, C: QueryCache + RemapQueryCache + 'static> QueryConfig> + for ErasedQuery +where + for<'a> <::Remap<'tcx> as QueryCache>::Key: + HashStable>, +{ + type Key = <::Remap<'tcx> as QueryCache>::Key; + type Value = <::Remap<'tcx> as QueryCache>::Value; + type Cache = ::Remap<'tcx>; + + #[inline(always)] + fn name(&self) -> &'static str { + self.dynamic.name + } + + #[inline] + fn cache_on_disk(&self, tcx: TyCtxt<'tcx>, key: &Self::Key) -> bool { + (self.dynamic.cache_on_disk)(tcx, key) + } + + #[inline(always)] + fn query_state<'a>( + &self, + cx: &'a QueryCtxt<'tcx>, + ) -> &'a QueryState { + (self.dynamic.query_state)(&cx) + } + + #[inline(always)] + fn query_cache<'a>(&self, tcx: &'a QueryCtxt<'tcx>) -> &'a Self::Cache { + (self.dynamic.query_cache)(&tcx) + } + + fn execute_query(&self, tcx: TyCtxt<'tcx>, key: Self::Key) -> Self::Value { + (self.dynamic.execute_query)(tcx, key) + } + + #[inline(always)] + fn compute(&self, tcx: TyCtxt<'tcx>, key: Self::Key) -> Self::Value { + (self.dynamic.compute)(tcx, key) + } + + #[inline] + fn try_load_from_disk( + &self, + qcx: QueryCtxt<'tcx>, + key: &Self::Key, + ) -> rustc_query_system::query::TryLoadFromDisk, Self::Value> { + (self.dynamic.try_load_from_disk)(qcx, key) + } + + #[inline(always)] + fn anon(&self) -> bool { + self.dynamic.anon + } + + #[inline(always)] + fn eval_always(&self) -> bool { + self.dynamic.eval_always + } + + #[inline(always)] + fn depth_limit(&self) -> bool { + self.dynamic.depth_limit + } + + #[inline(always)] + fn feedable(&self) -> bool { + self.dynamic.feedable + } + + #[inline(always)] + fn dep_kind(&self) -> rustc_middle::dep_graph::DepKind { + self.dynamic.dep_kind + } + + #[inline(always)] + fn handle_cycle_error(&self) -> rustc_query_system::HandleCycleError { + self.dynamic.handle_cycle_error + } + + #[inline(always)] + fn hash_result(&self) -> rustc_query_system::query::HashResult { + self.dynamic.hash_result + } +} + +trait QueryErasable<'tcx> +where + for<'a> <::Remap<'tcx> as QueryCache>::Key: + HashStable>, +{ + type Cache: QueryCache + RemapQueryCache + 'static; + type Key: DepNodeParams>; + type Value; + + fn erase() -> ErasedQuery; + fn restore( + value: <::Remap<'tcx> as QueryCache>::Value, + ) -> Self::Value; +} + rustc_query_append! { define_queries! 
} impl<'tcx> Queries<'tcx> { diff --git a/compiler/rustc_query_impl/src/on_disk_cache.rs b/compiler/rustc_query_impl/src/on_disk_cache.rs index 70c481fb0ee2e..7cee52a4243a5 100644 --- a/compiler/rustc_query_impl/src/on_disk_cache.rs +++ b/compiler/rustc_query_impl/src/on_disk_cache.rs @@ -1,6 +1,8 @@ +use crate::ErasedQuery; use crate::QueryCtxt; use rustc_data_structures::fx::{FxHashMap, FxIndexSet}; use rustc_data_structures::memmap::Mmap; +use rustc_data_structures::stable_hasher::HashStable; use rustc_data_structures::sync::{HashMapExt, Lock, Lrc, RwLock}; use rustc_data_structures::unhash::UnhashMap; use rustc_data_structures::unord::UnordSet; @@ -13,7 +15,9 @@ use rustc_middle::mir::{self, interpret}; use rustc_middle::ty::codec::{RefDecodable, TyDecoder, TyEncoder}; use rustc_middle::ty::{self, Ty, TyCtxt}; use rustc_query_system::dep_graph::DepContext; -use rustc_query_system::query::{QueryCache, QueryContext, QuerySideEffects}; +use rustc_query_system::ich::StableHashingContext; +use rustc_query_system::query::QueryConfig; +use rustc_query_system::query::{QueryCache, QuerySideEffects, RemapQueryCache}; use rustc_serialize::{ opaque::{FileEncodeResult, FileEncoder, IntEncodedWithFixedSize, MemDecoder}, Decodable, Decoder, Encodable, Encoder, @@ -658,7 +662,7 @@ impl<'a, 'tcx> Decodable> for ExpnId { #[cfg(debug_assertions)] { - use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; + use rustc_data_structures::stable_hasher::StableHasher; let local_hash: u64 = decoder.tcx.with_stable_hashing_context(|mut hcx| { let mut hasher = StableHasher::new(); expn_id.expn_data().hash_stable(&mut hcx, &mut hasher); @@ -1056,24 +1060,26 @@ impl<'a, 'tcx> Encodable> for [u8] { } } -pub fn encode_query_results<'a, 'tcx, CTX, Q>( - tcx: CTX, +pub(super) fn encode_query_results<'a, 'tcx, E>( + query: ErasedQuery, + qcx: QueryCtxt<'tcx>, encoder: &mut CacheEncoder<'a, 'tcx>, query_result_index: &mut EncodedDepNodeIndex, ) where - CTX: QueryContext + 'tcx, - Q: super::QueryConfig, - Q::Value: Encodable>, + E: super::QueryErasable<'tcx>, + for<'b> <::Remap<'tcx> as QueryCache>::Key: + HashStable>, + E::Value: Encodable>, { - let _timer = tcx - .dep_context() + let _timer = qcx + .tcx .profiler() - .verbose_generic_activity_with_arg("encode_query_results_for", std::any::type_name::()); + .verbose_generic_activity_with_arg("encode_query_results_for", query.name()); - assert!(Q::query_state(tcx).all_inactive()); - let cache = Q::query_cache(tcx); + assert!(query.query_state(&qcx).all_inactive()); + let cache = query.query_cache(&qcx); cache.iter(&mut |key, value, dep_node| { - if Q::cache_on_disk(*tcx.dep_context(), &key) { + if query.cache_on_disk(qcx.tcx, &key) { let dep_node = SerializedDepNodeIndex::new(dep_node.index()); // Record position of the cache entry. @@ -1081,7 +1087,7 @@ pub fn encode_query_results<'a, 'tcx, CTX, Q>( // Encode the type check tables with the `SerializedDepNodeIndex` // as tag. 
- encoder.encode_tagged(dep_node, value); + encoder.encode_tagged(dep_node, &E::restore(*value)); } }); } diff --git a/compiler/rustc_query_impl/src/plumbing.rs b/compiler/rustc_query_impl/src/plumbing.rs index 4dea03c1ef6a2..ebf72a5ae2887 100644 --- a/compiler/rustc_query_impl/src/plumbing.rs +++ b/compiler/rustc_query_impl/src/plumbing.rs @@ -4,7 +4,7 @@ use crate::on_disk_cache::{CacheDecoder, CacheEncoder, EncodedDepNodeIndex}; use crate::profiling_support::QueryKeyStringCache; -use crate::{on_disk_cache, Queries}; +use crate::{on_disk_cache, Queries, QueryErasable}; use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; use rustc_data_structures::sync::{AtomicU64, Lock}; use rustc_errors::{Diagnostic, Handler}; @@ -17,9 +17,10 @@ use rustc_middle::ty::{self, TyCtxt}; use rustc_query_system::dep_graph::{DepNodeParams, HasDepContext}; use rustc_query_system::ich::StableHashingContext; use rustc_query_system::query::{ - force_query, QueryConfig, QueryContext, QueryJobId, QueryMap, QuerySideEffects, QueryStackFrame, + force_query, QueryCache, QueryConfig, QueryContext, QueryJobId, QueryMap, QuerySideEffects, + QueryStackFrame, RemapQueryCache, }; -use rustc_query_system::{LayoutOfDepth, QueryOverflow, Value}; +use rustc_query_system::{LayoutOfDepth, QueryOverflow}; use rustc_serialize::Decodable; use rustc_session::Limit; use rustc_span::def_id::LOCAL_CRATE; @@ -265,26 +266,28 @@ macro_rules! feedable { } macro_rules! hash_result { - ([]) => {{ - Some(dep_graph::hash_result) + ([][$V:ty]) => {{ + Some(|hcx, result: &Erase<$V>| dep_graph::hash_result(hcx, &unsafe { + restore::<$V>(*result) + })) }}; - ([(no_hash) $($rest:tt)*]) => {{ + ([(no_hash) $($rest:tt)*][$V:ty]) => {{ None }}; - ([$other:tt $($modifiers:tt)*]) => { - hash_result!([$($modifiers)*]) + ([$other:tt $($modifiers:tt)*][$($args:tt)*]) => { + hash_result!([$($modifiers)*][$($args)*]) }; } macro_rules! get_provider { ([][$tcx:expr, $name:ident, $key:expr]) => {{ - $tcx.queries.local_providers.$name + $tcx.query_system.local_providers.$name }}; ([(separate_provide_extern) $($rest:tt)*][$tcx:expr, $name:ident, $key:expr]) => {{ if $key.query_crate_is_local() { - $tcx.queries.local_providers.$name + $tcx.query_system.local_providers.$name } else { - $tcx.queries.extern_providers.$name + $tcx.query_system.extern_providers.$name } }}; ([$other:tt $($modifiers:tt)*][$($args:tt)*]) => { @@ -293,14 +296,14 @@ macro_rules! get_provider { } macro_rules! 
should_ever_cache_on_disk { - ([]) => {{ - None + ([]$yes:tt $no:tt) => {{ + $no }}; - ([(cache) $($rest:tt)*]) => {{ - Some($crate::plumbing::try_load_from_disk::) + ([(cache) $($rest:tt)*]$yes:tt $no:tt) => {{ + $yes }}; - ([$other:tt $($modifiers:tt)*]) => { - should_ever_cache_on_disk!([$($modifiers)*]) + ([$other:tt $($modifiers:tt)*]$yes:tt $no:tt) => { + should_ever_cache_on_disk!([$($modifiers)*]$yes $no) }; } @@ -352,18 +355,26 @@ pub(crate) fn create_query_frame< QueryStackFrame::new(description, span, def_id, def_kind, kind, ty_adt_id, hash) } -fn try_load_from_on_disk_cache<'tcx, Q>(tcx: TyCtxt<'tcx>, dep_node: DepNode) +fn try_load_from_on_disk_cache_erase<'tcx, Q>(tcx: TyCtxt<'tcx>, dep_node: DepNode) +where + Q: QueryErasable<'tcx>, + for<'a> <::Remap<'tcx> as QueryCache>::Key: + HashStable>, +{ + try_load_from_on_disk_cache(Q::erase(), tcx, dep_node) +} + +fn try_load_from_on_disk_cache<'tcx, Q>(query: Q, tcx: TyCtxt<'tcx>, dep_node: DepNode) where Q: QueryConfig>, - Q::Key: DepNodeParams>, { debug_assert!(tcx.dep_graph.is_green(&dep_node)); let key = Q::Key::recover(tcx, &dep_node).unwrap_or_else(|| { panic!("Failed to recover key for {:?} with hash {}", dep_node, dep_node.hash) }); - if Q::cache_on_disk(tcx, &key) { - let _ = Q::execute_query(tcx, key); + if query.cache_on_disk(tcx, &key) { + let _ = query.execute_query(tcx, key); } } @@ -377,11 +388,18 @@ where tcx.on_disk_cache().as_ref()?.try_load_query_result(*tcx, id) } -fn force_from_dep_node<'tcx, Q>(tcx: TyCtxt<'tcx>, dep_node: DepNode) -> bool +fn force_from_dep_node_erase<'tcx, Q>(tcx: TyCtxt<'tcx>, dep_node: DepNode) -> bool +where + Q: QueryErasable<'tcx>, + for<'a> <::Remap<'tcx> as QueryCache>::Key: + HashStable>, +{ + force_from_dep_node(Q::erase(), tcx, dep_node) +} + +fn force_from_dep_node<'tcx, Q>(query: Q, tcx: TyCtxt<'tcx>, dep_node: DepNode) -> bool where Q: QueryConfig>, - Q::Key: DepNodeParams>, - Q::Value: Value, DepKind>, { // We must avoid ever having to call `force_from_dep_node()` for a // `DepNode::codegen_unit`: @@ -405,7 +423,7 @@ where #[cfg(debug_assertions)] let _guard = tracing::span!(tracing::Level::TRACE, stringify!($name), ?key).entered(); let tcx = QueryCtxt::from_tcx(tcx); - force_query::(tcx, key, dep_node); + force_query::<_, _, DepKind>(query, tcx, key, dep_node); true } else { false @@ -414,8 +432,9 @@ where pub(crate) fn query_callback<'tcx, Q>(is_anon: bool, is_eval_always: bool) -> DepKindStruct<'tcx> where - Q: QueryConfig>, - Q::Key: DepNodeParams>, + Q: QueryErasable<'tcx>, + for<'a> <::Remap<'tcx> as QueryCache>::Key: + HashStable>, { let fingerprint_style = Q::Key::fingerprint_style(); @@ -433,8 +452,8 @@ where is_anon, is_eval_always, fingerprint_style, - force_from_dep_node: Some(force_from_dep_node::), - try_load_from_on_disk_cache: Some(try_load_from_on_disk_cache::), + force_from_dep_node: Some(force_from_dep_node_erase::), + try_load_from_on_disk_cache: Some(try_load_from_on_disk_cache_erase::), } } @@ -464,64 +483,81 @@ macro_rules! 
define_queries { mod queries { use std::marker::PhantomData; - $(pub struct $name<'tcx> { - data: PhantomData<&'tcx ()> - })* + $( + #[derive(Copy, Clone, Debug)] + pub struct $name<'tcx> { + data: PhantomData<&'tcx ()> + } + )* } - $(impl<'tcx> QueryConfig> for queries::$name<'tcx> { - type Key = query_keys::$name<'tcx>; - type Value = query_values::$name<'tcx>; - type Stored = query_stored::$name<'tcx>; - const NAME: &'static str = stringify!($name); + #[allow(nonstandard_style)] + mod dynamic_query { + use super::*; - #[inline] - fn cache_on_disk(tcx: TyCtxt<'tcx>, key: &Self::Key) -> bool { - ::rustc_middle::query::cached::$name(tcx, key) - } + $( + pub(super) static $name: DynamicQuery> = DynamicQuery { + name: stringify!($name), + query_state: |tcx| &tcx.queries.$name , + query_cache: |tcx| &tcx.query_system.caches.$name, + cache_on_disk: |tcx, key| ::rustc_middle::query::cached::$name(tcx, key), + execute_query: |tcx, key| erase(tcx.$name(key)), + compute: |tcx, key| query_provided_to_value::$name( + tcx, + get_provider!([$($modifiers)*][tcx, $name, key])(tcx, key) + ), + try_load_from_disk: |qcx, key| { + fn try_load_from_disk<'tcx>( + _qcx: QueryCtxt<'tcx>, + _key: &query_keys::$name<'tcx> + ) -> TryLoadFromDisk, Erase>> { + should_ever_cache_on_disk!([$($modifiers)*] { + if ::rustc_middle::query::cached::$name(_qcx.tcx, _key) { + Some(|qcx, dep_node| { + let value = $crate::plumbing::try_load_from_disk::>( + qcx, + dep_node + ); + value.map(|value| query_provided_to_value::$name(qcx.tcx, value)) + }) + } else { + None + } + } { + None + }) + } + try_load_from_disk(qcx, key) + }, + anon: is_anon!([$($modifiers)*]), + eval_always: is_eval_always!([$($modifiers)*]), + depth_limit: depth_limit!([$($modifiers)*]), + feedable: feedable!([$($modifiers)*]), + dep_kind: dep_graph::DepKind::$name, + handle_cycle_error: handle_cycle_error!([$($modifiers)*]), + hash_result: hash_result!([$($modifiers)*][query_values::$name<'_>]), + }; + )* + } - type Cache = query_storage::$name<'tcx>; + $(impl<'tcx> QueryErasable<'tcx> for queries::$name<'tcx> { + type Cache = query_storage::$name<'static>; + type Key = query_keys::$name<'tcx>; + type Value = query_values::$name<'tcx>; #[inline(always)] - fn query_state<'a>(tcx: QueryCtxt<'tcx>) -> &'a QueryState - where QueryCtxt<'tcx>: 'a - { - &tcx.queries.$name + fn erase() -> ErasedQuery { + ErasedQuery { + dynamic: &dynamic_query::$name + } } #[inline(always)] - fn query_cache<'a>(tcx: QueryCtxt<'tcx>) -> &'a Self::Cache - where 'tcx:'a - { - &tcx.query_caches.$name - } - - fn execute_query(tcx: TyCtxt<'tcx>, key: Self::Key) -> Self::Stored { - tcx.$name(key) - } - - #[inline] - // key is only sometimes used - #[allow(unused_variables)] - fn compute(qcx: QueryCtxt<'tcx>, key: &Self::Key) -> fn(TyCtxt<'tcx>, Self::Key) -> Self::Value { - get_provider!([$($modifiers)*][qcx, $name, key]) - } - - #[inline] - fn try_load_from_disk(qcx: QueryCtxt<'tcx>, key: &Self::Key) -> rustc_query_system::query::TryLoadFromDisk, Self> { - let cache_on_disk = Self::cache_on_disk(qcx.tcx, key); - if cache_on_disk { should_ever_cache_on_disk!([$($modifiers)*]) } else { None } + fn restore(value: <::Remap<'tcx> as QueryCache>::Value) -> Self::Value { + unsafe { + restore::>(value) + } } - - const ANON: bool = is_anon!([$($modifiers)*]); - const EVAL_ALWAYS: bool = is_eval_always!([$($modifiers)*]); - const DEPTH_LIMIT: bool = depth_limit!([$($modifiers)*]); - const FEEDABLE: bool = feedable!([$($modifiers)*]); - - const DEP_KIND: rustc_middle::dep_graph::DepKind = 
dep_graph::DepKind::$name; - const HANDLE_CYCLE_ERROR: rustc_query_system::HandleCycleError = handle_cycle_error!([$($modifiers)*]); - - const HASH_RESULT: rustc_query_system::query::HashResult, Self> = hash_result!([$($modifiers)*]); })* #[allow(nonstandard_style)] @@ -595,6 +631,7 @@ macro_rules! define_queries { use $crate::profiling_support::QueryKeyStringCache; use rustc_query_system::query::QueryMap; use rustc_middle::dep_graph::DepKind; + use crate::QueryErasable; pub(super) const fn dummy_query_struct<'tcx>() -> QueryStruct<'tcx> { fn noop_try_collect_active_jobs(_: QueryCtxt<'_>, _: &mut QueryMap) -> Option<()> { @@ -633,12 +670,17 @@ macro_rules! define_queries { $crate::profiling_support::alloc_self_profile_query_strings_for_query_cache( tcx, stringify!($name), - &tcx.query_caches.$name, + &tcx.query_system.caches.$name, string_cache, ) }, encode_query_results: expand_if_cached!([$($modifiers)*], |tcx, encoder, query_result_index| - $crate::on_disk_cache::encode_query_results::<_, super::queries::$name<'_>>(tcx, encoder, query_result_index) + $crate::on_disk_cache::encode_query_results::>( + as QueryErasable<'tcx>>::erase(), + tcx, + encoder, + query_result_index + ) ), }})* } @@ -649,18 +691,12 @@ macro_rules! define_queries { } } -use crate::{ExternProviders, OnDiskCache, Providers}; +use crate::OnDiskCache; impl<'tcx> Queries<'tcx> { - pub fn new( - local_providers: Providers, - extern_providers: ExternProviders, - on_disk_cache: Option>, - ) -> Self { + pub fn new(on_disk_cache: Option>) -> Self { use crate::query_structs; Queries { - local_providers: Box::new(local_providers), - extern_providers: Box::new(extern_providers), query_structs: make_dep_kind_array!(query_structs).to_vec(), on_disk_cache, jobs: AtomicU64::new(1), @@ -674,8 +710,6 @@ macro_rules! define_queries_struct { input: ($(([$($modifiers:tt)*] [$($attr:tt)*] [$name:ident]))*)) => { #[derive(Default)] pub struct Queries<'tcx> { - local_providers: Box, - extern_providers: Box, query_structs: Vec<$crate::plumbing::QueryStruct<'tcx>>, pub on_disk_cache: Option>, jobs: AtomicU64, @@ -683,7 +717,7 @@ macro_rules! define_queries_struct { $( $(#[$attr])* $name: QueryState< - as QueryConfig>>::Key, + query_keys::$name<'tcx>, rustc_middle::dep_graph::DepKind, >, )* @@ -723,11 +757,13 @@ macro_rules! 
define_queries_struct { &'tcx self, tcx: TyCtxt<'tcx>, span: Span, - key: as QueryConfig>>::Key, + key: query_keys::$name<'tcx>, mode: QueryMode, - ) -> Option> { + ) -> Option>> { let qcx = QueryCtxt { tcx, queries: self }; - get_query::, _, rustc_middle::dep_graph::DepKind>(qcx, span, key, mode) + get_query::<_, _, rustc_middle::dep_graph::DepKind>( + as QueryErasable<'tcx>>::erase(), qcx, span, key, mode + ) })* } }; diff --git a/compiler/rustc_query_system/src/dep_graph/dep_node.rs b/compiler/rustc_query_system/src/dep_graph/dep_node.rs index 9e1ca6ab515d8..ca4aa249ee997 100644 --- a/compiler/rustc_query_system/src/dep_graph/dep_node.rs +++ b/compiler/rustc_query_system/src/dep_graph/dep_node.rs @@ -108,7 +108,9 @@ impl fmt::Debug for DepNode { } } -pub trait DepNodeParams: fmt::Debug + Sized { +pub trait DepNodeParams: + fmt::Debug + Sized + for<'a> HashStable> +{ fn fingerprint_style() -> FingerprintStyle; /// This method turns the parameters of a DepNodeConstructor into an opaque diff --git a/compiler/rustc_query_system/src/query/caches.rs b/compiler/rustc_query_system/src/query/caches.rs index 9f875b4373173..d9dcc006ca177 100644 --- a/compiler/rustc_query_system/src/query/caches.rs +++ b/compiler/rustc_query_system/src/query/caches.rs @@ -1,13 +1,12 @@ use crate::dep_graph::DepNodeIndex; -use rustc_arena::TypedArena; use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::remap::Remap; use rustc_data_structures::sharded; #[cfg(parallel_compiler)] use rustc_data_structures::sharded::Sharded; #[cfg(not(parallel_compiler))] use rustc_data_structures::sync::Lock; -use rustc_data_structures::sync::WorkerLocal; use rustc_index::vec::{Idx, IndexVec}; use std::fmt::Debug; use std::hash::Hash; @@ -17,35 +16,33 @@ pub trait CacheSelector<'tcx, V> { type Cache where V: Copy; - type ArenaCache; } -pub trait QueryStorage { - type Value: Debug; - type Stored: Copy; -} - -pub trait QueryCache: QueryStorage + Sized { +pub trait QueryCache: Sized { type Key: Hash + Eq + Clone + Debug; + type Value: Copy + Debug; /// Checks if the query is already computed and in the cache. /// It returns the shard index and a lock guard to the shard, /// which will be used if the query is not in the cache and we need /// to compute it. 
- fn lookup(&self, key: &Self::Key) -> Option<(Self::Stored, DepNodeIndex)>; + fn lookup(&self, key: &Self::Key) -> Option<(Self::Value, DepNodeIndex)>; - fn complete(&self, key: Self::Key, value: Self::Value, index: DepNodeIndex) -> Self::Stored; + fn complete(&self, key: Self::Key, value: Self::Value, index: DepNodeIndex); fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)); } +pub trait RemapQueryCache { + type Remap<'a>: QueryCache; +} + pub struct DefaultCacheSelector(PhantomData); impl<'tcx, K: Eq + Hash, V: 'tcx> CacheSelector<'tcx, V> for DefaultCacheSelector { type Cache = DefaultCache where V: Copy; - type ArenaCache = ArenaCache<'tcx, K, V>; } pub struct DefaultCache { @@ -61,9 +58,11 @@ impl Default for DefaultCache { } } -impl QueryStorage for DefaultCache { - type Value = V; - type Stored = V; +impl RemapQueryCache for DefaultCache +where + for<'a> ::Remap<'a>: Eq + Hash + Clone + Debug, +{ + type Remap<'a> = DefaultCache, V>; } impl QueryCache for DefaultCache @@ -72,6 +71,7 @@ where V: Copy + Debug, { type Key = K; + type Value = V; #[inline(always)] fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> { @@ -86,7 +86,7 @@ where } #[inline] - fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored { + fn complete(&self, key: K, value: V, index: DepNodeIndex) { #[cfg(parallel_compiler)] let mut lock = self.cache.get_shard_by_value(&key).lock(); #[cfg(not(parallel_compiler))] @@ -94,79 +94,6 @@ where // We may be overwriting another value. This is all right, since the dep-graph // will check that the fingerprint matches. lock.insert(key, (value.clone(), index)); - value - } - - fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) { - #[cfg(parallel_compiler)] - { - let shards = self.cache.lock_shards(); - for shard in shards.iter() { - for (k, v) in shard.iter() { - f(k, &v.0, v.1); - } - } - } - #[cfg(not(parallel_compiler))] - { - let map = self.cache.lock(); - for (k, v) in map.iter() { - f(k, &v.0, v.1); - } - } - } -} - -pub struct ArenaCache<'tcx, K, V> { - arena: WorkerLocal>, - #[cfg(parallel_compiler)] - cache: Sharded>, - #[cfg(not(parallel_compiler))] - cache: Lock>, -} - -impl<'tcx, K, V> Default for ArenaCache<'tcx, K, V> { - fn default() -> Self { - ArenaCache { arena: WorkerLocal::new(|_| TypedArena::default()), cache: Default::default() } - } -} - -impl<'tcx, K: Eq + Hash, V: Debug + 'tcx> QueryStorage for ArenaCache<'tcx, K, V> { - type Value = V; - type Stored = &'tcx V; -} - -impl<'tcx, K, V: 'tcx> QueryCache for ArenaCache<'tcx, K, V> -where - K: Eq + Hash + Clone + Debug, - V: Debug, -{ - type Key = K; - - #[inline(always)] - fn lookup(&self, key: &K) -> Option<(&'tcx V, DepNodeIndex)> { - let key_hash = sharded::make_hash(key); - #[cfg(parallel_compiler)] - let lock = self.cache.get_shard_by_hash(key_hash).lock(); - #[cfg(not(parallel_compiler))] - let lock = self.cache.lock(); - let result = lock.raw_entry().from_key_hashed_nocheck(key_hash, key); - - if let Some((_, value)) = result { Some((&value.0, value.1)) } else { None } - } - - #[inline] - fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored { - let value = self.arena.alloc((value, index)); - let value = unsafe { &*(value as *const _) }; - #[cfg(parallel_compiler)] - let mut lock = self.cache.get_shard_by_value(&key).lock(); - #[cfg(not(parallel_compiler))] - let mut lock = self.cache.lock(); - // We may be overwriting another value. 
This is all right, since the dep-graph - // will check that the fingerprint matches. - lock.insert(key, value); - &value.0 } fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) { @@ -195,7 +122,6 @@ impl<'tcx, K: Idx, V: 'tcx> CacheSelector<'tcx, V> for VecCacheSelector { type Cache = VecCache where V: Copy; - type ArenaCache = VecArenaCache<'tcx, K, V>; } pub struct VecCache { @@ -211,9 +137,11 @@ impl Default for VecCache { } } -impl QueryStorage for VecCache { - type Value = V; - type Stored = V; +impl RemapQueryCache for VecCache +where + for<'a> ::Remap<'a>: Idx, +{ + type Remap<'a> = VecCache, V>; } impl QueryCache for VecCache @@ -222,6 +150,7 @@ where V: Copy + Debug, { type Key = K; + type Value = V; #[inline(always)] fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> { @@ -233,87 +162,12 @@ where } #[inline] - fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored { + fn complete(&self, key: K, value: V, index: DepNodeIndex) { #[cfg(parallel_compiler)] let mut lock = self.cache.get_shard_by_hash(key.index() as u64).lock(); #[cfg(not(parallel_compiler))] let mut lock = self.cache.lock(); lock.insert(key, (value.clone(), index)); - value - } - - fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) { - #[cfg(parallel_compiler)] - { - let shards = self.cache.lock_shards(); - for shard in shards.iter() { - for (k, v) in shard.iter_enumerated() { - if let Some(v) = v { - f(&k, &v.0, v.1); - } - } - } - } - #[cfg(not(parallel_compiler))] - { - let map = self.cache.lock(); - for (k, v) in map.iter_enumerated() { - if let Some(v) = v { - f(&k, &v.0, v.1); - } - } - } - } -} - -pub struct VecArenaCache<'tcx, K: Idx, V> { - arena: WorkerLocal>, - #[cfg(parallel_compiler)] - cache: Sharded>>, - #[cfg(not(parallel_compiler))] - cache: Lock>>, -} - -impl<'tcx, K: Idx, V> Default for VecArenaCache<'tcx, K, V> { - fn default() -> Self { - VecArenaCache { - arena: WorkerLocal::new(|_| TypedArena::default()), - cache: Default::default(), - } - } -} - -impl<'tcx, K: Eq + Idx, V: Debug + 'tcx> QueryStorage for VecArenaCache<'tcx, K, V> { - type Value = V; - type Stored = &'tcx V; -} - -impl<'tcx, K, V: 'tcx> QueryCache for VecArenaCache<'tcx, K, V> -where - K: Eq + Idx + Clone + Debug, - V: Debug, -{ - type Key = K; - - #[inline(always)] - fn lookup(&self, key: &K) -> Option<(&'tcx V, DepNodeIndex)> { - #[cfg(parallel_compiler)] - let lock = self.cache.get_shard_by_hash(key.index() as u64).lock(); - #[cfg(not(parallel_compiler))] - let lock = self.cache.lock(); - if let Some(Some(value)) = lock.get(*key) { Some((&value.0, value.1)) } else { None } - } - - #[inline] - fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored { - let value = self.arena.alloc((value, index)); - let value = unsafe { &*(value as *const _) }; - #[cfg(parallel_compiler)] - let mut lock = self.cache.get_shard_by_hash(key.index() as u64).lock(); - #[cfg(not(parallel_compiler))] - let mut lock = self.cache.lock(); - lock.insert(key, value); - &value.0 } fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) { diff --git a/compiler/rustc_query_system/src/query/config.rs b/compiler/rustc_query_system/src/query/config.rs index a28e45a5c086d..fc121f31a67d7 100644 --- a/compiler/rustc_query_system/src/query/config.rs +++ b/compiler/rustc_query_system/src/query/config.rs @@ -10,52 +10,44 @@ use rustc_data_structures::fingerprint::Fingerprint; use std::fmt::Debug; use std::hash::Hash; -pub type HashResult = - Option, &>::Value) -> 
Fingerprint>; +pub type HashResult = Option, &V) -> Fingerprint>; -pub type TryLoadFromDisk = - Option Option<>::Value>>; +pub type TryLoadFromDisk = Option Option>; -pub trait QueryConfig { - const NAME: &'static str; +pub trait QueryConfig: Copy + Debug { + fn name(&self) -> &'static str; type Key: DepNodeParams + Eq + Hash + Clone + Debug; - type Value: Debug; - type Stored: Debug + Copy + std::borrow::Borrow; + type Value: Debug + Copy; - type Cache: QueryCache; + type Cache: QueryCache; // Don't use this method to access query results, instead use the methods on TyCtxt - fn query_state<'a>(tcx: Qcx) -> &'a QueryState - where - Qcx: 'a; + fn query_state<'a>(&self, tcx: &'a Qcx) -> &'a QueryState; // Don't use this method to access query results, instead use the methods on TyCtxt - fn query_cache<'a>(tcx: Qcx) -> &'a Self::Cache - where - Qcx: 'a; + fn query_cache<'a>(&self, tcx: &'a Qcx) -> &'a Self::Cache; - fn cache_on_disk(tcx: Qcx::DepContext, key: &Self::Key) -> bool; + fn cache_on_disk(&self, tcx: Qcx::DepContext, key: &Self::Key) -> bool; // Don't use this method to compute query results, instead use the methods on TyCtxt - fn execute_query(tcx: Qcx::DepContext, k: Self::Key) -> Self::Stored; + fn execute_query(&self, tcx: Qcx::DepContext, k: Self::Key) -> Self::Value; - fn compute(tcx: Qcx, key: &Self::Key) -> fn(Qcx::DepContext, Self::Key) -> Self::Value; + fn compute(&self, tcx: Qcx::DepContext, key: Self::Key) -> Self::Value; - fn try_load_from_disk(qcx: Qcx, idx: &Self::Key) -> TryLoadFromDisk; + fn try_load_from_disk(&self, qcx: Qcx, idx: &Self::Key) -> TryLoadFromDisk; - const ANON: bool; - const EVAL_ALWAYS: bool; - const DEPTH_LIMIT: bool; - const FEEDABLE: bool; + fn anon(&self) -> bool; + fn eval_always(&self) -> bool; + fn depth_limit(&self) -> bool; + fn feedable(&self) -> bool; - const DEP_KIND: Qcx::DepKind; - const HANDLE_CYCLE_ERROR: HandleCycleError; - - const HASH_RESULT: HashResult; + fn dep_kind(&self) -> Qcx::DepKind; + fn handle_cycle_error(&self) -> HandleCycleError; + fn hash_result(&self) -> HashResult; // Just here for convernience and checking that the key matches the kind, don't override this. - fn construct_dep_node(tcx: Qcx::DepContext, key: &Self::Key) -> DepNode { - DepNode::construct(tcx, Self::DEP_KIND, key) + fn construct_dep_node(&self, tcx: Qcx::DepContext, key: &Self::Key) -> DepNode { + DepNode::construct(tcx, self.dep_kind(), key) } } diff --git a/compiler/rustc_query_system/src/query/mod.rs b/compiler/rustc_query_system/src/query/mod.rs index d308af1920760..512c298450799 100644 --- a/compiler/rustc_query_system/src/query/mod.rs +++ b/compiler/rustc_query_system/src/query/mod.rs @@ -8,7 +8,7 @@ pub use self::job::{print_query_stack, QueryInfo, QueryJob, QueryJobId, QueryJob mod caches; pub use self::caches::{ - CacheSelector, DefaultCacheSelector, QueryCache, QueryStorage, VecCacheSelector, + CacheSelector, DefaultCacheSelector, QueryCache, RemapQueryCache, VecCacheSelector, }; mod config; diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs index ed66d1929c5e7..c2fc0da9bf88f 100644 --- a/compiler/rustc_query_system/src/query/plumbing.rs +++ b/compiler/rustc_query_system/src/query/plumbing.rs @@ -2,6 +2,7 @@ //! generate the actual methods on tcx which find and execute the provider, //! manage the caches, and so forth. 
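// The `QueryConfig` hunk above replaces associated constants (`ANON`,
// `EVAL_ALWAYS`, `DEP_KIND`, ...) with methods on a `Copy + Debug` query
// value, so the plumbing can be driven by a value passed as an argument
// rather than by a type parameter alone. A reduced, illustrative version of
// that shift (all names here are invented):
trait QueryConfigSketch: Copy + std::fmt::Debug {
    fn name(&self) -> &'static str;
    fn anon(&self) -> bool;
    fn eval_always(&self) -> bool;
}

// A zero-sized marker per query keeps everything known at compile time while
// still flowing through the code as a value.
#[derive(Clone, Copy, Debug)]
struct TypeOfSketch;

impl QueryConfigSketch for TypeOfSketch {
    fn name(&self) -> &'static str { "type_of" }
    fn anon(&self) -> bool { false }
    fn eval_always(&self) -> bool { false }
}

fn describe<Q: QueryConfigSketch>(query: Q) -> String {
    // The query value travels as an argument, mirroring how `get_query`,
    // `try_execute_query` and friends now take `query: Q` first.
    format!("{} (anon: {})", query.name(), query.anon())
}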
+use crate::dep_graph::HasDepContext; use crate::dep_graph::{DepContext, DepKind, DepNode, DepNodeIndex}; use crate::ich::StableHashingContext; use crate::query::caches::QueryCache; @@ -246,7 +247,7 @@ where /// Completes the query by updating the query cache with the `result`, /// signals the waiter and forgets the JobOwner, so it won't poison the query - fn complete(self, cache: &C, result: C::Value, dep_node_index: DepNodeIndex) -> C::Stored + fn complete(self, cache: &C, result: C::Value, dep_node_index: DepNodeIndex) where C: QueryCache, { @@ -257,23 +258,22 @@ where // Forget ourself so our destructor won't poison the query mem::forget(self); - let (job, result) = { - let job = { - #[cfg(parallel_compiler)] - let mut lock = state.active.get_shard_by_value(&key).lock(); - #[cfg(not(parallel_compiler))] - let mut lock = state.active.lock(); - match lock.remove(&key).unwrap() { - QueryResult::Started(job) => job, - QueryResult::Poisoned => panic!(), - } - }; - let result = cache.complete(key, result, dep_node_index); - (job, result) + // Mark as complete before we remove the job from the active state + // so no other thread can re-execute this query. + cache.complete(key.clone(), result, dep_node_index); + + let job = { + #[cfg(parallel_compiler)] + let mut lock = state.active.get_shard_by_value(&key).lock(); + #[cfg(not(parallel_compiler))] + let mut lock = state.active.lock(); + match lock.remove(&key).unwrap() { + QueryResult::Started(job) => job, + QueryResult::Poisoned => panic!(), + } }; job.signal_complete(); - result } } @@ -335,8 +335,8 @@ where /// It returns the shard index and a lock guard to the shard, /// which will be used if the query is not in the cache and we need /// to compute it. -#[inline] -pub fn try_get_cached(tcx: Tcx, cache: &C, key: &C::Key) -> Option +#[inline(always)] +pub fn try_get_cached(tcx: Tcx, cache: &C, key: &C::Key) -> Option where C: QueryCache, Tcx: DepContext, @@ -352,26 +352,31 @@ where } fn try_execute_query( + query: Q, qcx: Qcx, - state: &QueryState, - cache: &Q::Cache, span: Span, key: Q::Key, dep_node: Option>, -) -> (Q::Stored, Option) +) -> (Q::Value, Option) where Q: QueryConfig, Qcx: QueryContext, { - match JobOwner::<'_, Q::Key, Qcx::DepKind>::try_start(&qcx, state, span, key.clone()) { + let state = query.query_state(&qcx); + let cache = query.query_cache(&qcx); + match JobOwner::<'_, Q::Key, ::DepKind>::try_start( + &qcx, + state, + span, + key.clone(), + ) { TryGetJob::NotYetStarted(job) => { - let (result, dep_node_index) = - execute_job::(qcx, key.clone(), dep_node, job.id); - if Q::FEEDABLE { + let (result, dep_node_index) = execute_job(query, qcx, key.clone(), dep_node, job.id); + if query.feedable() { // We may have put a value inside the cache from inside the execution. // Verify that it has the same hash as what we have now, to ensure consistency. 
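// Why the reordering inside `JobOwner::complete` above matters: once the job
// is removed from `state.active`, another thread may start (or stop waiting
// on) the same query, so the result has to be visible in the cache first.
// Below is a deliberately simplified, hypothetical reduction of that ordering
// using plain std types; the real code uses sharded locks and query jobs.
use std::collections::{HashMap, HashSet};
use std::sync::Mutex;

struct CompletionSketch {
    active: Mutex<HashSet<u32>>,     // queries currently being executed
    cache: Mutex<HashMap<u32, u64>>, // finished results
}

impl CompletionSketch {
    fn complete(&self, key: u32, value: u64) {
        // 1. Publish the result so any thread that observes the job as gone
        //    is guaranteed to find the value in the cache...
        self.cache.lock().unwrap().insert(key, value);
        // 2. ...and only then retire the job and wake any waiters.
        self.active.lock().unwrap().remove(&key);
    }
}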
if let Some((cached_result, _)) = cache.lookup(&key) { - let hasher = Q::HASH_RESULT.expect("feedable forbids no_hash"); + let hasher = query.hash_result().expect("feedable forbids no_hash"); let old_hash = qcx.dep_context().with_stable_hashing_context(|mut hcx| { hasher(&mut hcx, cached_result.borrow()) @@ -383,18 +388,18 @@ where old_hash, new_hash, "Computed query value for {:?}({:?}) is inconsistent with fed value,\ncomputed={:#?}\nfed={:#?}", - Q::DEP_KIND, + query.dep_kind(), key, result, cached_result, ); } } - let result = job.complete(cache, result, dep_node_index); + job.complete(cache, result, dep_node_index); (result, Some(dep_node_index)) } TryGetJob::Cycle(error) => { - let result = mk_cycle(qcx, error, Q::HANDLE_CYCLE_ERROR); + let result = mk_cycle(qcx, error, query.handle_cycle_error()); (result, None) } #[cfg(parallel_compiler)] @@ -412,6 +417,7 @@ where } fn execute_job( + query: Q, qcx: Qcx, key: Q::Key, mut dep_node_opt: Option>, @@ -426,23 +432,23 @@ where // Fast path for when incr. comp. is off. if !dep_graph.is_fully_enabled() { let prof_timer = qcx.dep_context().profiler().query_provider(); - let result = qcx.start_query(job_id, Q::DEPTH_LIMIT, None, || { - Q::compute(qcx, &key)(*qcx.dep_context(), key) + let result = qcx.start_query(job_id, query.depth_limit(), None, || { + query.compute(*qcx.dep_context(), key) }); let dep_node_index = dep_graph.next_virtual_depnode_index(); prof_timer.finish_with_query_invocation_id(dep_node_index.into()); return (result, dep_node_index); } - if !Q::ANON && !Q::EVAL_ALWAYS { + if !query.anon() && !query.eval_always() { // `to_dep_node` is expensive for some `DepKind`s. let dep_node = - dep_node_opt.get_or_insert_with(|| Q::construct_dep_node(*qcx.dep_context(), &key)); + dep_node_opt.get_or_insert_with(|| query.construct_dep_node(*qcx.dep_context(), &key)); // The diagnostics for this query will be promoted to the current session during // `try_mark_green()`, so we can ignore them here. if let Some(ret) = qcx.start_query(job_id, false, None, || { - try_load_from_disk_and_cache_in_memory::(qcx, &key, &dep_node) + try_load_from_disk_and_cache_in_memory(query, qcx, &key, &dep_node) }) { return ret; } @@ -452,19 +458,24 @@ where let diagnostics = Lock::new(ThinVec::new()); let (result, dep_node_index) = - qcx.start_query(job_id, Q::DEPTH_LIMIT, Some(&diagnostics), || { - if Q::ANON { - return dep_graph.with_anon_task(*qcx.dep_context(), Q::DEP_KIND, || { - Q::compute(qcx, &key)(*qcx.dep_context(), key) + qcx.start_query(job_id, query.depth_limit(), Some(&diagnostics), || { + if query.anon() { + return dep_graph.with_anon_task(*qcx.dep_context(), query.dep_kind(), || { + query.compute(*qcx.dep_context(), key) }); } // `to_dep_node` is expensive for some `DepKind`s. 
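// The fast path and the anon-task path above now call
// `query.compute(*qcx.dep_context(), key)` directly, where the old code first
// asked `Q::compute` for a provider `fn` and then invoked it. A toy
// side-by-side of the two shapes (names invented, not the real trait):
#[derive(Clone, Copy)]
struct OldStyle;
impl OldStyle {
    // Old shape: hand back a plain `fn`, which the caller invokes separately.
    fn compute(self) -> fn(&str, u32) -> u64 {
        |_tcx, key| u64::from(key) + 1
    }
}

#[derive(Clone, Copy)]
struct NewStyle;
impl NewStyle {
    // New shape: the query value computes directly, no intermediate `fn`.
    fn compute(self, _tcx: &str, key: u32) -> u64 {
        u64::from(key) + 1
    }
}

fn demo() -> (u64, u64) {
    let old = OldStyle.compute()("tcx", 41);
    let new = NewStyle.compute("tcx", 41);
    (old, new)
}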
let dep_node = - dep_node_opt.unwrap_or_else(|| Q::construct_dep_node(*qcx.dep_context(), &key)); - - let task = Q::compute(qcx, &key); - dep_graph.with_task(dep_node, *qcx.dep_context(), key, task, Q::HASH_RESULT) + dep_node_opt.unwrap_or_else(|| query.construct_dep_node(*qcx.dep_context(), &key)); + + dep_graph.with_task( + dep_node, + *qcx.dep_context(), + (key, query), + |tcx, (key, query)| query.compute(tcx, key), + query.hash_result(), + ) }); prof_timer.finish_with_query_invocation_id(dep_node_index.into()); @@ -473,7 +484,7 @@ where let side_effects = QuerySideEffects { diagnostics }; if std::intrinsics::unlikely(!side_effects.is_empty()) { - if Q::ANON { + if query.anon() { qcx.store_side_effects_for_anon_node(dep_node_index, side_effects); } else { qcx.store_side_effects(dep_node_index, side_effects); @@ -484,6 +495,7 @@ where } fn try_load_from_disk_and_cache_in_memory( + query: Q, qcx: Qcx, key: &Q::Key, dep_node: &DepNode, @@ -502,7 +514,7 @@ where // First we try to load the result from the on-disk cache. // Some things are never cached on disk. - if let Some(try_load_from_disk) = Q::try_load_from_disk(qcx, &key) { + if let Some(try_load_from_disk) = query.try_load_from_disk(qcx, &key) { let prof_timer = qcx.dep_context().profiler().incr_cache_loading(); // The call to `with_query_deserialization` enforces that no new `DepNodes` @@ -536,7 +548,7 @@ where if std::intrinsics::unlikely( try_verify || qcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich, ) { - incremental_verify_ich(*qcx.dep_context(), &result, dep_node, Q::HASH_RESULT); + incremental_verify_ich(*qcx.dep_context(), &result, dep_node, query.hash_result()); } return Some((result, dep_node_index)); @@ -555,7 +567,7 @@ where let prof_timer = qcx.dep_context().profiler().query_provider(); // The dep-graph for this computation is already in-place. - let result = dep_graph.with_ignore(|| Q::compute(qcx, key)(*qcx.dep_context(), key.clone())); + let result = dep_graph.with_ignore(|| query.compute(*qcx.dep_context(), key.clone())); prof_timer.finish_with_query_invocation_id(dep_node_index.into()); @@ -568,7 +580,7 @@ where // // See issue #82920 for an example of a miscompilation that would get turned into // an ICE by this check - incremental_verify_ich(*qcx.dep_context(), &result, dep_node, Q::HASH_RESULT); + incremental_verify_ich(*qcx.dep_context(), &result, dep_node, query.hash_result()); Some((result, dep_node_index)) } @@ -688,19 +700,23 @@ fn incremental_verify_ich_failed(sess: &Session, dep_node: DebugArg<'_>, result: /// /// Note: The optimization is only available during incr. comp. 
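// The `with_task` call at the top of this hunk packs the query value into the
// task argument as `(key, query)`. Presumably that is because the task is
// supplied as a capture-free function, so the only way to get `query` into
// its body is through the argument itself. A toy stand-in for that pattern
// (the real `DepGraph::with_task` signature is not reproduced here):
fn with_task_sketch<A, R>(arg: A, task: fn(&str, A) -> R) -> R {
    // The real version records dependency edges around this call.
    task("tcx-stand-in", arg)
}

#[derive(Clone, Copy)]
struct QuerySketch;

impl QuerySketch {
    fn compute(self, _tcx: &str, key: u32) -> u64 {
        u64::from(key) * 2
    }
}

fn run(query: QuerySketch, key: u32) -> u64 {
    // `|tcx, (key, query)| ...` captures nothing, so it coerces to a `fn`
    // pointer even though it uses `query`; the value arrives via the tuple.
    with_task_sketch((key, query), |tcx, (key, query)| query.compute(tcx, key))
}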
#[inline(never)] -fn ensure_must_run(qcx: Qcx, key: &Q::Key) -> (bool, Option>) +fn ensure_must_run( + query: Q, + qcx: Qcx, + key: &Q::Key, +) -> (bool, Option>) where Q: QueryConfig, Qcx: QueryContext, { - if Q::EVAL_ALWAYS { + if query.eval_always() { return (true, None); } // Ensuring an anonymous query makes no sense - assert!(!Q::ANON); + assert!(!query.anon()); - let dep_node = Q::construct_dep_node(*qcx.dep_context(), key); + let dep_node = query.construct_dep_node(*qcx.dep_context(), key); let dep_graph = qcx.dep_context().dep_graph(); match dep_graph.try_mark_green(qcx, &dep_node) { @@ -727,15 +743,21 @@ pub enum QueryMode { Ensure, } -pub fn get_query(qcx: Qcx, span: Span, key: Q::Key, mode: QueryMode) -> Option +pub fn get_query( + query: Q, + qcx: Qcx, + span: Span, + key: Q::Key, + mode: QueryMode, +) -> Option where D: DepKind, Q: QueryConfig, - Q::Value: Value, Qcx: QueryContext, + Q::Value: Value, { let dep_node = if let QueryMode::Ensure = mode { - let (must_run, dep_node) = ensure_must_run::(qcx, &key); + let (must_run, dep_node) = ensure_must_run(query, qcx, &key); if !must_run { return None; } @@ -744,22 +766,19 @@ where None }; - let (result, dep_node_index) = try_execute_query::( - qcx, - Q::query_state(qcx), - Q::query_cache(qcx), - span, - key, - dep_node, - ); + let (result, dep_node_index) = try_execute_query(query, qcx, span, key, dep_node); if let Some(dep_node_index) = dep_node_index { qcx.dep_context().dep_graph().read_index(dep_node_index) } Some(result) } -pub fn force_query(qcx: Qcx, key: Q::Key, dep_node: DepNode) -where +pub fn force_query( + query: Q, + qcx: Qcx, + key: Q::Key, + dep_node: DepNode<::DepKind>, +) where D: DepKind, Q: QueryConfig, Q::Value: Value, @@ -767,14 +786,12 @@ where { // We may be concurrently trying both execute and force a query. // Ensure that only one of them runs the query. - let cache = Q::query_cache(qcx); - if let Some((_, index)) = cache.lookup(&key) { + if let Some((_, index)) = query.query_cache(&qcx).lookup(&key) { qcx.dep_context().profiler().query_cache_hit(index.into()); return; } - let state = Q::query_state(qcx); - debug_assert!(!Q::ANON); + debug_assert!(!query.anon()); - try_execute_query::(qcx, state, cache, DUMMY_SP, key, Some(dep_node)); + try_execute_query(query, qcx, DUMMY_SP, key, Some(dep_node)); } diff --git a/compiler/rustc_span/src/def_id.rs b/compiler/rustc_span/src/def_id.rs index cdda052f52906..aac379b1e8bea 100644 --- a/compiler/rustc_span/src/def_id.rs +++ b/compiler/rustc_span/src/def_id.rs @@ -1,5 +1,6 @@ use crate::{HashStableContext, Symbol}; use rustc_data_structures::fingerprint::Fingerprint; +use rustc_data_structures::remap::Remap; use rustc_data_structures::stable_hasher::{HashStable, StableHasher, ToStableHashKey}; use rustc_data_structures::AtomicRef; use rustc_index::vec::Idx; @@ -37,6 +38,10 @@ impl fmt::Display for CrateNum { } } +impl Remap for CrateNum { + type Remap<'a> = CrateNum; +} + /// As a local identifier, a `CrateNum` is only meaningful within its context, e.g. within a tcx. /// Therefore, make sure to include the context when encode a `CrateNum`. impl Encodable for CrateNum { @@ -268,6 +273,10 @@ impl Hash for DefId { } } +impl Remap for DefId { + type Remap<'a> = DefId; +} + // Implement the same comparison as derived with the other field order. 
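// The `Remap` impls added for `CrateNum` and `DefId` above are identity
// impls: these types carry no 'tcx lifetime, so changing the lifetime leaves
// them untouched. A self-contained sketch of that side of the pattern
// (illustrative names; the real trait lives in rustc_data_structures::remap):
trait RemapSketch {
    type Remap<'a>;
}

// A lifetime-free key type remaps to itself...
#[derive(Clone, Copy)]
struct DefIdSketch {
    krate: u32,
    index: u32,
}
impl RemapSketch for DefIdSketch {
    type Remap<'a> = DefIdSketch;
}

// ...and containers simply forward the remapping to their element type, so
// `Option<DefIdSketch>` also maps to itself.
impl<T: RemapSketch> RemapSketch for Option<T> {
    type Remap<'a> = Option<T::Remap<'a>>;
}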
#[cfg(all(target_pointer_width = "64", target_endian = "big"))] impl Ord for DefId { @@ -374,6 +383,10 @@ pub struct LocalDefId { pub local_def_index: DefIndex, } +impl Remap for LocalDefId { + type Remap<'a> = LocalDefId; +} + // To ensure correctness of incremental compilation, // `LocalDefId` must not implement `Ord` or `PartialOrd`. // See https://github.com/rust-lang/rust/issues/90317. diff --git a/compiler/rustc_span/src/span_encoding.rs b/compiler/rustc_span/src/span_encoding.rs index d48c4f7e5a811..dd0b12418633c 100644 --- a/compiler/rustc_span/src/span_encoding.rs +++ b/compiler/rustc_span/src/span_encoding.rs @@ -10,6 +10,7 @@ use crate::SPAN_TRACK; use crate::{BytePos, SpanData}; use rustc_data_structures::fx::FxIndexSet; +use rustc_data_structures::remap::Remap; /// A compressed span. /// @@ -193,6 +194,10 @@ impl Span { } } +impl Remap for Span { + type Remap<'a> = Span; +} + #[derive(Default)] pub struct SpanInterner { spans: FxIndexSet, diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs index 1ced75cccbb30..564e409a4ac75 100644 --- a/compiler/rustc_span/src/symbol.rs +++ b/compiler/rustc_span/src/symbol.rs @@ -4,6 +4,7 @@ use rustc_arena::DroplessArena; use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::remap::Remap; use rustc_data_structures::stable_hasher::{HashStable, StableHasher, ToStableHashKey}; use rustc_data_structures::sync::Lock; use rustc_macros::HashStable_Generic; @@ -1710,6 +1711,14 @@ impl Hash for Ident { } } +impl Remap for Ident { + type Remap<'a> = Ident; +} + +impl Remap for Symbol { + type Remap<'a> = Symbol; +} + impl fmt::Debug for Ident { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(self, f)?; diff --git a/compiler/rustc_target/src/abi/mod.rs b/compiler/rustc_target/src/abi/mod.rs index 39761baf1bc29..eebfee108966c 100644 --- a/compiler/rustc_target/src/abi/mod.rs +++ b/compiler/rustc_target/src/abi/mod.rs @@ -1,3 +1,4 @@ +use rustc_data_structures::remap::Remap; pub use Integer::*; pub use Primitive::*; @@ -81,6 +82,10 @@ impl<'a, Ty> Deref for TyAndLayout<'a, Ty> { } } +impl<'a, T: Remap> Remap for TyAndLayout<'a, T> { + type Remap<'b> = TyAndLayout<'b, T::Remap<'b>>; +} + /// Trait that needs to be implemented by the higher-level type representation /// (e.g. `rustc_middle::ty::Ty`), to provide `rustc_target::abi` functionality. pub trait TyAbiInterface<'a, C>: Sized {
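// The `TyAndLayout` impl just above is the more interesting case: the type
// has its own lifetime *and* a generic parameter, so remapping must replace
// the outer lifetime and forward the remap into `T` at the same time. A
// stand-alone sketch of that shape (names are invented; only the pattern
// mirrors the impl):
trait RemapSketch2 {
    type Remap<'a>;
}

struct InternedSketch<'tcx>(&'tcx str);
impl<'tcx> RemapSketch2 for InternedSketch<'tcx> {
    type Remap<'a> = InternedSketch<'a>;
}

struct LayoutOfSketch<'tcx, T> {
    ty: T,
    note: &'tcx str,
}

impl<'tcx, T: RemapSketch2> RemapSketch2 for LayoutOfSketch<'tcx, T> {
    // Both the container's lifetime and the payload's lifetime move to 'a.
    type Remap<'a> = LayoutOfSketch<'a, T::Remap<'a>>;
}

// With that in place, the fully-remapped form is nameable purely at the type
// level, which is what lets caching code talk about "the same value, but at
// the cache's lifetime":
type Remapped<'a> =
    <LayoutOfSketch<'static, InternedSketch<'static>> as RemapSketch2>::Remap<'a>;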