add with_hash_task to generate DepNode deterministically
SparrowLii committed Jan 5, 2023
commit 4e5256e186c5505f3dab9409c873d8eb25b92d3f
8 changes: 7 additions & 1 deletion compiler/rustc_query_system/src/cache.rs
@@ -3,7 +3,7 @@
use crate::dep_graph::{DepContext, DepNodeIndex};

use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync::Lock;
use rustc_data_structures::sync::{HashMapExt, Lock};

use std::hash::Hash;

@@ -35,6 +35,12 @@ impl<Key: Eq + Hash, Value: Clone> Cache<Key, Value> {
}
}

impl<Key: Eq + Hash, Value: Clone + Eq> Cache<Key, Value> {
pub fn insert_same(&self, key: Key, dep_node: DepNodeIndex, value: Value) {
self.hashmap.borrow_mut().insert_same(key, WithDepNode::new(dep_node, value));
}
}

#[derive(Clone, Eq, PartialEq)]
pub struct WithDepNode<T> {
dep_node: DepNodeIndex,
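For reference, a minimal standalone sketch (not the actual `rustc_data_structures` implementation) of the contract that `HashMapExt::insert_same` provides and that the new `Cache::insert_same` relies on: inserting is always allowed, but an entry that already exists must be equal to the new value.

```rust
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::fmt::Debug;
use std::hash::Hash;

// Simplified stand-in for `HashMapExt::insert_same`: insert the value, or
// assert that the existing entry equals it.
fn insert_same<K: Eq + Hash, V: Eq + Debug>(map: &mut HashMap<K, V>, key: K, value: V) {
    match map.entry(key) {
        Entry::Vacant(e) => {
            e.insert(value);
        }
        Entry::Occupied(e) => {
            assert_eq!(*e.get(), value, "cache entry changed for an existing key");
        }
    }
}

fn main() {
    let mut cache = HashMap::new();
    insert_same(&mut cache, "key", 1); // first insertion
    insert_same(&mut cache, "key", 1); // same value again: fine
    // insert_same(&mut cache, "key", 2); // would panic: cached values must be stable
}
```

Making the anonymous `DepNode` deterministic is what lets the evaluation cache further down in this PR switch from plain `insert` to `insert_same`.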
46 changes: 32 additions & 14 deletions compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -377,24 +377,48 @@ impl<K: DepKind> DepGraph<K> {

/// Executes something within an "anonymous" task, that is, a task the
/// `DepNode` of which is determined by the list of inputs it read from.
pub fn with_anon_task<Tcx: DepContext<DepKind = K>, OP, R>(
&self,
#[inline]
pub fn with_anon_task<Tcx: DepContext<DepKind = K>, OP, R>(
&self,
cx: Tcx,
dep_kind: K,
op: OP,
) -> (R, DepNodeIndex)
where
OP: FnOnce() -> R,
{
self.with_hash_task(cx, dep_kind, op, |task_deps| {
// The dep node indices are hashed here instead of hashing the dep nodes of the
// dependencies. These indices may refer to different nodes per session, but this isn't
// a problem here because we ensure that the final dep node hash is per-session only by
// combining it with the per-session random number `anon_id_seed`. This hash only needs
// to map the dependencies to a single value on a per-session basis.
let mut hasher = StableHasher::new();
task_deps.reads.hash(&mut hasher);
Contributor (inline review comment):
Idea for a simpler solution: could we hash the DepNode instead of the DepNodeIndex to avoid order-dependence? Could this solve the issue without having to make client code do the hashing?

SparrowLii (Member, Author):
This is good advice, but the DepNode in TaskDeps only exists under #[cfg(debug_assertions)]. I'm now using a Hash bound in the where clause of with_hash_task, which can avoid having client code do the hashing.

hasher.finish()
})
}
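As a toy illustration of the comment above (plain std hashing rather than rustc's `StableHasher` and `Fingerprint`): the hash of the recorded reads is only meaningful within one session, so it is mixed with a per-session seed before it becomes the anonymous node's hash.

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Toy model of `anon_id_seed.combine(hasher.finish())`: mix a per-session
// seed into the hash of the recorded reads so the result is only comparable
// within a single session.
fn anon_node_hash(session_seed: u64, reads: &[u32]) -> u64 {
    let mut hasher = DefaultHasher::new();
    session_seed.hash(&mut hasher);
    reads.hash(&mut hasher);
    hasher.finish()
}

fn main() {
    let reads = [3_u32, 7, 9];
    // Same reads, different session seeds => different anonymous node hashes.
    assert_ne!(anon_node_hash(0xA5A5, &reads), anon_node_hash(0x5A5A, &reads));
}
```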

/// Executes something within an "anonymous" task. The `hash` is used for
/// generating the `DepNode`.
pub fn with_hash_task<Ctxt: DepContext<DepKind = K>, OP, R, H>(
&self,
cx: Ctxt,
dep_kind: K,
op: OP,
hash: H,
) -> (R, DepNodeIndex)
where
OP: FnOnce() -> R,
H: FnOnce(&TaskDeps<K>) -> Fingerprint,
{
debug_assert!(!cx.is_eval_always(dep_kind));

if let Some(ref data) = self.data {
let task_deps = Lock::new(TaskDeps::default());
let result = K::with_deps(TaskDepsRef::Allow(&task_deps), op);
let task_deps = task_deps.into_inner();
let task_deps = task_deps.reads;

let dep_node_index = match task_deps.len() {
let dep_node_index = match task_deps.reads.len() {
0 => {
// Because the dep-node id of anon nodes is computed from the sets of its
// dependencies we already know what the ID of this dependency-less node is
@@ -405,29 +429,23 @@ impl<K: DepKind> DepGraph<K> {
}
1 => {
// When there is only one dependency, don't bother creating a node.
task_deps[0]
task_deps.reads[0]
}
_ => {
// The dep node indices are hashed here instead of hashing the dep nodes of the
// dependencies. These indices may refer to different nodes per session, but this isn't
// a problem here because we ensure that the final dep node hash is per-session only by
// combining it with the per-session random number `anon_id_seed`. This hash only needs
// to map the dependencies to a single value on a per-session basis.
let mut hasher = StableHasher::new();
task_deps.hash(&mut hasher);
let hash_result = hash(&task_deps);

let target_dep_node = DepNode {
kind: dep_kind,
// Fingerprint::combine() is faster than sending Fingerprint
// through the StableHasher (at least as long as StableHasher
// is so slow).
hash: data.current.anon_id_seed.combine(hasher.finish()).into(),
hash: data.current.anon_id_seed.combine(hash_result).into(),
};

data.current.intern_new_node(
cx.profiler(),
target_dep_node,
task_deps,
task_deps.reads,
Fingerprint::ZERO,
)
}
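To make the new hook concrete, here is a reduced standalone model of the shape `with_hash_task` gives callers (not rustc's API: `TaskDeps`, the closure signatures, and the plain `u64` fingerprint are simplified stand-ins): run the operation while dependencies are recorded, then let the caller decide what gets hashed into the anonymous node.

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Simplified stand-in for the recorded dependencies of a task.
struct TaskDeps {
    reads: Vec<u32>, // in rustc these would be `DepNodeIndex`es
}

// Reduced model of `with_hash_task`: the caller supplies both the operation
// and the closure that turns the recorded deps into the node fingerprint.
fn with_hash_task<R>(
    op: impl FnOnce(&mut TaskDeps) -> R,
    hash: impl FnOnce(&TaskDeps) -> u64,
) -> (R, u64) {
    let mut deps = TaskDeps { reads: Vec::new() };
    let result = op(&mut deps);
    let fingerprint = hash(&deps);
    (result, fingerprint)
}

fn main() {
    // Deterministic variant, mirroring the trait-selection caller below:
    // hash a stable key instead of the session-dependent read indices.
    let stable_key = ("param_env", "fresh_trait_pred");
    let (result, fingerprint) = with_hash_task(
        |deps| {
            deps.reads.push(42); // pretend the operation read a dependency
            "evaluation result"
        },
        |_deps| {
            let mut h = DefaultHasher::new();
            stable_key.hash(&mut h);
            h.finish()
        },
    );
    println!("{result}: {fingerprint:#x}");
}
```

In the diff above, `with_anon_task` keeps its old behaviour by passing a closure that hashes `task_deps.reads`, while the trait-selection code in the next file passes one that hashes `(param_env, fresh_trait_pred)`.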
41 changes: 34 additions & 7 deletions compiler/rustc_trait_selection/src/traits/select/mod.rs
@@ -49,13 +49,17 @@ use rustc_middle::ty::{self, EarlyBinder, PolyProjectionPredicate, ToPolyTraitRe
use rustc_middle::ty::{Ty, TyCtxt, TypeFoldable, TypeVisitable};
use rustc_span::symbol::sym;

use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::stable_hasher::StableHasher;
use std::cell::{Cell, RefCell};
use std::cmp;
use std::fmt::{self, Display};
use std::hash::Hash;
use std::iter;

pub use rustc_middle::traits::select::*;
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_query_system::dep_graph::TaskDeps;

mod candidate_assembly;
mod confirmation;
@@ -1017,7 +1021,19 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
return Ok(cycle_result);
}

let (result, dep_node) = self.in_task(|this| this.evaluate_stack(&stack));
let (result, dep_node) = if cfg!(parallel_compiler) {
Contributor (inline review comment):
Why not use this branch in both parallel and serial code?

SparrowLii (Member, Author):
Just wanted to minimize the impact on serial mode. We should indeed unify here for code brevity.

self.in_task_with_hash(
|this| this.evaluate_stack(&stack),
|_| {
let mut hasher = StableHasher::new();
(param_env, fresh_trait_pred).hash(&mut hasher);
hasher.finish()
},
)
} else {
self.in_task(|this| this.evaluate_stack(&stack))
};

let result = result?;

if !result.must_apply_modulo_regions() {
@@ -1263,17 +1279,13 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
if self.can_use_global_caches(param_env) {
if !trait_pred.needs_infer() {
debug!(?trait_pred, ?result, "insert_evaluation_cache global");
// This may overwrite the cache with the same value
// FIXME: Due to #50507 this overwrites the different values
// This should be changed to use HashMapExt::insert_same
// when that is fixed
self.tcx().evaluation_cache.insert((param_env, trait_pred), dep_node, result);
self.tcx().evaluation_cache.insert_same((param_env, trait_pred), dep_node, result);
return;
}
}

debug!(?trait_pred, ?result, "insert_evaluation_cache");
self.infcx.evaluation_cache.insert((param_env, trait_pred), dep_node, result);
self.infcx.evaluation_cache.insert_same((param_env, trait_pred), dep_node, result);
}

/// For various reasons, it's possible for a subobligation
@@ -1344,6 +1356,21 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
(result, dep_node)
}

fn in_task_with_hash<OP, R, H>(&mut self, op: OP, hash: H) -> (R, DepNodeIndex)
where
OP: FnOnce(&mut Self) -> R,
H: FnOnce(&TaskDeps<DepKind>) -> Fingerprint,
{
let (result, dep_node) = self.tcx().dep_graph.with_hash_task(
self.tcx(),
DepKind::TraitSelect,
|| op(self),
hash,
);
self.tcx().dep_graph.read_index(dep_node);
(result, dep_node)
}

/// filter_impls filters constant trait obligations and candidates that have a positive impl
/// for a negative goal and a negative impl for a positive goal
#[instrument(level = "debug", skip(self, candidates))]
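Finally, a toy illustration (plain std hashing, not rustc types) of the order-dependence mentioned in the review thread above: hashing the list of read indices depends on the order in which reads were recorded, which can differ between runs under the parallel compiler, whereas hashing the stable query key does not.

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn hash_of<T: Hash>(value: &T) -> u64 {
    let mut h = DefaultHasher::new();
    value.hash(&mut h);
    h.finish()
}

fn main() {
    // The same two reads recorded in a different order hash differently...
    assert_ne!(hash_of(&vec![3_u32, 7]), hash_of(&vec![7_u32, 3]));
    // ...while the stable query key hashes the same no matter when it is computed.
    let key = ("param_env", "fresh_trait_pred");
    assert_eq!(hash_of(&key), hash_of(&key));
}
```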