Auto merge of #78780 - cjgillot:req, r=Mark-Simulacrum
Refactor query forcing

The control flow in the query forcing functions was very complex, with several layers of continuations.

I tried to simplify the implementation while keeping essentially the same logic.
Now, all code paths go through `try_execute_query` for the actual query execution.
Communication with the `dep_graph` and the live caches is now the only difference between query getting, ensuring, and forcing.
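As context for those three entry points, here is a deliberately simplified, self-contained sketch (hypothetical `QueryEngine` and `DepGraphStub` types with a plain `u32` key, not the actual `rustc_query_system` API) of the control flow this refactor converges on: getting, ensuring, and forcing all end in the same `try_execute_query`, and differ only in how they consult the cache and the dep graph beforehand.

use std::collections::HashMap;

struct DepGraphStub {
    green_keys: Vec<u32>, // keys whose previous result is known to still be valid
}

struct QueryEngine {
    cache: HashMap<u32, String>,
    dep_graph: DepGraphStub,
}

impl QueryEngine {
    // The single execution path: every entry point below ends up here.
    fn try_execute_query(&mut self, key: u32) -> String {
        let value = format!("computed for {}", key);
        self.cache.insert(key, value.clone());
        value
    }

    // "Getting": return the cached value if present, otherwise execute.
    fn get(&mut self, key: u32) -> String {
        if let Some(value) = self.cache.get(&key) {
            return value.clone();
        }
        self.try_execute_query(key)
    }

    // "Ensuring": only guarantee the result is up to date; skip execution
    // when the dep graph already shows the old result is still green.
    fn ensure(&mut self, key: u32) {
        if self.dep_graph.green_keys.contains(&key) {
            return;
        }
        self.try_execute_query(key);
    }

    // "Forcing": re-execute unconditionally, ignoring the cache.
    fn force(&mut self, key: u32) -> String {
        self.try_execute_query(key)
    }
}

fn main() {
    let mut engine =
        QueryEngine { cache: HashMap::new(), dep_graph: DepGraphStub { green_keys: vec![1] } };
    engine.ensure(1); // key 1 is green: nothing to do
    println!("{}", engine.get(2)); // cache miss: executes the query
    println!("{}", engine.force(2)); // recomputes even though key 2 is cached
}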
bors committed Sep 11, 2021
2 parents 43769af + 31330bf commit 8c2b6ea
Showing 3 changed files with 266 additions and 372 deletions.
167 changes: 77 additions & 90 deletions compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -11,6 +11,7 @@ use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
 use parking_lot::Mutex;
 use smallvec::{smallvec, SmallVec};
 use std::collections::hash_map::Entry;
+use std::fmt::Debug;
 use std::hash::Hash;
 use std::marker::PhantomData;
 use std::sync::atomic::Ordering::Relaxed;
@@ -208,89 +209,99 @@ impl<K: DepKind> DepGraph<K> {
     /// `arg` parameter.
     ///
     /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html
-    pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A, R>(
+    pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
         &self,
         key: DepNode<K>,
         cx: Ctxt,
         arg: A,
         task: fn(Ctxt, A) -> R,
-        hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
+        hash_result: fn(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
     ) -> (R, DepNodeIndex) {
-        self.with_task_impl(
-            key,
-            cx,
-            arg,
-            task,
-            |_key| {
-                Some(TaskDeps {
-                    #[cfg(debug_assertions)]
-                    node: Some(_key),
-                    reads: SmallVec::new(),
-                    read_set: Default::default(),
-                    phantom_data: PhantomData,
-                })
-            },
-            hash_result,
-        )
+        if self.is_fully_enabled() {
+            self.with_task_impl(key, cx, arg, task, hash_result)
+        } else {
+            // Incremental compilation is turned off. We just execute the task
+            // without tracking. We still provide a dep-node index that uniquely
+            // identifies the task so that we have a cheap way of referring to
+            // the query for self-profiling.
+            (task(cx, arg), self.next_virtual_depnode_index())
+        }
     }
 
-    fn with_task_impl<Ctxt: HasDepContext<DepKind = K>, A, R>(
+    fn with_task_impl<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
         &self,
         key: DepNode<K>,
         cx: Ctxt,
         arg: A,
         task: fn(Ctxt, A) -> R,
-        create_task: fn(DepNode<K>) -> Option<TaskDeps<K>>,
-        hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
+        hash_result: fn(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
     ) -> (R, DepNodeIndex) {
-        if let Some(ref data) = self.data {
-            let dcx = cx.dep_context();
-            let task_deps = create_task(key).map(Lock::new);
-            let result = K::with_deps(task_deps.as_ref(), || task(cx, arg));
-            let edges = task_deps.map_or_else(|| smallvec![], |lock| lock.into_inner().reads);
-
-            let mut hcx = dcx.create_stable_hashing_context();
-            let hashing_timer = dcx.profiler().incr_result_hashing();
-            let current_fingerprint = hash_result(&mut hcx, &result);
-
-            let print_status = cfg!(debug_assertions) && dcx.sess().opts.debugging_opts.dep_tasks;
-
-            // Get timer for profiling `DepNode` interning
-            let node_intern_timer = self
-                .node_intern_event_id
-                .map(|eid| dcx.profiler().generic_activity_with_event_id(eid));
-            // Intern the new `DepNode`.
-            let (dep_node_index, prev_and_color) = data.current.intern_node(
-                dcx.profiler(),
-                &data.previous,
-                key,
-                edges,
-                current_fingerprint,
-                print_status,
-            );
-            drop(node_intern_timer);
-
-            hashing_timer.finish_with_query_invocation_id(dep_node_index.into());
-
-            if let Some((prev_index, color)) = prev_and_color {
-                debug_assert!(
-                    data.colors.get(prev_index).is_none(),
-                    "DepGraph::with_task() - Duplicate DepNodeColor \
-                            insertion for {:?}",
-                    key
-                );
-
-                data.colors.insert(prev_index, color);
-            }
-
-            (result, dep_node_index)
-        } else {
-            // Incremental compilation is turned off. We just execute the task
-            // without tracking. We still provide a dep-node index that uniquely
-            // identifies the task so that we have a cheap way of referring to
-            // the query for self-profiling.
-            (task(cx, arg), self.next_virtual_depnode_index())
-        }
+        // This function is only called when the graph is enabled.
+        let data = self.data.as_ref().unwrap();
+
+        // If the following assertion triggers, it can have two reasons:
+        // 1. Something is wrong with DepNode creation, either here or
+        //    in `DepGraph::try_mark_green()`.
+        // 2. Two distinct query keys get mapped to the same `DepNode`
+        //    (see for example #48923).
+        assert!(
+            !self.dep_node_exists(&key),
+            "forcing query with already existing `DepNode`\n\
+                 - query-key: {:?}\n\
+                 - dep-node: {:?}",
+            arg,
+            key
+        );
+
+        let task_deps = if key.kind.is_eval_always() {
+            None
+        } else {
+            Some(Lock::new(TaskDeps {
+                #[cfg(debug_assertions)]
+                node: Some(key),
+                reads: SmallVec::new(),
+                read_set: Default::default(),
+                phantom_data: PhantomData,
+            }))
+        };
+        let result = K::with_deps(task_deps.as_ref(), || task(cx, arg));
+        let edges = task_deps.map_or_else(|| smallvec![], |lock| lock.into_inner().reads);
+
+        let dcx = cx.dep_context();
+        let mut hcx = dcx.create_stable_hashing_context();
+        let hashing_timer = dcx.profiler().incr_result_hashing();
+        let current_fingerprint = hash_result(&mut hcx, &result);
+
+        let print_status = cfg!(debug_assertions) && dcx.sess().opts.debugging_opts.dep_tasks;
+
+        // Get timer for profiling `DepNode` interning
+        let node_intern_timer =
+            self.node_intern_event_id.map(|eid| dcx.profiler().generic_activity_with_event_id(eid));
+        // Intern the new `DepNode`.
+        let (dep_node_index, prev_and_color) = data.current.intern_node(
+            dcx.profiler(),
+            &data.previous,
+            key,
+            edges,
+            current_fingerprint,
+            print_status,
+        );
+        drop(node_intern_timer);
+
+        hashing_timer.finish_with_query_invocation_id(dep_node_index.into());
+
+        if let Some((prev_index, color)) = prev_and_color {
+            debug_assert!(
+                data.colors.get(prev_index).is_none(),
+                "DepGraph::with_task() - Duplicate DepNodeColor \
+                        insertion for {:?}",
+                key
+            );
+
+            data.colors.insert(prev_index, color);
+        }
+
+        (result, dep_node_index)
     }
 
     /// Executes something within an "anonymous" task, that is, a task the
@@ -357,19 +368,6 @@ impl<K: DepKind> DepGraph<K> {
         }
     }
 
-    /// Executes something within an "eval-always" task which is a task
-    /// that runs whenever anything changes.
-    pub fn with_eval_always_task<Ctxt: HasDepContext<DepKind = K>, A, R>(
-        &self,
-        key: DepNode<K>,
-        cx: Ctxt,
-        arg: A,
-        task: fn(Ctxt, A) -> R,
-        hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
-    ) -> (R, DepNodeIndex) {
-        self.with_task_impl(key, cx, arg, task, |_| None, hash_result)
-    }
-
     #[inline]
     pub fn read_index(&self, dep_node_index: DepNodeIndex) {
         if let Some(ref data) = self.data {
@@ -484,22 +482,11 @@ impl<K: DepKind> DepGraph<K> {
         None
     }
 
-    /// Try to read a node index for the node dep_node.
+    /// Try to mark a node index for the node dep_node.
     ///
     /// A node will have an index, when it's already been marked green, or when we can mark it
     /// green. This function will mark the current task as a reader of the specified node, when
     /// a node index can be found for that node.
-    pub fn try_mark_green_and_read<Ctxt: QueryContext<DepKind = K>>(
-        &self,
-        tcx: Ctxt,
-        dep_node: &DepNode<K>,
-    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
-        self.try_mark_green(tcx, dep_node).map(|(prev_index, dep_node_index)| {
-            debug_assert!(self.is_green(&dep_node));
-            self.read_index(dep_node_index);
-            (prev_index, dep_node_index)
-        })
-    }
-
     pub fn try_mark_green<Ctxt: QueryContext<DepKind = K>>(
         &self,
         tcx: Ctxt,
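As a side note on the `TaskDeps` decision in the `with_task_impl` hunk above: eval-always tasks run with read tracking disabled, while all other tasks record their reads, and the recorded edges become the new node's dependencies. The sketch below illustrates that idea with made-up names (`TaskDeps`, `with_deps`, `read`, plain `u32` dep-node indices); it is not the real rustc implementation.

use std::cell::RefCell;

#[derive(Default)]
struct TaskDeps {
    reads: Vec<u32>, // dep-node indices read while the task runs
}

thread_local! {
    static CURRENT_DEPS: RefCell<Option<TaskDeps>> = RefCell::new(None);
}

// Run `task` with read tracking enabled or disabled; return the recorded edges.
fn with_deps<R>(track: bool, task: impl FnOnce() -> R) -> (R, Vec<u32>) {
    CURRENT_DEPS.with(|slot| {
        *slot.borrow_mut() = if track { Some(TaskDeps::default()) } else { None };
        let result = task();
        let edges = slot.borrow_mut().take().map_or_else(Vec::new, |deps| deps.reads);
        (result, edges)
    })
}

// Called from inside a task whenever it reads another dep node.
fn read(index: u32) {
    CURRENT_DEPS.with(|slot| {
        if let Some(deps) = slot.borrow_mut().as_mut() {
            deps.reads.push(index);
        }
    });
}

fn main() {
    // A normal task records its reads...
    let (_, edges) = with_deps(true, || {
        read(3);
        read(7);
        42
    });
    assert_eq!(edges, vec![3, 7]);

    // ...while an eval-always task records nothing.
    let (_, edges) = with_deps(false, || {
        read(3);
        42
    });
    assert!(edges.is_empty());
}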
2 changes: 2 additions & 0 deletions compiler/rustc_query_system/src/query/job.rs
@@ -143,6 +143,8 @@ impl<D> QueryJobId<D>
 where
     D: Copy + Clone + Eq + Hash,
 {
+    #[cold]
+    #[inline(never)]
     pub(super) fn find_cycle_in_stack(
         &self,
         query_map: QueryMap<D>,
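The two attributes added above are the usual way to keep a rarely-taken error path (here, cycle reporting) from bloating or slowing the hot path that calls it. A minimal illustration of the same pattern, with made-up names unrelated to rustc:

// Cold, never-inlined error path: only reached when a cycle is detected.
#[cold]
#[inline(never)]
fn report_cycle(stack: &[u32]) -> String {
    format!("query cycle detected: {:?}", stack)
}

fn execute(stack: &[u32], has_cycle: bool) -> Result<u32, String> {
    if has_cycle {
        // Rare branch: the cold function keeps this out of the hot path's inline budget.
        return Err(report_cycle(stack));
    }
    Ok(stack.len() as u32)
}

fn main() {
    assert_eq!(execute(&[1, 2, 3], false), Ok(3));
    assert!(execute(&[1, 2, 1], true).is_err());
}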