
Commit ff3c463

Do not adjust ImplicitCtxt twice for each query.
1 parent e9a478e commit ff3c463
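
This commit removes a redundant thread-local update on the query hot path. Previously, `crate::tls::start_query` called `with_context_opt` just to inherit the current context's `task_deps` and then entered a fresh `ImplicitCtxt`, after which the dep-graph task wrapped itself in `crate::tls::with_deps` and adjusted the context a second time. Now the dep-graph entry points (`with_query` / `with_anon_query` below) hand the freshly created `TaskDeps` straight to `start_query`, which builds and enters the `ImplicitCtxt` exactly once. The following is a minimal, self-contained model of that pattern; the types are hypothetical stand-ins (`u64` for `QueryJobId`, `Vec<u32>` for `TaskDeps`, a two-field struct for the real `ImplicitCtxt`), not rustc's actual implementation:

use std::cell::RefCell;

// Hypothetical stand-in for rustc's `ImplicitCtxt`.
#[derive(Clone, Default)]
struct ImplicitCtxt {
    query: Option<u64>,          // stands in for `QueryJobId`
    task_deps: Option<Vec<u32>>, // stands in for `&Lock<TaskDeps>`
}

thread_local! {
    static TLS_CTXT: RefCell<ImplicitCtxt> = RefCell::new(ImplicitCtxt::default());
}

// Install `new_icx` for the duration of `compute`, then restore the previous context.
fn enter_context<R>(new_icx: &ImplicitCtxt, compute: impl FnOnce() -> R) -> R {
    let old = TLS_CTXT.with(|c| c.replace(new_icx.clone()));
    let result = compute();
    TLS_CTXT.with(|c| *c.borrow_mut() = old);
    result
}

// After the commit: the caller passes `task_deps` in, so the context is built and
// entered once per query. Before, a second, nearly identical context was installed
// around the same task by `with_deps`.
fn start_query<R>(token: u64, task_deps: Option<Vec<u32>>, compute: impl FnOnce() -> R) -> R {
    let new_icx = ImplicitCtxt { query: Some(token), task_deps };
    enter_context(&new_icx, compute)
}

fn main() {
    let (job, deps_len) = start_query(1, Some(Vec::new()), || {
        TLS_CTXT.with(|c| {
            let icx = c.borrow();
            (icx.query, icx.task_deps.as_ref().map(|d| d.len()))
        })
    });
    assert_eq!(job, Some(1));
    assert_eq!(deps_len, Some(0));
}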

File tree

compiler/rustc_incremental/src/assert_dep_graph.rs
compiler/rustc_query_system/src/dep_graph/graph.rs
compiler/rustc_query_system/src/query/plumbing.rs
compiler/rustc_query_system/src/tls.rs

4 files changed: +145 -73 lines changed


compiler/rustc_incremental/src/assert_dep_graph.rs

Lines changed: 2 additions & 2 deletions
@@ -55,7 +55,7 @@ use std::io::{BufWriter, Write};
 pub fn assert_dep_graph(tcx: TyCtxt<'_>) {
     tcx.dep_graph.with_ignore(|| {
         if tcx.sess.opts.debugging_opts.dump_dep_graph {
-            tcx.dep_graph.with_query(dump_graph);
+            tcx.dep_graph.with_debug(dump_graph);
         }

         if !tcx.sess.opts.debugging_opts.query_dep_graph {
@@ -207,7 +207,7 @@ fn check_paths<'tcx>(tcx: TyCtxt<'tcx>, if_this_changed: &Sources, then_this_wou
         }
         return;
     }
-    tcx.dep_graph.with_query(|query| {
+    tcx.dep_graph.with_debug(|query| {
         for &(_, source_def_id, ref source_dep_node) in if_this_changed {
             let dependents = query.transitive_predecessors(source_dep_node);
             for &(target_span, ref target_pass, _, ref target_dep_node) in then_this_would_need {
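
Both call sites in this file use the `DepGraphQuery` only for debugging output, so the accessor is renamed from `with_query` to `with_debug` (the rename itself is in graph.rs below). This frees the `with_query` name for the new query-execution entry point that graph.rs introduces.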

compiler/rustc_query_system/src/dep_graph/graph.rs

Lines changed: 116 additions & 52 deletions
@@ -1,13 +1,17 @@
-use parking_lot::Mutex;
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_data_structures::profiling::{EventId, QueryInvocationId, SelfProfilerRef};
 use rustc_data_structures::sharded::{self, Sharded};
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
 use rustc_data_structures::steal::Steal;
 use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, Ordering};
+use rustc_data_structures::thin_vec::ThinVec;
+use rustc_data_structures::unlikely;
+use rustc_errors::Diagnostic;
 use rustc_index::vec::IndexVec;
 use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
+
+use parking_lot::Mutex;
 use smallvec::{smallvec, SmallVec};
 use std::collections::hash_map::Entry;
 use std::fmt::Debug;
@@ -19,7 +23,7 @@ use super::query::DepGraphQuery;
 use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex};
 use super::{DepContext, DepKind, DepNode, HasDepContext, WorkProductId};
 use crate::ich::StableHashingContext;
-use crate::query::{QueryContext, QuerySideEffects};
+use crate::query::{QueryContext, QueryJobId, QuerySideEffects};

 #[cfg(debug_assertions)]
 use {super::debug::EdgeFilter, std::env};
@@ -151,7 +155,7 @@ impl DepGraph {
         self.data.is_some()
     }

-    pub fn with_query(&self, f: impl Fn(&DepGraphQuery)) {
+    pub fn with_debug(&self, f: impl Fn(&DepGraphQuery)) {
         if let Some(data) = &self.data {
             data.current.encoder.borrow().with_query(f)
         }
@@ -209,7 +213,13 @@ impl DepGraph {
         hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
     ) -> (R, DepNodeIndex) {
         if self.is_fully_enabled() {
-            self.with_task_impl(key, cx, arg, task, hash_result)
+            self.with_task_impl(
+                key,
+                cx,
+                arg,
+                |arg, task_deps| crate::tls::with_deps(task_deps, || task(cx, arg)),
+                hash_result,
+            )
         } else {
             // Incremental compilation is turned off. We just execute the task
             // without tracking. We still provide a dep-node index that uniquely
@@ -219,12 +229,35 @@
         }
     }

+    pub(crate) fn with_query<Ctxt: QueryContext, A: Debug, R>(
+        &self,
+        key: DepNode,
+        cx: Ctxt,
+        arg: A,
+        token: QueryJobId,
+        diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
+        task: fn(Ctxt::DepContext, A) -> R,
+        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
+    ) -> (R, DepNodeIndex) {
+        self.with_task_impl(
+            key,
+            cx,
+            arg,
+            |arg, task_deps| {
+                crate::tls::start_query(token, diagnostics, task_deps, || {
+                    task(*cx.dep_context(), arg)
+                })
+            },
+            hash_result,
+        )
+    }
+
     fn with_task_impl<Ctxt: HasDepContext, A: Debug, R>(
         &self,
         key: DepNode,
         cx: Ctxt,
         arg: A,
-        task: fn(Ctxt, A) -> R,
+        invoke: impl FnOnce(A, Option<&Lock<TaskDeps>>) -> R,
         hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
     ) -> (R, DepNodeIndex) {
         // This function is only called when the graph is enabled.
@@ -255,7 +288,7 @@ impl DepGraph {
                 phantom_data: PhantomData,
             }))
         };
-        let result = crate::tls::with_deps(task_deps.as_ref(), || task(cx, arg));
+        let result = invoke(arg, task_deps.as_ref());
        let edges = task_deps.map_or_else(|| smallvec![], |lock| lock.into_inner().reads);

         let dcx = cx.dep_context();
@@ -306,57 +339,88 @@
     {
         debug_assert!(!cx.is_eval_always(dep_kind));

-        if let Some(ref data) = self.data {
-            let task_deps = Lock::new(TaskDeps::default());
-            let result = crate::tls::with_deps(Some(&task_deps), op);
-            let task_deps = task_deps.into_inner();
-            let task_deps = task_deps.reads;
-
-            let dep_node_index = match task_deps.len() {
-                0 => {
-                    // Because the dep-node id of anon nodes is computed from the sets of its
-                    // dependencies we already know what the ID of this dependency-less node is
-                    // going to be (i.e. equal to the precomputed
-                    // `SINGLETON_DEPENDENCYLESS_ANON_NODE`). As a consequence we can skip creating
-                    // a `StableHasher` and sending the node through interning.
-                    DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE
-                }
-                1 => {
-                    // When there is only one dependency, don't bother creating a node.
-                    task_deps[0]
-                }
-                _ => {
-                    // The dep node indices are hashed here instead of hashing the dep nodes of the
-                    // dependencies. These indices may refer to different nodes per session, but this isn't
-                    // a problem here because we that ensure the final dep node hash is per session only by
-                    // combining it with the per session random number `anon_id_seed`. This hash only need
-                    // to map the dependencies to a single value on a per session basis.
-                    let mut hasher = StableHasher::new();
-                    task_deps.hash(&mut hasher);
-
-                    let target_dep_node = DepNode {
-                        kind: dep_kind,
-                        // Fingerprint::combine() is faster than sending Fingerprint
-                        // through the StableHasher (at least as long as StableHasher
-                        // is so slow).
-                        hash: data.current.anon_id_seed.combine(hasher.finish()).into(),
-                    };
-
-                    data.current.intern_new_node(
-                        cx.profiler(),
-                        target_dep_node,
-                        task_deps,
-                        Fingerprint::ZERO,
-                    )
-                }
-            };
-
-            (result, dep_node_index)
+        if self.is_fully_enabled() {
+            self.with_anon_task_impl(*cx.dep_context(), dep_kind, |task_deps| {
+                crate::tls::with_deps(task_deps, op)
+            })
         } else {
             (op(), self.next_virtual_depnode_index())
         }
     }

+    /// Executes something within an "anonymous" task, that is, a task the
+    /// `DepNode` of which is determined by the list of inputs it read from.
+    pub(crate) fn with_anon_query<Ctxt: QueryContext, A, R>(
+        &self,
+        dep_kind: DepKind,
+        cx: Ctxt,
+        arg: A,
+        token: QueryJobId,
+        diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
+        task: fn(Ctxt::DepContext, A) -> R,
+    ) -> (R, DepNodeIndex) {
+        debug_assert!(!cx.dep_context().is_eval_always(dep_kind));
+
+        self.with_anon_task_impl(*cx.dep_context(), dep_kind, |task_deps| {
+            crate::tls::start_query(token, diagnostics, task_deps, || task(*cx.dep_context(), arg))
+        })
+    }
+
+    fn with_anon_task_impl<Ctxt: DepContext, R>(
+        &self,
+        cx: Ctxt,
+        dep_kind: DepKind,
+        invoke: impl FnOnce(Option<&Lock<TaskDeps>>) -> R,
+    ) -> (R, DepNodeIndex) {
+        debug_assert!(!cx.dep_context().is_eval_always(dep_kind));
+
+        let data = self.data.as_ref().unwrap();
+        let task_deps = Lock::new(TaskDeps::default());
+        let result = invoke(Some(&task_deps));
+        let task_deps = task_deps.into_inner().reads;
+
+        let dep_node_index = match task_deps.len() {
+            0 => {
+                // Because the dep-node id of anon nodes is computed from the sets of its
+                // dependencies we already know what the ID of this dependency-less node is
+                // going to be (i.e. equal to the precomputed
+                // `SINGLETON_DEPENDENCYLESS_ANON_NODE`). As a consequence we can skip creating
+                // a `StableHasher` and sending the node through interning.
+                DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE
+            }
+            1 => {
+                // When there is only one dependency, don't bother creating a node.
+                task_deps[0]
+            }
+            _ => {
+                // The dep node indices are hashed here instead of hashing the dep nodes of the
+                // dependencies. These indices may refer to different nodes per session, but this isn't
+                // a problem here because we that ensure the final dep node hash is per session only by
+                // combining it with the per session random number `anon_id_seed`. This hash only need
+                // to map the dependencies to a single value on a per session basis.
+                let mut hasher = StableHasher::new();
+                task_deps.hash(&mut hasher);
+
+                let target_dep_node = DepNode {
+                    kind: dep_kind,
+                    // Fingerprint::combine() is faster than sending Fingerprint
+                    // through the StableHasher (at least as long as StableHasher
+                    // is so slow).
+                    hash: data.current.anon_id_seed.combine(hasher.finish()).into(),
+                };
+
+                data.current.intern_new_node(
+                    cx.profiler(),
+                    target_dep_node,
+                    task_deps,
+                    Fingerprint::ZERO,
+                )
+            }
+        };
+
+        (result, dep_node_index)
+    }
+
     #[inline]
     pub fn read_index(&self, dep_node_index: DepNodeIndex) {
         if let Some(ref data) = self.data {
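
The structural change in this file: `with_task_impl` and the new `with_anon_task_impl` no longer call `crate::tls::with_deps` themselves. They take an `invoke` closure instead, so the plain task path wraps the task in `with_deps` only, while the new query paths (`with_query`, `with_anon_query`) wrap it in `crate::tls::start_query`, which installs the job, diagnostics, and `TaskDeps` in a single `ImplicitCtxt`. Below is a minimal, self-contained sketch of that shape with hypothetical stand-in types, not the real `DepGraph`/`TaskDeps`:

// Hypothetical stand-in for `TaskDeps`: the reads recorded while a task runs.
#[derive(Default)]
struct Deps {
    reads: Vec<u32>,
}

// Shared implementation: set up dependency tracking, then let the caller decide
// how to wrap the task (this mirrors the new `invoke` parameter).
fn with_task_impl<A, R>(arg: A, invoke: impl FnOnce(A, &mut Deps) -> R) -> (R, usize) {
    let mut deps = Deps::default();
    let result = invoke(arg, &mut deps);
    (result, deps.reads.len()) // stands in for interning a dep node from the reads
}

// Non-query path (`with_task`): only dependency tracking is installed around the task.
fn with_task<A, R>(arg: A, task: fn(A) -> R) -> (R, usize) {
    with_task_impl(arg, |arg, _deps| task(arg))
}

// Query path (`with_query`): a single wrapper installs the query job *and* the
// dependency tracking, standing in for `crate::tls::start_query(token, ..)`.
fn with_query<A, R>(arg: A, token: u64, task: fn(A) -> R) -> (R, usize) {
    with_task_impl(arg, move |arg, _deps| {
        let _installed_job = token; // the one and only ImplicitCtxt adjustment happens here
        task(arg)
    })
}

fn main() {
    assert_eq!(with_task(21, |x| x * 2).0, 42);
    assert_eq!(with_query(21, 7, |x| x * 2).0, 42);
}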

compiler/rustc_query_system/src/query/plumbing.rs

Lines changed: 22 additions & 11 deletions
@@ -429,7 +429,7 @@ where
     if !dep_graph.is_fully_enabled() {
         let prof_timer = tcx.dep_context().profiler().query_provider();
         let result =
-            crate::tls::start_query(job_id, None, || query.compute(*tcx.dep_context(), key));
+            crate::tls::start_query(job_id, None, None, || query.compute(*tcx.dep_context(), key));
         let dep_node_index = dep_graph.next_virtual_depnode_index();
         prof_timer.finish_with_query_invocation_id(dep_node_index.into());
         return (result, dep_node_index);
@@ -442,7 +442,7 @@ where

         // The diagnostics for this query will be promoted to the current session during
         // `try_mark_green()`, so we can ignore them here.
-        if let Some(ret) = crate::tls::start_query(job_id, None, || {
+        if let Some(ret) = crate::tls::start_query(job_id, None, None, || {
             try_load_from_disk_and_cache_in_memory(tcx, &key, &dep_node, query)
         }) {
             return ret;
@@ -452,18 +452,29 @@ where
     let prof_timer = tcx.dep_context().profiler().query_provider();
     let diagnostics = Lock::new(ThinVec::new());

-    let (result, dep_node_index) = crate::tls::start_query(job_id, Some(&diagnostics), || {
-        if query.anon {
-            return dep_graph.with_anon_task(*tcx.dep_context(), query.dep_kind, || {
-                query.compute(*tcx.dep_context(), key)
-            });
-        }
-
+    let (result, dep_node_index) = if query.anon {
+        dep_graph.with_anon_query(
+            query.dep_kind,
+            tcx,
+            key,
+            job_id,
+            Some(&diagnostics),
+            query.compute,
+        )
+    } else {
         // `to_dep_node` is expensive for some `DepKind`s.
         let dep_node = dep_node_opt.unwrap_or_else(|| query.to_dep_node(*tcx.dep_context(), &key));

-        dep_graph.with_task(dep_node, *tcx.dep_context(), key, query.compute, query.hash_result)
-    });
+        dep_graph.with_query(
+            dep_node,
+            tcx,
+            key,
+            job_id,
+            Some(&diagnostics),
+            query.compute,
+            query.hash_result,
+        )
+    };

     prof_timer.finish_with_query_invocation_id(dep_node_index.into());

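With the dep-graph entry points owning the TLS setup, the caller here no longer wraps `with_task`/`with_anon_task` in its own `crate::tls::start_query` call: it branches on `query.anon` and forwards `job_id` and the diagnostics lock to `with_query`/`with_anon_query`, while the untracked and load-from-disk paths pass the new third argument (`task_deps`) as `None`.
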
compiler/rustc_query_system/src/tls.rs

Lines changed: 5 additions & 8 deletions
@@ -140,15 +140,12 @@ pub fn current_query_job() -> Option<QueryJobId> {
 pub fn start_query<R>(
     token: QueryJobId,
     diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
+    task_deps: Option<&Lock<TaskDeps>>,
     compute: impl FnOnce() -> R,
 ) -> R {
-    with_context_opt(move |current_icx| {
-        let task_deps = current_icx.and_then(|icx| icx.task_deps);
+    // Update the `ImplicitCtxt` to point to our new query job.
+    let new_icx = ImplicitCtxt { query: Some(token), diagnostics, task_deps };

-        // Update the `ImplicitCtxt` to point to our new query job.
-        let new_icx = ImplicitCtxt { query: Some(token), diagnostics, task_deps };
-
-        // Use the `ImplicitCtxt` while we execute the query.
-        enter_context(&new_icx, |_| rustc_data_structures::stack::ensure_sufficient_stack(compute))
-    })
+    // Use the `ImplicitCtxt` while we execute the query.
+    enter_context(&new_icx, |_| rustc_data_structures::stack::ensure_sufficient_stack(compute))
 }
}
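This is the file the commit message is about: `start_query` no longer peeks at the current context via `with_context_opt` to copy its `task_deps` before entering a second context. The caller now supplies `task_deps` directly, so the `ImplicitCtxt` is constructed and entered exactly once per query.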
