@@ -11,9 +11,9 @@ use hir::db::DefDatabase;
 use crate::{
     base_db::{
         salsa::{Database, ParallelDatabase, Snapshot},
-        Cancelled, CrateGraph, CrateId, SourceDatabase, SourceDatabaseExt,
+        Cancelled, CrateId, SourceDatabase, SourceDatabaseExt,
     },
-    FxHashSet, FxIndexMap, RootDatabase,
+    FxIndexMap, RootDatabase,
 };
 
 /// We're indexing many crates.
@@ -36,19 +36,10 @@ pub fn parallel_prime_caches(
 
     let graph = db.crate_graph();
     let mut crates_to_prime = {
-        let crate_ids = compute_crates_to_prime(db, &graph);
-
         let mut builder = topologic_sort::TopologicalSortIter::builder();
 
-        for &crate_id in &crate_ids {
-            let crate_data = &graph[crate_id];
-            let dependencies = crate_data
-                .dependencies
-                .iter()
-                .map(|d| d.crate_id)
-                .filter(|i| crate_ids.contains(i));
-
-            builder.add(crate_id, dependencies);
+        for crate_id in graph.iter() {
+            builder.add(crate_id, graph[crate_id].dependencies.iter().map(|d| d.crate_id));
         }
 
         builder.build()
@@ -62,27 +53,34 @@ pub fn parallel_prime_caches(
     let (work_sender, progress_receiver) = {
         let (progress_sender, progress_receiver) = crossbeam_channel::unbounded();
         let (work_sender, work_receiver) = crossbeam_channel::unbounded();
+        let graph = graph.clone();
         let prime_caches_worker = move |db: Snapshot<RootDatabase>| {
             while let Ok((crate_id, crate_name)) = work_receiver.recv() {
                 progress_sender
                     .send(ParallelPrimeCacheWorkerProgress::BeginCrate { crate_id, crate_name })?;
 
-                // This also computes the DefMap
-                db.import_map(crate_id);
+                let file_id = graph[crate_id].root_file_id;
+                let root_id = db.file_source_root(file_id);
+                if db.source_root(root_id).is_library {
+                    db.crate_def_map(crate_id);
+                } else {
+                    // This also computes the DefMap
+                    db.import_map(crate_id);
+                }
 
                 progress_sender.send(ParallelPrimeCacheWorkerProgress::EndCrate { crate_id })?;
             }
 
             Ok::<_, crossbeam_channel::SendError<_>>(())
         };
 
-        for _ in 0..num_worker_threads {
+        for id in 0..num_worker_threads {
            let worker = prime_caches_worker.clone();
            let db = db.snapshot();
 
            stdx::thread::Builder::new(stdx::thread::ThreadIntent::Worker)
                .allow_leak(true)
-                .name("PrimeCaches".to_owned())
+                .name(format!("PrimeCaches#{id}"))
                .spawn(move || Cancelled::catch(|| worker(db)))
                .expect("failed to spawn thread");
        }
@@ -96,7 +94,7 @@ pub fn parallel_prime_caches(
     // an index map is used to preserve ordering so we can sort the progress report in order of
     // "longest crate to index" first
     let mut crates_currently_indexing =
-        FxIndexMap::with_capacity_and_hasher(num_worker_threads as _, Default::default());
+        FxIndexMap::with_capacity_and_hasher(num_worker_threads, Default::default());
 
     while crates_done < crates_total {
         db.unwind_if_cancelled();
@@ -144,19 +142,3 @@ pub fn parallel_prime_caches(
         cb(progress);
     }
 }
-
-fn compute_crates_to_prime(db: &RootDatabase, graph: &CrateGraph) -> FxHashSet<CrateId> {
-    // We're only interested in the workspace crates and the `ImportMap`s of their direct
-    // dependencies, though in practice the latter also compute the `DefMap`s.
-    // We don't prime transitive dependencies because they're generally not visible in
-    // the current workspace.
-    graph
-        .iter()
-        .filter(|&id| {
-            let file_id = graph[id].root_file_id;
-            let root_id = db.file_source_root(file_id);
-            !db.source_root(root_id).is_library
-        })
-        .flat_map(|id| graph[id].dependencies.iter().map(|krate| krate.crate_id))
-        .collect()
-}
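
For context, here is a minimal, self-contained sketch of the worker-pool shape this change keeps using: a fixed number of numbered worker threads drain a shared crossbeam work queue and report progress back over a second channel. The `u32` work items, the `main` driver, and the elided "prime caches" step are illustrative stand-ins, not rust-analyzer's actual `CrateId` or database types.

// Sketch only: numbered workers pull work items from a shared queue and report
// progress on a separate channel. Assumes `crossbeam-channel` as a dependency.
use crossbeam_channel::unbounded;
use std::thread;

fn main() {
    let num_worker_threads = 4;
    let (work_sender, work_receiver) = unbounded::<u32>();
    let (progress_sender, progress_receiver) = unbounded::<u32>();

    let mut handles = Vec::new();
    for id in 0..num_worker_threads {
        // crossbeam channels are MPMC, so each worker can clone both ends it needs.
        let work_receiver = work_receiver.clone();
        let progress_sender = progress_sender.clone();
        let handle = thread::Builder::new()
            .name(format!("PrimeCaches#{id}"))
            .spawn(move || {
                // `recv` returns Err once all senders are dropped and the queue is drained.
                while let Ok(item) = work_receiver.recv() {
                    // ... prime caches for `item` here (elided in this sketch) ...
                    let _ = progress_sender.send(item);
                }
            })
            .expect("failed to spawn thread");
        handles.push(handle);
    }

    // Keep only the workers' clones of the progress sender alive.
    drop(progress_sender);

    for item in 0..10u32 {
        work_sender.send(item).unwrap();
    }
    // Closing the work queue lets the workers exit their loops.
    drop(work_sender);

    // Iteration ends once every worker has finished and dropped its progress sender.
    for done in progress_receiver {
        println!("finished item {done}");
    }
    for handle in handles {
        handle.join().unwrap();
    }
}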