// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/wasm/module-compiler.h"
#include <algorithm>
#include <atomic>
#include <memory>
#include <queue>
#include "src/api/api-inl.h"
#include "src/base/enum-set.h"
#include "src/base/fpu.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
#include "src/base/platform/time.h"
#include "src/codegen/compiler.h"
#include "src/compiler/wasm-compiler.h"
#include "src/debug/debug.h"
#include "src/handles/global-handles-inl.h"
#include "src/logging/counters-scopes.h"
#include "src/logging/metrics.h"
#include "src/tracing/trace-event.h"
#include "src/wasm/code-space-access.h"
#include "src/wasm/compilation-environment-inl.h"
#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/pgo.h"
#include "src/wasm/std-object-sizes.h"
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-code-pointer-table-inl.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-feature-flags.h"
#include "src/wasm/wasm-import-wrapper-cache.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-result.h"
#include "src/wasm/wasm-serialization.h"
#define TRACE_COMPILE(...) \
do { \
if (v8_flags.trace_wasm_compiler) PrintF(__VA_ARGS__); \
} while (false)
#define TRACE_STREAMING(...) \
do { \
if (v8_flags.trace_wasm_streaming) PrintF(__VA_ARGS__); \
} while (false)
#define TRACE_LAZY(...) \
do { \
if (v8_flags.trace_wasm_lazy_compilation) PrintF(__VA_ARGS__); \
} while (false)
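// These macros take printf-style arguments and only produce output when the
// corresponding flag is enabled at runtime (e.g. --trace-wasm-compiler for
// TRACE_COMPILE). Illustrative usage:
//   TRACE_COMPILE("Compiling function %d\n", func_index);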
namespace v8::internal::wasm {
namespace {
enum class CompileStrategy : uint8_t {
// Compiles functions on first use. In this case, execution will block until
// the function's baseline tier is compiled; top tier compilation then starts
// in the background (if applicable).
// Lazy compilation can help to reduce startup time and code size, at the
// risk of blocking execution.
kLazy,
// Compiles baseline ahead of execution and starts top tier compilation in
// background (if applicable).
kEager,
// Marker for default strategy.
kDefault = kEager,
};
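// Which strategy applies is decided per module based on flags and embedder
// configuration; e.g. {kLazy} is typically used when --wasm-lazy-compilation
// is enabled.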
class CompilationStateImpl;
class CompilationUnitBuilder;
class V8_NODISCARD BackgroundCompileScope {
public:
explicit BackgroundCompileScope(std::weak_ptr<NativeModule> native_module)
: native_module_(native_module.lock()) {}
NativeModule* native_module() const {
DCHECK(native_module_);
return native_module_.get();
}
inline CompilationStateImpl* compilation_state() const;
bool cancelled() const;
private:
// Keep the native module alive while in this scope.
std::shared_ptr<NativeModule> native_module_;
};
enum CompilationTier { kBaseline = 0, kTopTier = 1, kNumTiers = kTopTier + 1 };
// A set of work-stealing queues (vectors of units). Each background compile
// task owns one of the queues and steals from all others once its own queue
// runs empty.
class CompilationUnitQueues {
public:
// Public API for QueueImpl.
struct Queue {
bool ShouldPublish(int num_processed_units) const;
};
explicit CompilationUnitQueues(int num_imported_functions,
int num_declared_functions)
: num_imported_functions_(num_imported_functions),
num_declared_functions_(num_declared_functions) {
// Add a first queue, to which units can be added.
queues_.emplace_back(std::make_unique<QueueImpl>(0));
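// Note: {__cpp_lib_atomic_value_initialization} is the C++20 (P0883) feature
// test macro; on older standard libraries, default-constructed {std::atomic}
// objects are left uninitialized, so they are initialized explicitly below.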
#if !defined(__cpp_lib_atomic_value_initialization) || \
__cpp_lib_atomic_value_initialization < 201911L
for (auto& atomic_counter : num_units_) {
std::atomic_init(&atomic_counter, size_t{0});
}
#endif
top_tier_compiled_ =
std::make_unique<std::atomic<bool>[]>(num_declared_functions);
#if !defined(__cpp_lib_atomic_value_initialization) || \
__cpp_lib_atomic_value_initialization < 201911L
for (int i = 0; i < num_declared_functions; i++) {
std::atomic_init(&top_tier_compiled_.get()[i], false);
}
#endif
}
Queue* GetQueueForTask(int task_id) {
int required_queues = task_id + 1;
{
base::MutexGuard queues_guard{&queues_mutex_};
if (V8_LIKELY(static_cast<int>(queues_.size()) >= required_queues)) {
return queues_[task_id].get();
}
}
// Otherwise increase the number of queues.
base::MutexGuard queues_guard{&queues_mutex_};
int num_queues = static_cast<int>(queues_.size());
while (num_queues < required_queues) {
int steal_from = num_queues + 1;
queues_.emplace_back(std::make_unique<QueueImpl>(steal_from));
++num_queues;
}
// Update the {publish_limit}s of all queues.
// We want background threads to publish regularly (to avoid contention when
// they are all publishing at the end). On the other hand, each publish
// has some overhead (part of it for synchronizing between threads), so it
// should not happen *too* often. Thus aim for 4-8 publishes per thread, but
// distribute it such that publishing is likely to happen at different
// times.
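// Illustrative example (numbers made up): with 1000 declared functions and
// 4 queues, {units_per_thread} is 250 and {min} is 31, so the per-queue
// limits come out as 31, 38, 46, and 54, staggering the publish points
// across threads.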
int units_per_thread = num_declared_functions_ / num_queues;
int min = std::max(10, units_per_thread / 8);
int queue_id = 0;
for (auto& queue : queues_) {
// Set a limit between {min} and {2*min}, but not smaller than {10}.
int limit = min + (min * queue_id / num_queues);
queue->publish_limit.store(limit, std::memory_order_relaxed);
++queue_id;
}
return queues_[task_id].get();
}
std::optional<WasmCompilationUnit> GetNextUnit(Queue* queue,
CompilationTier tier) {
DCHECK_LT(tier, CompilationTier::kNumTiers);
if (auto unit = GetNextUnitOfTier(queue, tier)) {
[[maybe_unused]] size_t old_units_count =
num_units_[tier].fetch_sub(1, std::memory_order_relaxed);
DCHECK_LE(1, old_units_count);
return unit;
}
return {};
}
void AddUnits(base::Vector<WasmCompilationUnit> baseline_units,
base::Vector<WasmCompilationUnit> top_tier_units,
const WasmModule* module) {
DCHECK_LT(0, baseline_units.size() + top_tier_units.size());
// Add to the individual queues in a round-robin fashion. No special care is
// taken to balance them; they will be balanced by work stealing.
QueueImpl* queue;
{
int queue_to_add = next_queue_to_add.load(std::memory_order_relaxed);
base::MutexGuard queues_guard{&queues_mutex_};
while (!next_queue_to_add.compare_exchange_weak(
queue_to_add, next_task_id(queue_to_add, queues_.size()),
std::memory_order_relaxed)) {
// Retry with updated {queue_to_add}.
}
queue = queues_[queue_to_add].get();
}
base::MutexGuard guard(&queue->mutex);
std::optional<base::MutexGuard> big_units_guard;
for (auto pair :
{std::make_pair(CompilationTier::kBaseline, baseline_units),
std::make_pair(CompilationTier::kTopTier, top_tier_units)}) {
int tier = pair.first;
base::Vector<WasmCompilationUnit> units = pair.second;
if (units.empty()) continue;
num_units_[tier].fetch_add(units.size(), std::memory_order_relaxed);
for (WasmCompilationUnit unit : units) {
size_t func_size = module->functions[unit.func_index()].code.length();
if (func_size <= kBigUnitsLimit) {
queue->units[tier].push_back(unit);
} else {
if (!big_units_guard) {
big_units_guard.emplace(&big_units_queue_.mutex);
}
big_units_queue_.has_units[tier].store(true,
std::memory_order_relaxed);
big_units_queue_.units[tier].emplace(func_size, unit);
}
}
}
}
void AddTopTierPriorityUnit(WasmCompilationUnit unit, size_t priority) {
base::MutexGuard queues_guard{&queues_mutex_};
// Add to the individual queues in a round-robin fashion. No special care is
// taken to balance them; they will be balanced by work stealing.
// Priorities should only be seen as a hint here; without balancing, we
// might pop a unit with lower priority from one queue while other queues
// still hold higher-priority units.
// Since updating priorities in a std::priority_queue is difficult, we just
// add new units with higher priorities, and use the
// {CompilationUnitQueues::top_tier_compiled_} array to discard units for
// functions which are already being compiled.
int queue_to_add = next_queue_to_add.load(std::memory_order_relaxed);
while (!next_queue_to_add.compare_exchange_weak(
queue_to_add, next_task_id(queue_to_add, queues_.size()),
std::memory_order_relaxed)) {
// Retry with updated {queue_to_add}.
}
{
auto* queue = queues_[queue_to_add].get();
base::MutexGuard guard(&queue->mutex);
queue->top_tier_priority_units.emplace(priority, unit);
num_priority_units_.fetch_add(1, std::memory_order_relaxed);
num_units_[CompilationTier::kTopTier].fetch_add(
1, std::memory_order_relaxed);
}
}
// Get the current number of units in the queue for {tier}. This is only a
// momentary snapshot; it is not guaranteed that {GetNextUnit} returns a unit
// if this method returns non-zero.
size_t GetSizeForTier(CompilationTier tier) const {
DCHECK_LT(tier, CompilationTier::kNumTiers);
return num_units_[tier].load(std::memory_order_relaxed);
}
void AllowAnotherTopTierJob(uint32_t func_index) {
top_tier_compiled_[declared_function_index(func_index)].store(
false, std::memory_order_relaxed);
}
size_t EstimateCurrentMemoryConsumption() const;
private:
// Functions bigger than {kBigUnitsLimit} will be compiled first, in
// descending order of their function body size.
static constexpr size_t kBigUnitsLimit = 4096;
struct BigUnit {
BigUnit(size_t func_size, WasmCompilationUnit unit)
: func_size{func_size}, unit(unit) {}
size_t func_size;
WasmCompilationUnit unit;
bool operator<(const BigUnit& other) const {
return func_size < other.func_size;
}
};
struct TopTierPriorityUnit {
TopTierPriorityUnit(int priority, WasmCompilationUnit unit)
: priority(priority), unit(unit) {}
size_t priority;
WasmCompilationUnit unit;
bool operator<(const TopTierPriorityUnit& other) const {
return priority < other.priority;
}
};
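// Note that {std::priority_queue} is a max-heap with respect to {operator<},
// so {top()} yields the biggest {BigUnit} and the highest-priority
// {TopTierPriorityUnit}, respectively.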
struct BigUnitsQueue {
BigUnitsQueue() {
#if !defined(__cpp_lib_atomic_value_initialization) || \
__cpp_lib_atomic_value_initialization < 201911L
for (auto& atomic : has_units) std::atomic_init(&atomic, false);
#endif
}
mutable base::Mutex mutex;
// Can be read concurrently to check whether any elements are in the queue.
std::atomic<bool> has_units[CompilationTier::kNumTiers];
// Protected by {mutex}:
std::priority_queue<BigUnit> units[CompilationTier::kNumTiers];
};
struct QueueImpl : public Queue {
explicit QueueImpl(int next_steal_task_id)
: next_steal_task_id(next_steal_task_id) {}
// Number of units after which the task processing this queue should publish
// compilation results. Updated (reduced, using relaxed ordering) when new
// queues are allocated. If there is only one thread running, we can delay
// publishing arbitrarily.
std::atomic<int> publish_limit{kMaxInt};
base::Mutex mutex;
// All fields below are protected by {mutex}.
std::vector<WasmCompilationUnit> units[CompilationTier::kNumTiers];
std::priority_queue<TopTierPriorityUnit> top_tier_priority_units;
int next_steal_task_id;
};
int next_task_id(int task_id, size_t num_queues) const {
int next = task_id + 1;
return next == static_cast<int>(num_queues) ? 0 : next;
}
std::optional<WasmCompilationUnit> GetNextUnitOfTier(Queue* public_queue,
int tier) {
QueueImpl* queue = static_cast<QueueImpl*>(public_queue);
// First check whether there is a priority unit. Execute that first.
if (tier == CompilationTier::kTopTier) {
if (auto unit = GetTopTierPriorityUnit(queue)) {
return unit;
}
}
// Then check whether there is a big unit of that tier.
if (auto unit = GetBigUnitOfTier(tier)) return unit;
// Finally check whether our own queue has a unit of the wanted tier. If
// so, return it, otherwise get the task id to steal from.
int steal_task_id;
{
base::MutexGuard mutex_guard(&queue->mutex);
if (!queue->units[tier].empty()) {
auto unit = queue->units[tier].back();
queue->units[tier].pop_back();
return unit;
}
steal_task_id = queue->next_steal_task_id;
}
// Try to steal from all other queues. If this succeeds, return one of the
// stolen units.
{
base::MutexGuard guard{&queues_mutex_};
for (size_t steal_trials = 0; steal_trials < queues_.size();
++steal_trials, ++steal_task_id) {
if (steal_task_id >= static_cast<int>(queues_.size())) {
steal_task_id = 0;
}
if (auto unit = StealUnitsAndGetFirst(queue, steal_task_id, tier)) {
return unit;
}
}
}
// If we reach here, we didn't find any unit of the requested tier.
return {};
}
std::optional<WasmCompilationUnit> GetBigUnitOfTier(int tier) {
// Fast path without locking.
if (!big_units_queue_.has_units[tier].load(std::memory_order_relaxed)) {
return {};
}
base::MutexGuard guard(&big_units_queue_.mutex);
if (big_units_queue_.units[tier].empty()) return {};
WasmCompilationUnit unit = big_units_queue_.units[tier].top().unit;
big_units_queue_.units[tier].pop();
if (big_units_queue_.units[tier].empty()) {
big_units_queue_.has_units[tier].store(false, std::memory_order_relaxed);
}
return unit;
}
std::optional<WasmCompilationUnit> GetTopTierPriorityUnit(QueueImpl* queue) {
// Fast path without locking.
if (num_priority_units_.load(std::memory_order_relaxed) == 0) {
return {};
}
int steal_task_id;
{
base::MutexGuard mutex_guard(&queue->mutex);
while (!queue->top_tier_priority_units.empty()) {
auto unit = queue->top_tier_priority_units.top().unit;
queue->top_tier_priority_units.pop();
num_priority_units_.fetch_sub(1, std::memory_order_relaxed);
if (!top_tier_compiled_[declared_function_index(unit.func_index())]
.exchange(true, std::memory_order_relaxed)) {
return unit;
}
num_units_[CompilationTier::kTopTier].fetch_sub(
1, std::memory_order_relaxed);
}
steal_task_id = queue->next_steal_task_id;
}
// Try to steal from all other queues. If this succeeds, return one of the
// stolen units.
{
base::MutexGuard guard{&queues_mutex_};
for (size_t steal_trials = 0; steal_trials < queues_.size();
++steal_trials, ++steal_task_id) {
if (steal_task_id >= static_cast<int>(queues_.size())) {
steal_task_id = 0;
}
if (auto unit = StealTopTierPriorityUnit(queue, steal_task_id)) {
return unit;
}
}
}
return {};
}
// Steal units of {wanted_tier} from {steal_from_task_id} to {queue}. Return
// first stolen unit (the rest are put into {queue}), or {nullopt} if
// {steal_from_task_id} had no units of {wanted_tier}.
// Hold a lock on {queues_mutex_} when calling this method.
std::optional<WasmCompilationUnit> StealUnitsAndGetFirst(
QueueImpl* queue, int steal_from_task_id, int wanted_tier) {
auto* steal_queue = queues_[steal_from_task_id].get();
// Cannot steal from own queue.
if (steal_queue == queue) return {};
std::vector<WasmCompilationUnit> stolen;
std::optional<WasmCompilationUnit> returned_unit;
{
base::MutexGuard guard(&steal_queue->mutex);
auto* steal_from_vector = &steal_queue->units[wanted_tier];
if (steal_from_vector->empty()) return {};
size_t remaining = steal_from_vector->size() / 2;
auto steal_begin = steal_from_vector->begin() + remaining;
returned_unit = *steal_begin;
stolen.assign(steal_begin + 1, steal_from_vector->end());
steal_from_vector->erase(steal_begin, steal_from_vector->end());
}
base::MutexGuard guard(&queue->mutex);
auto* target_queue = &queue->units[wanted_tier];
target_queue->insert(target_queue->end(), stolen.begin(), stolen.end());
queue->next_steal_task_id = steal_from_task_id + 1;
return returned_unit;
}
// Steal one priority unit from {steal_from_task_id} into {queue}. Return the
// stolen unit, or {nullopt} if {steal_from_task_id} had no priority units.
// Hold a lock on {queues_mutex_} when calling this method.
std::optional<WasmCompilationUnit> StealTopTierPriorityUnit(
QueueImpl* queue, int steal_from_task_id) {
auto* steal_queue = queues_[steal_from_task_id].get();
// Cannot steal from own queue.
if (steal_queue == queue) return {};
std::optional<WasmCompilationUnit> returned_unit;
{
base::MutexGuard guard(&steal_queue->mutex);
while (true) {
if (steal_queue->top_tier_priority_units.empty()) return {};
auto unit = steal_queue->top_tier_priority_units.top().unit;
steal_queue->top_tier_priority_units.pop();
num_priority_units_.fetch_sub(1, std::memory_order_relaxed);
if (!top_tier_compiled_[declared_function_index(unit.func_index())]
.exchange(true, std::memory_order_relaxed)) {
returned_unit = unit;
break;
}
num_units_[CompilationTier::kTopTier].fetch_sub(
1, std::memory_order_relaxed);
}
}
base::MutexGuard guard(&queue->mutex);
queue->next_steal_task_id = steal_from_task_id + 1;
return returned_unit;
}
int declared_function_index(int func_index) const {
DCHECK_LE(num_imported_functions_, func_index);
DCHECK_LT(func_index, num_imported_functions_ + num_declared_functions_);
return func_index - num_imported_functions_;
}
// {queues_mutex_} protects {queues_}.
mutable base::Mutex queues_mutex_;
std::vector<std::unique_ptr<QueueImpl>> queues_;
const int num_imported_functions_;
const int num_declared_functions_;
BigUnitsQueue big_units_queue_;
std::atomic<size_t> num_units_[CompilationTier::kNumTiers];
std::atomic<size_t> num_priority_units_{0};
std::unique_ptr<std::atomic<bool>[]> top_tier_compiled_;
std::atomic<int> next_queue_to_add{0};
};
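// Illustrative sketch of how a background compile task typically drives a
// {CompilationUnitQueues} instance (the real driver loop lives further down
// in this file; names below are made up for the example):
//
//   CompilationUnitQueues::Queue* queue = queues.GetQueueForTask(task_id);
//   int num_processed = 0;
//   while (std::optional<WasmCompilationUnit> unit =
//              queues.GetNextUnit(queue, CompilationTier::kBaseline)) {
//     // ... compile {*unit} ...
//     if (queue->ShouldPublish(++num_processed)) {
//       // ... publish accumulated results ...
//     }
//   }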
size_t CompilationUnitQueues::EstimateCurrentMemoryConsumption() const {
UPDATE_WHEN_CLASS_CHANGES(CompilationUnitQueues, 176);
UPDATE_WHEN_CLASS_CHANGES(QueueImpl, 112);
UPDATE_WHEN_CLASS_CHANGES(BigUnitsQueue, 88);
// Not including sizeof(CompilationUnitQueues) because that's included in
// sizeof(CompilationStateImpl).
size_t result = 0;
{
base::MutexGuard mutex_guard(&queues_mutex_);
result += ContentSize(queues_) + queues_.size() * sizeof(QueueImpl);
for (const auto& q : queues_) {
base::MutexGuard guard(&q->mutex);
result += ContentSize(*q->units);
result += q->top_tier_priority_units.size() * sizeof(TopTierPriorityUnit);
}
}
{
base::MutexGuard lock(&big_units_queue_.mutex);
result += big_units_queue_.units[0].size() * sizeof(BigUnit);
result += big_units_queue_.units[1].size() * sizeof(BigUnit);
}
// For {top_tier_compiled_}.
result += sizeof(std::atomic<bool>) * num_declared_functions_;
return result;
}
bool CompilationUnitQueues::Queue::ShouldPublish(
int num_processed_units) const {
auto* queue = static_cast<const QueueImpl*>(this);
return num_processed_units >=
queue->publish_limit.load(std::memory_order_relaxed);
}
// The {CompilationStateImpl} keeps track of the compilation state of the
// owning NativeModule, i.e. which functions are left to be compiled.
// It contains a task manager to allow parallel and asynchronous background
// compilation of functions.
// Its public interface {CompilationState} lives in compilation-environment.h.
class CompilationStateImpl {
public:
CompilationStateImpl(const std::shared_ptr<NativeModule>& native_module,
WasmDetectedFeatures detected_features);
~CompilationStateImpl() {
if (baseline_compile_job_->IsValid()) {
baseline_compile_job_->CancelAndDetach();
}
if (top_tier_compile_job_->IsValid()) {
top_tier_compile_job_->CancelAndDetach();
}
}
// Call right after the constructor, after the {compilation_state_} field in
// the {NativeModule} has been initialized.
void InitCompileJob();
// {kCancelUnconditionally}: Cancel all compilation.
// {kCancelInitialCompilation}: Cancel all compilation if initial (baseline)
// compilation is not finished yet.
enum CancellationPolicy { kCancelUnconditionally, kCancelInitialCompilation };
void CancelCompilation(CancellationPolicy);
bool cancelled() const;
// Apply eager tier-up to the initial compilation progress, updating all
// internal fields accordingly.
void ApplyEagerTierUpToInitialProgress(size_t hint_idx);
// Apply a compilation priority hint to initial compilation progress,
// updating all internal fields accordingly.
void ApplyCompilationPriorityToInitialProgress(size_t hint_idx,
CompilationPriority priority);
// Use PGO information to choose a better initial compilation progress
// (tiering decisions).
void ApplyPgoInfoToInitialProgress(ProfileInformation* pgo_info);
// Apply PGO information to a fully initialized compilation state. Also
// trigger compilation as needed.
void ApplyPgoInfoLate(ProfileInformation* pgo_info);
// Initialize compilation progress. Set compilation tiers to expect for
// baseline and top tier compilation. Must be called before
// {CommitCompilationUnits} is invoked, which triggers background compilation.
void InitializeCompilationProgress(ProfileInformation* pgo_info);
void InitializeCompilationProgressAfterDeserialization(
base::Vector<const int> lazy_functions,
base::Vector<const int> eager_functions);
// Initializes compilation units based on the information encoded in the
// {compilation_progress_}.
void InitializeCompilationUnits(
std::unique_ptr<CompilationUnitBuilder> builder);
// Adds compilation units for another function to the
// {CompilationUnitBuilder}. This function is the streaming compilation
// equivalent to {InitializeCompilationUnits}.
void InitializeCompilationUnitForSingleFunction(
CompilationUnitBuilder* builder, int func_index);
// Add the callback to be called on compilation events. Needs to be
// set before {CommitCompilationUnits} is run to ensure that it receives all
// events. The callback object must support being deleted from any thread.
void AddCallback(std::unique_ptr<CompilationEventCallback> callback);
// Inserts new functions to compile and kicks off compilation.
void CommitCompilationUnits(base::Vector<WasmCompilationUnit> baseline_units,
base::Vector<WasmCompilationUnit> top_tier_units);
void CommitTopTierCompilationUnit(WasmCompilationUnit);
void AddTopTierPriorityCompilationUnit(WasmCompilationUnit, size_t);
CompilationUnitQueues::Queue* GetQueueForCompileTask(int task_id);
std::optional<WasmCompilationUnit> GetNextCompilationUnit(
CompilationUnitQueues::Queue*, CompilationTier tier);
void OnFinishedUnits(base::Vector<WasmCode*>);
void OnCompilationStopped(WasmDetectedFeatures detected);
void SchedulePublishCompilationResults(
std::vector<UnpublishedWasmCode> unpublished_code, CompilationTier tier);
WasmDetectedFeatures detected_features() const {
return detected_features_.load(std::memory_order_relaxed);
}
// Update the set of detected features; returns all features that were not
// detected before.
V8_WARN_UNUSED_RESULT WasmDetectedFeatures
UpdateDetectedFeatures(WasmDetectedFeatures);
size_t NumOutstandingCompilations(CompilationTier tier) const;
void SetError();
void WaitForBaselineCompileJob();
void TierUpAllFunctions();
void AllowAnotherTopTierJob(uint32_t func_index) {
compilation_unit_queues_.AllowAnotherTopTierJob(func_index);
// Reset the stored priority; otherwise triggers might be ignored if the
// priority is not bumped to the next power of two.
TypeFeedbackStorage* feedback = &native_module_->module()->type_feedback;
base::MutexGuard mutex_guard(&feedback->mutex);
feedback->feedback_for_function[func_index].tierup_priority = 0;
}
void AllowAnotherTopTierJobForAllFunctions() {
const WasmModule* module = native_module_->module();
uint32_t fn_start = module->num_imported_functions;
uint32_t fn_end = fn_start + module->num_declared_functions;
base::MutexGuard mutex_guard(&module->type_feedback.mutex);
std::unordered_map<uint32_t, FunctionTypeFeedback>& feedback_map =
module->type_feedback.feedback_for_function;
for (uint32_t i = fn_start; i < fn_end; i++) {
compilation_unit_queues_.AllowAnotherTopTierJob(i);
// Reset the stored priority; otherwise triggers might be ignored if the
// priority is not bumped to the next power of two.
if (auto it = feedback_map.find(i); it != feedback_map.end()) {
it->second.tierup_priority = 0;
}
}
}
bool failed() const {
return compile_failed_.load(std::memory_order_relaxed);
}
bool baseline_compilation_finished() const {
base::MutexGuard guard(&callbacks_mutex_);
return outstanding_baseline_units_ == 0;
}
void SetWireBytesStorage(
std::shared_ptr<WireBytesStorage> wire_bytes_storage) {
base::MutexGuard guard(&mutex_);
wire_bytes_storage_ = std::move(wire_bytes_storage);
}
std::shared_ptr<WireBytesStorage> GetWireBytesStorage() const {
base::MutexGuard guard(&mutex_);
DCHECK_NOT_NULL(wire_bytes_storage_);
return wire_bytes_storage_;
}
void set_compilation_id(int compilation_id) {
DCHECK_EQ(compilation_id_, kInvalidCompilationID);
compilation_id_ = compilation_id;
}
size_t EstimateCurrentMemoryConsumption() const;
// Called from the delayed task to trigger caching if the timeout
// (--wasm-caching-timeout-ms) has passed since the last top-tier compilation.
// This either triggers caching or re-schedules the task if more code has
// been compiled to the top tier in the meantime.
void TriggerCachingAfterTimeout();
std::vector<WasmCode*> PublishCode(base::Vector<UnpublishedWasmCode> codes);
private:
// Trigger callbacks according to the internal counters below
// (outstanding_...).
// Hold the {callbacks_mutex_} when calling this method.
void TriggerOutstandingCallbacks();
// Trigger an exact set of callbacks. Hold the {callbacks_mutex_} when calling
// this method.
void TriggerCallbacks(base::EnumSet<CompilationEvent>);
void PublishCompilationResults(
std::vector<UnpublishedWasmCode> unpublished_code);
NativeModule* const native_module_;
std::weak_ptr<NativeModule> const native_module_weak_;
// Compilation error, atomically updated. This flag can be updated and read
// using relaxed semantics.
std::atomic<bool> compile_failed_{false};
// True if compilation was cancelled and worker threads should return. This
// flag can be updated and read using relaxed semantics.
std::atomic<bool> compile_cancelled_{false};
CompilationUnitQueues compilation_unit_queues_;
// This mutex protects all information of this {CompilationStateImpl} which is
// being accessed concurrently.
mutable base::Mutex mutex_;
// The compile job handles, initialized right after construction of
// {CompilationStateImpl}.
std::unique_ptr<JobHandle> baseline_compile_job_;
std::unique_ptr<JobHandle> top_tier_compile_job_;
// The compilation id to identify trace events linked to this compilation.
static constexpr int kInvalidCompilationID = -1;
int compilation_id_ = kInvalidCompilationID;
// Features detected to be used in this module. Features can be detected
// as a module is being compiled.
std::atomic<WasmDetectedFeatures> detected_features_;
//////////////////////////////////////////////////////////////////////////////
// Protected by {mutex_}:
// Abstraction over the storage of the wire bytes. Held in a shared_ptr so
// that background compilation jobs can keep the storage alive while
// compiling.
std::shared_ptr<WireBytesStorage> wire_bytes_storage_;
// End of fields protected by {mutex_}.
//////////////////////////////////////////////////////////////////////////////
// This mutex protects the callbacks vector, and the counters used to
// determine which callbacks to call. The counters plus the callbacks
// themselves need to be synchronized to ensure correct order of events.
mutable base::Mutex callbacks_mutex_;
//////////////////////////////////////////////////////////////////////////////
// Protected by {callbacks_mutex_}:
// Callbacks to be called on compilation events.
std::vector<std::unique_ptr<CompilationEventCallback>> callbacks_;
// Events that already happened.
base::EnumSet<CompilationEvent> finished_events_;
int outstanding_baseline_units_ = 0;
// The amount of generated top tier code since the last
// {kFinishedCompilationChunk} event.
size_t bytes_since_last_chunk_ = 0;
// One byte per declared function, see bitfields defined below.
// This vector is initialized once to the right size; updates are *usually*
// protected by the {callbacks_mutex_}, with exceptions during
// initialization (see comment in
// {CompilationStateImpl::InitializeCompilationUnitForSingleFunction}).
base::OwnedVector<uint8_t> compilation_progress_;
// The timestamp of the last top-tier compilation.
// This field is updated on every publication of top-tier code and is reset
// once caching is triggered. Hence, while it is set, it also indicates that
// a caching task is currently scheduled.
base::TimeTicks last_top_tier_compilation_timestamp_;
// End of fields protected by {callbacks_mutex_}.
//////////////////////////////////////////////////////////////////////////////
struct PublishState {
// {mutex_} protects {publish_queue_} and {publisher_running_}.
base::Mutex mutex_;
std::vector<UnpublishedWasmCode> publish_queue_;
bool publisher_running_ = false;
};
PublishState publish_state_[CompilationTier::kNumTiers];
// Encoding of fields in the {compilation_progress_} vector.
using RequiredBaselineTierField = base::BitField8<ExecutionTier, 0, 2>;
using RequiredTopTierField = base::BitField8<ExecutionTier, 2, 2>;
using ReachedTierField = base::BitField8<ExecutionTier, 4, 2>;
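// Each byte of {compilation_progress_} thus packs three 2-bit
// {ExecutionTier} fields: bits 0-1 hold the required baseline tier, bits 2-3
// the required top tier, and bits 4-5 the tier reached so far.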
};
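// {CompilationState} is an opaque handle; the reinterpret_casts below are
// safe because such objects are only ever created as {CompilationStateImpl}
// (see {CompilationState::New}).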
CompilationStateImpl* Impl(CompilationState* compilation_state) {
return reinterpret_cast<CompilationStateImpl*>(compilation_state);
}
const CompilationStateImpl* Impl(const CompilationState* compilation_state) {
return reinterpret_cast<const CompilationStateImpl*>(compilation_state);
}
CompilationStateImpl* BackgroundCompileScope::compilation_state() const {
DCHECK(native_module_);
return Impl(native_module_->compilation_state());
}
size_t CompilationStateImpl::EstimateCurrentMemoryConsumption() const {
UPDATE_WHEN_CLASS_CHANGES(CompilationStateImpl, 448);
size_t result = sizeof(CompilationStateImpl);
{
base::MutexGuard guard{&mutex_};
result += compilation_unit_queues_.EstimateCurrentMemoryConsumption();
}
// To read the size of {callbacks_} and {compilation_progress_}, we'd
// need to acquire the {callbacks_mutex_}, which can cause deadlocks
// when that mutex is already held elsewhere and another thread calls
// into this function. So we rely on heuristics and informed guesses
// instead: {compilation_progress_} contains an entry for every declared
// function in the module...
result += sizeof(uint8_t) * native_module_->module()->num_declared_functions;
// ...and there are typically no more than a handful of {callbacks_}.
constexpr size_t kAssumedNumberOfCallbacks = 4;
constexpr size_t size_of_vector =
kAssumedNumberOfCallbacks *
sizeof(std::unique_ptr<CompilationEventCallback>);
// Concrete subclasses of CompilationEventCallback will be bigger, but we
// can't know that here.
constexpr size_t size_of_payload =
kAssumedNumberOfCallbacks * sizeof(CompilationEventCallback);
result += size_of_vector + size_of_payload;
if (v8_flags.trace_wasm_offheap_memory) {
PrintF("CompilationStateImpl: %zu\n", result);
}
return result;
}
bool BackgroundCompileScope::cancelled() const {
return native_module_ == nullptr ||
Impl(native_module_->compilation_state())->cancelled();
}
} // namespace
//////////////////////////////////////////////////////
// PIMPL implementation of {CompilationState}.
CompilationState::~CompilationState() { Impl(this)->~CompilationStateImpl(); }
void CompilationState::InitCompileJob() { Impl(this)->InitCompileJob(); }
void CompilationState::CancelCompilation() {
Impl(this)->CancelCompilation(CompilationStateImpl::kCancelUnconditionally);
}
void CompilationState::CancelInitialCompilation() {
Impl(this)->CancelCompilation(
CompilationStateImpl::kCancelInitialCompilation);
}
void CompilationState::SetError() { Impl(this)->SetError(); }
void CompilationState::SetWireBytesStorage(
std::shared_ptr<WireBytesStorage> wire_bytes_storage) {
Impl(this)->SetWireBytesStorage(std::move(wire_bytes_storage));
}
std::shared_ptr<WireBytesStorage> CompilationState::GetWireBytesStorage()
const {
return Impl(this)->GetWireBytesStorage();
}
void CompilationState::AddCallback(
std::unique_ptr<CompilationEventCallback> callback) {
return Impl(this)->AddCallback(std::move(callback));
}
void CompilationState::TierUpAllFunctions() {
Impl(this)->TierUpAllFunctions();
}
void CompilationState::AllowAnotherTopTierJob(uint32_t func_index) {
Impl(this)->AllowAnotherTopTierJob(func_index);
}
void CompilationState::AllowAnotherTopTierJobForAllFunctions() {
Impl(this)->AllowAnotherTopTierJobForAllFunctions();
}
void CompilationState::InitializeAfterDeserialization(
base::Vector<const int> lazy_functions,
base::Vector<const int> eager_functions) {
Impl(this)->InitializeCompilationProgressAfterDeserialization(
lazy_functions, eager_functions);
}
bool CompilationState::failed() const { return Impl(this)->failed(); }
bool CompilationState::baseline_compilation_finished() const {
return Impl(this)->baseline_compilation_finished();
}
void CompilationState::set_compilation_id(int compilation_id) {
Impl(this)->set_compilation_id(compilation_id);
}
size_t CompilationState::EstimateCurrentMemoryConsumption() const {
return Impl(this)->EstimateCurrentMemoryConsumption();
}
std::vector<WasmCode*> CompilationState::PublishCode(
base::Vector<UnpublishedWasmCode> unpublished_code) {
return Impl(this)->PublishCode(unpublished_code);
}
// static
std::unique_ptr<CompilationState> CompilationState::New(
const std::shared_ptr<NativeModule>& native_module,
WasmDetectedFeatures detected_features) {
return std::unique_ptr<CompilationState>(reinterpret_cast<CompilationState*>(
new CompilationStateImpl(native_module, detected_features)));
}
WasmDetectedFeatures CompilationState::detected_features() const {
return Impl(this)->detected_features();
}
WasmDetectedFeatures CompilationState::UpdateDetectedFeatures(
WasmDetectedFeatures detected_features) {
return Impl(this)->UpdateDetectedFeatures(detected_features);
}
// End of PIMPL implementation of {CompilationState}.
//////////////////////////////////////////////////////
namespace {
struct ExecutionTierPair {
ExecutionTier baseline_tier;
ExecutionTier top_tier;
};
// Pass the debug state as a separate parameter to avoid data races: the debug
// state may change between its use here and its use at the call site. To have
// a consistent view of the debug state, the caller reads the debug state once
// and then passes it to this function.
ExecutionTierPair GetDefaultTiersPerModule(NativeModule* native_module,
DebugState is_in_debug_state,
bool lazy_module) {
const WasmModule* module = native_module->module();
if (lazy_module) {
return {ExecutionTier::kNone, ExecutionTier::kNone};
}
if (is_asmjs_module(module)) {
DCHECK(!is_in_debug_state);
return {ExecutionTier::kTurbofan, ExecutionTier::kTurbofan};
}
if (is_in_debug_state) {
return {ExecutionTier::kLiftoff, ExecutionTier::kLiftoff};
}
ExecutionTier baseline_tier =
v8_flags.liftoff ? ExecutionTier::kLiftoff : ExecutionTier::kTurbofan;
bool eager_tier_up = !v8_flags.wasm_dynamic_tiering && v8_flags.wasm_tier_up;
ExecutionTier top_tier =
eager_tier_up ? ExecutionTier::kTurbofan : baseline_tier;