module-compiler.cc
1// Copyright 2017 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/wasm/module-compiler.h"
6
7#include <algorithm>
8#include <atomic>
9#include <memory>
10#include <queue>
11
12#include "src/api/api-inl.h"
13#include "src/base/enum-set.h"
14#include "src/base/fpu.h"
20#include "src/debug/debug.h"
23#include "src/logging/metrics.h"
29#include "src/wasm/pgo.h"
37#include "src/wasm/wasm-js.h"
42
43#define TRACE_COMPILE(...) \
44 do { \
45 if (v8_flags.trace_wasm_compiler) PrintF(__VA_ARGS__); \
46 } while (false)
47
48#define TRACE_STREAMING(...) \
49 do { \
50 if (v8_flags.trace_wasm_streaming) PrintF(__VA_ARGS__); \
51 } while (false)
52
53#define TRACE_LAZY(...) \
54 do { \
55 if (v8_flags.trace_wasm_lazy_compilation) PrintF(__VA_ARGS__); \
56 } while (false)
57
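// [Editor's note: illustrative sketch, not part of the original file.] The
// {do { ... } while (false)} wrapper in the TRACE_* macros above is the
// standard idiom for making a multi-statement macro behave as a single
// statement, so it composes with unbraced control flow and requires a
// trailing semicolon. A minimal sketch of the pitfall it avoids (BAD_TRACE
// is a hypothetical name):
//
//   #define BAD_TRACE(...) \
//     if (tracing_enabled) PrintF(__VA_ARGS__)
//
//   if (condition)
//     BAD_TRACE("hello");  // The next {else} silently binds to the
//   else                   // macro-internal {if}, not to {condition}.
//     HandleOtherCase();
//
// With the do/while(false) form, the dangling-else problem cannot occur.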
58namespace v8::internal::wasm {
59
60namespace {
61
62enum class CompileStrategy : uint8_t {
63 // Compiles functions on first use. In this case, execution will block until
64 // the function's baseline is reached and top tier compilation starts in
65 // background (if applicable).
66 // Lazy compilation can help to reduce startup time and code size at the risk
67 // of blocking execution.
68 kLazy,
69 // Compiles baseline ahead of execution and starts top tier compilation in
70 // background (if applicable).
71 kEager,
72 // Triggers baseline compilation on first use (just like {kLazy}) with the
73 // difference that top tier compilation is started eagerly.
74 // This strategy can help to reduce startup time at the risk of blocking
75 // execution, but only in its early phase (until top tier compilation
76 // finishes).
77 kLazyBaselineEagerTopTier,
78 // Marker for default strategy.
79 kDefault = kEager,
80};
81
82class CompilationStateImpl;
83class CompilationUnitBuilder;
84
85class V8_NODISCARD BackgroundCompileScope {
86 public:
87 explicit BackgroundCompileScope(std::weak_ptr<NativeModule> native_module)
88 : native_module_(native_module.lock()) {}
89
90 NativeModule* native_module() const {
91 DCHECK(native_module_);
92 return native_module_.get();
93 }
94 inline CompilationStateImpl* compilation_state() const;
95
96 bool cancelled() const;
97
98 private:
99 // Keep the native module alive while in this scope.
100 std::shared_ptr<NativeModule> native_module_;
101};
102
103enum CompilationTier { kBaseline = 0, kTopTier = 1, kNumTiers = kTopTier + 1 };
104
105// A set of work-stealing queues (vectors of units). Each background compile
106// task owns one of the queues and steals from all others once its own queue
107// runs empty.
108class CompilationUnitQueues {
109 public:
110 // Public API for QueueImpl.
111 struct Queue {
112 bool ShouldPublish(int num_processed_units) const;
113 };
114
115 explicit CompilationUnitQueues(int num_imported_functions,
116 int num_declared_functions)
117 : num_imported_functions_(num_imported_functions),
118 num_declared_functions_(num_declared_functions) {
119 // Add a first queue to which units can be added.
120 queues_.emplace_back(std::make_unique<QueueImpl>(0));
121
122#if !defined(__cpp_lib_atomic_value_initialization) || \
123 __cpp_lib_atomic_value_initialization < 201911L
124 for (auto& atomic_counter : num_units_) {
125 std::atomic_init(&atomic_counter, size_t{0});
126 }
127#endif
128
129 top_tier_compiled_ =
130 std::make_unique<std::atomic<bool>[]>(num_declared_functions);
131
132#if !defined(__cpp_lib_atomic_value_initialization) || \
133 __cpp_lib_atomic_value_initialization < 201911L
134 for (int i = 0; i < num_declared_functions; i++) {
135 std::atomic_init(&top_tier_compiled_.get()[i], false);
136 }
137#endif
138 }
139
140 Queue* GetQueueForTask(int task_id) {
141 int required_queues = task_id + 1;
142 {
143 base::MutexGuard queues_guard{&queues_mutex_};
144 if (V8_LIKELY(static_cast<int>(queues_.size()) >= required_queues)) {
145 return queues_[task_id].get();
146 }
147 }
148
149 // Otherwise increase the number of queues.
150 base::MutexGuard queues_guard{&queues_mutex_};
151 int num_queues = static_cast<int>(queues_.size());
152 while (num_queues < required_queues) {
153 int steal_from = num_queues + 1;
154 queues_.emplace_back(std::make_unique<QueueImpl>(steal_from));
155 ++num_queues;
156 }
157
158 // Update the {publish_limit}s of all queues.
159
160 // We want background threads to publish regularly (to avoid contention when
161 // they are all publishing at the end). On the other hand, each publish has
162 // some overhead (part of it for synchronizing between threads), so it
163 // should not happen *too* often. Thus aim for 4-8 publishes per thread, but
164 // distribute them such that publishing is likely to happen at different
165 // times.
166 int units_per_thread = num_declared_functions_ / num_queues;
167 int min = std::max(10, units_per_thread / 8);
168 int queue_id = 0;
169 for (auto& queue : queues_) {
170 // Set a limit between {min} and {2*min}, but not smaller than {10}.
171 int limit = min + (min * queue_id / num_queues);
172 queue->publish_limit.store(limit, std::memory_order_relaxed);
173 ++queue_id;
174 }
175
176 return queues_[task_id].get();
177 }
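// [Editor's note: worked example, not part of the original file.] With the
// formula above, assuming 1000 declared functions and 4 queues:
// units_per_thread = 1000 / 4 = 250, min = max(10, 250 / 8) = 31, and the
// per-queue limits become 31 + 31*q/4 for q = 0..3, i.e. 31, 38, 46, 54.
// Each thread therefore publishes after a slightly different number of
// processed units, staggering the publish points as the comment intends
// (roughly 250/31 ≈ 8 down to 250/54 ≈ 4 publishes per thread).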
178
179 std::optional<WasmCompilationUnit> GetNextUnit(Queue* queue,
180 CompilationTier tier) {
181 DCHECK_LT(tier, CompilationTier::kNumTiers);
182 if (auto unit = GetNextUnitOfTier(queue, tier)) {
183 [[maybe_unused]] size_t old_units_count =
184 num_units_[tier].fetch_sub(1, std::memory_order_relaxed);
185 DCHECK_LE(1, old_units_count);
186 return unit;
187 }
188 return {};
189 }
190
191 void AddUnits(base::Vector<WasmCompilationUnit> baseline_units,
192 base::Vector<WasmCompilationUnit> top_tier_units,
193 const WasmModule* module) {
194 DCHECK_LT(0, baseline_units.size() + top_tier_units.size());
195 // Add to the individual queues in a round-robin fashion. No special care is
196 // taken to balance them; they will be balanced by work stealing.
197 QueueImpl* queue;
198 {
199 int queue_to_add = next_queue_to_add.load(std::memory_order_relaxed);
200 base::MutexGuard queues_guard{&queues_mutex_};
201 while (!next_queue_to_add.compare_exchange_weak(
202 queue_to_add, next_task_id(queue_to_add, queues_.size()),
203 std::memory_order_relaxed)) {
204 // Retry with updated {queue_to_add}.
205 }
206 queue = queues_[queue_to_add].get();
207 }
208
209 base::MutexGuard guard(&queue->mutex);
210 std::optional<base::MutexGuard> big_units_guard;
211 for (auto pair :
212 {std::make_pair(CompilationTier::kBaseline, baseline_units),
213 std::make_pair(CompilationTier::kTopTier, top_tier_units)}) {
214 int tier = pair.first;
215 base::Vector<WasmCompilationUnit> units = pair.second;
216 if (units.empty()) continue;
217 num_units_[tier].fetch_add(units.size(), std::memory_order_relaxed);
218 for (WasmCompilationUnit unit : units) {
219 size_t func_size = module->functions[unit.func_index()].code.length();
220 if (func_size <= kBigUnitsLimit) {
221 queue->units[tier].push_back(unit);
222 } else {
223 if (!big_units_guard) {
224 big_units_guard.emplace(&big_units_queue_.mutex);
225 }
226 big_units_queue_.has_units[tier].store(true,
227 std::memory_order_relaxed);
228 big_units_queue_.units[tier].emplace(func_size, unit);
229 }
230 }
231 }
232 }
233
234 void AddTopTierPriorityUnit(WasmCompilationUnit unit, size_t priority) {
235 base::MutexGuard queues_guard{&queues_mutex_};
236 // Add to the individual queues in a round-robin fashion. No special care is
237 // taken to balance them; they will be balanced by work stealing.
238 // Priorities should only be seen as a hint here; without balancing, we
239 // might pop a unit with lower priority from one queue while other queues
240 // still hold higher-priority units.
241 // Since updating priorities in a std::priority_queue is difficult, we just
242 // add new units with higher priorities, and use the
243 // {CompilationUnitQueues::top_tier_compiled_} array to discard units for
244 // functions which are already being compiled.
245 int queue_to_add = next_queue_to_add.load(std::memory_order_relaxed);
246 while (!next_queue_to_add.compare_exchange_weak(
247 queue_to_add, next_task_id(queue_to_add, queues_.size()),
248 std::memory_order_relaxed)) {
249 // Retry with updated {queue_to_add}.
250 }
251
252 {
253 auto* queue = queues_[queue_to_add].get();
254 base::MutexGuard guard(&queue->mutex);
255 queue->top_tier_priority_units.emplace(priority, unit);
256 num_priority_units_.fetch_add(1, std::memory_order_relaxed);
257 num_units_[CompilationTier::kTopTier].fetch_add(
258 1, std::memory_order_relaxed);
259 }
260 }
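// [Editor's note: illustrative sketch, not part of the original file.] The
// "push duplicates, discard stale entries on pop" pattern described above,
// reduced to its essence (standalone C++, assuming <atomic>, <optional>,
// <queue>, <utility>, <vector>; all names hypothetical):
//
//   std::priority_queue<std::pair<size_t, int>> q;  // {priority, func_index}
//   std::vector<std::atomic<bool>> claimed(num_functions);
//
//   void BumpPriority(int func_index, size_t new_priority) {
//     // No update-in-place; a duplicate entry with the higher priority is
//     // simply pushed.
//     q.emplace(new_priority, func_index);
//   }
//
//   std::optional<int> PopUnit() {
//     while (!q.empty()) {
//       int func_index = q.top().second;
//       q.pop();
//       // The first popper claims the function; stale duplicates (older,
//       // lower-priority entries for the same function) are skipped here.
//       if (!claimed[func_index].exchange(true)) return func_index;
//     }
//     return std::nullopt;
//   }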
261
262 // Get the current number of units in the queue for {tier}. This is only a
263 // momentary snapshot; it's not guaranteed that {GetNextUnit} returns a unit
264 // if this method returns non-zero.
265 size_t GetSizeForTier(CompilationTier tier) const {
266 DCHECK_LT(tier, CompilationTier::kNumTiers);
267 return num_units_[tier].load(std::memory_order_relaxed);
268 }
269
270 void AllowAnotherTopTierJob(uint32_t func_index) {
271 top_tier_compiled_[declared_function_index(func_index)].store(
272 false, std::memory_order_relaxed);
273 }
274
275 size_t EstimateCurrentMemoryConsumption() const;
276
277 private:
278 // Functions bigger than {kBigUnitsLimit} will be compiled first, in
279 // descending order of their function body size.
280 static constexpr size_t kBigUnitsLimit = 4096;
281
282 struct BigUnit {
283 BigUnit(size_t func_size, WasmCompilationUnit unit)
284 : func_size(func_size), unit(unit) {}
285
286 size_t func_size;
287 WasmCompilationUnit unit;
288
289 bool operator<(const BigUnit& other) const {
290 return func_size < other.func_size;
291 }
292 };
293
294 struct TopTierPriorityUnit {
295 TopTierPriorityUnit(int priority, WasmCompilationUnit unit)
296 : priority(priority), unit(unit) {}
297
298 size_t priority;
299 WasmCompilationUnit unit;
300
301 bool operator<(const TopTierPriorityUnit& other) const {
302 return priority < other.priority;
303 }
304 };
305
306 struct BigUnitsQueue {
307 BigUnitsQueue() {
308#if !defined(__cpp_lib_atomic_value_initialization) || \
309 __cpp_lib_atomic_value_initialization < 201911L
310 for (auto& atomic : has_units) std::atomic_init(&atomic, false);
311#endif
312 }
313
314 mutable base::Mutex mutex;
315
316 // Can be read concurrently to check whether any elements are in the queue.
317 std::atomic<bool> has_units[CompilationTier::kNumTiers];
318
319 // Protected by {mutex}:
320 std::priority_queue<BigUnit> units[CompilationTier::kNumTiers];
321 };
322
323 struct QueueImpl : public Queue {
324 explicit QueueImpl(int next_steal_task_id)
325 : next_steal_task_id(next_steal_task_id) {}
326
327 // Number of units after which the task processing this queue should publish
328 // compilation results. Updated (reduced, using relaxed ordering) when new
329 // queues are allocated. If there is only one thread running, we can delay
330 // publishing arbitrarily.
331 std::atomic<int> publish_limit{kMaxInt};
332
333 base::Mutex mutex;
334
335 // All fields below are protected by {mutex}.
336 std::vector<WasmCompilationUnit> units[CompilationTier::kNumTiers];
337 std::priority_queue<TopTierPriorityUnit> top_tier_priority_units;
338 int next_steal_task_id;
339 };
340
341 int next_task_id(int task_id, size_t num_queues) const {
342 int next = task_id + 1;
343 return next == static_cast<int>(num_queues) ? 0 : next;
344 }
345
346 std::optional<WasmCompilationUnit> GetNextUnitOfTier(Queue* public_queue,
347 int tier) {
348 QueueImpl* queue = static_cast<QueueImpl*>(public_queue);
349
350 // First check whether there is a priority unit. Execute that first.
351 if (tier == CompilationTier::kTopTier) {
352 if (auto unit = GetTopTierPriorityUnit(queue)) {
353 return unit;
354 }
355 }
356
357 // Then check whether there is a big unit of that tier.
358 if (auto unit = GetBigUnitOfTier(tier)) return unit;
359
360 // Finally check whether our own queue has a unit of the wanted tier. If
361 // so, return it, otherwise get the task id to steal from.
362 int steal_task_id;
363 {
364 base::MutexGuard mutex_guard(&queue->mutex);
365 if (!queue->units[tier].empty()) {
366 auto unit = queue->units[tier].back();
367 queue->units[tier].pop_back();
368 return unit;
369 }
370 steal_task_id = queue->next_steal_task_id;
371 }
372
373 // Try to steal from all other queues. If this succeeds, return one of the
374 // stolen units.
375 {
376 base::MutexGuard guard{&queues_mutex_};
377 for (size_t steal_trials = 0; steal_trials < queues_.size();
378 ++steal_trials, ++steal_task_id) {
379 if (steal_task_id >= static_cast<int>(queues_.size())) {
380 steal_task_id = 0;
381 }
382 if (auto unit = StealUnitsAndGetFirst(queue, steal_task_id, tier)) {
383 return unit;
384 }
385 }
386 }
387
388 // If we reach here, we didn't find any unit of the requested tier.
389 return {};
390 }
391
392 std::optional<WasmCompilationUnit> GetBigUnitOfTier(int tier) {
393 // Fast path without locking.
394 if (!big_units_queue_.has_units[tier].load(std::memory_order_relaxed)) {
395 return {};
396 }
397 base::MutexGuard guard(&big_units_queue_.mutex);
398 if (big_units_queue_.units[tier].empty()) return {};
399 WasmCompilationUnit unit = big_units_queue_.units[tier].top().unit;
400 big_units_queue_.units[tier].pop();
401 if (big_units_queue_.units[tier].empty()) {
402 big_units_queue_.has_units[tier].store(false, std::memory_order_relaxed);
403 }
404 return unit;
405 }
406
407 std::optional<WasmCompilationUnit> GetTopTierPriorityUnit(QueueImpl* queue) {
408 // Fast path without locking.
409 if (num_priority_units_.load(std::memory_order_relaxed) == 0) {
410 return {};
411 }
412
413 int steal_task_id;
414 {
415 base::MutexGuard mutex_guard(&queue->mutex);
416 while (!queue->top_tier_priority_units.empty()) {
417 auto unit = queue->top_tier_priority_units.top().unit;
418 queue->top_tier_priority_units.pop();
419 num_priority_units_.fetch_sub(1, std::memory_order_relaxed);
420
421 if (!top_tier_compiled_[declared_function_index(unit.func_index())]
422 .exchange(true, std::memory_order_relaxed)) {
423 return unit;
424 }
425 num_units_[CompilationTier::kTopTier].fetch_sub(
426 1, std::memory_order_relaxed);
427 }
428 steal_task_id = queue->next_steal_task_id;
429 }
430
431 // Try to steal from all other queues. If this succeeds, return one of the
432 // stolen units.
433 {
434 base::MutexGuard guard{&queues_mutex_};
435 for (size_t steal_trials = 0; steal_trials < queues_.size();
436 ++steal_trials, ++steal_task_id) {
437 if (steal_task_id >= static_cast<int>(queues_.size())) {
438 steal_task_id = 0;
439 }
440 if (auto unit = StealTopTierPriorityUnit(queue, steal_task_id)) {
441 return unit;
442 }
443 }
444 }
445
446 return {};
447 }
448
449 // Steal units of {wanted_tier} from {steal_from_task_id} to {queue}. Return
450 // the first stolen unit (the rest are put into {queue}), or {nullopt} if
451 // {steal_from_task_id} had no units of {wanted_tier}.
452 // Hold a shared lock on {queues_mutex_} when calling this method.
453 std::optional<WasmCompilationUnit> StealUnitsAndGetFirst(
454 QueueImpl* queue, int steal_from_task_id, int wanted_tier) {
455 auto* steal_queue = queues_[steal_from_task_id].get();
456 // Cannot steal from own queue.
457 if (steal_queue == queue) return {};
458 std::vector<WasmCompilationUnit> stolen;
459 std::optional<WasmCompilationUnit> returned_unit;
460 {
461 base::MutexGuard guard(&steal_queue->mutex);
462 auto* steal_from_vector = &steal_queue->units[wanted_tier];
463 if (steal_from_vector->empty()) return {};
464 size_t remaining = steal_from_vector->size() / 2;
465 auto steal_begin = steal_from_vector->begin() + remaining;
466 returned_unit = *steal_begin;
467 stolen.assign(steal_begin + 1, steal_from_vector->end());
468 steal_from_vector->erase(steal_begin, steal_from_vector->end());
469 }
470 base::MutexGuard guard(&queue->mutex);
471 auto* target_queue = &queue->units[wanted_tier];
472 target_queue->insert(target_queue->end(), stolen.begin(), stolen.end());
473 queue->next_steal_task_id = steal_from_task_id + 1;
474 return returned_unit;
475 }
476
477 // Steal one priority unit from {steal_from_task_id} to {queue}. Return the
478 // stolen unit, or {nullopt} if {steal_from_task_id} had no priority units.
479 // Hold a shared lock on {queues_mutex_} when calling this method.
480 std::optional<WasmCompilationUnit> StealTopTierPriorityUnit(
481 QueueImpl* queue, int steal_from_task_id) {
482 auto* steal_queue = queues_[steal_from_task_id].get();
483 // Cannot steal from own queue.
484 if (steal_queue == queue) return {};
485 std::optional<WasmCompilationUnit> returned_unit;
486 {
487 base::MutexGuard guard(&steal_queue->mutex);
488 while (true) {
489 if (steal_queue->top_tier_priority_units.empty()) return {};
490
491 auto unit = steal_queue->top_tier_priority_units.top().unit;
492 steal_queue->top_tier_priority_units.pop();
493 num_priority_units_.fetch_sub(1, std::memory_order_relaxed);
494
495 if (!top_tier_compiled_[declared_function_index(unit.func_index())]
496 .exchange(true, std::memory_order_relaxed)) {
497 returned_unit = unit;
498 break;
499 }
500 num_units_[CompilationTier::kTopTier].fetch_sub(
501 1, std::memory_order_relaxed);
502 }
503 }
504 base::MutexGuard guard(&queue->mutex);
505 queue->next_steal_task_id = steal_from_task_id + 1;
506 return returned_unit;
507 }
508
509 int declared_function_index(int func_index) const {
510 DCHECK_LE(num_imported_functions_, func_index);
511 DCHECK_LT(func_index, num_imported_functions_ + num_declared_functions_);
512 return func_index - num_imported_functions_;
513 }
514
515 // {queues_mutex_} protects {queues_}.
516 mutable base::Mutex queues_mutex_;
517 std::vector<std::unique_ptr<QueueImpl>> queues_;
518
519 const int num_imported_functions_;
520 const int num_declared_functions_;
521
522 BigUnitsQueue big_units_queue_;
523
524 std::atomic<size_t> num_units_[CompilationTier::kNumTiers];
525 std::atomic<size_t> num_priority_units_{0};
526 std::unique_ptr<std::atomic<bool>[]> top_tier_compiled_;
527 std::atomic<int> next_queue_to_add{0};
528};
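// [Editor's note: illustrative sketch, not part of the original file.] How a
// background compile task typically drives a work-stealing queue set like the
// one above: fetch from its own queue until it runs dry (at which point
// {GetNextUnit} internally steals from other queues), publishing every so
// often. All names except the CompilationUnitQueues API are hypothetical:
//
//   void RunCompileTask(CompilationUnitQueues* queues, int task_id,
//                       CompilationTier tier) {
//     CompilationUnitQueues::Queue* queue = queues->GetQueueForTask(task_id);
//     int processed_since_publish = 0;
//     while (auto unit = queues->GetNextUnit(queue, tier)) {
//       CompileOneUnit(*unit);  // hypothetical
//       ++processed_since_publish;
//       if (queue->ShouldPublish(processed_since_publish)) {
//         PublishResults();  // hypothetical
//         processed_since_publish = 0;
//       }
//     }
//   }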
529
530size_t CompilationUnitQueues::EstimateCurrentMemoryConsumption() const {
531 UPDATE_WHEN_CLASS_CHANGES(CompilationUnitQueues, 176);
532 UPDATE_WHEN_CLASS_CHANGES(QueueImpl, 112);
533 UPDATE_WHEN_CLASS_CHANGES(BigUnitsQueue, 88);
534 // Not including sizeof(CompilationUnitQueues) because that's included in
535 // sizeof(CompilationStateImpl).
536 size_t result = 0;
537 {
538 base::MutexGuard mutex_guard(&queues_mutex_);
539 result += ContentSize(queues_) + queues_.size() * sizeof(QueueImpl);
540 for (const auto& q : queues_) {
541 base::MutexGuard guard(&q->mutex);
542 result += ContentSize(*q->units);
543 result += q->top_tier_priority_units.size() * sizeof(TopTierPriorityUnit);
544 }
545 }
546 {
547 base::MutexGuard lock(&big_units_queue_.mutex);
548 result += big_units_queue_.units[0].size() * sizeof(BigUnit);
549 result += big_units_queue_.units[1].size() * sizeof(BigUnit);
550 }
551 // For {top_tier_compiled_}.
552 result += sizeof(std::atomic<bool>) * num_declared_functions_;
553 return result;
554}
555
556bool CompilationUnitQueues::Queue::ShouldPublish(
557 int num_processed_units) const {
558 auto* queue = static_cast<const QueueImpl*>(this);
559 return num_processed_units >=
560 queue->publish_limit.load(std::memory_order_relaxed);
561}
562
563// The {CompilationStateImpl} keeps track of the compilation state of the
564// owning NativeModule, i.e. which functions are left to be compiled.
565// It contains a task manager to allow parallel and asynchronous background
566// compilation of functions.
567// Its public interface {CompilationState} lives in compilation-environment.h.
568class CompilationStateImpl {
569 public:
570 CompilationStateImpl(const std::shared_ptr<NativeModule>& native_module,
571 std::shared_ptr<Counters> async_counters,
572 WasmDetectedFeatures detected_features);
573 ~CompilationStateImpl() {
574 if (baseline_compile_job_->IsValid()) {
575 baseline_compile_job_->CancelAndDetach();
576 }
577 if (top_tier_compile_job_->IsValid()) {
578 top_tier_compile_job_->CancelAndDetach();
579 }
580 }
581
582 // Call right after the constructor, after the {compilation_state_} field in
583 // the {NativeModule} has been initialized.
584 void InitCompileJob();
585
586 // {kCancelUnconditionally}: Cancel all compilation.
587 // {kCancelInitialCompilation}: Cancel all compilation if initial (baseline)
588 // compilation is not finished yet.
589 enum CancellationPolicy { kCancelUnconditionally, kCancelInitialCompilation };
590 void CancelCompilation(CancellationPolicy);
591
592 bool cancelled() const;
593
594 // Apply a compilation hint to the initial compilation progress, updating all
595 // internal fields accordingly.
596 void ApplyCompilationHintToInitialProgress(const WasmCompilationHint& hint,
597 size_t hint_idx);
598
599 // Use PGO information to choose a better initial compilation progress
600 // (tiering decisions).
601 void ApplyPgoInfoToInitialProgress(ProfileInformation* pgo_info);
602
603 // Apply PGO information to a fully initialized compilation state. Also
604 // trigger compilation as needed.
605 void ApplyPgoInfoLate(ProfileInformation* pgo_info);
606
607 // Initialize compilation progress. Set compilation tiers to expect for
608 // baseline and top tier compilation. Must be set before
609 // {CommitCompilationUnits} is invoked which triggers background compilation.
610 void InitializeCompilationProgress(ProfileInformation* pgo_info);
611
612 void InitializeCompilationProgressAfterDeserialization(
613 base::Vector<const int> lazy_functions,
614 base::Vector<const int> eager_functions);
615
616 // Initializes compilation units based on the information encoded in the
617 // {compilation_progress_}.
618 void InitializeCompilationUnits(
619 std::unique_ptr<CompilationUnitBuilder> builder);
620
621 // Adds compilation units for another function to the
622 // {CompilationUnitBuilder}. This function is the streaming compilation
623 // equivalent to {InitializeCompilationUnits}.
624 void AddCompilationUnit(CompilationUnitBuilder* builder, int func_index);
625
626 // Add the callback to be called on compilation events. Needs to be
627 // set before {CommitCompilationUnits} is run to ensure that it receives all
628 // events. The callback object must support being deleted from any thread.
629 void AddCallback(std::unique_ptr<CompilationEventCallback> callback);
630
631 // Inserts new functions to compile and kicks off compilation.
632 void CommitCompilationUnits(base::Vector<WasmCompilationUnit> baseline_units,
633 base::Vector<WasmCompilationUnit> top_tier_units);
634 void CommitTopTierCompilationUnit(WasmCompilationUnit);
635 void AddTopTierPriorityCompilationUnit(WasmCompilationUnit, size_t);
636
637 CompilationUnitQueues::Queue* GetQueueForCompileTask(int task_id);
638
639 std::optional<WasmCompilationUnit> GetNextCompilationUnit(
640 CompilationUnitQueues::Queue*, CompilationTier tier);
641
642 void OnFinishedUnits(base::Vector<WasmCode*>);
643
644 void OnCompilationStopped(WasmDetectedFeatures detected);
645 void SchedulePublishCompilationResults(
646 std::vector<UnpublishedWasmCode> unpublished_code, CompilationTier tier);
647
648 WasmDetectedFeatures detected_features() const {
649 return detected_features_.load(std::memory_order_relaxed);
650 }
651
652 // Update the set of detected features; returns all features that were not
653 // detected before.
654 V8_WARN_UNUSED_RESULT WasmDetectedFeatures
655 UpdateDetectedFeatures(WasmDetectedFeatures);
656
657 size_t NumOutstandingCompilations(CompilationTier tier) const;
658
659 void SetError();
660
661 void WaitForCompilationEvent(CompilationEvent event);
662
663 void TierUpAllFunctions();
664
665 void AllowAnotherTopTierJob(uint32_t func_index) {
666 compilation_unit_queues_.AllowAnotherTopTierJob(func_index);
667 // Reset the stored priority; otherwise triggers might be ignored if the
668 // priority is not bumped to the next power of two.
669 TypeFeedbackStorage* feedback = &native_module_->module()->type_feedback;
670 base::MutexGuard mutex_guard(&feedback->mutex);
671 feedback->feedback_for_function[func_index].tierup_priority = 0;
672 }
673
674 void AllowAnotherTopTierJobForAllFunctions() {
675 const WasmModule* module = native_module_->module();
676 uint32_t fn_start = module->num_imported_functions;
677 uint32_t fn_end = fn_start + module->num_declared_functions;
678 base::MutexGuard mutex_guard(&module->type_feedback.mutex);
679 std::unordered_map<uint32_t, FunctionTypeFeedback>& feedback_map =
680 module->type_feedback.feedback_for_function;
681 for (uint32_t i = fn_start; i < fn_end; i++) {
682 compilation_unit_queues_.AllowAnotherTopTierJob(i);
683 // Reset the stored priority; otherwise triggers might be ignored if the
684 // priority is not bumped to the next power of two.
685 if (auto it = feedback_map.find(i); it != feedback_map.end()) {
686 it->second.tierup_priority = 0;
687 }
688 }
689 }
690
691 bool failed() const {
692 return compile_failed_.load(std::memory_order_relaxed);
693 }
694
695 bool baseline_compilation_finished() const {
696 base::MutexGuard guard(&callbacks_mutex_);
697 return outstanding_baseline_units_ == 0;
698 }
699
700 Counters* counters() const { return async_counters_.get(); }
701
702 void SetWireBytesStorage(
703 std::shared_ptr<WireBytesStorage> wire_bytes_storage) {
704 base::MutexGuard guard(&mutex_);
705 wire_bytes_storage_ = std::move(wire_bytes_storage);
706 }
707
708 std::shared_ptr<WireBytesStorage> GetWireBytesStorage() const {
709 base::MutexGuard guard(&mutex_);
710 DCHECK_NOT_NULL(wire_bytes_storage_);
711 return wire_bytes_storage_;
712 }
713
714 void set_compilation_id(int compilation_id) {
715 DCHECK_EQ(compilation_id_, kInvalidCompilationID);
716 compilation_id_ = compilation_id;
717 }
718
719 size_t EstimateCurrentMemoryConsumption() const;
720
721 // Called from the delayed task to trigger caching if the timeout
722 // (--wasm-caching-timeout-ms) has passed since the last top-tier compilation.
723 // This either triggers caching or re-schedules the task if more code has
724 // been compiled to the top tier in the meantime.
725 void TriggerCachingAfterTimeout();
726
727 std::vector<WasmCode*> PublishCode(base::Vector<UnpublishedWasmCode> codes);
728
729 private:
730 void AddCompilationUnitInternal(CompilationUnitBuilder* builder,
731 int function_index,
732 uint8_t function_progress);
733
734 // Trigger callbacks according to the internal counters below
735 // (outstanding_...).
736 // Hold the {callbacks_mutex_} when calling this method.
737 void TriggerOutstandingCallbacks();
738 // Trigger an exact set of callbacks. Hold the {callbacks_mutex_} when calling
739 // this method.
740 void TriggerCallbacks(base::EnumSet<CompilationEvent>);
741
742 void PublishCompilationResults(
743 std::vector<UnpublishedWasmCode> unpublished_code);
744
745 NativeModule* const native_module_;
746 std::weak_ptr<NativeModule> const native_module_weak_;
747 const std::shared_ptr<Counters> async_counters_;
748
749 // Compilation error, atomically updated. This flag can be updated and read
750 // using relaxed semantics.
751 std::atomic<bool> compile_failed_{false};
752
753 // True if compilation was cancelled and worker threads should return. This
754 // flag can be updated and read using relaxed semantics.
755 std::atomic<bool> compile_cancelled_{false};
756
757 CompilationUnitQueues compilation_unit_queues_;
758
759 // This mutex protects all information of this {CompilationStateImpl} which is
760 // being accessed concurrently.
761 mutable base::Mutex mutex_;
762
763 // The compile job handles, initialized right after construction of
764 // {CompilationStateImpl}.
765 std::unique_ptr<JobHandle> baseline_compile_job_;
766 std::unique_ptr<JobHandle> top_tier_compile_job_;
767
768 // The compilation id to identify trace events linked to this compilation.
769 static constexpr int kInvalidCompilationID = -1;
770 int compilation_id_ = kInvalidCompilationID;
771
772 // Features detected to be used in this module. Features can be detected
773 // as a module is being compiled.
774 std::atomic<WasmDetectedFeatures> detected_features_;
775
777 // Protected by {mutex_}:
778
779 // Abstraction over the storage of the wire bytes. Held in a shared_ptr so
780 // that background compilation jobs can keep the storage alive while
781 // compiling.
782 std::shared_ptr<WireBytesStorage> wire_bytes_storage_;
783
784 // End of fields protected by {mutex_}.
786
787 // This mutex protects the callbacks vector, and the counters used to
788 // determine which callbacks to call. The counters plus the callbacks
789 // themselves need to be synchronized to ensure correct order of events.
790 mutable base::Mutex callbacks_mutex_;
791
793 // Protected by {callbacks_mutex_}:
794
795 // Callbacks to be called on compilation events.
796 std::vector<std::unique_ptr<CompilationEventCallback>> callbacks_;
797
798 // Events that already happened.
799 base::EnumSet<CompilationEvent> finished_events_;
800
801 int outstanding_baseline_units_ = 0;
802 // The amount of generated top tier code since the last
803 // {kFinishedCompilationChunk} event.
804 size_t bytes_since_last_chunk_ = 0;
805 std::vector<uint8_t> compilation_progress_;
806
807 // The timestamp of the last top-tier compilation.
808 // This field is updated on every publishing of top-tier code, and is reset
809 // once caching is triggered. Hence it also informs whether a caching task is
810 // currently being scheduled (whenever this is set).
811 std::optional<base::TimeTicks> last_top_tier_compilation_timestamp_;
812
813 // End of fields protected by {callbacks_mutex_}.
815
816 struct PublishState {
817 // {mutex_} protects {publish_queue_} and {publisher_running_}.
818 base::Mutex mutex_;
819 std::vector<UnpublishedWasmCode> publish_queue_;
820 bool publisher_running_ = false;
821 };
822 PublishState publish_state_[CompilationTier::kNumTiers];
823
824 // Encoding of fields in the {compilation_progress_} vector.
825 using RequiredBaselineTierField = base::BitField8<ExecutionTier, 0, 2>;
826 using RequiredTopTierField = base::BitField8<ExecutionTier, 2, 2>;
827 using ReachedTierField = base::BitField8<ExecutionTier, 4, 2>;
828};
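// [Editor's note: illustrative sketch, not part of the original file.] The
// three BitField8 aliases above pack one byte of per-function progress:
// bits 0-1 hold the required baseline tier, bits 2-3 the required top tier,
// and bits 4-5 the tier reached so far. Expressed with plain shifts and
// masks, the encoding and decoding look roughly like this:
//
//   uint8_t EncodeProgress(ExecutionTier required_baseline,
//                          ExecutionTier required_top,
//                          ExecutionTier reached) {
//     return static_cast<uint8_t>(required_baseline) |
//            (static_cast<uint8_t>(required_top) << 2) |
//            (static_cast<uint8_t>(reached) << 4);
//   }
//
//   ExecutionTier ReachedTier(uint8_t progress) {
//     return static_cast<ExecutionTier>((progress >> 4) & 0b11);
//   }
//
// The real code uses RequiredBaselineTierField::encode() /
// ReachedTierField::decode() etc., which generate this kind of
// shift-and-mask logic with offsets and widths checked at compile time.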
829
830CompilationStateImpl* Impl(CompilationState* compilation_state) {
831 return reinterpret_cast<CompilationStateImpl*>(compilation_state);
832}
833const CompilationStateImpl* Impl(const CompilationState* compilation_state) {
834 return reinterpret_cast<const CompilationStateImpl*>(compilation_state);
835}
836
837CompilationStateImpl* BackgroundCompileScope::compilation_state() const {
838 DCHECK(native_module_);
839 return Impl(native_module_->compilation_state());
840}
841
842size_t CompilationStateImpl::EstimateCurrentMemoryConsumption() const {
843 UPDATE_WHEN_CLASS_CHANGES(CompilationStateImpl, 464);
844 size_t result = sizeof(CompilationStateImpl);
845
846 {
847 base::MutexGuard guard{&mutex_};
848 result += compilation_unit_queues_.EstimateCurrentMemoryConsumption();
849 }
850
851 // To read the size of {callbacks_} and {compilation_progress_}, we'd
852 // need to acquire the {callbacks_mutex_}, which can cause deadlocks
853 // when that mutex is already held elsewhere and another thread calls
854 // into this function. So we rely on heuristics and informed guesses
855 // instead: {compilation_progress_} contains an entry for every declared
856 // function in the module...
857 result += sizeof(uint8_t) * native_module_->module()->num_declared_functions;
858 // ...and there are typically no more than a handful of {callbacks_}.
859 constexpr size_t kAssumedNumberOfCallbacks = 4;
860 constexpr size_t size_of_vector =
861 kAssumedNumberOfCallbacks *
862 sizeof(std::unique_ptr<CompilationEventCallback>);
863 // Concrete subclasses of CompilationEventCallback will be bigger, but we
864 // can't know that here.
865 constexpr size_t size_of_payload =
866 kAssumedNumberOfCallbacks * sizeof(CompilationEventCallback);
867 result += size_of_vector + size_of_payload;
868
869 if (v8_flags.trace_wasm_offheap_memory) {
870 PrintF("CompilationStateImpl: %zu\n", result);
871 }
872 return result;
873}
874
875bool BackgroundCompileScope::cancelled() const {
876 return native_module_ == nullptr ||
877 Impl(native_module_->compilation_state())->cancelled();
878}
879
880} // namespace
881
883// PIMPL implementation of {CompilationState}.
884
885CompilationState::~CompilationState() { Impl(this)->~CompilationStateImpl(); }
886
887void CompilationState::InitCompileJob() { Impl(this)->InitCompileJob(); }
888
889void CompilationState::CancelCompilation() {
890 Impl(this)->CancelCompilation(CompilationStateImpl::kCancelUnconditionally);
891}
892
893void CompilationState::CancelInitialCompilation() {
894 Impl(this)->CancelCompilation(
895 CompilationStateImpl::kCancelInitialCompilation);
896}
897
898void CompilationState::SetError() { Impl(this)->SetError(); }
899
900void CompilationState::SetWireBytesStorage(
901 std::shared_ptr<WireBytesStorage> wire_bytes_storage) {
902 Impl(this)->SetWireBytesStorage(std::move(wire_bytes_storage));
903}
904
905std::shared_ptr<WireBytesStorage> CompilationState::GetWireBytesStorage()
906 const {
907 return Impl(this)->GetWireBytesStorage();
908}
909
910void CompilationState::AddCallback(
911 std::unique_ptr<CompilationEventCallback> callback) {
912 return Impl(this)->AddCallback(std::move(callback));
913}
914
915void CompilationState::TierUpAllFunctions() {
916 Impl(this)->TierUpAllFunctions();
917}
918
919void CompilationState::AllowAnotherTopTierJob(uint32_t func_index) {
920 Impl(this)->AllowAnotherTopTierJob(func_index);
921}
922
923void CompilationState::AllowAnotherTopTierJobForAllFunctions() {
924 Impl(this)->AllowAnotherTopTierJobForAllFunctions();
925}
926
927void CompilationState::InitializeAfterDeserialization(
928 base::Vector<const int> lazy_functions,
929 base::Vector<const int> eager_functions) {
930 Impl(this)->InitializeCompilationProgressAfterDeserialization(
931 lazy_functions, eager_functions);
932}
933
934bool CompilationState::failed() const { return Impl(this)->failed(); }
935
936bool CompilationState::baseline_compilation_finished() const {
937 return Impl(this)->baseline_compilation_finished();
938}
939
940void CompilationState::set_compilation_id(int compilation_id) {
941 Impl(this)->set_compilation_id(compilation_id);
942}
943
944size_t CompilationState::EstimateCurrentMemoryConsumption() const {
945 return Impl(this)->EstimateCurrentMemoryConsumption();
946}
947
948std::vector<WasmCode*> CompilationState::PublishCode(
949 base::Vector<UnpublishedWasmCode> unpublished_code) {
950 return Impl(this)->PublishCode(unpublished_code);
951}
952
953// static
954std::unique_ptr<CompilationState> CompilationState::New(
955 const std::shared_ptr<NativeModule>& native_module,
956 std::shared_ptr<Counters> async_counters,
957 WasmDetectedFeatures detected_features) {
958 return std::unique_ptr<CompilationState>(reinterpret_cast<CompilationState*>(
959 new CompilationStateImpl(std::move(native_module),
960 std::move(async_counters), detected_features)));
961}
962
963WasmDetectedFeatures CompilationState::detected_features() const {
964 return Impl(this)->detected_features();
965}
966
967WasmDetectedFeatures CompilationState::UpdateDetectedFeatures(
968 WasmDetectedFeatures detected_features) {
969 return Impl(this)->UpdateDetectedFeatures(detected_features);
970}
971
972// End of PIMPL implementation of {CompilationState}.
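// [Editor's note: illustrative sketch, not part of the original file.] The
// PIMPL variant used above avoids an extra indirection: {CompilationState} is
// an empty public type, every object is really a {CompilationStateImpl}, and
// {Impl()} just reinterpret_casts between the two. The generic shape of the
// pattern (Widget/WidgetImpl are hypothetical names):
//
//   class Widget {  // public header: no data members, no virtuals
//    public:
//     void DoThing();
//   };
//
//   class WidgetImpl {  // private .cc file: the real state
//    public:
//     void DoThing() { /* ... */ }
//   };
//
//   WidgetImpl* Impl(Widget* w) { return reinterpret_cast<WidgetImpl*>(w); }
//   void Widget::DoThing() { Impl(this)->DoThing(); }
//
// This is only sound because objects are created exclusively as the Impl type
// (see {CompilationState::New} above) and destroyed through the forwarding
// destructor, mirroring how this file handles {CompilationState}.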
974
975namespace {
976
977ExecutionTier ApplyHintToExecutionTier(WasmCompilationHintTier hint,
978 ExecutionTier default_tier) {
979 switch (hint) {
980 case WasmCompilationHintTier::kDefault:
981 return default_tier;
982 case WasmCompilationHintTier::kBaseline:
983 return ExecutionTier::kLiftoff;
984 case WasmCompilationHintTier::kOptimized:
985 return ExecutionTier::kTurbofan;
986 }
987 UNREACHABLE();
988}
989
990const WasmCompilationHint* GetCompilationHint(const WasmModule* module,
991 uint32_t func_index) {
992 DCHECK_LE(module->num_imported_functions, func_index);
993 uint32_t hint_index = declared_function_index(module, func_index);
994 const std::vector<WasmCompilationHint>& compilation_hints =
995 module->compilation_hints;
996 if (hint_index < compilation_hints.size()) {
997 return &compilation_hints[hint_index];
998 }
999 return nullptr;
1000}
1001
1002CompileStrategy GetCompileStrategy(const WasmModule* module,
1003 WasmEnabledFeatures enabled_features,
1004 uint32_t func_index, bool lazy_module) {
1005 if (lazy_module) return CompileStrategy::kLazy;
1006 if (!enabled_features.has_compilation_hints()) {
1007 return CompileStrategy::kDefault;
1008 }
1009 auto* hint = GetCompilationHint(module, func_index);
1010 if (hint == nullptr) return CompileStrategy::kDefault;
1011 switch (hint->strategy) {
1012 case WasmCompilationHintStrategy::kLazy:
1013 return CompileStrategy::kLazy;
1014 case WasmCompilationHintStrategy::kEager:
1015 return CompileStrategy::kEager;
1016 case WasmCompilationHintStrategy::kLazyBaselineEagerTopTier:
1017 return CompileStrategy::kLazyBaselineEagerTopTier;
1018 case WasmCompilationHintStrategy::kDefault:
1019 return CompileStrategy::kDefault;
1020 }
1021}
1022
1023struct ExecutionTierPair {
1024 ExecutionTier baseline_tier;
1025 ExecutionTier top_tier;
1026};
1027
1028// Pass the debug state as a separate parameter to avoid data races: the debug
1029// state may change between its use here and its use at the call site. To have
1030// a consistent view on the debug state, the caller reads the debug state once
1031// and then passes it to this function.
1032ExecutionTierPair GetDefaultTiersPerModule(NativeModule* native_module,
1033 DebugState is_in_debug_state,
1034 bool lazy_module) {
1035 const WasmModule* module = native_module->module();
1036 if (lazy_module) {
1037 return {ExecutionTier::kNone, ExecutionTier::kNone};
1038 }
1039 if (is_asmjs_module(module)) {
1040 DCHECK(!is_in_debug_state);
1041 return {ExecutionTier::kTurbofan, ExecutionTier::kTurbofan};
1042 }
1043 if (is_in_debug_state) {
1044 return {ExecutionTier::kLiftoff, ExecutionTier::kLiftoff};
1045 }
1046 ExecutionTier baseline_tier =
1047 v8_flags.liftoff ? ExecutionTier::kLiftoff : ExecutionTier::kTurbofan;
1048 bool eager_tier_up = !v8_flags.wasm_dynamic_tiering && v8_flags.wasm_tier_up;
1049 ExecutionTier top_tier =
1050 eager_tier_up ? ExecutionTier::kTurbofan : baseline_tier;
1051 return {baseline_tier, top_tier};
1052}
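// [Editor's note: worked example, not part of the original file.] For a
// regular (non-asm.js, non-debug, non-lazy) module under default flags
// ({--liftoff} on, dynamic tiering on), the function above yields
// {kLiftoff, kLiftoff}: top-tier compilation is then requested dynamically
// per function rather than eagerly. With {--no-wasm-dynamic-tiering} plus
// {--wasm-tier-up} it yields {kLiftoff, kTurbofan}, i.e. eager tier-up of
// everything; with {--no-liftoff} it degenerates to {kTurbofan, kTurbofan}.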
1053
1054ExecutionTierPair GetLazyCompilationTiers(NativeModule* native_module,
1055 uint32_t func_index,
1056 DebugState is_in_debug_state) {
1057 // For lazy compilation, get the tiers we would use if lazy compilation is
1058 // disabled.
1059 constexpr bool kNotLazy = false;
1060 ExecutionTierPair tiers =
1061 GetDefaultTiersPerModule(native_module, is_in_debug_state, kNotLazy);
1062 // If we are in debug mode, we ignore compilation hints.
1063 if (is_in_debug_state) return tiers;
1064
1065 // Check if compilation hints override default tiering behaviour.
1066 if (native_module->enabled_features().has_compilation_hints()) {
1067 if (auto* hint = GetCompilationHint(native_module->module(), func_index)) {
1068 tiers.baseline_tier =
1069 ApplyHintToExecutionTier(hint->baseline_tier, tiers.baseline_tier);
1070 tiers.top_tier = ApplyHintToExecutionTier(hint->top_tier, tiers.top_tier);
1071 }
1072 }
1073
1074 if (V8_UNLIKELY(v8_flags.wasm_tier_up_filter >= 0 &&
1075 func_index !=
1076 static_cast<uint32_t>(v8_flags.wasm_tier_up_filter))) {
1077 tiers.top_tier = tiers.baseline_tier;
1078 }
1079
1080 // Correct top tier if necessary.
1081 static_assert(ExecutionTier::kLiftoff < ExecutionTier::kTurbofan,
1082 "Assume an order on execution tiers");
1083 if (tiers.baseline_tier > tiers.top_tier) {
1084 tiers.top_tier = tiers.baseline_tier;
1085 }
1086 return tiers;
1087}
1088
1089// The {CompilationUnitBuilder} builds compilation units and stores them in an
1090// internal buffer. The buffer is moved into the working queue of the
1091// {CompilationStateImpl} when {Commit} is called.
1092class CompilationUnitBuilder {
1093 public:
1094 explicit CompilationUnitBuilder(NativeModule* native_module)
1095 : native_module_(native_module) {}
1096
1097 void AddBaselineUnit(int func_index, ExecutionTier tier) {
1098 baseline_units_.emplace_back(func_index, tier, kNotForDebugging);
1099 }
1100
1101 void AddTopTierUnit(int func_index, ExecutionTier tier) {
1102 tiering_units_.emplace_back(func_index, tier, kNotForDebugging);
1103 }
1104
1105 void Commit() {
1106 if (baseline_units_.empty() && tiering_units_.empty()) return;
1107 compilation_state()->CommitCompilationUnits(base::VectorOf(baseline_units_),
1108 base::VectorOf(tiering_units_));
1109 Clear();
1110 }
1111
1112 void Clear() {
1113 baseline_units_.clear();
1114 tiering_units_.clear();
1115 }
1116
1117 const WasmModule* module() { return native_module_->module(); }
1118
1119 private:
1120 CompilationStateImpl* compilation_state() const {
1121 return Impl(native_module_->compilation_state());
1122 }
1123
1124 NativeModule* const native_module_;
1125 std::vector<WasmCompilationUnit> baseline_units_;
1126 std::vector<WasmCompilationUnit> tiering_units_;
1127};
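// [Editor's note: illustrative sketch, not part of the original file.]
// Typical use of the builder: batch up units per function, then move them
// into the compilation state in one {Commit}, which keeps contention on the
// shared queues low. The loop and tier choices here are only examples:
//
//   CompilationUnitBuilder builder(native_module);
//   for (int func_index : functions_to_compile) {  // hypothetical loop
//     builder.AddBaselineUnit(func_index, ExecutionTier::kLiftoff);
//     builder.AddTopTierUnit(func_index, ExecutionTier::kTurbofan);
//   }
//   builder.Commit();  // no-op if nothing was added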
1128
1129DecodeResult ValidateSingleFunction(Zone* zone, const WasmModule* module,
1130 int func_index,
1131 base::Vector<const uint8_t> code,
1132 WasmEnabledFeatures enabled_features,
1133 WasmDetectedFeatures* detected_features) {
1134 // Sometimes functions get validated unpredictably in the background, for
1135 // debugging or when inlining one function into another. We check here if that
1136 // is the case, and exit early if so.
1137 if (module->function_was_validated(func_index)) return {};
1138 const WasmFunction* func = &module->functions[func_index];
1139 bool is_shared = module->type(func->sig_index).is_shared;
1140 FunctionBody body{func->sig, func->code.offset(), code.begin(), code.end(),
1141 is_shared};
1142 DecodeResult result = ValidateFunctionBody(zone, enabled_features, module,
1143 detected_features, body);
1144 if (result.ok()) module->set_function_validated(func_index);
1145 return result;
1146}
1147
1148enum OnlyLazyFunctions : bool {
1149 kAllFunctions = false,
1150 kOnlyLazyFunctions = true,
1151};
1152
1153bool IsLazyModule(const WasmModule* module) {
1154 return v8_flags.wasm_lazy_compilation ||
1155 (v8_flags.asm_wasm_lazy_compilation && is_asmjs_module(module));
1156}
1157
1158class CompileLazyTimingScope {
1159 public:
1160 CompileLazyTimingScope(Counters* counters, NativeModule* native_module)
1161 : counters_(counters), native_module_(native_module) {
1162 timer_.Start();
1163 }
1164
1165 ~CompileLazyTimingScope() {
1166 base::TimeDelta elapsed = timer_.Elapsed();
1167 native_module_->AddLazyCompilationTimeSample(elapsed.InMicroseconds());
1168 counters_->wasm_lazy_compile_time()->AddTimedSample(elapsed);
1169 }
1170
1171 private:
1172 Counters* counters_;
1173 NativeModule* native_module_;
1174 base::ElapsedTimer timer_;
1175};
1176
1177} // namespace
1178
1179bool CompileLazy(Isolate* isolate,
1180 Tagged<WasmTrustedInstanceData> trusted_instance_data,
1181 int func_index) {
1183 NativeModule* native_module = trusted_instance_data->native_module();
1184 Counters* counters = isolate->counters();
1185
1186 // Put the timer scope around everything, including the {CodeSpaceWriteScope}
1187 // and its destruction, to measure complete overhead (apart from the runtime
1188 // function itself, which has constant overhead).
1189 std::optional<CompileLazyTimingScope> lazy_compile_time_scope;
1190 if (base::TimeTicks::IsHighResolution()) {
1191 lazy_compile_time_scope.emplace(counters, native_module);
1192 }
1193
1194 DCHECK(!native_module->lazy_compile_frozen());
1195
1196 TRACE_LAZY("Compiling wasm-function#%d.\n", func_index);
1197
1198 CompilationStateImpl* compilation_state =
1199 Impl(native_module->compilation_state());
1200 DebugState is_in_debug_state = native_module->IsInDebugState();
1201 ExecutionTierPair tiers =
1202 GetLazyCompilationTiers(native_module, func_index, is_in_debug_state);
1203
1204 DCHECK_LE(native_module->num_imported_functions(), func_index);
1205 DCHECK_LT(func_index, native_module->num_functions());
1206 WasmCompilationUnit baseline_unit{
1207 func_index, tiers.baseline_tier,
1208 is_in_debug_state ? kForDebugging : kNotForDebugging};
1209 CompilationEnv env = CompilationEnv::ForModule(native_module);
1210 WasmDetectedFeatures detected_features;
1211 WasmCompilationResult result = baseline_unit.ExecuteCompilation(
1212 &env, compilation_state->GetWireBytesStorage().get(), counters,
1213 &detected_features);
1214 compilation_state->OnCompilationStopped(detected_features);
1215
1216 // During lazy compilation, we can only get compilation errors when
1217 // {--wasm-lazy-validation} is enabled. Otherwise, the module was fully
1218 // verified before starting its execution.
1219 CHECK_IMPLIES(result.failed(), v8_flags.wasm_lazy_validation);
1220 if (result.failed()) {
1221 return false;
1222 }
1223
1224 WasmCodeRefScope code_ref_scope;
1225 WasmCode* code =
1226 native_module->PublishCode(native_module->AddCompiledCode(result));
1227 DCHECK_EQ(func_index, code->index());
1228
1229 if (V8_UNLIKELY(native_module->log_code())) {
1230 GetWasmEngine()->LogCode(base::VectorOf(&code, 1));
1231 // Log the code immediately in the current isolate.
1232 GetWasmEngine()->LogOutstandingCodesForIsolate(isolate);
1233 }
1234
1235 counters->wasm_lazily_compiled_functions()->Increment();
1236
1237 const WasmModule* module = native_module->module();
1238 const bool lazy_module = IsLazyModule(module);
1239 if (GetCompileStrategy(module, native_module->enabled_features(), func_index,
1240 lazy_module) == CompileStrategy::kLazy &&
1241 tiers.baseline_tier < tiers.top_tier) {
1242 WasmCompilationUnit tiering_unit{func_index, tiers.top_tier,
1243 kNotForDebugging};
1244 compilation_state->CommitTopTierCompilationUnit(tiering_unit);
1245 }
1246 return true;
1247}
1248
1249void ThrowLazyCompilationError(Isolate* isolate,
1250 const NativeModule* native_module,
1251 int func_index) {
1252 const WasmModule* module = native_module->module();
1253
1254 CompilationStateImpl* compilation_state =
1255 Impl(native_module->compilation_state());
1256 const WasmFunction* func = &module->functions[func_index];
1257 base::Vector<const uint8_t> code =
1258 compilation_state->GetWireBytesStorage()->GetCode(func->code);
1259
1260 auto enabled_features = native_module->enabled_features();
1261 // This path is unlikely, so the overhead for creating an extra Zone is
1262 // not important.
1263 Zone validation_zone{GetWasmEngine()->allocator(), ZONE_NAME};
1264 WasmDetectedFeatures unused_detected_features;
1265 DecodeResult decode_result =
1266 ValidateSingleFunction(&validation_zone, module, func_index, code,
1267 enabled_features, &unused_detected_features);
1268
1269 CHECK(decode_result.failed());
1270 wasm::ErrorThrower thrower(isolate, nullptr);
1271 thrower.CompileFailed(GetWasmErrorWithName(native_module->wire_bytes(),
1272 func_index, module,
1273 std::move(decode_result).error()));
1274}
1275
1276// The main purpose of this class is to copy the feedback vectors that live in
1277// `FixedArray`s on the JavaScript heap to a C++ datastructure on the `module`
1278// that is accessible to the background compilation threads.
1279// While we are at it, we also do some light processing here, e.g., mapping the
1280// feedback to functions, identified by their function index, and filtering out
1281// feedback for calls to imported functions (which we currently don't inline).
1282class TransitiveTypeFeedbackProcessor {
1283 public:
1284 static void Process(Isolate* isolate,
1285 Tagged<WasmTrustedInstanceData> trusted_instance_data,
1286 int func_index) {
1287 TransitiveTypeFeedbackProcessor{isolate, trusted_instance_data, func_index}
1288 .ProcessQueue();
1289 }
1290
1291 private:
1292 TransitiveTypeFeedbackProcessor(
1293 Isolate* isolate, Tagged<WasmTrustedInstanceData> trusted_instance_data,
1294 int func_index)
1295 : isolate_(isolate),
1296 instance_data_(trusted_instance_data),
1297 module_(trusted_instance_data->module()),
1298 mutex_guard(&module_->type_feedback.mutex),
1299 feedback_for_function_(module_->type_feedback.feedback_for_function) {
1300 queue_.insert(func_index);
1301 }
1302
1303 ~TransitiveTypeFeedbackProcessor() { DCHECK(queue_.empty()); }
1304
1305 void ProcessQueue() {
1306 while (!queue_.empty()) {
1307 auto next = queue_.cbegin();
1308 ProcessFunction(*next);
1309 queue_.erase(next);
1310 }
1311 }
1312
1313 void ProcessFunction(int func_index);
1314
1315 void EnqueueCallees(base::Vector<CallSiteFeedback> feedback) {
1316 for (const CallSiteFeedback& csf : feedback) {
1317 for (int j = 0; j < csf.num_cases(); j++) {
1318 int func = csf.function_index(j);
1319 // Don't spend time on calls that have never been executed.
1320 if (csf.call_count(j) == 0) continue;
1321 // Don't recompute feedback that has already been processed.
1322 auto existing = feedback_for_function_.find(func);
1323 if (existing != feedback_for_function_.end() &&
1324 !existing->second.feedback_vector.empty()) {
1325 if (!existing->second.needs_reprocessing_after_deopt) {
1326 continue;
1327 }
1328 DCHECK(v8_flags.wasm_deopt);
1329 existing->second.needs_reprocessing_after_deopt = false;
1330 }
1331 queue_.insert(func);
1332 }
1333 }
1334 }
1335
1336 Isolate* const isolate_;
1337 const Tagged<WasmTrustedInstanceData> instance_data_;
1339 const WasmModule* const module_;
1340 // TODO(jkummerow): Check if it makes a difference to apply any updates
1341 // as a single batch at the end.
1342 base::MutexGuard mutex_guard;
1343 std::unordered_map<uint32_t, FunctionTypeFeedback>& feedback_for_function_;
1344 std::set<int> queue_;
1345};
1346
1347bool IsCrossInstanceCall(Tagged<Object> obj, Isolate* const isolate) {
1348 return obj == ReadOnlyRoots{isolate}.wasm_cross_instance_call_symbol();
1349}
1350
1351class FeedbackMaker {
1352 public:
1353 FeedbackMaker(Isolate* const isolate,
1354 Tagged<WasmTrustedInstanceData> trusted_instance_data,
1355 int func_index, int num_calls)
1356 : isolate_(isolate),
1357 instance_data_(trusted_instance_data),
1358 result_(
1359 base::OwnedVector<CallSiteFeedback>::NewForOverwrite(num_calls)),
1360 num_imported_functions_(static_cast<int>(
1361 trusted_instance_data->module()->num_imported_functions)),
1362 func_index_(func_index) {}
1363
1364 void AddCallRefCandidate(Tagged<WasmFuncRef> funcref, int count) {
1365 Tagged<WasmInternalFunction> internal_function =
1366 Cast<WasmFuncRef>(funcref)->internal(isolate_);
1367 // Discard cross-instance calls, as we can only inline same-instance code.
1368 if (internal_function->implicit_arg() != instance_data_) {
1369 has_non_inlineable_targets_ = true;
1370 return;
1371 }
1372 // Discard imports for now.
1373 if (internal_function->function_index() < num_imported_functions_) {
1374 has_non_inlineable_targets_ = true;
1375 return;
1376 }
1377 AddCall(internal_function->function_index(), count);
1378 }
1379
1380 void AddCallIndirectCandidate(Tagged<Object> target_truncated_obj,
1381 int count) {
1382 // Discard cross-instance calls, as we can only inline same-instance code.
1383 if (IsCrossInstanceCall(target_truncated_obj, isolate_)) {
1384 has_non_inlineable_targets_ = true;
1385 return;
1386 }
1387 Tagged<Smi> target_truncated_smi = Cast<Smi>(target_truncated_obj);
1388
1389 // We need to map a truncated call target back to a function index.
1390 // Generally there may be multiple jump tables if code spaces are far apart
1391 // (to ensure that direct calls can always use a near call to the closest
1392 // jump table).
1393 // However, here we are always handling call targets that are originally
1394 // from the `WasmDispatchTable`, whose entries are always targets pointing
1395 // into the main jump table, so we only need to check against that.
1396
1397 WasmCodePointer call_target =
1398 WasmCodePointer{static_cast<uint32_t>(target_truncated_smi.value())};
1399 Address entry = GetProcessWideWasmCodePointerTable()
1400 ->GetEntrypointWithoutSignatureCheck(call_target);
1401 wasm::WasmCode* code =
1402 wasm::GetWasmCodeManager()->LookupCode(nullptr, entry);
1403 if (!code || code->native_module() != instance_data_->native_module() ||
1404 code->IsAnonymous()) {
1405 // Was not in the main table (e.g., because it's an imported function).
1406 has_non_inlineable_targets_ = true;
1407 return;
1408 }
1409 DCHECK_EQ(code->kind(), WasmCode::Kind::kWasmFunction);
1410 uint32_t func_idx = code->index();
1411 AddCall(func_idx, count);
1412 }
1413
1414 void AddCall(int target, int count) {
1415 // If we add too many calls, treat it as megamorphic.
1416 if (static_cast<size_t>(cache_usage_) == targets_cache_.size() ||
1417 is_megamorphic_) {
1418 is_megamorphic_ = true;
1419 return;
1420 }
1421 // Keep the cache sorted (using insertion-sort), highest count first.
1422 int insertion_index = 0;
1423 while (insertion_index < cache_usage_ &&
1424 counts_cache_[insertion_index] >= count) {
1425 insertion_index++;
1426 }
1427 for (int shifted_index = cache_usage_ - 1; shifted_index >= insertion_index;
1428 shifted_index--) {
1429 targets_cache_[shifted_index + 1] = targets_cache_[shifted_index];
1430 counts_cache_[shifted_index + 1] = counts_cache_[shifted_index];
1431 }
1432 targets_cache_[insertion_index] = target;
1433 counts_cache_[insertion_index] = count;
1434 cache_usage_++;
1435 }
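// [Editor's note: worked example, not part of the original file.] AddCall
// keeps the two parallel arrays sorted by count, highest first. Starting from
// an empty cache, AddCall(7, 10); AddCall(9, 25); AddCall(3, 10); leaves
// targets_cache_ = {9, 7, 3} and counts_cache_ = {25, 10, 10}: the new count
// 25 shifts the existing entry right, while the tie on count 10 places the
// newer target after the older one (the scan advances past entries with
// counts >= the new count).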
1436
1437 bool HasTargetCached(int target) {
1438 auto end = targets_cache_.begin() + cache_usage_;
1439 DCHECK_LE(end, targets_cache_.end());
1440 return std::find(targets_cache_.begin(), end, target) != end;
1441 }
1442
1443 void AddResult(CallSiteFeedback feedback) {
1444 DCHECK_LT(seen_calls_, result_.size());
1445 result_[seen_calls_] = feedback;
1446 ++seen_calls_;
1447 }
1448
1449 void FinishCall() {
1450 if (is_megamorphic_) {
1451 if (v8_flags.trace_wasm_inlining) {
1452 PrintF("[function %d: call #%d: megamorphic]\n", func_index_,
1453 seen_calls_);
1454 }
1455 AddResult(CallSiteFeedback::CreateMegamorphic());
1456 } else if (cache_usage_ == 0) {
1457 AddResult(CallSiteFeedback{});
1458 } else if (cache_usage_ == 1) {
1459 if (v8_flags.trace_wasm_inlining) {
1460 PrintF("[function %d: call #%d inlineable (monomorphic)]\n",
1461 func_index_, seen_calls_);
1462 }
1463 AddResult(CallSiteFeedback{targets_cache_[0], counts_cache_[0]});
1464 } else {
1465 if (v8_flags.trace_wasm_inlining) {
1466 PrintF("[function %d: call #%d inlineable (polymorphic %d)]\n",
1467 func_index_, seen_calls_, cache_usage_);
1468 }
1469 DCHECK_LE(cache_usage_, kMaxPolymorphism);
1470 CallSiteFeedback::PolymorphicCase* polymorphic =
1471 new CallSiteFeedback::PolymorphicCase[cache_usage_];
1472 for (int i = 0; i < cache_usage_; i++) {
1473 polymorphic[i].function_index = targets_cache_[i];
1474 polymorphic[i].absolute_call_frequency = counts_cache_[i];
1475 }
1476 AddResult(CallSiteFeedback{polymorphic, cache_usage_});
1477 }
1478 result_[seen_calls_ - 1].set_has_non_inlineable_targets(
1479 has_non_inlineable_targets_);
1480 // TODO(mliedtke): Have a better representation that merges these properties
1481 // into one object.
1482 has_non_inlineable_targets_ = false;
1483 is_megamorphic_ = false;
1484 cache_usage_ = 0;
1485 }
1486
1487 void set_has_non_inlineable_targets() { has_non_inlineable_targets_ = true; }
1488 void set_megamorphic() { is_megamorphic_ = true; }
1489
1490 // {GetResult} can only be called on a r-value reference to make it more
1491 // obvious at call sites that {this} should not be used after this operation.
1492 base::OwnedVector<CallSiteFeedback> GetResult() && {
1493 return std::move(result_);
1494 }
1495
1496 private:
1497 Isolate* const isolate_;
1498 const Tagged<WasmTrustedInstanceData> instance_data_;
1499 base::OwnedVector<CallSiteFeedback> result_;
1500 int seen_calls_ = 0;
1501 const int num_imported_functions_;
1502 const int func_index_;
1503 int cache_usage_{0};
1504 std::array<int, kMaxPolymorphism> targets_cache_;
1505 std::array<int, kMaxPolymorphism> counts_cache_;
1506 bool has_non_inlineable_targets_ = false;
1507 // If we add more call targets than kMaxPolymorphism while processing the
1508 // feedback, treat it as megamorphic.
1509 bool is_megamorphic_ = false;
1510};
1511
1512void TransitiveTypeFeedbackProcessor::ProcessFunction(int func_index) {
1513 int which_vector = declared_function_index(module_, func_index);
1514 Tagged<Object> maybe_feedback =
1515 instance_data_->feedback_vectors()->get(which_vector);
1516 if (!IsFixedArray(maybe_feedback)) return;
1517 Tagged<FixedArray> feedback = Cast<FixedArray>(maybe_feedback);
1518 base::Vector<uint32_t> call_targets =
1519 module_->type_feedback.feedback_for_function[func_index]
1520 .call_targets.as_vector();
1521
1522 // For each entry in {call_targets}, there are two {Object} slots in the
1523 // {feedback} vector:
1524 // +--------------------------+-----------------------------+----------------+
1525 // | Call Type                | Feedback: Entry 1           | Entry 2        |
1526 // +--------------------------+-----------------------------+----------------+
1527 // | direct                   | Smi(count)                  | Smi(0), unused |
1528 // +--------------------------+-----------------------------+----------------+
1529 // | ref, uninitialized       | Smi(0)                      | Smi(0)         |
1530 // | ref, monomorphic         | WasmFuncRef(target)         | Smi(count>0)   |
1531 // | ref, polymorphic         | FixedArray                  | Undefined      |
1532 // | ref, megamorphic         | MegamorphicSymbol           | Undefined      |
1533 // +--------------------------+-----------------------------+----------------+
1534 // | indirect, uninitialized  | Smi(0)                      | Smi(0)         |
1535 // | indirect, monomorphic    | Smi(truncated_target)       | Smi(count>0)   |
1536 // | indirect, wrong instance | WasmCrossInstanceCallSymbol | Smi(count>0)   |
1537 // | indirect, polymorphic    | FixedArray                  | Undefined      |
1538 // | indirect, megamorphic    | MegamorphicSymbol           | Undefined      |
1539 // +--------------------------+-----------------------------+----------------+
1540 // The FixedArray entries for the polymorphic cases look like the monomorphic
1541 // entries in the feedback vector itself, i.e., they can be a (truncated) target,
1542 // or the wrong instance sentinel (for cross-instance call_indirect).
1543 // See {UpdateCallRefOrIndirectIC} in {wasm.tq} for how this is written.
1544 // Since this is combining untrusted data ({feedback} vector on the JS heap)
1545 // with trusted data ({call_targets}), make sure to avoid an OOB access.
1546 int checked_feedback_length = feedback->length();
1547 SBXCHECK_EQ(checked_feedback_length, call_targets.size() * 2);
1548 FeedbackMaker fm(isolate_, instance_data_, func_index,
1549 checked_feedback_length / 2);
1550 for (int i = 0; i < checked_feedback_length; i += 2) {
1551 uint32_t sentinel_or_target = call_targets[i / 2];
1552 Tagged<Object> first_slot = feedback->get(i);
1553 Tagged<Object> second_slot = feedback->get(i + 1);
1554
1555 if (sentinel_or_target != FunctionTypeFeedback::kCallRef &&
1556 sentinel_or_target != FunctionTypeFeedback::kCallIndirect) {
1557 // Direct call counts.
1558 int count = Smi::ToInt(first_slot);
1559 DCHECK_EQ(Smi::ToInt(second_slot), 0);
1560 // TODO(dlehmann): Currently, TurboFan assumes that we add feedback even
1561 // if the call count is zero. Once TurboFan is gone, revisit if we can
1562 // avoid this (similar to how we do for call_ref/call_indirect today).
1563 fm.AddCall(static_cast<int>(sentinel_or_target), count);
1564 } else if (IsSmi(second_slot) && Smi::ToInt(second_slot) == 0) {
1565 // Uninitialized call_ref or call_indirect.
1566 DCHECK_EQ(Smi::ToInt(first_slot), 0);
1567 if (v8_flags.trace_wasm_inlining) {
1568 PrintF("[function %d: call #%d: uninitialized]\n", func_index, i / 2);
1569 }
1570 } else if (IsWasmFuncRef(first_slot)) {
1571 // Monomorphic call_ref.
1572 DCHECK_EQ(sentinel_or_target, FunctionTypeFeedback::kCallRef);
1573 int count = Smi::ToInt(second_slot);
1574 fm.AddCallRefCandidate(Cast<WasmFuncRef>(first_slot), count);
1575 } else if (IsSmi(first_slot) || IsCrossInstanceCall(first_slot, isolate_)) {
1576 // Monomorphic call_indirect.
1577 DCHECK_EQ(sentinel_or_target, FunctionTypeFeedback::kCallIndirect);
1578 int count = Smi::ToInt(second_slot);
1579 fm.AddCallIndirectCandidate(first_slot, count);
1580 } else if (IsFixedArray(first_slot)) {
1581 // Polymorphic call_ref or call_indirect.
1582 Tagged<FixedArray> polymorphic = Cast<FixedArray>(first_slot);
1583 DCHECK(IsUndefined(second_slot));
1584 int checked_polymorphic_length = polymorphic->length();
1585 SBXCHECK_LE(checked_polymorphic_length, 2 * kMaxPolymorphism);
1586 if (sentinel_or_target == FunctionTypeFeedback::kCallRef) {
1587 for (int j = 0; j < checked_polymorphic_length; j += 2) {
1588 Tagged<WasmFuncRef> target = Cast<WasmFuncRef>(polymorphic->get(j));
1589 int count = Smi::ToInt(polymorphic->get(j + 1));
1590 fm.AddCallRefCandidate(target, count);
1591 }
1592 } else {
1593 DCHECK_EQ(sentinel_or_target, FunctionTypeFeedback::kCallIndirect);
1594 for (int j = 0; j < checked_polymorphic_length; j += 2) {
1595 Tagged<Object> target = polymorphic->get(j);
1596 int count = Smi::ToInt(polymorphic->get(j + 1));
1597 fm.AddCallIndirectCandidate(target, count);
1598 }
1599 }
1600 } else if (first_slot == ReadOnlyRoots{isolate_}.megamorphic_symbol()) {
1601 DCHECK(IsUndefined(second_slot));
1602 fm.set_megamorphic();
1603 } else {
1604 UNREACHABLE();
1605 }
1606
1607 if (v8_flags.wasm_deopt &&
1608 first_slot != ReadOnlyRoots{isolate_}.megamorphic_symbol()) {
1609 // If we already had feedback for this call, also add the already existing
1610 // feedback, to prevent two different instantiations (which have their own
1611 // on-heap feedback vector) from "flip-flopping" between their inlining
1612 // decisions, which could otherwise cause deopt loops.
1613 const base::OwnedVector<CallSiteFeedback>& existing =
1614 feedback_for_function_[func_index].feedback_vector;
1615 size_t feedback_index = i / 2;
1616 if (feedback_index < existing.size()) {
1617 const CallSiteFeedback& old_feedback = existing[feedback_index];
1618 if (old_feedback.has_non_inlineable_targets()) {
1619 fm.set_has_non_inlineable_targets();
1620 }
1621 if (old_feedback.is_megamorphic()) {
1622 fm.set_megamorphic();
1623 }
1624 for (int j = 0; j < old_feedback.num_cases(); ++j) {
1625 int old_target_function_index = old_feedback.function_index(j);
1626 // If the new feedback already contains the target, we do not touch
1627 // the call count.
1628 if (!fm.HasTargetCached(old_target_function_index)) {
1629 fm.AddCall(old_target_function_index, old_feedback.call_count(j));
1630 // There shouldn't be any imported functions in there as they can't
1631 // be inlined. If this DCHECK is invalidated,
1632 // has_non_inlineable_targets_ would need to be updated here to
1633 // reflect that.
1634 DCHECK_GE(static_cast<uint32_t>(old_target_function_index),
1635 instance_data_->module()->num_imported_functions);
1636 }
1637 }
1638 }
1639 }
1640
1641 fm.FinalizeCall();
1642 }
1643 base::OwnedVector<CallSiteFeedback> result = std::move(fm).GetResult();
1644 EnqueueCallees(result.as_vector());
1645 DCHECK_EQ(result.size(),
1646 feedback_for_function_[func_index].call_targets.size());
1647 feedback_for_function_[func_index].feedback_vector = std::move(result);
1648}
1649
1650 void TriggerTierUp(Isolate* isolate,
1651 Tagged<WasmTrustedInstanceData> trusted_instance_data,
1652 int func_index) {
1653 NativeModule* native_module = trusted_instance_data->native_module();
1654 CompilationStateImpl* compilation_state =
1655 Impl(native_module->compilation_state());
1656 WasmCompilationUnit tiering_unit{func_index, ExecutionTier::kTurbofan,
1657 kNotForDebugging};
1658
1659 const WasmModule* module = native_module->module();
1660 int priority;
1661 {
1662 base::MutexGuard mutex_guard(&module->type_feedback.mutex);
1663 int array_index = wasm::declared_function_index(module, func_index);
1664 trusted_instance_data->tiering_budget_array()[array_index].store(
1665 v8_flags.wasm_tiering_budget, std::memory_order_relaxed);
1666 int& stored_priority =
1667 module->type_feedback.feedback_for_function[func_index].tierup_priority;
1668 if (stored_priority < kMaxInt) ++stored_priority;
1669 priority = stored_priority;
1670 }
1671 // Only create a compilation unit if this is the first time we detect this
1672 // function as hot (priority == 1), or if the priority increased
1673 // significantly. The latter is assumed to be the case if the priority
1674 // increased to at least four and is a power of two.
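// In other words, tier-up units are scheduled at priorities 1, 4, 8, 16, ...;
// the early return below skips priority 2 and all non-powers of two.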
1675 if (priority == 2 || !base::bits::IsPowerOfTwo(priority)) return;
1676
1677 // Before adding the tier-up unit or increasing priority, process type
1678 // feedback for best code generation.
1679 if (v8_flags.wasm_inlining) {
1680 // TODO(jkummerow): we could have collisions here if different instances
1681 // of the same module have collected different feedback. If that ever
1682 // becomes a problem, figure out a solution.
1683 TransitiveTypeFeedbackProcessor::Process(isolate, trusted_instance_data,
1684 func_index);
1685 }
1686
1687 compilation_state->AddTopTierPriorityCompilationUnit(tiering_unit, priority);
1688}
1689
1690 void TierUpNowForTesting(Isolate* isolate,
1691 Tagged<WasmTrustedInstanceData> trusted_instance_data,
1692 int func_index) {
1693 NativeModule* native_module = trusted_instance_data->native_module();
1694 if (v8_flags.wasm_inlining) {
1695 TransitiveTypeFeedbackProcessor::Process(isolate, trusted_instance_data,
1696 func_index);
1697 }
1698 wasm::GetWasmEngine()->CompileFunction(isolate->counters(), native_module,
1699 func_index,
1700 wasm::ExecutionTier::kTurbofan);
1701 CHECK(!native_module->compilation_state()->failed());
1702}
1703
1704 void TierUpAllForTesting(
1705 Isolate* isolate, Tagged<WasmTrustedInstanceData> trusted_instance_data) {
1706 NativeModule* native_module = trusted_instance_data->native_module();
1707 const WasmModule* mod = native_module->module();
1708 WasmCodeRefScope code_ref_scope;
1709
1710 uint32_t start = mod->num_imported_functions;
1711 uint32_t end = start + mod->num_declared_functions;
1712 for (uint32_t func_index = start; func_index < end; func_index++) {
1713 if (!native_module->HasCodeWithTier(func_index, ExecutionTier::kTurbofan)) {
1714 TierUpNowForTesting(isolate, trusted_instance_data, func_index);
1715 }
1716 }
1717}
1718
1719 void InitializeCompilationForTesting(NativeModule* native_module) {
1720 Impl(native_module->compilation_state())
1721 ->InitializeCompilationProgress(nullptr);
1722}
1723
1724 void PublishDetectedFeatures(WasmDetectedFeatures detected_features,
1725 Isolate* isolate, bool is_initial_compilation) {
1726 using Feature = v8::Isolate::UseCounterFeature;
1727 static constexpr std::pair<WasmDetectedFeature, Feature> kUseCounters[] = {
1728 {WasmDetectedFeature::shared_memory, Feature::kWasmSharedMemory},
1729 {WasmDetectedFeature::reftypes, Feature::kWasmRefTypes},
1730 {WasmDetectedFeature::simd, Feature::kWasmSimdOpcodes},
1731 {WasmDetectedFeature::threads, Feature::kWasmThreadOpcodes},
1732 {WasmDetectedFeature::legacy_eh, Feature::kWasmExceptionHandling},
1733 {WasmDetectedFeature::memory64, Feature::kWasmMemory64},
1734 {WasmDetectedFeature::multi_memory, Feature::kWasmMultiMemory},
1735 {WasmDetectedFeature::gc, Feature::kWasmGC},
1736 {WasmDetectedFeature::imported_strings, Feature::kWasmImportedStrings},
1737 {WasmDetectedFeature::imported_strings_utf8,
1738 Feature::kWasmImportedStringsUtf8},
1739 {WasmDetectedFeature::return_call, Feature::kWasmReturnCall},
1740 {WasmDetectedFeature::extended_const, Feature::kWasmExtendedConst},
1741 {WasmDetectedFeature::relaxed_simd, Feature::kWasmRelaxedSimd},
1742 {WasmDetectedFeature::type_reflection, Feature::kWasmTypeReflection},
1743 {WasmDetectedFeature::exnref, Feature::kWasmExnRef},
1744 {WasmDetectedFeature::typed_funcref, Feature::kWasmTypedFuncRef},
1745 {WasmDetectedFeature::jspi, Feature::kWasmJavaScriptPromiseIntegration},
1746 {WasmDetectedFeature::branch_hinting, Feature::kWasmBranchHinting},
1747 };
1748
1749 // Check that every staging or shipping feature has a use counter as that is
1750 // the main point of tracking used features.
1751 auto check_use_counter = [](WasmDetectedFeature feat) constexpr -> bool {
1752 // Some features intentionally do not have a use counter.
1753 constexpr WasmDetectedFeature kIntentionallyNoUseCounter[] = {
1754 WasmDetectedFeature::stringref, // Deprecated / unlikely to ship.
1755 };
1756 for (auto no_use_counter_feature : kIntentionallyNoUseCounter) {
1757 if (feat == no_use_counter_feature) return true;
1758 }
1759 for (auto [feature, use_counter] : kUseCounters) {
1760 if (feat == feature) return true;
1761 }
1762 return false;
1763 };
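// For instance, CHECK_USE_COUNTER(gc) expands to
// static_assert(check_use_counter(WasmDetectedFeature::gc)); so a feature
// missing from {kUseCounters} fails the build instead of silently going
// untracked.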
1764#define CHECK_USE_COUNTER(feat, ...) \
1765 static_assert(check_use_counter(WasmDetectedFeature::feat));
1769#undef CHECK_USE_COUNTER
1770
1771 static constexpr size_t kMaxFeatures = arraysize(kUseCounters) + 1;
1772 base::SmallVector<Feature, kMaxFeatures> use_counter_features;
1773 if (is_initial_compilation) {
1774 // Always set the WasmModuleCompilation feature as a baseline for the other
1775 // features. Note that we also track instantiation, but the numbers of
1776 // compilations and instantiations are pretty unrelated.
1777 use_counter_features.push_back(Feature::kWasmModuleCompilation);
1778 }
1779
1780 for (auto [wasm_feature, feature] : kUseCounters) {
1781 if (!detected_features.contains(wasm_feature)) continue;
1782 use_counter_features.push_back(feature);
1783 }
1784 if (use_counter_features.empty()) return;
1785
1786 isolate->CountUsage(base::VectorOf(use_counter_features));
1787
1788 // Help differential fuzzers avoid detecting known/intentional platform-
1789 // specific differences.
1790 if (v8_flags.correctness_fuzzer_suppressions) {
1791 if (detected_features.has_relaxed_simd()) {
1792 PrintF("Warning: This run cannot be compared across architectures.\n");
1793 }
1794 }
1795}
1796
1797namespace {
1798
1799bool IsI16Array(wasm::ValueType type, const WasmModule* module) {
1800 if (!type.is_object_reference() || !type.has_index()) return false;
1801 ModuleTypeIndex reftype = type.ref_index();
1802 if (!module->has_array(reftype)) return false;
1803 return module->canonical_type_id(reftype) ==
1804 TypeCanonicalizer::kPredefinedArrayI16Index;
1805}
1806
1807bool IsI8Array(wasm::ValueType type, const WasmModule* module,
1808 bool allow_nullable) {
1809 if (!type.is_object_reference() || !type.has_index()) return false;
1810 if (!allow_nullable && type.is_nullable()) return false;
1811 ModuleTypeIndex reftype = type.ref_index();
1812 if (!module->has_array(reftype)) return false;
1813 return module->canonical_type_id(reftype) ==
1814 TypeCanonicalizer::kPredefinedArrayI8Index;
1815}
1816
1817// Returns the start offset of a given import, for use in error messages.
1818// The module_name payload is preceded by an i32v giving its length. That i32v
1819// is preceded by another i32v, which is either a type index (specifying the
1820// type of the previous import) or the imports count (in case of the first
1821 // import). So we scan backwards as long as we find LEB continuation bytes (high bit set) there.
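// Worked example: if the string length is encoded as the LEB [0xE5 0x8E 0x26],
// the scan starts at the terminal byte 0x26, walks left over 0x8E and 0xE5
// (both have the high bit set), and stops because the byte preceding 0xE5 is
// the terminal byte of the previous LEB; the returned offset points at 0xE5.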
1822uint32_t ImportStartOffset(base::Vector<const uint8_t> wire_bytes,
1823 uint32_t module_name_start) {
1824 DCHECK_LT(0, module_name_start);
1825 uint32_t offset = module_name_start - 1; // Last byte of the string length.
1826 DCHECK_EQ(wire_bytes[offset] & 0x80, 0);
1827 while (offset > 0 && (wire_bytes[offset - 1] & 0x80) != 0) {
1828 offset--;
1829 }
1830 return offset;
1831}
1832
1833} // namespace
1834
1835// Validates the signatures of recognized compile-time imports, and stores
1836// them on the {module}'s {well_known_imports} list.
1837 WasmError ValidateAndSetBuiltinImports(const WasmModule* module,
1838 base::Vector<const uint8_t> wire_bytes,
1839 const CompileTimeImports& imports,
1840 WasmDetectedFeatures* detected) {
1841 DCHECK_EQ(module->origin, kWasmOrigin);
1842 if (imports.empty()) return {};
1843
1844 static constexpr ValueType kRefExtern = kWasmRefExtern;
1845 static constexpr ValueType kExternRef = kWasmExternRef;
1846 static constexpr ValueType kI32 = kWasmI32;
1847
1848 // Shorthands: "r" = nullable "externref", "e" = non-nullable "ref extern".
1849 static constexpr ValueType kReps_e_i[] = {kRefExtern, kI32};
1850 static constexpr ValueType kReps_e_rr[] = {kRefExtern, kExternRef,
1851 kExternRef};
1852 static constexpr ValueType kReps_e_rii[] = {kRefExtern, kExternRef, kI32,
1853 kI32};
1854 static constexpr ValueType kReps_i_ri[] = {kI32, kExternRef, kI32};
1855 static constexpr ValueType kReps_i_rr[] = {kI32, kExternRef, kExternRef};
1856
1857 static constexpr FunctionSig kSig_e_i(1, 1, kReps_e_i);
1858 static constexpr FunctionSig kSig_e_r(1, 1, kReps_e_rr);
1859 static constexpr FunctionSig kSig_e_rr(1, 2, kReps_e_rr);
1860 static constexpr FunctionSig kSig_e_rii(1, 3, kReps_e_rii);
1861
1862 static constexpr FunctionSig kSig_i_r(1, 1, kReps_i_ri);
1863 static constexpr FunctionSig kSig_i_ri(1, 2, kReps_i_ri);
1864 static constexpr FunctionSig kSig_i_rr(1, 2, kReps_i_rr);
1865
1866 std::vector<WellKnownImport> statuses;
1867 statuses.reserve(module->num_imported_functions);
1868 for (size_t i = 0; i < module->import_table.size(); i++) {
1869 const WasmImport& import = module->import_table[i];
1870
1871 // When magic string imports are requested, check that imports with the
1872 // string constant module name are globals of the right type.
1873 if (imports.has_string_constants(wire_bytes.SubVector(
1874 import.module_name.offset(), import.module_name.end_offset()))) {
1875 if (import.kind != kExternalGlobal ||
1876 !module->globals[import.index].type.is_reference_to(
1877 HeapType::kExtern) ||
1878 module->globals[import.index].mutability != false) {
1879 TruncatedUserString<> name(
1880 wire_bytes.data() + import.field_name.offset(),
1881 import.field_name.length());
1882 return WasmError(
1883 ImportStartOffset(wire_bytes, import.module_name.offset()),
1884 "String constant import #%zu \"%.*s\" must be an immutable global "
1885 "subtyping externref",
1886 i, name.length(), name.start());
1887 }
1888 }
1889
1890 // Check compile-time imported functions.
1891 if (import.kind != kExternalFunction) continue;
1892 base::Vector<const uint8_t> module_name = wire_bytes.SubVector(
1893 import.module_name.offset(), import.module_name.end_offset());
1894 constexpr size_t kMinInterestingLength = 10;
1895 if (module_name.size() < kMinInterestingLength ||
1896 module_name.SubVector(0, 5) != base::StaticOneByteVector("wasm:")) {
1897 statuses.push_back(WellKnownImport::kUninstantiated);
1898 continue;
1899 }
1900 base::Vector<const uint8_t> collection = module_name.SubVectorFrom(5);
1901 WellKnownImport status = WellKnownImport::kUninstantiated;
1902 const WasmFunction& func = module->functions[import.index];
1903 const FunctionSig* sig = func.sig;
1904 WireBytesRef field_name = import.field_name;
1905 base::Vector<const uint8_t> name =
1906 wire_bytes.SubVector(field_name.offset(), field_name.end_offset());
1907 if (collection == base::StaticOneByteVector("js-string") &&
1908 imports.contains(CompileTimeImport::kJsString)) {
1909#define RETURN_ERROR(module_name_string, import_name) \
1910 uint32_t error_offset = \
1911 ImportStartOffset(wire_bytes, import.module_name.offset()); \
1912 return WasmError(error_offset, \
1913 "Imported builtin function \"wasm:" module_name_string \
1914 "\" \"" import_name "\" has incorrect signature")
1915
1916#define CHECK_SIG(import_name, kSigName, kEnumName) \
1917 if (name == base::StaticOneByteVector(#import_name)) { \
1918 if (*sig != kSigName) { \
1919 RETURN_ERROR("js-string", #import_name); \
1920 } \
1921 status = WellKnownImport::kEnumName; \
1922 detected->add_imported_strings(); \
1923 } else // NOLINT(readability/braces)
1924
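// For example, CHECK_SIG(cast, kSig_e_r, kStringCast) checks that the import
// named "cast" has signature kSig_e_r, records kStringCast as its well-known
// import status, and marks the imported-strings feature as detected; on a
// signature mismatch it returns a WasmError via RETURN_ERROR.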
1925 CHECK_SIG(cast, kSig_e_r, kStringCast)
1926 CHECK_SIG(test, kSig_i_r, kStringTest)
1927 CHECK_SIG(fromCharCode, kSig_e_i, kStringFromCharCode)
1928 CHECK_SIG(fromCodePoint, kSig_e_i, kStringFromCodePoint)
1929 CHECK_SIG(charCodeAt, kSig_i_ri, kStringCharCodeAt)
1930 CHECK_SIG(codePointAt, kSig_i_ri, kStringCodePointAt)
1931 CHECK_SIG(length, kSig_i_r, kStringLength)
1932 CHECK_SIG(concat, kSig_e_rr, kStringConcat)
1933 CHECK_SIG(substring, kSig_e_rii, kStringSubstring)
1934 CHECK_SIG(equals, kSig_i_rr, kStringEquals)
1935 CHECK_SIG(compare, kSig_i_rr, kStringCompare)
1936 if (name == base::StaticOneByteVector("fromCharCodeArray")) {
1937 if (sig->parameter_count() != 3 || sig->return_count() != 1 ||
1938 !IsI16Array(sig->GetParam(0), module) || // --
1939 sig->GetParam(1) != kI32 || // --
1940 sig->GetParam(2) != kI32 || // --
1941 sig->GetReturn() != kRefExtern) {
1942 RETURN_ERROR("js-string", "fromCharCodeArray");
1943 }
1944 detected->add_imported_strings();
1945 status = WellKnownImport::kStringFromWtf16Array;
1946 } else if (name == base::StaticOneByteVector("intoCharCodeArray")) {
1947 if (sig->parameter_count() != 3 || sig->return_count() != 1 ||
1948 sig->GetParam(0) != kExternRef ||
1949 !IsI16Array(sig->GetParam(1), module) || // --
1950 sig->GetParam(2) != kI32 || // --
1951 sig->GetReturn() != kI32) {
1952 RETURN_ERROR("js-string", "intoCharCodeArray");
1953 }
1954 status = WellKnownImport::kStringToWtf16Array;
1955 detected->add_imported_strings();
1956 }
1957#undef CHECK_SIG
1958 } else if (collection == base::StaticOneByteVector("text-encoder") &&
1959 imports.contains(CompileTimeImport::kTextEncoder)) {
1960 if (name == base::StaticOneByteVector("measureStringAsUTF8")) {
1961 if (*sig != kSig_i_r) {
1962 RETURN_ERROR("text-encoder", "measureStringAsUTF8");
1963 }
1964 status = WellKnownImport::kStringMeasureUtf8;
1965 detected->add_imported_strings_utf8();
1966 } else if (name ==
1967 base::StaticOneByteVector("encodeStringIntoUTF8Array")) {
1968 if (sig->parameter_count() != 3 || sig->return_count() != 1 ||
1969 sig->GetParam(0) != kExternRef || // --
1970 !IsI8Array(sig->GetParam(1), module, true) || // --
1971 sig->GetParam(2) != kI32 || // --
1972 sig->GetReturn() != kI32) {
1973 RETURN_ERROR("text-encoder", "encodeStringIntoUTF8Array");
1974 }
1975 status = WellKnownImport::kStringIntoUtf8Array;
1976 detected->add_imported_strings_utf8();
1977 } else if (name == base::StaticOneByteVector("encodeStringToUTF8Array")) {
1978 if (sig->parameter_count() != 1 || sig->return_count() != 1 ||
1979 sig->GetParam(0) != kExternRef ||
1980 !IsI8Array(sig->GetReturn(), module, false)) {
1981 RETURN_ERROR("text-encoder", "encodeStringToUTF8Array");
1982 }
1983 status = WellKnownImport::kStringToUtf8Array;
1984 detected->add_imported_strings_utf8();
1985 }
1986 } else if (collection == base::StaticOneByteVector("text-decoder") &&
1987 imports.contains(CompileTimeImport::kTextDecoder)) {
1988 if (name == base::StaticOneByteVector("decodeStringFromUTF8Array")) {
1989 if (sig->parameter_count() != 3 || sig->return_count() != 1 ||
1990 !IsI8Array(sig->GetParam(0), module, true) || // --
1991 sig->GetParam(1) != kI32 || // --
1992 sig->GetParam(2) != kI32 || // --
1993 sig->GetReturn() != kRefExtern) {
1994 RETURN_ERROR("text-decoder", "decodeStringFromUTF8Array");
1995 }
1996 status = WellKnownImport::kStringFromUtf8Array;
1997 detected->add_imported_strings_utf8();
1998 }
1999 }
2000#undef RETURN_ERROR
2001 statuses.push_back(status);
2002 }
2003 // We're operating on a fresh WasmModule instance here, so we don't need to
2004 // check for incompatibilities with previously seen imports.
2005 DCHECK_EQ(module->num_imported_functions, statuses.size());
2006 // The "Initialize" call is currently only safe when the decoder has allocated
2007 // storage, which it allocates when there is an imports section.
2008 if (module->num_imported_functions != 0) {
2009 module->type_feedback.well_known_imports.Initialize(
2010 base::VectorOf(statuses));
2011 }
2012 return {};
2013}
2014
2015namespace {
2016
2017enum CompilationExecutionResult : int8_t { kNoMoreUnits, kYield };
2018
2019const char* GetCompilationEventName(const WasmCompilationUnit& unit,
2020 const CompilationEnv& env) {
2021 ExecutionTier tier = unit.tier();
2022 if (tier == ExecutionTier::kLiftoff) {
2023 return "wasm.BaselineCompilation";
2024 }
2025 if (tier == ExecutionTier::kTurbofan) {
2026 return "wasm.TopTierCompilation";
2027 }
2028 if (unit.func_index() <
2029 static_cast<int>(env.module->num_imported_functions)) {
2030 return "wasm.WasmToJSWrapperCompilation";
2031 }
2032 return "wasm.OtherCompilation";
2033}
2034
2035constexpr uint8_t kMainTaskId = 0;
2036
2037// Run by the {BackgroundCompileJob} (on any thread).
2038CompilationExecutionResult ExecuteCompilationUnits(
2039 std::weak_ptr<NativeModule> native_module, Counters* counters,
2040 JobDelegate* delegate, CompilationTier tier) {
2041 TRACE_EVENT0("v8.wasm", "wasm.ExecuteCompilationUnits");
2042
2043 // Compilation must be disabled in jitless mode.
2044 CHECK(!v8_flags.wasm_jitless);
2045
2046 // These fields are initialized in a {BackgroundCompileScope} before
2047 // starting compilation.
2048 std::optional<CompilationEnv> env;
2049 std::optional<base::FlushDenormalsScope> disable_denormals;
2050 std::shared_ptr<WireBytesStorage> wire_bytes;
2051 std::shared_ptr<const WasmModule> module;
2052 // Task 0 is any main thread (there might be multiple from multiple isolates),
2053 // worker threads start at 1 (thus the "+ 1").
2054 static_assert(kMainTaskId == 0);
2055 int task_id = delegate ? (int{delegate->GetTaskId()} + 1) : kMainTaskId;
2056 DCHECK_LE(0, task_id);
2057 CompilationUnitQueues::Queue* queue;
2058 std::optional<WasmCompilationUnit> unit;
2059
2060 WasmDetectedFeatures global_detected_features;
2061
2062 // Preparation (synchronized): Initialize the fields above and get the first
2063 // compilation unit.
2064 {
2065 BackgroundCompileScope compile_scope(native_module);
2066 if (compile_scope.cancelled()) return kYield;
2067 env.emplace(CompilationEnv::ForModule(compile_scope.native_module()));
2068 // We only really need this for optimized compilation, but for simplicity
2069 // set it just once for everything.
2070 disable_denormals.emplace(
2071 compile_scope.native_module()->compile_imports().contains(
2072 CompileTimeImport::kDisableDenormalFloats));
2073 wire_bytes = compile_scope.compilation_state()->GetWireBytesStorage();
2074 module = compile_scope.native_module()->shared_module();
2075 queue = compile_scope.compilation_state()->GetQueueForCompileTask(task_id);
2076 unit =
2077 compile_scope.compilation_state()->GetNextCompilationUnit(queue, tier);
2078 if (!unit) return kNoMoreUnits;
2079 }
2080 TRACE_COMPILE("ExecuteCompilationUnits (task id %d)\n", task_id);
2081
2082 std::vector<WasmCompilationResult> results_to_publish;
2083 while (true) {
2084 ExecutionTier current_tier = unit->tier();
2085 const char* event_name = GetCompilationEventName(unit.value(), env.value());
2086 TRACE_EVENT0("v8.wasm", event_name);
2087 while (unit->tier() == current_tier) {
2088 // Track detected features on a per-function basis before collecting them
2089 // into {global_detected_features}.
2090 WasmDetectedFeatures per_function_detected_features;
2091 // (asynchronous): Execute the compilation.
2092 WasmCompilationResult result =
2093 unit->ExecuteCompilation(&env.value(), wire_bytes.get(), counters,
2094 &per_function_detected_features);
2095 global_detected_features.Add(per_function_detected_features);
2096 bool compilation_succeeded = result.succeeded();
2097 ExecutionTier result_tier = result.result_tier;
2098 // We don't eagerly compile import wrappers any more.
2099 DCHECK_GE(unit->func_index(), env->module->num_imported_functions);
2100 results_to_publish.emplace_back(std::move(result));
2101
2102 bool yield = delegate && delegate->ShouldYield();
2103
2104 // (synchronized): Publish the compilation result and get the next unit.
2105 BackgroundCompileScope compile_scope(native_module);
2106 if (compile_scope.cancelled()) return kYield;
2107
2108 if (!compilation_succeeded) {
2109 compile_scope.compilation_state()->SetError();
2110 return kNoMoreUnits;
2111 }
2112
2113 if (!unit->for_debugging() && result_tier != current_tier) {
2114 compile_scope.native_module()->AddLiftoffBailout();
2115 }
2116
2117 // Yield or get next unit.
2118 if (yield ||
2119 !(unit = compile_scope.compilation_state()->GetNextCompilationUnit(
2120 queue, tier))) {
2121 std::vector<UnpublishedWasmCode> unpublished_code =
2122 compile_scope.native_module()->AddCompiledCode(
2123 base::VectorOf(results_to_publish));
2124 results_to_publish.clear();
2125 compile_scope.compilation_state()->SchedulePublishCompilationResults(
2126 std::move(unpublished_code), tier);
2127 compile_scope.compilation_state()->OnCompilationStopped(
2128 global_detected_features);
2129 return yield ? kYield : kNoMoreUnits;
2130 }
2131
2132 // Publish after finishing a certain number of units, to avoid
2133 // contention when all threads publish at the end.
2134 bool batch_full =
2135 queue->ShouldPublish(static_cast<int>(results_to_publish.size()));
2136 // Also publish each time the compilation tier changes from Liftoff to
2137 // TurboFan, such that we immediately publish the baseline compilation
2138 // results to start execution, and do not wait for a batch to fill up.
2139 bool liftoff_finished = unit->tier() != current_tier &&
2140 unit->tier() == ExecutionTier::kTurbofan;
2141 if (batch_full || liftoff_finished) {
2142 std::vector<UnpublishedWasmCode> unpublished_code =
2143 compile_scope.native_module()->AddCompiledCode(
2144 base::VectorOf(results_to_publish));
2145 results_to_publish.clear();
2146 compile_scope.compilation_state()->SchedulePublishCompilationResults(
2147 std::move(unpublished_code), tier);
2148 }
2149 }
2150 }
2151 UNREACHABLE();
2152}
2153
2154std::unique_ptr<CompilationUnitBuilder> InitializeCompilation(
2155 Isolate* isolate, NativeModule* native_module,
2156 ProfileInformation* pgo_info) {
2157 CompilationStateImpl* compilation_state =
2158 Impl(native_module->compilation_state());
2159 auto builder = std::make_unique<CompilationUnitBuilder>(native_module);
2160 compilation_state->InitializeCompilationProgress(pgo_info);
2161 return builder;
2162}
2163
2164bool MayCompriseLazyFunctions(const WasmModule* module,
2165 WasmEnabledFeatures enabled_features) {
2166 if (IsLazyModule(module)) return true;
2167 if (enabled_features.has_compilation_hints()) return true;
2168#ifdef ENABLE_SLOW_DCHECKS
2169 int start = module->num_imported_functions;
2170 int end = start + module->num_declared_functions;
2171 for (int func_index = start; func_index < end; func_index++) {
2172 SLOW_DCHECK(GetCompileStrategy(module, enabled_features, func_index,
2173 false) != CompileStrategy::kLazy);
2174 }
2175#endif
2176 return false;
2177}
2178
2179class CompilationTimeCallback : public CompilationEventCallback {
2180 public:
2181 enum CompileMode { kSynchronous, kAsync, kStreaming };
2182 explicit CompilationTimeCallback(
2183 std::shared_ptr<Counters> async_counters,
2184 std::shared_ptr<metrics::Recorder> metrics_recorder,
2185 v8::metrics::Recorder::ContextId context_id,
2186 std::weak_ptr<NativeModule> native_module, CompileMode compile_mode)
2187 : start_time_(base::TimeTicks::Now()),
2188 async_counters_(std::move(async_counters)),
2189 metrics_recorder_(std::move(metrics_recorder)),
2190 context_id_(context_id),
2191 native_module_(std::move(native_module)),
2192 compile_mode_(compile_mode) {}
2193
2194 void call(CompilationEvent compilation_event) override {
2195 DCHECK(base::TimeTicks::IsHighResolution());
2196 std::shared_ptr<NativeModule> native_module = native_module_.lock();
2197 if (!native_module) return;
2198 auto now = base::TimeTicks::Now();
2199 auto duration = now - start_time_;
2200 if (compilation_event == CompilationEvent::kFinishedBaselineCompilation) {
2201 // Reset {start_time_} to measure tier-up time.
2202 start_time_ = now;
2203 if (compile_mode_ != kSynchronous) {
2204 TimedHistogram* histogram =
2205 compile_mode_ == kAsync
2206 ? async_counters_->wasm_async_compile_wasm_module_time()
2207 : async_counters_->wasm_streaming_compile_wasm_module_time();
2208 histogram->AddSample(static_cast<int>(duration.InMicroseconds()));
2209 }
2210
2211 v8::metrics::WasmModuleCompiled event{
2212 (compile_mode_ != kSynchronous), // async
2213 (compile_mode_ == kStreaming), // streamed
2214 false, // cached
2215 false, // deserialized
2216 v8_flags.wasm_lazy_compilation, // lazy
2217 true, // success
2218 native_module->liftoff_code_size(), // code_size_in_bytes
2219 native_module->liftoff_bailout_count(), // liftoff_bailout_count
2220 duration.InMicroseconds()}; // wall_clock_duration_in_us
2221 metrics_recorder_->DelayMainThreadEvent(event, context_id_);
2222 }
2223 if (compilation_event == CompilationEvent::kFailedCompilation) {
2224 v8::metrics::WasmModuleCompiled event{
2225 (compile_mode_ != kSynchronous), // async
2226 (compile_mode_ == kStreaming), // streamed
2227 false, // cached
2228 false, // deserialized
2229 v8_flags.wasm_lazy_compilation, // lazy
2230 false, // success
2231 native_module->liftoff_code_size(), // code_size_in_bytes
2232 native_module->liftoff_bailout_count(), // liftoff_bailout_count
2233 duration.InMicroseconds()}; // wall_clock_duration_in_us
2234 metrics_recorder_->DelayMainThreadEvent(event, context_id_);
2235 }
2236 }
2237
2238 private:
2239 base::TimeTicks start_time_;
2240 const std::shared_ptr<Counters> async_counters_;
2241 std::shared_ptr<metrics::Recorder> metrics_recorder_;
2242 v8::metrics::Recorder::ContextId context_id_;
2243 std::weak_ptr<NativeModule> native_module_;
2244 const CompileMode compile_mode_;
2245};
2246
2247WasmError ValidateFunctions(const WasmModule* module,
2248 base::Vector<const uint8_t> wire_bytes,
2249 WasmEnabledFeatures enabled_features,
2250 OnlyLazyFunctions only_lazy_functions,
2251 WasmDetectedFeatures* detected_features) {
2252 DCHECK_EQ(module->origin, kWasmOrigin);
2253 if (only_lazy_functions &&
2254 !MayCompriseLazyFunctions(module, enabled_features)) {
2255 return {};
2256 }
2257
2258 std::function<bool(int)> filter; // Initially empty for "all functions".
2259 if (only_lazy_functions) {
2260 const bool is_lazy_module = IsLazyModule(module);
2261 filter = [module, enabled_features, is_lazy_module](int func_index) {
2262 CompileStrategy strategy = GetCompileStrategy(module, enabled_features,
2263 func_index, is_lazy_module);
2264 return strategy == CompileStrategy::kLazy ||
2265 strategy == CompileStrategy::kLazyBaselineEagerTopTier;
2266 };
2267 }
2268 // Call {ValidateFunctions} in the module decoder.
2269 return ValidateFunctions(module, enabled_features, wire_bytes, filter,
2270 detected_features);
2271}
2272
2273WasmError ValidateFunctions(const NativeModule& native_module,
2274 OnlyLazyFunctions only_lazy_functions) {
2275 WasmDetectedFeatures detected_features;
2276 WasmError result =
2277 ValidateFunctions(native_module.module(), native_module.wire_bytes(),
2278 native_module.enabled_features(), only_lazy_functions,
2279 &detected_features);
2280 if (!result.has_error()) {
2281 // This function is called before the NativeModule is finished; all detected
2282 // features will be published afterwards anyway, so ignore the return value
2283 // here.
2284 USE(native_module.compilation_state()->UpdateDetectedFeatures(
2285 detected_features));
2286 }
2287 return result;
2288}
2289
2290void CompileNativeModule(Isolate* isolate,
2291 v8::metrics::Recorder::ContextId context_id,
2292 ErrorThrower* thrower,
2293 std::shared_ptr<NativeModule> native_module,
2294 ProfileInformation* pgo_info) {
2295 CHECK(!v8_flags.jitless || v8_flags.wasm_jitless);
2296 const WasmModule* module = native_module->module();
2297
2298 // Register a callback that records compilation time and metrics events.
2299 auto* compilation_state = Impl(native_module->compilation_state());
2300 if (base::TimeTicks::IsHighResolution()) {
2301 compilation_state->AddCallback(std::make_unique<CompilationTimeCallback>(
2302 isolate->async_counters(), isolate->metrics_recorder(), context_id,
2303 native_module, CompilationTimeCallback::kSynchronous));
2304 }
2305
2306 // Initialize the compilation units and kick off background compile tasks.
2307 std::unique_ptr<CompilationUnitBuilder> builder =
2308 InitializeCompilation(isolate, native_module.get(), pgo_info);
2309 compilation_state->InitializeCompilationUnits(std::move(builder));
2310
2311 // Validate wasm modules for lazy compilation if requested. Never validate
2312 // asm.js modules as these are valid by construction (additionally a CHECK
2313 // will catch this during lazy compilation).
2314 if (!v8_flags.wasm_lazy_validation && module->origin == kWasmOrigin) {
2315 DCHECK(!thrower->error());
2316 if (WasmError validation_error =
2317 ValidateFunctions(*native_module, kOnlyLazyFunctions)) {
2318 thrower->CompileFailed(std::move(validation_error));
2319 return;
2320 }
2321 }
2322
2323 if (!compilation_state->failed()) {
2324 compilation_state->WaitForCompilationEvent(
2325 CompilationEvent::kFinishedBaselineCompilation);
2326 }
2327
2328 if (compilation_state->failed()) {
2329 DCHECK_IMPLIES(IsLazyModule(module), !v8_flags.wasm_lazy_validation);
2330 WasmError validation_error =
2331 ValidateFunctions(*native_module, kAllFunctions);
2332 CHECK(validation_error.has_error());
2333 thrower->CompileFailed(std::move(validation_error));
2334 }
2335}
2336
2337class BackgroundCompileJob final : public JobTask {
2338 public:
2339 explicit BackgroundCompileJob(std::weak_ptr<NativeModule> native_module,
2340 std::shared_ptr<Counters> async_counters,
2341 CompilationTier tier)
2342 : native_module_(std::move(native_module)),
2343 engine_barrier_(GetWasmEngine()->GetBarrierForBackgroundCompile()),
2344 async_counters_(std::move(async_counters)),
2345 tier_(tier) {}
2346
2347 void Run(JobDelegate* delegate) override {
2348 auto engine_scope = engine_barrier_->TryLock();
2349 if (!engine_scope) return;
2350 ExecuteCompilationUnits(native_module_, async_counters_.get(), delegate,
2351 tier_);
2352 }
2353
2354 size_t GetMaxConcurrency(size_t worker_count) const override {
2355 BackgroundCompileScope compile_scope(native_module_);
2356 if (compile_scope.cancelled()) return 0;
2357 size_t flag_limit = static_cast<size_t>(
2358 std::max(1, v8_flags.wasm_num_compilation_tasks.value()));
2359 // NumOutstandingCompilations() does not reflect the units that running
2360 // workers are processing, thus add the current worker count to that number.
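// Illustrative example (assumed flag value): with --wasm-num-compilation-tasks=8,
// 4 running workers, and 10 outstanding units, this returns min(8, 4 + 10) = 8.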
2361 return std::min(flag_limit,
2362 worker_count + compile_scope.compilation_state()
2363 ->NumOutstandingCompilations(tier_));
2364 }
2365
2366 private:
2367 std::weak_ptr<NativeModule> native_module_;
2368 std::shared_ptr<OperationsBarrier> engine_barrier_;
2369 const std::shared_ptr<Counters> async_counters_;
2370 const CompilationTier tier_;
2371};
2372
2373std::shared_ptr<NativeModule> GetOrCompileNewNativeModule(
2374 Isolate* isolate, WasmEnabledFeatures enabled_features,
2375 WasmDetectedFeatures detected_features, CompileTimeImports compile_imports,
2376 ErrorThrower* thrower, std::shared_ptr<const WasmModule> module,
2377 base::OwnedVector<const uint8_t> wire_bytes, int compilation_id,
2378 v8::metrics::Recorder::ContextId context_id, ProfileInformation* pgo_info) {
2379 std::shared_ptr<NativeModule> native_module =
2380 GetWasmEngine()->MaybeGetNativeModule(
2381 module->origin, wire_bytes.as_vector(), compile_imports, isolate);
2382 if (native_module) return native_module;
2383
2384 // Otherwise compile a new NativeModule.
2385 std::optional<TimedHistogramScope> wasm_compile_module_time_scope;
2386 if (base::TimeTicks::IsHighResolution()) {
2387 wasm_compile_module_time_scope.emplace(SELECT_WASM_COUNTER(
2388 isolate->counters(), module->origin, wasm_compile, module_time));
2389 }
2390
2391 size_t code_size_estimate =
2392 wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module.get());
2393 native_module = GetWasmEngine()->NewNativeModule(
2394 isolate, enabled_features, detected_features, std::move(compile_imports),
2395 module, code_size_estimate);
2396 native_module->SetWireBytes(std::move(wire_bytes));
2397 native_module->compilation_state()->set_compilation_id(compilation_id);
2398
2399 if (!v8_flags.wasm_jitless) {
2400 // Compile / validate the new module.
2401 CompileNativeModule(isolate, context_id, thrower, native_module, pgo_info);
2402 }
2403
2404 if (thrower->error()) {
2405 GetWasmEngine()->UpdateNativeModuleCache(true, std::move(native_module),
2406 isolate);
2407 return {};
2408 }
2409
2410 // Finally, put the new module in the cache; this can return the passed
2411 // NativeModule pointer, or another one (for a previously cached module).
2412 return GetWasmEngine()->UpdateNativeModuleCache(false, native_module,
2413 isolate);
2414}
2415
2416} // namespace
2417
2418std::shared_ptr<NativeModule> CompileToNativeModule(
2419 Isolate* isolate, WasmEnabledFeatures enabled_features,
2420 WasmDetectedFeatures detected_features, CompileTimeImports compile_imports,
2421 ErrorThrower* thrower, std::shared_ptr<const WasmModule> module,
2422 base::OwnedVector<const uint8_t> wire_bytes, int compilation_id,
2423 v8::metrics::Recorder::ContextId context_id, ProfileInformation* pgo_info) {
2424 std::shared_ptr<NativeModule> native_module = GetOrCompileNewNativeModule(
2425 isolate, enabled_features, detected_features, std::move(compile_imports),
2426 thrower, module, std::move(wire_bytes), compilation_id, context_id,
2427 pgo_info);
2428 if (!native_module) return {};
2429
2430 // Ensure that the code objects are logged before returning.
2431 GetWasmEngine()->LogOutstandingCodesForIsolate(isolate);
2432
2433 // Now publish all detected features of this module in the current isolate.
2434 PublishDetectedFeatures(
2435 native_module->compilation_state()->detected_features(), isolate, true);
2436
2437 return native_module;
2438}
2439
2440AsyncCompileJob::AsyncCompileJob(
2441 Isolate* isolate, WasmEnabledFeatures enabled_features,
2442 CompileTimeImports compile_imports, base::OwnedVector<const uint8_t> bytes,
2443 DirectHandle<Context> context,
2444 DirectHandle<NativeContext> incumbent_context, const char* api_method_name,
2445 std::shared_ptr<CompilationResultResolver> resolver, int compilation_id)
2446 : isolate_(isolate),
2447 api_method_name_(api_method_name),
2448 enabled_features_(enabled_features),
2449 compile_imports_(std::move(compile_imports)),
2450 start_time_(base::TimeTicks::Now()),
2451 bytes_copy_(std::move(bytes)),
2452 wire_bytes_(bytes_copy_.as_vector()),
2453 resolver_(std::move(resolver)),
2454 compilation_id_(compilation_id) {
2455 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
2456 "wasm.AsyncCompileJob");
2457 CHECK(v8_flags.wasm_async_compilation);
2458 CHECK(!v8_flags.jitless || v8_flags.wasm_jitless);
2459 v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
2460 v8::Platform* platform = V8::GetCurrentPlatform();
2461 foreground_task_runner_ = platform->GetForegroundTaskRunner(v8_isolate);
2462 native_context_ =
2463 isolate->global_handles()->Create(context->native_context());
2464 incumbent_context_ = isolate->global_handles()->Create(*incumbent_context);
2465 DCHECK(IsNativeContext(*native_context_));
2466 context_id_ = isolate->GetOrRegisterRecorderContextId(native_context_);
2467 metrics_event_.async = true;
2468}
2469
2474
2475 void AsyncCompileJob::Abort() {
2476 // Removing this job will trigger the destructor, which will cancel all
2477 // compilation.
2478 GetWasmEngine()->RemoveCompileJob(this);
2479 }
2480
2481// {ValidateFunctionsStreamingJobData} holds information that is shared between
2482// the {AsyncStreamingProcessor} and the {ValidateFunctionsStreamingJob}. It
2483// lives in the {AsyncStreamingProcessor} and is updated from both classes.
2485 struct Unit {
2486 // {func_index == -1} represents an "invalid" unit.
2487 int func_index = -1;
2488 base::Vector<const uint8_t> code;
2489
2490 // Check whether the unit is valid.
2491 operator bool() const {
2492 DCHECK_LE(-1, func_index);
2493 return func_index >= 0;
2494 }
2495 };
2496
2497 void Initialize(int num_declared_functions) {
2499 units = base::OwnedVector<Unit>::NewForOverwrite(num_declared_functions);
2500 // Initially {next == end}.
2501 next_available_unit.store(units.begin(), std::memory_order_relaxed);
2502 end_of_available_units.store(units.begin(), std::memory_order_relaxed);
2503 }
2504
2505 void AddUnit(int declared_func_index, base::Vector<const uint8_t> code,
2506 JobHandle* job_handle) {
2508 // Write new unit to {*end}, then increment {end}. There is only one thread
2509 // adding new units, so no further synchronization needed.
2510 Unit* ptr = end_of_available_units.load(std::memory_order_relaxed);
2511 // Check invariant: {next <= end}.
2512 DCHECK_LE(next_available_unit.load(std::memory_order_relaxed), ptr);
2513 *ptr++ = {declared_func_index, code};
2514 // Use release semantics, so whoever loads this pointer (using acquire
2515 // semantics) sees all our previous stores.
2516 end_of_available_units.store(ptr, std::memory_order_release);
2517 size_t total_units_added = ptr - units.begin();
2518 // Periodically notify concurrency increase. This has overhead, so avoid
2519 // calling it too often. As long as threads are still running they will
2520 // continue processing new units anyway, and if background threads validate
2521 // faster than we can add units, then only notifying after increasingly long
2522 // delays is the right thing to do to avoid too many small validation tasks.
2523 // We notify on each power of two after 16 units, and every 16k units (just
2524 // to have *some* upper limit and to avoid piling up too many units).
2525 // Additionally, notify after receiving the last unit of the module.
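// Concretely: notifications fire after units 16, 32, 64, 128, ..., after
// every multiple of 16384 units, and once more when the final unit arrives.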
2526 if ((total_units_added >= 16 &&
2527 base::bits::IsPowerOfTwo(total_units_added)) ||
2528 (total_units_added % (16 * 1024)) == 0 || ptr == units.end()) {
2529 job_handle->NotifyConcurrencyIncrease();
2530 }
2531 }
2532
2533 size_t NumOutstandingUnits() const {
2534 Unit* next = next_available_unit.load(std::memory_order_relaxed);
2535 Unit* end = end_of_available_units.load(std::memory_order_relaxed);
2536 DCHECK_LE(next, end);
2537 return end - next;
2538 }
2539
2540 // Retrieve one unit to validate; returns an "invalid" unit if nothing is in
2541 // the queue.
2542 Unit GetUnit() {
2543 // Use an acquire load to synchronize with the store in {AddUnit}. All units
2544 // before this {end} are fully initialized and ready to execute.
2545 Unit* end = end_of_available_units.load(std::memory_order_acquire);
2546 Unit* next = next_available_unit.load(std::memory_order_relaxed);
2547 while (next < end) {
2548 if (next_available_unit.compare_exchange_weak(
2549 next, next + 1, std::memory_order_relaxed)) {
2550 return *next;
2551 }
2552 // Otherwise retry with updated {next} pointer.
2553 }
2554 return {};
2555 }
2556
2557 void UpdateDetectedFeatures(WasmDetectedFeatures new_detected_features) {
2558 WasmDetectedFeatures old_features =
2559 detected_features.load(std::memory_order_relaxed);
2560 while (!detected_features.compare_exchange_weak(
2561 old_features, old_features | new_detected_features,
2562 std::memory_order_relaxed)) {
2563 // Retry with updated {old_features}.
2564 }
2565 }
2566
2567 base::OwnedVector<Unit> units;
2568 std::atomic<Unit*> next_available_unit;
2569 std::atomic<Unit*> end_of_available_units;
2570 std::atomic<bool> found_error{false};
2571 std::atomic<WasmDetectedFeatures> detected_features;
2572};
2573
2574 class ValidateFunctionsStreamingJob final : public JobTask {
2575 public:
2576 explicit ValidateFunctionsStreamingJob(const WasmModule* module,
2577 WasmEnabledFeatures enabled_features,
2578 ValidateFunctionsStreamingJobData* data)
2579 : module_(module), enabled_features_(enabled_features), data_(data) {}
2580
2581 void Run(JobDelegate* delegate) override {
2582 TRACE_EVENT0("v8.wasm", "wasm.ValidateFunctionsStreaming");
2583 using Unit = ValidateFunctionsStreamingJobData::Unit;
2584 Zone validation_zone{GetWasmEngine()->allocator(), ZONE_NAME};
2585 WasmDetectedFeatures detected_features;
2586 while (Unit unit = data_->GetUnit()) {
2587 validation_zone.Reset();
2588 DecodeResult result = ValidateSingleFunction(
2589 &validation_zone, module_, unit.func_index, unit.code,
2590 enabled_features_, &detected_features);
2591
2592 if (result.failed()) {
2593 data_->found_error.store(true, std::memory_order_relaxed);
2594 break;
2595 }
2596 // After validating one function, check if we should yield.
2597 if (delegate->ShouldYield()) break;
2598 }
2599
2600 data_->UpdateDetectedFeatures(detected_features);
2601 }
2602
2603 size_t GetMaxConcurrency(size_t worker_count) const override {
2604 return worker_count + data_->NumOutstandingUnits();
2605 }
2606
2607 private:
2608 const WasmModule* const module_;
2609 const WasmEnabledFeatures enabled_features_;
2610 ValidateFunctionsStreamingJobData* data_;
2611};
2612
2613 class AsyncStreamingProcessor final : public StreamingProcessor {
2614 public:
2615 explicit AsyncStreamingProcessor(AsyncCompileJob* job);
2616
2617 bool ProcessModuleHeader(base::Vector<const uint8_t> bytes) override;
2618
2619 bool ProcessSection(SectionCode section_code,
2620 base::Vector<const uint8_t> bytes,
2621 uint32_t offset) override;
2622
2623 bool ProcessCodeSectionHeader(int num_functions,
2624 uint32_t functions_mismatch_error_offset,
2625 std::shared_ptr<WireBytesStorage>,
2626 int code_section_start,
2627 int code_section_length) override;
2628
2629 bool ProcessFunctionBody(base::Vector<const uint8_t> bytes,
2630 uint32_t offset) override;
2631
2632 void OnFinishedChunk() override;
2633
2634 void OnFinishedStream(base::OwnedVector<const uint8_t> bytes,
2635 bool after_error) override;
2636
2637 void OnAbort() override;
2638
2639 bool Deserialize(base::Vector<const uint8_t> wire_bytes,
2640 base::Vector<const uint8_t> module_bytes) override;
2641
2642 private:
2643 void CommitCompilationUnits();
2644
2645 ModuleDecoder decoder_;
2646 AsyncCompileJob* job_;
2647 std::unique_ptr<CompilationUnitBuilder> compilation_unit_builder_;
2648 int num_functions_ = 0;
2649 bool prefix_cache_hit_ = false;
2650 bool before_code_section_ = true;
2651 ValidateFunctionsStreamingJobData validate_functions_job_data_;
2652 std::unique_ptr<JobHandle> validate_functions_job_handle_;
2653
2654 // {prefix_hasher_} computes a running hash of the wire bytes up to code
2655 // section size, but excludes the code section itself. Used by the
2656 // {NativeModuleCache} to detect potential duplicate modules.
2657 base::Hasher prefix_hasher_;
2658};
2659
2660std::shared_ptr<StreamingDecoder> AsyncCompileJob::CreateStreamingDecoder() {
2661 DCHECK_NULL(stream_);
2662 stream_ = StreamingDecoder::CreateAsyncStreamingDecoder(
2663 std::make_unique<AsyncStreamingProcessor>(this));
2664 return stream_;
2665}
2666
2667 AsyncCompileJob::~AsyncCompileJob() {
2668 // Note: This destructor always runs on the foreground thread of the isolate.
2669 background_task_manager_.CancelAndWait();
2670 // If initial compilation did not finish yet we can abort it.
2671 if (native_module_) {
2672 Impl(native_module_->compilation_state())
2673 ->CancelCompilation(CompilationStateImpl::kCancelInitialCompilation);
2674 }
2675 // Tell the streaming decoder that the AsyncCompileJob is not available
2676 // anymore.
2677 if (stream_) stream_->NotifyCompilationDiscarded();
2678 CancelPendingForegroundTask();
2679 isolate_->global_handles()->Destroy(native_context_.location());
2680 isolate_->global_handles()->Destroy(incumbent_context_.location());
2681 if (!module_object_.is_null()) {
2682 isolate_->global_handles()->Destroy(module_object_.location());
2683 }
2684}
2685
2686 void AsyncCompileJob::CreateNativeModule(
2687 std::shared_ptr<const WasmModule> module, size_t code_size_estimate) {
2688 // Embedder usage count for declared shared memories.
2689 const bool has_shared_memory =
2690 std::any_of(module->memories.begin(), module->memories.end(),
2691 [](auto& memory) { return memory.is_shared; });
2692 if (has_shared_memory) {
2693 isolate_->CountUsage(v8::Isolate::UseCounterFeature::kWasmSharedMemory);
2694 }
2695
2696 // Create the module object and populate with compiled functions and
2697 // information needed at instantiation time.
2698
2699 native_module_ = GetWasmEngine()->NewNativeModule(
2700 isolate_, enabled_features_, detected_features_,
2701 std::move(compile_imports_), std::move(module), code_size_estimate);
2702 native_module_->SetWireBytes(std::move(bytes_copy_));
2703 native_module_->compilation_state()->set_compilation_id(compilation_id_);
2704}
2705
2706 bool AsyncCompileJob::GetOrCreateNativeModule(
2707 std::shared_ptr<const WasmModule> module, size_t code_size_estimate) {
2708 native_module_ = GetWasmEngine()->MaybeGetNativeModule(
2709 module->origin, wire_bytes_.module_bytes(), compile_imports_, isolate_);
2710 if (native_module_ == nullptr) {
2711 CreateNativeModule(std::move(module), code_size_estimate);
2712 return false;
2713 }
2714 return true;
2715}
2716
2717 void AsyncCompileJob::PrepareRuntimeObjects() {
2718 // Create heap objects for script and module bytes to be stored in the
2719 // module object. Asm.js is not compiled asynchronously.
2720 DCHECK(module_object_.is_null());
2721 auto source_url =
2722 stream_ ? base::VectorOf(stream_->url()) : base::Vector<const char>();
2723 auto script =
2724 GetWasmEngine()->GetOrCreateScript(isolate_, native_module_, source_url);
2725 DirectHandle<WasmModuleObject> module_object =
2726 WasmModuleObject::New(isolate_, native_module_, script);
2727
2728 module_object_ = isolate_->global_handles()->Create(*module_object);
2729}
2730
2731// This function assumes that it is executed in a HandleScope, and that a
2732// context is set on the isolate.
2733void AsyncCompileJob::FinishCompile(bool is_after_cache_hit) {
2734 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
2735 "wasm.FinishAsyncCompile");
2736 if (stream_) {
2737 stream_->NotifyNativeModuleCreated(native_module_);
2738 }
2739 const WasmModule* module = native_module_->module();
2740 auto compilation_state = Impl(native_module_->compilation_state());
2741
2742 // Update the compilation state with feature detected during module decoding
2743 // and (potentially) validation. We will publish all features below, in the
2744 // current isolate, so ignore the return value here.
2745 USE(compilation_state->UpdateDetectedFeatures(detected_features_));
2746
2747 // If experimental PGO via files is enabled, load profile information now that
2748 // we have all wire bytes and know that the module is valid.
2749 if (V8_UNLIKELY(v8_flags.experimental_wasm_pgo_from_file)) {
2750 std::unique_ptr<ProfileInformation> pgo_info =
2751 LoadProfileFromFile(module, native_module_->wire_bytes());
2752 if (pgo_info) {
2753 compilation_state->ApplyPgoInfoLate(pgo_info.get());
2754 }
2755 }
2756
2757 bool is_after_deserialization = !module_object_.is_null();
2758 if (!is_after_deserialization) {
2759 PrepareRuntimeObjects();
2760 }
2761
2762 // Measure duration of baseline compilation or deserialization from cache.
2763 if (base::TimeTicks::IsHighResolution()) {
2764 base::TimeDelta duration = base::TimeTicks::Now() - start_time_;
2765 int duration_usecs = static_cast<int>(duration.InMicroseconds());
2766 isolate_->counters()->wasm_streaming_finish_wasm_module_time()->AddSample(
2767 duration_usecs);
2768
2769 if (is_after_cache_hit || is_after_deserialization) {
2770 v8::metrics::WasmModuleCompiled event{
2771 true, // async
2772 true, // streamed
2773 is_after_cache_hit, // cached
2774 is_after_deserialization, // deserialized
2775 v8_flags.wasm_lazy_compilation, // lazy
2776 !compilation_state->failed(), // success
2777 native_module_->turbofan_code_size(), // code_size_in_bytes
2778 native_module_->liftoff_bailout_count(), // liftoff_bailout_count
2779 duration.InMicroseconds()}; // wall_clock_duration_in_us
2780 isolate_->metrics_recorder()->DelayMainThreadEvent(event, context_id_);
2781 }
2782 }
2783
2784 DCHECK(!isolate_->context().is_null());
2785 // Finish the wasm script now and make it public to the debugger.
2786 DirectHandle<Script> script(module_object_->script(), isolate_);
2787 auto sourcemap_symbol =
2788 module->debug_symbols[WasmDebugSymbols::Type::SourceMap];
2789 if (script->type() == Script::Type::kWasm &&
2790 sourcemap_symbol.type != WasmDebugSymbols::Type::None &&
2791 !sourcemap_symbol.external_url.is_empty()) {
2792 ModuleWireBytes wire_bytes(native_module_->wire_bytes());
2793 MaybeDirectHandle<String> src_map_str =
2794 isolate_->factory()->NewStringFromUtf8(
2795 wire_bytes.GetNameOrNull(sourcemap_symbol.external_url),
2796 AllocationType::kOld);
2797 script->set_source_mapping_url(*src_map_str.ToHandleChecked());
2798 }
2799 {
2800 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
2801 "wasm.Debug.OnAfterCompile");
2802 isolate_->debug()->OnAfterCompile(script);
2803 }
2804
2805 // Publish the detected features in this isolate, once initial compilation
2806 // is done. Validation should have detected all features, unless lazy
2807 // validation is enabled.
2808 PublishDetectedFeatures(compilation_state->detected_features(), isolate_,
2809 true);
2810
2811 // We might need debug code for the module, if the debugger was enabled while
2812 // streaming compilation was running. Since handling this while compiling via
2813 // streaming is tricky, we just remove all code which may have been generated,
2814 // and compile debug code lazily.
2815 if (native_module_->IsInDebugState()) {
2816 WasmCodeRefScope ref_scope;
2817 native_module_->RemoveCompiledCode(
2818 NativeModule::RemoveFilter::kRemoveNonDebugCode);
2819 }
2820
2821 // Finally, log all generated code (it does not matter if this happens
2822 // repeatedly in case the script is shared).
2823 native_module_->LogWasmCodes(isolate_, module_object_->script());
2824
2825 FinishSuccessfully();
2826}
2827
2828 void AsyncCompileJob::Failed() {
2829 // {job} keeps the {this} pointer alive.
2830 std::unique_ptr<AsyncCompileJob> job =
2831 GetWasmEngine()->RemoveCompileJob(this);
2833 // Revalidate the whole module to produce a deterministic error message.
2834 constexpr bool kValidate = true;
2835 WasmDetectedFeatures unused_detected_features;
2836 ModuleResult result = DecodeWasmModule(
2837 enabled_features_, wire_bytes_.module_bytes(), kValidate,
2838 kWasmOrigin, &unused_detected_features);
2839 ErrorThrower thrower(isolate_, api_method_name_);
2840 if (result.failed()) {
2841 thrower.CompileFailed(std::move(result).error());
2842 } else {
2843 // The only possible reason why {result} might be okay is if the failure
2844 // was due to compile-time imports checking.
2845 CHECK(!job->compile_imports_.empty());
2846 WasmError error = ValidateAndSetBuiltinImports(
2847 result.value().get(), wire_bytes_.module_bytes(), job->compile_imports_,
2848 &unused_detected_features);
2849 CHECK(error.has_error());
2850 thrower.CompileError("%s", error.message().c_str());
2851 }
2852 resolver_->OnCompilationFailed(thrower.Reify());
2853}
2854
2855 class AsyncCompileJob::CompilationStateCallback
2856 : public CompilationEventCallback {
2857 public:
2858 explicit CompilationStateCallback(AsyncCompileJob* job) : job_(job) {}
2859
2860 void call(CompilationEvent event) override {
2861 // This callback is only being called from a foreground task.
2862 switch (event) {
2863 case CompilationEvent::kFinishedBaselineCompilation:
2864 DCHECK(!last_event_.has_value());
2865 if (job_->DecrementAndCheckFinisherCount()) {
2866 // Install the native module in the cache, or reuse a conflicting one.
2867 // If we get a conflicting module, wait until we are back in the
2868 // main thread to update {job_->native_module_} to avoid a data race.
2869 std::shared_ptr<NativeModule> cached_native_module =
2870 GetWasmEngine()->UpdateNativeModuleCache(
2871 false, job_->native_module_, job_->isolate_);
2872 if (cached_native_module == job_->native_module_) {
2873 // There was no cached module.
2874 cached_native_module = nullptr;
2875 }
2876 job_->DoSync<FinishCompilation>(std::move(cached_native_module));
2877 }
2878 break;
2882 break;
2883 case CompilationEvent::kFailedCompilation:
2884 DCHECK(!last_event_.has_value());
2886 // Don't update {job_->native_module_} to avoid data races with other
2887 // compilation threads. Use a copy of the shared pointer instead.
2888 GetWasmEngine()->UpdateNativeModuleCache(true, job_->native_module_,
2889 job_->isolate_);
2890 job_->DoSync<Fail>();
2891 }
2892 break;
2893 }
2894#ifdef DEBUG
2895 last_event_ = event;
2896#endif
2897 }
2898
2899 private:
2901#ifdef DEBUG
2902 // This will be modified by different threads, but they externally
2903 // synchronize, so no explicit synchronization (currently) needed here.
2904 std::optional<CompilationEvent> last_event_;
2905#endif
2906};
2907
2908// A closure to run a compilation step (either as foreground or background
2909// task) and schedule the next step(s), if any.
2910 class AsyncCompileJob::CompileStep {
2911 public:
2912 virtual ~CompileStep() = default;
2913
2914 void Run(AsyncCompileJob* job, bool on_foreground) {
2915 if (on_foreground) {
2916 HandleScope scope(job->isolate_);
2917 SaveAndSwitchContext saved_context(job->isolate_, *job->native_context_);
2918 RunInForeground(job);
2919 } else {
2920 RunInBackground(job);
2921 }
2922 }
2923
2924 virtual void RunInForeground(AsyncCompileJob*) { UNREACHABLE(); }
2925 virtual void RunInBackground(AsyncCompileJob*) { UNREACHABLE(); }
2926};
2927
2928 class AsyncCompileJob::CompileTask : public CancelableTask {
2929 public:
2930 CompileTask(AsyncCompileJob* job, bool on_foreground)
2931 // We only manage the background tasks with the {CancelableTaskManager} of
2932 // the {AsyncCompileJob}. Foreground tasks are managed by the system's
2933 // {CancelableTaskManager}. Background tasks cannot spawn tasks managed by
2934 // their own task manager.
2935 : CancelableTask(on_foreground ? job->isolate_->cancelable_task_manager()
2936 : &job->background_task_manager_),
2937 job_(job),
2938 on_foreground_(on_foreground) {}
2939
2940 ~CompileTask() override {
2941 if (job_ != nullptr && on_foreground_) ResetPendingForegroundTask();
2942 }
2943
2944 void RunInternal() final {
2945 if (!job_) return;
2946 if (on_foreground_) ResetPendingForegroundTask();
2947 job_->step_->Run(job_, on_foreground_);
2948 // After execution, reset {job_} such that we don't try to reset the pending
2949 // foreground task when the task is deleted.
2950 job_ = nullptr;
2951 }
2952
2953 void Cancel() {
2954 DCHECK_NOT_NULL(job_);
2955 job_ = nullptr;
2956 }
2957
2958 private:
2959 // {job_} will be cleared to cancel a pending task.
2960 AsyncCompileJob* job_;
2961 const bool on_foreground_;
2962
2963 void ResetPendingForegroundTask() const {
2964 DCHECK_EQ(this, job_->pending_foreground_task_);
2965 job_->pending_foreground_task_ = nullptr;
2966 }
2967};
2968
2969 void AsyncCompileJob::StartForegroundTask() {
2970 DCHECK_NULL(pending_foreground_task_);
2971
2972 auto new_task = std::make_unique<CompileTask>(this, true);
2973 pending_foreground_task_ = new_task.get();
2974 foreground_task_runner_->PostTask(std::move(new_task));
2975}
2976
2977 void AsyncCompileJob::ExecuteForegroundTaskImmediately() {
2978 DCHECK_NULL(pending_foreground_task_);
2979
2980 auto new_task = std::make_unique<CompileTask>(this, true);
2981 pending_foreground_task_ = new_task.get();
2982 new_task->Run();
2983}
2984
2985 void AsyncCompileJob::CancelPendingForegroundTask() {
2986 if (!pending_foreground_task_) return;
2987 pending_foreground_task_->Cancel();
2988 pending_foreground_task_ = nullptr;
2989 }
2990
2991 void AsyncCompileJob::StartBackgroundTask() {
2992 auto task = std::make_unique<CompileTask>(this, false);
2993
2994 // If --wasm-num-compilation-tasks=0 is passed, do only spawn foreground
2995 // tasks. This is used to make timing deterministic.
2996 if (v8_flags.wasm_num_compilation_tasks > 0) {
2997 V8::GetCurrentPlatform()->PostTaskOnWorkerThread(
2998 TaskPriority::kUserBlocking, std::move(task));
2999 } else {
3000 foreground_task_runner_->PostTask(std::move(task));
3001 }
3002}
3003
3004template <typename Step,
3005 AsyncCompileJob::UseExistingForegroundTask use_existing_fg_task,
3006 typename... Args>
3007 void AsyncCompileJob::DoSync(Args&&... args) {
3008 NextStep<Step>(std::forward<Args>(args)...);
3009 if (use_existing_fg_task && pending_foreground_task_ != nullptr) return;
3010 StartForegroundTask();
3011}
3012
3013template <typename Step, typename... Args>
3014 void AsyncCompileJob::DoImmediately(Args&&... args) {
3015 NextStep<Step>(std::forward<Args>(args)...);
3016 ExecuteForegroundTaskImmediately();
3017}
3018
3019template <typename Step, typename... Args>
3020 void AsyncCompileJob::DoAsync(Args&&... args) {
3021 NextStep<Step>(std::forward<Args>(args)...);
3022 StartBackgroundTask();
3023}
3024
3025template <typename Step, typename... Args>
3026 void AsyncCompileJob::NextStep(Args&&... args) {
3027 step_.reset(new Step(std::forward<Args>(args)...));
3028}
3029
3030//==========================================================================
3031// Step 1: (async) Decode the module.
3032//==========================================================================
3033 class AsyncCompileJob::DecodeModule : public AsyncCompileJob::CompileStep {
3034 public:
3035 explicit DecodeModule(Counters* counters,
3036 std::shared_ptr<metrics::Recorder> metrics_recorder)
3037 : counters_(counters), metrics_recorder_(std::move(metrics_recorder)) {}
3038
3039 void RunInBackground(AsyncCompileJob* job) override {
3040 ModuleResult result;
3041 {
3042 DisallowHandleAllocation no_handle;
3044 // Decode the module bytes.
3045 TRACE_COMPILE("(1) Decoding module...\n");
3046 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
3047 "wasm.DecodeModule");
3048 auto enabled_features = job->enabled_features_;
3049 result = DecodeWasmModule(
3050 enabled_features, job->wire_bytes_.module_bytes(), false, kWasmOrigin,
3051 counters_, metrics_recorder_, job->context_id(),
3052 DecodingMethod::kAsync, &job->detected_features_);
3053
3054 // Validate lazy functions here if requested.
3055 if (result.ok() && !v8_flags.wasm_lazy_validation) {
3056 const WasmModule* module = result.value().get();
3057 if (WasmError validation_error = ValidateFunctions(
3058 module, job->wire_bytes_.module_bytes(), job->enabled_features_,
3059 kOnlyLazyFunctions, &job->detected_features_)) {
3060 result = ModuleResult{std::move(validation_error)};
3061 }
3062 }
3063 if (result.ok()) {
3064 const WasmModule* module = result.value().get();
3065 if (WasmError error = ValidateAndSetBuiltinImports(
3066 module, job->wire_bytes_.module_bytes(), job->compile_imports_,
3067 &job->detected_features_)) {
3068 result = ModuleResult{std::move(error)};
3069 }
3070 }
3071 }
3072 if (result.failed()) {
3073 // Decoding failure; reject the promise and clean up.
3074 job->DoSync<Fail>();
3075 } else {
3076 // Decode passed.
3077 std::shared_ptr<WasmModule> module = std::move(result).value();
3078 size_t code_size_estimate =
3079 wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module.get());
3080 job->DoSync<PrepareAndStartCompile>(
3081 std::move(module), true /* start_compilation */,
3082 true /* lazy_functions_are_validated */, code_size_estimate);
3083 }
3084 }
3085
3086 private:
3087 Counters* const counters_;
3088 std::shared_ptr<metrics::Recorder> metrics_recorder_;
3089};
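// Note on this step: eagerly compiled functions are validated later as a
// side effect of compilation, so only lazily compiled functions need
// validation here (and even that is skipped under --wasm-lazy-validation).
// On failure the job transitions to the {Fail} step; on success it proceeds
// to {PrepareAndStartCompile} with the decoded module and a code size
// estimate for the native module.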
3090
3091//==========================================================================
3092// Step 2 (sync): Create heap-allocated data and start compilation.
3093//==========================================================================
3094class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
3095 public:
3096 PrepareAndStartCompile(std::shared_ptr<const WasmModule> module,
3097 bool start_compilation,
3098 bool lazy_functions_are_validated,
3099 size_t code_size_estimate)
3100 : module_(std::move(module)),
3101 start_compilation_(start_compilation),
3102 lazy_functions_are_validated_(lazy_functions_are_validated),
3103 code_size_estimate_(code_size_estimate) {}
3104
3105 private:
3106 void RunInForeground(AsyncCompileJob* job) override {
3107 TRACE_COMPILE("(2) Prepare and start compile...\n");
3108
3109 const bool streaming = job->wire_bytes_.length() == 0;
3110 if (streaming) {
3111 // Streaming compilation already checked for cache hits.
3112 job->CreateNativeModule(module_, code_size_estimate_);
3113 } else if (job->GetOrCreateNativeModule(std::move(module_),
3114 code_size_estimate_)) {
3115 job->FinishCompile(true);
3116 return;
3117 } else if (!lazy_functions_are_validated_) {
3118 // If we are not streaming and did not get a cache hit, we might have hit
3119 // the path where the streaming decoder got a prefix cache hit, but the
3120 // module then turned out to be invalid, and we are running it through
3121 // non-streaming decoding again. In this case, function bodies have not
3122 // been validated yet (that would have happened in the {DecodeModule}
3123 // phase had we not come via the non-streaming path). Thus do this now.
3124 // Note that we only need to validate lazily compiled functions, others
3125 // will be validated during eager compilation.
3127 if (!v8_flags.wasm_lazy_validation &&
3128 ValidateFunctions(*job->native_module_, kOnlyLazyFunctions)
3129 .has_error()) {
3130 job->Failed();
3131 return;
3132 }
3133 }
3134
3135 // Make sure all compilation tasks stopped running. Decoding (async step)
3136 // is done.
3137 job->background_task_manager_.CancelAndWait();
3138
3139 CompilationStateImpl* compilation_state =
3140 Impl(job->native_module_->compilation_state());
3141 compilation_state->AddCallback(
3142 std::make_unique<CompilationStateCallback>(job));
3143 if (base::TimeTicks::IsHighResolution()) {
3144 auto compile_mode = job->stream_ == nullptr
3145 ? CompilationTimeCallback::kAsync
3146 : CompilationTimeCallback::kStreaming;
3147 compilation_state->AddCallback(std::make_unique<CompilationTimeCallback>(
3148 job->isolate_->async_counters(), job->isolate_->metrics_recorder(),
3149 job->context_id_, job->native_module_, compile_mode));
3150 }
3151
3152 if (start_compilation_) {
3153 // TODO(13209): Use PGO for async compilation, if available.
3154 constexpr ProfileInformation* kNoProfileInformation = nullptr;
3155 std::unique_ptr<CompilationUnitBuilder> builder = InitializeCompilation(
3156 job->isolate(), job->native_module_.get(), kNoProfileInformation);
3157 compilation_state->InitializeCompilationUnits(std::move(builder));
3158 // In single-threaded mode there are no worker tasks that will do the
3159 // compilation. We call {WaitForCompilationEvent} here so that the main
3160 // thread participates and finishes the compilation.
3161 if (v8_flags.wasm_num_compilation_tasks == 0 || v8_flags.wasm_jitless) {
3162 compilation_state->WaitForCompilationEvent(
3163 CompilationEvent::kFinishedBaselineCompilation);
3164 }
3165 }
3166 }
3167
3168 const std::shared_ptr<const WasmModule> module_;
3169 const bool start_compilation_;
3170 const bool lazy_functions_are_validated_;
3171 const size_t code_size_estimate_;
3172};
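// Walkthrough of the step above for a non-streaming compile: a native-module
// cache hit short-circuits straight to {FinishCompile}; otherwise lazy
// function bodies may still need validation before compilation starts. With
// start_compilation == true the initial compilation units are built, and in
// single-threaded configurations the main thread itself drains them via
// {WaitForCompilationEvent} instead of relying on workers.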
3173
3174//==========================================================================
3175// Step 3 (sync): Compilation finished.
3176//==========================================================================
3177class AsyncCompileJob::FinishCompilation : public CompileStep {
3178 public:
3179 explicit FinishCompilation(std::shared_ptr<NativeModule> cached_native_module)
3180 : cached_native_module_(std::move(cached_native_module)) {}
3181
3182 private:
3183 void RunInForeground(AsyncCompileJob* job) override {
3184 TRACE_COMPILE("(3) Compilation finished\n");
3185 if (cached_native_module_) {
3186 job->native_module_ = cached_native_module_;
3187 }
3188 // Then finalize and publish the generated module.
3189 job->FinishCompile(cached_native_module_ != nullptr);
3190 }
3191
3192 std::shared_ptr<NativeModule> cached_native_module_;
3193};
3194
3195//==========================================================================
3196// Step 4 (sync): Decoding or compilation failed.
3197//==========================================================================
3198class AsyncCompileJob::Fail : public CompileStep {
3199 private:
3200 void RunInForeground(AsyncCompileJob* job) override {
3201 TRACE_COMPILE("(4) Async compilation failed.\n");
3202 // {job_} is deleted in {Failed}, therefore the {return}.
3203 return job->Failed();
3204 }
3205};
3206
3207void AsyncCompileJob::FinishSuccessfully() {
3208 TRACE_COMPILE("(4) Finish module...\n");
3209 {
3210 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
3211 "wasm.OnCompilationSucceeded");
3212 // We have to make sure that an "incumbent context" is available in case
3213 // the module's start function calls out to Blink.
3214 Local<v8::Context> backup_incumbent_context =
3215 Utils::ToLocal(incumbent_context_);
3216 v8::Context::BackupIncumbentScope incumbent(backup_incumbent_context);
3217 resolver_->OnCompilationSucceeded(module_object_);
3218 }
3219 GetWasmEngine()->RemoveCompileJob(this);
3220}
3221
3222AsyncStreamingProcessor::AsyncStreamingProcessor(AsyncCompileJob* job)
3223 : decoder_(job->enabled_features_, &job->detected_features_),
3224 job_(job),
3225 compilation_unit_builder_(nullptr) {}
3226
3227// Process the module header.
3228bool AsyncStreamingProcessor::ProcessModuleHeader(
3229 base::Vector<const uint8_t> bytes) {
3230 TRACE_STREAMING("Process module header...\n");
3231 decoder_.DecodeModuleHeader(bytes);
3232 if (!decoder_.ok()) return false;
3233 // Note: We do not include the magic bytes in the hash; they are constant
3234 // anyway.
3235 return true;
3236}
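// Streaming walkthrough (sketch): for a module arriving in chunks the
// processor sees {ProcessModuleHeader}, then one {ProcessSection} call per
// non-code section, then {ProcessCodeSectionHeader} followed by one
// {ProcessFunctionBody} per function, and finally {OnFinishedStream}. The
// prefix hash accumulated along the way keys the streaming-compilation
// cache.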
3237
3238// Process all sections except for the code section.
3239bool AsyncStreamingProcessor::ProcessSection(SectionCode section_code,
3240 base::Vector<const uint8_t> bytes,
3241 uint32_t offset) {
3242 TRACE_STREAMING("Process section %d ...\n", section_code);
3243 if (compilation_unit_builder_) {
3244 // We reached a section after the code section; we do not need the
3245 // compilation_unit_builder_ anymore.
3246 CommitCompilationUnits();
3247 compilation_unit_builder_.reset();
3248 }
3249 if (before_code_section_) {
3250 // Combine section hashes until code section.
3251 prefix_hasher_.AddRange(bytes);
3252 }
3253 if (section_code == SectionCode::kUnknownSectionCode) {
3254 size_t bytes_consumed = ModuleDecoder::IdentifyUnknownSection(
3255 &decoder_, bytes, offset, &section_code);
3256 if (!decoder_.ok()) return false;
3257 if (section_code == SectionCode::kUnknownSectionCode) {
3258 // Skip unknown sections that we do not know how to handle.
3259 return true;
3260 }
3261 // Remove the unknown section tag from the payload bytes.
3262 offset += bytes_consumed;
3263 bytes = bytes.SubVector(bytes_consumed, bytes.size());
3264 }
3265 decoder_.DecodeSection(section_code, bytes, offset);
3266 return decoder_.ok();
3267}
3268
3269// Start the code section.
3270bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
3271 int num_functions, uint32_t functions_mismatch_error_offset,
3272 std::shared_ptr<WireBytesStorage> wire_bytes_storage,
3273 int code_section_start, int code_section_length) {
3274 DCHECK_LE(0, code_section_length);
3275 before_code_section_ = false;
3276 TRACE_STREAMING("Start the code section with %d functions...\n",
3277 num_functions);
3278 prefix_hasher_.Add(static_cast<uint32_t>(code_section_length));
3279 if (!decoder_.CheckFunctionsCount(static_cast<uint32_t>(num_functions),
3280 functions_mismatch_error_offset)) {
3281 return false;
3282 }
3283
3284 decoder_.StartCodeSection({static_cast<uint32_t>(code_section_start),
3285 static_cast<uint32_t>(code_section_length)});
3286
3287 if (!GetWasmEngine()->GetStreamingCompilationOwnership(
3288 prefix_hasher_.hash(), job_->compile_imports_)) {
3289 // Known prefix, wait until the end of the stream and check the cache.
3290 prefix_cache_hit_ = true;
3291 return true;
3292 }
3293
3294 // Execute the PrepareAndStartCompile step immediately and not in a separate
3295 // task.
3297 size_t code_size_estimate =
3298 wasm::WasmCodeManager::EstimateNativeModuleCodeSize(num_functions,
3299 code_section_length);
3300 job_->DoImmediately<AsyncCompileJob::PrepareAndStartCompile>(
3301 decoder_.shared_module(),
3302 // start_compilation: false; triggered when we receive the bodies.
3303 false,
3304 // lazy_functions_are_validated: false (bodies not received yet).
3305 false, code_size_estimate);
3306
3307 auto* compilation_state = Impl(job_->native_module_->compilation_state());
3308 compilation_state->SetWireBytesStorage(std::move(wire_bytes_storage));
3309 DCHECK_EQ(job_->native_module_->module()->origin, kWasmOrigin);
3310
3311 // Set outstanding_finishers_ to 2, because both the AsyncCompileJob and the
3312 // AsyncStreamingProcessor have to finish.
3313 job_->outstanding_finishers_.store(2);
3314 // TODO(13209): Use PGO for streaming compilation, if available.
3315 constexpr ProfileInformation* kNoProfileInformation = nullptr;
3316 compilation_unit_builder_ = InitializeCompilation(
3317 job_->isolate(), job_->native_module_.get(), kNoProfileInformation);
3318 return true;
3319}
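// Example of the prefix-cache handshake above: if two frames stream the same
// module, both compute the same prefix hash by the start of the code
// section. The first caller obtains streaming-compilation ownership and
// compiles; the second sees {GetStreamingCompilationOwnership} fail, sets
// {prefix_cache_hit_}, keeps decoding (but not compiling) function bodies,
// and re-checks the cache in {OnFinishedStream}.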
3320
3321// Process a function body.
3322bool AsyncStreamingProcessor::ProcessFunctionBody(
3323 base::Vector<const uint8_t> bytes, uint32_t offset) {
3324 TRACE_STREAMING("Process function body %d ...\n", num_functions_);
3325 uint32_t func_index =
3326 decoder_.module()->num_imported_functions + num_functions_;
3327 ++num_functions_;
3328 // In case of {prefix_cache_hit_} we still need the function body to be
3329 // decoded; otherwise a later cache miss cannot be handled.
3330 decoder_.DecodeFunctionBody(func_index, static_cast<uint32_t>(bytes.length()),
3331 offset);
3332
3333 if (prefix_cache_hit_) {
3334 // Don't compile yet if we might have a cache hit.
3335 return true;
3336 }
3337
3338 const WasmModule* module = decoder_.module();
3339 auto enabled_features = job_->enabled_features_;
3340 DCHECK_EQ(module->origin, kWasmOrigin);
3341 const bool lazy_module = v8_flags.wasm_lazy_compilation;
3342 CompileStrategy strategy =
3343 GetCompileStrategy(module, enabled_features, func_index, lazy_module);
3344 CHECK_IMPLIES(v8_flags.wasm_jitless, !v8_flags.wasm_lazy_validation);
3345 bool validate_lazily_compiled_function =
3346 v8_flags.wasm_jitless ||
3347 (!v8_flags.wasm_lazy_validation &&
3348 (strategy == CompileStrategy::kLazy ||
3349 strategy == CompileStrategy::kLazyBaselineEagerTopTier));
3350 if (validate_lazily_compiled_function) {
3351 // {bytes} is part of a section buffer owned by the streaming decoder. The
3352 // streaming decoder is held alive by the {AsyncCompileJob}, so we can just
3353 // use the {bytes} vector as long as the {AsyncCompileJob} is still running.
3354 if (!validate_functions_job_handle_) {
3355 validate_functions_job_handle_ =
3356 V8::GetCurrentPlatform()->CreateJob(
3357 TaskPriority::kUserVisible,
3358 std::make_unique<ValidateFunctionsStreamingJob>(
3359 module, enabled_features, &validate_functions_job_data_));
3360 }
3361 validate_functions_job_data_.AddUnit(func_index, bytes,
3362 validate_functions_job_handle_.get());
3363 }
3364
3365 auto* compilation_state = Impl(job_->native_module_->compilation_state());
3366 compilation_state->AddCompilationUnit(compilation_unit_builder_.get(),
3367 func_index);
3368 return true;
3369}
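// Note on the validation path above: lazily compiled functions are validated
// by a parallel {JobTask}; each received body is handed to
// {validate_functions_job_data_.AddUnit}, which can raise the job's
// concurrency as units accumulate. Compilation units, by contrast, are only
// committed in batches via {CommitCompilationUnits} below.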
3370
3371void AsyncStreamingProcessor::CommitCompilationUnits() {
3372 DCHECK(compilation_unit_builder_);
3373 compilation_unit_builder_->Commit();
3374}
3375
3376void AsyncStreamingProcessor::OnFinishedChunk() {
3377 TRACE_STREAMING("FinishChunk...\n");
3378 if (compilation_unit_builder_) CommitCompilationUnits();
3379}
3380
3381// Finish the processing of the stream.
3382void AsyncStreamingProcessor::OnFinishedStream(
3383 base::OwnedVector<const uint8_t> bytes, bool after_error) {
3384 TRACE_STREAMING("Finish stream...\n");
3385 ModuleResult module_result = decoder_.FinishDecoding();
3386 if (module_result.failed()) after_error = true;
3387
3388 if (validate_functions_job_handle_) {
3389 // Wait for background validation to finish, then check if a validation
3390 // error was found.
3391 // TODO(13447): Do not block here; register validation as another finisher
3392 // instead.
3393 validate_functions_job_handle_->Join();
3394 validate_functions_job_handle_.reset();
3395 if (validate_functions_job_data_.found_error) after_error = true;
3396 job_->detected_features_ |=
3397 validate_functions_job_data_.detected_features.load(
3398 std::memory_order_relaxed);
3399 }
3400
3401 job_->wire_bytes_ = ModuleWireBytes(bytes.as_vector());
3402 job_->bytes_copy_ = std::move(bytes);
3403
3404 if (!after_error) {
3405 WasmDetectedFeatures detected_imports_features;
3407 module_result.value().get(), job_->wire_bytes_.module_bytes(),
3408 job_->compile_imports_, &detected_imports_features)) {
3409 after_error = true;
3410 } else {
3411 job_->detected_features_ |= detected_imports_features;
3412 }
3413 }
3414
3415 // Record event metrics.
3416 auto duration = base::TimeTicks::Now() - job_->start_time_;
3417 job_->metrics_event_.success = !after_error;
3418 job_->metrics_event_.streamed = true;
3419 job_->metrics_event_.module_size_in_bytes = job_->wire_bytes_.length();
3420 job_->metrics_event_.function_count = num_functions_;
3421 job_->metrics_event_.wall_clock_duration_in_us = duration.InMicroseconds();
3422 job_->isolate_->metrics_recorder()->DelayMainThreadEvent(job_->metrics_event_,
3423 job_->context_id_);
3424
3425 if (after_error) {
3426 if (job_->native_module_ && job_->native_module_->wire_bytes().empty()) {
3427 // Clean up the temporary cache entry.
3428 GetWasmEngine()->StreamingCompilationFailed(prefix_hasher_.hash(),
3429 job_->compile_imports_);
3430 }
3431 // Calling {Failed} will invalidate the {AsyncCompileJob} and delete {this}.
3432 job_->Failed();
3433 return;
3434 }
3435
3436 std::shared_ptr<WasmModule> module = std::move(module_result).value();
3437
3438 // At this point we identified the module as valid (except maybe for function
3439 // bodies, if lazy validation is enabled).
3440 // This DCHECK could be considered slow, but it only happens once per async
3441 // module compilation, and we only re-decode the module structure, without
3442 // validating function bodies. Overall this does not add a lot of overhead.
3443#ifdef DEBUG
3444 WasmDetectedFeatures detected_module_features;
3445 DCHECK(DecodeWasmModule(job_->enabled_features_,
3446 job_->bytes_copy_.as_vector(),
3447 /* validate functions */ false, kWasmOrigin,
3448 &detected_module_features)
3449 .ok());
3450 // Module decoding should not detect any new features.
3451 DCHECK(job_->detected_features_.contains_all(detected_module_features));
3452#endif
3453
3456 if (prefix_cache_hit_) {
3457 // Restart as an asynchronous, non-streaming compilation. Most likely
3458 // {PrepareAndStartCompile} will get the native module from the cache.
3459 size_t code_size_estimate =
3460 wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module.get());
3461 job_->DoSync<AsyncCompileJob::PrepareAndStartCompile>(
3462 std::move(module), true /* start_compilation */,
3463 false /* lazy_functions_are_validated_ */, code_size_estimate);
3464 return;
3465 }
3466
3467 // We have to open a HandleScope and prepare the Context for
3468 // CreateNativeModule, PrepareRuntimeObjects and FinishCompile as this is a
3469 // callback from the embedder.
3470 HandleScope scope(job_->isolate_);
3471 SaveAndSwitchContext saved_context(job_->isolate_, *job_->native_context_);
3472
3473 // Record the size of the wire bytes and the number of functions. In
3474 // synchronous and asynchronous (non-streaming) compilation, this happens in
3475 // {DecodeWasmModule}.
3476 auto* module_size_histogram =
3477 job_->isolate_->counters()->wasm_wasm_module_size_bytes();
3478 module_size_histogram->AddSample(job_->wire_bytes_.module_bytes().length());
3479 auto* num_functions_histogram =
3480 job_->isolate_->counters()->wasm_functions_per_wasm_module();
3481 num_functions_histogram->AddSample(static_cast<int>(num_functions_));
3482
3483 const bool has_code_section = job_->native_module_ != nullptr;
3484 bool cache_hit = false;
3485 if (!has_code_section) {
3486 // We are processing a WebAssembly module without code section. Create the
3487 // native module now (would otherwise happen in {PrepareAndStartCompile} or
3488 // {ProcessCodeSectionHeader}).
3489 constexpr size_t kCodeSizeEstimate = 0;
3490 cache_hit =
3491 job_->GetOrCreateNativeModule(std::move(module), kCodeSizeEstimate);
3492 } else {
3493 job_->native_module_->SetWireBytes(std::move(job_->bytes_copy_));
3494 }
3495 const bool needs_finish = job_->DecrementAndCheckFinisherCount();
3496 DCHECK_IMPLIES(!has_code_section, needs_finish);
3497 if (needs_finish) {
3498 const bool failed = job_->native_module_->compilation_state()->failed();
3499 if (!cache_hit) {
3500 auto* prev_native_module = job_->native_module_.get();
3501 job_->native_module_ = GetWasmEngine()->UpdateNativeModuleCache(
3502 failed, std::move(job_->native_module_), job_->isolate_);
3503 cache_hit = prev_native_module != job_->native_module_.get();
3504 }
3505 // We finally call {Failed} or {FinishCompile}, which will invalidate the
3506 // {AsyncCompileJob} and delete {this}.
3507 if (failed) {
3508 job_->Failed();
3509 } else {
3510 job_->FinishCompile(cache_hit);
3511 }
3512 }
3513}
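// Sketch of the finisher handshake used above: {outstanding_finishers_} was
// set to 2 in {ProcessCodeSectionHeader}. Both the compilation state (via
// its callback) and this stream finisher decrement it, and only the second
// decrementer (needs_finish == true) updates the native-module cache and
// performs the final {FinishCompile} or {Failed} transition.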
3514
3516 TRACE_STREAMING("Abort stream...\n");
3517 if (validate_functions_job_handle_) {
3518 validate_functions_job_handle_->Cancel();
3519 validate_functions_job_handle_.reset();
3520 }
3521 if (job_->native_module_ && job_->native_module_->wire_bytes().empty()) {
3522 // Clean up the temporary cache entry.
3523 GetWasmEngine()->StreamingCompilationFailed(prefix_hasher_.hash(),
3524 job_->compile_imports_);
3525 }
3526 // {Abort} invalidates the {AsyncCompileJob}, which in turn deletes {this}.
3527 job_->Abort();
3528}
3529
3530bool AsyncStreamingProcessor::Deserialize(
3531 base::Vector<const uint8_t> module_bytes,
3532 base::Vector<const uint8_t> wire_bytes) {
3533 TRACE_EVENT0("v8.wasm", "wasm.Deserialize");
3534 std::optional<TimedHistogramScope> time_scope;
3535 if (base::TimeTicks::IsHighResolution()) {
3536 time_scope.emplace(job_->isolate()->counters()->wasm_deserialization_time(),
3537 job_->isolate());
3538 }
3539 // DeserializeNativeModule and FinishCompile assume that they are executed in
3540 // a HandleScope, and that a context is set on the isolate.
3541 HandleScope scope(job_->isolate_);
3542 SaveAndSwitchContext saved_context(job_->isolate_, *job_->native_context_);
3543
3544 MaybeDirectHandle<WasmModuleObject> result = DeserializeNativeModule(
3545 job_->isolate_, module_bytes, wire_bytes, job_->compile_imports_,
3546 base::VectorOf(job_->stream_->url()));
3547
3548 if (result.is_null()) return false;
3549
3549
3550 job_->module_object_ =
3551 job_->isolate_->global_handles()->Create(*result.ToHandleChecked());
3552 job_->native_module_ = job_->module_object_->shared_native_module();
3554 // Calling {FinishCompile} deletes the {AsyncCompileJob} and {this}.
3555 job_->FinishCompile(false);
3556 return true;
3557}
3558
3559CompilationStateImpl::CompilationStateImpl(
3560 const std::shared_ptr<NativeModule>& native_module,
3561 std::shared_ptr<Counters> async_counters,
3562 WasmDetectedFeatures detected_features)
3563 : native_module_(native_module.get()),
3564 native_module_weak_(std::move(native_module)),
3565 async_counters_(std::move(async_counters)),
3566 compilation_unit_queues_(native_module->num_imported_functions(),
3567 native_module->num_declared_functions()),
3568 detected_features_(detected_features) {}
3569
3570void CompilationStateImpl::InitCompileJob() {
3573 // Create the job, but don't spawn workers yet. This will happen on
3574 // {NotifyConcurrencyIncrease}.
3575 baseline_compile_job_ = V8::GetCurrentPlatform()->CreateJob(
3576 TaskPriority::kUserVisible,
3577 std::make_unique<BackgroundCompileJob>(
3578 native_module_weak_, async_counters_, CompilationTier::kBaseline));
3579 top_tier_compile_job_ = V8::GetCurrentPlatform()->CreateJob(
3580 TaskPriority::kUserVisible,
3581 std::make_unique<BackgroundCompileJob>(
3582 native_module_weak_, async_counters_, CompilationTier::kTopTier));
3583}
3584
3585void CompilationStateImpl::CancelCompilation(
3586 CompilationStateImpl::CancellationPolicy cancellation_policy) {
3587 base::MutexGuard callbacks_guard(&callbacks_mutex_);
3588
3589 if (cancellation_policy == kCancelInitialCompilation &&
3590 finished_events_.contains(
3591 CompilationEvent::kFinishedBaselineCompilation)) {
3592 // Initial compilation already finished; cannot be cancelled.
3593 return;
3594 }
3595
3596 // std::memory_order_relaxed is sufficient because no other state is
3597 // synchronized with |compile_cancelled_|.
3598 compile_cancelled_.store(true, std::memory_order_relaxed);
3599
3600 // No more callbacks after abort.
3601 callbacks_.clear();
3602}
3603
3604bool CompilationStateImpl::cancelled() const {
3605 return compile_cancelled_.load(std::memory_order_relaxed);
3606}
3607
3608void CompilationStateImpl::ApplyCompilationHintToInitialProgress(
3609 const WasmCompilationHint& hint, size_t hint_idx) {
3610 // Get old information.
3611 uint8_t& progress = compilation_progress_[hint_idx];
3612 ExecutionTier old_baseline_tier = RequiredBaselineTierField::decode(progress);
3613 ExecutionTier old_top_tier = RequiredTopTierField::decode(progress);
3614
3615 // Compute new information.
3616 ExecutionTier new_baseline_tier =
3617 ApplyHintToExecutionTier(hint.baseline_tier, old_baseline_tier);
3618 ExecutionTier new_top_tier =
3619 ApplyHintToExecutionTier(hint.top_tier, old_top_tier);
3620 switch (hint.strategy) {
3621 case WasmCompilationHintStrategy::kDefault:
3622 // Be careful not to switch from lazy to non-lazy.
3623 if (old_baseline_tier == ExecutionTier::kNone) {
3624 new_baseline_tier = ExecutionTier::kNone;
3625 }
3626 if (old_top_tier == ExecutionTier::kNone) {
3627 new_top_tier = ExecutionTier::kNone;
3628 }
3629 break;
3630 case WasmCompilationHintStrategy::kLazy:
3631 new_baseline_tier = ExecutionTier::kNone;
3632 new_top_tier = ExecutionTier::kNone;
3633 break;
3634 case WasmCompilationHintStrategy::kEager:
3635 // Nothing to do, use the encoded (new) tiers.
3636 break;
3637 case WasmCompilationHintStrategy::kLazyBaselineEagerTopTier:
3638 new_baseline_tier = ExecutionTier::kNone;
3639 break;
3640 }
3641
3642 progress = RequiredBaselineTierField::update(progress, new_baseline_tier);
3643 progress = RequiredTopTierField::update(progress, new_top_tier);
3644
3645 // Update counter for outstanding baseline units.
3646 outstanding_baseline_units_ += (new_baseline_tier != ExecutionTier::kNone) -
3647 (old_baseline_tier != ExecutionTier::kNone);
3648}
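// Worked example for the counter update above: each comparison yields 0 or
// 1, so the difference is -1, 0, or +1. A hint that switches a function from
// eager Liftoff (old baseline tier kLiftoff) to fully lazy (new baseline
// tier kNone) contributes 0 - 1 = -1 outstanding baseline unit; the reverse
// switch contributes +1.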
3649
3650void CompilationStateImpl::ApplyPgoInfoToInitialProgress(
3651 ProfileInformation* pgo_info) {
3652 // Functions that were executed in the profiling run are eagerly compiled to
3653 // Liftoff.
3654 const WasmModule* module = native_module_->module();
3655 for (int func_index : pgo_info->executed_functions()) {
3656 uint8_t& progress =
3657 compilation_progress_[declared_function_index(module, func_index)];
3658 ExecutionTier old_baseline_tier =
3659 RequiredBaselineTierField::decode(progress);
3660 // If the function is already marked for eager compilation, we are good.
3661 if (old_baseline_tier != ExecutionTier::kNone) continue;
3662
3663 // Set the baseline tier to Liftoff, so we eagerly compile to Liftoff.
3664 // TODO(13288): Compile Liftoff code in the background, if lazy compilation
3665 // is enabled.
3666 progress =
3667 RequiredBaselineTierField::update(progress, ExecutionTier::kLiftoff);
3668 outstanding_baseline_units_++;
3669 }
3670
3671 // Functions that were tiered up during PGO generation are eagerly compiled to
3672 // TurboFan (in the background, not blocking instantiation).
3673 for (int func_index : pgo_info->tiered_up_functions()) {
3674 uint8_t& progress =
3675 compilation_progress_[declared_function_index(module, func_index)];
3676 ExecutionTier old_baseline_tier =
3677 RequiredBaselineTierField::decode(progress);
3678 ExecutionTier old_top_tier = RequiredTopTierField::decode(progress);
3679 // If the function is already marked for eager or background compilation to
3680 // TurboFan, we are good.
3681 if (old_baseline_tier == ExecutionTier::kTurbofan) continue;
3682 if (old_top_tier == ExecutionTier::kTurbofan) continue;
3683
3684 // Set top tier to TurboFan, so we eagerly trigger compilation in the
3685 // background.
3686 progress = RequiredTopTierField::update(progress, ExecutionTier::kTurbofan);
3687 }
3688}
3689
3690void CompilationStateImpl::ApplyPgoInfoLate(ProfileInformation* pgo_info) {
3691 TRACE_EVENT0("v8.wasm", "wasm.ApplyPgoInfo");
3692 const WasmModule* module = native_module_->module();
3693 CompilationUnitBuilder builder{native_module_};
3694
3695 base::MutexGuard guard(&callbacks_mutex_);
3696 // Functions that were executed in the profiling run are eagerly compiled to
3697 // Liftoff (in the background).
3698 for (int func_index : pgo_info->executed_functions()) {
3699 uint8_t& progress =
3700 compilation_progress_[declared_function_index(module, func_index)];
3701 ExecutionTier old_baseline_tier =
3702 RequiredBaselineTierField::decode(progress);
3703 // If the function is already marked for eager compilation, we are good.
3704 if (old_baseline_tier != ExecutionTier::kNone) continue;
3705
3706 // If we already compiled Liftoff or TurboFan code, we are also good.
3707 ExecutionTier reached_tier = ReachedTierField::decode(progress);
3708 if (reached_tier >= ExecutionTier::kLiftoff) continue;
3709
3710 // Set the baseline tier to Liftoff and schedule a compilation unit.
3711 progress =
3712 RequiredBaselineTierField::update(progress, ExecutionTier::kLiftoff);
3713 // Add this as a "top tier unit" since it does not contribute to initial
3714 // compilation ("baseline finished" might already be triggered).
3715 // TODO(clemensb): Rename "baseline finished" to "initial compile finished".
3716 // TODO(clemensb): Avoid scheduling both a Liftoff and a TurboFan unit, or
3717 // prioritize Liftoff when executing the units.
3718 builder.AddTopTierUnit(func_index, ExecutionTier::kLiftoff);
3719 }
3720
3721 // Functions that were tiered up during PGO generation are eagerly compiled to
3722 // TurboFan in the background.
3723 for (int func_index : pgo_info->tiered_up_functions()) {
3724 uint8_t& progress =
3725 compilation_progress_[declared_function_index(module, func_index)];
3726 ExecutionTier old_baseline_tier =
3727 RequiredBaselineTierField::decode(progress);
3728 ExecutionTier old_top_tier = RequiredTopTierField::decode(progress);
3729 // If the function is already marked for eager or background compilation to
3730 // TurboFan, we are good.
3731 if (old_baseline_tier == ExecutionTier::kTurbofan) continue;
3732 if (old_top_tier == ExecutionTier::kTurbofan) continue;
3733
3734 // If we already compiled TurboFan code, we are also good.
3735 ExecutionTier reached_tier = ReachedTierField::decode(progress);
3736 if (reached_tier == ExecutionTier::kTurbofan) continue;
3737
3738 // Set top tier to TurboFan and schedule a compilation unit.
3739 progress = RequiredTopTierField::update(progress, ExecutionTier::kTurbofan);
3740 builder.AddTopTierUnit(func_index, ExecutionTier::kTurbofan);
3741 }
3742 builder.Commit();
3743}
3744
3745void CompilationStateImpl::InitializeCompilationProgress(
3746 ProfileInformation* pgo_info) {
3747 DCHECK(!failed());
3748
3749 base::MutexGuard guard(&callbacks_mutex_);
3750
3751 if (!v8_flags.wasm_jitless) {
3752 auto* module = native_module_->module();
3753
3755
3756 // Compute the default compilation progress for all functions, and set it.
3757 const ExecutionTierPair default_tiers = GetDefaultTiersPerModule(
3758 native_module_, native_module_->IsInDebugState(), IsLazyModule(module));
3759 const uint8_t default_progress =
3760 RequiredBaselineTierField::encode(default_tiers.baseline_tier) |
3761 RequiredTopTierField::encode(default_tiers.top_tier) |
3762 ReachedTierField::encode(ExecutionTier::kNone);
3763 compilation_progress_.assign(module->num_declared_functions,
3764 default_progress);
3765 if (default_tiers.baseline_tier != ExecutionTier::kNone) {
3766 outstanding_baseline_units_ += module->num_declared_functions;
3767 }
3768
3769 // Apply compilation hints, if enabled.
3770 if (native_module_->enabled_features().has_compilation_hints()) {
3771 size_t num_hints = std::min(module->compilation_hints.size(),
3772 size_t{module->num_declared_functions});
3773 for (size_t hint_idx = 0; hint_idx < num_hints; ++hint_idx) {
3774 const auto& hint = module->compilation_hints[hint_idx];
3775 ApplyCompilationHintToInitialProgress(hint, hint_idx);
3776 }
3777 }
3778
3779 // Transform --wasm-eager-tier-up-function, if given, into a fake
3780 // compilation hint.
3781 if (V8_UNLIKELY(
3782 v8_flags.wasm_eager_tier_up_function >= 0 &&
3783 static_cast<uint32_t>(v8_flags.wasm_eager_tier_up_function) >=
3784 module->num_imported_functions &&
3785 static_cast<uint32_t>(v8_flags.wasm_eager_tier_up_function) <
3786 module->functions.size())) {
3787 uint32_t func_idx =
3788 v8_flags.wasm_eager_tier_up_function - module->num_imported_functions;
3789 WasmCompilationHint hint{WasmCompilationHintStrategy::kEager,
3790 WasmCompilationHintTier::kOptimized,
3791 WasmCompilationHintTier::kOptimized};
3792 ApplyCompilationHintToInitialProgress(hint, func_idx);
3793 }
3794 }
3795
3796 // Apply PGO information, if available.
3797 if (pgo_info) ApplyPgoInfoToInitialProgress(pgo_info);
3798
3799 // Trigger callbacks if the module needs no baseline or top tier
3800 // compilation. This can be the case for an empty or fully lazy module.
3801 TriggerOutstandingCallbacks();
3802}
3803
3804void CompilationStateImpl::AddCompilationUnitInternal(
3805 CompilationUnitBuilder* builder, int function_index,
3806 uint8_t function_progress) {
3807 ExecutionTier required_baseline_tier =
3808 CompilationStateImpl::RequiredBaselineTierField::decode(
3809 function_progress);
3810 ExecutionTier required_top_tier =
3811 CompilationStateImpl::RequiredTopTierField::decode(function_progress);
3812 ExecutionTier reached_tier =
3813 CompilationStateImpl::ReachedTierField::decode(function_progress);
3814
3815 if (reached_tier < required_baseline_tier) {
3816 builder->AddBaselineUnit(function_index, required_baseline_tier);
3817 }
3818 if (reached_tier < required_top_tier &&
3819 required_baseline_tier != required_top_tier) {
3820 builder->AddTopTierUnit(function_index, required_top_tier);
3821 }
3822}
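// Example: for progress {required baseline = kLiftoff, required top tier =
// kTurbofan, reached = kNone} this adds one baseline (Liftoff) unit and one
// top-tier (TurboFan) unit. If both required tiers are equal (e.g. both
// kTurbofan), only the baseline unit is added, which avoids scheduling the
// same compilation twice.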
3823
3824void CompilationStateImpl::InitializeCompilationUnits(
3825 std::unique_ptr<CompilationUnitBuilder> builder) {
3826 if (!v8_flags.wasm_jitless) {
3827 int offset = native_module_->module()->num_imported_functions;
3828 {
3829 base::MutexGuard guard(&callbacks_mutex_);
3830
3831 for (size_t i = 0, e = compilation_progress_.size(); i < e; ++i) {
3832 uint8_t function_progress = compilation_progress_[i];
3833 int func_index = offset + static_cast<int>(i);
3834 AddCompilationUnitInternal(builder.get(), func_index,
3835 function_progress);
3836 }
3837 }
3838 }
3839 builder->Commit();
3840}
3841
3842void CompilationStateImpl::AddCompilationUnit(CompilationUnitBuilder* builder,
3843 int func_index) {
3844 int offset = native_module_->module()->num_imported_functions;
3845 int progress_index = func_index - offset;
3846 uint8_t function_progress = 0;
3847 if (!v8_flags.wasm_jitless) {
3848 // TODO(ahaas): This lock may cause overhead. If so, we could get rid of the
3849 // lock as follows:
3850 // 1) Make compilation_progress_ an array of atomic<uint8_t>, and access it
3851 // lock-free.
3852 // 2) Have a copy of compilation_progress_ that we use for initialization.
3853 // 3) Just re-calculate the content of compilation_progress_.
3854 base::MutexGuard guard(&callbacks_mutex_);
3855 function_progress = compilation_progress_[progress_index];
3856 }
3857 AddCompilationUnitInternal(builder, func_index, function_progress);
3858}
3859
3860void CompilationStateImpl::InitializeCompilationProgressAfterDeserialization(
3861 base::Vector<const int> lazy_functions,
3862 base::Vector<const int> eager_functions) {
3863 TRACE_EVENT2("v8.wasm", "wasm.CompilationAfterDeserialization",
3864 "num_lazy_functions", lazy_functions.size(),
3865 "num_eager_functions", eager_functions.size());
3866 std::optional<TimedHistogramScope> lazy_compile_time_scope;
3867 if (base::TimeTicks::IsHighResolution()) {
3868 lazy_compile_time_scope.emplace(
3869 counters()->wasm_compile_after_deserialize());
3870 }
3871
3872 auto* module = native_module_->module();
3873 {
3874 base::MutexGuard guard(&callbacks_mutex_);
3876
3877 // Initialize the compilation progress as if everything was
3878 // TurboFan-compiled.
3879 constexpr uint8_t kProgressAfterTurbofanDeserialization =
3880 RequiredBaselineTierField::encode(ExecutionTier::kLiftoff) |
3881 RequiredTopTierField::encode(ExecutionTier::kTurbofan) |
3882 ReachedTierField::encode(ExecutionTier::kTurbofan);
3883 compilation_progress_.assign(module->num_declared_functions,
3884 kProgressAfterTurbofanDeserialization);
3885
3886 // Update compilation state for lazy functions.
3887 constexpr uint8_t kProgressForLazyFunctions =
3888 RequiredBaselineTierField::encode(ExecutionTier::kNone) |
3889 RequiredTopTierField::encode(ExecutionTier::kNone) |
3890 ReachedTierField::encode(ExecutionTier::kNone);
3891 for (auto func_index : lazy_functions) {
3892 compilation_progress_[declared_function_index(module, func_index)] =
3893 kProgressForLazyFunctions;
3894 }
3895
3896 // Update compilation state for eagerly compiled functions.
3897 constexpr bool kNotLazy = false;
3898 ExecutionTierPair default_tiers = GetDefaultTiersPerModule(
3899 native_module_, native_module_->IsInDebugState(), kNotLazy);
3900 uint8_t progress_for_eager_functions =
3901 RequiredBaselineTierField::encode(default_tiers.baseline_tier) |
3902 RequiredTopTierField::encode(default_tiers.top_tier) |
3903 ReachedTierField::encode(ExecutionTier::kNone);
3904 for (auto func_index : eager_functions) {
3905 // Check that {func_index} is not contained in {lazy_functions}.
3906 DCHECK_EQ(
3907 compilation_progress_[declared_function_index(module, func_index)],
3908 kProgressAfterTurbofanDeserialization);
3909 compilation_progress_[declared_function_index(module, func_index)] =
3910 progress_for_eager_functions;
3911 }
3912 DCHECK_NE(ExecutionTier::kNone, default_tiers.baseline_tier);
3913 outstanding_baseline_units_ += eager_functions.size();
3914
3915 // Baseline compilation is done if we do not have any Liftoff functions to
3916 // compile.
3917 if (eager_functions.empty() || v8_flags.wasm_lazy_compilation) {
3918 finished_events_.Add(CompilationEvent::kFinishedBaselineCompilation);
3919 }
3920 }
3921 auto builder = std::make_unique<CompilationUnitBuilder>(native_module_);
3922 InitializeCompilationUnits(std::move(builder));
3923 if (!v8_flags.wasm_lazy_compilation) {
3924 WaitForCompilationEvent(CompilationEvent::kFinishedBaselineCompilation);
3925 }
3926}
3927
3928void CompilationStateImpl::AddCallback(
3929 std::unique_ptr<CompilationEventCallback> callback) {
3930 base::MutexGuard callbacks_guard(&callbacks_mutex_);
3931 // Immediately trigger events that already happened.
3932 for (auto event : {CompilationEvent::kFinishedBaselineCompilation,
3933 CompilationEvent::kFailedCompilation}) {
3934 if (finished_events_.contains(event)) {
3935 callback->call(event);
3936 }
3937 }
3938 constexpr base::EnumSet<CompilationEvent> kFinalEvents{
3939 CompilationEvent::kFailedCompilation};
3940 if (!finished_events_.contains_any(kFinalEvents)) {
3941 callbacks_.emplace_back(std::move(callback));
3942 }
3943}
3944
3945void CompilationStateImpl::CommitCompilationUnits(
3946 base::Vector<WasmCompilationUnit> baseline_units,
3947 base::Vector<WasmCompilationUnit> top_tier_units) {
3948 base::MutexGuard guard{&mutex_};
3949 if (!baseline_units.empty() || !top_tier_units.empty()) {
3950 compilation_unit_queues_.AddUnits(baseline_units, top_tier_units,
3951 native_module_->module());
3952 }
3953 if (!baseline_units.empty()) {
3954 DCHECK(baseline_compile_job_->IsValid());
3955 baseline_compile_job_->NotifyConcurrencyIncrease();
3956 }
3957 if (!top_tier_units.empty()) {
3958 DCHECK(top_tier_compile_job_->IsValid());
3959 top_tier_compile_job_->NotifyConcurrencyIncrease();
3960 }
3961}
3962
3963void CompilationStateImpl::CommitTopTierCompilationUnit(
3964 WasmCompilationUnit unit) {
3965 CommitCompilationUnits({}, {&unit, 1});
3966}
3967
3968void CompilationStateImpl::AddTopTierPriorityCompilationUnit(
3969 WasmCompilationUnit unit, size_t priority) {
3970 compilation_unit_queues_.AddTopTierPriorityUnit(unit, priority);
3971 // We should not have a {CodeSpaceWriteScope} open at this point, as
3972 // {NotifyConcurrencyIncrease} can spawn new threads which could inherit PKU
3973 // permissions (which would be a security issue).
3974 top_tier_compile_job_->NotifyConcurrencyIncrease();
3975}
3976
3977CompilationUnitQueues::Queue* CompilationStateImpl::GetQueueForCompileTask(
3978 int task_id) {
3979 return compilation_unit_queues_.GetQueueForTask(task_id);
3980}
3981
3982std::optional<WasmCompilationUnit> CompilationStateImpl::GetNextCompilationUnit(
3983 CompilationUnitQueues::Queue* queue, CompilationTier tier) {
3984 return compilation_unit_queues_.GetNextUnit(queue, tier);
3985}
3986
3987void CompilationStateImpl::OnFinishedUnits(
3988 base::Vector<WasmCode*> code_vector) {
3989 TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
3990 "wasm.OnFinishedUnits", "units", code_vector.size());
3991
3992 base::MutexGuard guard(&callbacks_mutex_);
3993
3994 // Assume an order of execution tiers that represents the quality of their
3995 // generated code.
3996 static_assert(ExecutionTier::kNone < ExecutionTier::kLiftoff &&
3997 ExecutionTier::kLiftoff < ExecutionTier::kTurbofan,
3998 "Assume an order on execution tiers");
3999
4000 if (!v8_flags.wasm_jitless) {
4001 DCHECK_EQ(compilation_progress_.size(),
4002 native_module_->module()->num_declared_functions);
4003 }
4004
4005 bool has_top_tier_code = false;
4006
4007 for (size_t i = 0; i < code_vector.size(); i++) {
4008 WasmCode* code = code_vector[i];
4009 DCHECK_NOT_NULL(code);
4010 DCHECK_LT(code->index(), native_module_->num_functions());
4011
4012 has_top_tier_code |= code->tier() == ExecutionTier::kTurbofan;
4013
4014 if (code->index() <
4015 static_cast<int>(native_module_->num_imported_functions())) {
4016 // Import wrapper.
4017 DCHECK_EQ(code->tier(), ExecutionTier::kTurbofan);
4019 } else {
4020 // Function.
4021 DCHECK_NE(code->tier(), ExecutionTier::kNone);
4022
4023 // Read function's compilation progress.
4024 // This view on the compilation progress may differ from the actually
4025 // compiled code. Any lazily compiled function does not contribute to the
4026 // compilation progress but may publish code to the code manager.
4027 int slot_index =
4028 declared_function_index(native_module_->module(), code->index());
4029 uint8_t function_progress = compilation_progress_[slot_index];
4030 ExecutionTier required_baseline_tier =
4031 RequiredBaselineTierField::decode(function_progress);
4032 ExecutionTier reached_tier = ReachedTierField::decode(function_progress);
4033
4034 // Check whether required baseline or top tier are reached.
4035 if (reached_tier < required_baseline_tier &&
4036 required_baseline_tier <= code->tier()) {
4037 DCHECK_GT(outstanding_baseline_units_, 0);
4038 outstanding_baseline_units_--;
4039 }
4040 if (code->tier() == ExecutionTier::kTurbofan) {
4041 bytes_since_last_chunk_ += code->instructions().size();
4042 }
4043
4044 // Update function's compilation progress.
4045 if (code->tier() > reached_tier) {
4046 compilation_progress_[slot_index] = ReachedTierField::update(
4047 compilation_progress_[slot_index], code->tier());
4048 }
4049 // Allow another top tier compilation if deopts are enabled and the
4050 // currently installed code object is a liftoff object.
4051 // Ideally, this would be done only if the code->tier() ==
4052 // ExecutionTier::Liftoff as the code object for which we run this
4053 // function should be the same as the one installed on the native_module.
4054 // This is unfortunately not the case as installing a code object on the
4055 // native module and updating the compilation_progress_ and the
4056 // CompilationUnitQueues::top_tier_compiled_ are not synchronized.
4057 // Note: GetCode() acquires the NativeModule::allocation_mutex_, so this
4058 // could cause deadlocks if any other place acquires
4059 // NativeModule::allocation_mutex_ first and then
4060 // CompilationStateImpl::callbacks_mutex_!
4061 const bool is_liftoff = code->tier() == ExecutionTier::kLiftoff;
4062 auto published_code_is_liftoff = [this](int index) {
4063 WasmCode* code = native_module_->GetCode(index);
4064 if (code == nullptr) return false;
4065 return code->is_liftoff();
4066 };
4067 if (v8_flags.wasm_deopt &&
4068 (is_liftoff || published_code_is_liftoff(code->index()))) {
4069 // Setting the reached tier below the baseline tier would create an
4070 // inconsistent state and has actually led to crashes before (see
4071 // https://crbug.com/379086474).
4072 DCHECK_LE(required_baseline_tier, ExecutionTier::kLiftoff);
4073 compilation_progress_[slot_index] = ReachedTierField::update(
4074 compilation_progress_[slot_index], ExecutionTier::kLiftoff);
4075 compilation_unit_queues_.AllowAnotherTopTierJob(code->index());
4076 }
4078 }
4079 }
4080
4081 // Update the {last_top_tier_compilation_timestamp_} if it is set (i.e. a
4082 // delayed task has already been spawned).
4083 if (has_top_tier_code && !last_top_tier_compilation_timestamp_.IsNull()) {
4084 last_top_tier_compilation_timestamp_ = base::TimeTicks::Now();
4085 }
4086
4087 TriggerOutstandingCallbacks();
4088}
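// Note on the deopt special case above: with --wasm-deopt enabled, a
// function whose published code is (or may concurrently still be) Liftoff
// gets re-opened for another top-tier job, and its reached tier is pinned at
// kLiftoff so a later TurboFan result is not treated as already reached (see
// https://crbug.com/379086474 for the crash this avoids).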
4089
4090namespace {
4091class TriggerCodeCachingAfterTimeoutTask : public v8::Task {
4092 public:
4093 explicit TriggerCodeCachingAfterTimeoutTask(
4094 std::weak_ptr<NativeModule> native_module)
4095 : native_module_(std::move(native_module)) {}
4096
4097 void Run() override {
4098 if (std::shared_ptr<NativeModule> native_module = native_module_.lock()) {
4099 Impl(native_module->compilation_state())->TriggerCachingAfterTimeout();
4100 }
4101 }
4102
4103 private:
4104 const std::weak_ptr<NativeModule> native_module_;
4105};
4106} // namespace
4107
4108void CompilationStateImpl::TriggerOutstandingCallbacks() {
4109 callbacks_mutex_.AssertHeld();
4110
4111 base::EnumSet<CompilationEvent> triggered_events;
4112 if (outstanding_baseline_units_ == 0) {
4113 triggered_events.Add(CompilationEvent::kFinishedBaselineCompilation);
4114 }
4115
4116 // For dynamic tiering, trigger "compilation chunk finished" after a new chunk
4117 // of size {v8_flags.wasm_caching_threshold}.
4118 if (v8_flags.wasm_dynamic_tiering &&
4119 static_cast<size_t>(v8_flags.wasm_caching_threshold) <=
4120 bytes_since_last_chunk_) {
4121 // Trigger caching immediately if
4122 // - there is no timeout,
4123 // - the hard threshold was reached, or
4124 // - we are running single-threaded.
4125 if (v8_flags.single_threaded || v8_flags.wasm_caching_timeout_ms <= 0 ||
4126 static_cast<size_t>(v8_flags.wasm_caching_hard_threshold) <=
4127 bytes_since_last_chunk_) {
4128 triggered_events.Add(CompilationEvent::kFinishedCompilationChunk);
4129 bytes_since_last_chunk_ = 0;
4130 } else if (last_top_tier_compilation_timestamp_.IsNull()) {
4131 // Trigger a task after the given timeout; that task will only trigger
4132 // caching if no new code was added until then. Otherwise, it will
4133 // re-schedule itself.
4134 V8::GetCurrentPlatform()->PostDelayedTaskOnWorkerThread(
4135 TaskPriority::kUserVisible,
4136 std::make_unique<TriggerCodeCachingAfterTimeoutTask>(
4137 native_module_weak_),
4138 1e-3 * v8_flags.wasm_caching_timeout_ms);
4139
4140 // Set the timestamp (will be updated by {OnFinishedUnits} if more
4141 // top-tier compilation finished before the delayed task is being run).
4142 last_top_tier_compilation_timestamp_ = base::TimeTicks::Now();
4143 }
4144 }
4145
4146 if (compile_failed_.load(std::memory_order_relaxed)) {
4147 // *Only* trigger the "failed" event.
4148 triggered_events =
4149 base::EnumSet<CompilationEvent>({CompilationEvent::kFailedCompilation});
4150 }
4151
4152 TriggerCallbacks(triggered_events);
4153}
4154
4155void CompilationStateImpl::TriggerCallbacks(
4156 base::EnumSet<CompilationEvent> events) {
4157 if (events.empty()) return;
4158
4159 // Don't trigger past events again.
4160 events -= finished_events_;
4161 // There can be multiple compilation chunks, thus do not store this.
4162 finished_events_ |= events - CompilationEvent::kFinishedCompilationChunk;
4163
4164 for (auto event :
4165 {std::make_pair(CompilationEvent::kFailedCompilation,
4166 "wasm.CompilationFailed"),
4167 std::make_pair(CompilationEvent::kFinishedBaselineCompilation,
4168 "wasm.BaselineFinished"),
4169 std::make_pair(CompilationEvent::kFinishedCompilationChunk,
4170 "wasm.CompilationChunkFinished")}) {
4171 if (!events.contains(event.first)) continue;
4173 TRACE_EVENT1("v8.wasm", event.second, "id", compilation_id_);
4174 for (auto& callback : callbacks_) {
4175 callback->call(event.first);
4176 }
4177 }
4178
4179 if (outstanding_baseline_units_ == 0) {
4180 auto new_end = std::remove_if(
4181 callbacks_.begin(), callbacks_.end(), [](const auto& callback) {
4182 return callback->release_after_final_event();
4183 });
4184 callbacks_.erase(new_end, callbacks_.end());
4185 }
4186}
4187
4188void CompilationStateImpl::TriggerCachingAfterTimeout() {
4189 base::MutexGuard guard{&callbacks_mutex_};
4190
4191 // It can happen that we reached the hard threshold while waiting for the
4192 // timeout to expire. In that case, {bytes_since_last_chunk_} might be zero
4193 // and there is nothing new to cache.
4194 if (bytes_since_last_chunk_ == 0) return;
4195
4197 base::TimeTicks caching_time =
4198 last_top_tier_compilation_timestamp_ +
4199 base::TimeDelta::FromMilliseconds(v8_flags.wasm_caching_timeout_ms);
4200 base::TimeDelta time_until_caching = caching_time - base::TimeTicks::Now();
4201 // If we are still half a millisecond or more away from the timeout,
4202 // reschedule the task. Otherwise, call the caching callback.
4203 if (time_until_caching >= base::TimeDelta::FromMicroseconds(500)) {
4204 int ms_remaining =
4205 static_cast<int>(time_until_caching.InMillisecondsRoundedUp());
4206 DCHECK_LE(1, ms_remaining);
4207 V8::GetCurrentPlatform()->PostDelayedTaskOnWorkerThread(
4208 TaskPriority::kUserVisible,
4209 std::make_unique<TriggerCodeCachingAfterTimeoutTask>(
4210 native_module_weak_),
4211 ms_remaining);
4212 return;
4213 }
4214
4215 TriggerCallbacks({CompilationEvent::kFinishedCompilationChunk});
4216 last_top_tier_compilation_timestamp_ = base::TimeTicks{};
4217 bytes_since_last_chunk_ = 0;
4218}
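// Timing example (assuming --wasm-caching-timeout-ms=2000): a top-tier unit
// finishing at t=0 sets the timestamp and schedules this task for t=2s. If
// more top-tier code finished at t=1.5s, then caching_time = 1.5s + 2s =
// 3.5s and time_until_caching is about 1.5s >= 0.5ms, so the task re-posts
// itself with ms_remaining = 1500 instead of triggering caching early.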
4219
4220void CompilationStateImpl::OnCompilationStopped(
4221 WasmDetectedFeatures detected_features) {
4222 WasmDetectedFeatures new_detected_features =
4223 UpdateDetectedFeatures(detected_features);
4224 if (new_detected_features.empty()) return;
4225
4226 // New detected features can only happen during eager compilation or if lazy
4227 // validation is enabled.
4228 // The exceptions are currently stringref and imported strings, which are only
4229 // detected on top-tier compilation.
4230 DCHECK(!v8_flags.wasm_lazy_compilation || v8_flags.wasm_lazy_validation ||
4231 (new_detected_features -
4232 WasmDetectedFeatures{{WasmDetectedFeature::stringref,
4233 WasmDetectedFeature::imported_strings_utf8,
4234 WasmDetectedFeature::imported_strings}})
4235 .empty());
4236 // TODO(clemensb): Fix reporting of late detected features (relevant for lazy
4237 // validation and for stringref).
4238}
4239
4240WasmDetectedFeatures CompilationStateImpl::UpdateDetectedFeatures(
4241 WasmDetectedFeatures detected_features) {
4242 WasmDetectedFeatures old_features =
4243 detected_features_.load(std::memory_order_relaxed);
4244 while (!detected_features_.compare_exchange_weak(
4245 old_features, old_features | detected_features,
4246 std::memory_order_relaxed)) {
4247 // Retry with updated {old_features}.
4248 }
4249 return detected_features - old_features;
4250}
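// The compare-exchange loop above is an atomic fetch-OR: on success,
// {old_features} holds the pre-update set, so the returned difference
// contains exactly the bits this call newly set. If two threads both detect
// e.g. {stringref}, only one of them gets it back as "new" and reports it.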
4251
4252void CompilationStateImpl::PublishCompilationResults(
4253 std::vector<UnpublishedWasmCode> unpublished_code) {
4254 if (unpublished_code.empty()) return;
4255
4256#if DEBUG
4257 // We don't compile import wrappers eagerly.
4258 for (const auto& [code, assumptions] : unpublished_code) {
4259 int func_index = code->index();
4260 DCHECK_LE(native_module_->num_imported_functions(), func_index);
4261 DCHECK_LT(func_index, native_module_->num_functions());
4262 }
4263#endif
4264 PublishCode(base::VectorOf(unpublished_code));
4265}
4266
4267std::vector<WasmCode*> CompilationStateImpl::PublishCode(
4268 base::Vector<UnpublishedWasmCode> code) {
4269 WasmCodeRefScope code_ref_scope;
4270 std::vector<WasmCode*> published_code =
4271 native_module_->PublishCode(std::move(code));
4272 // Defer logging code in case wire bytes were not fully received yet.
4273 if (native_module_->log_code() && native_module_->HasWireBytes()) {
4274 GetWasmEngine()->LogCode(base::VectorOf(published_code));
4275 }
4276
4277 OnFinishedUnits(base::VectorOf(published_code));
4278 return published_code;
4279}
4280
4281void CompilationStateImpl::SchedulePublishCompilationResults(
4282 std::vector<UnpublishedWasmCode> unpublished_code, CompilationTier tier) {
4283 PublishState& state = publish_state_[tier];
4284 {
4285 base::MutexGuard guard(&state.mutex_);
4286 if (state.publisher_running_) {
4287 // Add new code to the queue and return.
4288 state.publish_queue_.reserve(state.publish_queue_.size() +
4289 unpublished_code.size());
4290 for (auto& c : unpublished_code) {
4291 state.publish_queue_.emplace_back(std::move(c));
4292 }
4293 return;
4294 }
4295 state.publisher_running_ = true;
4296 }
4297 while (true) {
4298 PublishCompilationResults(std::move(unpublished_code));
4299 unpublished_code.clear();
4300
4301 // Keep publishing new code that came in.
4302 base::MutexGuard guard(&state.mutex_);
4303 DCHECK(state.publisher_running_);
4304 if (state.publish_queue_.empty()) {
4305 state.publisher_running_ = false;
4306 return;
4307 }
4308 unpublished_code.swap(state.publish_queue_);
4309 }
4310}
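// This is a single-publisher drain loop: the first thread to arrive sets
// {publisher_running_} and keeps publishing until {publish_queue_} is empty,
// while concurrent finishers merely enqueue under {mutex_} and return. Code
// publication is thus serialized per tier without blocking compile workers
// on the potentially slow publish step.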
4311
4312size_t CompilationStateImpl::NumOutstandingCompilations(
4313 CompilationTier tier) const {
4314 return compilation_unit_queues_.GetSizeForTier(tier);
4315}
4316
4317void CompilationStateImpl::SetError() {
4318 compile_cancelled_.store(true, std::memory_order_relaxed);
4319 if (compile_failed_.exchange(true, std::memory_order_relaxed)) {
4320 return; // Already failed before.
4321 }
4322
4323 base::MutexGuard callbacks_guard(&callbacks_mutex_);
4324 TriggerOutstandingCallbacks();
4325 callbacks_.clear();
4326}
4327
4328void CompilationStateImpl::WaitForCompilationEvent(
4329 CompilationEvent expect_event) {
4330 switch (expect_event) {
4331 case CompilationEvent::kFinishedBaselineCompilation:
4332 if (baseline_compile_job_->IsValid()) baseline_compile_job_->Join();
4333 break;
4334 default:
4335 // Waiting on any other CompilationEvent doesn't make sense.
4336 UNREACHABLE();
4337 }
4338#ifdef DEBUG
4339 base::EnumSet<CompilationEvent> events{expect_event,
4340 CompilationEvent::kFailedCompilation};
4341 base::MutexGuard guard(&callbacks_mutex_);
4342 DCHECK(finished_events_.contains_any(events));
4343#endif
4344}
4345
4346void CompilationStateImpl::TierUpAllFunctions() {
4347 const WasmModule* module = native_module_->module();
4348 uint32_t num_wasm_functions = module->num_declared_functions;
4349 WasmCodeRefScope code_ref_scope;
4350 CompilationUnitBuilder builder(native_module_);
4351 for (uint32_t i = 0; i < num_wasm_functions; ++i) {
4352 int func_index = module->num_imported_functions + i;
4353 WasmCode* code = native_module_->GetCode(func_index);
4354 if (!code || !code->is_turbofan()) {
4355 builder.AddTopTierUnit(func_index, ExecutionTier::kTurbofan);
4356 }
4357 }
4358 builder.Commit();
4359
4360 // Join the compilation until no compilation units are left.
4361 class DummyDelegate final : public JobDelegate {
4362 bool ShouldYield() override { return false; }
4363 bool IsJoiningThread() const override { return true; }
4364 void NotifyConcurrencyIncrease() override { UNIMPLEMENTED(); }
4365 uint8_t GetTaskId() override { return kMainTaskId; }
4366 };
4367
4368 DummyDelegate delegate;
4369 ExecuteCompilationUnits(native_module_weak_, async_counters_.get(), &delegate,
4370 CompilationTier::kTopTier);
4371
4372 // We cannot wait for other compilation threads to finish, so we explicitly
4373 // compile all functions which are not yet available as TurboFan code.
4374 for (uint32_t i = 0; i < num_wasm_functions; ++i) {
4375 uint32_t func_index = module->num_imported_functions + i;
4376 WasmCode* code = native_module_->GetCode(func_index);
4377 if (!code || !code->is_turbofan()) {
4378 wasm::GetWasmEngine()->CompileFunction(async_counters_.get(),
4379 native_module_, func_index,
4380 wasm::ExecutionTier::kTurbofan);
4381 }
4382 }
4383}
4384
4385WasmCode* CompileImportWrapperForTest(Isolate* isolate,
4386 NativeModule* native_module,
4387 ImportCallKind kind,
4388 const CanonicalSig* sig,
4389 CanonicalTypeIndex type_index,
4390 int expected_arity, Suspend suspend) {
4391 bool source_positions = is_asmjs_module(native_module->module());
4392 if (v8_flags.wasm_jitless) {
4393 WasmImportWrapperCache::ModificationScope cache_scope(
4394 GetWasmImportWrapperCache());
4395 WasmImportWrapperCache::CacheKey key(kind, type_index, expected_arity,
4396 suspend);
4397 DCHECK_NULL(cache_scope[key]);
4398 return nullptr;
4399 }
4400
4401 return GetWasmImportWrapperCache()->CompileWasmImportCallWrapper(
4402 isolate, kind, sig, type_index, source_positions, expected_arity,
4403 suspend);
4404}
4405
4406} // namespace v8::internal::wasm
4407
4408#undef TRACE_COMPILE
4409#undef TRACE_STREAMING
4410#undef TRACE_LAZY
std::shared_ptr< NativeModule > UpdateNativeModuleCache(bool has_error, std::shared_ptr< NativeModule > native_module, Isolate *isolate)
WasmCode * CompileWasmImportCallWrapper(Isolate *isolate, ImportCallKind kind, const CanonicalSig *sig, CanonicalTypeIndex sig_index, bool source_positions, int expected_arity, Suspend suspend)
T const result_
base::Mutex & mutex_
int start
uint32_t count
int end
base::Vector< const DirectHandle< Object > > args
Definition execution.cc:74
v8::Global< v8::Promise::Resolver > resolver_
Isolate * isolate
SourcePositionTable * source_positions
OptionalOpIndex index
int32_t offset
TNode< Object > target
TNode< Object > callback
ZoneVector< RpoNumber > & result
const int func_index_
size_t bytes_since_last_chunk_
std::shared_ptr< NativeModule > native_module_
base::ElapsedTimer timer_
size_t priority
#define CHECK_SIG(import_name, kSigName, kEnumName)
std::atomic< size_t > num_units_[CompilationTier::kNumTiers]
v8::metrics::Recorder::ContextId context_id_
const int num_imported_functions_
base::Mutex mutex
std::priority_queue< TopTierPriorityUnit > top_tier_priority_units
base::TimeTicks last_top_tier_compilation_timestamp_
std::unique_ptr< std::atomic< bool >[]> top_tier_compiled_
std::atomic< bool > compile_failed_
PublishState publish_state_[CompilationTier::kNumTiers]
std::shared_ptr< OperationsBarrier > engine_barrier_
base::Mutex queues_mutex_
std::atomic< size_t > num_priority_units_
const CompileMode compile_mode_
static constexpr size_t kBigUnitsLimit
std::shared_ptr< metrics::Recorder > metrics_recorder_
int outstanding_baseline_units_
std::vector< uint8_t > compilation_progress_
const CompilationTier tier_
size_t func_size
std::unique_ptr< JobHandle > top_tier_compile_job_
#define TRACE_COMPILE(...)
std::shared_ptr< WireBytesStorage > wire_bytes_storage_
base::TimeTicks start_time_
std::unique_ptr< JobHandle > baseline_compile_job_
std::atomic< WasmDetectedFeatures > detected_features_
CompilationUnitQueues compilation_unit_queues_
std::atomic< bool > compile_cancelled_
BigUnitsQueue big_units_queue_
std::weak_ptr< NativeModule > const native_module_weak_
const std::shared_ptr< Counters > async_counters_
std::vector< std::unique_ptr< QueueImpl > > queues_
ExecutionTier baseline_tier
std::vector< WasmCompilationUnit > baseline_units_
base::Mutex callbacks_mutex_
std::atomic< bool > has_units[CompilationTier::kNumTiers]
#define RETURN_ERROR(module_name_string, import_name)
bool publisher_running_
Counters * counters_
const int num_declared_functions_
int compilation_id_
int next_steal_task_id
#define TRACE_STREAMING(...)
#define TRACE_LAZY(...)
std::vector< std::unique_ptr< CompilationEventCallback > > callbacks_
std::vector< WasmCompilationUnit > tiering_units_
std::priority_queue< BigUnit > units[CompilationTier::kNumTiers]
static constexpr int kInvalidCompilationID
std::atomic< int > next_queue_to_add
ExecutionTier top_tier
base::EnumSet< CompilationEvent > finished_events_
std::vector< UnpublishedWasmCode > publish_queue_
std::atomic< int > publish_limit
#define CHECK_USE_COUNTER(feat,...)
const base::Vector< const uint8_t > wire_bytes_
const WasmEnabledFeatures enabled_features_
v8::JobTask JobTask
Definition platform.h:21
v8::JobDelegate JobDelegate
Definition platform.h:20
STL namespace.
constexpr bool IsPowerOfTwo(T value)
Definition bits.h:187
constexpr Vector< T > VectorOf(T *start, size_t size)
Definition vector.h:360
LockGuard< Mutex > MutexGuard
Definition mutex.h:219
template const Signature< wasm::ValueType > bool
V8_EXPORT_PRIVATE WasmCodePointerTable * GetProcessWideWasmCodePointerTable()
void TierUpNowForTesting(Isolate *isolate, Tagged< WasmTrustedInstanceData > trusted_instance_data, int func_index)
WasmImportWrapperCache * GetWasmImportWrapperCache()
constexpr IndependentHeapType kWasmRefExtern
VoidResult DecodeResult
Definition decoder.h:39
void TriggerTierUp(Isolate *isolate, Tagged< WasmTrustedInstanceData > trusted_instance_data, int func_index)
void TierUpAllForTesting(Isolate *isolate, Tagged< WasmTrustedInstanceData > trusted_instance_data)
void InitializeCompilationForTesting(NativeModule *native_module)
bool is_asmjs_module(const WasmModule *module)
constexpr IndependentHeapType kWasmExternRef
constexpr IndependentValueType kWasmI32
ModuleResult DecodeWasmModule(WasmEnabledFeatures enabled_features, base::Vector< const uint8_t > wire_bytes, bool validate_functions, ModuleOrigin origin, Counters *counters, std::shared_ptr< metrics::Recorder > metrics_recorder, v8::metrics::Recorder::ContextId context_id, DecodingMethod decoding_method, WasmDetectedFeatures *detected_features)
WasmCode * CompileImportWrapperForTest(Isolate *isolate, NativeModule *native_module, ImportCallKind kind, const CanonicalSig *sig, CanonicalTypeIndex type_index, int expected_arity, Suspend suspend)
void ThrowLazyCompilationError(Isolate *isolate, const NativeModule *native_module, int func_index)
MaybeDirectHandle< WasmModuleObject > DeserializeNativeModule(Isolate *isolate, base::Vector< const uint8_t > data, base::Vector< const uint8_t > wire_bytes_vec, const CompileTimeImports &compile_imports, base::Vector< const char > source_url)
std::shared_ptr< NativeModule > CompileToNativeModule(Isolate *isolate, WasmEnabledFeatures enabled_features, WasmDetectedFeatures detected_features, CompileTimeImports compile_imports, ErrorThrower *thrower, std::shared_ptr< const WasmModule > module, base::OwnedVector< const uint8_t > wire_bytes, int compilation_id, v8::metrics::Recorder::ContextId context_id, ProfileInformation *pgo_info)
WasmError ValidateAndSetBuiltinImports(const WasmModule *module, base::Vector< const uint8_t > wire_bytes, const CompileTimeImports &imports, WasmDetectedFeatures *detected)
size_t ContentSize(const std::vector< T > &vector)
constexpr int kMaxPolymorphism
WasmError ValidateFunctions(const WasmModule *module, WasmEnabledFeatures enabled_features, base::Vector< const uint8_t > wire_bytes, std::function< bool(int)> filter, WasmDetectedFeatures *detected_features_out)
DecodeResult ValidateFunctionBody(Zone *zone, WasmEnabledFeatures enabled, const WasmModule *module, WasmDetectedFeatures *detected, const FunctionBody &body)
WasmEngine * GetWasmEngine()
int declared_function_index(const WasmModule *module, int func_index)
std::unique_ptr< ProfileInformation > LoadProfileFromFile(const WasmModule *module, base::Vector< const uint8_t > wire_bytes)
Definition pgo.cc:222
bool IsCrossInstanceCall(Tagged< Object > obj, Isolate *const isolate)
void PublishDetectedFeatures(WasmDetectedFeatures detected_features, Isolate *isolate, bool is_initial_compilation)
bool CompileLazy(Isolate *isolate, Tagged< WasmTrustedInstanceData > trusted_instance_data, int func_index)
WasmError GetWasmErrorWithName(base::Vector< const uint8_t > wire_bytes, int func_index, const WasmModule *module, WasmError error)
V8_INLINE IndirectHandle< T > handle(Tagged< T > object, Isolate *isolate)
Definition handles-inl.h:72
void PrintF(const char *format,...)
Definition utils.cc:39
wasm::WasmModule WasmModule
V8_INLINE constexpr bool IsSmi(TaggedImpl< kRefType, StorageType > obj)
Definition objects.h:665
kWasmInternalFunctionIndirectPointerTag kProtectedInstanceDataOffset sig
V8_INLINE constexpr bool operator<(Builtin a, Builtin b)
Definition builtins.h:75
V8_EXPORT_PRIVATE FlagValues v8_flags
constexpr int kMaxInt
Definition globals.h:374
JSArrayBuffer::IsDetachableBit is_shared
wasm::WasmFunction WasmFunction
kInterpreterTrampolineOffset script
uint32_t equals
uint32_t fromCharCode
uint32_t charCodeAt
uint32_t fromCodePoint
uint32_t cast
uint32_t codePointAt
uint32_t compare
uint32_t concat
#define UNREACHABLE()
Definition logging.h:67
#define DCHECK_LE(v1, v2)
Definition logging.h:490
#define DCHECK_NULL(val)
Definition logging.h:491
#define CHECK_IMPLIES(lhs, rhs)
#define CHECK(condition)
Definition logging.h:124
#define DCHECK_NOT_NULL(val)
Definition logging.h:492
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define UNIMPLEMENTED()
Definition logging.h:66
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define DCHECK_GT(v1, v2)
Definition logging.h:487
#define USE(...)
Definition macros.h:293
#define arraysize(array)
Definition macros.h:67
#define UPDATE_WHEN_CLASS_CHANGES(classname, size)
WasmName GetNameOrNull(WireBytesRef ref) const
base::Vector< const uint8_t > module_bytes() const
void UpdateDetectedFeatures(WasmDetectedFeatures new_detected_features)
std::atomic< WasmDetectedFeatures > detected_features
void AddUnit(int declared_func_index, base::Vector< const uint8_t > code, JobHandle *job_handle)
bool has_array(ModuleTypeIndex index) const
std::vector< WasmImport > import_table
std::vector< WasmGlobal > globals
TypeFeedbackStorage type_feedback
#define TRACE_EVENT0(category_group, name)
#define TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)
#define TRACE_DISABLED_BY_DEFAULT(name)
#define TRACE_EVENT1(category_group, name, arg1_name, arg1_val)
#define V8_LIKELY(condition)
Definition v8config.h:661
#define V8_WARN_UNUSED_RESULT
Definition v8config.h:671
#define V8_UNLIKELY(condition)
Definition v8config.h:660
#define V8_NODISCARD
Definition v8config.h:693
#define FOREACH_WASM_STAGING_FEATURE_FLAG(V)
#define FOREACH_WASM_SHIPPED_FEATURE_FLAG(V)
#define FOREACH_WASM_NON_FLAG_FEATURE(V)
const wasm::WasmModule * module_
#define SELECT_WASM_COUNTER(counters, origin, prefix, suffix)
#define ZONE_NAME
Definition zone.h:22