v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
wasm-code-manager.cc
1// Copyright 2017 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
7#include <algorithm>
8#include <iomanip>
9#include <numeric>
10#include <optional>
11
12#include "src/base/atomicops.h"
14#include "src/base/iterator.h"
15#include "src/base/macros.h"
20#include "src/base/vector.h"
26#include "src/common/globals.h"
30#include "src/logging/log.h"
33#include "src/utils/ostreams.h"
40#include "src/wasm/pgo.h"
44#include "src/wasm/wasm-debug.h"
53
54#if V8_ENABLE_DRUMBRAKE
56#endif // V8_ENABLE_DRUMBRAKE
57
58#if defined(V8_OS_WIN64)
60#endif // V8_OS_WIN64
61
62#define TRACE_HEAP(...) \
63 do { \
64 if (v8_flags.trace_wasm_native_heap) PrintF(__VA_ARGS__); \
65 } while (false)
66
67namespace v8 {
68namespace internal {
69namespace wasm {
70
71using trap_handler::ProtectedInstructionData;
72
73// Check that {WasmCode} objects are sufficiently small. We create many of them,
74// often for rather small functions.
75// Increase the limit if needed, but first check if the size increase is
76// justified.
77#ifndef V8_GC_MOLE
78static_assert(sizeof(WasmCode) <= 104);
79#endif
80
82 base::AddressRegion new_region) {
83 // Find the possible insertion position by identifying the first region whose
84 // start address is not less than that of {new_region}. Since there cannot be
85 // any overlap between regions, this also means that the start of {above} is
86 // greater than or equal to the *end* of {new_region}.
87 auto above = regions_.lower_bound(new_region);
88 DCHECK(above == regions_.end() || above->begin() >= new_region.end());
89
90 // Check whether to merge with {above}.
91 if (above != regions_.end() && new_region.end() == above->begin()) {
92 base::AddressRegion merged_region{new_region.begin(),
93 new_region.size() + above->size()};
94 DCHECK_EQ(merged_region.end(), above->end());
95 // Check whether to also merge with the region below.
96 if (above != regions_.begin()) {
97 auto below = above;
98 --below;
99 if (below->end() == new_region.begin()) {
100 merged_region = {below->begin(), below->size() + merged_region.size()};
101 regions_.erase(below);
102 }
103 }
104 auto insert_pos = regions_.erase(above);
105 regions_.insert(insert_pos, merged_region);
106 return merged_region;
107 }
108
109 // No element below, and not adjacent to {above}: insert and done.
110 if (above == regions_.begin()) {
111 regions_.insert(above, new_region);
112 return new_region;
113 }
114
115 auto below = above;
116 --below;
117 // Consistency check:
118 DCHECK(above == regions_.end() || below->end() < above->begin());
119
120 // Adjacent to {below}: merge and done.
121 if (below->end() == new_region.begin()) {
122 base::AddressRegion merged_region{below->begin(),
123 below->size() + new_region.size()};
124 DCHECK_EQ(merged_region.end(), new_region.end());
125 regions_.erase(below);
126 regions_.insert(above, merged_region);
127 return merged_region;
128 }
129
130 // Not adjacent to any existing region: insert between {below} and {above}.
131 DCHECK_LT(below->end(), new_region.begin());
132 regions_.insert(above, new_region);
133 return new_region;
134}
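// Worked example of the merge logic above (hypothetical addresses): if the pool
// holds [0x1000, 0x2000) and [0x3000, 0x4000), then Merge({0x2000, 0x1000}) is
// adjacent to both neighbors, so all three regions collapse into the single
// region [0x1000, 0x4000), which is also returned. A region adjacent to
// nothing, e.g. Merge({0x5000, 0x1000}), is simply inserted and returned as-is.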
135
137 return AllocateInRegion(size,
138 {kNullAddress, std::numeric_limits<size_t>::max()});
139}
140
142 size_t size, base::AddressRegion region) {
143 // Get an iterator to the first contained region whose start address is not
144 // smaller than the start address of {region}. Start the search from the
145 // region one before that (the last one whose start address is smaller).
146 auto it = regions_.lower_bound(region);
147 if (it != regions_.begin()) --it;
148
149 for (auto end = regions_.end(); it != end; ++it) {
150 base::AddressRegion overlap = it->GetOverlap(region);
151 if (size > overlap.size()) continue;
152 base::AddressRegion ret{overlap.begin(), size};
153 base::AddressRegion old = *it;
154 auto insert_pos = regions_.erase(it);
155 if (size == old.size()) {
156 // We use the full region --> nothing to add back.
157 } else if (ret.begin() == old.begin()) {
158 // We return a region at the start --> shrink old region from front.
159 regions_.insert(insert_pos, {old.begin() + size, old.size() - size});
160 } else if (ret.end() == old.end()) {
161 // We return a region at the end --> shrink remaining region.
162 regions_.insert(insert_pos, {old.begin(), old.size() - size});
163 } else {
164 // We return something in the middle --> split the remaining region
165 // (insert the region with smaller address first).
166 regions_.insert(insert_pos, {old.begin(), ret.begin() - old.begin()});
167 regions_.insert(insert_pos, {ret.end(), old.end() - ret.end()});
168 }
169 return ret;
170 }
171 return {};
172}
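// Illustration of the splitting cases above (hypothetical values): if the pool
// holds [0x1000, 0x4000), Allocate(0x800) returns [0x1000, 0x1800) and keeps
// the shrunk remainder [0x1800, 0x4000); an AllocateInRegion call whose overlap
// starts in the middle of a pooled region splits the remainder into two pieces.
// If no contained region is large enough, an empty region is returned, which
// callers detect via is_empty().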
173
182
186
191
195
200
201std::unique_ptr<const uint8_t[]> WasmCode::ConcatenateBytes(
202 std::initializer_list<base::Vector<const uint8_t>> vectors) {
203 size_t total_size = 0;
204 for (auto& vec : vectors) total_size += vec.size();
205 // Use default-initialization (== no initialization).
206 std::unique_ptr<uint8_t[]> result{new uint8_t[total_size]};
207 uint8_t* ptr = result.get();
208 for (auto& vec : vectors) {
209 if (vec.empty()) continue; // Avoid nullptr in {memcpy}.
210 memcpy(ptr, vec.begin(), vec.size());
211 ptr += vec.size();
212 }
213 return result;
214}
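// Usage sketch (hypothetical vectors a, b, c): ConcatenateBytes({a, b, c})
// returns one freshly allocated buffer of a.size() + b.size() + c.size() bytes
// with the inputs copied back to back; empty inputs are skipped only to avoid
// passing nullptr to memcpy, the result is the same.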
215
218 if (kind() != WasmCode::kWasmFunction) return;
219 if (protected_instructions_size_ == 0) return;
220
222
223 size_t size = instructions().size();
224 auto protected_instruction_data = this->protected_instructions();
225 const int index =
226 RegisterHandlerData(base, size, protected_instruction_data.size(),
227 protected_instruction_data.begin());
228
229 // TODO(eholk): if index is negative, fail.
230 CHECK_LE(0, index);
233}
234
236 // The return value is cached in {WasmEngine::IsolateData::log_codes}. Ensure
237 // to call {WasmEngine::EnableCodeLogging} if this return value would change
238 // for any isolate. Otherwise we might lose code events.
239 return isolate->IsLoggingCodeCreation();
240}
241
242std::string WasmCode::DebugName() const {
243 switch (kind()) {
245 return "wasm-to-c";
246 case kJumpTable:
247 return "jump-table";
248 case kWasmToJsWrapper:
249 return "wasm-to-js";
250#if V8_ENABLE_DRUMBRAKE
251 case kInterpreterEntry:
252 return "interpreter entry";
253#endif // V8_ENABLE_DRUMBRAKE
254 case kWasmFunction:
255 // Gets handled below
256 break;
257 }
258
259 ModuleWireBytes wire_bytes(native_module()->wire_bytes());
260 const WasmModule* module = native_module()->module();
261 WireBytesRef name_ref =
262 module->lazily_generated_names.LookupFunctionName(wire_bytes, index());
263 WasmName name = wire_bytes.GetNameOrNull(name_ref);
264 std::string name_buffer;
265 if (name.empty()) {
266 name_buffer.resize(32);
267 name_buffer.resize(
268 SNPrintF(base::VectorOf(&name_buffer.front(), name_buffer.size()),
269 "wasm-function[%d]", index()));
270 } else {
271 name_buffer.append(name.begin(), name.end());
272 }
273 return name_buffer;
274}
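// Example (hypothetical module): a function with a name section entry "add" is
// reported as "add", while an unnamed function at index 3 falls back to the
// synthesized name "wasm-function[3]".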
275
276void WasmCode::LogCode(Isolate* isolate, const char* source_url,
277 int script_id) const {
278 DCHECK(ShouldBeLogged(isolate));
280
281 std::string fn_name = DebugName();
282 WasmName name = base::VectorOf(fn_name);
283
284 if (native_module_) {
285 const WasmModule* module = native_module_->module();
286 const WasmDebugSymbols& symbol =
287 module->debug_symbols[WasmDebugSymbols::Type::SourceMap];
288 auto load_wasm_source_map = isolate->wasm_load_source_map_callback();
289 auto source_map = native_module_->GetWasmSourceMap();
290 if (!source_map && symbol.type == WasmDebugSymbols::Type::SourceMap &&
291 !symbol.external_url.is_empty() && load_wasm_source_map) {
293 WasmName external_url = wire_bytes.GetNameOrNull(symbol.external_url);
294 std::string external_url_string(external_url.data(), external_url.size());
295 HandleScope scope(isolate);
296 v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
297 Local<v8::String> source_map_str =
298 load_wasm_source_map(v8_isolate, external_url_string.c_str());
300 std::make_unique<WasmModuleSourceMap>(v8_isolate, source_map_str));
301 }
302 }
303
304 // Record source positions before adding code, otherwise when code is added,
305 // there are no source positions to associate with the added code.
306 if (!source_positions().empty()) {
307 LOG_CODE_EVENT(isolate, WasmCodeLinePosInfoRecordEvent(instruction_start(),
309 }
310
311 int code_offset = 0;
312 if (!IsAnonymous()) {
313 code_offset = native_module_->module()->functions[index_].code.offset();
314 }
315 PROFILE(isolate, CodeCreateEvent(LogEventListener::CodeTag::kFunction, this,
316 name, source_url, code_offset, script_id));
317}
318
319namespace {
320bool ProtectedInstructionDataCompare(const ProtectedInstructionData& left,
321 const ProtectedInstructionData& right) {
322 return left.instr_offset < right.instr_offset;
323}
324} // namespace
325
330 static_cast<uint32_t>(pc - instruction_start())};
331 return std::binary_search(instructions.begin(), instructions.end(), offset,
332 ProtectedInstructionDataCompare);
333}
334
335void WasmCode::Validate() const {
336 // The packing strategy for {tagged_parameter_slots} only works if both the
337 // max number of parameters and their max combined stack slot usage fit into
338 // their respective half of the result value.
339 static_assert(wasm::kV8MaxWasmFunctionParams <
340 std::numeric_limits<uint16_t>::max());
341 static constexpr int kMaxSlotsPerParam = 4; // S128 on 32-bit platforms.
342 static_assert(wasm::kV8MaxWasmFunctionParams * kMaxSlotsPerParam <
343 std::numeric_limits<uint16_t>::max());
344
345#ifdef DEBUG
346 NativeModule::CallIndirectTargetMap function_index_map;
347 if (native_module_) {
348 function_index_map =
350 }
351 // Scope for foreign WasmCode pointers.
352 WasmCodeRefScope code_ref_scope;
353 // We expect certain relocation info modes to never appear in {WasmCode}
354 // objects or to be restricted to a small set of valid values. Hence the
355 // iteration below does not use a mask, but visits all relocation data.
357 !it.done(); it.next()) {
358 RelocInfo::Mode mode = it.rinfo()->rmode();
359 switch (mode) {
361 Address target = it.rinfo()->wasm_call_address();
362 WasmCode* code = native_module_->Lookup(target);
363 CHECK_NOT_NULL(code);
364 CHECK_EQ(WasmCode::kJumpTable, code->kind());
365 CHECK(code->contains(target));
366 break;
367 }
369 Address target = it.rinfo()->wasm_stub_call_address();
370 WasmCode* code = native_module_->Lookup(target);
371 CHECK_NOT_NULL(code);
372 CHECK_EQ(WasmCode::kJumpTable, code->kind());
373 CHECK(code->contains(target));
374 break;
375 }
377 uint32_t sig_id = it.rinfo()->wasm_canonical_sig_id();
378 CHECK_LE(sig_id, GetTypeCanonicalizer()->GetCurrentNumberOfTypes());
379 break;
380 }
382 WasmCodePointer call_target =
383 it.rinfo()->wasm_code_pointer_table_entry();
384 uint32_t function_index = function_index_map.at(call_target);
385 CHECK_EQ(call_target,
386 native_module_->GetCodePointerHandle(function_index));
387 break;
388 }
391 Address target = it.rinfo()->target_internal_reference();
392 CHECK(contains(target));
393 break;
394 }
398 // These are OK to appear.
399 break;
400 default:
401 FATAL("Unexpected mode: %d", mode);
402 }
403 }
404#endif
405}
406
408 // Determines whether flags want this code to be printed.
409 bool function_index_matches =
410 (!IsAnonymous() &&
411 v8_flags.print_wasm_code_function_index == static_cast<int>(index()));
412 if (v8_flags.print_code ||
413 (kind() == kWasmFunction
414 ? (v8_flags.print_wasm_code || function_index_matches)
415 : v8_flags.print_wasm_stub_code.value())) {
416 std::string name = DebugName();
417 Print(name.c_str());
418 }
419}
420
421void WasmCode::Print(const char* name) const {
422 StdoutStream os;
423 os << "--- WebAssembly code ---\n";
424 Disassemble(name, os);
426 if (auto* debug_side_table =
428 debug_side_table->Print(os);
429 }
430 }
431 os << "--- End code ---\n";
432}
433
434void WasmCode::Disassemble(const char* name, std::ostream& os,
435 Address current_pc) const {
436 if (name) os << "name: " << name << "\n";
437 if (!IsAnonymous()) os << "index: " << index() << "\n";
438 os << "kind: " << GetWasmCodeKindAsString(kind()) << "\n";
439 if (kind() == kWasmFunction) {
441 const char* compiler =
442 is_liftoff() ? (for_debugging() ? "Liftoff (debug)" : "Liftoff")
443 : "TurboFan";
444 os << "compiler: " << compiler << "\n";
445 }
446 size_t padding = instructions().size() - unpadded_binary_size_;
447 os << "Body (size = " << instructions().size() << " = "
448 << unpadded_binary_size_ << " + " << padding << " padding)\n";
449
450 int instruction_size = unpadded_binary_size_;
451 if (constant_pool_offset_ < instruction_size) {
452 instruction_size = constant_pool_offset_;
453 }
454 if (safepoint_table_offset_ && safepoint_table_offset_ < instruction_size) {
455 instruction_size = safepoint_table_offset_;
456 }
457 if (handler_table_offset_ < instruction_size) {
458 instruction_size = handler_table_offset_;
459 }
460 DCHECK_LT(0, instruction_size);
461
462#ifdef ENABLE_DISASSEMBLER
463 os << "Instructions (size = " << instruction_size << ")\n";
464 Disassembler::Decode(nullptr, os, instructions().begin(),
465 instructions().begin() + instruction_size,
466 CodeReference(this), current_pc);
467 os << "\n";
468
469 if (handler_table_size() > 0) {
470 HandlerTable table(this);
471 os << "Exception Handler Table (size = " << table.NumberOfReturnEntries()
472 << "):\n";
473 table.HandlerTableReturnPrint(os);
474 os << "\n";
475 }
476
478 os << "Protected instructions:\n pc offset\n";
479 for (auto& data : protected_instructions()) {
480 os << std::setw(10) << std::hex << data.instr_offset << std::setw(10)
481 << "\n";
482 }
483 os << "\n";
484 }
485
486 if (!source_positions().empty()) {
487 os << "Source positions:\n pc offset position\n";
489 it.Advance()) {
490 os << std::setw(10) << std::hex << it.code_offset() << std::dec
491 << std::setw(10) << it.source_position().ScriptOffset()
492 << (it.is_statement() ? " statement" : "") << "\n";
493 }
494 os << "\n";
495 }
496
497 if (deopt_data_size_ > 0) {
499 const WasmDeoptData data = view.GetDeoptData();
500 os << "Deopt exits (entries = " << data.entry_count
501 << ", byte size = " << deopt_data_size_ << ")\n";
502 constexpr char pc_offset[] = "pc-offset";
503 constexpr char source_offset[] = " source-offset";
504 constexpr char translation_index[] = " translation-index";
505 os << pc_offset << source_offset << translation_index << '\n';
506 uint32_t code_offset = data.deopt_exit_start_offset;
507 for (uint32_t i = 0; i < data.entry_count; ++i) {
508 WasmDeoptEntry entry = view.GetDeoptEntry(i);
509 os << std::setw(sizeof pc_offset - 1) << std::hex << code_offset
510 << std::dec << std::setw(sizeof source_offset - 1)
511 << entry.bytecode_offset << std::setw(sizeof translation_index - 1)
512 << entry.translation_index << '\n';
514 }
515 os << '\n';
516 }
517
518 if (safepoint_table_offset_ > 0) {
519 SafepointTable table(this);
520 table.Print(os);
521 os << "\n";
522 }
523
524 os << "RelocInfo (size = " << reloc_info().size() << ")\n";
526 !it.done(); it.next()) {
527 it.rinfo()->Print(nullptr, os);
528 }
529 os << "\n";
530#else // !ENABLE_DISASSEMBLER
531 os << "Instructions (size = " << instruction_size << ", "
532 << static_cast<void*>(instructions().begin()) << "-"
533 << static_cast<void*>(instructions().begin() + instruction_size) << ")\n";
534#endif // !ENABLE_DISASSEMBLER
535}
536
538 switch (kind) {
540 return "wasm function";
542 return "wasm-to-capi";
544 return "wasm-to-js";
545#if V8_ENABLE_DRUMBRAKE
546 case WasmCode::kInterpreterEntry:
547 return "interpreter entry";
548#endif // V8_ENABLE_DRUMBRAKE
550 return "jump table";
551 }
552 return "unknown kind";
553}
554
560
564
565// static
567 // Decrement the ref counter of all given code objects. Keep the ones whose
568 // ref count drops to zero.
569 WasmEngine::DeadCodeMap dead_code;
570 std::vector<WasmCode*> dead_wrappers;
571 for (WasmCode* code : code_vec) {
572 if (!code->DecRef()) continue; // Remaining references.
573 NativeModule* native_module = code->native_module();
574 if (native_module != nullptr) {
575 dead_code[native_module].push_back(code);
576 } else {
577 dead_wrappers.push_back(code);
578 }
579 }
580
581 if (dead_code.empty() && dead_wrappers.empty()) return;
582
583 GetWasmEngine()->FreeDeadCode(dead_code, dead_wrappers);
584}
585
589 !iterator.done() && iterator.code_offset() < code_offset;
590 iterator.Advance()) {
591 position = iterator.source_position();
592 }
593 return position;
594}
595
597 return GetSourcePositionBefore(code_offset).ScriptOffset();
598}
599
600std::tuple<int, bool, SourcePosition> WasmCode::GetInliningPosition(
601 int inlining_id) const {
602 const size_t elem_size = sizeof(int) + sizeof(bool) + sizeof(SourcePosition);
603 const uint8_t* start = inlining_positions().begin() + elem_size * inlining_id;
605 std::tuple<int, bool, SourcePosition> result;
606 std::memcpy(&std::get<0>(result), start, sizeof std::get<0>(result));
607 std::memcpy(&std::get<1>(result), start + sizeof std::get<0>(result),
608 sizeof std::get<1>(result));
609 std::memcpy(&std::get<2>(result),
610 start + sizeof std::get<0>(result) + sizeof std::get<1>(result),
611 sizeof std::get<2>(result));
612 return result;
613}
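// Layout note (assuming a typical 64-bit build with sizeof(int) == 4,
// sizeof(bool) == 1 and sizeof(SourcePosition) == 8): each packed entry above
// occupies 13 unaligned bytes, which is why the individual fields are extracted
// with memcpy instead of casting the buffer to a tuple pointer.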
614
624
625WasmCodeAllocator::WasmCodeAllocator(std::shared_ptr<Counters> async_counters)
626 : async_counters_(std::move(async_counters)) {
627 owned_code_space_.reserve(4);
628}
629
634
636 DCHECK(owned_code_space_.empty());
638 if (code_space.IsReserved()) {
639 free_code_space_.Merge(code_space.region());
640 owned_code_space_.emplace_back(std::move(code_space));
641 async_counters_->wasm_module_num_code_spaces()->AddSample(1);
642 } else {
643 async_counters_->wasm_module_num_code_spaces()->AddSample(0);
644 }
645}
646
647namespace {
648// On Windows, we cannot commit a region that straddles different reservations
649// of virtual memory. Because we bump-allocate and, when we need more memory,
650// append it at the end of the owned_code_space_ list, we traverse that list
651// in reverse order to find the reservation(s) that determine how to chunk the
652// region to commit.
653#if V8_OS_WIN
654constexpr bool kNeedsToSplitRangeByReservations = true;
655#else
656constexpr bool kNeedsToSplitRangeByReservations = false;
657#endif
658
659base::SmallVector<base::AddressRegion, 1> SplitRangeByReservationsIfNeeded(
661 const std::vector<VirtualMemory>& owned_code_space) {
662 if (!kNeedsToSplitRangeByReservations) return {range};
663
665 size_t missing_begin = range.begin();
666 size_t missing_end = range.end();
667 for (auto& vmem : base::Reversed(owned_code_space)) {
668 Address overlap_begin = std::max(missing_begin, vmem.address());
669 Address overlap_end = std::min(missing_end, vmem.end());
670 if (overlap_begin >= overlap_end) continue;
671 split_ranges.emplace_back(overlap_begin, overlap_end - overlap_begin);
672 // Opportunistically reduce the missing range. This might terminate the loop
673 // early.
674 if (missing_begin == overlap_begin) missing_begin = overlap_end;
675 if (missing_end == overlap_end) missing_end = overlap_begin;
676 if (missing_begin >= missing_end) break;
677 }
678#ifdef ENABLE_SLOW_DCHECKS
679 // The returned vector should cover the full range.
680 size_t total_split_size = 0;
681 for (auto split : split_ranges) total_split_size += split.size();
682 DCHECK_EQ(range.size(), total_split_size);
683#endif
684 return split_ranges;
685}
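// Illustration (hypothetical reservations): with owned code spaces
// [0x10000, 0x20000) and [0x20000, 0x30000), committing [0x1F000, 0x21000) on
// Windows yields the two pieces [0x20000, 0x21000) and [0x1F000, 0x20000)
// (found in reverse reservation order); on other platforms the range is
// returned unchanged.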
686
687int NumWasmFunctionsInFarJumpTable(uint32_t num_declared_functions) {
689 ? static_cast<int>(num_declared_functions)
690 : 0;
691}
692
693// Returns an overapproximation of the code size overhead per new code space
694// created by the jump tables.
695size_t OverheadPerCodeSpace(uint32_t num_declared_functions) {
696 // Overhead for the jump table.
697 size_t overhead = RoundUp<kCodeAlignment>(
698 JumpTableAssembler::SizeForNumberOfSlots(num_declared_functions));
699
700#if defined(V8_OS_WIN64)
701 // On Win64, we need to reserve some pages at the beginning of an executable
702 // space. See {AddCodeSpace}.
703 overhead += Heap::GetCodeRangeReservedAreaSize();
704#endif // V8_OS_WIN64
705
706 // Overhead for the far jump table.
708 overhead +=
711 NumWasmFunctionsInFarJumpTable(num_declared_functions)));
712 }
713
714 return overhead;
715}
716
717// Returns an estimate how much code space should be reserved, taking overhead
718// per code space into account (for jump tables). This can be smaller than the
719// passed-in {needed_size}, see comments in the code.
720size_t ReservationSizeForWasmCode(size_t needed_size,
721 int num_declared_functions,
722 size_t total_reserved_so_far) {
723 DCHECK_EQ(needed_size == 0, num_declared_functions == 0);
724 if (needed_size == 0) return 0;
725
726 size_t overhead = OverheadPerCodeSpace(num_declared_functions);
727
728 // Reserve the maximum of
729 // a) needed size + overhead (this is the minimum needed)
730 // b) 2 * overhead (to not waste too much space by overhead)
731 // c) 1/4 of current total reservation size (to grow exponentially)
732 // For the minimum size we only take the overhead into account and not the
733 // code space estimate, for two reasons:
734 // - The code space estimate is only an estimate; we might actually need less
735 // space later.
736 // - When called at module construction time we pass the estimate for all
737 // code in the module; this can still be split up into multiple spaces
738 // later.
739 size_t minimum_size = 2 * overhead;
740 size_t suggested_size = std::max(
741 std::max(RoundUp<kCodeAlignment>(needed_size) + overhead, minimum_size),
742 total_reserved_so_far / 4);
743
744 const size_t max_code_space_size =
745 size_t{v8_flags.wasm_max_code_space_size_mb} * MB;
746 if (V8_UNLIKELY(minimum_size > max_code_space_size)) {
747 auto oom_detail = base::FormattedString{}
748 << "required reservation minimum (" << minimum_size
749 << ") is bigger than supported maximum ("
750 << max_code_space_size << ")";
752 "Exceeding maximum wasm code space size",
753 oom_detail.PrintToArray().data());
754 UNREACHABLE();
755 }
756
757 // Limit by the maximum code space size.
758 return std::min(max_code_space_size, suggested_size);
759}
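// Worked example (hypothetical numbers): with needed_size = 64 KB, a per-space
// overhead of 16 KB and 4 MB reserved so far, the candidates are
// a) 64 KB + 16 KB = 80 KB, b) 2 * 16 KB = 32 KB and c) 4 MB / 4 = 1 MB, so
// 1 MB is requested (the exponential-growth term dominates), subject to the
// v8_flags.wasm_max_code_space_size_mb cap.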
760
761// Same as above, but for wrapper code space which does not have jump tables.
762size_t ReservationSizeForWrappers(size_t needed_size,
763 size_t total_reserved_so_far) {
764 needed_size = RoundUp<kCodeAlignment>(needed_size);
765 // Reserve the maximum of
766 // a) needed size
767 // c) 1/4 of current total reservation size (to grow exponentially)
768 size_t suggested_size = std::max(needed_size, total_reserved_so_far / 4);
769
770 const size_t max_code_space_size =
771 size_t{v8_flags.wasm_max_code_space_size_mb} * MB;
772 if (V8_UNLIKELY(needed_size > max_code_space_size)) {
773 auto oom_detail = base::FormattedString{}
774 << "required reservation minimum (" << needed_size
775 << ") is bigger than supported maximum ("
776 << max_code_space_size << ")";
778 "Exceeding maximum wasm code space size",
779 oom_detail.PrintToArray().data());
780 UNREACHABLE();
781 }
782
783 // Limit by the maximum code space size.
784 return std::min(max_code_space_size, suggested_size);
785}
786
787// Sentinel value to be used for {AllocateForCodeInRegion} for specifying no
788// restriction on the region to allocate in.
789constexpr base::AddressRegion kUnrestrictedRegion{
790 kNullAddress, std::numeric_limits<size_t>::max()};
791
792} // namespace
793
795 base::AddressRegion region) {
796#if defined(V8_OS_WIN64)
797 // On some platforms, specifically Win64, we need to reserve some pages at
798 // the beginning of an executable space.
799 // See src/heap/spaces.cc, MemoryAllocator::InitializeCodePageAllocator() and
800 // https://cs.chromium.org/chromium/src/components/crash/content/app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
801 // for details.
802 if (WasmCodeManager::CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
803 size_t size = Heap::GetCodeRangeReservedAreaSize();
804 DCHECK_LT(0, size);
805 base::Vector<uint8_t> padding =
806 AllocateForCodeInRegion(native_module, size, region);
807 CHECK_EQ(reinterpret_cast<Address>(padding.begin()), region.begin());
809 reinterpret_cast<void*>(region.begin()), region.size());
810 }
811#endif // V8_OS_WIN64
812}
813
815 NativeModule* native_module, size_t size) {
816 return AllocateForCodeInRegion(native_module, size, kUnrestrictedRegion);
817}
818
820 return AllocateForCodeInRegion(nullptr, size, kUnrestrictedRegion);
821}
822
823// {native_module} may be {nullptr} when allocating wrapper code.
825 NativeModule* native_module, size_t size, base::AddressRegion region) {
826 DCHECK_LT(0, size);
828 size = RoundUp<kCodeAlignment>(size);
829 base::AddressRegion code_space =
831 if (V8_UNLIKELY(code_space.is_empty())) {
832 // Only allocations without a specific region are allowed to fail. Otherwise
833 // the region must have been allocated big enough to hold all initial
834 // allocations (jump tables etc).
835 CHECK_EQ(kUnrestrictedRegion, region);
836
837 size_t total_reserved = 0;
838 for (auto& vmem : owned_code_space_) total_reserved += vmem.size();
839 size_t reserve_size =
840 native_module
841 ? ReservationSizeForWasmCode(
842 size, native_module->module()->num_declared_functions,
843 total_reserved)
844 : ReservationSizeForWrappers(size, total_reserved);
845 if (reserve_size < size) {
846 auto oom_detail = base::FormattedString{}
847 << "cannot reserve space for " << size
848                        << " bytes of code (maximum reservation size is "
849 << reserve_size << ")";
850 V8::FatalProcessOutOfMemory(nullptr, "Grow wasm code space",
851 oom_detail.PrintToArray().data());
852 }
853 VirtualMemory new_mem = code_manager->TryAllocate(reserve_size);
854 if (!new_mem.IsReserved()) {
855 auto oom_detail = base::FormattedString{}
856 << "cannot allocate more code space (" << reserve_size
857 << " bytes, currently " << total_reserved << ")";
858 V8::FatalProcessOutOfMemory(nullptr, "Grow wasm code space",
859 oom_detail.PrintToArray().data());
860 UNREACHABLE();
861 }
862
863 base::AddressRegion new_region = new_mem.region();
864 free_code_space_.Merge(new_region);
865 owned_code_space_.emplace_back(std::move(new_mem));
866 InitializeCodeRange(native_module, new_region);
867 if (native_module) {
868 code_manager->AssignRange(new_region, native_module);
869 native_module->AddCodeSpaceLocked(new_region);
870
871 async_counters_->wasm_module_num_code_spaces()->AddSample(
872 static_cast<int>(owned_code_space_.size()));
873 }
874
875 code_space = free_code_space_.Allocate(size);
876 CHECK(!code_space.is_empty());
877 }
878 const Address commit_page_size = CommitPageSize();
879 Address commit_start = RoundUp(code_space.begin(), commit_page_size);
880 Address commit_end = RoundUp(code_space.end(), commit_page_size);
881 // {commit_start} will be either code_space.start or the start of the next
882 // page. {commit_end} will be the start of the page after the one in which
883 // the allocation ends.
884 // We start from an aligned start, and we know we allocated vmem in
885 // page multiples.
886 // We just need to commit what's not committed. The page in which we
887 // start is already committed (or we start at the beginning of a page).
888 // The end needs to be committed all through the end of the page.
889 if (commit_start < commit_end) {
890 for (base::AddressRegion split_range : SplitRangeByReservationsIfNeeded(
891 {commit_start, commit_end - commit_start}, owned_code_space_)) {
892 code_manager->Commit(split_range);
893 }
894 committed_code_space_.fetch_add(commit_end - commit_start);
895 // Committed code cannot grow bigger than maximum code space size.
897 v8_flags.wasm_max_committed_code_mb * MB);
898 }
899 DCHECK(IsAligned(code_space.begin(), kCodeAlignment));
900 generated_code_size_.fetch_add(code_space.size(), std::memory_order_relaxed);
901
902 TRACE_HEAP("Code alloc for %p: 0x%" PRIxPTR ",+%zu\n", this,
903 code_space.begin(), size);
904 return {reinterpret_cast<uint8_t*>(code_space.begin()), code_space.size()};
905}
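// Commit illustration (hypothetical 4 KB commit pages): an allocation spanning
// [0x12345, 0x13345) gives commit_start = 0x13000 and commit_end = 0x14000, so
// only the not-yet-committed page [0x13000, 0x14000) is committed; the page
// containing 0x12345 was already committed by the allocation before it.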
906
908 // Zap code area and collect freed code regions.
909 DisjointAllocationPool freed_regions;
910 size_t code_size = 0;
911 for (WasmCode* code : codes) {
912 code_size += code->instructions().size();
913 freed_regions.Merge(base::AddressRegion{code->instruction_start(),
914 code->instructions().size()});
915 ThreadIsolation::UnregisterWasmAllocation(code->instruction_start(),
916 code->instructions().size());
917 }
918 freed_code_size_.fetch_add(code_size);
919
920 // Merge {freed_regions} into {freed_code_space_} and put all ranges of full
921 // pages to decommit into {regions_to_decommit} (decommitting is expensive,
922 // so try to merge regions before decommitting).
923 DisjointAllocationPool regions_to_decommit;
924 size_t commit_page_size = CommitPageSize();
925 for (auto region : freed_regions.regions()) {
926 auto merged_region = freed_code_space_.Merge(region);
927 Address discard_start =
928 std::max(RoundUp(merged_region.begin(), commit_page_size),
929 RoundDown(region.begin(), commit_page_size));
930 Address discard_end =
931 std::min(RoundDown(merged_region.end(), commit_page_size),
932 RoundUp(region.end(), commit_page_size));
933 if (discard_start >= discard_end) continue;
934 regions_to_decommit.Merge({discard_start, discard_end - discard_start});
935 }
936
938 for (auto region : regions_to_decommit.regions()) {
939 [[maybe_unused]] size_t old_committed =
940 committed_code_space_.fetch_sub(region.size());
941 DCHECK_GE(old_committed, region.size());
942 for (base::AddressRegion split_range :
943 SplitRangeByReservationsIfNeeded(region, owned_code_space_)) {
944 code_manager->Decommit(split_range);
945 }
946 }
947}
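// Decommit illustration (hypothetical 4 KB pages, no adjacent free space):
// freeing code covering [0x13000, 0x14200) decommits only the fully freed page
// [0x13000, 0x14000); the partially used page starting at 0x14000 stays
// committed until the rest of it is freed and merged into freed_code_space_.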
948
950 return owned_code_space_.size();
951}
952
954 WasmDetectedFeatures detected_features,
955 CompileTimeImports compile_imports,
956 VirtualMemory code_space,
957 std::shared_ptr<const WasmModule> module,
958 std::shared_ptr<Counters> async_counters,
959 std::shared_ptr<NativeModule>* shared_this)
960 : engine_scope_(
961 GetWasmEngine()->GetBarrierForBackgroundCompile()->TryLock()),
962 code_allocator_(async_counters),
963 enabled_features_(enabled_features),
964 compile_imports_(std::move(compile_imports)),
965 module_(std::move(module)),
966 fast_api_targets_(
967 new std::atomic<Address>[module_->num_imported_functions]()),
968 fast_api_signatures_(
969 new std::atomic<
970 const MachineSignature*>[module_->num_imported_functions]()) {
972 // We receive a pointer to an empty {std::shared_ptr}, and install ourselves
973 // there.
974 DCHECK_NOT_NULL(shared_this);
975 DCHECK_NULL(*shared_this);
976 shared_this->reset(this);
978 *shared_this, std::move(async_counters), detected_features);
979 compilation_state_->InitCompileJob();
981 if (module_->num_declared_functions > 0) {
983 std::make_unique<WasmCode*[]>(module_->num_declared_functions);
984 InitializeCodePointerTableHandles(module_->num_declared_functions);
985 tiering_budgets_ = std::make_unique<std::atomic<uint32_t>[]>(
986 module_->num_declared_functions);
987 // The tiering budget is accessed directly from generated code.
988 static_assert(sizeof(*tiering_budgets_.get()) == sizeof(uint32_t));
989
990 std::fill_n(tiering_budgets_.get(), module_->num_declared_functions,
991 v8_flags.wasm_tiering_budget);
992 }
993
994 if (v8_flags.wasm_jitless) return;
995
996 // Even though there cannot be another thread using this object (since we
997 // are just constructing it), we need to hold the mutex to fulfill the
998 // precondition of {WasmCodeAllocator::Init}, which calls
999 // {NativeModule::AddCodeSpaceLocked}.
1001 auto initial_region = code_space.region();
1002 code_allocator_.Init(std::move(code_space));
1003 const bool has_code_space = initial_region.size() > 0;
1004 DCHECK_EQ(has_code_space, module_->num_declared_functions != 0);
1005 if (has_code_space) {
1006 code_allocator_.InitializeCodeRange(this, initial_region);
1007 AddCodeSpaceLocked(initial_region);
1008 }
1009}
1010
1013 if (!WasmCode::ShouldBeLogged(isolate)) return;
1014
1015 TRACE_EVENT1("v8.wasm", "wasm.LogWasmCodes", "functions",
1016 module_->num_declared_functions);
1017
1018 Tagged<Object> url_obj = script->name();
1019 DCHECK(IsString(url_obj) || IsUndefined(url_obj));
1020 std::unique_ptr<char[]> source_url =
1021 IsString(url_obj) ? Cast<String>(url_obj)->ToCString()
1022 : std::unique_ptr<char[]>(new char[1]{'\0'});
1023
1024 // Log all owned code, not just the current entries in the code table. This
1025 // will also include import wrappers.
1026 WasmCodeRefScope code_ref_scope;
1027 for (auto& code : SnapshotAllOwnedCode()) {
1028 code->LogCode(isolate, source_url.get(), script->id());
1029 }
1030}
1031
1033 uint64_t signature_hash) {
1034 const size_t relocation_size = code->relocation_size();
1035 base::OwnedVector<uint8_t> reloc_info =
1036 base::OwnedCopyOf(code->relocation_start(), relocation_size);
1037 DirectHandle<TrustedByteArray> source_pos_table(code->source_position_table(),
1039 int source_pos_len = source_pos_table->length();
1040 base::OwnedVector<uint8_t> source_pos =
1041 base::OwnedCopyOf(source_pos_table->begin(), source_pos_len);
1042
1044 base::Vector<const uint8_t> instructions(
1045 reinterpret_cast<uint8_t*>(code->body_start()),
1046 static_cast<size_t>(code->body_size()));
1047 const int stack_slots = code->stack_slots();
1048
1049 // Metadata offsets in InstructionStream objects are relative to the start of
1050 // the metadata section, whereas WasmCode expects offsets relative to
1051 // instruction_start.
1052 const int base_offset = code->instruction_size();
1053 // TODO(jgruber,v8:8758): Remove this translation. It exists only because
1054 // InstructionStream objects contain real offsets but WasmCode expects an
1055 // offset of 0 to mean 'empty'.
1056 const int safepoint_table_offset =
1057 code->has_safepoint_table() ? base_offset + code->safepoint_table_offset()
1058 : 0;
1059 const int handler_table_offset = base_offset + code->handler_table_offset();
1060 const int constant_pool_offset = base_offset + code->constant_pool_offset();
1061 const int code_comments_offset = base_offset + code->code_comments_offset();
1062
1064 base::Vector<uint8_t> dst_code_bytes =
1065 code_allocator_.AllocateForCode(this, instructions.size());
1066 {
1067 WritableJitAllocation jit_allocation =
1069 reinterpret_cast<Address>(dst_code_bytes.begin()),
1070 dst_code_bytes.size(),
1072 jit_allocation.CopyCode(0, instructions.begin(), instructions.size());
1073
1074 // Apply the relocation delta by iterating over the RelocInfo.
1075 intptr_t delta = reinterpret_cast<Address>(dst_code_bytes.begin()) -
1076 code->instruction_start();
1077 int mode_mask =
1080 auto jump_tables_ref =
1082 Address dst_code_addr = reinterpret_cast<Address>(dst_code_bytes.begin());
1083 Address constant_pool_start = dst_code_addr + constant_pool_offset;
1084 RelocIterator orig_it(*code, mode_mask);
1085 for (WritableRelocIterator it(jit_allocation, dst_code_bytes,
1086 reloc_info.as_vector(), constant_pool_start,
1087 mode_mask);
1088 !it.done(); it.next(), orig_it.next()) {
1089 RelocInfo::Mode mode = it.rinfo()->rmode();
1090 if (RelocInfo::IsWasmStubCall(mode)) {
1091 uint32_t stub_call_tag = orig_it.rinfo()->wasm_call_tag();
1092 DCHECK_LT(stub_call_tag,
1093 static_cast<uint32_t>(Builtin::kFirstBytecodeHandler));
1094 Builtin builtin = static_cast<Builtin>(stub_call_tag);
1095 Address entry = GetJumpTableEntryForBuiltin(builtin, jump_tables_ref);
1096 it.rinfo()->set_wasm_stub_call_address(entry);
1097 } else if (RelocInfo::IsWasmCodePointerTableEntry(mode)) {
1098 uint32_t function_index =
1099 it.rinfo()->wasm_code_pointer_table_entry().value();
1100 WasmCodePointer target = GetCodePointerHandle(function_index);
1101 it.rinfo()->set_wasm_code_pointer_table_entry(target,
1103 } else {
1104 it.rinfo()->apply(delta);
1105 }
1106 }
1107 }
1108
1109 // Flush the i-cache after relocation.
1110 FlushInstructionCache(dst_code_bytes.begin(), dst_code_bytes.size());
1111
1112 std::unique_ptr<WasmCode> new_code{
1113 new WasmCode{this, // native_module
1114 kAnonymousFuncIndex, // index
1115 dst_code_bytes, // instructions
1116 stack_slots, // stack_slots
1117 0, // ool_spills
1118 0, // tagged_parameter_slots
1119 safepoint_table_offset, // safepoint_table_offset
1120 handler_table_offset, // handler_table_offset
1121 constant_pool_offset, // constant_pool_offset
1122 code_comments_offset, // code_comments_offset
1123 instructions.length(), // unpadded_binary_size
1124 {}, // protected_instructions
1125 reloc_info.as_vector(), // reloc_info
1126 source_pos.as_vector(), // source positions
1127 {}, // inlining positions
1128 {}, // deopt data
1130 ExecutionTier::kNone, // tier
1131 kNotForDebugging, // for_debugging
1132 signature_hash}}; // signature_hash
1133 new_code->MaybePrint();
1134 new_code->Validate();
1135
1136 return PublishCodeLocked(std::move(new_code),
1138}
1139
1141 uint32_t num_wasm_functions) {
1142 if (!num_wasm_functions) return;
1144
1149
1150 CHECK_EQ(1, code_space_data_.size());
1151 const CodeSpaceData& code_space_data = code_space_data_[0];
1152 DCHECK_NOT_NULL(code_space_data.jump_table);
1153 DCHECK_NOT_NULL(code_space_data.far_jump_table);
1154
1155 Address compile_lazy_address =
1156 code_space_data.far_jump_table->instruction_start() +
1158 BuiltinLookup::JumptableIndexForBuiltin(Builtin::kWasmCompileLazy));
1159
1161 lazy_compile_table_->instruction_start(), num_wasm_functions,
1162 module_->num_imported_functions, compile_lazy_address);
1163
1165 code_space_data.jump_table->instruction_start(), num_wasm_functions,
1167
1168 WasmCodePointerTable* code_pointer_table =
1171 "Initialize WasmCodePointerTable");
1172 DCHECK_LE(num_wasm_functions, code_pointer_handles_size_);
1173 TypeCanonicalizer* type_canonicalizer = GetTypeCanonicalizer();
1174 for (uint32_t i = 0; i < num_wasm_functions; i++) {
1175 uint64_t signature_hash = module_->signature_hash(
1176 type_canonicalizer, module_->num_imported_functions + i);
1177 code_pointer_table->SetEntrypointWithWriteScope(
1179 code_space_data.jump_table->instruction_start() +
1181 signature_hash, write_scope);
1182 }
1183}
1184
1185void NativeModule::UseLazyStubLocked(uint32_t func_index) {
1187 DCHECK_LE(module_->num_imported_functions, func_index);
1188 DCHECK_LT(func_index,
1189 module_->num_imported_functions + module_->num_declared_functions);
1190 // Avoid opening a new write scope per function. The caller should hold the
1191 // scope instead.
1192
1194
1195 // Add jump table entry for jump to the lazy compile stub.
1196 uint32_t slot_index = declared_function_index(module(), func_index);
1197 DCHECK_NULL(code_table_[slot_index]);
1198
1199 Address jump_table_target =
1202 Address lazy_compile_target =
1205 uint64_t signature_hash =
1206 module_->signature_hash(GetTypeCanonicalizer(), func_index);
1207 PatchJumpTablesLocked(slot_index, lazy_compile_target, jump_table_target,
1208 signature_hash);
1209}
1210
1211std::unique_ptr<WasmCode> NativeModule::AddCode(
1212 int index, const CodeDesc& desc, int stack_slots, int ool_spill_count,
1213 uint32_t tagged_parameter_slots,
1214 base::Vector<const uint8_t> protected_instructions_data,
1215 base::Vector<const uint8_t> source_position_table,
1216 base::Vector<const uint8_t> inlining_positions,
1218 ExecutionTier tier, ForDebugging for_debugging) {
1219 base::Vector<uint8_t> code_space;
1220 NativeModule::JumpTablesRef jump_table_ref;
1221 {
1223 code_space = code_allocator_.AllocateForCode(this, desc.instr_size);
1224 jump_table_ref =
1226 }
1227 // Only Liftoff code can have the {frame_has_feedback_slot} bit set.
1229 bool frame_has_feedback_slot = false;
1231 reinterpret_cast<Address>(code_space.begin()), code_space.size(),
1233 return AddCodeWithCodeSpace(
1234 index, desc, stack_slots, ool_spill_count, tagged_parameter_slots,
1235 protected_instructions_data, source_position_table, inlining_positions,
1236 deopt_data, kind, tier, for_debugging, frame_has_feedback_slot,
1237 code_space, jump_table_ref);
1238}
1239
1241 WasmCodePointerTable* code_pointer_table =
1243 for (uint32_t i = 0; i < code_pointer_handles_size_; i++) {
1244 code_pointer_table->FreeEntry(code_pointer_handles_[i]);
1245 }
1246
1247 code_pointer_handles_.reset();
1249}
1250
1252 uint32_t num_wasm_functions) {
1253 if (code_pointer_handles_size_ != 0) {
1254 // During testing, we might already have code pointer handles allocated.
1256 }
1258 std::make_unique<WasmCodePointer[]>(num_wasm_functions);
1259 code_pointer_handles_size_ = num_wasm_functions;
1260
1261 WasmCodePointerTable* code_pointer_table =
1263 for (uint32_t i = 0; i < num_wasm_functions; i++) {
1264 code_pointer_handles_[i] = code_pointer_table->AllocateUninitializedEntry();
1265 }
1266}
1267
1268std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
1269 int index, const CodeDesc& desc, int stack_slots, int ool_spill_count,
1270 uint32_t tagged_parameter_slots,
1271 base::Vector<const uint8_t> protected_instructions_data,
1272 base::Vector<const uint8_t> source_position_table,
1273 base::Vector<const uint8_t> inlining_positions,
1275 ExecutionTier tier, ForDebugging for_debugging,
1276 bool frame_has_feedback_slot, base::Vector<uint8_t> dst_code_bytes,
1277 const JumpTablesRef& jump_tables) {
1278 base::Vector<uint8_t> reloc_info{
1279 desc.buffer + desc.buffer_size - desc.reloc_size,
1280 static_cast<size_t>(desc.reloc_size)};
1281 UpdateCodeSize(desc.instr_size, tier, for_debugging);
1282
1283 // TODO(jgruber,v8:8758): Remove this translation. It exists only because
1284 // CodeDesc contains real offsets but WasmCode expects an offset of 0 to mean
1285 // 'empty'.
1286 const int safepoint_table_offset =
1287 desc.safepoint_table_size == 0 ? 0 : desc.safepoint_table_offset;
1288 const int handler_table_offset = desc.handler_table_offset;
1289 const int constant_pool_offset = desc.constant_pool_offset;
1290 const int code_comments_offset = desc.code_comments_offset;
1291 const int instr_size = desc.instr_size;
1292
1293 {
1295 reinterpret_cast<Address>(dst_code_bytes.begin()),
1297 true);
1298 jit_allocation.CopyCode(0, desc.buffer, desc.instr_size);
1299
1300 // Apply the relocation delta by iterating over the RelocInfo.
1301 intptr_t delta = dst_code_bytes.begin() - desc.buffer;
1302 int mode_mask =
1306 Address code_start = reinterpret_cast<Address>(dst_code_bytes.begin());
1307 Address constant_pool_start = code_start + constant_pool_offset;
1308
1309 for (WritableRelocIterator it(jit_allocation, dst_code_bytes, reloc_info,
1310 constant_pool_start, mode_mask);
1311 !it.done(); it.next()) {
1312 RelocInfo::Mode mode = it.rinfo()->rmode();
1313 if (RelocInfo::IsWasmCall(mode)) {
1314 uint32_t call_tag = it.rinfo()->wasm_call_tag();
1315 Address target = GetNearCallTargetForFunction(call_tag, jump_tables);
1316 it.rinfo()->set_wasm_call_address(target);
1317 } else if (RelocInfo::IsWasmStubCall(mode)) {
1318 uint32_t stub_call_tag = it.rinfo()->wasm_call_tag();
1319 DCHECK_LT(stub_call_tag,
1320 static_cast<uint32_t>(Builtin::kFirstBytecodeHandler));
1321 Builtin builtin = static_cast<Builtin>(stub_call_tag);
1322 Address entry = GetJumpTableEntryForBuiltin(builtin, jump_tables);
1323 it.rinfo()->set_wasm_stub_call_address(entry);
1324 } else if (RelocInfo::IsWasmCodePointerTableEntry(mode)) {
1325 uint32_t function_index =
1326 it.rinfo()->wasm_code_pointer_table_entry().value();
1327 WasmCodePointer target = GetCodePointerHandle(function_index);
1328 it.rinfo()->set_wasm_code_pointer_table_entry(target,
1330 } else {
1331 it.rinfo()->apply(delta);
1332 }
1333 }
1334 }
1335
1336 // Flush the i-cache after relocation.
1337 FlushInstructionCache(dst_code_bytes.begin(), dst_code_bytes.size());
1338
1339 // Liftoff code will not be relocated or serialized, thus do not store any
1340 // relocation information.
1341 if (tier == ExecutionTier::kLiftoff) reloc_info = {};
1342
1343 uint64_t signature_hash =
1344 module_->signature_hash(GetTypeCanonicalizer(), index);
1345
1346 std::unique_ptr<WasmCode> code{new WasmCode{this,
1347 index,
1348 dst_code_bytes,
1350 ool_spill_count,
1351 tagged_parameter_slots,
1352 safepoint_table_offset,
1353 handler_table_offset,
1354 constant_pool_offset,
1355 code_comments_offset,
1356 instr_size,
1357 protected_instructions_data,
1358 reloc_info,
1359 source_position_table,
1360 inlining_positions,
1361 deopt_data,
1362 kind,
1363 tier,
1364 for_debugging,
1365 signature_hash,
1366 frame_has_feedback_slot}};
1367
1368 code->MaybePrint();
1369 code->Validate();
1370
1371 return code;
1372}
1373
1375 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
1376 "wasm.PublishCode");
1378 return PublishCodeLocked(std::move(unpublished_code.code),
1379 unpublished_code.assumptions.get());
1380}
1381
1382std::vector<WasmCode*> NativeModule::PublishCode(
1383 base::Vector<UnpublishedWasmCode> unpublished_codes) {
1384 TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
1385 "wasm.PublishCode", "number", unpublished_codes.size());
1386 std::vector<WasmCode*> published_code;
1387 published_code.reserve(unpublished_codes.size());
1389 // The published code is put into the top-most surrounding {WasmCodeRefScope}.
1390 for (auto& unpublished_code : unpublished_codes) {
1391 WasmCode* code = PublishCodeLocked(std::move(unpublished_code.code),
1392 unpublished_code.assumptions.get());
1393 if (code == nullptr) {
1394 // There were invalid assumptions in the code.
1395 DCHECK_NOT_NULL(unpublished_code.assumptions);
1396 continue;
1397 }
1398 published_code.push_back(code);
1399 }
1400 return published_code;
1401}
1402
1405 // The {~WasmCodeRefScope} destructor must run after releasing the {lock},
1406 // to avoid lock order inversion.
1407 WasmCodeRefScope ref_scope;
1410 module_->type_feedback.well_known_imports.Update(entries);
1413 }
1414}
1415
1417 switch (result.kind) {
1420#if V8_ENABLE_DRUMBRAKE
1421 case WasmCompilationResult::kInterpreterEntry:
1422 return WasmCode::Kind::kInterpreterEntry;
1423#endif // V8_ENABLE_DRUMBRAKE
1426 default:
1427 UNREACHABLE();
1428 }
1429}
1430
1431WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> owned_code,
1432 AssumptionsJournal* assumptions) {
1434 DCHECK_LT(owned_code->index(), num_functions());
1435
1436 if (assumptions != nullptr) {
1437 // We should not allocate an empty set to avoid unnecessary overhead.
1438 DCHECK(!assumptions->empty());
1439 // Only Turbofan makes assumptions.
1440 DCHECK_EQ(ExecutionTier::kTurbofan, owned_code->tier());
1441 // Assumptions are not used for imports.
1442 DCHECK_GE(owned_code->index(),
1443 static_cast<int>(module_->num_imported_functions));
1444 WellKnownImportsList& current = module_->type_feedback.well_known_imports;
1445 for (auto [import_index, status] : assumptions->import_statuses()) {
1446 if (V8_UNLIKELY(current.get(import_index) != status)) {
1447 compilation_state_->AllowAnotherTopTierJob(owned_code->index());
1448 return nullptr;
1449 }
1450 }
1451 }
1452
1453 WasmCode* code = owned_code.get();
1454 new_owned_code_.emplace_back(std::move(owned_code));
1455 DCHECK_NULL(owned_code);
1456
1457 // Add the code to the surrounding code ref scope, so the returned pointer is
1458 // guaranteed to be valid.
1460
1461 if (code->index() < static_cast<int>(module_->num_imported_functions)) {
1462 return code;
1463 }
1464
1465 code->RegisterTrapHandlerData();
1466
1467 // Assume an order of execution tiers that represents the quality of their
1468 // generated code.
1471 "Assume an order on execution tiers");
1472
1473 uint32_t slot_idx = declared_function_index(module(), code->index());
1474 WasmCode* prior_code = code_table_[slot_idx];
1475 // If we are tiered down, install all debugging code (except for stepping
1476 // code, which is only used for a single frame and never installed in the
1477 // code table or jump table). Otherwise, install code if it was compiled
1478 // with a higher tier.
1479 static_assert(
1481 "for_debugging is ordered");
1482
1483 if (should_update_code_table(code, prior_code)) {
1484 code_table_[slot_idx] = code;
1485 if (prior_code) {
1486 WasmCodeRefScope::AddRef(prior_code);
1487 // The code is added to the current {WasmCodeRefScope}, hence the ref
1488 // count cannot drop to zero here.
1489 prior_code->DecRefOnLiveCode();
1490 }
1491
1492 PatchJumpTablesLocked(slot_idx, code->instruction_start(),
1493 code->instruction_start(), code->signature_hash());
1494 } else {
1495 // The code table does not hold a reference to the code, hence decrement
1496 // the initial ref count of 1. The code was added to the
1497 // {WasmCodeRefScope} though, so it cannot die here.
1498 code->DecRefOnLiveCode();
1499 }
1500
1501 return code;
1502}
1503
1505 WasmCode* prior_code) const {
1506 if (new_code->for_debugging() == kForStepping) {
1507 // Never install stepping code.
1508 return false;
1509 }
1510 if (debug_state_ == kDebugging) {
1511 if (new_code->for_debugging() == kNotForDebugging) {
1512 // In debug state, only install debug code.
1513 return false;
1514 }
1515 if (prior_code && prior_code->for_debugging() > new_code->for_debugging()) {
1516 // In debug state, install breakpoints over normal debug code.
1517 return false;
1518 }
1519 }
1520 // In kNoDebugging:
1521 // Install if the tier is higher than before or we replace debugging code with
1522 // non-debugging code.
1523 // Also allow installing a lower tier if deopt support is enabled and the
1524 // prior code has deopt data. (The check for deopt_data is needed as with
1525 // compilation hints, both baseline and top tier compilation run concurrently
1526 // in the background and can finish in any order.)
1527 if (prior_code && !prior_code->for_debugging() &&
1528 prior_code->tier() > new_code->tier() &&
1529 (!v8_flags.wasm_deopt || prior_code->deopt_data().empty())) {
1530 return false;
1531 }
1532 return true;
1533}
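// Decision illustration: in the kNoDebugging state, TurboFan code published for
// a slot that currently holds Liftoff code replaces it, whereas Liftoff code
// arriving after TurboFan code (without deopt data) returns false here and the
// existing TurboFan code stays installed.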
1534
1537
1538 DCHECK_EQ(this, code->native_module());
1539 DCHECK_EQ(kWithBreakpoints, code->for_debugging());
1540 DCHECK(!code->IsAnonymous());
1541 DCHECK_LE(module_->num_imported_functions, code->index());
1542 DCHECK_LT(code->index(), num_functions());
1543
1544 // If the module is tiered up by now, do not reinstall debug code.
1545 if (debug_state_ != kDebugging) return;
1546
1547 uint32_t slot_idx = declared_function_index(module(), code->index());
1548 if (WasmCode* prior_code = code_table_[slot_idx]) {
1549 WasmCodeRefScope::AddRef(prior_code);
1550 // The code is added to the current {WasmCodeRefScope}, hence the ref
1551 // count cannot drop to zero here.
1552 prior_code->DecRefOnLiveCode();
1553 }
1554 code_table_[slot_idx] = code;
1555 code->IncRef();
1556
1557 PatchJumpTablesLocked(slot_idx, code->instruction_start(),
1558 code->instruction_start(), code->signature_hash());
1559}
1560
1561std::pair<base::Vector<uint8_t>, NativeModule::JumpTablesRef>
1564 base::Vector<uint8_t> code_space =
1565 code_allocator_.AllocateForCode(this, total_code_size);
1566 auto jump_tables =
1568 return {code_space, jump_tables};
1569}
1570
1571std::unique_ptr<WasmCode> NativeModule::AddDeserializedCode(
1572 int index, base::Vector<uint8_t> instructions, int stack_slots,
1573 int ool_spills, uint32_t tagged_parameter_slots, int safepoint_table_offset,
1574 int handler_table_offset, int constant_pool_offset,
1575 int code_comments_offset, int unpadded_binary_size,
1576 base::Vector<const uint8_t> protected_instructions_data,
1577 base::Vector<const uint8_t> reloc_info,
1578 base::Vector<const uint8_t> source_position_table,
1579 base::Vector<const uint8_t> inlining_positions,
1581 ExecutionTier tier) {
1582 UpdateCodeSize(instructions.size(), tier, kNotForDebugging);
1583
1584 uint64_t signature_hash =
1585 module_->signature_hash(GetTypeCanonicalizer(), index);
1586
1587 return std::unique_ptr<WasmCode>{new WasmCode{this,
1588 index,
1589 instructions,
1591 ool_spills,
1592 tagged_parameter_slots,
1593 safepoint_table_offset,
1594 handler_table_offset,
1595 constant_pool_offset,
1596 code_comments_offset,
1597 unpadded_binary_size,
1598 protected_instructions_data,
1599 reloc_info,
1600 source_position_table,
1601 inlining_positions,
1602 deopt_data,
1603 kind,
1604 tier,
1606 signature_hash}};
1607}
1608
1609std::pair<std::vector<WasmCode*>, std::vector<WellKnownImport>>
1612 WasmCode** start = code_table_.get();
1613 WasmCode** end = start + module_->num_declared_functions;
1614 for (WasmCode* code : base::VectorOf(start, end - start)) {
1615 if (code) WasmCodeRefScope::AddRef(code);
1616 }
1617 std::vector<WellKnownImport> import_statuses(module_->num_imported_functions);
1618 for (uint32_t i = 0; i < module_->num_imported_functions; i++) {
1619 import_statuses[i] = module_->type_feedback.well_known_imports.get(i);
1620 }
1621 return {std::vector<WasmCode*>{start, end}, std::move(import_statuses)};
1622}
1623
1624std::vector<WasmCode*> NativeModule::SnapshotAllOwnedCode() const {
1627
1628 std::vector<WasmCode*> all_code(owned_code_.size());
1629 std::transform(owned_code_.begin(), owned_code_.end(), all_code.begin(),
1630 [](auto& entry) { return entry.second.get(); });
1631 std::for_each(all_code.begin(), all_code.end(), WasmCodeRefScope::AddRef);
1632 return all_code;
1633}
1634
1635WasmCode* NativeModule::GetCode(uint32_t index) const {
1638 if (code) WasmCodeRefScope::AddRef(code);
1639 return code;
1640}
1641
1642bool NativeModule::HasCode(uint32_t index) const {
1644 return code_table_[declared_function_index(module(), index)] != nullptr;
1645}
1646
1647bool NativeModule::HasCodeWithTier(uint32_t index, ExecutionTier tier) const {
1649 return code_table_[declared_function_index(module(), index)] != nullptr &&
1650 code_table_[declared_function_index(module(), index)]->tier() == tier;
1651}
1652
1654 std::unique_ptr<WasmModuleSourceMap> source_map) {
1655 source_map_ = std::move(source_map);
1656}
1657
1661
1663 JumpTableType type) {
1664 return CreateEmptyJumpTableInRegionLocked(jump_table_size,
1665 kUnrestrictedRegion, type);
1666}
1667
1668namespace {
1669
1670ThreadIsolation::JitAllocationType ToAllocationType(
1672 switch (type) {
1679 }
1680}
1681
1682} // namespace
1683
1685 int jump_table_size, base::AddressRegion region, JumpTableType type) {
1687 // Only call this if we really need a jump table.
1688 DCHECK_LT(0, jump_table_size);
1689 base::Vector<uint8_t> code_space =
1690 code_allocator_.AllocateForCodeInRegion(this, jump_table_size, region);
1691 DCHECK(!code_space.empty());
1693 {
1694 WritableJitAllocation jit_allocation =
1696 reinterpret_cast<Address>(code_space.begin()), code_space.size(),
1697 ToAllocationType(type));
1698 jit_allocation.ClearBytes(0, code_space.size());
1699 }
1700 std::unique_ptr<WasmCode> code{
1701 new WasmCode{this, // native_module
1702 kAnonymousFuncIndex, // index
1703 code_space, // instructions
1704 0, // stack_slots
1705 0, // ool_spills
1706 0, // tagged_parameter_slots
1707 0, // safepoint_table_offset
1708 jump_table_size, // handler_table_offset
1709 jump_table_size, // constant_pool_offset
1710 jump_table_size, // code_comments_offset
1711 jump_table_size, // unpadded_binary_size
1712 {}, // protected_instructions
1713 {}, // reloc_info
1714 {}, // source_pos
1715 {}, // inlining pos
1716 {}, // deopt data
1717 WasmCode::kJumpTable, // kind
1718 ExecutionTier::kNone, // tier
1719 kNotForDebugging, // for_debugging
1720 0}}; // signature_hash
1721 return PublishCodeLocked(std::move(code),
1723}
1724
1726 ForDebugging for_debugging) {
1727 if (for_debugging != kNotForDebugging) return;
1728 // Count jump tables (ExecutionTier::kNone) for both Liftoff and TurboFan as
1729 // this is shared code.
1730 if (tier != ExecutionTier::kTurbofan) liftoff_code_size_.fetch_add(size);
1731 if (tier != ExecutionTier::kLiftoff) turbofan_code_size_.fetch_add(size);
1732}
1733
1734void NativeModule::PatchJumpTablesLocked(uint32_t slot_index, Address target,
1735 Address code_pointer_table_target,
1736 uint64_t signature_hash) {
1738
1740 code_pointer_handles_[slot_index], code_pointer_table_target,
1741 signature_hash);
1742
1743 for (auto& code_space_data : code_space_data_) {
1744 // TODO(sroettger): need to unlock both jump tables together
1745 DCHECK_IMPLIES(code_space_data.jump_table, code_space_data.far_jump_table);
1746 if (!code_space_data.jump_table) continue;
1747 WritableJumpTablePair writable_jump_tables =
1748 ThreadIsolation::LookupJumpTableAllocations(
1749 code_space_data.jump_table->instruction_start(),
1750 code_space_data.jump_table->instructions_size_,
1751 code_space_data.far_jump_table->instruction_start(),
1752 code_space_data.far_jump_table->instructions_size_);
1753 PatchJumpTableLocked(writable_jump_tables, code_space_data, slot_index,
1754 target);
1755 }
1756}
1757
1758void NativeModule::PatchJumpTableLocked(WritableJumpTablePair& jump_table_pair,
1759 const CodeSpaceData& code_space_data,
1760 uint32_t slot_index, Address target) {
1762
1763 DCHECK_NOT_NULL(code_space_data.jump_table);
1764 DCHECK_NOT_NULL(code_space_data.far_jump_table);
1765
1766 DCHECK_LT(slot_index, module_->num_declared_functions);
1767 Address jump_table_slot =
1768 code_space_data.jump_table->instruction_start() +
1770 uint32_t far_jump_table_offset = JumpTableAssembler::FarJumpSlotIndexToOffset(
1771 BuiltinLookup::BuiltinCount() + slot_index);
1772 // Only pass the far jump table start if the far jump table actually has a
1773 // slot for this function index (i.e. does not only contain runtime stubs).
1774 bool has_far_jump_slot =
1775 far_jump_table_offset <
1776 code_space_data.far_jump_table->instructions().size();
1777 Address far_jump_table_start =
1778 code_space_data.far_jump_table->instruction_start();
1779 Address far_jump_table_slot =
1780 has_far_jump_slot ? far_jump_table_start + far_jump_table_offset
1781 : kNullAddress;
1782 JumpTableAssembler::PatchJumpTableSlot(jump_table_pair, jump_table_slot,
1783 far_jump_table_slot, target);
1785}
1786
1789
1790 // We do not need a code space if the NativeModule does not hold any
1791 // functions (wrappers live in the wrapper cache).
1792 DCHECK_LT(0, module_->num_declared_functions);
1793
1794 // Each code space must be at least twice as large as the overhead per code
1795 // space. Otherwise, we are wasting too much memory.
1796 DCHECK_GE(region.size(),
1797 2 * OverheadPerCodeSpace(module()->num_declared_functions));
1798
1799 WasmCodeRefScope code_ref_scope;
1800 WasmCode* jump_table = nullptr;
1801 WasmCode* far_jump_table = nullptr;
1802 const uint32_t num_wasm_functions = module_->num_declared_functions;
1803 const bool is_first_code_space = code_space_data_.empty();
1804 // We always need a far jump table, because it contains the runtime stubs.
1805 const bool needs_far_jump_table =
1807 const bool needs_jump_table = num_wasm_functions > 0 && needs_far_jump_table;
1808
1809 if (needs_jump_table) {
1810 // Allocate additional jump tables just as big as the first one.
1811 // This is in particular needed in cctests which add functions to the module
1812 // after the jump tables are already created (see
1813 // https://crbug.com/v8/14213).
1814 int jump_table_size =
1815 is_first_code_space
1816 ? JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions)
1818 jump_table = CreateEmptyJumpTableInRegionLocked(jump_table_size, region,
1820 CHECK(region.contains(jump_table->instruction_start()));
1821 }
1822
1823 if (needs_far_jump_table) {
1824 int num_function_slots = NumWasmFunctionsInFarJumpTable(num_wasm_functions);
1825 // See comment above for the size computation.
1826 int far_jump_table_size =
1827 is_first_code_space
1829 BuiltinLookup::BuiltinCount(), num_function_slots)
1831 far_jump_table = CreateEmptyJumpTableInRegionLocked(
1832 far_jump_table_size, region, JumpTableType::kFarJumpTable);
1833 CHECK(region.contains(far_jump_table->instruction_start()));
1834 EmbeddedData embedded_data = EmbeddedData::FromBlob();
1836 Address builtin_addresses[BuiltinLookup::BuiltinCount()];
1837 for (int i = 0; i < BuiltinLookup::BuiltinCount(); ++i) {
1838 builtin_addresses[i] = embedded_data.InstructionStartOf(
1840 }
1842 far_jump_table->instruction_start(), far_jump_table->instructions_size_,
1844
1846 jit_allocation, far_jump_table->instruction_start(), builtin_addresses,
1847 BuiltinLookup::BuiltinCount(), num_function_slots);
1848 }
1849
1850 if (is_first_code_space) {
1851 // This can be updated and accessed without locks, since the addition of the
1852 // first code space happens during initialization of the {NativeModule},
1853 // where no concurrent accesses are possible.
1854 main_jump_table_ = jump_table;
1855 main_far_jump_table_ = far_jump_table;
1856 }
1857
1858 code_space_data_.push_back(CodeSpaceData{region, jump_table, far_jump_table});
1859
1860 if (is_first_code_space) {
1861 InitializeJumpTableForLazyCompilation(num_wasm_functions);
1862 }
1863
1864 if (jump_table && !is_first_code_space) {
1865 // Patch the new jump table(s) with existing functions. If this is the first
1866 // code space, there cannot be any functions that have been compiled yet.
1867 const CodeSpaceData& new_code_space_data = code_space_data_.back();
1868 // TODO(sroettger): need to create two write scopes? Or have a write scope
1869 // for multiple allocations.
1870 WritableJumpTablePair writable_jump_tables =
1871 ThreadIsolation::LookupJumpTableAllocations(
1872 new_code_space_data.jump_table->instruction_start(),
1873
1874 new_code_space_data.jump_table->instructions_size_,
1875 new_code_space_data.far_jump_table->instruction_start(),
1876
1877 new_code_space_data.far_jump_table->instructions_size_);
1878 for (uint32_t slot_index = 0; slot_index < num_wasm_functions;
1879 ++slot_index) {
1880 if (code_table_[slot_index]) {
1881 PatchJumpTableLocked(writable_jump_tables, new_code_space_data,
1882 slot_index,
1883 code_table_[slot_index]->instruction_start());
1884 } else if (lazy_compile_table_) {
1885 // Use the main jump table as the target so that we don't have to add a
1886 // landing pad instruction to the lazy compile table entries.
1887 Address main_jump_table_target =
1890 PatchJumpTableLocked(writable_jump_tables, new_code_space_data,
1891 slot_index, main_jump_table_target);
1892 }
1893 }
1894 }
1895}
1896
1897namespace {
1898class NativeModuleWireBytesStorage final : public WireBytesStorage {
1899 public:
1900 explicit NativeModuleWireBytesStorage(
1901 std::shared_ptr<base::OwnedVector<const uint8_t>> wire_bytes)
1902 : wire_bytes_(std::move(wire_bytes)) {}
1903
1904 base::Vector<const uint8_t> GetCode(WireBytesRef ref) const final {
1905 return std::atomic_load(&wire_bytes_)
1906 ->as_vector()
1907 .SubVector(ref.offset(), ref.end_offset());
1908 }
1909
1910 std::optional<ModuleWireBytes> GetModuleBytes() const final {
1911 return std::optional<ModuleWireBytes>(
1912 std::atomic_load(&wire_bytes_)->as_vector());
1913 }
1914
1915 private:
1916 const std::shared_ptr<base::OwnedVector<const uint8_t>> wire_bytes_;
1917};
1918} // namespace
1919
1921 auto shared_wire_bytes =
1922 std::make_shared<base::OwnedVector<const uint8_t>>(std::move(wire_bytes));
1923 std::atomic_store(&wire_bytes_, shared_wire_bytes);
1924 if (!shared_wire_bytes->empty()) {
1925 compilation_state_->SetWireBytesStorage(
1926 std::make_shared<NativeModuleWireBytesStorage>(
1927 std::move(shared_wire_bytes)));
1928 }
1929}
1930
1931void NativeModule::AddLazyCompilationTimeSample(int64_t sample_in_micro_sec) {
1932 num_lazy_compilations_.fetch_add(1, std::memory_order_relaxed);
1933 sum_lazy_compilation_time_in_micro_sec_.fetch_add(sample_in_micro_sec,
1934 std::memory_order_relaxed);
1935 int64_t max =
1936 max_lazy_compilation_time_in_micro_sec_.load(std::memory_order_relaxed);
1937 while (sample_in_micro_sec > max &&
1938 !max_lazy_compilation_time_in_micro_sec_.compare_exchange_weak(
1939 max, sample_in_micro_sec, std::memory_order_relaxed,
1940 std::memory_order_relaxed)) {
 1941    // Repeat until we set the new maximum successfully.
1942 }
1943}
1944
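// A minimal, self-contained sketch (not from wasm-code-manager.cc) of the
// lock-free "update maximum" pattern that AddLazyCompilationTimeSample uses
// above; the names below are hypothetical.
#include <atomic>
#include <cstdint>

namespace example {

inline void UpdateMax(std::atomic<int64_t>& maximum, int64_t sample) {
  int64_t current = maximum.load(std::memory_order_relaxed);
  // Retry while {sample} is still larger than the stored maximum and the weak
  // compare-exchange fails, either spuriously or because another thread stored
  // a larger value into {current} in the meantime.
  while (sample > current &&
         !maximum.compare_exchange_weak(current, sample,
                                        std::memory_order_relaxed,
                                        std::memory_order_relaxed)) {
  }
}

}  // namespace example
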
1947 DCHECK(!new_owned_code_.empty());
 1948  // Sort the {new_owned_code_} vector in reverse order, such that the position
 1949  // of the previously inserted element can be used as a hint for the next
 1950  // element. If elements in {new_owned_code_} are adjacent, this guarantees
 1951  // constant-time insertion into the map.
1952 std::sort(new_owned_code_.begin(), new_owned_code_.end(),
1953 [](const std::unique_ptr<WasmCode>& a,
1954 const std::unique_ptr<WasmCode>& b) {
1955 return a->instruction_start() > b->instruction_start();
1956 });
1957 auto insertion_hint = owned_code_.end();
1958 for (auto& code : new_owned_code_) {
1959 DCHECK_EQ(0, owned_code_.count(code->instruction_start()));
1960 // Check plausibility of the insertion hint.
1961 DCHECK(insertion_hint == owned_code_.end() ||
1962 insertion_hint->first > code->instruction_start());
1963 insertion_hint = owned_code_.emplace_hint(
1964 insertion_hint, code->instruction_start(), std::move(code));
1965 }
1966 new_owned_code_.clear();
1967}
1968
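// A minimal, self-contained sketch (not from wasm-code-manager.cc) of why the
// transfer above sorts in descending order: with descending keys, the iterator
// returned by the previous insertion is a valid hint for the next one, so
// std::map::emplace_hint runs in amortized constant time per element. All
// names are hypothetical.
#include <algorithm>
#include <cstdint>
#include <map>
#include <utility>
#include <vector>

namespace example {

inline std::map<uintptr_t, int> InsertSortedDescending(
    std::vector<std::pair<uintptr_t, int>> entries) {
  std::sort(entries.begin(), entries.end(),
            [](const auto& a, const auto& b) { return a.first > b.first; });
  std::map<uintptr_t, int> map;
  auto hint = map.end();
  for (auto& [key, value] : entries) {
    // {hint} points just past the position where {key} belongs, which makes
    // the insertion cheap.
    hint = map.emplace_hint(hint, key, value);
  }
  return map;
}

}  // namespace example
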
1972 auto iter = owned_code_.upper_bound(pc);
1973 if (iter == owned_code_.begin()) return nullptr;
1974 --iter;
1975 WasmCode* candidate = iter->second.get();
1976 DCHECK_EQ(candidate->instruction_start(), iter->first);
1977 if (!candidate->contains(pc)) return nullptr;
1978 WasmCodeRefScope::AddRef(candidate);
1979 return candidate;
1980}
1981
1983 base::AddressRegion code_region) const {
1985 auto jump_table_usable = [code_region](const WasmCode* jump_table) {
1986 // We only ever need to check for suitable jump tables if
1987 // {kNeedsFarJumpsBetweenCodeSpaces} is true.
1989 Address table_start = jump_table->instruction_start();
1990 Address table_end = table_start + jump_table->instructions().size();
1991 // Compute the maximum distance from anywhere in the code region to anywhere
1992 // in the jump table, avoiding any underflow.
1993 size_t max_distance = std::max(
1994 code_region.end() > table_start ? code_region.end() - table_start : 0,
1995 table_end > code_region.begin() ? table_end - code_region.begin() : 0);
1996 // kDefaultMaxWasmCodeSpaceSizeMb is <= the maximum near call distance on
1997 // the current platform.
1998 // We can allow a max_distance that is equal to
1999 // kDefaultMaxWasmCodeSpaceSizeMb, because every call or jump will target an
2000 // address *within* the region, but never exactly the end of the region. So
 2001    // all occurring offsets are actually smaller than max_distance.
2002 return max_distance <= kDefaultMaxWasmCodeSpaceSizeMb * MB;
2003 };
2004
2005 for (auto& code_space_data : code_space_data_) {
2006 DCHECK_IMPLIES(code_space_data.jump_table, code_space_data.far_jump_table);
2007 if (!code_space_data.far_jump_table) continue;
2008 // Only return these jump tables if they are reachable from the whole
2009 // {code_region}.
2011 (!jump_table_usable(code_space_data.far_jump_table) ||
2012 (code_space_data.jump_table &&
2013 !jump_table_usable(code_space_data.jump_table)))) {
2014 continue;
2015 }
2016 return {code_space_data.jump_table
2017 ? code_space_data.jump_table->instruction_start()
2018 : kNullAddress,
2019 code_space_data.far_jump_table->instruction_start()};
2020 }
2021 return {};
2022}
2023
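// A minimal, self-contained sketch (not from wasm-code-manager.cc) of the
// underflow-safe distance check performed by {jump_table_usable} above: the
// largest distance from any address in the code region to any address in the
// table is compared against a near-call limit. The limit and all names here
// are hypothetical.
#include <algorithm>
#include <cstdint>

namespace example {

constexpr uintptr_t kMaxNearCallDistance = uintptr_t{1} << 30;  // 1 GiB.

inline bool IsTableReachable(uintptr_t code_begin, uintptr_t code_end,
                             uintptr_t table_begin, uintptr_t table_end) {
  // Each operand is clamped to zero before subtracting, so the unsigned
  // arithmetic can never underflow.
  uintptr_t max_distance =
      std::max(code_end > table_begin ? code_end - table_begin : 0,
               table_end > code_begin ? table_end - code_begin : 0);
  return max_distance <= kMaxNearCallDistance;
}

}  // namespace example
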
2025 uint32_t func_index, const JumpTablesRef& jump_tables) const {
2026 DCHECK(jump_tables.is_valid());
2027 uint32_t slot_offset = JumpTableOffset(module(), func_index);
2028 return jump_tables.jump_table_start + slot_offset;
2029}
2030
2032 Builtin builtin, const JumpTablesRef& jump_tables) const {
2033 DCHECK(jump_tables.is_valid());
2034 int index = BuiltinLookup::JumptableIndexForBuiltin(builtin);
2035
2037 return jump_tables.far_jump_table_start + offset;
2038}
2039
2041 Address slot_address) const {
2042 WasmCodeRefScope code_refs;
2043 WasmCode* code = Lookup(slot_address);
2044 DCHECK_NOT_NULL(code);
2045 DCHECK_EQ(WasmCode::kJumpTable, code->kind());
2046 uint32_t slot_offset =
2047 static_cast<uint32_t>(slot_address - code->instruction_start());
2048 uint32_t slot_idx = JumpTableAssembler::SlotOffsetToIndex(slot_offset);
2049 DCHECK_LT(slot_idx, module_->num_declared_functions);
2050 DCHECK_EQ(slot_address,
2051 code->instruction_start() +
2053 return module_->num_imported_functions + slot_idx;
2054}
2055
2058 CallIndirectTargetMap lookup_map;
2059 for (uint32_t func_index = num_imported_functions();
2060 func_index < num_functions(); func_index++) {
2061 lookup_map.emplace(GetCodePointerHandle(func_index), func_index);
2062 }
2063 return lookup_map;
2064}
2065
2068
2069 for (auto& code_space_data : code_space_data_) {
2070 if (code_space_data.far_jump_table != nullptr &&
2071 code_space_data.far_jump_table->contains(target)) {
2072 uint32_t offset = static_cast<uint32_t>(
2073 target - code_space_data.far_jump_table->instruction_start());
2075 if (index >= BuiltinLookup::BuiltinCount()) continue;
2077 continue;
2078 }
2080 }
2081 }
2082
2083 // Invalid address.
2084 return Builtin::kNoBuiltinId;
2085}
2086
2088 DCHECK_IMPLIES(index != kAnonymousFuncIndex, index >= 0);
2089 if (index == kAnonymousFuncIndex ||
2090 static_cast<uint32_t>(index) < module_->num_imported_functions) {
2091 // TODO(sroettger): do ImportWrappers need a code pointer handle?
2093 }
2095}
2096
2098 TRACE_HEAP("Deleting native module: %p\n", this);
2099 // Cancel all background compilation before resetting any field of the
2100 // NativeModule or freeing anything.
2101 compilation_state_->CancelCompilation();
2102
2103 if (V8_UNLIKELY(v8_flags.print_wasm_offheap_memory_size)) {
2104 // Print the current memory consumption of both this Module *and* the whole
2105 // engine before freeing this module. In a benchmark with a single module
2106 // this will probably be the high watermark of memory usage for the whole
2107 // engine.
2110 }
2111
2113
2114 // If experimental PGO support is enabled, serialize the PGO data now.
2115 if (V8_UNLIKELY(v8_flags.experimental_wasm_pgo_to_file)) {
2117 }
2118
2120}
2121
2123 : max_committed_code_space_(v8_flags.wasm_max_committed_code_mb * MB),
2124 critical_committed_code_space_(max_committed_code_space_ / 2),
2125 next_code_space_hint_(reinterpret_cast<Address>(
2127 // Check that --wasm-max-code-space-size-mb is not set bigger than the default
2128 // value. Otherwise we run into DCHECKs or other crashes later.
2130 v8_flags.wasm_max_code_space_size_mb);
2131}
2132
2134 // No more committed code space.
2136}
2137
2138#if defined(V8_OS_WIN64)
2139// static
2140bool WasmCodeManager::CanRegisterUnwindInfoForNonABICompliantCodeRange() {
2142 v8_flags.win64_unwinding_info;
2143}
2144#endif // V8_OS_WIN64
2145
2147 DCHECK(IsAligned(region.begin(), CommitPageSize()));
2148 DCHECK(IsAligned(region.size(), CommitPageSize()));
2149 // Reserve the size. Use CAS loop to avoid overflow on
2150 // {total_committed_code_space_}.
2151 size_t old_value = total_committed_code_space_.load();
2152 while (true) {
2154 if (region.size() > max_committed_code_space_ - old_value) {
2155 auto oom_detail = base::FormattedString{}
2156 << "trying to commit " << region.size()
2157 << ", already committed " << old_value;
2159 "Exceeding maximum wasm committed code space",
2160 oom_detail.PrintToArray().data());
2161 UNREACHABLE();
2162 }
2163 if (total_committed_code_space_.compare_exchange_weak(
2164 old_value, old_value + region.size())) {
2165 break;
2166 }
2167 }
2168
2169 TRACE_HEAP("Setting rwx permissions for 0x%" PRIxPTR ":0x%" PRIxPTR "\n",
2170 region.begin(), region.end());
2171 bool success = GetPlatformPageAllocator()->RecommitPages(
2172 reinterpret_cast<void*>(region.begin()), region.size(),
2174
2175 if (V8_UNLIKELY(!success)) {
2176 auto oom_detail = base::FormattedString{} << "region size: "
2177 << region.size();
2178 V8::FatalProcessOutOfMemory(nullptr, "Commit wasm code space",
2179 oom_detail.PrintToArray().data());
2180 UNREACHABLE();
2181 }
2182}
2183
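// A minimal, self-contained sketch (not from wasm-code-manager.cc) of the
// overflow-safe reservation loop used by the commit path above: testing
// "request > budget - used" instead of "used + request > budget" avoids
// overflowing the unsigned addition. Names are hypothetical; it assumes the
// invariant {used <= budget}.
#include <atomic>
#include <cstddef>

namespace example {

inline bool TryReserve(std::atomic<size_t>& used, size_t budget,
                       size_t request) {
  size_t old_value = used.load();
  while (true) {
    // Equivalent to {old_value + request > budget}, but cannot overflow.
    if (request > budget - old_value) return false;
    if (used.compare_exchange_weak(old_value, old_value + request)) {
      return true;
    }
    // compare_exchange_weak updated {old_value}; retry with the new value.
  }
}

}  // namespace example
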
2186 DCHECK(IsAligned(region.begin(), allocator->CommitPageSize()));
2187 DCHECK(IsAligned(region.size(), allocator->CommitPageSize()));
2188 [[maybe_unused]] size_t old_committed =
2189 total_committed_code_space_.fetch_sub(region.size());
2190 DCHECK_LE(region.size(), old_committed);
2191 TRACE_HEAP("Decommitting system pages 0x%" PRIxPTR ":0x%" PRIxPTR "\n",
2192 region.begin(), region.end());
2193 if (V8_UNLIKELY(!allocator->DecommitPages(
2194 reinterpret_cast<void*>(region.begin()), region.size()))) {
2195 // Decommit can fail in near-OOM situations.
2196 auto oom_detail = base::FormattedString{} << "region size: "
2197 << region.size();
2198 V8::FatalProcessOutOfMemory(nullptr, "Decommit Wasm code space",
2199 oom_detail.PrintToArray().data());
2200 }
2201}
2202
2204 NativeModule* native_module) {
2206 lookup_map_.insert(std::make_pair(
2207 region.begin(), std::make_pair(region.end(), native_module)));
2208}
2209
2211 v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
2212 DCHECK_GT(size, 0);
2213 size_t allocate_page_size = page_allocator->AllocatePageSize();
2214 size = RoundUp(size, allocate_page_size);
2215 Address hint =
2216 next_code_space_hint_.fetch_add(size, std::memory_order_relaxed);
2217
 2218  // When we start exposing Wasm in jitless mode, the jitless flag
2219 // will have to determine whether we set kMapAsJittable or not.
2220 DCHECK(!v8_flags.jitless);
2221 VirtualMemory mem(page_allocator, size, reinterpret_cast<void*>(hint),
2222 allocate_page_size,
2224 if (!mem.IsReserved()) {
2225 // Try resetting {next_code_space_hint_}, which might fail if another thread
2226 // bumped it in the meantime.
2227 Address bumped_hint = hint + size;
2228 next_code_space_hint_.compare_exchange_weak(bumped_hint, hint,
2229 std::memory_order_relaxed);
2230 return {};
2231 }
2232 TRACE_HEAP("VMem alloc: 0x%" PRIxPTR ":0x%" PRIxPTR " (%zu)\n", mem.address(),
2233 mem.end(), mem.size());
2234
2235 if (mem.address() != hint) {
2236 // If the hint was ignored, just store the end of the new vmem area
2237 // unconditionally, potentially racing with other concurrent allocations (it
2238 // does not really matter which end pointer we keep in that case).
2239 next_code_space_hint_.store(mem.end(), std::memory_order_relaxed);
2240 }
2241
2242 // Don't pre-commit the code cage on Windows since it uses memory and it's not
2243 // required for recommit.
2244 // iOS cannot adjust page permissions for MAP_JIT'd pages, they are set as RWX
2245 // at the start.
2246#if !defined(V8_OS_WIN) && !defined(V8_OS_IOS)
2248#if V8_HAS_PKU_JIT_WRITE_PROTECT
2251 } else {
2252 CHECK(base::MemoryProtectionKey::SetPermissionsAndKey(
2254 RwxMemoryWriteScope::memory_protection_key()));
2255 }
2256#else
2257 UNREACHABLE();
2258#endif
2259 } else {
2262 }
2263 page_allocator->DiscardSystemPages(reinterpret_cast<void*>(mem.address()),
2264 mem.size());
2265#endif // !defined(V8_OS_WIN) && !defined(V8_OS_IOS)
2266
2268
2269 return mem;
2270}
2271
2272namespace {
2273// The numbers here are rough estimates, used to calculate the size of the
2274// initial code reservation and for estimating the amount of external memory
2275// reported to the GC.
2276// They do not need to be accurate. Choosing them too small will result in
2277// separate code spaces being allocated (compile time and runtime overhead),
2278// choosing them too large results in over-reservation (virtual address space
2279// only).
2280// In doubt, choose the numbers slightly too large on 64-bit systems (where
2281// {kNeedsFarJumpsBetweenCodeSpaces} is {true}). Over-reservation is less
2282// critical in a 64-bit address space, but separate code spaces cause overhead.
2283// On 32-bit systems (where {kNeedsFarJumpsBetweenCodeSpaces} is {false}), the
2284// opposite is true: Multiple code spaces are cheaper, and address space is
2285// scarce, hence choose numbers slightly too small.
2286//
2287// Numbers can be determined by running benchmarks with
2288// --trace-wasm-compilation-times, and piping the output through
2289// tools/wasm/code-size-factors.py.
2290#if V8_TARGET_ARCH_X64
2291constexpr size_t kTurbofanFunctionOverhead = 24;
2292constexpr size_t kTurbofanCodeSizeMultiplier = 3;
2293constexpr size_t kLiftoffFunctionOverhead = 56;
2294constexpr size_t kLiftoffCodeSizeMultiplier = 4;
2295#elif V8_TARGET_ARCH_IA32
2296constexpr size_t kTurbofanFunctionOverhead = 20;
2297constexpr size_t kTurbofanCodeSizeMultiplier = 3;
2298constexpr size_t kLiftoffFunctionOverhead = 48;
2299constexpr size_t kLiftoffCodeSizeMultiplier = 3;
2300#elif V8_TARGET_ARCH_ARM
2301constexpr size_t kTurbofanFunctionOverhead = 44;
2302constexpr size_t kTurbofanCodeSizeMultiplier = 3;
2303constexpr size_t kLiftoffFunctionOverhead = 96;
2304constexpr size_t kLiftoffCodeSizeMultiplier = 5;
2305#elif V8_TARGET_ARCH_ARM64
2306constexpr size_t kTurbofanFunctionOverhead = 40;
2307constexpr size_t kTurbofanCodeSizeMultiplier = 3;
2308constexpr size_t kLiftoffFunctionOverhead = 68;
2309constexpr size_t kLiftoffCodeSizeMultiplier = 4;
2310#else
2311// Other platforms should add their own estimates for best performance. Numbers
2312// below are the maximum of other architectures.
2313constexpr size_t kTurbofanFunctionOverhead = 44;
2314constexpr size_t kTurbofanCodeSizeMultiplier = 4;
2315constexpr size_t kLiftoffFunctionOverhead = 96;
2316constexpr size_t kLiftoffCodeSizeMultiplier = 5;
2317#endif
2318} // namespace
2319
2320// static
2322 return kLiftoffFunctionOverhead + kCodeAlignment / 2 +
2323 body_size * kLiftoffCodeSizeMultiplier;
2324}
2325
2326// static
2328 int code_section_length = 0;
2329 if (module->num_declared_functions > 0) {
2330 DCHECK_EQ(module->functions.size(),
2332 auto* first_fn = &module->functions[module->num_imported_functions];
2333 auto* last_fn = &module->functions.back();
2334 code_section_length =
2335 static_cast<int>(last_fn->code.end_offset() - first_fn->code.offset());
2336 }
2338 code_section_length);
2339}
2340
2341// static
2343 int code_section_length) {
2344 // It can happen that even without any functions we still have a code section
2345 // of size 1, defining 0 function bodies. Still report 0 overall in this case.
2346 if (num_functions == 0) return 0;
2347
2348 // The size for the jump table and far jump table is added later, per code
2349 // space (see {OverheadPerCodeSpace}). We still need to add the overhead for
2350 // the lazy compile table once, though. There are configurations where we do
2351 // not need it (non-asm.js, no dynamic tiering and no lazy compilation), but
2352 // we ignore this here as most of the time we will need it.
2353 const size_t lazy_compile_table_size =
2355
2356 const size_t overhead_per_function_turbofan =
2357 kTurbofanFunctionOverhead + kCodeAlignment / 2;
2358 size_t size_of_turbofan = overhead_per_function_turbofan * num_functions +
2359 kTurbofanCodeSizeMultiplier * code_section_length;
2360
2361 const size_t overhead_per_function_liftoff =
2362 kLiftoffFunctionOverhead + kCodeAlignment / 2;
2363 // Note: For asm.js we do not have Liftoff support, but this corner case is
2364 // being ignored here.
2365 size_t size_of_liftoff =
2366 v8_flags.liftoff ? overhead_per_function_liftoff * num_functions +
2367 kLiftoffCodeSizeMultiplier * code_section_length
2368 : 0;
2369 // Expect that typically not more than half of the functions are actually
2370 // compiled.
2371 if (v8_flags.wasm_lazy_compilation) size_of_liftoff /= 2;
2372
2373 // With dynamic tiering we don't expect to compile more than 25% with
 2374  // TurboFan. If there is no Liftoff, though, then all code will be generated
 2375  // by TurboFan.
2376 if (v8_flags.liftoff && v8_flags.wasm_dynamic_tiering) size_of_turbofan /= 4;
2377
2378 return lazy_compile_table_size + size_of_liftoff + size_of_turbofan;
2379}
2380
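// A minimal, self-contained sketch (not from wasm-code-manager.cc) of the code
// size estimate above, specialized for the X64 constants in this file and
// assuming Liftoff, lazy compilation, and dynamic tiering are all enabled. The
// 32-byte alignment and the helper names are hypothetical.
#include <cstddef>

namespace example {

constexpr size_t kCodeAlign = 32;  // stand-in for kCodeAlignment
constexpr size_t kTurbofanOverhead = 24, kTurbofanMultiplier = 3;
constexpr size_t kLiftoffOverhead = 56, kLiftoffMultiplier = 4;

inline size_t EstimateCodeSize(size_t num_functions,
                               size_t code_section_length,
                               size_t lazy_compile_table_size) {
  if (num_functions == 0) return 0;
  size_t turbofan = (kTurbofanOverhead + kCodeAlign / 2) * num_functions +
                    kTurbofanMultiplier * code_section_length;
  size_t liftoff = (kLiftoffOverhead + kCodeAlign / 2) * num_functions +
                   kLiftoffMultiplier * code_section_length;
  liftoff /= 2;   // lazy compilation: expect about half the functions compiled
  turbofan /= 4;  // dynamic tiering: expect about 25% tiered up to TurboFan
  return lazy_compile_table_size + liftoff + turbofan;
}

// Example: 1000 functions, a 100,000-byte code section and a 10,240-byte lazy
// compile table give 10,240 + 236,000 + 85,000 = 331,240 bytes (~324 KiB).

}  // namespace example
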
2381// static
2383 const WasmModule* module) {
2384 size_t wasm_module_estimate = module->EstimateStoredSize();
2385
2386 uint32_t num_wasm_functions = module->num_declared_functions;
2387
2388 // TODO(wasm): Include wire bytes size.
2389 size_t native_module_estimate =
2390 sizeof(NativeModule) + // NativeModule struct
2391 (sizeof(WasmCode*) * num_wasm_functions) + // code table size
2392 (sizeof(WasmCode) * num_wasm_functions); // code object size
2393
2394 size_t jump_table_size = RoundUp<kCodeAlignment>(
2395 JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions));
2396 size_t far_jump_table_size =
2399 NumWasmFunctionsInFarJumpTable(num_wasm_functions)));
2400
2401 return wasm_module_estimate + native_module_estimate + jump_table_size +
2402 far_jump_table_size;
2403}
2404
2405// static
2407#if V8_HAS_PKU_JIT_WRITE_PROTECT
2409#else
2410 return false;
2411#endif // V8_HAS_PKU_JIT_WRITE_PROTECT
2412}
2413
2414// static
2418
2419// static
2421#if V8_HAS_PKU_JIT_WRITE_PROTECT
2422 return RwxMemoryWriteScope::IsPKUWritable();
2423#else
2424 return false;
2425#endif // V8_HAS_PKU_JIT_WRITE_PROTECT
2426}
2427
2428std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
2429 Isolate* isolate, WasmEnabledFeatures enabled_features,
2430 WasmDetectedFeatures detected_features, CompileTimeImports compile_imports,
2431 size_t code_size_estimate, std::shared_ptr<const WasmModule> module) {
2432#if V8_ENABLE_DRUMBRAKE
2433 if (v8_flags.wasm_jitless) {
2434 VirtualMemory code_space;
2435 std::shared_ptr<NativeModule> ret;
2436 new NativeModule(enabled_features, detected_features, compile_imports,
2437 std::move(code_space), std::move(module),
2438 isolate->async_counters(), &ret);
2439 // The constructor initialized the shared_ptr.
2440 DCHECK_NOT_NULL(ret);
2441 TRACE_HEAP("New NativeModule (wasm-jitless) %p\n", ret.get());
2442 return ret;
2443 }
2444#endif // V8_ENABLE_DRUMBRAKE
2445
2446 if (total_committed_code_space_.load() >
2448 // Flush Liftoff code and record the flushed code size.
2449 if (v8_flags.flush_liftoff_code) {
2450 auto [code_size, metadata_size] =
2452 isolate->counters()->wasm_flushed_liftoff_code_size_bytes()->AddSample(
2453 static_cast<int>(code_size));
2454 isolate->counters()
2455 ->wasm_flushed_liftoff_metadata_size_bytes()
2456 ->AddSample(static_cast<int>(metadata_size));
2457 }
2458 (reinterpret_cast<v8::Isolate*>(isolate))
2459 ->MemoryPressureNotification(MemoryPressureLevel::kCritical);
2460 size_t committed = total_committed_code_space_.load();
2463 committed + (max_committed_code_space_ - committed) / 2);
2464 }
2465
2466 // On 32-bit platforms where no far jumps are needed between code spaces, use
2467 // a reduced code size estimate. This saves some precious address space at the
2468 // cost of needing more code spaces, but those are cheaper if they do not
2469 // result in more far jumps.
2470 // For now it is the case that far jumps are only needed on 64-bit platforms,
2471 // so those two conditions can be interchanged. Think about which logic to use
2472 // once those diverge.
2476 code_size_estimate /= 2;
2477 }
2478
2479 size_t code_vmem_size = ReservationSizeForWasmCode(
2480 code_size_estimate, module->num_declared_functions, 0);
2481 DCHECK_EQ(code_vmem_size == 0, module->num_declared_functions == 0);
2482
2483 // The '--wasm-max-initial-code-space-reservation' testing flag can be used to
2484 // reduce the maximum size of the initial code space reservation (in MB).
2485 if (v8_flags.wasm_max_initial_code_space_reservation > 0) {
2486 size_t flag_max_bytes =
2487 static_cast<size_t>(v8_flags.wasm_max_initial_code_space_reservation) *
2488 MB;
2489 if (flag_max_bytes < code_vmem_size) code_vmem_size = flag_max_bytes;
2490 }
2491
2492 // Try up to two times; getting rid of dead JSArrayBuffer allocations might
 2493  // require two GCs because the first GC may be incremental and may have
2494 // floating garbage.
2495 static constexpr int kAllocationRetries = 2;
2496 VirtualMemory code_space;
2497 base::AddressRegion code_space_region;
2498 if (code_vmem_size != 0) {
2499 for (int retries = 0;; ++retries) {
2500 code_space = TryAllocate(code_vmem_size);
2501 if (code_space.IsReserved()) break;
2502 if (retries == kAllocationRetries) {
2503 auto oom_detail = base::FormattedString{}
2504 << "NewNativeModule cannot allocate code space of "
2505 << code_vmem_size << " bytes";
2506 V8::FatalProcessOutOfMemory(isolate, "Allocate initial wasm code space",
2507 oom_detail.PrintToArray().data());
2508 UNREACHABLE();
2509 }
2510 // Run one GC, then try the allocation again.
2511 isolate->heap()->MemoryPressureNotification(
2513 }
2514 code_space_region = code_space.region();
2515 DCHECK_LE(code_vmem_size, code_space.size());
2516 }
2517
2518 std::shared_ptr<NativeModule> ret;
2519 new NativeModule(enabled_features, detected_features,
2520 std::move(compile_imports),
2521 std::move(code_space), std::move(module),
2522 isolate->async_counters(), &ret);
2523 // The constructor initialized the shared_ptr.
2524 DCHECK_NOT_NULL(ret);
2525 TRACE_HEAP("New NativeModule %p: Mem: 0x%" PRIxPTR ",+%zu\n", ret.get(),
2526 code_space_region.begin(), code_space_region.size());
2527
2529 lookup_map_.insert(
2530 std::make_pair(code_space_region.begin(),
2531 std::make_pair(code_space_region.end(), ret.get())));
2532 return ret;
2533}
2534
2536 size_t code_size = code_allocator_.committed_code_space();
2537 int code_size_mb = static_cast<int>(code_size / MB);
2538#if V8_ENABLE_DRUMBRAKE
2539 if (v8_flags.wasm_jitless) {
2540 base::MutexGuard lock(&module_->interpreter_mutex_);
2541 if (auto interpreter = module_->interpreter_.lock()) {
2542 code_size_mb = static_cast<int>(interpreter->TotalBytecodeSize() / MB);
2543 }
2544 }
2545#endif // V8_ENABLE_DRUMBRAKE
2546 counters->wasm_module_code_size_mb()->AddSample(code_size_mb);
2547 int code_size_kb = static_cast<int>(code_size / KB);
2548 counters->wasm_module_code_size_kb()->AddSample(code_size_kb);
2549 // Record the size of metadata.
2550 Histogram* metadata_histogram = counters->wasm_module_metadata_size_kb();
2551 if (metadata_histogram->Enabled()) {
2552 // TODO(349610478): EstimateCurrentMemoryConsumption() acquires a large
 2553    // number of locks per NativeModule. This estimation is run on every
2554 // mark-compact GC. Reconsider whether this should be run less frequently.
2555 // (Probably incomplete) list of locks acquired:
2556 // - TypeFeedbackStorage::mutex
2557 // - LazilyGeneratedNames::mutex_
2558 // - CompilationStateImpl::mutex_
2559 // - CompilationUnitQueues::queues_mutex_
2560 // - per queue: QueueImpl::mutex
2561 // - BigUnitsQueue::mutex
2562 // - WasmImportWrapperCache::mutex_
2563 // - NativeModule::allocation_mutex_
2564 // - LazilyGeneratedNames::mutex_
2565 // - DebugInfoImpl::debug_side_tables_mutex_
2566 // - DebugInfoImpl::mutex_
2567 int metadata_size_kb =
2568 static_cast<int>(EstimateCurrentMemoryConsumption() / KB);
2569 metadata_histogram->AddSample(metadata_size_kb);
2570 }
2571 // If this is a wasm module of >= 2MB, also sample the freed code size,
2572 // absolute and relative. Code GC does not happen on asm.js
2573 // modules, and small modules will never trigger GC anyway.
2574 size_t generated_size = code_allocator_.generated_code_size();
2575 if (generated_size >= 2 * MB && module()->origin == kWasmOrigin) {
2576 size_t freed_size = code_allocator_.freed_code_size();
2577 DCHECK_LE(freed_size, generated_size);
2578 int freed_percent = static_cast<int>(100 * freed_size / generated_size);
2579 counters->wasm_module_freed_code_size_percent()->AddSample(freed_percent);
2580 }
2581}
2582
2585 std::vector<UnpublishedWasmCode> code = AddCompiledCode({&result, 1});
2586 return std::move(code[0]);
2587}
2588
2589std::vector<UnpublishedWasmCode> NativeModule::AddCompiledCode(
2591 TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
2592 "wasm.AddCompiledCode", "num", results.size());
2593 DCHECK(!results.empty());
2594 std::vector<UnpublishedWasmCode> generated_code;
2595 generated_code.reserve(results.size());
2596
2597 // First, allocate code space for all the results.
2598 // Never add more than half of a code space at once. This leaves some space
2599 // for jump tables and other overhead. We could use {OverheadPerCodeSpace},
2600 // but that's only an approximation, so we are conservative here and never use
2601 // more than half a code space.
2602 size_t max_code_batch_size = v8_flags.wasm_max_code_space_size_mb * MB / 2;
2603 size_t total_code_space = 0;
2604 for (auto& result : results) {
2605 DCHECK(result.succeeded());
2606 size_t new_code_space =
2607 RoundUp<kCodeAlignment>(result.code_desc.instr_size);
2608 if (total_code_space + new_code_space > max_code_batch_size) {
2609 // Split off the first part of the {results} vector and process it
2610 // separately. This method then continues with the rest.
2611 size_t split_point = &result - results.begin();
2612 if (split_point == 0) {
 2613      // Fuzzers sometimes hit this by reducing --wasm-max-code-space-size-mb
2614 // to an unreasonably small value. Make this an OOM to avoid getting a
2615 // CHECK failure in this case.
2616 if (v8_flags.wasm_max_code_space_size_mb <
2618 auto oom_detail = base::FormattedString{}
2619 << "--wasm-max-code-space-size="
2620 << v8_flags.wasm_max_code_space_size_mb.value();
2622 "A single code object needs more than "
2623 "half of the code space size",
2624 oom_detail.PrintToArray().data());
2625 } else {
2626 // Otherwise make this a CHECK failure so we see if this is happening
2627 // in the wild or in tests.
2628 FATAL(
2629 "A single code object needs more than half of the code space "
2630 "size");
2631 }
2632 }
2633 auto first_results = AddCompiledCode(results.SubVector(0, split_point));
2634 generated_code.insert(generated_code.end(),
2635 std::make_move_iterator(first_results.begin()),
2636 std::make_move_iterator(first_results.end()));
2637 // Continue processing the rest of the vector. This change to the
2638 // {results} vector does not invalidate iterators (which are just
2639 // pointers). In particular, the end pointer stays the same.
2640 results += split_point;
2641 total_code_space = 0;
2642 }
2643 total_code_space += new_code_space;
2644 }
2645 base::Vector<uint8_t> code_space;
2646 NativeModule::JumpTablesRef jump_tables;
2647 {
2649 code_space = code_allocator_.AllocateForCode(this, total_code_space);
2650 // Lookup the jump tables to use once, then use for all code objects.
2651 jump_tables =
2653 }
2654 // If we happen to have a {total_code_space} which is bigger than
2655 // {kMaxCodeSpaceSize}, we would not find valid jump tables for the whole
2656 // region. If this ever happens, we need to handle this case (by splitting the
 2657  // {results} vector into smaller chunks).
2658 CHECK(jump_tables.is_valid());
2659
2660 std::vector<size_t> sizes;
2661 for (const auto& result : results) {
2662 sizes.emplace_back(RoundUp<kCodeAlignment>(result.code_desc.instr_size));
2663 }
2665 reinterpret_cast<Address>(code_space.begin()), sizes,
2667
2668 // Now copy the generated code into the code space and relocate it.
2669 for (auto& result : results) {
2670 DCHECK_EQ(result.code_desc.buffer, result.instr_buffer->start());
2671 size_t code_size = RoundUp<kCodeAlignment>(result.code_desc.instr_size);
2672 base::Vector<uint8_t> this_code_space = code_space.SubVector(0, code_size);
2673 code_space += code_size;
2674 generated_code.emplace_back(
2676 result.func_index, result.code_desc, result.frame_slot_count,
2677 result.ool_spill_count, result.tagged_parameter_slots,
2678 result.protected_instructions_data.as_vector(),
2679 result.source_positions.as_vector(),
2680 result.inlining_positions.as_vector(),
2681 result.deopt_data.as_vector(), GetCodeKind(result),
2682 result.result_tier, result.for_debugging,
2683 result.frame_has_feedback_slot, this_code_space, jump_tables),
2684 std::move(result.assumptions));
2685 }
2686 DCHECK_EQ(0, code_space.size());
2687
2688 // Check that we added the expected amount of code objects, even if we split
2689 // the {results} vector.
2690 DCHECK_EQ(generated_code.capacity(), generated_code.size());
2691
2692 return generated_code;
2693}
2694
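// A minimal, self-contained sketch (not from wasm-code-manager.cc) of the
// batching policy described in AddCompiledCode above: code objects are grouped
// so that no batch exceeds half of a code space, leaving room for jump tables
// and other per-code-space overhead. Names are hypothetical; unlike the real
// code, an oversized single item simply ends up alone in its own batch.
#include <cstddef>
#include <vector>

namespace example {

inline std::vector<std::vector<size_t>> SplitIntoBatches(
    const std::vector<size_t>& code_sizes, size_t max_batch_size) {
  std::vector<std::vector<size_t>> batches(1);
  size_t batch_total = 0;
  for (size_t size : code_sizes) {
    // Start a new batch if adding this item would exceed the limit.
    if (batch_total + size > max_batch_size && !batches.back().empty()) {
      batches.emplace_back();
      batch_total = 0;
    }
    batches.back().push_back(size);
    batch_total += size;
  }
  return batches;
}

}  // namespace example
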
2696 // Do not tier down asm.js (just never change the tiering state).
2697 if (module()->origin != kWasmOrigin) return;
2698
2700 debug_state_ = new_debug_state;
2701}
2702
2703namespace {
2704bool ShouldRemoveCode(WasmCode* code, NativeModule::RemoveFilter filter) {
2706 !code->for_debugging()) {
2707 return false;
2708 }
2710 code->for_debugging()) {
2711 return false;
2712 }
2714 !code->is_liftoff()) {
2715 return false;
2716 }
2718 !code->is_turbofan()) {
2719 return false;
2720 }
2721 return true;
2722}
2723} // namespace
2724
2725std::pair<size_t, size_t> NativeModule::RemoveCompiledCode(
2726 RemoveFilter filter) {
2727 const uint32_t num_imports = module_->num_imported_functions;
2728 const uint32_t num_functions = module_->num_declared_functions;
2729 size_t removed_codesize = 0;
2730 size_t removed_metadatasize = 0;
2731 {
2733 for (uint32_t i = 0; i < num_functions; i++) {
2734 WasmCode* code = code_table_[i];
2735 if (code && ShouldRemoveCode(code, filter)) {
2736 removed_codesize += code->instructions_size();
2737 removed_metadatasize += code->EstimateCurrentMemoryConsumption();
2738 code_table_[i] = nullptr;
2739 // Add the code to the {WasmCodeRefScope}, so the ref count cannot drop
2740 // to zero here. It might in the {WasmCodeRefScope} destructor, though.
2742 code->DecRefOnLiveCode();
2743 uint32_t func_index = i + num_imports;
2744 UseLazyStubLocked(func_index);
2745 }
2746 }
2747 // To avoid lock order inversion, release the {allocation_mutex_} before
2748 // acquiring the {type_feedback.mutex} inside {AllowAnother...} below.
2749 }
2750 // When resuming optimized execution after a debugging session ends, or when
2751 // discarding optimized code that made outdated assumptions, allow another
2752 // tier-up task to get scheduled.
2753 if (filter == RemoveFilter::kRemoveDebugCode ||
2755 compilation_state_->AllowAnotherTopTierJobForAllFunctions();
2756 }
2757 return std::make_pair(removed_codesize, removed_metadatasize);
2758}
2759
2762 const uint32_t num_functions = module_->num_declared_functions;
2763 size_t codesize_liftoff = 0;
2764 for (uint32_t i = 0; i < num_functions; i++) {
2765 WasmCode* code = code_table_[i];
2766 if (code && code->is_liftoff()) {
2767 codesize_liftoff += code->instructions_size();
2768 }
2769 }
2770 return codesize_liftoff;
2771}
2772
2775 // Free the code space.
2777
2779 DebugInfo* debug_info = debug_info_.get();
2780 // Free the {WasmCode} objects. This will also unregister trap handler data.
2781 for (WasmCode* code : codes) {
2782 DCHECK_EQ(1, owned_code_.count(code->instruction_start()));
2783 // TODO(407003348): Drop these checks if they don't trigger in the wild.
2784 CHECK(code->is_dying());
2785 CHECK_EQ(code->ref_count_.load(std::memory_order_acquire), 0);
2786 owned_code_.erase(code->instruction_start());
2787 }
2788 // Remove debug side tables for all removed code objects, after releasing our
2789 // lock. This is to avoid lock order inversion.
2790 if (debug_info) debug_info->RemoveDebugSideTables(codes);
2791}
2792
2797
2800 return debug_info_ != nullptr;
2801}
2802
2805 if (!debug_info_) debug_info_ = std::make_unique<DebugInfo>(this);
2806 return debug_info_.get();
2807}
2808
2812 if (!names_provider_) {
2814 std::make_unique<NamesProvider>(module_.get(), wire_bytes());
2815 }
2816 return names_provider_.get();
2817}
2818
2821 size_t result = sizeof(NativeModule);
2822 result += module_->EstimateCurrentMemoryConsumption();
2823
2824 std::shared_ptr<base::OwnedVector<const uint8_t>> wire_bytes =
2825 std::atomic_load(&wire_bytes_);
2826 size_t wire_bytes_size = wire_bytes ? wire_bytes->size() : 0;
2827 result += wire_bytes_size;
2828
2829 if (source_map_) {
2830 result += source_map_->EstimateCurrentMemoryConsumption();
2831 }
2832 result += compilation_state_->EstimateCurrentMemoryConsumption();
2833 // For {tiering_budgets_}.
2834 result += module_->num_declared_functions * sizeof(uint32_t);
2835
2836 size_t external_storage = compile_imports_.constants_module().capacity();
2837 // This is an approximation: the actual number of inline-stored characters
2838 // is a little less than the result of `sizeof`.
2839 if (external_storage > sizeof(std::string)) {
2840 result += external_storage;
2841 }
2842
2843 // For fast api call targets.
2844 result += module_->num_imported_functions *
2845 (sizeof(std::atomic<Address>) + sizeof(CFunctionInfo*));
2846 // We cannot hold the `allocation_mutex_` while calling
2847 // `debug_info_->EstimateCurrentMemoryConsumption`, as we would run into a
2848 // lock-order-inversion when acquiring the `mutex_`. The reverse order happens
2849 // when calling `WasmScript::SetBreakPointForFunction`.
2850 DebugInfo* debug_info;
2851 {
2854 for (auto& [address, unique_code_ptr] : owned_code_) {
2855 result += unique_code_ptr->EstimateCurrentMemoryConsumption();
2856 }
2858 for (std::unique_ptr<WasmCode>& code : new_owned_code_) {
2859 result += code->EstimateCurrentMemoryConsumption();
2860 }
2861 // For {code_table_}.
2862 result += module_->num_declared_functions * sizeof(void*);
2864 debug_info = debug_info_.get();
2865 if (names_provider_) {
2866 result += names_provider_->EstimateCurrentMemoryConsumption();
2867 }
2868 }
2869 if (debug_info) {
2871 }
2872
2873 if (v8_flags.trace_wasm_offheap_memory) {
2874 PrintF("NativeModule wire bytes: %zu\n", wire_bytes_size);
2875 PrintF("NativeModule: %zu\n", result);
2876 }
2877 return result;
2878}
2879
2881 DCHECK(v8_flags.print_wasm_offheap_memory_size);
2882 PrintF("Off-heap memory size of NativeModule: %zu\n",
2884}
2885
2887 base::Vector<VirtualMemory> owned_code_space, size_t committed_size) {
2889 for (auto& code_space : owned_code_space) {
2890 DCHECK(code_space.IsReserved());
2891 TRACE_HEAP("VMem Release: 0x%" PRIxPTR ":0x%" PRIxPTR " (%zu)\n",
2892 code_space.address(), code_space.end(), code_space.size());
2893
2894#if defined(V8_OS_WIN64)
2895 if (CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
2897 reinterpret_cast<void*>(code_space.address()));
2898 }
2899#endif // V8_OS_WIN64
2900
2901 lookup_map_.erase(code_space.address());
2902 ThreadIsolation::UnregisterJitPage(code_space.address(), code_space.size());
2903 code_space.Free();
2904 DCHECK(!code_space.IsReserved());
2905 }
2906
2907 DCHECK(IsAligned(committed_size, CommitPageSize()));
2908 [[maybe_unused]] size_t old_committed =
2909 total_committed_code_space_.fetch_sub(committed_size);
2910 DCHECK_LE(committed_size, old_committed);
2911}
2912
2915 if (lookup_map_.empty()) return nullptr;
2916
2917 auto iter = lookup_map_.upper_bound(pc);
2918 if (iter == lookup_map_.begin()) return nullptr;
2919 --iter;
2920 Address region_start = iter->first;
2921 Address region_end = iter->second.first;
2922 NativeModule* candidate = iter->second.second;
2923
2924 DCHECK_NOT_NULL(candidate);
2925 return region_start <= pc && pc < region_end ? candidate : nullptr;
2926}
2927
2929 NativeModule* candidate = LookupNativeModule(pc);
2930 if (candidate) return candidate->Lookup(pc);
2932}
2933
2935 // Since kNullAddress is used as a sentinel value, we should not try
 2936  // to look it up in the cache.
2937 if (pc == kNullAddress) return nullptr;
2938 // If 'isolate' is nullptr, do not use a cache. This can happen when
 2939  // called from V8NameConverter::NameOfAddress.
2940 if (isolate) {
2941 return isolate->wasm_code_look_up_cache()->GetCacheEntry(pc)->code;
2942 } else {
2943 wasm::WasmCodeRefScope code_ref_scope;
2944 return LookupCode(pc);
2945 }
2946}
2947
2948std::pair<WasmCode*, SafepointEntry> WasmCodeManager::LookupCodeAndSafepoint(
2949 Isolate* isolate, Address pc) {
2950 auto* entry = isolate->wasm_code_look_up_cache()->GetCacheEntry(pc);
2951 WasmCode* code = entry->code;
2952 DCHECK_NOT_NULL(code);
2953 // For protected instructions we usually do not emit a safepoint because the
2954 // frame will be unwound anyway. The exception is debugging code, where the
2955 // frame might be inspected if "pause on exception" is set.
2956 // For those instructions, we thus need to explicitly return an empty
2957 // safepoint; using any previously registered safepoint can lead to crashes
2958 // when we try to visit spill slots that do not hold tagged values at this
2959 // point.
2960 // Evaluate this condition only on demand (the fast path does not need it).
2961 auto expect_safepoint = [code, pc]() {
2962 const bool is_protected_instruction = code->IsProtectedInstruction(
2963 pc - WasmFrameConstants::kProtectedInstructionReturnAddressOffset);
2964 return !is_protected_instruction || code->for_debugging();
2965 };
2966 if (!entry->safepoint_entry.is_initialized() && expect_safepoint()) {
2967 entry->safepoint_entry = SafepointTable{code}.TryFindEntry(pc);
2968 CHECK(entry->safepoint_entry.is_initialized());
2969 } else if (expect_safepoint()) {
2970 DCHECK_EQ(entry->safepoint_entry, SafepointTable{code}.TryFindEntry(pc));
2971 } else {
2972 DCHECK(!entry->safepoint_entry.is_initialized());
2973 }
2974 return std::make_pair(code, entry->safepoint_entry);
2975}
2976
2978 return isolate->wasm_code_look_up_cache()->Flush();
2979}
2980
2981namespace {
2982thread_local WasmCodeRefScope* current_code_refs_scope = nullptr;
2983} // namespace
2984
2986 : previous_scope_(current_code_refs_scope) {
2987 current_code_refs_scope = this;
2988}
2989
2991 DCHECK_EQ(this, current_code_refs_scope);
2992 current_code_refs_scope = previous_scope_;
2994}
2995
2996// static
2998 DCHECK_NOT_NULL(code);
2999 WasmCodeRefScope* current_scope = current_code_refs_scope;
3000 DCHECK_NOT_NULL(current_scope);
3001 current_scope->code_ptrs_.push_back(code);
3002 code->IncRef();
3003}
3004
3006 for (int i = 0; i < kWasmCodeLookupCacheSize; i++)
3007 cache_[i].pc.store(kNullAddress, std::memory_order_release);
3008}
3009
3011 Address pc) {
3014 uint32_t hash = ComputeAddressHash(pc);
3015 uint32_t index = hash & (kWasmCodeLookupCacheSize - 1);
3016 CacheEntry* entry = &cache_[index];
3017 if (entry->pc.load(std::memory_order_acquire) == pc) {
3018 // Code can be deallocated at two points:
3019 // - when the NativeModule that references it is garbage-
3020 // collected;
3021 // - when it is no longer referenced by its NativeModule, nor from
3022 // any stack.
3023 // The cache is cleared when a NativeModule is destroyed, and when
3024 // the isolate reports the set of code referenced from its stacks.
 3025    // So, if the code is in the cache, it is because it was live at some
3026 // point (when inserted in the cache), its native module is still
3027 // considered live, and it has not yet been reported as no longer
3028 // referenced from any stack. It thus cannot have been released
3029 // yet.
3030#ifdef DEBUG
3031 wasm::WasmCodeRefScope code_ref_scope;
3033#endif // DEBUG
3034 } else {
3035 // For WebAssembly frames we perform a lookup in the handler table.
3036 // This code ref scope is here to avoid a check failure when looking up
3037 // the code. It's not actually necessary to keep the code alive as it's
3038 // currently being executed.
3039 wasm::WasmCodeRefScope code_ref_scope;
3040 entry->pc.store(pc, std::memory_order_release);
3041 entry->code = wasm::GetWasmCodeManager()->LookupCode(pc);
3042 entry->safepoint_entry.Reset();
3043 }
3044 return entry;
3045}
3046
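// A minimal, self-contained sketch (not from wasm-code-manager.cc) of the
// direct-mapped indexing used by the code lookup cache above: with a
// power-of-two table size, the slot can be selected by masking the hash
// instead of taking a modulo. The size and names are hypothetical.
#include <cstdint>

namespace example {

constexpr uint32_t kCacheSize = 1024;  // must be a power of two
static_assert((kCacheSize & (kCacheSize - 1)) == 0,
              "cache size must be a power of two");

inline uint32_t CacheIndexForPc(uint32_t pc_hash) {
  return pc_hash & (kCacheSize - 1);
}

}  // namespace example
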
3047} // namespace wasm
3048} // namespace internal
3049} // namespace v8
3050#undef TRACE_HEAP
Builtins::Kind kind
Definition builtins.cc:40
virtual size_t AllocatePageSize()=0
virtual bool RecommitPages(void *address, size_t length, Permission permissions)
virtual bool DiscardSystemPages(void *address, size_t size)
base::AddressRegion GetOverlap(AddressRegion region) const
Vector< T > as_vector() const
Definition vector.h:276
V8_INLINE void AssertHeld() const
Definition mutex.h:153
void emplace_back(Args &&... args)
int length() const
Definition vector.h:64
Vector< T > SubVector(size_t from, size_t to) const
Definition vector.h:41
constexpr bool empty() const
Definition vector.h:73
constexpr size_t size() const
Definition vector.h:70
constexpr T * begin() const
Definition vector.h:96
constexpr T * data() const
Definition vector.h:100
static constexpr bool kAllBuiltinsAreIsolateIndependent
Definition builtins.h:262
static V8_EXPORT_PRIVATE const int kEagerDeoptExitSize
static V8_EXPORT_PRIVATE int Decode(Isolate *isolate, std::ostream &os, uint8_t *begin, uint8_t *end, CodeReference code={}, Address current_pc=kNullAddress, size_t range_limit=0)
Address InstructionStartOf(Builtin builtin) const
static EmbeddedData FromBlob()
V8_EXPORT_PRIVATE void AddSample(int sample)
Definition counters.cc:50
static NEVER_READ_ONLY_SPACE constexpr bool kOnHeapBodyIsContiguous
static V8_INLINE Isolate * Current()
Definition isolate-inl.h:35
static const int kApplyMask
Definition reloc-info.h:369
uint32_t wasm_call_tag() const
static constexpr int ModeMask(Mode mode)
Definition reloc-info.h:272
static constexpr bool IsWasmCall(Mode mode)
Definition reloc-info.h:213
static constexpr bool IsWasmStubCall(Mode mode)
Definition reloc-info.h:214
static constexpr bool IsWasmCodePointerTableEntry(Mode mode)
Definition reloc-info.h:220
SafepointEntry TryFindEntry(Address pc) const
static void RegisterJitAllocations(Address start, const std::vector< size_t > &sizes, JitAllocationType type)
static void UnregisterJitPage(Address address, size_t size)
static void RegisterJitPage(Address address, size_t size)
static WritableJitAllocation LookupJitAllocation(Address addr, size_t size, JitAllocationType type, bool enforce_write_api=false)
static V8_NODISCARD bool MakeExecutable(Address address, size_t size)
static void UnregisterWasmAllocation(Address addr, size_t size)
static WritableJitAllocation RegisterJitAllocation(Address addr, size_t size, JitAllocationType type, bool enforce_write_api=false)
static V8_EXPORT_PRIVATE void FatalProcessOutOfMemory(Isolate *isolate, const char *location, const OOMDetails &details=kNoOOMDetails)
const base::AddressRegion & region() const
Definition allocation.h:243
V8_INLINE void CopyCode(size_t dst_offset, const uint8_t *src, size_t num_bytes)
V8_INLINE void ClearBytes(size_t offset, size_t len)
const std::vector< std::pair< uint32_t, WellKnownImport > > & import_statuses()
static constexpr int JumptableIndexForBuiltin(Builtin builtin)
static constexpr Builtin BuiltinForJumptableIndex(int index)
static std::unique_ptr< CompilationState > New(const std::shared_ptr< NativeModule > &, std::shared_ptr< Counters >, WasmDetectedFeatures detected_features)
size_t EstimateCurrentMemoryConsumption() const
DebugSideTable * GetDebugSideTableIfExists(const WasmCode *) const
void RemoveDebugSideTables(base::Vector< WasmCode *const >)
base::AddressRegion Allocate(size_t size)
base::AddressRegion AllocateInRegion(size_t size, base::AddressRegion)
std::set< base::AddressRegion, base::AddressRegion::StartAddressLess > regions_
base::AddressRegion Merge(base::AddressRegion)
static uint32_t FarJumpSlotOffsetToIndex(uint32_t offset)
static void PatchJumpTableSlot(WritableJumpTablePair &jump_table_pair, Address jump_table_slot, Address far_jump_table_slot, Address target)
static uint32_t FarJumpSlotIndexToOffset(uint32_t slot_index)
static constexpr uint32_t SizeForNumberOfLazyFunctions(uint32_t slot_count)
static constexpr uint32_t SizeForNumberOfSlots(uint32_t slot_count)
static uint32_t JumpSlotIndexToOffset(uint32_t slot_index)
static constexpr uint32_t SizeForNumberOfFarJumpSlots(int num_runtime_slots, int num_function_slots)
static void InitializeJumpsToLazyCompileTable(Address base, uint32_t num_slots, Address lazy_compile_table_start)
static void GenerateFarJumpTable(WritableJitAllocation &jit_allocation, Address base, Address *stub_targets, int num_runtime_slots, int num_function_slots)
static uint32_t LazyCompileSlotIndexToOffset(uint32_t slot_index)
static uint32_t SlotOffsetToIndex(uint32_t slot_offset)
static void GenerateLazyCompileTable(Address base, uint32_t num_slots, uint32_t num_imported_functions, Address wasm_compile_lazy_target)
void UpdateWellKnownImports(base::Vector< WellKnownImport > entries)
std::unique_ptr< NamesProvider > names_provider_
std::vector< CodeSpaceData > code_space_data_
Builtin GetBuiltinInJumptableSlot(Address target) const
NativeModule(const NativeModule &)=delete
void FreeCode(base::Vector< WasmCode *const >)
std::shared_ptr< base::OwnedVector< const uint8_t > > wire_bytes_
std::atomic< int64_t > sum_lazy_compilation_time_in_micro_sec_
Address GetNearCallTargetForFunction(uint32_t func_index, const JumpTablesRef &) const
static constexpr bool kNeedsFarJumpsBetweenCodeSpaces
std::shared_ptr< const WasmModule > module_
std::unique_ptr< WasmModuleSourceMap > source_map_
std::map< Address, std::unique_ptr< WasmCode > > owned_code_
std::vector< WasmCode * > SnapshotAllOwnedCode() const
std::unique_ptr< CompilationState > compilation_state_
WasmCode * GetCode(uint32_t index) const
std::unique_ptr< DebugInfo > debug_info_
std::pair< std::vector< WasmCode * >, std::vector< WellKnownImport > > SnapshotCodeTable() const
const WasmModule * module() const
std::pair< base::Vector< uint8_t >, JumpTablesRef > AllocateForDeserializedCode(size_t total_code_size)
void PatchJumpTablesLocked(uint32_t slot_index, Address target, Address code_pointer_table_target, uint64_t signature_hash)
base::Vector< const uint8_t > wire_bytes() const
WasmCode * CreateEmptyJumpTableInRegionLocked(int jump_table_size, base::AddressRegion, JumpTableType type)
std::unique_ptr< WasmCodePointer[]> code_pointer_handles_
void LogWasmCodes(Isolate *, Tagged< Script >)
std::atomic< size_t > liftoff_code_size_
const CompileTimeImports compile_imports_
WasmCode * PublishCodeLocked(std::unique_ptr< WasmCode >, AssumptionsJournal *)
void SetWireBytes(base::OwnedVector< const uint8_t > wire_bytes)
WasmCodePointer GetCodePointerHandle(int index) const
OperationsBarrier::Token engine_scope_
JumpTablesRef FindJumpTablesForRegionLocked(base::AddressRegion) const
bool HasCodeWithTier(uint32_t index, ExecutionTier tier) const
bool HasCode(uint32_t index) const
bool should_update_code_table(WasmCode *new_code, WasmCode *prior_code) const
void InitializeJumpTableForLazyCompilation(uint32_t num_wasm_functions)
Address GetJumpTableEntryForBuiltin(Builtin builtin, const JumpTablesRef &) const
std::atomic< int64_t > max_lazy_compilation_time_in_micro_sec_
std::pair< size_t, size_t > RemoveCompiledCode(RemoveFilter filter)
std::unique_ptr< WasmCode > AddDeserializedCode(int index, base::Vector< uint8_t > instructions, int stack_slots, int ool_spills, uint32_t tagged_parameter_slots, int safepoint_table_offset, int handler_table_offset, int constant_pool_offset, int code_comments_offset, int unpadded_binary_size, base::Vector< const uint8_t > protected_instructions_data, base::Vector< const uint8_t > reloc_info, base::Vector< const uint8_t > source_position_table, base::Vector< const uint8_t > inlining_positions, base::Vector< const uint8_t > deopt_data, WasmCode::Kind kind, ExecutionTier tier)
WasmCode * PublishCode(UnpublishedWasmCode)
absl::flat_hash_map< WasmCodePointer, uint32_t > CallIndirectTargetMap
std::unique_ptr< WasmCode *[]> code_table_
void AddLazyCompilationTimeSample(int64_t sample)
std::unique_ptr< WasmCode > AddCodeWithCodeSpace(int index, const CodeDesc &desc, int stack_slots, int ool_spill_count, uint32_t tagged_parameter_slots, base::Vector< const uint8_t > protected_instructions_data, base::Vector< const uint8_t > source_position_table, base::Vector< const uint8_t > inlining_positions, base::Vector< const uint8_t > deopt_data, WasmCode::Kind kind, ExecutionTier tier, ForDebugging for_debugging, bool frame_has_feedback_slot, base::Vector< uint8_t > code_space, const JumpTablesRef &jump_tables_ref)
WasmCode * CreateEmptyJumpTableLocked(int jump_table_size, JumpTableType type)
void UseLazyStubLocked(uint32_t func_index)
std::unique_ptr< WasmCode > AddCode(int index, const CodeDesc &desc, int stack_slots, int ool_spill_count, uint32_t tagged_parameter_slots, base::Vector< const uint8_t > protected_instructions, base::Vector< const uint8_t > source_position_table, base::Vector< const uint8_t > inlining_positions, base::Vector< const uint8_t > deopt_data, WasmCode::Kind kind, ExecutionTier tier, ForDebugging for_debugging)
void UpdateCodeSize(size_t, ExecutionTier, ForDebugging)
V8_WARN_UNUSED_RESULT UnpublishedWasmCode AddCompiledCode(WasmCompilationResult &)
WasmCode * AddCodeForTesting(DirectHandle< Code > code, uint64_t signature_hash)
CallIndirectTargetMap CreateIndirectCallTargetToFunctionIndexMap() const
void InitializeCodePointerTableHandles(uint32_t num_wasm_functions)
std::atomic< size_t > turbofan_code_size_
std::unique_ptr< std::atomic< uint32_t >[]> tiering_budgets_
uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const
WasmModuleSourceMap * GetWasmSourceMap() const
void PatchJumpTableLocked(WritableJumpTablePair &jump_table_pair, const CodeSpaceData &, uint32_t slot_index, Address target)
std::vector< std::unique_ptr< WasmCode > > new_owned_code_
void AddCodeSpaceLocked(base::AddressRegion)
void SetWasmSourceMap(std::unique_ptr< WasmModuleSourceMap > source_map)
base::Vector< uint8_t > AllocateForCode(NativeModule *, size_t size)
base::Vector< uint8_t > AllocateForCodeInRegion(NativeModule *, size_t size, base::AddressRegion)
std::vector< VirtualMemory > owned_code_space_
WasmCodeAllocator(std::shared_ptr< Counters > async_counters)
void Init(VirtualMemory code_space)
std::shared_ptr< Counters > async_counters_
base::Vector< uint8_t > AllocateForWrapper(size_t size)
void FreeCode(base::Vector< WasmCode *const >)
void InitializeCodeRange(NativeModule *native_module, base::AddressRegion region)
CacheEntry cache_[kWasmCodeLookupCacheSize]
std::atomic< Address > next_code_space_hint_
static size_t EstimateLiftoffCodeSize(int body_size)
void FreeNativeModule(base::Vector< VirtualMemory > owned_code, size_t committed_size)
std::map< Address, std::pair< Address, NativeModule * > > lookup_map_
static size_t EstimateNativeModuleMetaDataSize(const WasmModule *)
std::atomic< size_t > critical_committed_code_space_
std::pair< WasmCode *, SafepointEntry > LookupCodeAndSafepoint(Isolate *isolate, Address pc)
static size_t EstimateNativeModuleCodeSize(const WasmModule *)
NativeModule * LookupNativeModule(Address pc) const
V8_WARN_UNUSED_RESULT VirtualMemory TryAllocate(size_t size)
std::atomic< size_t > total_committed_code_space_
WasmCode * LookupCode(Isolate *isolate, Address pc) const
std::shared_ptr< NativeModule > NewNativeModule(Isolate *isolate, WasmEnabledFeatures enabled_features, WasmDetectedFeatures detected_features, CompileTimeImports compile_imports, size_t code_size_estimate, std::shared_ptr< const WasmModule > module)
void AssignRange(base::AddressRegion, NativeModule *)
void SetEntrypointWithWriteScope(WasmCodePointer index, Address value, uint64_t signature_hash, WriteScope &write_scope)
void SetEntrypointAndSignature(WasmCodePointer index, Address value, uint64_t signature_hash)
std::tuple< int, bool, SourcePosition > GetInliningPosition(int inlining_id) const
void Print(const char *name=nullptr) const
base::Vector< uint8_t > instructions() const
V8_NOINLINE void DecRefOnPotentiallyDeadCode()
base::Vector< const trap_handler::ProtectedInstructionData > protected_instructions() const
base::Vector< const uint8_t > source_positions() const
void Disassemble(const char *name, std::ostream &os, Address current_pc=kNullAddress) const
base::Vector< const uint8_t > deopt_data() const
NativeModule * native_module() const
base::Vector< const uint8_t > reloc_info() const
bool contains(Address pc) const
void LogCode(Isolate *isolate, const char *source_url, int script_id) const
SourcePosition GetSourcePositionBefore(int code_offset)
base::Vector< const uint8_t > inlining_positions() const
std::unique_ptr< const uint8_t[]> ConcatenateBytes(std::initializer_list< base::Vector< const uint8_t > >)
static void DecrementRefCount(base::Vector< WasmCode *const >)
WasmCode(const WasmCode &)=delete
NativeModule *const native_module_
ForDebugging for_debugging() const
static bool ShouldBeLogged(Isolate *isolate)
size_t EstimateCurrentMemoryConsumption() const
int GetSourceOffsetBefore(int code_offset)
WasmDeoptEntry GetDeoptEntry(uint32_t deopt_index) const
const WasmDeoptData & GetDeoptData() const
void PrintCurrentMemoryConsumptionEstimate() const
void AddPotentiallyDeadCode(WasmCode *)
void FreeDeadCode(const DeadCodeMap &, std::vector< WasmCode * > &)
void FreeNativeModule(NativeModule *)
std::unordered_map< NativeModule *, std::vector< WasmCode * > > DeadCodeMap
std::pair< size_t, size_t > FlushLiftoffCode()
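The lookup entry points listed above (GetWasmCodeManager(), WasmCodeManager::LookupNativeModule(), WasmCodeManager::LookupCode() and WasmCode::contains()) compose into a simple address-to-code resolution path. The snippet below is an illustrative sketch only, not code from this file: it assumes a V8-internal translation unit with the corresponding headers included, and the helper name IsWasmPc is hypothetical.

// Sketch only: determine whether {pc} lies inside published wasm code.
// Assumes the v8::internal::wasm namespace and V8-internal headers.
namespace v8 {
namespace internal {
namespace wasm {

bool IsWasmPc(Isolate* isolate, Address pc) {
  WasmCodeManager* code_manager = GetWasmCodeManager();
  // LookupNativeModule() consults the registered code regions and returns
  // nullptr if {pc} does not fall into any wasm code space.
  NativeModule* native_module = code_manager->LookupNativeModule(pc);
  if (native_module == nullptr) return false;
  // LookupCode() narrows the address down to a single WasmCode object.
  WasmCode* code = code_manager->LookupCode(isolate, pc);
  return code != nullptr && code->contains(pc);
}

}  // namespace wasm
}  // namespace internal
}  // namespace v8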
#define PROFILE(the_isolate, Call)
Definition code-events.h:59
Handle< Code > code
#define V8_EMBEDDED_CONSTANT_POOL_BOOL
Definition globals.h:81
int start
int end
Isolate * isolate
int32_t offset
ZoneVector< RpoNumber > & result
ZoneVector< Entry > entries
int pc_offset
int position
Definition liveedit.cc:290
#define LOG_CODE_EVENT(isolate, Call)
Definition log.h:83
base::SmallVector< int32_t, 1 > stack_slots
const std::shared_ptr< Counters > async_counters_
const base::Vector< const uint8_t > wire_bytes_
const WasmEnabledFeatures enabled_features_
STL namespace.
constexpr bool IsPowerOfTwo(T value)
Definition bits.h:187
AddressRegion AddressRegionOf(T *ptr, size_t size)
auto Reversed(T &t)
Definition iterator.h:105
FormattedString() -> FormattedString<>
constexpr Vector< T > VectorOf(T *start, size_t size)
Definition vector.h:360
OwnedVector< T > OwnedCopyOf(const T *data, size_t size)
Definition vector.h:383
V8_EXPORT_PRIVATE WasmCodePointerTable * GetProcessWideWasmCodePointerTable()
constexpr int kAnonymousFuncIndex
WasmImportWrapperCache * GetWasmImportWrapperCache()
WasmCodeManager * GetWasmCodeManager()
int JumpTableOffset(const WasmModule *module, int func_index)
TypeCanonicalizer * GetTypeCanonicalizer()
constexpr WasmCodePointer kInvalidWasmCodePointer
const char * GetWasmCodeKindAsString(WasmCode::Kind kind)
size_t ContentSize(const std::vector< T > &vector)
void DumpProfileToFile(const WasmModule *module, base::Vector< const uint8_t > wire_bytes, std::atomic< uint32_t > *tiering_budget_array)
Definition pgo.cc:196
WasmEngine * GetWasmEngine()
int declared_function_index(const WasmModule *module, int func_index)
WasmCode::Kind GetCodeKind(const WasmCompilationResult &result)
constexpr size_t kV8MaxWasmFunctionParams
Definition wasm-limits.h:53
void RegisterNonABICompliantCodeRange(void *start, size_t size_in_bytes)
void UnregisterNonABICompliantCodeRange(void *start)
bool SetPermissions(v8::PageAllocator *page_allocator, void *address, size_t size, PageAllocator::Permission access)
constexpr int kInt64Size
Definition globals.h:402
v8::PageAllocator * GetPlatformPageAllocator()
Definition allocation.cc:66
void PrintF(const char *format,...)
Definition utils.cc:39
void FlushInstructionCache(void *start, size_t size)
constexpr intptr_t kCodeAlignment
Definition globals.h:964
constexpr uint32_t kDefaultMaxWasmCodeSpaceSizeMb
Definition globals.h:452
void * GetRandomMmapAddr()
size_t CommitPageSize()
constexpr int kSystemPointerSize
Definition globals.h:410
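Several of the page-management helpers listed above (GetPlatformPageAllocator(), SetPermissions(), CommitPageSize(), IsAligned()) are typically used together when the permissions of a committed, page-aligned region change. The following is a hedged sketch under that assumption; CommitForWriting is a hypothetical name, and a real call site would carry additional bookkeeping around it.

// Sketch only: flip a page-aligned region to read+write via the platform
// page allocator. Only the helpers listed above are assumed to exist.
bool CommitForWriting(Address region_start, size_t region_size) {
  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
  // These helpers operate at commit-page granularity, so both the start and
  // the size must be aligned to CommitPageSize().
  DCHECK(IsAligned(region_start, CommitPageSize()));
  DCHECK(IsAligned(region_size, CommitPageSize()));
  return SetPermissions(page_allocator,
                        reinterpret_cast<void*>(region_start), region_size,
                        v8::PageAllocator::kReadWrite);
}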
V8_EXPORT_PRIVATE FlagValues v8_flags
static constexpr Address kNullAddress
Definition v8-internal.h:53
uint32_t ComputeAddressHash(Address address)
Definition utils.h:306
Tagged< To > Cast(Tagged< From > value, const v8::SourceLocation &loc=INIT_SOURCE_LOCATION_IN_DEBUG)
Definition casting.h:150
Definition c-api.cc:87
#define FATAL(...)
Definition logging.h:47
#define DCHECK_LE(v1, v2)
Definition logging.h:490
#define CHECK_GE(lhs, rhs)
#define DCHECK_NULL(val)
Definition logging.h:491
#define CHECK(condition)
Definition logging.h:124
#define CHECK_LE(lhs, rhs)
#define DCHECK_NOT_NULL(val)
Definition logging.h:492
#define CHECK_NOT_NULL(val)
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define CHECK_EQ(lhs, rhs)
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define DCHECK_GT(v1, v2)
Definition logging.h:487
constexpr T RoundUp(T x, intptr_t m)
Definition macros.h:387
constexpr T RoundDown(T x, intptr_t m)
Definition macros.h:371
constexpr bool IsAligned(T value, U alignment)
Definition macros.h:403
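RoundDown(), RoundUp() and IsAligned() above are the usual power-of-two alignment helpers. The standalone sketch below reproduces the conventional formulation for illustration; it is not copied from macros.h and assumes the alignment argument is a power of two.

#include <cassert>
#include <cstdint>

// Clear the low bits of {x}; {m} must be a power of two.
template <typename T>
constexpr T RoundDown(T x, intptr_t m) {
  return static_cast<T>(x & static_cast<T>(-m));
}

// Round {x} up to the next multiple of {m}.
template <typename T>
constexpr T RoundUp(T x, intptr_t m) {
  return RoundDown<T>(static_cast<T>(x + (m - 1)), m);
}

template <typename T, typename U>
constexpr bool IsAligned(T value, U alignment) {
  return (value & (alignment - 1)) == 0;
}

int main() {
  constexpr intptr_t kCommitPage = 4096;  // stand-in for CommitPageSize()
  assert(RoundDown(4097, kCommitPage) == 4096);
  assert(RoundUp(4097, kCommitPage) == 8192);
  assert(IsAligned(RoundUp(4097, kCommitPage), kCommitPage));
  return 0;
}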
#define UPDATE_WHEN_CLASS_CHANGES(classname, size)
WasmName GetNameOrNull(WireBytesRef ref) const
static constexpr AssumptionsJournal * kNoAssumptions
std::unique_ptr< AssumptionsJournal > assumptions
std::vector< WasmFunction > functions
#define TRACE_EVENT0(category_group, name)
#define TRACE_DISABLED_BY_DEFAULT(name)
#define TRACE_EVENT1(category_group, name, arg1_name, arg1_val)
#define V8_UNLIKELY(condition)
Definition v8config.h:660
#define TRACE_HEAP(...)
WasmCodeManager code_manager
const wasm::WasmModule * module_