v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
code-range.cc
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/code-range.h"

#include <algorithm>
#include <atomic>
#include <limits>
#include <utility>

#include "src/base/bits.h"
#include "src/base/lazy-instance.h"
#include "src/base/once.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/heap-inl.h"
#include "src/utils/allocation.h"
#if defined(V8_OS_WIN64)
#include "src/diagnostics/unwinding-info-win64.h"
#endif  // V8_OS_WIN64

namespace v8 {
namespace internal {

namespace {

DEFINE_LAZY_LEAKY_OBJECT_GETTER(CodeRangeAddressHint, GetCodeRangeAddressHint)

void FunctionInStaticBinaryForAddressHint() {}

}  // anonymous namespace

Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size,
                                             size_t allocate_page_size) {
  base::MutexGuard guard(&mutex_);

  Address result = 0;
  auto it = recently_freed_.find(code_range_size);
  // No recently freed region has been found, try to provide a hint for placing
  // a code region.
  if (it == recently_freed_.end() || it->second.empty()) {
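    // Using the address of a function that lives in the binary itself biases
    // the code range towards the executable's .text section.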
    return RoundUp(FUNCTION_ADDR(&FunctionInStaticBinaryForAddressHint),
                   allocate_page_size);
  }

  result = it->second.back();
  CHECK(IsAligned(result, allocate_page_size));
  it->second.pop_back();
  return result;
}

void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
                                                size_t code_range_size) {
  base::MutexGuard guard(&mutex_);
  recently_freed_[code_range_size].push_back(code_range_start);
}

CodeRange::~CodeRange() { Free(); }

// static
size_t CodeRange::GetWritableReservedAreaSize() {
  return kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
}

#define TRACE(...) \
  if (v8_flags.trace_code_range_allocation) PrintF(__VA_ARGS__)

bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
                                size_t requested, bool immutable) {
  DCHECK_NE(requested, 0);
  if (V8_EXTERNAL_CODE_SPACE_BOOL) {
    page_allocator = GetPlatformPageAllocator();
  }

  if (requested <= kMinimumCodeRangeSize) {
    requested = kMinimumCodeRangeSize;
  }

  const size_t kPageSize = MutablePageMetadata::kPageSize;
  const size_t allocate_page_size = page_allocator->AllocatePageSize();
  CHECK(IsAligned(kPageSize, allocate_page_size));

  DCHECK_IMPLIES(kPlatformRequiresCodeRange,
                 requested <= kMaximalCodeRangeSize);

  VirtualMemoryCage::ReservationParams params;
  params.page_allocator = page_allocator;
  params.reservation_size = requested;
  params.base_alignment =
      VirtualMemoryCage::ReservationParams::kAnyBaseAlignment;
  params.page_size = kPageSize;
  if (v8_flags.jitless) {
    params.permissions = PageAllocator::Permission::kNoAccess;
    params.page_initialization_mode =
        base::PageInitializationMode::kAllocatedPagesCanBeUninitialized;
    params.page_freeing_mode = base::PageFreeingMode::kMakeInaccessible;
  } else {
    params.permissions = PageAllocator::Permission::kNoAccessWillJitLater;
    params.page_initialization_mode =
        base::PageInitializationMode::kRecommitOnly;
    params.page_freeing_mode = base::PageFreeingMode::kDiscard;
  }

#if defined(V8_TARGET_OS_IOS) || defined(V8_TARGET_OS_CHROMEOS)
  // iOS:
  // We only get one shot at doing MAP_JIT on iOS, so we need to make it the
  // least restrictive so that it succeeds; otherwise we will terminate the
  // process on the failed allocation.
  // ChromeOS:
  // Chrome on ChromeOS uses the libgcc unwinding library, which seems to work
  // an order of magnitude slower if we allocate the CodeRange closer to the
  // binary. In non-official builds Chrome collects a lot of stack traces just
  // in case, so the slowdown of a single backtrace() call results in a
  // noticeable increase of test times. As a workaround, do a one-shot
  // allocation without providing a hint.
  // TODO(https://crbug.com/40096218): investigate this ChromeOS issue.
  params.requested_start_hint = kNullAddress;
  if (!VirtualMemoryCage::InitReservation(params)) return false;
#else
  constexpr size_t kRadiusInMB =
      kMaxPCRelativeCodeRangeInMB > 1024 ? kMaxPCRelativeCodeRangeInMB : 4096;
  auto preferred_region = GetPreferredRegion(kRadiusInMB, kPageSize);

  TRACE("=== Preferred region: [%p, %p)\n",
        reinterpret_cast<void*>(preferred_region.begin()),
        reinterpret_cast<void*>(preferred_region.end()));

  // For configurations with pointer compression and a shared external code
  // range enabled, we can afford trying harder to allocate the code range
  // near the .text section.
  const bool kShouldTryHarder = V8_EXTERNAL_CODE_SPACE_BOOL &&
                                COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL &&
                                v8_flags.better_code_range_allocation;

  if (kShouldTryHarder) {
    // TODO(v8:11880): consider using base::OS::GetFirstFreeMemoryRangeWithin()
    // to avoid attempts that are going to fail anyway.

    VirtualMemoryCage candidate_cage;

    // Try to allocate the code range at the end of the preferred region, by
    // going towards the start in steps.
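    // For example (illustrative numbers): with a 1 GB preferred region and a
    // 256 MB request, the first hint is preferred_region.end() - 256 MB and
    // each subsequent attempt moves the hint down by
    // step = 1 GB / kAllocationTries = 64 MB towards the region start.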
    const int kAllocationTries = 16;
    params.requested_start_hint =
        RoundDown(preferred_region.end() - requested, kPageSize);
    Address step =
        RoundDown(preferred_region.size() / kAllocationTries, kPageSize);
    for (int i = 0; i < kAllocationTries; i++) {
      TRACE("=== Attempt #%d, hint=%p\n", i,
            reinterpret_cast<void*>(params.requested_start_hint));
      if (candidate_cage.InitReservation(params)) {
        TRACE("=== Attempt #%d (%p): [%p, %p)\n", i,
              reinterpret_cast<void*>(params.requested_start_hint),
              reinterpret_cast<void*>(candidate_cage.region().begin()),
              reinterpret_cast<void*>(candidate_cage.region().end()));
        // Allocation succeeded, check if it's in the preferred range.
        if (preferred_region.contains(candidate_cage.region())) break;
        // This allocation is not the one we are searching for.
        candidate_cage.Free();
      }
      if (step == 0) break;
      params.requested_start_hint -= step;
    }
    if (candidate_cage.IsReserved()) {
      *static_cast<VirtualMemoryCage*>(this) = std::move(candidate_cage);
    }
  }
  if (!IsReserved()) {
    Address the_hint = GetCodeRangeAddressHint()->GetAddressHint(
        requested, allocate_page_size);
    // Last resort, use whatever region we could get with minimum constraints.
    params.requested_start_hint = the_hint;
    if (!VirtualMemoryCage::InitReservation(params)) {
      params.requested_start_hint = kNullAddress;
      if (!VirtualMemoryCage::InitReservation(params)) return false;
    }
    TRACE("=== Fallback attempt, hint=%p: [%p, %p)\n",
          reinterpret_cast<void*>(params.requested_start_hint),
          reinterpret_cast<void*>(region().begin()),
          reinterpret_cast<void*>(region().end()));
  }

  if (v8_flags.abort_on_far_code_range &&
      !preferred_region.contains(region())) {
    // We didn't manage to allocate the code range close enough.
    FATAL("Failed to allocate code range close to the .text section");
  }
#endif  // defined(V8_TARGET_OS_IOS) || defined(V8_TARGET_OS_CHROMEOS)

  // On some platforms, specifically Win64, we need to reserve some pages at
  // the beginning of an executable space. See
  // https://cs.chromium.org/chromium/src/components/crash/content/
  // app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
  // for details.
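  // On platforms that don't need this, GetWritableReservedAreaSize() returns
  // zero and the whole block below is skipped.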
  const size_t required_writable_area_size = GetWritableReservedAreaSize();
  // The size of the area that might have been excluded from the area
  // allocatable by the BoundedPageAllocator.
  size_t excluded_allocatable_area_size = 0;
  if (required_writable_area_size > 0) {
    CHECK_LE(required_writable_area_size, kPageSize);

    // If the start of the reservation is not kPageSize-aligned then
    // there's a non-allocatable region before the area controlled by
    // the BoundedPageAllocator. Use it if it's big enough.
    const Address non_allocatable_size = page_allocator_->begin() - base();

    TRACE("=== non-allocatable region: [%p, %p)\n",
          reinterpret_cast<void*>(base()),
          reinterpret_cast<void*>(base() + non_allocatable_size));

    // Exclude the first page from allocatable pages if the required writable
    // area doesn't fit into the non-allocatable area.
    if (non_allocatable_size < required_writable_area_size) {
      TRACE("=== Exclude the first page from allocatable area\n");
      excluded_allocatable_area_size = kPageSize;
      CHECK(page_allocator_->AllocatePagesAt(page_allocator_->begin(),
                                             excluded_allocatable_area_size,
                                             PageAllocator::kNoAccess));
    }
    // Commit the required amount of writable memory.
    if (!reservation()->SetPermissions(base(), required_writable_area_size,
                                       PageAllocator::kReadWrite)) {
      return false;
    }
#if defined(V8_OS_WIN64)
    if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
      win64_unwindinfo::RegisterNonABICompliantCodeRange(
          reinterpret_cast<void*>(base()), size());
    }
#endif  // V8_OS_WIN64
  }

// Don't pre-commit the code cage on Windows since it uses memory and it's not
// required for recommit.
// iOS cannot adjust page permissions for MAP_JIT'd pages; they are set as RWX
// at the start.
#if !defined(V8_OS_WIN) && !defined(V8_OS_IOS)
  if (params.page_initialization_mode ==
      base::PageInitializationMode::kRecommitOnly) {
    void* base = reinterpret_cast<void*>(page_allocator_->begin() +
                                         excluded_allocatable_area_size);
    size_t size = page_allocator_->size() - excluded_allocatable_area_size;
    if (ThreadIsolation::Enabled()) {
      if (!ThreadIsolation::MakeExecutable(reinterpret_cast<Address>(base),
                                           size)) {
        return false;
      }
    } else if (!params.page_allocator->SetPermissions(
                   base, size, PageAllocator::kReadWriteExecute)) {
      return false;
    }
    if (immutable) {
#ifdef DEBUG
      immutable_ = true;
#endif
#ifdef V8_ENABLE_MEMORY_SEALING
      params.page_allocator->SealPages(base, size);
#endif
    }
    DiscardSealedMemoryScope discard_scope("Discard global code range.");
    if (!params.page_allocator->DiscardSystemPages(base, size)) return false;
  }
#endif  // !defined(V8_OS_WIN) && !defined(V8_OS_IOS)

  return true;
}

// Preferred region for the code range is an intersection of the following
// regions:
// a) [builtins - kMaxPCRelativeDistance, builtins + kMaxPCRelativeDistance)
// b) [RoundDown(builtins, 4GB), RoundUp(builtins, 4GB)) in order to ensure
//    that the whole code range ends up in the single 4GB-aligned region that
//    contains the embedded builtins.
// Requirement (a) is there to avoid remapping of embedded builtins into
// the code range for architectures where the PC-relative jump/call distance
// is big enough.
// Requirement (b) is aiming at helping CPU branch predictors in general and,
// in case V8_EXTERNAL_CODE_SPACE is enabled, it ensures that
// ExternalCodeCompressionScheme works for all pointers in the code range.
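// Example (illustrative addresses): with the embedded builtins at
// 0x7f1203456000 and a 128 MB radius, requirement (a) gives roughly
// [0x7f11fb456000, 0x7f120b456000), and requirement (b) clamps it to the
// 4GB-aligned region [0x7f1200000000, 0x7f1300000000), so the preferred
// region becomes [0x7f1200000000, 0x7f120b456000).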
// static
base::AddressRegion CodeRange::GetPreferredRegion(size_t radius_in_megabytes,
                                                  size_t allocate_page_size) {
#ifdef V8_TARGET_ARCH_64_BIT
  // Compute builtins location.
  Address embedded_blob_code_start =
      reinterpret_cast<Address>(Isolate::CurrentEmbeddedBlobCode());
  Address embedded_blob_code_end;
  if (embedded_blob_code_start == kNullAddress) {
    // When there's no embedded blob, use the address of a function from the
    // binary as an approximation.
    embedded_blob_code_start =
        FUNCTION_ADDR(&FunctionInStaticBinaryForAddressHint);
    embedded_blob_code_end = embedded_blob_code_start + 1;
  } else {
    embedded_blob_code_end =
        embedded_blob_code_start + Isolate::CurrentEmbeddedBlobCodeSize();
  }

  // Fulfil requirement (a).
  constexpr size_t max_size = std::numeric_limits<size_t>::max();
  size_t radius = radius_in_megabytes * MB;

  Address region_start =
      RoundUp(embedded_blob_code_end - radius, allocate_page_size);
  if (region_start > embedded_blob_code_end) {
    // |region_start| underflowed.
    region_start = 0;
  }
  Address region_end =
      RoundDown(embedded_blob_code_start + radius, allocate_page_size);
  if (region_end < embedded_blob_code_start) {
    // |region_end| overflowed.
    region_end = RoundDown(max_size, allocate_page_size);
  }

  // Fulfil requirement (b).
  constexpr size_t k4GB = size_t{4} * GB;
  Address four_gb_cage_start = RoundDown(embedded_blob_code_start, k4GB);
  Address four_gb_cage_end = four_gb_cage_start + k4GB;

  region_start = std::max(region_start, four_gb_cage_start);
  region_end = std::min(region_end, four_gb_cage_end);

  return base::AddressRegion(region_start, region_end - region_start);
#else
  return {};
#endif  // V8_TARGET_ARCH_64_BIT
}

void CodeRange::Free() {
  // TODO(361480580): this DCHECK is temporarily disabled since we free the
  // global CodeRange in the PoolTest.
  // DCHECK(!immutable_);

  if (IsReserved()) {
#if defined(V8_OS_WIN64)
    if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
      win64_unwindinfo::UnregisterNonABICompliantCodeRange(
          reinterpret_cast<void*>(base()));
    }
#endif  // V8_OS_WIN64
    GetCodeRangeAddressHint()->NotifyFreedCodeRange(
        reservation()->region().begin(), reservation()->region().size());
    VirtualMemoryCage::Free();
  }
}

uint8_t* CodeRange::RemapEmbeddedBuiltins(Isolate* isolate,
                                          const uint8_t* embedded_blob_code,
                                          size_t embedded_blob_code_size) {
  base::MutexGuard guard(&remap_embedded_builtins_mutex_);

  // Remap embedded builtins into the end of the address range controlled by
  // the BoundedPageAllocator.
  const base::AddressRegion code_region(page_allocator()->begin(),
                                        page_allocator()->size());
  CHECK_NE(code_region.begin(), kNullAddress);
  CHECK(!code_region.is_empty());

  uint8_t* embedded_blob_code_copy =
      embedded_blob_code_copy_.load(std::memory_order_acquire);
  if (embedded_blob_code_copy) {
    DCHECK(
        code_region.contains(reinterpret_cast<Address>(embedded_blob_code_copy),
                             embedded_blob_code_size));
    SLOW_DCHECK(memcmp(embedded_blob_code, embedded_blob_code_copy,
                       embedded_blob_code_size) == 0);
    return embedded_blob_code_copy;
  }

  const size_t kAllocatePageSize = page_allocator()->AllocatePageSize();
  const size_t kCommitPageSize = page_allocator()->CommitPageSize();
  size_t allocate_code_size =
      RoundUp(embedded_blob_code_size, kAllocatePageSize);

  // Allocate the re-embedded code blob in such a way that it will be reachable
  // by PC-relative addressing from the biggest possible region.
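  // For example (illustrative numbers): with a 512 MB code region, a 128 MB
  // PC-relative range and a 4 MB blob, hint_offset is 128 MB - 4 MB = 124 MB,
  // so the copy is placed at [begin + 124 MB, begin + 128 MB) and stays
  // reachable from the largest possible prefix of the code region.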
  const size_t max_pc_relative_code_range = kMaxPCRelativeCodeRangeInMB * MB;
  size_t hint_offset =
      std::min(max_pc_relative_code_range, code_region.size()) -
      allocate_code_size;
  void* hint = reinterpret_cast<void*>(code_region.begin() + hint_offset);

  embedded_blob_code_copy =
      reinterpret_cast<uint8_t*>(page_allocator()->AllocatePages(
          hint, allocate_code_size, kAllocatePageSize,
          PageAllocator::kNoAccess));

  if (!embedded_blob_code_copy) {
    V8::FatalProcessOutOfMemory(
        isolate, "Can't allocate space for re-embedded builtins");
  }

  if (code_region.size() > max_pc_relative_code_range) {
    // The re-embedded code blob might not be reachable from the end part of
    // the code range, so ensure that code pages will never be allocated in
    // the "unreachable" area.
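    // In the example above, everything from begin + 252 MB (copy address
    // plus 128 MB) up to the end of the region is reserved as inaccessible.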
    Address unreachable_start =
        reinterpret_cast<Address>(embedded_blob_code_copy) +
        max_pc_relative_code_range;

    if (code_region.contains(unreachable_start)) {
      size_t unreachable_size = code_region.end() - unreachable_start;

      void* result = page_allocator()->AllocatePages(
          reinterpret_cast<void*>(unreachable_start), unreachable_size,
          kAllocatePageSize, PageAllocator::kNoAccess);
      CHECK_EQ(reinterpret_cast<Address>(result), unreachable_start);
    }
  }

  size_t code_size = RoundUp(embedded_blob_code_size, kCommitPageSize);
  if constexpr (base::OS::IsRemapPageSupported()) {
    // By default, the embedded builtins are not remapped, but copied. This
    // costs memory, since builtins become private dirty anonymous memory,
    // rather than shared, clean, file-backed memory for the embedded version.
    // If the OS supports it, we can remap the builtins *on top* of the space
    // allocated in the code range, making the "copy" shared, clean, file-backed
    // memory, and thus saving sizeof(builtins).
    //
    // Builtins should start at a page boundary, see
    // platform-embedded-file-writer-mac.cc. If that's not the case (e.g. if the
    // embedded builtins are not coming from the binary), fall back to copying.
    if (IsAligned(reinterpret_cast<uintptr_t>(embedded_blob_code),
                  kCommitPageSize)) {
      bool ok = base::OS::RemapPages(embedded_blob_code, code_size,
                                     embedded_blob_code_copy,
                                     base::OS::MemoryPermission::kReadExecute);

      if (ok) {
        embedded_blob_code_copy_.store(embedded_blob_code_copy,
                                       std::memory_order_release);
        return embedded_blob_code_copy;
      }
    }
  }

  if (V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT ||
      V8_HEAP_USE_BECORE_JIT_WRITE_PROTECT) {
    // iOS code pages are already RWX and don't need to be modified.
#if !defined(V8_TARGET_OS_IOS)
    if (!page_allocator()->RecommitPages(embedded_blob_code_copy, code_size,
                                         PageAllocator::kReadWriteExecute)) {
      V8::FatalProcessOutOfMemory(isolate,
                                  "Re-embedded builtins: recommit pages");
    }
#endif  // !defined(V8_TARGET_OS_IOS)
    RwxMemoryWriteScope rwx_write_scope(
        "Enable write access to copy the blob code into the code range");
    memcpy(embedded_blob_code_copy, embedded_blob_code,
           embedded_blob_code_size);
  } else {
    if (!page_allocator()->SetPermissions(embedded_blob_code_copy, code_size,
                                          PageAllocator::kReadWrite)) {
      V8::FatalProcessOutOfMemory(isolate,
                                  "Re-embedded builtins: set permissions");
    }
    memcpy(embedded_blob_code_copy, embedded_blob_code,
           embedded_blob_code_size);

    if (!page_allocator()->SetPermissions(embedded_blob_code_copy, code_size,
                                          PageAllocator::kReadExecute)) {
      V8::FatalProcessOutOfMemory(isolate,
                                  "Re-embedded builtins: set permissions");
    }
  }
  embedded_blob_code_copy_.store(embedded_blob_code_copy,
                                 std::memory_order_release);
  return embedded_blob_code_copy;
}

}  // namespace internal
}  // namespace v8