#if defined(V8_OS_WIN64)
#include "src/diagnostics/unwinding-info-win64.h"
#endif  // V8_OS_WIN64
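// Dummy function whose address is used as a stand-in for the embedded
// builtins when computing an allocation hint (see GetPreferredRegion below).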
void FunctionInStaticBinaryForAddressHint() {}
Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size,
                                             size_t allocate_page_size) {
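  // Reuse the start address of a recently freed code range of the same size,
  // if one is available.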
  result = it->second.back();
  it->second.pop_back();
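// Remembers a freed code range so that a later reservation of the same size
// can try to reuse its start address.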
void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
                                                size_t code_range_size) {
  base::MutexGuard guard(&mutex_);
  recently_freed_[code_range_size].push_back(code_range_start);
}
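// Tracing helper; only prints when --trace-code-range-allocation is enabled.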
#define TRACE(...) \
  if (v8_flags.trace_code_range_allocation) PrintF(__VA_ARGS__)
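// Reserves virtual memory for the code range, preferably close to the
// embedded builtins so that they stay within PC-relative call/jump distance
// of generated code.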
bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
                                size_t requested, bool immutable) {
  params.reservation_size = requested;
  params.base_alignment =
      VirtualMemoryCage::ReservationParams::kAnyBaseAlignment;
  params.page_size = kPageSize;
  if (v8_flags.jitless) {
    params.page_initialization_mode =
        base::PageInitializationMode::kAllocatedPagesCanBeUninitialized;
  } else {
    params.page_initialization_mode =
        base::PageInitializationMode::kRecommitOnly;
  }
#if defined(V8_TARGET_OS_IOS) || defined(V8_TARGET_OS_CHROMEOS)
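  // The preferred region for the code range is centered on the embedded
  // builtins and bounded by the maximum PC-relative code range.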
  constexpr size_t kRadiusInMB =
      kMaxPCRelativeCodeRangeInMB > 1024 ? kMaxPCRelativeCodeRangeInMB : 4096;
  TRACE("=== Preferred region: [%p, %p)\n",
        reinterpret_cast<void*>(preferred_region.begin()),
        reinterpret_cast<void*>(preferred_region.end()));
  const bool kShouldTryHarder = V8_EXTERNAL_CODE_SPACE_BOOL &&
                                COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL &&
                                v8_flags.better_code_range_allocation;
  if (kShouldTryHarder) {
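    // Probe backwards from the end of the preferred region: reserve a
    // candidate cage at each hint and keep the first one that lands entirely
    // inside the preferred region.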
    VirtualMemoryCage candidate_cage;
    const int kAllocationTries = 16;
    params.requested_start_hint =
        RoundDown(preferred_region.end() - requested, kPageSize);
    Address step =
        RoundDown(preferred_region.size() / kAllocationTries, kPageSize);
    for (int i = 0; i < kAllocationTries; i++) {
      TRACE("=== Attempt #%d, hint=%p\n", i,
            reinterpret_cast<void*>(params.requested_start_hint));
      if (candidate_cage.InitReservation(params)) {
        TRACE("=== Attempt #%d (%p): [%p, %p)\n", i,
              reinterpret_cast<void*>(params.requested_start_hint),
              reinterpret_cast<void*>(candidate_cage.region().begin()),
              reinterpret_cast<void*>(candidate_cage.region().end()));
        // The reservation succeeded; keep it only if it landed inside the
        // preferred region, otherwise release it and try the next hint.
        if (preferred_region.contains(candidate_cage.region())) break;
        candidate_cage.Free();
      }
      if (step == 0) break;
      params.requested_start_hint -= step;
    }
    if (candidate_cage.IsReserved()) {
      *static_cast<VirtualMemoryCage*>(this) = std::move(candidate_cage);
    }
  }
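  // If no reservation inside the preferred region succeeded, fall back to a
  // hint from the address-hint cache (e.g. a recently freed code range of the
  // same size).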
  if (!IsReserved()) {
    Address the_hint = GetCodeRangeAddressHint()->GetAddressHint(
        requested, allocate_page_size);
    params.requested_start_hint = the_hint;
    if (!VirtualMemoryCage::InitReservation(params)) return false;
    TRACE("=== Fallback attempt, hint=%p: [%p, %p)\n",
          reinterpret_cast<void*>(params.requested_start_hint),
          reinterpret_cast<void*>(region().begin()),
          reinterpret_cast<void*>(region().end()));
  }
  if (v8_flags.abort_on_far_code_range &&
      !preferred_region.contains(region())) {
    FATAL("Failed to allocate code range close to the .text section");
  }
  const size_t required_writable_area_size = GetWritableReservedAreaSize();
  size_t excluded_allocatable_area_size = 0;
  if (required_writable_area_size > 0) {
    CHECK_LE(required_writable_area_size, kPageSize);
    TRACE("=== non-allocatable region: [%p, %p)\n",
          reinterpret_cast<void*>(base()),
          reinterpret_cast<void*>(base() + non_allocatable_size));
    if (non_allocatable_size < required_writable_area_size) {
      TRACE("=== Exclude the first page from allocatable area\n");
      excluded_allocatable_area_size = kPageSize;
          excluded_allocatable_area_size,
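  // Register the new code range with the Win64 stack-unwinding machinery so
  // that stack walks through generated code keep working.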
#if defined(V8_OS_WIN64)
  if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
    win64_unwindinfo::RegisterNonABICompliantCodeRange(
        reinterpret_cast<void*>(base()), size());
  }
#endif  // V8_OS_WIN64
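  // For configurations that recommit pages rather than reinitialize them,
  // pre-set RWX permissions on the allocatable part of the range (optionally
  // sealing them), then discard the physical pages until they are used.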
#if !defined(V8_OS_WIN) && !defined(V8_OS_IOS)
  if (params.page_initialization_mode ==
      base::PageInitializationMode::kRecommitOnly) {
    void* base = reinterpret_cast<void*>(page_allocator_->begin() +
                                         excluded_allocatable_area_size);
    size_t size = page_allocator_->size() - excluded_allocatable_area_size;
    if (ThreadIsolation::Enabled()) {
      if (!ThreadIsolation::MakeExecutable(reinterpret_cast<Address>(base),
                                           size)) {
        return false;
      }
    } else if (!params.page_allocator->SetPermissions(
                   base, size, PageAllocator::kReadWriteExecute)) {
      return false;
    }
#ifdef V8_ENABLE_MEMORY_SEALING
      params.page_allocator->SealPages(base, size);
#endif  // V8_ENABLE_MEMORY_SEALING
    if (!params.page_allocator->DiscardSystemPages(base, size)) return false;
  }
#endif  // !defined(V8_OS_WIN) && !defined(V8_OS_IOS)

  return true;
}
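// Computes the region in which the code range should preferably be placed:
// within PC-relative call/jump distance of the embedded builtins, and within
// the surrounding 4GB-aligned cage.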
// static
base::AddressRegion CodeRange::GetPreferredRegion(size_t radius_in_megabytes,
                                                  size_t allocate_page_size) {
#ifdef V8_TARGET_ARCH_64_BIT
  // Compute the extent of the embedded builtins; if there is no embedded
  // blob, use the address of a function in the binary as an approximation.
  Address embedded_blob_code_start =
      reinterpret_cast<Address>(Isolate::CurrentEmbeddedBlobCode());
  Address embedded_blob_code_end;
  if (embedded_blob_code_start == kNullAddress) {
    embedded_blob_code_start =
        FUNCTION_ADDR(&FunctionInStaticBinaryForAddressHint);
    embedded_blob_code_end = embedded_blob_code_start + 1;
  } else {
    embedded_blob_code_end =
        embedded_blob_code_start + Isolate::CurrentEmbeddedBlobCodeSize();
  }
  constexpr size_t max_size = std::numeric_limits<size_t>::max();
  size_t radius = radius_in_megabytes * MB;
  Address region_start =
      RoundUp(embedded_blob_code_end - radius, allocate_page_size);
  if (region_start > embedded_blob_code_end) {
    // |region_start| underflowed.
    region_start = 0;
  }
  Address region_end =
      RoundDown(embedded_blob_code_start + radius, allocate_page_size);
  if (region_end < embedded_blob_code_start) {
    // |region_end| overflowed.
    region_end = RoundDown(max_size, allocate_page_size);
  }
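  // Additionally, keep the region within the 4GB-aligned cage that contains
  // the embedded builtins.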
  constexpr size_t k4GB = size_t{4} * GB;
  Address four_gb_cage_start = RoundDown(embedded_blob_code_start, k4GB);
  Address four_gb_cage_end = four_gb_cage_start + k4GB;

  region_start = std::max(region_start, four_gb_cage_start);
  region_end = std::min(region_end, four_gb_cage_end);
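// Releases the code range reservation: drops the Win64 unwind-info
// registration and returns the freed region to the address-hint cache.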
void CodeRange::Free() {
  if (IsReserved()) {
#if defined(V8_OS_WIN64)
    if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
      win64_unwindinfo::UnregisterNonABICompliantCodeRange(
          reinterpret_cast<void*>(base()));
    }
#endif  // V8_OS_WIN64
    GetCodeRangeAddressHint()->NotifyFreedCodeRange(
        reservation()->region().begin(), reservation()->region().size());
    VirtualMemoryCage::Free();
  }
}
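// Copies (or, where the OS supports it, remaps) the embedded builtins code
// blob into the code range so that builtins remain reachable via PC-relative
// calls from generated code.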
uint8_t* CodeRange::RemapEmbeddedBuiltins(Isolate* isolate,
                                          const uint8_t* embedded_blob_code,
                                          size_t embedded_blob_code_size) {
    DCHECK(code_region.contains(
        reinterpret_cast<Address>(embedded_blob_code_copy),
        embedded_blob_code_size));
    SLOW_DCHECK(memcmp(embedded_blob_code, embedded_blob_code_copy,
                       embedded_blob_code_size) == 0);
  size_t allocate_code_size =
      RoundUp(embedded_blob_code_size, kAllocatePageSize);
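  // Place the copy so that it is reachable by PC-relative addressing from as
  // much of the code range as possible.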
  size_t hint_offset =
      std::min(max_pc_relative_code_range, code_region.size()) -
      allocate_code_size;
  void* hint = reinterpret_cast<void*>(code_region.begin() + hint_offset);
  embedded_blob_code_copy =
      reinterpret_cast<uint8_t*>(page_allocator()->AllocatePages(
          hint, allocate_code_size, kAllocatePageSize,
          PageAllocator::kNoAccess));
  if (!embedded_blob_code_copy) {
    V8::FatalProcessOutOfMemory(
        isolate, "Can't allocate space for re-embedded builtins");
  }
  if (code_region.size() > max_pc_relative_code_range) {
    Address unreachable_start =
        reinterpret_cast<Address>(embedded_blob_code_copy) +
        max_pc_relative_code_range;
    if (code_region.contains(unreachable_start)) {
      size_t unreachable_size = code_region.end() - unreachable_start;
      void* result = page_allocator()->AllocatePages(
          reinterpret_cast<void*>(unreachable_start), unreachable_size,
          kAllocatePageSize, PageAllocator::kNoAccess);
      CHECK_EQ(reinterpret_cast<Address>(result), unreachable_start);
    }
  }
  size_t code_size = RoundUp(embedded_blob_code_size, kCommitPageSize);
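  // Prefer remapping the file-backed builtins over copying them: remapped
  // pages stay shared and clean instead of becoming private dirty memory.
  // This requires the embedded blob to start at a commit-page boundary.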
    if (IsAligned(reinterpret_cast<uintptr_t>(embedded_blob_code),
                  kCommitPageSize)) {
      bool ok = base::OS::RemapPages(embedded_blob_code,
                                     embedded_blob_code_size,
                                     embedded_blob_code_copy,
                                     base::OS::MemoryPermission::kReadExecute);
      if (ok) {
        embedded_blob_code_copy_.store(embedded_blob_code_copy,
                                       std::memory_order_release);
        return embedded_blob_code_copy;
      }
    }
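  // Fallback: copy the blob into the code range. With per-thread JIT write
  // protection the copy is done under an RwxMemoryWriteScope after
  // recommitting the pages; otherwise the pages are flipped to read-write for
  // the memcpy and back to read-execute afterwards.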
#if !defined(V8_TARGET_OS_IOS)
446 "Re-embedded builtins: recommit pages");
450 "Enable write access to copy the blob code into the code range");
    memcpy(embedded_blob_code_copy, embedded_blob_code,
           embedded_blob_code_size);
457 "Re-embedded builtins: set permissions");
460 embedded_blob_code_size);
465 "Re-embedded builtins: set permissions");
469 std::memory_order_release);