v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
src/heap/memory-allocator.cc
1// Copyright 2020 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
7#include <cinttypes>
8#include <optional>
9
11#include "src/common/globals.h"
13#include "src/flags/flags.h"
15#include "src/heap/gc-tracer.h"
16#include "src/heap/heap-inl.h"
17#include "src/heap/heap.h"
20#include "src/heap/page-pool.h"
22#include "src/heap/zapping.h"
23#include "src/logging/log.h"
26
27namespace v8 {
28namespace internal {
29
30// -----------------------------------------------------------------------------
31// MemoryAllocator
32//
33
36
37MemoryAllocator::MemoryAllocator(Isolate* isolate,
38 v8::PageAllocator* code_page_allocator,
39 v8::PageAllocator* trusted_page_allocator,
40 size_t capacity)
41 : isolate_(isolate),
42 data_page_allocator_(isolate->page_allocator()),
43 code_page_allocator_(code_page_allocator),
44 trusted_page_allocator_(trusted_page_allocator),
45 capacity_(RoundUp(capacity, PageMetadata::kPageSize)) {
50}
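// TearDown (below) expects all spaces to have been torn down already and the
// per-isolate page pool to be empty; it only resets the allocator's own state.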
51
52void MemoryAllocator::TearDown() {
53 DCHECK_EQ(pool()->GetCount(isolate_), 0);
54
55 // Check that spaces were torn down before MemoryAllocator.
56 DCHECK_EQ(size_, 0u);
57 // TODO(gc) this will be true again when we fix FreeMemory.
58 // DCHECK_EQ(0, size_executable_);
59 capacity_ = 0;
60
63 }
64
65 code_page_allocator_ = nullptr;
66 data_page_allocator_ = nullptr;
67  trusted_page_allocator_ = nullptr;
68}
69
73
77
81
85
86bool MemoryAllocator::CommitMemory(VirtualMemory* reservation,
87 Executability executable) {
88 Address base = reservation->address();
89 size_t size = reservation->size();
90 if (!reservation->SetPermissions(base, size, PageAllocator::kReadWrite)) {
91 return false;
92 }
93 UpdateAllocatedSpaceLimits(base, base + size, executable);
94 return true;
95}
96
97bool MemoryAllocator::UncommitMemory(VirtualMemory* reservation) {
98 size_t size = reservation->size();
99 if (!reservation->SetPermissions(reservation->address(), size,
100                                   PageAllocator::kNoAccess)) {
101 return false;
102 }
103 return true;
104}
105
106void MemoryAllocator::FreeMemoryRegion(v8::PageAllocator* page_allocator,
107 Address base, size_t size) {
108 FreePages(page_allocator, reinterpret_cast<void*>(base), size);
109}
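// AllocateAlignedMemory (below) reserves an aligned |chunk_size| region for the
// given space; a reservation ending exactly at the virtual memory limit is kept
// aside and the reservation is retried, since such a chunk cannot back a linear
// allocation area.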
110
111Address MemoryAllocator::AllocateAlignedMemory(
112 size_t chunk_size, size_t area_size, size_t alignment,
113 AllocationSpace space, Executability executable, void* hint,
114 VirtualMemory* controller) {
115 DCHECK_EQ(space == CODE_SPACE || space == CODE_LO_SPACE,
116 executable == EXECUTABLE);
117  v8::PageAllocator* page_allocator = this->page_allocator(space);
118 DCHECK_LT(area_size, chunk_size);
119
120 PageAllocator::Permission permissions =
121 executable == EXECUTABLE
122          ? MutablePageMetadata::GetCodeModificationPermission()
123          : PageAllocator::kReadWrite;
124 VirtualMemory reservation(page_allocator, chunk_size, hint, alignment,
125 permissions);
126 if (!reservation.IsReserved()) return HandleAllocationFailure(executable);
127
128 // We cannot use the last chunk in the address space because we would
129 // overflow when comparing top and limit if this chunk is used for a
130 // linear allocation area.
131 if ((reservation.address() + static_cast<Address>(chunk_size)) == 0u) {
133 reserved_chunk_at_virtual_memory_limit_ = std::move(reservation);
135
136 // Retry reserve virtual memory.
137 reservation =
138 VirtualMemory(page_allocator, chunk_size, hint, alignment, permissions);
139 if (!reservation.IsReserved()) return HandleAllocationFailure(executable);
140 }
141
142 Address base = reservation.address();
143
144 if (executable == EXECUTABLE) {
145    ThreadIsolation::RegisterJitPage(base, chunk_size);
146 }
147
148 UpdateAllocatedSpaceLimits(base, base + chunk_size, executable);
149
150 *controller = std::move(reservation);
151 return base;
152}
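// HandleAllocationFailure (below): a chunk allocation failure during
// deserialization is fatal; afterwards the failure is reported to the caller
// as kNullAddress.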
153
154Address MemoryAllocator::HandleAllocationFailure(Executability executable) {
155 Heap* heap = isolate_->heap();
156 if (!heap->deserialization_complete()) {
157 heap->FatalProcessOutOfMemory(
158 executable == EXECUTABLE
159 ? "Executable MemoryChunk allocation failed during deserialization."
160 : "MemoryChunk allocation failed during deserialization.");
161 }
162 return kNullAddress;
163}
164
165size_t MemoryAllocator::ComputeChunkSize(size_t area_size,
166 AllocationSpace space) {
167 //
168 // +----------------------------+<- base aligned at MemoryChunk::kAlignment
169 // | Header |
170 // +----------------------------+<- area_start_ (base + area_start_)
171 // | Area |
172 // +----------------------------+<- area_end_ (area_start + area_size)
173 // | Committed but not used |
174 // +----------------------------+<- base + chunk_size
175 //
176
177 return ::RoundUp(
180}
181
182std::optional<MemoryAllocator::MemoryChunkAllocationResult>
183MemoryAllocator::AllocateUninitializedChunkAt(BaseSpace* space,
184 size_t area_size,
185 Executability executable,
186 Address hint,
187 PageSize page_size) {
188#ifndef V8_COMPRESS_POINTERS
189 // When pointer compression is enabled, spaces are expected to be at a
190 // predictable address (see mkgrokdump) so we don't supply a hint and rely on
191 // the deterministic behaviour of the BoundedPageAllocator.
192 if (hint == kNullAddress) {
193 hint = reinterpret_cast<Address>(
194        AlignedAddress(isolate_->heap()->GetRandomMmapAddr(),
195                       MemoryChunk::GetAlignmentForAllocation()));
196 }
197#endif
198
199 VirtualMemory reservation;
200 size_t chunk_size = ComputeChunkSize(area_size, space->identity());
201 DCHECK_EQ(chunk_size % GetCommitPageSize(), 0);
202
203  Address base = AllocateAlignedMemory(
204 chunk_size, area_size, MemoryChunk::GetAlignmentForAllocation(),
205 space->identity(), executable, reinterpret_cast<void*>(hint),
206 &reservation);
207 if (base == kNullAddress) return {};
208
209 size_ += reservation.size();
210
211 // Update executable memory size.
212 if (executable == EXECUTABLE) {
213 size_executable_ += reservation.size();
214 }
215
216  if (heap::ShouldZapGarbage()) {
217 if (executable == EXECUTABLE) {
218      CodePageMemoryModificationScopeForDebugging memory_write_scope(
219 isolate_->heap(), &reservation,
220 base::AddressRegion(base, chunk_size));
221 heap::ZapBlock(base, chunk_size, kZapValue);
222 } else {
223 DCHECK_EQ(executable, NOT_EXECUTABLE);
224 // Zap both page header and object area at once. No guard page in-between.
225 heap::ZapBlock(base, chunk_size, kZapValue);
226 }
227 }
228
229  LOG(isolate_,
230 NewEvent("MemoryChunk", reinterpret_cast<void*>(base), chunk_size));
231
233 space->identity());
234 Address area_end = area_start + area_size;
235
236  return MemoryChunkAllocationResult{
237 reinterpret_cast<void*>(base), nullptr, chunk_size, area_start, area_end,
238 std::move(reservation),
239 };
240}
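// PartialFreeMemory (below) releases the tail of a chunk starting at
// |start_free|, re-establishes the guard page at the new area end, and updates
// the allocator's size bookkeeping.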
241
242void MemoryAllocator::PartialFreeMemory(MemoryChunkMetadata* chunk,
243 Address start_free,
244 size_t bytes_to_free,
245 Address new_area_end) {
246 VirtualMemory* reservation = chunk->reserved_memory();
247 DCHECK(reservation->IsReserved());
248 chunk->set_size(chunk->size() - bytes_to_free);
249 chunk->set_area_end(new_area_end);
251 // Add guard page at the end.
252 size_t page_size = GetCommitPageSize();
253 DCHECK_EQ(0, chunk->area_end() % static_cast<Address>(page_size));
254 DCHECK_EQ(chunk->ChunkAddress() + chunk->size(), chunk->area_end());
255
258 !isolate_->jitless()) {
260 DiscardSealedMemoryScope discard_scope("Partially free memory.");
261 reservation->DiscardSystemPages(chunk->area_end(), page_size);
262 } else {
263 CHECK(reservation->SetPermissions(chunk->area_end(), page_size,
264                                        PageAllocator::kNoAccess));
265 }
266 }
267 // On e.g. Windows, a reservation may be larger than a page and releasing
268 // partially starting at |start_free| will also release the potentially
269 // unused part behind the current page.
270 const size_t released_bytes = reservation->Release(start_free);
271 DCHECK_GE(size_, released_bytes);
272 size_ -= released_bytes;
273}
274
275void MemoryAllocator::UnregisterSharedMemoryChunk(MemoryChunkMetadata* chunk) {
276 VirtualMemory* reservation = chunk->reserved_memory();
277 const size_t size =
278 reservation->IsReserved() ? reservation->size() : chunk->size();
279 DCHECK_GE(size_, static_cast<size_t>(size));
280 size_ -= size;
281}
282
283void MemoryAllocator::UnregisterMemoryChunk(MemoryChunkMetadata* chunk_metadata,
284 Executability executable) {
285 MemoryChunk* chunk = chunk_metadata->Chunk();
287 VirtualMemory* reservation = chunk_metadata->reserved_memory();
288 const size_t size =
289 reservation->IsReserved() ? reservation->size() : chunk_metadata->size();
290 DCHECK_GE(size_, static_cast<size_t>(size));
291
292 size_ -= size;
293 if (executable == EXECUTABLE) {
294    DCHECK_GE(size_executable_, size);
295    size_executable_ -= size;
296#ifdef DEBUG
297 UnregisterExecutableMemoryChunk(
298 static_cast<MutablePageMetadata*>(chunk_metadata));
299#endif // DEBUG
300
301    ThreadIsolation::UnregisterJitPage(chunk->address(),
302 chunk_metadata->size());
303 }
304  chunk->SetFlagSlow(MemoryChunk::UNREGISTERED);
305}
306
307void MemoryAllocator::UnregisterMutableMemoryChunk(MutablePageMetadata* chunk) {
308  UnregisterMemoryChunk(chunk, chunk->Chunk()->executable());
309}
310
311void MemoryAllocator::UnregisterReadOnlyPage(ReadOnlyPageMetadata* page) {
312 DCHECK(!page->Chunk()->executable());
314}
315
316void MemoryAllocator::FreeReadOnlyPage(ReadOnlyPageMetadata* chunk) {
318 LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
319
321
322  v8::PageAllocator* allocator = page_allocator(RO_SPACE);
323 VirtualMemory* reservation = chunk->reserved_memory();
324 if (reservation->IsReserved()) {
325 reservation->Free();
326 } else {
327 // Only read-only pages can have a non-initialized reservation object. This
328 // happens when the pages are remapped to multiple locations and where the
329 // reservation would therefore be invalid.
330 FreeMemoryRegion(allocator, chunk->ChunkAddress(),
331 RoundUp(chunk->size(), allocator->AllocatePageSize()));
332 }
333
334 delete chunk;
335}
336
337void MemoryAllocator::PreFreeMemory(MutablePageMetadata* chunk_metadata) {
338 MemoryChunk* chunk = chunk_metadata->Chunk();
340 LOG(isolate_, DeleteEvent("MemoryChunk", chunk_metadata));
341 UnregisterMutableMemoryChunk(chunk_metadata);
342  isolate_->heap()->RememberUnmappedPage(
343 reinterpret_cast<Address>(chunk_metadata),
344 chunk->IsEvacuationCandidate());
345  chunk->SetFlagSlow(MemoryChunk::PRE_FREED);
346}
347
348void MemoryAllocator::PerformFreeMemory(MutablePageMetadata* chunk_metadata) {
349 DCHECK(chunk_metadata->Chunk()->IsFlagSet(MemoryChunk::UNREGISTERED));
350 DCHECK(chunk_metadata->Chunk()->IsFlagSet(MemoryChunk::PRE_FREED));
351 DCHECK(!chunk_metadata->Chunk()->InReadOnlySpace());
352
353 chunk_metadata->ReleaseAllAllocatedMemory();
354
355 DeleteMemoryChunk(chunk_metadata);
356}
357
358void MemoryAllocator::Free(MemoryAllocator::FreeMode mode,
359 MutablePageMetadata* chunk_metadata) {
360 MemoryChunk* chunk = chunk_metadata->Chunk();
362 PreFreeMemory(chunk_metadata);
363
364 switch (mode) {
365    case FreeMode::kImmediately:
366 PerformFreeMemory(chunk_metadata);
367 break;
369 // Record page to be freed later.
370 queued_pages_to_be_freed_.push_back(chunk_metadata);
371 break;
372 case FreeMode::kPool:
373 // Ensure that we only ever put pages with their markbits cleared into the
374 // pool. This is necessary because `PreFreeMemory` doesn't clear the
375 // marking bitmap and the marking bitmap is reused when this page is taken
376 // out of the pool again.
377 DCHECK(chunk_metadata->IsLivenessClear());
378 DCHECK_EQ(chunk_metadata->size(),
379 static_cast<size_t>(MutablePageMetadata::kPageSize));
381 // The chunks added to this queue will be cached until memory reducing GC.
382 pool()->Add(isolate_, chunk_metadata);
383 break;
384 }
385}
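// AllocatePage (below) returns a fully initialized regular page for |space|,
// taking it from the page pool when requested and falling back to a fresh
// chunk allocation otherwise.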
386
387PageMetadata* MemoryAllocator::AllocatePage(
388 MemoryAllocator::AllocationMode alloc_mode, Space* space,
389 Executability executable) {
390 const size_t size =
392 std::optional<MemoryChunkAllocationResult> chunk_info;
393 if (alloc_mode == AllocationMode::kUsePool) {
394 DCHECK_EQ(executable, NOT_EXECUTABLE);
395 chunk_info = AllocateUninitializedPageFromPool(space);
396 }
397
398 if (!chunk_info) {
399 chunk_info =
400 AllocateUninitializedChunk(space, size, executable, PageSize::kRegular);
401 }
402
403 if (!chunk_info) return nullptr;
404
405 PageMetadata* metadata;
406 if (chunk_info->optional_metadata) {
407 metadata = new (chunk_info->optional_metadata) PageMetadata(
408 isolate_->heap(), space, chunk_info->size, chunk_info->area_start,
409 chunk_info->area_end, std::move(chunk_info->reservation));
410 } else {
411 metadata = new PageMetadata(isolate_->heap(), space, chunk_info->size,
412 chunk_info->area_start, chunk_info->area_end,
413 std::move(chunk_info->reservation));
414 }
415 MemoryChunk* chunk;
416 MemoryChunk::MainThreadFlags flags = metadata->InitialFlags(executable);
417 if (v8_flags.black_allocated_pages && space->identity() != NEW_SPACE &&
418 space->identity() != NEW_LO_SPACE &&
420 // Disable the write barrier for objects pointing to this page. We don't
421 // need to trigger the barrier for pointers to old black-allocated pages,
422 // since those are never considered for evacuation. However, we have to
423 // keep the old->shared remembered set across multiple GCs, so those
424 // pointers still need to be recorded.
425 if (!IsAnySharedSpace(space->identity())) {
426 flags &= ~MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING;
427 }
428 // And mark the page as black allocated.
430 }
431 if (executable) {
432 RwxMemoryWriteScope scope("Initialize a new MemoryChunk.");
433 chunk = new (chunk_info->chunk) MemoryChunk(flags, metadata);
434 } else {
435 chunk = new (chunk_info->chunk) MemoryChunk(flags, metadata);
436 }
437
438#ifdef DEBUG
439 if (chunk->executable()) RegisterExecutableMemoryChunk(metadata);
440#endif // DEBUG
441
442 DCHECK(metadata->IsLivenessClear());
443 space->InitializePage(metadata);
445 return metadata;
446}
447
448ReadOnlyPageMetadata* MemoryAllocator::AllocateReadOnlyPage(
449 ReadOnlySpace* space, Address hint) {
450 DCHECK_EQ(space->identity(), RO_SPACE);
452 std::optional<MemoryChunkAllocationResult> chunk_info =
455 if (!chunk_info) {
456 return nullptr;
457 }
458 CHECK_NULL(chunk_info->optional_metadata);
459  ReadOnlyPageMetadata* metadata = new ReadOnlyPageMetadata(
460 isolate_->heap(), space, chunk_info->size, chunk_info->area_start,
461 chunk_info->area_end, std::move(chunk_info->reservation));
462
463 new (chunk_info->chunk) MemoryChunk(metadata->InitialFlags(), metadata);
464
466 metadata->ChunkAddress(), metadata->size(),
468
469 return metadata;
470}
471
472std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping>
473MemoryAllocator::RemapSharedPage(
474 ::v8::PageAllocator::SharedMemory* shared_memory, Address new_address) {
475 return shared_memory->RemapTo(reinterpret_cast<void*>(new_address));
476}
477
478LargePageMetadata* MemoryAllocator::AllocateLargePage(
479 LargeObjectSpace* space, size_t object_size, Executability executable) {
480 std::optional<MemoryChunkAllocationResult> chunk_info =
481 AllocateUninitializedChunk(space, object_size, executable,
482                                PageSize::kLarge);
483
484 if (!chunk_info) return nullptr;
485
486 LargePageMetadata* metadata;
487 if (chunk_info->optional_metadata) {
488 metadata = new (chunk_info->optional_metadata) LargePageMetadata(
489 isolate_->heap(), space, chunk_info->size, chunk_info->area_start,
490 chunk_info->area_end, std::move(chunk_info->reservation), executable);
491 } else {
492 metadata = new LargePageMetadata(
493 isolate_->heap(), space, chunk_info->size, chunk_info->area_start,
494 chunk_info->area_end, std::move(chunk_info->reservation), executable);
495 }
496 MemoryChunk* chunk;
497 MemoryChunk::MainThreadFlags flags = metadata->InitialFlags(executable);
498 if (executable) {
499 RwxMemoryWriteScope scope("Initialize a new MemoryChunk.");
500 chunk = new (chunk_info->chunk) MemoryChunk(flags, metadata);
501 } else {
502 chunk = new (chunk_info->chunk) MemoryChunk(flags, metadata);
503 }
504
505#ifdef DEBUG
506 if (chunk->executable()) RegisterExecutableMemoryChunk(metadata);
507#endif // DEBUG
508
510 return metadata;
511}
512
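// AllocateUninitializedPageFromPool (below) tries to reuse a page from the
// page pool and returns an empty result if the pool has none for this isolate.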
513std::optional<MemoryAllocator::MemoryChunkAllocationResult>
514MemoryAllocator::AllocateUninitializedPageFromPool(Space* space) {
515 MemoryChunkMetadata* chunk_metadata = pool()->Remove(isolate_);
516 if (chunk_metadata == nullptr) return {};
517 const int size = MutablePageMetadata::kPageSize;
518 const Address start = chunk_metadata->ChunkAddress();
519 const Address area_start =
520 start +
522 const Address area_end = start + size;
523 // Pooled pages are always regular data pages.
524 DCHECK_NE(CODE_SPACE, space->identity());
525 DCHECK_NE(TRUSTED_SPACE, space->identity());
526 VirtualMemory reservation(data_page_allocator(), start, size);
527  if (heap::ShouldZapGarbage()) {
528    heap::ZapBlock(start, size, kZapValue);
529 }
530 size_ += size;
533  return MemoryChunkAllocationResult{
534 chunk_metadata->Chunk(), chunk_metadata, size, area_start, area_end,
535 std::move(reservation),
536 };
537}
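// SetPermissionsOnExecutableMemoryChunk (below) makes an executable chunk's
// pages accessible, either by recommitting them inside the pre-mapped code
// range or by setting the required permissions directly.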
538
546
547bool MemoryAllocator::SetPermissionsOnExecutableMemoryChunk(VirtualMemory* vm,
548                                                            Address start,
549 size_t chunk_size) {
550 // All addresses and sizes must be aligned to the commit page size.
551  DCHECK(IsAligned(start, GetCommitPageSize()));
552 DCHECK_EQ(0, chunk_size % GetCommitPageSize());
553
554  if (isolate_->RequiresCodeRange()) {
555 // The pages of the code range are already mapped RWX, we just need to
556 // recommit them.
557 return vm->RecommitPages(start, chunk_size,
558                             PageAllocator::kReadWriteExecute);
559 } else {
560 return vm->SetPermissions(
561 start, chunk_size,
562        MutablePageMetadata::GetCodeModificationPermission());
563 }
564}
565
571
573 Address addr) const {
574 // All threads should be either parked or in a safepoint whenever this method
575 // is called, thus pages cannot be allocated or freed at the same time and a
576 // mutex is not required here.
577 // As the address may not correspond to a valid heap object, the chunk we
578 // obtain below is not necessarily a valid chunk.
579  MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
580 // Check if it corresponds to a known normal or large page.
581 if (auto normal_page_it = normal_pages_.find(chunk);
582 normal_page_it != normal_pages_.end()) {
583 // The chunk is a normal page.
584 // auto* normal_page = PageMetadata::cast(chunk);
585 DCHECK_LE((*normal_page_it)->address(), addr);
586 if (chunk->Metadata()->Contains(addr)) return chunk;
587 } else if (auto large_page_it = large_pages_.upper_bound(chunk);
588 large_page_it != large_pages_.begin()) {
589 // The chunk could be inside a large page.
590 DCHECK_IMPLIES(large_page_it != large_pages_.end(),
591 addr < (*large_page_it)->address());
592 auto* large_page_chunk = *std::next(large_page_it, -1);
593 DCHECK_NOT_NULL(large_page_chunk);
594 DCHECK_LE(large_page_chunk->address(), addr);
595 if (large_page_chunk->Metadata()->Contains(addr)) return large_page_chunk;
596 }
597 // Not found in any page.
598 return nullptr;
599}
600
601void MemoryAllocator::RecordMemoryChunkCreated(const MemoryChunk* chunk) {
603 if (chunk->IsLargePage()) {
604 auto result = large_pages_.insert(chunk);
605 USE(result);
606 DCHECK(result.second);
607 } else {
608 auto result = normal_pages_.insert(chunk);
609 USE(result);
610 DCHECK(result.second);
611 }
612}
613
614void MemoryAllocator::RecordMemoryChunkDestroyed(const MemoryChunk* chunk) {
616 if (chunk->IsLargePage()) {
617 auto size = large_pages_.erase(chunk);
618 USE(size);
619 DCHECK_EQ(1u, size);
620 } else {
621 auto size = normal_pages_.erase(chunk);
622 USE(size);
623 DCHECK_EQ(1u, size);
624 }
625}
626
628 for (auto* chunk : queued_pages_to_be_freed_) {
629 PerformFreeMemory(chunk);
630 }
631  queued_pages_to_be_freed_.clear();
632}
633
634// static
635void MemoryAllocator::DeleteMemoryChunk(MutablePageMetadata* metadata) {
636 DCHECK(metadata->reserved_memory()->IsReserved());
637 DCHECK(!metadata->Chunk()->InReadOnlySpace());
638 // The Metadata contains a VirtualMemory reservation and the destructor will
639 // release the MemoryChunk.
640 DiscardSealedMemoryScope discard_scope("Deleting a memory chunk");
641 if (metadata->IsLargePage()) {
642 delete reinterpret_cast<LargePageMetadata*>(metadata);
643 } else {
644 delete reinterpret_cast<PageMetadata*>(metadata);
645 }
646}
647
648} // namespace internal
649} // namespace v8