#ifndef V8_HEAP_PAGED_SPACES_H_
#define V8_HEAP_PAGED_SPACES_H_

inline iterator begin();
inline iterator end();

bool AdvanceToNextPage();

static const size_t kCompactionMemoryWanted = 500 * KB;

std::unique_ptr<FreeList> free_list,

inline bool Contains(Address a) const;
bool ContainsSlow(Address addr) const;

size_t Capacity() const { return accounting_stats_.Capacity(); }
size_t CommittedPhysicalMemory() const override;
void VerifyCommittedPhysicalMemory() const;
void IncrementCommittedPhysicalMemory(size_t increment_value);
void DecrementCommittedPhysicalMemory(size_t decrement_value);

accounting_stats_.ClearSize();
if (v8_flags.black_allocated_pages) {

size_t Available() const override;
size_t Size() const override { return accounting_stats_.Size(); }
size_t Waste() const;

V8_INLINE size_t FreeDuringSweep(Address start, size_t size_in_bytes);
void ResetFreeList();

accounting_stats_.DecreaseAllocatedBytes(bytes, page);
accounting_stats_.IncreaseAllocatedBytes(bytes, page);
accounting_stats_.DecreaseCapacity(bytes);
accounting_stats_.IncreaseCapacity(bytes);
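// Illustrative sketch only, not the actual AllocationStats behind
// accounting_stats_: a minimal bookkeeping class with the Capacity/Size/
// Increase/Decrease/ClearSize operations used above. The class name and the
// omission of the per-page argument are assumptions for the example.
#include <cassert>
#include <cstddef>

class SimpleAllocationStats {
 public:
  size_t Capacity() const { return capacity_; }
  size_t Size() const { return size_; }

  void IncreaseCapacity(size_t bytes) { capacity_ += bytes; }
  void DecreaseCapacity(size_t bytes) {
    assert(bytes <= capacity_);
    capacity_ -= bytes;
  }
  void IncreaseAllocatedBytes(size_t bytes) { size_ += bytes; }
  void DecreaseAllocatedBytes(size_t bytes) {
    assert(bytes <= size_);
    size_ -= bytes;
  }
  void ClearSize() { size_ = 0; }

 private:
  size_t capacity_ = 0;  // total usable bytes across all pages
  size_t size_ = 0;      // bytes currently accounted as allocated
};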
void VerifyLiveBytes() const;
void VerifyCountersAfterSweeping(Heap* heap) const;
void VerifyCountersBeforeConcurrentSweeping() const;

void Print() override;

static void ReportCodeStatistics(Isolate* isolate);
static void ResetCodeStatistics(Isolate* isolate);

int CountTotalPages() const;
inline int AreaSize() const { return static_cast<int>(area_size_); }

return compaction_space_kind_ != CompactionSpaceKind::kNone;
return compaction_space_kind_;

virtual void RefillFreeList();

return reinterpret_cast<PageMetadata*>(memory_chunk_list_.front());
return reinterpret_cast<const PageMetadata*>(memory_chunk_list_.front());
return reinterpret_cast<PageMetadata*>(memory_chunk_list_.back());
return reinterpret_cast<const PageMetadata*>(memory_chunk_list_.back());

std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;

void RefineAllocatedBytesAfterSweeping(PageMetadata* page);

bool HasPages() const { return first_page() != nullptr; }
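// Illustrative sketch of the pattern above: the space keeps its pages in a
// chunk list and exposes typed first_page()/last_page() accessors by casting
// the list's front/back entries. The container choice and the ChunkBase/
// PageMeta/SpaceSketch names are assumptions, not the memory_chunk_list_ type.
#include <list>

struct ChunkBase { /* fields common to all chunks */ };
struct PageMeta : ChunkBase { /* page-specific metadata */ };

class SpaceSketch {
 public:
  bool HasPages() const { return !chunks_.empty(); }
  PageMeta* first_page() {
    return chunks_.empty() ? nullptr : static_cast<PageMeta*>(chunks_.front());
  }
  PageMeta* last_page() {
    return chunks_.empty() ? nullptr : static_cast<PageMeta*>(chunks_.back());
  }
  void Append(PageMeta* page) { chunks_.push_back(page); }

 private:
  std::list<ChunkBase*> chunks_;  // stand-in for the space's chunk list
};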
return committed_physical_memory_.load(std::memory_order_relaxed);

std::atomic<size_t> committed_physical_memory_{0};
size_t size_at_last_gc_ = 0;
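// Illustrative sketch: a relaxed std::atomic<size_t> counter matching the
// Increment/Decrement/load(std::memory_order_relaxed) usage of
// committed_physical_memory_ above. The wrapper class name is an assumption.
#include <atomic>
#include <cstddef>

class CommittedMemoryCounter {
 public:
  void Increment(size_t bytes) {
    committed_.fetch_add(bytes, std::memory_order_relaxed);
  }
  void Decrement(size_t bytes) {
    committed_.fetch_sub(bytes, std::memory_order_relaxed);
  }
  size_t Load() const {
    // Relaxed is sufficient: the value is a statistic, not a synchronizer.
    return committed_.load(std::memory_order_relaxed);
  }

 private:
  std::atomic<size_t> committed_{0};
};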
template <bool during_sweep>

if (space->SupportsConcurrentAllocation()) {
  guard_.emplace(&space->space_mutex_);
}

return !is_compaction_space() && (identity() != NEW_SPACE);
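// Illustrative sketch of the conditional-locking pattern above: the guard is
// engaged only when the space supports concurrent allocation, and is released
// automatically at scope exit. Uses standard <mutex>/<optional> types rather
// than base::MutexGuard; the SpaceLike/ScopedAllocationLock names are
// assumptions for the example.
#include <mutex>
#include <optional>

class SpaceLike {
 public:
  bool SupportsConcurrentAllocation() const { return concurrent_; }
  std::mutex& mutex() { return mutex_; }

 private:
  bool concurrent_ = true;
  std::mutex mutex_;
};

class ScopedAllocationLock {
 public:
  explicit ScopedAllocationLock(SpaceLike* space) {
    if (space->SupportsConcurrentAllocation()) {
      guard_.emplace(space->mutex());  // lock only when actually needed
    }
  }

 private:
  std::optional<std::lock_guard<std::mutex>> guard_;
};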
friend class heap::HeapTester;

std::unique_ptr<FreeList> free_list,
                compaction_space_kind) {}

                compaction_space_kind),
      destination_heap_(destination_heap) {
  DCHECK(is_compaction_space());

const std::vector<PageMetadata*>& GetNewPages() { return new_pages_; }

void RefillFreeList() final;

if (type == ExternalBackingStoreType::kArrayBuffer)
  return heap()->OldArrayBufferBytes();
return external_backing_store_bytes_[static_cast<int>(type)];
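// Illustrative sketch: per-type byte counters indexed by an enum class via
// static_cast<int>, as in the external_backing_store_bytes_ lookup above. The
// BackingStoreType enum and ExternalBytes class here are assumptions, not the
// real ExternalBackingStoreType.
#include <array>
#include <cstddef>

enum class BackingStoreType { kArrayBuffer, kExternalString, kNumTypes };

class ExternalBytes {
 public:
  void Add(BackingStoreType type, size_t bytes) {
    bytes_[static_cast<int>(type)] += bytes;
  }
  size_t Get(BackingStoreType type) const {
    return bytes_[static_cast<int>(type)];
  }

 private:
  std::array<size_t, static_cast<int>(BackingStoreType::kNumTypes)> bytes_{};
};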
size_t filler_size_on_page);

using OldSpace::OldSpace;

return Size() - allocated_old_size_;
return allocated_old_size_;
allocated_old_size_ = allocated_old_size;
allocated_old_size_ += size;
allocated_old_size_ -= size;

void AdjustDifferenceInAllocatedBytes(size_t) override;

size_t allocated_old_size_ = 0;

return external_backing_store_bytes_[static_cast<int>(type)];
return external_backing_store_bytes_[static_cast<int>(type)];
return external_backing_store_bytes_[static_cast<int>(type)];

template <typename Callback>

std::variant<PageIterator, LargePageIterator> iterator_;
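// Illustrative sketch: holding one of two iterator types in a std::variant and
// driving whichever is active through std::visit, similar in spirit to the
// PageIterator/LargePageIterator member above. The toy page types and the
// ChunkCursor name are assumptions for the example.
#include <variant>
#include <vector>

struct SmallPage { int id; };
struct LargePage { int id; };

using SmallIt = std::vector<SmallPage>::const_iterator;
using LargeIt = std::vector<LargePage>::const_iterator;

struct ChunkCursor {
  std::variant<SmallIt, LargeIt> it;

  int CurrentId() const {
    return std::visit([](const auto& i) { return i->id; }, it);
  }
  void Advance() {
    std::visit([](auto& i) { ++i; }, it);
  }
};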
CompactionSpace old_space_
CompactionSpaceCollection(Heap* heap, CompactionSpaceKind compaction_space_kind)
CompactionSpace trusted_space_
CompactionSpace* Get(AllocationSpace space)
std::optional<CompactionSpace> shared_space_
CompactionSpace code_space_
CompactionSpace(Heap* heap, AllocationSpace id, Executability executable, CompactionSpaceKind compaction_space_kind, DestinationHeap destination_heap)
std::vector<PageMetadata*> new_pages_
bool snapshotable() const final
const DestinationHeap destination_heap_
const std::vector<PageMetadata*>& GetNewPages()
bool operator==(iterator other) const
bool operator!=(iterator other) const
std::forward_iterator_tag iterator_category
PtrComprCageBase cage_base() const
PtrComprCageBase cage_base_
void AdvanceToNextObject()
const PageMetadata* const page_
HeapObjectRange(const PageMetadata* page)
static Tagged<HeapObject> FromAddress(Address address)
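// Illustrative sketch: a minimal forward iterator over variably sized records
// packed in a byte buffer, showing the iterator_category, operator==/!=, and
// advance-by-object-size pattern used by the object range above. The record
// layout (first byte encodes the record size) is an assumption for the example.
#include <cstdint>
#include <iterator>

struct RecordIter {
  using iterator_category = std::forward_iterator_tag;
  using value_type = const std::uint8_t*;

  const std::uint8_t* pos = nullptr;

  value_type operator*() const { return pos; }
  RecordIter& operator++() {
    pos += *pos;  // step to the next record using the current record's size
    return *this;
  }
  bool operator==(const RecordIter& other) const { return pos == other.pos; }
  bool operator!=(const RecordIter& other) const { return !(*this == other); }
};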
kTrustedLargeObjectSpace
OldGenerationMemoryChunkIterator(Heap* heap)
MutablePageMetadata* next()
static void ForAll(Heap* heap, Callback callback)
std::variant<PageIterator, LargePageIterator> iterator_
size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final
ConcurrentAllocationMutex(const PagedSpaceBase* space)
std::optional<base::MutexGuard> guard_
virtual void NotifyNewPage(PageMetadata* page)
void DecreaseAllocatedBytes(size_t bytes, PageMetadata* page)
V8_INLINE size_t FreeInternal(Address start, size_t size_in_bytes)
virtual bool snapshotable() const
const_iterator begin() const
void DecreaseCapacity(size_t bytes)
PageMetadata* last_page() override
void IncreaseAllocatedBytes(size_t bytes, PageMetadata* page)
~PagedSpaceBase() override
bool CanExpand(size_t size) const
PageMetadata* first_page() override
AllocationStats accounting_stats_
void ClearAllocatorState()
size_t committed_physical_memory() const
bool is_compaction_space() const
CompactionSpaceKind compaction_space_kind() const
size_t Size() const override
Executability executable() const
const_iterator end() const
const PageMetadata* last_page() const override
void IncreaseCapacity(size_t bytes)
virtual void AdjustDifferenceInAllocatedBytes(size_t diff)
bool SupportsConcurrentAllocation() const
CompactionSpaceKind compaction_space_kind_
Executability executable_
V8_WARN_UNUSED_RESULT std::optional<std::pair<Address, size_t>> RawAllocateBackground(LocalHeap* local_heap, size_t min_size_in_bytes, size_t max_size_in_bytes, AllocationOrigin origin)
const PageMetadata* first_page() const override
ConstPageRange::iterator current_page_
HeapObjectRange::iterator end_
const PagedSpaceBase* const space_
ConstPageRange page_range_
HeapObjectRange::iterator cur_
PagedSpace(Heap* heap, AllocationSpace id, Executability executable, std::unique_ptr<FreeList> free_list, CompactionSpaceKind compaction_space_kind)
void ReleasePage(PageMetadata* page) override
size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final
size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final
SharedTrustedSpace(Heap* heap)
size_t old_objects_size() const
void set_old_objects_size(size_t allocated_old_size)
size_t young_objects_size() const
void NotifyBlackAreaCreated(size_t size) override
void NotifyBlackAreaDestroyed(size_t size) override
static StickySpace* From(OldSpace* space)
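// Illustrative sketch: a space that tracks how many of its bytes belong to old
// objects and derives the young portion as the remainder, mirroring the
// old_objects_size()/young_objects_size() accessors above and the
// Size() - allocated_old_size_ fragment earlier. Class and method names other
// than those accessors are assumptions.
#include <cstddef>

class StickyLikeSpace {
 public:
  size_t Size() const { return size_; }
  size_t old_objects_size() const { return allocated_old_size_; }
  size_t young_objects_size() const { return size_ - allocated_old_size_; }

  void Allocate(size_t bytes) { size_ += bytes; }            // new objects start young
  void PromoteToOld(size_t bytes) { allocated_old_size_ += bytes; }

 private:
  size_t size_ = 0;               // all allocated bytes in the space
  size_t allocated_old_size_ = 0; // bytes attributed to old objects
};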
size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final
V8_EXPORT_PRIVATE FlagValues v8_flags
static constexpr Address kNullAddress
#define NON_EXPORTED_BASE(code)
#define DCHECK_LE(v1, v2)
#define DCHECK_GE(v1, v2)
#define DCHECK(condition)
#define DCHECK_EQ(v1, v2)
#define V8_EXPORT_PRIVATE
#define V8_WARN_UNUSED_RESULT