#ifndef V8_HEAP_NEW_SPACES_H_
#define V8_HEAP_NEW_SPACES_H_

class MutablePageMetadata;
class SemiSpaceNewSpace;
void VerifyPageMetadata() const;
result += ExternalBackingStoreBytes(type);
virtual void Grow(size_t new_capacity) = 0;
size_t min_semispace_capacity, size_t max_semispace_capacity);
bool ContainsSlow(Address a) const final;

void Grow(size_t new_capacity) final;
void Shrink(size_t new_capacity);

size_t Size() const final;
size_t SizeOfObjects() const final { return Size(); }
size_t actual_capacity =
    std::max(to_space_.current_capacity(), target_capacity_);
return (actual_capacity / PageMetadata::kPageSize) *
       MemoryChunkLayout::AllocatableMemoryInDataPage();

return (to_space_.current_capacity_safe() / PageMetadata::kPageSize) *
       MemoryChunkLayout::AllocatableMemoryInDataPage();
return from_space_.CommittedMemory() + to_space_.CommittedMemory();

return from_space_.MaximumCommittedMemory() +
       to_space_.MaximumCommittedMemory();

size_t CommittedPhysicalMemory() const final;
size_t Available() const final;
bool ReachedTargetCapacity() const {
  return to_space_.current_capacity_ >= target_capacity_;
}
if (type == ExternalBackingStoreType::kArrayBuffer)
  return heap()->YoungArrayBufferBytes();
DCHECK_EQ(0, from_space_.ExternalBackingStoreBytes(type));
return to_space_.ExternalBackingStoreBytes(type);
size_t AllocatedSinceLastGC() const final;
void GrowToMaximumCapacityForTesting() final;

size_t MaximumCapacity() const final { return maximum_capacity_; }

return to_space_.space_start();

void SetAgeMarkAndBelowAgeMarkPageFlags();
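// Parked allocation buffers are leftover linear allocation areas that are
// remembered so their space can be reused rather than wasted (see
// kAllocationBufferParkingThreshold); the exact reuse policy lives in the
// .cc file.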
bool AddParkedAllocationBuffer(int size_in_bytes,
                               AllocationAlignment alignment);
void ResetParkedAllocationBuffers();
void Print() override { to_space_.Print(); }

void MakeIterable() override;

void MakeAllPagesInFromSpaceIterable();
void MakeUnusedPagesInToSpaceIterable();

return to_space_.first_page();
std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) final;
bool ShouldPageBePromoted(const MemoryChunk* chunk) const;

V8_INLINE bool ShouldBePromoted(Address object) const;
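// Swaps the from- and to-semispaces; typically done around a young-generation
// garbage collection.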
void SwapSemiSpaces();

void GarbageCollectionPrologue() final;
void GarbageCollectionEpilogue() final;

void ZapUnusedMemory();

int GetSpaceRemainingOnCurrentPageForTesting();
void FillCurrentPageForTesting();
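// Quarantined bytes/pages are to-space memory that is kept in place across a
// GC (e.g. because objects on it are pinned) instead of being evacuated.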
size_t QuarantinedSize() const { return quarantined_size_; }

return to_space_.quarantined_pages_count_;

quarantined_size_ = quarantined_size;
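// The age mark separates objects that already survived one GC from younger
// allocations; addresses below it are candidates for promotion.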
V8_INLINE bool IsAddressBelowAgeMark(Address address) const;
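// Returns the [start, end) bounds of a fresh linear allocation area, or
// std::nullopt if the request cannot be satisfied.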
std::optional<std::pair<Address, Address>> Allocate(
    int size_in_bytes, AllocationAlignment alignment);
std::optional<std::pair<Address, Address>> AllocateOnNewPageBeyondCapacity(
    int size_in_bytes, AllocationAlignment alignment);
void Free(Address start, Address end);

allocation_top_ =
    to_space_.current_page() ? to_space_.page_low() : kNullAddress;

V8_INLINE void IncrementAllocationTop(Address new_top);
V8_INLINE void DecrementAllocationTop(Address new_top);
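// Top of the current linear allocation area in to-space.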
Address allocation_top_ = kNullAddress;
size_t quarantined_size_ = 0;
size_t size_after_last_gc_ = 0;
const size_t minimum_capacity_ = 0;
const size_t maximum_capacity_ = 0;
Address age_mark_ = kNullAddress;
size_t target_capacity_ = 0;
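// The members below belong to PagedSpaceForNewSpace, the page-based young
// generation used by the minor mark-sweep collector.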
size_t min_capacity, size_t max_capacity);

void Grow(size_t new_capacity);
void GrowToMaximumCapacityForTesting();

bool StartShrinking(size_t new_target_capacity);
void FinishShrinking();

size_t AllocatedSinceLastGC() const;
return first_page()->area_start();

size_at_last_gc_ = Size();
last_lab_page_ = nullptr;

if (type == ExternalBackingStoreType::kArrayBuffer)
  return heap()->YoungArrayBufferBytes();
return external_backing_store_bytes_[static_cast<int>(type)];
bool ShouldReleaseEmptyPage() const;

void AllocatePageUpToCapacityForTesting();

size_t Available() const final;
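// Capacity that is actually usable for objects, i.e. excluding free-list
// wastage.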
size_t UsableCapacity() const {
  return current_capacity_ - free_list_->wasted_bytes();
}
size_t target_capacity_ = 0;
size_t current_capacity_ = 0;
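// The members below belong to PagedNewSpace, which wraps a
// PagedSpaceForNewSpace (paged_space_) and forwards the NewSpace interface
// to it.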
size_t max_capacity);

bool ContainsSlow(Address a) const final {
  return paged_space_.ContainsSlow(a);
}

void Grow(size_t new_capacity) final { paged_space_.Grow(new_capacity); }

return paged_space_.StartShrinking(new_target_capacity);

size_t Size() const final { return paged_space_.Size(); }

size_t Capacity() const final { return paged_space_.Capacity(); }
return paged_space_.CommittedMemory();
return paged_space_.MaximumCommittedMemory();
return paged_space_.CommittedPhysicalMemory();
size_t Available() const final { return paged_space_.Available(); }
return paged_space_.ExternalBackingStoreBytes(type);
return paged_space_.AllocatedSinceLastGC();
return paged_space_.MinimumCapacity();
return paged_space_.MaximumCapacity();
return paged_space_.first_allocatable_address();
paged_space_.Verify(isolate, visitor);
void Print() final { paged_space_.Print(); }
return paged_space_.first_page();
return paged_space_.last_page();
return paged_space_.GetObjectIterator(heap);
paged_space_.GarbageCollectionEpilogue();
return paged_space_.IsPromotionCandidate(page);
paged_space_.GrowToMaximumCapacityForTesting();
return paged_space_.ShouldReleaseEmptyPage();
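// Checks that an allocation top lies within the bounds of the semispace's
// current page.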
#define DCHECK_SEMISPACE_ALLOCATION_TOP(top, space) \
  SLOW_DCHECK((space).page_low() <= (top) && (top) <= (space).page_high())