42 : local_heap_(local_heap),
43       isolate_heap_(local_heap->heap()),
45 allocation_info_(allocation_info != nullptr ? allocation_info
46 : &owned_allocation_info_),
47 allocator_policy_(space->CreateAllocatorPolicy(this)),
48 supports_extending_lab_(allocator_policy_->SupportsExtendingLAB()),
49 black_allocation_(ComputeBlackAllocation(is_new_generation)) {
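The initializer list above (lines 45–46) lets a caller either inject an external allocation-info block or have the allocator fall back to a member it owns. A minimal sketch of that pattern, with hypothetical stand-in types (`LabInfo`, `AllocatorSketch`) rather than V8's own:

```cpp
#include <cstdint>

// Hypothetical stand-in for a linear allocation area descriptor.
struct LabInfo {
  uintptr_t top = 0;
  uintptr_t limit = 0;
};

class AllocatorSketch {
 public:
  // Use a caller-provided allocation-info block if one is passed,
  // otherwise fall back to the block owned by the allocator itself.
  explicit AllocatorSketch(LabInfo* allocation_info = nullptr)
      : allocation_info_(allocation_info != nullptr ? allocation_info
                                                    : &owned_allocation_info_) {}

  LabInfo& allocation_info() { return *allocation_info_; }

 private:
  LabInfo owned_allocation_info_;
  LabInfo* const allocation_info_;
};
```

Declaring the owned block before the pointer keeps the fallback initialization well-defined while still allowing several allocators to share one externally owned block.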
58 : local_heap_(nullptr),
61 allocation_info_(&owned_allocation_info_),
62 allocator_policy_(space->CreateAllocatorPolicy(this)),
63 supports_extending_lab_(false),
73   int filler_size = Heap::GetFillToAlign(top(), alignment);
75   if (filler_size + offset) {
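Line 73 asks the heap how much filler is needed so the current bump pointer satisfies the requested alignment. A self-contained sketch of that computation, assuming a power-of-two alignment (`FillToAlign` here is an illustrative helper, not V8's `GetFillToAlign`):

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

// Bytes of filler needed so that `address + fill` is aligned to `alignment`.
inline int FillToAlign(uintptr_t address, size_t alignment) {
  assert(alignment != 0 && (alignment & (alignment - 1)) == 0);
  const size_t misalignment = address & (alignment - 1);
  return misalignment == 0 ? 0 : static_cast<int>(alignment - misalignment);
}
```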
149 size_t size_in_bytes,
150 size_t aligned_size_in_bytes,
151 size_t allocation_size) {
152 DCHECK_LE(size_in_bytes, aligned_size_in_bytes);
153 DCHECK_LE(aligned_size_in_bytes, allocation_size);
154 DCHECK(size_in_bytes == aligned_size_in_bytes ||
155 aligned_size_in_bytes == allocation_size);
173 static_cast<int>(size_in_bytes));
200 v8_flags.allow_allocation_in_fast_api_call ||
231 int max_aligned_size = size_in_bytes + Heap::GetMaximumFillToAlign(alignment);
232 int aligned_size_in_bytes;
235 size_in_bytes, &aligned_size_in_bytes, alignment, origin);
236 DCHECK_GE(max_aligned_size, aligned_size_in_bytes);
240 aligned_size_in_bytes, max_aligned_size);
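The aligned slow path above reserves room for the worst case (`size_in_bytes + maximum fill`) before retrying the aligned fast path. A sketch of the underlying idea, with illustrative stand-in types rather than V8's (`BumpArea`, `AllocateAligned`):

```cpp
#include <cstddef>
#include <cstdint>

// Hypothetical bump-pointer area: [top, limit).
struct BumpArea {
  uintptr_t top;
  uintptr_t limit;
};

// Place an object of `size_in_bytes` at the first address >= top that is
// aligned to `alignment` (a power of two), after checking the worst case fits.
inline bool AllocateAligned(BumpArea* area, size_t size_in_bytes,
                            size_t alignment, uintptr_t* result) {
  const size_t max_fill = alignment - 1;  // worst-case filler for this alignment
  if (area->limit - area->top < size_in_bytes + max_fill) return false;
  const size_t misalignment = area->top & (alignment - 1);
  const size_t fill = misalignment == 0 ? 0 : alignment - misalignment;
  *result = area->top + fill;            // aligned object start
  area->top = *result + size_in_bytes;   // bump past filler and object
  return true;
}
```

Any filler actually consumed (which may be less than the worst case) would still have to be turned into an iterable filler object in a real heap.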
254 if (current_top != current_limit) {
256         current_top, static_cast<int>(current_limit - current_top));
264   if (current_top != kNullAddress && current_top != current_limit) {
273   if (current_top != kNullAddress && current_top != current_limit) {
316   return top && top <= object_address && object_address < limit;
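Line 316 is a half-open range test: an address counts as a pending allocation only if it lies inside the currently published linear allocation area `[top, limit)`. A trivial standalone version of the same check (names are hypothetical):

```cpp
#include <cstdint>

inline bool IsInsideLab(uintptr_t top, uintptr_t limit,
                        uintptr_t object_address) {
  return top != 0 && top <= object_address && object_address < limit;
}
```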
322 #ifdef V8_RUNTIME_CALL_STATS
323 std::optional<RuntimeCallTimerScope> rcs_scope;
326 RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
329 std::optional<VMState<GC>> vmstate;
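Lines 323 and 329 use `std::optional` to hold RAII scope objects that are only engaged when the corresponding feature is enabled, so the common path pays nothing. A sketch of that conditional-RAII pattern with a hypothetical `ScopedTimer` standing in for `RuntimeCallTimerScope` / `VMState`:

```cpp
#include <chrono>
#include <cstdio>
#include <optional>

// Illustrative RAII timer; prints elapsed time on destruction.
class ScopedTimer {
 public:
  explicit ScopedTimer(const char* name)
      : name_(name), start_(std::chrono::steady_clock::now()) {}
  ~ScopedTimer() {
    const auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                        std::chrono::steady_clock::now() - start_)
                        .count();
    std::printf("%s took %lld us\n", name_, static_cast<long long>(us));
  }

 private:
  const char* name_;
  std::chrono::steady_clock::time_point start_;
};

void SlowAllocatePath(bool stats_enabled) {
  std::optional<ScopedTimer> scope;        // empty by default: zero cost
  if (stats_enabled) scope.emplace("SlowAllocateRaw");
  // ... slow-path work ...
}
```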
354                                     size_t min_size) const {
367 return start + min_size;
381     size_t rounded_step = static_cast<size_t>(
383 step_size = std::min(step_size, rounded_step);
387     step_size = std::min(step_size, static_cast<size_t>(64));
391 return start + std::max(step_size, min_size);
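ComputeLimit caps the LAB limit by the next allocation-observer step (and by a small constant under stress marking), but never lets it drop below what the current request needs. A hedged, self-contained sketch of that computation, with all names being illustrative stand-ins:

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>

// Choose a LAB limit inside [start, end]: small enough to hit the next
// observer step, large enough to serve the current request of min_size.
inline uintptr_t ComputeLabLimit(uintptr_t start, uintptr_t end,
                                 size_t min_size, size_t next_observer_step,
                                 bool stress_marking) {
  size_t step_size = end - start;                    // default: use full area
  step_size = std::min(step_size, next_observer_step);
  if (stress_marking) {
    step_size = std::min(step_size, static_cast<size_t>(64));
  }
  const uintptr_t limit = start + std::max(step_size, min_size);
  return std::min(limit, end);
}
```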
395 void MainAllocator::Verify() const {
457 std::optional<base::MutexGuard> guard;
462 std::optional<std::pair<Address, Address>> allocation_result =
464 if (!allocation_result) {
471   if (!space_->heap()->ShouldExpandYoungGenerationOnSlowAllocation(
477   if (!allocation_result) return false;
483   int filler_size = Heap::GetFillToAlign(start, alignment);
484 int aligned_size_in_bytes = size_in_bytes + filler_size;
493     size_t used = std::max(aligned_size_in_bytes, kLabSizeInGC);
494     limit = std::min(end, start + used);
518 std::optional<base::MutexGuard> guard;
541 paged_space_allocator_policy_(
569   if (space_heap()->incremental_marking()->IsMinorMarking()) {
589 DCHECK(p->SweepingDone());
590 p->ForAllFreeListCategories(
592 DCHECK_IMPLIES(!category->is_empty(),
593 category->is_linked(space->free_list()));
609 static_cast<size_t>(size_in_bytes), origin);
613 bool IsPagedNewSpaceAtFullCapacity(const PagedNewSpace* space) {
614 const auto* paged_space = space->paged_space();
615 if ((paged_space->UsableCapacity() < paged_space->TotalCapacity()) &&
616 (paged_space->TotalCapacity() - paged_space->UsableCapacity() >=
627   if (IsPagedNewSpaceAtFullCapacity(space_) &&
628       !space_->heap()->ShouldExpandYoungGenerationOnSlowAllocation(
661 size_in_bytes += Heap::GetMaximumFillToAlign(alignment);
684 const bool running_from_client_isolate_and_allocating_in_shared_space =
687 if (running_from_client_isolate_and_allocating_in_shared_space) {
703   if (space_heap()->sweeper()->ShouldRefillFreelistForSpace(
712 static constexpr int kMaxPagesToSweep = 1;
725 const bool incremental_marking_with_black_allocated_pages_is_running =
728 if (!incremental_marking_with_black_allocated_pages_is_running &&
734   if (page != nullptr) {
745 space_heap()->ShouldExpandOldGenerationOnSlowAllocation(
789 const bool is_main_thread =
792 const auto sweeping_scope_kind =
798 isolate_heap()->tracer(), sweeping_scope_id, sweeping_scope_kind,
799 isolate_heap()->sweeper()->GetTraceIdForFlowEvent(sweeping_scope_id),
808 sweeping_mode, max_pages)) {
819   if (v8_flags.black_allocated_pages) return;
823 page->CreateBlackArea(top, limit);
843 size_t new_node_size = 0;
845 space_->free_list_->Allocate(size_in_bytes, &new_node_size, origin);
846   if (new_node.is_null()) return false;
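Lines 843–846 ask the space's free list for a node of at least `size_in_bytes`; if none is found the caller has to sweep or expand instead. A rough, simplified sketch of a size-bucketed free-list lookup (this data structure is an illustration, not V8's `FreeList`):

```cpp
#include <cstddef>
#include <cstdint>
#include <map>

// Hypothetical free-list node: start address and usable size.
struct FreeNode {
  uintptr_t address;
  size_t size;
};

class SimpleFreeList {
 public:
  void Add(uintptr_t address, size_t size) { nodes_.emplace(size, address); }

  // On success, fills `*node` with a block of at least `size_in_bytes`
  // and removes it from the list; the block then becomes the new LAB.
  bool Allocate(size_t size_in_bytes, FreeNode* node) {
    auto it = nodes_.lower_bound(size_in_bytes);  // first block big enough
    if (it == nodes_.end()) return false;
    *node = {it->second, it->first};
    nodes_.erase(it);
    return true;
  }

 private:
  std::multimap<size_t, uintptr_t> nodes_;  // size -> start address
};
```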
892 if (current_top + size_in_bytes > max_limit) {
901 static_cast<int>(max_limit - new_limit));
931 current_max_limit == current_limit);
935   if (!v8_flags.black_allocated_pages) {
936 if (current_top != current_limit &&
949     space_->Free(current_top, current_max_limit - current_top);
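Line 949 returns whatever is left between the current bump pointer and the LAB's maximal limit to the owning space when the linear allocation area is torn down. A minimal sketch of that release step, assuming a hypothetical `SpaceLike::Free`:

```cpp
#include <cstddef>
#include <cstdint>

// Illustrative stand-in for a space that takes back unused memory.
struct SpaceLike {
  void Free(uintptr_t start, size_t size_in_bytes) {
    // In a real heap this would re-add [start, start + size) to a free list
    // and create a filler object so the page stays iterable.
  }
};

inline void ReleaseLabTail(SpaceLike* space, uintptr_t current_top,
                           uintptr_t current_max_limit) {
  if (current_top != 0 && current_top != current_max_limit) {
    space->Free(current_top, current_max_limit - current_top);
  }
}
```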