main-allocator.cc
// Copyright 2023 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/main-allocator.h"

#include <optional>

#include "src/base/logging.h"
#include "src/common/globals.h"
#include "src/execution/vm-state-inl.h"
#include "src/execution/vm-state.h"
#include "src/heap/free-list-inl.h"
#include "src/heap/gc-tracer-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/main-allocator-inl.h"
#include "src/heap/new-spaces.h"
#include "src/heap/page-metadata-inl.h"
#include "src/heap/paged-spaces.h"
#include "src/heap/spaces.h"

namespace v8 {
namespace internal {

constexpr MainAllocator::BlackAllocation MainAllocator::ComputeBlackAllocation(
    MainAllocator::IsNewGeneration is_new_generation) {
  if (is_new_generation == IsNewGeneration::kYes) {
    return BlackAllocation::kAlwaysDisabled;
  }
  if (v8_flags.sticky_mark_bits) {
    // Allocate black on all non-young spaces.
    return BlackAllocation::kAlwaysEnabled;
  }
  return BlackAllocation::kEnabledOnMarking;
}

MainAllocator::MainAllocator(LocalHeap* local_heap, SpaceWithLinearArea* space,
                             IsNewGeneration is_new_generation,
                             LinearAllocationArea* allocation_info)
    : local_heap_(local_heap),
      isolate_heap_(local_heap->heap()),
      space_(space),
      allocation_info_(allocation_info != nullptr ? allocation_info
                                                  : &owned_allocation_info_),
      allocator_policy_(space->CreateAllocatorPolicy(this)),
      supports_extending_lab_(allocator_policy_->SupportsExtendingLAB()),
      black_allocation_(ComputeBlackAllocation(is_new_generation)) {
  CHECK_NOT_NULL(local_heap_);
  if (local_heap_->is_main_thread()) {
    allocation_counter_.emplace();
    linear_area_original_data_.emplace();
  }
}

MainAllocator::MainAllocator(Heap* heap, SpaceWithLinearArea* space, InGCTag)
    : local_heap_(nullptr),
      isolate_heap_(heap),
      space_(space),
      allocation_info_(&owned_allocation_info_),
      allocator_policy_(space->CreateAllocatorPolicy(this)),
      supports_extending_lab_(false),
      black_allocation_(BlackAllocation::kAlwaysDisabled) {
  DCHECK(!allocation_counter_.has_value());
  DCHECK(!linear_area_original_data_.has_value());
}

Address MainAllocator::AlignTopForTesting(AllocationAlignment alignment,
                                          int offset) {
  DCHECK(top());

  int filler_size = Heap::GetFillToAlign(top(), alignment);

  if (filler_size + offset) {
    space_heap()->CreateFillerObjectAt(top(), filler_size + offset);
    allocation_info().IncrementTop(filler_size + offset);
  }

  return top();
}

AllocationResult MainAllocator::AllocateRawForceAlignmentForTesting(
    int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
  size_in_bytes = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);

  AllocationResult result =
      AllocateFastAligned(size_in_bytes, nullptr, alignment, origin);

  return V8_UNLIKELY(result.IsFailure())
             ? AllocateRawSlowAligned(size_in_bytes, alignment, origin)
             : result;
}

void MainAllocator::AddAllocationObserver(AllocationObserver* observer) {
  // Adding an allocation observer may decrease the inline allocation limit, so
  // we check here that we don't have an existing LAB.
  CHECK(!allocation_counter().IsStepInProgress());
  DCHECK(!IsLabValid());
  allocation_counter().AddAllocationObserver(observer);
}

void MainAllocator::RemoveAllocationObserver(AllocationObserver* observer) {
  // AllocationObservers can remove themselves, so we can't CHECK here that no
  // allocation step is in progress. It is also okay if there are existing LABs
  // because removing an allocation observer can only increase the distance to
  // the next step.
  allocation_counter().RemoveAllocationObserver(observer);
}

void MainAllocator::PauseAllocationObservers() { AdvanceAllocationObservers(); }

void MainAllocator::ResumeAllocationObservers() { MarkLabStartInitialized(); }

void MainAllocator::AdvanceAllocationObservers() {
  if (allocation_info().top() &&
      allocation_info().start() != allocation_info().top()) {
    if (isolate_heap()->IsAllocationObserverActive()) {
      allocation_counter().AdvanceAllocationObservers(
          allocation_info().top() - allocation_info().start());
    }
    MoveOriginalTopForward();
  }
}

void MainAllocator::MarkLabStartInitialized() {
  AdvanceAllocationObservers();
#if DEBUG
  Verify();
#endif
}

// Perform an allocation step when the step is reached. size_in_bytes is the
// actual size needed for the object (required for InvokeAllocationObservers).
// aligned_size_in_bytes is the size of the object including the filler right
// before it to reach the right alignment (required to DCHECK the start of the
// object). allocation_size is the size of the actual allocation which needs to
// be used for the accounting. It can be different from aligned_size_in_bytes in
// PagedSpace::AllocateRawAligned, where we have to overallocate in order to be
// able to align the allocation afterwards.
void MainAllocator::InvokeAllocationObservers(Address soon_object,
                                              size_t size_in_bytes,
                                              size_t aligned_size_in_bytes,
                                              size_t allocation_size) {
  DCHECK_LE(size_in_bytes, aligned_size_in_bytes);
  DCHECK_LE(aligned_size_in_bytes, allocation_size);
  DCHECK(size_in_bytes == aligned_size_in_bytes ||
         aligned_size_in_bytes == allocation_size);

  if (!SupportsAllocationObserver() ||
      !isolate_heap()->IsAllocationObserverActive()) {
    return;
  }

  if (allocation_size >= allocation_counter().NextBytes()) {
    // Only the first object in a LAB should reach the next step.
    DCHECK_EQ(soon_object, allocation_info().start() + aligned_size_in_bytes -
                               size_in_bytes);

    // Right now the LAB only contains that one object.
    DCHECK_EQ(allocation_info().top() + allocation_size - aligned_size_in_bytes,
              allocation_info().limit());

    // Ensure that there is a valid object.
    space_heap()->CreateFillerObjectAt(soon_object,
                                       static_cast<int>(size_in_bytes));

#if DEBUG
    // Ensure that allocation_info_ isn't modified during one of the
    // AllocationObserver::Step methods.
    LinearAllocationArea saved_allocation_info = allocation_info();
#endif

    // Run AllocationObserver::Step through the AllocationCounter.
    allocation_counter().InvokeAllocationObservers(soon_object, size_in_bytes,
                                                   allocation_size);

    // Ensure that start/top/limit didn't change.
    DCHECK_EQ(saved_allocation_info.start(), allocation_info().start());
    DCHECK_EQ(saved_allocation_info.top(), allocation_info().top());
    DCHECK_EQ(saved_allocation_info.limit(), allocation_info().limit());
  }

  DCHECK_LT(allocation_info().limit() - allocation_info().start(),
            allocation_counter().NextBytes());
}
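
// Illustration with hypothetical numbers: for a kDoubleAligned request of
// size_in_bytes == 12 whose start needs 4 bytes of filler,
// aligned_size_in_bytes == 16 (filler + object). In
// PagedSpace::AllocateRawAligned the allocation may additionally be padded at
// the end, so allocation_size can be larger still, which is why the
// accounting above is based on allocation_size rather than
// aligned_size_in_bytes.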

AllocationResult MainAllocator::AllocateRawSlow(int size_in_bytes,
                                                AllocationAlignment alignment,
                                                AllocationOrigin origin) {
  // We are not supposed to allocate in fast C calls.
  CHECK_IMPLIES(is_main_thread(),
                v8_flags.allow_allocation_in_fast_api_call ||
                    !isolate_heap()->isolate()->InFastCCall());

  AllocationResult result =
      USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned
          ? AllocateRawSlowAligned(size_in_bytes, alignment, origin)
          : AllocateRawSlowUnaligned(size_in_bytes, origin);
  return result;
}

AllocationResult MainAllocator::AllocateRawSlowUnaligned(
    int size_in_bytes, AllocationOrigin origin) {
  if (!EnsureAllocation(size_in_bytes, kTaggedAligned, origin)) {
    return AllocationResult::Failure();
  }

  AllocationResult result = AllocateFastUnaligned(size_in_bytes, origin);
  DCHECK(!result.IsFailure());

  InvokeAllocationObservers(result.ToAddress(), size_in_bytes, size_in_bytes,
                            size_in_bytes);

  return result;
}

AllocationResult MainAllocator::AllocateRawSlowAligned(
    int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
  if (!EnsureAllocation(size_in_bytes, alignment, origin)) {
    return AllocationResult::Failure();
  }

  int max_aligned_size = size_in_bytes + Heap::GetMaximumFillToAlign(alignment);
  int aligned_size_in_bytes;

  AllocationResult result = AllocateFastAligned(
      size_in_bytes, &aligned_size_in_bytes, alignment, origin);
  DCHECK_GE(max_aligned_size, aligned_size_in_bytes);
  DCHECK(!result.IsFailure());

  InvokeAllocationObservers(result.ToAddress(), size_in_bytes,
                            aligned_size_in_bytes, max_aligned_size);

  return result;
}

void MainAllocator::MakeLinearAllocationAreaIterable() {
  if (!IsLabValid()) return;

#if DEBUG
  Verify();
#endif  // DEBUG

  Address current_top = top();
  Address current_limit = limit();
  if (current_top != current_limit) {
    space_heap()->CreateFillerObjectAt(
        current_top, static_cast<int>(current_limit - current_top));
  }
}

void MainAllocator::MarkLinearAllocationAreaBlack() {
  Address current_top = top();
  Address current_limit = limit();
  if (current_top != kNullAddress && current_top != current_limit) {
    PageMetadata::FromAllocationAreaAddress(current_top)
        ->CreateBlackArea(current_top, current_limit);
  }
}

void MainAllocator::UnmarkLinearAllocationArea() {
  Address current_top = top();
  Address current_limit = limit();
  if (current_top != kNullAddress && current_top != current_limit) {
    PageMetadata::FromAllocationAreaAddress(current_top)
        ->DestroyBlackArea(current_top, current_limit);
  }
}

bool MainAllocator::IsPendingAllocation(Address object_address) {
  base::MutexGuard guard(linear_area_original_data().linear_area_lock());
  Address top = original_top_acquire();
  Address limit = original_limit_relaxed();
  DCHECK_LE(top, limit);
  return top && top <= object_address && object_address < limit;
}

bool MainAllocator::EnsureAllocation(int size_in_bytes,
                                     AllocationAlignment alignment,
                                     AllocationOrigin origin) {
#ifdef V8_RUNTIME_CALL_STATS
  std::optional<RuntimeCallTimerScope> rcs_scope;
  if (is_main_thread()) {
    rcs_scope.emplace(isolate_heap()->isolate(),
                      RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
  }
#endif  // V8_RUNTIME_CALL_STATS
  std::optional<VMState<GC>> vmstate;
  if (is_main_thread()) {
    vmstate.emplace(isolate_heap()->isolate());
  }
  return allocator_policy_->EnsureAllocation(size_in_bytes, alignment, origin);
}
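
// Note: the wrapper above only installs bookkeeping scopes (the optional
// runtime-call-stats timer and a VMState<GC>), and only on the main thread;
// the actual refill logic lives in the space-specific AllocatorPolicy
// implementations further down in this file.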

void MainAllocator::FreeLinearAllocationArea() {
  if (!IsLabValid()) return;

#if DEBUG
  Verify();
#endif  // DEBUG

  allocator_policy_->FreeLinearAllocationArea();
}

Address MainAllocator::ComputeLimit(Address start, Address end,
                                    size_t min_size) const {
  DCHECK_GE(end - start, min_size);

  // Use the full LAB when allocation observers aren't enabled.
  if (!SupportsAllocationObserver()) return end;

  // LABs with allocation observers are only used outside GC and on the main
  // thread.
  DCHECK(!isolate_heap()->IsInGC());
  DCHECK(is_main_thread());

  if (!isolate_heap()->IsInlineAllocationEnabled()) {
    // LABs are disabled, so we fit the requested area exactly.
    return start + min_size;
  }

  // When LABs are enabled, pick the largest possible LAB size by default.
  size_t step_size = end - start;

  if (isolate_heap()->IsAllocationObserverActive()) {
    // Ensure there are no unaccounted allocations.
    DCHECK_EQ(allocation_info().start(), allocation_info().top());

    size_t step = allocation_counter().NextBytes();
    DCHECK_NE(step, 0);
    // Generated code may allocate inline from the linear allocation area. To
    // make sure we can observe these allocations, we use a lower limit.
    size_t rounded_step = static_cast<size_t>(
        RoundDown(static_cast<int>(step - 1), ObjectAlignment()));
    step_size = std::min(step_size, rounded_step);
  }

  if (v8_flags.stress_marking) {
    step_size = std::min(step_size, static_cast<size_t>(64));
  }

  DCHECK_LE(start + step_size, end);
  return start + std::max(step_size, min_size);
}
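
// Illustration with hypothetical numbers: for start == 0x1000, end == 0x3000
// and an active allocation observer whose NextBytes() is 512, the default
// step_size of 0x2000 bytes is clamped to RoundDown(511, ObjectAlignment()),
// i.e. 504 if ObjectAlignment() is 8, so the returned limit is
// start + max(504, min_size). Under --stress-marking the LAB is additionally
// capped at 64 bytes.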

#if DEBUG
void MainAllocator::Verify() const {
  // Ensure validity of LAB: start <= top.
  DCHECK_LE(allocation_info().start(), allocation_info().top());

  if (top()) {
    PageMetadata* page = PageMetadata::FromAllocationAreaAddress(top());
    // Can't compare owner directly because of new space semi spaces.
    DCHECK_EQ(page->owner_identity(), identity());
  }

  if (linear_area_original_data_.has_value()) {
    // Ensure that original_top <= top <= limit <= original_limit.
    DCHECK_LE(linear_area_original_data().get_original_top_acquire(),
              allocation_info().top());
    DCHECK_LE(allocation_info().top(), allocation_info().limit());
    DCHECK_LE(allocation_info().limit(),
              linear_area_original_data().get_original_limit_relaxed());
  } else {
    DCHECK_LE(allocation_info().top(), allocation_info().limit());
  }
}
#endif  // DEBUG

bool MainAllocator::EnsureAllocationForTesting(int size_in_bytes,
                                               AllocationAlignment alignment,
                                               AllocationOrigin origin) {
  return EnsureAllocation(size_in_bytes, alignment, origin);
}

int MainAllocator::ObjectAlignment() const {
  if (identity() == CODE_SPACE) {
    return kCodeAlignment;
  } else if (V8_COMPRESS_POINTERS_8GB_BOOL) {
    return kObjectAlignment8GbHeap;
  } else {
    return kTaggedSize;
  }
}

AllocationSpace MainAllocator::identity() const { return space_->identity(); }

bool MainAllocator::is_main_thread() const {
  return !in_gc() && local_heap()->is_main_thread();
}

bool MainAllocator::in_gc_for_space() const {
  return in_gc() && isolate_heap() == space_heap();
}

Heap* MainAllocator::space_heap() const { return space_->heap(); }

bool SemiSpaceNewSpaceAllocatorPolicy::EnsureAllocation(
    int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
  std::optional<base::MutexGuard> guard;
  if (allocator_->in_gc()) guard.emplace(space_->mutex());

  FreeLinearAllocationAreaUnsynchronized();

  std::optional<std::pair<Address, Address>> allocation_result =
      space_->Allocate(size_in_bytes, alignment);
  if (!allocation_result) {
      // If allocation failed even though we have not even grown the space to
      // its target capacity yet, we can bail out early.
      return false;
    }

    if (!space_->heap()->ShouldExpandYoungGenerationOnSlowAllocation(
      return false;
    }
    allocation_result =
        space_->AllocateOnNewPageBeyondCapacity(size_in_bytes, alignment);
    if (!allocation_result) return false;
  }

  Address start = allocation_result->first;
  Address end = allocation_result->second;

  int filler_size = Heap::GetFillToAlign(start, alignment);
  int aligned_size_in_bytes = size_in_bytes + filler_size;
  DCHECK_LE(start + aligned_size_in_bytes, end);

  Address limit;

  if (allocator_->in_gc()) {
    // During GC we allow multiple LABs in new space and since Allocate() above
    // returns the whole remaining page by default, we limit the size of the LAB
    // here.
    size_t used = std::max(aligned_size_in_bytes, kLabSizeInGC);
    limit = std::min(end, start + used);
  } else {
    limit = allocator_->ComputeLimit(start, end, aligned_size_in_bytes);
  }
  CHECK_LE(limit, end);

  if (limit != end) {
    space_->Free(limit, end);
  }

  allocator_->ResetLab(start, limit, limit);

  space_->AddRangeToActiveSystemPages(allocator_->top(),
                                      allocator_->limit());
  return true;
}
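
// Note: outside of GC the LAB limit is chosen by ComputeLimit() above, so
// allocation observers and --stress-marking keep working; during GC, where
// multiple LABs may be handed out from the same page, the LAB is instead
// capped at kLabSizeInGC.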

void SemiSpaceNewSpaceAllocatorPolicy::FreeLinearAllocationArea() {
  if (!allocator_->IsLabValid()) return;

#if DEBUG
  allocator_->Verify();
#endif  // DEBUG

  std::optional<base::MutexGuard> guard;
  if (allocator_->in_gc()) guard.emplace(space_->mutex());

  FreeLinearAllocationAreaUnsynchronized();
}

void SemiSpaceNewSpaceAllocatorPolicy::
    FreeLinearAllocationAreaUnsynchronized() {
  if (!allocator_->IsLabValid()) return;

  Address current_top = allocator_->top();
  Address current_limit = allocator_->limit();

  allocator_->AdvanceAllocationObservers();
  allocator_->ResetLab(kNullAddress, kNullAddress, kNullAddress);

  space_->Free(current_top, current_limit);
}

PagedNewSpaceAllocatorPolicy::PagedNewSpaceAllocatorPolicy(
    PagedNewSpace* space, MainAllocator* allocator)
    : AllocatorPolicy(allocator),
      space_(space),
      paged_space_allocator_policy_(
          new PagedSpaceAllocatorPolicy(space->paged_space(), allocator)) {}

bool PagedNewSpaceAllocatorPolicy::EnsureAllocation(
    int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
    // No need to write a filler to the remaining lab because it will either be
    // reallocated if the lab can be extended or freed otherwise.
  }

  if (!paged_space_allocator_policy_->EnsureAllocation(size_in_bytes, alignment,
                                                       origin)) {
    if (!TryAllocatePage(size_in_bytes, origin)) {
      if (!WaitForSweepingForAllocation(size_in_bytes, origin)) {
        return false;
      }
    }
  }

  if (space_heap()->incremental_marking()->IsMinorMarking()) {
    space_heap()->concurrent_marking()->RescheduleJobIfNeeded(
        GarbageCollector::MINOR_MARK_SWEEPER);
  }

  return true;
}

bool PagedNewSpaceAllocatorPolicy::WaitForSweepingForAllocation(
    int size_in_bytes, AllocationOrigin origin) {
  // This method should be called only when there are no more pages for main
  // thread to sweep.
  DCHECK(space_heap()->sweeper()->IsSweepingDoneForSpace(NEW_SPACE));
  if (!v8_flags.concurrent_sweeping || !space_heap()->sweeping_in_progress())
    return false;
  Sweeper* sweeper = space_heap()->sweeper();
  if (!sweeper->AreMinorSweeperTasksRunning() &&
      !sweeper->ShouldRefillFreelistForSpace(NEW_SPACE)) {
#if DEBUG
    for (PageMetadata* p : *space_) {
      DCHECK(p->SweepingDone());
      p->ForAllFreeListCategories(
          [space = space_->paged_space()](FreeListCategory* category) {
            DCHECK_IMPLIES(!category->is_empty(),
                           category->is_linked(space->free_list()));
          });
    }
#endif  // DEBUG
    // All pages are already swept and relinked to the free list.
    return false;
  }
  // When getting here we know that any unswept new space page is currently
  // being handled by a concurrent sweeping thread. Rather than try to cancel
  // tasks and restart them, we wait "per page". This should be faster.
  for (PageMetadata* p : *space_) {
    if (!p->SweepingDone()) sweeper->WaitForPageToBeSwept(p);
  }
  return paged_space_allocator_policy_->TryAllocationFromFreeList(
      static_cast<size_t>(size_in_bytes), origin);
}

namespace {
bool IsPagedNewSpaceAtFullCapacity(const PagedNewSpace* space) {
  const auto* paged_space = space->paged_space();
  if ((paged_space->UsableCapacity() < paged_space->TotalCapacity()) &&
      (paged_space->TotalCapacity() - paged_space->UsableCapacity() >=
    // Adding another page would exceed the target capacity of the space.
    return false;
  }
  return true;
}
}  // namespace

bool PagedNewSpaceAllocatorPolicy::TryAllocatePage(int size_in_bytes,
                                                   AllocationOrigin origin) {
  if (IsPagedNewSpaceAtFullCapacity(space_) &&
      !space_->heap()->ShouldExpandYoungGenerationOnSlowAllocation(
    return false;
  if (!space_->paged_space()->AllocatePage()) return false;
  return paged_space_allocator_policy_->TryAllocationFromFreeList(size_in_bytes,
                                                                  origin);
}

bool PagedSpaceAllocatorPolicy::EnsureAllocation(int size_in_bytes,
                                                 AllocationAlignment alignment,
                                                 AllocationOrigin origin) {
  if (allocator_->identity() == NEW_SPACE) {
    space_heap()->StartMinorMSIncrementalMarkingIfNeeded();
  }
  if ((allocator_->identity() != NEW_SPACE) && !allocator_->in_gc()) {
    // Start incremental marking before the actual allocation; this allows the
    // allocation function to mark the object black when incremental marking is
    // running.
    space_heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
        allocator_->local_heap(), space_heap()->GCFlagsForIncrementalMarking(),
        kGCCallbackScheduleIdleGarbageCollection);
  }

  // We don't know exactly how much filler we need to align until space is
  // allocated, so assume the worst case.
  size_in_bytes += Heap::GetMaximumFillToAlign(alignment);
  if (allocator_->allocation_info().top() + size_in_bytes <=
      allocator_->allocation_info().limit()) {
    return true;
  }
  return RefillLab(size_in_bytes, origin);
}

bool PagedSpaceAllocatorPolicy::RefillLab(int size_in_bytes,
                                          AllocationOrigin origin) {
  // Allocation in this space has failed.
  DCHECK_GE(size_in_bytes, 0);

  if (TryExtendLAB(size_in_bytes)) return true;

  if (TryAllocationFromFreeList(size_in_bytes, origin)) return true;

  // Don't steal pages from the shared space of the main isolate if running as a
  // client. The issue is that the concurrent marker may be running on the main
  // isolate and may reach the page and read its flags, which will then end up
  // in a race when the page of the compaction space will be merged back to the
  // main space. For the same reason, don't take swept pages from the main
  // shared space.
  const bool running_from_client_isolate_and_allocating_in_shared_space =
      !isolate_heap()->isolate()->is_shared_space_isolate() &&
      allocator_->identity() == SHARED_SPACE;
  if (running_from_client_isolate_and_allocating_in_shared_space) {
    // Avoid OOM crash in the GC in order to invoke NearHeapLimitCallback after
    // GC and give it a chance to increase the heap limit.
    if (!isolate_heap()->force_oom() &&
        TryExpandAndAllocate(size_in_bytes, origin)) {
      return true;
    }
    return false;
  }

  // Sweeping is still in progress. The sweeper doesn't work with black
  // allocated pages, so it's fine for the compaction space to refill the
  // freelist from just swept pages.
  if (space_heap()->sweeping_in_progress()) {
    // First try to refill the free-list; concurrent sweeper threads
    // may have freed some objects in the meantime.
    if (space_heap()->sweeper()->ShouldRefillFreelistForSpace(
            allocator_->identity())) {
      space_->RefillFreeList();

      // Retry the free list allocation.
      if (TryAllocationFromFreeList(static_cast<size_t>(size_in_bytes), origin))
        return true;
    }

    static constexpr int kMaxPagesToSweep = 1;
    if (ContributeToSweeping(kMaxPagesToSweep)) {
      if (TryAllocationFromFreeList(size_in_bytes, origin)) {
        return true;
      }
    }
  }

  // If there is not enough memory in the compaction space left, try to steal
  // a page from the corresponding "regular" page space.
  // Don't do this though when black allocated pages are enabled and incremental
  // marking is in progress, because otherwise evacuating into a black allocated
  // page will cause the marker to miss the object.
  const bool incremental_marking_with_black_allocated_pages_is_running =
      v8_flags.black_allocated_pages &&
      space_heap()->incremental_marking()->IsMajorMarking();
  if (!incremental_marking_with_black_allocated_pages_is_running &&
      space_->is_compaction_space()) {
    PagedSpaceBase* main_space =
        space_heap()->paged_space(allocator_->identity());
    PageMetadata* page = main_space->RemovePageSafe(size_in_bytes);
    if (page != nullptr) {
      // Make sure we don't evacuate into a black allocated page.
      DCHECK_IMPLIES(v8_flags.black_allocated_pages,
                     !page->Chunk()->IsFlagSet(MemoryChunk::BLACK_ALLOCATED));
      space_->AddPage(page);
      if (TryAllocationFromFreeList(static_cast<size_t>(size_in_bytes), origin))
        return true;
    }
  }

  if (allocator_->identity() != NEW_SPACE &&
      space_heap()->ShouldExpandOldGenerationOnSlowAllocation(
          allocator_->local_heap(), origin) &&
      space_heap()->CanExpandOldGeneration(space_->AreaSize())) {
    if (TryExpandAndAllocate(static_cast<size_t>(size_in_bytes), origin)) {
      return true;
    }
  }

  // Try sweeping all pages.
  if (ContributeToSweeping()) {
    if (TryAllocationFromFreeList(size_in_bytes, origin)) {
      return true;
    }
  }

  if (allocator_->identity() != NEW_SPACE && allocator_->in_gc() &&
      !space_heap()->force_oom()) {
    // Avoid OOM crash in the GC in order to invoke NearHeapLimitCallback after
    // GC and give it a chance to increase the heap limit.
    if (TryExpandAndAllocate(size_in_bytes, origin)) {
      return true;
    }
  }
  return false;
}
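
// Summary of the fallback order implemented above: (1) try to extend the
// current LAB, (2) retry the free list, (3) if sweeping is in progress,
// refill the free list from swept pages and contribute to sweeping, (4) for
// compaction spaces, steal a page from the corresponding main space, (5)
// expand the space if the old generation is allowed to grow, (6) sweep all
// remaining pages, and finally (7) during GC, expand anyway to avoid an OOM
// before NearHeapLimitCallback had a chance to raise the limit.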

bool PagedSpaceAllocatorPolicy::TryExpandAndAllocate(size_t size_in_bytes,
                                                     AllocationOrigin origin) {
  // Run in a loop because concurrent threads might allocate from the new free
  // list entries before this thread gets a chance.
  while (space_->TryExpand(allocator_->local_heap(), origin)) {
    if (TryAllocationFromFreeList(static_cast<size_t>(size_in_bytes), origin)) {
      return true;
    }
  }
  return false;
}

bool PagedSpaceAllocatorPolicy::ContributeToSweeping(uint32_t max_pages) {
  if (!space_heap()->sweeping_in_progress_for_space(allocator_->identity()))
    return false;
  if (space_heap()->sweeper()->IsSweepingDoneForSpace(allocator_->identity()))
    return false;

  const bool is_main_thread =
      allocator_->is_main_thread() ||
      (allocator_->in_gc() && isolate_heap()->IsMainThread());
  const auto sweeping_scope_kind =
      is_main_thread ? ThreadKind::kMain : ThreadKind::kBackground;
  const auto sweeping_scope_id = space_heap()->sweeper()->GetTracingScope(
      allocator_->identity(), is_main_thread);

  TRACE_GC_EPOCH_WITH_FLOW(
      isolate_heap()->tracer(), sweeping_scope_id, sweeping_scope_kind,
      isolate_heap()->sweeper()->GetTraceIdForFlowEvent(sweeping_scope_id),
      TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);
  // Cleanup invalidated old-to-new refs for compaction space in the
  // final atomic pause.
  Sweeper::SweepingMode sweeping_mode =
      allocator_->in_gc_for_space() ? Sweeper::SweepingMode::kEagerDuringGC
                                    : Sweeper::SweepingMode::kLazyOrConcurrent;

  if (!space_heap()->sweeper()->ParallelSweepSpace(allocator_->identity(),
                                                   sweeping_mode, max_pages)) {
    return false;
  }
  space_->RefillFreeList();
  return true;
}

void PagedSpaceAllocatorPolicy::SetLinearAllocationArea(Address top,
                                                        Address limit,
                                                        Address end) {
  allocator_->ResetLab(top, limit, end);
  if (v8_flags.black_allocated_pages) return;
  if (top != kNullAddress && top != limit) {
    PageMetadata* page = PageMetadata::FromAllocationAreaAddress(top);
    if (space_heap()->incremental_marking()->black_allocation()) {
      page->CreateBlackArea(top, limit);
    }
  }
}

bool PagedSpaceAllocatorPolicy::TryAllocationFromFreeList(
    size_t size_in_bytes, AllocationOrigin origin) {
  DCHECK(IsAligned(size_in_bytes, kTaggedSize));
#ifdef DEBUG
  if (allocator_->top() != allocator_->limit()) {
    DCHECK_EQ(PageMetadata::FromAddress(allocator_->top()),
              PageMetadata::FromAddress(allocator_->limit() - 1));
  }
#endif
  // Don't free list allocate if there is linear space available.
  DCHECK_LT(static_cast<size_t>(allocator_->limit() - allocator_->top()),
            size_in_bytes);

  size_t new_node_size = 0;
  Tagged<FreeSpace> new_node =
      space_->free_list_->Allocate(size_in_bytes, &new_node_size, origin);
  if (new_node.is_null()) return false;
  DCHECK_GE(new_node_size, size_in_bytes);

  // The old-space-step might have finished sweeping and restarted marking.
  // Verify that it did not turn the page of the new node into an evacuation
  // candidate.
  DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));

  // Mark the old linear allocation area with a free space map so it can be
  // skipped when scanning the heap. This also puts it back in the free list
  // if it is big enough.

  // Memory in the linear allocation area is counted as allocated. We may free
  // a little of this again immediately - see below.
  PageMetadata* page = PageMetadata::FromHeapObject(new_node);
  space_->IncreaseAllocatedBytes(new_node_size, page);

  Address start = new_node.address();
  Address end = new_node.address() + new_node_size;
  Address limit = allocator_->ComputeLimit(start, end, size_in_bytes);
  DCHECK_LE(limit, end);
  DCHECK_LE(size_in_bytes, limit - start);
  if (limit != end) {
    if (!allocator_->supports_extending_lab()) {
      space_->Free(limit, end - limit);
      end = limit;
    } else {
      space_heap()->CreateFillerObjectAt(limit, static_cast<int>(end - limit));
    }
  }
  SetLinearAllocationArea(start, limit, end);
  space_->AddRangeToActiveSystemPages(page, start, limit);

  return true;
}
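
// Note: a successful free-list hit turns the node into the new LAB. The tail
// beyond the computed limit is either given back to the free list right away
// or kept behind a filler object so the page stays iterable.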

bool PagedSpaceAllocatorPolicy::TryExtendLAB(int size_in_bytes) {
  if (!allocator_->supports_extending_lab()) return false;
  Address current_top = allocator_->top();
  if (current_top == kNullAddress) return false;
  Address current_limit = allocator_->limit();
  Address max_limit = allocator_->original_limit_relaxed();
  if (current_top + size_in_bytes > max_limit) {
    return false;
  }
  allocator_->AdvanceAllocationObservers();
  Address new_limit =
      allocator_->ComputeLimit(current_top, max_limit, size_in_bytes);
  allocator_->ExtendLAB(new_limit);
  space_heap()->CreateFillerObjectAt(new_limit,
                                     static_cast<int>(max_limit - new_limit));
  PageMetadata* page = PageMetadata::FromAddress(current_top);
  // No need to create a black allocation area since new space doesn't use
  // black allocation.
  space_->AddRangeToActiveSystemPages(page, current_limit, new_limit);
  return true;
}
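
// Note: extending the LAB only moves the limit towards max_limit; the gap
// between the new limit and max_limit stays iterable via a filler object,
// and the newly usable range [current_limit, new_limit) is added to the
// page's active system pages.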

void PagedSpaceAllocatorPolicy::FreeLinearAllocationArea() {
  if (!allocator_->IsLabValid()) return;

  base::MutexGuard guard(space_->mutex());
  FreeLinearAllocationAreaUnsynchronized();
}

void PagedSpaceAllocatorPolicy::FreeLinearAllocationAreaUnsynchronized() {
  if (!allocator_->IsLabValid()) return;

#if DEBUG
  allocator_->Verify();
#endif  // DEBUG

  Address current_top = allocator_->top();
  Address current_limit = allocator_->limit();

  Address current_max_limit = allocator_->supports_extending_lab()
                                  ? allocator_->original_limit_relaxed()
                                  : current_limit;
  DCHECK_IMPLIES(!allocator_->supports_extending_lab(),
                 current_max_limit == current_limit);

  allocator_->AdvanceAllocationObservers();

  if (!v8_flags.black_allocated_pages) {
    if (current_top != current_limit &&
        space_heap()->incremental_marking()->black_allocation()) {
      PageMetadata::FromAddress(current_top)
          ->DestroyBlackArea(current_top, current_limit);
    }
  }

  allocator_->ResetLab(kNullAddress, kNullAddress, kNullAddress);
  DCHECK_GE(current_limit, current_top);

  DCHECK_IMPLIES(current_limit - current_top >= 2 * kTaggedSize,
                 space_heap()->marking_state()->IsUnmarked(
                     HeapObject::FromAddress(current_top)));
  space_->Free(current_top, current_max_limit - current_top);
}

}  // namespace internal
}  // namespace v8