v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
new-spaces.cc
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/new-spaces.h"

#include <atomic>
#include <optional>

#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
// ...
#include "src/heap/heap-inl.h"
// ...
#include "src/heap/safepoint.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/spaces.h"
#include "src/heap/zapping.h"

namespace v8 {
namespace internal {

// -----------------------------------------------------------------------------
// SemiSpace implementation

PageMetadata* SemiSpace::InitializePage(MutablePageMetadata* mutable_page) {
  bool in_to_space = (id() != kFromSpace);
  MemoryChunk* chunk = mutable_page->Chunk();
  chunk->SetFlagNonExecutable(in_to_space ? MemoryChunk::TO_PAGE
                                          : MemoryChunk::FROM_PAGE);
  PageMetadata* page = PageMetadata::cast(mutable_page);
  page->list_node().Initialize();
  CHECK(page->IsLivenessClear());
  return page;
}

SemiSpace::SemiSpace(Heap* heap, SemiSpaceId semispace)
    : Space(heap, NEW_SPACE, nullptr), id_(semispace) {}

SemiSpace::~SemiSpace() {
  const bool is_marking = heap_->isolate()->isolate_data()->is_marking();
  while (!memory_chunk_list_.Empty()) {
    MutablePageMetadata* page = memory_chunk_list_.front();
    memory_chunk_list_.Remove(page);
    if (is_marking) {
      page->ClearLiveness();
    }
    heap_->memory_allocator()->Free(MemoryAllocator::FreeMode::kPool, page);
  }
}

void SemiSpace::ShrinkCapacityTo(size_t capacity) {
  DCHECK(IsAligned(capacity, PageMetadata::kPageSize));
  // Only the to-space is allowed to have quarantined pages.
  DCHECK_IMPLIES(id_ == kFromSpace, quarantined_pages_count_ == 0);

  const size_t quarantined_pages = quarantined_pages_count_;
  // Quarantined pages are not accounted against the capacity of the space.
  const int pages_available_for_allocation =
      static_cast<int>(memory_chunk_list_.size() - quarantined_pages);
  const int num_pages = static_cast<int>((capacity / PageMetadata::kPageSize)) -
                        pages_available_for_allocation;
  if (num_pages >= 0) {
    // The space is already smaller than it needs to be.
    return;
  }

  RewindPages(-num_pages);

  DCHECK_EQ(capacity + quarantined_pages * PageMetadata::kPageSize,
            current_capacity_);
  DCHECK_EQ(
      static_cast<int>(capacity / PageMetadata::kPageSize) + quarantined_pages,
      memory_chunk_list_.size());
}
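
// Illustrative example (not part of the original source): shrinking a to-space
// that currently holds 10 pages, 2 of them quarantined, down to a capacity of
// 4 pages yields pages_available_for_allocation = 10 - 2 = 8 and
// num_pages = 4 - 8 = -4, so RewindPages(4) frees four pages from the tail;
// 4 + 2 = 6 pages remain on the list, which is what the DCHECKs above assert.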

// ...

size_t SemiSpace::CommittedPhysicalMemory() const {
  if (!IsCommitted()) return 0;
  if (!base::OS::HasLazyCommits()) return CommittedMemory();
  return committed_physical_memory_;
}

bool SemiSpace::AllocateFreshPage() {
  PageMetadata* new_page = heap()->memory_allocator()->AllocatePage(
      MemoryAllocator::AllocationMode::kUsePool, this, NOT_EXECUTABLE);
  if (new_page == nullptr) {
    return false;
  }
  memory_chunk_list_.PushBack(new_page);
  AccountCommitted(PageMetadata::kPageSize);
  IncrementCommittedPhysicalMemory(new_page->CommittedPhysicalMemory());
  heap()->CreateFillerObjectAt(new_page->area_start(),
                               static_cast<int>(new_page->area_size()));
  return true;
}

void SemiSpace::RewindPages(int num_pages) {
  DCHECK_GT(num_pages, 0);
  DCHECK(last_page());
  AccountUncommitted(num_pages * PageMetadata::kPageSize);
  size_t uncommitted_physical_memory = 0;
  while (num_pages > 0) {
    PageMetadata* last = last_page();
    CHECK_NOT_NULL(last);
    uncommitted_physical_memory += last->CommittedPhysicalMemory();
    memory_chunk_list_.Remove(last);
    heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kPool, last);
    num_pages--;
  }
  DecrementCommittedPhysicalMemory(uncommitted_physical_memory);
}

void SemiSpace::FixPagesFlags() {
  const auto to_space_flags =
      MemoryChunk::YoungGenerationPageFlags(
          heap()->incremental_marking()->marking_mode()) |
      MemoryChunk::TO_PAGE;
  for (PageMetadata* page : *this) {
    MemoryChunk* chunk = page->Chunk();
    page->set_owner(this);
    if (id_ == kToSpace) {
      chunk->SetFlagsNonExecutable(to_space_flags);
    } else {
      chunk->SetFlagNonExecutable(MemoryChunk::FROM_PAGE);
      // From space must preserve `NEW_SPACE_BELOW_AGE_MARK` which is used for
      // deciding on whether to copy or promote an object.
      chunk->ClearFlagNonExecutable(MemoryChunk::TO_PAGE);
    }
    DCHECK(chunk->InYoungGeneration());
  }
}

void SemiSpace::RemovePage(PageMetadata* page) {
  if (current_page_ == page) {
    if (page->prev_page()) {
      current_page_ = page->prev_page();
    }
  }
  memory_chunk_list_.Remove(page);
  AccountUncommitted(PageMetadata::kPageSize);
  DecrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
  ForAll<ExternalBackingStoreType>(
      [this, page](ExternalBackingStoreType type, int index) {
        DecrementExternalBackingStoreBytes(
            type, page->ExternalBackingStoreBytes(type));
      });
}

void SemiSpace::MovePageToTheEnd(PageMetadata* page) {
  DCHECK_EQ(page->owner(), this);
  memory_chunk_list_.Remove(page);
  memory_chunk_list_.PushBack(page);
  current_page_ = page;
}

// static
void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
  // We swap all properties but id_.
  std::swap(from->memory_chunk_list_, to->memory_chunk_list_);
  std::swap(from->current_page_, to->current_page_);
  ForAll<ExternalBackingStoreType>(
      [from, to](ExternalBackingStoreType type, int index) {
        const size_t tmp = from->external_backing_store_bytes_[index].load(
            std::memory_order_relaxed);
        from->external_backing_store_bytes_[index].store(
            to->external_backing_store_bytes_[index].load(
                std::memory_order_relaxed),
            std::memory_order_relaxed);
        to->external_backing_store_bytes_[index].store(
            tmp, std::memory_order_relaxed);
      });
  std::swap(from->committed_physical_memory_, to->committed_physical_memory_);
  {
    // Swap committed atomic counters.
    size_t to_committed = to->committed_.load();
    to->committed_.store(from->committed_.load());
    from->committed_.store(to_committed);
  }
  std::swap(from->quarantined_pages_count_, to->quarantined_pages_count_);

  // Swapping the `memory_chunk_list_` essentially swaps out the pages (the
  // actual payload) of the to- and from-space.
  to->FixPagesFlags();
  from->FixPagesFlags();
}
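
// Illustrative sketch (an assumption, not code from this file): the scavenger
// is expected to flip the semispaces at the start of a minor GC roughly like
//
//   SemiSpace::Swap(&from_space_, &to_space_);
//
// Swap() exchanges the page lists and all counters but deliberately not id_,
// and FixPagesFlags() then re-tags every page with the TO_PAGE/FROM_PAGE flag
// matching its new owner, so survivors can be evacuated out of the old pages.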

bool SemiSpace::AdvancePage(size_t target_capacity) {
  PageMetadata* next_page;

  if (current_page_) {
    next_page = current_page_->next_page();
  } else {
    // Start allocating on the first page but skip the quarantined pages.
    next_page = first_page();
    for (size_t i = 0; i < quarantined_pages_count_; i++) {
      DCHECK_NOT_NULL(next_page);
      next_page = next_page->next_page();
    }
  }

  if (!next_page && current_capacity_ < target_capacity) {
    if (!AllocateFreshPage()) return false;
    next_page = last_page();
    DCHECK_NOT_NULL(next_page);
  }

  if (!next_page) {
    return false;
  }

  current_page_ = next_page;
  current_capacity_ += PageMetadata::kPageSize;
  DCHECK_IMPLIES(current_capacity_ > target_capacity,
                 heap()->IsNewSpaceAllowedToGrowAboveTargetCapacity());
  return true;
}
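
// Illustrative example (not from the original source): with a to-space page
// list [Q0, Q1, P2, P3] and quarantined_pages_count_ == 2, the first
// AdvancePage() call after a GC starts with current_page_ == nullptr, skips Q0
// and Q1, and makes P2 the current allocation page, so pinned objects on the
// quarantined pages are never allocated over.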

void SemiSpace::IncrementCommittedPhysicalMemory(size_t increment_value) {
  if (!base::OS::HasLazyCommits()) return;
  DCHECK_LE(committed_physical_memory_,
            committed_physical_memory_ + increment_value);
  committed_physical_memory_ += increment_value;
}

void SemiSpace::DecrementCommittedPhysicalMemory(size_t decrement_value) {
  if (!base::OS::HasLazyCommits()) return;
  DCHECK_LE(decrement_value, committed_physical_memory_);
  committed_physical_memory_ -= decrement_value;
}

void SemiSpace::AddRangeToActiveSystemPages(Address start, Address end) {
  PageMetadata* page = current_page();
  MemoryChunk* chunk = page->Chunk();

  DCHECK_LE(chunk->address(), start);
  DCHECK_LT(start, end);
  DCHECK_LE(end, chunk->address() + PageMetadata::kPageSize);

  const size_t added_pages = page->active_system_pages()->Add(
      chunk->Offset(start), chunk->Offset(end),
      MemoryAllocator::GetCommitPageSizeBits());
  IncrementCommittedPhysicalMemory(added_pages *
                                   MemoryAllocator::GetCommitPageSize());
}

std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator(Heap* heap) {
  // Use the SemiSpaceNewSpace::NewObjectIterator to iterate the ToSpace.
  UNREACHABLE();
}

void SemiSpace::Reset() {
  current_page_ = nullptr;
  quarantined_pages_count_ = 0;
}

#ifdef DEBUG
void SemiSpace::Print() {}
#endif

#ifdef VERIFY_HEAP
void SemiSpace::VerifyPageMetadata() const {
  bool is_from_space = (id_ == kFromSpace);
  size_t external_backing_store_bytes[static_cast<int>(
      ExternalBackingStoreType::kNumValues)] = {0};

  size_t actual_pages = 0;
  size_t computed_committed_physical_memory = 0;

  for (const PageMetadata* page : *this) {
    const MemoryChunk* chunk = page->Chunk();
    CHECK_EQ(page->owner(), this);
    CHECK(chunk->InNewSpace());
    CHECK(chunk->IsFlagSet(is_from_space ? MemoryChunk::FROM_PAGE
                                         : MemoryChunk::TO_PAGE));
    CHECK(!chunk->IsFlagSet(is_from_space ? MemoryChunk::TO_PAGE
                                          : MemoryChunk::FROM_PAGE));
    CHECK(!chunk->IsQuarantined());
    if (!is_from_space) {
      // The pointers-from-here-are-interesting flag isn't updated dynamically
      // on from-space pages, so it might be out of sync with the marking state.
      if (page->heap()->incremental_marking()->IsMarking()) {
        CHECK(page->heap()->incremental_marking()->IsMajorMarking());
        CHECK(
            chunk->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
      } else {
        CHECK(
            !chunk->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
      }
      CHECK_IMPLIES(/* ... */,
                    chunk->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK));
    }
    ForAll<ExternalBackingStoreType>(
        [&external_backing_store_bytes, page](ExternalBackingStoreType type,
                                              int index) {
          external_backing_store_bytes[index] +=
              page->ExternalBackingStoreBytes(type);
        });

    computed_committed_physical_memory += page->CommittedPhysicalMemory();

    CHECK_IMPLIES(page->list_node().prev(),
                  page->list_node().prev()->list_node().next() == page);
    actual_pages++;
  }
  CHECK_EQ(actual_pages * PageMetadata::kPageSize, CommittedMemory());
  CHECK_EQ(computed_committed_physical_memory, CommittedPhysicalMemory());
  ForAll<ExternalBackingStoreType>(
      [this, external_backing_store_bytes](ExternalBackingStoreType type,
                                           int index) {
        CHECK_EQ(external_backing_store_bytes[index],
                 ExternalBackingStoreBytes(type));
      });
}
#endif  // VERIFY_HEAP

#ifdef DEBUG
void SemiSpace::AssertValidRange(Address start, Address end) {
  // Addresses belong to same semi-space
  PageMetadata* page = PageMetadata::FromAllocationAreaAddress(start);
  PageMetadata* end_page = PageMetadata::FromAllocationAreaAddress(end);
  SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
  DCHECK_EQ(space, end_page->owner());
  // Start address is before end address, either on same page,
  // or end address is on a later page in the linked list of
  // semi-space pages.
  if (page == end_page) {
    DCHECK_LE(start, end);
  } else {
    while (page != end_page) {
      page = page->next_page();
    }
    DCHECK(page);
  }
}
#endif

void SemiSpace::MoveQuarantinedPage(MemoryChunk* chunk) {
  // Quarantining pages happens at the end of scavenge, after the semi spaces
  // have been swapped. Thus, the quarantined page originates from "from space"
  // and is moved to "to space" to keep pinned objects as live.
  DCHECK(chunk->IsQuarantined());
  DCHECK(chunk->IsFromPage());
  PageMetadata* page = PageMetadata::cast(chunk->Metadata());
  SemiSpace& from_space = heap()->semi_space_new_space()->from_space();
  DCHECK_EQ(&from_space, page->owner());
  DCHECK_NE(&from_space, this);
  from_space.RemovePage(page);
  chunk->ClearFlagNonExecutable(MemoryChunk::FROM_PAGE);
  chunk->SetFlagNonExecutable(MemoryChunk::TO_PAGE);
  page->set_owner(this);
  AccountCommitted(PageMetadata::kPageSize);
  IncrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
  memory_chunk_list_.PushFront(page);
  quarantined_pages_count_++;
}
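
// Note (summarizing the code above): a quarantined page is unlinked from the
// post-flip from-space, re-owned by the to-space, and pushed to the front of
// the page list. AdvancePage() and ShrinkCapacityTo() treat the first
// quarantined_pages_count_ pages specially, so quarantined pages are neither
// reused for allocation nor counted against the allocatable capacity.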

// -----------------------------------------------------------------------------
// NewSpace implementation

// ...

void NewSpace::PromotePageToOldSpace(PageMetadata* page, FreeMode free_mode) {
  DCHECK(!page->Chunk()->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION));
  DCHECK(page->Chunk()->InYoungGeneration());
  RemovePage(page);
  PageMetadata* new_page = PageMetadata::ConvertNewToOld(page, free_mode);
  DCHECK(!new_page->Chunk()->InYoungGeneration());
  USE(new_page);
}

// -----------------------------------------------------------------------------
// SemiSpaceNewSpace implementation

SemiSpaceNewSpace::SemiSpaceNewSpace(Heap* heap,
                                     size_t initial_semispace_capacity,
                                     size_t min_semispace_capacity,
                                     size_t max_semispace_capacity)
    : NewSpace(heap),
      to_space_(heap, kToSpace),
      from_space_(heap, kFromSpace),
      minimum_capacity_(min_semispace_capacity),
      maximum_capacity_(max_semispace_capacity),
      target_capacity_(initial_semispace_capacity) {
  DCHECK(IsAligned(initial_semispace_capacity, PageMetadata::kPageSize));
  // ...
}

void SemiSpaceNewSpace::Grow(size_t new_capacity) {
  heap()->safepoint()->AssertActive();
  DCHECK(MemoryChunk::IsAligned(new_capacity));
  DCHECK_LE(target_capacity_, new_capacity);
  DCHECK_LE(new_capacity, maximum_capacity_);
  target_capacity_ = new_capacity;
}

void SemiSpaceNewSpace::SetAgeMarkAndBelowAgeMarkPageFlags() {
  // ...

  PageMetadata* current_page = to_space().first_page();

  for (size_t i = 0; i < to_space_.quarantined_pages_count_; i++) {
    DCHECK_NOT_NULL(current_page);
    current_page->Chunk()->SetFlagNonExecutable(
        MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
    current_page = current_page->next_page();
  }

  if (age_mark_) {
    PageMetadata* age_mark_page =
        PageMetadata::FromAllocationAreaAddress(age_mark_);
    DCHECK_EQ(age_mark_page->owner(), &to_space());

    // Mark all pages up to (and including) the one containing mark.
    for (; current_page != age_mark_page;
         current_page = current_page->next_page()) {
      current_page->Chunk()->SetFlagNonExecutable(
          MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
    }

    DCHECK_EQ(current_page, age_mark_page);
    current_page->Chunk()->SetFlagNonExecutable(
        MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
  }
}

void SemiSpaceNewSpace::Shrink(size_t new_capacity) {
  DCHECK(MemoryChunk::IsAligned(new_capacity));
  DCHECK_LE(minimum_capacity_, new_capacity);
  DCHECK_LE(new_capacity, target_capacity_);

  target_capacity_ = new_capacity;
  to_space_.ShrinkCapacityTo(new_capacity);
  if (allocation_top()) {
    // ...
  }
}

size_t SemiSpaceNewSpace::CommittedPhysicalMemory() const {
  if (!base::OS::HasLazyCommits()) return CommittedMemory();
  size_t size = to_space_.CommittedPhysicalMemory();
  if (from_space_.IsCommitted()) {
    size += from_space_.CommittedPhysicalMemory();
  }
  return size;
}

// ...

std::optional<std::pair<Address, Address>>
SemiSpaceNewSpace::AllocateOnNewPageBeyondCapacity(
    int size_in_bytes, AllocationAlignment alignment) {
  DCHECK_LT(Available(), size_in_bytes);
  DCHECK(heap()->IsNewSpaceAllowedToGrowAboveTargetCapacity());
  if (!to_space_.AllocateFreshPage()) return std::nullopt;
  return Allocate(size_in_bytes, alignment);
}

bool SemiSpaceNewSpace::AddParkedAllocationBuffer(
    int size_in_bytes, AllocationAlignment alignment) {
  int parked_size = 0;
  Address start = 0;
  for (auto it = parked_allocation_buffers_.begin();
       it != parked_allocation_buffers_.end();) {
    parked_size = it->first;
    start = it->second;
    int filler_size = Heap::GetFillToAlign(start, alignment);
    if (size_in_bytes + filler_size <= parked_size) {
      parked_allocation_buffers_.erase(it);
      PageMetadata* page = PageMetadata::FromAddress(start);
      // We move a page with a parked allocation to the end of the pages list
      // to maintain the invariant that the last page is the used one.
      to_space_.MovePageToTheEnd(page);
      SetAllocationTop(start);
      return true;
    } else {
      it++;
    }
  }
  return false;
}
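
// Illustrative example (numbers assumed, not from the original source): if a
// 2048-byte buffer was parked at address `start` and a 1024-byte request
// arrives that needs no alignment filler, then 1024 + 0 <= 2048, so the parked
// buffer is reused: its page moves to the end of the page list and the
// allocation top is reset to `start`.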

void SemiSpaceNewSpace::ResetParkedAllocationBuffers() {
  parked_allocation_buffers_.clear();
}

int SemiSpaceNewSpace::GetSpaceRemainingOnCurrentPageForTesting() {
  if (!allocation_top()) return 0;
  return static_cast<int>(to_space_.page_high() - allocation_top());
}

// ...

#ifdef VERIFY_HEAP
// We do not use the SemiSpaceObjectIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void SemiSpaceNewSpace::Verify(Isolate* isolate,
                               SpaceVerificationVisitor* visitor) const {
  VerifyObjects(isolate, visitor);

  // Check semi-spaces.
  from_space_.VerifyPageMetadata();
  to_space_.VerifyPageMetadata();
}

// We do not use the SemiSpaceObjectIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void SemiSpaceNewSpace::VerifyObjects(Isolate* isolate,
                                      SpaceVerificationVisitor* visitor) const {
  size_t external_space_bytes[static_cast<int>(
      ExternalBackingStoreType::kNumValues)] = {0};
  PtrComprCageBase cage_base(isolate);
  for (const PageMetadata* page = to_space_.first_page(); page;
       page = page->next_page()) {
    visitor->VerifyPage(page);

    Address current_address = page->area_start();

    while (!PageMetadata::IsAlignedToPageSize(current_address)) {
      Tagged<HeapObject> object = HeapObject::FromAddress(current_address);

      // The first word should be a map, and we expect all map pointers to
      // be in map space or read-only space.
      int size = object->Size(cage_base);

      visitor->VerifyObject(object);

      if (IsExternalString(object, cage_base)) {
        Tagged<ExternalString> external_string = Cast<ExternalString>(object);
        size_t string_size = external_string->ExternalPayloadSize();
        external_space_bytes[static_cast<int>(
            ExternalBackingStoreType::kExternalString)] += string_size;
      }

      current_address += ALIGN_TO_ALLOCATION_ALIGNMENT(size);
    }

    visitor->VerifyPageDone(page);
  }

  ForAll<ExternalBackingStoreType>(
      [this, external_space_bytes](ExternalBackingStoreType type, int index) {
        if (type == ExternalBackingStoreType::kArrayBuffer) {
          return;
        }
        CHECK_EQ(external_space_bytes[index], ExternalBackingStoreBytes(type));
      });

  if (!v8_flags.concurrent_array_buffer_sweeping) {
    size_t bytes = heap()->array_buffer_sweeper()->young().BytesSlow();
    CHECK_EQ(bytes,
             ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
  }
}
#endif  // VERIFY_HEAP

void SemiSpaceNewSpace::MakeIterable() {
  MakeAllPagesInFromSpaceIterable();
  MakeUnusedPagesInToSpaceIterable();
}

void SemiSpaceNewSpace::MakeAllPagesInFromSpaceIterable() {
  if (!IsFromSpaceCommitted()) return;

  // Fix all pages in the "from" semispace.
  for (PageMetadata* page : from_space()) {
    heap()->CreateFillerObjectAt(page->area_start(),
                                 static_cast<int>(page->area_size()));
  }
}

void SemiSpaceNewSpace::MakeUnusedPagesInToSpaceIterable() {
  if (!to_space().current_page()) {
    DCHECK(to_space().memory_chunk_list().Empty());
    return;
  }

  PageIterator it(to_space().current_page());

  // Fix the remaining unused pages in the "to" semispace.
  for (PageMetadata* page = *(++it); page != nullptr; page = *(++it)) {
    heap()->CreateFillerObjectAt(page->area_start(),
                                 static_cast<int>(page->area_size()));
  }
}
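
// Note (an assumption about intent, consistent with the code above): filling
// unused page areas with filler objects keeps every page iterable, i.e. a heap
// walker can step from object to object by size without running into
// uninitialized memory.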

bool SemiSpaceNewSpace::ShouldPageBePromoted(const MemoryChunk* chunk) const {
  if (chunk->IsQuarantined()) {
    return false;
  }
  // If the page contains the current age mark, it contains both objects in the
  // intermediate generation (that could be promoted to old space) and new
  // objects (that should remain in new space). When pinning an intermediate
  // generation object on this page, we don't yet know whether or not the page
  // will also contain pinned new objects (that will prevent us from promoting
  // the page). Thus, we conservatively keep the page in new space. Pinned
  // objects on it will either die or be promoted in the next GC cycle.
  return !chunk->Metadata()->ContainsLimit(age_mark_);
}
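
// Illustrative example (not from the original source): if the age mark points
// into page P, then P holds intermediate-generation objects below the mark and
// brand-new objects above it. Promoting P wholesale would move the new objects
// to old space prematurely, so ShouldPageBePromoted() returns false for P and
// any pinned objects on it are reconsidered in the next GC cycle.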

std::unique_ptr<ObjectIterator> SemiSpaceNewSpace::GetObjectIterator(
    Heap* heap) {
  return std::unique_ptr<ObjectIterator>(new SemiSpaceObjectIterator(this));
}

bool SemiSpaceNewSpace::ContainsSlow(Address a) const {
  return from_space_.ContainsSlow(a) || to_space_.ContainsSlow(a);
}

size_t SemiSpaceNewSpace::Size() const {
  size_t const top = allocation_top();
  size_t const quarantined = QuarantinedSize();

  if (top) {
    DCHECK_GE(top, to_space_.page_low());
    return quarantined +
           (to_space_.current_capacity() - PageMetadata::kPageSize) /
               PageMetadata::kPageSize *
               MemoryChunkLayout::AllocatableMemoryInDataPage() +
           static_cast<size_t>(top - to_space_.page_low());
  } else {
    return quarantined;
  }
}

size_t SemiSpaceNewSpace::Available() const {
  return Capacity() - (Size() - QuarantinedSize());
}

size_t SemiSpaceNewSpace::AllocatedSinceLastGC() const {
  size_t current_size = Size();
  DCHECK_GE(current_size, size_after_last_gc_);
  return current_size - size_after_last_gc_;
}

// ...

void SemiSpaceNewSpace::SwapSemiSpaces() {
  // Flip the semispaces. After flipping, to space is empty and from space has
  // live objects.
  SemiSpace::Swap(&from_space_, &to_space_);
  // ...
  DCHECK_EQ(0u, Size());

#if DEBUG
  for (PageMetadata* p : to_space_) {
    DCHECK(p->IsLivenessClear());
  }
#endif  // DEBUG
}

void SemiSpaceNewSpace::GarbageCollectionEpilogue() {
  DCHECK(!heap()->allocator()->new_space_allocator()->IsLabValid());
  // ...

  if (heap::ShouldZapGarbage() || v8_flags.clear_free_memory) {
    ZapUnusedMemory();
  }

  // Shrink from-space down to target_capacity_ if necessary.
  // to-space always fits into target_capacity_ after the GC.
  from_space_.ShrinkCapacityTo(target_capacity_);
}

void SemiSpaceNewSpace::ZapUnusedMemory() {
  if (!IsFromSpaceCommitted()) {
    return;
  }
  for (PageMetadata* page : PageRange(from_space().first_page(), nullptr)) {
    heap::ZapBlock(page->area_start(),
                   page->HighWaterMark() - page->area_start(),
                   heap::ZapValue());
  }
}

void SemiSpaceNewSpace::RemovePage(PageMetadata* page) {
  DCHECK(!page->Chunk()->IsToPage());
  DCHECK(page->Chunk()->IsFromPage());
  from_space().RemovePage(page);
}

bool SemiSpaceNewSpace::IsPromotionCandidate(
    const MutablePageMetadata* page) const {
  return !page->Contains(age_mark());
}

std::optional<std::pair<Address, Address>> SemiSpaceNewSpace::Allocate(
    int size_in_bytes, AllocationAlignment alignment) {
  size_in_bytes = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);
  Address top = allocation_top();
  if (top) {
    DCHECK_SEMISPACE_ALLOCATION_TOP(top, to_space_);
    Address high = to_space_.page_high();
    int filler_size = Heap::GetFillToAlign(top, alignment);
    int aligned_size_in_bytes = size_in_bytes + filler_size;

    if (top + aligned_size_in_bytes <= high) {
      IncrementAllocationTop(high);
      return std::pair(top, high);
    }

    int remaining_in_page = static_cast<int>(high - top);
    heap()->CreateFillerObjectAt(top, remaining_in_page);
    SetAllocationTop(high);

    // We park unused allocation buffer space of allocations happening from the
    // mutator.
    if (v8_flags.allocation_buffer_parking &&
        heap()->gc_state() == Heap::NOT_IN_GC &&
        remaining_in_page >= kAllocationBufferParkingThreshold) {
      parked_allocation_buffers_.push_back(
          ParkedAllocationBuffer(remaining_in_page, top));
    }
  }

  if (AddFreshPage()) {
    Address start = allocation_top();
    Address end = to_space_.page_high();
    DCHECK_EQ(0, Heap::GetFillToAlign(start, alignment));
    IncrementAllocationTop(end);
    return std::pair(start, end);
  }

  if (v8_flags.allocation_buffer_parking &&
      AddParkedAllocationBuffer(size_in_bytes, alignment)) {
    Address start = allocation_top();
    Address end = to_space_.page_high();
    IncrementAllocationTop(end);
    return std::pair(start, end);
  }

  return std::nullopt;
}
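
// Illustrative sketch (an assumption, not code from this file): a caller is
// expected to treat the returned range as a linear allocation buffer, e.g.
//
//   if (auto lab = Allocate(size_in_bytes, kTaggedAligned)) {
//     auto [start, end] = *lab;  // bump-allocate within [start, end)
//     // ...
//     Free(unused_start, end);   // return the unused tail; this also lowers
//   }                            // the allocation top again
//
// A nullopt result means no fresh page could be added and no parked
// allocation buffer was large enough.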

void SemiSpaceNewSpace::Free(Address start, Address end) {
  DCHECK_LE(start, end);
  heap()->CreateFillerObjectAt(start, static_cast<int>(end - start));

  if (end == allocation_top()) {
    DecrementAllocationTop(start);
  }
}

// ...

// -----------------------------------------------------------------------------
// PagedSpaceForNewSpace implementation

PagedSpaceForNewSpace::PagedSpaceForNewSpace(Heap* heap,
                                             size_t initial_capacity,
                                             size_t min_capacity,
                                             size_t max_capacity)
    : PagedSpaceBase(heap, NEW_SPACE, NOT_EXECUTABLE,
                     FreeList::CreateFreeListForNewSpace(),
                     CompactionSpaceKind::kNone),
      min_capacity_(min_capacity),
      max_capacity_(max_capacity),
      target_capacity_(initial_capacity) {
  // ...
}

PageMetadata* PagedSpaceForNewSpace::InitializePage(
    MutablePageMetadata* mutable_page_metadata) {
  DCHECK_EQ(identity(), NEW_SPACE);
  MemoryChunk* chunk = mutable_page_metadata->Chunk();
  PageMetadata* page = PageMetadata::cast(mutable_page_metadata);
  DCHECK_EQ(
      MemoryChunkLayout::AllocatableMemoryInMemoryChunk(page->owner_identity()),
      page->area_size());
  // Make sure that categories are initialized before freeing the area.
  page->ResetAllocationStatistics();
  chunk->SetFlagsNonExecutable(MemoryChunk::YoungGenerationPageFlags(
      heap()->incremental_marking()->marking_mode()));
  page->ClearLiveness();
  page->AllocateFreeListCategories();
  page->InitializeFreeListCategories();
  page->list_node().Initialize();
  return page;
}

void PagedSpaceForNewSpace::Grow(size_t new_capacity) {
  heap()->safepoint()->AssertActive();
  DCHECK_LE(target_capacity_, new_capacity);
  DCHECK_LE(new_capacity, MaximumCapacity());
  target_capacity_ = new_capacity;
}

void PagedSpaceForNewSpace::GrowToMaximumCapacityForTesting() {
  target_capacity_ = max_capacity_;
}

bool PagedSpaceForNewSpace::StartShrinking(size_t new_target_capacity) {
  DCHECK(heap()->tracer()->IsInAtomicPause());
  if (new_target_capacity > target_capacity_) return false;
  target_capacity_ = new_target_capacity;
  return true;
}

void PagedSpaceForNewSpace::FinishShrinking() {
  DCHECK(heap()->tracer()->IsInAtomicPause());
  if (current_capacity_ > target_capacity_) {
#if DEBUG
    // If `current_capacity_` is higher than `target_capacity_`, i.e. the
    // space could not be shrunk all the way down to `target_capacity_`, it
    // must mean that all pages contain live objects.
    for (PageMetadata* page : *this) {
      DCHECK_NE(0, page->live_bytes());
    }
#endif  // DEBUG
    // After a minor GC, current_capacity_ could still be above max_capacity_
    // when not enough pages got promoted or died.
    target_capacity_ = current_capacity_;
  }
}

// ...

bool PagedSpaceForNewSpace::AllocatePage() {
  // Verify that the free space map is already initialized. Otherwise, new free
  // list entries will be invalid.
  DCHECK_NE(kNullAddress,
            heap()->isolate()->root(RootIndex::kFreeSpaceMap).ptr());
  return TryExpand(heap()->main_thread_local_heap(),
                   AllocationOrigin::kRuntime);
}
939
941 const MutablePageMetadata* page) const {
942 DCHECK_EQ(this, page->owner());
943 if (page == last_lab_page_) return false;
944 return page->AllocatedLabSize() <=
945 static_cast<size_t>(
947 v8_flags.minor_ms_page_promotion_max_lab_threshold / 100);
948}
949
953
957
#ifdef VERIFY_HEAP
void PagedSpaceForNewSpace::Verify(Isolate* isolate,
                                   SpaceVerificationVisitor* visitor) const {
  PagedSpaceBase::Verify(isolate, visitor);

  auto sum_allocated_labs = [](size_t sum, const PageMetadata* page) {
    return sum + page->AllocatedLabSize();
  };
  CHECK_EQ(AllocatedLabSize(),
           std::accumulate(begin(), end(), 0, sum_allocated_labs));
}
#endif  // VERIFY_HEAP

// -----------------------------------------------------------------------------
// PagedNewSpace implementation

PagedNewSpace::PagedNewSpace(Heap* heap, size_t initial_capacity,
                             size_t min_capacity, size_t max_capacity)
    : NewSpace(heap),
      paged_space_(heap, initial_capacity, min_capacity, max_capacity) {}

// ...

}  // namespace internal
}  // namespace v8
Definition macros.h:403