v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
paged-spaces.cc
1// Copyright 2020 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/heap/paged-spaces.h"
6
7#include <atomic>
8#include <iterator>
9
10#include "src/base/logging.h"
13#include "src/common/globals.h"
16#include "src/flags/flags.h"
21#include "src/heap/gc-tracer.h"
22#include "src/heap/heap.h"
32#include "src/heap/safepoint.h"
33#include "src/heap/spaces.h"
34#include "src/heap/sweeper.h"
36#include "src/objects/string.h"
37#include "src/utils/utils.h"
38
39namespace v8 {
40namespace internal {
41
42// ----------------------------------------------------------------------------
43// PagedSpaceObjectIterator
44
45PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
46 const PagedSpaceBase* space)
47 : space_(space),
48 page_range_(space->first_page(), nullptr),
49 current_page_(page_range_.begin()) {
50 heap->MakeHeapIterable();
51 USE(space_);
52}
53
54// We have hit the end of the current page's objects and should advance to
55// the next page.
56bool PagedSpaceObjectIterator::AdvanceToNextPage() {
57 if (current_page_ == page_range_.end()) return false;
58 const PageMetadata* cur_page = *(current_page_++);
59 HeapObjectRange heap_objects(cur_page);
60 cur_ = heap_objects.begin();
61 end_ = heap_objects.end();
62 return true;
63}
64
65// ----------------------------------------------------------------------------
66// PagedSpaceBase implementation
67
68PagedSpaceBase::PagedSpaceBase(Heap* heap, AllocationSpace space,
69 Executability executable,
70 std::unique_ptr<FreeList> free_list,
71 CompactionSpaceKind compaction_space_kind)
72 : SpaceWithLinearArea(heap, space, std::move(free_list)),
73 executable_(executable),
74 compaction_space_kind_(compaction_space_kind) {
77}
78
79PageMetadata* PagedSpaceBase::InitializePage(
80 MutablePageMetadata* mutable_page_metadata) {
81 MemoryChunk* chunk = mutable_page_metadata->Chunk();
82 PageMetadata* page = PageMetadata::cast(mutable_page_metadata);
85 page->area_size());
86 // Make sure that categories are initialized before freeing the area.
87 page->ResetAllocationStatistics();
88 page->AllocateFreeListCategories();
89 page->InitializeFreeListCategories();
90 page->list_node().Initialize();
92 return page;
93}
94
95void PagedSpaceBase::TearDown() {
96 const bool is_marking = heap_->isolate()->isolate_data()->is_marking();
97 while (!memory_chunk_list_.Empty()) {
98 MutablePageMetadata* chunk = memory_chunk_list_.front();
99 memory_chunk_list_.Remove(chunk);
100 const auto mode = (id_ == NEW_SPACE || id_ == OLD_SPACE)
101 ? MemoryAllocator::FreeMode::kPool
102 : MemoryAllocator::FreeMode::kImmediately;
103 if (mode == MemoryAllocator::FreeMode::kPool &&
104 (is_marking || V8_ENABLE_STICKY_MARK_BITS_BOOL)) {
105 chunk->ClearLiveness();
106 }
107 heap()->memory_allocator()->Free(mode, chunk);
108 }
110}
111
112void PagedSpaceBase::MergeCompactionSpace(CompactionSpace* other) {
113 base::MutexGuard guard(mutex());
114
115 DCHECK_NE(NEW_SPACE, identity());
116 DCHECK_NE(NEW_SPACE, other->identity());
117
118 // Move over pages.
119 for (auto it = other->begin(); it != other->end();) {
120 PageMetadata* p = *(it++);
121
122 // Ensure that pages are initialized before objects on them are discovered
123 // by concurrent markers.
125
126 // Relinking requires the category to be unlinked.
127 other->RemovePage(p);
128 AddPage(p);
132
133 // TODO(leszeks): Here we should do an allocation step, but:
134 // 1. Allocation groups are currently not handled properly by the sampling
135 // allocation profiler, and
136 // 2. Observers might try to take the space lock, which isn't reentrant.
137 // We'll have to come up with a better solution for allocation stepping
138 // before shipping, which will likely be using LocalHeap.
139 }
140 const bool is_from_client_heap =
141 (other->destination_heap() ==
142 CompactionSpace::DestinationHeap::kSharedSpaceHeap);
143 DCHECK_IMPLIES(is_from_client_heap, identity() == SHARED_SPACE);
144 for (auto p : other->GetNewPages()) {
145 heap()->NotifyOldGenerationExpansion(
146 heap()->main_thread_local_heap(), identity(), p,
147 is_from_client_heap
148 ? Heap::OldGenerationExpansionNotificationOrigin::kFromClientHeap
149 : Heap::OldGenerationExpansionNotificationOrigin::kFromSameHeap);
150 }
151
152 DCHECK_EQ(0u, other->Size());
153 DCHECK_EQ(0u, other->Capacity());
154}
155
156size_t PagedSpaceBase::CommittedPhysicalMemory() const {
157 if (!base::OS::HasLazyCommits()) {
158 DCHECK_EQ(0, committed_physical_memory());
159 return CommittedMemory();
160 }
161 return committed_physical_memory();
162}
163
164void PagedSpaceBase::IncrementCommittedPhysicalMemory(size_t increment_value) {
165 if (!base::OS::HasLazyCommits() || increment_value == 0) return;
166 size_t old_value = committed_physical_memory_.fetch_add(
167 increment_value, std::memory_order_relaxed);
168 USE(old_value);
169 DCHECK_LT(old_value, old_value + increment_value);
170}
171
172void PagedSpaceBase::DecrementCommittedPhysicalMemory(size_t decrement_value) {
173 if (!base::OS::HasLazyCommits() || decrement_value == 0) return;
174 size_t old_value = committed_physical_memory_.fetch_sub(
175 decrement_value, std::memory_order_relaxed);
176 USE(old_value);
177 DCHECK_GT(old_value, old_value - decrement_value);
178}
179
180#if DEBUG
181void PagedSpaceBase::VerifyCommittedPhysicalMemory() const {
182 heap()->safepoint()->AssertActive();
183 size_t size = 0;
184 for (const PageMetadata* page : *this) {
185 DCHECK(page->SweepingDone());
186 size += page->CommittedPhysicalMemory();
187 }
188 // Ensure that the space's counter matches the sum of all page counters.
190}
191#endif // DEBUG
192
193bool PagedSpaceBase::ContainsSlow(Address addr) const {
194 const MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
195 for (const PageMetadata* page : *this) {
196 if (page->Chunk() == chunk) return true;
197 }
198 return false;
199}
200
201void PagedSpaceBase::RefineAllocatedBytesAfterSweeping(PageMetadata* page) {
202 CHECK(page->SweepingDone());
203 // The live bytes on the page were accounted for in the space's allocated
204 // bytes counter. After sweeping, allocated_bytes() contains the accurate
205 // live byte count for the page.
206 size_t old_counter = page->live_bytes();
207 size_t new_counter = page->allocated_bytes();
208 DCHECK_GE(old_counter, new_counter);
209 if (old_counter > new_counter) {
210 size_t counter_diff = old_counter - new_counter;
211 if (identity() == NEW_SPACE) size_at_last_gc_ -= counter_diff;
212 DecreaseAllocatedBytes(counter_diff, page);
213 DCHECK_EQ(new_counter, accounting_stats_.AllocatedOnPage(page));
214 AdjustDifferenceInAllocatedBytes(counter_diff);
215 }
216 if (!v8_flags.sticky_mark_bits) {
217 // With sticky mark-bits the counter is reset on unmarking.
218 page->SetLiveBytes(0);
219 }
220}
221
222PageMetadata* PagedSpaceBase::RemovePageSafe(int size_in_bytes) {
223 base::MutexGuard guard(mutex());
224 PageMetadata* page = free_list()->GetPageForSize(size_in_bytes);
225 if (!page) return nullptr;
226 RemovePage(page);
227 return page;
228}
229
230void PagedSpaceBase::AddPageImpl(PageMetadata* page) {
231 DCHECK_NOT_NULL(page);
232 CHECK(page->SweepingDone());
233 page->set_owner(this);
234 DCHECK_IMPLIES(identity() == NEW_SPACE,
235 page->Chunk()->IsFlagSet(MemoryChunk::TO_PAGE));
236 DCHECK_IMPLIES(identity() != NEW_SPACE,
237 !page->Chunk()->IsFlagSet(MemoryChunk::TO_PAGE));
238 memory_chunk_list_.PushBack(page);
239 AccountCommitted(page->size());
240 IncreaseCapacity(page->area_size());
241 IncreaseAllocatedBytes(page->allocated_bytes(), page);
242 ForAll<ExternalBackingStoreType>(
243 [this, page](ExternalBackingStoreType type, int index) {
244 IncrementExternalBackingStoreBytes(
245 type, page->ExternalBackingStoreBytes(type));
246 });
247 IncrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
248}
249
250size_t PagedSpaceBase::AddPage(PageMetadata* page) {
251 AddPageImpl(page);
252 return RelinkFreeListCategories(page);
253}
254
255void PagedSpaceBase::RemovePage(PageMetadata* page) {
256 CHECK(page->SweepingDone());
257 DCHECK_IMPLIES(identity() == NEW_SPACE,
258 page->Chunk()->IsFlagSet(MemoryChunk::TO_PAGE));
259 memory_chunk_list_.Remove(page);
261 // Pages are only removed from new space when they are promoted to old
262 // space during a GC. This happens after sweeping has started and the
263 // allocation counters have been reset.
264 DCHECK_IMPLIES(identity() == NEW_SPACE,
265 heap()->gc_state() != Heap::NOT_IN_GC);
266 if (identity() == NEW_SPACE) {
267 page->ReleaseFreeListCategories();
268 } else {
269 DecreaseAllocatedBytes(page->allocated_bytes(), page);
270 free_list()->decrease_wasted_bytes(page->wasted_memory());
271 }
272 DecreaseCapacity(page->area_size());
273 AccountUncommitted(page->size());
274 ForAll<ExternalBackingStoreType>(
275 [this, page](ExternalBackingStoreType type, int index) {
276 DecrementExternalBackingStoreBytes(
277 type, page->ExternalBackingStoreBytes(type));
278 });
279 DecrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
280}
281
282void PagedSpaceBase::ResetFreeList() {
283 for (PageMetadata* page : *this) {
284 free_list_->EvictFreeListItems(page);
285 }
286 DCHECK(free_list_->IsEmpty());
287 DCHECK_EQ(0, free_list_->Available());
288}
289
290bool PagedSpaceBase::TryExpand(LocalHeap* local_heap, AllocationOrigin origin) {
291 DCHECK_EQ(!local_heap, origin == AllocationOrigin::kGC);
292 const size_t accounted_size =
293 MemoryChunkLayout::AllocatableMemoryInMemoryChunk(identity());
294 if (origin != AllocationOrigin::kGC && identity() != NEW_SPACE) {
295 base::MutexGuard expansion_guard(heap_->heap_expansion_mutex());
296 if (!heap()->IsOldGenerationExpansionAllowed(accounted_size,
297 expansion_guard)) {
298 return false;
299 }
300 }
301 const MemoryAllocator::AllocationMode allocation_mode =
302 (identity() == NEW_SPACE || identity() == OLD_SPACE)
303 ? MemoryAllocator::AllocationMode::kUsePool
304 : MemoryAllocator::AllocationMode::kRegular;
305 PageMetadata* page = heap()->memory_allocator()->AllocatePage(
306 allocation_mode, this, executable());
307 if (page == nullptr) return false;
308 DCHECK_EQ(page->area_size(), accounted_size);
309 ConcurrentAllocationMutex guard(this);
310 AddPage(page);
311 if (origin != AllocationOrigin::kGC && identity() != NEW_SPACE) {
312 heap()->NotifyOldGenerationExpansion(local_heap, identity(), page);
313 }
314 Free(page->area_start(), page->area_size());
315 NotifyNewPage(page);
316 return true;
317}
318
319int PagedSpaceBase::CountTotalPages() const {
320 return base::checked_cast<int>(std::distance(begin(), end()));
321}
322
323size_t PagedSpaceBase::Available() const {
324 ConcurrentAllocationMutex guard(this);
325 return free_list_->Available();
326}
327
328size_t PagedSpaceBase::Waste() const {
329 return free_list_->wasted_bytes();
330}
331
335
336void PagedSpaceBase::ReleasePageImpl(PageMetadata* page,
337 MemoryAllocator::FreeMode free_mode) {
338 DCHECK(page->SweepingDone());
339 DCHECK_EQ(0, page->live_bytes());
340 DCHECK_EQ(page->owner(), this);
341
342 DCHECK_IMPLIES(identity() == NEW_SPACE,
343 page->Chunk()->IsFlagSet(MemoryChunk::TO_PAGE));
344
345 memory_chunk_list_.Remove(page);
346
347 free_list_->EvictFreeListItems(page);
348
349 if (identity() == CODE_SPACE) {
350 heap()->isolate()->RemoveCodeMemoryChunk(page);
351 }
352
353 AccountUncommitted(page->size());
354 DecrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
355 accounting_stats_.DecreaseCapacity(page->area_size());
356 heap()->memory_allocator()->Free(free_mode, page);
357}
358
359std::unique_ptr<ObjectIterator> PagedSpaceBase::GetObjectIterator(Heap* heap) {
360 return std::unique_ptr<ObjectIterator>(
361 new PagedSpaceObjectIterator(heap, this));
362}
363
364#ifdef DEBUG
365void PagedSpaceBase::Print() {}
366#endif
367
368#ifdef VERIFY_HEAP
369void PagedSpaceBase::Verify(Isolate* isolate,
370 SpaceVerificationVisitor* visitor) const {
371 CHECK_IMPLIES(identity() != NEW_SPACE, size_at_last_gc_ == 0);
372
373 size_t external_space_bytes[static_cast<int>(
374 ExternalBackingStoreType::kNumValues)] = {0};
375 PtrComprCageBase cage_base(isolate);
376 for (const PageMetadata* page : *this) {
377 size_t external_page_bytes[static_cast<int>(
378 ExternalBackingStoreType::kNumValues)] = {0};
379
380 CHECK_EQ(page->owner(), this);
381 CHECK_IMPLIES(identity() != NEW_SPACE, page->AllocatedLabSize() == 0);
382 visitor->VerifyPage(page);
383
384 CHECK(page->SweepingDone());
385 Address end_of_previous_object = page->area_start();
386 Address top = page->area_end();
387
388 for (Tagged<HeapObject> object : HeapObjectRange(page)) {
389 CHECK(end_of_previous_object <= object.address());
390
391 // Invoke verification method for each object.
392 visitor->VerifyObject(object);
393
394 // All the interior pointers should be contained in the heap.
395 int size = object->Size(cage_base);
396 CHECK(object.address() + size <= top);
397 end_of_previous_object = object.address() + size;
398
399 if (IsExternalString(object, cage_base)) {
400 Tagged<ExternalString> external_string = Cast<ExternalString>(object);
401 size_t payload_size = external_string->ExternalPayloadSize();
402 external_page_bytes[static_cast<int>(
403 ExternalBackingStoreType::kExternalString)] += payload_size;
404 }
405 }
406 ForAll<ExternalBackingStoreType>(
407 [page, external_page_bytes, &external_space_bytes](
408 ExternalBackingStoreType type, int index) {
409 CHECK_EQ(external_page_bytes[index],
410 page->ExternalBackingStoreBytes(type));
411 external_space_bytes[index] += external_page_bytes[index];
412 });
413
414 visitor->VerifyPageDone(page);
415 }
416 ForAll<ExternalBackingStoreType>(
417 [this, external_space_bytes](ExternalBackingStoreType type, int index) {
418 if (type == ExternalBackingStoreType::kArrayBuffer) {
419 return;
420 }
421 CHECK_EQ(external_space_bytes[index], ExternalBackingStoreBytes(type));
422 });
423
424 if (!v8_flags.concurrent_array_buffer_sweeping) {
425 if (identity() == OLD_SPACE) {
426 size_t bytes = heap()->array_buffer_sweeper()->old().BytesSlow();
427 CHECK_EQ(bytes, ExternalBackingStoreBytes(
428 ExternalBackingStoreType::kArrayBuffer));
429 } else if (identity() == NEW_SPACE) {
430 CHECK(v8_flags.minor_ms);
431 size_t bytes = heap()->array_buffer_sweeper()->young().BytesSlow();
432 CHECK_EQ(bytes, ExternalBackingStoreBytes(
433 ExternalBackingStoreType::kArrayBuffer));
434 }
435 }
436
437#ifdef DEBUG
438 VerifyCountersAfterSweeping(isolate->heap());
439#endif
440}
441
442void PagedSpaceBase::VerifyLiveBytes() const {
443 MarkingState* marking_state = heap()->marking_state();
444 PtrComprCageBase cage_base(heap()->isolate());
445 for (const PageMetadata* page : *this) {
446 CHECK(page->SweepingDone());
447 int black_size = 0;
448 for (Tagged<HeapObject> object : HeapObjectRange(page)) {
449 // All the interior pointers should be contained in the heap.
450 if (marking_state->IsMarked(object)) {
451 black_size += object->Size(cage_base);
452 }
453 }
454 CHECK_LE(black_size, page->live_bytes());
455 }
456}
457#endif // VERIFY_HEAP
458
459#ifdef DEBUG
460void PagedSpaceBase::VerifyCountersAfterSweeping(Heap* heap) const {
461 size_t total_capacity = 0;
462 size_t total_allocated = 0;
463 PtrComprCageBase cage_base(heap->isolate());
464 for (const PageMetadata* page : *this) {
465 DCHECK(page->SweepingDone());
466 total_capacity += page->area_size();
467 size_t real_allocated = 0;
468 for (Tagged<HeapObject> object : HeapObjectRange(page)) {
469 if (!IsFreeSpaceOrFiller(object)) {
470 real_allocated +=
471 ALIGN_TO_ALLOCATION_ALIGNMENT(object->Size(cage_base));
472 }
473 }
474 total_allocated += page->allocated_bytes();
475 // The real size can be smaller than the accounted size if array trimming or
476 // object slack tracking happened after sweeping.
477 DCHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
478 DCHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
479 }
480 DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
481 DCHECK_EQ(total_allocated, accounting_stats_.Size());
482}
483
484void PagedSpaceBase::VerifyCountersBeforeConcurrentSweeping() const {
485 size_t total_capacity = 0;
486 size_t total_allocated = 0;
487 for (const PageMetadata* page : *this) {
488 size_t page_allocated =
489 page->SweepingDone() ? page->allocated_bytes() : page->live_bytes();
490 total_capacity += page->area_size();
491 total_allocated += page_allocated;
492 DCHECK_EQ(page_allocated, accounting_stats_.AllocatedOnPage(page));
493 }
494 DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
495 DCHECK_EQ(total_allocated, accounting_stats_.Size());
496}
497#endif
498
499void PagedSpaceBase::AddRangeToActiveSystemPages(PageMetadata* page,
500 Address start, Address end) {
501 MemoryChunk* chunk = page->Chunk();
502 DCHECK_LE(chunk->address(), start);
505
506 const size_t added_pages = page->active_system_pages()->Add(
507 chunk->Offset(start), chunk->Offset(end),
508 MemoryAllocator::GetCommitPageSizeBits());
509
510 IncrementCommittedPhysicalMemory(added_pages *
511 MemoryAllocator::GetCommitPageSize());
512}
513
514void PagedSpaceBase::ReduceActiveSystemPages(
515 PageMetadata* page, ActiveSystemPages active_system_pages) {
516 const size_t reduced_pages =
517 page->active_system_pages()->Reduce(active_system_pages);
518 DecrementCommittedPhysicalMemory(reduced_pages *
519 MemoryAllocator::GetCommitPageSize());
520}
521
522void PagedSpaceBase::UnlinkFreeListCategories(PageMetadata* page) {
523 DCHECK_EQ(this, page->owner());
524 page->ForAllFreeListCategories([this](FreeListCategory* category) {
525 free_list()->RemoveCategory(category);
526 });
527}
528
529size_t PagedSpaceBase::RelinkFreeListCategories(PageMetadata* page) {
530 DCHECK_EQ(this, page->owner());
531 size_t added = 0;
532 page->ForAllFreeListCategories([this, &added](FreeListCategory* category) {
533 added += category->available();
534 category->Relink(free_list());
535 });
536 free_list()->increase_wasted_bytes(page->wasted_memory());
537
538 DCHECK_IMPLIES(!page->Chunk()->IsFlagSet(MemoryChunk::NEVER_ALLOCATE_ON_PAGE),
539 page->AvailableInFreeList() ==
540 page->AvailableInFreeListFromAllocatedBytes());
541 return added;
542}
543
544void PagedSpaceBase::RefillFreeList() {
545 // Any PagedSpace might invoke RefillFreeList.
546 DCHECK(identity() == OLD_SPACE || identity() == CODE_SPACE ||
547 identity() == SHARED_SPACE || identity() == NEW_SPACE ||
548 identity() == TRUSTED_SPACE || identity() == SHARED_TRUSTED_SPACE);
549 DCHECK_IMPLIES(identity() == NEW_SPACE, heap_->IsMainThread());
551
552 for (PageMetadata* p : heap()->sweeper()->GetAllSweptPagesSafe(this)) {
553 // We regularly sweep NEVER_ALLOCATE_ON_PAGE pages. We drop the freelist
554 // entries here to make them unavailable for allocations.
555 if (p->Chunk()->IsFlagSet(MemoryChunk::NEVER_ALLOCATE_ON_PAGE)) {
556 free_list_->EvictFreeListItems(p);
557 }
558
559 ConcurrentAllocationMutex guard(this);
560 DCHECK_EQ(this, p->owner());
561 RefineAllocatedBytesAfterSweeping(p);
562 RelinkFreeListCategories(p);
563 }
564}
565
566AllocatorPolicy* PagedSpace::CreateAllocatorPolicy(MainAllocator* allocator) {
567 return new PagedSpaceAllocatorPolicy(this, allocator);
568}
569
570// -----------------------------------------------------------------------------
571// CompactionSpace implementation
572
573void CompactionSpace::NotifyNewPage(PageMetadata* page) {
574 // Incremental marking can be running on the main thread isolate, so when
575 // allocating a new page for the client's compaction space we can get a
576 // black-allocated page. This is fine, since the page is not observed by the
577 // main isolate until it's merged.
578 DCHECK_IMPLIES(identity() != SHARED_SPACE ||
579 destination_heap() == DestinationHeap::kSameHeap,
580 !page->Chunk()->IsFlagSet(MemoryChunk::BLACK_ALLOCATED));
581 new_pages_.push_back(page);
582}
583
584void CompactionSpace::RefillFreeList() {
585 DCHECK_NE(NEW_SPACE, identity());
586
587 Sweeper* sweeper = heap()->sweeper();
588 size_t added = 0;
589 PageMetadata* p = nullptr;
590 while ((added <= kCompactionMemoryWanted) &&
591 (p = sweeper->GetSweptPageSafe(this))) {
592 // We regularly sweep NEVER_ALLOCATE_ON_PAGE pages. We drop the freelist
593 // entries here to make them unavailable for allocations.
594 if (p->Chunk()->IsFlagSet(MemoryChunk::NEVER_ALLOCATE_ON_PAGE)) {
595 free_list()->EvictFreeListItems(p);
596 }
597
598 // Only during compaction can pages actually change ownership. This is
599 // safe because there exists no other competing action on the page links
600 // during compaction.
601 DCHECK_NE(this, p->owner());
602 PagedSpace* owner = static_cast<PagedSpace*>(p->owner());
603 base::MutexGuard guard(owner->mutex());
605 owner->RemovePage(p);
606 added += AddPage(p);
607 added += p->wasted_memory();
608 }
609}
610
611CompactionSpaceCollection::CompactionSpaceCollection(
612 Heap* heap, CompactionSpaceKind compaction_space_kind)
613 : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE,
614 compaction_space_kind,
615 CompactionSpace::DestinationHeap::kSameHeap),
616 code_space_(heap, CODE_SPACE, Executability::EXECUTABLE,
617 compaction_space_kind,
618 CompactionSpace::DestinationHeap::kSameHeap),
619 trusted_space_(heap, TRUSTED_SPACE, Executability::NOT_EXECUTABLE,
620 compaction_space_kind,
621 CompactionSpace::DestinationHeap::kSameHeap) {
622 if (heap->isolate()->has_shared_space()) {
623 const CompactionSpace::DestinationHeap dest_heap =
624 heap->isolate()->is_shared_space_isolate()
625 ? CompactionSpace::DestinationHeap::kSameHeap
626 : CompactionSpace::DestinationHeap::kSharedSpaceHeap;
627 shared_space_.emplace(heap->isolate()->shared_space_isolate()->heap(),
628 SHARED_SPACE, Executability::NOT_EXECUTABLE,
629 compaction_space_kind, dest_heap);
630 }
631}
632
633// -----------------------------------------------------------------------------
634// OldSpace implementation
635
636void OldSpace::AddPromotedPage(PageMetadata* page, FreeMode free_mode) {
637 DCHECK_EQ(page->area_size(), page->allocated_bytes());
638 if (v8_flags.minor_ms) {
639 // Reset the page's allocated bytes. The page will be swept and the
640 // allocated bytes will be updated to match the live bytes.
641 page->DecreaseAllocatedBytes(page->area_size());
642 }
643 AddPageImpl(page);
644 if (free_mode == FreeMode::kLinkCategory) {
645 RelinkFreeListCategories(page);
646 }
647}
648
652
653void OldSpace::RelinkQuarantinedPageFreeList(PageMetadata* page,
654 size_t filler_size_on_page) {
655 base::MutexGuard guard(mutex());
656 DCHECK_EQ(this, page->owner());
657 DCHECK(page->SweepingDone());
658 DCHECK_EQ(page->live_bytes(), 0);
659 DCHECK_EQ(accounting_stats_.AllocatedOnPage(page),
661 DecreaseAllocatedBytes(filler_size_on_page, page);
663}
664
665// -----------------------------------------------------------------------------
666// StickySpace implementation
667
668void StickySpace::AdjustDifferenceInAllocatedBytes(size_t diff) {
669 DCHECK_GE(allocated_old_size_, diff);
670 allocated_old_size_ -= diff;
671}
672
673// -----------------------------------------------------------------------------
674// SharedSpace implementation
675
676void SharedSpace::ReleasePage(PageMetadata* page) {
677 // Old-to-new slots in old objects may be overwritten with references to
678 // shared objects. Postpone releasing empty pages so that updating old-to-new
679 // slots in dead old objects may access the dead shared objects.
680 ReleasePageImpl(page, MemoryAllocator::FreeMode::kPostpone);
681}
682
683} // namespace internal
684} // namespace v8
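The committed-physical-memory bookkeeping above (IncrementCommittedPhysicalMemory and DecrementCommittedPhysicalMemory, lines 164-178) uses a plain std::atomic<size_t> updated with fetch_add/fetch_sub under std::memory_order_relaxed: the counter is only a statistic, so no ordering with other memory operations is needed, and the DCHECKs merely guard against overflow and underflow. A minimal standalone sketch of that pattern in standard C++ (illustrative only, not V8 code; CommittedMemoryCounter is a made-up name):

#include <atomic>
#include <cassert>
#include <cstddef>
#include <iostream>
#include <thread>
#include <vector>

// Made-up stand-in for the space-level counter; not a V8 type.
class CommittedMemoryCounter {
 public:
  void Increment(size_t bytes) {
    // Relaxed ordering suffices: the value is only a statistic and is never
    // used to synchronize other memory accesses.
    size_t old_value = counter_.fetch_add(bytes, std::memory_order_relaxed);
    assert(old_value <= old_value + bytes);  // overflow check, like the DCHECK_LT
  }
  void Decrement(size_t bytes) {
    size_t old_value = counter_.fetch_sub(bytes, std::memory_order_relaxed);
    assert(old_value >= bytes);  // underflow check, like the DCHECK_GT
  }
  size_t Value() const { return counter_.load(std::memory_order_relaxed); }

 private:
  std::atomic<size_t> counter_{0};
};

int main() {
  CommittedMemoryCounter counter;
  std::vector<std::thread> threads;
  for (int i = 0; i < 4; ++i) {
    threads.emplace_back([&counter] {
      for (int j = 0; j < 1000; ++j) counter.Increment(4096);
    });
  }
  for (auto& t : threads) t.join();
  std::cout << counter.Value() << "\n";  // prints 16384000
  return 0;
}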
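Several functions above (AddPageImpl, RemovePage, and Verify) fold per-page external backing-store byte counts into space-level totals by iterating every ExternalBackingStoreType with a ForAll helper that passes each enum value together with its integer index. A standalone sketch of that enum-iteration idiom (standard C++; the enum values and the ForAll helper below are simplified stand-ins, not the V8 definitions):

#include <cstddef>
#include <iostream>

// Simplified stand-in for v8::internal::ExternalBackingStoreType.
enum class ExternalBackingStoreType { kArrayBuffer, kExternalString, kNumValues };

// Simplified stand-in for the ForAll helper: invoke the callback once per enum
// value, passing both the value and its integer index.
template <typename Enum, typename Callback>
void ForAll(Callback callback) {
  for (int i = 0; i < static_cast<int>(Enum::kNumValues); ++i) {
    callback(static_cast<Enum>(i), i);
  }
}

int main() {
  size_t space_bytes[static_cast<int>(ExternalBackingStoreType::kNumValues)] = {0};
  const size_t page_bytes[] = {128, 64};  // made-up per-type byte counts for one page

  // Same shape as the lambda in AddPageImpl(): add the page's per-type counts
  // to the space-level totals, indexed by the enum's integer value.
  ForAll<ExternalBackingStoreType>(
      [&](ExternalBackingStoreType type, int index) {
        space_bytes[index] += page_bytes[index];
      });

  ForAll<ExternalBackingStoreType>(
      [&](ExternalBackingStoreType type, int index) {
        std::cout << "type " << index << ": " << space_bytes[index] << " bytes\n";
      });
  return 0;
}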