v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
paged-spaces.h
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_PAGED_SPACES_H_
#define V8_HEAP_PAGED_SPACES_H_

#include <atomic>
#include <limits>
#include <memory>
#include <optional>
#include <utility>
#include <variant>

#include "src/base/bounds.h"
#include "src/base/macros.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/heap.h"
#include "src/heap/spaces.h"

namespace v8 {
namespace internal {

class CompactionSpace;
class Heap;
class HeapObject;
class Isolate;
class ObjectVisitor;
class PagedSpaceBase;
class Sweeper;

class HeapObjectRange final {
 public:
  class iterator final {
   public:
    using value_type = Tagged<HeapObject>;
    using pointer = const value_type*;
    using reference = const value_type&;
    using iterator_category = std::forward_iterator_tag;

    inline iterator();
    explicit inline iterator(const PageMetadata* page);

    inline iterator& operator++();
    inline iterator operator++(int);

    bool operator==(iterator other) const {
      return cur_addr_ == other.cur_addr_;
    }
    bool operator!=(iterator other) const { return !(*this == other); }

    value_type operator*() { return HeapObject::FromAddress(cur_addr_); }

   private:
    inline void AdvanceToNextObject();

    PtrComprCageBase cage_base() const { return cage_base_; }

    PtrComprCageBase cage_base_;
    Address cur_addr_ = kNullAddress;  // Current iteration point.
    int cur_size_ = 0;
    Address cur_end_ = kNullAddress;  // End iteration point.
  };

  explicit HeapObjectRange(const PageMetadata* page) : page_(page) {}

  inline iterator begin();
  inline iterator end();

 private:
  const PageMetadata* const page_;
};
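
// Usage sketch (illustrative addition, not part of the original header; it
// assumes a valid PageMetadata* `page` and that the corresponding
// paged-spaces-inl.h header is included for the inline iterator definitions).
// Since the nested iterator models a standard forward iterator, the live
// objects of one page can be walked with a range-based for loop:
//
//   size_t object_count = 0;
//   for (Tagged<HeapObject> object : HeapObjectRange(page)) {
//     USE(object);
//     ++object_count;
//   }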

// Heap object iterator in paged spaces.
//
// A PagedSpaceObjectIterator iterates objects from the bottom of the given
// space to its top or from the bottom of the given page to its top.
//
// If objects are allocated in the page during iteration the iterator may
// or may not iterate over those objects. The caller must create a new
// iterator in order to be sure to visit these new objects.
class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
 public:
  // Creates a new object iterator in a given space.
  PagedSpaceObjectIterator(Heap* heap, const PagedSpaceBase* space);

  // Advance to the next object, skipping free spaces and other fillers and
  // skipping the special garbage section of which there is one per space.
  // Returns nullptr when the iteration has ended.
  inline Tagged<HeapObject> Next() override;

 private:
  // Slow path of next(), goes into the next page. Returns false if the
  // iteration has ended.
  bool AdvanceToNextPage();

  HeapObjectRange::iterator cur_;
  HeapObjectRange::iterator end_;
  const PagedSpaceBase* const space_;
  ConstPageRange page_range_;
  ConstPageRange::iterator current_page_;
};
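
// Usage sketch (illustrative addition, not part of the original header;
// `heap` and `space` are assumed to be a valid Heap* and PagedSpaceBase*).
// Next() returns a null object once iteration has ended, so a whole-space walk
// via the GetObjectIterator() factory declared below looks like:
//
//   std::unique_ptr<ObjectIterator> it = space->GetObjectIterator(heap);
//   for (Tagged<HeapObject> object = it->Next(); !object.is_null();
//        object = it->Next()) {
//     // ... visit `object` ...
//   }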

class V8_EXPORT_PRIVATE PagedSpaceBase
    : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
 public:
  using iterator = PageIterator;
  using const_iterator = ConstPageIterator;

  static const size_t kCompactionMemoryWanted = 500 * KB;

  PagedSpaceBase(Heap* heap, AllocationSpace id, Executability executable,
                 std::unique_ptr<FreeList> free_list,
                 CompactionSpaceKind compaction_space_kind);

  ~PagedSpaceBase() override { TearDown(); }

  // Checks whether an object/address is in this space.
  inline bool Contains(Address a) const;
  inline bool Contains(Tagged<Object> o) const;
  bool ContainsSlow(Address addr) const;

  // Does the space need executable memory?
  Executability executable() const { return executable_; }

  // Current capacity without growing (Size() + Available()).
  size_t Capacity() const { return accounting_stats_.Capacity(); }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() const override;

#if DEBUG
  void VerifyCommittedPhysicalMemory() const;
#endif  // DEBUG

  void IncrementCommittedPhysicalMemory(size_t increment_value);
  void DecrementCommittedPhysicalMemory(size_t decrement_value);

  // Sets the capacity, the available space and the wasted space to zero.
  // The stats are rebuilt during sweeping by adding each page to the
  // capacity and the size when it is encountered. As free spaces are
  // discovered during the sweeping they are subtracted from the size and added
  // to the available and wasted totals. The free list is cleared as well.
  void ClearAllocatorState() {
    accounting_stats_.ClearSize();
    if (v8_flags.black_allocated_pages) {
      free_list_->ResetForNonBlackAllocatedPages();
    } else {
      free_list_->Reset();
    }
  }

  // Available bytes without growing. These are the bytes on the free list.
  // The bytes in the linear allocation area are not included in this total
  // because updating the stats would slow down allocation. New pages are
  // immediately added to the free list so they show up here.
  size_t Available() const override;

  // Allocated bytes in this space. Garbage bytes that were not found due to
  // concurrent sweeping are counted as being allocated! The bytes in the
  // current linear allocation area (between top and limit) are also counted
  // here.
  size_t Size() const override { return accounting_stats_.Size(); }

  // Wasted bytes in this space. These are just the bytes that were thrown away
  // due to being too small to use for allocation.
  size_t Waste() const;

  // Allocate the requested number of bytes in the space from a background
  // thread.
  V8_WARN_UNUSED_RESULT std::optional<std::pair<Address, size_t>>
  RawAllocateBackground(LocalHeap* local_heap, size_t min_size_in_bytes,
                        size_t max_size_in_bytes, AllocationOrigin origin);

  // Free a block of memory. During sweeping, we don't update the accounting
  // stats and don't link the free list category.
  V8_INLINE size_t Free(Address start, size_t size_in_bytes);
  V8_INLINE size_t FreeDuringSweep(Address start, size_t size_in_bytes);

  void ResetFreeList();

  void DecreaseAllocatedBytes(size_t bytes, PageMetadata* page) {
    accounting_stats_.DecreaseAllocatedBytes(bytes, page);
  }
  void IncreaseAllocatedBytes(size_t bytes, PageMetadata* page) {
    accounting_stats_.IncreaseAllocatedBytes(bytes, page);
  }
  void DecreaseCapacity(size_t bytes) {
    accounting_stats_.DecreaseCapacity(bytes);
  }
  void IncreaseCapacity(size_t bytes) {
    accounting_stats_.IncreaseCapacity(bytes);
  }

  PageMetadata* InitializePage(MutablePageMetadata* chunk) override;

  virtual void ReleasePage(PageMetadata* page);

  // Adds the page to this space and returns the number of bytes added to the
  // free list of the space.
  virtual size_t AddPage(PageMetadata* page);
  virtual void RemovePage(PageMetadata* page);
  // Remove a page if it has at least |size_in_bytes| bytes available that can
  // be used for allocation.
  PageMetadata* RemovePageSafe(int size_in_bytes);

#ifdef VERIFY_HEAP
  // Verify integrity of this space.
  void Verify(Isolate* isolate,
              SpaceVerificationVisitor* visitor) const override;

  void VerifyLiveBytes() const;
#endif

#ifdef DEBUG
  void VerifyCountersAfterSweeping(Heap* heap) const;
  void VerifyCountersBeforeConcurrentSweeping() const;
  // Print meta info and objects in this space.
  void Print() override;

  // Report code object related statistics
  static void ReportCodeStatistics(Isolate* isolate);
  static void ResetCodeStatistics(Isolate* isolate);
#endif

  bool CanExpand(size_t size) const;

  // Returns the number of total pages in this space.
  int CountTotalPages() const;

  // Return size of allocatable area on a page in this space.
  inline int AreaSize() const { return static_cast<int>(area_size_); }

  bool is_compaction_space() const {
    return compaction_space_kind_ != CompactionSpaceKind::kNone;
  }

  CompactionSpaceKind compaction_space_kind() const {
    return compaction_space_kind_;
  }

  // Merges {other} into the current space. Note that this modifies {other},
  // e.g., removes its bump pointer area and resets statistics.
  void MergeCompactionSpace(CompactionSpace* other);

  // Refills the free list from the corresponding free list filled by the
  // sweeper.
  virtual void RefillFreeList();

  base::Mutex* mutex() { return &space_mutex_; }

  void UnlinkFreeListCategories(PageMetadata* page);
  size_t RelinkFreeListCategories(PageMetadata* page);

  PageMetadata* first_page() override {
    return reinterpret_cast<PageMetadata*>(memory_chunk_list_.front());
  }
  const PageMetadata* first_page() const override {
    return reinterpret_cast<const PageMetadata*>(memory_chunk_list_.front());
  }

  PageMetadata* last_page() override {
    return reinterpret_cast<PageMetadata*>(memory_chunk_list_.back());
  }
  const PageMetadata* last_page() const override {
    return reinterpret_cast<const PageMetadata*>(memory_chunk_list_.back());
  }

  iterator begin() { return iterator(first_page()); }
  iterator end() { return iterator(nullptr); }

  const_iterator begin() const { return const_iterator(first_page()); }
  const_iterator end() const { return const_iterator(nullptr); }

  std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;

  void AddRangeToActiveSystemPages(PageMetadata* page, Address start,
                                   Address end);
  void ReduceActiveSystemPages(PageMetadata* page,
                               ActiveSystemPages active_system_pages);

  // Expands the space by a single page and returns true on success.
  bool TryExpand(LocalHeap* local_heap, AllocationOrigin origin);

  void RefineAllocatedBytesAfterSweeping(PageMetadata* page);
  virtual void AdjustDifferenceInAllocatedBytes(size_t diff) {}

 protected:
  // PagedSpaces that should be included in snapshots have different, i.e.,
  // smaller, initial pages.
  virtual bool snapshotable() const { return true; }

  bool HasPages() const { return first_page() != nullptr; }

  // Cleans up the space, frees all pages in this space except those belonging
  // to the initial chunk, uncommits addresses in the initial chunk.
  void TearDown();

  // Spaces can use this method to get notified about pages added to it.
  virtual void NotifyNewPage(PageMetadata* page) {}

  size_t committed_physical_memory() const {
    return committed_physical_memory_.load(std::memory_order_relaxed);
  }

  void ReleasePageImpl(PageMetadata* page, MemoryAllocator::FreeMode free_mode);

  void AddPageImpl(PageMetadata* page);

  Executability executable_;

  CompactionSpaceKind compaction_space_kind_;

  size_t area_size_;

  // Accounting information for this space.
  AllocationStats accounting_stats_;

  // Mutex guarding any concurrent access to the space.
  mutable base::Mutex space_mutex_;

  std::atomic<size_t> committed_physical_memory_{0};

  // Used for tracking bytes allocated since last gc in new space.
  size_t size_at_last_gc_ = 0;

 private:
  template <bool during_sweep>
  V8_INLINE size_t FreeInternal(Address start, size_t size_in_bytes);

  class ConcurrentAllocationMutex {
   public:
    explicit ConcurrentAllocationMutex(const PagedSpaceBase* space) {
      if (space->SupportsConcurrentAllocation()) {
        guard_.emplace(&space->space_mutex_);
      }
    }

    std::optional<base::MutexGuard> guard_;
  };

  bool SupportsConcurrentAllocation() const {
    return !is_compaction_space() && (identity() != NEW_SPACE);
  }

  friend class IncrementalMarking;

  // Used in cctest.
  friend class heap::HeapTester;
};
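
// Accounting sketch (illustrative addition, not part of the original header).
// Per the comments above, the counters relate as
// Capacity() == Size() + Available(), with Waste() tracked separately, so a
// hypothetical debugging helper for a PagedSpaceBase* `space` could report:
//
//   void PrintSpaceAccounting(const PagedSpaceBase* space) {
//     PrintF("capacity=%zu size=%zu available=%zu waste=%zu\n",
//            space->Capacity(), space->Size(), space->Available(),
//            space->Waste());
//   }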

class V8_EXPORT_PRIVATE PagedSpace : public PagedSpaceBase {
 public:
  PagedSpace(Heap* heap, AllocationSpace id, Executability executable,
             std::unique_ptr<FreeList> free_list,
             CompactionSpaceKind compaction_space_kind)
      : PagedSpaceBase(heap, id, executable, std::move(free_list),
                       compaction_space_kind) {}

  AllocatorPolicy* CreateAllocatorPolicy(MainAllocator* allocator) final;
};

// -----------------------------------------------------------------------------
// Compaction space that is used temporarily during compaction.

class V8_EXPORT_PRIVATE CompactionSpace final : public PagedSpace {
 public:
  // Specifies to which heap the compaction space should be merged.
  enum class DestinationHeap {
    // Should be merged to the same heap.
    kSameHeap,
    // Should be merged to the main isolate shared space.
    kSharedSpaceHeap
  };

  CompactionSpace(Heap* heap, AllocationSpace id, Executability executable,
                  CompactionSpaceKind compaction_space_kind,
                  DestinationHeap destination_heap)
      : PagedSpace(heap, id, executable, FreeList::CreateFreeList(),
                   compaction_space_kind),
        destination_heap_(destination_heap) {
    DCHECK(is_compaction_space());
  }

  const std::vector<PageMetadata*>& GetNewPages() { return new_pages_; }

  void RefillFreeList() final;

  DestinationHeap destination_heap() const { return destination_heap_; }

 protected:
  void NotifyNewPage(PageMetadata* page) final;

  // The space is temporary and not included in any snapshots.
  bool snapshotable() const final { return false; }
  // Pages that were allocated in this local space and need to be merged
  // to the main space.
  std::vector<PageMetadata*> new_pages_;
  const DestinationHeap destination_heap_;
};
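
// Usage sketch (illustrative addition, not part of the original header; the
// setup is hypothetical and `kind` stands for any CompactionSpaceKind other
// than kNone). An evacuation task allocates into a temporary CompactionSpace
// and the owning space later merges it back via MergeCompactionSpace():
//
//   CompactionSpace compaction_space(
//       heap, OLD_SPACE, NOT_EXECUTABLE, kind,
//       CompactionSpace::DestinationHeap::kSameHeap);
//   // ... evacuate live objects into `compaction_space` ...
//   heap->old_space()->MergeCompactionSpace(&compaction_space);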

// A collection of |CompactionSpace|s used by a single compaction task.
class CompactionSpaceCollection : public Malloced {
 public:
  explicit CompactionSpaceCollection(Heap* heap,
                                     CompactionSpaceKind compaction_space_kind);

  CompactionSpace* Get(AllocationSpace space) {
    switch (space) {
      case OLD_SPACE:
        return &old_space_;
      case CODE_SPACE:
        return &code_space_;
      case SHARED_SPACE:
        DCHECK(shared_space_);
        return &*shared_space_;
      case TRUSTED_SPACE:
        return &trusted_space_;
      default:
        UNREACHABLE();
    }
    UNREACHABLE();
  }

 private:
  CompactionSpace old_space_;
  CompactionSpace code_space_;
  std::optional<CompactionSpace> shared_space_;
  CompactionSpace trusted_space_;
};
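
// Usage sketch (illustrative addition, not part of the original header; `heap`
// and `kind` are assumed). Each compaction task owns one collection and picks
// the destination space by identity:
//
//   CompactionSpaceCollection compaction_spaces(heap, kind);
//   CompactionSpace* target = compaction_spaces.Get(OLD_SPACE);
//   // ... allocate evacuated OLD_SPACE objects through `target` ...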

// -----------------------------------------------------------------------------
// Old generation regular object space.

class OldSpace : public PagedSpace {
 public:
  // Creates an old space object. The constructor does not allocate pages
  // from OS.
  explicit OldSpace(Heap* heap)
      : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE, FreeList::CreateFreeList(),
                   CompactionSpaceKind::kNone) {}

  void AddPromotedPage(PageMetadata* page, FreeMode free_mode);

  void ReleasePage(PageMetadata* page) override;

  size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
    if (type == ExternalBackingStoreType::kArrayBuffer)
      return heap()->OldArrayBufferBytes();
    return external_backing_store_bytes_[static_cast<int>(type)];
  }

  void RelinkQuarantinedPageFreeList(PageMetadata* page,
                                     size_t filler_size_on_page);
};

// -----------------------------------------------------------------------------
// StickySpace is a paged space that contains mixed young and old objects. Note
// that its identity type is OLD_SPACE.

class V8_EXPORT_PRIVATE StickySpace final : public OldSpace {
 public:
  using OldSpace::OldSpace;

  static StickySpace* From(OldSpace* space) {
    DCHECK(v8_flags.sticky_mark_bits);
    return static_cast<StickySpace*>(space);
  }

  size_t young_objects_size() const {
    DCHECK_GE(Size(), allocated_old_size_);
    return Size() - allocated_old_size_;
  }

  size_t old_objects_size() const {
    DCHECK_GE(Size(), allocated_old_size_);
    return allocated_old_size_;
  }

  void set_old_objects_size(size_t allocated_old_size) {
    allocated_old_size_ = allocated_old_size;
  }

  void NotifyBlackAreaCreated(size_t size) override {
    DCHECK_LE(size, Capacity());
    allocated_old_size_ += size;
  }

  void NotifyBlackAreaDestroyed(size_t size) override {
    DCHECK_LE(size, Capacity());
    allocated_old_size_ -= size;
  }

 private:
  void AdjustDifferenceInAllocatedBytes(size_t) override;

  // TODO(333906585): Consider tracking the young bytes instead.
  size_t allocated_old_size_ = 0;
};
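
// Worked example (illustrative addition, not part of the original header).
// StickySpace splits its single OLD_SPACE size into an old and a young portion
// through one counter, so young_objects_size() == Size() - old_objects_size().
// For instance, with Size() == 8 MB and allocated_old_size_ == 5 MB,
// old_objects_size() reports 5 MB and young_objects_size() reports 3 MB;
// NotifyBlackAreaCreated(size) then shifts `size` bytes from the young to the
// old portion.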

// -----------------------------------------------------------------------------
// Old generation code object space.

class CodeSpace final : public PagedSpace {
 public:
  // Creates a code space object. The constructor does not allocate pages from
  // OS.
  explicit CodeSpace(Heap* heap)
      : PagedSpace(heap, CODE_SPACE, EXECUTABLE, FreeList::CreateFreeList(),
                   CompactionSpaceKind::kNone) {}
};

// -----------------------------------------------------------------------------
// Shared space regular object space.

class SharedSpace final : public PagedSpace {
 public:
  // Creates a shared space object. The constructor does not allocate pages from
  // OS.
  explicit SharedSpace(Heap* heap)
      : PagedSpace(heap, SHARED_SPACE, NOT_EXECUTABLE,
                   FreeList::CreateFreeList(), CompactionSpaceKind::kNone) {}

  void ReleasePage(PageMetadata* page) override;

  size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
    if (type == ExternalBackingStoreType::kArrayBuffer) return 0;
    DCHECK_EQ(type, ExternalBackingStoreType::kExternalString);
    return external_backing_store_bytes_[static_cast<int>(type)];
  }
};

// -----------------------------------------------------------------------------
// Trusted space.
// Essentially another old space that, when the sandbox is enabled, will be
// located outside of the sandbox. As such an attacker cannot corrupt objects
// located in this space and therefore these objects can be considered trusted.

class TrustedSpace final : public PagedSpace {
 public:
  // Creates a trusted space object. The constructor does not allocate pages
  // from OS.
  explicit TrustedSpace(Heap* heap)
      : PagedSpace(heap, TRUSTED_SPACE, NOT_EXECUTABLE,
                   FreeList::CreateFreeList(), CompactionSpaceKind::kNone) {}

  size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
    if (type == ExternalBackingStoreType::kArrayBuffer) return 0;
    DCHECK_EQ(type, ExternalBackingStoreType::kExternalString);
    return external_backing_store_bytes_[static_cast<int>(type)];
  }
};

class SharedTrustedSpace final : public PagedSpace {
 public:
  // Creates a trusted space object. The constructor does not allocate pages
  // from OS.
  explicit SharedTrustedSpace(Heap* heap)
      : PagedSpace(heap, SHARED_TRUSTED_SPACE, NOT_EXECUTABLE,
                   FreeList::CreateFreeList(), CompactionSpaceKind::kNone) {}

  size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
    if (type == ExternalBackingStoreType::kArrayBuffer) return 0;
    DCHECK_EQ(type, ExternalBackingStoreType::kExternalString);
    return external_backing_store_bytes_[static_cast<int>(type)];
  }
};

// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space or to evacuation candidates.
class OldGenerationMemoryChunkIterator {
 public:
  inline explicit OldGenerationMemoryChunkIterator(Heap* heap);

  // Return nullptr when the iterator is done.
  inline MutablePageMetadata* next();

  // Applies `callback` to all `MutablePageMetadata` returned by the iterator.
  template <typename Callback>
  static void ForAll(Heap* heap, Callback callback) {
    OldGenerationMemoryChunkIterator it(heap);
    while (MutablePageMetadata* chunk = it.next()) {
      callback(chunk);
    }
  }

 private:
  Heap* const heap_;
  // The current type of {iterator_} depends on {state_}.
  std::variant<PageIterator, LargePageIterator> iterator_;
};
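
// Usage sketch (illustrative addition, not part of the original header; `heap`
// is assumed to be a valid Heap*). ForAll() wraps the manual next() loop shown
// above, so callers can simply pass a lambda:
//
//   OldGenerationMemoryChunkIterator::ForAll(
//       heap, [](MutablePageMetadata* chunk) {
//         // ... inspect or update `chunk` ...
//       });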

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_PAGED_SPACES_H_