v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
mutable-page-metadata.h
Go to the documentation of this file.
1// Copyright 2020 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_HEAP_MUTABLE_PAGE_METADATA_H_
6#define V8_HEAP_MUTABLE_PAGE_METADATA_H_
7
8#include <atomic>
9
10#include "src/base/macros.h"
12#include "src/common/globals.h"
14#include "src/heap/list.h"
16#include "src/heap/marking.h"
18#include "src/heap/slot-set.h"
19#include "src/sandbox/check.h"
20
21namespace v8 {
22namespace internal {
23
24class FreeListCategory;
25class SlotSet;
26class Space;
27
29
41
42// MutablePageMetadata represents a memory region owned by a specific space.
43// It is divided into the header and the body. Chunk start is always
44// 1MB aligned. Start of the body is aligned so it can accommodate
45// any heap object.
47 public:
48 // |kDone|: The page state when sweeping is complete or sweeping must not be
49 // performed on that page. Sweeper threads that are done with their work
50 // will set this value and not touch the page anymore.
51 // |kPendingSweeping|: This page is ready for parallel sweeping.
52 // |kPendingIteration|: This page is ready for parallel promoted page
53 // iteration.
53 // |kInProgress|: This page is currently swept by a sweeper thread.
54 enum class ConcurrentSweepingState : intptr_t {
55 kDone,
59 };
60
61 // Page size in bytes. This must be a multiple of the OS page size.
62 static const int kPageSize = kRegularPageSize;
63
68
69 static inline void MoveExternalBackingStoreBytes(
71 MutablePageMetadata* to, size_t amount);
72
73 // Only works if the pointer is in the first kPageSize of the MemoryChunk.
75
76 // Only works if the object is in the first kPageSize of the MemoryChunk.
78
81 return static_cast<MutablePageMetadata*>(metadata);
82 }
83
84 static const MutablePageMetadata* cast(const MemoryChunkMetadata* metadata) {
86 return static_cast<const MutablePageMetadata*>(metadata);
87 }
88
89 MutablePageMetadata(Heap* heap, BaseSpace* space, size_t size,
91 VirtualMemory reservation, PageSize page_size);
92
94
95 size_t BucketsInSlotSet() const { return SlotSet::BucketsForSize(size()); }
96
99 return Chunk()->SetYoungGenerationPageFlags(marking_mode);
100 }
101
102 base::Mutex& mutex() { return mutex_; }
103 const base::Mutex& mutex() const { return mutex_; }
105 const base::Mutex& object_mutex() const { return object_mutex_; }
106
110
114
118
119 template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
121 if constexpr (access_mode == AccessMode::ATOMIC)
123 return slot_set_[type];
124 }
125
126 template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
127 const SlotSet* slot_set() const {
128 return const_cast<MutablePageMetadata*>(this)
130 }
131
132 template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
134 if constexpr (access_mode == AccessMode::ATOMIC)
136 return typed_slot_set_[type];
137 }
138
139 template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
141 return const_cast<MutablePageMetadata*>(this)
143 }
144
145 template <RememberedSetType type>
146 bool ContainsSlots() const {
147 return slot_set<type>() != nullptr || typed_slot_set<type>() != nullptr;
148 }
149 bool ContainsAnySlots() const;
150
152 // Not safe to be called concurrently.
155 // Not safe to be called concurrently.
157
158 template <RememberedSetType type>
161 // Conditionally reset to nullptr (instead of e.g. using std::exchange) to
162 // avoid data races when transitioning from nullptr to nullptr.
163 if (slot_set) {
164 slot_set_[type] = nullptr;
165 }
166 return slot_set;
167 }
168
169 template <RememberedSetType type>
172 // Conditionally reset to nullptr (instead of e.g. using std::exchange) to
173 // avoid data races when transitioning from nullptr to nullptr.
174 if (typed_slot_set) {
175 typed_slot_set_[type] = nullptr;
176 }
177 return typed_slot_set;
178 }
179
181
182 // Approximate amount of physical memory committed for this chunk.
184
191
193 size_t amount);
194
196 size_t amount);
197
199 return external_backing_store_bytes_[static_cast<int>(type)];
200 }
201
202 Space* owner() const {
203 return reinterpret_cast<Space*>(MemoryChunkMetadata::owner());
204 }
205
206 // Gets the chunk's allocation space, potentially dealing with a null owner_
207 // (like read-only chunks have).
208 inline AllocationSpace owner_identity() const;
209
212 return list_node_;
213 }
214
218
219 // Release memory allocated by the chunk, except that which is needed by
220 // read-only space chunks.
222
223 void IncreaseAllocatedLabSize(size_t bytes) { allocated_lab_size_ += bytes; }
224 void DecreaseAllocatedLabSize(size_t bytes) {
226 allocated_lab_size_ -= bytes;
227 }
228 size_t AllocatedLabSize() const { return allocated_lab_size_; }
229
232 size_t AgeInNewSpace() const { return age_in_new_space_; }
233
238
245
247 DCHECK(!Chunk()->InReadOnlySpace());
248 return &marking_bitmap_;
249 }
250
252 DCHECK(!Chunk()->InReadOnlySpace());
253 return &marking_bitmap_;
254 }
255
256 size_t live_bytes() const {
257 return live_byte_count_.load(std::memory_order_relaxed);
258 }
259
260 void SetLiveBytes(size_t value) {
263 live_byte_count_.store(value, std::memory_order_relaxed);
264 }
265
266 void IncrementLiveBytesAtomically(intptr_t diff) {
269 live_byte_count_.fetch_add(diff, std::memory_order_relaxed);
270 }
271
272 template <AccessMode mode = AccessMode::NON_ATOMIC>
273 void ClearLiveness();
274
275 bool IsLivenessClear() const;
276
277 bool IsLargePage() {
278 // The active_system_pages_ will be nullptr for large pages, so we use
279 // that here instead of (for example) adding another enum member. See also
280 // the constructor where this field is set.
281 return active_system_pages_.get() == nullptr;
282 }
283
284 protected:
285 // Release all memory allocated by the chunk. Should be called when memory
286 // chunk is about to be freed.
288
289 template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
291 if (access_mode == AccessMode::ATOMIC) {
293 return;
294 }
296 }
297
298 template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
307
308 // A single slot set for small pages (of size kPageSize) or an array of slot
309 // set for large pages. In the latter case the number of entries in the array
310 // is ceil(size() / kPageSize).
312 // A single slot set for small pages (of size kPageSize) or an array of slot
313 // set for large pages. In the latter case the number of entries in the array
314 // is ceil(size() / kPageSize).
316
317 // Used by the marker to keep track of the scanning progress in large objects
318 // that have a progress tracker and are scanned in increments and
319 // concurrently.
321
322 // Count of bytes marked black on page. With sticky mark-bits, the counter
323 // represents the size of the old objects allocated on the page. This is
324 // handy, since this counter is then used when starting sweeping to set the
325 // approximate allocated size on the space (before it gets refined due to
326 // right/left-trimming or slack tracking).
327 std::atomic<intptr_t> live_byte_count_{0};
328
329 std::atomic<ConcurrentSweepingState> concurrent_sweeping_{
331
332 // Tracks off-heap memory used by this memory chunk.
333 std::atomic<size_t> external_backing_store_bytes_[static_cast<int>(
335
337
339
341
342 // This also serves as indicator for whether a page is large. See constructor.
343 std::unique_ptr<ActiveSystemPages> active_system_pages_;
344
345 // Counts overall allocated LAB size on the page since the last GC. Used
346 // only for new space pages.
348
349 // Counts the number of young gen GCs that a page survived in new space. This
350 // counter is reset to 0 whenever the page is empty.
352
354
355 // Possibly platform-dependent fields should go last. We depend on the marking
356 // bitmap offset from generated code and assume that it's stable across 64-bit
357 // platforms. In theory, there could be a difference between Linux and Android
358 // in terms of Mutex size.
359
362
363 private:
364 static constexpr intptr_t MarkingBitmapOffset() {
365 return offsetof(MutablePageMetadata, marking_bitmap_);
366 }
367
368 static constexpr intptr_t SlotSetOffset(
369 RememberedSetType remembered_set_type) {
370 return offsetof(MutablePageMetadata, slot_set_) +
371 sizeof(void*) * remembered_set_type;
372 }
373
374 // For ReleaseAllAllocatedMemory().
375 friend class MemoryAllocator;
376 friend class PagePool;
377 // For set_typed_slot_set().
378 template <RememberedSetType>
379 friend class RememberedSet;
380 // For MarkingBitmapOffset().
381 friend class CodeStubAssembler;
382 friend class MacroAssembler;
383 friend class MarkingBitmap;
384 friend class TestWithBitmap;
385 // For SlotSetOffset().
387};
388
389} // namespace internal
390
391namespace base {
392// Define special hash function for chunk pointers, to be used with std data
393// structures, e.g. std::unordered_set<MutablePageMetadata*,
394// base::hash<MutablePageMetadata*>
395template <>
396struct hash<i::MutablePageMetadata*> : hash<i::MemoryChunkMetadata*> {};
397template <>
398struct hash<const i::MutablePageMetadata*>
399 : hash<const i::MemoryChunkMetadata*> {};
400} // namespace base
401
402} // namespace v8
403
404#endif // V8_HEAP_MUTABLE_PAGE_METADATA_H_
constexpr int kRegularPageSize
#define SBXCHECK(condition)
Definition check.h:61
static constexpr size_t BucketsForSize(size_t size)
static void Release_Store(T *addr, typename std::remove_reference< T >::type new_value)
static T Acquire_Load(T *addr)
void SetYoungGenerationPageFlags(MarkingMode marking_mode)
void set_typed_slot_set(TypedSlotSet *typed_slot_set)
void ReleaseTypedSlotSet(RememberedSetType type)
const TypedSlotSet * typed_slot_set() const
void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type, size_t amount)
static PageAllocator::Permission GetCodeModificationPermission()
static V8_INLINE MutablePageMetadata * FromAddress(Address a)
MemoryChunk::MainThreadFlags InitialFlags(Executability executable) const
heap::ListNode< MutablePageMetadata > list_node_
size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const
MarkingProgressTracker & marking_progress_tracker()
TypedSlotSet * AllocateTypedSlotSet(RememberedSetType type)
V8_EXPORT_PRIVATE SlotSet * AllocateSlotSet(RememberedSetType type)
PossiblyEmptyBuckets * possibly_empty_buckets()
static MutablePageMetadata * cast(MemoryChunkMetadata *metadata)
static constexpr intptr_t MarkingBitmapOffset()
std::atomic< size_t > external_backing_store_bytes_[static_cast< int >(ExternalBackingStoreType::kNumValues)]
void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type, size_t amount)
ConcurrentSweepingState concurrent_sweeping_state()
const MarkingProgressTracker & marking_progress_tracker() const
void set_concurrent_sweeping_state(ConcurrentSweepingState state)
V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory() const
static void MoveExternalBackingStoreBytes(ExternalBackingStoreType type, MutablePageMetadata *from, MutablePageMetadata *to, size_t amount)
MarkingProgressTracker marking_progress_tracker_
std::atomic< ConcurrentSweepingState > concurrent_sweeping_
void SetYoungGenerationPageFlags(MarkingMode marking_mode)
void ReleaseSlotSet(RememberedSetType type)
MutablePageMetadata(Heap *heap, BaseSpace *space, size_t size, Address area_start, Address area_end, VirtualMemory reservation, PageSize page_size)
static const MutablePageMetadata * cast(const MemoryChunkMetadata *metadata)
static constexpr intptr_t SlotSetOffset(RememberedSetType remembered_set_type)
const MarkingBitmap * marking_bitmap() const
const heap::ListNode< MutablePageMetadata > & list_node() const
std::unique_ptr< ActiveSystemPages > active_system_pages_
heap::ListNode< MutablePageMetadata > & list_node()
SlotSet * slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES]
V8_INLINE void SetOldGenerationPageFlags(MarkingMode marking_mode)
TypedSlotSet * typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES]
const base::Mutex & object_mutex() const
static V8_INLINE MutablePageMetadata * FromHeapObject(Tagged< HeapObject > o)
#define V8_COMPRESS_POINTERS_8GB_BOOL
Definition globals.h:608
LiftoffAssembler::CacheState state
constexpr intptr_t kObjectAlignment8GbHeap
Definition globals.h:934
V8_EXPORT_PRIVATE FlagValues v8_flags
ExternalBackingStoreType
Definition globals.h:1605
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define DCHECK(condition)
Definition logging.h:482
#define V8_EXPORT_PRIVATE
Definition macros.h:460
constexpr bool IsAligned(T value, U alignment)
Definition macros.h:403
#define V8_INLINE
Definition v8config.h:500
wasm::ValueType type