v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
slot-set.h
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SLOT_SET_H_
#define V8_HEAP_SLOT_SET_H_

#include <map>
#include <memory>
#include <stack>
#include <vector>

#include "src/base/bit-field.h"
#include "src/heap/base/basic-slot-set.h"
#include "src/objects/compressed-slots.h"
#include "src/objects/slots.h"
#include "src/utils/allocation.h"
#include "src/utils/utils.h"
#include "testing/gtest/include/gtest/gtest_prod.h"  // nogncheck

namespace v8 {
namespace internal {

using ::heap::base::KEEP_SLOT;
using ::heap::base::REMOVE_SLOT;
using ::heap::base::SlotCallbackResult;

// Possibly empty buckets (buckets that do not contain any slots) are
// discovered by the scavenger. Buckets might become non-empty when objects
// are promoted later or from another thread, so all those buckets need to be
// revisited.
// This data structure tracks the possibly empty buckets within a SlotSet. The
// class contains a word-sized bitmap; if more bits are needed, the bitmap is
// replaced with a pointer to a malloc-allocated bitmap.
class PossiblyEmptyBuckets {
 public:
  PossiblyEmptyBuckets() = default;
  PossiblyEmptyBuckets(PossiblyEmptyBuckets&& other) V8_NOEXCEPT
      : bitmap_(other.bitmap_) {
    other.bitmap_ = kNullAddress;
  }

  ~PossiblyEmptyBuckets() { Release(); }

  PossiblyEmptyBuckets(const PossiblyEmptyBuckets&) = delete;
  PossiblyEmptyBuckets& operator=(const PossiblyEmptyBuckets&) = delete;

  void Release() {
    if (IsAllocated()) {
      AlignedFree(BitmapArray());
    }
    bitmap_ = kNullAddress;
    DCHECK(!IsAllocated());
  }

  void Insert(size_t bucket_index, size_t buckets) {
    if (IsAllocated()) {
      InsertAllocated(bucket_index);
    } else if (bucket_index + 1 < kBitsPerWord) {
      bitmap_ |= static_cast<uintptr_t>(1) << (bucket_index + 1);
    } else {
      Allocate(buckets);
      InsertAllocated(bucket_index);
    }
  }

  bool Contains(size_t bucket_index) {
    if (IsAllocated()) {
      size_t word_idx = bucket_index / kBitsPerWord;
      uintptr_t* word = BitmapArray() + word_idx;
      return *word &
             (static_cast<uintptr_t>(1) << (bucket_index % kBitsPerWord));
    } else if (bucket_index + 1 < kBitsPerWord) {
      return bitmap_ & (static_cast<uintptr_t>(1) << (bucket_index + 1));
    } else {
      return false;
    }
  }

  bool IsEmpty() const { return bitmap_ == kNullAddress; }

 private:
  static constexpr Address kPointerTag = 1;
  static constexpr int kWordSize = sizeof(uintptr_t);
  static constexpr int kBitsPerWord = kWordSize * kBitsPerByte;

  bool IsAllocated() { return bitmap_ & kPointerTag; }

  void Allocate(size_t buckets) {
    DCHECK(!IsAllocated());
    size_t words = WordsForBuckets(buckets);
    uintptr_t* ptr = reinterpret_cast<uintptr_t*>(
        AlignedAllocWithRetry(words * kWordSize, kSystemPointerSize));
    ptr[0] = bitmap_ >> 1;

    for (size_t word_idx = 1; word_idx < words; word_idx++) {
      ptr[word_idx] = 0;
    }
    bitmap_ = reinterpret_cast<Address>(ptr) + kPointerTag;
    DCHECK(IsAllocated());
  }

  void InsertAllocated(size_t bucket_index) {
    DCHECK(IsAllocated());
    size_t word_idx = bucket_index / kBitsPerWord;
    uintptr_t* word = BitmapArray() + word_idx;
    *word |= static_cast<uintptr_t>(1) << (bucket_index % kBitsPerWord);
  }

  static size_t WordsForBuckets(size_t buckets) {
    return (buckets + kBitsPerWord - 1) / kBitsPerWord;
  }

  uintptr_t* BitmapArray() {
    DCHECK(IsAllocated());
    return reinterpret_cast<uintptr_t*>(bitmap_ & ~kPointerTag);
  }

  Address bitmap_ = kNullAddress;

  FRIEND_TEST(PossiblyEmptyBucketsTest, WordsForBuckets);
};

static_assert(std::is_standard_layout<PossiblyEmptyBuckets>::value);
static_assert(sizeof(PossiblyEmptyBuckets) == kSystemPointerSize);
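
// Usage sketch (illustrative only; not part of the upstream header): bucket i
// is tracked inline at bit i + 1, with bit 0 reserved for kPointerTag. Once
// an index no longer fits in the word, Insert() transparently switches to the
// malloc-allocated bitmap:
//
//   PossiblyEmptyBuckets buckets;
//   buckets.Insert(3, 256);    // inline: sets bit 4 of bitmap_
//   buckets.Insert(100, 256);  // allocates WordsForBuckets(256) words and
//                              // re-homes the inline bits via bitmap_ >> 1
//   CHECK(buckets.Contains(3) && buckets.Contains(100));
//   buckets.Release();         // frees the out-of-line bitmap, if any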

class SlotSet final : public ::heap::base::BasicSlotSet<kTaggedSize> {
  using BasicSlotSet = ::heap::base::BasicSlotSet<kTaggedSize>;

 public:
  static const int kBucketsRegularPage =
      (size_t{1} << kPageSizeBits) / kTaggedSize / kCellsPerBucket /
      kBitsPerCell;

  static SlotSet* Allocate(size_t buckets) {
    return static_cast<SlotSet*>(BasicSlotSet::Allocate(buckets));
  }

  template <v8::internal::AccessMode access_mode>
  static constexpr BasicSlotSet::AccessMode ConvertAccessMode() {
    switch (access_mode) {
      case v8::internal::AccessMode::ATOMIC:
        return BasicSlotSet::AccessMode::ATOMIC;
      case v8::internal::AccessMode::NON_ATOMIC:
        return BasicSlotSet::AccessMode::NON_ATOMIC;
    }
  }

  // Similar to BasicSlotSet::Iterate() but Callback takes a parameter of type
  // MaybeObjectSlot.
  template <
      v8::internal::AccessMode access_mode = v8::internal::AccessMode::ATOMIC,
      typename Callback>
  size_t Iterate(Address chunk_start, size_t start_bucket, size_t end_bucket,
                 Callback callback, EmptyBucketMode mode) {
    return BasicSlotSet::Iterate<ConvertAccessMode<access_mode>()>(
        chunk_start, start_bucket, end_bucket,
        [&callback](Address slot) { return callback(MaybeObjectSlot(slot)); },
        [this, mode](size_t bucket_index) {
          if (mode == EmptyBucketMode::FREE_EMPTY_BUCKETS) {
            ReleaseBucket(bucket_index);
          }
        });
  }

  // Similar to SlotSet::Iterate() but records potentially empty buckets in
  // *possibly_empty_buckets rather than freeing them. Assumes that the
  // possibly-empty set was already cleared by CheckPossiblyEmptyBuckets.
  template <typename Callback>
  size_t IterateAndTrackEmptyBuckets(
      Address chunk_start, size_t start_bucket, size_t end_bucket,
      Callback callback, PossiblyEmptyBuckets* possibly_empty_buckets) {
    return BasicSlotSet::Iterate(
        chunk_start, start_bucket, end_bucket,
        [&callback](Address slot) { return callback(MaybeObjectSlot(slot)); },
        [possibly_empty_buckets, end_bucket](size_t bucket_index) {
          possibly_empty_buckets->Insert(bucket_index, end_bucket);
        });
  }

  // Check whether possibly empty buckets are really empty. Empty buckets are
  // freed and the possibly-empty state is cleared for all buckets.
  bool CheckPossiblyEmptyBuckets(size_t buckets,
                                 PossiblyEmptyBuckets* possibly_empty_buckets) {
    bool empty = true;
    for (size_t bucket_index = 0; bucket_index < buckets; bucket_index++) {
      Bucket* bucket = LoadBucket<AccessMode::NON_ATOMIC>(bucket_index);
      if (bucket) {
        if (possibly_empty_buckets->Contains(bucket_index)) {
          if (bucket->IsEmpty()) {
            ReleaseBucket<AccessMode::NON_ATOMIC>(bucket_index);
          } else {
            empty = false;
          }
        } else {
          empty = false;
        }
      } else {
        // Unfortunately we cannot DCHECK here that the corresponding bit in
        // possibly_empty_buckets is not set. After scavenge, the
        // MergeOldToNewRememberedSets operation might remove a recorded bucket.
      }
    }

    possibly_empty_buckets->Release();

    return empty;
  }

  void Merge(SlotSet* other, size_t buckets) {
    for (size_t bucket_index = 0; bucket_index < buckets; bucket_index++) {
      Bucket* other_bucket =
          other->LoadBucket<AccessMode::NON_ATOMIC>(bucket_index);
      if (!other_bucket) continue;
      Bucket* bucket = LoadBucket<AccessMode::NON_ATOMIC>(bucket_index);
      if (bucket == nullptr) {
        other->StoreBucket<AccessMode::NON_ATOMIC>(bucket_index, nullptr);
        StoreBucket<AccessMode::NON_ATOMIC>(bucket_index, other_bucket);
      } else {
        for (int cell_index = 0; cell_index < kCellsPerBucket; cell_index++) {
          bucket->SetCellBits<AccessMode::NON_ATOMIC>(
              cell_index,
              other_bucket->LoadCell<AccessMode::NON_ATOMIC>(cell_index));
        }
      }
    }
  }
};

static_assert(std::is_standard_layout<SlotSet>::value);
static_assert(std::is_standard_layout<SlotSet::Bucket>::value);

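// Usage sketch (illustrative only): scanning a page's remembered set and
// dropping stale slots. The callback receives a MaybeObjectSlot and returns
// KEEP_SLOT or REMOVE_SLOT; page_start, slot_set, and the IsStillLive
// predicate are stand-ins for the example:
//
//   slot_set->Iterate(
//       page_start, 0, SlotSet::kBucketsRegularPage,
//       [](MaybeObjectSlot slot) {
//         // Decide per slot, e.g. keep slots that still point to live data.
//         return IsStillLive(slot) ? KEEP_SLOT : REMOVE_SLOT;
//       },
//       SlotSet::EmptyBucketMode::FREE_EMPTY_BUCKETS);
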
enum class SlotType : uint8_t {
  // Full-pointer-sized slot storing an object start address.
  // RelocInfo::target_object/RelocInfo::set_target_object methods are used
  // for accessing. Used when the pointer is stored in the instruction stream.
  kEmbeddedObjectFull,

  // Tagged-sized slot storing an object start address.
  // RelocInfo::target_object/RelocInfo::set_target_object methods are used
  // for accessing. Used when the pointer is stored in the instruction stream.
  kEmbeddedObjectCompressed,

  // Full-pointer-sized slot storing the instruction start of a Code object.
  // RelocInfo::target_address/RelocInfo::set_target_address methods are used
  // for accessing. Used when the pointer is stored in the instruction stream.
  kCodeEntry,

  // Raw full-pointer-sized slot. The slot is accessed directly. Used when the
  // pointer is stored in the constant pool.
  kConstPoolEmbeddedObjectFull,

  // Raw tagged-sized slot. The slot is accessed directly. Used when the
  // pointer is stored in the constant pool.
  kConstPoolEmbeddedObjectCompressed,

  // Raw full-pointer-sized slot storing the instruction start of a Code
  // object. The slot is accessed directly. Used when the pointer is stored in
  // the constant pool.
  kConstPoolCodeEntry,

  // The slot got cleared but has not been removed from the slot set.
  kCleared,

  kLast = kCleared
};

// Data structure for maintaining a list of typed slots in a page.
// Typed slots can only appear in Code objects, so the maximum possible offset
// is limited by LargePageMetadata::kMaxCodePageSize. The implementation is a
// chain of chunks, where each chunk is an array of encoded (slot type, slot
// offset) pairs. There is no duplicate detection, and we do not expect many
// duplicates because typed slots contain V8-internal pointers that are not
// directly exposed to JS.
class V8_EXPORT_PRIVATE TypedSlots {
 public:
  static const int kMaxOffset = 1 << 29;
  TypedSlots() = default;
  virtual ~TypedSlots();
  void Insert(SlotType type, uint32_t offset);
  void Merge(TypedSlots* other);

 protected:
  using OffsetField = base::BitField<int, 0, 29>;
  using TypeField = OffsetField::Next<SlotType, 3>;
  struct TypedSlot {
    uint32_t type_and_offset;
  };
  struct Chunk {
    Chunk* next;
    std::vector<TypedSlot> buffer;
  };
  static const size_t kInitialBufferSize = 100;
  static const size_t kMaxBufferSize = 16 * KB;
  static size_t NextCapacity(size_t capacity) {
    return std::min({kMaxBufferSize, capacity * 2});
  }
  Chunk* EnsureChunk();
  Chunk* NewChunk(Chunk* next, size_t capacity);
  Chunk* head_ = nullptr;
  Chunk* tail_ = nullptr;
};
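
// Encoding sketch (illustrative only): a TypedSlot packs the 29-bit offset
// into bits 0..28 and the 3-bit SlotType into bits 29..31 of one uint32_t,
// which is why offsets must stay below kMaxOffset (1 << 29):
//
//   uint32_t packed = TypeField::encode(SlotType::kCodeEntry) |
//                     OffsetField::encode(0x1234);
//   DCHECK_EQ(SlotType::kCodeEntry, TypeField::decode(packed));
//   DCHECK_EQ(0x1234, OffsetField::decode(packed));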

// A multiset of per-page typed slots that allows concurrent iteration and
// clearing of invalid slots.
class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
 public:
  using FreeRangesMap = std::map<uint32_t, uint32_t>;

  enum IterationMode { FREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };

  explicit TypedSlotSet(Address page_start) : page_start_(page_start) {}

  // Iterate over all slots in the set and for each slot invoke the callback.
  // If the callback returns REMOVE_SLOT then the slot is removed from the set.
  // Returns the new number of slots.
  //
  // Sample usage:
  // Iterate([](SlotType slot_type, Address slot_address) {
  //   if (good(slot_type, slot_address)) return KEEP_SLOT;
  //   else return REMOVE_SLOT;
  // });
  // This can run concurrently to ClearInvalidSlots().
  template <typename Callback>
  int Iterate(Callback callback, IterationMode mode) {
    static_assert(static_cast<uint8_t>(SlotType::kLast) < 8);
    Chunk* chunk = head_;
    Chunk* previous = nullptr;
    int new_count = 0;
    while (chunk != nullptr) {
      bool empty = true;
      for (TypedSlot& slot : chunk->buffer) {
        SlotType type = TypeField::decode(slot.type_and_offset);
        if (type != SlotType::kCleared) {
          uint32_t offset = OffsetField::decode(slot.type_and_offset);
          Address addr = page_start_ + offset;
          if (callback(type, addr) == KEEP_SLOT) {
            new_count++;
            empty = false;
          } else {
            slot = ClearedTypedSlot();
          }
        }
      }
      Chunk* next = chunk->next;
      if (mode == FREE_EMPTY_CHUNKS && empty) {
        // We remove the chunk from the list but let it still point to its
        // next chunk to allow concurrent iteration.
        if (previous) {
          StoreNext(previous, next);
        } else {
          StoreHead(next);
        }

        delete chunk;
      } else {
        previous = chunk;
      }
      chunk = next;
    }
    return new_count;
  }

  // Clears all slots that have the offset in the specified ranges.
  // This can run concurrently to Iterate().
  void ClearInvalidSlots(const FreeRangesMap& invalid_ranges);

  // Asserts that there are no recorded slots in the specified ranges.
  void AssertNoInvalidSlots(const FreeRangesMap& invalid_ranges);

 private:
  template <typename Callback>
  void IterateSlotsInRanges(Callback callback,
                            const FreeRangesMap& invalid_ranges);

  // Atomic operations used by Iterate and ClearInvalidSlots.
  Chunk* LoadNext(Chunk* chunk) {
    return base::AsAtomicPointer::Relaxed_Load(&chunk->next);
  }
  void StoreNext(Chunk* chunk, Chunk* next) {
    return base::AsAtomicPointer::Relaxed_Store(&chunk->next, next);
  }
  Chunk* LoadHead() { return base::AsAtomicPointer::Relaxed_Load(&head_); }
  void StoreHead(Chunk* chunk) {
    base::AsAtomicPointer::Relaxed_Store(&head_, chunk);
  }
  static TypedSlot ClearedTypedSlot() {
    return TypedSlot{TypeField::encode(SlotType::kCleared) |
                     OffsetField::encode(0)};
  }

  Address page_start_;
};
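
// Usage sketch (illustrative only): recording and scanning typed slots for a
// page. The page handle, the offset, and the StillValid predicate are made up
// for the example:
//
//   TypedSlotSet typed_slots(page->address());
//   typed_slots.Insert(SlotType::kEmbeddedObjectFull, 0x40);
//   typed_slots.Iterate(
//       [](SlotType type, Address addr) {
//         // Keep the slot only if it still holds a valid target.
//         return StillValid(type, addr) ? KEEP_SLOT : REMOVE_SLOT;
//       },
//       TypedSlotSet::FREE_EMPTY_CHUNKS);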

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SLOT_SET_H_