v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
basic-slot-set.h
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_BASE_BASIC_SLOT_SET_H_
#define V8_HEAP_BASE_BASIC_SLOT_SET_H_

#include <cstddef>
#include <memory>

#include "src/base/atomic-utils.h"
#include "src/base/bits.h"
#include "src/base/platform/memory.h"
namespace v8::internal {
class WriteBarrierCodeStubAssembler;
}  // namespace v8::internal

namespace heap {
namespace base {

enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

// Data structure for maintaining a set of slots.
//
// At a high level, the set implements a 2-level bitmap. The set assumes that
// the slots are `SlotGranularity`-aligned and splits the valid slot offset
// range into buckets. Each bucket is a bitmap with a bit corresponding to a
// single slot offset.
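//
// For illustration (assuming SlotGranularity == 8), the slot offset 70000
// becomes slot number 70000 / 8 == 8750, which is decomposed as
//   bucket index: 8750 >> kBitsPerBucketLog2      == 8750 >> 10 == 8
//   cell index:   (8750 >> kBitsPerCellLog2) & 31 == 273 & 31   == 17
//   bit index:    8750 & (kBitsPerCell - 1)       == 14
// i.e. bit 14 of cell 17 in bucket 8; see SlotToIndices() below.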
template <size_t SlotGranularity>
class BasicSlotSet {
  static constexpr auto kSystemPointerSize = sizeof(void*);

 public:
  using Address = uintptr_t;

  enum AccessMode : uint8_t {
    ATOMIC,
    NON_ATOMIC,
  };

  enum EmptyBucketMode {
    FREE_EMPTY_BUCKETS,  // An empty bucket will be deallocated immediately.
    KEEP_EMPTY_BUCKETS   // An empty bucket will be kept.
  };

  BasicSlotSet() = delete;

  static BasicSlotSet* Allocate(size_t buckets) {
    //  BasicSlotSet* slot_set --+
    //                           |
    //                           v
    //  +-----------------+-------------------------+
    //  | num buckets     |     buckets array       |
    //  +-----------------+-------------------------+
    //     size_t            Bucket* buckets
    //
    // The BasicSlotSet pointer points to the beginning of the buckets array
    // for faster access in the write barrier. The number of buckets is
    // maintained for checking bounds for the heap sandbox.
    const size_t buckets_size = buckets * sizeof(Bucket*);
    const size_t size = kNumBucketsSize + buckets_size;
    void* allocation = v8::base::AlignedAlloc(size, kSystemPointerSize);
    CHECK(allocation);
    BasicSlotSet* slot_set = reinterpret_cast<BasicSlotSet*>(
        reinterpret_cast<uint8_t*>(allocation) + kNumBucketsSize);
    DCHECK(
        IsAligned(reinterpret_cast<uintptr_t>(slot_set), kSystemPointerSize));
    slot_set->set_num_buckets(buckets);
    for (size_t i = 0; i < buckets; i++) {
      *slot_set->bucket(i) = nullptr;
    }
    return slot_set;
  }
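
  // Sample usage (illustrative; `kPageSize` is an assumed name for the size
  // of the covered range and is not defined in this header):
  //   using SlotSet = BasicSlotSet<sizeof(void*)>;
  //   SlotSet* slot_set =
  //       SlotSet::Allocate(SlotSet::BucketsForSize(kPageSize));
  //   // ... insert/remove/iterate slots ...
  //   SlotSet::Delete(slot_set);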

  static void Delete(BasicSlotSet* slot_set) {
    if (slot_set == nullptr) {
      return;
    }

    for (size_t i = 0; i < slot_set->num_buckets(); i++) {
      slot_set->ReleaseBucket(i);
    }
    v8::base::AlignedFree(reinterpret_cast<uint8_t*>(slot_set) -
                          kNumBucketsSize);
  }

  constexpr static size_t BucketsForSize(size_t size) {
    return (size + (SlotGranularity * kBitsPerBucket) - 1) /
           (SlotGranularity * kBitsPerBucket);
  }

  // Converts the slot offset into bucket index.
  constexpr static size_t BucketForSlot(size_t slot_offset) {
    DCHECK(IsAligned(slot_offset, SlotGranularity));
    return slot_offset / (SlotGranularity * kBitsPerBucket);
  }

  // Converts bucket index into slot offset.
  constexpr static size_t OffsetForBucket(size_t bucket_index) {
    return bucket_index * SlotGranularity * kBitsPerBucket;
  }
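
  // Worked example (illustrative): with SlotGranularity == 8, one bucket
  // covers SlotGranularity * kBitsPerBucket == 8 * 1024 == 8192 bytes, so
  //   BucketsForSize(262144) == 32
  //   BucketForSlot(16384)   == 2
  //   OffsetForBucket(2)     == 16384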

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  // AccessMode defines whether there can be concurrent access on the buckets
  // or not.
  template <AccessMode access_mode>
  void Insert(size_t slot_offset) {
    size_t bucket_index;
    int cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    Bucket* bucket = LoadBucket<access_mode>(bucket_index);
    if (bucket == nullptr) {
      bucket = new Bucket;
      if (!SwapInNewBucket<access_mode>(bucket_index, bucket)) {
        delete bucket;
        bucket = LoadBucket<access_mode>(bucket_index);
      }
    }
    // Check that monotonicity is preserved, i.e., once a bucket is set we do
    // not free it concurrently.
    DCHECK(bucket != nullptr);
    DCHECK_EQ(bucket->cells(), LoadBucket<access_mode>(bucket_index)->cells());
    uint32_t mask = 1u << bit_index;
    if ((bucket->template LoadCell<access_mode>(cell_index) & mask) == 0) {
      bucket->template SetCellBits<access_mode>(cell_index, mask);
    }
  }
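
  // Sample usage (illustrative; `slot_set`, `page_start`, and `slot_addr`
  // are assumed names for the set, its page base, and the slot's address):
  //   slot_set->Insert<AccessMode::ATOMIC>(slot_addr - page_start);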

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  // Returns true if the set contains the slot.
  bool Contains(size_t slot_offset) {
    size_t bucket_index;
    int cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    Bucket* bucket = LoadBucket(bucket_index);
    if (bucket == nullptr) return false;
    return (bucket->LoadCell(cell_index) & (1u << bit_index)) != 0;
  }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  void Remove(size_t slot_offset) {
    size_t bucket_index;
    int cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    Bucket* bucket = LoadBucket(bucket_index);
    if (bucket != nullptr) {
      uint32_t cell = bucket->LoadCell(cell_index);
      uint32_t bit_mask = 1u << bit_index;
      if (cell & bit_mask) {
        bucket->ClearCellBits(cell_index, bit_mask);
      }
    }
  }

  // The slot offsets specify a range of slots at addresses:
  // [page_start_ + start_offset ... page_start_ + end_offset).
  void RemoveRange(size_t start_offset, size_t end_offset, size_t buckets,
                   EmptyBucketMode mode) {
    CHECK_LE(end_offset, buckets * kBitsPerBucket * SlotGranularity);
    DCHECK_LE(start_offset, end_offset);
    size_t start_bucket;
    int start_cell, start_bit;
    SlotToIndices(start_offset, &start_bucket, &start_cell, &start_bit);
    size_t end_bucket;
    int end_cell, end_bit;
    // SlotToIndices() checks that the bucket index is within bounds. Since
    // the API allows for an exclusive end interval, we compute the inclusive
    // index and then increment the bit again (with overflows).
    SlotToIndices(end_offset - SlotGranularity, &end_bucket, &end_cell,
                  &end_bit);
    if (++end_bit >= kBitsPerCell) {
      end_bit = 0;
      if (++end_cell >= kCellsPerBucket) {
        end_cell = 0;
        end_bucket++;
      }
    }
    uint32_t start_mask = (1u << start_bit) - 1;
    uint32_t end_mask = ~((1u << end_bit) - 1);
    Bucket* bucket;
    if (start_bucket == end_bucket && start_cell == end_cell) {
      bucket = LoadBucket(start_bucket);
      if (bucket != nullptr) {
        bucket->ClearCellBits(start_cell, ~(start_mask | end_mask));
      }
      return;
    }
    size_t current_bucket = start_bucket;
    int current_cell = start_cell;
    bucket = LoadBucket(current_bucket);
    if (bucket != nullptr) {
      bucket->ClearCellBits(current_cell, ~start_mask);
    }
    current_cell++;
    if (current_bucket < end_bucket) {
      if (bucket != nullptr) {
        ClearBucket(bucket, current_cell, kCellsPerBucket);
      }
      // The rest of the current bucket is cleared.
      // Move on to the next bucket.
      current_bucket++;
      current_cell = 0;
    }
    DCHECK(current_bucket == end_bucket ||
           (current_bucket < end_bucket && current_cell == 0));
    while (current_bucket < end_bucket) {
      if (mode == FREE_EMPTY_BUCKETS) {
        ReleaseBucket(current_bucket);
      } else {
        DCHECK(mode == KEEP_EMPTY_BUCKETS);
        bucket = LoadBucket(current_bucket);
        if (bucket != nullptr) {
          ClearBucket(bucket, 0, kCellsPerBucket);
        }
      }
      current_bucket++;
    }
    // All buckets between start_bucket and end_bucket are cleared.
    DCHECK(current_bucket == end_bucket);
    if (current_bucket == buckets) return;
    bucket = LoadBucket(current_bucket);
    DCHECK(current_cell <= end_cell);
    if (bucket == nullptr) return;
    while (current_cell < end_cell) {
      bucket->StoreCell(current_cell, 0);
      current_cell++;
    }
    // All cells between start_cell and end_cell are cleared.
    DCHECK(current_bucket == end_bucket && current_cell == end_cell);
    bucket->ClearCellBits(end_cell, ~end_mask);
  }
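
  // Mask arithmetic in RemoveRange() by example (illustrative): for
  // start_bit == 3 and end_bit == 7 within a single cell,
  //   start_mask == 0b00000111   (bits below start_bit, preserved)
  //   end_mask   == ~0b01111111  (bits at or above end_bit, preserved)
  // so ClearCellBits(cell, ~(start_mask | end_mask)) clears exactly the bits
  // in [start_bit, end_bit), here 0b01111000.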

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  bool Lookup(size_t slot_offset) {
    size_t bucket_index;
    int cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    Bucket* bucket = LoadBucket(bucket_index);
    if (bucket == nullptr) return false;
    return (bucket->LoadCell(cell_index) & (1u << bit_index)) != 0;
  }

  // Iterate over all slots in the set and for each slot invoke the callback.
  // If the callback returns REMOVE_SLOT then the slot is removed from the set.
  // Returns the new number of slots.
  //
  // Iteration can be performed concurrently with other operations that use
  // atomic access mode such as insertion and removal. However, there is no
  // guarantee about ordering and linearizability.
  //
  // Sample usage:
  // Iterate([](Address slot) {
  //    if (good(slot)) return KEEP_SLOT;
  //    else return REMOVE_SLOT;
  // });
  //
  // Releases memory for empty buckets with FREE_EMPTY_BUCKETS.
  template <AccessMode access_mode = AccessMode::ATOMIC, typename Callback>
  size_t Iterate(Address chunk_start, size_t start_bucket, size_t end_bucket,
                 Callback callback, EmptyBucketMode mode) {
    return Iterate<access_mode>(
        chunk_start, start_bucket, end_bucket, callback,
        [this, mode](size_t bucket_index) {
          if (mode == EmptyBucketMode::FREE_EMPTY_BUCKETS) {
            ReleaseBucket(bucket_index);
          }
        });
  }
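
  // A fuller sample usage of the overload above (illustrative; `slot_set`,
  // `page_start`, and `IsLive()` are assumed names, not defined here):
  //   size_t remaining = slot_set->Iterate(
  //       page_start, 0, slot_set->num_buckets(),
  //       [](Address slot) { return IsLive(slot) ? KEEP_SLOT : REMOVE_SLOT; },
  //       FREE_EMPTY_BUCKETS);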

  static constexpr int kCellsPerBucket = 32;
  static constexpr int kCellsPerBucketLog2 = 5;
  static constexpr int kCellSizeBytesLog2 = 2;
  static constexpr int kCellSizeBytes = 1 << kCellSizeBytesLog2;
  static constexpr int kBitsPerCell = 32;
  static constexpr int kBitsPerCellLog2 = 5;
  static constexpr int kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
  static constexpr int kBitsPerBucketLog2 =
      kCellsPerBucketLog2 + kBitsPerCellLog2;

  class Bucket final {
   public:
    Bucket() = default;

    uint32_t* cells() { return cells_; }
    const uint32_t* cells() const { return cells_; }
    uint32_t* cell(int cell_index) { return cells_ + cell_index; }
    const uint32_t* cell(int cell_index) const { return cells_ + cell_index; }

    template <AccessMode access_mode = AccessMode::ATOMIC>
    uint32_t LoadCell(int cell_index) {
      DCHECK_LT(cell_index, kCellsPerBucket);
      if constexpr (access_mode == AccessMode::ATOMIC)
        return v8::base::AsAtomic32::Acquire_Load(cell(cell_index));
      return *(cell(cell_index));
    }

    template <AccessMode access_mode = AccessMode::ATOMIC>
    void SetCellBits(int cell_index, uint32_t mask) {
      if constexpr (access_mode == AccessMode::ATOMIC) {
        v8::base::AsAtomic32::Release_SetBits(cell(cell_index), mask, mask);
      } else {
        uint32_t* c = cell(cell_index);
        *c = (*c & ~mask) | mask;
      }
    }

    template <AccessMode access_mode = AccessMode::ATOMIC>
    void ClearCellBits(int cell_index, uint32_t mask) {
      if constexpr (access_mode == AccessMode::ATOMIC) {
        v8::base::AsAtomic32::Release_SetBits(cell(cell_index), 0u, mask);
      } else {
        *cell(cell_index) &= ~mask;
      }
    }

    void StoreCell(int cell_index, uint32_t value) {
      v8::base::AsAtomic32::Release_Store(cell(cell_index), value);
    }

    bool IsEmpty() const {
      for (int i = 0; i < kCellsPerBucket; i++) {
        if (cells_[i] != 0) {
          return false;
        }
      }
      return true;
    }

   private:
    uint32_t cells_[kCellsPerBucket] = {0};
  };

  size_t num_buckets() const {
    return *(reinterpret_cast<const size_t*>(this) - 1);
  }

 protected:
  template <AccessMode access_mode = AccessMode::ATOMIC, typename Callback,
            typename EmptyBucketCallback>
  size_t Iterate(Address chunk_start, size_t start_bucket, size_t end_bucket,
                 Callback callback, EmptyBucketCallback empty_bucket_callback) {
    size_t new_count = 0;
    for (size_t bucket_index = start_bucket; bucket_index < end_bucket;
         bucket_index++) {
      Bucket* bucket = LoadBucket<access_mode>(bucket_index);
      if (bucket != nullptr) {
        size_t in_bucket_count = 0;
        size_t cell_offset = bucket_index << kBitsPerBucketLog2;
        for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
          uint32_t cell = bucket->template LoadCell<access_mode>(i);
          if (cell) {
            uint32_t old_cell = cell;
            uint32_t mask = 0;
            while (cell) {
              int bit_offset = v8::base::bits::CountTrailingZeros(cell);
              uint32_t bit_mask = 1u << bit_offset;
              Address slot = (cell_offset + bit_offset) * SlotGranularity;
              if (callback(chunk_start + slot) == KEEP_SLOT) {
                ++in_bucket_count;
              } else {
                mask |= bit_mask;
              }
              cell ^= bit_mask;
            }
            uint32_t new_cell = old_cell & ~mask;
            if (old_cell != new_cell) {
              bucket->template ClearCellBits<access_mode>(i, mask);
            }
          }
        }
        if (in_bucket_count == 0) {
          empty_bucket_callback(bucket_index);
        }
        new_count += in_bucket_count;
      }
    }
    return new_count;
  }
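
  // The inner loop above scans set bits via CountTrailingZeros. For example
  // (illustrative), cell == 0b10100 visits bit offsets 2 and 4:
  //   CountTrailingZeros(0b10100) == 2, then cell ^= 1u << 2 -> 0b10000
  //   CountTrailingZeros(0b10000) == 4, then cell ^= 1u << 4 -> 0
  // Offsets whose callback returns REMOVE_SLOT accumulate in `mask` and are
  // cleared with a single ClearCellBits() call per cell.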

  bool FreeBucketIfEmpty(size_t bucket_index) {
    Bucket* bucket = LoadBucket<AccessMode::NON_ATOMIC>(bucket_index);
    if (bucket != nullptr) {
      if (bucket->IsEmpty()) {
        ReleaseBucket<AccessMode::NON_ATOMIC>(bucket_index);
      } else {
        return false;
      }
    }

    return true;
  }

  void ClearBucket(Bucket* bucket, int start_cell, int end_cell) {
    DCHECK_GE(start_cell, 0);
    DCHECK_LE(end_cell, kCellsPerBucket);
    int current_cell = start_cell;
    while (current_cell < kCellsPerBucket) {
      bucket->StoreCell(current_cell, 0);
      current_cell++;
    }
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  void ReleaseBucket(size_t bucket_index) {
    Bucket* bucket = LoadBucket<access_mode>(bucket_index);
    StoreBucket<access_mode>(bucket_index, nullptr);
    delete bucket;
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  Bucket* LoadBucket(Bucket** bucket) {
    if (access_mode == AccessMode::ATOMIC)
      return v8::base::AsAtomicPointer::Acquire_Load(bucket);
    return *bucket;
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  Bucket* LoadBucket(size_t bucket_index) {
    return LoadBucket<access_mode>(bucket(bucket_index));
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  void StoreBucket(Bucket** bucket, Bucket* value) {
    if (access_mode == AccessMode::ATOMIC) {
      v8::base::AsAtomicPointer::Release_Store(bucket, value);
    } else {
      *bucket = value;
    }
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  void StoreBucket(size_t bucket_index, Bucket* value) {
    StoreBucket<access_mode>(bucket(bucket_index), value);
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  bool SwapInNewBucket(size_t bucket_index, Bucket* value) {
    Bucket** b = bucket(bucket_index);
    if (access_mode == AccessMode::ATOMIC) {
      return v8::base::AsAtomicPointer::Release_CompareAndSwap(
                 b, nullptr, value) == nullptr;
    } else {
      DCHECK_NULL(*b);
      *b = value;
      return true;
    }
  }

  // Converts the slot offset into bucket/cell/bit index.
  void SlotToIndices(size_t slot_offset, size_t* bucket_index, int* cell_index,
                     int* bit_index) {
    DCHECK(IsAligned(slot_offset, SlotGranularity));
    size_t slot = slot_offset / SlotGranularity;
    *bucket_index = slot >> kBitsPerBucketLog2;
    // No SBXCHECK() in base.
    CHECK(*bucket_index < num_buckets());
    *cell_index =
        static_cast<int>((slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1));
    *bit_index = static_cast<int>(slot & (kBitsPerCell - 1));
  }

  Bucket** buckets() { return reinterpret_cast<Bucket**>(this); }
  Bucket** bucket(size_t bucket_index) { return buckets() + bucket_index; }

  void set_num_buckets(size_t num_buckets) {
    *(reinterpret_cast<size_t*>(this) - 1) = num_buckets;
  }

  static constexpr int kNumBucketsSize = sizeof(size_t);

  // For kNumBucketsSize.
  friend class v8::internal::WriteBarrierCodeStubAssembler;
};

}  // namespace base
}  // namespace heap

#endif  // V8_HEAP_BASE_BASIC_SLOT_SET_H_