v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
object-start-bitmap.h
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_CPPGC_OBJECT_START_BITMAP_H_
#define V8_HEAP_CPPGC_OBJECT_START_BITMAP_H_

#include <limits.h>
#include <stdint.h>

#include <array>

#include "include/cppgc/internal/write-barrier.h"
#include "src/base/atomic-utils.h"
#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"

namespace cppgc {
namespace internal {

// A bitmap for recording object starts. Objects have to be allocated at a
// minimum granularity of kAllocationGranularity.
//
// Depends on internals such as:
// - kPageSize
// - kAllocationGranularity
//
// ObjectStartBitmap supports concurrent reads from multiple threads but
// only a single mutator thread can write to it. ObjectStartBitmap relies on
// being allocated inside the same normal page.
class V8_EXPORT_PRIVATE ObjectStartBitmap {
 public:
  // Granularity of addresses added to the bitmap.
  static constexpr size_t Granularity() { return kAllocationGranularity; }

  // Maximum number of entries in the bitmap.
  static constexpr size_t MaxEntries() {
    return kReservedForBitmap * kBitsPerCell;
  }

  inline ObjectStartBitmap();

  // Finds an object header based on an
  // address_maybe_pointing_to_the_middle_of_object. Will search for an object
  // start in decreasing address order.
  template <AccessMode = AccessMode::kNonAtomic>
  inline HeapObjectHeader* FindHeader(
      ConstAddress address_maybe_pointing_to_the_middle_of_object) const;

  template <AccessMode = AccessMode::kNonAtomic>
  inline void SetBit(ConstAddress);
  template <AccessMode = AccessMode::kNonAtomic>
  inline void ClearBit(ConstAddress);
  template <AccessMode = AccessMode::kNonAtomic>
  inline bool CheckBit(ConstAddress) const;

  // Iterates all object starts recorded in the bitmap.
  //
  // The callback is of type
  //   void(Address)
  // and is passed the object start address as parameter.
  template <typename Callback>
  inline void Iterate(Callback) const;

  // Clears the object start bitmap.
  inline void Clear();

  // Marks the bitmap as fully populated. Unpopulated bitmaps are in an
  // inconsistent state and must be populated before they can be used to find
  // object headers.
  inline void MarkAsFullyPopulated();

 private:
  template <AccessMode = AccessMode::kNonAtomic>
  inline void store(size_t cell_index, uint8_t value);
  template <AccessMode = AccessMode::kNonAtomic>
  inline uint8_t load(size_t cell_index) const;

  static constexpr size_t kBitsPerCell = sizeof(uint8_t) * CHAR_BIT;
  static constexpr size_t kCellMask = kBitsPerCell - 1;
  static constexpr size_t kBitmapSize =
      (kPageSize + ((kBitsPerCell * kAllocationGranularity) - 1)) /
      (kBitsPerCell * kAllocationGranularity);
  static constexpr size_t kReservedForBitmap =
      ((kBitmapSize + kAllocationMask) & ~kAllocationMask);
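  // Worked example (illustrative values, not the actual constants): with a
  // hypothetical kPageSize of 4096 and kAllocationGranularity of 16, each
  // uint8_t cell covers 8 * 16 = 128 bytes of the page, so kBitmapSize rounds
  // up to 4096 / 128 = 32 cells; kReservedForBitmap then pads that count up to
  // the next multiple of kAllocationGranularity.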

  inline void ObjectStartIndexAndBit(ConstAddress, size_t*, size_t*) const;

  // `fully_populated_` is used to denote that the bitmap is populated with
  // all currently allocated objects on the page and is in a consistent state.
  // It is used to guard against using the bitmap for finding headers during
  // concurrent sweeping.
  //
  // Although this flag can be used by both the main thread and concurrent
  // sweeping threads, it is not atomic. The flag should never be accessed by
  // multiple threads at the same time. If data races are observed on this
  // flag, it likely means that the bitmap is queried while concurrent
  // sweeping is active, which is not supported and should be avoided.
  bool fully_populated_ = false;
  // The bitmap contains a bit for every kAllocationGranularity-aligned
  // address on a NormalPage, i.e., for a page of size kPageSize.
  std::array<uint8_t, kReservedForBitmap> object_start_bit_map_;
};
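
// Usage sketch (illustrative only; OnAllocate/FromInnerPointer are made-up
// helpers, not part of cppgc): the single mutator thread records each object
// start when it allocates, and an interior pointer can later be mapped back
// to its HeapObjectHeader, e.g. during conservative stack scanning.
//
//   void OnAllocate(ObjectStartBitmap& bitmap, ConstAddress header_address) {
//     bitmap.SetBit(header_address);  // non-atomic: mutator is the only writer
//   }
//
//   HeapObjectHeader* FromInnerPointer(const ObjectStartBitmap& bitmap,
//                                      ConstAddress maybe_inner_pointer) {
//     // Atomic mode allows concurrent readers while the mutator writes.
//     return bitmap.FindHeader<AccessMode::kAtomic>(maybe_inner_pointer);
//   }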

ObjectStartBitmap::ObjectStartBitmap() {
  Clear();
  MarkAsFullyPopulated();
}

template <AccessMode mode>
HeapObjectHeader* ObjectStartBitmap::FindHeader(
    ConstAddress address_maybe_pointing_to_the_middle_of_object) const {
  DCHECK(fully_populated_);
  const size_t page_base = reinterpret_cast<uintptr_t>(
                               address_maybe_pointing_to_the_middle_of_object) &
                           kPageBaseMask;
  DCHECK_EQ(page_base, reinterpret_cast<uintptr_t>(this) & kPageBaseMask);
  size_t object_offset = reinterpret_cast<uintptr_t>(
                             address_maybe_pointing_to_the_middle_of_object) &
                         kPageOffsetMask;
  size_t object_start_number = object_offset / kAllocationGranularity;
  size_t cell_index = object_start_number / kBitsPerCell;
  DCHECK_GT(object_start_bit_map_.size(), cell_index);
  const size_t bit = object_start_number & kCellMask;
  uint8_t byte = load<mode>(cell_index) & ((1 << (bit + 1)) - 1);
  while (!byte && cell_index) {
    DCHECK_LT(0u, cell_index);
    byte = load<mode>(--cell_index);
  }
  const int leading_zeroes = v8::base::bits::CountLeadingZeros(byte);
  object_start_number =
      (cell_index * kBitsPerCell) + (kBitsPerCell - 1) - leading_zeroes;
  object_offset = object_start_number * kAllocationGranularity;
  return reinterpret_cast<HeapObjectHeader*>(page_base + object_offset);
}
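
// Worked example (illustrative): suppose the query address falls at bit 5 of a
// cell whose value is 0b0100'1010. The mask (1 << (5 + 1)) - 1 == 0b0011'1111
// drops the start recorded at bit 6 (it lies above the query), leaving
// 0b0000'1010. CountLeadingZeros on that 8-bit value returns 4, so the result
// is bit 7 - 4 = 3: the closest object start at or below the query address.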

template <AccessMode mode>
void ObjectStartBitmap::SetBit(ConstAddress header_address) {
  size_t cell_index, object_bit;
  ObjectStartIndexAndBit(header_address, &cell_index, &object_bit);
  // Only a single mutator thread can write to the bitmap, so no need for CAS.
  store<mode>(cell_index,
              static_cast<uint8_t>(load(cell_index) | (1 << object_bit)));
}

template <AccessMode mode>
void ObjectStartBitmap::ClearBit(ConstAddress header_address) {
  size_t cell_index, object_bit;
  ObjectStartIndexAndBit(header_address, &cell_index, &object_bit);
  store<mode>(cell_index,
              static_cast<uint8_t>(load(cell_index) & ~(1 << object_bit)));
}

template <AccessMode mode>
bool ObjectStartBitmap::CheckBit(ConstAddress header_address) const {
  size_t cell_index, object_bit;
  ObjectStartIndexAndBit(header_address, &cell_index, &object_bit);
  return load<mode>(cell_index) & (1 << object_bit);
}

template <AccessMode mode>
void ObjectStartBitmap::store(size_t cell_index, uint8_t value) {
  if (mode == AccessMode::kNonAtomic) {
    object_start_bit_map_[cell_index] = value;
    return;
  }
  v8::base::AsAtomicPtr(&object_start_bit_map_[cell_index])
      ->store(value, std::memory_order_release);
}

template <AccessMode mode>
uint8_t ObjectStartBitmap::load(size_t cell_index) const {
  if (mode == AccessMode::kNonAtomic) {
    return object_start_bit_map_[cell_index];
  }
  return v8::base::AsAtomicPtr(&object_start_bit_map_[cell_index])
      ->load(std::memory_order_acquire);
}
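
// Informal note: the release store in store() pairs with the acquire load in
// load(), so a concurrent reader that observes a newly set start bit also
// observes the mutator's writes that happened before the bit was published.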

void ObjectStartBitmap::ObjectStartIndexAndBit(ConstAddress header_address,
                                               size_t* cell_index,
                                               size_t* bit) const {
  const size_t object_offset =
      reinterpret_cast<size_t>(header_address) & kPageOffsetMask;
  DCHECK(!(object_offset & kAllocationMask));
  const size_t object_start_number = object_offset / kAllocationGranularity;
  *cell_index = object_start_number / kBitsPerCell;
  DCHECK_GT(kBitmapSize, *cell_index);
  *bit = object_start_number & kCellMask;
}
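
// Worked example (illustrative values): for a header at page offset 272 and a
// hypothetical kAllocationGranularity of 16, the object start number is
// 272 / 16 = 17, which lands in cell 17 / 8 = 2 at bit 17 & 7 = 1.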

template <typename Callback>
inline void ObjectStartBitmap::Iterate(Callback callback) const {
  const Address page_base = reinterpret_cast<Address>(
      reinterpret_cast<uintptr_t>(this) & kPageBaseMask);
  for (size_t cell_index = 0; cell_index < kReservedForBitmap; cell_index++) {
    if (!object_start_bit_map_[cell_index]) continue;

    uint8_t value = object_start_bit_map_[cell_index];
    while (value) {
      const int trailing_zeroes = v8::base::bits::CountTrailingZeros(value);
      const size_t object_start_number =
          (cell_index * kBitsPerCell) + trailing_zeroes;
      const Address object_address =
          page_base + (kAllocationGranularity * object_start_number);
      callback(object_address);
      // Clear current object bit in temporary value to advance iteration.
      value &= ~(1 << (object_start_number & kCellMask));
    }
  }
}
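
// Usage sketch (illustrative; the lambda below is not part of cppgc): Iterate
// visits every recorded object start in increasing address order, e.g. to
// count the object starts on the page:
//
//   size_t object_starts = 0;
//   bitmap.Iterate([&object_starts](Address) { ++object_starts; });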

void ObjectStartBitmap::MarkAsFullyPopulated() {
  DCHECK(!fully_populated_);
  fully_populated_ = true;
}

void ObjectStartBitmap::Clear() {
  fully_populated_ = false;
  std::fill(object_start_bit_map_.begin(), object_start_bit_map_.end(), 0);
}

// A platform aware version of ObjectStartBitmap to provide platform specific
// optimizations (e.g. use non-atomic stores on ARMv7 when not marking).
class V8_EXPORT_PRIVATE PlatformAwareObjectStartBitmap
    : public ObjectStartBitmap {
 public:
  template <AccessMode = AccessMode::kNonAtomic>
  inline void SetBit(ConstAddress);
  template <AccessMode = AccessMode::kNonAtomic>
  inline void ClearBit(ConstAddress);

 private:
  template <AccessMode>
  static bool ShouldForceNonAtomic();
};

// static
template <AccessMode mode>
bool PlatformAwareObjectStartBitmap::ShouldForceNonAtomic() {
#if defined(V8_HOST_ARCH_ARM)
  // Use non-atomic accesses on ARMv7 when marking is not active.
  if (mode == AccessMode::kAtomic) {
    if (V8_LIKELY(!WriteBarrier::IsEnabled())) return true;
  }
#endif  // defined(V8_HOST_ARCH_ARM)
  return false;
}
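
// Design note (informal): atomic bitmap accesses are only needed while a
// concurrent reader, such as the marker, may inspect the bitmap. With the
// write barrier disabled no marking is active, so plain accesses suffice; on
// ARMv7 this avoids the memory-barrier instructions that release/acquire
// accesses would otherwise emit on the allocation fast path.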

template <AccessMode mode>
void PlatformAwareObjectStartBitmap::SetBit(ConstAddress header_address) {
  if (ShouldForceNonAtomic<mode>()) {
    ObjectStartBitmap::SetBit<AccessMode::kNonAtomic>(header_address);
    return;
  }
  ObjectStartBitmap::SetBit<mode>(header_address);
}

template <AccessMode mode>
void PlatformAwareObjectStartBitmap::ClearBit(ConstAddress header_address) {
  if (ShouldForceNonAtomic<mode>()) {
    ObjectStartBitmap::ClearBit<AccessMode::kNonAtomic>(header_address);
    return;
  }
  ObjectStartBitmap::ClearBit<mode>(header_address);
}

}  // namespace internal
}  // namespace cppgc

#endif  // V8_HEAP_CPPGC_OBJECT_START_BITMAP_H_
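
To make the mechanism concrete outside the V8 tree, the following is a minimal, self-contained sketch of the same idea. The page size, granularity, class and method names below are assumptions chosen for the example; the real class above additionally handles AccessMode templates, atomic accesses, page-base masking, and the fully_populated_ guard.

// Illustrative sketch (not part of V8): a toy object-start bitmap over a
// single fixed-size "page". Constants and names are assumptions.
#include <assert.h>
#include <limits.h>
#include <stddef.h>
#include <stdint.h>

#include <array>

class ToyObjectStartBitmap {
 public:
  static constexpr size_t kPageSize = 4096;     // assumed page size
  static constexpr size_t kGranularity = 16;    // assumed allocation granularity
  static constexpr size_t kBitsPerCell = CHAR_BIT;
  static constexpr size_t kCells = kPageSize / (kGranularity * kBitsPerCell);

  // Records that an object starts at `offset` bytes from the page base.
  void SetStart(size_t offset) {
    assert(offset % kGranularity == 0);
    const size_t n = offset / kGranularity;
    cells_[n / kBitsPerCell] |= static_cast<uint8_t>(1u << (n % kBitsPerCell));
  }

  // Returns the start offset of the object containing `offset`, scanning
  // downwards for the closest recorded object start.
  size_t FindStart(size_t offset) const {
    const size_t n = offset / kGranularity;
    size_t cell = n / kBitsPerCell;
    // Mask off bits above the query position, then walk to lower cells.
    uint8_t byte = cells_[cell] &
                   static_cast<uint8_t>((1u << ((n % kBitsPerCell) + 1)) - 1);
    while (!byte && cell > 0) byte = cells_[--cell];
    assert(byte && "no object start recorded at or below offset");
    // Index of the most significant set bit = closest start at or below.
    size_t bit = kBitsPerCell - 1;
    while (!(byte & (1u << bit))) --bit;
    return (cell * kBitsPerCell + bit) * kGranularity;
  }

 private:
  std::array<uint8_t, kCells> cells_{};
};

int main() {
  ToyObjectStartBitmap bitmap;
  bitmap.SetStart(0);
  bitmap.SetStart(128);
  // An interior pointer at offset 200 belongs to the object starting at 128.
  assert(bitmap.FindStart(200) == 128);
  assert(bitmap.FindStart(64) == 0);
  return 0;
}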