#ifndef V8_HEAP_CPPGC_OBJECT_START_BITMAP_H_
#define V8_HEAP_CPPGC_OBJECT_START_BITMAP_H_

#include <limits.h>
#include <stdint.h>

#include <array>

#include "include/cppgc/internal/write-barrier.h"
#include "src/base/atomic-utils.h"
#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"

namespace cppgc {
namespace internal {

// A bitmap for recording object starts. Objects have to be allocated at
// minimum granularity of kAllocationGranularity.
//
// ObjectStartBitmap supports concurrent reads from multiple threads but
// only a single mutator thread can write to it. It relies on being
// allocated on the same normal page as the objects it covers.
class V8_EXPORT_PRIVATE ObjectStartBitmap {
 public:
  // Granularity of addresses added to the bitmap.
  static constexpr size_t Granularity() { return kAllocationGranularity; }

  // Maximum number of entries in the bitmap.
  static constexpr size_t MaxEntries() {
    return kReservedForBitmap * kBitsPerCell;
  }

  // Finds an object header based on an address that may point into the
  // middle of an object. Will search for an object start in decreasing
  // address order.
  template <AccessMode = AccessMode::kNonAtomic>
  inline HeapObjectHeader* FindHeader(
      ConstAddress address_maybe_pointing_to_the_middle_of_object) const;

  template <AccessMode = AccessMode::kNonAtomic>
  inline void SetBit(ConstAddress);
  template <AccessMode = AccessMode::kNonAtomic>
  inline void ClearBit(ConstAddress);
  template <AccessMode = AccessMode::kNonAtomic>
  inline bool CheckBit(ConstAddress) const;

  // Iterates all object starts recorded in the bitmap. The callback is of
  // type void(Address) and is passed the object start address as parameter.
  template <typename Callback>
  inline void Iterate(Callback) const;

  // Marks the bitmap as fully populated. Unpopulated bitmaps are in an
  // inconsistent state and may report false object start bits.
  inline void MarkAsFullyPopulated();

 private:
  template <AccessMode = AccessMode::kNonAtomic>
  inline void store(size_t cell_index, uint8_t value);
  template <AccessMode = AccessMode::kNonAtomic>
  inline uint8_t load(size_t cell_index) const;

  static constexpr size_t kBitsPerCell = sizeof(uint8_t) * CHAR_BIT;
  static constexpr size_t kCellMask = kBitsPerCell - 1;
  static constexpr size_t kBitmapSize =
      (kPageSize + ((kBitsPerCell * kAllocationGranularity) - 1)) /
      (kBitsPerCell * kAllocationGranularity);
  static constexpr size_t kReservedForBitmap =
      ((kBitmapSize + kAllocationMask) & ~kAllocationMask);

  inline void ObjectStartIndexAndBit(ConstAddress, size_t*, size_t*) const;

  bool fully_populated_ = false;
  std::array<uint8_t, kReservedForBitmap> object_start_bit_map_;
};
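// Illustrative usage sketch (not part of the original header; `header` and
// the surrounding page setup are hypothetical). SetBit/CheckBit key off the
// object's HeapObjectHeader address, and FindHeader resolves an interior
// pointer back to the enclosing object's header, provided the object spans
// at least that far:
//
//   ObjectStartBitmap& bitmap = ...;  // embedded in the page's metadata
//   bitmap.SetBit(reinterpret_cast<ConstAddress>(header));
//   DCHECK(bitmap.CheckBit(reinterpret_cast<ConstAddress>(header)));
//   HeapObjectHeader* found = bitmap.FindHeader(
//       reinterpret_cast<ConstAddress>(header) + kAllocationGranularity);
//   DCHECK_EQ(header, found);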
template <AccessMode mode>
HeapObjectHeader* ObjectStartBitmap::FindHeader(
    ConstAddress address_maybe_pointing_to_the_middle_of_object) const {
  DCHECK(fully_populated_);
  const size_t page_base = reinterpret_cast<uintptr_t>(
                               address_maybe_pointing_to_the_middle_of_object) &
                           kPageBaseMask;
  DCHECK_EQ(page_base, reinterpret_cast<uintptr_t>(this) & kPageBaseMask);
  size_t object_offset = reinterpret_cast<uintptr_t>(
                             address_maybe_pointing_to_the_middle_of_object) &
                         kPageOffsetMask;
  size_t object_start_number = object_offset / kAllocationGranularity;
  size_t cell_index = object_start_number / kBitsPerCell;
  DCHECK_GT(object_start_bit_map_.size(), cell_index);
  const size_t bit = object_start_number & kCellMask;
  // Mask out bits past the query granule; the object start must lie at or
  // before the query address.
  uint8_t byte = load<mode>(cell_index) & ((1 << (bit + 1)) - 1);
  while (!byte && cell_index) {
    DCHECK_LT(0u, cell_index);
    byte = load<mode>(--cell_index);
  }
  const int leading_zeroes = v8::base::bits::CountLeadingZeros(byte);
  object_start_number =
      (cell_index * kBitsPerCell) + (kBitsPerCell - 1) - leading_zeroes;
  object_offset = object_start_number * kAllocationGranularity;
  return reinterpret_cast<HeapObjectHeader*>(page_base + object_offset);
}
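// Worked example of the index arithmetic above (assuming, for illustration
// only, kAllocationGranularity == 16 with 8-bit cells): a pointer at page
// offset 0x130 gives object_start_number 19, so cell_index == 2 and
// bit == 3. The mask ((1 << (bit + 1)) - 1) == 0b1111 keeps only starts at
// or below that granule; CountLeadingZeros on the first non-zero byte then
// picks the closest preceding object start.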
template <AccessMode mode>
void ObjectStartBitmap::SetBit(ConstAddress header_address) {
  size_t cell_index, object_bit;
  ObjectStartIndexAndBit(header_address, &cell_index, &object_bit);
  // Only the mutator thread writes the bitmap, so the non-atomic load in
  // this read-modify-write cannot race with another writer.
  store<mode>(cell_index,
              static_cast<uint8_t>(load(cell_index) | (1 << object_bit)));
}
template <AccessMode mode>
void ObjectStartBitmap::ClearBit(ConstAddress header_address) {
  size_t cell_index, object_bit;
  ObjectStartIndexAndBit(header_address, &cell_index, &object_bit);
  store<mode>(cell_index,
              static_cast<uint8_t>(load(cell_index) & ~(1 << object_bit)));
}
template <AccessMode mode>
bool ObjectStartBitmap::CheckBit(ConstAddress header_address) const {
  size_t cell_index, object_bit;
  ObjectStartIndexAndBit(header_address, &cell_index, &object_bit);
  return load<mode>(cell_index) & (1 << object_bit);
}
template <AccessMode mode>
void ObjectStartBitmap::store(size_t cell_index, uint8_t value) {
  if (mode == AccessMode::kNonAtomic) {
    object_start_bit_map_[cell_index] = value;
    return;
  }
  v8::base::AsAtomicPtr(&object_start_bit_map_[cell_index])
      ->store(value, std::memory_order_release);
}
template <AccessMode mode>
uint8_t ObjectStartBitmap::load(size_t cell_index) const {
  if (mode == AccessMode::kNonAtomic) {
    return object_start_bit_map_[cell_index];
  }
  return v8::base::AsAtomicPtr(&object_start_bit_map_[cell_index])
      ->load(std::memory_order_acquire);
}
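// Note on the memory-order pairing above (explanatory, not in the original):
// the release store in store<AccessMode::kAtomic> combined with the acquire
// load here ensures that a concurrent reader observing a set bit also
// observes the object header contents written before SetBit published it.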
void ObjectStartBitmap::ObjectStartIndexAndBit(ConstAddress header_address,
                                               size_t* cell_index,
                                               size_t* bit) const {
  const size_t object_offset =
      reinterpret_cast<uintptr_t>(header_address) & kPageOffsetMask;
  DCHECK(!(object_offset & kAllocationMask));
  const size_t object_start_number = object_offset / kAllocationGranularity;
  *cell_index = object_start_number / kBitsPerCell;
  DCHECK_GT(kBitmapSize, *cell_index);
  *bit = object_start_number & kCellMask;
}
template <typename Callback>
inline void ObjectStartBitmap::Iterate(Callback callback) const {
  const Address page_base = reinterpret_cast<Address>(
      reinterpret_cast<uintptr_t>(this) & kPageBaseMask);
  for (size_t cell_index = 0; cell_index < kReservedForBitmap; cell_index++) {
    if (!object_start_bit_map_[cell_index]) continue;

    uint8_t value = object_start_bit_map_[cell_index];
    while (value) {
      const int trailing_zeroes = v8::base::bits::CountTrailingZeros(value);
      const size_t object_start_number =
          (cell_index * kBitsPerCell) + trailing_zeroes;
      const Address object_address =
          page_base + (kAllocationGranularity * object_start_number);
      callback(object_address);
      // Clear current object bit in temporary value to advance iteration.
      value &= ~(1 << (object_start_number & kCellMask));
    }
  }
}

inline void ObjectStartBitmap::MarkAsFullyPopulated() {
  DCHECK(!fully_populated_);
  fully_populated_ = true;
}
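// Illustrative only: visiting every recorded object start with a lambda.
// The callback receives the start address (Address, i.e. uint8_t*); the
// HeapObjectHeader cast mirrors how callers typically consume it:
//
//   bitmap.Iterate([](Address object_start) {
//     auto* header = reinterpret_cast<HeapObjectHeader*>(object_start);
//     // ... process `header` ...
//   });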
// A platform-aware version of ObjectStartBitmap providing platform-specific
// optimizations (e.g. non-atomic stores on ARMv7 when not marking).
class V8_EXPORT_PRIVATE PlatformAwareObjectStartBitmap
    : public ObjectStartBitmap {
 public:
  template <AccessMode = AccessMode::kNonAtomic>
  inline void SetBit(ConstAddress);
  template <AccessMode = AccessMode::kNonAtomic>
  inline void ClearBit(ConstAddress);

 private:
  template <AccessMode>
  static bool ShouldForceNonAtomic();
};
// static
template <AccessMode mode>
bool PlatformAwareObjectStartBitmap::ShouldForceNonAtomic() {
#if defined(V8_HOST_ARCH_ARM)
  // Use non-atomic accesses when the write barrier (and thus concurrent
  // marking) is disabled, as atomics are expensive on ARMv7.
  if (mode == AccessMode::kAtomic) {
    if (V8_LIKELY(!WriteBarrier::IsEnabled())) return true;
  }
#endif  // defined(V8_HOST_ARCH_ARM)
  return false;
}
template <AccessMode mode>
void PlatformAwareObjectStartBitmap::SetBit(ConstAddress header_address) {
  if (ShouldForceNonAtomic<mode>()) {
    ObjectStartBitmap::SetBit<AccessMode::kNonAtomic>(header_address);
    return;
  }
  ObjectStartBitmap::SetBit<mode>(header_address);
}

template <AccessMode mode>
void PlatformAwareObjectStartBitmap::ClearBit(ConstAddress header_address) {
  if (ShouldForceNonAtomic<mode>()) {
    ObjectStartBitmap::ClearBit<AccessMode::kNonAtomic>(header_address);
    return;
  }
  ObjectStartBitmap::ClearBit<mode>(header_address);
}

}  // namespace internal
}  // namespace cppgc

#endif  // V8_HEAP_CPPGC_OBJECT_START_BITMAP_H_