#ifndef V8_HEAP_SLOT_SET_H_
#define V8_HEAP_SLOT_SET_H_

#include "testing/gtest/include/gtest/gtest_prod.h"

using ::heap::base::KEEP_SLOT;
using ::heap::base::REMOVE_SLOT;
using ::heap::base::SlotCallbackResult;
  void Insert(size_t bucket_index, size_t buckets) {
    if (IsAllocated()) {
      InsertAllocated(bucket_index);
    } else if (bucket_index + 1 < kBitsPerWord) {
      bitmap_ |= static_cast<uintptr_t>(1) << (bucket_index + 1);
    } else {
      Allocate(buckets);
      InsertAllocated(bucket_index);
    }
    DCHECK(Contains(bucket_index));
  }

  bool Contains(size_t bucket_index) {
    if (IsAllocated()) {
      size_t word_idx = bucket_index / kBitsPerWord;
      uintptr_t* word = BitmapArray() + word_idx;
      return *word &
             (static_cast<uintptr_t>(1) << (bucket_index % kBitsPerWord));
    } else if (bucket_index + 1 < kBitsPerWord) {
      return bitmap_ & (static_cast<uintptr_t>(1) << (bucket_index + 1));
    } else {
      return false;
    }
  }
  void Allocate(size_t buckets) {
    DCHECK(!IsAllocated());
    size_t words = WordsForBuckets(buckets);
    uintptr_t* ptr = reinterpret_cast<uintptr_t*>(
        AlignedAllocWithRetry(words * kWordSize, kSystemPointerSize));
    ptr[0] = bitmap_ >> 1;
    for (size_t word_idx = 1; word_idx < words; word_idx++) {
      ptr[word_idx] = 0;
    }
    bitmap_ = reinterpret_cast<Address>(ptr) + kPointerTag;
  }

  void InsertAllocated(size_t bucket_index) {
    DCHECK(IsAllocated());
    size_t word_idx = bucket_index / kBitsPerWord;
    uintptr_t* word = BitmapArray() + word_idx;
    *word |= static_cast<uintptr_t>(1) << (bucket_index % kBitsPerWord);
  }

  uintptr_t* BitmapArray() {
    DCHECK(IsAllocated());
    return reinterpret_cast<uintptr_t*>(bitmap_ & ~kPointerTag);
  }
static_assert(std::is_standard_layout<PossiblyEmptyBuckets>::value);
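PossiblyEmptyBuckets keeps either a small inline bitmap or a tagged pointer to a malloc'ed bitmap in the same word: bit 0 is kPointerTag, so in the inline case bucket i is tracked at bit i + 1, and Allocate() shifts the inline bits down by one when spilling them into word 0 of the external array. The following standalone sketch mirrors that tagging scheme with hypothetical names (TaggedBitmap, spill); it is an illustration, not V8 code.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>

// Hypothetical miniature of the tagged-word scheme used by
// PossiblyEmptyBuckets. Bit 0 distinguishes "inline bitmap" (0) from
// "pointer to external bitmap" (1).
struct TaggedBitmap {
  static constexpr uintptr_t kPointerTag = 1;
  static constexpr size_t kBitsPerWord = sizeof(uintptr_t) * 8;
  uintptr_t word = 0;

  bool is_allocated() const { return word & kPointerTag; }

  void insert_inline(size_t i) {
    assert(!is_allocated() && i + 1 < kBitsPerWord);
    word |= uintptr_t{1} << (i + 1);  // bit 0 stays reserved for the tag
  }

  bool contains(size_t i) const {
    if (is_allocated()) {
      const uintptr_t* array =
          reinterpret_cast<const uintptr_t*>(word & ~kPointerTag);
      return (array[i / kBitsPerWord] >> (i % kBitsPerWord)) & 1;
    }
    return (word >> (i + 1)) & 1;
  }

  // Mirrors Allocate(): copy the inline bits, shifted past the tag bit, into
  // word 0 of a zeroed external array, then store the tagged pointer.
  void spill(size_t words) {
    uintptr_t* array =
        static_cast<uintptr_t*>(calloc(words, sizeof(uintptr_t)));
    array[0] = word >> 1;
    word = reinterpret_cast<uintptr_t>(array) | kPointerTag;
  }
};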
  template <v8::internal::AccessMode access_mode>
  static constexpr BasicSlotSet::AccessMode ConvertAccessMode() {
    switch (access_mode) {
      case v8::internal::AccessMode::ATOMIC:
        return BasicSlotSet::AccessMode::ATOMIC;
      case v8::internal::AccessMode::NON_ATOMIC:
        return BasicSlotSet::AccessMode::NON_ATOMIC;
    }
  }

  // Like BasicSlotSet::Iterate(), but the callback takes a MaybeObjectSlot.
  template <
      v8::internal::AccessMode access_mode = v8::internal::AccessMode::ATOMIC,
      typename Callback>
  size_t Iterate(Address chunk_start, size_t start_bucket, size_t end_bucket,
                 Callback callback, EmptyBucketMode mode) {
    return BasicSlotSet::Iterate<ConvertAccessMode<access_mode>()>(
        chunk_start, start_bucket, end_bucket,
        [&callback](Address slot) { return callback(MaybeObjectSlot(slot)); },
        [this, mode](size_t bucket_index) {
          if (mode == EmptyBucketMode::FREE_EMPTY_BUCKETS) {
            ReleaseBucket(bucket_index);
          }
        });
  }

  // Like Iterate(), but records possibly empty buckets instead of freeing them.
  template <typename Callback>
  size_t IterateAndTrackEmptyBuckets(
      Address chunk_start, size_t start_bucket, size_t end_bucket,
      Callback callback, PossiblyEmptyBuckets* possibly_empty_buckets) {
    return BasicSlotSet::Iterate(
        chunk_start, start_bucket, end_bucket,
        [&callback](Address slot) { return callback(MaybeObjectSlot(slot)); },
        [possibly_empty_buckets, end_bucket](size_t bucket_index) {
          possibly_empty_buckets->Insert(bucket_index, end_bucket);
        });
  }
  bool CheckPossiblyEmptyBuckets(size_t buckets,
                                 PossiblyEmptyBuckets* possibly_empty_buckets) {
    bool empty = true;
    for (size_t bucket_index = 0; bucket_index < buckets; bucket_index++) {
      Bucket* bucket = LoadBucket<AccessMode::NON_ATOMIC>(bucket_index);
      if (bucket) {
        if (possibly_empty_buckets->Contains(bucket_index)) {
          if (bucket->IsEmpty()) {
            ReleaseBucket<AccessMode::NON_ATOMIC>(bucket_index);
          } else {
            empty = false;
          }
        } else {
          empty = false;
        }
      }
    }
    possibly_empty_buckets->Release();
    return empty;
  }

  void Merge(SlotSet* other, size_t buckets) {
    for (size_t bucket_index = 0; bucket_index < buckets; bucket_index++) {
      Bucket* other_bucket =
          other->LoadBucket<AccessMode::NON_ATOMIC>(bucket_index);
      if (!other_bucket) continue;
      // ... (copy or OR the cells of other_bucket into this set's bucket)
    }
  }
static_assert(std::is_standard_layout<SlotSet>::value);
static_assert(std::is_standard_layout<SlotSet::Bucket>::value);
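A hedged usage sketch of the Iterate() wrapper above: the caller's callback receives a MaybeObjectSlot and returns KEEP_SLOT or REMOVE_SLOT, and passing FREE_EMPTY_BUCKETS lets the set release buckets that became empty during the walk. slot_set, chunk_start, buckets, and StillPointsToLiveObject() are assumed/hypothetical here; real bucket bounds come from the owning memory chunk.

// Hypothetical caller; assumes a SlotSet* slot_set covering a page that
// starts at chunk_start and spans `buckets` buckets.
size_t remaining = slot_set->Iterate(
    chunk_start, /*start_bucket=*/0, /*end_bucket=*/buckets,
    [](MaybeObjectSlot slot) {
      // Placeholder predicate; a real callback would inspect the slot contents.
      return StillPointsToLiveObject(slot) ? KEEP_SLOT : REMOVE_SLOT;
    },
    SlotSet::EmptyBucketMode::FREE_EMPTY_BUCKETS);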
  static const int kMaxOffset = 1 << 29;
  // ...
  static const size_t kInitialBufferSize = 100;
  static const size_t kMaxBufferSize = 16 * KB;
  static size_t NextCapacity(size_t capacity) {
    return std::min({kMaxBufferSize, capacity * 2});
  }
  Chunk* EnsureChunk();
  Chunk* NewChunk(Chunk* next, size_t capacity);
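For orientation, NextCapacity() doubles a chunk's buffer capacity and clamps it at kMaxBufferSize, so successive chunk sizes run 100, 200, 400, ... up to the 16 * KB cap (assuming V8's KB == 1024 and capacities counted in TypedSlot entries). A tiny standalone sketch of that growth policy:

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  // Constants mirror kInitialBufferSize and kMaxBufferSize above; KB assumed
  // to be 1024.
  constexpr size_t kInitialBufferSize = 100;
  constexpr size_t kMaxBufferSize = 16 * 1024;
  size_t capacity = kInitialBufferSize;
  while (capacity < kMaxBufferSize) {
    size_t next = std::min(kMaxBufferSize, capacity * 2);
    std::printf("%zu -> %zu\n", capacity, next);
    capacity = next;
  }
  // Prints 100 -> 200 -> ... -> 12800 -> 16384; later chunks stay at the cap.
  return 0;
}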
  template <typename Callback>
  int Iterate(Callback callback, IterationMode mode) {
    static_assert(static_cast<uint8_t>(SlotType::kLast) < 8);
    Chunk* chunk = head_;
    Chunk* previous = nullptr;
    int new_count = 0;
    while (chunk != nullptr) {
      bool empty = true;
      for (TypedSlot& slot : chunk->buffer) {
        SlotType type = TypeField::decode(slot.type_and_offset);
        if (type != SlotType::kCleared) {
          uint32_t offset = OffsetField::decode(slot.type_and_offset);
          Address addr = page_start_ + offset;
          if (callback(type, addr) == KEEP_SLOT) {
            new_count++;
            empty = false;
          } else {
            slot = ClearedTypedSlot();
          }
        }
      }
      Chunk* next = chunk->next;
      if (mode == FREE_EMPTY_CHUNKS && empty) {
        // Unlink and free the empty chunk; its next pointer stays intact so a
        // concurrent iteration can still proceed past it.
        if (previous) {
          StoreNext(previous, next);
        } else {
          StoreHead(next);
        }
        delete chunk;
      } else {
        previous = chunk;
      }
      chunk = next;
    }
    return new_count;
  }
  // Clears all slots that have an offset within the specified ranges. This can
  // run concurrently to Iterate().
  void ClearInvalidSlots(const FreeRangesMap& invalid_ranges);

  // Asserts that there are no recorded slots in the specified ranges.
  void AssertNoInvalidSlots(const FreeRangesMap& invalid_ranges);

 private:
  template <typename Callback>
  void IterateSlotsInRanges(Callback callback,
                            const FreeRangesMap& invalid_ranges);

  // Atomic accessors used by Iterate() and ClearInvalidSlots().
  Chunk* LoadNext(Chunk* chunk) {
    return base::AsAtomicPointer::Relaxed_Load(&chunk->next);
  }
  void StoreNext(Chunk* chunk, Chunk* next) {
    return base::AsAtomicPointer::Relaxed_Store(&chunk->next, next);
  }
  void StoreHead(Chunk* chunk) {
    base::AsAtomicPointer::Relaxed_Store(&head_, chunk);
  }
  static TypedSlot ClearedTypedSlot() {
    return TypedSlot{TypeField::encode(SlotType::kCleared) |
                     OffsetField::encode(0)};
  }

  Address page_start_;
};
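kMaxOffset (1 << 29) together with the static_assert that SlotType::kLast fits in three bits implies that each TypedSlot packs its type and page offset into a single 32-bit type_and_offset word, presumably offset in the low 29 bits and type in the upper 3 (V8 uses base::BitField for the actual encoding). A minimal illustration with plain shifts, not V8's BitField:

#include <cstdint>

// Abbreviated, illustrative enum; the real SlotType has more members.
enum class SlotType : uint8_t {
  kEmbeddedObjectFull,
  kCleared,
  kLast = kCleared  // must stay < 8 so the type fits in 3 bits
};

constexpr uint32_t kOffsetBits = 29;  // matches kMaxOffset = 1 << 29
constexpr uint32_t kOffsetMask = (uint32_t{1} << kOffsetBits) - 1;

constexpr uint32_t Encode(SlotType type, uint32_t offset) {
  return (static_cast<uint32_t>(type) << kOffsetBits) | (offset & kOffsetMask);
}
constexpr SlotType DecodeType(uint32_t word) {
  return static_cast<SlotType>(word >> kOffsetBits);
}
constexpr uint32_t DecodeOffset(uint32_t word) { return word & kOffsetMask; }

static_assert(DecodeType(Encode(SlotType::kCleared, 0x12345)) ==
              SlotType::kCleared);
static_assert(DecodeOffset(Encode(SlotType::kCleared, 0x12345)) == 0x12345);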
constexpr int kPageSizeBits
static constexpr int kBitsPerCell
void ReleaseBucket(size_t bucket_index)
void StoreBucket(Bucket **bucket, Bucket *value)
size_t Iterate(Address chunk_start, size_t start_bucket, size_t end_bucket, Callback callback, EmptyBucketMode mode)
Bucket ** bucket(size_t bucket_index)
static constexpr int kCellsPerBucket
Bucket * LoadBucket(Bucket **bucket)
static BasicSlotSet * Allocate(size_t buckets)
PossiblyEmptyBuckets()=default
uintptr_t * BitmapArray()
PossiblyEmptyBuckets(const PossiblyEmptyBuckets &)=delete
static constexpr Address kPointerTag
static constexpr int kWordSize
void Allocate(size_t buckets)
static size_t WordsForBuckets(size_t buckets)
void Insert(size_t bucket_index, size_t buckets)
FRIEND_TEST(PossiblyEmptyBucketsTest, WordsForBuckets)
bool Contains(size_t bucket_index)
PossiblyEmptyBuckets(PossiblyEmptyBuckets &&other) V8_NOEXCEPT
void InsertAllocated(size_t bucket_index)
static constexpr int kBitsPerWord
PossiblyEmptyBuckets & operator=(const PossiblyEmptyBuckets &)=delete
size_t Iterate(Address chunk_start, size_t start_bucket, size_t end_bucket, Callback callback, EmptyBucketMode mode)
static SlotSet * Allocate(size_t buckets)
size_t IterateAndTrackEmptyBuckets(Address chunk_start, size_t start_bucket, size_t end_bucket, Callback callback, PossiblyEmptyBuckets *possibly_empty_buckets)
static constexpr BasicSlotSet::AccessMode ConvertAccessMode()
bool CheckPossiblyEmptyBuckets(size_t buckets, PossiblyEmptyBuckets *possibly_empty_buckets)
static const int kBucketsRegularPage
void Merge(SlotSet *other, size_t buckets)
std::map< uint32_t, uint32_t > FreeRangesMap
int Iterate(Callback callback, IterationMode mode)
TypedSlotSet(Address page_start)
Chunk * LoadNext(Chunk *chunk)
static TypedSlot ClearedTypedSlot()
void StoreHead(Chunk *chunk)
void StoreNext(Chunk *chunk, Chunk *next)
static size_t NextCapacity(size_t capacity)
constexpr int kTaggedSize
constexpr int kBitsPerByte
void * AlignedAllocWithRetry(size_t size, size_t alignment)
constexpr int kSystemPointerSize
void AlignedFree(void *ptr)
kConstPoolEmbeddedObjectCompressed
kConstPoolEmbeddedObjectFull
kEmbeddedObjectCompressed
static constexpr Address kNullAddress
SlotTraits::TMaybeObjectSlot MaybeObjectSlot
#define DCHECK(condition)
#define V8_EXPORT_PRIVATE
std::vector< TypedSlot > buffer