thread_local MarkingBarrier* current_marking_barrier = nullptr;
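// Per-thread cache of the current LocalHeap's MarkingBarrier, installed via
// SetForThread() and read back by CurrentMarkingBarrier(), so the slow-path
// barrier entry points below do not have to re-derive it on every write.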
  if (!verification_candidate.is_null() &&
  LocalHeap* local_heap = LocalHeap::Current();
  return marking_barrier;
  current_marking_barrier = marking_barrier;
  marking_barrier->Write(host, slot, value);
#ifdef V8_COMPRESS_POINTERS
  CppHeapPointerTable& table = isolate.GetCppHeapPointerTable();
  CppHeapPointerTable::Space* space = isolate.GetCppHeapPointerTableSpace();
  if (heap->cpp_heap() && object) {
  marking_barrier->Write(host, reloc_info, value);
      host_chunk_metadata, host_chunk_metadata->Offset(slot.address()));
                           int number_of_own_descriptors) {
  marking_barrier->Write(descriptor_array, number_of_own_descriptors);
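// Descriptor arrays use a dedicated barrier overload: instead of a slot, the
// marking barrier receives the number of descriptors the host map owns, so
// that incremental marking is guaranteed to (re)visit at least that many
// entries of the array.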
#ifdef V8_COMPRESS_POINTERS
  if (!slot.HasExternalPointerHandle()) return;
  ExternalPointerTable& table =
      isolate.GetExternalPointerTableFor(slot.tag_range());
  ExternalPointerTable::Space* space =
  marking_barrier->Write(host, slot);
  marking_barrier->Write(host, slot, value);
#ifdef V8_ENABLE_LEAPTIERING
  if (marking_barrier->is_minor()) return;
  static_assert(JSDispatchTable::kWriteBarrierSetsEntryMarkBit);
  static_assert(!JSDispatchTable::kSupportsCompaction);
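// Both properties are relied on here: the barrier may mark a dispatch-table
// entry directly by setting its mark bit, and it never needs to account for
// entries moving, since the table does not support compaction.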
  value = (value & ~Internals::kMapWordMetadataMask &
  DCHECK(heap->incremental_marking()->IsMarking());
  barrier->AssertMarkingIsActivated();
  Address raw_value = (*slot).ptr();
  DCHECK(heap->incremental_marking()->IsMajorMarking());
  DCHECK(isolate->is_shared_space_isolate());
  barrier->AssertSharedMarkingIsActivated();
  Heap* heap = Heap::FromWritableHeapObject(object);
         heap->incremental_marking()->IsMajorMarking());
  if (LocalHeap::Current() == nullptr) {
        metadata, chunk->Offset(slot));
        metadata, chunk->Offset(slot));
enum RangeWriteBarrierMode {
  kDoGenerationalOrShared = 1 << 0,
  kDoMarking = 1 << 1,
  kDoEvacuationSlotRecording = 1 << 2,
};
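// The modes combine as a bit mask; the ForRange() dispatch below only
// instantiates the combinations that actually occur:
//   kDoGenerationalOrShared
//   kDoMarking, optionally | kDoEvacuationSlotRecording
//   kDoGenerationalOrShared | kDoMarking, optionally | kDoEvacuationSlotRecording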
template <int kModeMask, typename TSlot>
void ForRangeImpl(Heap* heap, MemoryChunk* source_chunk,
  static_assert(kModeMask & (kDoGenerationalOrShared | kDoMarking));
  static_assert(!(kModeMask & kDoEvacuationSlotRecording) ||
                (kModeMask & kDoMarking));
  MarkingBarrier* marking_barrier = nullptr;
  static constexpr Tagged_t kPageMask =
  Tagged_t cached_uninteresting_page =
      static_cast<Tagged_t>(heap->read_only_space()->FirstPageAddress()) &
  if (kModeMask & kDoMarking) {
    MarkCompactCollector* collector = heap->mark_compact_collector();
    MutablePageMetadata* source_page_metadata =
  for (TSlot slot = start_slot; slot < end_slot; ++slot) {
    if (kModeMask == kDoGenerationalOrShared) {
      Tagged_t tagged_value = *slot.location();
      compressed_page = tagged_value & kPageMask;
      if (compressed_page == cached_uninteresting_page) {
        typename TSlot::TObject value = *slot;
        if (value.GetHeapObject(&value_heap_object)) {
    typename TSlot::TObject value = *slot;
    if (!value.GetHeapObject(&value_heap_object)) continue;
    if (kModeMask & kDoGenerationalOrShared) {
            source_page_metadata, source_chunk->Offset(slot.address()));
            source_page_metadata, source_chunk->Offset(slot.address()));
      } else if (kModeMask == kDoGenerationalOrShared) {
        cached_uninteresting_page = compressed_page;
    if (kModeMask & kDoMarking) {
      marking_barrier->MarkValue(object, value_heap_object);
      if (kModeMask & kDoEvacuationSlotRecording) {
template <typename TSlot>
void WriteBarrier::ForRange(Heap* heap, Tagged<HeapObject> object,
                            TSlot start_slot, TSlot end_slot) {
  if (v8_flags.disable_write_barriers) return;
    mode |= kDoGenerationalOrShared;
  if (heap->incremental_marking()->IsMarking()) {
      mode |= kDoEvacuationSlotRecording;
    case kDoGenerationalOrShared:
      return ForRangeImpl<kDoGenerationalOrShared>(heap, source_chunk, object,
                                                   start_slot, end_slot);
    case kDoMarking:
      return ForRangeImpl<kDoMarking>(heap, source_chunk, object, start_slot,
                                      end_slot);
    case kDoMarking | kDoEvacuationSlotRecording:
      return ForRangeImpl<kDoMarking | kDoEvacuationSlotRecording>(
          heap, source_chunk, object, start_slot, end_slot);
    case kDoGenerationalOrShared | kDoMarking:
      return ForRangeImpl<kDoGenerationalOrShared | kDoMarking>(
          heap, source_chunk, object, start_slot, end_slot);
    case kDoGenerationalOrShared | kDoMarking | kDoEvacuationSlotRecording:
      return ForRangeImpl<kDoGenerationalOrShared | kDoMarking |
                          kDoEvacuationSlotRecording>(
          heap, source_chunk, object, start_slot, end_slot);
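// A minimal usage sketch (hypothetical call site, not part of this file):
// after bulk-copying the tagged fields of `dst`, a caller would restore the
// barriers for the whole range roughly as
//   WriteBarrier::ForRange(heap, dst, dst->RawField(kStartOffset),
//                          dst->RawField(kEndOffset));
// where kStartOffset/kEndOffset are illustrative names; ForRange then selects
// one of the mode-mask instantiations above from the current marking and
// generational state.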
#ifdef ENABLE_SLOW_DCHECKS
#ifdef V8_ENABLE_LEAPTIERING
      WriteBarrier::IsRequired(host, jdt->GetCode(handle))) {
    if (jdt->IsMarked(handle)) {