v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
heap-write-barrier.cc
1// Copyright 2020 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
9#include "src/heap/heap.h"
17
18namespace v8::internal {
19
20class DescriptorArray;
21
22namespace {
23thread_local MarkingBarrier* current_marking_barrier = nullptr;
24} // namespace
25
26MarkingBarrier* WriteBarrier::CurrentMarkingBarrier(
27 Tagged<HeapObject> verification_candidate) {
28 MarkingBarrier* marking_barrier = current_marking_barrier;
29 DCHECK_NOT_NULL(marking_barrier);
30#if DEBUG
31 if (!verification_candidate.is_null() &&
32 !HeapLayout::InAnySharedSpace(verification_candidate)) {
33 Heap* host_heap =
34 MutablePageMetadata::FromHeapObject(verification_candidate)->heap();
35 LocalHeap* local_heap = LocalHeap::Current();
36 if (!local_heap) local_heap = host_heap->main_thread_local_heap();
37 DCHECK_EQ(marking_barrier, local_heap->marking_barrier());
38 }
39#endif // DEBUG
40 return marking_barrier;
41}
42
43MarkingBarrier* WriteBarrier::SetForThread(MarkingBarrier* marking_barrier) {
44 MarkingBarrier* existing = current_marking_barrier;
45 current_marking_barrier = marking_barrier;
46 return existing;
47}
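
CurrentMarkingBarrier() reads a thread-local pointer that SetForThread() installs, and SetForThread() hands back the previously installed barrier so the caller can restore it when it is done. A minimal standalone sketch of that save/restore pattern (not part of V8; Barrier, BarrierScope and g_current_barrier are stand-ins invented for illustration):

#include <cassert>

struct Barrier {};  // stand-in for MarkingBarrier

thread_local Barrier* g_current_barrier = nullptr;

Barrier* SetForThread(Barrier* barrier) {
  Barrier* previous = g_current_barrier;
  g_current_barrier = barrier;
  return previous;  // caller keeps this to restore it later
}

// RAII helper: install a barrier for the current scope, restore the old one.
class BarrierScope {
 public:
  explicit BarrierScope(Barrier* barrier) : previous_(SetForThread(barrier)) {}
  ~BarrierScope() { SetForThread(previous_); }

 private:
  Barrier* previous_;
};

int main() {
  Barrier outer, inner;
  SetForThread(&outer);
  {
    BarrierScope scope(&inner);
    assert(g_current_barrier == &inner);
  }
  assert(g_current_barrier == &outer);  // previous barrier restored on scope exit
}
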
48
57
58// static
59void WriteBarrier::MarkingSlowFromTracedHandle(Tagged<HeapObject> value) {
60 MarkingBarrier* marking_barrier = CurrentMarkingBarrier(value);
61 marking_barrier->WriteWithoutHost(value);
62}
63
64// static
65void WriteBarrier::MarkingSlowFromCppHeapWrappable(Heap* heap,
66 Tagged<JSObject> host,
67 CppHeapPointerSlot slot,
68 void* object) {
69 // Note: this is currently a combined barrier for marking both the
70 // CppHeapPointerTable entry and the referenced object (if any).
71
72#ifdef V8_COMPRESS_POINTERS
73 MarkingBarrier* marking_barrier = CurrentMarkingBarrier(host);
74 IsolateForPointerCompression isolate(marking_barrier->heap()->isolate());
75
76 CppHeapPointerTable& table = isolate.GetCppHeapPointerTable();
77 CppHeapPointerTable::Space* space = isolate.GetCppHeapPointerTableSpace();
78
79 ExternalPointerHandle handle = slot.Relaxed_LoadHandle();
80 table.Mark(space, handle, slot.address());
81#endif // V8_COMPRESS_POINTERS
82
83 if (heap->cpp_heap() && object) {
84 CppHeap::From(heap->cpp_heap())->WriteBarrier(object);
85 }
86}
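
The pointer-table barriers in this file (the CppHeapPointerTable case above and the ExternalPointerTable case further down) share one shape: read the handle stored in the slot, then mark the table entry that the handle refers to so the entry stays alive across GC. A standalone sketch of that shape with simplified, invented types (PointerTable, Entry, Handle):

#include <cassert>
#include <cstdint>
#include <vector>

using Handle = uint32_t;

struct Entry {
  uintptr_t payload;
  bool marked;
};

// A side table addressed by handle; marking an entry keeps it alive across GC.
struct PointerTable {
  std::vector<Entry> entries;

  Handle Allocate(uintptr_t payload) {
    entries.push_back({payload, false});
    return static_cast<Handle>(entries.size() - 1);
  }
  void Mark(Handle handle) { entries[handle].marked = true; }
};

int main() {
  PointerTable table;
  Handle handle = table.Allocate(0x1234);  // the object's slot stores this handle
  table.Mark(handle);                      // what the barrier does for the entry
  assert(table.entries[handle].marked);
}
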
87
88void WriteBarrier::MarkingSlow(Tagged<InstructionStream> host,
89 RelocInfo* reloc_info,
90 Tagged<HeapObject> value) {
91 MarkingBarrier* marking_barrier = CurrentMarkingBarrier(host);
92 marking_barrier->Write(host, reloc_info, value);
93}
94
95// static
96void WriteBarrier::SharedSlow(Tagged<InstructionStream> host,
97 RelocInfo* reloc_info, Tagged<HeapObject> value) {
98 MarkCompactCollector::RecordRelocSlotInfo info =
99 MarkCompactCollector::ProcessRelocInfo(host, reloc_info, value);
100
101 base::MutexGuard write_scope(info.page_metadata->mutex());
102 RememberedSet<OLD_TO_SHARED>::InsertTyped(info.page_metadata, info.slot_type,
103 info.offset);
104}
105
106// static
114
115// static
116void WriteBarrier::SharedSlow(Tagged<TrustedObject> host,
117 ProtectedPointerSlot slot,
118 Tagged<TrustedObject> value) {
119 DCHECK(MemoryChunk::FromHeapObject(value)->InWritableSharedSpace());
120 if (!MemoryChunk::FromHeapObject(host)->InWritableSharedSpace()) {
121 MutablePageMetadata* host_chunk_metadata =
122 MutablePageMetadata::FromHeapObject(host);
123 RememberedSet<TRUSTED_TO_SHARED_TRUSTED>::Insert<AccessMode::NON_ATOMIC>(
124 host_chunk_metadata, host_chunk_metadata->Offset(slot.address()));
125 }
126}
127
128void WriteBarrier::MarkingSlow(Tagged<JSArrayBuffer> host,
129 ArrayBufferExtension* extension) {
130 MarkingBarrier* marking_barrier = CurrentMarkingBarrier(host);
131 marking_barrier->Write(host, extension);
132}
133
134void WriteBarrier::MarkingSlow(Tagged<DescriptorArray> descriptor_array,
135 int number_of_own_descriptors) {
136 MarkingBarrier* marking_barrier = CurrentMarkingBarrier(descriptor_array);
137 marking_barrier->Write(descriptor_array, number_of_own_descriptors);
138}
139
140void WriteBarrier::MarkingSlow(Tagged<HeapObject> host,
141 ExternalPointerSlot slot) {
142#ifdef V8_COMPRESS_POINTERS
143 if (!slot.HasExternalPointerHandle()) return;
144
145 MarkingBarrier* marking_barrier = CurrentMarkingBarrier(host);
146 IsolateForPointerCompression isolate(marking_barrier->heap()->isolate());
147
148 ExternalPointerTable& table =
149 isolate.GetExternalPointerTableFor(slot.tag_range());
150 ExternalPointerTable::Space* space =
151 isolate.GetExternalPointerTableSpaceFor(slot.tag_range(), host.address());
152
153 ExternalPointerHandle handle = slot.Relaxed_LoadHandle();
154 table.Mark(space, handle, slot.address());
155#endif // V8_COMPRESS_POINTERS
156}
157
158void WriteBarrier::MarkingSlow(Tagged<HeapObject> host,
159 IndirectPointerSlot slot) {
160 MarkingBarrier* marking_barrier = CurrentMarkingBarrier(host);
161 marking_barrier->Write(host, slot);
162}
163
164void WriteBarrier::MarkingSlow(Tagged<TrustedObject> host,
165 ProtectedPointerSlot slot,
166 Tagged<TrustedObject> value) {
167 MarkingBarrier* marking_barrier = CurrentMarkingBarrier(host);
168 marking_barrier->Write(host, slot, value);
169}
170
171void WriteBarrier::MarkingSlow(Tagged<HeapObject> host,
172 JSDispatchHandle handle) {
173#ifdef V8_ENABLE_LEAPTIERING
174 MarkingBarrier* marking_barrier = CurrentMarkingBarrier(host);
175
176 // The JSDispatchTable is only marked during major GC so we can skip the
177 // barrier if we're only doing a minor GC.
178 // This is mostly an optimization, but it does help avoid scenarios where a
179 // minor GC marking barrier marks a table entry as alive but not the Code
180 // object contained in it (because it's not a young-gen object).
181 if (marking_barrier->is_minor()) return;
182
183 // Mark both the table entry and its content.
184 JSDispatchTable* jdt = IsolateGroup::current()->js_dispatch_table();
185 static_assert(JSDispatchTable::kWriteBarrierSetsEntryMarkBit);
186 jdt->Mark(handle);
187 marking_barrier->MarkValue(host, jdt->GetCode(handle));
188
189 // We don't need to record a slot here because the entries in the
190 // JSDispatchTable are not compacted and because the pointers stored in the
191 // table entries are updated after compacting GC.
192 static_assert(!JSDispatchTable::kSupportsCompaction);
193#else
194 UNREACHABLE();
195#endif
196}
197
198int WriteBarrier::MarkingFromCode(Address raw_host, Address raw_slot) {
199 Tagged<HeapObject> host = Cast<HeapObject>(Tagged<Object>(raw_host));
200 MaybeObjectSlot slot(raw_slot);
201 Address value = (*slot).ptr();
202
203#ifdef V8_MAP_PACKING
204 if (slot.address() == host.address()) {
205 // Clear metadata bits and fix object tag.
206 value = (value & ~Internals::kMapWordMetadataMask &
207 ~Internals::kMapWordXorMask) |
208 (uint64_t)kHeapObjectTag;
209 }
210#endif
211
212#if DEBUG
214 DCHECK(heap->incremental_marking()->IsMarking());
215
216 // We will only reach local objects here while incremental marking in the
217 // current isolate is enabled. However, we might still reach objects in the
218 // shared space but only from the shared space isolate (= the main isolate).
219 MarkingBarrier* barrier = CurrentMarkingBarrier(host);
220 DCHECK_IMPLIES(HeapLayout::InWritableSharedSpace(host),
221 barrier->heap()->isolate()->is_shared_space_isolate());
222 barrier->AssertMarkingIsActivated();
223#endif // DEBUG
224
225 Marking(host, slot, Tagged<MaybeObject>(value));
226 // Called by WriteBarrierCodeStubAssembler, which doesn't accept void type
227 return 0;
228}
229
230int WriteBarrier::IndirectPointerMarkingFromCode(Address raw_host,
231 Address raw_slot,
232 Address raw_tag) {
233 Tagged<HeapObject> host = Cast<HeapObject>(Tagged<Object>(raw_host));
234 IndirectPointerTag tag = static_cast<IndirectPointerTag>(raw_tag);
235 DCHECK(IsValidIndirectPointerTag(tag));
236 IndirectPointerSlot slot(raw_slot, tag);
237
238#if DEBUG
240 MarkingBarrier* barrier = CurrentMarkingBarrier(host);
241 DCHECK(barrier->heap()->isolate()->isolate_data()->is_marking());
242
243 DCHECK(IsExposedTrustedObject(slot.load(barrier->heap()->isolate())));
244#endif
245
246 Marking(host, slot);
247 // Called by WriteBarrierCodeStubAssembler, which doesn't accept void type
248 return 0;
249}
250
251int WriteBarrier::SharedMarkingFromCode(Address raw_host, Address raw_slot) {
252 Tagged<HeapObject> host = Cast<HeapObject>(Tagged<Object>(raw_host));
253 MaybeObjectSlot slot(raw_slot);
254 Address raw_value = (*slot).ptr();
255 Tagged<MaybeObject> value(raw_value);
256
258
259#if DEBUG
261 DCHECK(heap->incremental_marking()->IsMajorMarking());
262 Isolate* isolate = heap->isolate();
263 DCHECK(isolate->is_shared_space_isolate());
264
265 // The shared marking barrier will only be reached from client isolates (=
266 // worker isolates).
267 MarkingBarrier* barrier = CurrentMarkingBarrier(host);
268 DCHECK(!barrier->heap()->isolate()->is_shared_space_isolate());
269 barrier->AssertSharedMarkingIsActivated();
270#endif // DEBUG
271
272 Marking(host, slot, Tagged<MaybeObject>(value));
273
274 // Called by WriteBarrierCodeStubAssembler, which doesn't accept void type
275 return 0;
276}
277
278int WriteBarrier::SharedFromCode(Address raw_host, Address raw_slot) {
279 Tagged<HeapObject> host = Cast<HeapObject>(Tagged<Object>(raw_host));
280
281 if (!HeapLayout::InWritableSharedSpace(host)) {
282 SharedHeapBarrierSlow(host, raw_slot);
283 }
284
285 // Called by WriteBarrierCodeStubAssembler, which doesn't accept void type
286 return 0;
287}
288
289// static
290bool WriteBarrier::PageFlagsAreConsistent(Tagged<HeapObject> object) {
291 MemoryChunkMetadata* metadata = MemoryChunkMetadata::FromHeapObject(object);
292 MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
293
294 // Slim chunk flags consistency.
295 CHECK_EQ(chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING),
296 chunk->IsMarking());
297
298 if (!v8_flags.sticky_mark_bits) {
299 AllocationSpace identity = metadata->owner()->identity();
300
301 // Generation consistency.
302 CHECK_EQ(identity == NEW_SPACE || identity == NEW_LO_SPACE,
303 chunk->InYoungGeneration());
304 }
305
306 // Marking consistency.
307 if (metadata->IsWritable()) {
308 // RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
309 // find a heap. The exception is when the ReadOnlySpace is writeable, during
310 // bootstrapping, so explicitly allow this case.
311 Heap* heap = Heap::FromWritableHeapObject(object);
312 if (chunk->InWritableSharedSpace()) {
313 // The marking bit is not set for chunks in shared spaces during MinorMS
314 // concurrent marking.
315 CHECK_EQ(chunk->IsMarking(),
316 heap->incremental_marking()->IsMajorMarking());
317 } else {
318 CHECK_EQ(chunk->IsMarking(), heap->incremental_marking()->IsMarking());
319 }
320 } else {
321 // Non-writable RO_SPACE must never have marking flag set.
322 CHECK(!chunk->IsMarking());
323 }
324 return true;
325}
326
327// static
328void WriteBarrier::GenerationalBarrierForCodeSlow(
329 Tagged<InstructionStream> host, RelocInfo* rinfo,
330 Tagged<HeapObject> value) {
332 MarkCompactCollector::RecordRelocSlotInfo info =
333 MarkCompactCollector::ProcessRelocInfo(host, rinfo, value);
334
335 base::MutexGuard write_scope(info.page_metadata->mutex());
336 RememberedSet<OLD_TO_NEW>::InsertTyped(info.page_metadata, info.slot_type,
337 info.offset);
338}
339
340// static
357
358// static
359void WriteBarrier::CombinedGenerationalAndSharedBarrierSlow(
360 Tagged<HeapObject> object, Address slot, Tagged<HeapObject> value) {
361 if (HeapLayout::InYoungGeneration(value)) {
362 GenerationalBarrierSlow(object, slot, value);
363
364 } else {
365 DCHECK(MemoryChunk::FromHeapObject(value)->InWritableSharedSpace());
367 SharedHeapBarrierSlow(object, slot);
368 }
369}
370
371// static
372void WriteBarrier::GenerationalBarrierSlow(Tagged<HeapObject> object,
373 Address slot,
374 Tagged<HeapObject> value) {
375 MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
376 MutablePageMetadata* metadata = MutablePageMetadata::cast(chunk->Metadata());
377 if (LocalHeap::Current() == nullptr) {
378 RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
379 metadata, chunk->Offset(slot));
380 } else {
381 RememberedSet<OLD_TO_NEW_BACKGROUND>::Insert<AccessMode::ATOMIC>(
382 metadata, chunk->Offset(slot));
383 }
384}
385
386// static
387void WriteBarrier::EphemeronKeyWriteBarrierFromCode(Address raw_object,
388 Address key_slot_address,
389 Isolate* isolate) {
390 Tagged<EphemeronHashTable> table =
391 Cast<EphemeronHashTable>(Tagged<Object>(raw_object));
392 ObjectSlot key_slot(key_slot_address);
393 ForEphemeronHashTable(table, key_slot, *key_slot, UPDATE_WRITE_BARRIER);
394}
395
396namespace {
397
398enum RangeWriteBarrierMode {
399 kDoGenerationalOrShared = 1 << 0,
400 kDoMarking = 1 << 1,
401 kDoEvacuationSlotRecording = 1 << 2,
402};
403
404template <int kModeMask, typename TSlot>
405void ForRangeImpl(Heap* heap, MemoryChunk* source_chunk,
406 Tagged<HeapObject> object, TSlot start_slot, TSlot end_slot) {
407 // At least one of generational or marking write barrier should be requested.
408 static_assert(kModeMask & (kDoGenerationalOrShared | kDoMarking));
409 // kDoEvacuationSlotRecording implies kDoMarking.
410 static_assert(!(kModeMask & kDoEvacuationSlotRecording) ||
411 (kModeMask & kDoMarking));
412
413 MarkingBarrier* marking_barrier = nullptr;
414 static constexpr Tagged_t kPageMask =
415 ~static_cast<Tagged_t>(PageMetadata::kPageSize - 1);
416 Tagged_t cached_uninteresting_page =
417 static_cast<Tagged_t>(heap->read_only_space()->FirstPageAddress()) &
418 kPageMask;
419
420 if (kModeMask & kDoMarking) {
421 marking_barrier = WriteBarrier::CurrentMarkingBarrier(object);
422 }
423
424 MarkCompactCollector* collector = heap->mark_compact_collector();
425 MutablePageMetadata* source_page_metadata =
426 MutablePageMetadata::cast(source_chunk->Metadata());
427
428 for (TSlot slot = start_slot; slot < end_slot; ++slot) {
429 // If we *only* need the generational or shared WB, we can skip objects
430 // residing on uninteresting pages.
431 Tagged_t compressed_page;
432 if (kModeMask == kDoGenerationalOrShared) {
433 Tagged_t tagged_value = *slot.location();
434 if (HAS_SMI_TAG(tagged_value)) continue;
435 compressed_page = tagged_value & kPageMask;
436 if (compressed_page == cached_uninteresting_page) {
437#if DEBUG
438 typename TSlot::TObject value = *slot;
439 Tagged<HeapObject> value_heap_object;
440 if (value.GetHeapObject(&value_heap_object)) {
441 CHECK(!HeapLayout::InYoungGeneration(value_heap_object));
442 CHECK(!HeapLayout::InWritableSharedSpace(value_heap_object));
443 }
444#endif // DEBUG
445 continue;
446 }
447 // Fall through to decompressing the pointer and fetching its actual
448 // page header flags.
449 }
450 typename TSlot::TObject value = *slot;
451 Tagged<HeapObject> value_heap_object;
452 if (!value.GetHeapObject(&value_heap_object)) continue;
453
454 if (kModeMask & kDoGenerationalOrShared) {
455 if (HeapLayout::InYoungGeneration(value_heap_object)) {
456 RememberedSet<OLD_TO_NEW>::Insert<AccessMode::ATOMIC>(
457 source_page_metadata, source_chunk->Offset(slot.address()));
458 } else if (HeapLayout::InWritableSharedSpace(value_heap_object)) {
459 RememberedSet<OLD_TO_SHARED>::Insert<AccessMode::ATOMIC>(
460 source_page_metadata, source_chunk->Offset(slot.address()));
461 } else if (kModeMask == kDoGenerationalOrShared) {
462 cached_uninteresting_page = compressed_page;
463 }
464 }
465
466 if (kModeMask & kDoMarking) {
467 marking_barrier->MarkValue(object, value_heap_object);
468 if (kModeMask & kDoEvacuationSlotRecording) {
469 collector->RecordSlot(source_chunk, HeapObjectSlot(slot),
470 value_heap_object);
471 }
472 }
473 }
474}
475
476} // namespace
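
ForRangeImpl() above avoids touching page headers when only the generational/shared barrier was requested: it masks each compressed slot value down to its page base and skips the slot when that base matches a page already known to be uninteresting (the cache is seeded with the read-only page). A standalone sketch of that caching trick, with an assumed page size and a fake page-flag lookup:

#include <cstdint>
#include <cstdio>

constexpr uintptr_t kPageSize = 1 << 18;           // assumed page size
constexpr uintptr_t kPageMask = ~(kPageSize - 1);  // mask a value to its page base

// Stand-in for reading the page header flags; only the read-only page is boring.
bool PageIsUninteresting(uintptr_t page, uintptr_t read_only_page) {
  return page == read_only_page;
}

int main() {
  const uintptr_t read_only_page = 0x40000;
  uintptr_t cached_uninteresting_page = read_only_page;  // seed the cache

  const uintptr_t slot_values[] = {0x40008, 0x40010, 0x80020, 0x40018};
  int header_lookups = 0;
  for (uintptr_t value : slot_values) {
    uintptr_t page = value & kPageMask;
    if (page == cached_uninteresting_page) continue;  // cheap skip, no header read
    ++header_lookups;                                 // slow path: inspect the page
    if (PageIsUninteresting(page, read_only_page)) cached_uninteresting_page = page;
  }
  std::printf("header lookups: %d of 4 slots\n", header_lookups);
}
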
477
478// Instantiate `WriteBarrier::WriteBarrierForRange()` for `ObjectSlot` and
479// `MaybeObjectSlot`.
480template void WriteBarrier::ForRange<ObjectSlot>(Heap* heap,
481 Tagged<HeapObject> object,
482 ObjectSlot start_slot,
483 ObjectSlot end_slot);
484template void WriteBarrier::ForRange<MaybeObjectSlot>(
485 Heap* heap, Tagged<HeapObject> object, MaybeObjectSlot start_slot,
486 MaybeObjectSlot end_slot);
487
488template <typename TSlot>
489// static
490void WriteBarrier::ForRange(Heap* heap, Tagged<HeapObject> object,
491 TSlot start_slot, TSlot end_slot) {
492 if (v8_flags.disable_write_barriers) return;
493 MemoryChunk* source_chunk = MemoryChunk::FromHeapObject(object);
494 int mode = 0;
495
496 if (!HeapLayout::InYoungGeneration(object) &&
497 !source_chunk->InWritableSharedSpace()) {
498 mode |= kDoGenerationalOrShared;
499 }
500
501 if (heap->incremental_marking()->IsMarking()) {
502 mode |= kDoMarking;
503 if (!source_chunk->ShouldSkipEvacuationSlotRecording()) {
504 mode |= kDoEvacuationSlotRecording;
505 }
506 }
507
508 switch (mode) {
509 // Nothing to be done.
510 case 0:
511 return;
512 // Generational only.
513 case kDoGenerationalOrShared:
514 return ForRangeImpl<kDoGenerationalOrShared>(heap, source_chunk, object,
515 start_slot, end_slot);
516 // Marking, no evacuation slot recording.
517 case kDoMarking:
518 return ForRangeImpl<kDoMarking>(heap, source_chunk, object, start_slot,
519 end_slot);
520 // Marking with evacuation slot recording.
521 case kDoMarking | kDoEvacuationSlotRecording:
522 return ForRangeImpl<kDoMarking | kDoEvacuationSlotRecording>(
523 heap, source_chunk, object, start_slot, end_slot);
524 // Generational and marking, no evacuation slot recording.
525 case kDoGenerationalOrShared | kDoMarking:
526 return ForRangeImpl<kDoGenerationalOrShared | kDoMarking>(
527 heap, source_chunk, object, start_slot, end_slot);
528 // Generational and marking with evacuation slot recording.
529 case kDoGenerationalOrShared | kDoMarking | kDoEvacuationSlotRecording:
530 return ForRangeImpl<kDoGenerationalOrShared | kDoMarking |
531 kDoEvacuationSlotRecording>(
532 heap, source_chunk, object, start_slot, end_slot);
533 default:
534 UNREACHABLE();
535 }
536}
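
ForRange() computes the required barrier work as a runtime bitmask and then switches over every legal combination, so ForRangeImpl() is instantiated with the mask as a compile-time constant and its per-slot `kModeMask & ...` tests fold away. A standalone sketch of that dispatch pattern with invented modes kDoA and kDoB:

#include <cstdio>

enum Mode { kDoA = 1 << 0, kDoB = 1 << 1 };

template <int kModeMask>
void Process(int item) {
  // These tests are compile-time constants in each instantiation.
  if (kModeMask & kDoA) std::printf("A(%d) ", item);
  if (kModeMask & kDoB) std::printf("B(%d) ", item);
}

void Dispatch(int item, bool want_a, bool want_b) {
  int mode = 0;
  if (want_a) mode |= kDoA;
  if (want_b) mode |= kDoB;
  switch (mode) {
    case 0:           return;  // nothing to be done
    case kDoA:        return Process<kDoA>(item);
    case kDoB:        return Process<kDoB>(item);
    case kDoA | kDoB: return Process<kDoA | kDoB>(item);
  }
}

int main() {
  Dispatch(1, /*want_a=*/true, /*want_b=*/false);
  Dispatch(2, /*want_a=*/true, /*want_b=*/true);
  std::printf("\n");
}
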
537
538#ifdef ENABLE_SLOW_DCHECKS
539
540// static
541bool WriteBarrier::VerifyDispatchHandleMarkingState(Tagged<HeapObject> host,
542 JSDispatchHandle handle,
543 WriteBarrierMode mode) {
544#ifdef V8_ENABLE_LEAPTIERING
545 JSDispatchTable* jdt = IsolateGroup::current()->js_dispatch_table();
546 if (mode == SKIP_WRITE_BARRIER &&
547 WriteBarrier::IsRequired(host, jdt->GetCode(handle))) {
548 return false;
549 }
550
551 if (CurrentMarkingBarrier(host)->is_not_major()) return true;
552
553 // Ensure we don't have a black -> white -> black edge. This could happen when
554 // skipping a write barrier while concurrently the dispatch entry is marked
555 // from another JSFunction.
556 if (ReadOnlyHeap::Contains(host) ||
557 (IsMarking(host) && mode != SKIP_WRITE_BARRIER) ||
558 !CurrentMarkingBarrier(host)->IsMarked(host)) {
559 return true;
560 }
561 if (jdt->IsMarked(handle)) {
562 return true;
563 }
564 Tagged<Code> value = jdt->GetCode(handle);
565 if (ReadOnlyHeap::Contains(value)) {
566 return true;
567 }
568 return !CurrentMarkingBarrier(host)->IsMarked(value);
569#else
570 return true;
571#endif // V8_ENABLE_LEAPTIERING
572}
573
574#endif // ENABLE_SLOW_DCHECKS
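
The slow check above guards against a black -> white -> black edge: a marked ("black") host left pointing at an unmarked ("white") value because the barrier was skipped. A simplified standalone sketch of that invariant using a direct pointer field instead of a dispatch-table entry (Obj and the helpers are invented for illustration):

#include <cassert>

struct Obj {
  bool marked = false;
  Obj* field = nullptr;
};

// Store a pointer and, unless the barrier is skipped, shade the value when the
// host is already marked, so marking never misses it.
void WriteWithBarrier(Obj& host, Obj& value, bool skip_barrier) {
  host.field = &value;
  if (!skip_barrier && host.marked) value.marked = true;
}

// The invariant being verified: no marked host points at an unmarked value.
bool InvariantHolds(const Obj& host) {
  return !(host.marked && host.field != nullptr && !host.field->marked);
}

int main() {
  Obj host, value;
  host.marked = true;  // host already marked ("black") during incremental marking
  WriteWithBarrier(host, value, /*skip_barrier=*/false);
  assert(InvariantHolds(host));
}
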
575
576} // namespace v8::internal