v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
marking-visitor-inl.h
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARKING_VISITOR_INL_H_
#define V8_HEAP_MARKING_VISITOR_INL_H_

#include "src/heap/marking-visitor.h"
// Include the non-inl header before the rest of the headers.

#include "src/common/globals.h"
#include "src/heap/marking.h"
#include "src/heap/spaces.h"
#include "src/objects/objects.h"
#include "src/objects/slots.h"
#include "src/objects/smi.h"
#include "src/objects/string.h"

// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"

namespace v8 {
namespace internal {

// ===========================================================================
// Visiting strong and weak pointers =========================================
// ===========================================================================

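// Marks `object`, which is reachable from `retainer`, and pushes it onto the
// appropriate marking worklist if it has not been marked yet.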
template <typename ConcreteVisitor>
bool MarkingVisitorBase<ConcreteVisitor>::MarkObject(
    Tagged<HeapObject> retainer, Tagged<HeapObject> object,
    MarkingHelper::WorklistTarget target_worklist) {
  DCHECK(heap_->Contains(object));
  SynchronizePageAccess(object);
  concrete_visitor()->AddStrongReferenceForReferenceSummarizer(retainer,
                                                               object);
  return MarkingHelper::TryMarkAndPush(heap_, local_marking_worklists_,
                                       concrete_visitor()->marking_state(),
                                       target_worklist, object);
}

// class template arguments
template <typename ConcreteVisitor>
// method template arguments
template <typename THeapObjectSlot>
void MarkingVisitorBase<ConcreteVisitor>::ProcessStrongHeapObject(
    Tagged<HeapObject> host, THeapObjectSlot slot,
    Tagged<HeapObject> heap_object) {
  SynchronizePageAccess(heap_object);
  const auto target_worklist =
      MarkingHelper::ShouldMarkObject(heap_, heap_object);
  if (!target_worklist) {
    return;
  }
  // TODO(chromium:1495151): Remove after diagnosing.
  if (V8_UNLIKELY(!MemoryChunk::FromHeapObject(heap_object)->IsMarking() &&
                  IsFreeSpaceOrFiller(
                      heap_object, ObjectVisitorWithCageBases::cage_base()))) {
    heap_->isolate()->PushStackTraceAndDie(
        reinterpret_cast<void*>(host->map().ptr()),
        reinterpret_cast<void*>(host->address()),
        reinterpret_cast<void*>(slot.address()),
        reinterpret_cast<void*>(MemoryChunkMetadata::FromHeapObject(heap_object)
                                    ->owner()
                                    ->identity()));
  }
  MarkObject(host, heap_object, target_worklist.value());
  concrete_visitor()->RecordSlot(host, slot, heap_object);
}

// static
template <typename ConcreteVisitor>
V8_INLINE constexpr bool
MarkingVisitorBase<ConcreteVisitor>::IsTrivialWeakReferenceValue(
    Tagged<HeapObject> host, Tagged<HeapObject> heap_object) {
  return !IsMap(heap_object) ||
         !(IsMap(host) || IsTransitionArray(host) || IsDescriptorArray(host));
}

// class template arguments
template <typename ConcreteVisitor>
// method template arguments
template <typename THeapObjectSlot>
void MarkingVisitorBase<ConcreteVisitor>::ProcessWeakHeapObject(
    Tagged<HeapObject> host, THeapObjectSlot slot,
    Tagged<HeapObject> heap_object) {
  SynchronizePageAccess(heap_object);
  concrete_visitor()->AddWeakReferenceForReferenceSummarizer(host, heap_object);
  const auto target_worklist =
      MarkingHelper::ShouldMarkObject(heap_, heap_object);
  if (!target_worklist) {
    return;
  }
  if (concrete_visitor()->marking_state()->IsMarked(heap_object)) {
    // Weak references with live values are directly processed here to
    // reduce the processing time of weak cells during the main GC
    // pause.
    concrete_visitor()->RecordSlot(host, slot, heap_object);
  } else {
    // If we do not know about liveness of the value, we have to process
    // the reference when we know the liveness of the whole transitive
    // closure.
    // Distinguish trivial cases (not involving custom weakness) from
    // non-trivial ones. The latter are maps in host objects of type Map,
    // TransitionArray and DescriptorArray.
    if constexpr (SlotHoldsTrustedPointerV<THeapObjectSlot>) {
      local_weak_objects_->weak_references_trusted_local.Push(
          TrustedObjectAndSlot{host, slot});
    } else if (V8_LIKELY(IsTrivialWeakReferenceValue(host, heap_object))) {
      local_weak_objects_->weak_references_trivial_local.Push(
          HeapObjectAndSlot{host, slot});
    } else {
      local_weak_objects_->weak_references_non_trivial_local.Push(
          HeapObjectAndSlot{host, slot});
    }
  }
}
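
// Iterates the slot range [start, end) and dispatches each reference to
// ProcessStrongHeapObject() or ProcessWeakHeapObject() above.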
// class template arguments
template <typename ConcreteVisitor>
// method template arguments
template <typename TSlot>
void MarkingVisitorBase<ConcreteVisitor>::VisitPointersImpl(
    Tagged<HeapObject> host, TSlot start, TSlot end) {
  using THeapObjectSlot = typename TSlot::THeapObjectSlot;
  for (TSlot slot = start; slot < end; ++slot) {
    typename TSlot::TObject object;
    if constexpr (SlotHoldsTrustedPointerV<TSlot>) {
      // The fast check doesn't support the trusted cage, so skip it.
      object = slot.Relaxed_Load();
    } else {
      const std::optional<Tagged<Object>> optional_object =
          this->GetObjectFilterReadOnlyAndSmiFast(slot);
      if (!optional_object) {
        continue;
      }
      object = *optional_object;
    }
    Tagged<HeapObject> heap_object;
    if (object.GetHeapObjectIfStrong(&heap_object)) {
      // If the reference changes concurrently from strong to weak, the write
      // barrier will treat the weak reference as strong, so we won't miss the
      // weak reference.
      ProcessStrongHeapObject(host, THeapObjectSlot(slot), heap_object);
    } else if (TSlot::kCanBeWeak && object.GetHeapObjectIfWeak(&heap_object)) {
      ProcessWeakHeapObject(host, THeapObjectSlot(slot), heap_object);
    }
  }
}

// class template arguments
template <typename ConcreteVisitor>
// method template arguments
template <typename TSlot>
void MarkingVisitorBase<ConcreteVisitor>::VisitStrongPointerImpl(
    Tagged<HeapObject> host, TSlot slot) {
  static_assert(!TSlot::kCanBeWeak);
  using THeapObjectSlot = typename TSlot::THeapObjectSlot;
  typename TSlot::TObject object = slot.Relaxed_Load();
  Tagged<HeapObject> heap_object;
  if (object.GetHeapObject(&heap_object)) {
    ProcessStrongHeapObject(host, THeapObjectSlot(slot), heap_object);
  }
}

template <typename ConcreteVisitor>
void MarkingVisitorBase<ConcreteVisitor>::VisitEmbeddedPointer(
    Tagged<InstructionStream> host, RelocInfo* rinfo) {
  DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
  Tagged<HeapObject> object =
      rinfo->target_object(ObjectVisitorWithCageBases::cage_base());
  const auto target_worklist = MarkingHelper::ShouldMarkObject(heap_, object);
  if (!target_worklist) {
    return;
  }

  if (!concrete_visitor()->marking_state()->IsMarked(object)) {
    Tagged<Code> code = UncheckedCast<Code>(host->raw_code(kAcquireLoad));
    if (code->IsWeakObject(object)) {
      local_weak_objects_->weak_objects_in_code_local.Push(
          HeapObjectAndCode{object, code});
      concrete_visitor()->AddWeakReferenceForReferenceSummarizer(host, object);
    } else {
      MarkObject(host, object, target_worklist.value());
    }
  }
  concrete_visitor()->RecordRelocSlot(host, rinfo, object);
}

template <typename ConcreteVisitor>
void MarkingVisitorBase<ConcreteVisitor>::VisitCodeTarget(
    Tagged<InstructionStream> host, RelocInfo* rinfo) {
  DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
  Tagged<InstructionStream> target =
      InstructionStream::FromTargetAddress(rinfo->target_address());

  const auto target_worklist = MarkingHelper::ShouldMarkObject(heap_, target);
  if (!target_worklist) {
    return;
  }
  MarkObject(host, target, target_worklist.value());
  concrete_visitor()->RecordRelocSlot(host, rinfo, target);
}
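
// With pointer compression, external pointers are stored in an
// ExternalPointerTable; marking the table entry keeps it alive when the
// table is swept.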
template <typename ConcreteVisitor>
void MarkingVisitorBase<ConcreteVisitor>::VisitExternalPointer(
    Tagged<HeapObject> host, ExternalPointerSlot slot) {
#ifdef V8_COMPRESS_POINTERS
  DCHECK(!slot.tag_range().IsEmpty());
  if (slot.HasExternalPointerHandle()) {
    ExternalPointerHandle handle = slot.Relaxed_LoadHandle();
    ExternalPointerTable* table;
    ExternalPointerTable::Space* space;
    if (IsSharedExternalPointerType(slot.tag_range())) {
      table = shared_external_pointer_table_;
      space = shared_external_pointer_space_;
    } else {
      table = external_pointer_table_;
      if (v8_flags.sticky_mark_bits) {
        // Everything is considered old during major GC.
        if (handle == kNullExternalPointerHandle) return;
        // The object may either be in young or old EPT.
        if (table->Contains(heap_->young_external_pointer_space(), handle)) {
          space = heap_->young_external_pointer_space();
        } else {
          DCHECK(table->Contains(heap_->old_external_pointer_space(), handle));
          space = heap_->old_external_pointer_space();
        }
      } else {
        space = HeapLayout::InYoungGeneration(host)
                    ? heap_->young_external_pointer_space()
                    : heap_->old_external_pointer_space();
      }
    }
    table->Mark(space, handle, slot.address());
  }
#endif // V8_COMPRESS_POINTERS
}

template <typename ConcreteVisitor>
void MarkingVisitorBase<ConcreteVisitor>::VisitCppHeapPointer(
    Tagged<HeapObject> host, CppHeapPointerSlot slot) {
#ifdef V8_COMPRESS_POINTERS
  const ExternalPointerHandle handle = slot.Relaxed_LoadHandle();
  if (handle == kNullExternalPointerHandle) {
    return;
  }
  CppHeapPointerTable* table = cpp_heap_pointer_table_;
  CppHeapPointerTable::Space* space = heap_->cpp_heap_pointer_space();
  table->Mark(space, handle, slot.address());
#endif // V8_COMPRESS_POINTERS
  if (auto cpp_heap_pointer =
          slot.try_load(heap_->isolate(), kAnyCppHeapPointer)) {
    local_marking_worklists_->cpp_marking_state()->MarkAndPush(
        reinterpret_cast<void*>(cpp_heap_pointer));
  }
}

template <typename ConcreteVisitor>
void MarkingVisitorBase<ConcreteVisitor>::VisitIndirectPointer(
    Tagged<HeapObject> host, IndirectPointerSlot slot,
    IndirectPointerMode mode) {
#ifdef V8_ENABLE_SANDBOX
  if (mode == IndirectPointerMode::kStrong) {
    // Load the referenced object (if the slot is initialized) and mark it as
    // alive if necessary. Indirect pointers never have to be added to a
    // remembered set because the referenced object will update the pointer
    // table entry when it is relocated.
    // The marker might visit objects whose trusted pointers to each other
    // are not yet or no longer accessible, so we must handle those here.
    Tagged<Object> value = slot.Relaxed_Load_AllowUnpublished(heap_->isolate());
    if (IsHeapObject(value)) {
      Tagged<HeapObject> obj = Cast<HeapObject>(value);
      SynchronizePageAccess(obj);
      const auto target_worklist = MarkingHelper::ShouldMarkObject(heap_, obj);
      if (!target_worklist) {
        return;
      }
      MarkObject(host, obj, target_worklist.value());
    }
  }
#else
  UNREACHABLE();
#endif
}

template <typename ConcreteVisitor>
void MarkingVisitorBase<ConcreteVisitor>::VisitTrustedPointerTableEntry(
    Tagged<HeapObject> host, IndirectPointerSlot slot) {
  concrete_visitor()->MarkPointerTableEntry(host, slot);
}

template <typename ConcreteVisitor>
void MarkingVisitorBase<ConcreteVisitor>::VisitJSDispatchTableEntry(
    Tagged<HeapObject> host, JSDispatchHandle handle) {
#ifdef V8_ENABLE_LEAPTIERING
  JSDispatchTable* jdt = IsolateGroup::current()->js_dispatch_table();
#ifdef DEBUG
  JSDispatchTable::Space* space = heap_->js_dispatch_table_space();
  JSDispatchTable::Space* ro_space =
      heap_->isolate()->read_only_heap()->js_dispatch_table_space();
  jdt->VerifyEntry(handle, space, ro_space);
#endif // DEBUG

  jdt->Mark(handle);

  // The code objects referenced from a dispatch table entry are treated as weak
  // references for the purpose of bytecode/baseline flushing, so they are not
  // marked here. See also VisitJSFunction below.
#endif // V8_ENABLE_LEAPTIERING
}

// ===========================================================================
// Object participating in bytecode flushing =================================
// ===========================================================================

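// If the baseline code of a JSFunction may be flushed, its Code is not marked
// strongly here; otherwise the Code object is kept alive below.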
template <typename ConcreteVisitor>
size_t MarkingVisitorBase<ConcreteVisitor>::VisitJSFunction(
    Tagged<Map> map, Tagged<JSFunction> js_function,
    MaybeObjectSize maybe_object_size) {
  if (ShouldFlushBaselineCode(js_function)) {
    DCHECK(IsBaselineCodeFlushingEnabled(code_flush_mode_));
#ifndef V8_ENABLE_LEAPTIERING
    local_weak_objects_->baseline_flushing_candidates_local.Push(js_function);
#endif // !V8_ENABLE_LEAPTIERING
    return Base::VisitJSFunction(map, js_function, maybe_object_size);
  }

  // We're not flushing the Code, so mark it as alive.
#ifdef V8_ENABLE_LEAPTIERING
  // Here we can see JSFunctions that aren't fully initialized (e.g. during
  // deserialization) so we need to check for the null handle.
  JSDispatchHandle handle(
      js_function->Relaxed_ReadField<JSDispatchHandle::underlying_type>(
          JSFunction::kDispatchHandleOffset));
  if (handle != kNullJSDispatchHandle) {
    Tagged<Code> obj =
        IsolateGroup::current()->js_dispatch_table()->GetCode(handle);
    // TODO(saelo): maybe factor out common code with VisitIndirectPointer
    // into a helper routine?
    SynchronizePageAccess(obj);
    const auto target_worklist = MarkingHelper::ShouldMarkObject(heap_, obj);
    if (target_worklist) {
      MarkObject(js_function, obj, target_worklist.value());
    }
  }
#else

#ifdef V8_ENABLE_SANDBOX
  VisitIndirectPointer(js_function,
                       js_function->RawIndirectPointerField(
                           JSFunction::kCodeOffset, kCodeIndirectPointerTag),
                       IndirectPointerMode::kStrong);
#else
  VisitPointer(js_function, js_function->RawField(JSFunction::kCodeOffset));
#endif // V8_ENABLE_SANDBOX

#endif // V8_ENABLE_LEAPTIERING

  // TODO(mythria): Consider updating the check for ShouldFlushBaselineCode to
  // also include cases where there is old bytecode even when there is no
  // baseline code and remove this check here.
  if (IsByteCodeFlushingEnabled(code_flush_mode_) &&
      js_function->NeedsResetDueToFlushedBytecode(heap_->isolate())) {
    local_weak_objects_->flushed_js_functions_local.Push(js_function);
  }

  return Base::VisitJSFunction(map, js_function, maybe_object_size);
}

template <typename ConcreteVisitor>
size_t MarkingVisitorBase<ConcreteVisitor>::VisitSharedFunctionInfo(
    Tagged<Map> map, Tagged<SharedFunctionInfo> shared_info,
    MaybeObjectSize maybe_object_size) {
  const bool can_flush_bytecode = HasBytecodeArrayForFlushing(shared_info);

  // We found a BytecodeArray that can be flushed. Increment the age of the SFI.
  if (can_flush_bytecode && !should_keep_ages_unchanged_) {
    MakeOlder(shared_info);
  }

  if (!can_flush_bytecode || !ShouldFlushCode(shared_info)) {
    // If the SharedFunctionInfo doesn't have old bytecode visit the function
    // data strongly.
#ifdef V8_ENABLE_SANDBOX
    VisitIndirectPointer(shared_info,
                         shared_info->RawIndirectPointerField(
                             SharedFunctionInfo::kTrustedFunctionDataOffset,
                             kUnknownIndirectPointerTag),
                         IndirectPointerMode::kStrong);
#else
    VisitPointer(
        shared_info,
        shared_info->RawField(SharedFunctionInfo::kTrustedFunctionDataOffset));
#endif
    VisitPointer(shared_info,
                 shared_info->RawField(
                     SharedFunctionInfo::kUntrustedFunctionDataOffset));
  } else if (!IsByteCodeFlushingEnabled(code_flush_mode_)) {
    // If bytecode flushing is disabled but baseline code flushing is enabled
    // then we have to visit the bytecode but not the baseline code.
    DCHECK(IsBaselineCodeFlushingEnabled(code_flush_mode_));
    Tagged<Code> baseline_code = shared_info->baseline_code(kAcquireLoad);
    // Visit the bytecode hanging off baseline code.
    VisitProtectedPointer(
        baseline_code, baseline_code->RawProtectedPointerField(
                           Code::kDeoptimizationDataOrInterpreterDataOffset));
    local_weak_objects_->code_flushing_candidates_local.Push(shared_info);
  } else {
    // In other cases, record as a flushing candidate since we have old
    // bytecode.
    local_weak_objects_->code_flushing_candidates_local.Push(shared_info);
  }
  return Base::VisitSharedFunctionInfo(map, shared_info, maybe_object_size);
}

template <typename ConcreteVisitor>
bool MarkingVisitorBase<ConcreteVisitor>::HasBytecodeArrayForFlushing(
    Tagged<SharedFunctionInfo> sfi) const {
  if (IsFlushingDisabled(code_flush_mode_)) return false;

  // TODO(rmcilroy): Enable bytecode flushing for resumable functions.
  if (IsResumableFunction(sfi->kind()) || !sfi->allows_lazy_compilation()) {
    return false;
  }

  // Get a snapshot of the function data field, and if it is a bytecode array,
  // check if it is old. Note, this is done this way since this function can be
  // called by the concurrent marker.
  Tagged<Object> data = sfi->GetTrustedData(heap_->isolate());
  if (IsCode(data)) {
    Tagged<Code> baseline_code = Cast<Code>(data);
    DCHECK_EQ(baseline_code->kind(), CodeKind::BASELINE);
    // If baseline code flushing isn't enabled and we have baseline data on SFI
    // we cannot flush baseline / bytecode.
    if (!IsBaselineCodeFlushingEnabled(code_flush_mode_)) return false;
    data = baseline_code->bytecode_or_interpreter_data();
  } else if (!IsByteCodeFlushingEnabled(code_flush_mode_)) {
    // If bytecode flushing isn't enabled and there is no baseline code there is
    // nothing to flush.
    return false;
  }

  return IsBytecodeArray(data);
}

template <typename ConcreteVisitor>
bool MarkingVisitorBase<ConcreteVisitor>::ShouldFlushCode(
    Tagged<SharedFunctionInfo> sfi) const {
  // This method is used both for flushing bytecode and baseline code.
  // During last resort GCs and stress testing we consider all code old.
  return IsOld(sfi) || V8_UNLIKELY(IsForceFlushingEnabled(code_flush_mode_));
}

template <typename ConcreteVisitor>
bool MarkingVisitorBase<ConcreteVisitor>::IsOld(
    Tagged<SharedFunctionInfo> sfi) const {
  if (v8_flags.flush_code_based_on_time) {
    return sfi->age() >= v8_flags.bytecode_old_time;
  } else if (v8_flags.flush_code_based_on_tab_visibility) {
    return isolate_in_background_ ||
           V8_UNLIKELY(v8_flags.stress_flush_code);
  } else {
    return sfi->age() >= v8_flags.bytecode_old_age;
  }
}

template <typename ConcreteVisitor>
void MarkingVisitorBase<ConcreteVisitor>::MakeOlder(
    Tagged<SharedFunctionInfo> sfi) const {
  if (v8_flags.flush_code_based_on_time) {
    if (code_flushing_increase_ == 0) {
      return;
    }

    uint16_t current_age;
    uint16_t updated_age;
    do {
      current_age = sfi->age();
      // When the age is 0, it was reset by the function prologue in
      // Ignition/Sparkplug. But that might have been some time after the last
      // full GC. So in this case we don't increment the value like we normally
      // would but just set the age to 1. All non-0 values can be incremented as
      // expected (we add the number of seconds since the last GC) as they were
      // definitely last executed before the last full GC.
      updated_age = current_age == 0
                        ? 1
                        : SaturateAdd(current_age, code_flushing_increase_);
    } while (sfi->CompareExchangeAge(current_age, updated_age) != current_age);
  } else if (v8_flags.flush_code_based_on_tab_visibility) {
    // No need to increment age.
  } else {
    uint16_t age = sfi->age();
    if (age < v8_flags.bytecode_old_age) {
      sfi->CompareExchangeAge(age, age + 1);
    }
    DCHECK_LE(sfi->age(), v8_flags.bytecode_old_age);
  }
}

template <typename ConcreteVisitor>
bool MarkingVisitorBase<ConcreteVisitor>::ShouldFlushBaselineCode(
    Tagged<JSFunction> js_function) const {
  if (!IsBaselineCodeFlushingEnabled(code_flush_mode_)) return false;
  // Do a raw read for shared and code fields here since this function may be
  // called on a concurrent thread. JSFunction itself should be fully
  // initialized here but the SharedFunctionInfo, InstructionStream objects may
  // not be initialized. We read using acquire loads to defend against that.
  Tagged<Object> maybe_shared =
      ACQUIRE_READ_FIELD(js_function, JSFunction::kSharedFunctionInfoOffset);
  if (!IsSharedFunctionInfo(maybe_shared)) return false;

  // See crbug.com/v8/11972 for more details on acquire / release semantics for
  // code field. We don't use release stores when copying code pointers from
  // SFI / FV to JSFunction but it is safe in practice.
  Tagged<Object> maybe_code =
      js_function->raw_code(heap_->isolate(), kAcquireLoad);

#ifdef THREAD_SANITIZER
  // This is needed because TSAN does not process the memory fence
  // emitted after page initialization.
  MemoryChunk::FromAddress(maybe_code.ptr())->SynchronizedLoad();
#endif
  if (!IsCode(maybe_code)) return false;
  Tagged<Code> code = Cast<Code>(maybe_code);
  if (code->kind() != CodeKind::BASELINE) return false;

  Tagged<SharedFunctionInfo> shared = Cast<SharedFunctionInfo>(maybe_shared);
  return HasBytecodeArrayForFlushing(shared) && ShouldFlushCode(shared);
}

// ===========================================================================
// Fixed arrays that need incremental processing =============================
// ===========================================================================

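// Large FixedArrays are marked in chunks of MarkingProgressTracker::kChunkSize
// so that markers can share and steal the remaining work.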
template <typename ConcreteVisitor>
size_t MarkingVisitorBase<ConcreteVisitor>::VisitFixedArrayWithProgressTracker(
    Tagged<Map> map, Tagged<FixedArray> object,
    MarkingProgressTracker& progress_tracker) {
  static_assert(kMaxRegularHeapObjectSize % kTaggedSize == 0);
  static constexpr size_t kMaxQueuedWorklistItems = 8u;
  DCHECK(concrete_visitor()->marking_state()->IsMarked(object));

  const size_t size = FixedArray::BodyDescriptor::SizeOf(map, object);
  const size_t chunk = progress_tracker.GetNextChunkToMark();
  const size_t total_chunks = progress_tracker.TotalNumberOfChunks();
  size_t start = 0;
  size_t end = 0;
  if (chunk == 0) {
    // We just started marking the fixed array. Push the total number of chunks
    // to the marking worklist and publish it so that other markers can
    // participate.
    if (const auto target_worklist =
            MarkingHelper::ShouldMarkObject(heap_, object)) {
      DCHECK_EQ(target_worklist.value(),
                MarkingHelper::WorklistTarget::kRegular);
      const size_t scheduled_chunks =
          std::min(total_chunks, kMaxQueuedWorklistItems);
      DCHECK_GT(scheduled_chunks, 0);
      for (size_t i = 1; i < scheduled_chunks; ++i) {
        local_marking_worklists_->Push(object);
        // Publish each chunk into a new segment so that other markers would be
        // able to steal work. This is probabilistic (a single marker can be
        // fast and steal multiple segments), but it works well in practice.
        local_marking_worklists_->ShareWork();
      }
    }
    concrete_visitor()
        ->template VisitMapPointerIfNeeded<VisitorId::kVisitFixedArray>(object);
    start = FixedArray::BodyDescriptor::kStartOffset;
    end = std::min(size, MarkingProgressTracker::kChunkSize);
  } else {
    start = chunk * MarkingProgressTracker::kChunkSize;
    end = std::min(size, start + MarkingProgressTracker::kChunkSize);
  }

  // Repost the task if needed.
  if (chunk + kMaxQueuedWorklistItems < total_chunks) {
    if (const auto target_worklist =
            MarkingHelper::ShouldMarkObject(heap_, object)) {
      local_marking_worklists_->Push(object);
      local_marking_worklists_->ShareWork();
    }
  }

  if (start < end) {
    VisitPointers(object,
                  Cast<HeapObject>(object)->RawField(static_cast<int>(start)),
                  Cast<HeapObject>(object)->RawField(static_cast<int>(end)));
  }

  return end - start;
}

template <typename ConcreteVisitor>
size_t MarkingVisitorBase<ConcreteVisitor>::VisitFixedArray(
    Tagged<Map> map, Tagged<FixedArray> object,
    MaybeObjectSize maybe_object_size) {
  MarkingProgressTracker& progress_tracker =
      MutablePageMetadata::FromHeapObject(object)->marking_progress_tracker();
  return concrete_visitor()->CanUpdateValuesInHeap() &&
                 progress_tracker.IsEnabled()
             ? VisitFixedArrayWithProgressTracker(map, object, progress_tracker)
             : Base::VisitFixedArray(map, object, maybe_object_size);
}

// ===========================================================================
// Custom visitation =========================================================
// ===========================================================================

template <typename ConcreteVisitor>
size_t MarkingVisitorBase<ConcreteVisitor>::VisitJSArrayBuffer(
    Tagged<Map> map, Tagged<JSArrayBuffer> object,
    MaybeObjectSize maybe_object_size) {
  object->MarkExtension();
  return Base::VisitJSArrayBuffer(map, object, maybe_object_size);
}

// ===========================================================================
// Weak JavaScript objects ===================================================
// ===========================================================================

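// Ephemeron semantics: an entry's value is visited only if its key is already
// known to be live; otherwise, entries with unmarked values are deferred to
// the ephemeron worklists and revisited once more of the transitive closure
// is known.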
template <typename ConcreteVisitor>
size_t MarkingVisitorBase<ConcreteVisitor>::VisitEphemeronHashTable(
    Tagged<Map> map, Tagged<EphemeronHashTable> table, MaybeObjectSize) {
  local_weak_objects_->ephemeron_hash_tables_local.Push(table);
  const bool use_key_to_values = key_to_values_ != nullptr;

  for (InternalIndex i : table->IterateEntries()) {
    ObjectSlot key_slot =
        table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
    Tagged<HeapObject> key = Cast<HeapObject>(table->KeyAt(i, kRelaxedLoad));

    SynchronizePageAccess(key);
    concrete_visitor()->RecordSlot(table, key_slot, key);
    concrete_visitor()->AddWeakReferenceForReferenceSummarizer(table, key);

    ObjectSlot value_slot =
        table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));

    // Objects in the shared heap are prohibited from being used as keys in
    // WeakMaps and WeakSets and therefore cannot be ephemeron keys. See also
    // MarkCompactCollector::ProcessEphemeron.
    DCHECK(!HeapLayout::InWritableSharedSpace(key));
    if (MarkingHelper::IsMarkedOrAlwaysLive(
            heap_, concrete_visitor()->marking_state(), key)) {
      VisitPointer(table, value_slot);
    } else {
      Tagged<Object> value_obj = table->ValueAt(i);

      if (IsHeapObject(value_obj)) {
        Tagged<HeapObject> value = Cast<HeapObject>(value_obj);
        SynchronizePageAccess(value);
        concrete_visitor()->RecordSlot(table, value_slot, value);
        concrete_visitor()->AddWeakReferenceForReferenceSummarizer(table,
                                                                   value);

        const auto target_worklist =
            MarkingHelper::ShouldMarkObject(heap_, value);
        if (!target_worklist) {
          continue;
        }

        // Revisit ephemerons with both key and value unreachable at end
        // of concurrent marking cycle.
        if (concrete_visitor()->marking_state()->IsUnmarked(value)) {
          if (V8_LIKELY(!use_key_to_values)) {
            local_weak_objects_->next_ephemerons_local.Push(
                Ephemeron{key, value});
          } else {
            auto it = key_to_values_->try_emplace(key).first;
            it->second.push_back(value);
          }
        }
      }
    }
  }
  return table->SizeFromMap(map);
}

template <typename ConcreteVisitor>
size_t MarkingVisitorBase<ConcreteVisitor>::VisitJSWeakRef(
    Tagged<Map> map, Tagged<JSWeakRef> weak_ref,
    MaybeObjectSize maybe_object_size) {
  if (IsHeapObject(weak_ref->target())) {
    Tagged<HeapObject> target = Cast<HeapObject>(weak_ref->target());
    SynchronizePageAccess(target);
    concrete_visitor()->AddWeakReferenceForReferenceSummarizer(weak_ref,
                                                               target);
    if (MarkingHelper::IsMarkedOrAlwaysLive(
            heap_, concrete_visitor()->marking_state(), target)) {
      // Record the slot inside the JSWeakRef, since the VisitJSWeakRef above
      // didn't visit it.
      ObjectSlot slot = weak_ref->RawField(JSWeakRef::kTargetOffset);
      concrete_visitor()->RecordSlot(weak_ref, slot, target);
    } else {
      // JSWeakRef points to a potentially dead object. We have to process them
      // when we know the liveness of the whole transitive closure.
      local_weak_objects_->js_weak_refs_local.Push(weak_ref);
    }
  }
  return Base::VisitJSWeakRef(map, weak_ref, maybe_object_size);
}

template <typename ConcreteVisitor>
size_t MarkingVisitorBase<ConcreteVisitor>::VisitWeakCell(
    Tagged<Map> map, Tagged<WeakCell> weak_cell,
    MaybeObjectSize maybe_object_size) {
  Tagged<HeapObject> target = weak_cell->relaxed_target();
  Tagged<HeapObject> unregister_token = weak_cell->relaxed_unregister_token();
  SynchronizePageAccess(target);
  SynchronizePageAccess(unregister_token);
  if (MarkingHelper::IsMarkedOrAlwaysLive(
          heap_, concrete_visitor()->marking_state(), target) &&
      MarkingHelper::IsMarkedOrAlwaysLive(
          heap_, concrete_visitor()->marking_state(), unregister_token)) {
    // Record the slots inside the WeakCell, since its IterateBody doesn't visit
    // it.
    ObjectSlot slot = weak_cell->RawField(WeakCell::kTargetOffset);
    concrete_visitor()->RecordSlot(weak_cell, slot, target);
    slot = weak_cell->RawField(WeakCell::kUnregisterTokenOffset);
    concrete_visitor()->RecordSlot(weak_cell, slot, unregister_token);
  } else {
    // WeakCell points to a potentially dead object or a dead unregister
    // token. We have to process them when we know the liveness of the whole
    // transitive closure.
    local_weak_objects_->weak_cells_local.Push(weak_cell);
    concrete_visitor()->AddWeakReferenceForReferenceSummarizer(weak_cell,
                                                               target);
    concrete_visitor()->AddWeakReferenceForReferenceSummarizer(
        weak_cell, unregister_token);
  }
  return Base::VisitWeakCell(map, weak_cell, maybe_object_size);
}

// ===========================================================================
// Custom weakness in descriptor arrays and transition arrays ================
// ===========================================================================

template <typename ConcreteVisitor>
size_t MarkingVisitorBase<ConcreteVisitor>::VisitDescriptorArrayStrongly(
    Tagged<Map> map, Tagged<DescriptorArray> array, MaybeObjectSize) {
  this->template VisitMapPointerIfNeeded<VisitorId::kVisitDescriptorArray>(
      array);
  const int size = DescriptorArray::BodyDescriptor::SizeOf(map, array);
  VisitPointers(array, array->GetFirstPointerSlot(),
                array->GetDescriptorSlot(0));
  VisitPointers(array, MaybeObjectSlot(array->GetDescriptorSlot(0)),
                MaybeObjectSlot(
                    array->GetDescriptorSlot(array->number_of_descriptors())));
  return size;
}

template <typename ConcreteVisitor>
size_t MarkingVisitorBase<ConcreteVisitor>::VisitDescriptorArray(
    Tagged<Map> map, Tagged<DescriptorArray> array,
    MaybeObjectSize maybe_object_size) {
  if (!concrete_visitor()->CanUpdateValuesInHeap()) {
    // If we cannot update the values in the heap, we just treat the array
    // strongly.
    return VisitDescriptorArrayStrongly(map, array, maybe_object_size);
  }

  // The markbit is not used anymore. This is different from a checked
  // transition in that the array is re-added to the worklist and thus there's
  // many invocations of this transition. All cases (roots, marking via map,
  // write barrier) are handled here as they all update the state accordingly.
  const auto [start, end] =
      DescriptorArrayMarkingState::AcquireDescriptorRangeToMark(
          mark_compact_epoch_, array);
  if (start != end) {
    DCHECK_LT(start, end);
    VisitPointers(array, MaybeObjectSlot(array->GetDescriptorSlot(start)),
                  MaybeObjectSlot(array->GetDescriptorSlot(end)));
    if (start == 0) {
      // We are processing the object the first time. Visit the header and
      // return a size for accounting.
      size_t size = DescriptorArray::BodyDescriptor::SizeOf(map, array);
      VisitPointers(array, array->GetFirstPointerSlot(),
                    array->GetDescriptorSlot(0));
      concrete_visitor()
          ->template VisitMapPointerIfNeeded<VisitorId::kVisitDescriptorArray>(
              array);
      return size;
    }
  }
  return 0;
}

template <typename ConcreteVisitor>
void MarkingVisitorBase<ConcreteVisitor>::VisitDescriptorsForMap(
    Tagged<Map> map) {
  if (!concrete_visitor()->CanUpdateValuesInHeap() || !map->CanTransition())
    return;

  // Maps that can transition share their descriptor arrays and require
  // special visiting logic to avoid memory leaks.
  // Since descriptor arrays are potentially shared, ensure that only the
  // descriptors that belong to this map are marked. The first time a
  // non-empty descriptor array is marked, its header is also visited. The
  // slot holding the descriptor array will be implicitly recorded when the
  // pointer fields of this map are visited.
  Tagged<Object> maybe_descriptors =
      TaggedField<Object, Map::kInstanceDescriptorsOffset>::Acquire_Load(
          heap_->isolate(), map);

  // If the descriptors are a Smi, then this Map is in the process of being
  // deserialized, and doesn't yet have an initialized descriptor field.
  if (IsSmi(maybe_descriptors)) {
    DCHECK_EQ(maybe_descriptors, Smi::uninitialized_deserialization_value());
    return;
  }

  Tagged<DescriptorArray> descriptors =
      Cast<DescriptorArray>(maybe_descriptors);
  // Synchronize reading of page flags for tsan.
  SynchronizePageAccess(descriptors);
  // Normal processing of descriptor arrays through the pointers iteration that
  // follows this call:
  // - Array in read only space;
  // - Array in a black allocated page;
  // - StrongDescriptor array;
  if (HeapLayout::InReadOnlySpace(descriptors) ||
      IsStrongDescriptorArray(descriptors)) {
    return;
  }

  if (v8_flags.black_allocated_pages &&
      HeapLayout::InBlackAllocatedPage(descriptors)) {
    return;
  }

  const int number_of_own_descriptors = map->NumberOfOwnDescriptors();
  if (number_of_own_descriptors) {
    // It is possible that the concurrent marker observes the
    // number_of_own_descriptors out of sync with the descriptors. In that
    // case the marking write barrier for the descriptor array will ensure
    // that all required descriptors are marked. The concurrent marker
    // just should avoid crashing in that case. That's why we need the
    // std::min<int>() below.
    const auto descriptors_to_mark = std::min<int>(
        number_of_own_descriptors, descriptors->number_of_descriptors());
    concrete_visitor()->marking_state()->TryMark(descriptors);
    if (DescriptorArrayMarkingState::TryUpdateIndicesToMark(
            mark_compact_epoch_, descriptors, descriptors_to_mark)) {
#ifdef DEBUG
      const auto target_worklist =
          MarkingHelper::ShouldMarkObject(heap_, descriptors);
      DCHECK(target_worklist);
      DCHECK_EQ(target_worklist.value(),
                MarkingHelper::WorklistTarget::kRegular);
#endif // DEBUG
      local_marking_worklists_->Push(descriptors);
    }
  }
}

template <typename ConcreteVisitor>
size_t MarkingVisitorBase<ConcreteVisitor>::VisitMap(
    Tagged<Map> meta_map, Tagged<Map> map, MaybeObjectSize maybe_object_size) {
  VisitDescriptorsForMap(map);
  // Mark the pointer fields of the Map. If there is a transitions array, it has
  // been marked already, so it is fine that one of these fields contains a
  // pointer to it.
  return Base::VisitMap(meta_map, map, maybe_object_size);
}

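// Transition arrays hold custom weak references; they are only recorded here
// and processed later once the liveness of the whole transitive closure is
// known.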
template <typename ConcreteVisitor>
size_t MarkingVisitorBase<ConcreteVisitor>::VisitTransitionArray(
    Tagged<Map> map, Tagged<TransitionArray> array,
    MaybeObjectSize maybe_object_size) {
  local_weak_objects_->transition_arrays_local.Push(array);
  return Base::VisitTransitionArray(map, array, maybe_object_size);
}

template <typename ConcreteVisitor>
void FullMarkingVisitorBase<ConcreteVisitor>::MarkPointerTableEntry(
    Tagged<HeapObject> host, IndirectPointerSlot slot) {
#ifdef V8_ENABLE_SANDBOX
  IndirectPointerTag tag = slot.tag();
  DCHECK_NE(tag, kUnknownIndirectPointerTag);

  IndirectPointerHandle handle = slot.Relaxed_LoadHandle();

  // We must not see an uninitialized 'self' indirect pointer as we might
  // otherwise fail to mark the table entry as alive.
  DCHECK_NE(handle, kNullIndirectPointerHandle);

  if (tag == kCodeIndirectPointerTag) {
    CodePointerTable* table = IsolateGroup::current()->code_pointer_table();
    CodePointerTable::Space* space = this->heap_->code_pointer_space();
    table->Mark(space, handle);
  } else {
    bool use_shared_table = IsSharedTrustedPointerType(tag);
    DCHECK_EQ(use_shared_table, HeapLayout::InWritableSharedSpace(host));
    TrustedPointerTable* table = use_shared_table
                                     ? this->shared_trusted_pointer_table_
                                     : this->trusted_pointer_table_;
    TrustedPointerTable::Space* space =
        use_shared_table
            ? this->heap_->isolate()->shared_trusted_pointer_space()
            : this->heap_->trusted_pointer_space();
    table->Mark(space, handle);
  }
#else
  UNREACHABLE();
#endif
}

} // namespace internal
} // namespace v8

#include "src/objects/object-macros-undef.h"

#endif // V8_HEAP_MARKING_VISITOR_INL_H_