v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
marker.cc
1// Copyright 2020 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
7#include <cstddef>
8#include <cstdint>
9#include <memory>
10
20#include "src/heap/cppgc/heap.h"
28
29#if defined(CPPGC_CAGED_HEAP)
31#endif
32
33namespace cppgc {
34namespace internal {
35
36namespace {
37
38bool EnterIncrementalMarkingIfNeeded(MarkingConfig config, HeapBase& heap) {
39 if (config.marking_type == MarkingConfig::MarkingType::kIncremental ||
40 config.marking_type ==
41 MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
43 heap.set_incremental_marking_in_progress(true);
44 return true;
45 }
46 return false;
47}
48
49bool ExitIncrementalMarkingIfNeeded(MarkingConfig config, HeapBase& heap) {
50 if (config.marking_type == MarkingConfig::MarkingType::kIncremental ||
51 config.marking_type ==
52 MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
54 heap.set_incremental_marking_in_progress(false);
55 return true;
56 }
57 return false;
58}
59
60static constexpr size_t kDefaultDeadlineCheckInterval = 150u;
61
62template <StatsCollector::ScopeId scope_id,
63 size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
64 typename WorklistLocal, typename Callback>
65bool DrainWorklistWithBytesAndTimeDeadline(StatsCollector* stats_collector,
66 BasicMarkingState& marking_state,
67 size_t marked_bytes_deadline,
68 v8::base::TimeTicks time_deadline,
69 WorklistLocal& worklist_local,
70 Callback callback) {
72 [&marking_state, marked_bytes_deadline, time_deadline]() {
73 return (marked_bytes_deadline <= marking_state.marked_bytes()) ||
74 (time_deadline <= v8::base::TimeTicks::Now());
75 },
76 [stats_collector]() {
77 return StatsCollector::DisabledScope(stats_collector, scope_id);
78 },
79 worklist_local, callback);
80}
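
DrainWorklistWithBytesAndTimeDeadline above drains a marking worklist until either a marked-bytes budget or a wall-clock deadline is reached, consulting the deadline only every kDeadlineCheckInterval items. The following is a minimal, self-contained sketch of that idea using only the standard library; the names (DrainWithDeadline, byte_budget, process) are illustrative and not V8 API.

// Minimal, self-contained analogue of the deadline-bounded drain (illustrative
// names, not V8 API): pop items until either a byte budget or a wall-clock
// deadline is exceeded, consulting the clock only every kDeadlineCheckInterval
// items so the hot loop stays cheap.
#include <chrono>
#include <cstddef>
#include <deque>
#include <functional>

template <std::size_t kDeadlineCheckInterval = 150>
bool DrainWithDeadline(std::deque<std::size_t>& worklist,
                       std::size_t byte_budget,
                       std::chrono::steady_clock::time_point deadline,
                       const std::function<void(std::size_t)>& process) {
  std::size_t items_since_check = 0;
  std::size_t bytes_processed = 0;
  while (!worklist.empty()) {
    if (++items_since_check == kDeadlineCheckInterval) {
      items_since_check = 0;
      if (bytes_processed >= byte_budget ||
          std::chrono::steady_clock::now() >= deadline) {
        return false;  // Budget exhausted; the caller reschedules a step.
      }
    }
    const std::size_t item_size = worklist.front();
    worklist.pop_front();
    process(item_size);  // Stand-in for tracing one object.
    bytes_processed += item_size;
  }
  return true;  // Worklist fully drained within the budget.
}
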
81
82size_t GetNextIncrementalStepDuration(
84 return schedule.GetNextIncrementalStepDuration(
85 heap.stats_collector()->allocated_object_size());
86}
87
88} // namespace
89
91
93 public:
95
97
99
100 private:
101 void Run() final;
102
105 // TODO(chromium:1056170): Change to CancelableTask.
107};
108
110 MarkerBase* marker, StackState stack_state)
111 : marker_(marker),
112 stack_state_(stack_state),
113 handle_(Handle::NonEmptyTag{}) {}
114
115// static
118 MarkerBase* marker) {
119 // Incremental GC is possible only via the GCInvoker, so getting here
120 // guarantees that either non-nestable tasks or conservative stack
121 // scanning is supported. This is required so that the incremental
122 // task can safely finalize GC if needed.
123 DCHECK_IMPLIES(marker->heap().stack_support() !=
124 HeapBase::StackSupport::kSupportsConservativeStackScan,
125 runner->NonNestableTasksEnabled());
126
127 const bool non_nestable_tasks_enabled = runner->NonNestableTasksEnabled();
128
129 auto task = std::make_unique<IncrementalMarkingTask>(
130 marker, non_nestable_tasks_enabled ? StackState::kNoHeapPointers
131 : StackState::kMayContainHeapPointers);
132 auto handle = task->handle_;
133 if (non_nestable_tasks_enabled) {
134 runner->PostNonNestableTask(std::move(task));
135 } else {
136 runner->PostTask(std::move(task));
137 }
138 return handle;
139}
140
142 if (handle_.IsCanceled()) return;
143
145 StatsCollector::kIncrementalMark);
146
148 // Incremental marking is done, so the GC should be finalized.
150 }
151}
152
165
167 // The fixed point iteration may have found not-fully-constructed objects.
168 // Such objects should have already been found through the stack scan though
169 // and should thus already be marked.
171#if DEBUG
172 DCHECK_NE(StackState::kNoHeapPointers, config_.stack_state);
173 std::unordered_set<HeapObjectHeader*> objects =
175 for (HeapObjectHeader* object : objects) DCHECK(object->IsMarked());
176#else
178#endif
179 }
180
181 // |discovered_ephemeron_pairs_worklist_| may still hold ephemeron pairs with
182 // dead keys.
184#if DEBUG
187 &item)) {
189 }
190#else
192#endif
193 }
194
196}
197
201
203 AllocatedObjectSizeIncreased(size_t delta) {
204 current_allocated_size_ += delta;
205 if (current_allocated_size_ > kMinAllocatedBytesPerStep) {
206 marker_.AdvanceMarkingOnAllocation();
207 current_allocated_size_ = 0;
208 }
209}
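
For illustration, the allocation-observer pattern above, where allocation deltas accumulate until a threshold triggers a bounded marking step, can be sketched in isolation as follows. MockStepper and the 256 KiB threshold are assumptions for the sketch, not cppgc API.

// Simplified, self-contained sketch of the allocation-observer pattern
// (MockStepper and the threshold value are assumptions, not cppgc API):
// allocation deltas are accumulated and, once a threshold is crossed, a
// bounded marking step is triggered and the counter is reset.
#include <cstddef>
#include <cstdio>

class MockStepper {
 public:
  void AdvanceMarkingOnAllocation() { std::puts("bounded marking step"); }
};

class AllocationObserver {
 public:
  explicit AllocationObserver(MockStepper& stepper) : stepper_(stepper) {}

  void AllocatedObjectSizeIncreased(std::size_t delta) {
    current_allocated_size_ += delta;
    if (current_allocated_size_ > kMinAllocatedBytesPerStep) {
      stepper_.AdvanceMarkingOnAllocation();
      current_allocated_size_ = 0;
    }
  }

 private:
  static constexpr std::size_t kMinAllocatedBytesPerStep =
      256 * 1024;  // Assumed threshold for the sketch.
  MockStepper& stepper_;
  std::size_t current_allocated_size_ = 0;
};
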
210
214 heap().stats_collector(),
215 config_.marking_type == MarkingConfig::MarkingType::kAtomic
216 ? StatsCollector::kAtomicMark
217 : StatsCollector::kIncrementalMark);
218
221
222 is_marking_ = true;
223 if (EnterIncrementalMarkingIfNeeded(config_, heap())) {
224 StatsCollector::EnabledScope inner_stats_scope(
225 heap().stats_collector(), StatsCollector::kMarkIncrementalStart);
226
227 // Performing incremental or concurrent marking.
230 // Scanning the stack is expensive, so we only do it at the atomic pause.
230 VisitLocalRoots(StackState::kNoHeapPointers);
232 if (config_.marking_type ==
233 MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
236 }
240 }
241}
242
250
252 StatsCollector::EnabledScope top_stats_scope(heap().stats_collector(),
253 StatsCollector::kAtomicMark);
254 StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
255 StatsCollector::kMarkAtomicPrologue);
256
257 const MarkingConfig::MarkingType old_marking_type = config_.marking_type;
258
259 if (ExitIncrementalMarkingIfNeeded(config_, heap())) {
260 // Cancel remaining incremental tasks. Concurrent marking jobs are left to
261 // run in parallel with the atomic pause until the mutator thread runs out
262 // of work.
266 }
267 config_.stack_state = stack_state;
268 config_.marking_type = MarkingConfig::MarkingType::kAtomic;
270
271 {
272 // VisitLocalRoots() also resets the LABs.
274 // Early marking of strong cross-thread roots before parallel marking. Helps
275 // avoid long single-threaded marking phases.
278 }
279 if (old_marking_type ==
280 MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
281 // Start parallel marking.
284 if (marker.IsActive()) {
286 } else {
287 marker.Start();
288 }
289 }
290}
291
294
295 if (config_.marking_type == MarkingConfig::MarkingType::kAtomic) {
296 return;
297 }
298
299 CHECK_EQ(config_.marking_type, MarkingConfig::MarkingType::kIncremental);
300 config_.marking_type = MarkingConfig::MarkingType::kIncrementalAndConcurrent;
303 CHECK(!marker.IsActive());
304 marker.Start();
305 CHECK(marker.IsActive());
306}
307
309 {
310 StatsCollector::EnabledScope top_stats_scope(heap().stats_collector(),
311 StatsCollector::kAtomicMark);
313 heap().stats_collector(), StatsCollector::kMarkAtomicEpilogue);
315 const size_t overall_marked_bytes =
318 heap().stats_collector()->NotifyMarkingCompleted(overall_marked_bytes);
319 is_marking_ = false;
320 }
323}
324
326
329 EnterAtomicPause(stack_state);
331 {
332 StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
333 StatsCollector::kAtomicMark);
337 }
339 }
341}
342
344 public:
348 : marker_(marker),
349 callback_worklist_(callback_worklist),
350 broker_(broker) {}
351
352 void Run(JobDelegate* delegate) override {
354 marker_->heap().stats_collector(),
355 StatsCollector::kConcurrentWeakCallback);
356 MarkingWorklists::WeakCallbackWorklist::Local local(*callback_worklist_);
358 while (local.Pop(&item)) {
359 item.callback(broker_, item.parameter);
360 }
361 }
362
363 size_t GetMaxConcurrency(size_t worker_count) const override {
364 return std::min(static_cast<size_t>(1),
365 callback_worklist_->Size() + worker_count);
366 }
367
368 private:
372};
373
375 DCHECK_EQ(MarkingConfig::MarkingType::kAtomic, config_.marking_type);
376
378 return;
379 }
380
381 StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
382 StatsCollector::kAtomicWeak);
383 // Weakness callbacks are forbidden from allocating objects.
385
386 RootMarkingVisitor root_marking_visitor(mutator_marking_state_);
387
388 // Processing cross-thread roots requires taking the global process lock.
389 // Process these weak roots first to minimize the time the lock is held.
392 heap().GetWeakCrossThreadPersistentRegion().Iterate(root_marking_visitor);
395}
396
398 DCHECK_EQ(MarkingConfig::MarkingType::kAtomic, config_.marking_type);
399
401
402 // Weakness callbacks are forbidden from allocating objects.
404
405 StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
406 StatsCollector::kAtomicWeak);
407
408 RootMarkingVisitor root_marking_visitor(mutator_marking_state_);
409
410 // Launch the parallel job before anything else to provide the maximum time
411 // slice for processing.
413 std::unique_ptr<cppgc::JobHandle> job_handle{nullptr};
414 if (heap().marking_support() ==
416 job_handle = platform_->PostJob(
417 cppgc::TaskPriority::kUserBlocking,
418 std::make_unique<WeakCallbackJobTask>(
420 broker));
421 }
422
423 // Process same-thread roots.
424 heap().GetWeakPersistentRegion().Iterate(root_marking_visitor);
425
426 // Call weak callbacks on objects that may now be pointing to dead objects.
427#if defined(CPPGC_YOUNG_GENERATION)
428 if (heap().generational_gc_supported()) {
429 auto& remembered_set = heap().remembered_set();
431 // Custom callbacks assume that untraced pointers point to not yet freed
432 // objects. They must make sure that upon callback completion no
433 // UntracedMember points to a freed object. This may not hold true if a
434 // custom callback for an old object operates with a reference to a young
435 // object that was freed on a minor collection cycle. To maintain the
436 // invariant that UntracedMembers always point to valid objects, execute
437 // custom callbacks for old objects on each minor collection cycle.
438 remembered_set.ExecuteCustomCallbacks(broker);
439 } else {
440 // For major GCs, just release all the remembered weak callbacks.
441 remembered_set.ReleaseCustomCallbacks();
442 }
443 }
444#endif // defined(CPPGC_YOUNG_GENERATION)
445
446 {
447 // First, process weak container callbacks.
448 StatsCollector::EnabledScope inner_stats_scope(
449 heap().stats_collector(),
450 StatsCollector::kWeakContainerCallbacksProcessing);
452 MarkingWorklists::WeakCallbackWorklist::Local& collections_local =
454 while (collections_local.Pop(&item)) {
455 item.callback(broker, item.parameter);
456 }
457 }
458 {
459 // Then, process custom weak callbacks.
460 StatsCollector::EnabledScope inner_stats_scope(
461 heap().stats_collector(), StatsCollector::kCustomCallbacksProcessing);
463 MarkingWorklists::WeakCustomCallbackWorklist::Local& custom_callbacks =
465 while (custom_callbacks.Pop(&item)) {
466 item.callback(broker, item.parameter);
467#if defined(CPPGC_YOUNG_GENERATION)
468 if (heap().generational_gc_supported())
469 heap().remembered_set().AddWeakCallback(item);
470#endif // defined(CPPGC_YOUNG_GENERATION)
471 }
472 }
473
474 if (job_handle) {
475 job_handle->Join();
476 } else {
478 MarkingWorklists::WeakCallbackWorklist::Local& local =
480 while (local.Pop(&item)) {
481 item.callback(broker, item.parameter);
482 }
483 }
484
485 // Weak callbacks should not add any new objects for marking.
487}
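
A rough, self-contained model of the weak-callback phase above, using mock types (FakeLivenessBroker, WeakCallbackItem) rather than the real cppgc API: after marking, each registered callback receives a broker that answers liveness queries and uses it to clear references to dead objects.

// Rough, self-contained model of the weak-callback phase (mock types, not the
// cppgc API): each registered callback receives a broker that can answer
// whether an object survived marking, and uses it to clear dead references.
#include <functional>
#include <unordered_set>
#include <vector>

struct FakeLivenessBroker {
  const std::unordered_set<const void*>* marked_objects;
  bool IsHeapObjectAlive(const void* object) const {
    return marked_objects->count(object) > 0;
  }
};

struct WeakCallbackItem {
  std::function<void(const FakeLivenessBroker&, void*)> callback;
  void* parameter;  // Typically the object holding the weak reference.
};

// Weak callbacks must not allocate or mark new objects; they only observe
// liveness and clear pointers to dead objects.
void ProcessWeakCallbacks(const std::vector<WeakCallbackItem>& items,
                          const FakeLivenessBroker& broker) {
  for (const auto& item : items) {
    item.callback(broker, item.parameter);
  }
}
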
488
490 StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
491 StatsCollector::kMarkVisitRoots);
492
493 // Reset LABs before scanning roots so that ObjectStartBitmap handling
494 // does not need to consider LABs.
496
497 {
498 StatsCollector::DisabledScope inner_stats_scope(
499 heap().stats_collector(), StatsCollector::kMarkVisitPersistents);
500 RootMarkingVisitor root_marking_visitor(mutator_marking_state_);
501 heap().GetStrongPersistentRegion().Iterate(root_marking_visitor);
502 }
503
504 if (stack_state != StackState::kNoHeapPointers) {
505 StatsCollector::DisabledScope stack_stats_scope(
506 heap().stats_collector(), StatsCollector::kMarkVisitStack);
509 });
510 }
511
512#if defined(CPPGC_YOUNG_GENERATION)
514 StatsCollector::EnabledScope inner_stats_scope(
515 heap().stats_collector(), StatsCollector::kMarkVisitRememberedSets);
516 heap().remembered_set().Visit(visitor(), conservative_visitor(),
518 }
519#endif // defined(CPPGC_YOUNG_GENERATION)
520}
521
523 StatsCollector::DisabledScope inner_stats_scope(
524 heap().stats_collector(),
525 StatsCollector::kMarkVisitCrossThreadPersistents);
526 CHECK_EQ(config_.marking_type, MarkingConfig::MarkingType::kAtomic);
528 // Lock guards against changes to {Weak}CrossThreadPersistent handles that
529 // may conflict with marking. E.g., a WeakCrossThreadPersistent may be
530 // converted into a CrossThreadPersistent which requires that the handle
531 // is either cleared or the object is retained.
533 RootMarkingVisitor root_marking_visitor(mutator_marking_state_);
534 heap().GetStrongCrossThreadPersistentRegion().Iterate(root_marking_visitor);
536}
537
544
551
555
557 if (stack_state == StackState::kNoHeapPointers) {
559 }
560 config_.stack_state = stack_state;
561
563}
564
566 StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
567 StatsCollector::kIncrementalMark);
568 StatsCollector::EnabledScope nested_scope(heap().stats_collector(),
569 StatsCollector::kMarkOnAllocation);
571}
572
575 // Schedule another incremental task for finalizing without a stack.
577 }
578}
579
581 if (config_.marking_type != MarkingConfig::MarkingType::kAtomic ||
582 !concurrent_marker().Join())
583 return false;
584
585 // Concurrent markers may have pushed some "leftover" in-construction objects
586 // after flushing in EnterAtomicPause.
589 return true;
590}
591
599
601 // Epilogue is only needed when marking is not finished.
602 DCHECK_NE(MarkingConfig::MarkingType::kAtomic, config_.marking_type);
604 if (config_.marking_type ==
605 MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
607 }
608}
609
611 size_t marked_bytes_limit) {
614 return false;
615 }
616
617 StatsCollector::EnabledScope deadline_scope(
618 heap().stats_collector(),
619 StatsCollector::kMarkTransitiveClosureWithDeadline, "max_duration_ms",
620 max_duration.InMillisecondsF(), "max_bytes", marked_bytes_limit);
622 const auto deadline = v8::base::TimeTicks::Now() + max_duration;
623 if (marked_bytes_limit == 0) {
624 marked_bytes_limit = GetNextIncrementalStepDuration(schedule(), heap_);
625 }
626 // `ProcessWorklistsWithDeadline()` below checks against `marked_bytes()`
627 // which is never reset.
628 size_t marked_bytes_deadline =
629 marked_bytes_limit + mutator_marking_state_.marked_bytes();
630 if (marked_bytes_deadline < marked_bytes_limit) {
631 marked_bytes_deadline = SIZE_MAX;
632 }
633 const bool is_done =
634 ProcessWorklistsWithDeadline(marked_bytes_deadline, deadline);
638 if (!is_done) {
640 }
641 return is_done;
642}
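
The conversion above from a relative byte budget to an absolute deadline guards against unsigned overflow by clamping to SIZE_MAX. A tiny standalone sketch of the same guard (BytesDeadline is a hypothetical helper, not V8 code):

// Standalone illustration of the overflow guard: adding the step budget to the
// already-marked byte count may wrap around; if it does, fall back to
// "no byte limit".
#include <cstddef>
#include <cstdint>

inline std::size_t BytesDeadline(std::size_t already_marked,
                                 std::size_t budget) {
  const std::size_t deadline = already_marked + budget;
  // Unsigned addition wrapped iff the result is smaller than an operand.
  return deadline < budget ? SIZE_MAX : deadline;
}
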
643
645 size_t marked_bytes_deadline, v8::base::TimeTicks time_deadline) {
646 StatsCollector* stats_collector = heap().stats_collector();
648 stats_collector, StatsCollector::kMarkTransitiveClosure);
649 bool saved_did_discover_new_ephemeron_pairs;
650 do {
652 if ((config_.marking_type == MarkingConfig::MarkingType::kAtomic) ||
653 schedule().ShouldFlushEphemeronPairs()) {
655 }
656
657 // Bailout objects may be complicated to trace and thus might take longer
658 // than other objects. Therefore we reduce the interval between deadline
659 // checks to guarantee the deadline is not exceeded.
660 if (!DrainWorklistWithBytesAndTimeDeadline<
661 StatsCollector::kMarkProcessBailOutObjects,
662 kDefaultDeadlineCheckInterval / 5>(
663 stats_collector, mutator_marking_state_, SIZE_MAX, time_deadline,
666 mutator_marking_state_.AccountMarkedBytes(
667 BasePage::FromPayload(const_cast<void*>(item.parameter)),
668 item.bailedout_size);
669 item.callback(&visitor(), item.parameter);
670 })) {
671 return false;
672 }
673
674 if (!DrainWorklistWithBytesAndTimeDeadline<
675 StatsCollector::kMarkProcessNotFullyconstructedWorklist>(
676 stats_collector, mutator_marking_state_, marked_bytes_deadline,
677 time_deadline,
679 [this](HeapObjectHeader* header) {
680 mutator_marking_state_.AccountMarkedBytes(*header);
681 DynamicallyTraceMarkedObject<AccessMode::kNonAtomic>(visitor(),
682 *header);
683 })) {
684 return false;
685 }
686
687 if (!DrainWorklistWithBytesAndTimeDeadline<
688 StatsCollector::kMarkProcessMarkingWorklist>(
689 stats_collector, mutator_marking_state_, marked_bytes_deadline,
691 [this](const MarkingWorklists::MarkingItem& item) {
692 const HeapObjectHeader& header =
693 HeapObjectHeader::FromObject(item.base_object_payload);
694 DCHECK(!header.IsInConstruction<AccessMode::kNonAtomic>());
695 DCHECK(header.IsMarked<AccessMode::kAtomic>());
696 mutator_marking_state_.AccountMarkedBytes(header);
697 item.callback(&visitor(), item.base_object_payload);
698 })) {
699 return false;
700 }
701
702 if (!DrainWorklistWithBytesAndTimeDeadline<
703 StatsCollector::kMarkProcessWriteBarrierWorklist>(
704 stats_collector, mutator_marking_state_, marked_bytes_deadline,
706 [this](HeapObjectHeader* header) {
707 mutator_marking_state_.AccountMarkedBytes(*header);
708 DynamicallyTraceMarkedObject<AccessMode::kNonAtomic>(visitor(),
709 *header);
710 })) {
711 return false;
712 }
713
714 if (!DrainWorklistWithBytesAndTimeDeadline<
715 StatsCollector::kMarkProcessRetraceWorklist>(
716 stats_collector, mutator_marking_state_, marked_bytes_deadline,
717 time_deadline,
719 [this](HeapObjectHeader* header) {
720 // Retracing does not increment marked bytes as the object has
721 // already been processed before.
722 DynamicallyTraceMarkedObject<AccessMode::kNonAtomic>(visitor(),
723 *header);
724 })) {
725 return false;
726 }
727
728 saved_did_discover_new_ephemeron_pairs =
730 if (!DrainWorklistWithBytesAndTimeDeadline<
731 StatsCollector::kMarkProcessEphemerons>(
732 stats_collector, mutator_marking_state_, marked_bytes_deadline,
733 time_deadline,
735 [this](const MarkingWorklists::EphemeronPairItem& item) {
736 mutator_marking_state_.ProcessEphemeron(
737 item.key, item.value, item.value_desc, visitor());
738 })) {
739 return false;
740 }
741 } while (!mutator_marking_state_.marking_worklist().IsLocalAndGlobalEmpty() ||
742 saved_did_discover_new_ephemeron_pairs);
743 return true;
744}
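
The do/while loop above iterates to a fixed point because marking an ephemeron value may make further ephemeron keys reachable. A simplified, self-contained illustration of that fixed-point idea, with a mock Object type instead of cppgc's worklists:

// Simplified illustration of the ephemeron fixed point (mock Object type, not
// cppgc worklists): a value is marked only once its key is marked, and since a
// newly marked value may itself be the key of another pair, the pairs are
// re-examined until an iteration makes no further progress.
#include <utility>
#include <vector>

struct Object {
  bool marked = false;
};

void ProcessEphemeronsToFixedPoint(
    std::vector<std::pair<Object*, Object*>>& pairs /* {key, value} */) {
  bool discovered_new_work;
  do {
    discovered_new_work = false;
    for (auto& [key, value] : pairs) {
      if (key->marked && !value->marked) {
        value->marked = true;
        discovered_new_work = true;  // The new value may be a key elsewhere.
      }
    }
  } while (discovered_new_work);
}
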
745
747 // Parallel marking may still be running, which is why atomic extraction is
748 // required.
749 std::unordered_set<HeapObjectHeader*> objects =
752 if (objects.empty()) {
753 return;
754 }
756 heap().stats_collector(),
757 StatsCollector::kMarkVisitNotFullyConstructedObjects);
758 for (HeapObjectHeader* object : objects) {
759 DCHECK(object);
760 // TraceConservativelyIfNeeded delegates to either in-construction or
761 // fully constructed handling. Both handlers have their own marked bytes
762 // accounting and markbit handling (bailout).
764 }
765}
766
769 auto* compaction_worklists = heap_.compactor().compaction_worklists();
770 if (compaction_worklists) compaction_worklists->ClearForTesting();
771}
772
776
780
782 MarkerBase& marker)
783 : marker_(marker), resume_on_exit_(marker_.concurrent_marker().Cancel()) {}
784
786 if (resume_on_exit_) {
787 marker_.concurrent_marker().Start();
788 }
789}
790
792 : MarkerBase(heap, platform, config),
794 conservative_marking_visitor_(heap, mutator_marking_state_,
796 schedule_(::heap::base::IncrementalMarkingSchedule::Create()),
798 platform_) {}
799
800} // namespace internal
801} // namespace cppgc
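
Taken together, this file implements the incremental/concurrent marking lifecycle: StartMarking() begins incremental marking, AdvanceMarkingWithLimits() performs bounded steps (scheduled as tasks or triggered by allocation), and FinishMarking() enters the atomic pause, joins concurrent work, and processes weakness. A minimal mock sketch of that driving sequence (MockMarker is illustrative only; in V8 the phases are driven by the GCInvoker, scheduled IncrementalMarkingTasks, and allocation-triggered steps):

// Minimal mock of the marking lifecycle implemented in this file.
#include <chrono>
#include <cstdio>

class MockMarker {
 public:
  void StartMarking() { std::puts("visit roots, start incremental marking"); }

  // Returns true once all marking worklists are empty within the step budget.
  bool AdvanceMarkingWithLimits(std::chrono::milliseconds max_duration) {
    std::printf("mark for up to %lld ms\n",
                static_cast<long long>(max_duration.count()));
    return ++steps_ >= 3;  // Pretend marking finishes after three steps.
  }

  void FinishMarking() {
    std::puts("atomic pause: join concurrent marking, process weakness");
  }

 private:
  int steps_ = 0;
};

int main() {
  MockMarker marker;
  marker.StartMarking();
  while (!marker.AdvanceMarkingWithLimits(std::chrono::milliseconds(2))) {
    // Between steps the mutator keeps running; write barriers record updates.
  }
  marker.FinishMarking();
  return 0;
}
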