v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
concurrent-marking.cc
1// Copyright 2017 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/heap/concurrent-marking.h"
6
7#include <algorithm>
8#include <atomic>
9#include <stack>
10#include <unordered_map>
11
12#include "include/v8config.h"
13#include "src/base/logging.h"
14#include "src/common/globals.h"
16#include "src/flags/flags.h"
20#include "src/heap/gc-tracer.h"
21#include "src/heap/heap-inl.h"
26#include "src/heap/heap.h"
32#include "src/heap/marking.h"
44#include "src/init/v8.h"
52#include "src/utils/utils-inl.h"
53
54namespace v8 {
55namespace internal {
56
57// This class caches page live bytes during concurrent marking. This
58// avoids costly CAS operations on MutablePageMetadata::live_byte_count_ for
59// each traced object.
60//
61// Page live bytes are cached in a fixed-size hash map. In the case of
62// collisions the existing entry is simply written back to
63// MutablePageMetadata::live_byte_count_ with a CAS. Afterwards it can be
64// replaced with the new entry.
65class MemoryChunkLiveBytesMap {
66 public:
67 MemoryChunkLiveBytesMap() = default;
68
69 MemoryChunkLiveBytesMap(const MemoryChunkLiveBytesMap&) = delete;
70 MemoryChunkLiveBytesMap& operator=(const MemoryChunkLiveBytesMap&) = delete;
71
72 void Increment(MutablePageMetadata* page, intptr_t live);
73 void FlushAndClear();
74 void Erase(MutablePageMetadata* page);
75
76#if DEBUG
77 void AssertEmpty();
78#endif // DEBUG
79
80 private:
81 struct Entry {
82 MutablePageMetadata* page;
83 intptr_t live_bytes;
84 };
85
86 static constexpr size_t kTableSize = 32;
87
88 Entry& lookup_entry(MutablePageMetadata* page) {
89 size_t hash = std::hash<MutablePageMetadata*>{}(page);
90 static_assert(base::bits::IsPowerOfTwo(kTableSize));
91 return map_[hash % kTableSize];
92 }
93
94 std::array<Entry, kTableSize> map_ = {};
95};
96
97void MemoryChunkLiveBytesMap::Increment(MutablePageMetadata* page,
98 intptr_t bytes) {
99 Entry& entry = lookup_entry(page);
100 if (entry.page == page) {
101 entry.live_bytes += bytes;
102 } else if (entry.page == nullptr) {
103 entry.page = page;
104 entry.live_bytes = bytes;
105 } else {
106 // Write back the existing entry.
107 entry.page->IncrementLiveBytesAtomically(entry.live_bytes);
108 // Now just replace it with the new entry.
109 entry.page = page;
110 entry.live_bytes = bytes;
111 }
112}
113
114void MemoryChunkLiveBytesMap::Erase(MutablePageMetadata* page) {
115 Entry& entry = lookup_entry(page);
116 if (entry.page == page) {
117 entry.page = nullptr;
118 entry.live_bytes = 0;
119 }
120}
121
122void MemoryChunkLiveBytesMap::FlushAndClear() {
123 for (auto& entry : map_) {
124 if (entry.page) {
125 entry.page->IncrementLiveBytesAtomically(entry.live_bytes);
126 entry.page = nullptr;
127 entry.live_bytes = 0;
128 }
129 }
130}
131
132#if DEBUG
133void MemoryChunkLiveBytesMap::AssertEmpty() {
134 for (auto& entry : map_) {
135 DCHECK_NULL(entry.page);
136 DCHECK_EQ(entry.live_bytes, 0);
137 }
138}
139#endif // DEBUG
140
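// Per-task map from a page to the typed slots recorded on it while marking;
// the buffered slots are merged into the page's OLD_TO_OLD remembered set once
// the per-task chunk data is flushed after concurrent marking.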
141using MemoryChunkTypedSlotsMap =
142 std::unordered_map<MutablePageMetadata*,
143 std::unique_ptr<TypedSlots>>;
144
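// Visitor used by the concurrent (major) marker. Live-byte updates and typed
// slots are buffered in the per-task maps above instead of being applied to
// the page metadata directly from the marking task.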
145class ConcurrentMarkingVisitor
146 : public FullMarkingVisitorBase<ConcurrentMarkingVisitor> {
147 public:
148 ConcurrentMarkingVisitor(
149 MarkingWorklists::Local* local_marking_worklists,
150 WeakObjects::Local* local_weak_objects, Heap* heap,
151 unsigned mark_compact_epoch, base::EnumSet<CodeFlushMode> code_flush_mode,
152 bool should_keep_ages_unchanged, uint16_t code_flushing_increase,
153 MemoryChunkLiveBytesMap* memory_chunk_live_bytes_map,
154 MemoryChunkTypedSlotsMap* memory_chunk_typed_slots_map)
155 : FullMarkingVisitorBase(local_marking_worklists, local_weak_objects,
156 heap, mark_compact_epoch, code_flush_mode,
157 should_keep_ages_unchanged,
158 code_flushing_increase),
159 memory_chunk_live_bytes_map_(memory_chunk_live_bytes_map),
160 memory_chunk_typed_slots_map_(memory_chunk_typed_slots_map) {}
161
164
165 static constexpr bool EnableConcurrentVisitation() { return true; }
166
167 // Implements ephemeron semantics: Marks value if key is already reachable.
168 // Returns true if value was actually marked.
169 bool ProcessEphemeron(Tagged<HeapObject> key, Tagged<HeapObject> value) {
170 if (marking_state()->IsMarked(key)) {
171 const auto target_worklist =
172 MarkingHelper::ShouldMarkObject(heap_, value);
173 DCHECK(target_worklist.has_value());
174 if (MarkObject(key, value, target_worklist.value())) {
175 return true;
176 }
177 } else if (marking_state()->IsUnmarked(value)) {
178 local_weak_objects_->next_ephemerons_local.Push(Ephemeron{key, value});
179 }
180 return false;
181 }
182
183 template <typename TSlot>
184 void RecordSlot(Tagged<HeapObject> object, TSlot slot,
185 Tagged<HeapObject> target) {
186 MarkCompactCollector::RecordSlot(object, slot, target);
187 }
188
189 V8_INLINE void IncrementLiveBytesCached(MutablePageMetadata* chunk,
190 intptr_t by) {
191 DCHECK_IMPLIES(V8_COMPRESS_POINTERS_8GB_BOOL, IsAligned(by, kObjectAlignment8GbHeap));
192 memory_chunk_live_bytes_map_->Increment(chunk, by);
193 }
194
195 private:
196 void RecordRelocSlot(Tagged<InstructionStream> host, RelocInfo* rinfo,
197 Tagged<HeapObject> target) {
198 if (!MarkCompactCollector::ShouldRecordRelocSlot(host, rinfo, target)) {
199 return;
200 }
201
202 MarkCompactCollector::RecordRelocSlotInfo info =
203 MarkCompactCollector::ProcessRelocInfo(host, rinfo, target);
204
205 auto& typed_slots = (*memory_chunk_typed_slots_map_)[info.page_metadata];
206
207 if (!typed_slots) {
208 typed_slots.reset(new TypedSlots());
209 }
210
211 typed_slots->Insert(info.slot_type, info.offset);
212 }
213
214 MemoryChunkLiveBytesMap* memory_chunk_live_bytes_map_;
215 MemoryChunkTypedSlotsMap* memory_chunk_typed_slots_map_;
216
218};
219
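// Per-task scratch state: cached live bytes, buffered typed slots, per-context
// size statistics, and pretenuring feedback. All of it is merged back into the
// heap's data structures once concurrent marking is flushed.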
220struct ConcurrentMarking::TaskState {
221 size_t marked_bytes = 0;
222 MemoryChunkLiveBytesMap memory_chunk_live_bytes_map;
223 MemoryChunkTypedSlotsMap memory_chunk_typed_slots_map;
224 NativeContextStats native_context_stats;
225 PretenuringHandler::PretenuringFeedbackMap local_pretenuring_feedback{
226 PretenuringHandler::kInitialFeedbackCapacity};
227};
228
229class ConcurrentMarking::JobTaskMajor : public v8::JobTask {
230 public:
231 JobTaskMajor(ConcurrentMarking* concurrent_marking,
232 unsigned mark_compact_epoch,
233 base::EnumSet<CodeFlushMode> code_flush_mode,
234 bool should_keep_ages_unchanged)
235 : concurrent_marking_(concurrent_marking),
236 mark_compact_epoch_(mark_compact_epoch),
237 code_flush_mode_(code_flush_mode),
238 should_keep_ages_unchanged_(should_keep_ages_unchanged),
239 trace_id_(reinterpret_cast<uint64_t>(concurrent_marking) ^
240 concurrent_marking->heap_->tracer()->CurrentEpoch(
241 GCTracer::Scope::MC_BACKGROUND_MARKING)) {}
242
243 ~JobTaskMajor() override = default;
244 JobTaskMajor(const JobTaskMajor&) = delete;
245 JobTaskMajor& operator=(const JobTaskMajor&) = delete;
246
247 // v8::JobTask overrides.
248 void Run(JobDelegate* delegate) override {
249 // Set the current isolate such that trusted pointer tables etc are
250 // available and the cage base is set correctly for multi-cage mode.
252
253 if (delegate->IsJoiningThread()) {
254 // TRACE_GC is not needed here because the caller opens the right scope.
255 concurrent_marking_->RunMajor(delegate, code_flush_mode_,
256 mark_compact_epoch_,
257 should_keep_ages_unchanged_);
258 } else {
259 TRACE_GC_EPOCH_WITH_FLOW(concurrent_marking_->heap_->tracer(),
260 GCTracer::Scope::MC_BACKGROUND_MARKING,
261 ThreadKind::kBackground, trace_id_,
262 TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);
263 concurrent_marking_->RunMajor(delegate, code_flush_mode_,
264 mark_compact_epoch_,
265 should_keep_ages_unchanged_);
266 }
267 }
268
269 size_t GetMaxConcurrency(size_t worker_count) const override {
270 return concurrent_marking_->GetMajorMaxConcurrency(worker_count);
271 }
272
273 uint64_t trace_id() const { return trace_id_; }
274
275 private:
276 ConcurrentMarking* concurrent_marking_;
277 const unsigned mark_compact_epoch_;
278 const base::EnumSet<CodeFlushMode> code_flush_mode_;
279 const bool should_keep_ages_unchanged_;
280 const uint64_t trace_id_;
281};
282
283class ConcurrentMarking::JobTaskMinor : public v8::JobTask {
284 public:
285 explicit JobTaskMinor(ConcurrentMarking* concurrent_marking)
286 : concurrent_marking_(concurrent_marking),
287 trace_id_(reinterpret_cast<uint64_t>(concurrent_marking) ^
288 concurrent_marking->heap_->tracer()->CurrentEpoch(
289 GCTracer::Scope::MINOR_MS_MARK_PARALLEL)) {}
290
291 ~JobTaskMinor() override = default;
292 JobTaskMinor(const JobTaskMinor&) = delete;
293 JobTaskMinor& operator=(const JobTaskMinor&) = delete;
294
295 // v8::JobTask overrides.
296 void Run(JobDelegate* delegate) override {
297 // Set the current isolate such that trusted pointer tables etc are
298 // available and the cage base is set correctly for multi-cage mode.
300
301 if (delegate->IsJoiningThread()) {
302 TRACE_GC_WITH_FLOW(concurrent_marking_->heap_->tracer(),
303 GCTracer::Scope::MINOR_MS_MARK_PARALLEL, trace_id_,
304 TRACE_EVENT_FLAG_FLOW_IN);
305 // TRACE_GC is not needed here because the caller opens the right scope.
306 concurrent_marking_->RunMinor(delegate);
307 } else {
308 TRACE_GC_EPOCH_WITH_FLOW(concurrent_marking_->heap_->tracer(),
309 GCTracer::Scope::MINOR_MS_BACKGROUND_MARKING,
310 ThreadKind::kBackground, trace_id_,
311 TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);
312 concurrent_marking_->RunMinor(delegate);
313 }
314 }
315
316 size_t GetMaxConcurrency(size_t worker_count) const override {
317 return concurrent_marking_->GetMinorMaxConcurrency(worker_count);
318 }
319
320 uint64_t trace_id() const { return trace_id_; }
321
322 private:
323 ConcurrentMarking* concurrent_marking_;
324 const uint64_t trace_id_;
325};
326
327ConcurrentMarking::ConcurrentMarking(Heap* heap, WeakObjects* weak_objects)
328 : heap_(heap), weak_objects_(weak_objects) {
329#ifndef V8_ATOMIC_OBJECT_FIELD_WRITES
330 // Concurrent marking requires atomic object field writes.
331 CHECK(!v8_flags.concurrent_marking);
332#endif
333 int max_tasks;
334 if (v8_flags.concurrent_marking_max_worker_num == 0) {
335 max_tasks = V8::GetCurrentPlatform()->NumberOfWorkerThreads();
336 } else {
337 max_tasks = v8_flags.concurrent_marking_max_worker_num;
338 }
339
340 task_state_.reserve(max_tasks + 1);
341 for (int i = 0; i <= max_tasks; ++i) {
342 task_state_.emplace_back(std::make_unique<TaskState>());
343 }
344}
345
346ConcurrentMarking::~ConcurrentMarking() = default;
347
348void ConcurrentMarking::RunMajor(JobDelegate* delegate,
349 base::EnumSet<CodeFlushMode> code_flush_mode,
350 unsigned mark_compact_epoch,
351 bool should_keep_ages_unchanged) {
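// The main marking loop below checks for preemption by the job scheduler
// after kBytesUntilInterruptCheck marked bytes or kObjectsUntilInterruptCheck
// visited objects, whichever happens first.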
352 size_t kBytesUntilInterruptCheck = 64 * KB;
353 int kObjectsUntilInterruptCheck = 1000;
354 uint8_t task_id = delegate->GetTaskId() + 1;
355 TaskState* task_state = task_state_[task_id].get();
356 auto* cpp_heap = CppHeap::From(heap_->cpp_heap());
357 MarkingWorklists::Local local_marking_worklists(
358 marking_worklists_, cpp_heap
359 ? cpp_heap->CreateCppMarkingState()
360 : MarkingWorklists::Local::kNoCppMarkingState);
361 WeakObjects::Local local_weak_objects(weak_objects_);
362 ConcurrentMarkingVisitor visitor(
363 &local_marking_worklists, &local_weak_objects, heap_, mark_compact_epoch,
364 code_flush_mode, should_keep_ages_unchanged,
365 heap_->tracer()->CodeFlushingIncrease(),
366 &task_state->memory_chunk_live_bytes_map,
367 &task_state->memory_chunk_typed_slots_map);
368 NativeContextInferrer native_context_inferrer;
369 NativeContextStats& native_context_stats = task_state->native_context_stats;
370 double time_ms;
371 size_t marked_bytes = 0;
372 Isolate* isolate = heap_->isolate();
373 if (v8_flags.trace_concurrent_marking) {
374 isolate->PrintWithTimestamp("Starting major concurrent marking task %d\n",
375 task_id);
376 }
377 bool another_ephemeron_iteration = false;
378 MainAllocator* const new_space_allocator =
379 heap_->use_new_space() ? heap_->allocator()->new_space_allocator()
380 : nullptr;
381
382 {
383 TimedScope scope(&time_ms);
384
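// Drain the ephemerons discovered so far: values whose keys are already
// marked get marked here; ephemerons whose key (and value) is still unmarked
// are pushed to the next_ephemerons worklist for a later iteration.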
385 {
386 Ephemeron ephemeron;
387 while (local_weak_objects.current_ephemerons_local.Pop(&ephemeron)) {
388 if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
389 another_ephemeron_iteration = true;
390 }
391 }
392 }
393 PtrComprCageBase cage_base(isolate);
394 bool is_per_context_mode = local_marking_worklists.IsPerContextMode();
395 bool done = false;
396 while (!done) {
397 size_t current_marked_bytes = 0;
398 int objects_processed = 0;
399 while (current_marked_bytes < kBytesUntilInterruptCheck &&
400 objects_processed < kObjectsUntilInterruptCheck) {
401 Tagged<HeapObject> object;
402 if (!local_marking_worklists.Pop(&object)) {
403 done = true;
404 break;
405 }
408 objects_processed++;
409
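// Objects inside the new-space linear allocation buffer, as well as the
// pending new large object, are not visited here; they are pushed to the
// on-hold worklist instead.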
410 Address new_space_top = kNullAddress;
411 Address new_space_limit = kNullAddress;
412 Address new_large_object = kNullAddress;
413
414 if (new_space_allocator) {
415 // The order of the two loads is important.
416 new_space_top = new_space_allocator->original_top_acquire();
417 new_space_limit = new_space_allocator->original_limit_relaxed();
418 }
419
420 if (heap_->new_lo_space()) {
421 new_large_object = heap_->new_lo_space()->pending_object();
422 }
423
424 Address addr = object.address();
425
426 if ((new_space_top <= addr && addr < new_space_limit) ||
427 addr == new_large_object) {
428 local_marking_worklists.PushOnHold(object);
429 } else {
430 Tagged<Map> map = object->map(cage_base, kAcquireLoad);
431 // The marking worklist should never contain filler objects.
432 CHECK(!IsFreeSpaceOrFillerMap(map));
433 if (is_per_context_mode) {
434 Address context;
435 if (native_context_inferrer.Infer(cage_base, map, object,
436 &context)) {
437 local_marking_worklists.SwitchToContext(context);
438 }
439 }
440 const auto visited_size = visitor.Visit(map, object);
441 visitor.IncrementLiveBytesCached(
442 MutablePageMetadata::cast(
443 MemoryChunkMetadata::FromHeapObject(object)),
444 ALIGN_TO_ALLOCATION_ALIGNMENT(visited_size));
445 if (is_per_context_mode) {
446 native_context_stats.IncrementSize(
447 local_marking_worklists.Context(), map, object, visited_size);
448 }
449 current_marked_bytes += visited_size;
450 }
451 }
452 if (objects_processed > 0) another_ephemeron_iteration = true;
453 marked_bytes += current_marked_bytes;
454 base::AsAtomicWord::Relaxed_Store(&task_state->marked_bytes,
455 marked_bytes);
456 if (delegate->ShouldYield()) {
457 TRACE_GC_NOTE("ConcurrentMarking::RunMajor Preempted");
458 break;
459 }
460 }
461
462 local_marking_worklists.Publish();
463 local_weak_objects.Publish();
464 base::AsAtomicWord::Relaxed_Store(&task_state->marked_bytes, 0);
465 total_marked_bytes_ += marked_bytes;
466
467 if (another_ephemeron_iteration) {
468 set_another_ephemeron_iteration(true);
469 }
470 }
471 if (v8_flags.trace_concurrent_marking) {
472 heap_->isolate()->PrintWithTimestamp(
473 "Major task %d concurrently marked %dKB in %.2fms\n", task_id,
474 static_cast<int>(marked_bytes / KB), time_ms);
475 }
476
477 DCHECK(task_state->local_pretenuring_feedback.empty());
478}
479
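// Tracks the number of currently active minor marking tasks so that the last
// task to run out of work can request finalization of the minor GC.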
480class ConcurrentMarking::MinorMarkingState {
481 public:
483
484 V8_INLINE void MarkerStarted() {
485 active_markers_.fetch_add(1, std::memory_order_relaxed);
486 }
487
488 // Returns true if all markers are done.
489 V8_INLINE bool MarkerDone() {
490 return active_markers_.fetch_sub(1, std::memory_order_relaxed) == 1;
491 }
492
493 private:
494 std::atomic<int> active_markers_{0};
495};
496
497namespace {
498
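// Returns true if the object lies in the new-space linear allocation buffer
// currently in use or is the pending new large object; such objects are
// deferred via the on-hold worklist rather than visited concurrently.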
499V8_INLINE bool IsYoungObjectInLab(MainAllocator* new_space_allocator,
500 NewLargeObjectSpace* new_lo_space,
501 Tagged<HeapObject> heap_object) {
502 // The order of the two loads is important.
503 Address new_space_top = new_space_allocator->original_top_acquire();
504 Address new_space_limit = new_space_allocator->original_limit_relaxed();
505 Address new_large_object = new_lo_space->pending_object();
506
507 Address addr = heap_object.address();
508
509 return (new_space_top <= addr && addr < new_space_limit) ||
510 addr == new_large_object;
511}
512
513} // namespace
514
515template <YoungGenerationMarkingVisitationMode marking_mode>
516size_t ConcurrentMarking::RunMinorImpl(JobDelegate* delegate,
517 TaskState* task_state) {
518 static constexpr size_t kBytesUntilInterruptCheck = 64 * KB;
519 static constexpr int kObjectsUntilInterruptCheck = 1000;
520 size_t marked_bytes = 0;
521 size_t current_marked_bytes = 0;
522 int objects_processed = 0;
523 YoungGenerationMarkingVisitor<marking_mode> visitor(
524 heap_, &task_state->local_pretenuring_feedback);
525 YoungGenerationRememberedSetsMarkingWorklist::Local remembered_sets(
526 heap_->minor_mark_sweep_collector()->remembered_sets_marking_handler());
527 auto& marking_worklists_local = visitor.marking_worklists_local();
528 Isolate* isolate = heap_->isolate();
529 minor_marking_state_->MarkerStarted();
530 MainAllocator* const new_space_allocator =
531 heap_->allocator()->new_space_allocator();
532 NewLargeObjectSpace* const new_lo_space = heap_->new_lo_space();
533
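// Drain the marking worklist and the young-generation remembered-set items,
// yielding to the job scheduler after a bounded number of marked bytes or
// visited objects.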
534 do {
535 if (delegate->IsJoiningThread()) {
536 marking_worklists_local.MergeOnHold();
537 }
538 Tagged<HeapObject> heap_object;
539 TRACE_GC_EPOCH(heap_->tracer(),
540 GCTracer::Scope::MINOR_MS_BACKGROUND_MARKING_CLOSURE,
541 ThreadKind::kBackground);
542 while (marking_worklists_local.Pop(&heap_object)) {
543 if (IsYoungObjectInLab(new_space_allocator, new_lo_space, heap_object)) {
544 visitor.marking_worklists_local().PushOnHold(heap_object);
545 } else {
546 Tagged<Map> map = heap_object->map(isolate);
547 const auto visited_size = visitor.Visit(map, heap_object);
548 if (visited_size) {
549 current_marked_bytes += visited_size;
552 ALIGN_TO_ALLOCATION_ALIGNMENT(visited_size));
553 }
554 }
555
556 if (current_marked_bytes >= kBytesUntilInterruptCheck ||
557 ++objects_processed >= kObjectsUntilInterruptCheck) {
558 marked_bytes += current_marked_bytes;
559 if (delegate->ShouldYield()) {
560 TRACE_GC_NOTE("ConcurrentMarking::RunMinor Preempted");
561 minor_marking_state_->MarkerDone();
562 return marked_bytes;
563 }
564 objects_processed = 0;
565 current_marked_bytes = 0;
566 }
567 }
568 } while (remembered_sets.ProcessNextItem(&visitor));
569 if (minor_marking_state_->MarkerDone()) {
570 // This is the last active marker and it ran out of work. Request GC
571 // finalization.
572 heap_->minor_mark_sweep_collector()->RequestGC();
573 }
574 return marked_bytes + current_marked_bytes;
575}
576
577void ConcurrentMarking::RunMinor(JobDelegate* delegate) {
580 uint8_t task_id = delegate->GetTaskId() + 1;
581 DCHECK_LT(task_id, task_state_.size());
582 TaskState* task_state = task_state_[task_id].get();
583 double time_ms;
584 size_t marked_bytes = 0;
585 Isolate* isolate = heap_->isolate();
586 if (v8_flags.trace_concurrent_marking) {
587 isolate->PrintWithTimestamp("Starting minor concurrent marking task %d\n",
588 task_id);
589 }
590
591 {
592 TimedScope scope(&time_ms);
594 // This gets a lower bound for estimated concurrency as we may have marked
595 // most of the graph concurrently already and may not be using parallelism
596 // as much.
597 estimate_concurrency_.fetch_add(1, std::memory_order_relaxed);
598 marked_bytes =
599 RunMinorImpl<YoungGenerationMarkingVisitationMode::kParallel>(
600 delegate, task_state);
601 } else {
602 marked_bytes =
603 RunMinorImpl<YoungGenerationMarkingVisitationMode::kConcurrent>(
604 delegate, task_state);
605 }
606 }
607
608 if (v8_flags.trace_concurrent_marking) {
609 heap_->isolate()->PrintWithTimestamp(
610 "Minor task %d concurrently marked %dKB in %.2fms\n", task_id,
611 static_cast<int>(marked_bytes / KB), time_ms);
612 }
613}
614
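// The requested concurrency is the number of pending work items (marking
// worklist entries and ephemerons) plus the already active workers, capped by
// the number of task states; it is clamped to a single worker when the heap
// should optimize for battery.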
615size_t ConcurrentMarking::GetMajorMaxConcurrency(size_t worker_count) {
616 size_t marking_items = marking_worklists_->shared()->Size();
617 marking_items += marking_worklists_->other()->Size();
618 for (auto& worklist : marking_worklists_->context_worklists()) {
619 marking_items += worklist.worklist->Size();
620 }
621 const size_t work = std::max<size_t>(
622 {marking_items, weak_objects_->current_ephemerons.Size()});
623 size_t jobs = worker_count + work;
624 jobs = std::min<size_t>(task_state_.size() - 1, jobs);
625 if (heap_->ShouldOptimizeForBattery()) {
626 return std::min<size_t>(jobs, 1);
627 }
628 return jobs;
629}
630
631size_t ConcurrentMarking::GetMinorMaxConcurrency(size_t worker_count) {
632 const size_t marking_items = marking_worklists_->shared()->Size() +
633 heap_->minor_mark_sweep_collector()
634 ->remembered_sets_marking_handler()
635 ->RemainingRememberedSetsMarkingItems();
638 size_t jobs = worker_count + marking_items;
639 jobs = std::min<size_t>(task_state_.size() - 1, jobs);
640 if (heap_->ShouldOptimizeForBattery()) {
641 return std::min<size_t>(jobs, 1);
642 }
643 return jobs;
644}
645
646void ConcurrentMarking::TryScheduleJob(GarbageCollector garbage_collector,
647 TaskPriority priority) {
648 DCHECK(v8_flags.parallel_marking || v8_flags.concurrent_marking ||
649 v8_flags.concurrent_minor_ms_marking);
651 DCHECK(IsStopped());
652
656 return;
657 }
660 return;
661 }
662
663 if (v8_flags.concurrent_marking_high_priority_threads) {
664 priority = TaskPriority::kUserBlocking;
665 }
666
667#if DEBUG
668 if (garbage_collector_.has_value()) {
669 // Concurrent marking resumes. In this case the used collectors need to
670 // match.
672 } else {
673 // If a new concurrent marking cycle starts, TaskState should not contain
674 // any data.
675 for (auto& task_state : task_state_) {
676 task_state->memory_chunk_live_bytes_map.AssertEmpty();
677 DCHECK(task_state->memory_chunk_typed_slots_map.empty());
678 DCHECK(task_state->native_context_stats.Empty());
679 DCHECK(task_state->local_pretenuring_feedback.empty());
680 DCHECK_EQ(0, task_state->marked_bytes);
681 }
682 DCHECK_EQ(0, total_marked_bytes_);
683 }
684
685 if (garbage_collector == GarbageCollector::MARK_COMPACTOR) {
686 // The full GC never makes use of local_pretenuring_feedback. It needs to be
687 // empty even if resuming concurrent marking.
688 for (auto& task_state : task_state_) {
689 DCHECK(task_state->local_pretenuring_feedback.empty());
690 }
691 }
692
693 if (minor_marking_state_) {
694 // Minor marking state can only be alive if the concurrent marker was
695 // previously paused.
696 DCHECK(garbage_collector_.has_value());
699 }
700#endif // DEBUG
701
702 garbage_collector_ = garbage_collector;
703 if (garbage_collector == GarbageCollector::MARK_COMPACTOR) {
704 heap_->mark_compact_collector()->local_marking_worklists()->Publish();
705 marking_worklists_ = heap_->mark_compact_collector()->marking_worklists();
706 auto job = std::make_unique<JobTaskMajor>(
707 this, heap_->mark_compact_collector()->epoch(),
708 heap_->mark_compact_collector()->code_flush_mode(),
709 heap_->ShouldCurrentGCKeepAgesUnchanged());
710 current_job_trace_id_.emplace(job->trace_id());
711 TRACE_GC_NOTE_WITH_FLOW("Major concurrent marking started", job->trace_id(),
712 TRACE_EVENT_FLAG_FLOW_OUT);
713 job_handle_ = V8::GetCurrentPlatform()->PostJob(priority, std::move(job));
714 } else {
716 minor_marking_state_ = std::make_unique<MinorMarkingState>();
717 heap_->minor_mark_sweep_collector()->local_marking_worklists()->Publish();
718 marking_worklists_ =
719 heap_->minor_mark_sweep_collector()->marking_worklists();
720 auto job = std::make_unique<JobTaskMinor>(this);
721 current_job_trace_id_.emplace(job->trace_id());
722 TRACE_GC_NOTE_WITH_FLOW("Minor concurrent marking started", job->trace_id(),
723 TRACE_EVENT_FLAG_FLOW_OUT);
724 job_handle_ = V8::GetCurrentPlatform()->PostJob(priority, std::move(job));
725 }
726 DCHECK(job_handle_->IsValid());
727}
728
729bool ConcurrentMarking::IsWorkLeft() const {
730 DCHECK(garbage_collector_.has_value());
731 if (garbage_collector_ == GarbageCollector::MARK_COMPACTOR) {
732 return !marking_worklists_->shared()->IsEmpty() ||
733 !weak_objects_->current_ephemerons.IsEmpty();
734 }
736 return !marking_worklists_->shared()->IsEmpty() ||
737 (heap_->minor_mark_sweep_collector()
738 ->remembered_sets_marking_handler()
739 ->RemainingRememberedSetsMarkingItems() > 0);
740}
741
742void ConcurrentMarking::RescheduleJobIfNeeded(
743 GarbageCollector garbage_collector, TaskPriority priority) {
744 DCHECK(v8_flags.parallel_marking || v8_flags.concurrent_marking ||
745 v8_flags.concurrent_minor_ms_marking);
746
749 return;
750 }
751
754 return;
755 }
756
757 if (heap_->IsTearingDown()) return;
758
759 if (IsStopped()) {
760 // This DCHECK is for the case that concurrent marking was paused.
761 DCHECK_IMPLIES(garbage_collector_.has_value(),
762 garbage_collector_ == garbage_collector);
763 TryScheduleJob(garbage_collector, priority);
764 } else {
765 DCHECK(garbage_collector_.has_value());
766 DCHECK_EQ(garbage_collector, garbage_collector_.value());
767 if (garbage_collector == GarbageCollector::MARK_COMPACTOR) {
768 heap_->mark_compact_collector()->local_marking_worklists()->Publish();
769 } else {
770 heap_->minor_mark_sweep_collector()->local_marking_worklists()->Publish();
771 }
772 if (!IsWorkLeft()) return;
773 if (priority != TaskPriority::kUserVisible)
774 job_handle_->UpdatePriority(priority);
775 DCHECK(current_job_trace_id_.has_value());
776 TRACE_GC_NOTE_WITH_FLOW(
777 garbage_collector == GarbageCollector::MARK_COMPACTOR
778 ? "Major concurrent marking rescheduled"
779 : "Minor concurrent marking rescheduled",
780 current_job_trace_id_.value(),
781 TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);
782 job_handle_->NotifyConcurrencyIncrease();
783 }
784}
785
786void ConcurrentMarking::FlushPretenuringFeedback() {
787 PretenuringHandler* pretenuring_handler = heap_->pretenuring_handler();
788 for (auto& task_state : task_state_) {
789 pretenuring_handler->MergeAllocationSitePretenuringFeedback(
790 task_state->local_pretenuring_feedback);
791 task_state->local_pretenuring_feedback.clear();
792 }
793}
794
795void ConcurrentMarking::Join() {
796 DCHECK(v8_flags.parallel_marking || v8_flags.concurrent_marking ||
797 v8_flags.concurrent_minor_ms_marking);
801 if (!job_handle_ || !job_handle_->IsValid()) return;
802 job_handle_->Join();
803 current_job_trace_id_.reset();
804 garbage_collector_.reset();
805 minor_marking_state_.reset();
806}
807
809 if (!job_handle_ || !job_handle_->IsValid()) return;
810 job_handle_->Join();
811}
812
813bool ConcurrentMarking::Pause() {
814 DCHECK(v8_flags.parallel_marking || v8_flags.concurrent_marking);
815 if (!job_handle_ || !job_handle_->IsValid()) return false;
816
817 job_handle_->Cancel();
818 DCHECK(current_job_trace_id_.has_value());
819 TRACE_GC_NOTE_WITH_FLOW(garbage_collector_ == GarbageCollector::MARK_COMPACTOR
820 ? "Major concurrent marking paused"
821 : "Minor concurrent marking paused",
822 current_job_trace_id_.value(),
823 TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);
824 return true;
825}
826
827bool ConcurrentMarking::IsStopped() {
828 if (!v8_flags.concurrent_marking && !v8_flags.parallel_marking) return true;
829
830 return !job_handle_ || !job_handle_->IsValid();
831}
832
834 DCHECK(garbage_collector_.has_value());
835 DCHECK(current_job_trace_id_.has_value());
836 TRACE_GC_NOTE_WITH_FLOW(garbage_collector_ == GarbageCollector::MARK_COMPACTOR
837 ? "Major concurrent marking resumed"
838 : "Minor concurrent marking resumed",
839 current_job_trace_id_.value(),
840 TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);
842}
843
844void ConcurrentMarking::FlushNativeContexts(NativeContextStats* main_stats) {
845 DCHECK(!job_handle_ || !job_handle_->IsValid());
846 for (size_t i = 1; i < task_state_.size(); i++) {
847 main_stats->Merge(task_state_[i]->native_context_stats);
848 task_state_[i]->native_context_stats.Clear();
849 }
850}
851
852void ConcurrentMarking::FlushMemoryChunkData() {
853 DCHECK(!job_handle_ || !job_handle_->IsValid());
854 for (auto& task_state : task_state_) {
855 task_state->memory_chunk_live_bytes_map.FlushAndClear();
856 for (auto&& [page, typed_slots] :
857 task_state->memory_chunk_typed_slots_map) {
858 RememberedSet<OLD_TO_OLD>::MergeTyped(page, std::move(typed_slots));
859 }
860 task_state->memory_chunk_typed_slots_map.clear();
861 task_state->marked_bytes = 0;
862 }
863 total_marked_bytes_ = 0;
864}
865
866size_t ConcurrentMarking::TotalMarkedBytes() {
867 size_t result = 0;
868 for (size_t i = 1; i < task_state_.size(); i++) {
869 result +=
870 base::AsAtomicWord::Relaxed_Load(&task_state_[i]->marked_bytes);
871 }
872 result += total_marked_bytes_;
873 return result;
874}
875
876} // namespace internal
877} // namespace v8