#include <unordered_map>
// MemoryChunkLiveBytesMap::lookup_entry() (fragment): pages hash into a
// fixed-size, per-task table of cached live-byte counts.
  size_t hash = std::hash<MutablePageMetadata*>{}(page);
  // ...

  std::array<Entry, kTableSize> map_ = {};

// MemoryChunkLiveBytesMap::Increment() (fragment):
  if (entry.page == page) {
    // ...
  } else if (entry.page == nullptr) {
    // ...
  }

// MemoryChunkLiveBytesMap::Erase() (fragment):
  if (entry.page == page) {
    entry.page = nullptr;
    // ...
  }

// MemoryChunkLiveBytesMap::FlushAndClear() (fragment):
  for (auto& entry : map_) {
    // ...
    entry.page->IncrementLiveBytesAtomically(entry.live_bytes);
    entry.page = nullptr;
    entry.live_bytes = 0;
  }
void MemoryChunkLiveBytesMap::AssertEmpty() {
  for (auto& entry : map_) {
    // ... (checks that no entry is still populated)
  }
}
using MemoryChunkTypedSlotsMap =
    std::unordered_map<MutablePageMetadata*, std::unique_ptr<TypedSlots>>;
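// --- Illustrative sketch (not the V8 implementation) ----------------------
// The fragments above come from a per-task cache that buffers live-byte
// counts per page and flushes them with one atomic add per page. The
// standalone sketch below shows the same idea with a small direct-mapped
// table; Page and LiveBytesCache are invented stand-ins for V8's
// MutablePageMetadata/MemoryChunkLiveBytesMap, and the eviction-on-collision
// policy is an assumption.
#include <array>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <functional>

struct Page {
  std::atomic<intptr_t> live_bytes{0};
  void IncrementLiveBytesAtomically(intptr_t diff) {
    live_bytes.fetch_add(diff, std::memory_order_relaxed);
  }
};

class LiveBytesCache {
 public:
  void Increment(Page* page, intptr_t live) {
    Entry& entry = lookup_entry(page);
    if (entry.page == page) {
      entry.live_bytes += live;  // Hit: accumulate locally, no atomics.
    } else if (entry.page == nullptr) {
      entry.page = page;         // Empty slot: claim it.
      entry.live_bytes = live;
    } else {
      // Collision with another page: flush the old entry, reuse the slot.
      entry.page->IncrementLiveBytesAtomically(entry.live_bytes);
      entry.page = page;
      entry.live_bytes = live;
    }
  }

  void FlushAndClear() {
    for (auto& entry : map_) {
      if (entry.page == nullptr) continue;
      entry.page->IncrementLiveBytesAtomically(entry.live_bytes);
      entry.page = nullptr;
      entry.live_bytes = 0;
    }
  }

 private:
  struct Entry {
    Page* page = nullptr;
    intptr_t live_bytes = 0;
  };
  static constexpr size_t kTableSize = 32;

  Entry& lookup_entry(Page* page) {
    size_t hash = std::hash<Page*>{}(page);
    return map_[hash % kTableSize];  // Single-slot lookup, no probing.
  }

  std::array<Entry, kTableSize> map_ = {};
};
// ---------------------------------------------------------------------------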
// ConcurrentMarkingVisitor constructor (fragment): forwards the marking
// configuration to the base visitor and keeps pointers to the per-task
// live-bytes and typed-slots buffers.
    bool should_keep_ages_unchanged, uint16_t code_flushing_increase,
    // ...
          heap, mark_compact_epoch, code_flush_mode, should_keep_ages_unchanged,
          code_flushing_increase),
// ...

  const auto target_worklist = /* ... */;
  DCHECK(target_worklist.has_value());
// ...

// RecordSlot() (fragment):
  template <typename TSlot>
// ...

// RecordRelocSlot() (fragment): typed slots are buffered per page and merged
// into the page's remembered set when the task's data is flushed.
  auto& typed_slots = (*memory_chunk_typed_slots_map_)[info.page_metadata];
  // ...
  typed_slots->Insert(info.slot_type, info.offset);
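// --- Illustrative sketch (not the V8 implementation) ----------------------
// RecordRelocSlot above appends into a per-task map keyed by page, where
// operator[] default-constructs the unique_ptr, so the buffer is created
// lazily on the first slot recorded for a page. The types below (PageKey,
// TypedSlotBuffer, RecordTypedSlot) are invented for the example.
#include <cstdint>
#include <memory>
#include <unordered_map>
#include <utility>
#include <vector>

struct PageKey {};  // Stand-in for a page identifier.

struct TypedSlotBuffer {
  void Insert(int slot_type, uint32_t offset) {
    slots.push_back({slot_type, offset});
  }
  std::vector<std::pair<int, uint32_t>> slots;
};

using TypedSlotsMap =
    std::unordered_map<PageKey*, std::unique_ptr<TypedSlotBuffer>>;

inline void RecordTypedSlot(TypedSlotsMap& map, PageKey* page, int slot_type,
                            uint32_t offset) {
  auto& buffer = map[page];  // Default-constructs a null unique_ptr.
  if (!buffer) buffer = std::make_unique<TypedSlotBuffer>();
  buffer->Insert(slot_type, offset);  // Same shape as the fragment above.
}
// ---------------------------------------------------------------------------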
// JobTaskMajor constructor (fragment):
    unsigned mark_compact_epoch,
    // ...
    bool should_keep_ages_unchanged)
    // ...
      trace_id_(reinterpret_cast<uint64_t>(concurrent_marking) ^
                concurrent_marking->heap_->tracer()->CurrentEpoch(
                    /* ... */))
// ...

// JobTaskMajor::Run() (fragment):
        GCTracer::Scope::MC_BACKGROUND_MARKING,
// ...

// JobTaskMinor constructor (fragment):
      trace_id_(reinterpret_cast<uint64_t>(concurrent_marking) ^
                concurrent_marking->heap_->tracer()->CurrentEpoch(
                    /* ... */))
// ...

// JobTaskMinor::Run() (fragment):
        GCTracer::Scope::MINOR_MS_MARK_PARALLEL, trace_id_,
// ...
        GCTracer::Scope::MINOR_MS_BACKGROUND_MARKING,
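// --- Illustrative sketch (not the V8 implementation) ----------------------
// The trace id above mixes the marker object's address with the tracer's
// current epoch, so flow events from different marker instances and
// different GC cycles get distinct ids. A minimal analogue with an invented
// name (MakeTraceId):
#include <cstdint>

inline uint64_t MakeTraceId(const void* owner, uint64_t epoch) {
  // XOR keeps the id stable for one (owner, epoch) pair and cheap to compute.
  return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(owner)) ^ epoch;
}
// ---------------------------------------------------------------------------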
#ifndef V8_ATOMIC_OBJECT_FIELD_WRITES
// ConcurrentMarking constructor (fragment): size the pool of task states
// from a flag, or fall back to a platform-derived default.
  if (v8_flags.concurrent_marking_max_worker_num == 0) {
    // ...
  } else {
    max_tasks = v8_flags.concurrent_marking_max_worker_num;
  }
  for (int i = 0; i <= max_tasks; ++i) {
    task_state_.emplace_back(std::make_unique<TaskState>());
  }
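// --- Illustrative sketch (not the V8 implementation) ----------------------
// Worker task ids are shifted by +1 in RunMajor/RunMinor
// (task_id = GetTaskId() + 1), so the loop above allocates max_tasks + 1
// slots and indices 1..max_tasks line up with the shifted ids. A minimal
// analogue with invented names (WorkerState, MakeWorkerStates):
#include <cstddef>
#include <memory>
#include <vector>

struct WorkerState {
  size_t marked_bytes = 0;
};

inline std::vector<std::unique_ptr<WorkerState>> MakeWorkerStates(
    int max_tasks) {
  std::vector<std::unique_ptr<WorkerState>> states;
  for (int i = 0; i <= max_tasks; ++i) {  // max_tasks + 1 entries.
    states.emplace_back(std::make_unique<WorkerState>());
  }
  return states;
}
// ---------------------------------------------------------------------------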
void ConcurrentMarking::RunMajor(JobDelegate* delegate,
                                 base::EnumSet<CodeFlushMode> code_flush_mode,
                                 unsigned mark_compact_epoch,
                                 bool should_keep_ages_unchanged) {
  size_t kBytesUntilInterruptCheck = 64 * KB;
  int kObjectsUntilInterruptCheck = 1000;
  uint8_t task_id = delegate->GetTaskId() + 1;
  // ...
          ? cpp_heap->CreateCppMarkingState()
  // ...
  ConcurrentMarkingVisitor visitor(
      &local_marking_worklists, &local_weak_objects, heap_, mark_compact_epoch,
      code_flush_mode, should_keep_ages_unchanged,
      // ...
  size_t marked_bytes = 0;
  // ...
  if (v8_flags.trace_concurrent_marking) {
    isolate->PrintWithTimestamp("Starting major concurrent marking task %d\n",
                                task_id);
  }
  // ...
  while (local_weak_objects.current_ephemerons_local.Pop(&ephemeron)) {
    // ...
  }
  // ...
    size_t current_marked_bytes = 0;
    int objects_processed = 0;
    while (current_marked_bytes < kBytesUntilInterruptCheck &&
           objects_processed < kObjectsUntilInterruptCheck) {
      // ...
      if (!local_marking_worklists.Pop(&object)) {
        // ...
      }
      // ...
      if (new_space_allocator) {
        // ...
        Address addr = object.address();
        // ...
        if ((new_space_top <= addr && addr < new_space_limit) ||
            addr == new_large_object) {
          // ...
        }
      }
      // ...
      CHECK(!IsFreeSpaceOrFillerMap(map));
      if (is_per_context_mode) {
        // ...
        if (native_context_inferrer.Infer(cage_base, map, object,
                                          /* ... */)) {
          // ...
        }
      }
      const auto visited_size = visitor.Visit(map, object);
      // ...
      if (is_per_context_mode) {
        native_context_stats.IncrementSize(
            local_marking_worklists.Context(), map, object, visited_size);
      }
      // ...
      current_marked_bytes += visited_size;
    }
    // ...
    marked_bytes += current_marked_bytes;
    // ...
    local_marking_worklists.Publish();
  if (v8_flags.trace_concurrent_marking) {
    isolate->PrintWithTimestamp(
        "Major task %d concurrently marked %dKB in %.2fms\n", task_id,
        static_cast<int>(marked_bytes / KB), time_ms);
  }
// ...

// IsYoungObjectInLab() (fragment): an object counts as "in a LAB" if its
// address lies inside the current new-space allocation window or it is the
// pending new large object.
  return (new_space_top <= addr && addr < new_space_limit) ||
         addr == new_large_object;
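// --- Illustrative sketch (not the V8 implementation) ----------------------
// Both the major and the minor loops drain the marking worklist in slices:
// objects are visited until roughly 64KB of payload has been marked or 1000
// objects were processed, progress is published, and only then does the job
// decide whether to yield. A simplified standalone analogue; Worklist,
// Visitor and the should_yield callback are stand-ins, not V8 types.
#include <cstddef>

template <typename Worklist, typename Visitor, typename YieldFn>
size_t MarkWithBudget(Worklist& worklist, Visitor& visitor,
                      YieldFn should_yield) {
  constexpr size_t kBytesUntilInterruptCheck = 64 * 1024;
  constexpr int kObjectsUntilInterruptCheck = 1000;
  size_t marked_bytes = 0;
  bool done = false;
  while (!done) {
    size_t current_marked_bytes = 0;
    int objects_processed = 0;
    while (current_marked_bytes < kBytesUntilInterruptCheck &&
           objects_processed < kObjectsUntilInterruptCheck) {
      typename Worklist::ObjectType object;
      if (!worklist.Pop(&object)) {  // Worklist drained for now.
        done = true;
        break;
      }
      current_marked_bytes += visitor.Visit(object);
      ++objects_processed;
    }
    marked_bytes += current_marked_bytes;
    if (should_yield()) break;  // Interrupt check between slices.
  }
  return marked_bytes;
}
// ---------------------------------------------------------------------------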
template <YoungGenerationMarkingVisitationMode marking_mode>
// ...
  static constexpr size_t kBytesUntilInterruptCheck = 64 * KB;
  static constexpr int kObjectsUntilInterruptCheck = 1000;
  size_t marked_bytes = 0;
  size_t current_marked_bytes = 0;
  int objects_processed = 0;
  // ...
  marking_worklists_local.MergeOnHold();
  // ...
      GCTracer::Scope::MINOR_MS_BACKGROUND_MARKING_CLOSURE,
  // ...
  while (marking_worklists_local.Pop(&heap_object)) {
    if (IsYoungObjectInLab(new_space_allocator, new_lo_space, heap_object)) {
      // ...
      const auto visited_size = visitor.Visit(map, heap_object);
      // ...
      current_marked_bytes += visited_size;
    }
    // ...
    if (current_marked_bytes >= kBytesUntilInterruptCheck ||
        ++objects_processed >= kObjectsUntilInterruptCheck) {
      marked_bytes += current_marked_bytes;
      // ...
      objects_processed = 0;
      current_marked_bytes = 0;
    }
  }
  // ...
  return marked_bytes + current_marked_bytes;
// ConcurrentMarking::RunMinor() (fragment):
  uint8_t task_id = delegate->GetTaskId() + 1;
  // ...
  size_t marked_bytes = 0;
  // ...
  if (v8_flags.trace_concurrent_marking) {
    isolate->PrintWithTimestamp("Starting minor concurrent marking task %d\n",
                                task_id);
  }
  // ...
  // Tail of a RunMinorImpl<...>(delegate, task_state) call, one per
  // visitation mode:
        delegate, task_state);
  // ...
        delegate, task_state);
  // ...
  if (v8_flags.trace_concurrent_marking) {
    isolate->PrintWithTimestamp(
        "Minor task %d concurrently marked %dKB in %.2fms\n", task_id,
        static_cast<int>(marked_bytes / KB), time_ms);
  }
// ConcurrentMarking::GetMajorMaxConcurrency() (fragment):
    marking_items += worklist.worklist->Size();
  // ...
  const size_t work = std::max<size_t>(
      /* ... */);
  size_t jobs = worker_count + work;
  jobs = std::min<size_t>(task_state_.size() - 1, jobs);
  // ...
    return std::min<size_t>(jobs, 1);
  // ...

// ConcurrentMarking::GetMinorMaxConcurrency() (fragment):
  size_t jobs = worker_count + marking_items;
  jobs = std::min<size_t>(task_state_.size() - 1, jobs);
  // ...
    return std::min<size_t>(jobs, 1);
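// --- Illustrative sketch (not the V8 implementation) ----------------------
// GetMajorMaxConcurrency/GetMinorMaxConcurrency above size the job by the
// amount of pending work: desired concurrency grows with the worklist size
// and is capped by the number of preallocated task states minus one. The
// condition guarding the visible `std::min<size_t>(jobs, 1)` return is not
// part of the fragments; treating it as a battery-saving path is an
// assumption. A simplified analogue with invented names:
#include <algorithm>
#include <cstddef>

inline size_t DesiredMarkingConcurrency(size_t worker_count,
                                        size_t pending_items,
                                        size_t num_task_states,
                                        bool optimize_for_battery) {
  size_t jobs = worker_count + pending_items;
  // Mirrors task_state_.size() - 1: ids start at 1, so slot 0 is not a
  // worker slot.
  jobs = std::min<size_t>(num_task_states - 1, jobs);
  if (optimize_for_battery) {
    return std::min<size_t>(jobs, 1);  // At most one background worker.
  }
  return jobs;
}
// ---------------------------------------------------------------------------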
// ConcurrentMarking::TryScheduleJob() (fragment):
      v8_flags.concurrent_minor_ms_marking);
  // ...
  if (v8_flags.concurrent_marking_high_priority_threads) {
    // ...
  }
  // ...
  // Each task's per-task buffers must be empty before a new job starts:
    task_state->memory_chunk_live_bytes_map.AssertEmpty();
    DCHECK(task_state->memory_chunk_typed_slots_map.empty());
    DCHECK(task_state->native_context_stats.Empty());
    DCHECK(task_state->local_pretenuring_feedback.empty());
  // ...
    DCHECK(task_state->local_pretenuring_feedback.empty());
  // ...
    auto job = std::make_unique<JobTaskMajor>(
        /* ... */);
  // ...
    auto job = std::make_unique<JobTaskMinor>(this);
  // ...
        ->remembered_sets_marking_handler()
        ->RemainingRememberedSetsMarkingIteams() > 0);
// ConcurrentMarking::RescheduleJobIfNeeded() (fragment):
      v8_flags.concurrent_minor_ms_marking);
  // ...
      ? "Major concurrent marking rescheduled"
      : "Minor concurrent marking rescheduled",
// ...

// ConcurrentMarking::FlushPretenuringFeedback() (fragment):
        task_state->local_pretenuring_feedback);
    task_state->local_pretenuring_feedback.clear();
// ...

      v8_flags.concurrent_minor_ms_marking);
  // ...
      ? "Major concurrent marking paused"
      : "Minor concurrent marking paused",
// ...

  if (!v8_flags.concurrent_marking && !v8_flags.parallel_marking) return true;
  // ...
      ? "Major concurrent marking resumed"
      : "Minor concurrent marking resumed",
// ConcurrentMarking::FlushMemoryChunkData() (fragment):
  task_state->memory_chunk_live_bytes_map.FlushAndClear();
  for (auto&& [page, typed_slots] :
       task_state->memory_chunk_typed_slots_map) {
    // ... (merges typed_slots into the page's remembered set)
  }
  task_state->memory_chunk_typed_slots_map.clear();
  task_state->marked_bytes = 0;
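// --- Illustrative sketch (not the V8 implementation) ----------------------
// FlushMemoryChunkData() above hands per-task buffers back to shared heap
// state: cached live bytes are flushed atomically, each page's buffered
// typed slots are merged into that page's remembered set, and the per-task
// counters are reset. A minimal standalone analogue of the typed-slot merge;
// every name below (SketchPage, SketchTypedSlots, FlushTaskBuffers) is
// invented for the example.
#include <cstddef>
#include <cstdint>
#include <memory>
#include <unordered_map>
#include <utility>
#include <vector>

struct SketchTypedSlots {
  std::vector<uint32_t> offsets;
};

struct SketchPage {
  std::vector<uint32_t> remembered_typed_offsets;
  void MergeTypedSlots(std::unique_ptr<SketchTypedSlots> slots) {
    remembered_typed_offsets.insert(remembered_typed_offsets.end(),
                                    slots->offsets.begin(),
                                    slots->offsets.end());
  }
};

using SketchTypedSlotsMap =
    std::unordered_map<SketchPage*, std::unique_ptr<SketchTypedSlots>>;

inline void FlushTaskBuffers(SketchTypedSlotsMap& typed_slots_map,
                             size_t& task_marked_bytes) {
  for (auto&& [page, slots] : typed_slots_map) {
    page->MergeTypedSlots(std::move(slots));  // Hand ownership to the page.
  }
  typed_slots_map.clear();
  task_marked_bytes = 0;  // Per-task counter is reset for the next round.
}
// ---------------------------------------------------------------------------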