v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
minor-gc-job.cc
Go to the documentation of this file.
1// Copyright 2015 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
7#include <memory>
8
11#include "src/flags/flags.h"
12#include "src/heap/heap-inl.h"
13#include "src/heap/heap.h"
14#include "src/init/v8.h"
16
17namespace v8::internal {
18
19namespace {
20
21size_t YoungGenerationTaskTriggerSize(Heap* heap) {
22 size_t young_capacity = 0;
23 if (v8_flags.sticky_mark_bits) {
24 // TODO(333906585): Adjust parameters.
25 young_capacity = heap->sticky_space()->Capacity() -
26 heap->sticky_space()->old_objects_size();
27 } else {
28 young_capacity = heap->new_space()->TotalCapacity();
29 }
30 return young_capacity * v8_flags.minor_gc_task_trigger / 100;
31}
32
33size_t YoungGenerationSize(Heap* heap) {
34 return v8_flags.sticky_mark_bits ? heap->sticky_space()->young_objects_size()
35 : heap->new_space()->Size();
36}
37
38} // namespace
39
40// Task observer that is registered on creation and automatically unregistered
41// on first step when a task is scheduled. The idea here is to not interrupt the
42// mutator with slow-path allocations for small allocation observer steps but
43// rather only interrupt a single time.
44//
45// A task should be scheduled when young generation size reaches the task
46// trigger, but may also occur before the trigger is reached. For example,
47// this method is called from the allocation observer for new space. The
48// observer step size is determined based on the current task trigger. However,
49// due to refining allocated bytes after sweeping (allocated bytes after
50// sweeping may be less than live bytes during marking), new space size may
51// decrease while the observer step size remains the same.
52//
53// The job only checks the schedule on step and relies on the minor GC
54// cancelling the job in case it is run.
56 public:
59 // Register GC callback for all atomic pause types.
60 heap_->main_thread_local_heap()->AddGCEpilogueCallback(
63 }
64
70
71 intptr_t GetNextStepSize() final {
72 const size_t new_space_threshold = YoungGenerationTaskTriggerSize(heap_);
73 const size_t new_space_size = YoungGenerationSize(heap_);
74 if (new_space_size < new_space_threshold) {
75 return new_space_threshold - new_space_size;
76 }
77 // Force a step on next allocation.
78 return 1;
79 }
80
81 void Step(int, Address, size_t) final {
  // A step fired, meaning the young-generation trigger is (or is about to
  // be) reached, so this observer stops observing; the GC epilogue callback
  // re-adds it after the next GC.
  // NOTE(review): original lines 82 and 84-85 are absent from this extract —
  // presumably a DCHECK and the call that actually removes this observer
  // from the allocation space; confirm against the full source before
  // relying on this fragment.
83 // Remove this observer. It will be re-added after a GC.
86 was_added_to_space_ = false;
87 }
88
89 private:
  // GC epilogue hook (registered for all atomic pause types, per the
  // constructor comment): re-registers the observer on the allocation space
  // so the next allocation window can schedule a fresh minor GC task.
90 static void GCEpilogueCallback(void* data) {
  // NOTE(review): original line 91 is absent from this extract — it
  // presumably declares `ScheduleMinorGCTaskObserver* observer =` receiving
  // the cast below, which is otherwise unused here. Confirm against the
  // full source.
92 reinterpret_cast<ScheduleMinorGCTaskObserver*>(data);
93 observer->RemoveFromNewSpace();
94 observer->AddToNewSpace();
95 }
96
104
106 if (!was_added_to_space_) {
107 return;
108 }
110 was_added_to_space_ = false;
111 }
112
116};
117
119 public:
  // Binds the cancelable task to its isolate and stores the owning
  // MinorGCJob pointer for use when the task runs.
120 Task(Isolate* isolate, MinorGCJob* job)
121 : CancelableTask(isolate), isolate_(isolate), job_(job) {}
122
123 // CancelableTask overrides.
124 void RunInternal() override;
125
126 Isolate* isolate() const { return isolate_; }
127
128 private:
131};
132
134 : heap_(heap),
135 minor_gc_task_observer_(new ScheduleMinorGCTaskObserver(this, heap)) {}
136
138 if (!v8_flags.minor_gc_task || IsScheduled() || heap_->IsTearingDown()) {
139 return;
140 }
141 const auto priority = v8_flags.minor_gc_task_with_lower_priority
144 std::shared_ptr<v8::TaskRunner> taskrunner =
146 if (taskrunner->NonNestableTasksEnabled()) {
147 std::unique_ptr<Task> task = std::make_unique<Task>(heap_->isolate(), this);
148 current_task_id_ = task->id();
149 taskrunner->PostNonNestableTask(std::move(task));
150 }
151}
152
154 if (!IsScheduled()) {
155 return;
156 }
157 // The task may have run and bailed out already if major incremental marking
158 // was running, in which case `TryAbort` will return `kTaskRemoved`.
161}
162
165 TRACE_EVENT_CALL_STATS_SCOPED(isolate(), "v8", "V8.MinorGCJob.Task");
166
169
170 // Set the current isolate such that trusted pointer tables etc are
171 // available and the cage base is set correctly for multi-cage mode.
172 SetCurrentIsolateScope isolate_scope(isolate());
173
174 Heap* heap = isolate()->heap();
175
176 if (heap->incremental_marking()->IsMajorMarking()) {
177 // Don't trigger a minor GC while major incremental marking is active.
178 return;
179 }
180
182}
183
184} // namespace v8::internal
union v8::internal::@341::BuiltinMetadata::KindSpecificData data
static constexpr intptr_t kNotUsingFixedStepSize
MainAllocator * new_space_allocator()
bool IsTearingDown() const
Definition heap.h:525
LocalHeap * main_thread_local_heap()
Definition heap.h:842
std::shared_ptr< v8::TaskRunner > GetForegroundTaskRunner(TaskPriority priority=TaskPriority::kUserBlocking) const
Definition heap.cc:5903
Isolate * isolate() const
Definition heap-inl.h:61
HeapAllocator * allocator()
Definition heap.h:1640
CancelableTaskManager * cancelable_task_manager()
Definition isolate.h:1960
void RemoveGCEpilogueCallback(GCEpilogueCallback *callback, void *data)
V8_EXPORT_PRIVATE void AddAllocationObserver(AllocationObserver *observer)
V8_INLINE bool IsLabValid() const
V8_EXPORT_PRIVATE void RemoveAllocationObserver(AllocationObserver *observer)
Task(Isolate *isolate, MinorGCJob *job)
MinorGCJob(Heap *heap) V8_NOEXCEPT
CancelableTaskManager::Id current_task_id_
ScheduleMinorGCTaskObserver(MinorGCJob *job, Heap *heap)
static void GCEpilogueCallback(void *data)
void Step(int, Address, size_t) final
LiftoffAssembler::CacheState state
size_t priority
V8_EXPORT_PRIVATE FlagValues v8_flags
#define V8_NOEXCEPT
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
Heap * heap_
#define TRACE_EVENT_CALL_STATS_SCOPED(isolate, category_group, name)