v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
mark-compact.h
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_

#include <vector>

#include "include/v8-internal.h"
#include "src/common/globals.h"
#include "src/heap/marking-state.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/marking.h"
#include "src/heap/memory-measurement.h"
#include "src/heap/spaces.h"
#include "src/heap/sweeper.h"

namespace v8 {
namespace internal {

// Forward declarations.
class HeapObjectVisitor;
class LargeObjectSpace;
class LargePageMetadata;
class MainMarkingVisitor;
class MarkCompactCollector;
class RecordMigratedSlotVisitor;
class RootMarkingVisitor final : public RootVisitor {
 public:
  explicit RootMarkingVisitor(MarkCompactCollector* collector)
      : collector_(collector) {}

  V8_INLINE void VisitRootPointer(Root root, const char* description,
                                  FullObjectSlot p) final;

  V8_INLINE void VisitRootPointers(Root root, const char* description,
                                   FullObjectSlot start,
                                   FullObjectSlot end) final;

  // Keep this synced with `RootsReferencesExtractor::VisitRunningCode()`.
  void VisitRunningCode(FullObjectSlot code_slot,
                        FullObjectSlot istream_or_smi_zero_slot) final;

  RootMarkingVisitor(const RootMarkingVisitor&) = delete;
  RootMarkingVisitor& operator=(const RootMarkingVisitor&) = delete;

 private:
  V8_INLINE void MarkObjectByPointer(Root root, FullObjectSlot p);

  MarkCompactCollector* const collector_;
};

// Collector for young and old generation.
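// A mark-compact cycle proceeds in phases: marking live objects
// (incrementally, concurrently, or in the atomic pause), clearing non-live
// references, evacuating the pages chosen as compaction candidates, updating
// pointers, and sweeping.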
class MarkCompactCollector final {
 public:
  enum class StartCompactionMode {
    kIncremental,
    kAtomic,
  };

  enum class MarkingWorklistProcessingMode {
    kDefault,
    kProcessRememberedEphemerons,
  };

  enum class CallOrigin {
    kIncrementalMarkingStep,
    kAtomicGC,
  };

  enum class EphemeronResult {
    // Both key and value are still unmarked.
    kUnresolved,
    // Value got marked because key was already marked.
    kMarkedValue,
    // Value is already marked or always live in this GC.
    kResolved,
  };
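
  // An ephemeron is a key-value pair (for example a JSWeakMap entry) whose
  // value has to be kept alive only as long as its key is alive, so a value
  // is only marked once its key is known to be reachable.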

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Heap* heap, FullObjectSlot p);
  static bool IsUnmarkedSharedHeapObject(Heap* heap, FullObjectSlot p);

  template <MarkingWorklistProcessingMode mode =
                MarkingWorklistProcessingMode::kDefault>
  std::pair<size_t, size_t> ProcessMarkingWorklist(
      v8::base::TimeDelta max_duration, size_t max_bytes_to_process);

  void TearDown();

  // Performs a global garbage collection.
  void CollectGarbage();

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(PageMetadata* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare();

  // Stop concurrent marking (either by preempting it right away or waiting for
  // it to complete as requested by |stop_request|).
  void FinishConcurrentMarking();

  // Returns whether compaction is running.
  bool StartCompaction(StartCompactionMode mode);
  void StartMarking(
      std::shared_ptr<::heap::base::IncrementalMarkingSchedule> schedule = {});

  static inline bool IsOnEvacuationCandidate(Tagged<MaybeObject> obj) {
    return MemoryChunk::FromAddress(obj.ptr())->IsEvacuationCandidate();
  }

  struct RecordRelocSlotInfo {
    MutablePageMetadata* page_metadata;
    SlotType slot_type;
    uint32_t offset;
  };

  static bool ShouldRecordRelocSlot(Tagged<InstructionStream> host,
                                    RelocInfo* rinfo,
                                    Tagged<HeapObject> target);
  static RecordRelocSlotInfo ProcessRelocInfo(Tagged<InstructionStream> host,
                                              RelocInfo* rinfo,
                                              Tagged<HeapObject> target);

  static void RecordRelocSlot(Tagged<InstructionStream> host, RelocInfo* rinfo,
                              Tagged<HeapObject> target);
  template <typename THeapObjectSlot>
  V8_INLINE static void RecordSlot(Tagged<HeapObject> object,
                                   THeapObjectSlot slot,
                                   Tagged<HeapObject> target);
  template <typename THeapObjectSlot>
  V8_INLINE static void RecordSlot(MemoryChunk* source_chunk,
                                   THeapObjectSlot slot,
                                   Tagged<HeapObject> target);

  bool is_compacting() const { return compacting_; }

  V8_INLINE void AddTransitionArray(Tagged<TransitionArray> array);

  void RecordStrongDescriptorArraysForWeakening(
      GlobalHandleVector<DescriptorArray> strong_descriptor_arrays);

#ifdef DEBUG
  // Checks whether performing mark-compact collection.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

  void VerifyMarking();
#ifdef VERIFY_HEAP
  void VerifyMarkbitsAreClean();
  void VerifyMarkbitsAreClean(PagedSpaceBase* space);
  void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyMarkbitsAreClean(LargeObjectSpace* space);
#endif

  unsigned epoch() const { return epoch_; }

  base::EnumSet<CodeFlushMode> code_flush_mode() const {
    return code_flush_mode_;
  }

  MarkingWorklists* marking_worklists() { return &marking_worklists_; }

  MarkingWorklists::Local* local_marking_worklists() const {
    return local_marking_worklists_.get();
  }

  WeakObjects::Local* local_weak_objects() {
    return local_weak_objects_.get();
  }

  void MaybeEnableBackgroundThreadsInCycle(CallOrigin origin);

  Heap* heap() { return heap_; }

  explicit MarkCompactCollector(Heap* heap);
  ~MarkCompactCollector();

 private:
  using ResizeNewSpaceMode = Heap::ResizeNewSpaceMode;

  void ComputeEvacuationHeuristics(size_t area_size,
                                   int* target_fragmentation_percent,
                                   size_t* max_evacuated_bytes);

  void RecordObjectStats();

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // Free unmarked ArrayBufferExtensions.
  void SweepArrayBufferExtensions();

  void MarkLiveObjects();

  // Marks the object and adds it to the worklist.
  V8_INLINE void MarkObject(Tagged<HeapObject> host, Tagged<HeapObject> obj,
                            MarkingHelper::WorklistTarget target_worklist);

  // Marks the root object and adds it to the worklist.
  V8_INLINE void MarkRootObject(Root root, Tagged<HeapObject> obj,
                                MarkingHelper::WorklistTarget target_worklist);

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootVisitor* root_visitor);

  // Mark the stack roots and all objects reachable from them.
  void MarkRootsFromConservativeStack(RootVisitor* root_visitor);

  // Mark all objects that are directly referenced from one of the clients
  // heaps.
  void MarkObjectsFromClientHeaps();
  void MarkObjectsFromClientHeap(Isolate* client);

  // Updates pointers to shared objects from client heaps.
  void UpdatePointersInClientHeaps();
  void UpdatePointersInClientHeap(Isolate* client);

  // Update pointers in sandbox-related pointer tables.
  void UpdatePointersInPointerTables();

  // Marks objects reachable from harmony weak maps and wrapper tracing.
  void MarkTransitiveClosure();

  // If the call-site of the top optimized code was not prepared for
  // deoptimization, then treat embedded pointers in the code as strong as
  // otherwise they can die and try to deoptimize the underlying code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor, Isolate* isolate);

  // Implements ephemeron semantics: Marks value if key is already reachable.
  EphemeronResult ApplyEphemeronSemantics(Tagged<HeapObject> key,
                                          Tagged<HeapObject> value);

  // Wrapper around `ApplyEphemeronSemantics`. Pushes unresolved ephemerons
  // into next_ephemerons. Returns true if the ephemeron value was marked by
  // this method.
  bool ProcessEphemeron(Tagged<HeapObject> key, Tagged<HeapObject> value);

  // Marks the transitive closure by draining the marking worklist iteratively,
  // applying ephemeron semantics and invoking embedder tracing until a
  // fixpoint is reached. Returns false if too many iterations have been tried
  // and the linear approach should be used.
  bool MarkTransitiveClosureUntilFixpoint();

  // Marks the transitive closure applying ephemeron semantics and invoking
  // embedder tracing with a linear algorithm for ephemerons. Only used if
  // fixpoint iteration doesn't finish within a few iterations.
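  // The linear variant records, for each still-unmarked key, the values that
  // depend on it (see the ephemeron key-to-values map below), so a key that
  // later becomes marked resolves all of its dependent values directly.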
  void MarkTransitiveClosureLinear();

  // Drains ephemeron and marking worklists. Single iteration of the
  // fixpoint iteration.
  bool ProcessEphemerons();

  // Perform Wrapper Tracing if in use.
  void PerformWrapperTracing();

  // Retain dying maps for `v8_flags.retain_maps_for_n_gc` garbage collections
  // to increase the chances of reusing the map transition tree in the future.
  void RetainMaps();

  // Clear non-live references in weak cells, transition and descriptor arrays,
  // and deoptimize dependent code of non-live maps.
  void ClearNonLiveReferences();
  void MarkDependentCodeForDeoptimization();

  // Special handling for clearing map slots.
  // Returns true if the slot was cleared.
  bool SpecialClearMapSlot(Tagged<HeapObject> host, Tagged<Map> dead_target,
                           HeapObjectSlot slot);

  // Checks if the given weak cell is a simple transition from the parent map
  // of the given dead target. If so it clears the transition and trims
  // the descriptor array of the parent if needed.
  void ClearPotentialSimpleMapTransition(Tagged<Map> dead_target);
  void ClearPotentialSimpleMapTransition(Tagged<Map> map,
                                         Tagged<Map> dead_target);

  // Flushes a weakly held bytecode array from a shared function info.
  void FlushBytecodeFromSFI(Tagged<SharedFunctionInfo> shared_info);

  // Clears bytecode arrays / baseline code that have not been executed for
  // multiple collections.
  void ProcessOldCodeCandidates();

  bool ProcessOldBytecodeSFI(Tagged<SharedFunctionInfo> flushing_candidate);
  bool ProcessOldBaselineSFI(Tagged<SharedFunctionInfo> flushing_candidate);
  void FlushSFI(Tagged<SharedFunctionInfo> sfi,
                bool bytecode_already_decompiled);

#ifndef V8_ENABLE_LEAPTIERING
  void ProcessFlushedBaselineCandidates();
#endif  // !V8_ENABLE_LEAPTIERING

  // Resets any JSFunctions which have had their bytecode flushed.
  void ClearFlushedJsFunctions();

  // Compact every array in the global list of transition arrays and
  // trim the corresponding descriptor array if a transition target is non-live.
  void ClearFullMapTransitions();
  void TrimDescriptorArray(Tagged<Map> map,
                           Tagged<DescriptorArray> descriptors);
  void TrimEnumCache(Tagged<Map> map, Tagged<DescriptorArray> descriptors);
  bool CompactTransitionArray(Tagged<Map> map,
                              Tagged<TransitionArray> transitions,
                              Tagged<DescriptorArray> descriptors);
  bool TransitionArrayNeedsCompaction(Tagged<TransitionArray> transitions,
                                      int num_transitions);
  void WeakenStrongDescriptorArrays();

  // After all reachable objects have been marked those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // Goes through the list of encountered trivial weak references and clears
  // those with dead values. This is performed in a parallel job. In short, a
  // weak reference is considered trivial if its value does not require special
  // weakness clearing.
  void ClearTrivialWeakReferences();
  // Same, but for trusted space.
  void ClearTrustedWeakReferences();
  // Common implementation of the above two.
  template <typename TObjectAndSlot, typename TMaybeSlot>
  void ClearWeakReferences(
      typename WeakObjects::WeakObjectWorklist<TObjectAndSlot>::Local& worklist,
      Tagged<HeapObjectReference> cleared_weak_ref);

  // Goes through the list of encountered non-trivial weak references and
  // filters out those whose values are still alive. This is performed in a
  // parallel job.
  void FilterNonTrivialWeakReferences();

  // Goes through the list of encountered non-trivial weak references with
  // dead values. If the value is a dead map and the parent map transitions to
  // the dead map via weak cell, then this function also clears the map
  // transition.
  void ClearNonTrivialWeakReferences();

  // Goes through the list of encountered JSWeakRefs and WeakCells and clears
  // those with dead values.
  void ClearJSWeakRefs();

  // Starts sweeping of spaces by contributing on the main thread and setting
  // up other pages for sweeping. Does not start sweeper tasks.
  void Sweep();
  void StartSweepSpace(PagedSpace* space);

  void EvacuatePrologue();
  void EvacuateEpilogue();
  void Evacuate();
  void EvacuatePagesInParallel();
  void UpdatePointersAfterEvacuation();

  void ReleaseEvacuationCandidates();
  // Returns number of aborted pages.
  size_t PostProcessAbortedEvacuationCandidates();
  void ReportAbortedEvacuationCandidateDueToOOM(Address failed_start,
                                                PageMetadata* page);
  void ReportAbortedEvacuationCandidateDueToFlags(PageMetadata* page,
                                                  MemoryChunk* chunk);

  static const int kEphemeronChunkSize = 8 * KB;

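  // Decides how many parallel tasks are used for visiting ephemerons, based
  // on the number of elements to process and kEphemeronChunkSize.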
  int NumberOfParallelEphemeronVisitingTasks(size_t elements);

  void RightTrimDescriptorArray(Tagged<DescriptorArray> array,
                                int descriptors_to_trim);

  void StartSweepNewSpace();
  void SweepLargeSpace(LargeObjectSpace* space);

  void ResetAndRelinkBlackAllocatedPage(PagedSpace*, PageMetadata*);

  Heap* const heap_;

  base::Mutex mutex_;
  base::Semaphore page_parallel_job_semaphore_{0};

#ifdef DEBUG
  enum CollectorState{IDLE,
                      PREPARE_GC,
                      MARK_LIVE_OBJECTS,
                      SWEEP_SPACES,
                      ENCODE_FORWARDING_ADDRESSES,
                      UPDATE_POINTERS,
                      RELOCATE_OBJECTS};

  // The current stage of the collector.
  CollectorState state_;
#endif

  NonAtomicMarkingState* const non_atomic_marking_state_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_ = false;
  bool black_allocation_ = false;
  bool parallel_marking_ = false;

  MarkingWorklists marking_worklists_;
  std::unique_ptr<MarkingWorklists::Local> local_marking_worklists_;

  WeakObjects weak_objects_;

  std::unique_ptr<MainMarkingVisitor> marking_visitor_;
  std::unique_ptr<WeakObjects::Local> local_weak_objects_;
  NativeContextInferrer native_context_inferrer_;
  NativeContextStats native_context_stats_;

  std::vector<GlobalHandleVector<DescriptorArray>> strong_descriptor_arrays_;
  base::Mutex strong_descriptor_arrays_mutex_;

  // Candidates for pages that should be evacuated.
  std::vector<PageMetadata*> evacuation_candidates_;
  // Pages that are actually processed during evacuation.
  std::vector<PageMetadata*> old_space_evacuation_pages_;
  std::vector<PageMetadata*> new_space_evacuation_pages_;
  std::vector<std::pair<Address, PageMetadata*>>
      aborted_evacuation_candidates_due_to_oom_;
  std::vector<PageMetadata*> aborted_evacuation_candidates_due_to_flags_;
  std::vector<LargePageMetadata*> promoted_large_pages_;

  // Map which stores ephemeron pairs for the linear-time algorithm.
  KeyToValues key_to_values_;

  // Counts the number of major mark-compact collections. The counter is
  // incremented right after marking. This is used for:
  // - marking descriptor arrays. See NumberOfMarkedDescriptors. Only the lower
  //   two bits are used, so it is okay if this counter overflows and wraps
  //   around.
  unsigned epoch_ = 0;

  // Bytecode flushing is disabled when the code coverage mode is changed.
  // Since that can happen while a GC is happening and we need the
  // code_flush_mode_ to remain the same throughout a GC, we record this at
  // the start of each GC.
  base::EnumSet<CodeFlushMode> code_flush_mode_;

  std::vector<PageMetadata*> empty_new_space_pages_to_be_swept_;

  ResizeNewSpaceMode resize_new_space_ = ResizeNewSpaceMode::kNone;

  friend class Evacuator;
  friend class RecordMigratedSlotVisitor;
  friend class RootMarkingVisitor;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MARK_COMPACT_H_