v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
compactor.cc
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/cppgc/compactor.h"

#include <map>
#include <numeric>
#include <unordered_map>
#include <unordered_set>

#include "include/cppgc/macros.h"
#include "src/heap/cppgc/compaction-worklists.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-space.h"
#include "src/heap/cppgc/memory.h"
#include "src/heap/cppgc/object-poisoner.h"
#include "src/heap/cppgc/raw-heap.h"
#include "src/heap/cppgc/stats-collector.h"

namespace cppgc {
namespace internal {

namespace {
// Freelist size threshold that must be exceeded before compaction
// should be considered.
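// The threshold is compared against the summed free-list size of all
// compactable spaces (see UpdateHeapResidency() and Compactor::ShouldCompact()
// below).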
static constexpr size_t kFreeListSizeThreshold = 512 * kKB;

// The real worker behind heap compaction, recording references to movable
// objects ("slots"). When the objects end up being compacted and moved,
// Relocate() will adjust the slots to point to the new location of the
// object along with handling references for interior pointers.
//
// The MovableReferences object is created and maintained for the lifetime
// of one heap compaction-enhanced GC.
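//
// For illustration (hypothetical example): consider an object `a` with a
// field `a.backing_` pointing to a backing store `b` that lives in a
// compactable space. Marking pushes &a.backing_ as the slot with value `b`.
// When `b` is moved, Relocate() rewrites a.backing_ to b's new address. If
// `a` itself resides on a compacted page, the slot's own new location is
// tracked through interior_movable_references_ so that the rewrite lands in
// live memory rather than in a's old, possibly freed, storage.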
class MovableReferences final {
  using MovableReference = CompactionWorklists::MovableReference;

 public:
  explicit MovableReferences(HeapBase& heap)
      : heap_(heap), heap_has_move_listeners_(heap.HasMoveListeners()) {}

  // Adds a slot for compaction. Filters slots in dead objects.
  void AddOrFilter(MovableReference*);

  // Relocates a backing store |from| -> |to|.
  void Relocate(Address from, Address to, size_t size_including_header);

  // Relocates interior slots in a backing store that is moved |from| -> |to|.
  void RelocateInteriorReferences(Address from, Address to, size_t size);

  // Updates the collection of callbacks from the items pushed to the worklist
  // by marking visitors.
  void UpdateCallbacks();

 private:
  HeapBase& heap_;

  // Map from movable reference (value) to its slot. Upon moving an object the
  // slot pointing to it requires updating. A movable reference should
  // currently have only a single slot registered for it.
  std::unordered_map<MovableReference, MovableReference*> movable_references_;

  // Map of interior slots to their final location. Needs to be an ordered map
  // as it is used to walk through slots starting at a given memory address.
  // Requires log(n) lookup to make the early bailout reasonably fast.
  //
  // - The initial value for a given key is nullptr.
  // - Upon moving an object this value is adjusted accordingly.
  std::map<MovableReference*, Address> interior_movable_references_;

  const bool heap_has_move_listeners_;

#if DEBUG
  // The following two collections are used to allow referring back from a
  // slot to an already moved object.
  std::unordered_set<const void*> moved_objects_;
  std::unordered_map<MovableReference*, MovableReference>
      interior_slot_to_object_;
#endif  // DEBUG
};

void MovableReferences::AddOrFilter(MovableReference* slot) {
  const BasePage* slot_page = BasePage::FromInnerAddress(&heap_, slot);
  CHECK_NOT_NULL(slot_page);

  const void* value = *slot;
  if (!value) return;

  // All slots and values are part of Oilpan's heap.
  // - Slots may be contained within dead objects if e.g. the write barrier
  //   registered the slot while the backing itself has not been marked live
  //   in time. Slots in dead objects are filtered below.
  // - Values may only be contained in or point to live objects.

  const HeapObjectHeader& slot_header =
      slot_page->ObjectHeaderFromInnerAddress(slot);
  // Filter the slot since the object that contains the slot is dead.
  if (!slot_header.IsMarked()) return;

  const BasePage* value_page = BasePage::FromInnerAddress(&heap_, value);
  CHECK_NOT_NULL(value_page);

  // The following cases are not compacted and do not require recording:
  // - Compactable object on large pages.
  // - Compactable object on non-compactable spaces.
  if (value_page->is_large() || !value_page->space().is_compactable()) return;

  // Slots must reside in and values must point to live objects at this
  // point. |value| usually points to a separate object but can also be an
  // interior pointer into the same object storage, which is why the dynamic
  // header lookup is required.
  const HeapObjectHeader& value_header =
      value_page->ObjectHeaderFromInnerAddress(value);
  CHECK(value_header.IsMarked());

  // Slots may have been recorded already but must point to the same value.
  auto reference_it = movable_references_.find(value);
  if (V8_UNLIKELY(reference_it != movable_references_.end())) {
    CHECK_EQ(slot, reference_it->second);
    return;
  }

  // Add regular movable reference.
  movable_references_.emplace(value, slot);

  // Check whether the slot itself resides on a page that is compacted.
  if (V8_LIKELY(!slot_page->space().is_compactable())) return;

  CHECK_EQ(interior_movable_references_.end(),
           interior_movable_references_.find(slot));
  interior_movable_references_.emplace(slot, nullptr);
#if DEBUG
  interior_slot_to_object_.emplace(slot, slot_header.ObjectStart());
#endif  // DEBUG
}

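// Called for each live object that has actually been moved; |from| and |to|
// are the object start addresses (past the HeapObjectHeader), while
// |size_including_header| covers the full allocated size.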
void MovableReferences::Relocate(Address from, Address to,
                                 size_t size_including_header) {
#if DEBUG
  moved_objects_.insert(from);
#endif  // DEBUG

  if (heap_has_move_listeners_) {
    heap_.CallMoveListeners(from - sizeof(HeapObjectHeader),
                            to - sizeof(HeapObjectHeader),
                            size_including_header);
  }

  // Interior slots always need to be processed for moved objects.
  // Consider an object A with slot A.x pointing to value B where A is
  // allocated on a movable page itself. When B is finally moved, it needs to
  // find the corresponding slot A.x. Object A may be moved already and the
  // memory may have been freed, which would result in a crash.
  if (!interior_movable_references_.empty()) {
    const HeapObjectHeader& header = HeapObjectHeader::FromObject(to);
    const size_t size = header.ObjectSize();
    RelocateInteriorReferences(from, to, size);
  }

  auto it = movable_references_.find(from);
  // This means that there is no corresponding slot for a live object.
  // This may happen because a mutator may change the slot to point to a
  // different object because e.g. incremental marking marked an object
  // as live that was later replaced.
  if (it == movable_references_.end()) {
    return;
  }

  // If the object is referenced by a slot that is contained on a compacted
  // area itself, check whether it can be updated already.
  MovableReference* slot = it->second;
  auto interior_it = interior_movable_references_.find(slot);
  if (interior_it != interior_movable_references_.end()) {
    MovableReference* slot_location =
        reinterpret_cast<MovableReference*>(interior_it->second);
    if (!slot_location) {
      interior_it->second = to;
#if DEBUG
      // Check that the containing object has not been moved yet.
      auto reverse_it = interior_slot_to_object_.find(slot);
      DCHECK_NE(interior_slot_to_object_.end(), reverse_it);
      DCHECK_EQ(moved_objects_.end(), moved_objects_.find(reverse_it->second));
#endif  // DEBUG
    } else {
      slot = slot_location;
    }
  }

  // Compaction is atomic, so the slot should not have been updated during
  // compaction.
  DCHECK_EQ(from, *slot);

  // Update the slot to the object's new location.
  *slot = to;
}

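// Updates the recorded target locations of interior slots that lie within the
// moved region [from, from + size). A slot whose current contents also point
// back into that region holds an interior pointer without its own
// HeapObjectHeader; its contents are rebased by (to - from) immediately.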
void MovableReferences::RelocateInteriorReferences(Address from, Address to,
                                                   size_t size) {
  // |from| is a valid address for a slot.
  auto interior_it = interior_movable_references_.lower_bound(
      reinterpret_cast<MovableReference*>(from));
  if (interior_it == interior_movable_references_.end()) return;
  DCHECK_GE(reinterpret_cast<Address>(interior_it->first), from);

  size_t offset = reinterpret_cast<Address>(interior_it->first) - from;
  while (offset < size) {
    if (!interior_it->second) {
      // Update the interior reference value, so that when the object the slot
      // is pointing to is moved, it can reuse this value.
      Address reference = to + offset;
      interior_it->second = reference;

      // If the |slot|'s content is pointing into the region [from, from +
      // size) we are dealing with an interior pointer that does not point to
      // a valid HeapObjectHeader. Such references need to be fixed up
      // immediately.
      Address& reference_contents = *reinterpret_cast<Address*>(reference);
      if (reference_contents > from && reference_contents < (from + size)) {
        reference_contents = reference_contents - from + to;
      }
    }

    interior_it++;
    if (interior_it == interior_movable_references_.end()) return;
    offset = reinterpret_cast<Address>(interior_it->first) - from;
  }
}

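// Bookkeeping for the space currently being compacted: the page being
// compacted into (current_page_), the offset of the next free byte on that
// page (used_bytes_in_current_page_), and already-compacted pages that can
// serve as further compaction targets (available_pages_).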
class CompactionState final {
  CPPGC_STACK_ALLOCATED();
  using Pages = std::vector<NormalPage*>;

 public:
  CompactionState(NormalPageSpace* space, MovableReferences& movable_references)
      : space_(space), movable_references_(movable_references) {}

  void AddPage(NormalPage* page) {
    DCHECK_EQ(space_, &page->space());
    // If not the first page, add |page| onto the available pages chain.
    if (!current_page_)
      current_page_ = page;
    else
      available_pages_.push_back(page);
  }

  void RelocateObject(const NormalPage* page, const Address header,
                      size_t size) {
    // Allocate and copy over the live object.
    Address compact_frontier =
        current_page_->PayloadStart() + used_bytes_in_current_page_;
    if (compact_frontier + size > current_page_->PayloadEnd()) {
      // Can't fit on current page. Add remaining onto the freelist and advance
      // to next available page.
      ReturnCurrentPageToSpace();

      current_page_ = available_pages_.back();
      available_pages_.pop_back();
      used_bytes_in_current_page_ = 0;
      compact_frontier = current_page_->PayloadStart();
    }
    if (V8_LIKELY(compact_frontier != header)) {
      // Use a non-overlapping copy, if possible.
      if (current_page_ == page)
        memmove(compact_frontier, header, size);
      else
        memcpy(compact_frontier, header, size);
      movable_references_.Relocate(header + sizeof(HeapObjectHeader),
                                   compact_frontier + sizeof(HeapObjectHeader),
                                   size);
    }
    current_page_->object_start_bitmap().SetBit(compact_frontier);
    used_bytes_in_current_page_ += size;
    DCHECK_LE(used_bytes_in_current_page_, current_page_->PayloadSize());
  }

  void FinishCompactingSpace() {
    // If the current page hasn't been allocated into, add it to the available
    // list, for subsequent release below.
    if (!used_bytes_in_current_page_) {
      available_pages_.push_back(current_page_);
    } else {
      ReturnCurrentPageToSpace();
    }

    // Return remaining available pages back to the backend.
    for (NormalPage* page : available_pages_) {
      SetMemoryInaccessible(page->PayloadStart(), page->PayloadSize());
      NormalPage::Destroy(page);
    }
  }

  void FinishCompactingPage(NormalPage* page) {
#if DEBUG || defined(V8_USE_MEMORY_SANITIZER) || \
    defined(V8_USE_ADDRESS_SANITIZER)
    // Zap the unused portion, until it is either compacted into or freed.
    if (current_page_ != page) {
      ZapMemory(page->PayloadStart(), page->PayloadSize());
    } else {
      ZapMemory(page->PayloadStart() + used_bytes_in_current_page_,
                page->PayloadSize() - used_bytes_in_current_page_);
    }
#endif
    page->object_start_bitmap().MarkAsFullyPopulated();
  }

 private:
  void ReturnCurrentPageToSpace() {
    DCHECK_EQ(space_, &current_page_->space());
    space_->AddPage(current_page_);
    if (used_bytes_in_current_page_ != current_page_->PayloadSize()) {
      // Put the remainder of the page onto the free list.
      size_t freed_size =
          current_page_->PayloadSize() - used_bytes_in_current_page_;
      Address payload = current_page_->PayloadStart();
      Address free_start = payload + used_bytes_in_current_page_;
      SetMemoryInaccessible(free_start, freed_size);
      space_->free_list().Add({free_start, freed_size});
      current_page_->object_start_bitmap().SetBit(free_start);
    }
  }

  NormalPageSpace* space_;
  MovableReferences& movable_references_;
  // Page into which compacted objects will be written.
  NormalPage* current_page_ = nullptr;
  // Offset into |current_page_| to the next free address.
  size_t used_bytes_in_current_page_ = 0;
  // Additional pages in the current space that can be used as compaction
  // targets. Pages that remain available when compaction finishes can be
  // released.
  Pages available_pages_;
};

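// Compacts the live objects of a single page: free-list entries are skipped,
// unmarked objects are finalized, and marked objects are relocated towards
// the compaction frontier tracked by |compaction_state|. The page itself is
// registered with |compaction_state| as a future compaction target.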
void CompactPage(NormalPage* page, CompactionState& compaction_state,
                 StickyBits sticky_bits) {
  compaction_state.AddPage(page);

  page->object_start_bitmap().Clear();

  for (Address header_address = page->PayloadStart();
       header_address < page->PayloadEnd();) {
    HeapObjectHeader* header =
        reinterpret_cast<HeapObjectHeader*>(header_address);
    size_t size = header->AllocatedSize();
    DCHECK_GT(size, 0u);
    DCHECK_LT(size, kPageSize);

    if (header->IsFree()) {
      // Unpoison the freelist entry so that we can compact into it as wanted.
      ASAN_UNPOISON_MEMORY_REGION(header_address, size);
      header_address += size;
      continue;
    }

    if (!header->IsMarked()) {
      // Compaction is currently launched only from AtomicPhaseEpilogue, so it's
      // guaranteed to be on the mutator thread - no need to postpone
      // finalization.
      header->Finalize();

      // As compaction is under way, leave the freed memory accessible
      // while compacting the rest of the page. We just zap the payload
      // to catch out other finalizers trying to access it.
#if DEBUG || defined(V8_USE_MEMORY_SANITIZER) || \
    defined(V8_USE_ADDRESS_SANITIZER)
      ZapMemory(header, size);
#endif
      header_address += size;
      continue;
    }

    // Object is marked.
#if defined(CPPGC_YOUNG_GENERATION)
    if (sticky_bits == StickyBits::kDisabled) header->Unmark();
#else   // !defined(CPPGC_YOUNG_GENERATION)
    header->Unmark();
#endif  // !defined(CPPGC_YOUNG_GENERATION)

    // Potentially unpoison the live object as well, as it is the source of
    // the copy.
    ASAN_UNPOISON_MEMORY_REGION(header->ObjectStart(), header->ObjectSize());
    compaction_state.RelocateObject(page, header_address, size);
    header_address += size;
  }

  compaction_state.FinishCompactingPage(page);
}

void CompactSpace(NormalPageSpace* space, MovableReferences& movable_references,
                  StickyBits sticky_bits) {
  using Pages = NormalPageSpace::Pages;

#ifdef V8_USE_ADDRESS_SANITIZER
  UnmarkedObjectsPoisoner().Traverse(*space);
#endif  // V8_USE_ADDRESS_SANITIZER

  DCHECK(space->is_compactable());

  space->free_list().Clear();

  // Compaction generally follows Jonker's algorithm for fast garbage
  // compaction. Compaction is performed in-place, sliding objects down over
  // unused holes for a smaller heap page footprint and improved locality. A
  // "compaction pointer" is consequently kept, pointing to the next available
  // address to move objects down to. It will belong to one of the already
  // compacted pages for this space, but as compaction proceeds, it will not
  // belong to the same page as the one being currently compacted.
  //
  // The compaction pointer is represented by the
  // |(current_page_, used_bytes_in_current_page_)| pair, with
  // |used_bytes_in_current_page_| being the offset into |current_page_|,
  // making up the next available location. When the compaction of an arena
  // page causes the compaction pointer to exhaust the current page it is
  // compacting into, page compaction will advance the current page of the
  // compaction pointer, as well as the allocation point.
  //
  // By construction, the page compaction can be performed without having
  // to allocate any new pages. So to arrange for the page compaction's
  // supply of freed, available pages, we chain them together after each
  // has been "compacted from". The page compaction will then reuse those
  // as needed, and once finished, the chained, available pages can be
  // released back to the OS.
  //
  // To ease the passing of the compaction state when iterating over an
  // arena's pages, package it up into a |CompactionState|.

  Pages pages = space->RemoveAllPages();
  if (pages.empty()) return;

  CompactionState compaction_state(space, movable_references);
  for (BasePage* page : pages) {
    page->ResetMarkedBytes();
    // Large objects do not belong to this arena.
    CompactPage(NormalPage::From(page), compaction_state, sticky_bits);
  }

  compaction_state.FinishCompactingSpace();
  // Sweeping will verify object start bitmap of compacted space.
}

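// Returns the total number of free-list bytes across all compactable spaces;
// compared against kFreeListSizeThreshold in Compactor::ShouldCompact().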
size_t UpdateHeapResidency(const std::vector<NormalPageSpace*>& spaces) {
  return std::accumulate(spaces.cbegin(), spaces.cend(), 0u,
                         [](size_t acc, const NormalPageSpace* space) {
                           DCHECK(space->is_compactable());
                           if (!space->size()) return acc;
                           return acc + space->free_list().Size();
                         });
}

}  // namespace

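// Collects the compactable normal-page spaces once at construction so the
// remaining Compactor methods only need to consult this cached list.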
Compactor::Compactor(RawHeap& heap) : heap_(heap) {
  for (auto& space : heap_) {
    if (!space->is_compactable()) continue;
    DCHECK_EQ(&heap, space->raw_heap());
    compactable_spaces_.push_back(static_cast<NormalPageSpace*>(space.get()));
  }
}

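// Compaction is not used when an atomic GC may find heap pointers on the
// stack, since on-stack references are not recorded as movable slots and
// cannot be updated after objects move. Otherwise compaction is considered
// worthwhile once enough memory sits on the free lists of compactable spaces.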
bool Compactor::ShouldCompact(GCConfig::MarkingType marking_type,
                              StackState stack_state) const {
  if (compactable_spaces_.empty() ||
      (marking_type == GCConfig::MarkingType::kAtomic &&
       stack_state == StackState::kMayContainHeapPointers)) {
    // The following check ensures that tests that want to test compaction are
    // not interrupted by garbage collections that cannot use compaction.
    DCHECK(!enable_for_next_gc_for_testing_);
    return false;
  }

  if (enable_for_next_gc_for_testing_) {
    return true;
  }

  size_t free_list_size = UpdateHeapResidency(compactable_spaces_);

  return free_list_size > kFreeListSizeThreshold;
}

void Compactor::InitializeIfShouldCompact(GCConfig::MarkingType marking_type,
                                          StackState stack_state) {
  DCHECK(!is_enabled_);

  if (!ShouldCompact(marking_type, stack_state)) return;

  compaction_worklists_ = std::make_unique<CompactionWorklists>();

  is_enabled_ = true;
  is_cancelled_ = false;
}

void Compactor::CancelIfShouldNotCompact(GCConfig::MarkingType marking_type,
                                         StackState stack_state) {
  if (!is_enabled_ || ShouldCompact(marking_type, stack_state)) return;

  is_cancelled_ = true;
  is_enabled_ = false;
}

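// Drains the movable-slot worklist populated during marking into
// MovableReferences and compacts every compactable space. Returning kIgnore
// signals the sweeper not to sweep these spaces again.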
Compactor::CompactableSpaceHandling Compactor::CompactSpacesIfEnabled() {
  if (is_cancelled_ && compaction_worklists_) {
    compaction_worklists_->movable_slots_worklist()->Clear();
    compaction_worklists_.reset();
  }
  if (!is_enabled_) return CompactableSpaceHandling::kSweep;

  StatsCollector::EnabledScope stats_scope(heap_.heap()->stats_collector(),
                                           StatsCollector::kAtomicCompact);

  MovableReferences movable_references(*heap_.heap());

  CompactionWorklists::MovableReferencesWorklist::Local local(
      *compaction_worklists_->movable_slots_worklist());
  CompactionWorklists::MovableReference* slot;
  while (local.Pop(&slot)) {
    movable_references.AddOrFilter(slot);
  }
  compaction_worklists_.reset();

  const StickyBits sticky_bits = heap_.heap()->sticky_bits();

  for (NormalPageSpace* space : compactable_spaces_) {
    CompactSpace(space, movable_references, sticky_bits);
  }

  enable_for_next_gc_for_testing_ = false;
  is_enabled_ = false;
  return CompactableSpaceHandling::kIgnore;
}

void Compactor::EnableForNextGCForTesting() {
  DCHECK_NULL(heap_.heap()->marker());
  enable_for_next_gc_for_testing_ = true;
}

}  // namespace internal
}  // namespace cppgc