v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
sweeper.cc
1// Copyright 2020 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/heap/cppgc/sweeper.h"
6
7#include <algorithm>
8#include <atomic>
9#include <cstdint>
10#include <memory>
11#include <optional>
12#include <vector>
13
32
33namespace cppgc::internal {
34
35namespace {
36
37constexpr TaskPriority kBackgroundBoostedPriority = TaskPriority::kUserBlocking;
38constexpr TaskPriority kBackgroundRegularPriority = TaskPriority::kUserVisible;
39constexpr TaskPriority kForegroundRegularPriority = TaskPriority::kUserBlocking;
40constexpr TaskPriority kForegroundLowPriority = TaskPriority::kUserVisible;
41
42class DeadlineChecker final {
43 public:
44 explicit DeadlineChecker(v8::base::TimeTicks end) : end_(end) {}
45
46 bool Check() {
47 return V8_UNLIKELY(++count_ % kInterval == 0) &&
48 (end_ < v8::base::TimeTicks::Now());
49 }
50
51 private:
52 static constexpr size_t kInterval = 4;
53
54 const v8::base::TimeTicks end_;
55 size_t count_ = 0;
56};
57
58enum class MutatorThreadSweepingMode {
59 kOnlyFinalizers,
60 kAll,
61};
62
63constexpr const char* ToString(MutatorThreadSweepingMode sweeping_mode) {
64 switch (sweeping_mode) {
65 case MutatorThreadSweepingMode::kAll:
66 return "all";
67 case MutatorThreadSweepingMode::kOnlyFinalizers:
68 return "only-finalizers";
69 }
70}
71
72class ObjectStartBitmapVerifier final
73 : private HeapVisitor<ObjectStartBitmapVerifier> {
74 friend class HeapVisitor<ObjectStartBitmapVerifier>;
75
76 public:
77 void Verify(RawHeap& heap) {
78#if DEBUG
79 Traverse(heap);
80#endif // DEBUG
81 }
82 void Verify(NormalPage& page) {
83#if DEBUG
84 Traverse(page);
85#endif // DEBUG
86 }
87
88 private:
89 bool VisitNormalPage(NormalPage& page) {
90 // Remember bitmap and reset previous pointer.
91 bitmap_ = &page.object_start_bitmap();
92 prev_ = nullptr;
93 return false;
94 }
95
96 bool VisitHeapObjectHeader(HeapObjectHeader& header) {
97 if (header.IsLargeObject()) return true;
98
99 auto* raw_header = reinterpret_cast<ConstAddress>(&header);
100 CHECK(bitmap_->CheckBit<AccessMode::kAtomic>(raw_header));
101 if (prev_) {
102 // No other bits in the range [prev_, raw_header) should be set.
103 CHECK_EQ(prev_, bitmap_->FindHeader<AccessMode::kAtomic>(raw_header - 1));
104 }
105 prev_ = &header;
106 return true;
107 }
108
109 PlatformAwareObjectStartBitmap* bitmap_ = nullptr;
110 HeapObjectHeader* prev_ = nullptr;
111};
112
113class FreeHandlerBase {
114 public:
115 virtual ~FreeHandlerBase() = default;
116 virtual void FreeFreeList(
117 std::vector<FreeList::Block>& unfinalized_free_list) = 0;
118};
119
120class DiscardingFreeHandler : public FreeHandlerBase {
121 public:
122 DiscardingFreeHandler(PageAllocator& page_allocator, FreeList& free_list,
123 BasePage& page)
124 : page_allocator_(page_allocator), free_list_(free_list), page_(page) {}
125
126 void Free(FreeList::Block block) {
127 const auto unused_range = free_list_.AddReturningUnusedBounds(block);
128 const uintptr_t aligned_begin_unused =
129 RoundUp(reinterpret_cast<uintptr_t>(unused_range.first),
130 page_allocator_.CommitPageSize());
131 const uintptr_t aligned_end_unused =
132 RoundDown(reinterpret_cast<uintptr_t>(unused_range.second),
133 page_allocator_.CommitPageSize());
134 if (aligned_begin_unused < aligned_end_unused) {
135 const size_t discarded_size = aligned_end_unused - aligned_begin_unused;
136 page_allocator_.DiscardSystemPages(
137 reinterpret_cast<void*>(aligned_begin_unused),
138 aligned_end_unused - aligned_begin_unused);
139 page_.IncrementDiscardedMemory(discarded_size);
140 page_.space()
141 .raw_heap()
142 ->heap()
143 ->stats_collector()
144 ->IncrementDiscardedMemory(discarded_size);
145 }
146 }
147
148 void FreeFreeList(std::vector<FreeList::Block>& unfinalized_free_list) final {
149 for (auto entry : unfinalized_free_list) {
150 Free(std::move(entry));
151 }
152 }
153
154 private:
155 PageAllocator& page_allocator_;
156 FreeList& free_list_;
157 BasePage& page_;
158};
159
160class RegularFreeHandler : public FreeHandlerBase {
161 public:
162 RegularFreeHandler(PageAllocator& page_allocator, FreeList& free_list,
163 BasePage& page)
164 : free_list_(free_list) {}
165
166 void Free(FreeList::Block block) { free_list_.Add(std::move(block)); }
167
168 void FreeFreeList(std::vector<FreeList::Block>& unfinalized_free_list) final {
169 for (auto entry : unfinalized_free_list) {
170 Free(std::move(entry));
171 }
172 }
173
174 private:
175 FreeList& free_list_;
176};
177
178template <typename T>
179class ThreadSafeStack {
180 public:
181 ThreadSafeStack() = default;
182
183 void Push(T t) {
184 v8::base::MutexGuard lock(&mutex_);
185 vector_.push_back(std::move(t));
186 is_empty_.store(false, std::memory_order_relaxed);
187 }
188
189 std::optional<T> Pop() {
190 v8::base::MutexGuard lock(&mutex_);
191 if (vector_.empty()) {
192 is_empty_.store(true, std::memory_order_relaxed);
193 return std::nullopt;
194 }
195 T top = std::move(vector_.back());
196 vector_.pop_back();
197 // std::move is redundant but is needed to avoid the bug in gcc-7.
198 return std::move(top);
199 }
200
201 template <typename It>
202 void Insert(It begin, It end) {
203 v8::base::MutexGuard lock(&mutex_);
204 vector_.insert(vector_.end(), begin, end);
205 is_empty_.store(false, std::memory_order_relaxed);
206 }
207
208 bool IsEmpty() const { return is_empty_.load(std::memory_order_relaxed); }
209
210 private:
211 v8::base::Mutex mutex_;
212 std::vector<T> vector_;
213 std::atomic<bool> is_empty_{true};
214};
215
216struct SweepingState {
217 struct SweptPageState {
218 BasePage* page = nullptr;
219#if defined(CPPGC_CAGED_HEAP)
220 // The list of unfinalized objects may be extremely big. To save space,
221 // if the cage is enabled, the list of unfinalized objects is stored
222 // inline in HeapObjectHeader.
223 HeapObjectHeader* unfinalized_objects_head = nullptr;
224#else // !defined(CPPGC_CAGED_HEAP)
225 std::vector<HeapObjectHeader*> unfinalized_objects;
226#endif // !defined(CPPGC_CAGED_HEAP)
227 FreeList cached_free_list;
228 std::vector<FreeList::Block> unfinalized_free_list;
229 bool is_empty = false;
230 size_t largest_new_free_list_entry = 0;
231 };
232
233 ThreadSafeStack<BasePage*> unswept_pages;
234 ThreadSafeStack<SweptPageState> swept_unfinalized_pages;
235};
236
237using SpaceStates = std::vector<SweepingState>;
238
239void StickyUnmark(HeapObjectHeader* header, StickyBits sticky_bits) {
240#if defined(CPPGC_YOUNG_GENERATION)
241 // Young generation in Oilpan uses sticky mark bits.
242 if (sticky_bits == StickyBits::kDisabled)
243 header->Unmark<AccessMode::kAtomic>();
244#else // !defined(CPPGC_YOUNG_GENERATION)
245 header->Unmark<AccessMode::kAtomic>();
246#endif // !defined(CPPGC_YOUNG_GENERATION)
247}
248
249class InlinedFinalizationBuilderBase {
250 public:
251 struct ResultType {
252 bool is_empty = false;
253 size_t largest_new_free_list_entry = 0;
254 };
255
256 protected:
257 ResultType result_;
258};
259
260// Builder that finalizes objects and adds freelist entries right away.
261template <typename FreeHandler>
262class InlinedFinalizationBuilder final : public InlinedFinalizationBuilderBase,
263 public FreeHandler {
264 public:
265 InlinedFinalizationBuilder(BasePage& page, PageAllocator& page_allocator)
266 : FreeHandler(page_allocator,
267 NormalPageSpace::From(page.space()).free_list(), page) {}
268
269 void AddFinalizer(HeapObjectHeader* header, size_t size) {
270 header->Finalize();
271 SetMemoryInaccessible(header, size);
272 }
273
274 void AddFreeListEntry(Address start, size_t size) {
275 FreeHandler::Free({start, size});
276 result_.largest_new_free_list_entry =
277 std::max(result_.largest_new_free_list_entry, size);
278 }
279
280 ResultType&& GetResult(bool is_empty) {
281 result_.is_empty = is_empty;
282 return std::move(result_);
283 }
284};
285
286// Builder that produces results for deferred processing.
287template <typename FreeHandler>
288class DeferredFinalizationBuilder final : public FreeHandler {
289 public:
290 using ResultType = SweepingState::SweptPageState;
291
292 DeferredFinalizationBuilder(BasePage& page, PageAllocator& page_allocator)
293 : FreeHandler(page_allocator, result_.cached_free_list, page) {
294 result_.page = &page;
295 }
296
297 void AddFinalizer(HeapObjectHeader* header, size_t size) {
298 if (header->IsFinalizable()) {
299#if defined(CPPGC_CAGED_HEAP)
300 if (!current_unfinalized_) {
301 DCHECK_NULL(result_.unfinalized_objects_head);
302 current_unfinalized_ = header;
303 result_.unfinalized_objects_head = header;
304 } else {
305 current_unfinalized_->SetNextUnfinalized(header);
306 current_unfinalized_ = header;
307 }
308#else // !defined(CPPGC_CAGED_HEAP)
309 result_.unfinalized_objects.push_back({header});
310#endif // !defined(CPPGC_CAGED_HEAP)
311 found_finalizer_ = true;
312 } else {
313 SetMemoryInaccessible(header, size);
314 }
315 }
316
317 void AddFreeListEntry(Address start, size_t size) {
318 if (found_finalizer_) {
319 result_.unfinalized_free_list.push_back({start, size});
320 } else {
321 FreeHandler::Free({start, size});
322 }
323 result_.largest_new_free_list_entry =
324 std::max(result_.largest_new_free_list_entry, size);
325 found_finalizer_ = false;
326 }
327
328 ResultType&& GetResult(bool is_empty) {
329 result_.is_empty = is_empty;
330 return std::move(result_);
331 }
332
333 private:
334 ResultType result_;
335 HeapObjectHeader* current_unfinalized_ = nullptr;
336 bool found_finalizer_ = false;
337};
338
339template <typename FinalizationBuilder>
340typename FinalizationBuilder::ResultType SweepNormalPage(
341 NormalPage* page, PageAllocator& page_allocator, StickyBits sticky_bits) {
342 constexpr auto kAtomicAccess = AccessMode::kAtomic;
343 FinalizationBuilder builder(*page, page_allocator);
344
345 PlatformAwareObjectStartBitmap& bitmap = page->object_start_bitmap();
346
347 size_t live_bytes = 0;
348
349 Address start_of_gap = page->PayloadStart();
350
351 const auto clear_bit_if_coalesced_entry = [&bitmap,
352 &start_of_gap](Address address) {
353 if (address != start_of_gap) {
354 // Clear only if not the first freed entry.
355 bitmap.ClearBit<AccessMode::kAtomic>(address);
356 } else {
357 // Otherwise check that the bit is set.
358 DCHECK(bitmap.CheckBit<AccessMode::kAtomic>(address));
359 }
360 };
361
362 for (Address begin = page->PayloadStart(), end = page->PayloadEnd();
363 begin != end;) {
364 DCHECK(bitmap.CheckBit<AccessMode::kAtomic>(begin));
365 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(begin);
366 const size_t size = header->AllocatedSize();
367 // Check if this is a free list entry.
368 if (header->IsFree<kAtomicAccess>()) {
369 SetMemoryInaccessible(header, std::min(kFreeListEntrySize, size));
370 // This prevents memory from being discarded in configurations where
371 // `CheckMemoryIsInaccessibleIsNoop()` is false.
372 CheckMemoryIsInaccessible(header, size);
373 clear_bit_if_coalesced_entry(begin);
374 begin += size;
375 continue;
376 }
377 // Check if object is not marked (not reachable).
378 if (!header->IsMarked<kAtomicAccess>()) {
379 builder.AddFinalizer(header, size);
380 clear_bit_if_coalesced_entry(begin);
381 begin += size;
382 continue;
383 }
384 // The object is alive.
385 const Address header_address = reinterpret_cast<Address>(header);
386 if (start_of_gap != header_address) {
387 const size_t new_free_list_entry_size =
388 static_cast<size_t>(header_address - start_of_gap);
389 builder.AddFreeListEntry(start_of_gap, new_free_list_entry_size);
390 DCHECK(bitmap.CheckBit<AccessMode::kAtomic>(start_of_gap));
391 }
392 StickyUnmark(header, sticky_bits);
393 begin += size;
394 start_of_gap = begin;
395 live_bytes += size;
396 }
397
398 const bool is_empty = live_bytes == 0;
399 CHECK_EQ(is_empty, page->marked_bytes() == 0);
400 CHECK_IMPLIES(is_empty, start_of_gap == page->PayloadStart());
401
402 // Empty pages are not added to the free list directly here. The free list is
403 // either added later on or the page is destroyed.
404 if (!is_empty && start_of_gap != page->PayloadEnd()) {
405 builder.AddFreeListEntry(
406 start_of_gap, static_cast<size_t>(page->PayloadEnd() - start_of_gap));
407 DCHECK(bitmap.CheckBit<AccessMode::kAtomic>(start_of_gap));
408 }
409 page->SetAllocatedBytesAtLastGC(live_bytes);
410 page->ResetMarkedBytes(sticky_bits == StickyBits::kDisabled ? 0 : live_bytes);
411 return builder.GetResult(is_empty);
412}
413
414constexpr BaseSpace* kSweepWithoutSpaceAssignment = nullptr;
415
416enum class EmptyPageHandling {
417 kDestroy,
418 kReturn,
419};
420
421// SweepFinalizer is responsible for heap/space/page finalization. Finalization
422// is defined as a step following concurrent sweeping which:
423// - calls finalizers;
424// - returns (unmaps) empty pages;
425// - merges freelists to the space's freelist.
426class SweepFinalizer final {
427 using FreeMemoryHandling = SweepingConfig::FreeMemoryHandling;
428
429 public:
430 SweepFinalizer(cppgc::Platform* platform, StatsCollector* stats_collector,
431 BaseSpace* space, size_t* unused_destroyed_normal_pages,
432 FreeMemoryHandling free_memory_handling,
433 EmptyPageHandling empty_page_handling_type)
434 : platform_(platform),
435 stats_collector_(stats_collector),
436 space_(space),
437 unused_destroyed_normal_pages_(unused_destroyed_normal_pages),
438 free_memory_handling_(free_memory_handling),
439 empty_page_handling_(empty_page_handling_type) {}
440
441 // Finalizes all space states, irrespective of deadlines and sizes.
442 void Finalize(SpaceStates& states) {
443 for (SweepingState& state : states) {
444 Finalize(state);
445 }
446 }
447
448 void Finalize(SweepingState& state) {
449 while (auto page_state = state.swept_unfinalized_pages.Pop()) {
450 FinalizePage(&*page_state);
451 }
452 }
453
454 // Finalizes a given SweepingState with a deadline and size. Only returns
455 // true if a single memory block of at least `size` bytes was returned to the
456 // free list and false otherwise.
457 bool FinalizeWithDeadlineAndSize(StatsCollector::ScopeId scope_id,
458 SweepingState& state,
459 v8::base::TimeTicks deadline, size_t size) {
460 if (state.swept_unfinalized_pages.IsEmpty()) {
461 return false;
462 }
463 StatsCollector::DisabledScope finalize_scope(stats_collector_, scope_id);
464 DeadlineChecker deadline_check(deadline);
465 while (auto page_state = state.swept_unfinalized_pages.Pop()) {
466 FinalizePage(&*page_state);
467 if (size <= largest_consecutive_block_) {
468 return true;
469 }
470 if (deadline_check.Check()) {
471 break;
472 }
473 }
474 return false;
475 }
476
477 // Finalizes a given SweepingState with a deadline. Returns false if the
478 // deadline was exceeded and true if all pages are finalized.
479 bool FinalizeWithDeadline(StatsCollector::ScopeId scope_id,
480 SweepingState& state,
481 v8::base::TimeTicks deadline) {
482 if (state.swept_unfinalized_pages.IsEmpty()) {
483 return true;
484 }
485 StatsCollector::DisabledScope finalize_scope(stats_collector_, scope_id);
486 DeadlineChecker deadline_check(deadline);
487 while (auto page_state = state.swept_unfinalized_pages.Pop()) {
488 FinalizePage(&*page_state);
489 if (deadline_check.Check()) {
490 return false;
491 }
492 }
493 return true;
494 }
495
496 private:
497 void FinalizePage(SweepingState::SweptPageState* page_state) {
498 DCHECK(page_state);
499 DCHECK(page_state->page);
500 BasePage* page = page_state->page;
501
502 // Call finalizers.
503 const auto finalize_header = [](HeapObjectHeader* header) {
504 const size_t size = header->AllocatedSize();
505 header->Finalize();
506 SetMemoryInaccessible(header, size);
507 };
508#if defined(CPPGC_CAGED_HEAP)
509#if defined(CPPGC_POINTER_COMPRESSION)
510 const uint64_t cage_base = CageBaseGlobal::Get();
511#else
512 const uint64_t cage_base = CagedHeapBase::GetBase();
513#endif
514 HeapObjectHeader* next_unfinalized = nullptr;
515
516 for (auto* unfinalized_header = page_state->unfinalized_objects_head;
517 unfinalized_header; unfinalized_header = next_unfinalized) {
518 next_unfinalized = unfinalized_header->GetNextUnfinalized(cage_base);
519 finalize_header(unfinalized_header);
520 }
521#else // !defined(CPPGC_CAGED_HEAP)
522 for (HeapObjectHeader* unfinalized_header :
523 page_state->unfinalized_objects) {
524 finalize_header(unfinalized_header);
525 }
526#endif // !defined(CPPGC_CAGED_HEAP)
527
528 // Unmap page if empty.
529 if (page_state->is_empty) {
530 DCHECK_IMPLIES(page->is_large(),
531 empty_page_handling_ == EmptyPageHandling::kDestroy);
532 if (empty_page_handling_ == EmptyPageHandling::kDestroy) {
533 if (!page->is_large()) {
534 (*unused_destroyed_normal_pages_)++;
535 } else {
536 // Normal pages are added to the page pool when destroyed and thus
537 // cannot be used for a large page allocation.
538 largest_consecutive_block_ = std::max(
539 LargePage::From(page)->PayloadSize(), largest_consecutive_block_);
540 }
541 BasePage::Destroy(page);
542 return;
543 }
544
545 // Otherwise, we currently sweep on allocation. Reinitialize the empty
546 // page and return it right away.
547 auto* normal_page = NormalPage::From(page);
548
549 // If a space has been assigned to the finalizer, then repurpose empty
550 // pages for that space. Otherwise just retain the current space for an
551 // empty page.
552 if (space_) {
553 normal_page->ChangeOwner(*space_);
554 }
555
556 page_state->cached_free_list.Clear();
557 page_state->cached_free_list.Add(
558 {normal_page->PayloadStart(), normal_page->PayloadSize()});
559
560 page_state->unfinalized_free_list.clear();
561 page_state->largest_new_free_list_entry = normal_page->PayloadSize();
562 }
563 // We either swept a non-empty page for which the space should already match
564 // or we swept an empty page for which the owner was changed.
565 DCHECK_IMPLIES(space_, space_ == &page->space());
566 DCHECK(!page->is_large());
567
568 // Merge freelists without finalizers.
569 FreeList& space_freelist = NormalPageSpace::From(page->space()).free_list();
570 space_freelist.Append(std::move(page_state->cached_free_list));
571
572 // Merge freelist with finalizers.
573 if (!page_state->unfinalized_free_list.empty()) {
574 std::unique_ptr<FreeHandlerBase> handler =
575 (free_memory_handling_ == FreeMemoryHandling::kDiscardWherePossible)
576 ? std::unique_ptr<FreeHandlerBase>(new DiscardingFreeHandler(
577 *platform_->GetPageAllocator(), space_freelist, *page))
578 : std::unique_ptr<FreeHandlerBase>(new RegularFreeHandler(
579 *platform_->GetPageAllocator(), space_freelist, *page));
580 handler->FreeFreeList(page_state->unfinalized_free_list);
581 }
582
583 largest_consecutive_block_ = std::max(
584 page_state->largest_new_free_list_entry, largest_consecutive_block_);
585
586 // After the page was fully finalized and freelists have been merged, verify
587 // that the bitmap is consistent.
588 ObjectStartBitmapVerifier().Verify(static_cast<NormalPage&>(*page));
589
590 // Add the page to the space.
591 page->space().AddPage(page);
592 }
593
594 cppgc::Platform* platform_;
595 StatsCollector* stats_collector_;
596 BaseSpace* space_;
597 size_t* unused_destroyed_normal_pages_;
598 // Largest consecutive block of memory. This is the largest free list entry
599 // for normal pages and the largest page size for large objects.
600 size_t largest_consecutive_block_ = 0;
601 const FreeMemoryHandling free_memory_handling_;
602 const EmptyPageHandling empty_page_handling_;
603};
604
605class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
606 friend class HeapVisitor<MutatorThreadSweeper>;
607
608 using FreeMemoryHandling = SweepingConfig::FreeMemoryHandling;
609
610 public:
611 MutatorThreadSweeper(HeapBase* heap, cppgc::Platform* platform,
612 StatsCollector* stats_collector, BaseSpace* space,
613 size_t* unused_destroyed_normal_pages,
614 FreeMemoryHandling free_memory_handling,
615 EmptyPageHandling empty_page_handling)
616 : platform_(platform),
617 stats_collector_(stats_collector),
618 space_(space),
619 unused_destroyed_normal_pages_(unused_destroyed_normal_pages),
620 free_memory_handling_(free_memory_handling),
621 empty_page_handling_(empty_page_handling),
622 sticky_bits_(heap->sticky_bits()) {}
623
624 static void SweepLiveLargePage(LargePage& page, StickyBits sticky_bits) {
625 HeapObjectHeader* header = page.ObjectHeader();
626 CHECK(header->IsMarked());
627 StickyUnmark(header, sticky_bits);
628 if (sticky_bits == StickyBits::kDisabled) {
629 page.ResetMarkedBytes();
630 }
631 page.space().AddPage(&page);
632 }
633
634 void Sweep(SpaceStates& states) {
635 for (SweepingState& state : states) {
636 Sweep(state);
637 }
638 }
639
640 void Sweep(SweepingState& state) {
641 while (auto page = state.unswept_pages.Pop()) {
642 SweepPage(**page);
643 }
644 }
645
646 void SweepPage(BasePage& page) { Traverse(page); }
647
648 // Returns true if out of work. This implies that sweeping is done only if
649 // `sweeping_mode` is kAll.
650 bool FinalizeAndSweepWithDeadline(StatsCollector::ScopeId scope_id,
651 SweepingState& state,
652 v8::base::TimeTicks deadline,
653 MutatorThreadSweepingMode sweeping_mode) {
654 // First, prioritize finalization of pages that were swept concurrently.
655 SweepFinalizer finalizer(
656 platform_, stats_collector_, space_, unused_destroyed_normal_pages_,
657 free_memory_handling_, EmptyPageHandling::kDestroy);
658 if (!finalizer.FinalizeWithDeadline(scope_id, state, deadline)) {
659 return false;
660 }
661
662 if (sweeping_mode != MutatorThreadSweepingMode::kOnlyFinalizers) {
663 // Help out the concurrent sweeper.
664 if (!SweepSpaceWithDeadline(&state, deadline)) {
665 return false;
666 }
667 }
668 return true;
669 }
670
671 bool SweepWithDeadlineAndSize(StatsCollector::ScopeId scope_id,
672 SweepingState& state,
673 v8::base::TimeTicks deadline, size_t size) {
674 if (state.unswept_pages.IsEmpty()) {
675 return false;
676 }
677 StatsCollector::DisabledScope sweep_scope(stats_collector_, scope_id);
678 DeadlineChecker deadline_check(deadline);
679 while (auto page = state.unswept_pages.Pop()) {
680 SweepPage(**page);
681 if (size <= largest_consecutive_block_) {
682 return true;
683 }
684 if (deadline_check.Check()) {
685 break;
686 }
687 }
688 return false;
689 }
690
691 private:
692 bool SweepSpaceWithDeadline(SweepingState* state,
693 v8::base::TimeTicks deadline) {
694 DeadlineChecker deadline_check(deadline);
695 while (auto page = state->unswept_pages.Pop()) {
696 Traverse(**page);
697 if (deadline_check.Check()) {
698 return false;
699 }
700 }
701
702 return true;
703 }
704
705 bool VisitNormalPage(NormalPage& page) {
706 if (free_memory_handling_ == FreeMemoryHandling::kDiscardWherePossible) {
707 page.ResetDiscardedMemory();
708 }
709 const auto result =
710 (free_memory_handling_ == FreeMemoryHandling::kDiscardWherePossible)
711 ? SweepNormalPage<
712 InlinedFinalizationBuilder<DiscardingFreeHandler>>(
713 &page, *platform_->GetPageAllocator(), sticky_bits_)
714 : SweepNormalPage<InlinedFinalizationBuilder<RegularFreeHandler>>(
715 &page, *platform_->GetPageAllocator(), sticky_bits_);
716 if (result.is_empty &&
717 empty_page_handling_ == EmptyPageHandling::kDestroy) {
718 NormalPage::Destroy(&page);
719 (*unused_destroyed_normal_pages_)++;
720 } else {
721 if (space_) {
722 DCHECK_IMPLIES(!result.is_empty, space_ == &page.space());
723 page.ChangeOwner(*space_);
724 }
725 auto& target_space = NormalPageSpace::From(page.space());
726 target_space.AddPage(&page);
727 if (result.is_empty) {
728 target_space.free_list().Add({page.PayloadStart(), page.PayloadSize()});
729 }
730 // The page was eagerly finalized and all the freelists have been merged.
731 // Verify that the bitmap is consistent with headers.
732 ObjectStartBitmapVerifier().Verify(page);
733 largest_consecutive_block_ =
734 std::max(result.is_empty ? page.PayloadSize()
735 : result.largest_new_free_list_entry,
736 largest_consecutive_block_);
737 }
738 return true;
739 }
740
741 bool VisitLargePage(LargePage& page) {
742 HeapObjectHeader* header = page.ObjectHeader();
743 CHECK(!header->IsMarked());
744 DCHECK_EQ(page.marked_bytes(), 0u);
745 header->Finalize();
746 largest_consecutive_block_ =
747 std::max(page.PayloadSize(), largest_consecutive_block_);
748 LargePage::Destroy(&page);
749 return true;
750 }
751
752 cppgc::Platform* platform_;
753 StatsCollector* stats_collector_;
754 // Largest consecutive block of memory. This is the largest free list entry
755 // for normal pages and the largest page size for large objects.
756 size_t largest_consecutive_block_ = 0;
757 BaseSpace* space_;
758 size_t* unused_destroyed_normal_pages_;
759 const FreeMemoryHandling free_memory_handling_;
760 const EmptyPageHandling empty_page_handling_;
761 const StickyBits sticky_bits_;
762};
763
764class ConcurrentSweepTask final : public cppgc::JobTask,
765 private HeapVisitor<ConcurrentSweepTask> {
766 friend class HeapVisitor<ConcurrentSweepTask>;
767
768 using FreeMemoryHandling = SweepingConfig::FreeMemoryHandling;
769
770 public:
771 ConcurrentSweepTask(Platform* platform, HeapBase& heap,
772 SpaceStates* space_states,
773 SweepingState* empty_normal_pages,
774 SweepingState* empty_large_pages,
775 FreeMemoryHandling free_memory_handling)
776 : heap_(heap),
777 page_allocator_(*platform->GetPageAllocator()),
778 space_states_(space_states),
779 empty_normal_pages_(empty_normal_pages),
780 empty_large_pages_(empty_large_pages),
781 free_memory_handling_(free_memory_handling),
782 sticky_bits_(heap.sticky_bits()) {}
783
784 void Run(cppgc::JobDelegate* delegate) final {
785 StatsCollector::EnabledConcurrentScope stats_scope(
786 heap_.stats_collector(), StatsCollector::kConcurrentSweep);
787
788 // Sweep empty normal pages first. These pages can be reused across all
789 // regular spaces.
790 if (!SweepStateOrYield(delegate, *empty_normal_pages_)) return;
791 for (SweepingState& state : *space_states_) {
792 if (!SweepStateOrYield(delegate, state)) return;
793 }
794 // Sweep empty large pages last. They generally cannot be reused.
795 // TODO(mlippautz): We could split them into pages that can be split up for
796 // normal pages.
797 if (!SweepStateOrYield(delegate, *empty_large_pages_)) return;
798
799 is_completed_.store(true, std::memory_order_relaxed);
800 }
801
802 size_t GetMaxConcurrency(size_t /* active_worker_count */) const final {
803 return is_completed_.load(std::memory_order_relaxed) ? 0 : 1;
804 }
805
806 private:
807 // Returns true if sweeping completed, or false if it yielded.
808 bool SweepStateOrYield(cppgc::JobDelegate* delegate, SweepingState& state) {
809 current_sweeping_state_ = &state;
810 while (auto page = state.unswept_pages.Pop()) {
811 Traverse(**page);
812 if (delegate->ShouldYield()) {
813 return false;
814 }
815 }
816 current_sweeping_state_ = nullptr;
817 return true;
818 }
819
820 bool VisitNormalPage(NormalPage& page) {
821 if (free_memory_handling_ == FreeMemoryHandling::kDiscardWherePossible) {
822 page.ResetDiscardedMemory();
823 }
824 SweepingState::SweptPageState sweep_result =
825 (free_memory_handling_ == FreeMemoryHandling::kDiscardWherePossible)
826 ? SweepNormalPage<
827 DeferredFinalizationBuilder<DiscardingFreeHandler>>(
828 &page, page_allocator_, sticky_bits_)
829 : SweepNormalPage<DeferredFinalizationBuilder<RegularFreeHandler>>(
830 &page, page_allocator_, sticky_bits_);
831 current_sweeping_state_->swept_unfinalized_pages.Push(
832 std::move(sweep_result));
833 return true;
834 }
835
836 bool VisitLargePage(LargePage& page) {
837 HeapObjectHeader* header = page.ObjectHeader();
838 CHECK(!header->IsMarked());
839 DCHECK_EQ(page.marked_bytes(), 0u);
840#if defined(CPPGC_CAGED_HEAP)
841 HeapObjectHeader* const unfinalized_objects =
842 header->IsFinalizable() ? page.ObjectHeader() : nullptr;
843#else // !defined(CPPGC_CAGED_HEAP)
844 std::vector<HeapObjectHeader*> unfinalized_objects;
845 if (header->IsFinalizable()) {
846 unfinalized_objects.push_back(page.ObjectHeader());
847 }
848#endif // !defined(CPPGC_CAGED_HEAP)
849 // Avoid directly destroying large pages here as counter updates and
850 // backend access in BasePage::Destroy() are not concurrency safe.
851 current_sweeping_state_->swept_unfinalized_pages.Push(
852 {&page, std::move(unfinalized_objects), {}, {}, true});
853 return true;
854 }
855
856 HeapBase& heap_;
857 PageAllocator& page_allocator_;
858 SpaceStates* const space_states_;
859 SweepingState* const empty_normal_pages_;
860 SweepingState* const empty_large_pages_;
861 SweepingState* current_sweeping_state_ = nullptr;
862 std::atomic_bool is_completed_{false};
863 const FreeMemoryHandling free_memory_handling_;
864 const StickyBits sticky_bits_;
865};
866
867// This visitor starts sweeping.
868//
869// Normal spaces:
870// - Clears free lists.
871// - Moves all pages to local state (SpaceStates).
872// - ASAN: Poisons all unmarked object payloads.
873//
874// Large spaces:
875// - Directly sweeps live objects and returns pages to the space.
876// - Moves dead objects to local state (SpaceStates).
877// - ASAN: Poisons all unmarked object payloads.
878class PrepareForSweepVisitor final
879 : protected HeapVisitor<PrepareForSweepVisitor> {
880 friend class HeapVisitor<PrepareForSweepVisitor>;
881 using CompactableSpaceHandling = SweepingConfig::CompactableSpaceHandling;
882
883 public:
884 PrepareForSweepVisitor(HeapBase* heap, SpaceStates* space_states,
885 SweepingState* empty_normal_pages,
886 SweepingState* empty_large_pages,
887 CompactableSpaceHandling compactable_space_handling)
888 : heap_(heap),
889 space_states_(space_states),
890 empty_normal_pages_(empty_normal_pages),
891 empty_large_pages_(empty_large_pages),
892 compactable_space_handling_(compactable_space_handling) {}
893
894 void Run(RawHeap& raw_heap) {
895 *space_states_ = SpaceStates(raw_heap.size());
896 Traverse(raw_heap);
897 }
898
899 protected:
900 bool VisitNormalPageSpace(NormalPageSpace& space) {
901 if ((compactable_space_handling_ == CompactableSpaceHandling::kIgnore) &&
902 space.is_compactable()) {
903 return true;
904 }
905
906 CHECK(!space.linear_allocation_buffer().size());
907 space.free_list().Clear();
908#ifdef V8_USE_ADDRESS_SANITIZER
909 UnmarkedObjectsPoisoner().Traverse(space);
910#endif // V8_USE_ADDRESS_SANITIZER
911
912 BaseSpace::Pages space_pages = space.RemoveAllPages();
913 std::sort(space_pages.begin(), space_pages.end(),
914 [](const BasePage* a, const BasePage* b) {
915 return a->marked_bytes() < b->marked_bytes();
916 });
917 auto first_non_empty_page = std::find_if(
918 space_pages.begin(), space_pages.end(),
919 [](const BasePage* page) { return page->marked_bytes() != 0; });
920 empty_normal_pages_->unswept_pages.Insert(space_pages.begin(),
921 first_non_empty_page);
922 (*space_states_)[space.index()].unswept_pages.Insert(first_non_empty_page,
923 space_pages.end());
924
925 return true;
926 }
927
928 bool VisitLargePageSpace(LargePageSpace& space) {
929#ifdef V8_USE_ADDRESS_SANITIZER
930 UnmarkedObjectsPoisoner().Traverse(space);
931#endif // V8_USE_ADDRESS_SANITIZER
932
933 BaseSpace::Pages space_pages = space.RemoveAllPages();
934 for (BasePage* page : space_pages) {
935#ifdef DEBUG
936 const auto* header = LargePage::From(page)->ObjectHeader();
937 DCHECK_IMPLIES(page->marked_bytes() == 0, !header->IsMarked());
938 DCHECK_IMPLIES(page->marked_bytes() != 0, header->IsMarked());
939#endif // DEBUG
940 if (page->marked_bytes() != 0) {
941 MutatorThreadSweeper::SweepLiveLargePage(*LargePage::From(page),
942 heap_->sticky_bits());
943 } else {
944 empty_large_pages_->unswept_pages.Push(page);
945 }
946 }
947
948 return true;
949 }
950
951 private:
952 HeapBase* const heap_;
953 SpaceStates* const space_states_;
954 SweepingState* const empty_normal_pages_;
955 SweepingState* const empty_large_pages_;
956 CompactableSpaceHandling compactable_space_handling_;
957};
958
959} // namespace
960
961class Sweeper::SweeperImpl final {
962 using FreeMemoryHandling = SweepingConfig::FreeMemoryHandling;
963
964 public:
965 explicit SweeperImpl(HeapBase& heap)
966 : heap_(heap.raw_heap()),
967 page_pool_(heap.page_backend()->page_pool()),
968 stats_collector_(heap.stats_collector()),
969 platform_(heap.platform()) {
971 }
972
973 ~SweeperImpl() { CancelAllSweepingTasks(); }
974
975 void Start(SweepingConfig config) {
976 StatsCollector::EnabledScope stats_scope(stats_collector_,
977 StatsCollector::kAtomicSweep);
978 is_in_progress_ = true;
979 config_ = config;
980
981 if (!foreground_task_runner_) {
982 // The sweeper may be initialized at a time when the platform is not yet
983 // able to return a foreground task runner. Lazily initialize the runners
984 // on the first sweep instead.
985 foreground_task_runner_ =
986 platform_->GetForegroundTaskRunner(kForegroundRegularPriority);
987 low_priority_foreground_task_runner_ =
988 platform_->GetForegroundTaskRunner(kForegroundLowPriority);
989 // Having a low priority runner implies having a regular runner as well.
990 CHECK_IMPLIES(low_priority_foreground_task_runner_.get(),
991 foreground_task_runner_.get());
992 const auto supports_non_nestable_tasks =
993 [](const std::shared_ptr<TaskRunner>& runner) {
994 return runner && runner->NonNestableTasksEnabled() &&
995 runner->NonNestableDelayedTasksEnabled();
996 };
997 if (!supports_non_nestable_tasks(foreground_task_runner_) ||
998 !supports_non_nestable_tasks(low_priority_foreground_task_runner_)) {
999 foreground_task_runner_.reset();
1000 low_priority_foreground_task_runner_.reset();
1001 }
1002 }
1003
1004 // Verify bitmap for all spaces regardless of |compactable_space_handling|.
1005 ObjectStartBitmapVerifier().Verify(heap_);
1006
1007 // If inaccessible memory is touched to check whether it is set up
1008 // correctly it cannot be discarded.
1009 if (!CanDiscardMemory()) {
1010 config_.free_memory_handling = FreeMemoryHandling::kDoNotDiscard;
1011 }
1012 if (config_.free_memory_handling ==
1013 FreeMemoryHandling::kDiscardWherePossible) {
1014 // The discarded counter will be recomputed.
1015 heap_.heap()->stats_collector()->ResetDiscardedMemory();
1016 }
1017
1018 PrepareForSweepVisitor(heap_.heap(), &space_states_, &empty_normal_pages_,
1019 &empty_large_pages_,
1020 config.compactable_space_handling)
1021 .Run(heap_);
1022
1023 if (config.sweeping_type >= SweepingConfig::SweepingType::kIncremental) {
1024 ScheduleLowPriorityIncrementalSweeping();
1025 ScheduleIncrementalSweeping(kDelayWhileLowPrioritySweepingMakesProgress);
1026 }
1027 if (config.sweeping_type >=
1028 SweepingConfig::SweepingType::kIncrementalAndConcurrent) {
1029 ScheduleConcurrentSweeping();
1030 }
1031 }
1032
1033 void SweepForTask(v8::base::TimeDelta max_duration) {
1034 // Before sweeping in a task, handle low priority sweeping cases. These are
1035 // no-ops if low priority sweeping is not running.
1036 if (low_priority_task_ran_) {
1037 // Low priority task made progress. Reschedule with delay.
1038 ScheduleIncrementalSweeping(kDelayWhileLowPrioritySweepingMakesProgress);
1039 return;
1040 }
1041
1042 // Low priority sweeping is not running or not being invoked on time.
1043 switch (
1044 SweepInForegroundTaskImpl(max_duration, StatsCollector::kSweepInTask)) {
1045 case SweepResult::kFullyDone:
1046 return;
1047 case SweepResult::kInProgress:
1048 ScheduleIncrementalSweeping(kDelayForRegularPrioritySweeping);
1049 return;
1050 case SweepResult::kMainThreadDoneConcurrentInProgress:
1051 // Throttle incremental sweeping while the concurrent Job is still
1052 // making progress.
1053 ScheduleIncrementalSweeping(kDelayWhileConcurrentSweepingMakesProgress);
1054 return;
1055 }
1056 UNREACHABLE();
1057 }
1058
1059 void SweepForLowPriorityTask(v8::base::TimeDelta max_duration) {
1060 low_priority_task_ran_ = true;
1061 switch (SweepInForegroundTaskImpl(
1062 max_duration, StatsCollector::kSweepInLowPriorityTask)) {
1063 case SweepResult::kFullyDone:
1064 return;
1065 case SweepResult::kInProgress:
1066 // More work to do. Continue sweeping with low priority.
1067 ScheduleLowPriorityIncrementalSweeping();
1068 return;
1069 case SweepResult::kMainThreadDoneConcurrentInProgress:
1070 ScheduleLowPriorityIncrementalSweeping(
1071 kDelayWhileLowPrioritySweepingMakesProgress);
1072 return;
1073 }
1074 UNREACHABLE();
1075 }
1076
1077 bool SweepForLargeAllocation(BaseSpace* space, size_t size,
1078 v8::base::TimeDelta max_duration) {
1079 DCHECK(space->is_large());
1080#ifdef DEBUG
1081 // SpaceState for large objects is empty as those objects are put directly
1082 // on `empty_large_pages_`.
1083 SweepingState& space_state = space_states_[space->index()];
1084 DCHECK(space_state.unswept_pages.IsEmpty());
1085 DCHECK(space_state.swept_unfinalized_pages.IsEmpty());
1086#endif // DEBUG
1087 // Bail out if there are no empty large pages that could be freed and
1088 // reused for a large allocation.
1089 if (empty_large_pages_.swept_unfinalized_pages.IsEmpty() &&
1090 empty_large_pages_.unswept_pages.IsEmpty()) {
1091 return false;
1092 }
1093
1094 StatsCollector::EnabledScope incremental_sweep_scope(
1095 stats_collector_, StatsCollector::kIncrementalSweep);
1096 StatsCollector::DisabledScope sweep_on_allocation_scope(
1097 stats_collector_, StatsCollector::kSweepOnAllocation);
1098 MutatorThreadSweepingScope sweeping_in_progress(*this);
1099
1100 const auto deadline = v8::base::TimeTicks::Now() + max_duration;
1101
1102 SweepFinalizer finalizer(
1104 config_.free_memory_handling, EmptyPageHandling::kDestroy);
1105 // Check empty pages first. Try to just finalize a page without sweeping.
1106 // If there's a single page in there we will use it.
1107 if (finalizer.FinalizeWithDeadlineAndSize(
1108 StatsCollector::kSweepFinalizeEmptyPages, empty_large_pages_,
1109 deadline, size)) {
1110 return true;
1111 }
1112 MutatorThreadSweeper sweeper(heap_.heap(), platform_, stats_collector_,
1114 config_.free_memory_handling,
1115 EmptyPageHandling::kDestroy);
1116 // Sweeping an empty page in case there's nothing with finalizers. If
1117 // there's a single page in there we will use it.
1118 if (sweeper.SweepWithDeadlineAndSize(StatsCollector::kSweepEmptyPages,
1119 empty_large_pages_, deadline, size)) {
1120 return true;
1121 }
1122
1123 return false;
1124 }
1125
1126 bool SweepForNormalAllocation(BaseSpace* space, size_t size,
1127 v8::base::TimeDelta max_duration) {
1128 DCHECK(!space->is_large());
1129
1130 if (unused_destroyed_normal_pages_ > 0 && page_pool_.pooled() > 0) {
1132 // Destroyed pages during sweeping in tasks are generally sitting in the
1133 // page pool and can be reused without increasing memory footprint.
1134 return false;
1135 }
1136
1137 SweepingState& space_state = space_states_[space->index()];
1138
1139 // Bail out if there are no empty pages and no pages to be processed for the
1140 // specific space at this moment.
1141 if (empty_normal_pages_.swept_unfinalized_pages.IsEmpty() &&
1142 empty_normal_pages_.unswept_pages.IsEmpty() &&
1143 space_state.swept_unfinalized_pages.IsEmpty() &&
1144 space_state.unswept_pages.IsEmpty()) {
1145 return false;
1146 }
1147
1148 StatsCollector::EnabledScope incremental_sweep_scope(
1149 stats_collector_, StatsCollector::kIncrementalSweep);
1150 StatsCollector::DisabledScope sweep_on_allocation_scope(
1151 stats_collector_, StatsCollector::kSweepOnAllocation);
1152 MutatorThreadSweepingScope sweeping_in_progress(*this);
1153
1154 const auto deadline = v8::base::TimeTicks::Now() + max_duration;
1155
1156 SweepFinalizer finalizer(
1158 config_.free_memory_handling, EmptyPageHandling::kReturn);
1159 // Check empty pages first. Try to just finalize a page without sweeping.
1160 // If there's a single page in there we will use it.
1161 if (finalizer.FinalizeWithDeadlineAndSize(
1162 StatsCollector::kSweepFinalizeEmptyPages, empty_normal_pages_,
1163 deadline, size)) {
1164 return true;
1165 }
1166 MutatorThreadSweeper sweeper(heap_.heap(), platform_, stats_collector_,
1168 config_.free_memory_handling,
1169 EmptyPageHandling::kReturn);
1170 // Sweeping an empty page in case there's nothing with finalizers. If
1171 // there's a single page in there we will use it.
1172 if (sweeper.SweepWithDeadlineAndSize(StatsCollector::kSweepEmptyPages,
1173 empty_normal_pages_, deadline, size)) {
1174 return true;
1175 }
1176
1177 // Process unfinalized non-empty pages as finalizing a page is generally
1178 // faster than sweeping.
1179 if (finalizer.FinalizeWithDeadlineAndSize(
1180 StatsCollector::kSweepFinalizeSweptPages, space_state, deadline,
1181 size)) {
1182 return true;
1183 }
1184 // Then, if no matching slot is found in the unfinalized pages, search the
1185 // unswept page. This also helps out the concurrent sweeper.
1186 if (sweeper.SweepWithDeadlineAndSize(StatsCollector::kSweepPages,
1187 space_state, deadline, size)) {
1188 return true;
1189 }
1190 return false;
1191 }
1192
1193 bool SweepForAllocationIfRunning(BaseSpace* space, size_t size,
1194 v8::base::TimeDelta max_duration) {
1195 if (!is_in_progress_) {
1196 return false;
1197 }
1198
1199 // Bail out for recursive sweeping calls. This can happen when finalizers
1200 // allocate new memory.
1201 if (is_sweeping_on_mutator_thread_) {
1202 return false;
1203 }
1204
1205 return space->is_large()
1206 ? SweepForLargeAllocation(space, size, max_duration)
1207 : SweepForNormalAllocation(space, size, max_duration);
1208 }
1209
1210 bool FinishIfRunning() {
1211 if (!is_in_progress_) {
1212 return false;
1213 }
1214
1215 // Bail out for recursive sweeping calls. This can happen when finalizers
1216 // allocate new memory.
1217 if (is_sweeping_on_mutator_thread_) {
1218 return false;
1219 }
1220
1221 {
1222 std::optional<StatsCollector::EnabledScope> stats_scope;
1223 if (config_.sweeping_type != SweepingConfig::SweepingType::kAtomic) {
1224 stats_scope.emplace(stats_collector_,
1225 StatsCollector::kIncrementalSweep);
1226 }
1228 StatsCollector::kSweepFinish);
1229 if (concurrent_sweeper_handle_ && concurrent_sweeper_handle_->IsValid() &&
1230 concurrent_sweeper_handle_->UpdatePriorityEnabled()) {
1231 concurrent_sweeper_handle_->UpdatePriority(kBackgroundBoostedPriority);
1232 }
1233 Finish();
1234 }
1235 NotifyDone();
1236 return true;
1237 }
1238
1239 bool IsConcurrentSweepingDone() const {
1240 return !concurrent_sweeper_handle_ ||
1241 !concurrent_sweeper_handle_->IsValid() ||
1242 !concurrent_sweeper_handle_->IsActive();
1243 }
1244
1245 void FinishIfOutOfWork() {
1246 if (!is_in_progress_ || is_sweeping_on_mutator_thread_) {
1247 return;
1248 }
1249 // We only finish through this method if concurrent sweeping is enabled but
1250 // not running anymore. All other paths finish sweeping through incremental
1251 // steps.
1252 if (!concurrent_sweeper_handle_ || !concurrent_sweeper_handle_->IsValid() ||
1253 concurrent_sweeper_handle_->IsActive()) {
1254 return;
1255 }
1256 // At this point we know that the concurrent sweeping task has run
1257 // out-of-work: all pages are swept. The main thread still needs to finalize
1258 // swept pages.
1259 DCHECK(std::all_of(space_states_.begin(), space_states_.end(),
1260 [](const SweepingState& state) {
1261 return state.unswept_pages.IsEmpty();
1262 }));
1263 DCHECK(empty_normal_pages_.unswept_pages.IsEmpty());
1264 DCHECK(empty_large_pages_.unswept_pages.IsEmpty());
1265 if (std::any_of(space_states_.begin(), space_states_.end(),
1266 [](const SweepingState& state) {
1267 return !state.swept_unfinalized_pages.IsEmpty();
1268 })) {
1269 return;
1270 }
1271 if (!empty_normal_pages_.swept_unfinalized_pages.IsEmpty() ||
1272 !empty_large_pages_.swept_unfinalized_pages.IsEmpty()) {
1273 return;
1274 }
1275 // All pages have also been finalized. Finalizing pages likely occurred on
1276 // allocation, in which case sweeping is not finalized even though all work
1277 // is done.
1278 {
1279 StatsCollector::EnabledScope stats_scope(
1280 stats_collector_, StatsCollector::kSweepFinishIfOutOfWork);
1281 FinalizeSweep();
1282 }
1283 NotifyDone();
1284 }
1285
1286 void Finish() {
1287 DCHECK(is_in_progress_);
1288
1289 MutatorThreadSweepingScope sweeping_in_progress(*this);
1290
1291 // First, call finalizers on the mutator thread. This is just an
1292 // optimization as we need to call finalizers after sweeping as well. It
1293 // allows the time in the concurrent sweeper to be spent on actual sweeping.
1294 SweepFinalizer finalizer(
1295 platform_, stats_collector_, kSweepWithoutSpaceAssignment,
1296 &unused_destroyed_normal_pages_, config_.free_memory_handling,
1297 EmptyPageHandling::kDestroy);
1298 finalizer.Finalize(space_states_);
1299 finalizer.Finalize(empty_normal_pages_);
1300 finalizer.Finalize(empty_large_pages_);
1301
1302 // Then, help out the concurrent thread.
1303 MutatorThreadSweeper sweeper(
1304 heap_.heap(), platform_, stats_collector_, kSweepWithoutSpaceAssignment,
1305 &unused_destroyed_normal_pages_, config_.free_memory_handling,
1306 EmptyPageHandling::kDestroy);
1307 sweeper.Sweep(space_states_);
1308 sweeper.Sweep(empty_normal_pages_);
1309 sweeper.Sweep(empty_large_pages_);
1310
1311 // There's nothing left to sweep here for the main thread. The concurrent
1312 // sweeper may still sweep pages and create pages to be finalized after
1313 // joining the job.
1314 FinalizeSweep();
1315 }
1316
1317 void FinalizeSweep() {
1318 // Synchronize with the concurrent sweeper and call remaining finalizers.
1319 SynchronizeAndFinalizeConcurrentAndIncrementalSweeping();
1320
1321 // Clear space taken up by sweeper metadata.
1322 space_states_.clear();
1323
1324 is_in_progress_ = false;
1325 notify_done_pending_ = true;
1327 }
1328
1329 void NotifyDone() {
1330 DCHECK(!is_in_progress_);
1331 DCHECK(notify_done_pending_);
1332 notify_done_pending_ = false;
1333 stats_collector_->NotifySweepingCompleted(config_.sweeping_type);
1334 if (config_.free_memory_handling ==
1335 FreeMemoryHandling::kDiscardWherePossible) {
1336 heap_.heap()->page_backend()->ReleasePooledPages();
1337 }
1338 }
1339
1340 void WaitForConcurrentSweepingForTesting() {
1341 if (concurrent_sweeper_handle_) concurrent_sweeper_handle_->Join();
1342 }
1343
1344 bool IsSweepingOnMutatorThread() const {
1345 return is_sweeping_on_mutator_thread_;
1346 }
1347
1348 bool IsSweepingInProgress() const { return is_in_progress_; }
1349
1350 bool PerformSweepOnMutatorThread(v8::base::TimeDelta max_duration,
1351 StatsCollector::ScopeId internal_scope_id,
1352 MutatorThreadSweepingMode sweeping_mode) {
1353 if (!is_in_progress_) return true;
1354
1355 MutatorThreadSweepingScope sweeping_in_progress(*this);
1356
1357 {
1358 StatsCollector::EnabledScope stats_scope(
1359 stats_collector_, StatsCollector::kIncrementalSweep);
1360
1361 MutatorThreadSweeper sweeper(
1362 heap_.heap(), platform_, stats_collector_,
1363 kSweepWithoutSpaceAssignment, &unused_destroyed_normal_pages_,
1364 config_.free_memory_handling, EmptyPageHandling::kDestroy);
1365 {
1366 StatsCollector::EnabledScope inner_stats_scope(
1367 stats_collector_, internal_scope_id, "max_duration_ms",
1368 max_duration.InMillisecondsF(), "sweeping_mode",
1369 ToString(sweeping_mode));
1370 const auto deadline = v8::base::TimeTicks::Now() + max_duration;
1371 if (!sweeper.FinalizeAndSweepWithDeadline(
1372 StatsCollector::kSweepFinalizeEmptyPages, empty_normal_pages_,
1373 deadline, sweeping_mode)) {
1374 return false;
1375 }
1376 for (auto& state : space_states_) {
1377 if (!sweeper.FinalizeAndSweepWithDeadline(
1378 StatsCollector::kSweepFinalizeSweptPages, state, deadline,
1379 sweeping_mode)) {
1380 return false;
1381 }
1382 }
1383 if (!sweeper.FinalizeAndSweepWithDeadline(
1384 StatsCollector::kSweepFinalizeEmptyPages, empty_large_pages_,
1385 deadline, sweeping_mode)) {
1386 return false;
1387 }
1388 if (sweeping_mode != MutatorThreadSweepingMode::kAll) {
1389 return false;
1390 }
1391 }
1392 FinalizeSweep();
1393 }
1394 NotifyDone();
1395 return true;
1396 }
1397
1398 void AddMutatorThreadSweepingObserver(
1399 Sweeper::SweepingOnMutatorThreadObserver* observer) {
1400 DCHECK_EQ(mutator_thread_sweeping_observers_.end(),
1401 std::find(mutator_thread_sweeping_observers_.begin(),
1402 mutator_thread_sweeping_observers_.end(), observer));
1403 mutator_thread_sweeping_observers_.push_back(observer);
1404 }
1405
1406 void RemoveMutatorThreadSweepingObserver(
1407 Sweeper::SweepingOnMutatorThreadObserver* observer) {
1408 const auto it =
1409 std::find(mutator_thread_sweeping_observers_.begin(),
1410 mutator_thread_sweeping_observers_.end(), observer);
1411 DCHECK_NE(mutator_thread_sweeping_observers_.end(), it);
1412 mutator_thread_sweeping_observers_.erase(it);
1413 }
1414
1415 private:
1416 class MutatorThreadSweepingScope final {
1417 public:
1418 explicit MutatorThreadSweepingScope(SweeperImpl& sweeper)
1419 : sweeper_(sweeper) {
1420 DCHECK(!sweeper_.is_sweeping_on_mutator_thread_);
1421 sweeper_.is_sweeping_on_mutator_thread_ = true;
1422 for (auto* observer : sweeper_.mutator_thread_sweeping_observers_) {
1423 observer->Start();
1424 }
1425 }
1426 ~MutatorThreadSweepingScope() {
1427 sweeper_.is_sweeping_on_mutator_thread_ = false;
1428 for (auto* observer : sweeper_.mutator_thread_sweeping_observers_) {
1429 observer->End();
1430 }
1431 }
1432
1433 MutatorThreadSweepingScope(const MutatorThreadSweepingScope&) = delete;
1434 MutatorThreadSweepingScope& operator=(const MutatorThreadSweepingScope&) =
1435 delete;
1436
1437 private:
1438 SweeperImpl& sweeper_;
1439 };
1440
1441 class IncrementalSweepTask final : public cppgc::Task {
1442 public:
1443 using Handle = SingleThreadedHandle;
1444
1445 static constexpr auto kMaxSweepDuration =
1447
1448 IncrementalSweepTask(SweeperImpl& sweeper, cppgc::TaskPriority priority)
1449 : sweeper_(sweeper),
1450 handle_(Handle::NonEmptyTag{}),
1451 priority_(priority) {}
1452
1453 static Handle Post(SweeperImpl& sweeper,
1454 const std::shared_ptr<cppgc::TaskRunner>& runner,
1455 cppgc::TaskPriority priority,
1456 std::optional<v8::base::TimeDelta> delay = {}) {
1457 auto task = std::make_unique<IncrementalSweepTask>(sweeper, priority);
1458 auto handle = task->handle_;
1459 if (delay.has_value()) {
1460 runner->PostNonNestableDelayedTask(std::move(task),
1461 delay->InSecondsF());
1462 } else {
1463 runner->PostNonNestableTask(std::move(task));
1464 }
1465 return handle;
1466 }
1467
1468 void Run() override {
1469 if (handle_.IsCanceled()) {
1470 return;
1471 }
1472 switch (priority_) {
1473 case kForegroundRegularPriority:
1474 sweeper_.SweepForTask(kMaxSweepDuration);
1475 return;
1476 case kForegroundLowPriority:
1477 sweeper_.SweepForLowPriorityTask(kMaxSweepDuration);
1478 return;
1479 default:
1480 UNREACHABLE();
1481 }
1482 }
1483
1484 private:
1485 SweeperImpl& sweeper_;
1486 // TODO(chromium:1056170): Change to CancelableTask.
1487 Handle handle_;
1488 const cppgc::TaskPriority priority_;
1489 };
1490
1491 enum class SweepResult {
1492 // Sweeping is fully done.
1493 kFullyDone,
1494 // Sweeping is still in progress.
1495 kInProgress,
1496 // Sweeping on the main thread is done but concurrent sweepers are still
1497 // making progress. This may be temporary.
1498 kMainThreadDoneConcurrentInProgress,
1499 };
1500
1501 static constexpr double kMaxHeapPercentageForNoSweeping = 50;
1502
1503 static constexpr auto kDelayWhileLowPrioritySweepingMakesProgress =
1505
1506 static constexpr auto kDelayWhileConcurrentSweepingMakesProgress =
1508
1509 // We use a small delay here to allow lower priority tasks to interrupt
1510 // sweeping and take over.
1511 static constexpr auto kDelayForRegularPrioritySweeping =
1513
1514 SweepResult SweepInForegroundTaskImpl(v8::base::TimeDelta max_duration,
1515 StatsCollector::ScopeId scope) {
1516 // First round of sweeping.
1517 bool concurrent_sweep_complete = IsConcurrentSweepingDone();
1518 const auto start = v8::base::TimeTicks::Now();
1519 bool main_thread_sweep_complete = PerformSweepOnMutatorThread(
1520 max_duration, scope,
1521 concurrent_sweep_complete ? MutatorThreadSweepingMode::kAll
1522 : MutatorThreadSweepingMode::kOnlyFinalizers);
1523 if (main_thread_sweep_complete && !concurrent_sweep_complete &&
1524 IsConcurrentSweepingDone()) {
1525 // Concurrent sweeping finished while processing the first round. Use the
1526 // left over time for a second round to avoid scheduling another task.
1527 max_duration -= (v8::base::TimeTicks::Now() - start);
1528 if (max_duration > v8::base::TimeDelta::FromMilliseconds(0)) {
1529 concurrent_sweep_complete = true;
1530 main_thread_sweep_complete = PerformSweepOnMutatorThread(
1531 max_duration, scope, MutatorThreadSweepingMode::kAll);
1532 }
1533 }
1534 if (main_thread_sweep_complete) {
1535 if (!concurrent_sweep_complete) {
1536 return SweepResult::kMainThreadDoneConcurrentInProgress;
1537 } else {
1538 CHECK(!is_in_progress_);
1539 return SweepResult::kFullyDone;
1540 }
1541 }
1542 return SweepResult::kInProgress;
1543 }
1544
1545 void ScheduleIncrementalSweeping(
1546 std::optional<v8::base::TimeDelta> delay = {}) {
1547 DCHECK_GE(config_.sweeping_type,
1548 SweepingConfig::SweepingType::kIncremental);
1549
1550 if (!foreground_task_runner_) {
1551 return;
1552 }
1553
1554 low_priority_task_ran_ = false;
1555 incremental_sweeper_handle_.CancelIfNonEmpty();
1556 incremental_sweeper_handle_ = IncrementalSweepTask::Post(
1557 *this, foreground_task_runner_, kForegroundRegularPriority, delay);
1558 }
1559
1560 void ScheduleLowPriorityIncrementalSweeping(
1561 std::optional<v8::base::TimeDelta> delay = {}) {
1562 DCHECK_GE(config_.sweeping_type,
1563 SweepingConfig::SweepingType::kIncremental);
1564
1565 if (!low_priority_foreground_task_runner_) {
1566 return;
1567 }
1568
1569 incremental_sweeper_low_priority_handle_.CancelIfNonEmpty();
1570 incremental_sweeper_low_priority_handle_ =
1571 IncrementalSweepTask::Post(*this, low_priority_foreground_task_runner_,
1572 kForegroundLowPriority, delay);
1573 }
1574
1575 void ScheduleConcurrentSweeping() {
1576 DCHECK_GE(config_.sweeping_type,
1577 SweepingConfig::SweepingType::kIncrementalAndConcurrent);
1578
1579 concurrent_sweeper_handle_ = platform_->PostJob(
1580 kBackgroundRegularPriority,
1581 std::make_unique<ConcurrentSweepTask>(
1582 platform_, *heap_.heap(), &space_states_, &empty_normal_pages_,
1583 &empty_large_pages_, config_.free_memory_handling));
1584 }
1585
1586 void CancelAllSweepingTasks() {
1587 if (incremental_sweeper_handle_) {
1588 incremental_sweeper_handle_.Cancel();
1589 }
1590 if (incremental_sweeper_low_priority_handle_) {
1591 incremental_sweeper_low_priority_handle_.Cancel();
1592 }
1593 if (concurrent_sweeper_handle_ && concurrent_sweeper_handle_->IsValid()) {
1594 concurrent_sweeper_handle_->Cancel();
1595 }
1596 }
1597
1598 void SynchronizeAndFinalizeConcurrentAndIncrementalSweeping() {
1599 // The precondition for this call is that actual sweeping is done. So all
1600 // that's left is potentially invoking finalizers.
1601
1602 CancelAllSweepingTasks();
1603
1604 DCHECK(std::all_of(space_states_.begin(), space_states_.end(),
1605 [](const SweepingState& state) {
1606 return state.unswept_pages.IsEmpty();
1607 }));
1608 DCHECK(empty_normal_pages_.unswept_pages.IsEmpty());
1609 DCHECK(empty_large_pages_.unswept_pages.IsEmpty());
1610
1611 SweepFinalizer finalizer(
1612 platform_, stats_collector_, kSweepWithoutSpaceAssignment,
1613 &unused_destroyed_normal_pages_, config_.free_memory_handling,
1614 EmptyPageHandling::kDestroy);
1615 finalizer.Finalize(space_states_);
1616 finalizer.Finalize(empty_normal_pages_);
1617 finalizer.Finalize(empty_large_pages_);
1618 }
1619
1620 RawHeap& heap_;
1621 NormalPageMemoryPool& page_pool_;
1622 StatsCollector* const stats_collector_;
1623 SpaceStates space_states_;
1624 // States for empty normal pages. These pages do have a space as owner which
1625 // is updated as soon as the page is reused for a specific space.
1626 SweepingState empty_normal_pages_;
1627 // States for empty large pages.
1628 // TODO(372512096): This can be further split into LO pages that are less than
1629 // a regular page size and those that are multiple, where larger sizes can
1630 // contribute to `unused_destroyed_normal_pages_`.
1631 SweepingState empty_large_pages_;
1632 // Number of pages that have been destroyed and have not been reused by the
1633 // allocator yet. We assume that returning early on
1634 // SweepForAllocationIfRunning() causes such pages to be picked up.
1635 size_t unused_destroyed_normal_pages_ = 0;
1636 cppgc::Platform* platform_;
1637 std::shared_ptr<cppgc::TaskRunner> foreground_task_runner_;
1638 std::shared_ptr<cppgc::TaskRunner> low_priority_foreground_task_runner_;
1639 SweepingConfig config_;
1640 IncrementalSweepTask::Handle incremental_sweeper_handle_;
1641 IncrementalSweepTask::Handle incremental_sweeper_low_priority_handle_;
1642 std::unique_ptr<cppgc::JobHandle> concurrent_sweeper_handle_;
1643 std::vector<Sweeper::SweepingOnMutatorThreadObserver*>
1644 mutator_thread_sweeping_observers_;
1645 // Indicates whether a low priority task has been invoked since the last
1646 // scheduling of an incremental task.
1647 bool low_priority_task_ran_ = false;
1648 // Indicates whether the sweeping phase is in progress.
1649 bool is_in_progress_ = false;
1650 bool notify_done_pending_ = false;
1651 // Indicates whether the sweeper (or its finalization) is currently
1652 // running on the main thread.
1653 bool is_sweeping_on_mutator_thread_ = false;
1654};
1655
1656Sweeper::Sweeper(HeapBase& heap)
1657 : heap_(heap), impl_(std::make_unique<SweeperImpl>(heap)) {}
1658
1659Sweeper::~Sweeper() = default;
1660
1661void Sweeper::Start(SweepingConfig config) { impl_->Start(config); }
1662
1663bool Sweeper::FinishIfRunning() { return impl_->FinishIfRunning(); }
1664
1665void Sweeper::FinishIfOutOfWork() { impl_->FinishIfOutOfWork(); }
1666
1667void Sweeper::WaitForConcurrentSweepingForTesting() {
1668 impl_->WaitForConcurrentSweepingForTesting();
1669}
1670
1671bool Sweeper::SweepForAllocationIfRunning(BaseSpace* space, size_t size,
1672 v8::base::TimeDelta max_duration) {
1673 return impl_->SweepForAllocationIfRunning(space, size, max_duration);
1674}
1675
1676bool Sweeper::IsSweepingOnMutatorThread() const {
1677 return impl_->IsSweepingOnMutatorThread();
1678}
1679
1680bool Sweeper::IsSweepingInProgress() const {
1681 return impl_->IsSweepingInProgress();
1682}
1683
1684bool Sweeper::PerformSweepOnMutatorThread(v8::base::TimeDelta max_duration,
1685 StatsCollector::ScopeId scope_id) {
1686 return impl_->PerformSweepOnMutatorThread(max_duration, scope_id,
1687 MutatorThreadSweepingMode::kAll);
1688}
1689
1690Sweeper::SweepingOnMutatorThreadObserver::SweepingOnMutatorThreadObserver(
1691 Sweeper& sweeper)
1692 : sweeper_(sweeper) {
1693 sweeper_.impl_->AddMutatorThreadSweepingObserver(this);
1694}
1695
1696Sweeper::SweepingOnMutatorThreadObserver::~SweepingOnMutatorThreadObserver() {
1697 sweeper_.impl_->RemoveMutatorThreadSweepingObserver(this);
1698}
1699
1700} // namespace cppgc::internal