v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
sweeper.cc
1// Copyright 2017 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/heap/sweeper.h"
6
7#include <algorithm>
8#include <atomic>
9#include <memory>
10#include <optional>
11#include <vector>
12
14#include "src/base/logging.h"
15#include "src/common/globals.h"
18#include "src/flags/flags.h"
23#include "src/heap/gc-tracer.h"
25#include "src/heap/heap.h"
34#include "src/heap/new-spaces.h"
40#include "src/heap/slot-set.h"
41#include "src/heap/zapping.h"
45#include "src/objects/map.h"
47
48namespace v8 {
49namespace internal {
50
52 public:
54 : sweeper_(sweeper), local_sweeper_(sweeper_) {}
55
58 DCHECK_NE(NEW_SPACE, identity);
59 while (!delegate->ShouldYield()) {
60 PageMetadata* page = sweeper_->GetSweepingPageSafe(identity);
61 if (page == nullptr) return true;
62 local_sweeper_.ParallelSweepPage(page, identity,
63 SweepingMode::kLazyOrConcurrent);
64 }
65 TRACE_GC_NOTE("Sweeper::ConcurrentMajorSweeper Preempted");
66 return false;
67 }
68
69 // This method is expected by `SweepingState::FinishSweeping`.
70 void Finalize() {}
71
72 private:
75};
76
77static constexpr auto kNewSpace =
78 v8_flags.sticky_mark_bits.value() ? OLD_SPACE : NEW_SPACE;
79
81 public:
83 : sweeper_(sweeper), local_sweeper_(sweeper_) {}
84
87 while (!delegate->ShouldYield()) {
89 if (page == nullptr) return true;
92 }
93 TRACE_GC_NOTE("Sweeper::ConcurrentMinorSweeper Preempted");
94 return false;
95 }
96
98 if (local_sweeper_.ParallelIteratePromotedPages(delegate)) return true;
99 TRACE_GC_NOTE("Sweeper::ConcurrentMinorSweeper Preempted");
100 return false;
101 }
102
103 private:
106};
107
108class Sweeper::MajorSweeperJob final : public JobTask {
109 private:
110 // Major sweeping jobs don't sweep new space.
111 static constexpr int kNumberOfMajorSweepingSpaces =
113
114 public:
116
117 MajorSweeperJob(Isolate* isolate, Sweeper* sweeper)
118 : sweeper_(sweeper),
121 tracer_(isolate->heap()->tracer()),
122 trace_id_(sweeper_->major_sweeping_state_.background_trace_id()) {
124 }
125
126 ~MajorSweeperJob() override = default;
127
130
131 void Run(JobDelegate* delegate) final {
132 RunImpl(delegate, delegate->IsJoiningThread());
133 }
134
135 size_t GetMaxConcurrency(size_t worker_count) const override {
136 static constexpr int kPagePerTask = 2;
137 return std::min<size_t>(
138 concurrent_sweepers.size(),
139 worker_count +
140 (sweeper_->ConcurrentMajorSweepingPageCount() + kPagePerTask - 1) /
141 kPagePerTask);
142 }
143
144 private:
145 void RunImpl(JobDelegate* delegate, bool is_joining_thread) {
146 // Set the current isolate such that trusted pointer tables etc are
147 // available and the cage base is set correctly for multi-cage mode.
148 SetCurrentIsolateScope isolate_scope(sweeper_->heap_->isolate());
149
151 const int offset = delegate->GetTaskId();
155 tracer_, sweeper_->GetTracingScope(OLD_SPACE, is_joining_thread),
156 is_joining_thread ? ThreadKind::kMain : ThreadKind::kBackground,
158 for (int i = 0; i < kNumberOfMajorSweepingSpaces; i++) {
159 const AllocationSpace space_id = static_cast<AllocationSpace>(
164 DCHECK_NE(NEW_SPACE, space_id);
165 if (!concurrent_sweeper.ConcurrentSweepSpace(space_id, delegate)) return;
166 }
167 }
168
170 std::vector<ConcurrentMajorSweeper>& concurrent_sweepers;
172 const uint64_t trace_id_;
173};
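// Illustrative sketch (not part of sweeper.cc; plain-integer stand-in for the
// V8 types): the GetMaxConcurrency() override above, and the analogous one in
// MinorSweeperJob below, both request one additional worker per kPagePerTask
// pages still waiting to be swept (a ceiling division), capped by the number
// of concurrent sweepers:
//
//   size_t MaxConcurrency(size_t num_sweepers, size_t worker_count,
//                         size_t remaining_pages) {
//     constexpr size_t kPagePerTask = 2;
//     return std::min(num_sweepers,
//                     worker_count +
//                         (remaining_pages + kPagePerTask - 1) / kPagePerTask);
//   }
//
// For example, 7 remaining pages request ceil(7 / 2) == 4 extra workers.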
174
175class Sweeper::MinorSweeperJob final : public JobTask {
176 public:
177 static constexpr int kMaxTasks = 1;
178
179 MinorSweeperJob(Isolate* isolate, Sweeper* sweeper)
180 : sweeper_(sweeper),
183 tracer_(isolate->heap()->tracer()),
184 trace_id_(sweeper_->minor_sweeping_state_.background_trace_id()) {
186 }
187
188 ~MinorSweeperJob() override = default;
189
192
193 void Run(JobDelegate* delegate) final {
194 RunImpl(delegate, delegate->IsJoiningThread());
195 }
196
197 size_t GetMaxConcurrency(size_t worker_count) const override {
198 static constexpr int kPagePerTask = 2;
199 return std::min<size_t>(
200 concurrent_sweepers.size(),
201 worker_count +
202 (sweeper_->ConcurrentMinorSweepingPageCount() + kPagePerTask - 1) /
203 kPagePerTask);
204 }
205
206 private:
207 void RunImpl(JobDelegate* delegate, bool is_joining_thread) {
209 const int offset = delegate->GetTaskId();
213 tracer_, sweeper_->GetTracingScope(NEW_SPACE, is_joining_thread),
214 is_joining_thread ? ThreadKind::kMain : ThreadKind::kBackground,
216 // Set the current isolate such that trusted pointer tables etc are
217 // available and the cage base is set correctly for multi-cage mode.
218 SetCurrentIsolateScope isolate_scope(sweeper_->heap_->isolate());
219
220 if (!concurrent_sweeper.ConcurrentSweepSpace(delegate)) return;
221 concurrent_sweeper.ConcurrentSweepPromotedPages(delegate);
222 }
223
225 std::vector<ConcurrentMinorSweeper>& concurrent_sweepers;
227 const uint64_t trace_id_;
228};
229
230template <Sweeper::SweepingScope scope>
232 : sweeper_(sweeper) {}
233
234template <Sweeper::SweepingScope scope>
236 DCHECK(!in_progress_);
237 DCHECK(concurrent_sweepers_.empty());
238 DCHECK(!HasValidJob());
239}
240
241template <Sweeper::SweepingScope scope>
243 return job_handle_ && job_handle_->IsValid();
244}
245
246template <Sweeper::SweepingScope scope>
248 return HasValidJob() && job_handle_->IsActive();
249}
250
251template <Sweeper::SweepingScope scope>
253 if (HasValidJob()) job_handle_->Cancel();
254}
255
256template <Sweeper::SweepingScope scope>
258 DCHECK(!HasValidJob());
259 DCHECK(!in_progress_);
260 DCHECK(concurrent_sweepers_.empty());
263 !sweeper_->heap_->ShouldReduceMemory());
264 should_reduce_memory_ = (scope != Sweeper::SweepingScope::kMinor) &&
265 sweeper_->heap_->ShouldReduceMemory();
266 trace_id_ =
267 (reinterpret_cast<uint64_t>(sweeper_) ^
268 sweeper_->heap_->tracer()->CurrentEpoch(
269 scope == SweepingScope::kMajor ? GCTracer::Scope::MC_SWEEP
270 : GCTracer::Scope::MINOR_MS_SWEEP))
271 << 1;
272 background_trace_id_ = trace_id_ + 1;
273}
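// Illustrative note (hypothetical helper, not part of sweeper.cc): the shift
// by one above makes trace_id_ an even number, so background_trace_id_ =
// trace_id_ + 1 is its odd companion and the two ids never collide:
//
//   std::pair<uint64_t, uint64_t> MakeTraceIdPair(uint64_t seed) {
//     const uint64_t main_id = seed << 1;  // always even
//     return {main_id, main_id + 1};       // odd id used for background work
//   }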
274
275template <Sweeper::SweepingScope scope>
277 DCHECK(!HasValidJob());
278 DCHECK(!in_progress_);
279 DCHECK(concurrent_sweepers_.empty());
281 DCHECK_NE(0, background_trace_id_);
282 in_progress_ = true;
283}
284
285template <Sweeper::SweepingScope scope>
287 DCHECK(!HasValidJob());
288 DCHECK(in_progress_);
289 if (v8_flags.concurrent_sweeping &&
290 !sweeper_->heap_->delay_sweeper_tasks_for_testing_) {
291 auto job =
292 std::make_unique<SweeperJob>(sweeper_->heap_->isolate(), sweeper_);
294 scope == SweepingScope::kMinor
295 ? GCTracer::Scope::MINOR_MS_SWEEP_START_JOBS
296 : GCTracer::Scope::MC_SWEEP_START_JOBS;
297 TRACE_GC_WITH_FLOW(sweeper_->heap_->tracer(), scope_id,
298 background_trace_id(), TRACE_EVENT_FLAG_FLOW_OUT);
299 DCHECK_IMPLIES(v8_flags.minor_ms, concurrent_sweepers_.empty());
300 int max_concurrent_sweeper_count =
301 std::min(SweeperJob::kMaxTasks,
302 V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1);
303 if (concurrent_sweepers_.empty()) {
304 for (int i = 0; i < max_concurrent_sweeper_count; ++i) {
305 concurrent_sweepers_.emplace_back(sweeper_);
306 }
307 }
308 DCHECK_EQ(max_concurrent_sweeper_count, concurrent_sweepers_.size());
310 std::move(job));
311 }
312}
313
314template <Sweeper::SweepingScope scope>
316 DCHECK(in_progress_);
317 if (HasValidJob()) job_handle_->Join();
318}
319
320template <Sweeper::SweepingScope scope>
322 DCHECK(in_progress_);
323 // Sweeping jobs were already joined.
324 DCHECK(!HasValidJob());
325
326 concurrent_sweepers_.clear();
327 in_progress_ = false;
328}
329
330template <Sweeper::SweepingScope scope>
332 if (!job_handle_ || !job_handle_->IsValid()) return;
333
334 DCHECK(v8_flags.concurrent_sweeping);
335 job_handle_->Cancel();
336 job_handle_.reset();
337}
338
339template <Sweeper::SweepingScope scope>
341 DCHECK(in_progress_);
342 job_handle_ = V8::GetCurrentPlatform()->PostJob(
344 std::make_unique<SweeperJob>(sweeper_->heap_->isolate(), sweeper_));
345}
346
348 SweepingMode sweeping_mode,
349 uint32_t max_pages) {
350 uint32_t pages_swept = 0;
351 bool found_usable_pages = false;
352 PageMetadata* page = nullptr;
353 while ((page = sweeper_->GetSweepingPageSafe(identity)) != nullptr) {
354 ParallelSweepPage(page, identity, sweeping_mode);
355 if (!page->Chunk()->IsFlagSet(MemoryChunk::NEVER_ALLOCATE_ON_PAGE)) {
356 found_usable_pages = true;
357#if DEBUG
358 } else {
359 // All remaining pages are also marked with NEVER_ALLOCATE_ON_PAGE.
360 base::MutexGuard guard(&sweeper_->mutex_);
361 int space_index = GetSweepSpaceIndex(identity);
362 Sweeper::SweepingList& sweeping_list =
363 sweeper_->sweeping_list_[space_index];
364 DCHECK(std::all_of(sweeping_list.begin(), sweeping_list.end(),
365 [](const PageMetadata* p) {
366 return p->Chunk()->IsFlagSet(
367 MemoryChunk::NEVER_ALLOCATE_ON_PAGE);
368 }));
369#endif // DEBUG
370 }
371 if (++pages_swept >= max_pages) break;
372 }
373 return found_usable_pages;
374}
375
377 AllocationSpace identity,
378 SweepingMode sweeping_mode) {
379 DCHECK(IsValidSweepingSpace(identity));
380
381 DCHECK(!page->SweepingDone());
382
383 {
384 base::MutexGuard guard(page->mutex());
385 DCHECK(!page->SweepingDone());
387 page->concurrent_sweeping_state());
388 page->set_concurrent_sweeping_state(
390 const FreeSpaceTreatmentMode free_space_treatment_mode =
393 DCHECK_IMPLIES(identity == NEW_SPACE,
394 !sweeper_->minor_sweeping_state_.should_reduce_memory());
395 sweeper_->RawSweep(
396 page, free_space_treatment_mode, sweeping_mode,
397 identity == NEW_SPACE
398 ? false
399 : sweeper_->major_sweeping_state_.should_reduce_memory());
400 sweeper_->AddSweptPage(page, identity);
401 DCHECK(page->SweepingDone());
402 }
403}
404
406 JobDelegate* delegate) {
407 return ContributeAndWaitForPromotedPagesIterationImpl(
408 [delegate]() { return delegate->ShouldYield(); });
409}
410
412 return ContributeAndWaitForPromotedPagesIterationImpl([]() { return false; });
413}
414
416 JobDelegate* delegate) {
417 return ParallelIteratePromotedPagesImpl(
418 [delegate]() { return delegate->ShouldYield(); });
419}
420
422 return ParallelIteratePromotedPagesImpl([]() { return false; });
423}
424
425namespace {
426class PromotedPageRecordMigratedSlotVisitor final
427 : public NewSpaceVisitor<PromotedPageRecordMigratedSlotVisitor> {
428 public:
429 explicit PromotedPageRecordMigratedSlotVisitor(MutablePageMetadata* host_page)
430 : NewSpaceVisitor<PromotedPageRecordMigratedSlotVisitor>(
431 host_page->heap()->isolate()),
432 host_chunk_(host_page->Chunk()),
433 host_page_(host_page),
435 host_page->heap()->ephemeron_remembered_set()) {
436 DCHECK(host_page->owner_identity() == OLD_SPACE ||
437 host_page->owner_identity() == LO_SPACE);
438 }
439
440 void Process(Tagged<HeapObject> object) {
441 Tagged<Map> map = object->map(cage_base());
442 if (Map::ObjectFieldsFrom(map->visitor_id()) == ObjectFields::kDataOnly) {
443 return;
444 }
445 Visit(map, object);
446 }
447
448 // TODO(v8:13883): MakeExternal() currently allows externalizing a string in
449 // the young generation (for testing) and on a promoted page that is currently
450 // being swept. If we solve the testing cases and prohibit MakeExternal() on
451 // pages owned by the sweeper, this visitor can be simplified as there are no
452 // more unsafe shape changes that happen concurrently.
453 V8_INLINE static constexpr bool EnableConcurrentVisitation() { return true; }
454
455 V8_INLINE void VisitMapPointer(Tagged<HeapObject> host) final {
456 VerifyHost(host);
457 VisitObjectImpl(host, host->map(cage_base()), host->map_slot().address());
458 }
459
460 V8_INLINE void VisitPointer(Tagged<HeapObject> host, ObjectSlot p) final {
461 VisitPointersImpl(host, p, p + 1);
462 }
463 V8_INLINE void VisitPointer(Tagged<HeapObject> host,
464 MaybeObjectSlot p) final {
465 VisitPointersImpl(host, p, p + 1);
466 }
467 V8_INLINE void VisitPointers(Tagged<HeapObject> host, ObjectSlot start,
468 ObjectSlot end) final {
469 VisitPointersImpl(host, start, end);
470 }
471 V8_INLINE void VisitPointers(Tagged<HeapObject> host, MaybeObjectSlot start,
472 MaybeObjectSlot end) final {
473 VisitPointersImpl(host, start, end);
474 }
475
476 V8_INLINE size_t VisitJSArrayBuffer(Tagged<Map> map,
477 Tagged<JSArrayBuffer> object,
478 MaybeObjectSize maybe_object_size) {
479 object->YoungMarkExtensionPromoted();
480 return NewSpaceVisitor<PromotedPageRecordMigratedSlotVisitor>::
481 VisitJSArrayBuffer(map, object, maybe_object_size);
482 }
483
484 V8_INLINE size_t VisitEphemeronHashTable(Tagged<Map> map,
485 Tagged<EphemeronHashTable> table,
486 MaybeObjectSize) {
487 NewSpaceVisitor<PromotedPageRecordMigratedSlotVisitor>::
488 VisitMapPointerIfNeeded<VisitorId::kVisitEphemeronHashTable>(table);
489 EphemeronRememberedSet::IndicesSet indices;
490 for (InternalIndex i : table->IterateEntries()) {
491 ObjectSlot value_slot =
492 table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
493 VisitPointer(table, value_slot);
494 ObjectSlot key_slot =
495 table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
496 Tagged<Object> key = key_slot.Acquire_Load();
497 Tagged<HeapObject> key_object;
498 if (!key.GetHeapObject(&key_object)) continue;
499#ifdef THREAD_SANITIZER
500 MemoryChunk::FromHeapObject(key_object)->SynchronizedLoad();
501#endif // THREAD_SANITIZER
502 // With sticky mark-bits we don't need to update the remembered set for
503 // just promoted objects, since everything is promoted.
504 if (!v8_flags.sticky_mark_bits &&
505 HeapLayout::InYoungGeneration(key_object)) {
506 indices.insert(i.as_int());
507 }
508 }
509 if (!indices.empty()) {
510 ephemeron_remembered_set_->RecordEphemeronKeyWrites(table,
511 std::move(indices));
512 }
513 return EphemeronHashTable::BodyDescriptor::SizeOf(map, table);
514 }
515
516 // Entries that are skipped for recording.
517 void VisitExternalReference(Tagged<InstructionStream> host,
518 RelocInfo* rinfo) final {}
519 void VisitInternalReference(Tagged<InstructionStream> host,
520 RelocInfo* rinfo) final {}
521 void VisitExternalPointer(Tagged<HeapObject> host,
522 ExternalPointerSlot slot) final {}
523
524 // Maps can be shared, so we need to visit them to record old-to-shared slots.
525 V8_INLINE static constexpr bool ShouldVisitMapPointer() { return true; }
526 V8_INLINE static constexpr bool ShouldVisitReadOnlyMapPointer() {
527 return false;
528 }
529
530 private:
531 V8_INLINE void VerifyHost(Tagged<HeapObject> host) {
532 DCHECK(!HeapLayout::InWritableSharedSpace(host));
533 DCHECK(!HeapLayout::InYoungGeneration(host));
534 DCHECK(!MutablePageMetadata::FromHeapObject(host)->SweepingDone());
535 DCHECK_EQ(MutablePageMetadata::FromHeapObject(host), host_page_);
536 }
537
538 template <typename TObject>
539 V8_INLINE void VisitObjectImpl(Tagged<HeapObject> host, TObject object,
540 Address slot) {
541 Tagged<HeapObject> value_heap_object;
542 if (!object.GetHeapObject(&value_heap_object)) return;
543
544 MemoryChunk* value_chunk = MemoryChunk::FromHeapObject(value_heap_object);
545#ifdef THREAD_SANITIZER
546 value_chunk->SynchronizedLoad();
547#endif // THREAD_SANITIZER
548 // With sticky mark-bits we don't need to update the remembered set for
549 // just promoted objects, since everything is promoted.
550 if (!v8_flags.sticky_mark_bits && value_chunk->InYoungGeneration()) {
551 RememberedSet<OLD_TO_NEW_BACKGROUND>::Insert<AccessMode::ATOMIC>(
552 host_page_, host_chunk_->Offset(slot));
553 } else if (value_chunk->InWritableSharedSpace()) {
554 RememberedSet<OLD_TO_SHARED>::Insert<AccessMode::ATOMIC>(
555 host_page_, host_chunk_->Offset(slot));
556 }
557 }
558
559 template <typename TSlot>
560 V8_INLINE void VisitPointersImpl(Tagged<HeapObject> host, TSlot start,
561 TSlot end) {
562 VerifyHost(host);
563 for (TSlot slot = start; slot < end; ++slot) {
564 typename TSlot::TObject target =
565 slot.Relaxed_Load(ObjectVisitorWithCageBases::cage_base());
566 VisitObjectImpl(host, target, slot.address());
567 }
568 }
569
570 MemoryChunk* const host_chunk_;
571 MutablePageMetadata* const host_page_;
572 EphemeronRememberedSet* ephemeron_remembered_set_;
573};
574
575// Atomically zap the specified area.
576V8_INLINE void AtomicZapBlock(Address addr, size_t size_in_bytes) {
577 static_assert(sizeof(Tagged_t) == kTaggedSize);
578 static constexpr Tagged_t kZapTagged = static_cast<Tagged_t>(kZapValue);
580 DCHECK(IsAligned(size_in_bytes, kTaggedSize));
581 const size_t size_in_tagged = size_in_bytes / kTaggedSize;
582 Tagged_t* current_addr = reinterpret_cast<Tagged_t*>(addr);
583 for (size_t i = 0; i < size_in_tagged; ++i) {
584 base::AsAtomicPtr(current_addr++)
585 ->store(kZapTagged, std::memory_order_relaxed);
586 }
587}
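// Usage example (assuming kTaggedSize == 8, i.e. no pointer compression):
// zapping a 64-byte dead region issues 64 / 8 == 8 relaxed atomic stores of
// kZapValue, one per tagged word:
//
//   AtomicZapBlock(dead_start, 64);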
588
589void ZapDeadObjectsInRange(Heap* heap, Address dead_start, Address dead_end) {
590 if (dead_end != dead_start) {
591 size_t free_size = static_cast<size_t>(dead_end - dead_start);
592 AtomicZapBlock(dead_start, free_size);
593 WritableFreeSpace free_space =
594 WritableFreeSpace::ForNonExecutableMemory(dead_start, free_size);
595 heap->CreateFillerObjectAtBackground(free_space);
596 }
597}
598
599void ZapDeadObjectsOnPage(Heap* heap, PageMetadata* p) {
600 if (!heap::ShouldZapGarbage() && !v8_flags.track_gc_object_stats) {
601 // We need to zap and create fillers on promoted pages when
602 // --track-gc-object-stats is enabled because it expects all dead objects to
603 // still be valid objects. Dead objects on promoted pages may otherwise
604 // contain invalid old-to-new references to pages that are gone or were
605 // already reallocated.
606 return;
607 }
608 Address dead_start = p->area_start();
609 // Iterate over the page using the live objects.
610 for (auto [object, size] : LiveObjectRange(p)) {
611 Address dead_end = object.address();
612 ZapDeadObjectsInRange(heap, dead_start, dead_end);
613 dead_start = dead_end + size;
614 }
615 ZapDeadObjectsInRange(heap, dead_start, p->area_end());
616}
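// Illustrative example (assumed addresses, not taken from sweeper.cc): for a
// page whose usable area is [0x1000, 0x1100) with live objects at
// [0x1020, 0x1060) and [0x10A0, 0x10C0), the loop above zaps and fills the
// dead ranges [0x1000, 0x1020), [0x1060, 0x10A0), and finally [0x10C0, 0x1100).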
617
618} // namespace
619
621 MutablePageMetadata* page) {
622 DCHECK(v8_flags.minor_ms);
623 DCHECK(!page->Chunk()->IsFlagSet(MemoryChunk::BLACK_ALLOCATED));
624 DCHECK_NOT_NULL(page);
625 {
626 base::MutexGuard guard(page->mutex());
627 DCHECK(!page->SweepingDone());
629 page->concurrent_sweeping_state());
630 page->set_concurrent_sweeping_state(
632 PromotedPageRecordMigratedSlotVisitor record_visitor(page);
633 const bool is_large_page = page->Chunk()->IsLargePage();
634 if (is_large_page) {
635 DCHECK_EQ(LO_SPACE, page->owner_identity());
636 record_visitor.Process(LargePageMetadata::cast(page)->GetObject());
637 page->ReleaseSlotSet(SURVIVOR_TO_EXTERNAL_POINTER);
638 } else {
639 DCHECK_EQ(OLD_SPACE, page->owner_identity());
640 DCHECK(!page->Chunk()->IsEvacuationCandidate());
641 for (auto [object, _] :
642 LiveObjectRange(static_cast<PageMetadata*>(page))) {
643 record_visitor.Process(object);
644 }
645 ZapDeadObjectsOnPage(sweeper_->heap_, static_cast<PageMetadata*>(page));
646 }
647 page->ClearLiveness();
648 sweeper_->NotifyPromotedPageIterationFinished(page);
649 DCHECK(page->SweepingDone());
650 }
651}
652
654 : heap_(heap),
655 marking_state_(heap_->non_atomic_marking_state()),
657
658Sweeper::~Sweeper() = default;
659
664
668
672
673namespace {
674V8_INLINE bool ComparePagesForSweepingOrder(const PageMetadata* a,
675 const PageMetadata* b) {
676 // Prioritize pages that can be allocated on.
677 if (a->Chunk()->IsFlagSet(MemoryChunk::NEVER_ALLOCATE_ON_PAGE) !=
679 return a->Chunk()->IsFlagSet(MemoryChunk::NEVER_ALLOCATE_ON_PAGE);
680 // We sort in descending order of live bytes, i.e., ascending order of
681 // free bytes, because GetSweepingPageSafe returns pages in reverse order.
682 // This works automatically for black allocated pages, since we set live bytes
683 // for them to the area size.
684 return a->live_bytes() > b->live_bytes();
685}
686} // namespace
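// Illustrative example (assumed live-byte counts): given sweepable pages with
// live_bytes {A: 100 KB, B: 400 KB, C: 250 KB} and no NEVER_ALLOCATE_ON_PAGE
// flags, the comparator orders them B, C, A. GetSweepingPageSafe() pops pages
// from the back of the sorted list, so A (the page with the most free space)
// is swept first, which is exactly what the compaction rationale below relies
// on.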
687
694 // Sorting is done in order to make compaction more efficient: by sweeping
695 // pages with the most free bytes first, we make it more likely that when
696 // evacuating a page, already swept pages will have enough free bytes to
697 // hold the objects to move (and therefore, we won't need to wait for more
698 // pages to be swept in order to move those objects).
699 int space_index = GetSweepSpaceIndex(space);
700 DCHECK_IMPLIES(space == NEW_SPACE, sweeping_list_[space_index].empty());
701 std::sort(sweeping_list_[space_index].begin(),
702 sweeping_list_[space_index].end(), ComparePagesForSweepingOrder);
703 });
704}
705
710 int new_space_index = GetSweepSpaceIndex(kNewSpace);
711 std::sort(sweeping_list_[new_space_index].begin(),
712 sweeping_list_[new_space_index].end(),
713 ComparePagesForSweepingOrder);
714}
715
716namespace {
717bool ShouldUpdateRememberedSets(Heap* heap) {
718 DCHECK_EQ(0, heap->new_lo_space()->Size());
719 if (v8_flags.sticky_mark_bits) {
720 // TODO(333906585): Update OLD_TO_SHARED remembered set for promoted
721 // objects.
722 return false;
723 }
724 if (heap->new_space()->Size() > 0) {
725 // Keep track of OLD_TO_NEW slots
726 return true;
727 }
728 // TODO(v8:12612): OLD_TO_SHARED is not really needed on the main isolate and
729 // this condition should only apply to client isolates.
730 if (heap->isolate()->has_shared_space()) {
731 // Keep track of OLD_TO_SHARED slots
732 return true;
733 }
734 return false;
735}
736} // namespace
737
746
747namespace {
748void ClearPromotedPages(Heap* heap, std::vector<MutablePageMetadata*> pages) {
749 DCHECK(v8_flags.minor_ms);
750 for (auto* page : pages) {
751 DCHECK(!page->SweepingDone());
753 page->concurrent_sweeping_state());
754 if (!page->Chunk()->IsLargePage()) {
755 ZapDeadObjectsOnPage(heap, static_cast<PageMetadata*>(page));
756 }
757 page->ClearLiveness();
758 page->set_concurrent_sweeping_state(
760 }
761}
762} // namespace
763
765 DCHECK(v8_flags.minor_ms);
769 std::vector<MutablePageMetadata*> promoted_pages_for_clearing;
771 if (ShouldUpdateRememberedSets(heap_)) {
773 std::memory_order_release);
774 } else {
775 promoted_pages_for_clearing.swap(
779 }
780 }
782 ClearPromotedPages(heap_, promoted_pages_for_clearing);
783}
784
786 base::MutexGuard guard(&mutex_);
787 SweptList& list = swept_list_[GetSweepSpaceIndex(space->identity())];
788 PageMetadata* page = nullptr;
789 if (!list.empty()) {
790 page = list.back();
791 list.pop_back();
792 }
793 if (list.empty()) {
794 has_swept_pages_[GetSweepSpaceIndex(space->identity())].store(
795 false, std::memory_order_release);
796 }
797 return page;
798}
799
801 base::MutexGuard guard(&mutex_);
802 SweptList list;
803 list.swap(swept_list_[GetSweepSpaceIndex(space->identity())]);
804 has_swept_pages_[GetSweepSpaceIndex(space->identity())].store(
805 false, std::memory_order_release);
806 return list;
807}
808
810 if (!major_sweeping_in_progress()) return;
811
813 if (space == NEW_SPACE) return;
816 });
817
818 // Join all concurrent tasks.
820 // All jobs are done but we still remain in sweeping state here.
822
824 if (space == NEW_SPACE) return;
825 CHECK(sweeping_list_[GetSweepSpaceIndex(space)].empty());
827 });
828}
829
832
833 // If sweeping is not completed or not running at all, we try to complete it
834 // here.
835
838 heap_->tracer(), GCTracer::Scope::MINOR_MS_COMPLETE_SWEEPING,
840 GetTraceIdForFlowEvent(GCTracer::Scope::MINOR_MS_COMPLETE_SWEEPING),
842 // TODO(40096225): When finalizing sweeping for starting a new major GC,
843 // OLD_TO_NEW is no longer needed. If this is the main isolate, we could
844 // cancel promoted page iteration instead of finishing it.
846 }
847
850 heap_->tracer(), GCTracer::Scope::MC_COMPLETE_SWEEPING,
852 GetTraceIdForFlowEvent(GCTracer::Scope::MC_COMPLETE_SWEEPING),
854 // Discard all pooled pages on memory-reducing GCs.
857 }
860 // Sweeping should not add pages to the pool.
863 }
864}
865
867 if (!minor_sweeping_in_progress()) return;
868
871 // The array buffer sweeper may have grabbed a page to contribute to promoted
872 // page iteration. Wait until it has finished iterating.
874
875 // Join all concurrent tasks.
877 // All jobs are done but we still remain in sweeping state here.
879
882
886}
887
898
902
906
910
912 Address free_start, Address free_end, PageMetadata* page, Space* space,
913 FreeSpaceTreatmentMode free_space_treatment_mode,
914 bool should_reduce_memory) {
915 CHECK_GT(free_end, free_start);
916 size_t freed_bytes = 0;
917 size_t size = static_cast<size_t>(free_end - free_start);
918 if (free_space_treatment_mode == FreeSpaceTreatmentMode::kZapFreeSpace) {
919 CodePageMemoryModificationScopeForDebugging memory_modification_scope(page);
920 AtomicZapBlock(free_start, size);
921 }
922 freed_bytes = reinterpret_cast<PagedSpaceBase*>(space)->FreeDuringSweep(
923 free_start, size);
924 if (should_reduce_memory) {
925 ZeroOrDiscardUnusedMemory(page, free_start, size);
926 }
927
928 if (v8_flags.sticky_mark_bits) {
929 // Clear the bitmap, since fillers or slack may still be marked from black
930 // allocation.
931 page->marking_bitmap()->ClearRange<AccessMode::NON_ATOMIC>(
934 }
935
936 return freed_bytes;
937}
938
939// static
940std::optional<base::AddressRegion> Sweeper::ComputeDiscardMemoryArea(
942 const size_t page_size = MemoryAllocator::GetCommitPageSize();
943 const Address discard_start = RoundUp(start, page_size);
944 const Address discard_end = RoundDown(end, page_size);
945
946 if (discard_start < discard_end) {
947 return base::AddressRegion(discard_start, discard_end - discard_start);
948 } else {
949 return {};
950 }
951}
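// Illustrative example (assuming a 4 KiB commit page size): for a free block
// spanning [0x1040, 0x3010), the start rounds up to 0x2000 and the end rounds
// down to 0x3000, so only the fully covered OS page [0x2000, 0x3000) is
// returned for discarding; the partially covered pages on either side are
// handled by the caller (see ZeroOrDiscardUnusedMemory below).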
952
954 size_t size) {
955 if (size < FreeSpace::kSize) {
956 return;
957 }
958
959 const Address unused_start = addr + FreeSpace::kSize;
960 DCHECK(page->ContainsLimit(unused_start));
961 const Address unused_end = addr + size;
962 DCHECK(page->ContainsLimit(unused_end));
963
964 std::optional<RwxMemoryWriteScope> scope;
965 if (page->Chunk()->executable()) {
966 scope.emplace("For zeroing unused memory.");
967 }
968 const std::optional<base::AddressRegion> discard_area =
969 ComputeDiscardMemoryArea(unused_start, unused_end);
970
971#if !defined(V8_OS_WIN)
972 constexpr bool kDiscardEmptyPages = true;
973#else
974 // Discarding memory on Windows does not decommit the memory and does not
975 // contribute to reducing the memory footprint. On the other hand, these
976 // calls become expensive the more memory is allocated in the system and
977 // can result in hangs. Thus, it is better not to discard on Windows.
978 constexpr bool kDiscardEmptyPages = false;
979#endif // !defined(V8_OS_WIN)
980
981 if (kDiscardEmptyPages && discard_area) {
982 {
983 v8::PageAllocator* page_allocator =
984 heap_->memory_allocator()->page_allocator(page->owner_identity());
985 DiscardSealedMemoryScope discard_scope("Discard unused memory");
986 CHECK(page_allocator->DiscardSystemPages(
987 reinterpret_cast<void*>(discard_area->begin()),
988 discard_area->size()));
989 }
990
991 if (v8_flags.zero_unused_memory) {
992 // Now zero unused memory right before and after the discarded OS pages to
993 // help with OS page compression.
994 memset(reinterpret_cast<void*>(unused_start), 0,
995 discard_area->begin() - unused_start);
996 memset(reinterpret_cast<void*>(discard_area->end()), 0,
997 unused_end - discard_area->end());
998 }
999 } else if (v8_flags.zero_unused_memory) {
1000 // Unused memory does not span a full OS page. Simply clear all of the
1001 // unused memory. This helps with OS page compression.
1002 memset(reinterpret_cast<void*>(unused_start), 0, unused_end - unused_start);
1003 }
1004}
1005
1007 Address free_start, Address free_end, PageMetadata* page,
1008 bool record_free_ranges, TypedSlotSet::FreeRangesMap* free_ranges_map,
1009 SweepingMode sweeping_mode) {
1010 DCHECK_LE(free_start, free_end);
1011 if (sweeping_mode == SweepingMode::kEagerDuringGC) {
1012 // New space and in consequence the old-to-new remembered set is always
1013 // empty after a full GC, so we do not need to remove from it after the full
1014 // GC. However, we wouldn't even be allowed to do that, since the main
1015 // thread then owns the old-to-new remembered set. Removing from it from a
1016 // sweeper thread would race with the main thread.
1017 RememberedSet<OLD_TO_NEW>::RemoveRange(page, free_start, free_end,
1020 page, free_start, free_end, SlotSet::KEEP_EMPTY_BUCKETS);
1021
1022 // While we only add old-to-old slots on live objects, we can still end up
1023 // with old-to-old slots in free memory with e.g. right-trimming of objects.
1024 RememberedSet<OLD_TO_OLD>::RemoveRange(page, free_start, free_end,
1026 RememberedSet<TRUSTED_TO_TRUSTED>::RemoveRange(page, free_start, free_end,
1028 } else {
1029 DCHECK_NULL(page->slot_set<OLD_TO_OLD>());
1030 DCHECK_NULL(page->slot_set<TRUSTED_TO_TRUSTED>());
1031 }
1032
1033 // Old-to-shared isn't reset after a full GC, so it needs to be cleaned both
1034 // during and after a full GC.
1035 RememberedSet<OLD_TO_SHARED>::RemoveRange(page, free_start, free_end,
1038 page, free_start, free_end, SlotSet::KEEP_EMPTY_BUCKETS);
1039
1040 if (record_free_ranges) {
1041 MemoryChunk* chunk = page->Chunk();
1042 free_ranges_map->insert(std::pair<uint32_t, uint32_t>(
1043 static_cast<uint32_t>(chunk->Offset(free_start)),
1044 static_cast<uint32_t>(chunk->Offset(free_end))));
1045 }
1046}
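// Illustrative example (assumed offsets): a freed range starting at chunk
// offset 0x2300 and ending at 0x2800 is recorded as the pair (0x2300, 0x2800)
// in free_ranges_map, which CleanupTypedSlotsInFreeMemory() below then uses to
// drop typed slots that fall inside the freed memory.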
1047
1049 PageMetadata* page, const TypedSlotSet::FreeRangesMap& free_ranges_map,
1050 SweepingMode sweeping_mode) {
1051 // No support for typed trusted-to-shared-trusted pointers.
1052 DCHECK_NULL(page->typed_slot_set<TRUSTED_TO_SHARED_TRUSTED>());
1053
1054 if (sweeping_mode == SweepingMode::kEagerDuringGC) {
1055 page->ClearTypedSlotsInFreeMemory<OLD_TO_NEW>(free_ranges_map);
1056
1057 // Typed old-to-old slot sets are only ever recorded in live code objects.
1058 // Also code objects are never right-trimmed, so there cannot be any slots
1059 // in a free range.
1060 page->AssertNoTypedSlotsInFreeMemory<OLD_TO_OLD>(free_ranges_map);
1061 page->ClearTypedSlotsInFreeMemory<OLD_TO_SHARED>(free_ranges_map);
1062 return;
1063 }
1064
1066
1067 // After a full GC there are no old-to-new typed slots. The main thread
1068 // could create new slots but not in a free range.
1069 page->AssertNoTypedSlotsInFreeMemory<OLD_TO_NEW>(free_ranges_map);
1070 DCHECK_NULL(page->typed_slot_set<OLD_TO_OLD>());
1071 page->ClearTypedSlotsInFreeMemory<OLD_TO_SHARED>(free_ranges_map);
1072}
1073
1075 size_t live_bytes) {
1076 if (!v8_flags.sticky_mark_bits) {
1077 page->marking_bitmap()->Clear<AccessMode::NON_ATOMIC>();
1078 }
1079 // Keep the old live bytes counter of the page until RefillFreeList, where
1080 // the space size is refined.
1081 // The allocated_bytes() counter is precisely the total size of objects.
1082 DCHECK_EQ(live_bytes, page->allocated_bytes());
1083}
1084
1086 FreeSpaceTreatmentMode free_space_treatment_mode,
1087 SweepingMode sweeping_mode, bool should_reduce_memory) {
1088 DCHECK_NOT_NULL(p);
1089 Space* space = p->owner();
1090 DCHECK_NOT_NULL(space);
1091 DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE ||
1092 space->identity() == SHARED_SPACE ||
1093 space->identity() == TRUSTED_SPACE ||
1094 space->identity() == SHARED_TRUSTED_SPACE ||
1095 (space->identity() == NEW_SPACE && v8_flags.minor_ms));
1097 DCHECK(!p->SweepingDone());
1099 DCHECK_IMPLIES(space->identity() == NEW_SPACE,
1101 DCHECK_IMPLIES(space->identity() != NEW_SPACE,
1103
1104 // Phase 1: Prepare the page for sweeping.
1105
1106 std::optional<ActiveSystemPages> active_system_pages_after_sweeping;
1107 if (should_reduce_memory) {
1108 // Only decrement counter when we discard unused system pages.
1109 active_system_pages_after_sweeping = ActiveSystemPages();
1110 active_system_pages_after_sweeping->Init(
1113 }
1114
1115 // Phase 2: Free the non-live memory and clean up the regular remembered set
1116 // entries.
1117
1118 // Liveness and freeing statistics.
1119 size_t live_bytes = 0;
1120
1121 // Promoted pages have no interesting remembered sets yet.
1122 bool record_free_ranges = (p->typed_slot_set<OLD_TO_NEW>() != nullptr ||
1123 p->typed_slot_set<OLD_TO_OLD>() != nullptr ||
1124 p->typed_slot_set<OLD_TO_SHARED>() != nullptr) ||
1125 DEBUG_BOOL;
1126
1127 // The free ranges map is used for filtering typed slots.
1128 TypedSlotSet::FreeRangesMap free_ranges_map;
1129
1130 // Iterate over the page using the live objects and free the memory before
1131 // the given live object.
1132 Address free_start = p->area_start();
1133
1134 for (auto [object, size] : LiveObjectRange(p)) {
1135 DCHECK(marking_state_->IsMarked(object));
1136 Address free_end = object.address();
1137 if (free_end != free_start) {
1138 FreeAndProcessFreedMemory(free_start, free_end, p, space,
1139 free_space_treatment_mode,
1140 should_reduce_memory);
1142 free_start, free_end, p, record_free_ranges, &free_ranges_map,
1143 sweeping_mode);
1144 }
1145 live_bytes += size;
1146 free_start = free_end + size;
1147
1148 if (active_system_pages_after_sweeping) {
1149 MemoryChunk* chunk = p->Chunk();
1150 active_system_pages_after_sweeping->Add(
1151 chunk->Offset(free_end), chunk->Offset(free_start),
1153 }
1154 }
1155
1156 // If there is free memory after the last live object also free that.
1157 Address free_end = p->area_end();
1158 if (free_end != free_start) {
1159 FreeAndProcessFreedMemory(free_start, free_end, p, space,
1160 free_space_treatment_mode, should_reduce_memory);
1161 CleanupRememberedSetEntriesForFreedMemory(free_start, free_end, p,
1162 record_free_ranges,
1163 &free_ranges_map, sweeping_mode);
1164 }
1165
1166 // Phase 3: Post process the page.
1168 CleanupTypedSlotsInFreeMemory(p, free_ranges_map, sweeping_mode);
1170
1171 if (active_system_pages_after_sweeping) {
1172 // Decrement accounted memory for discarded memory.
1173 PagedSpaceBase* paged_space = static_cast<PagedSpaceBase*>(p->owner());
1174 paged_space->ReduceActiveSystemPages(p,
1175 *active_system_pages_after_sweeping);
1176 }
1177}
1178
1180 return promoted_page_iteration_in_progress_.load(std::memory_order_acquire);
1181}
1182
1186
1196
1204
1211
1214 base::MutexGuard guard(&mutex_);
1215 size_t count = 0;
1216 for (int i = 0; i < kNumberOfSweepingSpaces; i++) {
1217 if (i == GetSweepSpaceIndex(NEW_SPACE)) continue;
1218 count += sweeping_list_[i].size();
1219 }
1220 return count;
1221}
1222
1224 SweepingMode sweeping_mode,
1225 uint32_t max_pages) {
1226 DCHECK_IMPLIES(identity == NEW_SPACE, heap_->IsMainThread());
1227 return main_thread_local_sweeper_.ParallelSweepSpace(identity, sweeping_mode,
1228 max_pages);
1229}
1230
1233
1234 auto concurrent_sweeping_state = page->concurrent_sweeping_state();
1236 concurrent_sweeping_state ==
1238 if (concurrent_sweeping_state ==
1240 DCHECK(page->SweepingDone());
1241 return;
1242 }
1243
1244 AllocationSpace space = page->owner_identity();
1246
1247 auto scope_id = GetTracingScope(space, true);
1249 heap_->tracer(), scope_id, ThreadKind::kMain,
1250 GetTraceIdForFlowEvent(scope_id),
1252 if ((concurrent_sweeping_state ==
1254 TryRemoveSweepingPageSafe(space, page)) {
1255 // Page was successfully removed and can now be swept.
1257 page, space, SweepingMode::kLazyOrConcurrent);
1258
1259 } else if ((concurrent_sweeping_state ==
1262 // Page was successfully removed and can now be iterated.
1264 } else {
1265 // Some sweeper task already took ownership of that page, wait until
1266 // sweeping is finished.
1268 }
1269
1270 CHECK(page->SweepingDone());
1271}
1272
1276
1277 base::MutexGuard guard(&mutex_);
1278 while (!page->SweepingDone()) {
1280 }
1281}
1282
1284 PageMetadata* page) {
1285 base::MutexGuard guard(&mutex_);
1287 int space_index = GetSweepSpaceIndex(space);
1288 SweepingList& sweeping_list = sweeping_list_[space_index];
1289 SweepingList::iterator position =
1290 std::find(sweeping_list.begin(), sweeping_list.end(), page);
1291 if (position == sweeping_list.end()) return false;
1292 sweeping_list.erase(position);
1293 if (sweeping_list.empty()) {
1295 false, std::memory_order_release);
1296 }
1297 return true;
1298}
1299
1310
1312 DCHECK_NE(NEW_SPACE, space);
1313 AddPageImpl(space, page);
1314}
1315
1317 DCHECK_EQ(NEW_SPACE, page->owner_identity());
1318 DCHECK_LE(page->AgeInNewSpace(), v8_flags.minor_ms_max_page_age);
1319 size_t live_bytes = page->live_bytes();
1322 AddPageImpl(NEW_SPACE, page);
1323 page->IncrementAgeInNewSpace();
1324}
1325
1328 DCHECK(page->SweepingDone());
1329 DCHECK(!page->Chunk()->IsFlagSet(MemoryChunk::BLACK_ALLOCATED));
1331 DCHECK_IMPLIES(v8_flags.concurrent_sweeping && (space != NEW_SPACE),
1333 DCHECK_IMPLIES(v8_flags.concurrent_sweeping,
1335 PrepareToBeSweptPage(space, page);
1337 page->concurrent_sweeping_state());
1338 sweeping_list_[GetSweepSpaceIndex(space)].push_back(page);
1340 true, std::memory_order_release);
1341}
1342
1345 DCHECK(chunk->owner_identity() == OLD_SPACE ||
1346 chunk->owner_identity() == LO_SPACE);
1347 DCHECK_IMPLIES(v8_flags.concurrent_sweeping,
1349 size_t live_bytes = chunk->live_bytes();
1350 DCHECK_GE(chunk->area_size(), live_bytes);
1354 chunk->concurrent_sweeping_state());
1355 if (!chunk->Chunk()->IsLargePage()) {
1356 PrepareToBeIteratedPromotedPage(static_cast<PageMetadata*>(chunk));
1357 } else {
1360 }
1362 chunk->concurrent_sweeping_state());
1363 // This method is called only from the main thread while sweeping tasks have
1364 // not yet started, thus a mutex is not needed.
1367}
1368
1369namespace {
1370void VerifyPreparedPage(PageMetadata* page) {
1371#ifdef DEBUG
1372 DCHECK_GE(page->area_size(), static_cast<size_t>(page->live_bytes()));
1374 page->concurrent_sweeping_state());
1375 page->ForAllFreeListCategories([page](FreeListCategory* category) {
1376 DCHECK(!category->is_linked(page->owner()->free_list()));
1377 });
1378#endif // DEBUG
1379}
1380} // namespace
1381
1383 VerifyPreparedPage(page);
1384 page->set_concurrent_sweeping_state(
1386 PagedSpaceBase* paged_space;
1387 if (space == NEW_SPACE) {
1388 DCHECK(v8_flags.minor_ms);
1389 paged_space = heap_->paged_new_space()->paged_space();
1390 } else {
1391 paged_space = heap_->paged_space(space);
1392 }
1393
1394 paged_space->IncreaseAllocatedBytes(page->live_bytes(), page);
1395
1396 // Set the allocated_bytes_ counter to area_size and clear the wasted_memory_
1397 // counter. The free operations during sweeping will decrease allocated_bytes_
1398 // to actual live bytes and keep track of wasted_memory_.
1399 page->ResetAllocationStatistics();
1400}
1401
1403 DCHECK(!page->Chunk()->IsFlagSet(MemoryChunk::BLACK_ALLOCATED));
1404 DCHECK_EQ(OLD_SPACE, page->owner_identity());
1405 VerifyPreparedPage(page);
1406 page->set_concurrent_sweeping_state(
1408 // Account the whole page as allocated since it won't be in the free list.
1409 // TODO(v8:12612): Consider accounting for wasted bytes when checking old gen
1410 // size against old gen allocation limit, and treat previously unallocated
1411 // memory as wasted rather than allocated.
1412 page->ResetAllocationStatisticsForPromotedPage();
1413 PagedSpace* space = static_cast<PagedSpace*>(page->owner());
1414 space->IncreaseAllocatedBytes(page->allocated_bytes(), page);
1415 space->free_list()->increase_wasted_bytes(page->wasted_memory());
1416}
1417
1419 base::MutexGuard guard(&mutex_);
1421 int space_index = GetSweepSpaceIndex(space);
1422 PageMetadata* page = nullptr;
1423 SweepingList& sweeping_list = sweeping_list_[space_index];
1424 if (!sweeping_list.empty()) {
1425 page = sweeping_list.back();
1426 sweeping_list.pop_back();
1427 }
1428 if (sweeping_list.empty()) {
1430 false, std::memory_order_release);
1431 }
1432 return page;
1433}
1434
1444
1446 bool is_joining_thread) {
1447 if (space == NEW_SPACE) {
1448 return is_joining_thread ? GCTracer::Scope::MINOR_MS_SWEEP
1449 : GCTracer::Scope::MINOR_MS_BACKGROUND_SWEEPING;
1450 }
1451 return is_joining_thread ? GCTracer::Scope::MC_SWEEP
1452 : GCTracer::Scope::MC_BACKGROUND_SWEEPING;
1453}
1454
1456 return !has_sweeping_work_[GetSweepSpaceIndex(space)].load(
1457 std::memory_order_acquire);
1458}
1459
1461 base::MutexGuard guard(&mutex_);
1462 page->set_concurrent_sweeping_state(
1464 swept_list_[GetSweepSpaceIndex(identity)].push_back(page);
1465 has_swept_pages_[GetSweepSpaceIndex(identity)].store(
1466 true, std::memory_order_release);
1468}
1469
1471 DCHECK_IMPLIES(space == NEW_SPACE, v8_flags.minor_ms);
1472 return has_swept_pages_[GetSweepSpaceIndex(space)].load(
1473 std::memory_order_acquire);
1474}
1475
1477 DCHECK(v8_flags.minor_ms);
1478 DCHECK_EQ(kNewSpace, page->owner_identity());
1479 DCHECK_EQ(0, page->live_bytes());
1480 DCHECK(page->marking_bitmap()->IsClean());
1482 DCHECK(heap_->tracer()->IsInAtomicPause());
1484 page->concurrent_sweeping_state());
1485
1486 PagedSpaceBase* paged_space = nullptr;
1487 if (v8_flags.sticky_mark_bits) {
1488 paged_space = heap_->sticky_space();
1489 } else {
1490 paged_space = PagedNewSpace::From(heap_->new_space())->paged_space();
1491 }
1492
1493 Address start = page->area_start();
1494 size_t size = page->area_size();
1495
1496 if (heap::ShouldZapGarbage()) {
1497 static constexpr Tagged_t kZapTagged = static_cast<Tagged_t>(kZapValue);
1498 const size_t size_in_tagged = size / kTaggedSize;
1499 Tagged_t* current_addr = reinterpret_cast<Tagged_t*>(start);
1500 for (size_t i = 0; i < size_in_tagged; ++i) {
1501 base::AsAtomicPtr(current_addr++)
1502 ->store(kZapTagged, std::memory_order_relaxed);
1503 }
1504 }
1505
1506 page->ResetAllocationStatistics();
1507 page->ResetAgeInNewSpace();
1508 page->ReleaseSlotSet(SURVIVOR_TO_EXTERNAL_POINTER);
1509 page->Chunk()->ClearFlagNonExecutable(MemoryChunk::NEVER_ALLOCATE_ON_PAGE);
1510 paged_space->FreeDuringSweep(start, size);
1511 paged_space->IncreaseAllocatedBytes(0, page);
1512 paged_space->RelinkFreeListCategories(page);
1513
1514 if (heap_->ShouldReduceMemory()) {
1515 ZeroOrDiscardUnusedMemory(page, start, size);
1516 // Only decrement counter when we discard unused system pages.
1517 ActiveSystemPages active_system_pages_after_sweeping;
1518 active_system_pages_after_sweeping.Init(
1521 // Decrement accounted memory for discarded memory.
1522 paged_space->ReduceActiveSystemPages(page,
1523 active_system_pages_after_sweeping);
1524 }
1525}
1526
1528 : sweeper_(sweeper),
1529 resume_on_exit_(sweeper->AreMajorSweeperTasksRunning()) {
1530 DCHECK(v8_flags.minor_ms);
1531 DCHECK_IMPLIES(resume_on_exit_, v8_flags.concurrent_sweeping);
1533}
1534
1536 if (resume_on_exit_) {
1537 sweeper_->major_sweeping_state_.Resume();
1538 }
1539}
1540
1547
1548#if DEBUG
1549bool Sweeper::HasUnsweptPagesForMajorSweeping() const {
1552 bool has_unswept_pages = false;
1553 ForAllSweepingSpaces([this, &has_unswept_pages](AllocationSpace space) {
1555 sweeping_list_[GetSweepSpaceIndex(space)].empty());
1556 if (space == NEW_SPACE) return;
1557 if (!sweeping_list_[GetSweepSpaceIndex(space)].empty())
1558 has_unswept_pages = true;
1559 });
1560 return has_unswept_pages;
1561}
1562#endif // DEBUG
1563
1564} // namespace internal
1565} // namespace v8