v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
heap.cc
1// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/heap/heap.h"
6
7#include <algorithm>
8#include <atomic>
9#include <cinttypes>
10#include <iomanip>
11#include <memory>
12#include <optional>
13#include <unordered_map>
14#include <unordered_set>
15
16#include "include/v8-locker.h"
17#include "src/api/api-inl.h"
18#include "src/base/bits.h"
19#include "src/base/flags.h"
20#include "src/base/logging.h"
21#include "src/base/macros.h"
22#include "src/base/once.h"
31#include "src/common/globals.h"
33#include "src/debug/debug.h"
40#include "src/flags/flags.h"
45#include "src/heap/base/stack.h"
47#include "src/heap/code-range.h"
48#include "src/heap/code-stats.h"
58#include "src/heap/gc-tracer.h"
84#include "src/heap/new-spaces.h"
87#include "src/heap/page-pool.h"
93#include "src/heap/safepoint.h"
96#include "src/heap/sweeper.h"
99#include "src/heap/zapping.h"
101#include "src/init/v8.h"
103#include "src/logging/log.h"
113#include "src/objects/objects.h"
116#include "src/objects/slots.h"
117#include "src/objects/visitors.h"
119#include "src/regexp/regexp.h"
128#include "src/utils/utils-inl.h"
129#include "src/utils/utils.h"
130
131#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
133#endif // V8_ENABLE_CONSERVATIVE_STACK_SCANNING
134
135#if V8_ENABLE_WEBASSEMBLY
136#include "src/wasm/wasm-engine.h"
137#endif // V8_ENABLE_WEBASSEMBLY
138
139// Has to be the last include (doesn't have include guards):
140#include "src/objects/object-macros.h"
141
142namespace v8 {
143namespace internal {
144
145void Heap::SetConstructStubCreateDeoptPCOffset(int pc_offset) {
146 DCHECK_EQ(Smi::zero(), construct_stub_create_deopt_pc_offset());
147 set_construct_stub_create_deopt_pc_offset(Smi::FromInt(pc_offset));
148}
149
150void Heap::SetConstructStubInvokeDeoptPCOffset(int pc_offset) {
151 DCHECK_EQ(Smi::zero(), construct_stub_invoke_deopt_pc_offset());
152 set_construct_stub_invoke_deopt_pc_offset(Smi::FromInt(pc_offset));
153}
154
155void Heap::SetDeoptPCOffsetAfterAdaptShadowStack(int pc_offset) {
156 DCHECK((Smi::zero() == deopt_pc_offset_after_adapt_shadow_stack()) ||
157 (pc_offset == deopt_pc_offset_after_adapt_shadow_stack().value()));
158 set_deopt_pc_offset_after_adapt_shadow_stack(Smi::FromInt(pc_offset));
159}
160
161void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
162 DCHECK_EQ(Smi::zero(), interpreter_entry_return_pc_offset());
163 set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
164}
165
166void Heap::SetSerializedObjects(Tagged<HeapObject> objects) {
167 DCHECK(isolate()->serializer_enabled());
168 set_serialized_objects(objects);
169}
170
171void Heap::SetSerializedGlobalProxySizes(Tagged<FixedArray> sizes) {
172 DCHECK(isolate()->serializer_enabled());
173 set_serialized_global_proxy_sizes(sizes);
174}
175
176void Heap::SetBasicBlockProfilingData(DirectHandle<ArrayList> list) {
177 set_basic_block_profiling_data(*list);
178}
179
180Heap::Heap()
181 : isolate_(isolate()),
182 memory_pressure_level_(MemoryPressureLevel::kNone),
183 safepoint_(std::make_unique<IsolateSafepoint>(this)),
184 external_string_table_(this),
185 allocation_type_for_in_place_internalizable_strings_(
186 isolate()->OwnsStringTables() ? AllocationType::kOld
187 : AllocationType::kSharedOld),
189 non_atomic_marking_state_(isolate_),
190 pretenuring_handler_(this) {
191 // Ensure old_generation_size_ is a multiple of kPageSize.
193
195
197
198 // Put a dummy entry in the remembered pages so we can find the list in the
199 // minidump even if there are no real unmapped pages.
200 RememberUnmappedPage(kNullAddress, false);
201}
202
203Heap::~Heap() = default;
204
205size_t Heap::MaxReserved() const {
206 const size_t kMaxNewLargeObjectSpaceSize = max_semi_space_size_;
207 return static_cast<size_t>(
208 (v8_flags.minor_ms ? 1 : 2) * max_semi_space_size_ +
209 kMaxNewLargeObjectSpaceSize + max_old_generation_size());
210}
211
212size_t Heap::YoungGenerationSizeFromOldGenerationSize(size_t old_generation) {
213 // Compute the semi space size and cap it.
214 bool is_low_memory = old_generation <= kOldGenerationLowMemory;
215 size_t semi_space;
216 if (v8_flags.minor_ms && !is_low_memory) {
217 semi_space = DefaultMaxSemiSpaceSize();
218 } else {
219 size_t ratio = is_low_memory ? OldGenerationToSemiSpaceRatioLowMemory()
220 : OldGenerationToSemiSpaceRatio();
221 semi_space = old_generation / ratio;
222 semi_space = std::min({semi_space, DefaultMaxSemiSpaceSize()});
223 semi_space = std::max({semi_space, DefaultMinSemiSpaceSize()});
224 semi_space = RoundUp(semi_space, PageMetadata::kPageSize);
225 }
226 return YoungGenerationSizeFromSemiSpaceSize(semi_space);
227}
228
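The function above follows a clamp-and-round pattern: derive a semi-space size from the old generation via a ratio, bound it between a minimum and a maximum, and round it up to a page multiple. A standalone sketch of that pattern (not part of heap.cc; the constants and the RoundUpTo helper are placeholder values chosen for illustration):

#include <algorithm>
#include <cstddef>
#include <cstdio>

namespace example {

// Placeholder constants for the sketch only, not V8's real values.
constexpr size_t kPageSize = 256 * 1024;
constexpr size_t kMinSemiSpace = 512 * 1024;
constexpr size_t kMaxSemiSpace = 16 * 1024 * 1024;

// Rounds `value` up to the next multiple of `granularity`.
constexpr size_t RoundUpTo(size_t value, size_t granularity) {
  return (value + granularity - 1) / granularity * granularity;
}

// Mirrors the shape of the computation above: divide by a ratio, clamp, round.
size_t SemiSpaceFromOldGeneration(size_t old_generation, size_t ratio) {
  size_t semi_space = old_generation / ratio;
  semi_space = std::min(semi_space, kMaxSemiSpace);
  semi_space = std::max(semi_space, kMinSemiSpace);
  return RoundUpTo(semi_space, kPageSize);
}

}  // namespace example

int main() {
  // A 128 MB old generation with a ratio of 16 requests 8 MB, which stays
  // inside the clamp and is already page-aligned here.
  std::printf("%zu\n",
              example::SemiSpaceFromOldGeneration(128 * 1024 * 1024, 16));
  return 0;
}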
229size_t Heap::HeapSizeFromPhysicalMemory(uint64_t physical_memory) {
230 // Compute the old generation size and cap it.
231 uint64_t old_generation = physical_memory /
234 old_generation =
235 std::min(old_generation,
236 static_cast<uint64_t>(MaxOldGenerationSize(physical_memory)));
237 old_generation =
238 std::max({old_generation, static_cast<uint64_t>(V8HeapTrait::kMinSize)});
239 old_generation = RoundUp(old_generation, PageMetadata::kPageSize);
240
241 size_t young_generation = YoungGenerationSizeFromOldGenerationSize(
242 static_cast<size_t>(old_generation));
243 return static_cast<size_t>(old_generation) + young_generation;
244}
245
246void Heap::GenerationSizesFromHeapSize(size_t heap_size,
247 size_t* young_generation_size,
248 size_t* old_generation_size) {
249 // Initialize values for the case when the given heap size is too small.
250 *young_generation_size = 0;
251 *old_generation_size = 0;
252 // Binary search for the largest old generation size that fits to the given
253 // heap limit considering the correspondingly sized young generation.
254 size_t lower = 0, upper = heap_size;
255 while (lower + 1 < upper) {
256 size_t old_generation = lower + (upper - lower) / 2;
257 size_t young_generation =
258 YoungGenerationSizeFromOldGenerationSize(old_generation);
259 if (old_generation + young_generation <= heap_size) {
260 // This size configuration fits into the given heap limit.
261 *young_generation_size = young_generation;
262 *old_generation_size = old_generation;
263 lower = old_generation;
264 } else {
265 upper = old_generation;
266 }
267 }
268}
269
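GenerationSizesFromHeapSize bisects over the old-generation size because the young generation is itself a function of the old generation, so the split cannot be computed in closed form. A minimal standalone sketch of the same bisection, assuming a simple young = old / 8 coupling in place of YoungGenerationSizeFromOldGenerationSize:

#include <cstddef>
#include <cstdio>

// Assumed coupling for the sketch only: the young generation is an eighth of
// the old generation.
static size_t YoungFromOld(size_t old_generation) { return old_generation / 8; }

// Finds the largest old-generation size whose combined footprint with the
// corresponding young generation still fits into `heap_size`.
static void SplitHeapBudget(size_t heap_size, size_t* young_out,
                            size_t* old_out) {
  *young_out = 0;
  *old_out = 0;
  size_t lower = 0, upper = heap_size;
  while (lower + 1 < upper) {
    size_t old_generation = lower + (upper - lower) / 2;
    size_t young_generation = YoungFromOld(old_generation);
    if (old_generation + young_generation <= heap_size) {
      *young_out = young_generation;
      *old_out = old_generation;
      lower = old_generation;  // fits: try a larger old generation
    } else {
      upper = old_generation;  // too big: shrink the search interval
    }
  }
}

int main() {
  size_t young, old;
  SplitHeapBudget(512 * 1024 * 1024, &young, &old);
  std::printf("old=%zu young=%zu\n", old, young);
  return 0;
}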
270size_t Heap::MinYoungGenerationSize() {
271 return YoungGenerationSizeFromSemiSpaceSize(DefaultMinSemiSpaceSize());
272}
273
274size_t Heap::MinOldGenerationSize() {
275 size_t paged_space_count =
276 LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1;
277 return paged_space_count * PageMetadata::kPageSize;
278}
279
280size_t Heap::AllocatorLimitOnMaxOldGenerationSize() {
281#ifdef V8_COMPRESS_POINTERS
282 // Isolate and the young generation are also allocated on the heap.
283 return kPtrComprCageReservationSize -
285 RoundUp(sizeof(Isolate), size_t{1} << kPageSizeBits);
286#else
287 return std::numeric_limits<size_t>::max();
288#endif
289}
290
291size_t Heap::MaxOldGenerationSize(uint64_t physical_memory) {
292 size_t max_size = V8HeapTrait::kMaxSize;
293 // Increase the heap size from 2GB to 4GB for 64-bit systems with at least
294 // 16GB of physical memory. The threshold is set to 15GB to account for some
295 // memory being reserved by the hardware.
296#ifdef V8_HOST_ARCH_64_BIT
297 if ((physical_memory / GB) >= 15) {
298#if V8_OS_ANDROID
299 // As of 2024, Android devices with 16GiB are shipping (for instance the
300 // Pixel 9 Pro). However, a large fraction of their memory is not usable,
301 // and there is no disk swap, so heaps are still smaller than on desktop for
302 // now.
303 DCHECK_EQ(max_size / GB, 1u);
304#else
305 DCHECK_EQ(max_size / GB, 2u);
306#endif
307 max_size *= 2;
308 }
309#endif // V8_HOST_ARCH_64_BIT
310 return std::min(max_size, AllocatorLimitOnMaxOldGenerationSize());
311}
312
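The 15GB threshold above reduces to a single predicate on the reported physical memory; nominally 16GB devices usually report a little less, hence the slack. A tiny sketch with placeholder sizes (GB_ and the 2GB default are assumptions for the example, not V8 constants):

#include <cstdint>
#include <cstdio>

constexpr uint64_t GB_ = uint64_t{1} << 30;  // local constant for the sketch

// Doubles the default maximum old-generation size on hosts that report at
// least 15 GB of physical memory (i.e. nominally 16 GB machines).
uint64_t MaxOldGenSizeFor(uint64_t physical_memory, uint64_t default_max) {
  return (physical_memory / GB_) >= 15 ? default_max * 2 : default_max;
}

int main() {
  std::printf("%llu\n", static_cast<unsigned long long>(
                            MaxOldGenSizeFor(16 * GB_, 2 * GB_)));
  return 0;
}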
313namespace {
314int NumberOfSemiSpaces() { return v8_flags.minor_ms ? 1 : 2; }
315} // namespace
316
317size_t Heap::YoungGenerationSizeFromSemiSpaceSize(size_t semi_space_size) {
318 return semi_space_size *
319 (NumberOfSemiSpaces() + kNewLargeObjectSpaceToSemiSpaceRatio);
320}
321
322size_t Heap::SemiSpaceSizeFromYoungGenerationSize(
323 size_t young_generation_size) {
324 return young_generation_size /
325 (NumberOfSemiSpaces() + kNewLargeObjectSpaceToSemiSpaceRatio);
326}
327
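YoungGenerationSizeFromSemiSpaceSize and SemiSpaceSizeFromYoungGenerationSize are intended to be inverses of each other, up to integer division. A compile-time check of that round trip, assuming a combined factor of 3 (two semi-spaces plus one semi-space worth of new large object space) purely for illustration:

#include <cstddef>

// Assumed factor for the sketch only.
constexpr size_t kFactor = 3;

constexpr size_t YoungFromSemiSpace(size_t semi_space) {
  return semi_space * kFactor;
}
constexpr size_t SemiSpaceFromYoung(size_t young) { return young / kFactor; }

int main() {
  // Round-tripping a semi-space size through the young-generation size is the
  // identity as long as no integer truncation occurs.
  constexpr size_t semi_space = 4 * 1024 * 1024;
  static_assert(SemiSpaceFromYoung(YoungFromSemiSpace(semi_space)) == semi_space,
                "round trip should be lossless");
  return 0;
}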
328size_t Heap::Capacity() {
329 if (!HasBeenSetUp()) {
330 return 0;
331 }
332 return NewSpaceCapacity() + OldGenerationCapacity();
333}
334
335size_t Heap::OldGenerationCapacity() const {
336 if (!HasBeenSetUp()) return 0;
337 PagedSpaceIterator spaces(this);
338 size_t total = 0;
339 for (PagedSpace* space = spaces.Next(); space != nullptr;
340 space = spaces.Next()) {
341 total += space->Capacity();
342 }
343 if (shared_lo_space_) {
344 total += shared_lo_space_->SizeOfObjects();
345 }
346 return total + lo_space_->SizeOfObjects() + code_lo_space_->SizeOfObjects() +
347 trusted_lo_space_->SizeOfObjects();
348}
349
350size_t Heap::CommittedOldGenerationMemory() {
351 if (!HasBeenSetUp()) return 0;
352
353 PagedSpaceIterator spaces(this);
354 size_t total = 0;
355 for (PagedSpace* space = spaces.Next(); space != nullptr;
356 space = spaces.Next()) {
357 total += space->CommittedMemory();
358 }
359 if (shared_lo_space_) {
360 total += shared_lo_space_->Size();
361 }
362 return total + lo_space_->Size() + code_lo_space_->Size() +
363 trusted_lo_space_->Size();
364}
365
366size_t Heap::CommittedMemory() {
367 if (!HasBeenSetUp()) return 0;
368
369 size_t new_space_committed = new_space_ ? new_space_->CommittedMemory() : 0;
370 size_t new_lo_space_committed = new_lo_space_ ? new_lo_space_->Size() : 0;
371
372 return new_space_committed + new_lo_space_committed +
373 CommittedOldGenerationMemory();
374}
375
376size_t Heap::CommittedPhysicalMemory() {
377 if (!HasBeenSetUp()) return 0;
378
379 size_t total = 0;
380 for (SpaceIterator it(this); it.HasNext();) {
381 total += it.Next()->CommittedPhysicalMemory();
382 }
383
384 return total;
385}
386
387size_t Heap::CommittedMemoryExecutable() {
388 if (!HasBeenSetUp()) return 0;
389
390 return static_cast<size_t>(memory_allocator()->SizeExecutable());
391}
392
393void Heap::UpdateMaximumCommitted() {
394 if (!HasBeenSetUp()) return;
395
396 const size_t current_committed_memory = CommittedMemory();
397 if (current_committed_memory > maximum_committed_) {
398 maximum_committed_ = current_committed_memory;
399 }
400}
401
402size_t Heap::Available() {
403 if (!HasBeenSetUp()) return 0;
404
405 size_t total = 0;
406
407 for (SpaceIterator it(this); it.HasNext();) {
408 total += it.Next()->Available();
409 }
410
411 total += memory_allocator()->Available();
412 return total;
413}
414
415bool Heap::CanExpandOldGeneration(size_t size) const {
416 if (force_oom_ || force_gc_on_next_allocation_) return false;
417 if (OldGenerationCapacity() + size > max_old_generation_size()) return false;
418 // Stay below `MaxReserved()` such that it is more likely that committing the
419 // second semi space at the beginning of a GC succeeds.
420 return memory_allocator()->Size() + size <= MaxReserved();
421}
422
423bool Heap::IsOldGenerationExpansionAllowed(
424 size_t size, const base::MutexGuard& expansion_mutex_witness) const {
425 return OldGenerationCapacity() + size <= max_old_generation_size();
426}
427
428bool Heap::CanPromoteYoungAndExpandOldGeneration(size_t size) const {
429 if (v8_flags.sticky_mark_bits) {
431 size_t new_space_capacity =
433 size_t new_lo_space_capacity = new_lo_space_ ? new_lo_space_->Size() : 0;
434 return CanExpandOldGeneration(size + new_space_capacity +
435 new_lo_space_capacity);
436 }
437 if (!new_space()) {
439 return CanExpandOldGeneration(size);
440 }
441 size_t new_space_capacity =
442 new_space()->Capacity() + new_lo_space()->Size() +
443 (v8_flags.minor_ms ? 0
446
447 // Over-estimate the new space size using capacity to allow some slack.
448 return CanExpandOldGeneration(size + new_space_capacity);
449}
450
451bool Heap::HasBeenSetUp() const {
452 // We will always have an old space when the heap is set up.
453 return old_space_ != nullptr;
454}
455
456bool Heap::ShouldUseBackgroundThreads() const {
457 return !v8_flags.single_threaded_gc_in_background ||
458 !isolate()->EfficiencyModeEnabled();
459}
460
461bool Heap::ShouldUseIncrementalMarking() const {
462 if (v8_flags.single_threaded_gc_in_background &&
463 isolate()->EfficiencyModeEnabled()) {
464 return v8_flags.incremental_marking_for_gc_in_background;
465 } else {
466 return true;
467 }
468}
469
470bool Heap::ShouldOptimizeForBattery() const {
471 return v8_flags.optimize_gc_for_battery ||
472 isolate()->BatterySaverModeEnabled();
473}
474
475GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
476 GarbageCollectionReason gc_reason,
477 const char** reason) const {
479 DCHECK_NE(static_cast<bool>(new_space()),
480 v8_flags.sticky_mark_bits.value());
482 *reason = "Concurrent MinorMS needs finalization";
484 }
485
486 // Is global GC requested?
487 if (space != NEW_SPACE && space != NEW_LO_SPACE) {
488 isolate_->counters()->gc_compactor_caused_by_request()->Increment();
489 *reason = "GC in old space requested";
491 }
492
493 if (v8_flags.gc_global || ShouldStressCompaction() || !use_new_space()) {
494 *reason = "GC in old space forced by flags";
496 }
497
498 if (incremental_marking()->IsMajorMarking()) {
499 *reason = "Incremental marking forced finalization";
501 }
502
504 isolate_->counters()
505 ->gc_compactor_caused_by_oldspace_exhaustion()
506 ->Increment();
507 *reason = "scavenge might not succeed";
509 }
510
511 DCHECK(!v8_flags.single_generation);
512 DCHECK(!v8_flags.gc_global);
513 // Default
514 *reason = nullptr;
516}
517
518void Heap::SetGCState(HeapState state) {
519 gc_state_.store(state, std::memory_order_relaxed);
520}
521
522bool Heap::IsGCWithMainThreadStack() const {
523 return embedder_stack_state_ == StackState::kMayContainHeapPointers;
524}
525
526bool Heap::IsGCWithStack() const {
527 return IsGCWithMainThreadStack() || stack().HasBackgroundStacks();
528}
529
530bool Heap::CanShortcutStringsDuringGC(GarbageCollector collector) const {
531 if (!v8_flags.shortcut_strings_with_stack && IsGCWithStack()) return false;
532
533 switch (collector) {
534 case GarbageCollector::MINOR_MARK_SWEEPER:
535 if (!v8_flags.minor_ms_shortcut_strings) return false;
536
537 DCHECK(!incremental_marking()->IsMajorMarking());
538
539 // Minor MS cannot short cut strings during concurrent marking.
540 if (incremental_marking()->IsMinorMarking()) return false;
541
542 // Minor MS uses static roots to check for strings to shortcut.
543 if (!V8_STATIC_ROOTS_BOOL) return false;
544
545 break;
546 case GarbageCollector::SCAVENGER:
547 // Scavenger cannot short cut strings during incremental marking.
548 DCHECK(!incremental_marking()->IsMajorMarking());
549
550 if (isolate()->has_shared_space() &&
551 !isolate()->is_shared_space_isolate() &&
552 isolate()
553 ->shared_space_isolate()
554 ->heap()
555 ->incremental_marking()
556 ->IsMarking()) {
557 DCHECK(isolate()
558 ->shared_space_isolate()
559 ->heap()
560 ->incremental_marking()
561 ->IsMajorMarking());
562 return false;
563 }
564 break;
565 default:
566 UNREACHABLE();
567 }
568
569 return true;
570}
571
572void Heap::PrintShortHeapStatistics() {
573 if (!v8_flags.trace_gc_verbose) return;
575 "Memory allocator, used: %6zu KB"
576 ", available: %7zu KB\n",
577 memory_allocator()->Size() / KB,
578 memory_allocator()->Available() / KB);
580 "Read-only space, used: %6zu KB"
581 ", available: %7zu KB"
582 ", committed: %6zu KB\n",
583 read_only_space_->Size() / KB, size_t{0},
586 "New space, used: %6zu KB"
587 ", available:%c %7zu KB"
588 ", committed: %6zu KB\n",
589 NewSpaceSize() / KB,
590 (v8_flags.minor_ms && minor_sweeping_in_progress()) ? '*' : ' ',
591 new_space_->Available() / KB,
592 new_space_->CommittedMemory() / KB);
594 "New large object space, used: %6zu KB"
595 ", available: %7zu KB"
596 ", committed: %6zu KB\n",
598 new_lo_space_->Available() / KB,
601 "Old space, used: %6zu KB"
602 ", available:%c %7zu KB"
603 ", committed: %6zu KB\n",
604 old_space_->SizeOfObjects() / KB,
605 major_sweeping_in_progress() ? '*' : ' ',
606 old_space_->Available() / KB,
607 old_space_->CommittedMemory() / KB);
609 "Code space, used: %6zu KB"
610 ", available:%c %7zu KB"
611 ", committed: %6zu KB\n",
612 code_space_->SizeOfObjects() / KB,
613 major_sweeping_in_progress() ? '*' : ' ',
614 code_space_->Available() / KB,
615 code_space_->CommittedMemory() / KB);
617 "Large object space, used: %6zu KB"
618 ", available: %7zu KB"
619 ", committed: %6zu KB\n",
621 lo_space_->CommittedMemory() / KB);
623 "Code large object space, used: %6zu KB"
624 ", available: %7zu KB"
625 ", committed: %6zu KB\n",
630 "Trusted space, used: %6zu KB"
631 ", available:%c %7zu KB"
632 ", committed: %6zu KB\n",
633 trusted_space_->SizeOfObjects() / KB,
634 major_sweeping_in_progress() ? '*' : ' ',
636 trusted_space_->CommittedMemory() / KB);
638 "Trusted large object space, used: %6zu KB"
639 ", available: %7zu KB"
640 ", committed: %6zu KB\n",
644 ReadOnlySpace* const ro_space = read_only_space_;
646 "All spaces, used: %6zu KB"
647 ", available:%c %7zu KB"
648 ", committed: %6zu KB\n",
649 (this->SizeOfObjects() + ro_space->Size()) / KB,
650 sweeping_in_progress() ? '*' : ' ', (this->Available()) / KB,
651 (this->CommittedMemory() + ro_space->CommittedMemory()) / KB);
652 const size_t chunks = memory_allocator()->GetPooledChunksCount();
653 PrintIsolate(isolate_, "Pool buffering %4zu chunk(s) of committed: %7zu KB\n",
654 chunks, (chunks * PageMetadata::kPageSize) / KB);
656 "External memory reported: %7" PRId64 " KB\n",
657 external_memory() / KB);
659 "Backing store memory: %7" PRIu64 " KB\n",
660 backing_store_bytes() / KB);
661 PrintIsolate(isolate_, "External memory global: %7zu KB\n",
664 "Total time spent in GC: %7.1f ms\n",
666 if (sweeping_in_progress()) {
668 "(*) Sweeping is still in progress, making available sizes "
669 "inaccurate.\n");
670 }
671}
672
673void Heap::PrintFreeListsStats() {
674 DCHECK(v8_flags.trace_gc_freelists);
675
676 if (v8_flags.trace_gc_freelists_verbose) {
677 PrintIsolate(isolate_,
678 "Freelists statistics per Page: "
679 "[category: length || total free bytes]\n");
680 }
681
682 std::vector<int> categories_lengths(
683 old_space()->free_list()->number_of_categories(), 0);
684 std::vector<size_t> categories_sums(
685 old_space()->free_list()->number_of_categories(), 0);
686 unsigned int pageCnt = 0;
687
688 // This loop computes freelist lengths and sums.
689 // If v8_flags.trace_gc_freelists_verbose is enabled, it also prints
690 // the stats of each FreeListCategory of each Page.
691 for (PageMetadata* page : *old_space()) {
692 std::ostringstream out_str;
693
694 if (v8_flags.trace_gc_freelists_verbose) {
695 out_str << "Page " << std::setw(4) << pageCnt;
696 }
697
698 for (int cat = kFirstCategory;
699 cat <= old_space()->free_list()->last_category(); cat++) {
700 FreeListCategory* free_list =
701 page->free_list_category(static_cast<FreeListCategoryType>(cat));
702 int length = free_list->FreeListLength();
703 size_t sum = free_list->SumFreeList();
704
705 if (v8_flags.trace_gc_freelists_verbose) {
706 out_str << "[" << cat << ": " << std::setw(4) << length << " || "
707 << std::setw(6) << sum << " ]"
708 << (cat == old_space()->free_list()->last_category() ? "\n"
709 : ", ");
710 }
711 categories_lengths[cat] += length;
712 categories_sums[cat] += sum;
713 }
714
715 if (v8_flags.trace_gc_freelists_verbose) {
716 PrintIsolate(isolate_, "%s", out_str.str().c_str());
717 }
718
719 pageCnt++;
720 }
721
722 // Print statistics about old_space (pages, free/wasted/used memory...).
723 PrintIsolate(
724 isolate_,
725 "%d pages. Free space: %.1f MB (waste: %.2f). "
726 "Usage: %.1f/%.1f (MB) -> %.2f%%.\n",
727 pageCnt, static_cast<double>(old_space_->Available()) / MB,
728 static_cast<double>(old_space_->Waste()) / MB,
729 static_cast<double>(old_space_->Size()) / MB,
730 static_cast<double>(old_space_->Capacity()) / MB,
731 static_cast<double>(old_space_->Size()) / old_space_->Capacity() * 100);
732
733 // Print global statistics of each FreeListCategory (length & sum).
734 PrintIsolate(isolate_,
735 "FreeLists global statistics: "
736 "[category: length || total free KB]\n");
737 std::ostringstream out_str;
738 for (int cat = kFirstCategory;
739 cat <= old_space()->free_list()->last_category(); cat++) {
740 out_str << "[" << cat << ": " << categories_lengths[cat] << " || "
741 << std::fixed << std::setprecision(2)
742 << static_cast<double>(categories_sums[cat]) / KB << " KB]"
743 << (cat == old_space()->free_list()->last_category() ? "\n" : ", ");
744 }
745 PrintIsolate(isolate_, "%s", out_str.str().c_str());
746}
747
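The statistics loop above is a two-level aggregation: for every page, add each category's length and byte sum into parallel per-category totals. The aggregation pattern in isolation, with hypothetical CategoryStats data standing in for FreeListCategory:

#include <cstddef>
#include <cstdio>
#include <vector>

// One freelist category on one page, reduced to the two quantities the
// statistics code cares about.
struct CategoryStats {
  int length;
  size_t free_bytes;
};

// Sums per-category stats across pages into parallel totals, the same shape
// of aggregation used by Heap::PrintFreeListsStats.
void Aggregate(const std::vector<std::vector<CategoryStats>>& pages,
               std::vector<int>* lengths, std::vector<size_t>* sums) {
  for (const auto& page : pages) {
    for (size_t cat = 0; cat < page.size(); ++cat) {
      (*lengths)[cat] += page[cat].length;
      (*sums)[cat] += page[cat].free_bytes;
    }
  }
}

int main() {
  const size_t kCategories = 3;
  std::vector<std::vector<CategoryStats>> pages = {
      {{1, 64}, {0, 0}, {2, 512}}, {{3, 192}, {1, 32}, {0, 0}}};
  std::vector<int> lengths(kCategories, 0);
  std::vector<size_t> sums(kCategories, 0);
  Aggregate(pages, &lengths, &sums);
  for (size_t cat = 0; cat < kCategories; ++cat) {
    std::printf("category %zu: length=%d bytes=%zu\n", cat, lengths[cat],
                sums[cat]);
  }
  return 0;
}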
748void Heap::DumpJSONHeapStatistics(std::stringstream& stream) {
749 HeapStatistics stats;
750 reinterpret_cast<v8::Isolate*>(isolate())->GetHeapStatistics(&stats);
751
752// clang-format off
753#define DICT(s) "{" << s << "}"
754#define LIST(s) "[" << s << "]"
755#define QUOTE(s) "\"" << s << "\""
756#define MEMBER(s) QUOTE(s) << ":"
757
758 auto SpaceStatistics = [this](int space_index) {
759 HeapSpaceStatistics space_stats;
760 reinterpret_cast<v8::Isolate*>(isolate())->GetHeapSpaceStatistics(
761 &space_stats, space_index);
762 std::stringstream stream;
763 stream << DICT(
764 MEMBER("name")
765 << QUOTE(ToString(
766 static_cast<AllocationSpace>(space_index)))
767 << ","
768 MEMBER("size") << space_stats.space_size() << ","
769 MEMBER("used_size") << space_stats.space_used_size() << ","
770 MEMBER("available_size") << space_stats.space_available_size() << ","
771 MEMBER("physical_size") << space_stats.physical_space_size());
772 return stream.str();
773 };
774
775 stream << DICT(
776 MEMBER("isolate") << QUOTE(reinterpret_cast<void*>(isolate())) << ","
777 MEMBER("id") << gc_count() << ","
778 MEMBER("time_ms") << isolate()->time_millis_since_init() << ","
779 MEMBER("total_heap_size") << stats.total_heap_size() << ","
780 MEMBER("total_heap_size_executable")
781 << stats.total_heap_size_executable() << ","
782 MEMBER("total_physical_size") << stats.total_physical_size() << ","
783 MEMBER("total_available_size") << stats.total_available_size() << ","
784 MEMBER("used_heap_size") << stats.used_heap_size() << ","
785 MEMBER("heap_size_limit") << stats.heap_size_limit() << ","
786 MEMBER("malloced_memory") << stats.malloced_memory() << ","
787 MEMBER("external_memory") << stats.external_memory() << ","
788 MEMBER("peak_malloced_memory") << stats.peak_malloced_memory() << ","
789 MEMBER("spaces") << LIST(
790 SpaceStatistics(RO_SPACE) << "," <<
791 SpaceStatistics(NEW_SPACE) << "," <<
792 SpaceStatistics(OLD_SPACE) << "," <<
793 SpaceStatistics(CODE_SPACE) << "," <<
794 SpaceStatistics(LO_SPACE) << "," <<
795 SpaceStatistics(CODE_LO_SPACE) << "," <<
796 SpaceStatistics(NEW_LO_SPACE) << "," <<
797 SpaceStatistics(TRUSTED_SPACE) << "," <<
798 SpaceStatistics(TRUSTED_LO_SPACE)));
799
800#undef DICT
801#undef LIST
802#undef QUOTE
803#undef MEMBER
804 // clang-format on
805}
806
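The per-space numbers serialized above are also available to embedders through the public API that this function calls internally (v8::Isolate::GetHeapStatistics and GetHeapSpaceStatistics). A minimal embedder-side sketch, assuming a live isolate supplied by the caller and using the umbrella header for simplicity:

#include <cstdio>
#include <v8.h>

// Prints a one-line summary per heap space, mirroring the data that
// Heap::DumpJSONHeapStatistics serializes. `isolate` is assumed to be a live
// isolate owned by the embedder.
void PrintHeapSpaces(v8::Isolate* isolate) {
  v8::HeapStatistics heap_stats;
  isolate->GetHeapStatistics(&heap_stats);
  std::printf("total=%zu used=%zu limit=%zu\n", heap_stats.total_heap_size(),
              heap_stats.used_heap_size(), heap_stats.heap_size_limit());

  for (size_t i = 0; i < isolate->NumberOfHeapSpaces(); ++i) {
    v8::HeapSpaceStatistics space_stats;
    if (!isolate->GetHeapSpaceStatistics(&space_stats, i)) continue;
    std::printf("%s: size=%zu used=%zu available=%zu physical=%zu\n",
                space_stats.space_name(), space_stats.space_size(),
                space_stats.space_used_size(),
                space_stats.space_available_size(),
                space_stats.physical_space_size());
  }
}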
807void Heap::ReportStatisticsAfterGC() {
808 if (deferred_counters_.empty()) return;
809 // Move the contents into a new SmallVector first, in case
810 // {Isolate::CountUsage} puts the counters into {deferred_counters_} again.
811 decltype(deferred_counters_) to_report = std::move(deferred_counters_);
813 isolate()->CountUsage(base::VectorOf(to_report));
814}
815
818 public:
819 static bool IsNeeded() {
820 return v8_flags.verify_predictable || v8_flags.fuzzer_gc_analysis ||
821 (v8_flags.trace_allocation_stack_interval > 0);
822 }
823
828
831 if (v8_flags.verify_predictable || v8_flags.fuzzer_gc_analysis) {
833 }
834 }
835
836 void AllocationEvent(Address addr, int size) final {
837 if (v8_flags.verify_predictable) {
838 allocations_count_.fetch_add(1, std::memory_order_relaxed);
839 // Advance synthetic time by making a time request.
841
844
845 if (allocations_count_ % v8_flags.dump_allocations_digest_at_alloc == 0) {
847 }
848 } else if (v8_flags.fuzzer_gc_analysis) {
849 allocations_count_.fetch_add(1, std::memory_order_relaxed);
850 } else if (v8_flags.trace_allocation_stack_interval > 0) {
851 allocations_count_.fetch_add(1, std::memory_order_relaxed);
852 if (allocations_count_ % v8_flags.trace_allocation_stack_interval == 0) {
854 }
855 }
856 }
857
858 void MoveEvent(Address source, Address target, int size) final {
859 if (v8_flags.verify_predictable) {
860 allocations_count_.fetch_add(1, std::memory_order_relaxed);
861 // Advance synthetic time by making a time request.
863
867
868 if (allocations_count_ % v8_flags.dump_allocations_digest_at_alloc == 0) {
870 }
871 } else if (v8_flags.fuzzer_gc_analysis) {
872 allocations_count_.fetch_add(1, std::memory_order_relaxed);
873 }
874 }
875
876 void UpdateObjectSizeEvent(Address, int) final {}
877
878 private:
880 Address object_address = object.address();
881 MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
882 AllocationSpace allocation_space =
884
885 static_assert(kSpaceTagSize + kPageSizeBits <= 32);
886 uint32_t value =
887 static_cast<uint32_t>(memory_chunk->Offset(object_address)) |
888 (static_cast<uint32_t>(allocation_space) << kPageSizeBits);
889
891 }
892
893 void UpdateAllocationsHash(uint32_t value) {
894 const uint16_t c1 = static_cast<uint16_t>(value);
895 const uint16_t c2 = static_cast<uint16_t>(value >> 16);
898 }
899
901 uint32_t hash = raw_allocations_hash_.Finalize();
902 PrintF("\n### Allocations = %zu, hash = 0x%08x\n",
903 allocations_count_.load(std::memory_order_relaxed), hash);
904 }
905
906 Heap* const heap_;
907 // Count of all allocations performed through C++ bottlenecks. This needs to
908 // be atomic as objects are moved in parallel in the GC which counts as
909 // allocations.
910 std::atomic<size_t> allocations_count_{0};
911 // Running hash over allocations performed.
913};
914
915void Heap::AddHeapObjectAllocationTracker(
916 HeapObjectAllocationTracker* tracker) {
917 if (allocation_trackers_.empty() && v8_flags.inline_new) {
919 }
920 allocation_trackers_.push_back(tracker);
921 if (allocation_trackers_.size() == 1) {
922 isolate_->UpdateLogObjectRelocation();
923 }
924}
925
926void Heap::RemoveHeapObjectAllocationTracker(
927 HeapObjectAllocationTracker* tracker) {
928 allocation_trackers_.erase(std::remove(allocation_trackers_.begin(),
929 allocation_trackers_.end(), tracker),
930 allocation_trackers_.end());
931 if (allocation_trackers_.empty()) {
932 isolate_->UpdateLogObjectRelocation();
933 }
934 if (allocation_trackers_.empty() && v8_flags.inline_new) {
936 }
937}
938
939void Heap::IncrementDeferredCounts(
942 features.end());
943}
944
945void Heap::GarbageCollectionPrologue(
946 GarbageCollectionReason gc_reason,
947 const v8::GCCallbackFlags gc_callback_flags) {
948 TRACE_GC(tracer(), GCTracer::Scope::HEAP_PROLOGUE);
949
950 is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced ||
956
957#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
958 heap_allocator_->UpdateAllocationTimeout();
959#endif // V8_ENABLE_ALLOCATION_TIMEOUT
960
961 if (minor_gc_job()) {
964 }
965
966 // Reset GC statistics.
972 nodes_promoted_ = 0;
973
975
976#ifdef DEBUG
977 DCHECK(!AllowGarbageCollection::IsAllowed());
979
980 if (v8_flags.gc_verbose) Print();
981#endif // DEBUG
982}
983
984void Heap::GarbageCollectionPrologueInSafepoint() {
985 TRACE_GC(tracer(), GCTracer::Scope::HEAP_PROLOGUE_SAFEPOINT);
986 gc_count_++;
988}
989
990size_t Heap::NewSpaceAllocationCounter() const {
991 size_t counter = new_space_allocation_counter_;
992 if (new_space_) {
993 DCHECK(!allocator()->new_space_allocator()->IsLabValid());
994 counter += new_space()->AllocatedSinceLastGC();
995 }
996 return counter;
997}
998
999size_t Heap::SizeOfObjects() {
1000 size_t total = 0;
1001
1002 for (SpaceIterator it(this); it.HasNext();) {
1003 total += it.Next()->SizeOfObjects();
1004 }
1005 return total;
1006}
1007
1008size_t Heap::TotalGlobalHandlesSize() {
1009 return isolate_->global_handles()->TotalSize() +
1010 isolate_->traced_handles()->total_size_bytes();
1011}
1012
1013size_t Heap::UsedGlobalHandlesSize() {
1014 return isolate_->global_handles()->UsedSize() +
1015 isolate_->traced_handles()->used_size_bytes();
1016}
1017
1018void Heap::AddAllocationObserversToAllSpaces(
1019 AllocationObserver* observer, AllocationObserver* new_space_observer) {
1020 DCHECK(observer && new_space_observer);
1022 allocator()->AddAllocationObserver(observer, new_space_observer);
1023}
1024
1025void Heap::RemoveAllocationObserversFromAllSpaces(
1026 AllocationObserver* observer, AllocationObserver* new_space_observer) {
1027 DCHECK(observer && new_space_observer);
1028 allocator()->RemoveAllocationObserver(observer, new_space_observer);
1029}
1030
1031void Heap::PublishMainThreadPendingAllocations() {
1032 allocator()->PublishPendingAllocations();
1033}
1034
1035void Heap::DeoptMarkedAllocationSites() {
1036 // TODO(hpayer): If iterating over the allocation sites list becomes a
1037 // performance issue, use a cache data structure in heap instead.
1038
1041 if (site->deopt_dependent_code()) {
1043 isolate_, site,
1045 site->set_deopt_dependent_code(false);
1046 }
1047 });
1048
1050}
1051
1052GCType GetGCTypeFromGarbageCollector(GarbageCollector collector) {
1053 switch (collector) {
1054 case GarbageCollector::MARK_COMPACTOR:
1055 return kGCTypeMarkSweepCompact;
1056 case GarbageCollector::SCAVENGER:
1057 return kGCTypeScavenge;
1058 case GarbageCollector::MINOR_MARK_SWEEPER:
1059 return kGCTypeMinorMarkSweep;
1060 default:
1061 UNREACHABLE();
1062 }
1063}
1064
1065void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
1066 TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_SAFEPOINT);
1067
1068 {
1069 // Allows handle derefs for all threads/isolates from this thread.
1070 AllowHandleUsageOnAllThreads allow_all_handle_derefs;
1071 safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
1074 });
1075
1076 if (collector == GarbageCollector::MARK_COMPACTOR &&
1077 isolate()->is_shared_space_isolate()) {
1078 isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) {
1079 client->heap()->safepoint()->IterateLocalHeaps(
1080 [](LocalHeap* local_heap) {
1083 });
1084 });
1085 }
1086 }
1087
1088#define UPDATE_COUNTERS_FOR_SPACE(space) \
1089 isolate_->counters()->space##_bytes_available()->Set( \
1090 static_cast<int>(space()->Available())); \
1091 isolate_->counters()->space##_bytes_committed()->Set( \
1092 static_cast<int>(space()->CommittedMemory())); \
1093 isolate_->counters()->space##_bytes_used()->Set( \
1094 static_cast<int>(space()->SizeOfObjects()));
1095#define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
1096 if (space()->CommittedMemory() > 0) { \
1097 isolate_->counters()->external_fragmentation_##space()->AddSample( \
1098 static_cast<int>(100 - (space()->SizeOfObjects() * 100.0) / \
1099 space()->CommittedMemory())); \
1100 }
1101#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
1102 UPDATE_COUNTERS_FOR_SPACE(space) \
1103 UPDATE_FRAGMENTATION_FOR_SPACE(space)
1104
1105 if (new_space()) {
1106 UPDATE_COUNTERS_FOR_SPACE(new_space)
1107 }
1108
1111
1113#undef UPDATE_COUNTERS_FOR_SPACE
1114#undef UPDATE_FRAGMENTATION_FOR_SPACE
1115#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
1116
1117#ifdef DEBUG
1118 if (v8_flags.print_global_handles) isolate_->global_handles()->Print();
1119 if (v8_flags.print_handles) PrintHandles();
1120 if (v8_flags.check_handle_count) CheckHandleCount();
1121#endif
1122
1123 // Young generation GCs only run with memory reducing flags during
1124 // interleaved GCs.
1126 if (collector == GarbageCollector::MARK_COMPACTOR) {
1128 std::memory_order_relaxed);
1129
1130 if (v8_flags.stress_marking > 0) {
1132 }
1133 // Discard memory if the GC was requested to reduce memory.
1134 if (ShouldReduceMemory()) {
1135 memory_allocator_->ReleasePooledChunksImmediately();
1136#if V8_ENABLE_WEBASSEMBLY
1137 isolate_->stack_pool().ReleaseFinishedStacks();
1138#endif
1139 }
1140 }
1141
1142 // Remove CollectionRequested flag from main thread state, as the collection
1143 // was just performed.
1145 LocalHeap::ThreadState old_state =
1147
1148 CHECK(old_state.IsRunning());
1149
1150 // Resume all threads waiting for the GC.
1151 collection_barrier_->ResumeThreadsAwaitingCollection();
1152}
1153
1154void Heap::GarbageCollectionEpilogue(GarbageCollector collector) {
1155 TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE);
1156 AllowGarbageCollection for_the_rest_of_the_epilogue;
1157
1159
1160 isolate_->counters()->alive_after_last_gc()->Set(
1161 static_cast<int>(SizeOfObjects()));
1162
1163 if (CommittedMemory() > 0) {
1164 isolate_->counters()->external_fragmentation_total()->AddSample(
1165 static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
1166
1167 isolate_->counters()->heap_sample_total_committed()->AddSample(
1168 static_cast<int>(CommittedMemory() / KB));
1169 isolate_->counters()->heap_sample_total_used()->AddSample(
1170 static_cast<int>(SizeOfObjects() / KB));
1171 isolate_->counters()->heap_sample_code_space_committed()->AddSample(
1172 static_cast<int>(code_space()->CommittedMemory() / KB));
1173
1174 isolate_->counters()->heap_sample_maximum_committed()->AddSample(
1175 static_cast<int>(MaximumCommittedMemory() / KB));
1176 }
1177
1178#ifdef DEBUG
1180 if (v8_flags.code_stats) ReportCodeStatistics("After GC");
1181#endif // DEBUG
1182
1184}
1185
1189
1191
1193 return heap_->gc_callbacks_depth_ == 1;
1194}
1195
1213
1214namespace {
1215size_t MinorMSConcurrentMarkingTrigger(Heap* heap) {
1216 size_t young_capacity = 0;
1217 if (v8_flags.sticky_mark_bits) {
1218 // TODO(333906585): Adjust parameters.
1219 young_capacity = heap->sticky_space()->Capacity() -
1220 heap->sticky_space()->old_objects_size();
1221 } else {
1222 young_capacity = heap->new_space()->TotalCapacity();
1223 }
1224 return young_capacity * v8_flags.minor_ms_concurrent_marking_trigger / 100;
1225}
1226} // namespace
1227
1228void Heap::StartMinorMSIncrementalMarkingIfNeeded() {
1229 if (incremental_marking()->IsMarking()) return;
1230 if (v8_flags.concurrent_minor_ms_marking && !IsTearingDown() &&
1231 incremental_marking()->CanAndShouldBeStarted() &&
1232 V8_LIKELY(!v8_flags.gc_global)) {
1233 size_t usable_capacity = 0;
1234 size_t new_space_size = 0;
1235 if (v8_flags.sticky_mark_bits) {
1236 // TODO(333906585): Adjust parameters.
1237 usable_capacity =
1239 new_space_size = sticky_space()->young_objects_size();
1240 } else {
1241 usable_capacity = paged_new_space()->paged_space()->UsableCapacity();
1242 new_space_size = new_space()->Size();
1243 }
1244 if ((usable_capacity >=
1245 v8_flags.minor_ms_min_new_space_capacity_for_concurrent_marking_mb *
1246 MB) &&
1247 (new_space_size >= MinorMSConcurrentMarkingTrigger(this)) &&
1252 // Schedule a task for finalizing the GC if needed.
1254 }
1255 }
1256}
1257
1258void Heap::CollectAllGarbage(GCFlags gc_flags,
1259 GarbageCollectionReason gc_reason,
1260 const v8::GCCallbackFlags gc_callback_flags) {
1261 current_gc_flags_ = gc_flags;
1262 CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags);
1263 current_gc_flags_ = GCFlag::kNoFlags;
1264}
1265
1266namespace {
1267
1268intptr_t CompareWords(int size, Tagged<HeapObject> a, Tagged<HeapObject> b) {
1269 int slots = size / kTaggedSize;
1270 DCHECK_EQ(a->Size(), size);
1271 DCHECK_EQ(b->Size(), size);
1272 Tagged_t* slot_a = reinterpret_cast<Tagged_t*>(a.address());
1273 Tagged_t* slot_b = reinterpret_cast<Tagged_t*>(b.address());
1274 for (int i = 0; i < slots; i++) {
1275 if (*slot_a != *slot_b) {
1276 return *slot_a - *slot_b;
1277 }
1278 slot_a++;
1279 slot_b++;
1280 }
1281 return 0;
1282}
1283
1284void ReportDuplicates(int size, std::vector<Tagged<HeapObject>>* objects) {
1285 if (objects->empty()) return;
1286
1287 sort(objects->begin(), objects->end(),
1288 [size](Tagged<HeapObject> a, Tagged<HeapObject> b) {
1289 intptr_t c = CompareWords(size, a, b);
1290 if (c != 0) return c < 0;
1291 return a < b;
1292 });
1293
1294 std::vector<std::pair<int, Tagged<HeapObject>>> duplicates;
1295 Tagged<HeapObject> current = (*objects)[0];
1296 int count = 1;
1297 for (size_t i = 1; i < objects->size(); i++) {
1298 if (CompareWords(size, current, (*objects)[i]) == 0) {
1299 count++;
1300 } else {
1301 if (count > 1) {
1302 duplicates.push_back(std::make_pair(count - 1, current));
1303 }
1304 count = 1;
1305 current = (*objects)[i];
1306 }
1307 }
1308 if (count > 1) {
1309 duplicates.push_back(std::make_pair(count - 1, current));
1310 }
1311
1312 int threshold = v8_flags.trace_duplicate_threshold_kb * KB;
1313
1314 sort(duplicates.begin(), duplicates.end());
1315 for (auto it = duplicates.rbegin(); it != duplicates.rend(); ++it) {
1316 int duplicate_bytes = it->first * size;
1317 if (duplicate_bytes < threshold) break;
1318 PrintF("%d duplicates of size %d each (%dKB)\n", it->first, size,
1319 duplicate_bytes / KB);
1320 PrintF("Sample object: ");
1321 Print(it->second);
1322 PrintF("============================\n");
1323 }
1324}
1325} // anonymous namespace
1326
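ReportDuplicates relies on the sort-then-group pattern: order objects by raw contents so identical ones become adjacent, then count runs of equal neighbors and report count - 1 extra copies. A generic standalone sketch of that pattern over strings instead of heap objects:

#include <algorithm>
#include <cstdio>
#include <string>
#include <utility>
#include <vector>

// Counts duplicate buffers by sorting so equal contents are adjacent and then
// measuring run lengths, analogous to ReportDuplicates above.
std::vector<std::pair<int, std::string>> CountDuplicates(
    std::vector<std::string> items) {
  std::sort(items.begin(), items.end());
  std::vector<std::pair<int, std::string>> duplicates;
  for (size_t i = 0; i < items.size();) {
    size_t j = i + 1;
    while (j < items.size() && items[j] == items[i]) ++j;
    if (j - i > 1) {
      duplicates.emplace_back(static_cast<int>(j - i - 1), items[i]);
    }
    i = j;
  }
  return duplicates;
}

int main() {
  auto result = CountDuplicates({"aa", "bb", "aa", "aa", "cc", "bb"});
  for (const auto& entry : result) {
    std::printf("%d extra copies of \"%s\"\n", entry.first,
                entry.second.c_str());
  }
  return 0;
}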
1327void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
1328 // Min and max number of attempts for GC. The method will continue with more
1329 // GCs until the root set is stable.
1330 static constexpr int kMaxNumberOfAttempts = 7;
1331 static constexpr int kMinNumberOfAttempts = 2;
1332
1333 // Returns the number of roots. We assume stack layout is stable but global
1334 // roots could change between GCs due to finalizers and weak callbacks.
1335 const auto num_roots = [this]() {
1336 size_t js_roots = 0;
1337 js_roots += isolate()->global_handles()->handles_count();
1338 js_roots += isolate()->eternal_handles()->handles_count();
1339 size_t cpp_roots = 0;
1340 if (auto* cpp_heap = CppHeap::From(cpp_heap_)) {
1341 cpp_roots += cpp_heap->GetStrongPersistentRegion().NodesInUse();
1342 cpp_roots +=
1343 cpp_heap->GetStrongCrossThreadPersistentRegion().NodesInUse();
1344 }
1345 return js_roots + cpp_roots;
1346 };
1347
1348 if (gc_reason == GarbageCollectionReason::kLastResort) {
1350 }
1351 RCS_SCOPE(isolate(), RuntimeCallCounterId::kGC_Custom_AllAvailableGarbage);
1352
1353 // The optimizing compiler may be unnecessarily holding on to memory.
1354 isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
1355 isolate()->ClearSerializerData();
1356 isolate()->compilation_cache()->Clear();
1357
1359
1360 if (gc_reason == GarbageCollectionReason::kLastResort) {
1361 gc_flags |= GCFlag::kLastResort;
1362 }
1363
1365 gc_flags |= GCFlag::kForced;
1366 }
1367
1368 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
1369 const size_t roots_before = num_roots();
1370 current_gc_flags_ = gc_flags;
1373 if ((roots_before == num_roots()) &&
1374 ((attempt + 1) >= kMinNumberOfAttempts)) {
1375 break;
1376 }
1377 }
1378
1380
1381 if (v8_flags.trace_duplicate_threshold_kb) {
1382 std::map<int, std::vector<Tagged<HeapObject>>> objects_by_size;
1383 PagedSpaceIterator spaces(this);
1384 for (PagedSpace* space = spaces.Next(); space != nullptr;
1385 space = spaces.Next()) {
1386 PagedSpaceObjectIterator it(this, space);
1387 for (Tagged<HeapObject> obj = it.Next(); !obj.is_null();
1388 obj = it.Next()) {
1389 objects_by_size[obj->Size()].push_back(obj);
1390 }
1391 }
1392 {
1394 for (Tagged<HeapObject> obj = it.Next(); !obj.is_null();
1395 obj = it.Next()) {
1396 objects_by_size[obj->Size()].push_back(obj);
1397 }
1398 }
1399 for (auto it = objects_by_size.rbegin(); it != objects_by_size.rend();
1400 ++it) {
1401 ReportDuplicates(it->first, &it->second);
1402 }
1403 }
1404
1405 if (gc_reason == GarbageCollectionReason::kLastResort &&
1406 v8_flags.heap_snapshot_on_oom) {
1408 }
1409}
1410
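The retry loop in CollectAllAvailableGarbage is a bounded fixed-point iteration: keep collecting while the observed root count still changes, with a minimum and maximum number of attempts. A generic sketch of that control flow; the collect/count_roots callables are hypothetical stand-ins for the real GC invocation and root counting:

#include <cstddef>
#include <cstdio>
#include <functional>

// Runs `collect` until `count_roots` stabilizes, with at least `min_attempts`
// and at most `max_attempts` iterations.
void CollectUntilRootSetStable(const std::function<void()>& collect,
                               const std::function<size_t()>& count_roots,
                               int min_attempts = 2, int max_attempts = 7) {
  for (int attempt = 0; attempt < max_attempts; ++attempt) {
    const size_t roots_before = count_roots();
    collect();
    if (roots_before == count_roots() && attempt + 1 >= min_attempts) break;
  }
}

int main() {
  // A fake root count that shrinks for two rounds and then stabilizes.
  size_t fake_roots = 12;
  CollectUntilRootSetStable(
      [&fake_roots]() {
        if (fake_roots > 8) fake_roots -= 2;  // stand-in for a GC pass
      },
      [&fake_roots]() { return fake_roots; });
  std::printf("stable root count: %zu\n", fake_roots);
  return 0;
}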
1411void Heap::PreciseCollectAllGarbage(GCFlags gc_flags,
1412 GarbageCollectionReason gc_reason,
1413 const GCCallbackFlags gc_callback_flags) {
1415 CollectAllGarbage(gc_flags, gc_reason, gc_callback_flags);
1416}
1417
1418void Heap::HandleExternalMemoryInterrupt() {
1419 const GCCallbackFlags kGCCallbackFlagsForExternalMemory =
1420 static_cast<GCCallbackFlags>(
1423 uint64_t current = external_memory();
1424 if (current > external_memory_hard_limit()) {
1425 TRACE_EVENT2("devtools.timeline,v8", "V8.ExternalMemoryPressure",
1426 "external_memory_mb", static_cast<int>((current) / MB),
1427 "external_memory_hard_limit_mb",
1428 static_cast<int>((external_memory_hard_limit()) / MB));
1433 kGCCallbackFlagsForExternalMemory));
1434 return;
1435 }
1436 if (v8_flags.external_memory_accounted_in_global_limit) {
1437 // Under `external_memory_accounted_in_global_limit`, external interrupt
1438 // only triggers a check to allocation limits.
1442 kGCCallbackFlagsForExternalMemory);
1443 return;
1444 }
1445 uint64_t soft_limit = external_memory_.soft_limit();
1446 if (current <= soft_limit) {
1447 return;
1448 }
1449 TRACE_EVENT2("devtools.timeline,v8", "V8.ExternalMemoryPressure",
1450 "external_memory_mb", static_cast<int>((current) / MB),
1451 "external_memory_soft_limit_mb",
1452 static_cast<int>((soft_limit) / MB));
1453 if (incremental_marking()->IsStopped()) {
1454 if (incremental_marking()->CanAndShouldBeStarted()) {
1457 kGCCallbackFlagsForExternalMemory);
1458 } else {
1459 CollectAllGarbage(i::GCFlag::kNoFlags,
1461 kGCCallbackFlagsForExternalMemory);
1462 }
1463 } else {
1464 // Incremental marking is turned on and has already been started.
1466 current_gc_callback_flags_ | kGCCallbackFlagsForExternalMemory);
1468 }
1469}
1470
1471uint64_t Heap::external_memory_limit_for_interrupt() {
1473}
1474
1475uint64_t Heap::external_memory_soft_limit() {
1477}
1478
1480 const char* event_name,
1481 const char* event_type)
1482 : heap_(heap), event_name_(event_name) {
1483 TRACE_EVENT_BEGIN2("devtools.timeline,v8", event_name_, "usedHeapSizeBefore",
1484 heap_->SizeOfObjects(), "type", event_type);
1485}
1486
1488 TRACE_EVENT_END1("devtools.timeline,v8", event_name_, "usedHeapSizeAfter",
1489 heap_->SizeOfObjects());
1490}
1491
1492namespace {
1493
1494template <typename Callback>
1495void InvokeExternalCallbacks(Isolate* isolate, Callback callback) {
1496 DCHECK(!AllowJavascriptExecution::IsAllowed(isolate));
1497 AllowGarbageCollection allow_gc;
1498 // Temporarily override any embedder stack state as callbacks may create
1499 // their own state on the stack and recursively trigger GC.
1500 EmbedderStackStateScope embedder_scope(
1502 StackState::kMayContainHeapPointers);
1503 VMState<EXTERNAL> callback_state(isolate);
1504
1505 callback();
1506}
1507
1508size_t GlobalMemorySizeFromV8Size(size_t v8_size) {
1509 const size_t kGlobalMemoryToV8Ratio = 2;
1510 return std::min(static_cast<uint64_t>(std::numeric_limits<size_t>::max()),
1511 static_cast<uint64_t>(v8_size) * kGlobalMemoryToV8Ratio);
1512}
1513
1514} // anonymous namespace
1515
1516void Heap::SetOldGenerationAndGlobalMaximumSize(
1517 size_t max_old_generation_size) {
1518 max_old_generation_size_.store(max_old_generation_size,
1519 std::memory_order_relaxed);
1520 max_global_memory_size_ = GlobalMemorySizeFromV8Size(max_old_generation_size);
1521}
1522
1523void Heap::SetOldGenerationAndGlobalAllocationLimit(
1524 size_t new_old_generation_allocation_limit,
1525 size_t new_global_allocation_limit) {
1526 CHECK_GE(new_global_allocation_limit, new_old_generation_allocation_limit);
1527#if defined(V8_USE_PERFETTO)
1528 TRACE_COUNTER(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
1529 perfetto::CounterTrack(V8HeapTrait::kName,
1530 perfetto::ThreadTrack::Current()),
1531 new_old_generation_allocation_limit);
1532 TRACE_COUNTER(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
1533 perfetto::CounterTrack(GlobalMemoryTrait::kName,
1534 perfetto::ThreadTrack::Current()),
1535 new_global_allocation_limit);
1536#endif
1537 old_generation_allocation_limit_.store(new_old_generation_allocation_limit,
1538 std::memory_order_relaxed);
1539 global_allocation_limit_.store(new_global_allocation_limit,
1540 std::memory_order_relaxed);
1541}
1542
1543void Heap::ResetOldGenerationAndGlobalAllocationLimit() {
1545
1548 GlobalMemorySizeFromV8Size(initial_old_generation_size_));
1550}
1551
1552void Heap::CollectGarbage(AllocationSpace space,
1553 GarbageCollectionReason gc_reason,
1554 const v8::GCCallbackFlags gc_callback_flags) {
1555 CHECK(isolate_->IsOnCentralStack());
1556 DCHECK_EQ(resize_new_space_mode_, ResizeNewSpaceMode::kNone);
1557
1559 // During isolate initialization heap always grows. GC is only requested
1560 // if a new page allocation fails. In such a case we should crash with
1561 // an out-of-memory instead of performing GC because the prologue/epilogue
1562 // callbacks may see objects that are not yet deserialized.
1564 FatalProcessOutOfMemory("GC during deserialization");
1565 }
1566
1567 // CollectGarbage consists of three parts:
1568 // 1. The prologue part which may execute callbacks. These callbacks may
1569 // allocate and trigger another garbage collection.
1570 // 2. The main garbage collection phase.
1571 // 3. The epilogue part which may execute callbacks. These callbacks may
1572 // allocate and trigger another garbage collection
1573
1574 // Part 1: Invoke all callbacks which should happen before the actual garbage
1575 // collection is triggered. Note that these callbacks may trigger another
1576 // garbage collection since they may allocate.
1577
1578 // JS execution is not allowed in any of the callbacks.
1579 DisallowJavascriptExecution no_js(isolate());
1580
1581 // Some custom flushing (currently: FlushBytecodeFromSFI) can create
1582 // fresh TrustedPointerTableEntries during GC. These must not be affected
1583 // by an active TrustedPointerPublishingScope, so disable any such scope.
1584 DisableTrustedPointerPublishingScope no_trusted_pointer_tracking(isolate());
1585
1586 DCHECK(AllowGarbageCollection::IsAllowed());
1587 // TODO(chromium:1523607): Ensure this for standalone cppgc as well.
1588 CHECK_IMPLIES(!v8_flags.allow_allocation_in_fast_api_call,
1589 !isolate()->InFastCCall());
1590
1591 const char* collector_reason = nullptr;
1592 const GarbageCollector collector =
1593 SelectGarbageCollector(space, gc_reason, &collector_reason);
1595 DCHECK_IMPLIES(v8_flags.minor_ms && IsYoungGenerationCollector(collector),
1597
1598 if (collector == GarbageCollector::MARK_COMPACTOR &&
1599 incremental_marking()->IsMinorMarking()) {
1600 const GCFlags gc_flags = current_gc_flags_;
1601 // Minor GCs should not be memory reducing.
1605 current_gc_flags_ = gc_flags;
1606 }
1607
1608 const GCType gc_type = GetGCTypeFromGarbageCollector(collector);
1609
1610 // Prologue callbacks. These callbacks may trigger GC themselves and thus
1611 // cannot be related exactly to garbage collection cycles.
1612 //
1613 // GCTracer scopes are managed by callees.
1614 InvokeExternalCallbacks(isolate(), [this, gc_callback_flags, gc_type]() {
1615 // Ensure that all pending phantom callbacks are invoked.
1616 isolate()->global_handles()->InvokeSecondPassPhantomCallbacks();
1617
1618 // Prologue callbacks registered with Heap.
1619 CallGCPrologueCallbacks(gc_type, gc_callback_flags,
1620 GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
1621 });
1622
1623 // The main garbage collection phase.
1624 //
1625 // We need a stack marker at the top of all entry points to allow
1626 // deterministic passes over the stack. E.g., a verifier that should only
1627 // find a subset of references of the marker.
1628 //
1629 // TODO(chromium:1056170): Consider adding a component that keeps track
1630 // of relevant GC stack regions where interesting pointers can be found.
1631 stack().SetMarkerIfNeededAndCallback([this, collector, gc_reason,
1632 collector_reason, gc_callback_flags]() {
1633 DisallowGarbageCollection no_gc_during_gc;
1634
1635 size_t committed_memory_before =
1638 : 0;
1639
1642 DevToolsTraceEventScope devtools_trace_event_scope(
1643 this, IsYoungGenerationCollector(collector) ? "MinorGC" : "MajorGC",
1644 ToString(gc_reason));
1645
1646 GarbageCollectionPrologue(gc_reason, gc_callback_flags);
1647 {
1648 GCTracer::RecordGCPhasesInfo record_gc_phases_info(this, collector,
1649 gc_reason);
1650 std::optional<TimedHistogramScope> histogram_timer_scope;
1651 std::optional<OptionalTimedHistogramScope> histogram_timer_priority_scope;
1652 TRACE_EVENT0("v8", record_gc_phases_info.trace_event_name());
1653 if (record_gc_phases_info.type_timer()) {
1654 histogram_timer_scope.emplace(record_gc_phases_info.type_timer(),
1655 isolate_);
1656 }
1657 if (record_gc_phases_info.type_priority_timer()) {
1658 histogram_timer_priority_scope.emplace(
1659 record_gc_phases_info.type_priority_timer(), isolate_,
1661 }
1662
1663 PerformGarbageCollection(collector, gc_reason, collector_reason);
1664
1665 // Clear flags describing the current GC now that the current GC is
1666 // complete. Do this before GarbageCollectionEpilogue() since that could
1667 // trigger another unforced GC.
1668 is_current_gc_forced_ = false;
1670
1671 if (collector == GarbageCollector::MARK_COMPACTOR ||
1672 collector == GarbageCollector::SCAVENGER) {
1673 tracer()->RecordGCPhasesHistograms(record_gc_phases_info.mode());
1674 }
1675 if ((collector == GarbageCollector::MARK_COMPACTOR ||
1677 cpp_heap()) {
1679 }
1680 }
1681
1682 GarbageCollectionEpilogue(collector);
1683 if (collector == GarbageCollector::MARK_COMPACTOR &&
1684 v8_flags.track_detached_contexts) {
1685 isolate()->CheckDetachedContextsAfterGC();
1686 }
1687
1688 if (collector == GarbageCollector::MARK_COMPACTOR) {
1689 if (memory_reducer_ != nullptr) {
1690 memory_reducer_->NotifyMarkCompact(committed_memory_before);
1691 }
1696 }
1697 }
1698
1701 // Young generation cycles finish atomically. It is important that
1702 // StopObservablePause and StopCycle are called in this
1703 // order; the latter may replace the current event with that of an
1704 // interrupted full cycle.
1705 if (IsYoungGenerationCollector(collector)) {
1707 } else {
1709 }
1711 });
1712
1713 if ((collector == GarbageCollector::MARK_COMPACTOR) &&
1717 }
1719 }
1720
1721 // Epilogue callbacks. These callbacks may trigger GC themselves and thus
1722 // cannot be related exactly to garbage collection cycles.
1723 //
1724 // GCTracer scopes are managed by callees.
1725 InvokeExternalCallbacks(isolate(), [this, gc_callback_flags, gc_type]() {
1726 // Epilogue callbacks registered with Heap.
1727 CallGCEpilogueCallbacks(gc_type, gc_callback_flags,
1728 GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
1729
1730 isolate()->global_handles()->PostGarbageCollectionProcessing(
1731 gc_callback_flags);
1732 });
1733
1734 if (collector == GarbageCollector::MARK_COMPACTOR) {
1735 if ((gc_callback_flags &
1737 isolate()->CountUsage(v8::Isolate::kForcedGC);
1738 }
1739 if (v8_flags.heap_snapshot_on_gc > 0 &&
1740 static_cast<size_t>(v8_flags.heap_snapshot_on_gc) == ms_count_) {
1742 }
1743 } else {
1744 // Start incremental marking for the next cycle. We do this only for
1745 // minor GCs to avoid a loop where mark-compact causes another mark-compact.
1749 }
1750
1751 if (!CanExpandOldGeneration(0)) {
1753 if (!CanExpandOldGeneration(0)) {
1754 if (v8_flags.heap_snapshot_on_oom) {
1756 }
1757 FatalProcessOutOfMemory("Reached heap limit");
1758 }
1759 }
1760
1761 if (collector == GarbageCollector::MARK_COMPACTOR) {
1763 }
1764}
1765
1766class IdleTaskOnContextDispose : public CancelableIdleTask {
1767 public:
1768 static void TryPostJob(Heap* heap) {
1769 const auto runner = heap->GetForegroundTaskRunner();
1770 if (runner->IdleTasksEnabled()) {
1771 runner->PostIdleTask(
1772 std::make_unique<IdleTaskOnContextDispose>(heap->isolate()));
1773 }
1774 }
1775
1776 explicit IdleTaskOnContextDispose(Isolate* isolate)
1777 : CancelableIdleTask(isolate), isolate_(isolate) {}
1778
1779 void RunInternal(double deadline_in_seconds) override {
1780 auto* heap = isolate_->heap();
1781 const base::TimeDelta time_to_run = base::TimeTicks::Now() - creation_time_;
1782 // The provided delta uses embedder timestamps.
1783 const base::TimeDelta idle_time = base::TimeDelta::FromMillisecondsD(
1784 (deadline_in_seconds * 1000) - heap->MonotonicallyIncreasingTimeInMs());
1785 const bool time_to_run_exceeded = time_to_run > kMaxTimeToRun;
1786 if (V8_UNLIKELY(v8_flags.trace_context_disposal)) {
1787 isolate_->PrintWithTimestamp(
1788 "[context-disposal/idle task] time-to-run: %fms (max delay: %fms), "
1789 "idle time: %fms%s\n",
1790 time_to_run.InMillisecondsF(), kMaxTimeToRun.InMillisecondsF(),
1791 idle_time.InMillisecondsF(),
1792 time_to_run_exceeded ? ", not starting any action" : "");
1793 }
1794 if (time_to_run_exceeded) {
1795 return;
1796 }
1797 TryRunMinorGC(idle_time);
1798 }
1799
1800 private:
1801 static constexpr base::TimeDelta kFrameTime =
1802 base::TimeDelta::FromMillisecondsD(16);
1803
1804 // We limit any idle time actions here to a maximum run time of a single
1805 // frame. This avoids these tasks executing too late and causing
1806 // (unpredictable) side effects, e.g. promotion of newly allocated
1807 // objects.
1808 static constexpr base::TimeDelta kMaxTimeToRun = kFrameTime + kFrameTime;
1809
1810 void TryRunMinorGC(const base::TimeDelta idle_time) {
1811 // The following logic estimates whether a young generation GC would fit in
1812 // `idle_time`. We bail out for a young gen below 1MB to avoid executing GC
1813 // when the mutator is not actually active.
1814 static constexpr size_t kMinYounGenSize = 1 * MB;
1815
1816 auto* heap = isolate_->heap();
1817 const std::optional<double> young_gen_gc_speed =
1818 heap->tracer()->YoungGenerationSpeedInBytesPerMillisecond(
1819 YoungGenerationSpeedMode::kUpToAndIncludingAtomicPause);
1820 if (!young_gen_gc_speed) {
1821 return;
1822 }
1823 const size_t young_gen_bytes = heap->YoungGenerationSizeOfObjects();
1824 const base::TimeDelta young_gen_estimate =
1825 base::TimeDelta::FromMillisecondsD(young_gen_bytes /
1826 *young_gen_gc_speed);
1827 const bool run_young_gen_gc =
1828 young_gen_estimate < idle_time && young_gen_bytes > kMinYounGenSize;
1829 if (V8_UNLIKELY(v8_flags.trace_context_disposal)) {
1830 isolate_->PrintWithTimestamp(
1831 "[context-disposal/idle task] young generation size: %zuKB (min: "
1832 "%zuKB), GC speed: %fKB/ms, estimated time: %fms%s\n",
1833 young_gen_bytes / KB, kMinYounGenSize / KB, *young_gen_gc_speed / KB,
1834 young_gen_estimate.InMillisecondsF(),
1835 run_young_gen_gc ? ", performing young gen GC"
1836 : ", not starting young gen GC");
1837 }
1838 if (run_young_gen_gc) {
1839 heap->CollectGarbage(NEW_SPACE,
1840 GarbageCollectionReason::kIdleContextDisposal);
1841 }
1842 }
1843
1845 const base::TimeTicks creation_time_ = base::TimeTicks::Now();
1846};
1847
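TryRunMinorGC decides whether a young-generation GC fits into the idle window by dividing the young-generation size by the measured GC speed and comparing the estimate with the remaining idle time. The arithmetic in isolation, with made-up example numbers rather than V8 defaults:

#include <cstdio>

// Returns true when a young-generation GC is expected to fit into the idle
// budget. All inputs are example numbers, not V8 defaults.
bool ShouldRunYoungGenGC(double young_gen_bytes, double speed_bytes_per_ms,
                         double idle_time_ms, double min_young_gen_bytes) {
  const double estimated_ms = young_gen_bytes / speed_bytes_per_ms;
  return estimated_ms < idle_time_ms && young_gen_bytes > min_young_gen_bytes;
}

int main() {
  // 4 MB of young generation at 1 MB/ms takes ~4 ms, which fits into a 10 ms
  // idle window and exceeds the 1 MB minimum, so the GC would be scheduled.
  const bool run = ShouldRunYoungGenGC(4.0 * 1024 * 1024, 1024 * 1024, 10.0,
                                       1.0 * 1024 * 1024);
  std::printf("run young gen GC: %s\n", run ? "yes" : "no");
  return 0;
}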
1848int Heap::NotifyContextDisposed(bool has_dependent_context) {
1849 if (!has_dependent_context) {
1853 }
1854 if (memory_reducer_) {
1855 memory_reducer_->NotifyPossibleGarbage();
1856 }
1857 } else if (v8_flags.idle_gc_on_context_disposal &&
1858 !v8_flags.single_generation) {
1861 }
1862 if (!isolate()->context().is_null()) {
1863 RemoveDirtyFinalizationRegistriesOnContext(isolate()->raw_native_context());
1864 isolate()->raw_native_context()->set_retained_maps(
1865 ReadOnlyRoots(this).empty_weak_array_list());
1866 }
1867
1868 return ++contexts_disposed_;
1869}
1870
1871void Heap::StartIncrementalMarking(GCFlags gc_flags,
1872 GarbageCollectionReason gc_reason,
1873 GCCallbackFlags gc_callback_flags,
1874 GarbageCollector collector) {
1875 DCHECK(incremental_marking()->IsStopped());
1876 CHECK_IMPLIES(!v8_flags.allow_allocation_in_fast_api_call,
1877 !isolate()->InFastCCall());
1879
1880 if (gc_callbacks_depth_ > 0) {
1881 // Do not start incremental marking while invoking GC callbacks.
1882 // Heap::CollectGarbage already decided which GC is going to be
1883 // invoked. In case it chose a young-gen GC, starting an incremental
1884 // full GC during callbacks would break the separate GC phases
1885 // guarantee.
1886 return;
1887 }
1888
1889 if (IsYoungGenerationCollector(collector)) {
1891 } else {
1892 // Sweeping needs to be completed such that markbits are all cleared before
1893 // starting marking again.
1895 }
1896
1897 std::optional<SafepointScope> safepoint_scope;
1898
1899 {
1900 AllowGarbageCollection allow_shared_gc;
1901 safepoint_scope.emplace(isolate(), kGlobalSafepointForSharedSpaceIsolate);
1902 }
1903
1904#ifdef DEBUG
1905 VerifyCountersAfterSweeping();
1906#endif
1907
1908 std::vector<Isolate*> paused_clients =
1910
1911 // Now that sweeping is completed, we can start the next full GC cycle.
1912 tracer()->StartCycle(collector, gc_reason, nullptr,
1914
1915 current_gc_flags_ = gc_flags;
1916 current_gc_callback_flags_ = gc_callback_flags;
1917
1918 incremental_marking()->Start(collector, gc_reason);
1919
1920 if (collector == GarbageCollector::MARK_COMPACTOR) {
1921 DCHECK(incremental_marking()->IsMajorMarking());
1925 }
1926
1927 if (isolate()->is_shared_space_isolate()) {
1928 for (Isolate* client : paused_clients) {
1929 client->heap()->concurrent_marking()->Resume();
1930 }
1931 } else {
1932 DCHECK(paused_clients.empty());
1933 }
1934}
1935
1936namespace {
1937void CompleteArrayBufferSweeping(Heap* heap) {
1938 auto* array_buffer_sweeper = heap->array_buffer_sweeper();
1939 if (array_buffer_sweeper->sweeping_in_progress()) {
1940 auto* tracer = heap->tracer();
1941 GCTracer::Scope::ScopeId scope_id;
1942
1943 switch (tracer->GetCurrentCollector()) {
1945 scope_id = GCTracer::Scope::MINOR_MS_COMPLETE_SWEEP_ARRAY_BUFFERS;
1946 break;
1948 scope_id = GCTracer::Scope::SCAVENGER_COMPLETE_SWEEP_ARRAY_BUFFERS;
1949 break;
1951 scope_id = GCTracer::Scope::MC_COMPLETE_SWEEP_ARRAY_BUFFERS;
1952 }
1953
1955 tracer, scope_id, ThreadKind::kMain,
1956 array_buffer_sweeper->GetTraceIdForFlowEvent(scope_id),
1958 array_buffer_sweeper->EnsureFinished();
1959 }
1960}
1961} // namespace
1962
1963void Heap::CompleteSweepingFull() {
1964 EnsureSweepingCompleted(SweepingForcedFinalizationMode::kUnifiedHeap);
1965
1968 !CppHeap::From(cpp_heap())->sweeper().IsSweepingInProgress());
1969 DCHECK(!tracer()->IsSweepingInProgress());
1970}
1971
1977
1978void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
1979 LocalHeap* local_heap, GCFlags gc_flags,
1980 const GCCallbackFlags gc_callback_flags) {
1981 if (incremental_marking()->IsStopped() &&
1982 incremental_marking()->CanAndShouldBeStarted()) {
1983 switch (IncrementalMarkingLimitReached()) {
1984 case IncrementalMarkingLimit::kHardLimit:
1985 if (local_heap->is_main_thread_for(this)) {
1986 StartIncrementalMarking(
1987 gc_flags,
1991 gc_callback_flags);
1992 } else {
1993 ExecutionAccess access(isolate());
1994 isolate()->stack_guard()->RequestStartIncrementalMarking();
1995 if (auto* job = incremental_marking()->incremental_marking_job()) {
1996 job->ScheduleTask();
1997 }
1998 }
1999 break;
2000 case IncrementalMarkingLimit::kSoftLimit:
2001 if (auto* job = incremental_marking()->incremental_marking_job()) {
2002 job->ScheduleTask(TaskPriority::kUserVisible);
2003 }
2004 break;
2005 case IncrementalMarkingLimit::kFallbackForEmbedderLimit:
2006 // This is a fallback case where no appropriate limits have been
2007 // configured yet.
2008 if (local_heap->is_main_thread_for(this) &&
2009 memory_reducer() != nullptr) {
2010 memory_reducer()->NotifyPossibleGarbage();
2011 }
2012 break;
2013 case IncrementalMarkingLimit::kNoLimit:
2014 break;
2015 }
2016 }
2017}
2018
2019void Heap::MoveRange(Tagged<HeapObject> dst_object, const ObjectSlot dst_slot,
2020 const ObjectSlot src_slot, int len,
2021 WriteBarrierMode mode) {
2022 DCHECK_NE(len, 0);
2023 DCHECK_NE(dst_object->map(), ReadOnlyRoots(this).fixed_cow_array_map());
2024 const ObjectSlot dst_end(dst_slot + len);
2025 // Ensure no range overflow.
2026 DCHECK(dst_slot < dst_end);
2027 DCHECK(src_slot < src_slot + len);
2028
2029 if ((v8_flags.concurrent_marking && incremental_marking()->IsMarking()) ||
2030 (v8_flags.minor_ms && sweeper()->IsIteratingPromotedPages())) {
2031 if (dst_slot < src_slot) {
2032 // Copy tagged values forward using relaxed load/stores that do not
2033 // involve value decompression.
2034 const AtomicSlot atomic_dst_end(dst_end);
2035 AtomicSlot dst(dst_slot);
2036 AtomicSlot src(src_slot);
2037 while (dst < atomic_dst_end) {
2038 *dst = *src;
2039 ++dst;
2040 ++src;
2041 }
2042 } else {
2043 // Copy tagged values backwards using relaxed load/stores that do not
2044 // involve value decompression.
2045 const AtomicSlot atomic_dst_begin(dst_slot);
2046 AtomicSlot dst(dst_slot + len - 1);
2047 AtomicSlot src(src_slot + len - 1);
2048 while (dst >= atomic_dst_begin) {
2049 *dst = *src;
2050 --dst;
2051 --src;
2052 }
2053 }
2054 } else {
2055 MemMove(dst_slot.ToVoidPtr(), src_slot.ToVoidPtr(), len * kTaggedSize);
2056 }
2057 if (mode == SKIP_WRITE_BARRIER) {
2058 return;
2059 }
2060 WriteBarrier::ForRange(this, dst_object, dst_slot, dst_end);
2061}
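// Illustrative sketch (editorial addition, not part of heap.cc): the
// forward/backward split in Heap::MoveRange above is the classic rule for
// copying overlapping ranges. A self-contained version on plain machine words
// (the helper name is hypothetical; it assumes <cstddef> and <cstdint>):
inline void MoveWordsIllustration(std::uintptr_t* dst,
                                  const std::uintptr_t* src, std::size_t len) {
  if (dst < src) {
    // dst starts below src: copy forward so every source word is read before
    // the copy can overwrite it.
    for (std::size_t i = 0; i < len; ++i) dst[i] = src[i];
  } else {
    // dst starts above src: copy backward for the same reason.
    for (std::size_t i = len; i-- > 0;) dst[i] = src[i];
  }
}
// Heap::MoveRange additionally routes the copy through AtomicSlot whenever a
// concurrent marker or the sweeper may observe the slots, and emits the write
// barrier for the destination range afterwards.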
2062
2063// Instantiate Heap::CopyRange().
2064template V8_EXPORT_PRIVATE void Heap::CopyRange<ObjectSlot>(
2065 Tagged<HeapObject> dst_object, ObjectSlot dst_slot, ObjectSlot src_slot,
2066 int len, WriteBarrierMode mode);
2067template V8_EXPORT_PRIVATE void Heap::CopyRange<MaybeObjectSlot>(
2068 Tagged<HeapObject> dst_object, MaybeObjectSlot dst_slot,
2069 MaybeObjectSlot src_slot, int len, WriteBarrierMode mode);
2070
2071template <typename TSlot>
2072void Heap::CopyRange(Tagged<HeapObject> dst_object, const TSlot dst_slot,
2073 const TSlot src_slot, int len, WriteBarrierMode mode) {
2074 DCHECK_NE(len, 0);
2075
2076 DCHECK_NE(dst_object->map(), ReadOnlyRoots(this).fixed_cow_array_map());
2077 const TSlot dst_end(dst_slot + len);
2078 // Ensure ranges do not overlap.
2079 DCHECK(dst_end <= src_slot || (src_slot + len) <= dst_slot);
2080
2081 if ((v8_flags.concurrent_marking && incremental_marking()->IsMarking()) ||
2082 (v8_flags.minor_ms && sweeper()->IsIteratingPromotedPages())) {
2083 // Copy tagged values using relaxed load/stores that do not involve value
2084 // decompression.
2085 const AtomicSlot atomic_dst_end(dst_end);
2086 AtomicSlot dst(dst_slot);
2087 AtomicSlot src(src_slot);
2088 while (dst < atomic_dst_end) {
2089 *dst = *src;
2090 ++dst;
2091 ++src;
2092 }
2093 } else {
2094 MemCopy(dst_slot.ToVoidPtr(), src_slot.ToVoidPtr(), len * kTaggedSize);
2095 }
2096 if (mode == SKIP_WRITE_BARRIER) {
2097 return;
2098 }
2099 WriteBarrier::ForRange(this, dst_object, dst_slot, dst_end);
2100}
2101
2102bool Heap::CollectionRequested() {
2103 return collection_barrier_->WasGCRequested();
2104}
2105
2106void Heap::CollectGarbageForBackground(LocalHeap* local_heap) {
2107 CHECK(local_heap->is_main_thread());
2111}
2112
2113void Heap::CheckCollectionRequested() {
2114 if (!CollectionRequested()) return;
2115
2119}
2120
2121void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
2122 if (start_new_space_size == 0) return;
2123
2124 promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
2125 static_cast<double>(start_new_space_size) * 100);
2126
2127 if (previous_new_space_surviving_object_size_ > 0) {
2128 promotion_rate_ =
2129 (static_cast<double>(promoted_objects_size_) /
2130 static_cast<double>(previous_new_space_surviving_object_size_) * 100);
2131 } else {
2132 promotion_rate_ = 0;
2133 }
2134
2135 new_space_surviving_rate_ =
2136 (static_cast<double>(new_space_surviving_object_size_) /
2137 static_cast<double>(start_new_space_size) * 100);
2138
2139 double survival_rate = promotion_ratio_ + new_space_surviving_rate_;
2140 tracer()->AddSurvivalRatio(survival_rate);
2141}
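// Worked example (editorial addition, not part of heap.cc), assuming 8 MB of
// new-space objects at the start of the GC, of which 2 MB were promoted to
// old space and 1 MB survived within new space:
//   promotion_ratio_          = 2 MB / 8 MB * 100 = 25.0
//   new_space_surviving_rate_ = 1 MB / 8 MB * 100 = 12.5
//   survival_rate             = 25.0 + 12.5       = 37.5
// i.e. the survival ratio fed into the tracer combines both ways an object can
// survive a young-generation GC: promotion and in-place survival.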
2142
2143namespace {
2144
2145GCTracer::Scope::ScopeId CollectorScopeId(GarbageCollector collector) {
2146 switch (collector) {
2147 case GarbageCollector::MARK_COMPACTOR:
2148 return GCTracer::Scope::ScopeId::MARK_COMPACTOR;
2149 case GarbageCollector::MINOR_MARK_SWEEPER:
2150 return GCTracer::Scope::ScopeId::MINOR_MARK_SWEEPER;
2151 case GarbageCollector::SCAVENGER:
2152 return GCTracer::Scope::ScopeId::SCAVENGER;
2153 }
2154 UNREACHABLE();
2155}
2156
2157void ClearStubCaches(Isolate* isolate) {
2158 isolate->load_stub_cache()->Clear();
2159 isolate->store_stub_cache()->Clear();
2160 isolate->define_own_stub_cache()->Clear();
2161
2162 if (isolate->is_shared_space_isolate()) {
2163 isolate->global_safepoint()->IterateClientIsolates([](Isolate* client) {
2164 client->load_stub_cache()->Clear();
2165 client->store_stub_cache()->Clear();
2166 client->define_own_stub_cache()->Clear();
2167 });
2168 }
2169}
2170
2171} // namespace
2172
2173void Heap::PerformGarbageCollection(GarbageCollector collector,
2174 GarbageCollectionReason gc_reason,
2175 const char* collector_reason) {
2176 if (IsYoungGenerationCollector(collector)) {
2177 if (v8_flags.sticky_mark_bits) {
2179 // TODO(333906585): It's not necessary to complete full sweeping here.
2180 // Make sure that only the OLD_SPACE is swept.
2181 CompleteSweepingFull();
2182 } else {
2183 CompleteSweepingYoung();
2184 if (v8_flags.verify_heap) {
2185 // If heap verification is enabled, we want to ensure that sweeping is
2186 // completed here, as it will be triggered from Heap::Verify anyway.
2187 // In this way, sweeping finalization is accounted to the corresponding
2188 // full GC cycle.
2189 CompleteSweepingFull();
2190 }
2191 }
2192 } else {
2193 DCHECK_EQ(GarbageCollector::MARK_COMPACTOR, collector);
2194 CompleteSweepingFull();
2195 }
2196
2197 const base::TimeTicks atomic_pause_start_time = base::TimeTicks::Now();
2198
2199 std::optional<SafepointScope> safepoint_scope;
2200 {
2201 AllowGarbageCollection allow_shared_gc;
2202 safepoint_scope.emplace(isolate(), kGlobalSafepointForSharedSpaceIsolate);
2203 }
2204
2205 if (!incremental_marking_->IsMarking() ||
2206 (collector == GarbageCollector::SCAVENGER)) {
2207 tracer()->StartCycle(collector, gc_reason, collector_reason,
2208 GCTracer::MarkingType::kAtomic);
2209 }
2210
2212 if ((!Heap::IsYoungGenerationCollector(collector) || v8_flags.minor_ms) &&
2213 incremental_marking_->IsMarking()) {
2214 DCHECK_IMPLIES(Heap::IsYoungGenerationCollector(collector),
2215 incremental_marking_->IsMinorMarking());
2216 tracer()->UpdateCurrentEvent(gc_reason, collector_reason);
2217 }
2218
2219 DCHECK(tracer()->IsConsistentWithCollector(collector));
2220 TRACE_GC_EPOCH(tracer(), CollectorScopeId(collector), ThreadKind::kMain);
2221
2222 collection_barrier_->StopTimeToCollectionTimer();
2223
2224 std::vector<Isolate*> paused_clients =
2225 PauseConcurrentThreadsInClients(collector);
2226
2227 PerformHeapVerification();
2228
2229 tracer()->StartInSafepoint(atomic_pause_start_time);
2230
2232
2234
2235 const size_t start_young_generation_size =
2236 NewSpaceSize() + (new_lo_space() ? new_lo_space()->SizeOfObjects() : 0);
2237
2238 // Make sure allocation observers are disabled until the new new space
2239 // capacity is set in the epilogue.
2240 PauseAllocationObserversScope pause_observers(this);
2241
2242 const size_t new_space_capacity_before_gc = NewSpaceTargetCapacity();
2243
2244 if (collector == GarbageCollector::MARK_COMPACTOR) {
2245 MarkCompact();
2246 } else if (collector == GarbageCollector::MINOR_MARK_SWEEPER) {
2247 MinorMarkSweep();
2248 } else {
2249 DCHECK_EQ(GarbageCollector::SCAVENGER, collector);
2250 Scavenge();
2251 }
2252
2253 // We don't want growing or shrinking of the current cycle to affect
2254 // pretenuring decisions. The numbers collected in the GC will be for the
2255 // capacity that was set before the GC.
2256 pretenuring_handler_.ProcessPretenuringFeedback(new_space_capacity_before_gc);
2257
2258 UpdateSurvivalStatistics(static_cast<int>(start_young_generation_size));
2260
2261 isolate_->counters()->objs_since_last_young()->Set(0);
2262
2263 isolate_->eternal_handles()->PostGarbageCollectionProcessing();
2264
2265 // Update relocatables.
2266 Relocatable::PostGarbageCollectionProcessing(isolate_);
2267
2268 if (isolate_->is_shared_space_isolate()) {
2269 // Allows handle derefs for all threads/isolates from this thread.
2270 AllowHandleUsageOnAllThreads allow_all_handle_derefs;
2271 isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) {
2273 });
2274 }
2275
2276 // First round weak callbacks are not supposed to allocate and trigger
2277 // nested GCs.
2278 isolate_->global_handles()->InvokeFirstPassWeakCallbacks();
2279
2280 if (cpp_heap() && (collector == GarbageCollector::MARK_COMPACTOR ||
2281 collector == GarbageCollector::MINOR_MARK_SWEEPER)) {
2282 // TraceEpilogue may trigger operations that invalidate global handles. It
2283 // has to be called *after* all other operations that potentially touch
2284 // and reset global handles. It is also still part of the main garbage
2285 // collection pause and thus needs to be called *before* any operation
2286 // that can potentially trigger recursive garbage collections.
2287 TRACE_GC(tracer(), GCTracer::Scope::HEAP_EMBEDDER_TRACING_EPILOGUE);
2288 CppHeap::From(cpp_heap())->TraceEpilogue();
2289 }
2290
2291 if (collector == GarbageCollector::MARK_COMPACTOR) {
2292 ClearStubCaches(isolate());
2293 }
2294
2296
2298
2299 const base::TimeTicks atomic_pause_end_time = base::TimeTicks::Now();
2300 tracer()->StopInSafepoint(atomic_pause_end_time);
2301
2302 ResumeConcurrentThreadsInClients(std::move(paused_clients));
2303
2304 // After every full GC the old generation allocation limit should be
2305 // configured.
2308}
2309
2310void Heap::PerformHeapVerification() {
2311 HeapVerifier::VerifyHeapIfEnabled(this);
2312
2313 if (isolate()->is_shared_space_isolate()) {
2314 // Allow handle creation for client isolates even if they are parked. This
2315 // is because some object verification methods create handles.
2316 AllowHandleUsageOnAllThreads allow_handle_creation;
2317 isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) {
2318 HeapVerifier::VerifyHeapIfEnabled(client->heap());
2319 });
2320 }
2321}
2322
2323std::vector<Isolate*> Heap::PauseConcurrentThreadsInClients(
2324 GarbageCollector collector) {
2325 std::vector<Isolate*> paused_clients;
2326
2327 if (isolate()->is_shared_space_isolate()) {
2328 isolate()->global_safepoint()->IterateClientIsolates(
2329 [collector, &paused_clients](Isolate* client) {
2330 CHECK(client->heap()->deserialization_complete());
2331
2332 if (v8_flags.concurrent_marking &&
2333 client->heap()->concurrent_marking()->Pause()) {
2334 paused_clients.push_back(client);
2335 }
2336
2337 if (collector == GarbageCollector::MARK_COMPACTOR) {
2338 Sweeper* const client_sweeper = client->heap()->sweeper();
2340 }
2341 });
2342 }
2343
2344 return paused_clients;
2345}
2346
2347void Heap::ResumeConcurrentThreadsInClients(
2348 std::vector<Isolate*> paused_clients) {
2349 if (isolate()->is_shared_space_isolate()) {
2350 for (Isolate* client : paused_clients) {
2351 client->heap()->concurrent_marking()->Resume();
2352 }
2353 } else {
2354 DCHECK(paused_clients.empty());
2355 }
2356}
2357
2358bool Heap::CollectGarbageShared(LocalHeap* local_heap,
2359 GarbageCollectionReason gc_reason) {
2360 DCHECK(isolate()->has_shared_space());
2361
2364 FatalProcessOutOfMemory("GC during deserialization");
2365 }
2366
2367 Isolate* shared_space_isolate = isolate()->shared_space_isolate();
2368 return shared_space_isolate->heap()->CollectGarbageFromAnyThread(local_heap,
2369 gc_reason);
2370}
2371
2372bool Heap::CollectGarbageFromAnyThread(LocalHeap* local_heap,
2373 GarbageCollectionReason gc_reason) {
2374 DCHECK(local_heap->IsRunning());
2375
2376 if (isolate() == local_heap->heap()->isolate() &&
2377 local_heap->is_main_thread()) {
2379 return true;
2380 } else {
2381 if (!collection_barrier_->TryRequestGC()) return false;
2382
2383 const LocalHeap::ThreadState old_state =
2385
2386 if (old_state.IsRunning()) {
2387 const bool performed_gc =
2388 collection_barrier_->AwaitCollectionBackground(local_heap);
2389 return performed_gc;
2390 } else {
2391 DCHECK(old_state.IsParked());
2392 return false;
2393 }
2394 }
2395}
2396
2397void Heap::CompleteSweepingYoung() {
2398 DCHECK(!v8_flags.sticky_mark_bits);
2399
2400 // If sweeping is in progress and there are no sweeper tasks running, finish
2401 // the sweeping here, to avoid having to pause and resume during the young
2402 // generation GC.
2404
2406
2407#if defined(CPPGC_YOUNG_GENERATION)
2408 // Always complete sweeping if young generation is enabled.
2409 if (cpp_heap()) {
2410 if (auto* iheap = CppHeap::From(cpp_heap());
2412 iheap->FinishSweepingIfRunning();
2413 }
2414#endif // defined(CPPGC_YOUNG_GENERATION)
2415}
2416
2417void Heap::EnsureSweepingCompletedForObject(Tagged<HeapObject> object) {
2418 if (!sweeping_in_progress()) return;
2419
2420 MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
2421 if (chunk->InReadOnlySpace()) return;
2422
2423 MutablePageMetadata* mutable_page =
2424 MutablePageMetadata::cast(chunk->Metadata());
2425 if (mutable_page->SweepingDone()) return;
2426
2427 // SweepingDone() is always true for large pages.
2428 DCHECK(!chunk->IsLargePage());
2429
2430 PageMetadata* page = PageMetadata::cast(mutable_page);
2431 sweeper()->EnsurePageIsSwept(page);
2432}
2433
2434// static
2435Heap::LimitsCompuatationResult Heap::ComputeNewAllocationLimits(Heap* heap) {
2436 DCHECK(!heap->using_initial_limit());
2437 heap->tracer()->RecordGCSizeCounters();
2438 const HeapGrowingMode mode = heap->CurrentHeapGrowingMode();
2439 std::optional<double> v8_gc_speed =
2440 heap->tracer()->OldGenerationSpeedInBytesPerMillisecond();
2441 double v8_mutator_speed =
2442 heap->tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond();
2443 double v8_growing_factor = MemoryController<V8HeapTrait>::GrowingFactor(
2444 heap, heap->max_old_generation_size(), v8_gc_speed, v8_mutator_speed,
2445 mode);
2446 std::optional<double> embedder_gc_speed =
2447 heap->tracer()->EmbedderSpeedInBytesPerMillisecond();
2448 double embedder_speed =
2449 heap->tracer()->EmbedderAllocationThroughputInBytesPerMillisecond();
2450 double embedder_growing_factor =
2451 (embedder_gc_speed.has_value() && embedder_speed > 0)
2452 ? MemoryController<GlobalMemoryTrait>::GrowingFactor(
2453 heap, heap->max_global_memory_size_, embedder_gc_speed,
2454 embedder_speed, mode)
2455 : 0;
2456
2457 size_t new_space_capacity = heap->NewSpaceTargetCapacity();
2458
2459 size_t new_old_generation_allocation_limit =
2461 heap, heap->OldGenerationConsumedBytesAtLastGC(),
2462 heap->OldGenerationConsumedBytesAtLastGC() * v8_growing_factor,
2463 heap->min_old_generation_size_, heap->max_old_generation_size(),
2464 new_space_capacity, mode);
2465
2466 double global_growing_factor =
2467 std::max(v8_growing_factor, embedder_growing_factor);
2468 double external_growing_factor = std::min(
2469 global_growing_factor, GlobalMemoryTrait::kConservativeGrowingFactor);
2470 DCHECK_GT(global_growing_factor, 0);
2471 DCHECK_GT(external_growing_factor, 0);
2472 size_t new_global_allocation_limit =
2474 heap, heap->GlobalConsumedBytesAtLastGC(),
2475 (heap->OldGenerationConsumedBytesAtLastGC() +
2476 heap->embedder_size_at_last_gc_) *
2477 global_growing_factor +
2478 (v8_flags.external_memory_accounted_in_global_limit
2479 ? heap->external_memory_.low_since_mark_compact() *
2480 external_growing_factor
2481 : 0),
2482 heap->min_global_memory_size_, heap->max_global_memory_size_,
2483 new_space_capacity, mode);
2484
2485 return {new_old_generation_allocation_limit, new_global_allocation_limit};
2486}
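// Minimal sketch (editorial addition, not part of heap.cc) of the shape of the
// limit computation above: the next limit is roughly "bytes consumed at the
// last GC, scaled by a growing factor derived from GC and mutator throughput,
// plus headroom for the young generation", clamped into [min, max]. The helper
// below is a hypothetical stand-in for the MemoryController logic; <algorithm>
// is already included at the top of this file.
inline std::size_t SketchAllocationLimit(std::size_t consumed_at_last_gc,
                                         double growing_factor,
                                         std::size_t new_space_capacity,
                                         std::size_t min_limit,
                                         std::size_t max_limit) {
  // Scale last cycle's consumption, add young-generation headroom, clamp.
  const std::size_t grown =
      static_cast<std::size_t>(static_cast<double>(consumed_at_last_gc) *
                               growing_factor) +
      new_space_capacity;
  return std::clamp(grown, min_limit, max_limit);
}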
2487
2488void Heap::RecomputeLimits(GarbageCollector collector, base::TimeTicks time) {
2489 if (IsYoungGenerationCollector(collector) &&
2491 return;
2492 }
2493 if (using_initial_limit()) {
2495 return;
2496 }
2497
2498 auto new_limits = ComputeNewAllocationLimits(this);
2499 size_t new_old_generation_allocation_limit =
2500 new_limits.old_generation_allocation_limit;
2501 size_t new_global_allocation_limit = new_limits.global_allocation_limit;
2502
2503 if (collector == GarbageCollector::MARK_COMPACTOR) {
2504 if (v8_flags.memory_balancer) {
2505 // Now recompute the new allocation limit.
2506 mb_->RecomputeLimits(new_limits.global_allocation_limit -
2507 new_limits.old_generation_allocation_limit,
2508 time);
2509 } else {
2510 SetOldGenerationAndGlobalAllocationLimit(
2511 new_limits.old_generation_allocation_limit,
2512 new_limits.global_allocation_limit);
2513 }
2514
2517 tracer()->AverageMarkCompactMutatorUtilization());
2518 } else {
2520 new_old_generation_allocation_limit = std::min(
2521 new_old_generation_allocation_limit, old_generation_allocation_limit());
2522 new_global_allocation_limit =
2523 std::min(new_global_allocation_limit, global_allocation_limit());
2524 SetOldGenerationAndGlobalAllocationLimit(
2525 new_old_generation_allocation_limit, new_global_allocation_limit);
2526 }
2527
2529 GlobalMemorySizeFromV8Size(max_old_generation_size_));
2531}
2532
2533void Heap::RecomputeLimitsAfterLoadingIfNeeded() {
2535 return;
2536 }
2537
2538 if ((OldGenerationSpaceAvailable() > 0) && (GlobalMemoryAvailable() > 0)) {
2539 // Only recompute limits if memory accumulated during loading may lead to
2540 // atomic GC. If there is still room to allocate, keep the current limits.
2543 return;
2544 }
2545
2546 if (!incremental_marking()->IsMajorMarking()) {
2547 // Incremental marking should have started already but was delayed. Don't
2548 // update the limits yet to not delay starting incremental marking any
2549 // further. Limits will be updated on incremental marking start, with the
2550 // intention to give more slack and avoid an immediate large finalization
2551 // pause.
2552 return;
2553 }
2554
2556
2563
2564 auto new_limits = ComputeNewAllocationLimits(this);
2565 size_t new_old_generation_allocation_limit =
2566 new_limits.old_generation_allocation_limit;
2567 size_t new_global_allocation_limit = new_limits.global_allocation_limit;
2568
2569 new_old_generation_allocation_limit = std::max(
2570 new_old_generation_allocation_limit, old_generation_allocation_limit());
2571 new_global_allocation_limit =
2572 std::max(new_global_allocation_limit, global_allocation_limit());
2573 SetOldGenerationAndGlobalAllocationLimit(new_old_generation_allocation_limit,
2574 new_global_allocation_limit);
2575
2577 GlobalMemorySizeFromV8Size(max_old_generation_size_));
2579}
2580
2581void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags,
2582 GCTracer::Scope::ScopeId scope_id) {
2583 if (gc_prologue_callbacks_.IsEmpty()) return;
2584
2585 GCCallbacksScope scope(this);
2586 if (scope.CheckReenter()) {
2587 RCS_SCOPE(isolate(), RuntimeCallCounterId::kGCPrologueCallback);
2588 TRACE_GC(tracer(), scope_id);
2589 HandleScope handle_scope(isolate());
2590 gc_prologue_callbacks_.Invoke(gc_type, flags);
2591 }
2592}
2593
2594void Heap::CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags,
2595 GCTracer::Scope::ScopeId scope_id) {
2596 if (gc_epilogue_callbacks_.IsEmpty()) return;
2597
2598 GCCallbacksScope scope(this);
2599 if (scope.CheckReenter()) {
2600 RCS_SCOPE(isolate(), RuntimeCallCounterId::kGCEpilogueCallback);
2601 TRACE_GC(tracer(), scope_id);
2602 HandleScope handle_scope(isolate());
2603 gc_epilogue_callbacks_.Invoke(gc_type, flags);
2604 }
2605}
2606
2607void Heap::MarkCompact() {
2609
2610 PROFILE(isolate_, CodeMovingGCEvent());
2611
2613 uint64_t size_of_objects_before_gc = SizeOfObjects();
2614
2616
2617 ms_count_++;
2619
2621
2623
2625
2626 if (v8_flags.allocation_site_pretenuring) {
2627 EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
2628 }
2629 // This should be updated before PostGarbageCollectionProcessing, which
2630 // can cause another GC. Take into account the objects promoted during
2631 // GC.
2633 static_cast<size_t>(promoted_objects_size_);
2638 // Limits can now be computed based on estimate from MARK_COMPACT.
2640}
2641
2642void Heap::MinorMarkSweep() {
2643 DCHECK(v8_flags.minor_ms);
2646 DCHECK(!incremental_marking()->IsMajorMarking());
2647
2648 TRACE_GC(tracer(), GCTracer::Scope::MINOR_MS);
2649
2651 minor_mark_sweep_collector_->CollectGarbage();
2653}
2654
2655void Heap::MarkCompactEpilogue() {
2656 TRACE_GC(tracer(), GCTracer::Scope::MC_EPILOGUE);
2658
2659 isolate_->counters()->objs_since_last_full()->Set(0);
2660}
2661
2662void Heap::MarkCompactPrologue() {
2663 TRACE_GC(tracer(), GCTracer::Scope::MC_PROLOGUE);
2664 isolate_->descriptor_lookup_cache()->Clear();
2665 RegExpResultsCache::Clear(string_split_cache());
2666 RegExpResultsCache::Clear(regexp_multiple_cache());
2668
2670}
2671
2672void Heap::Scavenge() {
2674 DCHECK(!incremental_marking()->IsMarking());
2675
2676 TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
2678
2679 // Implements Cheney's copying algorithm
2680 scavenger_collector_->CollectGarbage();
2681
2683}
2684
2685bool Heap::ExternalStringTable::Contains(Tagged<String> string) {
2686 for (size_t i = 0; i < young_strings_.size(); ++i) {
2687 if (young_strings_[i] == string) return true;
2688 }
2689 for (size_t i = 0; i < old_strings_.size(); ++i) {
2690 if (old_strings_[i] == string) return true;
2691 }
2692 return false;
2693}
2694
2695void Heap::UpdateExternalString(Tagged<String> string, size_t old_payload,
2696 size_t new_payload) {
2697 DCHECK(IsExternalString(string));
2698
2699 PageMetadata* page = PageMetadata::FromHeapObject(string);
2700
2701 if (old_payload > new_payload) {
2702 page->DecrementExternalBackingStoreBytes(
2703 ExternalBackingStoreType::kExternalString, old_payload - new_payload);
2704 } else {
2705 page->IncrementExternalBackingStoreBytes(
2706 ExternalBackingStoreType::kExternalString, new_payload - old_payload);
2707 }
2708}
2709
2710Tagged<String> Heap::UpdateYoungReferenceInExternalStringTableEntry(
2711 Heap* heap, FullObjectSlot p) {
2712 // This is only used for Scavenger.
2713 DCHECK(!v8_flags.minor_ms);
2714
2715 PtrComprCageBase cage_base(heap->isolate());
2716 Tagged<HeapObject> obj = Cast<HeapObject>(*p);
2717 MapWord first_word = obj->map_word(cage_base, kRelaxedLoad);
2718
2719 Tagged<String> new_string;
2720
2721 if (InFromPage(obj)) {
2722 if (!first_word.IsForwardingAddress()) {
2723 // Unreachable external string can be finalized.
2724 Tagged<String> string = Cast<String>(obj);
2725 if (!IsExternalString(string, cage_base)) {
2726 // Original external string has been internalized.
2727 DCHECK(IsThinString(string, cage_base));
2728 return Tagged<String>();
2729 }
2730 heap->FinalizeExternalString(string);
2731 return Tagged<String>();
2732 }
2733 new_string = Cast<String>(first_word.ToForwardingAddress(obj));
2734 } else {
2735 new_string = Cast<String>(obj);
2736 }
2737
2738 // String is still reachable.
2739 if (IsThinString(new_string, cage_base)) {
2740 // Filtering Thin strings out of the external string table.
2741 return Tagged<String>();
2742 } else if (IsExternalString(new_string, cage_base)) {
2743 MutablePageMetadata::MoveExternalBackingStoreBytes(
2744 ExternalBackingStoreType::kExternalString,
2745 PageMetadata::FromAddress((*p).ptr()),
2746 PageMetadata::FromHeapObject(new_string),
2747 Cast<ExternalString>(new_string)->ExternalPayloadSize());
2748 return new_string;
2749 }
2750
2751 // Internalization can replace external strings with non-external strings.
2752 return IsExternalString(new_string, cage_base) ? new_string
2753 : Tagged<String>();
2754}
2755
2756void Heap::ExternalStringTable::VerifyYoung() {
2757#ifdef DEBUG
2758 std::set<Tagged<String>> visited_map;
2759 std::map<MutablePageMetadata*, size_t> size_map;
2761 for (size_t i = 0; i < young_strings_.size(); ++i) {
2762 Tagged<String> obj = Cast<String>(Tagged<Object>(young_strings_[i]));
2764 DCHECK_IMPLIES(!v8_flags.sticky_mark_bits,
2765 mc->Chunk()->InYoungGeneration());
2767 DCHECK(!IsTheHole(obj, heap_->isolate()));
2768 DCHECK(IsExternalString(obj));
2769 // Note: we can have repeated elements in the table.
2770 DCHECK_EQ(0, visited_map.count(obj));
2771 visited_map.insert(obj);
2772 size_map[mc] += Cast<ExternalString>(obj)->ExternalPayloadSize();
2773 }
2774 for (std::map<MutablePageMetadata*, size_t>::iterator it = size_map.begin();
2775 it != size_map.end(); it++)
2776 DCHECK_EQ(it->first->ExternalBackingStoreBytes(type), it->second);
2777#endif
2778}
2779
2780void Heap::ExternalStringTable::Verify() {
2781#ifdef DEBUG
2782 std::set<Tagged<String>> visited_map;
2783 std::map<MutablePageMetadata*, size_t> size_map;
2785 VerifyYoung();
2786 for (size_t i = 0; i < old_strings_.size(); ++i) {
2787 Tagged<String> obj = Cast<String>(Tagged<Object>(old_strings_[i]));
2789 DCHECK_IMPLIES(!v8_flags.sticky_mark_bits,
2790 !mc->Chunk()->InYoungGeneration());
2792 DCHECK(!IsTheHole(obj, heap_->isolate()));
2793 DCHECK(IsExternalString(obj));
2794 // Note: we can have repeated elements in the table.
2795 DCHECK_EQ(0, visited_map.count(obj));
2796 visited_map.insert(obj);
2797 size_map[mc] += Cast<ExternalString>(obj)->ExternalPayloadSize();
2798 }
2799 for (std::map<MutablePageMetadata*, size_t>::iterator it = size_map.begin();
2800 it != size_map.end(); it++)
2801 DCHECK_EQ(it->first->ExternalBackingStoreBytes(type), it->second);
2802#endif
2803}
2804
2805void Heap::ExternalStringTable::UpdateYoungReferences(
2806 Heap::ExternalStringTableUpdaterCallback updater_func) {
2807 if (young_strings_.empty()) return;
2808
2809 FullObjectSlot start(young_strings_.data());
2810 FullObjectSlot end(young_strings_.data() + young_strings_.size());
2811 FullObjectSlot last = start;
2812
2813 for (FullObjectSlot p = start; p < end; ++p) {
2814 Tagged<String> target = updater_func(heap_, p);
2815
2816 if (target.is_null()) continue;
2817
2818 DCHECK(IsExternalString(target));
2819
2820 if (HeapLayout::InYoungGeneration(target)) {
2821 // String is still in new space. Update the table entry.
2822 last.store(target);
2823 ++last;
2824 } else {
2825 // String got promoted. Move it to the old string list.
2826 old_strings_.push_back(target);
2827 }
2828 }
2829
2830 DCHECK(last <= end);
2831 young_strings_.resize(last - start);
2832 if (v8_flags.verify_heap) {
2833 VerifyYoung();
2834 }
2835}
2836
2837void Heap::ExternalStringTable::PromoteYoung() {
2838 old_strings_.reserve(old_strings_.size() + young_strings_.size());
2839 std::move(std::begin(young_strings_), std::end(young_strings_),
2840 std::back_inserter(old_strings_));
2841 young_strings_.clear();
2842}
2843
2844void Heap::ExternalStringTable::IterateYoung(RootVisitor* v) {
2845 if (!young_strings_.empty()) {
2846 v->VisitRootPointers(
2847 Root::kExternalStringsTable, nullptr,
2848 FullObjectSlot(young_strings_.data()),
2849 FullObjectSlot(young_strings_.data() + young_strings_.size()));
2850 }
2851}
2852
2853void Heap::ExternalStringTable::IterateAll(RootVisitor* v) {
2854 IterateYoung(v);
2855 if (!old_strings_.empty()) {
2856 v->VisitRootPointers(
2857 Root::kExternalStringsTable, nullptr,
2858 FullObjectSlot(old_strings_.data()),
2859 FullObjectSlot(old_strings_.data() + old_strings_.size()));
2860 }
2861}
2862
2863void Heap::UpdateYoungReferencesInExternalStringTable(
2864 ExternalStringTableUpdaterCallback updater_func) {
2865 external_string_table_.UpdateYoungReferences(updater_func);
2866}
2867
2868void Heap::ExternalStringTable::UpdateReferences(
2869 Heap::ExternalStringTableUpdaterCallback updater_func) {
2870 if (!old_strings_.empty()) {
2871 FullObjectSlot start(old_strings_.data());
2872 FullObjectSlot end(old_strings_.data() + old_strings_.size());
2873 for (FullObjectSlot p = start; p < end; ++p)
2874 p.store(updater_func(heap_, p));
2875 }
2876
2877 UpdateYoungReferences(updater_func);
2878}
2879
2880void Heap::UpdateReferencesInExternalStringTable(
2881 ExternalStringTableUpdaterCallback updater_func) {
2882 external_string_table_.UpdateReferences(updater_func);
2883}
2884
2885void Heap::ProcessAllWeakReferences(WeakObjectRetainer* retainer) {
2886 ProcessNativeContexts(retainer);
2887 ProcessAllocationSites(retainer);
2888 ProcessDirtyJSFinalizationRegistries(retainer);
2889}
2890
2891void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
2892 Tagged<Object> head =
2893 VisitWeakList<Context>(this, native_contexts_list(), retainer);
2894 // Update the head of the list of contexts.
2895 set_native_contexts_list(head);
2896}
2897
2898void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
2899 Tagged<Object> allocation_site_obj =
2901 retainer);
2904 allocation_site_obj));
2905}
2906
2907void Heap::ProcessDirtyJSFinalizationRegistries(WeakObjectRetainer* retainer) {
2908 Tagged<Object> head = VisitWeakList<JSFinalizationRegistry>(
2909 this, dirty_js_finalization_registries_list(), retainer);
2910 set_dirty_js_finalization_registries_list(head);
2911 // If the list is empty, set the tail to undefined. Otherwise the tail is set
2912 // by WeakListVisitor<JSFinalizationRegistry>::VisitLiveObject.
2913 if (IsUndefined(head, isolate())) {
2914 set_dirty_js_finalization_registries_list_tail(head);
2915 }
2916}
2917
2928
2929void Heap::ForeachAllocationSite(
2930 Tagged<Object> list,
2931 const std::function<void(Tagged<AllocationSite>)>& visitor) {
2933 Tagged<Object> current = list;
2934 while (IsAllocationSite(current)) {
2937 visitor(site);
2938 Tagged<Object> current_nested = site->nested_site();
2939 while (IsAllocationSite(current_nested)) {
2940 Tagged<AllocationSite> nested_site = Cast<AllocationSite>(current_nested);
2941 visitor(nested_site);
2942 current_nested = nested_site->nested_site();
2943 }
2944 current = site->weak_next();
2945 }
2946}
2947
2948void Heap::ResetAllAllocationSitesDependentCode(AllocationType allocation) {
2949 DisallowGarbageCollection no_gc_scope;
2950 bool marked = false;
2951
2952 ForeachAllocationSite(
2953 allocation_sites_list(),
2954 [&marked, allocation, this](Tagged<AllocationSite> site) {
2955 if (site->GetAllocationType() == allocation) {
2956 site->ResetPretenureDecision();
2957 site->set_deopt_dependent_code(true);
2958 marked = true;
2960 return;
2961 }
2962 });
2963 if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
2964}
2965
2966void Heap::EvaluateOldSpaceLocalPretenuring(
2967 uint64_t size_of_objects_before_gc) {
2968 uint64_t size_of_objects_after_gc = SizeOfObjects();
2969 double old_generation_survival_rate =
2970 (static_cast<double>(size_of_objects_after_gc) * 100) /
2971 static_cast<double>(size_of_objects_before_gc);
2972
2973 if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
2974 // Too many objects died in the old generation, pretenuring of wrong
2975 // allocation sites may be the cause for that. We have to deopt all
2976 // dependent code registered in the allocation sites to re-evaluate
2977 // our pretenuring decisions.
2978 ResetAllAllocationSitesDependentCode(AllocationType::kOld);
2979 if (v8_flags.trace_pretenuring) {
2980 PrintF(
2981 "Deopt all allocation sites dependent code due to low survival "
2982 "rate in the old generation %f\n",
2983 old_generation_survival_rate);
2984 }
2985 }
2986}
2987
2990
2991#ifdef V8_COMPRESS_POINTERS
2992// TODO(ishell, v8:8875): When pointer compression is enabled the kHeaderSize
2993// is only kTaggedSize aligned but we can keep using unaligned access since
2994// both x64 and arm64 architectures (where pointer compression supported)
2995// allow unaligned access to doubles.
2997#else
2999#endif
3000
3001int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
3002 if (V8_COMPRESS_POINTERS_8GB_BOOL) return 0;
3003 switch (alignment) {
3004 case kTaggedAligned:
3005 return 0;
3006 case kDoubleAligned:
3007 case kDoubleUnaligned:
3008 return kDoubleSize - kTaggedSize;
3009 default:
3010 UNREACHABLE();
3011 }
3012}
3013
3014// static
3015int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
3016 if (V8_COMPRESS_POINTERS_8GB_BOOL) return 0;
3017 if (alignment == kDoubleAligned && (address & kDoubleAlignmentMask) != 0)
3018 return kTaggedSize;
3019 if (alignment == kDoubleUnaligned && (address & kDoubleAlignmentMask) == 0) {
3020 return kDoubleSize - kTaggedSize; // No fill if double is always aligned.
3021 }
3022 return 0;
3023}
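// Worked example (editorial addition, not part of heap.cc) for the two helpers
// above, with kTaggedSize == 4 and kDoubleSize == 8 (pointer compression):
//   GetMaximumFillToAlign(kDoubleAligned) == 8 - 4 == 4 bytes worst case.
//   GetFillToAlign(addr, kDoubleAligned)  == 4 if (addr & 7) != 0, else 0,
// so a double-aligned allocation may need one tagged-size filler word in front
// of the object to push its payload onto an 8-byte boundary.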
3024
3025size_t Heap::GetCodeRangeReservedAreaSize() {
3027}
3028
3029Tagged<HeapObject> Heap::PrecedeWithFiller(Tagged<HeapObject> object,
3030 int filler_size) {
3031 CreateFillerObjectAt(object.address(), filler_size);
3032 return HeapObject::FromAddress(object.address() + filler_size);
3033}
3034
3035Tagged<HeapObject> Heap::PrecedeWithFillerBackground(Tagged<HeapObject> object,
3036 int filler_size) {
3037 CreateFillerObjectAtBackground(
3038 WritableFreeSpace::ForNonExecutableMemory(object.address(), filler_size));
3039 return HeapObject::FromAddress(object.address() + filler_size);
3040}
3041
3042Tagged<HeapObject> Heap::AlignWithFillerBackground(
3043 Tagged<HeapObject> object, int object_size, int allocation_size,
3044 AllocationAlignment alignment) {
3045 const int filler_size = allocation_size - object_size;
3046 DCHECK_LT(0, filler_size);
3047 const int pre_filler = GetFillToAlign(object.address(), alignment);
3048 if (pre_filler) {
3049 object = PrecedeWithFillerBackground(object, pre_filler);
3050 }
3051 DCHECK_LE(0, filler_size - pre_filler);
3052 const int post_filler = filler_size - pre_filler;
3053 if (post_filler) {
3054 CreateFillerObjectAtBackground(WritableFreeSpace::ForNonExecutableMemory(
3055 object.address() + object_size, post_filler));
3056 }
3057 return object;
3058}
3059
3060void* Heap::AllocateExternalBackingStore(
3061 const std::function<void*(size_t)>& allocate, size_t byte_length) {
3062 size_t max = isolate()->array_buffer_allocator()->MaxAllocationSize();
3064 if (byte_length > max) {
3065 return nullptr;
3066 }
3067 if (!always_allocate() && new_space()) {
3068 size_t new_space_backing_store_bytes =
3070 if ((!incremental_marking()->IsMajorMarking()) &&
3071 new_space_backing_store_bytes >= 2 * DefaultMaxSemiSpaceSize() &&
3072 new_space_backing_store_bytes >= byte_length) {
3073 // Performing a young generation GC amortizes over the allocated backing
3074 // store bytes and may free enough external bytes for this allocation.
3077 }
3078 }
3079 void* result = allocate(byte_length);
3080 if (result) return result;
3081 if (!always_allocate()) {
3082 for (int i = 0; i < 2; i++) {
3085 result = allocate(byte_length);
3086 if (result) return result;
3087 }
3090 }
3091 return allocate(byte_length);
3092}
3093
3094// When the old generation allocation limit is not yet configured (before the
3095// first full GC), this method shrinks the initially very large old generation
3096// size. It can only shrink the allocation limits, never increase them again.
3097void Heap::ShrinkOldGenerationAllocationLimitIfNotConfigured() {
3099 tracer()->SurvivalEventsRecorded()) {
3100 base::MutexGuard guard(old_space()->mutex());
3101 const size_t minimum_growing_step =
3104 size_t new_old_generation_allocation_limit =
3105 std::max(OldGenerationConsumedBytes() + minimum_growing_step,
3106 static_cast<size_t>(
3107 static_cast<double>(old_generation_allocation_limit()) *
3108 (tracer()->AverageSurvivalRatio() / 100)));
3109 new_old_generation_allocation_limit = std::min(
3110 new_old_generation_allocation_limit, old_generation_allocation_limit());
3111 size_t new_global_allocation_limit = std::max(
3112 GlobalConsumedBytes() + minimum_growing_step,
3113 static_cast<size_t>(static_cast<double>(global_allocation_limit()) *
3114 (tracer()->AverageSurvivalRatio() / 100)));
3115 new_global_allocation_limit =
3116 std::min(new_global_allocation_limit, global_allocation_limit());
3117 SetOldGenerationAndGlobalAllocationLimit(
3118 new_old_generation_allocation_limit, new_global_allocation_limit);
3119 }
3120}
3121
3122void Heap::FlushNumberStringCache() {
3123 // Flush the number to string cache.
3124 int len = number_string_cache()->length();
3125 ReadOnlyRoots roots{isolate()};
3126 for (int i = 0; i < len; i++) {
3127 number_string_cache()->set(i, roots.undefined_value(), SKIP_WRITE_BARRIER);
3128 }
3129}
3130
3131namespace {
3132
3133void CreateFillerObjectAtImpl(const WritableFreeSpace& free_space, Heap* heap,
3134 ClearFreedMemoryMode clear_memory_mode) {
3135 int size = free_space.Size();
3136 if (size == 0) return;
3141
3142 // TODO(v8:13070): Filler sizes are irrelevant for 8GB+ heaps. Adding them
3143 // should be avoided in this mode.
3144 ReadOnlyRoots roots(heap);
3145 if (size == kTaggedSize) {
3146 HeapObject::SetFillerMap(free_space,
3147 roots.unchecked_one_pointer_filler_map());
3148 // Ensure the filler map is properly initialized.
3149 DCHECK(IsMap(
3150 HeapObject::FromAddress(free_space.Address())->map(heap->isolate())));
3151 } else if (size == 2 * kTaggedSize) {
3152 HeapObject::SetFillerMap(free_space,
3153 roots.unchecked_two_pointer_filler_map());
3154 if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
3155 free_space.ClearTagged<kTaggedSize>((size / kTaggedSize) - 1);
3156 }
3157 // Ensure the filler map is properly initialized.
3158 DCHECK(IsMap(
3159 HeapObject::FromAddress(free_space.Address())->map(heap->isolate())));
3160 } else {
3161 DCHECK_GT(size, 2 * kTaggedSize);
3162 HeapObject::SetFillerMap(free_space, roots.unchecked_free_space_map());
3163 FreeSpace::SetSize(free_space, size, kRelaxedStore);
3164 if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
3165 free_space.ClearTagged<2 * kTaggedSize>((size / kTaggedSize) - 2);
3166 }
3167
3168 // During bootstrapping we need to create a free space object before its
3169 // map is initialized. In this case we cannot access the map yet, as it
3170 // might be null, or not set up properly yet.
3171 DCHECK_IMPLIES(roots.is_initialized(RootIndex::kFreeSpaceMap),
3172 IsMap(HeapObject::FromAddress(free_space.Address())
3173 ->map(heap->isolate())));
3174 }
3175}
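// Summary sketch (editorial addition, not part of heap.cc) of the filler
// selection in CreateFillerObjectAtImpl above, for a gap of `size` bytes:
//   size == kTaggedSize      -> one-pointer filler map, no length field.
//   size == 2 * kTaggedSize  -> two-pointer filler map, no length field.
//   size  > 2 * kTaggedSize  -> FreeSpace map plus an explicit size field,
//                               so the sweeper can walk and coalesce it.
// Optionally the freed payload behind the map/size words is cleared when
// ClearFreedMemoryMode::kClearFreedMemory is requested.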
3176
3177#ifdef DEBUG
3178void VerifyNoNeedToClearSlots(Address start, Address end) {
3179 MemoryChunk* chunk = MemoryChunk::FromAddress(start);
3180 if (chunk->InReadOnlySpace()) return;
3181 if (!v8_flags.sticky_mark_bits && chunk->InYoungGeneration()) return;
3182 MutablePageMetadata* mutable_page =
3183 MutablePageMetadata::cast(chunk->Metadata());
3184 BaseSpace* space = mutable_page->owner();
3185 space->heap()->VerifySlotRangeHasNoRecordedSlots(start, end);
3186}
3187#else
3188void VerifyNoNeedToClearSlots(Address start, Address end) {}
3189#endif // DEBUG
3190
3191} // namespace
3192
3193void Heap::CreateFillerObjectAtBackground(const WritableFreeSpace& free_space) {
3194 // TODO(leszeks): Verify that no slots need to be recorded.
3195 // Do not verify whether slots are cleared here: the concurrent thread is not
3196 // allowed to access the main thread's remembered set.
3197 CreateFillerObjectAtRaw(free_space,
3198 ClearFreedMemoryMode::kDontClearFreedMemory,
3199 ClearRecordedSlots::kNo, VerifyNoSlotsRecorded::kNo);
3200}
3201
3202void Heap::CreateFillerObjectAt(Address addr, int size,
3203 ClearFreedMemoryMode clear_memory_mode) {
3204 if (size == 0) return;
3205 if (MemoryChunk::FromAddress(addr)->executable()) {
3206 WritableJitPage jit_page(addr, size);
3207 WritableFreeSpace free_space = jit_page.FreeRange(addr, size);
3208 CreateFillerObjectAtRaw(free_space, clear_memory_mode,
3209 ClearRecordedSlots::kNo,
3210 VerifyNoSlotsRecorded::kYes);
3211 } else {
3212 WritableFreeSpace free_space =
3213 WritableFreeSpace::ForNonExecutableMemory(addr, size);
3214 CreateFillerObjectAtRaw(free_space, clear_memory_mode,
3215 ClearRecordedSlots::kNo,
3216 VerifyNoSlotsRecorded::kYes);
3217 }
3218}
3219
3220void Heap::CreateFillerObjectAtRaw(
3221 const WritableFreeSpace& free_space, ClearFreedMemoryMode clear_memory_mode,
3222 ClearRecordedSlots clear_slots_mode,
3223 VerifyNoSlotsRecorded verify_no_slots_recorded) {
3224 // TODO(mlippautz): It would be nice to DCHECK that we never call this
3225 // with {addr} pointing into large object space; however we currently do,
3226 // see, e.g., Factory::NewFillerObject and in many tests.
3227 size_t size = free_space.Size();
3228 if (size == 0) return;
3229 CreateFillerObjectAtImpl(free_space, this, clear_memory_mode);
3230 Address addr = free_space.Address();
3231 if (clear_slots_mode == ClearRecordedSlots::kYes) {
3232 ClearRecordedSlotRange(addr, addr + size);
3233 } else if (verify_no_slots_recorded == VerifyNoSlotsRecorded::kYes) {
3234 VerifyNoNeedToClearSlots(addr, addr + size);
3235 }
3236}
3237
3238bool Heap::CanMoveObjectStart(Tagged<HeapObject> object) {
3239 if (!v8_flags.move_object_start) {
3240 return false;
3241 }
3242
3243 // Sampling heap profiler may have a reference to the object.
3244 if (heap_profiler()->is_sampling_allocations()) {
3245 return false;
3246 }
3247
3248 if (IsLargeObject(object)) {
3249 return false;
3250 }
3251
3252 // Compilation jobs may have references to the object.
3253 if (isolate()->concurrent_recompilation_enabled() &&
3254 isolate()->optimizing_compile_dispatcher()->HasJobs()) {
3255 return false;
3256 }
3257
3258 // Concurrent marking does not support moving object starts without snapshot
3259 // protocol.
3260 //
3261 // TODO(v8:13726): This can be improved via concurrently reading the contents
3262 // in the marker at the cost of some complexity.
3263 if (incremental_marking()->IsMarking()) {
3264 return false;
3265 }
3266
3267 // Concurrent sweeper does not support moving object starts. It assumes that
3268 // markbits (black regions) and object starts are matching up.
3269 if (!MutablePageMetadata::FromHeapObject(object)->SweepingDone()) {
3270 return false;
3271 }
3272
3273 return true;
3274}
3275
3276bool Heap::IsImmovable(Tagged<HeapObject> object) {
3277 MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
3278 return chunk->NeverEvacuate() || chunk->IsLargePage();
3279}
3280
3281bool Heap::IsLargeObject(Tagged<HeapObject> object) {
3282 return MemoryChunk::FromHeapObject(object)->IsLargePage();
3283}
3284
3285#ifdef ENABLE_SLOW_DCHECKS
3286namespace {
3287
3288class LeftTrimmerVerifierRootVisitor : public RootVisitor {
3289 public:
3290 explicit LeftTrimmerVerifierRootVisitor(Tagged<FixedArrayBase> to_check)
3291 : to_check_(to_check) {}
3292
3293 LeftTrimmerVerifierRootVisitor(const LeftTrimmerVerifierRootVisitor&) =
3294 delete;
3295 LeftTrimmerVerifierRootVisitor& operator=(
3296 const LeftTrimmerVerifierRootVisitor&) = delete;
3297
3298 void VisitRootPointers(Root root, const char* description,
3299 FullObjectSlot start, FullObjectSlot end) override {
3300 for (FullObjectSlot p = start; p < end; ++p) {
3301 // V8_EXTERNAL_CODE_SPACE specific: we might be comparing
3302 // InstructionStream object with non-InstructionStream object here and it
3303 // might produce false positives because operator== for tagged values
3304 // compares only lower 32 bits when pointer compression is enabled.
3305 DCHECK_NE((*p).ptr(), to_check_.ptr());
3306 }
3307 }
3308
3309 void VisitRootPointers(Root root, const char* description,
3310 OffHeapObjectSlot start,
3311 OffHeapObjectSlot end) override {
3312 DCHECK(root == Root::kStringTable ||
3313 root == Root::kSharedStructTypeRegistry);
3314 // We can skip iterating the string table and shared struct type registry,
3315 // they don't point to any fixed arrays.
3316 }
3317
3318 private:
3319 Tagged<FixedArrayBase> to_check_;
3320};
3321} // namespace
3322#endif // ENABLE_SLOW_DCHECKS
3323
3324namespace {
3325bool MayContainRecordedSlots(Tagged<HeapObject> object) {
3326 // New space objects do not have recorded slots.
3327 if (HeapLayout::InYoungGeneration(object)) {
3328 return false;
3329 }
3330 // Allowlist objects that definitely do not have pointers.
3331 if (IsByteArray(object) || IsFixedDoubleArray(object)) return false;
3332 // Conservatively return true for other objects.
3333 return true;
3334}
3335} // namespace
3336
3337void Heap::OnMoveEvent(Tagged<HeapObject> source, Tagged<HeapObject> target,
3338 int size_in_bytes) {
3339 if (heap_profiler()->is_tracking_object_moves()) {
3340 heap_profiler()->ObjectMoveEvent(source.address(), target.address(),
3341 size_in_bytes,
3342 /*is_embedder_object=*/false);
3343 }
3344 for (auto& tracker : allocation_trackers_) {
3345 tracker->MoveEvent(source.address(), target.address(), size_in_bytes);
3346 }
3347 if (IsSharedFunctionInfo(target, isolate_)) {
3348 LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source.address(),
3349 target.address()));
3350 } else if (IsNativeContext(target, isolate_)) {
3351 if (isolate_->current_embedder_state() != nullptr) {
3352 isolate_->current_embedder_state()->OnMoveEvent(source.address(),
3353 target.address());
3354 }
3356 NativeContextMoveEvent(source.address(), target.address()));
3357 } else if (IsMap(target, isolate_)) {
3358 LOG(isolate_, MapMoveEvent(Cast<Map>(source), Cast<Map>(target)));
3359 }
3360}
3361
3362Tagged<FixedArrayBase> Heap::LeftTrimFixedArray(Tagged<FixedArrayBase> object,
3363 int elements_to_trim) {
3364 if (elements_to_trim == 0) {
3365 // This simplifies reasoning in the rest of the function.
3366 return object;
3367 }
3368 CHECK(!object.is_null());
3369 DCHECK(CanMoveObjectStart(object));
3370 // Add custom visitor to concurrent marker if new left-trimmable type
3371 // is added.
3372 DCHECK(IsFixedArray(object) || IsFixedDoubleArray(object));
3373 const int element_size = IsFixedArray(object) ? kTaggedSize : kDoubleSize;
3374 const int bytes_to_trim = elements_to_trim * element_size;
3375 Tagged<Map> map = object->map();
3376
3377 // For now this trick is only applied to fixed arrays which may be in new
3378 // space or old space. In a large object space the object's start must
3379 // coincide with the chunk start and thus the trick is just not applicable.
3380 DCHECK(!IsLargeObject(object));
3381 DCHECK(object->map() != ReadOnlyRoots(this).fixed_cow_array_map());
3382
3383 static_assert(offsetof(FixedArrayBase, map_) == 0);
3384 static_assert(offsetof(FixedArrayBase, length_) == kTaggedSize);
3385 static_assert(sizeof(FixedArrayBase) == 2 * kTaggedSize);
3386
3387 const int len = object->length();
3388 DCHECK(elements_to_trim <= len);
3389
3390 // Calculate location of new array start.
3391 Address old_start = object.address();
3392 Address new_start = old_start + bytes_to_trim;
3393
3394 // Technically in new space this write might be omitted (except for
3395 // debug mode which iterates through the heap), but to play safer
3396 // we still do it.
3397 CreateFillerObjectAtRaw(
3398 WritableFreeSpace::ForNonExecutableMemory(old_start, bytes_to_trim),
3399 ClearFreedMemoryMode::kClearFreedMemory,
3400 MayContainRecordedSlots(object) ? ClearRecordedSlots::kYes
3401 : ClearRecordedSlots::kNo,
3402 VerifyNoSlotsRecorded::kYes);
3403
3404 // Initialize header of the trimmed array. Since left trimming is only
3405 // performed on pages which are not concurrently swept, creating a filler
3406 // object does not require synchronization.
3407 RELAXED_WRITE_FIELD(object, bytes_to_trim,
3408 Tagged<Object>(MapWord::FromMap(map).ptr()));
3409 RELAXED_WRITE_FIELD(object, bytes_to_trim + kTaggedSize,
3410 Smi::FromInt(len - elements_to_trim));
3411
3412 Tagged<FixedArrayBase> new_object =
3413 Cast<FixedArrayBase>(HeapObject::FromAddress(new_start));
3414
3415 if (isolate()->log_object_relocation()) {
3416 // Notify the heap profiler of change in object layout.
3417 OnMoveEvent(object, new_object, new_object->Size());
3418 }
3419
3420#ifdef ENABLE_SLOW_DCHECKS
3421 if (v8_flags.enable_slow_asserts) {
3422 // Make sure the stack or other roots (e.g., Handles) don't contain pointers
3423 // to the original FixedArray (which is now the filler object).
3424 std::optional<IsolateSafepointScope> safepoint_scope;
3425
3426 {
3427 AllowGarbageCollection allow_gc;
3428 safepoint_scope.emplace(this);
3429 }
3430
3431 LeftTrimmerVerifierRootVisitor root_visitor(object);
3432 ReadOnlyRoots(this).Iterate(&root_visitor);
3433
3434 // Stale references are allowed in some locations. IterateRoots() uses
3435 // ClearStaleLeftTrimmedPointerVisitor internally to clear such references
3436 // beforehand.
3437 IterateRoots(&root_visitor,
3439 }
3440#endif // ENABLE_SLOW_DCHECKS
3441
3442 return new_object;
3443}
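// Layout sketch (editorial addition, not part of heap.cc) for the left trim
// above, trimming K = elements_to_trim elements off the front of an array:
//
//   before:  | map | length   | e0  e1  ...  eK  ...  eN |
//   after:   |  filler (bytes_to_trim) | map | length-K | eK ... eN |
//                                      ^
//                                      new_start = old_start + bytes_to_trim
//
// The filler keeps the page iterable, the new header is written with relaxed
// stores over what used to be element data, and the returned object is simply
// the tagged pointer at new_start.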
3444
3445template <typename Array>
3446void Heap::RightTrimArray(Tagged<Array> object, int new_capacity,
3447 int old_capacity) {
3448 DCHECK_EQ(old_capacity, object->capacity());
3449 DCHECK_LT(new_capacity, old_capacity);
3450 DCHECK_GE(new_capacity, 0);
3451
3452 if constexpr (Array::kElementsAreMaybeObject) {
3453 // For MaybeObject elements, this function is safe to use only at the end
3454 // of the mark compact collection: When marking, we record the weak slots,
3455 // and shrinking invalidates them.
3457 }
3458
3459 const int bytes_to_trim = (old_capacity - new_capacity) * Array::kElementSize;
3460
3461 // Calculate location of new array end.
3462 const int old_size = Array::SizeFor(old_capacity);
3463 DCHECK_EQ(object->AllocatedSize(), old_size);
3464 Address old_end = object.address() + old_size;
3465 Address new_end = old_end - bytes_to_trim;
3466
3467 const bool clear_slots = MayContainRecordedSlots(object);
3468
3469 // Technically in new space this write might be omitted (except for debug
3470 // mode which iterates through the heap), but to play safer we still do it.
3471 // We do not create a filler for objects in a large object space.
3472 if (!IsLargeObject(object)) {
3474 object, old_size, old_size - bytes_to_trim,
3476 if (!v8_flags.black_allocated_pages) {
3478 // Clear the mark bits of the black area that belongs now to the filler.
3479 // This is an optimization. The sweeper will release black fillers anyway.
3480 if (incremental_marking()->black_allocation() &&
3481 marking_state()->IsMarked(filler)) {
3482 PageMetadata* page = PageMetadata::FromAddress(new_end);
3483 page->marking_bitmap()->ClearRange<AccessMode::ATOMIC>(
3484 MarkingBitmap::AddressToIndex(new_end),
3485 MarkingBitmap::LimitAddressToIndex(new_end + bytes_to_trim));
3486 }
3487 }
3488 } else if (clear_slots) {
3489 // Large objects are not swept, so it is not necessary to clear the
3490 // recorded slot.
3492 (old_end - new_end) / kTaggedSize);
3493 }
3494
3495 // Initialize header of the trimmed array. We are storing the new capacity
3496 // using release store after creating a filler for the left-over space to
3497 // avoid races with the sweeper thread.
3498 object->set_capacity(new_capacity, kReleaseStore);
3499
3500 // Notify the heap object allocation tracker of change in object layout. The
3501 // array may not be moved during GC, and size has to be adjusted nevertheless.
3502 for (auto& tracker : allocation_trackers_) {
3503 tracker->UpdateObjectSizeEvent(object.address(),
3504 Array::SizeFor(new_capacity));
3505 }
3506}
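// Worked example (editorial addition, not part of heap.cc) for RightTrimArray
// above: shrinking a FixedArray (kElementSize == kTaggedSize == 8 without
// pointer compression) from old_capacity = 16 to new_capacity = 10:
//   bytes_to_trim = (16 - 10) * 8 = 48
//   old_end       = object.address() + SizeFor(16)
//   new_end       = old_end - 48
// A filler object is written over [new_end, old_end) and only then is the
// capacity field updated with a release store, so a concurrent sweeper never
// sees a capacity that extends into the freed tail.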
3507
3508#define DEF_RIGHT_TRIM(T) \
3509 template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void \
3510 Heap::RightTrimArray<T>(Tagged<T> object, int new_capacity, \
3511 int old_capacity);
3513#undef DEF_RIGHT_TRIM
3514
3515void Heap::MakeHeapIterable() {
3516 EnsureSweepingCompleted(SweepingForcedFinalizationMode::kV8Only);
3517
3518 MakeLinearAllocationAreasIterable();
3519}
3520
3521void Heap::MakeLinearAllocationAreasIterable() {
3522 allocator()->MakeLinearAllocationAreasIterable();
3523
3524 safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
3525 local_heap->MakeLinearAllocationAreasIterable();
3526 });
3527
3528 if (isolate()->is_shared_space_isolate()) {
3529 isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) {
3530 client->heap()->MakeLinearAllocationAreasIterable();
3531 });
3532 }
3533}
3534
3535void Heap::FreeLinearAllocationAreas() {
3536 FreeMainThreadLinearAllocationAreas();
3537
3538 safepoint()->IterateLocalHeaps(
3539 [](LocalHeap* local_heap) { local_heap->FreeLinearAllocationAreas(); });
3540
3541 if (isolate()->is_shared_space_isolate()) {
3542 isolate()->global_safepoint()->IterateClientIsolates(
3543 [](Isolate* client) { client->heap()->FreeLinearAllocationAreas(); });
3544 }
3545}
3546
3547void Heap::FreeMainThreadLinearAllocationAreas() {
3548 allocator()->FreeLinearAllocationAreas();
3549}
3550
3551void Heap::MarkSharedLinearAllocationAreasBlack() {
3552 DCHECK(!v8_flags.black_allocated_pages);
3553 allocator()->MarkSharedLinearAllocationAreasBlack();
3555
3556 safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
3557 local_heap->MarkSharedLinearAllocationAreasBlack();
3558 });
3559}
3560
3561void Heap::UnmarkSharedLinearAllocationAreas() {
3562 DCHECK(!v8_flags.black_allocated_pages);
3563 allocator()->UnmarkSharedLinearAllocationAreas();
3565 safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
3566 local_heap->UnmarkSharedLinearAllocationAreas();
3567 });
3568}
3569
3570void Heap::FreeSharedLinearAllocationAreasAndResetFreeLists() {
3571 DCHECK(v8_flags.black_allocated_pages);
3572 allocator()->FreeSharedLinearAllocationAreasAndResetFreeLists();
3574
3575 safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
3576 local_heap->FreeSharedLinearAllocationAreasAndResetFreeLists();
3577 });
3578}
3579
3580void Heap::Unmark() {
3581 DCHECK(v8_flags.sticky_mark_bits);
3583
3584 auto unmark_space = [](auto& space) {
3585 for (auto* page : space) {
3586 page->marking_bitmap()->template Clear<AccessMode::NON_ATOMIC>();
3587 page->Chunk()->SetMajorGCInProgress();
3588 page->SetLiveBytes(0);
3589 }
3590 };
3591
3592 unmark_space(*old_space());
3593 unmark_space(*lo_space());
3594
3595 if (isolate()->is_shared_space_isolate()) {
3596 unmark_space(*shared_space());
3597 unmark_space(*shared_lo_space());
3598 }
3599
3600 {
3601 RwxMemoryWriteScope scope("For writing flags.");
3602 unmark_space(*code_space());
3603 unmark_space(*code_lo_space());
3604 }
3605
3606 unmark_space(*trusted_space());
3607 unmark_space(*trusted_lo_space());
3608}
3609
3610void Heap::DeactivateMajorGCInProgressFlag() {
3611 DCHECK(v8_flags.sticky_mark_bits);
3613
3614 auto deactivate_space = [](auto& space) {
3615 for (auto* metadata : space) {
3616 metadata->Chunk()->ResetMajorGCInProgress();
3617 }
3618 };
3619
3620 deactivate_space(*old_space());
3621 deactivate_space(*lo_space());
3622
3623 {
3624 RwxMemoryWriteScope scope("For writing flags.");
3625 deactivate_space(*code_space());
3626 deactivate_space(*code_lo_space());
3627 }
3628
3629 if (isolate()->is_shared_space_isolate()) {
3630 deactivate_space(*shared_space());
3631 deactivate_space(*shared_lo_space());
3632 }
3633
3634 deactivate_space(*trusted_space());
3635 deactivate_space(*trusted_lo_space());
3636}
3637
3638namespace {
3639
3640double ComputeMutatorUtilizationImpl(double mutator_speed,
3641 std::optional<double> gc_speed) {
3642 constexpr double kMinMutatorUtilization = 0.0;
3643 constexpr double kConservativeGcSpeedInBytesPerMillisecond = 200000;
3644 if (mutator_speed == 0) return kMinMutatorUtilization;
3645 if (!gc_speed) gc_speed = kConservativeGcSpeedInBytesPerMillisecond;
3646 // Derivation:
3647 // mutator_utilization = mutator_time / (mutator_time + gc_time)
3648 // mutator_time = 1 / mutator_speed
3649 // gc_time = 1 / gc_speed
3650 // mutator_utilization = (1 / mutator_speed) /
3651 // (1 / mutator_speed + 1 / gc_speed)
3652 // mutator_utilization = gc_speed / (mutator_speed + gc_speed)
3653 return *gc_speed / (mutator_speed + *gc_speed);
3654}
3655
3656} // namespace
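// Worked example (editorial addition, not part of heap.cc) of the formula
// derived above: with mutator_speed = 1,000,000 bytes/ms of allocation and
// gc_speed = 4,000,000 bytes/ms of collection,
//   mutator_utilization = gc_speed / (mutator_speed + gc_speed)
//                       = 4e6 / (1e6 + 4e6) = 0.8,
// i.e. the mutator would get roughly 80% of the time if allocation and
// collection were perfectly interleaved. A missing gc_speed falls back to the
// conservative 200,000 bytes/ms constant above.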
3657
3658double Heap::ComputeMutatorUtilization(const char* tag, double mutator_speed,
3659 std::optional<double> gc_speed) {
3660 double result = ComputeMutatorUtilizationImpl(mutator_speed, gc_speed);
3661 if (v8_flags.trace_mutator_utilization) {
3662 isolate()->PrintWithTimestamp(
3663 "%s mutator utilization = %.3f ("
3664 "mutator_speed=%.f, gc_speed=%.f)\n",
3665 tag, result, mutator_speed, gc_speed.value_or(0));
3666 }
3667 return result;
3668}
3669
3670bool Heap::HasLowYoungGenerationAllocationRate() {
3671 const double mu = ComputeMutatorUtilization(
3672 "Young generation",
3673 tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond(),
3674 tracer()->YoungGenerationSpeedInBytesPerMillisecond(
3676 constexpr double kHighMutatorUtilization = 0.993;
3677 return mu > kHighMutatorUtilization;
3678}
3679
3680bool Heap::HasLowOldGenerationAllocationRate() {
3681 const double mu = ComputeMutatorUtilization(
3682 "Old generation",
3683 tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond(),
3684 tracer()->OldGenerationSpeedInBytesPerMillisecond());
3685 const double kHighMutatorUtilization = 0.993;
3686 return mu > kHighMutatorUtilization;
3687}
3688
3689bool Heap::HasLowEmbedderAllocationRate() {
3690 const double mu = ComputeMutatorUtilization(
3691 "Embedder", tracer()->EmbedderAllocationThroughputInBytesPerMillisecond(),
3692 tracer()->EmbedderSpeedInBytesPerMillisecond());
3693 const double kHighMutatorUtilization = 0.993;
3694 return mu > kHighMutatorUtilization;
3695}
3696
3697bool Heap::HasLowAllocationRate() {
3698 return HasLowYoungGenerationAllocationRate() &&
3699 HasLowOldGenerationAllocationRate() && HasLowEmbedderAllocationRate();
3700}
3701
3702bool Heap::IsIneffectiveMarkCompact(size_t old_generation_size,
3703 double mutator_utilization) {
3704 const double kHighHeapPercentage = 0.8;
3705 const double kLowMutatorUtilization = 0.4;
3706 return old_generation_size >=
3707 kHighHeapPercentage * max_old_generation_size() &&
3708 mutator_utilization < kLowMutatorUtilization;
3709}
3710
3711namespace {
3712static constexpr int kMaxConsecutiveIneffectiveMarkCompacts = 4;
3713}
3714
3715void Heap::CheckIneffectiveMarkCompact(size_t old_generation_size,
3716 double mutator_utilization) {
3717 if (!v8_flags.detect_ineffective_gcs_near_heap_limit) return;
3718 if (!IsIneffectiveMarkCompact(old_generation_size, mutator_utilization)) {
3719 consecutive_ineffective_mark_compacts_ = 0;
3720 return;
3721 }
3724 kMaxConsecutiveIneffectiveMarkCompacts) {
3726 // The callback increased the heap limit.
3727 consecutive_ineffective_mark_compacts_ = 0;
3728 return;
3729 }
3730 if (v8_flags.heap_snapshot_on_oom) {
3732 }
3733 FatalProcessOutOfMemory("Ineffective mark-compacts near heap limit");
3734 }
3735}
3736
3737bool Heap::HasHighFragmentation() {
3738 const size_t used = OldGenerationSizeOfObjects();
3739 const size_t committed = CommittedOldGenerationMemory();
3740
3741 // Background thread allocation could result in committed memory being less
3742 // than used memory in some situations.
3743 if (committed < used) return false;
3744
3745 constexpr size_t kSlack = 16 * MB;
3746
3747 // Fragmentation is high if committed > 2 * used + kSlack.
3748 // Rewrite the expression to avoid overflow.
3749 return committed - used > used + kSlack;
3750}
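// Equivalence check (editorial addition, not part of heap.cc) for the rewrite
// above: given committed >= used, the intended condition
//   committed > 2 * used + kSlack
// is the same as
//   committed - used > used + kSlack,
// but the right-hand form never multiplies `used` and so cannot overflow
// size_t even for very large heaps. Example: used = 512 MB and kSlack = 16 MB
// means fragmentation is only reported once committed exceeds 1040 MB.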
3751
3752bool Heap::ShouldOptimizeForMemoryUsage() {
3753 const size_t kOldGenerationSlack = max_old_generation_size() / 8;
3754 return isolate()->priority() == v8::Isolate::Priority::kBestEffort ||
3755 isolate()->MemorySaverModeEnabled() || HighMemoryPressure() ||
3756 !CanExpandOldGeneration(kOldGenerationSlack);
3757}
3758
3759class ActivateMemoryReducerTask : public CancelableTask {
3760 public:
3761 explicit ActivateMemoryReducerTask(Heap* heap)
3762 : CancelableTask(heap->isolate()), heap_(heap) {}
3763
3764 ~ActivateMemoryReducerTask() override = default;
3765 ActivateMemoryReducerTask(const ActivateMemoryReducerTask&) = delete;
3766 ActivateMemoryReducerTask& operator=(const ActivateMemoryReducerTask&) =
3767 delete;
3768
3769 private:
3770 // v8::internal::CancelableTask overrides.
3771 void RunInternal() override {
3772 heap_->ActivateMemoryReducerIfNeededOnMainThread();
3773 }
3774
3775 Heap* heap_;
3776};
3777
3778void Heap::ActivateMemoryReducerIfNeeded() {
3779 if (memory_reducer_ == nullptr) return;
3780 // This method may be called from any thread. Post a task to run it on the
3781 // isolate's main thread to avoid synchronization.
3782 task_runner_->PostTask(std::make_unique<ActivateMemoryReducerTask>(this));
3783}
3784
3785void Heap::ActivateMemoryReducerIfNeededOnMainThread() {
3786 // Activate memory reducer when switching to background if
3787 // - there was no mark compact since the start.
3788 // - the committed memory can be potentially reduced.
3789 // 2 pages for the old, code, and map space + 1 page for new space.
3790 const int kMinCommittedMemory = 7 * PageMetadata::kPageSize;
3791 if (ms_count_ == 0 && CommittedMemory() > kMinCommittedMemory &&
3792 isolate()->is_backgrounded()) {
3793 memory_reducer_->NotifyPossibleGarbage();
3794 }
3795}
3796
3797Heap::ResizeNewSpaceMode Heap::ShouldResizeNewSpace() {
3798 if (ShouldReduceMemory()) {
3799 return (v8_flags.predictable) ? ResizeNewSpaceMode::kNone
3800 : ResizeNewSpaceMode::kShrink;
3801 }
3802
3803 static const size_t kLowAllocationThroughput = 1000;
3804 const double allocation_throughput =
3805 tracer_->AllocationThroughputInBytesPerMillisecond();
3806 const bool should_shrink = !v8_flags.predictable &&
3807 (allocation_throughput != 0) &&
3808 (allocation_throughput < kLowAllocationThroughput);
3809
3810 const bool should_grow =
3813
3814 if (should_grow) survived_since_last_expansion_ = 0;
3815
3816 if (should_grow == should_shrink) return ResizeNewSpaceMode::kNone;
3817 return should_grow ? ResizeNewSpaceMode::kGrow : ResizeNewSpaceMode::kShrink;
3818}
3819
3820namespace {
3821size_t ComputeReducedNewSpaceSize(NewSpace* new_space) {
3822 size_t new_capacity =
3823 std::max(new_space->MinimumCapacity(), 2 * new_space->Size());
3824 size_t rounded_new_capacity =
3825 ::RoundUp(new_capacity, PageMetadata::kPageSize);
3826 DCHECK_LE(new_space->TotalCapacity(), new_space->MaximumCapacity());
3827 return std::min(new_space->TotalCapacity(), rounded_new_capacity);
3828}
3829} // anonymous namespace
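// Editor's note (illustrative, not part of heap.cc): worked example of the
// helper above with hypothetical values MinimumCapacity() = 1 MB, Size() =
// 3 MB, TotalCapacity() = 16 MB and a 256 KB page size:
//   new_capacity         = max(1 MB, 2 * 3 MB)   = 6 MB
//   rounded_new_capacity = RoundUp(6 MB, 256 KB) = 6 MB
//   result               = min(16 MB, 6 MB)      = 6 MB
// i.e. new space is shrunk to roughly twice its live size, but never above its
// current total capacity.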
3830
3831void Heap::StartResizeNewSpace() {
3832 DCHECK_EQ(ResizeNewSpaceMode::kNone, resize_new_space_mode_);
3833 DCHECK(v8_flags.minor_ms);
3835 if (resize_new_space_mode_ == ResizeNewSpaceMode::kShrink) {
3836 size_t reduced_capacity = ComputeReducedNewSpaceSize(new_space());
3837 paged_new_space()->StartShrinking(reduced_capacity);
3838 }
3839}
3840
3841void Heap::ResizeNewSpace() {
3842 DCHECK_IMPLIES(!v8_flags.minor_ms,
3843 resize_new_space_mode_ == ResizeNewSpaceMode::kNone);
3844 const ResizeNewSpaceMode mode =
3846 resize_new_space_mode_ = ResizeNewSpaceMode::kNone;
3847
3848 switch (mode) {
3849 case ResizeNewSpaceMode::kShrink:
3851 break;
3852 case ResizeNewSpaceMode::kGrow:
3854 break;
3855 case ResizeNewSpaceMode::kNone:
3856 break;
3857 }
3858}
3859
3860void Heap::ReduceNewSpaceSizeForTesting() { ReduceNewSpaceSize(); }
3861void Heap::ExpandNewSpaceSizeForTesting() { ExpandNewSpaceSize(); }
3862
3863void Heap::ExpandNewSpaceSize() {
3864 // Grow the size of new space if there is room to grow, and enough data
3865 // has survived scavenge since the last expansion.
3866 const size_t suggested_capacity =
3867 static_cast<size_t>(v8_flags.semi_space_growth_factor) *
3869 const size_t chosen_capacity =
3870 std::min(suggested_capacity, new_space_->MaximumCapacity());
3871 DCHECK(IsAligned(chosen_capacity, PageMetadata::kPageSize));
3872
3873 if (chosen_capacity > new_space_->TotalCapacity()) {
3874 new_space_->Grow(chosen_capacity);
3875 new_lo_space()->SetCapacity(new_space()->TotalCapacity());
3876 }
3877}
3878
3879void Heap::ReduceNewSpaceSize() {
3880 if (!v8_flags.minor_ms) {
3881 const size_t reduced_capacity = ComputeReducedNewSpaceSize(new_space());
3882 semi_space_new_space()->Shrink(reduced_capacity);
3883 } else {
3884 // MinorMS starts shrinking new space as part of sweeping.
3886 }
3887 new_lo_space_->SetCapacity(new_space()->TotalCapacity());
3888}
3889
3890size_t Heap::NewSpaceSize() {
3891 if (v8_flags.sticky_mark_bits) {
3892 return sticky_space()->young_objects_size();
3893 }
3894 return new_space() ? new_space()->Size() : 0;
3895}
3896
3897size_t Heap::NewSpaceCapacity() const {
3898 if (v8_flags.sticky_mark_bits) {
3900 }
3901 return new_space() ? new_space()->Capacity() : 0;
3902}
3903
3904size_t Heap::NewSpaceTargetCapacity() const {
3905 if (v8_flags.sticky_mark_bits) {
3906 // TODO(333906585): Adjust target capacity for new sticky-space.
3908 }
3909 return new_space() ? new_space()->TotalCapacity() : 0;
3910}
3911
3912void Heap::FinalizeIncrementalMarkingAtomically(
3913 GarbageCollectionReason gc_reason) {
3914 DCHECK(!incremental_marking()->IsStopped());
3916}
3917
3918void Heap::FinalizeIncrementalMarkingAtomicallyIfRunning(
3919 GarbageCollectionReason gc_reason) {
3920 if (!incremental_marking()->IsStopped()) {
3922 }
3923}
3924
3925void Heap::InvokeIncrementalMarkingPrologueCallbacks() {
3926 AllowGarbageCollection allow_allocation;
3929 GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
3930}
3931
3932void Heap::InvokeIncrementalMarkingEpilogueCallbacks() {
3933 AllowGarbageCollection allow_allocation;
3936 GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE);
3937}
3938
3939namespace {
3940thread_local Address pending_layout_change_object_address = kNullAddress;
3941
3942#ifdef V8_ENABLE_SANDBOX
3943class ExternalPointerSlotInvalidator
3944 : public HeapVisitor<ExternalPointerSlotInvalidator> {
3945 public:
3946 explicit ExternalPointerSlotInvalidator(Isolate* isolate)
3947 : HeapVisitor(isolate), isolate_(isolate) {}
3948
3949 void VisitPointers(Tagged<HeapObject> host, ObjectSlot start,
3950 ObjectSlot end) override {}
3951 void VisitPointers(Tagged<HeapObject> host, MaybeObjectSlot start,
3952 MaybeObjectSlot end) override {}
3953 void VisitInstructionStreamPointer(Tagged<Code> host,
3954 InstructionStreamSlot slot) override {}
3955 void VisitMapPointer(Tagged<HeapObject> host) override {}
3956
3957 void VisitExternalPointer(Tagged<HeapObject> host,
3958 ExternalPointerSlot slot) override {
3959 DCHECK_EQ(target_, host);
3960 ExternalPointerTable::Space* space =
3961 IsolateForSandbox(isolate_).GetExternalPointerTableSpaceFor(
3962 slot.tag_range(), host.address());
3963 space->NotifyExternalPointerFieldInvalidated(slot.address(),
3964 slot.tag_range());
3965 num_invalidated_slots++;
3966 }
3967
3968 int Visit(Tagged<HeapObject> target) {
3969 target_ = target;
3970 num_invalidated_slots = 0;
3971 HeapVisitor::Visit(target);
3972 return num_invalidated_slots;
3973 }
3974
3975 private:
3976 Isolate* isolate_;
3977 Tagged<HeapObject> target_;
3978 int num_invalidated_slots = 0;
3979};
3980#endif // V8_ENABLE_SANDBOX
3981
3982} // namespace
3983
3984void Heap::NotifyObjectLayoutChange(
3986 InvalidateRecordedSlots invalidate_recorded_slots,
3987 InvalidateExternalPointerSlots invalidate_external_pointer_slots,
3988 int new_size) {
3989 if (invalidate_recorded_slots == InvalidateRecordedSlots::kYes) {
3990 const bool may_contain_recorded_slots = MayContainRecordedSlots(object);
3991 MutablePageMetadata* const chunk =
3993 // Do not remove the recorded slot in the map word as this one can never be
3994 // invalidated.
3995 const Address clear_range_start = object.address() + kTaggedSize;
3996 // Only slots in the range of the new object size (which is potentially
3997 // smaller than the original one) can be invalidated. Clearing of recorded
3998 // slots up to the original object size even conflicts with concurrent
3999 // sweeping.
4000 const Address clear_range_end = object.address() + new_size;
4001
4002 if (incremental_marking()->IsMarking()) {
4003 ObjectLock::Lock(object);
4004 DCHECK_EQ(pending_layout_change_object_address, kNullAddress);
4005 pending_layout_change_object_address = object.address();
4006 if (may_contain_recorded_slots && incremental_marking()->IsCompacting()) {
4008 chunk, clear_range_start, clear_range_end,
4009 SlotSet::EmptyBucketMode::KEEP_EMPTY_BUCKETS);
4010 }
4011 }
4012
4013 if (may_contain_recorded_slots) {
4015 chunk, clear_range_start, clear_range_end,
4016 SlotSet::EmptyBucketMode::KEEP_EMPTY_BUCKETS);
4018 chunk, clear_range_start, clear_range_end,
4019 SlotSet::EmptyBucketMode::KEEP_EMPTY_BUCKETS);
4021 chunk, clear_range_start, clear_range_end,
4022 SlotSet::EmptyBucketMode::KEEP_EMPTY_BUCKETS);
4023 }
4024
4025 DCHECK(!chunk->InTrustedSpace());
4026 }
4027
4028 // During external pointer table compaction, the external pointer table
4029 // records addresses of fields that index into the external pointer table. As
4030 // such, it needs to be informed when such a field is invalidated.
4031 if (invalidate_external_pointer_slots ==
4033 // Currently, the only time this function receives
4034 // InvalidateExternalPointerSlots::kYes is when an external string
4035 // transitions to a thin string. If this ever changed to happen for array
4036 // buffer extension slots, we would have to run the invalidator in
4037 // pointer-compression-but-no-sandbox configurations as well.
4038 DCHECK(IsString(object));
4039#ifdef V8_ENABLE_SANDBOX
4041 ExternalPointerSlotInvalidator slot_invalidator(isolate());
4042 int num_invalidated_slots = slot_invalidator.Visit(object);
4043 USE(num_invalidated_slots);
4044 DCHECK_GT(num_invalidated_slots, 0);
4045 }
4046
4047 // During concurrent marking for a minor GC, the heap also builds up a
4048 // RememberedSet of external pointer field locations, and uses that set to
4049 // evacuate external pointer table entries when promoting objects. Here we
4050 // would need to invalidate that set too; until we do, assert that
4051 // NotifyObjectLayoutChange is never called on young objects.
4053#endif
4054 }
4055
4056#ifdef VERIFY_HEAP
4057 if (v8_flags.verify_heap) {
4058 HeapVerifier::SetPendingLayoutChangeObject(this, object);
4059 }
4060#endif
4061}
4062
4063// static
4064void Heap::NotifyObjectLayoutChangeDone(Tagged<HeapObject> object) {
4065 if (pending_layout_change_object_address != kNullAddress) {
4066 DCHECK_EQ(pending_layout_change_object_address, object.address());
4067 ObjectLock::Unlock(object);
4068 pending_layout_change_object_address = kNullAddress;
4069 }
4070}
4071
4072void Heap::NotifyObjectSizeChange(Tagged<HeapObject> object, int old_size,
4073 int new_size,
4074 ClearRecordedSlots clear_recorded_slots) {
4075 old_size = ALIGN_TO_ALLOCATION_ALIGNMENT(old_size);
4076 new_size = ALIGN_TO_ALLOCATION_ALIGNMENT(new_size);
4077 DCHECK_LE(new_size, old_size);
4078 DCHECK(!IsLargeObject(object));
4079 if (new_size == old_size) return;
4080
4081 const bool is_main_thread = LocalHeap::Current() == nullptr;
4082
4083 DCHECK_IMPLIES(!is_main_thread,
4084 clear_recorded_slots == ClearRecordedSlots::kNo);
4085
4086 const auto verify_no_slots_recorded =
4087 is_main_thread ? VerifyNoSlotsRecorded::kYes : VerifyNoSlotsRecorded::kNo;
4088
4089 const auto clear_memory_mode = ClearFreedMemoryMode::kDontClearFreedMemory;
4090
4091 const Address filler = object.address() + new_size;
4092 const int filler_size = old_size - new_size;
4094 WritableFreeSpace::ForNonExecutableMemory(filler, filler_size),
4095 clear_memory_mode, clear_recorded_slots, verify_no_slots_recorded);
4096}
4097
4098double Heap::MonotonicallyIncreasingTimeInMs() const {
4100 static_cast<double>(base::Time::kMillisecondsPerSecond);
4101}
4102
4103#if DEBUG
4104void Heap::VerifyNewSpaceTop() {
4105 if (!new_space()) return;
4106 allocator()->new_space_allocator()->Verify();
4107}
4108#endif // DEBUG
4109
4110class MemoryPressureInterruptTask : public CancelableTask {
4111 public:
4112 explicit MemoryPressureInterruptTask(Heap* heap)
4113 : CancelableTask(heap->isolate()), heap_(heap) {}
4114
4115 ~MemoryPressureInterruptTask() override = default;
4116 MemoryPressureInterruptTask(const MemoryPressureInterruptTask&) = delete;
4117 MemoryPressureInterruptTask& operator=(const MemoryPressureInterruptTask&) =
4118 delete;
4119
4120 private:
4121 // v8::internal::CancelableTask overrides.
4122 void RunInternal() override { heap_->CheckMemoryPressure(); }
4123
4124 Heap* heap_;
4125};
4126
4127void Heap::CheckMemoryPressure() {
4128 if (HighMemoryPressure()) {
4129 // The optimizing compiler may be unnecessarily holding on to memory.
4130 isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
4131 }
4132 // Reset the memory pressure level to avoid recursive GCs triggered by
4133 // CheckMemoryPressure from AdjustAmountOfExternalMemory called by
4134 // the finalizers.
4135 MemoryPressureLevel memory_pressure_level = memory_pressure_level_.exchange(
4136 MemoryPressureLevel::kNone, std::memory_order_relaxed);
4137 if (memory_pressure_level == MemoryPressureLevel::kCritical) {
4138 TRACE_EVENT0("devtools.timeline,v8", "V8.CheckMemoryPressure");
4140 } else if (memory_pressure_level == MemoryPressureLevel::kModerate) {
4141 if (v8_flags.incremental_marking && incremental_marking()->IsStopped()) {
4142 TRACE_EVENT0("devtools.timeline,v8", "V8.CheckMemoryPressure");
4145 }
4146 }
4147}
4148
4149void Heap::CollectGarbageOnMemoryPressure() {
4150 const int kGarbageThresholdInBytes = 8 * MB;
4151 const double kGarbageThresholdAsFractionOfTotalMemory = 0.1;
4152 // This constant is the maximum response time in RAIL performance model.
4153 const double kMaxMemoryPressurePauseMs = 100;
4154
4161
4162 // Estimate how much memory we can free.
4163 int64_t potential_garbage =
4165 // If we can potentially free a large amount of memory, then start a GC
4166 // right away instead of waiting for the memory reducer.
4167 if (potential_garbage >= kGarbageThresholdInBytes &&
4168 potential_garbage >=
4169 CommittedMemory() * kGarbageThresholdAsFractionOfTotalMemory) {
4170 // If we spent less than half of the time budget, then perform a full GC.
4171 // Otherwise, start incremental marking.
4172 if (end - start < kMaxMemoryPressurePauseMs / 2) {
4176 } else {
4177 if (v8_flags.incremental_marking && incremental_marking()->IsStopped()) {
4180 }
4181 }
4182 }
4183}
4184
4185void Heap::MemoryPressureNotification(MemoryPressureLevel level,
4186 bool is_isolate_locked) {
4187 TRACE_EVENT1("devtools.timeline,v8", "V8.MemoryPressureNotification", "level",
4188 static_cast<int>(level));
4190 memory_pressure_level_.exchange(level, std::memory_order_relaxed);
4195 if (is_isolate_locked) {
4197 } else {
4198 ExecutionAccess access(isolate());
4199 isolate()->stack_guard()->RequestGC();
4201 std::make_unique<MemoryPressureInterruptTask>(this));
4202 }
4203 }
4204}
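// Editor's example (illustrative sketch, not part of heap.cc): embedders reach
// Heap::MemoryPressureNotification above through the public v8::Isolate API.
// The wiring below is hypothetical; the call itself is from
// include/v8-isolate.h.
//
//   #include "v8.h"
//   void OnSystemMemoryWarning(v8::Isolate* isolate) {
//     // kCritical makes V8 collect garbage eagerly; kModerate only nudges
//     // incremental marking if it is not already running.
//     isolate->MemoryPressureNotification(v8::MemoryPressureLevel::kCritical);
//   }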
4205
4206void Heap::EagerlyFreeExternalMemoryAndWasmCode() {
4207#if V8_ENABLE_WEBASSEMBLY
4208 if (v8_flags.flush_liftoff_code) {
4209 // Flush Liftoff code and record the flushed code size.
4210 auto [code_size, metadata_size] = wasm::GetWasmEngine()->FlushLiftoffCode();
4211 isolate_->counters()->wasm_flushed_liftoff_code_size_bytes()->AddSample(
4212 static_cast<int>(code_size));
4213 isolate_->counters()->wasm_flushed_liftoff_metadata_size_bytes()->AddSample(
4214 static_cast<int>(metadata_size));
4215 }
4216#endif // V8_ENABLE_WEBASSEMBLY
4217 CompleteArrayBufferSweeping(this);
4218}
4219
4220void Heap::AddNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
4221 void* data) {
4222 const size_t kMaxCallbacks = 100;
4223 CHECK_LT(near_heap_limit_callbacks_.size(), kMaxCallbacks);
4224 for (auto callback_data : near_heap_limit_callbacks_) {
4225 CHECK_NE(callback_data.first, callback);
4226 }
4227 near_heap_limit_callbacks_.push_back(std::make_pair(callback, data));
4228}
4229
4230void Heap::RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
4231 size_t heap_limit) {
4232 for (size_t i = 0; i < near_heap_limit_callbacks_.size(); i++) {
4233 if (near_heap_limit_callbacks_[i].first == callback) {
4235 if (heap_limit) {
4236 RestoreHeapLimit(heap_limit);
4237 }
4238 return;
4239 }
4240 }
4241 UNREACHABLE();
4242}
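// Editor's example (illustrative sketch, not part of heap.cc): registering a
// near-heap-limit callback from embedder code. The callback signature and
// AddNearHeapLimitCallback come from include/v8-isolate.h; the 50% bump is an
// arbitrary choice for illustration.
//
//   #include "v8.h"
//   size_t OnNearHeapLimit(void* data, size_t current_limit,
//                          size_t initial_limit) {
//     // Returning a value larger than current_limit raises the limit (see
//     // Heap::InvokeNearHeapLimitCallback below); otherwise V8 will
//     // eventually report OOM.
//     return current_limit + current_limit / 2;
//   }
//
//   isolate->AddNearHeapLimitCallback(OnNearHeapLimit, nullptr);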
4243
4244void Heap::AppendArrayBufferExtension(ArrayBufferExtension* extension) {
4245 // ArrayBufferSweeper is managing all counters and updating Heap counters.
4246 array_buffer_sweeper_->Append(extension);
4247}
4248
4249void Heap::ResizeArrayBufferExtension(ArrayBufferExtension* extension,
4250 int64_t delta) {
4251 // ArrayBufferSweeper is managing all counters and updating Heap counters.
4252 array_buffer_sweeper_->Resize(extension, delta);
4253}
4254
4255void Heap::DetachArrayBufferExtension(ArrayBufferExtension* extension) {
4256 // ArrayBufferSweeper is managing all counters and updating Heap counters.
4257 return array_buffer_sweeper_->Detach(extension);
4258}
4259
4260void Heap::AutomaticallyRestoreInitialHeapLimit(double threshold_percent) {
4262 initial_max_old_generation_size_ * threshold_percent;
4263}
4264
4265bool Heap::InvokeNearHeapLimitCallback() {
4266 if (!near_heap_limit_callbacks_.empty()) {
4267 AllowGarbageCollection allow_gc;
4268 TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_NEAR_HEAP_LIMIT);
4269 VMState<EXTERNAL> callback_state(isolate());
4270 HandleScope scope(isolate());
4272 near_heap_limit_callbacks_.back().first;
4273 void* data = near_heap_limit_callbacks_.back().second;
4274 size_t heap_limit = callback(data, max_old_generation_size(),
4276 if (heap_limit > max_old_generation_size()) {
4278 std::min(heap_limit, AllocatorLimitOnMaxOldGenerationSize()));
4279 return true;
4280 }
4281 }
4282 return false;
4283}
4284
4285bool Heap::MeasureMemory(std::unique_ptr<v8::MeasureMemoryDelegate> delegate,
4286 v8::MeasureMemoryExecution execution) {
4287 HandleScope handle_scope(isolate());
4288 std::vector<Handle<NativeContext>> contexts = FindAllNativeContexts();
4289 std::vector<Handle<NativeContext>> to_measure;
4290 for (auto& current : contexts) {
4291 if (delegate->ShouldMeasure(v8::Utils::ToLocal(current))) {
4292 to_measure.push_back(current);
4293 }
4294 }
4295 return memory_measurement_->EnqueueRequest(std::move(delegate), execution,
4296 to_measure);
4297}
4298
4299std::unique_ptr<v8::MeasureMemoryDelegate>
4300Heap::CreateDefaultMeasureMemoryDelegate(
4302 v8::MeasureMemoryMode mode) {
4303 return i::MemoryMeasurement::DefaultDelegate(
4304 reinterpret_cast<v8::Isolate*>(isolate_), context, promise, mode);
4305}
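// Editor's example (illustrative sketch, not part of heap.cc): the public
// entry points for the two functions above are v8::Isolate::MeasureMemory and
// v8::MeasureMemoryDelegate::Default; the resolver plumbing here is assumed
// embedder code.
//
//   #include "v8.h"
//   void MeasureContextMemory(v8::Isolate* isolate,
//                             v8::Local<v8::Context> context,
//                             v8::Local<v8::Promise::Resolver> resolver) {
//     // kSummary reports a single total; kDetailed reports per-context sizes.
//     isolate->MeasureMemory(
//         v8::MeasureMemoryDelegate::Default(
//             isolate, context, resolver, v8::MeasureMemoryMode::kSummary),
//         v8::MeasureMemoryExecution::kDefault);
//   }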
4306
4307void Heap::CollectCodeStatistics() {
4308 TRACE_EVENT0("v8", "Heap::CollectCodeStatistics");
4309 SafepointScope safepoint_scope(isolate(),
4313 // We do not look for code in new space, or map space. If code
4314 // somehow ends up in those spaces, we would miss it here.
4320}
4321
4322#ifdef DEBUG
4323
4324void Heap::Print() {
4325 if (!HasBeenSetUp()) return;
4326 isolate()->PrintStack(stdout);
4327
4328 for (SpaceIterator it(this); it.HasNext();) {
4329 it.Next()->Print();
4330 }
4331}
4332
4333void Heap::ReportCodeStatistics(const char* title) {
4334 PrintF("###### Code Stats (%s) ######\n", title);
4335 CollectCodeStatistics();
4336 CodeStatistics::ReportCodeStatistics(isolate());
4337}
4338
4339#endif // DEBUG
4340
4341bool Heap::Contains(Tagged<HeapObject> value) const {
4342 if (ReadOnlyHeap::Contains(value)) {
4343 return false;
4344 }
4345 if (memory_allocator()->IsOutsideAllocatedSpace(value.address())) {
4346 return false;
4347 }
4348
4349 if (!HasBeenSetUp()) return false;
4350
4351 return (new_space_ && new_space_->Contains(value)) ||
4352 old_space_->Contains(value) || code_space_->Contains(value) ||
4353 (shared_space_ && shared_space_->Contains(value)) ||
4355 lo_space_->Contains(value) || code_lo_space_->Contains(value) ||
4356 (new_lo_space_ && new_lo_space_->Contains(value)) ||
4357 trusted_space_->Contains(value) ||
4358 trusted_lo_space_->Contains(value) ||
4362}
4363
4364bool Heap::ContainsCode(Tagged<HeapObject> value) const {
4365 // TODO(v8:11880): support external code space.
4366 if (memory_allocator()->IsOutsideAllocatedSpace(value.address(),
4367 EXECUTABLE)) {
4368 return false;
4369 }
4370 return HasBeenSetUp() &&
4371 (code_space_->Contains(value) || code_lo_space_->Contains(value));
4372}
4373
4374bool Heap::SharedHeapContains(Tagged<HeapObject> value) const {
4376 if (shared_allocation_space_->Contains(value)) return true;
4377 if (shared_lo_allocation_space_->Contains(value)) return true;
4378 if (shared_trusted_allocation_space_->Contains(value)) return true;
4379 if (shared_trusted_lo_allocation_space_->Contains(value)) return true;
4380 }
4381
4382 return false;
4383}
4384
4385bool Heap::MustBeInSharedOldSpace(Tagged<HeapObject> value) {
4386 if (isolate()->OwnsStringTables()) return false;
4387 if (ReadOnlyHeap::Contains(value)) return false;
4388 if (HeapLayout::InYoungGeneration(value)) return false;
4389 if (IsExternalString(value)) return false;
4390 if (IsInternalizedString(value)) return true;
4391 return false;
4392}
4393
4394bool Heap::InSpace(Tagged<HeapObject> value, AllocationSpace space) const {
4395 if (memory_allocator()->IsOutsideAllocatedSpace(
4396 value.address(),
4398 return false;
4399 }
4400 if (!HasBeenSetUp()) return false;
4401
4402 switch (space) {
4403 case NEW_SPACE:
4404 return new_space_->Contains(value);
4405 case OLD_SPACE:
4406 return old_space_->Contains(value);
4407 case CODE_SPACE:
4408 return code_space_->Contains(value);
4409 case SHARED_SPACE:
4410 return shared_space_->Contains(value);
4411 case TRUSTED_SPACE:
4412 return trusted_space_->Contains(value);
4414 return shared_trusted_space_->Contains(value);
4415 case LO_SPACE:
4416 return lo_space_->Contains(value);
4417 case CODE_LO_SPACE:
4418 return code_lo_space_->Contains(value);
4419 case NEW_LO_SPACE:
4420 return new_lo_space_->Contains(value);
4421 case SHARED_LO_SPACE:
4422 return shared_lo_space_->Contains(value);
4424 return shared_trusted_lo_space_->Contains(value);
4425 case TRUSTED_LO_SPACE:
4426 return trusted_lo_space_->Contains(value);
4427 case RO_SPACE:
4428 return ReadOnlyHeap::Contains(value);
4429 }
4430 UNREACHABLE();
4431}
4432
4433bool Heap::InSpaceSlow(Address addr, AllocationSpace space) const {
4434 if (memory_allocator()->IsOutsideAllocatedSpace(
4435 addr, IsAnyCodeSpace(space) ? EXECUTABLE : NOT_EXECUTABLE)) {
4436 return false;
4437 }
4438 if (!HasBeenSetUp()) return false;
4439
4440 switch (space) {
4441 case NEW_SPACE:
4442 return new_space_->ContainsSlow(addr);
4443 case OLD_SPACE:
4444 return old_space_->ContainsSlow(addr);
4445 case CODE_SPACE:
4446 return code_space_->ContainsSlow(addr);
4447 case SHARED_SPACE:
4448 return shared_space_->ContainsSlow(addr);
4449 case TRUSTED_SPACE:
4450 return trusted_space_->ContainsSlow(addr);
4452 return shared_trusted_space_->ContainsSlow(addr);
4453 case LO_SPACE:
4454 return lo_space_->ContainsSlow(addr);
4455 case CODE_LO_SPACE:
4456 return code_lo_space_->ContainsSlow(addr);
4457 case NEW_LO_SPACE:
4458 return new_lo_space_->ContainsSlow(addr);
4459 case SHARED_LO_SPACE:
4460 return shared_lo_space_->ContainsSlow(addr);
4463 case TRUSTED_LO_SPACE:
4464 return trusted_lo_space_->ContainsSlow(addr);
4465 case RO_SPACE:
4466 return read_only_space_->ContainsSlow(addr);
4467 }
4468 UNREACHABLE();
4469}
4470
4471bool Heap::IsValidAllocationSpace(AllocationSpace space) {
4472 switch (space) {
4473 case NEW_SPACE:
4474 case OLD_SPACE:
4475 case CODE_SPACE:
4476 case SHARED_SPACE:
4477 case LO_SPACE:
4478 case NEW_LO_SPACE:
4479 case CODE_LO_SPACE:
4480 case SHARED_LO_SPACE:
4481 case TRUSTED_SPACE:
4483 case TRUSTED_LO_SPACE:
4485 case RO_SPACE:
4486 return true;
4487 default:
4488 return false;
4489 }
4490}
4491
4492#ifdef DEBUG
4493void Heap::VerifyCountersAfterSweeping() {
4494 MakeHeapIterable();
4495 PagedSpaceIterator spaces(this);
4496 for (PagedSpace* space = spaces.Next(); space != nullptr;
4497 space = spaces.Next()) {
4498 space->VerifyCountersAfterSweeping(this);
4499 }
4500}
4501
4502void Heap::VerifyCountersBeforeConcurrentSweeping(GarbageCollector collector) {
4503 if (v8_flags.minor_ms && new_space()) {
4504 PagedSpaceBase* space = paged_new_space()->paged_space();
4505 space->RefillFreeList();
4506 space->VerifyCountersBeforeConcurrentSweeping();
4507 }
4508 if (collector != GarbageCollector::MARK_COMPACTOR) return;
4509 PagedSpaceIterator spaces(this);
4510 for (PagedSpace* space = spaces.Next(); space != nullptr;
4511 space = spaces.Next()) {
4512 // We need to refine the counters on pages that are already swept and have
4513 // not been moved over to the actual space. Otherwise, the AccountingStats
4514 // are just an over-approximation.
4515 space->RefillFreeList();
4516 space->VerifyCountersBeforeConcurrentSweeping();
4517 }
4518}
4519
4520void Heap::VerifyCommittedPhysicalMemory() {
4521 PagedSpaceIterator spaces(this);
4522 for (PagedSpace* space = spaces.Next(); space != nullptr;
4523 space = spaces.Next()) {
4524 space->VerifyCommittedPhysicalMemory();
4525 }
4526 if (v8_flags.minor_ms && new_space()) {
4527 paged_new_space()->paged_space()->VerifyCommittedPhysicalMemory();
4528 }
4529}
4530#endif // DEBUG
4531
4532void Heap::IterateWeakRoots(RootVisitor* v, base::EnumSet<SkipRoot> options) {
4533 DCHECK(!options.contains(SkipRoot::kWeak));
4534
4535 if (!options.contains(SkipRoot::kUnserializable)) {
4536 // Isolate::topmost_script_having_context_address is treated weakly.
4538 Root::kWeakRoots, nullptr,
4539 FullObjectSlot(isolate()->topmost_script_having_context_address()));
4540 }
4541
4542 if (!options.contains(SkipRoot::kOldGeneration) &&
4543 !options.contains(SkipRoot::kUnserializable) &&
4544 isolate()->OwnsStringTables()) {
4545 // Do not visit for the following reasons.
4546 // - Serialization, since the string table is custom serialized.
4547 // - If we are skipping old generation, since all internalized strings
4548 // are in old space.
4549 // - If the string table is shared and this is not the shared heap,
4550 // since all internalized strings are in the shared heap.
4551 isolate()->string_table()->IterateElements(v);
4552 }
4553 v->Synchronize(VisitorSynchronization::kStringTable);
4554 if (!options.contains(SkipRoot::kExternalStringTable) &&
4555 !options.contains(SkipRoot::kUnserializable)) {
4556 // Scavenge collections have special processing for this.
4557 // Do not visit for serialization, since the external string table will
4558 // be populated from scratch upon deserialization.
4560 }
4561 v->Synchronize(VisitorSynchronization::kExternalStringsTable);
4562 if (!options.contains(SkipRoot::kOldGeneration) &&
4563 !options.contains(SkipRoot::kUnserializable) &&
4564 isolate()->is_shared_space_isolate() &&
4565 isolate()->shared_struct_type_registry()) {
4566 isolate()->shared_struct_type_registry()->IterateElements(isolate(), v);
4567 }
4568 v->Synchronize(VisitorSynchronization::kSharedStructTypeRegistry);
4569}
4570
4571void Heap::IterateSmiRoots(RootVisitor* v) {
4572 // Acquire execution access since we are going to read stack limit values.
4573 ExecutionAccess access(isolate());
4574 v->VisitRootPointers(Root::kSmiRootList, nullptr,
4575 roots_table().smi_roots_begin(),
4576 roots_table().smi_roots_end());
4577 v->Synchronize(VisitorSynchronization::kSmiRootList);
4578}
4579
4581 Root root, const char* description, FullObjectSlot p) {
4582 if (!IsHeapObject(*p)) return;
4583
4584 if (IsLeftTrimmed(p)) {
4585 p.store(Smi::zero());
4586 } else {
4587 visitor_->VisitRootPointer(root, description, p);
4588 }
4589}
4590
4592 if (!IsHeapObject(*p)) return false;
4594 if (!current->map_word(cage_base(), kRelaxedLoad).IsForwardingAddress() &&
4595 IsFreeSpaceOrFiller(current, cage_base())) {
4596#ifdef DEBUG
4597 // We need to find a FixedArrayBase map after walking the fillers.
4598 while (
4599 !current->map_word(cage_base(), kRelaxedLoad).IsForwardingAddress() &&
4600 IsFreeSpaceOrFiller(current, cage_base())) {
4601 Address next = current.ptr();
4602 if (current->map(cage_base()) ==
4603 ReadOnlyRoots(heap_).one_pointer_filler_map()) {
4604 next += kTaggedSize;
4605 } else if (current->map(cage_base()) ==
4606 ReadOnlyRoots(heap_).two_pointer_filler_map()) {
4607 next += 2 * kTaggedSize;
4608 } else {
4609 next += current->Size();
4610 }
4611 current = Cast<HeapObject>(Tagged<Object>(next));
4612 }
4613 DCHECK(
4614 current->map_word(cage_base(), kRelaxedLoad).IsForwardingAddress() ||
4615 IsFixedArrayBase(current, cage_base()));
4616#endif // DEBUG
4617 return true;
4618 } else {
4619 return false;
4620 }
4621}
4622
4624 Heap* heap, RootVisitor* visitor)
4625 : heap_(heap),
4626 visitor_(visitor)
4627#if V8_COMPRESS_POINTERS
4628 ,
4629 cage_base_(heap->isolate())
4630#endif // V8_COMPRESS_POINTERS
4631{
4632 USE(heap_);
4633}
4634
4636 Root root, const char* description, FullObjectSlot p) {
4637 ClearLeftTrimmedOrForward(root, description, p);
4638}
4639
4641 Root root, const char* description, FullObjectSlot start,
4643 for (FullObjectSlot p = start; p < end; ++p) {
4644 ClearLeftTrimmedOrForward(root, description, p);
4645 }
4646}
4647
4649 FullObjectSlot code_slot, FullObjectSlot istream_or_smi_zero_slot) {
4650 // Directly forward to actual visitor here. Code objects and instruction
4651 // stream will not be left-trimmed.
4652 DCHECK(!IsLeftTrimmed(code_slot));
4653 DCHECK(!IsLeftTrimmed(istream_or_smi_zero_slot));
4654 visitor_->VisitRunningCode(code_slot, istream_or_smi_zero_slot);
4655}
4656
4657void Heap::IterateRoots(RootVisitor* v, base::EnumSet<SkipRoot> options,
4658 IterateRootsMode roots_mode) {
4659 v->VisitRootPointers(Root::kStrongRootList, nullptr,
4660 roots_table().strong_roots_begin(),
4661 roots_table().strong_roots_end());
4662 v->Synchronize(VisitorSynchronization::kStrongRootList);
4663
4664 isolate_->bootstrapper()->Iterate(v);
4665 v->Synchronize(VisitorSynchronization::kBootstrapper);
4667 v->Synchronize(VisitorSynchronization::kRelocatable);
4668 isolate_->debug()->Iterate(v);
4669 v->Synchronize(VisitorSynchronization::kDebug);
4670
4671 isolate_->compilation_cache()->Iterate(v);
4672 v->Synchronize(VisitorSynchronization::kCompilationCache);
4673
4674 const bool skip_iterate_builtins =
4675 options.contains(SkipRoot::kOldGeneration) ||
4677 options.contains(SkipRoot::kReadOnlyBuiltins) &&
4678 // Prior to ReadOnlyPromotion, builtins may be on the mutable heap.
4679 !isolate_->serializer_enabled());
4680 if (!skip_iterate_builtins) {
4681 IterateBuiltins(v);
4682 v->Synchronize(VisitorSynchronization::kBuiltins);
4683 }
4684
4685 // Iterate over pointers being held by inactive threads.
4686 isolate_->thread_manager()->Iterate(v);
4687 v->Synchronize(VisitorSynchronization::kThreadManager);
4688
4689 // Visitors in this block only run when not serializing. These include:
4690 //
4691 // - Thread-local and stack.
4692 // - Handles.
4693 // - Microtasks.
4694 // - The startup object cache.
4695 //
4696 // When creating a real startup snapshot, these areas are expected to be empty.
4697 // It is also possible to create a snapshot of a *running* isolate for testing
4698 // purposes. In this case, these areas are likely not empty and will simply be
4699 // skipped.
4700 //
4701 // The general guideline for adding visitors to this section vs. adding them
4702 // above is that non-transient heap state is always visited, transient heap
4703 // state is visited only when not serializing.
4704 if (!options.contains(SkipRoot::kUnserializable)) {
4705 if (!options.contains(SkipRoot::kTracedHandles)) {
4706 // Young GCs always skip traced handles and visit them manually.
4707 DCHECK(!options.contains(SkipRoot::kOldGeneration));
4708
4709 isolate_->traced_handles()->Iterate(v);
4710 }
4711
4712 if (!options.contains(SkipRoot::kGlobalHandles)) {
4713 // Young GCs always skip global handles and visit them manually.
4714 DCHECK(!options.contains(SkipRoot::kOldGeneration));
4715
4716 if (options.contains(SkipRoot::kWeak)) {
4717 isolate_->global_handles()->IterateStrongRoots(v);
4718 } else {
4719 isolate_->global_handles()->IterateAllRoots(v);
4720 }
4721 }
4722 v->Synchronize(VisitorSynchronization::kGlobalHandles);
4723
4724 if (!options.contains(SkipRoot::kStack)) {
4725 ClearStaleLeftTrimmedPointerVisitor left_trim_visitor(this, v);
4726 IterateStackRoots(&left_trim_visitor);
4727 if (!options.contains(SkipRoot::kConservativeStack)) {
4728 IterateConservativeStackRoots(v, roots_mode);
4729 }
4730 v->Synchronize(VisitorSynchronization::kStackRoots);
4731 }
4732
4733 // Iterate over main thread handles in handle scopes.
4734 if (!options.contains(SkipRoot::kMainThreadHandles)) {
4735 // Clear main thread handles with stale references to left-trimmed
4736 // objects. The GC would crash on such stale references.
4737 ClearStaleLeftTrimmedPointerVisitor left_trim_visitor(this, v);
4738 isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
4739 }
4740 // Iterate local handles for all local heaps.
4741 safepoint_->Iterate(v);
4742 // Iterates all persistent handles.
4743 isolate_->persistent_handles_list()->Iterate(v, isolate_);
4744 v->Synchronize(VisitorSynchronization::kHandleScope);
4745
4746 if (options.contains(SkipRoot::kOldGeneration)) {
4747 isolate_->eternal_handles()->IterateYoungRoots(v);
4748 } else {
4749 isolate_->eternal_handles()->IterateAllRoots(v);
4750 }
4751 v->Synchronize(VisitorSynchronization::kEternalHandles);
4752
4753 // Iterate over pending Microtasks stored in MicrotaskQueues.
4754 MicrotaskQueue* default_microtask_queue =
4755 isolate_->default_microtask_queue();
4756 if (default_microtask_queue) {
4757 MicrotaskQueue* microtask_queue = default_microtask_queue;
4758 do {
4759 microtask_queue->IterateMicrotasks(v);
4761 } while (microtask_queue != default_microtask_queue);
4762 }
4763 v->Synchronize(VisitorSynchronization::kMicroTasks);
4764
4765 // Iterate over other strong roots (currently only identity maps and
4766 // deoptimization entries).
4768 current = current->next) {
4769 v->VisitRootPointers(Root::kStrongRoots, current->label, current->start,
4770 current->end);
4771 }
4772 v->Synchronize(VisitorSynchronization::kStrongRoots);
4773
4774 // Iterate over the startup and shared heap object caches unless
4775 // serializing or deserializing.
4777 v->Synchronize(VisitorSynchronization::kStartupObjectCache);
4778
4779 // Iterate over shared heap object cache when the isolate owns this data
4780 // structure. Isolates which own the shared heap object cache are:
4781 // * All isolates when not using --shared-string-table.
4782 // * Shared space/main isolate with --shared-string-table.
4783 //
4784 // Isolates which do not own the shared heap object cache should not iterate
4785 // it.
4786 if (isolate_->OwnsStringTables()) {
4788 v->Synchronize(VisitorSynchronization::kSharedHeapObjectCache);
4789 }
4790 }
4791
4792 if (!options.contains(SkipRoot::kWeak)) {
4793 IterateWeakRoots(v, options);
4794 }
4795}
4796
4797void Heap::IterateRootsIncludingClients(RootVisitor* v,
4798 base::EnumSet<SkipRoot> options) {
4799 IterateRoots(v, options, IterateRootsMode::kMainIsolate);
4800
4801 if (isolate()->is_shared_space_isolate()) {
4802 ClientRootVisitor<> client_root_visitor(v);
4803 isolate()->global_safepoint()->IterateClientIsolates(
4804 [v = &client_root_visitor, options](Isolate* client) {
4805 client->heap()->IterateRoots(v, options,
4806 IterateRootsMode::kClientIsolate);
4807 });
4808 }
4809}
4810
4811void Heap::IterateWeakGlobalHandles(RootVisitor* v) {
4812 isolate_->global_handles()->IterateWeakRoots(v);
4813 isolate_->traced_handles()->Iterate(v);
4814}
4815
4816void Heap::IterateBuiltins(RootVisitor* v) {
4817 Builtins* builtins = isolate()->builtins();
4818 for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
4819 ++builtin) {
4820 const char* name = Builtins::name(builtin);
4821 v->VisitRootPointer(Root::kBuiltins, name, builtins->builtin_slot(builtin));
4822 }
4823
4824 for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLastTier0;
4825 ++builtin) {
4826 v->VisitRootPointer(Root::kBuiltins, Builtins::name(builtin),
4827 builtins->builtin_tier0_slot(builtin));
4828 }
4829
4830 // The entry table doesn't need to be updated since all builtins are embedded.
4832}
4833
4834void Heap::IterateStackRoots(RootVisitor* v) { isolate_->Iterate(v); }
4835
4836void Heap::IterateConservativeStackRoots(RootVisitor* root_visitor,
4837 IterateRootsMode roots_mode) {
4838#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
4839 if (!IsGCWithStack()) return;
4840
4841 // In case of a shared GC, we're interested in the main isolate for CSS.
4842 Isolate* main_isolate = roots_mode == IterateRootsMode::kClientIsolate
4844 : isolate();
4845
4846 ConservativeStackVisitor stack_visitor(main_isolate, root_visitor);
4847 IterateConservativeStackRoots(&stack_visitor);
4848#endif // V8_ENABLE_CONSERVATIVE_STACK_SCANNING
4849}
4850
4851void Heap::IterateConservativeStackRoots(
4852 ::heap::base::StackVisitor* stack_visitor) {
4854
4856 stack().IteratePointersUntilMarker(stack_visitor);
4857 }
4858 stack().IterateBackgroundStacks(stack_visitor);
4859}
4860
4861void Heap::IterateRootsForPrecisePinning(RootVisitor* visitor) {
4862 IterateStackRoots(visitor);
4863 isolate()->handle_scope_implementer()->Iterate(visitor);
4864}
4865
4866// static
4867size_t Heap::DefaultMinSemiSpaceSize() {
4868#if ENABLE_HUGEPAGE
4869 static constexpr size_t kMinSemiSpaceSize =
4870 kHugePageSize * kPointerMultiplier;
4871#else
4872 static constexpr size_t kMinSemiSpaceSize = 512 * KB * kPointerMultiplier;
4873#endif
4874 static_assert(kMinSemiSpaceSize % (1 << kPageSizeBits) == 0);
4875
4876 return kMinSemiSpaceSize;
4877}
4878
4879// static
4880size_t Heap::DefaultMaxSemiSpaceSize() {
4881#if ENABLE_HUGEPAGE
4882 static constexpr size_t kMaxSemiSpaceCapacityBaseUnit =
4883 kHugePageSize * 2 * kPointerMultiplier;
4884#else
4885 static constexpr size_t kMaxSemiSpaceCapacityBaseUnit =
4886 MB * kPointerMultiplier;
4887#endif
4888 static_assert(kMaxSemiSpaceCapacityBaseUnit % (1 << kPageSizeBits) == 0);
4889
4890 size_t max_semi_space_size =
4891 (v8_flags.minor_ms ? v8_flags.minor_ms_max_new_space_capacity_mb
4892 : v8_flags.scavenger_max_new_space_capacity_mb) *
4893 kMaxSemiSpaceCapacityBaseUnit;
4894 DCHECK_EQ(0, max_semi_space_size % (1 << kPageSizeBits));
4895 return max_semi_space_size;
4896}
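// Editor's note (illustrative, not part of heap.cc): worked example of the
// computation above for a non-hugepage build, assuming a hypothetical
// --scavenger-max-new-space-capacity-mb=32 and kPointerMultiplier = 1:
//   base unit           = 1 MB * 1  = 1 MB
//   max_semi_space_size = 32 * 1 MB = 32 MB
// With --minor-ms, minor_ms_max_new_space_capacity_mb is used instead, but the
// arithmetic is the same.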
4897
4898// static
4899size_t Heap::OldGenerationToSemiSpaceRatio() {
4900 DCHECK(!v8_flags.minor_ms);
4901 // Compute a ratio such that when old gen max capacity is set to the highest
4902 // supported value, young gen max capacity is also set to the max.
4903 DCHECK_LT(0u, v8_flags.scavenger_max_new_space_capacity_mb);
4904 static size_t kMaxOldGenSizeToMaxYoungGenSizeRatio =
4906 (v8_flags.scavenger_max_new_space_capacity_mb * MB);
4907 return kMaxOldGenSizeToMaxYoungGenSizeRatio / kPointerMultiplier;
4908}
4909
4910// static
4911size_t Heap::OldGenerationToSemiSpaceRatioLowMemory() {
4912 static constexpr size_t kOldGenerationToSemiSpaceRatioLowMemory =
4914 return kOldGenerationToSemiSpaceRatioLowMemory / (v8_flags.minor_ms ? 2 : 1);
4915}
4916
4917void Heap::ConfigureHeap(const v8::ResourceConstraints& constraints,
4918 v8::CppHeap* cpp_heap) {
4920 // Initialize max_semi_space_size_.
4921 {
4923 if (constraints.max_young_generation_size_in_bytes() > 0) {
4926 }
4927 if (v8_flags.max_semi_space_size > 0) {
4929 static_cast<size_t>(v8_flags.max_semi_space_size) * MB;
4930 } else if (v8_flags.max_heap_size > 0) {
4931 size_t max_heap_size = static_cast<size_t>(v8_flags.max_heap_size) * MB;
4932 size_t young_generation_size, old_generation_size;
4933 if (v8_flags.max_old_space_size > 0) {
4934 old_generation_size =
4935 static_cast<size_t>(v8_flags.max_old_space_size) * MB;
4936 young_generation_size = max_heap_size > old_generation_size
4937 ? max_heap_size - old_generation_size
4938 : 0;
4939 } else {
4940 GenerationSizesFromHeapSize(max_heap_size, &young_generation_size,
4941 &old_generation_size);
4942 }
4944 SemiSpaceSizeFromYoungGenerationSize(young_generation_size);
4945 }
4946 if (v8_flags.stress_compaction) {
4947 // This will cause more frequent GCs when stressing.
4949 }
4950 if (!v8_flags.minor_ms) {
4951 // TODO(dinfuehr): Rounding to a power of 2 is technically no longer
4952 // needed but yields best performance on Pixel2.
4954 static_cast<size_t>(base::bits::RoundUpToPowerOfTwo64(
4955 static_cast<uint64_t>(max_semi_space_size_)));
4956 }
4961 }
4962
4963 // Initialize max_old_generation_size_ and max_global_memory_.
4964 {
4965 size_t max_old_generation_size = 700ul * (kSystemPointerSize / 4) * MB;
4966 if (constraints.max_old_generation_size_in_bytes() > 0) {
4967 max_old_generation_size = constraints.max_old_generation_size_in_bytes();
4968 }
4969 if (v8_flags.max_old_space_size > 0) {
4970 max_old_generation_size =
4971 static_cast<size_t>(v8_flags.max_old_space_size) * MB;
4972 } else if (v8_flags.max_heap_size > 0) {
4973 size_t max_heap_size = static_cast<size_t>(v8_flags.max_heap_size) * MB;
4974 size_t young_generation_size =
4976 max_old_generation_size = max_heap_size > young_generation_size
4977 ? max_heap_size - young_generation_size
4978 : 0;
4979 }
4980 max_old_generation_size =
4981 std::max(max_old_generation_size, MinOldGenerationSize());
4982 max_old_generation_size = std::min(max_old_generation_size,
4984 max_old_generation_size =
4985 RoundDown<PageMetadata::kPageSize>(max_old_generation_size);
4986
4987 SetOldGenerationAndGlobalMaximumSize(max_old_generation_size);
4988 }
4989
4991 v8_flags.max_heap_size > 0,
4992 v8_flags.max_semi_space_size == 0 || v8_flags.max_old_space_size == 0);
4993
4994 // Initialize min_semispace_size_.
4995 {
4997 if (!v8_flags.optimize_for_size) {
4998 // Start with at least 1*MB semi-space on machines with a lot of memory.
5000 std::max(min_semi_space_size_, static_cast<size_t>(1 * MB));
5001 }
5003 if (v8_flags.min_semi_space_size > 0) {
5005 static_cast<size_t>(v8_flags.min_semi_space_size) * MB;
5006 }
5010 }
5011
5012 // Initialize initial_semispace_size_.
5013 {
5015 if (constraints.initial_young_generation_size_in_bytes() > 0) {
5018 }
5019 if (v8_flags.initial_heap_size > 0) {
5020 size_t young_generation, old_generation;
5021 Heap::GenerationSizesFromHeapSize(
5022 static_cast<size_t>(v8_flags.initial_heap_size) * MB,
5023 &young_generation, &old_generation);
5025 SemiSpaceSizeFromYoungGenerationSize(young_generation);
5026 }
5033 }
5034
5037
5038 if (v8_flags.lazy_new_space_shrinking) {
5040 }
5041
5042 // Initialize initial_old_space_size_.
5043 std::optional<size_t> initial_old_generation_size =
5044 [&]() -> std::optional<size_t> {
5045 if (v8_flags.initial_old_space_size > 0) {
5046 return static_cast<size_t>(v8_flags.initial_old_space_size) * MB;
5047 }
5048 if (v8_flags.initial_heap_size > 0) {
5049 size_t initial_heap_size =
5050 static_cast<size_t>(v8_flags.initial_heap_size) * MB;
5051 size_t young_generation_size =
5053 return initial_heap_size > young_generation_size
5054 ? initial_heap_size - young_generation_size
5055 : 0;
5056 }
5057 return std::nullopt;
5058 }();
5059 if (initial_old_generation_size.has_value()) {
5061 initial_old_generation_size_ = *initial_old_generation_size;
5062 } else {
5064 if (constraints.initial_old_generation_size_in_bytes() > 0) {
5067 }
5068 }
5074 // If the embedder pre-configures the initial old generation size,
5075 // then allow V8 to skip full GCs below that threshold.
5078 GlobalMemorySizeFromV8Size(min_old_generation_size_);
5079 }
5082
5083 // We rely on being able to allocate new arrays in paged spaces.
5085 (JSArray::kHeaderSize +
5088
5090
5091 heap_profiler_ = std::make_unique<HeapProfiler>(this);
5092 if (cpp_heap) {
5093 AttachCppHeap(cpp_heap);
5094 owning_cpp_heap_.reset(CppHeap::From(cpp_heap));
5095 }
5096
5097 configured_ = true;
5098}
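// Editor's example (illustrative sketch, not part of heap.cc): the
// ResourceConstraints consumed by ConfigureHeap above are usually filled in by
// the embedder before isolate creation. The API names are from
// include/v8-isolate.h; the 4 GB / 512 MB figures are arbitrary.
//
//   #include "v8.h"
//   v8::Isolate::CreateParams params;
//   params.array_buffer_allocator =
//       v8::ArrayBuffer::Allocator::NewDefaultAllocator();
//   // Either derive limits from the machine's resources...
//   params.constraints.ConfigureDefaults(
//       /*physical_memory=*/4ull * 1024 * 1024 * 1024,
//       /*virtual_memory_limit=*/0);
//   // ...or cap the old generation explicitly.
//   params.constraints.set_max_old_generation_size_in_bytes(512ull * 1024 *
//                                                            1024);
//   v8::Isolate* isolate = v8::Isolate::New(params);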
5099
5100void Heap::AddToRingBuffer(const char* string) {
5101 size_t first_part =
5102 std::min(strlen(string), kTraceRingBufferSize - ring_buffer_end_);
5103 memcpy(trace_ring_buffer_ + ring_buffer_end_, string, first_part);
5104 ring_buffer_end_ += first_part;
5105 if (first_part < strlen(string)) {
5106 ring_buffer_full_ = true;
5107 size_t second_part = strlen(string) - first_part;
5108 memcpy(trace_ring_buffer_, string + first_part, second_part);
5109 ring_buffer_end_ = second_part;
5110 }
5111}
5112
5113void Heap::GetFromRingBuffer(char* buffer) {
5114 size_t copied = 0;
5115 if (ring_buffer_full_) {
5117 memcpy(buffer, trace_ring_buffer_ + ring_buffer_end_, copied);
5118 }
5119 memcpy(buffer + copied, trace_ring_buffer_, ring_buffer_end_);
5120}
5121
5122void Heap::ConfigureHeapDefault() {
5123 v8::ResourceConstraints constraints;
5124 ConfigureHeap(constraints, nullptr);
5125}
5126
5127void Heap::RecordStats(HeapStats* stats) {
5128 stats->start_marker = HeapStats::kStartMarker;
5129 stats->end_marker = HeapStats::kEndMarker;
5130 stats->ro_space_size = read_only_space_->Size();
5131 stats->ro_space_capacity = read_only_space_->Capacity();
5132 stats->new_space_size = NewSpaceSize();
5133 stats->new_space_capacity = NewSpaceCapacity();
5134 stats->old_space_size = old_space_->SizeOfObjects();
5135 stats->old_space_capacity = old_space_->Capacity();
5136 stats->code_space_size = code_space_->SizeOfObjects();
5137 stats->code_space_capacity = code_space_->Capacity();
5138 stats->map_space_size = 0;
5139 stats->map_space_capacity = 0;
5140 stats->lo_space_size = lo_space_->Size();
5141 stats->code_lo_space_size = code_lo_space_->Size();
5142 isolate_->global_handles()->RecordStats(stats);
5143 stats->memory_allocator_size = memory_allocator()->Size();
5144 stats->memory_allocator_capacity =
5146 stats->os_error = base::OS::GetLastError();
5147 // TODO(leszeks): Include the string table in both current and peak usage.
5148 stats->malloced_memory = isolate_->allocator()->GetCurrentMemoryUsage();
5149 stats->malloced_peak_memory = isolate_->allocator()->GetMaxMemoryUsage();
5150 GetFromRingBuffer(stats->last_few_messages);
5151}
5152
5153size_t Heap::OldGenerationSizeOfObjects() const {
5154 size_t total = 0;
5155 if (v8_flags.sticky_mark_bits)
5156 total += sticky_space()->old_objects_size();
5157 else
5158 total += old_space()->SizeOfObjects();
5159 total += lo_space()->SizeOfObjects();
5160 total += code_space()->SizeOfObjects();
5161 total += code_lo_space()->SizeOfObjects();
5162 if (shared_space()) {
5163 total += shared_space()->SizeOfObjects();
5164 }
5165 if (shared_lo_space()) {
5166 total += shared_lo_space()->SizeOfObjects();
5167 }
5168 total += trusted_space()->SizeOfObjects();
5169 total += trusted_lo_space()->SizeOfObjects();
5170 return total;
5171}
5172
5173size_t Heap::OldGenerationWastedBytes() const {
5174 PagedSpaceIterator spaces(this);
5175 size_t total = 0;
5176 for (PagedSpace* space = spaces.Next(); space != nullptr;
5177 space = spaces.Next()) {
5178 total += space->Waste();
5179 }
5180 return total;
5181}
5182
5183size_t Heap::OldGenerationConsumedBytes() const {
5185}
5186
5187size_t Heap::YoungGenerationSizeOfObjects() const {
5190 if (v8_flags.sticky_mark_bits) {
5191 return sticky_space()->young_objects_size() +
5193 }
5195 return new_space()->SizeOfObjects() + new_lo_space()->SizeOfObjects();
5196}
5197
5198size_t Heap::YoungGenerationWastedBytes() const {
5200 DCHECK(v8_flags.minor_ms);
5201 return paged_new_space()->paged_space()->Waste();
5202}
5203
5204size_t Heap::YoungGenerationConsumedBytes() const {
5205 if (!new_space()) {
5206 return 0;
5207 }
5209 if (v8_flags.minor_ms) {
5211 }
5212 // When using Scavenger, memory is compacted. Thus wasted space is always 0.
5213 // The diff between `new_space()->SizeOfObjects()` and
5214 // `new_space()->CurrentCapacitySafe()` is less than one page. Using capacity
5215 // here is also easier for concurrency since this method is reachable from
5216 // background old allocations.
5219}
5220
5221size_t Heap::EmbedderSizeOfObjects() const {
5222 return cpp_heap_ ? CppHeap::From(cpp_heap_)->used_size() : 0;
5223}
5224
5225size_t Heap::GlobalSizeOfObjects() const {
5227 (v8_flags.external_memory_accounted_in_global_limit ? external_memory()
5228 : 0);
5229}
5230
5231size_t Heap::GlobalWastedBytes() const { return OldGenerationWastedBytes(); }
5232
5233size_t Heap::GlobalConsumedBytes() const {
5235}
5236
5237size_t Heap::OldGenerationConsumedBytesAtLastGC() const {
5239}
5240
5241size_t Heap::GlobalConsumedBytesAtLastGC() const {
5243 (v8_flags.external_memory_accounted_in_global_limit
5245 : 0);
5246}
5247
5248uint64_t Heap::AllocatedExternalMemorySinceMarkCompact() const {
5250}
5251
5252bool Heap::AllocationLimitOvershotByLargeMargin() const {
5253 // This guards against too eager finalization in small heaps.
5254 // The number is chosen based on v8.browsing_mobile on Nexus 7v2.
5255 constexpr size_t kMarginForSmallHeaps = 32u * MB;
5256
5257 uint64_t size_now = OldGenerationConsumedBytes();
5258 if (!v8_flags.external_memory_accounted_in_global_limit) {
5260 }
5261 if (incremental_marking()->IsMajorMarking()) {
5262 // No interleaved GCs, so we count young gen as part of old gen.
5263 size_now += YoungGenerationConsumedBytes();
5264 }
5265
5266 const size_t v8_overshoot = old_generation_allocation_limit() < size_now
5267 ? size_now - old_generation_allocation_limit()
5268 : 0;
5269 const size_t global_limit = global_allocation_limit();
5270 const size_t global_size = GlobalConsumedBytes();
5271 const size_t global_overshoot =
5272 global_limit < global_size ? global_size - global_limit : 0;
5273
5274 // Bail out if the V8 and global sizes are still below their respective
5275 // limits.
5276 if (v8_overshoot == 0 && global_overshoot == 0) {
5277 return false;
5278 }
5279
5280 // Overshoot margin is 50% of allocation limit or half-way to the max heap
5281 // with special handling of small heaps.
5282 const size_t v8_margin = std::min(
5283 std::max(old_generation_allocation_limit() / 2, kMarginForSmallHeaps),
5284 (max_old_generation_size() - old_generation_allocation_limit()) / 2);
5285 const size_t global_margin =
5286 std::min(std::max(global_limit / 2, kMarginForSmallHeaps),
5287 (max_global_memory_size_ - global_limit) / 2);
5288
5289 return v8_overshoot >= v8_margin || global_overshoot >= global_margin;
5290}
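// Editor's note (illustrative, not part of heap.cc): worked example of the
// margin computation above with hypothetical sizes
// old_generation_allocation_limit() = 128 MB and max_old_generation_size() =
// 256 MB:
//   v8_margin = min(max(128 MB / 2, 32 MB), (256 MB - 128 MB) / 2)
//             = min(64 MB, 64 MB) = 64 MB
// so the limit only counts as overshot by a large margin once consumption
// exceeds 128 MB + 64 MB = 192 MB (the global limit is handled analogously).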
5291
5292bool Heap::ShouldOptimizeForLoadTime() const {
5293 double load_start_time = load_start_time_ms_.load(std::memory_order_relaxed);
5294 return load_start_time != kLoadTimeNotLoading &&
5296 MonotonicallyIncreasingTimeInMs() < load_start_time + kMaxLoadTimeMs;
5297}
5298
5299// This predicate is called when an old generation space cannot allocate from
5300// the free list and is about to add a new page. Returning false will cause a
5301// major GC. It happens when the old generation allocation limit is reached and
5302// - either we need to optimize for memory usage,
5303// - or the incremental marking is not in progress and we cannot start it.
5304bool Heap::ShouldExpandOldGenerationOnSlowAllocation(LocalHeap* local_heap,
5305 AllocationOrigin origin) {
5306 if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
5307 // We reached the old generation allocation limit.
5308
5309 // Allocations in the GC should always succeed if possible.
5310 if (origin == AllocationOrigin::kGC) return true;
5311
5312 // Background threads need to be allowed to allocate without GC after teardown
5313 // was initiated.
5314 if (gc_state() == TEAR_DOWN) return true;
5315
5316 // Allocations need to succeed during isolate deserialization. With shared
5317 // heap allocations, a client isolate may perform shared heap allocations
5318 // during isolate deserialization as well.
5319 if (!deserialization_complete() ||
5320 !local_heap->heap()->deserialization_complete()) {
5321 return true;
5322 }
5323
5324 // Make it more likely that a retry of the failed allocation succeeds.
5325 if (local_heap->IsRetryOfFailedAllocation()) return true;
5326
5327 // Background thread requested GC, allocation should fail
5328 if (CollectionRequested()) return false;
5329
5330 if (ShouldOptimizeForMemoryUsage()) return false;
5331
5332 if (ShouldOptimizeForLoadTime()) return true;
5333
5334 if (incremental_marking()->IsMajorMarking() &&
5336 return false;
5337 }
5338
5339 if (incremental_marking()->IsStopped() &&
5340 IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) {
5341 // We cannot start incremental marking.
5342 return false;
5343 }
5344 return true;
5345}
5346
5347// This predicate is called when a young generation space cannot allocate
5348// from the free list and is about to add a new page. Returning false will
5349// cause a GC.
5350bool Heap::ShouldExpandYoungGenerationOnSlowAllocation(size_t allocation_size) {
5352
5353 if (always_allocate()) return true;
5354
5355 if (gc_state() == TEAR_DOWN) return true;
5356
5357 if (!CanPromoteYoungAndExpandOldGeneration(allocation_size)) {
5358 // Assuming all of new space is alive, doing a full GC and promoting all
5359 // objects should still succeed. Don't let new space grow if it means it
5360 // will exceed the available size of old space.
5361 return false;
5362 }
5363
5364 if (incremental_marking()->IsMajorMarking() &&
5366 // Allocate a new page during full GC incremental marking to avoid
5367 // prematurely finalizing the incremental GC. Once the full GC is over, new
5368 // space will be empty and capacity will be reset.
5369 return true;
5370 }
5371
5372 return false;
5373}
5374
5375bool Heap::IsNewSpaceAllowedToGrowAboveTargetCapacity() const {
5376 return always_allocate() || gc_state() == TEAR_DOWN ||
5378}
5379
5380Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
5381 if (ShouldReduceMemory() || v8_flags.stress_compaction) {
5382 return Heap::HeapGrowingMode::kMinimal;
5383 }
5384
5386 return Heap::HeapGrowingMode::kConservative;
5387 }
5388
5389 if (memory_reducer() != nullptr && memory_reducer()->ShouldGrowHeapSlowly()) {
5390 return Heap::HeapGrowingMode::kSlow;
5391 }
5392
5393 return Heap::HeapGrowingMode::kDefault;
5394}
5395
5396size_t Heap::GlobalMemoryAvailable() {
5397 size_t global_size = GlobalConsumedBytes();
5398 size_t global_limit = global_allocation_limit();
5399
5400 if (global_size < global_limit) {
5401 return global_limit - global_size;
5402 } else {
5403 return 0;
5404 }
5405}
5406
5407namespace {
5408
5409double PercentToLimit(size_t size_at_gc, size_t size_now, size_t limit) {
5410 if (size_now < size_at_gc) {
5411 return 0.0;
5412 }
5413 if (size_now > limit) {
5414 return 100.0;
5415 }
5416 const size_t current_bytes = size_now - size_at_gc;
5417 const size_t total_bytes = limit - size_at_gc;
5418 DCHECK_LE(current_bytes, total_bytes);
5419 return static_cast<double>(current_bytes) * 100 / total_bytes;
5420}
5421
5422} // namespace
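// Editor's note (illustrative, not part of heap.cc): worked example of
// PercentToLimit with hypothetical values size_at_gc = 40 MB and
// limit = 140 MB:
//   size_now =  40 MB ->   0.0  (nothing allocated since the last GC)
//   size_now =  90 MB ->  50.0  ((90 - 40) * 100 / (140 - 40))
//   size_now = 150 MB -> 100.0  (clamped because size_now > limit)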
5423
5424double Heap::PercentToOldGenerationLimit() const {
5425 return PercentToLimit(OldGenerationConsumedBytesAtLastGC(),
5428}
5429
5430double Heap::PercentToGlobalMemoryLimit() const {
5431 return PercentToLimit(GlobalConsumedBytesAtLastGC(), GlobalConsumedBytes(),
5433}
5434
5435// - kNoLimit means that either incremental marking is disabled or it is too
5436// early to start incremental marking.
5437// - kSoftLimit means that incremental marking should be started soon.
5438// - kHardLimit means that incremental marking should be started immediately.
5439// - kFallbackForEmbedderLimit means that incremental marking should be
5440// started as soon as the embedder does not allocate with high throughput
5441// anymore.
5442Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
5443 // InstructionStream using an AlwaysAllocateScope assumes that the GC state
5444 // does not change; that implies that no marking steps may be performed.
5445 if (!incremental_marking()->CanAndShouldBeStarted() || always_allocate()) {
5446 // Incremental marking is disabled or it is too early to start.
5447 return IncrementalMarkingLimit::kNoLimit;
5448 }
5449 if (v8_flags.stress_incremental_marking) {
5450 return IncrementalMarkingLimit::kHardLimit;
5451 }
5452 if (incremental_marking()->IsBelowActivationThresholds()) {
5453 // Incremental marking is disabled or it is too early to start.
5454 return IncrementalMarkingLimit::kNoLimit;
5455 }
5457 // If there is high memory pressure or stress testing is enabled, then
5458 // start marking immediately.
5459 return IncrementalMarkingLimit::kHardLimit;
5460 }
5461
5462 if (v8_flags.stress_marking > 0) {
5463 int current_percent = static_cast<int>(
5465 if (current_percent > 0) {
5466 if (v8_flags.trace_stress_marking) {
5467 isolate()->PrintWithTimestamp(
5468 "[IncrementalMarking] %d%% of the memory limit reached\n",
5469 current_percent);
5470 }
5471 if (v8_flags.fuzzer_gc_analysis) {
5472 // Skips values >=100% since they already trigger marking.
5473 if (current_percent < 100) {
5474 double max_marking_limit_reached =
5475 max_marking_limit_reached_.load(std::memory_order_relaxed);
5476 while (current_percent > max_marking_limit_reached) {
5477 max_marking_limit_reached_.compare_exchange_weak(
5478 max_marking_limit_reached, current_percent,
5479 std::memory_order_relaxed);
5480 }
5481 }
5482 } else if (current_percent >= stress_marking_percentage_) {
5483 return IncrementalMarkingLimit::kHardLimit;
5484 }
5485 }
5486 }
5487
5488 if (v8_flags.incremental_marking_soft_trigger > 0 ||
5489 v8_flags.incremental_marking_hard_trigger > 0) {
5490 int current_percent = static_cast<int>(
5492 if (current_percent > v8_flags.incremental_marking_hard_trigger &&
5493 v8_flags.incremental_marking_hard_trigger > 0) {
5494 return IncrementalMarkingLimit::kHardLimit;
5495 }
5496 if (current_percent > v8_flags.incremental_marking_soft_trigger &&
5497 v8_flags.incremental_marking_soft_trigger > 0) {
5498 return IncrementalMarkingLimit::kSoftLimit;
5499 }
5500 return IncrementalMarkingLimit::kNoLimit;
5501 }
5502
5504 size_t old_generation_space_available = OldGenerationSpaceAvailable();
5505 size_t global_memory_available = GlobalMemoryAvailable();
5506
5507 if (old_generation_space_available > NewSpaceTargetCapacity() &&
5508 (global_memory_available > NewSpaceTargetCapacity())) {
5509 if (cpp_heap() && gc_count_ == 0 && using_initial_limit()) {
5510 // At this point the embedder memory is above the activation
5511 // threshold. No GC happened so far and it's thus unlikely to get a
5512 // configured heap any time soon. Start a memory reducer in this case
5513 // which will wait until the allocation rate is low to trigger garbage
5514 // collection.
5515 return IncrementalMarkingLimit::kFallbackForEmbedderLimit;
5516 }
5517 return IncrementalMarkingLimit::kNoLimit;
5518 }
 5519 if (ShouldOptimizeForMemoryUsage()) {
 5520 return IncrementalMarkingLimit::kHardLimit;
 5521 }
 5522 if (ShouldOptimizeForLoadTime()) {
 5523 return IncrementalMarkingLimit::kNoLimit;
 5524 }
5525 if (old_generation_space_available == 0) {
5526 return IncrementalMarkingLimit::kHardLimit;
5527 }
5528 if (global_memory_available == 0) {
5529 return IncrementalMarkingLimit::kHardLimit;
5530 }
5531 return IncrementalMarkingLimit::kSoftLimit;
5532}
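// Summary of the checks above, in order: stress flags and memory pressure can
// force a hard limit outright; the --incremental-marking-{soft,hard}-trigger
// flags compare the consumed percentage of the limits against explicit
// thresholds; otherwise the decision falls back to the remaining
// old-generation and global budgets, returning kNoLimit (or the embedder
// fallback) while plenty of space is left, kSoftLimit once a budget is running
// low, and kHardLimit when one of them is exhausted.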
5533
5534bool Heap::ShouldStressCompaction() const {
5535 return v8_flags.stress_compaction && (gc_count_ & 1) != 0;
5536}
5537
5538void Heap::EnableInlineAllocation() { inline_allocation_enabled_ = true; }
5539
 5540void Heap::DisableInlineAllocation() {
 5541 inline_allocation_enabled_ = false;
 5542 FreeMainThreadLinearAllocationAreas();
 5543}
5544
5545void Heap::SetUp(LocalHeap* main_thread_local_heap) {
5548 main_thread_local_heap_ = main_thread_local_heap;
5549 heap_allocator_ = &main_thread_local_heap->heap_allocator_;
5551
5552 // Set the stack start for the main thread that sets up the heap.
5553 SetStackStart();
5554
5555#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
5556 heap_allocator_->UpdateAllocationTimeout();
5557#endif // V8_ENABLE_ALLOCATION_TIMEOUT
5558
5559 // Initialize heap spaces and initial maps and objects.
5560 //
5561 // If the heap is not yet configured (e.g. through the API), configure it.
5562 // Configuration is based on the flags new-space-size (really the semispace
5563 // size) and old-space-size if set or the initial values of semispace_size_
5564 // and old_generation_size_ otherwise.
5566
5568 reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
5570
5571 v8::PageAllocator* code_page_allocator;
5572 if (isolate_->RequiresCodeRange() || code_range_size_ != 0) {
5573 const size_t requested_size =
5575 // When a target requires the code range feature, we put all code objects in
5576 // a contiguous range of virtual address space, so that they can call each
5577 // other with near calls.
5578#ifdef V8_COMPRESS_POINTERS
5579 // When pointer compression is enabled, isolates in the same group share the
5580 // same CodeRange, owned by the IsolateGroup.
5581 code_range_ = isolate_->isolate_group()->EnsureCodeRange(requested_size);
5582#else
5583 // Otherwise, each isolate has its own CodeRange, owned by the heap.
5584 code_range_ = std::make_unique<CodeRange>();
5585 if (!code_range_->InitReservation(isolate_->page_allocator(),
5586 requested_size, false)) {
5588 isolate_, "Failed to reserve virtual memory for CodeRange");
5589 }
5590#endif // V8_COMPRESS_POINTERS
5591
5592 LOG(isolate_,
5593 NewEvent("CodeRange",
5594 reinterpret_cast<void*>(code_range_->reservation()->address()),
5596
5597 isolate_->AddCodeRange(code_range_->reservation()->region().begin(),
5598 code_range_->reservation()->region().size());
5599 code_page_allocator = code_range_->page_allocator();
5600 } else {
5601 code_page_allocator = isolate_->page_allocator();
5602 }
5603
5604 v8::PageAllocator* trusted_page_allocator;
5605#ifdef V8_ENABLE_SANDBOX
5606 trusted_page_allocator =
5607 TrustedRange::GetProcessWideTrustedRange()->page_allocator();
5608#else
5609 trusted_page_allocator = isolate_->page_allocator();
5610#endif
5611
5613 reinterpret_cast<v8::Isolate*>(isolate()));
5614
5615 collection_barrier_.reset(new CollectionBarrier(this, this->task_runner_));
5616
5617 // Set up memory allocator.
5619 isolate_, code_page_allocator, trusted_page_allocator, MaxReserved()));
5620
5621 sweeper_.reset(new Sweeper(this));
5622
5624
5628
5630 new IncrementalMarking(this, mark_compact_collector_->weak_objects()));
5631
5632 if (v8_flags.concurrent_marking || v8_flags.parallel_marking) {
5633 concurrent_marking_.reset(
5634 new ConcurrentMarking(this, mark_compact_collector_->weak_objects()));
5635 } else {
5636 concurrent_marking_.reset(new ConcurrentMarking(this, nullptr));
5637 }
5638
5639 // Set up layout tracing callback.
5640 if (V8_UNLIKELY(v8_flags.trace_gc_heap_layout)) {
5642 if (V8_UNLIKELY(!v8_flags.trace_gc_heap_layout_ignore_minor_gc)) {
5643 gc_type = static_cast<v8::GCType>(gc_type | kGCTypeScavenge |
5645 }
5647 nullptr);
5649 nullptr);
5650 }
5651}
5652
5653void Heap::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
5654 DCHECK_NOT_NULL(ro_heap);
5656 read_only_space_ == ro_heap->read_only_space());
5657 DCHECK_NULL(space_[RO_SPACE].get());
5658 read_only_space_ = ro_heap->read_only_space();
5660}
5661
5662void Heap::ReplaceReadOnlySpace(SharedReadOnlySpace* space) {
5663 if (read_only_space_) {
5665 delete read_only_space_;
5666 }
5667
5670}
5671
5673 public:
5675 : CancelableTask(isolate), isolate_(isolate) {}
5676
5677 void RunInternal() override {
5678 Heap* heap = isolate_->heap();
5680 UnparkedScope unparked_scope(&local_heap);
5681
5682 const int kNumIterations = 2000;
5683 const int kSmallObjectSize = 10 * kTaggedSize;
5684 const int kMediumObjectSize = 8 * KB;
5685 const int kLargeObjectSize =
5686 static_cast<int>(MutablePageMetadata::kPageSize -
5688
5689 for (int i = 0; i < kNumIterations; i++) {
5690 // Isolate tear down started, stop allocation...
5691 if (heap->gc_state() == Heap::TEAR_DOWN) return;
5692
5693 AllocationResult result = local_heap.AllocateRaw(
5696 if (!result.IsFailure()) {
5697 heap->CreateFillerObjectAtBackground(
5699 kSmallObjectSize));
5700 } else {
5701 heap->CollectGarbageFromAnyThread(&local_heap);
5702 }
5703
5704 result = local_heap.AllocateRaw(kMediumObjectSize, AllocationType::kOld,
5707 if (!result.IsFailure()) {
5708 heap->CreateFillerObjectAtBackground(
5710 kMediumObjectSize));
5711 } else {
5712 heap->CollectGarbageFromAnyThread(&local_heap);
5713 }
5714
5715 result = local_heap.AllocateRaw(kLargeObjectSize, AllocationType::kOld,
5718 if (!result.IsFailure()) {
5719 heap->CreateFillerObjectAtBackground(
5721 kLargeObjectSize));
5722 } else {
5723 heap->CollectGarbageFromAnyThread(&local_heap);
5724 }
5725 local_heap.Safepoint();
5726 }
5727
5729 }
5730
 5731 // Schedules the task on a background thread.
5732 static void Schedule(Isolate* isolate) {
5733 auto task = std::make_unique<StressConcurrentAllocationTask>(isolate);
5734 const double kDelayInSeconds = 0.1;
5736 TaskPriority::kUserVisible, std::move(task), kDelayInSeconds);
5737 }
5738
5739 private:
5741};
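// The task above repeatedly allocates small, medium and large objects from a
// background LocalHeap, immediately turning each successful allocation into a
// filler so the memory stays dead, and requests a GC from the background
// thread whenever an allocation fails. It is re-scheduled (via the allocation
// observer below) while --stress-concurrent-allocation is enabled.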
5742
5744 public:
5747
5748 void Step(int bytes_allocated, Address, size_t) override {
5750 if (v8_flags.stress_concurrent_allocation) {
 5751 // Only schedule the task if --stress-concurrent-allocation is enabled. This
 5752 // allows tests to disable the flag even when the Isolate is already initialized.
5754 }
5757 }
5758
5759 private:
5761};
5762
5763namespace {
5764
5765size_t ReturnNull() { return 0; }
5766
5767} // namespace
5768
5769void Heap::SetUpSpaces(LinearAllocationArea& new_allocation_info,
5770 LinearAllocationArea& old_allocation_info) {
 5771 // Ensure SetUpFromReadOnlyHeap has been run.
5773
5774 if (v8_flags.sticky_mark_bits) {
5775 space_[OLD_SPACE] = std::make_unique<StickySpace>(this);
5776 old_space_ = static_cast<OldSpace*>(space_[OLD_SPACE].get());
5777 } else {
5778 space_[OLD_SPACE] = std::make_unique<OldSpace>(this);
5779 old_space_ = static_cast<OldSpace*>(space_[OLD_SPACE].get());
5780 }
5781
5782 if (!v8_flags.single_generation) {
5783 if (!v8_flags.sticky_mark_bits) {
5784 if (v8_flags.minor_ms) {
5785 space_[NEW_SPACE] = std::make_unique<PagedNewSpace>(
5788 } else {
5789 space_[NEW_SPACE] = std::make_unique<SemiSpaceNewSpace>(
5792 }
5793 new_space_ = static_cast<NewSpace*>(space_[NEW_SPACE].get());
5794 }
5795
5797 std::make_unique<NewLargeObjectSpace>(this, NewSpaceCapacity());
5799 static_cast<NewLargeObjectSpace*>(space_[NEW_LO_SPACE].get());
5800 }
5801
5802 space_[CODE_SPACE] = std::make_unique<CodeSpace>(this);
5803 code_space_ = static_cast<CodeSpace*>(space_[CODE_SPACE].get());
5804
5805 space_[LO_SPACE] = std::make_unique<OldLargeObjectSpace>(this);
5806 lo_space_ = static_cast<OldLargeObjectSpace*>(space_[LO_SPACE].get());
5807
5808 space_[CODE_LO_SPACE] = std::make_unique<CodeLargeObjectSpace>(this);
5810 static_cast<CodeLargeObjectSpace*>(space_[CODE_LO_SPACE].get());
5811
5812 space_[TRUSTED_SPACE] = std::make_unique<TrustedSpace>(this);
5813 trusted_space_ = static_cast<TrustedSpace*>(space_[TRUSTED_SPACE].get());
5814
5815 space_[TRUSTED_LO_SPACE] = std::make_unique<TrustedLargeObjectSpace>(this);
5817 static_cast<TrustedLargeObjectSpace*>(space_[TRUSTED_LO_SPACE].get());
5818
5819 if (isolate()->is_shared_space_isolate()) {
5820 DCHECK(!v8_flags.sticky_mark_bits);
5821
5822 space_[SHARED_SPACE] = std::make_unique<SharedSpace>(this);
5823 shared_space_ = static_cast<SharedSpace*>(space_[SHARED_SPACE].get());
5824
5825 space_[SHARED_LO_SPACE] = std::make_unique<SharedLargeObjectSpace>(this);
5827 static_cast<SharedLargeObjectSpace*>(space_[SHARED_LO_SPACE].get());
5828
5829 space_[SHARED_TRUSTED_SPACE] = std::make_unique<SharedTrustedSpace>(this);
5831 static_cast<SharedTrustedSpace*>(space_[SHARED_TRUSTED_SPACE].get());
5832
5834 std::make_unique<SharedTrustedLargeObjectSpace>(this);
5837 }
5838
5839 if (isolate()->has_shared_space()) {
5840 Heap* heap = isolate()->shared_space_isolate()->heap();
5841 shared_allocation_space_ = heap->shared_space_;
5842 shared_lo_allocation_space_ = heap->shared_lo_space_;
5843
5844 shared_trusted_allocation_space_ = heap->shared_trusted_space_;
5845 shared_trusted_lo_allocation_space_ = heap->shared_trusted_lo_space_;
5846 }
5847
5848 main_thread_local_heap()->SetUpMainThread(new_allocation_info,
5849 old_allocation_info);
5850
5851 base::TimeTicks startup_time = base::TimeTicks::Now();
5852
5853 tracer_.reset(new GCTracer(this, startup_time));
5856 if (v8_flags.memory_reducer) memory_reducer_.reset(new MemoryReducer(this));
5858 live_object_stats_.reset(new ObjectStats(this));
5859 dead_object_stats_.reset(new ObjectStats(this));
5860 }
5863 std::make_unique<Heap::AllocationTrackerForDebugging>(this);
5864 }
5865
5866 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
5867 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5868
5870
5871 if (new_space() || v8_flags.sticky_mark_bits) {
5872 minor_gc_job_.reset(new MinorGCJob(this));
5873 }
5874
5875 if (v8_flags.stress_marking > 0) {
5877 }
5878 if (IsStressingScavenge()) {
5880 allocator()->new_space_allocator()->AddAllocationObserver(
5882 }
5883
5884 if (v8_flags.memory_balancer) {
5885 mb_.reset(new MemoryBalancer(this, startup_time));
5886 }
5887}
5888
5889void Heap::InitializeHashSeed() {
5891 uint64_t new_hash_seed;
5892 if (v8_flags.hash_seed == 0) {
5893 int64_t rnd = isolate()->random_number_generator()->NextInt64();
5894 new_hash_seed = static_cast<uint64_t>(rnd);
5895 } else {
5896 new_hash_seed = static_cast<uint64_t>(v8_flags.hash_seed);
5897 }
5898 Tagged<ByteArray> hash_seed = ReadOnlyRoots(this).hash_seed();
5899 MemCopy(hash_seed->begin(), reinterpret_cast<uint8_t*>(&new_hash_seed),
5900 kInt64Size);
5901}
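// With the default --hash-seed=0, a fresh random seed is drawn from the
// isolate's RNG, so string hashes differ between runs; a fixed non-zero
// --hash-seed makes hashing deterministic, which helps when reproducing
// hash-ordering or fuzzer-reported issues.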
5902
5903std::shared_ptr<v8::TaskRunner> Heap::GetForegroundTaskRunner(
5904 TaskPriority priority) const {
5906 reinterpret_cast<v8::Isolate*>(isolate()), priority);
5907}
5908
5909// static
5910void Heap::InitializeOncePerProcess() {
5911#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
5912 HeapAllocator::InitializeOncePerProcess();
5913#endif
5915 if (v8_flags.predictable) {
5917 }
5918}
5919
5920void Heap::PrintMaxMarkingLimitReached() {
5921 PrintF("\n### Maximum marking limit reached = %.02lf\n",
5922 max_marking_limit_reached_.load(std::memory_order_relaxed));
5923}
5924
5925void Heap::PrintMaxNewSpaceSizeReached() {
5926 PrintF("\n### Maximum new space size reached = %.02lf\n",
5928}
5929
5930int Heap::NextStressMarkingLimit() {
5931 return isolate()->fuzzer_rng()->NextInt(v8_flags.stress_marking + 1);
5932}
5933
5934void Heap::WeakenDescriptorArrays(
5935 GlobalHandleVector<DescriptorArray> strong_descriptor_arrays) {
5936 if (incremental_marking()->IsMajorMarking()) {
5937 // During incremental/concurrent marking regular DescriptorArray objects are
5938 // treated with custom weakness. This weakness depends on
5939 // DescriptorArray::raw_gc_state() which is not set up properly upon
5940 // deserialization. The strong arrays are transitioned to weak ones at the
5941 // end of the GC.
5943 std::move(strong_descriptor_arrays));
5944 return;
5945 }
5946
5947 // No GC is running, weaken the arrays right away.
5949 Tagged<Map> descriptor_array_map =
5950 ReadOnlyRoots(isolate()).descriptor_array_map();
5951 for (auto it = strong_descriptor_arrays.begin();
5952 it != strong_descriptor_arrays.end(); ++it) {
5953 Tagged<DescriptorArray> array = it.raw();
5954 DCHECK(IsStrongDescriptorArray(array));
5955 array->set_map_safe_transition_no_write_barrier(isolate(),
5956 descriptor_array_map);
5957 DCHECK_EQ(array->raw_gc_state(kRelaxedLoad), 0);
5958 }
5959}
5960
5961void Heap::NotifyDeserializationComplete() {
5962 // There are no concurrent/background threads yet.
5964
5966
5967#if DEBUG
5968 PagedSpaceIterator spaces(this);
5969 for (PagedSpace* s = spaces.Next(); s != nullptr; s = spaces.Next()) {
5970 // All pages right after bootstrapping must be marked as never-evacuate.
5971 for (PageMetadata* p : *s) {
5972 DCHECK(p->Chunk()->NeverEvacuate());
5973 }
5974 }
5975#endif // DEBUG
5976
5977 if (v8_flags.stress_concurrent_allocation) {
5984 }
5985
5986 // Deserialization will never create objects in new space.
5987 DCHECK_IMPLIES(new_space(), new_space()->Size() == 0);
5988 DCHECK_IMPLIES(new_lo_space(), new_lo_space()->Size() == 0);
5989
5991}
5992
5993void Heap::NotifyBootstrapComplete() {
5994 // This function is invoked for each native context creation. We are
5995 // interested only in the first native context.
5998 }
5999}
6000
6001void Heap::NotifyOldGenerationExpansion(
6002 LocalHeap* local_heap, AllocationSpace space,
6003 MutablePageMetadata* chunk_metadata,
6004 OldGenerationExpansionNotificationOrigin notification_origin) {
6005 // Pages created during bootstrapping may contain immortal immovable objects.
6006 if (!deserialization_complete()) {
6007 DCHECK_NE(NEW_SPACE, chunk_metadata->owner()->identity());
6008 chunk_metadata->Chunk()->MarkNeverEvacuate();
6009 }
6010 if (IsAnyCodeSpace(space)) {
6011 isolate()->AddCodeMemoryChunk(chunk_metadata);
6012 }
6013
 6014 // Don't notify the MemoryReducer when called from a client heap, as that
 6015 // would not be thread safe.
6016 const size_t kMemoryReducerActivationThreshold = 1 * MB;
6017 if (local_heap->is_main_thread_for(this) && memory_reducer() != nullptr &&
6020 kMemoryReducerActivationThreshold &&
6021 (notification_origin ==
6022 OldGenerationExpansionNotificationOrigin::kFromSameHeap) &&
6023 v8_flags.memory_reducer_for_small_heaps) {
6025 }
6026}
6027
6028void Heap::SetEmbedderRootsHandler(EmbedderRootsHandler* handler) {
6030}
6031
6032EmbedderRootsHandler* Heap::GetEmbedderRootsHandler() const {
6034}
6035
6036void Heap::AttachCppHeap(v8::CppHeap* cpp_heap) {
6037 // Only a single CppHeap can be attached at a time.
6039
6041 CppHeap::From(cpp_heap)->AttachIsolate(isolate());
6042 cpp_heap_ = cpp_heap;
6043}
6044
6045std::optional<StackState> Heap::overridden_stack_state() const {
6046 if (!embedder_stack_state_origin_) return {};
6047 return embedder_stack_state_;
6048}
6049
6050void Heap::SetStackStart() {
6051 // If no main thread local heap has been set up (we're still in the
6052 // deserialization process), we don't need to set the stack start.
6053 if (main_thread_local_heap_ == nullptr) return;
6054 stack().SetStackStart();
6055}
6056
6061
6062const ::heap::base::Stack& Heap::stack() const {
6065}
6066
6067void Heap::StartTearDown() {
6068 if (cpp_heap_) {
6069 // This may invoke a GC in case marking is running to get us into a
6070 // well-defined state for tear down.
6072 }
6073
6074 // Stressing incremental marking should make it likely to force a GC here with
6075 // a CppHeap present. Stress compaction serves as a more deterministic way to
6076 // trigger such a GC.
6077 if (v8_flags.stress_compaction) {
6079 }
6080
6081 // the heap during teardown.
6083
6084 if (v8_flags.concurrent_marking) {
6086 }
6087
6089
6090 // Background threads may allocate and block until GC is performed. However
6091 // this might never happen when the main thread tries to quit and doesn't
6092 // process the event queue anymore. Avoid this deadlock by allowing all
6093 // allocations after tear down was requested to make sure all background
6094 // threads finish.
6095 collection_barrier_->NotifyShutdownRequested();
6096
6097 // Main thread isn't going to allocate anymore.
6099
6101}
6102
6103void Heap::TearDownWithSharedHeap() {
6105
6106 // Assert that there are no background threads left and no executable memory
6107 // chunks are unprotected.
6109
6110 // Now that all threads are stopped, verify the heap before tearing down the
6111 // heap/isolate.
6113
6114 // Might use the external pointer which might be in the shared heap.
6116
6117 // Publish shared object worklist for the main thread if incremental marking
6118 // is enabled for the shared heap.
6120}
6121
6122void Heap::TearDown() {
6124
6125 // Assert that there are no background threads left and no executable memory
6126 // chunks are unprotected.
6128
6129 DCHECK(concurrent_marking()->IsStopped());
6130
6131 // It's too late for Heap::Verify() here, as parts of the Isolate are
6132 // already gone by the time this is called.
6133
6135
6136 if (v8_flags.fuzzer_gc_analysis) {
6137 if (v8_flags.stress_marking > 0) {
6139 }
6140 if (IsStressingScavenge()) {
6142 }
6143 }
6144
6145 if (cpp_heap_) {
6147 cpp_heap_ = nullptr;
6148 isolate_->RunReleaseCppHeapCallback(std::move(owning_cpp_heap_));
6149 }
6150
6151 minor_gc_job_.reset();
6152
6157 }
6159
6160 if (IsStressingScavenge()) {
6161 allocator()->new_space_allocator()->RemoveAllocationObserver(
6164 stress_scavenge_observer_ = nullptr;
6165 }
6166
6168 mark_compact_collector_->TearDown();
6170 }
6171
6173 minor_mark_sweep_collector_->TearDown();
6175 }
6176
6177 sweeper_->TearDown();
6178 sweeper_.reset();
6179
6180 scavenger_collector_.reset();
6181 array_buffer_sweeper_.reset();
6182 incremental_marking_.reset();
6183 concurrent_marking_.reset();
6184
6185 memory_measurement_.reset();
6188
6189 if (memory_reducer_ != nullptr) {
6190 memory_reducer_->TearDown();
6191 memory_reducer_.reset();
6192 }
6193
6194 live_object_stats_.reset();
6195 dead_object_stats_.reset();
6196
6197 embedder_roots_handler_ = nullptr;
6198
6199 tracer_.reset();
6200
6202
6203 for (int i = FIRST_MUTABLE_SPACE; i <= LAST_MUTABLE_SPACE; i++) {
6204 space_[i].reset();
6205 }
6206
6207 read_only_space_ = nullptr;
6208
6211
6212 StrongRootsEntry* next = nullptr;
6214 current = next) {
6215 next = current->next;
6216 delete current;
6217 }
6218 strong_roots_head_ = nullptr;
6219
6220 memory_allocator_.reset();
6221
6222 heap_profiler_.reset();
6223}
6224
6225// static
6226bool Heap::IsFreeSpaceValid(FreeSpace object) {
6228 Tagged<Object> free_space_map =
6229 heap->isolate()->root(RootIndex::kFreeSpaceMap);
6230 CHECK(!heap->deserialization_complete() ||
6231 object.map_slot().contains_map_value(free_space_map.ptr()));
6232 CHECK_LE(FreeSpace::kNextOffset + kTaggedSize, object.size(kRelaxedLoad));
6233 return true;
6234}
6235
6236void Heap::AddGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
6237 GCType gc_type, void* data) {
6239 callback, reinterpret_cast<v8::Isolate*>(isolate()), gc_type, data);
6240}
6241
6242void Heap::RemoveGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
6243 void* data) {
6245}
6246
6247void Heap::AddGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
6248 GCType gc_type, void* data) {
6250 callback, reinterpret_cast<v8::Isolate*>(isolate()), gc_type, data);
6251}
6252
6253void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
6254 void* data) {
6256}
6257
6258namespace {
6259Handle<WeakArrayList> CompactWeakArrayList(Heap* heap,
6261 AllocationType allocation) {
6262 if (array->length() == 0) {
6263 return array;
6264 }
6265 int new_length = array->CountLiveWeakReferences();
6266 if (new_length == array->length()) {
6267 return array;
6268 }
6269
6271 heap->isolate(),
6272 handle(ReadOnlyRoots(heap).empty_weak_array_list(), heap->isolate()),
6273 new_length, allocation);
6274 // Allocation might have caused GC and turned some of the elements into
6275 // cleared weak heap objects. Count the number of live references again and
6276 // fill in the new array.
6277 int copy_to = 0;
6278 for (int i = 0; i < array->length(); i++) {
6279 Tagged<MaybeObject> element = array->Get(i);
6280 if (element.IsCleared()) continue;
6281 new_array->Set(copy_to++, element);
6282 }
6283 new_array->set_length(copy_to);
6284 return new_array;
6285}
6286
6287} // anonymous namespace
6288
6289void Heap::CompactWeakArrayLists() {
6290 // Find known PrototypeUsers and compact them.
6291 std::vector<Handle<PrototypeInfo>> prototype_infos;
6292 {
6293 HeapObjectIterator iterator(this);
6294 for (Tagged<HeapObject> o = iterator.Next(); !o.is_null();
6295 o = iterator.Next()) {
6296 if (IsPrototypeInfo(*o)) {
6297 Tagged<PrototypeInfo> prototype_info = Cast<PrototypeInfo>(o);
6298 if (IsWeakArrayList(prototype_info->prototype_users())) {
6299 prototype_infos.emplace_back(handle(prototype_info, isolate()));
6300 }
6301 }
6302 }
6303 }
6304 for (auto& prototype_info : prototype_infos) {
6306 Cast<WeakArrayList>(prototype_info->prototype_users()), isolate());
6307 DCHECK(InOldSpace(*array) ||
6308 *array == ReadOnlyRoots(this).empty_weak_array_list());
6312 prototype_info->set_prototype_users(new_array);
6313 }
6314
6315 // Find known WeakArrayLists and compact them.
6316 Handle<WeakArrayList> scripts(script_list(), isolate());
6317 DCHECK(InOldSpace(*scripts));
6318 scripts = CompactWeakArrayList(this, scripts, AllocationType::kOld);
6319 set_script_list(*scripts);
6320}
6321
6322void Heap::AddRetainedMaps(DirectHandle<NativeContext> context,
6324 Handle<WeakArrayList> array(Cast<WeakArrayList>(context->retained_maps()),
6325 isolate());
6326 int new_maps_size = static_cast<int>(maps.size()) * kRetainMapEntrySize;
6327 if (array->length() + new_maps_size > array->capacity()) {
6328 CompactRetainedMaps(*array);
6329 }
6330 int cur_length = array->length();
6331 array =
6332 WeakArrayList::EnsureSpace(isolate(), array, cur_length + new_maps_size);
6333 if (*array != context->retained_maps()) {
6334 context->set_retained_maps(*array);
6335 }
6336
6337 {
6339 Tagged<WeakArrayList> raw_array = *array;
6340 for (DirectHandle<Map> map : maps) {
6342
6343 if (map->is_in_retained_map_list()) {
6344 continue;
6345 }
6346
6347 raw_array->Set(cur_length, MakeWeak(*map));
6348 raw_array->Set(cur_length + 1,
6349 Smi::FromInt(v8_flags.retain_maps_for_n_gc));
6350 cur_length += kRetainMapEntrySize;
6351 raw_array->set_length(cur_length);
6352
6353 map->set_is_in_retained_map_list(true);
6354 }
6355 }
6356}
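// The retained_maps list stores kRetainMapEntrySize (two) slots per entry: a
// weak reference to the Map followed by a Smi age that starts at
// v8_flags.retain_maps_for_n_gc; CompactRetainedMaps() below keeps each age
// paired with its map while dropping cleared entries.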
6357
6358void Heap::CompactRetainedMaps(Tagged<WeakArrayList> retained_maps) {
6359 int length = retained_maps->length();
6360 int new_length = 0;
6361 // This loop compacts the array by removing cleared weak cells.
6362 for (int i = 0; i < length; i += kRetainMapEntrySize) {
6363 Tagged<MaybeObject> maybe_object = retained_maps->Get(i);
6364 if (maybe_object.IsCleared()) {
6365 continue;
6366 }
6367
6368 DCHECK(maybe_object.IsWeak());
6369
6370 Tagged<MaybeObject> age = retained_maps->Get(i + 1);
6371 DCHECK(IsSmi(age));
6372 if (i != new_length) {
6373 retained_maps->Set(new_length, maybe_object);
6374 retained_maps->Set(new_length + 1, age);
6375 }
6377 }
6378 Tagged<HeapObject> undefined = ReadOnlyRoots(this).undefined_value();
6379 for (int i = new_length; i < length; i++) {
6380 retained_maps->Set(i, undefined);
6381 }
6382 if (new_length != length) retained_maps->set_length(new_length);
6383}
6384
6385void Heap::FatalProcessOutOfMemory(const char* location) {
6387}
6388
6389#ifdef DEBUG
6390
6391class PrintHandleVisitor : public RootVisitor {
6392 public:
6393 void VisitRootPointers(Root root, const char* description,
6395 for (FullObjectSlot p = start; p < end; ++p)
6396 PrintF(" handle %p to %p\n", p.ToVoidPtr(),
6397 reinterpret_cast<void*>((*p).ptr()));
6398 }
6399};
6400
6401void Heap::PrintHandles() {
6402 PrintF("Handles:\n");
6403 PrintHandleVisitor v;
6404 isolate_->handle_scope_implementer()->Iterate(&v);
6405}
6406
6407#endif
6408
6410 public:
6415 void VisitRootPointers(Root root, const char* description,
6417 handle_count_ += end - start;
6418 }
6419
6420 private:
6421 ptrdiff_t handle_count_;
6422};
6423
6424void Heap::CheckHandleCount() {
6426 isolate_->handle_scope_implementer()->Iterate(&v);
6427}
6428
6429// static
6430int Heap::InsertIntoRememberedSetFromCode(MutablePageMetadata* chunk,
6431 size_t slot_offset) {
6432 // This is called during runtime by a builtin, therefore it is run in the main
6433 // thread.
6434 DCHECK_NULL(LocalHeap::Current());
6436 return 0;
6437}
6438
6439#ifdef DEBUG
6440void Heap::VerifySlotRangeHasNoRecordedSlots(Address start, Address end) {
6441#ifndef V8_DISABLE_WRITE_BARRIERS
6446#endif
6447}
6448#endif
6449
6450void Heap::ClearRecordedSlotRange(Address start, Address end) {
6451#ifndef V8_DISABLE_WRITE_BARRIERS
6453 DCHECK(!chunk->IsLargePage());
6454#if !V8_ENABLE_STICKY_MARK_BITS_BOOL
6455 if (!chunk->InYoungGeneration())
6456#endif
6457 {
6458 PageMetadata* page = PageMetadata::cast(chunk->Metadata());
6459 // This method will be invoked on objects in shared space for
6460 // internalization and string forwarding during GC.
6461 DCHECK(page->owner_identity() == OLD_SPACE ||
6462 page->owner_identity() == TRUSTED_SPACE ||
6463 page->owner_identity() == SHARED_SPACE);
6464
6465 if (!page->SweepingDone()) {
6472 }
6473 }
6474#endif
6475}
6476
6480 PagedSpace* space = heap_->paged_space(counter_++);
6481 if (space) return space;
6482 }
6483 return nullptr;
6484}
6485
6487 public:
6488 virtual ~HeapObjectsFilter() = default;
6489 virtual bool SkipObject(Tagged<HeapObject> object) = 0;
6490};
6491
6493 public:
6497
6498 ~UnreachableObjectsFilter() override = default;
6499
6500 bool SkipObject(Tagged<HeapObject> object) override {
6501 // Space object iterators should skip free space or filler objects.
6502 DCHECK(!IsFreeSpaceOrFiller(object));
6503 // If the bucket corresponding to the object's chunk does not exist, or the
6504 // object is not found in the bucket, return true.
6506 if (reachable_.count(chunk) == 0) return true;
6507 return reachable_[chunk]->count(object) == 0;
6508 }
6509
6510 private:
6511 using BucketType = std::unordered_set<Tagged<HeapObject>, Object::Hasher>;
6512
6514 // If the bucket corresponding to the object's chunk does not exist, then
6515 // create an empty bucket.
6517 if (reachable_.count(chunk) == 0) {
6518 reachable_[chunk] = std::make_unique<BucketType>();
6519 }
6520 // Insert the object if not present; return whether it was indeed inserted.
6521 if (reachable_[chunk]->count(object)) return false;
6522 reachable_[chunk]->insert(object);
6523 return true;
6524 }
6525
6527 public:
6530
6531 void VisitMapPointer(Tagged<HeapObject> object) override {
6533 }
6538
6543
6545 InstructionStreamSlot slot) override {
6546 Tagged<Object> maybe_code = slot.load(code_cage_base());
6547 Tagged<HeapObject> heap_object;
6548 if (maybe_code.GetHeapObject(&heap_object)) {
6549 MarkHeapObject(heap_object);
6550 }
6551 }
6552
6554 RelocInfo* rinfo) final {
6556 InstructionStream::FromTargetAddress(rinfo->target_address());
6557 MarkHeapObject(target);
6558 }
6560 RelocInfo* rinfo) final {
6561 MarkHeapObject(rinfo->target_object(cage_base()));
6562 }
6563
6564 void VisitRootPointers(Root root, const char* description,
6567 }
6568 void VisitRootPointers(Root root, const char* description,
6570 OffHeapObjectSlot end) override {
6572 }
6573
6575 while (!marking_stack_.empty()) {
6577 marking_stack_.pop_back();
6578 VisitObject(filter_->heap_->isolate(), obj, this);
6579 }
6580 }
6581
6582 private:
6583 template <typename TSlot>
6585 // Treat weak references as strong.
6586 for (TSlot p = start; p < end; ++p) {
6587 typename TSlot::TObject object = p.load(cage_base());
6588#ifdef V8_ENABLE_DIRECT_HANDLE
6589 if (object.ptr() == kTaggedNullAddress) continue;
6590#endif
6591 Tagged<HeapObject> heap_object;
6592 if (object.GetHeapObject(&heap_object)) {
6593 MarkHeapObject(heap_object);
6594 }
6595 }
6596 }
6597
6599 if (filter_->MarkAsReachable(heap_object)) {
6600 marking_stack_.push_back(heap_object);
6601 }
6602 }
6603
6605 std::vector<Tagged<HeapObject>> marking_stack_;
6606 };
6607
6608 friend class MarkingVisitor;
6609
6611 MarkingVisitor visitor(this);
6612 heap_->stack().SetMarkerIfNeededAndCallback(
6613 [this, &visitor]() { heap_->IterateRoots(&visitor, {}); });
6614 visitor.TransitiveClosure();
6615 }
6616
6619 std::unordered_map<MemoryChunkMetadata*, std::unique_ptr<BucketType>,
6622};
6623
6631
6633 const SafepointScope& safepoint_scope,
6634 HeapObjectsFiltering filtering)
6635 : HeapObjectIterator(heap, nullptr, filtering) {}
6636
6638 Heap* heap, SafepointScope* safepoint_scope_or_nullptr,
6639 HeapObjectsFiltering filtering)
6640 : heap_(heap),
6641 safepoint_scope_(safepoint_scope_or_nullptr),
6642 space_iterator_(heap_) {
6644 switch (filtering) {
6645 case kFilterUnreachable:
6646 filter_ = std::make_unique<UnreachableObjectsFilter>(heap_);
6647 break;
6648 default:
6649 break;
6650 }
6651 // Start the iteration.
6654}
6655
6657
6659 if (!filter_) return NextObject();
6660
6662 while (!obj.is_null() && filter_->SkipObject(obj)) obj = NextObject();
6663 return obj;
6664}
6665
6667 // No iterator means we are done.
6668 if (!object_iterator_) return Tagged<HeapObject>();
6669
6671 // If the current iterator has more objects we are fine.
6672 if (!obj.is_null()) return obj;
 6673 // Go through the spaces looking for one that has objects.
6674 while (space_iterator_.HasNext()) {
6676 obj = object_iterator_->Next();
6677 if (!obj.is_null()) return obj;
6678 }
6679 // Done with the last space.
6680 object_iterator_.reset();
6681 return Tagged<HeapObject>();
6682}
6683
6684void Heap::UpdateTotalGCTime(base::TimeDelta duration) {
6685 total_gc_time_ms_ += duration;
6686}
6687
6689 int last = 0;
6690 Isolate* isolate = heap_->isolate();
6691 for (size_t i = 0; i < young_strings_.size(); ++i) {
6693 if (IsTheHole(o, isolate)) {
6694 continue;
6695 }
6696 // The real external string is already in one of these vectors and was or
6697 // will be processed. Re-processing it will add a duplicate to the vector.
6698 if (IsThinString(o)) continue;
6699 DCHECK(IsExternalString(o));
6701 young_strings_[last++] = o;
6702 } else {
6703 old_strings_.push_back(o);
6704 }
6705 }
6706 young_strings_.resize(last);
6707}
6708
6710 CleanUpYoung();
6711 int last = 0;
6712 Isolate* isolate = heap_->isolate();
6713 for (size_t i = 0; i < old_strings_.size(); ++i) {
6714 Tagged<Object> o = old_strings_[i];
6715 if (IsTheHole(o, isolate)) {
6716 continue;
6717 }
6718 // The real external string is already in one of these vectors and was or
6719 // will be processed. Re-processing it will add a duplicate to the vector.
6720 if (IsThinString(o)) continue;
6721 DCHECK(IsExternalString(o));
6723 old_strings_[last++] = o;
6724 }
6725 old_strings_.resize(last);
6726 if (v8_flags.verify_heap) {
6727 Verify();
6728 }
6729}
6730
6732 for (size_t i = 0; i < young_strings_.size(); ++i) {
6733 Tagged<Object> o = young_strings_[i];
 6734 // Don't finalize thin strings.
6735 if (IsThinString(o)) continue;
6736 heap_->FinalizeExternalString(Cast<ExternalString>(o));
6737 }
6738 young_strings_.clear();
6739 for (size_t i = 0; i < old_strings_.size(); ++i) {
6740 Tagged<Object> o = old_strings_[i];
 6741 // Don't finalize thin strings.
6742 if (IsThinString(o)) continue;
6743 heap_->FinalizeExternalString(Cast<ExternalString>(o));
6744 }
6745 old_strings_.clear();
6746}
6747
6748void Heap::RememberUnmappedPage(Address page, bool compacted) {
6749 // Tag the page pointer to make it findable in the dump file.
6750 if (compacted) {
6751 page ^= 0xC1EAD & (PageMetadata::kPageSize - 1); // Cleared.
6752 } else {
6753 page ^= 0x1D1ED & (PageMetadata::kPageSize - 1); // I died.
6754 }
6758}
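// Only the low, in-page bits are XORed in above, so (assuming a page-aligned
// address) the original page base can still be recovered from a crash dump by
// masking the tag back out; the 0xC1EAD / 0x1D1ED patterns merely make
// unmapped-page addresses easy to spot.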
6759
6760size_t Heap::YoungArrayBufferBytes() {
6761 return array_buffer_sweeper()->YoungBytes();
6762}
6763
6764uint64_t Heap::UpdateExternalMemory(int64_t delta) {
6765 uint64_t amount = external_memory_.UpdateAmount(delta);
6766 uint64_t low_since_mark_compact = external_memory_.low_since_mark_compact();
6767 if (amount < low_since_mark_compact) {
6769 }
6770 return amount;
6771}
6772
6773size_t Heap::OldArrayBufferBytes() {
6774 return array_buffer_sweeper()->OldBytes();
6775}
6776
6777StrongRootsEntry* Heap::RegisterStrongRoots(const char* label,
6780 // We're either on the main thread, or in a background thread with an active
6781 // local heap.
6782 DCHECK(isolate()->CurrentLocalHeap()->IsRunning());
6783
6785
6787 entry->start = start;
6788 entry->end = end;
6789 entry->prev = nullptr;
6790 entry->next = strong_roots_head_;
6791
6792 if (strong_roots_head_) {
6794 strong_roots_head_->prev = entry;
6795 }
6796 strong_roots_head_ = entry;
6797
6798 return entry;
6799}
6800
6801void Heap::UpdateStrongRoots(StrongRootsEntry* entry, FullObjectSlot start,
6803 entry->start = start;
6804 entry->end = end;
6805}
6806
6807void Heap::UnregisterStrongRoots(StrongRootsEntry* entry) {
6808 // We're either on the main thread, or in a background thread with an active
6809 // local heap.
6810 DCHECK(isolate()->CurrentLocalHeap()->IsRunning());
6811
6813
6814 StrongRootsEntry* prev = entry->prev;
6815 StrongRootsEntry* next = entry->next;
6816
6817 if (prev) prev->next = next;
6818 if (next) next->prev = prev;
6819
6820 if (strong_roots_head_ == entry) {
6821 DCHECK_NULL(prev);
6822 strong_roots_head_ = next;
6823 }
6824
6825 delete entry;
6826}
6827
6828void Heap::SetBuiltinsConstantsTable(Tagged<FixedArray> cache) {
6829 set_builtins_constants_table(cache);
6830}
6831
6832void Heap::SetDetachedContexts(Tagged<WeakArrayList> detached_contexts) {
6833 set_detached_contexts(detached_contexts);
6834}
6835
6836bool Heap::HasDirtyJSFinalizationRegistries() {
6837 return !IsUndefined(dirty_js_finalization_registries_list(), isolate());
6838}
6839
6840void Heap::PostFinalizationRegistryCleanupTaskIfNeeded() {
6841 // Only one cleanup task is posted at a time.
6844 return;
6845 }
6846 auto task = std::make_unique<FinalizationRegistryCleanupTask>(this);
6847 task_runner_->PostNonNestableTask(std::move(task));
6849}
6850
6851void Heap::EnqueueDirtyJSFinalizationRegistry(
6852 Tagged<JSFinalizationRegistry> finalization_registry,
6853 std::function<void(Tagged<HeapObject> object, ObjectSlot slot,
6854 Tagged<Object> target)>
6855 gc_notify_updated_slot) {
6856 // Add a FinalizationRegistry to the tail of the dirty list.
6858 IsJSFinalizationRegistry(dirty_js_finalization_registries_list()));
6859 DCHECK(IsUndefined(finalization_registry->next_dirty(), isolate()));
6860 DCHECK(!finalization_registry->scheduled_for_cleanup());
6861 finalization_registry->set_scheduled_for_cleanup(true);
6864 set_dirty_js_finalization_registries_list(finalization_registry);
6865 // dirty_js_finalization_registries_list_ is rescanned by
6866 // ProcessWeakListRoots.
6867 } else {
6870 tail->set_next_dirty(finalization_registry);
6871 gc_notify_updated_slot(
6872 tail, tail->RawField(JSFinalizationRegistry::kNextDirtyOffset),
6873 finalization_registry);
6874 }
6876 // dirty_js_finalization_registries_list_tail_ is rescanned by
6877 // ProcessWeakListRoots.
6878}
6879
6881Heap::DequeueDirtyJSFinalizationRegistry() {
6882 // Take a FinalizationRegistry from the head of the dirty list for fairness.
6886 isolate());
6888 head->set_next_dirty(ReadOnlyRoots(this).undefined_value());
6891 ReadOnlyRoots(this).undefined_value());
6892 }
6893 return head;
6894 }
6895 return {};
6896}
6897
6898void Heap::RemoveDirtyFinalizationRegistriesOnContext(
6899 Tagged<NativeContext> context) {
6901
6902 Isolate* isolate = this->isolate();
6903 Tagged<Object> prev = ReadOnlyRoots(isolate).undefined_value();
6905 while (!IsUndefined(current, isolate)) {
6906 Tagged<JSFinalizationRegistry> finalization_registry =
6908 if (finalization_registry->native_context() == context) {
6909 if (IsUndefined(prev, isolate)) {
6911 finalization_registry->next_dirty());
6912 } else {
6913 Cast<JSFinalizationRegistry>(prev)->set_next_dirty(
6914 finalization_registry->next_dirty());
6915 }
6916 finalization_registry->set_scheduled_for_cleanup(false);
6917 current = finalization_registry->next_dirty();
6918 finalization_registry->set_next_dirty(
6919 ReadOnlyRoots(isolate).undefined_value());
6920 } else {
6921 prev = current;
6922 current = finalization_registry->next_dirty();
6923 }
6924 }
6926}
6927
6928void Heap::KeepDuringJob(DirectHandle<HeapObject> target) {
6929 DCHECK(IsUndefined(weak_refs_keep_during_job()) ||
6930 IsOrderedHashSet(weak_refs_keep_during_job()));
6932 if (IsUndefined(weak_refs_keep_during_job(), isolate())) {
6933 table = isolate()->factory()->NewOrderedHashSet();
6934 } else {
6935 table =
6936 handle(Cast<OrderedHashSet>(weak_refs_keep_during_job()), isolate());
6937 }
6938 MaybeHandle<OrderedHashSet> maybe_table =
6939 OrderedHashSet::Add(isolate(), table, target);
6940 if (!maybe_table.ToHandle(&table)) {
6941 FATAL(
6942 "Fatal JavaScript error: Too many distinct WeakRef objects "
6943 "created or dereferenced during single event loop turn.");
6944 }
6945 set_weak_refs_keep_during_job(*table);
6946}
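// KeepDuringJob() backs the WeakRef "kept objects" list: targets observed via
// WeakRef (on construction and on deref()) are held strongly in this
// OrderedHashSet until ClearKeptObjects() below drops them, which the spec
// ties roughly to the end of the current synchronous run of script.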
6947
6948void Heap::ClearKeptObjects() {
6949 set_weak_refs_keep_during_job(ReadOnlyRoots(isolate()).undefined_value());
6950}
6951
6952size_t Heap::NumberOfTrackedHeapObjectTypes() {
6954}
6955
6956size_t Heap::ObjectCountAtLastGC(size_t index) {
6957 if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
6958 return 0;
6959 return live_object_stats_->object_count_last_gc(index);
6960}
6961
6962size_t Heap::ObjectSizeAtLastGC(size_t index) {
6963 if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
6964 return 0;
6965 return live_object_stats_->object_size_last_gc(index);
6966}
6967
6968bool Heap::GetObjectTypeName(size_t index, const char** object_type,
6969 const char** object_sub_type) {
6970 if (index >= ObjectStats::OBJECT_STATS_COUNT) return false;
6971
6972 switch (static_cast<int>(index)) {
6973#define COMPARE_AND_RETURN_NAME(name) \
6974 case name: \
6975 *object_type = #name; \
6976 *object_sub_type = ""; \
6977 return true;
6979#undef COMPARE_AND_RETURN_NAME
6980
6981#define COMPARE_AND_RETURN_NAME(name) \
6982 case ObjectStats::FIRST_VIRTUAL_TYPE + \
6983 static_cast<int>(ObjectStats::VirtualInstanceType::name): \
6984 *object_type = #name; \
6985 *object_sub_type = ""; \
6986 return true;
6988#undef COMPARE_AND_RETURN_NAME
6989 }
6990 return false;
6991}
6992
6993size_t Heap::NumberOfNativeContexts() {
6994 int result = 0;
6996 while (!IsUndefined(context, isolate())) {
6997 ++result;
6999 context = native_context->next_context_link();
7000 }
7001 return result;
7002}
7003
7004std::vector<Handle<NativeContext>> Heap::FindAllNativeContexts() {
7005 std::vector<Handle<NativeContext>> result;
7007 while (!IsUndefined(context, isolate())) {
7009 result.push_back(handle(native_context, isolate()));
7010 context = native_context->next_context_link();
7011 }
7012 return result;
7013}
7014
7015std::vector<Tagged<WeakArrayList>> Heap::FindAllRetainedMaps() {
7016 std::vector<Tagged<WeakArrayList>> result;
7018 while (!IsUndefined(context, isolate())) {
7020 result.push_back(Cast<WeakArrayList>(native_context->retained_maps()));
7021 context = native_context->next_context_link();
7022 }
7023 return result;
7024}
7025
7026size_t Heap::NumberOfDetachedContexts() {
7027 // The detached_contexts() array has two entries per detached context.
7028 return detached_contexts()->length() / 2;
7029}
7030
7031bool Heap::AllowedToBeMigrated(Tagged<Map> map, Tagged<HeapObject> object,
7032 AllocationSpace dst) {
7033 // Object migration is governed by the following rules:
7034 //
7035 // 1) Objects in new-space can be migrated to the old space
7036 // that matches their target space or they stay in new-space.
7037 // 2) Objects in old-space stay in the same space when migrating.
7038 // 3) Fillers (two or more words) can migrate due to left-trimming of
7039 // fixed arrays in new-space or old space.
 7040 // 4) Fillers (one word) can never migrate; they are skipped by
 7041 // incremental marking explicitly to prevent an invalid pattern.
7042 //
7043 // Since this function is used for debugging only, we do not place
7044 // asserts here, but check everything explicitly.
7045 if (map == ReadOnlyRoots(this).one_pointer_filler_map()) {
7046 return false;
7047 }
7048 InstanceType type = map->instance_type();
7050 AllocationSpace src = chunk->owner_identity();
7051 switch (src) {
7052 case NEW_SPACE:
7053 return dst == NEW_SPACE || dst == OLD_SPACE;
7054 case OLD_SPACE:
7055 return dst == OLD_SPACE;
7056 case CODE_SPACE:
7057 return dst == CODE_SPACE && type == INSTRUCTION_STREAM_TYPE;
7058 case SHARED_SPACE:
7059 return dst == SHARED_SPACE;
7060 case TRUSTED_SPACE:
7061 return dst == TRUSTED_SPACE;
7063 return dst == SHARED_TRUSTED_SPACE;
7064 case LO_SPACE:
7065 case CODE_LO_SPACE:
7066 case NEW_LO_SPACE:
7067 case SHARED_LO_SPACE:
7068 case TRUSTED_LO_SPACE:
7070 case RO_SPACE:
7071 return false;
7072 }
7073 UNREACHABLE();
7074}
7075
7076size_t Heap::EmbedderAllocationCounter() const {
7078}
7079
7080void Heap::CreateObjectStats() {
7082 if (!live_object_stats_) {
7083 live_object_stats_.reset(new ObjectStats(this));
7084 }
7085 if (!dead_object_stats_) {
7086 dead_object_stats_.reset(new ObjectStats(this));
7087 }
7088}
7089
7090Tagged<Map> Heap::GcSafeMapOfHeapObject(Tagged<HeapObject> object) {
7091 PtrComprCageBase cage_base(isolate());
7092 MapWord map_word = object->map_word(cage_base, kRelaxedLoad);
7093 if (map_word.IsForwardingAddress()) {
7094 return map_word.ToForwardingAddress(object)->map(cage_base);
7095 }
7096 return map_word.ToMap();
7097}
7098
7099Tagged<GcSafeCode> Heap::GcSafeGetCodeFromInstructionStream(
7100 Tagged<HeapObject> instruction_stream, Address inner_pointer) {
7102 UncheckedCast<InstructionStream>(instruction_stream);
7103 DCHECK(!istream.is_null());
7104 DCHECK(GcSafeInstructionStreamContains(istream, inner_pointer));
7105 return UncheckedCast<GcSafeCode>(istream->raw_code(kAcquireLoad));
7106}
7107
7108bool Heap::GcSafeInstructionStreamContains(
7109 Tagged<InstructionStream> instruction_stream, Address addr) {
7110 Tagged<Map> map = GcSafeMapOfHeapObject(instruction_stream);
7111 DCHECK_EQ(map, ReadOnlyRoots(this).instruction_stream_map());
7112
7113 Builtin builtin_lookup_result =
7115 if (Builtins::IsBuiltinId(builtin_lookup_result)) {
7116 // Builtins don't have InstructionStream objects.
7118 instruction_stream->code(kAcquireLoad)->builtin_id()));
7119 return false;
7120 }
7121
7122 Address start = instruction_stream.address();
7123 Address end = start + instruction_stream->SizeFromMap(map);
7124 return start <= addr && addr < end;
7125}
7126
7127std::optional<Tagged<InstructionStream>>
7128Heap::GcSafeTryFindInstructionStreamForInnerPointer(Address inner_pointer) {
7129 std::optional<Address> start =
7131 if (start.has_value()) {
7133 }
7134
7135 return {};
7136}
7137
7138std::optional<Tagged<GcSafeCode>> Heap::GcSafeTryFindCodeForInnerPointer(
7139 Address inner_pointer) {
7140 Builtin maybe_builtin =
7142 if (Builtins::IsBuiltinId(maybe_builtin)) {
7143 return Cast<GcSafeCode>(isolate()->builtins()->code(maybe_builtin));
7144 }
7145
7146 std::optional<Tagged<InstructionStream>> maybe_istream =
7148 if (!maybe_istream) return {};
7149
7150 return GcSafeGetCodeFromInstructionStream(*maybe_istream, inner_pointer);
7151}
7152
7153Tagged<Code> Heap::FindCodeForInnerPointer(Address inner_pointer) {
7154 return GcSafeFindCodeForInnerPointer(inner_pointer)->UnsafeCastToCode();
7155}
7156
7157Tagged<GcSafeCode> Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
7158 std::optional<Tagged<GcSafeCode>> maybe_code =
7159 GcSafeTryFindCodeForInnerPointer(inner_pointer);
7160 // Callers expect that the code object is found.
7161 CHECK(maybe_code.has_value());
7162 return UncheckedCast<GcSafeCode>(maybe_code.value());
7163}
7164
7165std::optional<Tagged<Code>> Heap::TryFindCodeForInnerPointerForPrinting(
7166 Address inner_pointer) {
7167 if (InSpaceSlow(inner_pointer, i::CODE_SPACE) ||
7168 InSpaceSlow(inner_pointer, i::CODE_LO_SPACE) ||
7169 i::OffHeapInstructionStream::PcIsOffHeap(isolate(), inner_pointer)) {
7170 std::optional<Tagged<GcSafeCode>> maybe_code =
7171 GcSafeTryFindCodeForInnerPointer(inner_pointer);
7172 if (maybe_code.has_value()) {
7173 return maybe_code.value()->UnsafeCastToCode();
7174 }
7175 }
7176 return {};
7177}
7178
7179#ifdef DEBUG
7180void Heap::IncrementObjectCounters() {
7181 isolate_->counters()->objs_since_last_full()->Increment();
7182 isolate_->counters()->objs_since_last_young()->Increment();
7183}
7184#endif // DEBUG
7185
7186bool Heap::IsStressingScavenge() {
7187 return v8_flags.stress_scavenge > 0 && new_space();
7188}
7189
7190void Heap::SetIsMarkingFlag(bool value) {
7191 isolate()->isolate_data()->is_marking_flag_ = value;
7192}
7193
7194uint8_t* Heap::IsMarkingFlagAddress() {
7195 return &isolate()->isolate_data()->is_marking_flag_;
7196}
7197
7198void Heap::SetIsMinorMarkingFlag(bool value) {
7199 isolate()->isolate_data()->is_minor_marking_flag_ = value;
7200}
7201
7202uint8_t* Heap::IsMinorMarkingFlagAddress() {
7203 return &isolate()->isolate_data()->is_minor_marking_flag_;
7204}
7205
7214
7215// StrongRootBlocks are allocated as a block of addresses, prefixed with a
7216// StrongRootsEntry pointer:
7217//
7218// | StrongRootsEntry*
7219// | Address 1
7220// | ...
7221// | Address N
7222//
7223// The allocate method registers the range "Address 1" to "Address N" with the
7224// heap as a strong root array, saves that entry in StrongRootsEntry*, and
7225// returns a pointer to Address 1.
7227 void* block = base::Malloc(sizeof(StrongRootsEntry*) + n * sizeof(Address));
7228
7229 StrongRootsEntry** header = reinterpret_cast<StrongRootsEntry**>(block);
7230 Address* ret = reinterpret_cast<Address*>(reinterpret_cast<char*>(block) +
7231 sizeof(StrongRootsEntry*));
7232
7233 memset(ret, kNullAddress, n * sizeof(Address));
7234 *header = heap()->RegisterStrongRoots(
7235 "StrongRootAllocator", FullObjectSlot(ret), FullObjectSlot(ret + n));
7236
7237 return ret;
7238}
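// Zero-filling the block means every slot initially reads as Smi zero, which
// root visitors treat as a tagged immediate and skip, so the range can be
// registered with the GC before the caller has stored any real objects.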
7239
7241 // The allocate method returns a pointer to Address 1, so the deallocate
7242 // method has to offset that pointer back by sizeof(StrongRootsEntry*).
7243 void* block = reinterpret_cast<char*>(p) - sizeof(StrongRootsEntry*);
7244 StrongRootsEntry** header = reinterpret_cast<StrongRootsEntry**>(block);
7245
7246 heap()->UnregisterStrongRoots(*header);
7247
7248 base::Free(block);
7249}
7250
7251#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
7252void Heap::set_allocation_timeout(int allocation_timeout) {
7253 heap_allocator_->SetAllocationTimeout(allocation_timeout);
7254}
7255#endif // V8_ENABLE_ALLOCATION_TIMEOUT
7256
7257void Heap::FinishSweepingIfOutOfWork() {
7259 sweeper()->UsingMajorSweeperTasks() &&
7260 !sweeper()->AreMajorSweeperTasksRunning()) {
7261 // At this point we know that all concurrent sweeping tasks have run
7262 // out of work and quit: all pages are swept. The main thread still needs
7263 // to complete sweeping though.
7265 !sweeper()->HasUnsweptPagesForMajorSweeping());
7266 EnsureSweepingCompleted(SweepingForcedFinalizationMode::kV8Only);
7267 }
7268 if (cpp_heap()) {
7269 // Ensure that sweeping is also completed for the C++ managed heap, if one
7270 // exists and it's out of work.
7272 }
7273}
7274
7275void Heap::EnsureSweepingCompleted(SweepingForcedFinalizationMode mode) {
7276 CompleteArrayBufferSweeping(this);
7277
7279
7280 if (sweeper()->sweeping_in_progress()) {
7281 bool was_minor_sweeping_in_progress = minor_sweeping_in_progress();
7282 bool was_major_sweeping_in_progress = major_sweeping_in_progress();
7284
7285 if (was_major_sweeping_in_progress) {
7286 TRACE_GC_EPOCH_WITH_FLOW(tracer(), GCTracer::Scope::MC_COMPLETE_SWEEPING,
7288 sweeper_->GetTraceIdForFlowEvent(
7289 GCTracer::Scope::MC_COMPLETE_SWEEPING),
7293 if (shared_space()) {
7296 }
7297
7299 } else if (v8_flags.sticky_mark_bits) {
7300 // With sticky markbits there is no separate young gen. Minor sweeping
7301 // will thus sweep pages in old space, so old space freelist should be
7302 // refilled.
7303 DCHECK(was_minor_sweeping_in_progress);
7305 }
7306
7307 if (!v8_flags.sticky_mark_bits && v8_flags.minor_ms && use_new_space() &&
7308 was_minor_sweeping_in_progress) {
7310 tracer(), GCTracer::Scope::MINOR_MS_COMPLETE_SWEEPING,
7312 sweeper_->GetTraceIdForFlowEvent(
7313 GCTracer::Scope::MINOR_MS_COMPLETE_SWEEPING),
7316 }
7317
7319
7320#ifdef VERIFY_HEAP
7321 if (v8_flags.verify_heap) {
7322 EvacuationVerifier verifier(this);
7323 verifier.Run();
7324 }
7325#endif
7326 }
7327
7328 if (mode == SweepingForcedFinalizationMode::kUnifiedHeap && cpp_heap()) {
7329 // Ensure that sweeping is also completed for the C++ managed heap, if one
7330 // exists.
7332 DCHECK(!CppHeap::From(cpp_heap())->sweeper().IsSweepingInProgress());
7333 }
7334
7336 mode == SweepingForcedFinalizationMode::kUnifiedHeap || !cpp_heap(),
7337 !tracer()->IsSweepingInProgress());
7338
7339 if (v8_flags.external_memory_accounted_in_global_limit) {
7340 if (!using_initial_limit()) {
7341 auto new_limits = ComputeNewAllocationLimits(this);
7343 new_limits.old_generation_allocation_limit,
7344 new_limits.global_allocation_limit);
7345 }
7346 }
7347}
7348
7349void Heap::EnsureQuarantinedPagesSweepingCompleted() {
7350 if (v8_flags.minor_ms) {
7351 return;
7352 }
7353 scavenger_collector_->CompleteSweepingQuarantinedPagesIfNeeded();
7354}
7355
7356void Heap::EnsureYoungSweepingCompleted() {
7357 CompleteArrayBufferSweeping(this);
7358
7360
7361 if (!sweeper()->minor_sweeping_in_progress()) return;
7362 DCHECK(!v8_flags.sticky_mark_bits);
7363
7365 tracer(), GCTracer::Scope::MINOR_MS_COMPLETE_SWEEPING, ThreadKind::kMain,
7366 sweeper_->GetTraceIdForFlowEvent(
7367 GCTracer::Scope::MINOR_MS_COMPLETE_SWEEPING),
7369
7372
7374}
7375
7376void Heap::NotifyLoadingStarted() {
7378 double now_ms = MonotonicallyIncreasingTimeInMs();
7380 load_start_time_ms_.store(now_ms, std::memory_order_relaxed);
7381}
7382
7383void Heap::NotifyLoadingEnded() {
7384 load_start_time_ms_.store(kLoadTimeNotLoading, std::memory_order_relaxed);
7386 if (auto* job = incremental_marking()->incremental_marking_job()) {
 7387 // The task will start incremental marking (if it is not already started)
 7388 // and advance marking if incremental marking is active.
7389 job->ScheduleTask(TaskPriority::kUserVisible);
7390 }
7391}
7392
7393int Heap::NextScriptId() {
7394 FullObjectSlot last_script_id_slot(&roots_table()[RootIndex::kLastScriptId]);
7395 Tagged<Smi> last_id = Cast<Smi>(last_script_id_slot.Relaxed_Load());
7396 Tagged<Smi> new_id, last_id_before_cas;
7397 do {
7398 if (last_id.value() == Smi::kMaxValue) {
7399 static_assert(v8::UnboundScript::kNoScriptId == 0);
7400 new_id = Smi::FromInt(1);
7401 } else {
7402 new_id = Smi::FromInt(last_id.value() + 1);
7403 }
7404
7405 // CAS returns the old value on success, and the current value in the slot
7406 // on failure. Therefore, we want to break if the returned value matches the
7407 // old value (last_id), and keep looping (with the new last_id value) if it
7408 // doesn't.
7409 last_id_before_cas = last_id;
7410 last_id =
7411 Cast<Smi>(last_script_id_slot.Relaxed_CompareAndSwap(last_id, new_id));
7412 } while (last_id != last_id_before_cas);
7413
7414 return new_id.value();
7415}
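// Note the wrap-around above: once the last id reaches Smi::kMaxValue the
// counter restarts at 1 rather than 0, keeping v8::UnboundScript::kNoScriptId
// (0) reserved as the "no script" sentinel even after an overflow.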
7416
7417int Heap::NextDebuggingId() {
7418 int last_id = last_debugging_id().value();
7419 if (last_id == DebugInfo::DebuggingIdBits::kMax) {
7420 last_id = DebugInfo::kNoDebuggingId;
7421 }
7422 last_id++;
7423 set_last_debugging_id(Smi::FromInt(last_id));
7424 return last_id;
7425}
7426
7427int Heap::NextStackTraceId() {
7428 int last_id = last_stack_trace_id().value();
7429 if (last_id == Smi::kMaxValue) {
7430 last_id = 0;
7431 }
7432 last_id++;
7433 set_last_stack_trace_id(Smi::FromInt(last_id));
7434 return last_id;
7435}
7436
7438 Heap* heap, EmbedderStackStateOrigin origin, StackState stack_state)
7439 : heap_(heap),
7440 old_stack_state_(heap_->embedder_stack_state_),
7441 old_origin_(heap->embedder_stack_state_origin_) {
7442 // Explicit scopes take precedence over implicit scopes.
7446 heap_->embedder_stack_state_ = stack_state;
7447 heap_->embedder_stack_state_origin_ = origin;
7448 }
7449}
7450
7455
7458 : scope_(std::make_unique<cppgc::internal::ClassNameAsHeapObjectNameScope>(
7459 *CppHeap::From(heap))) {}
7460
7462 default;
7463
7464#if V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT || \
7465 V8_HEAP_USE_PKU_JIT_WRITE_PROTECT || V8_HEAP_USE_BECORE_JIT_WRITE_PROTECT
7466
7469 VirtualMemory* reservation,
7470 base::AddressRegion region)
7471 : rwx_write_scope_("Write access for zapping.") {
7472#if !defined(DEBUG) && !defined(VERIFY_HEAP) && !defined(USE_SIMULATOR)
7473 UNREACHABLE();
7474#endif
7475}
7476
7478 CodePageMemoryModificationScopeForDebugging(MemoryChunkMetadata* chunk)
7479 : rwx_write_scope_("Write access for zapping.") {
7480#if !defined(DEBUG) && !defined(VERIFY_HEAP) && !defined(USE_SIMULATOR)
7481 UNREACHABLE();
7482#endif
7483}
7484
7487
7488#else // V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT ||
7489 // V8_HEAP_USE_PKU_JIT_WRITE_PROTECT ||
7490 // V8_HEAP_USE_BECORE_JIT_WRITE_PROTECT
7491
7494 VirtualMemory* reservation,
7495 base::AddressRegion region) {
7496#if !defined(DEBUG) && !defined(VERIFY_HEAP) && !defined(USE_SIMULATOR)
7497 UNREACHABLE();
7498#endif
7499}
7500
7503#if !defined(DEBUG) && !defined(VERIFY_HEAP) && !defined(USE_SIMULATOR)
7504 UNREACHABLE();
7505#endif
7506}
7507
7510
7511#endif
7512
7514
7515} // namespace internal
7516} // namespace v8
Definition gc-tracer.cc:206
void RecordGCPhasesHistograms(RecordGCPhasesInfo::Mode mode)
void NotifyYoungSweepingCompletedAndStopCycleIfFinished()
Definition gc-tracer.cc:589
void StartCycle(GarbageCollector collector, GarbageCollectionReason gc_reason, const char *collector_reason, MarkingType marking)
Definition gc-tracer.cc:228
void NotifyFullSweepingCompletedAndStopCycleIfFinished()
Definition gc-tracer.cc:537
GarbageCollector GetCurrentCollector() const
static const int kCheckHandleThreshold
Definition handles.h:314
void SetReadOnlySpace(ReadOnlySpace *)
static void GCEpiloguePrintHeapLayout(v8::Isolate *isolate, v8::GCType gc_type, v8::GCCallbackFlags flags, void *data)
static void GCProloguePrintHeapLayout(v8::Isolate *isolate, v8::GCType gc_type, v8::GCCallbackFlags flags, void *data)
static V8_INLINE bool InYoungGeneration(Tagged< Object > object)
static V8_INLINE bool InAnySharedSpace(Tagged< HeapObject > object)
std::unique_ptr< ObjectIterator > object_iterator_
Definition heap.h:2647
Tagged< HeapObject > NextObject()
Definition heap.cc:6666
SpaceIterator space_iterator_
Definition heap.h:2645
std::unique_ptr< HeapObjectsFilter > filter_
Definition heap.h:2643
HeapObjectIterator(Heap *heap, HeapObjectsFiltering filtering=kNoFiltering)
Definition heap.cc:6624
Tagged< HeapObject > Next()
Definition heap.cc:6658
static Tagged< HeapObject > FromAddress(Address address)
static void SetFillerMap(const WritableFreeSpace &writable_page, Tagged< Map > value)
virtual bool SkipObject(Tagged< HeapObject > object)=0
virtual ~HeapObjectsFilter()=default
void WriteSnapshotToDiskAfterGC(HeapSnapshotMode snapshot_mode=HeapSnapshotMode::kRegular)
void ObjectMoveEvent(Address from, Address to, int size, bool is_native_object)
static const int kStartMarker
Definition heap.h:2492
static const int kEndMarker
Definition heap.h:2493
static V8_INLINE Heap * GetOwnerHeap(Tagged< HeapObject > object)
static V8_INLINE void VerifyHeapIfEnabled(Heap *heap)
void UpdateObjectSizeEvent(Address, int) final
Definition heap.cc:876
void MoveEvent(Address source, Address target, int size) final
Definition heap.cc:858
void UpdateAllocationsHash(Tagged< HeapObject > object)
Definition heap.cc:879
void AllocationEvent(Address addr, int size) final
Definition heap.cc:836
DevToolsTraceEventScope(Heap *heap, const char *event_name, const char *event_type)
Definition heap.cc:1479
uint64_t UpdateAmount(int64_t delta)
Definition heap.h:234
void UpdateLowSinceMarkCompact(uint64_t amount)
Definition heap.h:245
void UpdateLimitForInterrupt(uint64_t amount)
Definition heap.h:241
bool Contains(Tagged< String > string)
Definition heap.cc:2685
void UpdateReferences(Heap::ExternalStringTableUpdaterCallback updater_func)
Definition heap.cc:2868
void IterateYoung(RootVisitor *v)
Definition heap.cc:2844
void UpdateYoungReferences(Heap::ExternalStringTableUpdaterCallback updater_func)
Definition heap.cc:2805
std::vector< TaggedBase > old_strings_
Definition heap.h:1699
std::vector< TaggedBase > young_strings_
Definition heap.h:1698
void IterateAll(RootVisitor *v)
Definition heap.cc:2853
V8_EXPORT_PRIVATE size_t MaxReserved() const
Definition heap.cc:205
GarbageCollector SelectGarbageCollector(AllocationSpace space, GarbageCollectionReason gc_reason, const char **reason) const
Definition heap.cc:475
std::vector< Isolate * > PauseConcurrentThreadsInClients(GarbageCollector collector)
Definition heap.cc:2323
size_t embedder_size_at_last_gc_
Definition heap.h:2336
ResizeNewSpaceMode resize_new_space_mode_
Definition heap.h:2397
ExternalStringTable external_string_table_
Definition heap.h:2364
static constexpr double kLoadTimeNotLoading
Definition heap.h:2402
std::atomic< size_t > global_allocation_limit_
Definition heap.h:2228
V8_EXPORT_PRIVATE void FreeLinearAllocationAreas()
Definition heap.cc:3535
Address remembered_unmapped_pages_[kRememberedUnmappedPages]
Definition heap.h:2221
void CompactRetainedMaps(Tagged< WeakArrayList > retained_maps)
Definition heap.cc:6358
static V8_EXPORT_PRIVATE size_t AllocatorLimitOnMaxOldGenerationSize()
Definition heap.cc:280
SharedLargeObjectSpace * shared_lo_space() const
Definition heap.h:736
static const int kHeapLimitMultiplier
Definition heap.h:303
int nodes_copied_in_new_space_
Definition heap.h:2255
size_t old_generation_size_at_last_gc_
Definition heap.h:2330
void CompleteSweepingYoung()
Definition heap.cc:2397
std::unique_ptr< ObjectStats > live_object_stats_
Definition heap.h:2276
bool HasDirtyJSFinalizationRegistries()
Definition heap.cc:6836
std::unique_ptr< MinorMarkSweepCollector > minor_mark_sweep_collector_
Definition heap.h:2267
V8_EXPORT_PRIVATE void FinalizeIncrementalMarkingAtomically(GarbageCollectionReason gc_reason)
Definition heap.cc:3912
size_t MaxOldGenerationSize()
Definition heap.h:1235
size_t Available()
Definition heap.cc:402
void ConfigureHeap(const v8::ResourceConstraints &constraints, v8::CppHeap *cpp_heap)
Definition heap.cc:4917
V8_EXPORT_PRIVATE bool CanExpandOldGeneration(size_t size) const
Definition heap.cc:415
V8_INLINE uint64_t external_memory() const
Definition heap-inl.h:67
bool update_allocation_limits_after_loading_
Definition heap.h:2407
OldGenerationExpansionNotificationOrigin
Definition heap.h:380
std::unique_ptr< MemoryReducer > memory_reducer_
Definition heap.h:2275
void PerformHeapVerification()
Definition heap.cc:2310
NewSpace * new_space() const
Definition heap.h:727
GCFlags current_gc_flags_
Definition heap.h:2351
void GarbageCollectionPrologue(GarbageCollectionReason gc_reason, const v8::GCCallbackFlags gc_callback_flags)
Definition heap.cc:945
void ClearRecordedSlotRange(Address start, Address end)
Definition heap.cc:6450
SharedTrustedSpace * shared_trusted_space_
Definition heap.h:2152
V8_EXPORT_PRIVATE void StartIncrementalMarkingIfAllocationLimitIsReached(LocalHeap *local_heap, GCFlags gc_flags, GCCallbackFlags gc_callback_flags=GCCallbackFlags::kNoGCCallbackFlags)
Definition heap.cc:1978
std::unique_ptr< ArrayBufferSweeper > array_buffer_sweeper_
Definition heap.h:2269
bool IsTearingDown() const
Definition heap.h:525
double last_gc_time_
Definition heap.h:2262
void set_native_contexts_list(Tagged< Object > object)
Definition heap.h:457
void set_allocation_sites_list(Tagged< UnionOf< Smi, Undefined, AllocationSiteWithWeakNext > > object)
Definition heap.h:466
void PrintMaxMarkingLimitReached()
Definition heap.cc:5920
static bool IsYoungGenerationCollector(GarbageCollector collector)
Definition heap.h:346
bool HasLowEmbedderAllocationRate()
Definition heap.cc:3689
size_t min_old_generation_size_
Definition heap.h:2089
std::unique_ptr< CppHeap > owning_cpp_heap_
Definition heap.h:2300
ExternalMemoryAccounting external_memory_
Definition heap.h:2073
bool AllocationLimitOvershotByLargeMargin() const
Definition heap.cc:5252
NewLargeObjectSpace * new_lo_space_
Definition heap.h:2148
HeapState gc_state() const
Definition heap.h:521
std::unique_ptr< MemoryMeasurement > memory_measurement_
Definition heap.h:2274
void CollectGarbageOnMemoryPressure()
Definition heap.cc:4149
SharedSpace * shared_space() const
Definition heap.h:733
SharedTrustedSpace * shared_trusted_allocation_space_
Definition heap.h:2160
size_t maximum_committed_
Definition heap.h:2115
double PercentToOldGenerationLimit() const
Definition heap.cc:5424
SharedLargeObjectSpace * shared_lo_space_
Definition heap.h:2149
V8_EXPORT_PRIVATE size_t YoungGenerationSizeOfObjects() const
Definition heap.cc:5187
void SetOldGenerationAndGlobalAllocationLimit(size_t new_old_generation_allocation_limit, size_t new_global_allocation_limit)
Definition heap.cc:1523
PagedSpace * shared_allocation_space_
Definition heap.h:2158
bool sweeping_in_progress() const
Definition heap.h:1532
void MarkCompactPrologue()
Definition heap.cc:2662
StrongRootsEntry * strong_roots_head_
Definition heap.h:2312
OldLargeObjectSpace * lo_space() const
Definition heap.h:734
std::vector< std::pair< v8::NearHeapLimitCallback, void * > > near_heap_limit_callbacks_
Definition heap.h:2136
V8_EXPORT_PRIVATE void RemoveHeapObjectAllocationTracker(HeapObjectAllocationTracker *tracker)
Definition heap.cc:926
NewLargeObjectSpace * new_lo_space() const
Definition heap.h:737
bool use_new_space() const
Definition heap.h:1643
void AddGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback, GCType gc_type_filter, void *data)
Definition heap.cc:6247
bool ring_buffer_full_
Definition heap.h:2343
size_t initial_old_generation_size_
Definition heap.h:2100
static const int kRememberedUnmappedPages
Definition heap.h:1707
void RecomputeLimitsAfterLoadingIfNeeded()
Definition heap.cc:2533
void CreateFillerObjectAtRaw(const WritableFreeSpace &free_space, ClearFreedMemoryMode clear_memory_mode, ClearRecordedSlots clear_slots_mode, VerifyNoSlotsRecorded verify_no_slots_recorded)
Definition heap.cc:3220
char trace_ring_buffer_[kTraceRingBufferSize]
Definition heap.h:2338
size_t previous_new_space_surviving_object_size_
Definition heap.h:2252
MemoryReducer * memory_reducer()
Definition heap.h:1912
ResizeNewSpaceMode ShouldResizeNewSpace()
Definition heap.cc:3797
void GarbageCollectionEpilogueInSafepoint(GarbageCollector collector)
Definition heap.cc:1065
static constexpr int kRetainMapEntrySize
Definition heap.h:1821
MarkCompactCollector * mark_compact_collector()
Definition heap.h:813
std::atomic< v8::MemoryPressureLevel > memory_pressure_level_
Definition heap.h:2133
void CheckIneffectiveMarkCompact(size_t old_generation_size, double mutator_utilization)
Definition heap.cc:3715
V8_EXPORT_PRIVATE void CollectAllAvailableGarbage(GarbageCollectionReason gc_reason)
Definition heap.cc:1327
std::unique_ptr< ObjectStats > dead_object_stats_
Definition heap.h:2277
std::unique_ptr< CollectionBarrier > collection_barrier_
Definition heap.h:2368
size_t max_global_memory_size_
Definition heap.h:2096
static GarbageCollector YoungGenerationCollector()
Definition heap.h:353
std::atomic< size_t > max_old_generation_size_
Definition heap.h:2092
size_t old_generation_capacity_after_bootstrap_
Definition heap.h:2116
void ConfigureHeapDefault()
Definition heap.cc:5122
V8_EXPORT_PRIVATE void SetGCState(HeapState state)
Definition heap.cc:518
CodeLargeObjectSpace * code_lo_space_
Definition heap.h:2147
void AddGCPrologueCallback(v8::Isolate::GCCallbackWithData callback, GCType gc_type_filter, void *data)
Definition heap.cc:6236
LocalHeap * main_thread_local_heap()
Definition heap.h:842
void CheckCollectionRequested()
Definition heap.cc:2113
size_t NewSpaceSize()
Definition heap.cc:3890
size_t old_generation_allocation_limit() const
Definition heap.h:1924
V8_EXPORT_PRIVATE size_t YoungGenerationWastedBytes() const
Definition heap.cc:5198
void OnMoveEvent(Tagged< HeapObject > source, Tagged< HeapObject > target, int size_in_bytes)
Definition heap.cc:3337
void MarkCompact()
Definition heap.cc:2607
Tagged< Object > dirty_js_finalization_registries_list()
Definition heap.h:478
size_t max_old_generation_size() const
Definition heap.h:1940
void IterateStackRoots(RootVisitor *v)
Definition heap.cc:4834
double new_space_surviving_rate_
Definition heap.h:2253
size_t GlobalMemoryAvailable()
Definition heap.cc:5396
V8_EXPORT_PRIVATE void CreateFillerObjectAt(Address addr, int size, ClearFreedMemoryMode clear_memory_mode=ClearFreedMemoryMode::kDontClearFreedMemory)
Definition heap.cc:3202
std::atomic< HeapState > gc_state_
Definition heap.h:2193
TrustedSpace * trusted_space_
Definition heap.h:2151
bool delay_sweeper_tasks_for_testing_
Definition heap.h:2385
Tagged< Map > GcSafeMapOfHeapObject(Tagged< HeapObject > object)
Definition heap.cc:7090
static const int kMaxLoadTimeMs
Definition heap.h:1918
void CheckMemoryPressure()
Definition heap.cc:4127
IncrementalMarking * incremental_marking() const
Definition heap.h:1062
V8_INLINE RootsTable & roots_table()
Definition heap-inl.h:69
SharedTrustedLargeObjectSpace * shared_trusted_lo_space_
Definition heap.h:2154
bool InvokeNearHeapLimitCallback()
Definition heap.cc:4265
void RememberUnmappedPage(Address page, bool compacted)
Definition heap.cc:6748
void ReportStatisticsAfterGC()
Definition heap.cc:807
void ProcessNativeContexts(WeakObjectRetainer *retainer)
Definition heap.cc:2891
base::Mutex strong_roots_mutex_
Definition heap.h:2313
size_t max_semi_space_size_
Definition heap.h:2084
void SetGetExternallyAllocatedMemoryInBytesCallback(GetExternallyAllocatedMemoryInBytesCallback callback)
Definition heap.h:965
unsigned int ms_count_
Definition heap.h:2207
std::unique_ptr< Sweeper > sweeper_
Definition heap.h:2265
bool ShouldStressCompaction() const
Definition heap.cc:5534
std::unique_ptr< CodeRange > code_range_
Definition heap.h:2295
OldSpace * old_space() const
Definition heap.h:730
GCCallbacks gc_epilogue_callbacks_
Definition heap.h:2242
ArrayBufferSweeper * array_buffer_sweeper()
Definition heap.h:823
ConcurrentMarking * concurrent_marking() const
Definition heap.h:1070
int contexts_disposed_
Definition heap.h:2139
void ReduceNewSpaceSize()
Definition heap.cc:3879
void MarkCompactEpilogue()
Definition heap.cc:2655
static bool InFromPage(Tagged< Object > object)
Definition heap-inl.h:238
NewSpace * new_space_
Definition heap.h:2142
bool IsGCWithMainThreadStack() const
Definition heap.cc:522
V8_EXPORT_PRIVATE size_t OldGenerationSizeOfObjects() const
Definition heap.cc:5153
V8_EXPORT_PRIVATE void StartIncrementalMarking(GCFlags gc_flags, GarbageCollectionReason gc_reason, GCCallbackFlags gc_callback_flags=GCCallbackFlags::kNoGCCallbackFlags, GarbageCollector collector=GarbageCollector::MARK_COMPACTOR)
Definition heap.cc:1871
std::unique_ptr< AllocationTrackerForDebugging > allocation_tracker_for_debugging_
Definition heap.h:2281
size_t old_generation_wasted_at_last_gc_
Definition heap.h:2333
std::unique_ptr< ConcurrentMarking > concurrent_marking_
Definition heap.h:2273
ReadOnlySpace * read_only_space_
Definition heap.h:2150
static V8_EXPORT_PRIVATE size_t SemiSpaceSizeFromYoungGenerationSize(size_t young_generation_size)
Definition heap.cc:322
V8_EXPORT_PRIVATE void CollectAllGarbage(GCFlags gc_flags, GarbageCollectionReason gc_reason, const GCCallbackFlags gc_callback_flags=kNoGCCallbackFlags)
Definition heap.cc:1258
V8_EXPORT_PRIVATE Tagged< HeapObject > PrecedeWithFillerBackground(Tagged< HeapObject > object, int filler_size)
Definition heap.cc:3035
void RemoveDirtyFinalizationRegistriesOnContext(Tagged< NativeContext > context)
Definition heap.cc:6898
OldLargeObjectSpace * shared_lo_allocation_space_
Definition heap.h:2159
void GetFromRingBuffer(char *buffer)
Definition heap.cc:5113
V8_EXPORT_PRIVATE size_t EmbedderSizeOfObjects() const
Definition heap.cc:5221
void CheckHandleCount()
Definition heap.cc:6424
static const size_t kMaxInitialOldGenerationSize
Definition heap.h:306
std::unique_ptr< MemoryBalancer > mb_
Definition heap.h:2399
bool IsIneffectiveMarkCompact(size_t old_generation_size, double mutator_utilization)
Definition heap.cc:3702
V8_EXPORT_PRIVATE void MakeHeapIterable()
Definition heap.cc:3515
void RecomputeLimits(GarbageCollector collector, base::TimeTicks time)
Definition heap.cc:2488
static constexpr size_t kPhysicalMemoryToOldGenerationRatio
Definition heap.h:310
HeapProfiler * heap_profiler() const
Definition heap.h:366
std::unique_ptr< HeapProfiler > heap_profiler_
Definition heap.h:2283
bool HasLowYoungGenerationAllocationRate()
Definition heap.cc:3670
std::unique_ptr< IncrementalMarking > incremental_marking_
Definition heap.h:2272
V8_EXPORT_PRIVATE size_t OldGenerationWastedBytes() const
Definition heap.cc:5173
bool always_allocate() const
Definition heap.h:1957
std::unique_ptr< GCTracer > tracer_
Definition heap.h:2264
void IterateWeakRoots(RootVisitor *v, base::EnumSet< SkipRoot > options)
Definition heap.cc:4532
MinorMarkSweepCollector * minor_mark_sweep_collector()
Definition heap.h:817
TrustedSpace * trusted_space() const
Definition heap.h:739
V8_EXPORT_PRIVATE bool CollectGarbageFromAnyThread(LocalHeap *local_heap, GarbageCollectionReason gc_reason=GarbageCollectionReason::kBackgroundAllocationFailure)
Definition heap.cc:2372
void EagerlyFreeExternalMemoryAndWasmCode()
Definition heap.cc:4206
bool is_current_gc_forced_
Definition heap.h:2359
MinorGCJob * minor_gc_job()
Definition heap.h:1994
V8_EXPORT_PRIVATE bool CanPromoteYoungAndExpandOldGeneration(size_t size) const
Definition heap.cc:428
StickySpace * sticky_space() const
Definition heap-inl.h:443
V8_EXPORT_PRIVATE double MonotonicallyIncreasingTimeInMs() const
Definition heap.cc:4098
bool major_sweeping_in_progress() const
Definition heap.h:1539
int max_regular_code_object_size_
Definition heap.h:2376
std::vector< HeapObjectAllocationTracker * > allocation_trackers_
Definition heap.h:2387
bool CollectionRequested()
Definition heap.cc:2102
V8_EXPORT_PRIVATE size_t YoungGenerationConsumedBytes() const
Definition heap.cc:5204
base::SmallVector< v8::Isolate::UseCounterFeature, 8 > deferred_counters_
Definition heap.h:2246
size_t CommittedMemory()
Definition heap.cc:366
void FinishSweepingIfOutOfWork()
Definition heap.cc:7257
HeapGrowingMode CurrentHeapGrowingMode()
Definition heap.cc:5380
uint64_t external_memory_hard_limit()
Definition heap.h:612
static V8_EXPORT_PRIVATE size_t YoungGenerationSizeFromSemiSpaceSize(size_t semi_space_size)
Definition heap.cc:317
static V8_EXPORT_PRIVATE size_t YoungGenerationSizeFromOldGenerationSize(size_t old_generation_size)
Definition heap.cc:212
static constexpr size_t kOldGenerationLowMemory
Definition heap.h:311
void MakeLinearAllocationAreasIterable()
Definition heap.cc:3521
size_t NewSpaceTargetCapacity() const
Definition heap.cc:3904
void set_dirty_js_finalization_registries_list_tail(Tagged< Object > object)
Definition heap.h:481
size_t initial_max_old_generation_size_
Definition heap.h:2098
void IterateRoots(RootVisitor *v, base::EnumSet< SkipRoot > options, IterateRootsMode roots_mode=IterateRootsMode::kMainIsolate)
Definition heap.cc:4657
v8::CppHeap * cpp_heap_
Definition heap.h:2305
bool is_finalization_registry_cleanup_task_posted_
Definition heap.h:2389
size_t OldGenerationSpaceAvailable()
Definition heap.h:1882
bool HasBeenSetUp() const
Definition heap.cc:451
StrongRootsEntry * RegisterStrongRoots(const char *label, FullObjectSlot start, FullObjectSlot end)
Definition heap.cc:6777
std::unique_ptr< ScavengerCollector > scavenger_collector_
Definition heap.h:2268
int nodes_died_in_new_space_
Definition heap.h:2254
std::unique_ptr< MarkCompactCollector > mark_compact_collector_
Definition heap.h:2266
bool InOldSpace(Tagged< Object > object)
Definition heap-inl.h:271
MemoryAllocator * memory_allocator()
Definition heap.h:803
void ExpandNewSpaceSize()
Definition heap.cc:3863
void GarbageCollectionPrologueInSafepoint()
Definition heap.cc:984
void RestoreHeapLimit(size_t heap_limit)
Definition heap.h:664
void IterateConservativeStackRoots(RootVisitor *root_visitor, IterateRootsMode roots_mode=IterateRootsMode::kMainIsolate)
Definition heap.cc:4836
V8_EXPORT_PRIVATE size_t OldGenerationConsumedBytes() const
Definition heap.cc:5183
static const int kTraceRingBufferSize
Definition heap.h:315
V8_EXPORT_PRIVATE size_t GlobalWastedBytes() const
Definition heap.cc:5231
V8_EXPORT_PRIVATE void EnsureSweepingCompleted(SweepingForcedFinalizationMode mode)
Definition heap.cc:7275
V8_EXPORT_PRIVATE void CompleteSweepingFull()
Definition heap.cc:1963
V8_EXPORT_PRIVATE void DisableInlineAllocation()
Definition heap.cc:5540
Tagged< GcSafeCode > GcSafeFindCodeForInnerPointer(Address inner_pointer)
Definition heap.cc:7157
void CreateFillerObjectAtBackground(const WritableFreeSpace &free_space)
Definition heap.cc:3193
Sweeper * sweeper()
Definition heap.h:821
IsolateSafepoint * safepoint()
Definition heap.h:579
OldSpace * old_space_
Definition heap.h:2143
size_t ring_buffer_end_
Definition heap.h:2344
CodeSpace * code_space_
Definition heap.h:2144
static size_t DefaultMinSemiSpaceSize()
Definition heap.cc:4867
static V8_EXPORT_PRIVATE size_t DefaultMaxSemiSpaceSize()
Definition heap.cc:4880
uintptr_t mmap_region_base_
Definition heap.h:2217
GCCallbackFlags current_gc_callback_flags_
Definition heap.h:2354
V8_EXPORT_PRIVATE size_t OldGenerationConsumedBytesAtLastGC() const
Definition heap.cc:5237
bool inline_allocation_enabled_
Definition heap.h:2378
size_t MaximumCommittedMemory()
Definition heap.h:1278
int gc_count() const
Definition heap.h:1351
void IterateBuiltins(RootVisitor *v)
Definition heap.cc:4816
GarbageCollector current_or_last_garbage_collector_
Definition heap.h:2361
SharedSpace * shared_space_
Definition heap.h:2145
CodeLargeObjectSpace * code_lo_space() const
Definition heap.h:735
std::atomic< size_t > old_generation_allocation_limit_
Definition heap.h:2227
int remembered_unmapped_pages_index_
Definition heap.h:2220
EmbedderRootsHandler * embedder_roots_handler_
Definition heap.h:2306
V8_EXPORT_PRIVATE bool ShouldOptimizeForLoadTime() const
Definition heap.cc:5292
void AttachCppHeap(v8::CppHeap *cpp_heap)
Definition heap.cc:6036
TrustedLargeObjectSpace * trusted_lo_space() const
Definition heap.h:743
void ProcessAllocationSites(WeakObjectRetainer *retainer)
Definition heap.cc:2898
static constexpr size_t kNewLargeObjectSpaceToSemiSpaceRatio
Definition heap.h:313
GCFlags GCFlagsForIncrementalMarking()
Definition heap.h:1030
bool is_current_gc_for_heap_profiler_
Definition heap.h:2360
Tagged< GcSafeCode > GcSafeGetCodeFromInstructionStream(Tagged< HeapObject > instruction_stream, Address inner_pointer)
Definition heap.cc:7099
std::unique_ptr< MemoryAllocator > memory_allocator_
Definition heap.h:2271
std::shared_ptr< v8::TaskRunner > task_runner_
Definition heap.h:2285
StackState embedder_stack_state_
Definition heap.h:2309
Tagged< UnionOf< Smi, Undefined, AllocationSiteWithWeakNext > > allocation_sites_list()
Definition heap.h:471
int gc_callbacks_depth_
Definition heap.h:2372
SemiSpaceNewSpace * semi_space_new_space() const
Definition heap-inl.h:439
base::TimeDelta total_gc_time_ms_
Definition heap.h:2259
bool CanMoveObjectStart(Tagged< HeapObject > object)
Definition heap.cc:3238
bool initial_size_overwritten_
Definition heap.h:2113
void FlushNumberStringCache()
Definition heap.cc:3122
bool need_to_remove_stress_concurrent_allocation_observer_
Definition heap.h:2317
IncrementalMarkingLimit IncrementalMarkingLimitReached()
Definition heap.cc:5442
void GarbageCollectionEpilogue(GarbageCollector collector)
Definition heap.cc:1154
int NextStressMarkingLimit()
Definition heap.cc:5930
V8_EXPORT_PRIVATE void EnableInlineAllocation()
Definition heap.cc:5538
CodeSpace * code_space() const
Definition heap.h:732
size_t global_allocation_limit() const
Definition heap.h:1928
std::optional< EmbedderStackStateOrigin > embedder_stack_state_origin_
Definition heap.h:2310
void set_dirty_js_finalization_registries_list(Tagged< Object > object)
Definition heap.h:475
MarkingState * marking_state()
Definition heap.h:1621
bool using_initial_limit() const
Definition heap.h:1932
void UpdateMaximumCommitted()
Definition heap.cc:393
LocalHeap * main_thread_local_heap_
Definition heap.h:2191
static size_t OldGenerationToSemiSpaceRatio()
Definition heap.cc:4899
TrustedLargeObjectSpace * trusted_lo_space_
Definition heap.h:2153
V8_EXPORT_PRIVATE size_t GlobalConsumedBytesAtLastGC() const
Definition heap.cc:5241
double promotion_ratio_
Definition heap.h:2249
size_t CommittedOldGenerationMemory()
Definition heap.cc:350
std::optional< Tagged< InstructionStream > > GcSafeTryFindInstructionStreamForInnerPointer(Address inner_pointer)
Definition heap.cc:7128
size_t min_global_memory_size_
Definition heap.h:2095
int stress_marking_percentage_
Definition heap.h:2197
std::optional< Tagged< GcSafeCode > > GcSafeTryFindCodeForInnerPointer(Address inner_pointer)
Definition heap.cc:7138
void EnsureYoungSweepingCompleted()
Definition heap.cc:7356
bool IsStressingScavenge()
Definition heap.cc:7186
bool HighMemoryPressure()
Definition heap.h:655
uint64_t backing_store_bytes() const
Definition heap.h:625
std::unique_ptr< IsolateSafepoint > safepoint_
Definition heap.h:2357
V8_EXPORT_PRIVATE void FatalProcessOutOfMemory(const char *location)
Definition heap.cc:6385
Tagged< Object > native_contexts_list() const
Definition heap.h:461
SharedTrustedLargeObjectSpace * shared_trusted_lo_allocation_space_
Definition heap.h:2161
bool ShouldUseBackgroundThreads() const
Definition heap.cc:456
size_t survived_since_last_expansion_
Definition heap.h:2125
size_t initial_semispace_size_
Definition heap.h:2086
std::unique_ptr< AllocationObserver > stress_concurrent_allocation_observer_
Definition heap.h:2279
size_t min_semi_space_size_
Definition heap.h:2085
V8_EXPORT_PRIVATE bool InSpaceSlow(Address addr, AllocationSpace space) const
Definition heap.cc:4433
V8_EXPORT_PRIVATE size_t GlobalSizeOfObjects() const
Definition heap.cc:5225
unsigned int gc_count_
Definition heap.h:2210
std::unique_ptr< MinorGCJob > minor_gc_job_
Definition heap.h:2278
void ProcessDirtyJSFinalizationRegistries(WeakObjectRetainer *retainer)
Definition heap.cc:2907
V8_EXPORT_PRIVATE size_t GlobalConsumedBytes() const
Definition heap.cc:5233
size_t new_space_surviving_object_size_
Definition heap.h:2251
void AddAllocationObserversToAllSpaces(AllocationObserver *observer, AllocationObserver *new_space_observer)
Definition heap.cc:1018
PagedNewSpace * paged_new_space() const
Definition heap-inl.h:435
V8_EXPORT_PRIVATE void AddHeapObjectAllocationTracker(HeapObjectAllocationTracker *tracker)
Definition heap.cc:915
int consecutive_ineffective_mark_compacts_
Definition heap.h:2214
OldLargeObjectSpace * lo_space_
Definition heap.h:2146
static const int kPointerMultiplier
Definition heap.h:299
void PrintMaxNewSpaceSizeReached()
Definition heap.cc:5925
V8_EXPORT_PRIVATE uint64_t AllocatedExternalMemorySinceMarkCompact() const
Definition heap.cc:5248
V8_EXPORT_PRIVATE void CollectGarbage(AllocationSpace space, GarbageCollectionReason gc_reason, const GCCallbackFlags gc_callback_flags=kNoGCCallbackFlags)
Definition heap.cc:1552
static V8_EXPORT_PRIVATE size_t MinOldGenerationSize()
Definition heap.cc:274
std::vector< Handle< NativeContext > > FindAllNativeContexts()
Definition heap.cc:7004
V8_EXPORT_PRIVATE bool ShouldOptimizeForMemoryUsage()
Definition heap.cc:3752
static const int kOldSurvivalRateLowThreshold
Definition heap.h:1711
static V8_EXPORT_PRIVATE int GetFillToAlign(Address address, AllocationAlignment alignment)
Definition heap.cc:3015
V8_EXPORT_PRIVATE::heap::base::Stack & stack()
Definition heap.cc:6057
bool HasLowOldGenerationAllocationRate()
Definition heap.cc:3680
size_t new_space_allocation_counter_
Definition heap.h:2322
size_t NewSpaceCapacity() const
Definition heap.cc:3897
double PercentToGlobalMemoryLimit() const
Definition heap.cc:5430
size_t initial_max_old_generation_size_threshold_
Definition heap.h:2099
size_t Capacity()
Definition heap.cc:328
bool deserialization_complete_
Definition heap.h:2374
bool GcSafeInstructionStreamContains(Tagged< InstructionStream > instruction_stream, Address addr)
Definition heap.cc:7108
void UpdateOldGenerationAllocationCounter()
Definition heap.h:1334
void ShrinkOldGenerationAllocationLimitIfNotConfigured()
Definition heap.cc:3097
void set_using_initial_limit(bool value)
Definition heap.h:1936
StressScavengeObserver * stress_scavenge_observer_
Definition heap.h:2200
void ForeachAllocationSite(Tagged< Object > list, const std::function< void(Tagged< AllocationSite >)> &visitor)
Definition heap.cc:2929
PretenuringHandler pretenuring_handler_
Definition heap.h:2394
void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc)
Definition heap.cc:2966
v8::CppHeap * cpp_heap() const
Definition heap.h:1112
void MinorMarkSweep()
Definition heap.cc:2642
static LimitsCompuatationResult ComputeNewAllocationLimits(Heap *heap)
Definition heap.cc:2435
size_t old_generation_allocation_counter_at_last_gc_
Definition heap.h:2327
SharedTrustedSpace * shared_trusted_space() const
Definition heap.h:740
GCTracer * tracer()
Definition heap.h:800
double promotion_rate_
Definition heap.h:2250
double ComputeMutatorUtilization(const char *tag, double mutator_speed, std::optional< double > gc_speed)
Definition heap.cc:3658
bool IsGCWithStack() const
Definition heap.cc:526
void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags, GCTracer::Scope::ScopeId scope_id)
Definition heap.cc:2581
Isolate * isolate() const
Definition heap-inl.h:61
HeapAllocator * heap_allocator_
Definition heap.h:2079
void RemoveAllocationObserversFromAllSpaces(AllocationObserver *observer, AllocationObserver *new_space_observer)
Definition heap.cc:1025
static V8_EXPORT_PRIVATE void GenerationSizesFromHeapSize(size_t heap_size, size_t *young_generation_size, size_t *old_generation_size)
Definition heap.cc:246
void ResumeConcurrentThreadsInClients(std::vector< Isolate * > paused_clients)
Definition heap.cc:2347
void ResetOldGenerationAndGlobalAllocationLimit()
Definition heap.cc:1543
void PerformGarbageCollection(GarbageCollector collector, GarbageCollectionReason gc_reason, const char *collector_reason)
Definition heap.cc:2173
void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags, GCTracer::Scope::ScopeId scope_id)
Definition heap.cc:2594
size_t code_range_size_
Definition heap.h:2083
std::atomic< double > max_marking_limit_reached_
Definition heap.h:2204
size_t promoted_objects_size_
Definition heap.h:2248
void SetOldGenerationAndGlobalMaximumSize(size_t max_old_generation_size)
Definition heap.cc:1516
void EnsureQuarantinedPagesSweepingCompleted()
Definition heap.cc:7349
void NotifyObjectSizeChange(Tagged< HeapObject >, int old_size, int new_size, ClearRecordedSlots clear_recorded_slots)
Definition heap.cc:4072
SweepingForcedFinalizationMode
Definition heap.h:1545
GCCallbacks gc_prologue_callbacks_
Definition heap.h:2241
std::atomic< double > load_start_time_ms_
Definition heap.h:2405
GetExternallyAllocatedMemoryInBytesCallback external_memory_callback_
Definition heap.h:2244
V8_EXPORT_PRIVATE void SetStackStart()
Definition heap.cc:6050
bool is_full_gc_during_loading_
Definition heap.h:2411
bool minor_sweeping_in_progress() const
Definition heap.h:1536
static size_t OldGenerationToSemiSpaceRatioLowMemory()
Definition heap.cc:4911
V8_EXPORT_PRIVATE size_t OldGenerationCapacity() const
Definition heap.cc:335
void UpdateSurvivalStatistics(int start_new_space_size)
Definition heap.cc:2121
Tagged< Object > dirty_js_finalization_registries_list_tail()
Definition heap.h:484
bool deserialization_complete() const
Definition heap.h:638
V8_EXPORT_PRIVATE void FinalizeIncrementalMarkingAtomicallyIfRunning(GarbageCollectionReason gc_reason)
Definition heap.cc:3918
V8_EXPORT_PRIVATE size_t NewSpaceAllocationCounter() const
Definition heap.cc:990
bool ShouldReduceMemory() const
Definition heap.h:1615
bool force_gc_on_next_allocation_
Definition heap.h:2384
static const uintptr_t kMmapRegionMask
Definition heap.h:2216
V8_EXPORT_PRIVATE void FreeMainThreadLinearAllocationAreas()
Definition heap.cc:3547
V8_EXPORT_PRIVATE size_t SizeOfObjects()
Definition heap.cc:999
static V8_EXPORT_PRIVATE bool IsLargeObject(Tagged< HeapObject > object)
Definition heap.cc:3281
void ResetAllAllocationSitesDependentCode(AllocationType allocation)
Definition heap.cc:2948
void RunInternal(double deadline_in_seconds) override
Definition heap.cc:1779
void TryRunMinorGC(const base::TimeDelta idle_time)
Definition heap.cc:1810
IdleTaskOnContextDispose(Isolate *isolate)
Definition heap.cc:1776
static void TryPostJob(Heap *heap)
Definition heap.cc:1768
void Start(GarbageCollector garbage_collector, GarbageCollectionReason gc_reason)
static Tagged< InstructionStream > FromTargetAddress(Address address)
V8_EXPORT_PRIVATE void AssertMainThreadIsOnlyThread()
Definition safepoint.cc:304
void IterateLocalHeaps(Callback callback)
Definition safepoint.h:37
void PrintStack(StringStream *accumulator, PrintStackMode mode=kPrintStackVerbose)
Definition isolate.cc:1757
Isolate * shared_space_isolate() const
Definition isolate.h:2295
static constexpr size_t kMaxByteLength
static const int kInitialMaxFastElementArray
Definition js-array.h:144
static void PrototypeRegistryCompactionCallback(Tagged< HeapObject > value, int old_index, int new_index)
bool Contains(Tagged< HeapObject > obj) const
size_t Size() const override
size_t Available() const override
bool ContainsSlow(Address addr) const
size_t SizeOfObjects() const override
constexpr bool IsRunning() const
Definition local-heap.h:252
constexpr bool IsParked() const
Definition local-heap.h:258
V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int size_in_bytes, AllocationType allocation, AllocationOrigin origin=AllocationOrigin::kRuntime, AllocationAlignment alignment=kTaggedAligned)
bool is_main_thread() const
Definition local-heap.h:194
void InvokeGCEpilogueCallbacksInSafepoint(GCCallbacksInSafepoint::GCType gc_type)
void MarkSharedLinearAllocationAreasBlack()
Heap * heap() const
Definition local-heap.h:122
::heap::base::Stack stack_
Definition local-heap.h:405
AtomicThreadState state_
Definition local-heap.h:380
void UnmarkSharedLinearAllocationsArea()
bool IsRetryOfFailedAllocation() const
Definition local-heap.h:118
bool is_main_thread_for(Heap *heap) const
Definition local-heap.h:195
HeapAllocator heap_allocator_
Definition local-heap.h:400
MarkingBarrier * marking_barrier()
Definition local-heap.h:130
void SetUpMainThread(LinearAllocationArea &new_allocation_info, LinearAllocationArea &old_allocation_info)
void MakeLinearAllocationAreasIterable()
void FreeSharedLinearAllocationAreasAndResetFreeLists()
Tagged< Map > ToMap() const
bool IsForwardingAddress() const
static MapWord FromMap(const Tagged< Map > map)
Tagged< HeapObject > ToForwardingAddress(Tagged< HeapObject > map_word_host)
void RecordStrongDescriptorArraysForWeakening(GlobalHandleVector< DescriptorArray > strong_descriptor_arrays)
static V8_INLINE constexpr MarkBitIndex LimitAddressToIndex(Address address)
static V8_INLINE constexpr MarkBitIndex AddressToIndex(Address address)
V8_WARN_UNUSED_RESULT V8_INLINE bool ToHandle(Handle< S > *out) const
V8_EXPORT_PRIVATE void TearDown()
V8_EXPORT_PRIVATE size_t GetPooledChunksCount()
static constexpr int MaxRegularCodeObjectSize()
static constexpr size_t ObjectStartOffsetInDataPage()
static V8_INLINE MemoryChunkMetadata * FromHeapObject(Tagged< HeapObject > o)
V8_INLINE MemoryChunkMetadata * Metadata()
static V8_INLINE MemoryChunk * FromAddress(Address addr)
size_t Offset(Address addr) const
V8_INLINE bool InYoungGeneration() const
static V8_INLINE MemoryChunk * FromHeapObject(Tagged< HeapObject > object)
V8_INLINE bool InReadOnlySpace() const
static double GrowingFactor(Heap *heap, size_t max_heap_size, std::optional< double > gc_speed, double mutator_speed, Heap::HeapGrowingMode growing_mode)
static size_t MinimumAllocationLimitGrowingStep(Heap::HeapGrowingMode growing_mode)
static size_t BoundAllocationLimit(Heap *heap, size_t current_size, uint64_t limit, size_t min_size, size_t max_size, size_t new_space_capacity, Heap::HeapGrowingMode growing_mode)
MemoryPressureInterruptTask(const MemoryPressureInterruptTask &)=delete
MemoryPressureInterruptTask & operator=(const MemoryPressureInterruptTask &)=delete
static MutablePageMetadata * cast(MemoryChunkMetadata *metadata)
static void MoveExternalBackingStoreBytes(ExternalBackingStoreType type, MutablePageMetadata *from, MutablePageMetadata *to, size_t amount)
static V8_INLINE MutablePageMetadata * FromHeapObject(Tagged< HeapObject > o)
void SetCapacity(size_t capacity)
size_t Available() const override
size_t ExternalBackingStoreOverallBytes() const
Definition new-spaces.h:203
virtual size_t MinimumCapacity() const =0
virtual bool ContainsSlow(Address a) const =0
virtual size_t Capacity() const =0
virtual void Grow(size_t new_capacity)=0
bool Contains(Tagged< Object > o) const
virtual size_t AllocatedSinceLastGC() const =0
virtual size_t MaximumCapacity() const =0
virtual size_t TotalCapacity() const =0
static V8_INLINE void Lock(Tagged< HeapObject > heap_object)
static V8_INLINE void Unlock(Tagged< HeapObject > heap_object)
static constexpr int OBJECT_STATS_COUNT
PtrComprCageBase code_cage_base() const
Definition visitors.h:235
PtrComprCageBase cage_base() const
Definition visitors.h:225
static Builtin TryLookupCode(Isolate *isolate, Address address)
static HandleType< OrderedHashSet >::MaybeType Add(Isolate *isolate, HandleType< OrderedHashSet > table, DirectHandle< Object > value)
static PageMetadata * cast(MemoryChunkMetadata *metadata)
static V8_INLINE PageMetadata * FromHeapObject(Tagged< HeapObject > o)
static V8_INLINE PageMetadata * FromAddress(Address addr)
void ReleaseOnTearDown(Isolate *isolate)
Definition page-pool.cc:43
PagedSpaceForNewSpace * paged_space()
Definition new-spaces.h:718
bool StartShrinking(size_t new_target_capacity)
Definition new-spaces.h:616
bool Contains(Address a) const
size_t Available() const override
bool ContainsSlow(Address addr) const
size_t Size() const override
const Heap *const heap_
Definition heap.h:2600
void ProcessPretenuringFeedback(size_t new_space_capacity_before_gc)
void RemoveAllocationSitePretenuringFeedback(Tagged< AllocationSite > site)
static Tagged< WeakArrayList > Compact(DirectHandle< WeakArrayList > array, Heap *heap, CompactionCallback callback, AllocationType allocation=AllocationType::kYoung)
Definition objects.cc:3838
static V8_EXPORT_PRIVATE bool Contains(Address address)
ReadOnlySpace * read_only_space() const
void Iterate(RootVisitor *visitor)
Definition roots.cc:54
virtual V8_EXPORT_PRIVATE void TearDown(MemoryAllocator *memory_allocator)
size_t Size() const override
bool ContainsSlow(Address addr) const
static void Clear(Tagged< FixedArray > cache)
Definition regexp.cc:1396
static void Iterate(Isolate *isolate, RootVisitor *v)
Definition objects.cc:4108
static void PostGarbageCollectionProcessing(Isolate *isolate)
Definition objects.cc:4078
static void Insert(MutablePageMetadata *page, size_t slot_offset)
static void RemoveRange(MutablePageMetadata *chunk, Address start, Address end, SlotSet::EmptyBucketMode mode)
static void CheckNoneInRange(MutablePageMetadata *page, Address start, Address end)
virtual void VisitRootPointers(Root root, const char *description, FullObjectSlot start, FullObjectSlot end)=0
virtual void VisitRunningCode(FullObjectSlot code_slot, FullObjectSlot istream_or_smi_zero_slot)
Definition visitors.h:101
virtual void Synchronize(VisitorSynchronization::SyncTag tag)
Definition visitors.h:112
virtual void VisitRootPointer(Root root, const char *description, FullObjectSlot p)
Definition visitors.h:75
V8_INLINE void AddCharacter(uint16_t c)
void Shrink(size_t new_capacity)
size_t QuarantinedPageCount() const
Definition new-spaces.h:427
static void IterateStartupObjectCache(Isolate *isolate, RootVisitor *visitor)
static void IterateSharedHeapObjectCache(Isolate *isolate, RootVisitor *visitor)
void * ToVoidPtr() const
Definition slots.h:76
static constexpr Tagged< Smi > FromInt(int value)
Definition smi.h:38
static constexpr Tagged< Smi > zero()
Definition smi.h:99
static constexpr int kMaxValue
Definition smi.h:101
virtual std::unique_ptr< ObjectIterator > GetObjectIterator(Heap *heap)=0
size_t old_objects_size() const
size_t young_objects_size() const
void Step(int bytes_allocated, Address, size_t) override
Definition heap.cc:5748
static void Schedule(Isolate *isolate)
Definition heap.cc:5732
StressConcurrentAllocationTask(Isolate *isolate)
Definition heap.cc:5674
Address * allocate_impl(size_t n)
Definition heap.cc:7226
void deallocate_impl(Address *p, size_t n) noexcept
Definition heap.cc:7240
StrongRootsEntry * next
Definition heap.h:171
StrongRootsEntry * prev
Definition heap.h:170
void EnsureMajorCompleted()
Definition sweeper.cc:830
void ContributeAndWaitForPromotedPagesIteration()
Definition sweeper.cc:1183
void EnsurePageIsSwept(PageMetadata *page)
Definition sweeper.cc:1231
void EnsureMinorCompleted()
Definition sweeper.cc:888
V8_INLINE constexpr StorageType ptr() const
constexpr bool IsCleared() const
bool GetHeapObject(Tagged< HeapObject > *result) const
constexpr bool IsWeak() const
V8_INLINE constexpr bool is_null() const
Definition tagged.h:502
static std::optional< Address > StartOfJitAllocationAt(Address inner_pointer)
V8_INLINE void MarkPointersImpl(TSlot start, TSlot end)
Definition heap.cc:6584
void VisitRootPointers(Root root, const char *description, FullObjectSlot start, FullObjectSlot end) override
Definition heap.cc:6564
void VisitMapPointer(Tagged< HeapObject > object) override
Definition heap.cc:6531
void VisitPointers(Tagged< HeapObject > host, MaybeObjectSlot start, MaybeObjectSlot end) final
Definition heap.cc:6539
void VisitRootPointers(Root root, const char *description, OffHeapObjectSlot start, OffHeapObjectSlot end) override
Definition heap.cc:6568
void VisitCodeTarget(Tagged< InstructionStream > host, RelocInfo *rinfo) final
Definition heap.cc:6553
std::vector< Tagged< HeapObject > > marking_stack_
Definition heap.cc:6605
void VisitPointers(Tagged< HeapObject > host, ObjectSlot start, ObjectSlot end) override
Definition heap.cc:6534
void VisitInstructionStreamPointer(Tagged< Code > host, InstructionStreamSlot slot) override
Definition heap.cc:6544
V8_INLINE void MarkHeapObject(Tagged< HeapObject > heap_object)
Definition heap.cc:6598
void VisitEmbeddedPointer(Tagged< InstructionStream > host, RelocInfo *rinfo) final
Definition heap.cc:6559
MarkingVisitor(UnreachableObjectsFilter *filter)
Definition heap.cc:6528
~UnreachableObjectsFilter() override=default
bool SkipObject(Tagged< HeapObject > object) override
Definition heap.cc:6500
std::unordered_map< MemoryChunkMetadata *, std::unique_ptr< BucketType >, base::hash< MemoryChunkMetadata * > > reachable_
Definition heap.cc:6621
std::unordered_set< Tagged< HeapObject >, Object::Hasher > BucketType
Definition heap.cc:6511
bool MarkAsReachable(Tagged< HeapObject > object)
Definition heap.cc:6513
static V8_EXPORT_PRIVATE v8::Platform * GetCurrentPlatform()
Definition v8.cc:282
static V8_EXPORT_PRIVATE void FatalProcessOutOfMemory(Isolate *isolate, const char *location, const OOMDetails &details=kNoOOMDetails)
static V8_EXPORT_PRIVATE const OOMDetails kHeapOOM
Definition v8.h:37
static Handle< WeakArrayList > EnsureSpace(Isolate *isolate, Handle< WeakArrayList > array, int length, AllocationType allocation=AllocationType::kYoung)
virtual Tagged< Object > RetainAs(Tagged< Object > object)=0
void ClearTagged(size_t count) const
static V8_INLINE WritableFreeSpace ForNonExecutableMemory(base::Address addr, size_t size)
V8_INLINE WritableFreeSpace FreeRange(Address addr, size_t size)
static void ForRange(Heap *heap, Tagged< HeapObject > object, TSlot start, TSlot end)
std::pair< size_t, size_t > FlushLiftoffCode()
#define PROFILE(the_isolate, Call)
Definition code-events.h:59
Handle< Code > code
#define V8_COMPRESS_POINTERS_8GB_BOOL
Definition globals.h:608
#define ALIGN_TO_ALLOCATION_ALIGNMENT(value)
Definition globals.h:1796
#define V8_ENABLE_SANDBOX_BOOL
Definition globals.h:160
NormalPageSpace * space_
Definition compactor.cc:324
const MapRef map_
#define MEMBER(s)
#define DICT(s)
#define QUOTE(s)
BasePage * page
Definition sweeper.cc:218
int start
uint32_t count
int end
DeclarationScope * scope_
std::unique_ptr< SafepointScope > safepoint_scope_
LineAndColumn current
LineAndColumn previous
MicrotaskQueue * microtask_queue
Definition execution.cc:77
Label label
DisallowGarbageCollection no_gc_
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage * MB
#define TRACE_GC_EPOCH(tracer, scope_id, thread_kind)
Definition gc-tracer.h:77
#define TRACE_GC_EPOCH_WITH_FLOW(tracer, scope_id, thread_kind, bind_id, flow_flags)
Definition gc-tracer.h:84
#define TRACE_GC(tracer, scope_id)
Definition gc-tracer.h:35
Isolate * isolate
#define DEF_RIGHT_TRIM(T)
Definition heap.cc:3508
#define COMPARE_AND_RETURN_NAME(name)
#define LIST(s)
#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space)
#define UPDATE_COUNTERS_FOR_SPACE(space)
std::string extension
TNode< Context > context
TNode< Object > target
TNode< Object > callback
RpoNumber block
ZoneVector< RpoNumber > & result
ZoneStack< RpoNumber > & stack
Builtin builtin
MovableLabel handler
LiftoffAssembler::CacheState state
int pc_offset
ZoneVector< int > slots
#define LOG(isolate, Call)
Definition log.h:78
#define LOG_CODE_EVENT(isolate, Call)
Definition log.h:83
NonAtomicMarkingState * marking_state_
size_t priority
base::Mutex mutex
const int length_
Definition mul-fft.cc:473
EmbedderStackState
Definition common.h:15
STL namespace.
V8_BASE_EXPORT constexpr uint64_t RoundUpToPowerOfTwo64(uint64_t value)
Definition bits.h:235
constexpr Vector< T > VectorOf(T *start, size_t size)
Definition vector.h:360
void * Malloc(size_t size)
Definition memory.h:36
void sort(C &container)
void Free(void *memory)
Definition memory.h:63
WasmEngine * GetWasmEngine()
V8_INLINE IndirectHandle< T > handle(Tagged< T > object, Isolate *isolate)
Definition handles-inl.h:72
static constexpr FreeListCategoryType kFirstCategory
Definition free-list.h:39
ClearRecordedSlots
Definition heap.h:137
GarbageCollectionReason
Definition globals.h:1428
constexpr int kSpaceTagSize
Definition globals.h:1331
constexpr const char * ToString(DeoptimizeKind kind)
Definition globals.h:880
ClearFreedMemoryMode
Definition heap.h:143
constexpr int kTaggedSize
Definition globals.h:542
constexpr Address kTaggedNullAddress
Definition handles.h:53
template Tagged< Object > VisitWeakList< AllocationSiteWithWeakNext >(Heap *heap, Tagged< Object > list, WeakObjectRetainer *retainer)
constexpr int kInt64Size
Definition globals.h:402
constexpr int kMaxRegularHeapObjectSize
Definition globals.h:680
@ SKIP_WRITE_BARRIER
Definition objects.h:52
static void ReturnNull(const v8::FunctionCallbackInfo< v8::Value > &info)
SlotTraits::TObjectSlot ObjectSlot
Definition globals.h:1243
void PrintF(const char *format,...)
Definition utils.cc:39
int32_t FreeListCategoryType
Definition free-list.h:37
Tagged(T object) -> Tagged< T >
V8_INLINE constexpr bool IsSmi(TaggedImpl< kRefType, StorageType > obj)
Definition objects.h:665
kInterpreterTrampolineOffset Tagged< HeapObject >
void MemsetTagged(Tagged_t *start, Tagged< MaybeObject > value, size_t counter)
Definition slots-inl.h:486
Address Tagged_t
Definition globals.h:547
void VisitObject(Isolate *isolate, Tagged< HeapObject > object, ObjectVisitor *visitor)
void * GetRandomMmapAddr()
constexpr bool IsAnyCodeSpace(AllocationSpace space)
Definition globals.h:1334
void Print(Tagged< Object > obj)
Definition objects.h:774
Handle< To > UncheckedCast(Handle< From > value)
Definition handles-inl.h:55
base::Flags< GCFlag, uint8_t > GCFlags
Definition heap.h:192
constexpr int kSystemPointerSize
Definition globals.h:410
constexpr intptr_t kObjectAlignment8GbHeap
Definition globals.h:934
V8_EXPORT_PRIVATE void MemMove(void *dest, const void *src, size_t size)
Definition memcopy.h:189
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available space
Definition flags.cc:2128
template Tagged< Object > VisitWeakList< JSFinalizationRegistry >(Heap *heap, Tagged< Object > list, WeakObjectRetainer *retainer)
typename detail::FlattenUnionHelper< Union<>, Ts... >::type UnionOf
Definition union.h:123
refactor address components for immediate indexing make OptimizeMaglevOnNextCall optimize to turbofan instead of maglev filter for tracing turbofan compilation trace turbo cfg trace TurboFan s graph trimmer trace TurboFan s control equivalence trace TurboFan s register allocator trace stack load store counters for optimized code in run fuzzing &&concurrent_recompilation trace_turbo trace_turbo_scheduled trace_turbo_stack_accesses verify TurboFan machine graph of code stubs enable FixedArray bounds checks print TurboFan statistics of wasm compilations maximum cumulative size of bytecode considered for inlining scale factor of bytecode size used to calculate the inlining budget * KB
Definition flags.cc:1366
Tagged< MaybeWeak< T > > MakeWeak(Tagged< T > value)
Definition tagged.h:893
constexpr uint32_t kClearedFreeMemoryValue
Definition globals.h:1004
@ FIRST_MUTABLE_SPACE
Definition globals.h:1324
@ SHARED_TRUSTED_LO_SPACE
Definition globals.h:1319
@ FIRST_GROWABLE_PAGED_SPACE
Definition globals.h:1326
@ LAST_GROWABLE_PAGED_SPACE
Definition globals.h:1327
@ SHARED_TRUSTED_SPACE
Definition globals.h:1314
@ LAST_MUTABLE_SPACE
Definition globals.h:1325
V8_INLINE constexpr bool IsHeapObject(TaggedImpl< kRefType, StorageType > obj)
Definition objects.h:669
V8_EXPORT_PRIVATE FlagValues v8_flags
ExternalBackingStoreType
Definition globals.h:1605
InvalidateRecordedSlots
Definition heap.h:139
InvalidateExternalPointerSlots
Definition heap.h:141
EmbedderStackStateOrigin
Definition heap.h:158
SlotTraits::TOffHeapObjectSlot OffHeapObjectSlot
Definition globals.h:1258
constexpr size_t kMaximalCodeRangeSize
Definition globals.h:508
return value
Definition map-inl.h:893
static constexpr Address kNullAddress
Definition v8-internal.h:53
static constexpr GlobalSafepointForSharedSpaceIsolateTag kGlobalSafepointForSharedSpaceIsolate
Definition safepoint.h:240
constexpr int kDoubleSize
Definition globals.h:407
void PrintIsolate(void *isolate, const char *format,...)
Definition utils.cc:61
constexpr intptr_t kDoubleAlignmentMask
Definition globals.h:950
void MemCopy(void *dest, const void *src, size_t size)
Definition memcopy.h:124
template Tagged< Object > VisitWeakList< Context >(Heap *heap, Tagged< Object > list, WeakObjectRetainer *retainer)
constexpr intptr_t kDoubleAlignment
Definition globals.h:949
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage * MB
Definition flags.cc:2197
!IsContextMap !IsContextMap native_context
Definition map-inl.h:877
static GCType GetGCTypeFromGarbageCollector(GarbageCollector collector)
Definition heap.cc:1052
Tagged< To > Cast(Tagged< From > value, const v8::SourceLocation &loc=INIT_SOURCE_LOCATION_IN_DEBUG)
Definition casting.h:150
static constexpr ReleaseStoreTag kReleaseStore
Definition globals.h:2910
GCCallbackFlags
@ kGCCallbackScheduleIdleGarbageCollection
@ kGCCallbackFlagForced
@ kNoGCCallbackFlags
@ kGCCallbackFlagCollectAllExternalMemory
@ kGCCallbackFlagSynchronousPhantomCallbackProcessing
@ kGCCallbackFlagCollectAllAvailableGarbage
Local< T > Handle
static constexpr RelaxedLoadTag kRelaxedLoad
Definition globals.h:2909
TaskPriority
Definition v8-platform.h:24
static constexpr RelaxedStoreTag kRelaxedStore
Definition globals.h:2911
@ kGCTypeMinorMarkSweep
@ kGCTypeScavenge
@ kGCTypeMarkSweepCompact
@ kGCTypeIncrementalMarking
size_t(*)(void *data, size_t current_heap_limit, size_t initial_heap_limit) NearHeapLimitCallback
MemoryPressureLevel
Definition v8-isolate.h:175
MeasureMemoryExecution
static constexpr AcquireLoadTag kAcquireLoad
Definition globals.h:2908
MeasureMemoryMode
#define RELAXED_WRITE_FIELD(p, offset, value)
#define VIRTUAL_INSTANCE_TYPE_LIST(V)
#define INSTANCE_TYPE_LIST(V)
#define RCS_SCOPE(...)
#define FATAL(...)
Definition logging.h:47
#define DCHECK_LE(v1, v2)
Definition logging.h:490
#define CHECK_GE(lhs, rhs)
#define DCHECK_NULL(val)
Definition logging.h:491
#define CHECK_IMPLIES(lhs, rhs)
#define CHECK(condition)
Definition logging.h:124
#define CHECK_GT(lhs, rhs)
#define CHECK_LT(lhs, rhs)
#define CHECK_LE(lhs, rhs)
#define DCHECK_NOT_NULL(val)
Definition logging.h:492
#define CHECK_NOT_NULL(val)
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define CHECK_NE(lhs, rhs)
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define CHECK_EQ(lhs, rhs)
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define DCHECK_GT(v1, v2)
Definition logging.h:487
#define USE(...)
Definition macros.h:293
constexpr T RoundUp(T x, intptr_t m)
Definition macros.h:387
constexpr T RoundDown(T x, intptr_t m)
Definition macros.h:371
#define V8_EXPORT_PRIVATE
Definition macros.h:460
constexpr bool IsAligned(T value, U alignment)
Definition macros.h:403
#define RIGHT_TRIMMABLE_ARRAY_LIST(V)
Definition heap.h:438
static constexpr size_t kMaxSize
static constexpr size_t kMinSize
static constexpr double kConservativeGrowingFactor
static constexpr char kName[]
static bool is_gc_stats_enabled()
static constexpr char kName[]
EphemeronRememberedSet * ephemeron_remembered_set_
Definition sweeper.cc:572
#define OFFSET_OF_DATA_START(Type)
#define TRACE_EVENT_BEGIN2(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)
#define TRACE_EVENT0(category_group, name)
#define TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)
#define TRACE_EVENT_END1(category_group, name, arg1_name, arg1_val)
#define TRACE_DISABLED_BY_DEFAULT(name)
#define TRACE_EVENT1(category_group, name, arg1_name, arg1_val)
#define TRACE_EVENT_FLAG_FLOW_OUT
#define TRACE_EVENT_FLAG_FLOW_IN
RootVisitor * visitor_
Heap * heap_
#define V8_STATIC_ROOTS_BOOL
Definition v8config.h:1001
#define V8_INLINE
Definition v8config.h:500
#define V8_LIKELY(condition)
Definition v8config.h:661
#define V8_UNLIKELY(condition)
Definition v8config.h:660