v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
large-spaces.cc
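Implementation of V8's large object spaces, where each object above the regular object size limit is placed on its own dedicated large page.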
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/large-spaces.h"

#include "src/base/logging.h"
#include "src/common/globals.h"
#include "src/heap/list.h"
#include "src/heap/marking.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces-inl.h"
#include "src/logging/log.h"
#include "src/utils/ostreams.h"

namespace v8 {
namespace internal {

// -----------------------------------------------------------------------------
// LargeObjectSpaceObjectIterator

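// Iterates the pages owned by a large object space, yielding each page's
// single object and skipping free-space and filler objects.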
LargeObjectSpaceObjectIterator::LargeObjectSpaceObjectIterator(
    LargeObjectSpace* space)
    : current_(space->first_page()) {}

Tagged<HeapObject> LargeObjectSpaceObjectIterator::Next() {
  while (current_ != nullptr) {
    Tagged<HeapObject> object = current_->GetObject();
    current_ = current_->next_page();
    if (!IsFreeSpaceOrFiller(object)) return object;
  }
  return Tagged<HeapObject>();
}

// -----------------------------------------------------------------------------
// OldLargeObjectSpace

LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
    : Space(heap, id, nullptr),
      size_(0),
      page_count_(0),
      objects_size_(0),
      pending_object_(0) {}

size_t LargeObjectSpace::Available() const {
  // We return zero here since we cannot take advantage of already allocated
  // large object memory.
  return 0;
}

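// Releases every remaining large page back to the memory allocator, logging a
// delete event for each chunk.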
void LargeObjectSpace::TearDown() {
  while (!memory_chunk_list_.Empty()) {
    LargePageMetadata* page = first_page();
    LOG(heap()->isolate(),
        DeleteEvent("LargeObjectChunk",
                    reinterpret_cast<void*>(page->ChunkAddress())));
    memory_chunk_list_.Remove(page);
    heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kImmediately,
                                     page);
  }
}

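// Notifies allocation observers of a large-object allocation. Because large
// objects never use a linear allocation buffer, the observer counter can be
// advanced by the full object size right away.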
void LargeObjectSpace::AdvanceAndInvokeAllocationObservers(Address soon_object,
                                                           size_t object_size) {
  if (!heap()->IsAllocationObserverActive()) return;

  if (object_size >= allocation_counter_.NextBytes()) {
    // Ensure that there is a valid object
    heap_->CreateFillerObjectAt(soon_object, static_cast<int>(object_size));

    allocation_counter_.InvokeAllocationObservers(soon_object, object_size,
                                                  object_size);
  }

  // Large objects can be accounted immediately since no LAB is involved.
  allocation_counter_.AdvanceAllocationObservers(object_size);
}

void LargeObjectSpace::AddAllocationObserver(AllocationObserver* observer) {
  allocation_counter_.AddAllocationObserver(observer);
}

void LargeObjectSpace::RemoveAllocationObserver(AllocationObserver* observer) {
  allocation_counter_.RemoveAllocationObserver(observer);
}

AllocationResult OldLargeObjectSpace::AllocateRaw(LocalHeap* local_heap,
                                                  int object_size) {
  return AllocateRaw(local_heap, object_size, NOT_EXECUTABLE);
}

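// Slow-path allocation of a large object in an old-generation large object
// space: the allocation may be failed in order to force a GC, incremental
// marking may be started, and the object gets a dedicated large page.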
106
108 int object_size,
109 Executability executable) {
110 object_size = ALIGN_TO_ALLOCATION_ALIGNMENT(object_size);
112 !allocation_counter_.HasAllocationObservers());
115
116 // Check if we want to force a GC before growing the old space further.
117 // If so, fail the allocation.
118 if (!heap()->ShouldExpandOldGenerationOnSlowAllocation(
119 local_heap, AllocationOrigin::kRuntime) ||
120 !heap()->CanExpandOldGeneration(object_size)) {
122 }
123
124 heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
125 local_heap, heap()->GCFlagsForIncrementalMarking(),
127
128 LargePageMetadata* page = AllocateLargePage(object_size, executable);
129 if (page == nullptr) return AllocationResult::Failure();
130 Tagged<HeapObject> object = page->GetObject();
131 if (local_heap->is_main_thread() && identity() != SHARED_LO_SPACE) {
132 UpdatePendingObject(object);
133 }
134 if (v8_flags.sticky_mark_bits ||
135 heap()->incremental_marking()->black_allocation()) {
136 heap()->marking_state()->TryMarkAndAccountLiveBytes(object, object_size);
137 }
138 DCHECK_IMPLIES(heap()->incremental_marking()->black_allocation(),
139 heap()->marking_state()->IsMarked(object));
140 page->Chunk()->InitializationMemoryFence();
141 heap()->NotifyOldGenerationExpansion(local_heap, identity(), page);
142
143 if (local_heap->is_main_thread() && identity() != SHARED_LO_SPACE) {
145 static_cast<size_t>(object_size));
146 }
147 return AllocationResult::FromObject(object);
148}
149
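// Reserves a new large page from the memory allocator and registers it with
// this space. Old-generation spaces first check whether expanding the old
// generation is currently allowed.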
LargePageMetadata* LargeObjectSpace::AllocateLargePage(
    int object_size, Executability executable) {
  base::MutexGuard expansion_guard(heap_->heap_expansion_mutex());

  if (identity() != NEW_LO_SPACE &&
      !heap()->IsOldGenerationExpansionAllowed(object_size, expansion_guard)) {
    return nullptr;
  }

  LargePageMetadata* page = heap()->memory_allocator()->AllocateLargePage(
      this, object_size, executable);
  if (page == nullptr) return nullptr;
  DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));

  {
    base::RecursiveMutexGuard guard(&allocation_mutex_);
    AddPage(page, object_size);
  }

  return page;
}

size_t LargeObjectSpace::CommittedPhysicalMemory() const {
  // On a platform that provides lazy committing of memory, we over-account
  // the actually committed memory. There is no easy way right now to support
  // precise accounting of committed memory in large object space.
  return CommittedMemory();
}

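// Moves a surviving page from the new large object space into this space,
// updating its owner, page flags, and size accounting.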
void OldLargeObjectSpace::PromoteNewLargeObject(LargePageMetadata* page) {
  MemoryChunk* chunk = page->Chunk();
  DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
  DCHECK(chunk->IsLargePage());
  DCHECK(chunk->IsFlagSet(MemoryChunk::FROM_PAGE));
  DCHECK(!chunk->IsFlagSet(MemoryChunk::TO_PAGE));
  PtrComprCageBase cage_base(heap()->isolate());
  static_cast<LargeObjectSpace*>(page->owner())->RemovePage(page);
  chunk->ClearFlagNonExecutable(MemoryChunk::FROM_PAGE);
  chunk->SetOldGenerationPageFlags(
      heap()->incremental_marking()->marking_mode(), LO_SPACE);
  AddPage(page, static_cast<size_t>(page->GetObject()->Size(cage_base)));
}

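// AddPage/RemovePage keep the page list and the size_, objects_size_,
// page_count_, and external backing store counters in sync.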
void LargeObjectSpace::AddPage(LargePageMetadata* page, size_t object_size) {
  size_ += static_cast<int>(page->size());
  AccountCommitted(page->size());
  objects_size_ += object_size;
  page_count_++;
  memory_chunk_list_.PushBack(page);
  page->set_owner(this);
  ForAll<ExternalBackingStoreType>(
      [this, page](ExternalBackingStoreType type, int index) {
        IncrementExternalBackingStoreBytes(
            type, page->ExternalBackingStoreBytes(type));
      });
}

void LargeObjectSpace::RemovePage(LargePageMetadata* page) {
  size_ -= static_cast<int>(page->size());
  AccountUncommitted(page->size());
  page_count_--;
  memory_chunk_list_.Remove(page);
  page->set_owner(nullptr);
  ForAll<ExternalBackingStoreType>(
      [this, page](ExternalBackingStoreType type, int index) {
        DecrementExternalBackingStoreBytes(
            type, page->ExternalBackingStoreBytes(type));
      });
}

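// Shrinks a large page after its single object was shrunk (e.g. right-trimmed):
// unused committed OS pages are returned to the memory allocator and the
// page's object area is updated to end at the new object end.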
void LargeObjectSpace::ShrinkPageToObjectSize(LargePageMetadata* page,
                                              Tagged<HeapObject> object,
                                              size_t object_size) {
  MemoryChunk* chunk = page->Chunk();
#ifdef DEBUG
  PtrComprCageBase cage_base(heap()->isolate());
  DCHECK_EQ(object, page->GetObject());
  DCHECK_EQ(object_size, page->GetObject()->Size(cage_base));
#endif  // DEBUG

  const size_t used_committed_size =
      ::RoundUp(chunk->Offset(object.address()) + object_size,
                MemoryAllocator::GetCommitPageSize());

  // Object shrunk since last GC.
  if (object_size < page->area_size()) {
    page->ClearOutOfLiveRangeSlots(object.address() + object_size);
    const Address new_area_end = page->area_start() + object_size;

    // Object shrunk enough that we can even free some OS pages.
    if (used_committed_size < page->size()) {
      const size_t bytes_to_free = page->size() - used_committed_size;
      heap()->memory_allocator()->PartialFreeMemory(
          page, chunk->address() + used_committed_size, bytes_to_free,
          new_area_end);
      size_ -= bytes_to_free;
      AccountUncommitted(bytes_to_free);
    } else {
      // Can't free OS page but keep object area up-to-date.
      page->set_area_end(new_area_end);
    }
  }

  DCHECK_EQ(used_committed_size, page->size());
  DCHECK_EQ(object_size, page->area_size());
}

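// Fast ownership check via the object's memory chunk; ContainsSlow below walks
// the page list instead.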
bool LargeObjectSpace::Contains(Tagged<HeapObject> object) const {
  MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);

  bool owned = (chunk->owner() == this);

  SLOW_DCHECK(!owned || ContainsSlow(object.address()));

  return owned;
}

bool LargeObjectSpace::ContainsSlow(Address addr) const {
  MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
  for (const LargePageMetadata* page : *this) {
    if (page->Chunk() == chunk) return true;
  }
  return false;
}

std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator(
    Heap* heap) {
  return std::unique_ptr<ObjectIterator>(
      new LargeObjectSpaceObjectIterator(this));
}

#ifdef VERIFY_HEAP
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify(Isolate* isolate,
                              SpaceVerificationVisitor* visitor) const {
  size_t external_backing_store_bytes[static_cast<int>(
      ExternalBackingStoreType::kNumValues)] = {0};

  PtrComprCageBase cage_base(isolate);
  for (const LargePageMetadata* chunk = first_page(); chunk != nullptr;
       chunk = chunk->next_page()) {
    visitor->VerifyPage(chunk);

    // Each chunk contains an object that starts at the large object page's
    // object area start.
    Tagged<HeapObject> object = chunk->GetObject();
    PageMetadata* page = PageMetadata::FromHeapObject(object);
    CHECK(object.address() == page->area_start());

    // Only certain types may be in the large object space:
#define V(Name) Is##Name(object, cage_base) ||
    const bool is_valid_lo_space_object =
        DYNAMICALLY_SIZED_HEAP_OBJECT_LIST(V) false;
#undef V
    if (!is_valid_lo_space_object) {
      i::Print(object);
      FATAL("Found invalid Object (instance_type=%i) in large object space.",
            object->map(cage_base)->instance_type());
    }

    // Invoke visitor on each object.
    visitor->VerifyObject(object);

    ForAll<ExternalBackingStoreType>(
        [chunk, &external_backing_store_bytes](ExternalBackingStoreType type,
                                               int index) {
          external_backing_store_bytes[index] +=
              chunk->ExternalBackingStoreBytes(type);
        });

    visitor->VerifyPageDone(chunk);
  }
  ForAll<ExternalBackingStoreType>(
      [this, external_backing_store_bytes](ExternalBackingStoreType type,
                                           int index) {
        CHECK_EQ(external_backing_store_bytes[index],
                 ExternalBackingStoreBytes(type));
      });
}
#endif

#ifdef DEBUG
void LargeObjectSpace::Print() {
  StdoutStream os;
  LargeObjectSpaceObjectIterator it(this);
  for (Tagged<HeapObject> obj = it.Next(); !obj.is_null(); obj = it.Next()) {
    i::Print(obj, os);
  }
}
#endif  // DEBUG

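// Publishes the address of the most recently allocated large object with
// release semantics.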
void LargeObjectSpace::UpdatePendingObject(Tagged<HeapObject> object) {
  pending_object_.store(object.address(), std::memory_order_release);
}

OldLargeObjectSpace::OldLargeObjectSpace(Heap* heap)
    : LargeObjectSpace(heap, LO_SPACE) {}

OldLargeObjectSpace::OldLargeObjectSpace(Heap* heap, AllocationSpace id)
    : LargeObjectSpace(heap, id) {}

NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap, size_t capacity)
    : LargeObjectSpace(heap, NEW_LO_SPACE), capacity_(capacity) {}

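// Young-generation large object allocation (main thread only). The capacity
// check is skipped for the first object so that a single oversized allocation
// can still succeed.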
AllocationResult NewLargeObjectSpace::AllocateRaw(LocalHeap* local_heap,
                                                  int object_size) {
  object_size = ALIGN_TO_ALLOCATION_ALIGNMENT(object_size);
  DCHECK(local_heap->is_main_thread());
  // Do not allocate more objects if promoting the existing object would exceed
  // the old generation capacity.
  if (!heap()->CanExpandOldGeneration(SizeOfObjects())) {
    return AllocationResult::Failure();
  }

  // Allocation for the first object must succeed independent from the capacity.
  if (SizeOfObjects() > 0 && static_cast<size_t>(object_size) > Available()) {
    if (!heap()->ShouldExpandYoungGenerationOnSlowAllocation(object_size)) {
      return AllocationResult::Failure();
    }
  }

  LargePageMetadata* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
  if (page == nullptr) return AllocationResult::Failure();

  // The size of the first object may exceed the capacity.
  capacity_ = std::max(capacity_, SizeOfObjects());

  Tagged<HeapObject> result = page->GetObject();
  MemoryChunk* chunk = page->Chunk();
  chunk->SetFlagNonExecutable(MemoryChunk::TO_PAGE);
  UpdatePendingObject(result);
  if (v8_flags.minor_ms) {
    page->ClearLiveness();
  }
  chunk->InitializationMemoryFence();
  DCHECK(chunk->IsLargePage());
  DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
  AdvanceAndInvokeAllocationObservers(result.address(),
                                      static_cast<size_t>(object_size));
  return AllocationResult::FromObject(result);
}

size_t NewLargeObjectSpace::Available() const {
  return capacity_ - SizeOfObjects();
}

void NewLargeObjectSpace::Flip() {
  for (LargePageMetadata* page = first_page(); page != nullptr;
       page = page->next_page()) {
    MemoryChunk* chunk = page->Chunk();
    chunk->SetFlagNonExecutable(MemoryChunk::FROM_PAGE);
    chunk->ClearFlagNonExecutable(MemoryChunk::TO_PAGE);
  }
}

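// Frees every page whose object the callback reports as dead and recomputes
// objects_size_ from the surviving objects.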
void NewLargeObjectSpace::FreeDeadObjects(
    const std::function<bool(Tagged<HeapObject>)>& is_dead) {
  DCHECK(!heap()->incremental_marking()->IsMarking());
  size_t surviving_object_size = 0;
  PtrComprCageBase cage_base(heap()->isolate());
  for (auto it = begin(); it != end();) {
    LargePageMetadata* page = *it;
    it++;
    Tagged<HeapObject> object = page->GetObject();
    if (is_dead(object)) {
      RemovePage(page);
      heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kImmediately,
                                       page);
    } else {
      surviving_object_size += static_cast<size_t>(object->Size(cage_base));
    }
  }
  // Right-trimming does not update the objects_size_ counter. We are lazily
  // updating it after every GC.
  objects_size_ = surviving_object_size;
}

void NewLargeObjectSpace::SetCapacity(size_t capacity) {
  capacity_ = std::max(capacity, SizeOfObjects());
}

CodeLargeObjectSpace::CodeLargeObjectSpace(Heap* heap)
    : OldLargeObjectSpace(heap, CODE_LO_SPACE) {}

AllocationResult CodeLargeObjectSpace::AllocateRaw(LocalHeap* local_heap,
                                                   int object_size) {
  return OldLargeObjectSpace::AllocateRaw(local_heap, object_size, EXECUTABLE);
}

void CodeLargeObjectSpace::AddPage(LargePageMetadata* page,
                                   size_t object_size) {
  OldLargeObjectSpace::AddPage(page, object_size);
}

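// Unregisters the chunk from the isolate's code memory chunk bookkeeping
// before removing the page from the space.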
void CodeLargeObjectSpace::RemovePage(LargePageMetadata* page) {
  heap()->isolate()->RemoveCodeMemoryChunk(page);
  OldLargeObjectSpace::RemovePage(page);
}

SharedLargeObjectSpace::SharedLargeObjectSpace(Heap* heap)
    : OldLargeObjectSpace(heap, SHARED_LO_SPACE) {}

SharedTrustedLargeObjectSpace::SharedTrustedLargeObjectSpace(Heap* heap)
    : OldLargeObjectSpace(heap, SHARED_TRUSTED_LO_SPACE) {}

TrustedLargeObjectSpace::TrustedLargeObjectSpace(Heap* heap)
    : OldLargeObjectSpace(heap, TRUSTED_LO_SPACE) {}

}  // namespace internal
}  // namespace v8