  if (!IsFreeSpaceOrFiller(object)) return object;

      DeleteEvent("LargeObjectChunk",
                  reinterpret_cast<void*>(page->ChunkAddress())));

  if (!heap()->IsAllocationObserverActive()) return;
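// Illustrative, self-contained sketch (not code from this file): the early
// return above skips all observer bookkeeping when no AllocationObserver is
// installed. A minimal model of step-based allocation observation, with
// hypothetical names (SampleObserver, ObserverCounter), looks like this:
#include <cstddef>
#include <vector>

struct SampleObserver {
  size_t step_size;           // invoke Step() after roughly this many bytes
  size_t bytes_to_next_step;  // countdown, refilled after each invocation
  void Step(size_t /*object_size*/) { /* e.g. record a heap sample */ }
};

struct ObserverCounter {
  std::vector<SampleObserver*> observers;

  void AdvanceAndInvoke(size_t object_size) {
    if (observers.empty()) return;  // the cheap early-out shown above
    for (SampleObserver* obs : observers) {
      if (object_size >= obs->bytes_to_next_step) {
        obs->Step(object_size);                    // step crossed: notify
        obs->bytes_to_next_step = obs->step_size;  // and rearm the counter
      } else {
        obs->bytes_to_next_step -= object_size;    // just charge the bytes
      }
    }
  }
};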
  if (!heap()->ShouldExpandOldGenerationOnSlowAllocation(/* ... */) ||
      !heap()->CanExpandOldGeneration(object_size)) {
  heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
      local_heap, heap()->GCFlagsForIncrementalMarking(),
      kGCCallbackScheduleIdleGarbageCollection);
      heap()->incremental_marking()->black_allocation()) {
    heap()->marking_state()->TryMarkAndAccountLiveBytes(object, object_size);

      heap()->marking_state()->IsMarked(object));
  page->Chunk()->InitializationMemoryFence();
  heap()->NotifyOldGenerationExpansion(local_heap, identity(), page);

      static_cast<size_t>(object_size));
      !heap()->IsOldGenerationExpansionAllowed(object_size, expansion_guard)) {

      this, object_size, executable);
  if (page == nullptr) return nullptr;
  DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
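// Illustrative sketch (not code from this file): a large-object space gives
// every allocation its own page, so the reservation must be sized such that
// the DCHECK_GE above (area_size() >= object_size) holds. The header size and
// commit granularity below are hypothetical placeholder values.
#include <cstddef>

constexpr size_t kPageHeaderSize = 256;      // per-page metadata (assumed)
constexpr size_t kCommitGranularity = 4096;  // OS commit page size (assumed)

constexpr size_t RoundUpTo(size_t x, size_t m) { return (x + m - 1) / m * m; }

// Reservation size for one large object: header plus object, rounded up to a
// whole number of commit pages.
constexpr size_t LargePageSizeFor(size_t object_size) {
  return RoundUpTo(kPageHeaderSize + object_size, kCommitGranularity);
}

static_assert(LargePageSizeFor(300000) - kPageHeaderSize >= 300000,
              "usable area covers the requested object");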
      heap()->incremental_marking()->marking_mode(), LO_SPACE);
  AddPage(page, static_cast<size_t>(page->GetObject()->Size(cage_base)));

  size_ += static_cast<int>(page->size());

  page->set_owner(this);

      type, page->ExternalBackingStoreBytes(type));

  size_ -= static_cast<int>(page->size());

  page->set_owner(nullptr);

      type, page->ExternalBackingStoreBytes(type));
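// Illustrative sketch (not code from this file): AddPage/RemovePage above fold
// the page's per-type external backing store bytes into (or out of) the
// space-level counters for every ExternalBackingStoreType, alongside size_.
// Types and names below are hypothetical stand-ins for that bookkeeping.
#include <array>
#include <cstddef>

enum class BackingStoreType { kArrayBuffer, kExternalString, kNumTypes };
constexpr size_t kNumBackingStoreTypes =
    static_cast<size_t>(BackingStoreType::kNumTypes);

struct PageAccounting {
  size_t committed_size = 0;
  std::array<size_t, kNumBackingStoreTypes> external_bytes{};
};

struct SpaceAccounting {
  size_t size = 0;
  std::array<size_t, kNumBackingStoreTypes> external_bytes{};

  void AddPage(const PageAccounting& page) {
    size += page.committed_size;
    for (size_t i = 0; i < kNumBackingStoreTypes; ++i)
      external_bytes[i] += page.external_bytes[i];
  }
  void RemovePage(const PageAccounting& page) {
    size -= page.committed_size;
    for (size_t i = 0; i < kNumBackingStoreTypes; ++i)
      external_bytes[i] -= page.external_bytes[i];
  }
};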
                                              size_t object_size) {

  DCHECK_EQ(object_size, page->GetObject()->Size(cage_base));

  const size_t used_committed_size = RoundUp(
      chunk->Offset(object.address()) + object_size,
      MemoryAllocator::GetCommitPageSize());

  if (object_size < page->area_size()) {
    page->ClearOutOfLiveRangeSlots(object.address() + object_size);
    const Address new_area_end = page->area_start() + object_size;

    if (used_committed_size < page->size()) {
      const size_t bytes_to_free = page->size() - used_committed_size;
      heap()->memory_allocator()->PartialFreeMemory(
          page, chunk->address() + used_committed_size, bytes_to_free, /* ... */);

      size_ -= bytes_to_free;

    page->set_area_end(new_area_end);

  DCHECK_EQ(used_committed_size, page->size());
  DCHECK_EQ(object_size, page->area_size());
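// Illustrative sketch (not code from this file) of the shrink arithmetic
// above: keep only the committed memory needed to cover the (now smaller)
// object, rounded up to the OS commit granularity, and hand the tail back via
// partial freeing. kObjectStartOffset and kCommitPageSize stand in for the
// chunk offset of the object and GetCommitPageSize().
#include <cassert>
#include <cstddef>

constexpr size_t kCommitPageSize = 4096;    // assumed commit granularity
constexpr size_t kObjectStartOffset = 256;  // assumed offset of object on page

constexpr size_t RoundUpToPage(size_t x) {
  return (x + kCommitPageSize - 1) / kCommitPageSize * kCommitPageSize;
}

int main() {
  const size_t page_size = 1u << 20;      // 1 MiB currently committed
  const size_t object_size = 300 * 1024;  // object now needs only 300 KiB

  const size_t used_committed_size =
      RoundUpToPage(kObjectStartOffset + object_size);
  const size_t bytes_to_free =
      used_committed_size < page_size ? page_size - used_committed_size : 0;

  // 256 + 307200 = 307456 rounds up to 311296, so 1048576 - 311296 = 737280
  // bytes (720 KiB) of committed memory can be released.
  assert(bytes_to_free == 737280);
  return 0;
}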
  bool owned = (chunk->owner() == this);

  if (page->Chunk() == chunk) return true;

  return std::unique_ptr<ObjectIterator>(
      new LargeObjectSpaceObjectIterator(this));
void LargeObjectSpace::Verify(Isolate* isolate, /* ... */) {

  size_t external_backing_store_bytes[static_cast<int>(/* ... */)];

  PtrComprCageBase cage_base(isolate);
  for (const LargePageMetadata* chunk = first_page(); chunk != nullptr;
       chunk = chunk->next_page()) {

    CHECK(object.address() == page->area_start());
#define V(Name) Is##Name(object, cage_base) ||
    const bool is_valid_lo_space_object =
        DYNAMICALLY_SIZED_HEAP_OBJECT_LIST(V) false;

    if (!is_valid_lo_space_object) {

      FATAL("Found invalid Object (instance_type=%i) in large object space.",
            object->map(cage_base)->instance_type());

      external_backing_store_bytes[index] +=
          chunk->ExternalBackingStoreBytes(type);

  CHECK_EQ(external_backing_store_bytes[index], /* ... */);
void LargeObjectSpace::Print() {

  if (!heap()->ShouldExpandYoungGenerationOnSlowAllocation(object_size)) {

  page->ClearLiveness();

      static_cast<size_t>(object_size));
  DCHECK(!heap()->incremental_marking()->IsMarking());
  size_t surviving_object_size = 0;

  for (auto it = begin(); it != end();) {

    if (is_dead(object)) {

      surviving_object_size += static_cast<size_t>(object->Size(cage_base));
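// Illustrative sketch (not code from this file): the loop above uses an
// explicit iterator so that pages holding dead objects can be unlinked while
// iterating, while the sizes of surviving objects are summed. A standalone
// model of that sweep, with hypothetical types:
#include <cstddef>
#include <functional>
#include <list>

struct LargeObjectEntry {
  size_t size;
};

size_t SweepDeadObjects(
    std::list<LargeObjectEntry>& pages,
    const std::function<bool(const LargeObjectEntry&)>& is_dead) {
  size_t surviving_object_size = 0;
  for (auto it = pages.begin(); it != pages.end();) {
    if (is_dead(*it)) {
      it = pages.erase(it);  // unlink and advance, mirroring RemovePage + free
    } else {
      surviving_object_size += it->size;
      ++it;
    }
  }
  return surviving_object_size;
}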
                                     size_t object_size) {

  heap()->isolate()->RemoveCodeMemoryChunk(page);