// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_LARGE_SPACES_H_
#define V8_HEAP_LARGE_SPACES_H_

#include <atomic>
#include <functional>
#include <memory>
#include <unordered_map>

#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/heap/allocation-observer.h"
#include "src/heap/heap.h"
#include "src/heap/large-page-metadata.h"
#include "src/heap/spaces.h"
#include "src/objects/heap-object.h"

namespace v8 {
namespace internal {

class Isolate;
class LocalHeap;

// -----------------------------------------------------------------------------
// Large objects ( > kMaxRegularHeapObjectSize ) are allocated and managed by
// the large object space. Large objects do not move during garbage collections.

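// A minimal sketch (illustrative only, not code from this file) of how an
// allocation is routed to a large object space; `heap`, `local_heap`, and
// `result` are hypothetical locals:
//
//   if (static_cast<size_t>(object_size) > kMaxRegularHeapObjectSize) {
//     // One dedicated page per object; the object will never be moved.
//     result = heap->lo_space()->AllocateRaw(local_heap, object_size);
//   }
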
class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
 public:
  using iterator = LargePageIterator;
  using const_iterator = ConstLargePageIterator;

  ~LargeObjectSpace() override { TearDown(); }

  // Releases internal resources, frees objects in this space.
  void TearDown();

  // Available bytes for objects in this space.
  size_t Available() const override;

  size_t Size() const override { return size_; }
  size_t SizeOfObjects() const override { return objects_size_; }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() const override;

  int PageCount() const { return page_count_; }

  void ShrinkPageToObjectSize(LargePageMetadata* page,
                              Tagged<HeapObject> object, size_t object_size);

  // Checks whether a heap object is in this space; O(1).
  bool Contains(Tagged<HeapObject> obj) const;
  // Checks whether an address is in the object area in this space. Iterates
  // all objects in the space. May be slow.
  bool ContainsSlow(Address addr) const;

  // Checks whether the space is empty.
  bool IsEmpty() const { return first_page() == nullptr; }

  virtual void AddPage(LargePageMetadata* page, size_t object_size);
  virtual void RemovePage(LargePageMetadata* page);

  LargePageMetadata* first_page() override {
    return reinterpret_cast<LargePageMetadata*>(memory_chunk_list_.front());
  }
  const LargePageMetadata* first_page() const override {
    return reinterpret_cast<const LargePageMetadata*>(
        memory_chunk_list_.front());
  }

  iterator begin() { return iterator(first_page()); }
  iterator end() { return iterator(nullptr); }

  const_iterator begin() const { return const_iterator(first_page()); }
  const_iterator end() const { return const_iterator(nullptr); }
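
  // Illustrative example (not a declaration): the page iterators above allow
  // range-based iteration over every large page of a space `space`:
  //
  //   for (LargePageMetadata* page : *space) {
  //     // Exactly one large object lives on each page.
  //   }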

  std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;

  void AddAllocationObserver(AllocationObserver* observer);
  void RemoveAllocationObserver(AllocationObserver* observer);

#ifdef VERIFY_HEAP
  void Verify(Isolate* isolate, SpaceVerificationVisitor* visitor) const final;
#endif

#ifdef DEBUG
  void Print() override;
#endif

  // The last allocated object that is not guaranteed to be initialized when
  // the concurrent marker visits it.
  Address pending_object() const {
    return pending_object_.load(std::memory_order_acquire);
  }

  void ResetPendingObject() {
    pending_object_.store(0, std::memory_order_release);
  }

  base::Mutex* pending_allocation_mutex() { return &pending_allocation_mutex_; }
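
  // Illustrative sketch (not the concurrent marker's actual code): a marker
  // thread is expected to compare a candidate object against pending_object()
  // before visiting it, since the most recently allocated object may still be
  // uninitialized; `space` and `object` are hypothetical:
  //
  //   if (object.address() == space->pending_object()) {
  //     base::MutexGuard guard(space->pending_allocation_mutex());
  //     // Once the lock is acquired, the allocation has either been
  //     // published or the object must be revisited later.
  //   }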

  void set_objects_size(size_t objects_size) { objects_size_ = objects_size; }

 protected:
  LargeObjectSpace(Heap* heap, AllocationSpace id);

  void AdvanceAndInvokeAllocationObservers(Address soon_object, size_t size);

  LargePageMetadata* AllocateLargePage(int object_size,
                                       Executability executable);

  void UpdatePendingObject(Tagged<HeapObject> object);

  std::atomic<size_t> size_;          // allocated bytes
  int page_count_;                    // number of chunks
  std::atomic<size_t> objects_size_;  // size of objects
  // The mutex has to be recursive because a profiler tick might happen while
  // this lock is held; the profiler will then try to iterate the call stack,
  // which might end up calling CodeLargeObjectSpace::FindPage() and thus try
  // to lock the mutex a second time.
  base::RecursiveMutex allocation_mutex_;

  // Current potentially uninitialized object. Protected by
  // pending_allocation_mutex_.
  std::atomic<Address> pending_object_;

  // Used to protect pending_object_.
  base::Mutex pending_allocation_mutex_;

  AllocationCounter allocation_counter_;

 private:
  friend class LargeObjectSpaceObjectIterator;
};

class OldLargeObjectSpace : public LargeObjectSpace {
 public:
  V8_EXPORT_PRIVATE explicit OldLargeObjectSpace(Heap* heap);

  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
  AllocateRaw(LocalHeap* local_heap, int object_size);

  void PromoteNewLargeObject(LargePageMetadata* page);

 protected:
  explicit OldLargeObjectSpace(Heap* heap, AllocationSpace id);
  V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(LocalHeap* local_heap,
                                                     int object_size,
                                                     Executability executable);
};

class SharedLargeObjectSpace : public OldLargeObjectSpace {
 public:
  explicit SharedLargeObjectSpace(Heap* heap);
};

// Similar to the TrustedSpace, but for large objects.
class TrustedLargeObjectSpace : public OldLargeObjectSpace {
 public:
  explicit TrustedLargeObjectSpace(Heap* heap);
};

// Similar to the TrustedLargeObjectSpace, but for shared objects.
class SharedTrustedLargeObjectSpace : public OldLargeObjectSpace {
 public:
  explicit SharedTrustedLargeObjectSpace(Heap* heap);
};

class NewLargeObjectSpace : public LargeObjectSpace {
 public:
  NewLargeObjectSpace(Heap* heap, size_t capacity);

  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
  AllocateRaw(LocalHeap* local_heap, int object_size);

  // Available bytes for objects in this space.
  size_t Available() const override;

  void Flip();

  void FreeDeadObjects(const std::function<bool(Tagged<HeapObject>)>& is_dead);

  void SetCapacity(size_t capacity);

 private:
  size_t capacity_;
};

class CodeLargeObjectSpace : public OldLargeObjectSpace {
 public:
  explicit CodeLargeObjectSpace(Heap* heap);

  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
  AllocateRaw(LocalHeap* local_heap, int object_size);

 protected:
  void AddPage(LargePageMetadata* page, size_t object_size) override;
  void RemovePage(LargePageMetadata* page) override;
};

class LargeObjectSpaceObjectIterator : public ObjectIterator {
 public:
  explicit LargeObjectSpaceObjectIterator(LargeObjectSpace* space);

  Tagged<HeapObject> Next() override;

 private:
  LargePageIterator current_;
};
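
// Illustrative sketch (not part of this header's API): one way a caller with a
// LargeObjectSpace* `space` might walk every object in the space using the
// iterator above; `it` and `obj` are hypothetical locals:
//
//   LargeObjectSpaceObjectIterator it(space);
//   for (Tagged<HeapObject> obj = it.Next(); !obj.is_null(); obj = it.Next()) {
//     // Each large object occupies its own page and never moves.
//   }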

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_LARGE_SPACES_H_