v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
main-allocator.h
// Copyright 2023 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MAIN_ALLOCATOR_H_
#define V8_HEAP_MAIN_ALLOCATOR_H_

#include <atomic>
#include <limits>
#include <memory>
#include <optional>

#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/heap/allocation-observer.h"
#include "src/heap/allocation-result.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/linear-allocation-area.h"

namespace v8 {
namespace internal {

class Heap;
class LocalHeap;
class MainAllocator;
class PagedNewSpace;
class PagedSpaceBase;
class SemiSpaceNewSpace;
class SpaceWithLinearArea;

class AllocatorPolicy {
 public:
  explicit AllocatorPolicy(MainAllocator* allocator);
  virtual ~AllocatorPolicy() = default;

  // Sets up a linear allocation area that fits the given number of bytes.
  // Returns false if there is not enough space and the caller has to retry
  // after collecting garbage.
  virtual bool EnsureAllocation(int size_in_bytes,
                                AllocationAlignment alignment,
                                AllocationOrigin origin) = 0;
  virtual void FreeLinearAllocationArea() = 0;

  virtual bool SupportsExtendingLAB() const { return false; }

 protected:
  Heap* space_heap() const;
  Heap* isolate_heap() const;

  MainAllocator* const allocator_;
};

class SemiSpaceNewSpaceAllocatorPolicy final : public AllocatorPolicy {
 public:
  SemiSpaceNewSpaceAllocatorPolicy(SemiSpaceNewSpace* space,
                                   MainAllocator* allocator)
      : AllocatorPolicy(allocator), space_(space) {}

  bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment,
                        AllocationOrigin origin) final;
  void FreeLinearAllocationArea() final;

 private:
  static constexpr int kLabSizeInGC = 32 * KB;

  SemiSpaceNewSpace* const space_;
};

class PagedSpaceAllocatorPolicy final : public AllocatorPolicy {
 public:
  PagedSpaceAllocatorPolicy(PagedSpaceBase* space, MainAllocator* allocator)
      : AllocatorPolicy(allocator), space_(space) {}

  bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment,
                        AllocationOrigin origin) final;
  void FreeLinearAllocationArea() final;

 private:
  bool RefillLab(int size_in_bytes, AllocationOrigin origin);

  // Returns true if allocation may be possible after sweeping.
  bool ContributeToSweeping(
      uint32_t max_pages = std::numeric_limits<uint32_t>::max());

  bool TryAllocationFromFreeList(size_t size_in_bytes, AllocationOrigin origin);

  bool TryExpandAndAllocate(size_t size_in_bytes, AllocationOrigin origin);

  V8_WARN_UNUSED_RESULT bool TryExtendLAB(int size_in_bytes);

  void SetLinearAllocationArea(Address top, Address limit, Address end);

  PagedSpaceBase* const space_;

  friend class PagedNewSpaceAllocatorPolicy;
};

class PagedNewSpaceAllocatorPolicy final : public AllocatorPolicy {
 public:
  PagedNewSpaceAllocatorPolicy(PagedNewSpace* space, MainAllocator* allocator);

  bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment,
                        AllocationOrigin origin) final;
  void FreeLinearAllocationArea() final;

  bool SupportsExtendingLAB() const final { return true; }

 private:
  bool TryAllocatePage(int size_in_bytes, AllocationOrigin origin);
  bool WaitForSweepingForAllocation(int size_in_bytes, AllocationOrigin origin);

  PagedNewSpace* const space_;
  std::unique_ptr<PagedSpaceAllocatorPolicy> paged_space_allocator_policy_;
};

class LinearAreaOriginalData {
 public:
  Address get_original_top_acquire() const {
    return original_top_.load(std::memory_order_acquire);
  }
  Address get_original_limit_relaxed() const {
    return original_limit_.load(std::memory_order_relaxed);
  }

  void set_original_top_release(Address top) {
    original_top_.store(top, std::memory_order_release);
  }
  void set_original_limit_relaxed(Address limit) {
    original_limit_.store(limit, std::memory_order_relaxed);
  }

  base::Mutex* linear_area_lock() { return &linear_area_lock_; }

 private:
  // The top and the limit at the time of setting the linear allocation area.
  // These values can be accessed by background tasks. Protected by
  // `linear_area_lock_`.
  std::atomic<Address> original_top_ = 0;
  std::atomic<Address> original_limit_ = 0;

  // Protects original_top_ and original_limit_.
  base::Mutex linear_area_lock_;
};

class MainAllocator {
 public:
  struct InGCTag {};
  static constexpr InGCTag kInGC{};

  enum class IsNewGeneration { kNo, kYes };

  // Use this constructor on main/background threads. `allocation_info` can be
  // used for allocation support in generated code (currently new and old
  // space).
  V8_EXPORT_PRIVATE MainAllocator(
      LocalHeap* local_heap, SpaceWithLinearArea* space,
      IsNewGeneration is_new_generation,
      LinearAllocationArea* allocation_info = nullptr);

  // Use this constructor for GC LABs/allocations.
  V8_EXPORT_PRIVATE MainAllocator(Heap* heap, SpaceWithLinearArea* space,
                                  InGCTag);

  // Returns the allocation pointer in this space.
  Address start() const { return allocation_info_->start(); }
  Address top() const { return allocation_info_->top(); }
  Address limit() const { return allocation_info_->limit(); }

  // The allocation top address.
  Address* allocation_top_address() const {
    return allocation_info_->top_address();
  }

  // The allocation limit address.
  Address* allocation_limit_address() const {
    return allocation_info_->limit_address();
  }

  Address original_top_acquire() const {
    return linear_area_original_data().get_original_top_acquire();
  }

  Address original_limit_relaxed() const {
    return linear_area_original_data().get_original_limit_relaxed();
  }

  void MoveOriginalTopForward();
  V8_EXPORT_PRIVATE void ResetLab(Address start, Address end,
                                  Address extended_end);
  V8_EXPORT_PRIVATE bool IsPendingAllocation(Address object_address);

  LinearAllocationArea& allocation_info() { return *allocation_info_; }

  const LinearAllocationArea& allocation_info() const {
    return *allocation_info_;
  }

  AllocationCounter& allocation_counter() {
    return allocation_counter_.value();
  }

  const AllocationCounter& allocation_counter() const {
    return allocation_counter_.value();
  }

  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
  AllocateRaw(int size_in_bytes, AllocationAlignment alignment,
              AllocationOrigin origin);

  V8_WARN_UNUSED_RESULT V8_EXPORT_PRIVATE AllocationResult
  AllocateRawForceAlignmentForTesting(int size_in_bytes,
                                      AllocationAlignment alignment,
                                      AllocationOrigin origin);

  V8_EXPORT_PRIVATE void AddAllocationObserver(AllocationObserver* observer);
  V8_EXPORT_PRIVATE void RemoveAllocationObserver(AllocationObserver* observer);
  void PauseAllocationObservers();
  void ResumeAllocationObservers();

  V8_EXPORT_PRIVATE void AdvanceAllocationObservers();
  V8_EXPORT_PRIVATE void InvokeAllocationObservers(Address soon_object,
                                                   size_t size_in_bytes,
                                                   size_t aligned_size_in_bytes,
                                                   size_t allocation_size);

  V8_EXPORT_PRIVATE void MakeLinearAllocationAreaIterable();

  V8_EXPORT_PRIVATE void MarkLinearAllocationAreaBlack();
  V8_EXPORT_PRIVATE void UnmarkLinearAllocationArea();
  V8_EXPORT_PRIVATE void FreeLinearAllocationAreaAndResetFreeList();

  V8_EXPORT_PRIVATE Address AlignTopForTesting(AllocationAlignment alignment,
                                               int offset);

  V8_INLINE bool TryFreeLast(Address object_address, int object_size);

  // When allocation observers are active we may use a lower limit to allow the
  // observers to 'interrupt' earlier than the natural limit. Given a linear
  // area bounded by [start, end), this function computes the limit to use to
  // allow proper observation based on existing observers. min_size specifies
  // the minimum size that the limited area should have.
  Address ComputeLimit(Address start, Address end, size_t min_size) const;

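  // A hypothetical worked example of ComputeLimit (not from the original
  // header): with a linear area [0x1000, 0x3000), min_size = 0x100, and the
  // nearest observer step 0x800 bytes ahead, the returned limit would be
  // roughly min(end, start + max(min_size, 0x800)) = 0x1800, so allocation
  // takes the slow path and notifies observers before the natural limit.
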
#if DEBUG
  void Verify() const;
#endif  // DEBUG

  // Checks whether the LAB is currently in use.
  V8_INLINE bool IsLabValid() const {
    return allocation_info_->top() != kNullAddress;
  }

  V8_EXPORT_PRIVATE void FreeLinearAllocationArea();

  void ExtendLAB(Address limit);

  V8_EXPORT_PRIVATE bool EnsureAllocationForTesting(
      int size_in_bytes, AllocationAlignment alignment,
      AllocationOrigin origin);

 private:
  enum class BlackAllocation {
    kAlwaysEnabled,
    kAlwaysDisabled,
    kEnabledOnMarking
  };

  static constexpr BlackAllocation ComputeBlackAllocation(IsNewGeneration);

  // Allocates an object from the linear allocation area. Assumes that the
  // linear allocation area is large enough to fit the object.
  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
  AllocateFastUnaligned(int size_in_bytes, AllocationOrigin origin);

  // Tries to allocate an aligned object from the linear allocation area.
  // Returns a failure result if the linear allocation area does not fit the
  // object. Otherwise, returns the object and writes the allocation size
  // (object size + alignment filler size) to result_aligned_size_in_bytes.
  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
  AllocateFastAligned(int size_in_bytes, int* result_aligned_size_in_bytes,
                      AllocationAlignment alignment, AllocationOrigin origin);

  // Slow path of the allocation function.
  V8_WARN_UNUSED_RESULT V8_EXPORT_PRIVATE AllocationResult
  AllocateRawSlow(int size_in_bytes, AllocationAlignment alignment,
                  AllocationOrigin origin);

  // Allocates the requested number of bytes in the space if possible; returns
  // a failure object if not.
  V8_WARN_UNUSED_RESULT AllocationResult AllocateRawSlowUnaligned(
      int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);

  // Allocates the requested number of bytes in the space double-aligned if
  // possible; returns a failure object if not.
  V8_WARN_UNUSED_RESULT AllocationResult
  AllocateRawSlowAligned(int size_in_bytes, AllocationAlignment alignment,
                         AllocationOrigin origin = AllocationOrigin::kRuntime);

  bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment,
                        AllocationOrigin origin);

  void MarkLabStartInitialized();

  bool IsBlackAllocationEnabled() const;

  LinearAreaOriginalData& linear_area_original_data() {
    return linear_area_original_data_.value();
  }

  const LinearAreaOriginalData& linear_area_original_data() const {
    return linear_area_original_data_.value();
  }

  int ObjectAlignment() const;

  AllocationSpace identity() const;

  bool SupportsAllocationObserver() const {
    return allocation_counter_.has_value();
  }

  bool SupportsPendingAllocation() const {
    return linear_area_original_data_.has_value();
  }

  // Returns true when this LAB is used during GC.
  bool in_gc() const { return local_heap_ == nullptr; }

  // Returns true when this LAB is used during GC and the space is in the heap
  // that is currently collected. This is needed because a GC can directly
  // promote new space objects into shared space (which might not be currently
  // collected in worker isolates).
  bool in_gc_for_space() const;

  bool supports_extending_lab() const { return supports_extending_lab_; }

  V8_EXPORT_PRIVATE bool is_main_thread() const;

  LocalHeap* local_heap() const { return local_heap_; }

  // The heap for the current thread (respectively LocalHeap). See comment for
  // `space_heap()` as well.
  Heap* isolate_heap() const { return isolate_heap_; }

  // Returns the space's heap. Note that this might differ from
  // `isolate_heap()` for shared space in worker isolates.
  V8_EXPORT_PRIVATE Heap* space_heap() const;

  // The current main or background thread's LocalHeap. nullptr for GC threads.
  LocalHeap* const local_heap_;
  Heap* const isolate_heap_;
  SpaceWithLinearArea* const space_;

  std::optional<AllocationCounter> allocation_counter_;
  // This memory is used if no LinearAllocationArea is passed in as an argument.
  LinearAllocationArea owned_allocation_info_;
  LinearAllocationArea* const allocation_info_;
  std::optional<LinearAreaOriginalData> linear_area_original_data_;
  std::unique_ptr<AllocatorPolicy> allocator_policy_;

  const bool supports_extending_lab_;
  const BlackAllocation black_allocation_;

  friend class AllocatorPolicy;
  friend class PagedSpaceAllocatorPolicy;
  friend class SemiSpaceNewSpaceAllocatorPolicy;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MAIN_ALLOCATOR_H_
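
To make the fast-path/slow-path split above concrete, here is a minimal, self-contained sketch of the bump-pointer scheme these declarations describe. It is an illustration only, not code from the V8 tree: Lab, AllocateFast, AllocateSlow, and the malloc-based refill are hypothetical stand-ins, and V8's actual slow path consults an AllocatorPolicy (and may trigger garbage collection) rather than calling malloc.

#include <cstdint>
#include <cstdlib>

using Address = std::uintptr_t;

// A toy linear allocation area (LAB): allocation bumps `top` toward `limit`.
struct Lab {
  Address top = 0;
  Address limit = 0;
};

// Fast path, cf. MainAllocator::AllocateFastUnaligned: bump `top` if the
// request fits, otherwise report failure with nullptr.
inline void* AllocateFast(Lab& lab, int size_in_bytes) {
  if (lab.limit - lab.top < static_cast<Address>(size_in_bytes)) {
    return nullptr;
  }
  Address result = lab.top;
  lab.top += size_in_bytes;
  return reinterpret_cast<void*>(result);
}

// Slow path, cf. MainAllocator::AllocateRawSlow: set up a fresh linear area
// and retry the fast path. The malloc-based refill (never freed in this
// sketch) stands in for V8's policy-driven refill.
void* AllocateSlow(Lab& lab, int size_in_bytes, int lab_size) {
  if (size_in_bytes > lab_size) return nullptr;  // would not fit any LAB
  void* block = std::malloc(lab_size);
  if (block == nullptr) return nullptr;
  lab.top = reinterpret_cast<Address>(block);
  lab.limit = lab.top + lab_size;
  return AllocateFast(lab, size_in_bytes);
}

// Combined entry point, cf. MainAllocator::AllocateRaw.
void* AllocateRaw(Lab& lab, int size_in_bytes) {
  if (void* result = AllocateFast(lab, size_in_bytes)) return result;
  return AllocateSlow(lab, size_in_bytes, 32 * 1024);  // cf. kLabSizeInGC
}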