v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
object-allocator.cc
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/cppgc/object-allocator.h"

#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/heap.h"

namespace cppgc {
namespace internal {

namespace {

void MarkRangeAsYoung(BasePage& page, Address begin, Address end) {
#if defined(CPPGC_YOUNG_GENERATION)
  DCHECK_LT(begin, end);

  if (!page.heap().generational_gc_supported()) return;

  // If the page is newly allocated, force the first and last cards to be
  // marked as young.
  const bool new_page =
      (begin == page.PayloadStart()) && (end == page.PayloadEnd());

  auto& age_table = CagedHeapLocalData::Get().age_table;
  age_table.SetAgeForRange(CagedHeap::OffsetFromAddress(begin),
                           CagedHeap::OffsetFromAddress(end),
                           AgeTable::Age::kYoung,
                           new_page ? AgeTable::AdjacentCardsPolicy::kIgnore
                                    : AgeTable::AdjacentCardsPolicy::kConsider);
  page.set_as_containing_young_objects(true);
#endif  // defined(CPPGC_YOUNG_GENERATION)
}

void AddToFreeList(NormalPageSpace& space, Address start, size_t size) {
  // No need for SetMemoryInaccessible() as LAB memory is retrieved as free
  // inaccessible memory.
  space.free_list().Add({start, size});
  // Concurrent marking may be running while the LAB is set up next to a live
  // object sharing the same cell in the bitmap.
  auto& page = *NormalPage::From(BasePage::FromPayload(start));
  page.object_start_bitmap().SetBit<AccessMode::kAtomic>(start);
}

void ReplaceLinearAllocationBuffer(NormalPageSpace& space,
                                   StatsCollector& stats_collector,
                                   Address new_buffer, size_t new_size) {
  auto& lab = space.linear_allocation_buffer();
  if (lab.size()) {
    AddToFreeList(space, lab.start(), lab.size());
    stats_collector.NotifyExplicitFree(lab.size());
  }

  lab.Set(new_buffer, new_size);
  if (new_size) {
    DCHECK_NOT_NULL(new_buffer);
    stats_collector.NotifyAllocation(new_size);
    auto* page = NormalPage::From(BasePage::FromPayload(new_buffer));
    // Concurrent marking may be running while the LAB is set up next to a live
    // object sharing the same cell in the bitmap.
    page->object_start_bitmap().ClearBit<AccessMode::kAtomic>(new_buffer);
    MarkRangeAsYoung(*page, new_buffer, new_buffer + new_size);
  }
}

LargePage* TryAllocateLargeObjectImpl(PageBackend& page_backend,
                                      LargePageSpace& space, size_t size) {
  LargePage* page = LargePage::TryCreate(page_backend, space, size);
  if (page) return page;

  Sweeper& sweeper = space.raw_heap()->heap()->sweeper();

  // Lazily sweep pages of this heap. This is not exhaustive to limit jank on
  // allocation.
  if (sweeper.SweepForAllocationIfRunning(
          &space, size, v8::base::TimeDelta::FromMicroseconds(500)) &&
      (page = LargePage::TryCreate(page_backend, space, size))) {
    return page;
  }

  // Before finishing all sweeping, finish sweeping of the given space, which
  // is cheaper.
  if (sweeper.SweepForAllocationIfRunning(&space, size,
                                          v8::base::TimeDelta::Max()) &&
      (page = LargePage::TryCreate(page_backend, space, size))) {
    return page;
  }

  if (sweeper.FinishIfRunning() &&
      (page = LargePage::TryCreate(page_backend, space, size))) {
    return page;
  }

  return nullptr;
}

void* TryAllocateLargeObject(PageBackend& page_backend, LargePageSpace& space,
                             StatsCollector& stats_collector, size_t size,
                             GCInfoIndex gcinfo) {
  LargePage* page = TryAllocateLargeObjectImpl(page_backend, space, size);
  if (!page) return nullptr;

  space.AddPage(page);

  auto* header = new (page->ObjectHeader())
      HeapObjectHeader(HeapObjectHeader::kLargeObjectSizeInHeader, gcinfo);

  stats_collector.NotifyAllocation(size);
  MarkRangeAsYoung(*page, page->PayloadStart(), page->PayloadEnd());

  return header->ObjectStart();
}

}  // namespace

ObjectAllocator::ObjectAllocator(RawHeap& heap, PageBackend& page_backend,
                                 StatsCollector& stats_collector,
                                 PreFinalizerHandler& prefinalizer_handler,
                                 FatalOutOfMemoryHandler& oom_handler,
                                 GarbageCollector& garbage_collector)
    : raw_heap_(heap),
      page_backend_(page_backend),
      stats_collector_(stats_collector),
      prefinalizer_handler_(prefinalizer_handler),
      oom_handler_(oom_handler),
      garbage_collector_(garbage_collector) {}

void ObjectAllocator::OutOfLineAllocateGCSafePoint(NormalPageSpace& space,
                                                   size_t size,
                                                   AlignVal alignment,
                                                   GCInfoIndex gcinfo,
                                                   void** object) {
  *object = OutOfLineAllocateImpl(space, size, alignment, gcinfo);
  stats_collector_.NotifySafePointForConservativeCollection();
  if (prefinalizer_handler_.IsInvokingPreFinalizers()) {
    // Objects allocated during pre finalizers should be allocated as black
    // since marking is already done. Atomics are not needed because there is
    // no concurrent marking in the background.
    HeapObjectHeader::FromObject(*object).MarkNonAtomic();
    // Resetting the allocation buffer forces all further allocations in pre
    // finalizers to go through this slow path.
    ReplaceLinearAllocationBuffer(space, stats_collector_, nullptr, 0);
  }
}

namespace {
constexpr GCConfig kOnAllocationFailureGCConfig = {
    CollectionType::kMajor, StackState::kMayContainHeapPointers,
    GCConfig::MarkingType::kAtomic,
    GCConfig::SweepingType::kIncrementalAndConcurrent,
    GCConfig::FreeMemoryHandling::kDiscardWherePossible};
}  // namespace

void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace& space,
                                             size_t size, AlignVal alignment,
                                             GCInfoIndex gcinfo) {
  DCHECK_EQ(0, size & kAllocationMask);
  DCHECK_LE(kFreeListEntrySize, size);
  // Out-of-line allocation allows for checking this in all situations.
  CHECK(!raw_heap_.heap()->IsGCForbidden());

  // If this allocation is big enough, allocate a large object.
  if (size >= kLargeObjectSizeThreshold) {
    auto& large_space = LargePageSpace::From(
        *raw_heap_.Space(RawHeap::RegularSpaceType::kLarge));
    // LargePage has a natural alignment that already satisfies
    // `kMaxSupportedAlignment`.
    void* result = TryAllocateLargeObject(page_backend_, large_space,
                                          stats_collector_, size, gcinfo);
    if (!result) {
      for (int i = 0; i < 2; i++) {
        auto config = kOnAllocationFailureGCConfig;
        garbage_collector_.CollectGarbage(config);
        result = TryAllocateLargeObject(page_backend_, large_space,
                                        stats_collector_, size, gcinfo);
        if (result) {
          return result;
        }
      }
#if defined(CPPGC_CAGED_HEAP)
      const auto last_alloc_status =
          CagedHeap::Instance().page_allocator().get_last_allocation_status();
      const std::string suffix =
          v8::base::BoundedPageAllocator::AllocationStatusToString(
              last_alloc_status);
      oom_handler_("Oilpan: Large allocation. " + suffix);
#else
      oom_handler_("Oilpan: Large allocation.");
#endif
    }
    return result;
  }

  size_t request_size = size;
  // Adjust size to be able to accommodate alignment.
  const size_t dynamic_alignment = static_cast<size_t>(alignment);
  if (dynamic_alignment != kAllocationGranularity) {
    CHECK_EQ(2 * sizeof(HeapObjectHeader), dynamic_alignment);
    request_size += kAllocationGranularity;
  }

  bool success = TryRefillLinearAllocationBuffer(space, request_size);
  if (!success) {
    for (int i = 0; i < 2; i++) {
      auto config = kOnAllocationFailureGCConfig;
      garbage_collector_.CollectGarbage(config);
      success = TryRefillLinearAllocationBuffer(space, request_size);
      if (success) {
        break;
      }
    }
    if (!success) {
#if defined(CPPGC_CAGED_HEAP)
      const auto last_alloc_status =
          CagedHeap::Instance().page_allocator().get_last_allocation_status();
      const std::string suffix =
          v8::base::BoundedPageAllocator::AllocationStatusToString(
              last_alloc_status);
      oom_handler_("Oilpan: Normal allocation. " + suffix);
#else
      oom_handler_("Oilpan: Normal allocation.");
#endif
    }
  }

  // The allocation must succeed, as we just refilled the LAB.
  void* result = (dynamic_alignment == kAllocationGranularity)
                     ? AllocateObjectOnSpace(space, size, gcinfo)
                     : AllocateObjectOnSpace(space, size, alignment, gcinfo);
  CHECK(result);
  return result;
}

bool ObjectAllocator::TryExpandAndRefillLinearAllocationBuffer(
    NormalPageSpace& space) {
  auto* const new_page = NormalPage::TryCreate(page_backend_, space);
  if (!new_page) return false;

  space.AddPage(new_page);
  // Set linear allocation buffer to new page.
  ReplaceLinearAllocationBuffer(space, stats_collector_,
                                new_page->PayloadStart(),
                                new_page->PayloadSize());
  return true;
}

bool ObjectAllocator::TryRefillLinearAllocationBuffer(NormalPageSpace& space,
                                                      size_t size) {
  // Try to allocate from the freelist.
  if (TryRefillLinearAllocationBufferFromFreeList(space, size)) return true;

  Sweeper& sweeper = raw_heap_.heap()->sweeper();
  // Lazily sweep pages of this heap. This is not exhaustive to limit jank on
  // allocation. Allocation from the free list may still fail as actual buckets
  // are not exhaustively searched for a suitable block. Instead, buckets are
  // tested from larger sizes that are guaranteed to fit the block to smaller
  // bucket sizes that may only potentially fit the block. For the bucket that
  // may exactly fit the allocation of `size` bytes (no overallocation), only
  // the first entry is checked.
  if (sweeper.SweepForAllocationIfRunning(
          &space, size, v8::base::TimeDelta::FromMicroseconds(500)) &&
      TryRefillLinearAllocationBufferFromFreeList(space, size)) {
    return true;
  }

  // Sweeping was off or did not yield any memory within the limited
  // contribution. We expand at this point as that's cheaper than possibly
  // continuing to sweep the whole heap.
  if (TryExpandAndRefillLinearAllocationBuffer(space)) return true;

  // Expansion failed. Before finishing all sweeping, finish sweeping of the
  // given space, which is cheaper.
  if (sweeper.SweepForAllocationIfRunning(&space, size,
                                          v8::base::TimeDelta::Max()) &&
      TryRefillLinearAllocationBufferFromFreeList(space, size)) {
    return true;
  }

  // Heap expansion and sweeping of a space failed. At this point the caller
  // could run OOM or do a full GC which needs to finish sweeping if it's
  // running. Hence, we may as well finish sweeping here. Note that this is
  // possibly very expensive but not more expensive than running a full GC as
  // the alternative is OOM.
  if (sweeper.FinishIfRunning()) {
    // Sweeping may have added memory to the free list.
    if (TryRefillLinearAllocationBufferFromFreeList(space, size)) return true;

    // Sweeping may have freed pages completely.
    if (TryExpandAndRefillLinearAllocationBuffer(space)) return true;
  }
  return false;
}

bool ObjectAllocator::TryRefillLinearAllocationBufferFromFreeList(
    NormalPageSpace& space, size_t size) {
  const FreeList::Block entry = space.free_list().Allocate(size);
  if (!entry.address) return false;

  // Assume discarded memory on that page is now zero.
  auto& page = *NormalPage::From(BasePage::FromPayload(entry.address));
  if (page.discarded_memory()) {
    stats_collector_.DecrementDiscardedMemory(page.discarded_memory());
    page.ResetDiscardedMemory();
  }

  ReplaceLinearAllocationBuffer(
      space, stats_collector_, static_cast<Address>(entry.address), entry.size);
  return true;
}

void ObjectAllocator::ResetLinearAllocationBuffers() {
  class Resetter : public HeapVisitor<Resetter> {
   public:
    explicit Resetter(StatsCollector& stats) : stats_collector_(stats) {}

    bool VisitLargePageSpace(LargePageSpace&) { return true; }

    bool VisitNormalPageSpace(NormalPageSpace& space) {
      ReplaceLinearAllocationBuffer(space, stats_collector_, nullptr, 0);
      return true;
    }

   private:
    StatsCollector& stats_collector_;
  } visitor(stats_collector_);

  visitor.Traverse(raw_heap_);
}

void ObjectAllocator::MarkAllPagesAsYoung() {
  class YoungMarker : public HeapVisitor<YoungMarker> {
   public:
    bool VisitNormalPage(NormalPage& page) {
      MarkRangeAsYoung(page, page.PayloadStart(), page.PayloadEnd());
      return true;
    }

    bool VisitLargePage(LargePage& page) {
      MarkRangeAsYoung(page, page.PayloadStart(), page.PayloadEnd());
      return true;
    }
  } visitor;
  USE(visitor);

#if defined(CPPGC_YOUNG_GENERATION)
  visitor.Traverse(raw_heap_);
#endif  // defined(CPPGC_YOUNG_GENERATION)
}

#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
void ObjectAllocator::UpdateAllocationTimeout() {
  allocation_timeout_ = garbage_collector_.UpdateAllocationTimeout();
}

void ObjectAllocator::TriggerGCOnAllocationTimeoutIfNeeded() {
  if (!allocation_timeout_) return;
  DCHECK_GT(*allocation_timeout_, 0);
  if (--*allocation_timeout_ == 0) {
    garbage_collector_.CollectGarbage(kOnAllocationFailureGCConfig);
    allocation_timeout_ = garbage_collector_.UpdateAllocationTimeout();
    DCHECK(allocation_timeout_);
    DCHECK_GT(*allocation_timeout_, 0);
  }
}
#endif  // V8_ENABLE_ALLOCATION_TIMEOUT

}  // namespace internal
}  // namespace cppgc
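
The slow path in this file is only entered when the inline fast path in object-allocator.h cannot serve an allocation from the current linear allocation buffer (LAB). The standalone sketch below shows how embedder code typically reaches this allocator through the public cppgc API. It assumes the standalone heap setup (cppgc::DefaultPlatform, cppgc::Heap::Create) rather than the V8-embedded configuration, and the Node type is purely illustrative.

// Minimal standalone sketch (not part of object-allocator.cc): reaching
// ObjectAllocator via the public cppgc embedder API. `Node` is illustrative.
#include <memory>

#include <cppgc/allocation.h>
#include <cppgc/default-platform.h>
#include <cppgc/garbage-collected.h>
#include <cppgc/heap.h>
#include <cppgc/member.h>
#include <cppgc/platform.h>
#include <cppgc/visitor.h>

class Node final : public cppgc::GarbageCollected<Node> {
 public:
  explicit Node(int value) : value_(value) {}
  void Trace(cppgc::Visitor* visitor) const { visitor->Trace(next_); }

  cppgc::Member<Node> next_;
  int value_;
};

int main() {
  auto platform = std::make_shared<cppgc::DefaultPlatform>();
  cppgc::InitializeProcess(platform->GetPageAllocator());
  std::unique_ptr<cppgc::Heap> heap = cppgc::Heap::Create(platform);

  // Each MakeGarbageCollected() call ends up in ObjectAllocator: small
  // objects are bump-allocated from a normal page's LAB, while objects of
  // kLargeObjectSizeThreshold or more get their own LargePage via
  // TryAllocateLargeObject().
  Node* head =
      cppgc::MakeGarbageCollected<Node>(heap->GetAllocationHandle(), 1);
  head->next_ =
      cppgc::MakeGarbageCollected<Node>(heap->GetAllocationHandle(), 2);

  cppgc::ShutdownProcess();
  return 0;
}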
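
The helpers in this file manage the refill side of the allocator (ReplaceLinearAllocationBuffer, TryRefillLinearAllocationBuffer and friends); consuming the LAB is a plain bump-pointer allocation performed by the inline fast path. As a rough mental model only (the ToyLab type below is invented for illustration and is not the cppgc implementation), a LAB behaves as follows:

#include <cstddef>
#include <cstdint>

// Illustrative-only model of a linear allocation buffer. The real allocator
// additionally writes object headers, maintains the object-start bitmap, and
// marks newly used ranges as young.
struct ToyLab {
  std::uintptr_t start = 0;
  std::size_t size = 0;

  // Fast path: bump the pointer while the buffer has room.
  void* TryAllocate(std::size_t bytes) {
    if (bytes > size) return nullptr;  // Exhausted: caller takes the slow path.
    void* result = reinterpret_cast<void*>(start);
    start += bytes;
    size -= bytes;
    return result;
  }

  // Slow-path outcome, mirroring ReplaceLinearAllocationBuffer(): the unused
  // remainder of the old buffer is returned to the free list, then the LAB is
  // pointed at fresh memory (a free-list block, swept memory, or a new page).
  void Replace(std::uintptr_t new_start, std::size_t new_size) {
    start = new_start;
    size = new_size;
  }
};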