v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
external-entity-table-inl.h
// Copyright 2023 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_SANDBOX_EXTERNAL_ENTITY_TABLE_INL_H_
#define V8_SANDBOX_EXTERNAL_ENTITY_TABLE_INL_H_

#include "src/sandbox/external-entity-table.h"
// Include the non-inl header before the rest of the headers.

#include "src/base/atomicops.h"
#include "src/base/emulated-virtual-address-subspace.h"
#include "src/base/iterator.h"
#include "src/common/assert-scope.h"
#include "src/common/segmented-table-inl.h"
#include "src/utils/allocation.h"

namespace v8 {
namespace internal {
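
// Entries in this table live in fixed-size segments. Each Space owns a set of
// segments plus an atomic FreelistHead that links together all currently
// unused entries in that space, so entry allocation is lock-free in the common
// case and only takes the space's mutex when a new segment has to be added.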

template <typename Entry, size_t size>
ExternalEntityTable<Entry, size>::Space::~Space() {
  // The segments belonging to this space must have already been deallocated
  // (through TearDownSpace()), otherwise we may leak memory.
  DCHECK(segments_.empty());
}

template <typename Entry, size_t size>
uint32_t ExternalEntityTable<Entry, size>::Space::freelist_length() const {
  auto freelist = freelist_head_.load(std::memory_order_relaxed);
  return freelist.length();
}
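
// Note: freelist_length() reads the freelist head with a relaxed load and
// without holding the space's mutex, so the returned length is only a
// snapshot that may already be stale by the time the caller acts on it.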

template <typename Entry, size_t size>
uint32_t ExternalEntityTable<Entry, size>::Space::num_segments() {
  mutex_.AssertHeld();
  return static_cast<uint32_t>(segments_.size());
}

template <typename Entry, size_t size>
bool ExternalEntityTable<Entry, size>::Space::Contains(uint32_t index) {
  base::MutexGuard guard(&mutex_);
  Segment segment = Segment::Containing(index);
  return segments_.find(segment) != segments_.end();
}

template <typename Entry, size_t size>
void ExternalEntityTable<Entry, size>::InitializeTable() {
  Base::Initialize();

  // Allocate the read-only segment of the table. This segment is always
  // located at offset 0, and contains the null entry (pointing at
  // kNullAddress) at index 0. It may later be temporarily marked read-write,
  // see UnsealedReadOnlySegmentScope.
  Address first_segment = this->vas_->AllocatePages(
      this->vas_->base(), kSegmentSize, kSegmentSize, PagePermissions::kRead);
  if (first_segment != this->vas_->base()) {
    V8::FatalProcessOutOfMemory(
        nullptr,
        "ExternalEntityTable::InitializeTable (first segment allocation)");
  }
  DCHECK_EQ(first_segment - this->vas_->base(), kInternalReadOnlySegmentOffset);
}

template <typename Entry, size_t size>
void ExternalEntityTable<Entry, size>::TearDownTable() {
  DCHECK(this->is_initialized());

  if constexpr (kUseContiguousMemory) {
    // Deallocate the (read-only) first segment.
    this->vas_->FreePages(this->vas_->base(), kSegmentSize);
  }

  Base::TearDown();
}

template <typename Entry, size_t size>
void ExternalEntityTable<Entry, size>::InitializeSpace(Space* space) {
#ifdef DEBUG
  DCHECK_EQ(space->owning_table_, nullptr);
  space->owning_table_ = this;
#endif
}

template <typename Entry, size_t size>
void ExternalEntityTable<Entry, size>::TearDownSpace(Space* space) {
  DCHECK(this->is_initialized());
  DCHECK(space->BelongsTo(this));
  for (auto segment : space->segments_) {
    this->FreeTableSegment(segment);
  }
  space->segments_.clear();
}

template <typename Entry, size_t size>
void ExternalEntityTable<Entry, size>::AttachSpaceToReadOnlySegment(
    Space* space) {
  DCHECK(this->is_initialized());
  DCHECK(space->BelongsTo(this));

  DCHECK(!space->is_internal_read_only_space());
  space->is_internal_read_only_space_ = true;

  UnsealReadOnlySegmentScope unseal_scope(this);

  // Physically attach the segment.
  FreelistHead freelist;
  {
    base::MutexGuard guard(&space->mutex_);
    DCHECK_EQ(space->segments_.size(), 0);
    Segment segment = Segment::At(kInternalReadOnlySegmentOffset);

    // For the internal read-only segment, index 0 is reserved for the `null`
    // entry, so start the freelist at offset 1.
    freelist = Base::InitializeFreeList(segment, 1);

    Extend(space, segment, freelist);
  }

  DCHECK(!freelist.is_empty());
  DCHECK_EQ(freelist.next(), kInternalNullEntryIndex + 1);
  DCHECK(space->Contains(freelist.next()));
}
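
// Note: because the first segment is mapped read-only, attaching it requires
// temporarily unsealing it (via the UnsealReadOnlySegmentScope above) so that
// InitializeFreeList() can write the freelist links into its entries; the
// segment is sealed again when that scope ends.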

template <typename Entry, size_t size>
void ExternalEntityTable<Entry, size>::DetachSpaceFromReadOnlySegment(
    Space* space) {
  DCHECK(this->is_initialized());
  DCHECK(space->BelongsTo(this));
  // Remove the RO segment from the space's segment list without freeing it.
  // The table itself manages the RO segment's lifecycle.
  base::MutexGuard guard(&space->mutex_);
  DCHECK_EQ(space->segments_.size(), 1);
  space->segments_.clear();
}

template <typename Entry, size_t size>
void ExternalEntityTable<Entry, size>::UnsealReadOnlySegment() {
  DCHECK(this->is_initialized());
  bool success = this->vas_->SetPagePermissions(
      this->vas_->base(), kSegmentSize, PagePermissions::kReadWrite);
  CHECK(success);
}

template <typename Entry, size_t size>
void ExternalEntityTable<Entry, size>::SealReadOnlySegment() {
  DCHECK(this->is_initialized());
  bool success = this->vas_->SetPagePermissions(
      this->vas_->base(), kSegmentSize, PagePermissions::kRead);
  CHECK(success);
}

template <typename Entry, size_t size>
uint32_t ExternalEntityTable<Entry, size>::AllocateEntry(Space* space) {
  if (auto res = TryAllocateEntry(space)) {
    return *res;
  }
  V8::FatalProcessOutOfMemory(nullptr, "ExternalEntityTable::AllocateEntry");
}

template <typename Entry, size_t size>
std::optional<uint32_t> ExternalEntityTable<Entry, size>::TryAllocateEntry(
    Space* space) {
  DCHECK(this->is_initialized());
  DCHECK(space->BelongsTo(this));

  // We currently don't want entry allocation to trigger garbage collection as
  // this may cause seemingly harmless pointer field assignments to trigger
  // garbage collection. This is especially true for lazily-initialized
  // external pointer slots which will typically only allocate the external
  // pointer table entry when the pointer is first set to a non-null value.
  DisallowGarbageCollection no_gc;

  FreelistHead freelist;
  for (;;) {
    // This is essentially DCLP (see
    // https://preshing.com/20130930/double-checked-locking-is-fixed-in-cpp11/)
    // and so requires an acquire load as well as a release store in Extend()
    // to prevent reordering of memory accesses, which could for example cause
    // one thread to read a freelist entry before it has been properly
    // initialized.
    freelist = space->freelist_head_.load(std::memory_order_acquire);
    if (V8_UNLIKELY(freelist.is_empty())) {
      // Freelist is empty. Need to take the lock, then attempt to allocate a
      // new segment if no other thread has done it in the meantime.
      base::MutexGuard guard(&space->mutex_);

      // Reload freelist head in case another thread already grew the table.
      freelist = space->freelist_head_.load(std::memory_order_relaxed);

      if (freelist.is_empty()) {
        // Freelist is (still) empty so extend this space by another segment.
        if (auto maybe_freelist = TryExtend(space)) {
          freelist = *maybe_freelist;
        } else {
          return {};
        }
        // Extend() adds one segment to the space and so to its freelist.
        DCHECK_EQ(freelist.length(), kEntriesPerSegment);
      }
    }

    if (V8_LIKELY(TryAllocateEntryFromFreelist(space, freelist))) {
      break;
    }
  }

  uint32_t allocated_entry = freelist.next();
  DCHECK(space->Contains(allocated_entry));
  DCHECK_IMPLIES(!space->is_internal_read_only_space(), allocated_entry != 0);
  return allocated_entry;
}
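
// Note: when TryAllocateEntryFromFreelist() fails, another thread won the race
// for the entry at the head of the freelist; the loop above then simply
// reloads the freelist head (with acquire semantics) and tries again.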

template <typename Entry, size_t size>
uint32_t ExternalEntityTable<Entry, size>::AllocateEntryBelow(
    Space* space, uint32_t threshold_index) {
  DCHECK(this->is_initialized());

  FreelistHead freelist;
  bool success = false;
  while (!success) {
    freelist = space->freelist_head_.load(std::memory_order_acquire);
    // Check that the next free entry is below the threshold.
    if (freelist.is_empty() || freelist.next() >= threshold_index) return 0;

    success = TryAllocateEntryFromFreelist(space, freelist);
  }

  uint32_t allocated_entry = freelist.next();
  DCHECK(space->Contains(allocated_entry));
  DCHECK_NE(allocated_entry, 0);
  DCHECK_LT(allocated_entry, threshold_index);
  return allocated_entry;
}
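
// A return value of 0 indicates that no free entry below threshold_index was
// available. Index 0 itself is never handed out here (see the DCHECK_NE
// above), as it is reserved for the internal null entry.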

template <typename Entry, size_t size>
bool ExternalEntityTable<Entry, size>::TryAllocateEntryFromFreelist(
    Space* space, FreelistHead freelist) {
  DCHECK(!freelist.is_empty());
  DCHECK(space->Contains(freelist.next()));

  Entry& freelist_entry = this->at(freelist.next());
  uint32_t next_freelist_entry = freelist_entry.GetNextFreelistEntryIndex();
  FreelistHead new_freelist(next_freelist_entry, freelist.length() - 1);
  bool success = space->freelist_head_.compare_exchange_strong(
      freelist, new_freelist, std::memory_order_relaxed);

  // If the CAS succeeded, the entry must have been a freelist entry.
  // Otherwise, this is not guaranteed as another thread may have allocated
  // and overwritten the same entry in the meantime.
  if (success) {
    DCHECK_IMPLIES(freelist.length() > 1, !new_freelist.is_empty());
    DCHECK_IMPLIES(freelist.length() == 1, new_freelist.is_empty());
  }
  return success;
}
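
// Note that the compare-exchange above replaces the entire FreelistHead, i.e.
// both the next-entry index and the length, so any concurrent allocation,
// sweep, or extension that changed either field causes the CAS to fail and
// the caller to retry.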

template <typename Entry, size_t size>
std::optional<typename ExternalEntityTable<Entry, size>::FreelistHead>
ExternalEntityTable<Entry, size>::TryExtend(Space* space) {
  // Freelist should be empty when calling this method.
  DCHECK_EQ(space->freelist_length(), 0);
  // The caller must lock the space's mutex before extending it.
  space->mutex_.AssertHeld();
  // The read-only space must never be extended with a newly-allocated segment.
  DCHECK(!space->is_internal_read_only_space());

  // Allocate the new segment.
  auto extended = this->TryAllocateAndInitializeSegment();
  if (!extended) return {};

  auto [segment, freelist_head] = *extended;
  Extend(space, segment, freelist_head);
  return freelist_head;
}

template <typename Entry, size_t size>
void ExternalEntityTable<Entry, size>::Extend(Space* space, Segment segment,
                                              FreelistHead freelist) {
  // Freelist should be empty when calling this method.
  DCHECK_EQ(space->freelist_length(), 0);
  // The caller must lock the space's mutex before extending it.
  space->mutex_.AssertHeld();

  space->segments_.insert(segment);
  DCHECK_IMPLIES(!space->is_internal_read_only_space(),
                 segment.number() != 0);
  DCHECK_EQ(space->is_internal_read_only_space(), segment.number() == 0);
  DCHECK_EQ(space->is_internal_read_only_space(),
            segment.first_entry() == kInternalNullEntryIndex);

  if (V8_UNLIKELY(space->is_internal_read_only_space())) {
    // For the internal read-only segment, index 0 is reserved for the `null`
    // entry. The underlying memory has been nulled by allocation, and is
    // therefore already initialized.
#ifdef DEBUG
    uint32_t first = segment.first_entry();
    CHECK_EQ(first, kInternalNullEntryIndex);
    static constexpr uint8_t kNullBytes[kEntrySize] = {0};
    CHECK_EQ(memcmp(&this->at(first), kNullBytes, kEntrySize), 0);
#endif  // DEBUG
  }

  // This must be a release store to prevent earlier stores to the freelist
  // (for example during initialization of the segment) from being reordered
  // past this store. See AllocateEntry() for more details.
  space->freelist_head_.store(freelist, std::memory_order_release);
}

template <typename Entry, size_t size>
uint32_t ExternalEntityTable<Entry, size>::GenericSweep(Space* space) {
  return GenericSweep(space, [](Entry&) {});
}

template <typename Entry, size_t size>
template <typename Callback>
uint32_t ExternalEntityTable<Entry, size>::GenericSweep(Space* space,
                                                        Callback callback) {
  DCHECK(space->BelongsTo(this));

  // Lock the space. Technically this is not necessary since no other thread
  // can allocate entries at this point, but some of the methods we call on
  // the space assert that the lock is held.
  base::MutexGuard guard(&space->mutex_);

  // There must not be any entry allocations while the table is being swept as
  // that would not be safe. Set the freelist to this special marker value to
  // easily catch any violation of this requirement.
  space->freelist_head_.store(kEntryAllocationIsForbiddenMarker,
                              std::memory_order_relaxed);

  // Here we can iterate over the segments collection without taking a lock
  // because no other thread can currently allocate entries in this space.
  uint32_t current_freelist_head = 0;
  uint32_t current_freelist_length = 0;
  std::vector<Segment> segments_to_deallocate;

  for (auto segment : base::Reversed(space->segments_)) {
    // Remember the state of the freelist before this segment in case this
    // segment turns out to be completely empty and we deallocate it.
    uint32_t previous_freelist_head = current_freelist_head;
    uint32_t previous_freelist_length = current_freelist_length;

    // Process every entry in this segment, again going top to bottom.
    for (WriteIterator it = this->iter_at(segment.last_entry());
         it.index() >= segment.first_entry(); --it) {
      if (!it->IsMarked()) {
        it->MakeFreelistEntry(current_freelist_head);
        current_freelist_head = it.index();
        current_freelist_length++;
      } else {
        callback(*it);
        it->Unmark();
      }
    }

    // If a segment is completely empty, free it.
    uint32_t free_entries = current_freelist_length - previous_freelist_length;
    bool segment_is_empty = free_entries == kEntriesPerSegment;
    if (segment_is_empty) {
      segments_to_deallocate.push_back(segment);
      // Restore the state of the freelist before this segment.
      current_freelist_head = previous_freelist_head;
      current_freelist_length = previous_freelist_length;
    }
  }

  // We cannot remove the segments while iterating over the segments set, so
  // defer that until now.
  for (auto segment : segments_to_deallocate) {
    // Segment zero is reserved.
    DCHECK_NE(segment.number(), 0);
    this->FreeTableSegment(segment);
    space->segments_.erase(segment);
  }

  FreelistHead new_freelist(current_freelist_head, current_freelist_length);
  space->freelist_head_.store(new_freelist, std::memory_order_release);
  DCHECK_EQ(space->freelist_length(), current_freelist_length);

  uint32_t num_live_entries = space->capacity() - current_freelist_length;
  return num_live_entries;
}
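
// Sweeping thus rebuilds the freelist from scratch: segments are visited in
// reverse order and entries within each segment from last to first, so the
// new freelist ends up ordered from lowest to highest index, and a segment
// whose entries are all unmarked is freed via FreeTableSegment() rather than
// being threaded onto the freelist.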

template <typename Entry, size_t size>
template <typename Callback>
void ExternalEntityTable<Entry, size>::IterateEntriesIn(Space* space,
                                                        Callback callback) {
  DCHECK(space->BelongsTo(this));

  base::MutexGuard guard(&space->mutex_);
  for (auto segment : space->segments_) {
    for (uint32_t i = segment.first_entry(); i <= segment.last_entry(); i++) {
      callback(i);
    }
  }
}
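
// Note that the callback here is invoked with entry indices rather than with
// references to the entries themselves; callers that need the entry contents
// must look the index up in the table.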

}  // namespace internal
}  // namespace v8

#endif  // V8_SANDBOX_EXTERNAL_ENTITY_TABLE_INL_H_