v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
external-pointer-table.cc
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/sandbox/external-pointer-table.h"

#include "src/execution/isolate.h"
#include "src/heap/read-only-spaces.h"
#include "src/logging/counters.h"
#include "src/sandbox/external-pointer-table-inl.h"

#ifdef V8_COMPRESS_POINTERS

namespace v8 {
namespace internal {

void ExternalPointerTable::SetUpFromReadOnlyArtifacts(
    Space* read_only_space, const ReadOnlyArtifacts* artifacts) {
  UnsealReadOnlySegmentScope unseal_scope(this);
  for (const auto& registry_entry : artifacts->external_pointer_registry()) {
    ExternalPointerHandle handle = AllocateAndInitializeEntry(
        read_only_space, registry_entry.value, registry_entry.tag);
    CHECK_EQ(handle, registry_entry.handle);
  }
}

// An iterator over a set of sets of segments that returns a total ordering of
// segments in highest to lowest address order. This lets us easily build a
// sorted singly-linked freelist.
//
// When given a single set of segments, it's the same as iterating over
// std::set<Segment> in reverse order.
//
// With multiple segment sets, we still produce a total order. Sets are
// annotated so that we can associate some data with their segments. This is
// useful when evacuating the young ExternalPointerTable::Space into the old
// generation in a major collection, as both spaces could have been compacting,
// with different starts to the evacuation area.
template <typename Segment, typename Data>
class SegmentsIterator {
  using iterator = typename std::set<Segment>::reverse_iterator;
  using const_iterator = typename std::set<Segment>::const_reverse_iterator;

 public:
  SegmentsIterator() = default;

  void AddSegments(const std::set<Segment>& segments, Data data) {
    streams_.emplace_back(segments.rbegin(), segments.rend(), data);
  }

  std::optional<std::pair<Segment, Data>> Next() {
    int stream = -1;
    int min_stream = -1;
    std::optional<std::pair<Segment, Data>> result;
    for (auto [iter, end, data] : streams_) {
      stream++;
      if (iter != end) {
        Segment segment = *iter;
        if (!result || result.value().first < segment) {
          min_stream = stream;
          result.emplace(segment, data);
        }
      }
    }
    if (result) {
      streams_[min_stream].iter++;
      return result;
    }
    return {};
  }

 private:
  struct Stream {
    iterator iter;
    const_iterator end;
    Data data;

    Stream(iterator iter, const_iterator end, Data data)
        : iter(iter), end(end), data(data) {}
  };

  std::vector<Stream> streams_;
};
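
A minimal usage sketch of SegmentsIterator (illustrative only, not part of the upstream file): plain ints stand in for Segment and a char tag for Data, since the iterator only requires that Segment be copyable and ordered by operator<. Note that AddSegments keeps iterators into the caller's sets, so the sets must outlive the iterator, just as space->segments_ and from_space_segments do in the sweep below.

  std::set<int> old_segments = {1, 3, 5};
  std::set<int> young_segments = {2, 4};

  SegmentsIterator<int, char> it;
  it.AddSegments(old_segments, 'o');
  it.AddSegments(young_segments, 'y');

  // Visits (5,'o'), (4,'y'), (3,'o'), (2,'y'), (1,'o'): a single descending
  // total order across both sets, each segment tagged with its set's Data.
  while (auto next = it.Next()) {
    int segment = next->first;
    char tag = next->second;
    // ... consume (segment, tag) ...
  }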

uint32_t ExternalPointerTable::EvacuateAndSweepAndCompact(Space* space,
                                                          Space* from_space,
                                                          Counters* counters) {
  DCHECK(space->BelongsTo(this));
  DCHECK(!space->is_internal_read_only_space());

  DCHECK_IMPLIES(from_space, from_space->BelongsTo(this));
  DCHECK_IMPLIES(from_space, !from_space->is_internal_read_only_space());

  // Lock the space. Technically this is not necessary since no other thread
  // can allocate entries at this point, but some of the methods we call on the
  // space assert that the lock is held.
  base::MutexGuard guard(&space->mutex_);
  // Same for the invalidated fields mutex.
  base::MutexGuard invalidated_fields_guard(&space->invalidated_fields_mutex_);

  // There must not be any entry allocations while the table is being swept as
  // that would not be safe. Set the freelist to this special marker value to
  // easily catch any violation of this requirement.
  space->freelist_head_.store(kEntryAllocationIsForbiddenMarker,
                              std::memory_order_relaxed);

  SegmentsIterator<Segment, CompactionResult> segments_iter;
  Histogram* counter = counters->external_pointer_table_compaction_outcome();
  CompactionResult space_compaction = FinishCompaction(space, counter);
  segments_iter.AddSegments(space->segments_, space_compaction);

  // If from_space is present, take its segments and add them to the sweep
  // iterator. Wait until after the sweep to actually give from_space's
  // segments to the other space, to avoid invalidating the iterator.
  std::set<Segment> from_space_segments;
  if (from_space) {
    base::MutexGuard from_space_guard(&from_space->mutex_);
    base::MutexGuard from_space_invalidated_fields_guard(
        &from_space->invalidated_fields_mutex_);

    std::swap(from_space->segments_, from_space_segments);
    DCHECK(from_space->segments_.empty());

    CompactionResult from_space_compaction =
        FinishCompaction(from_space, counter);
    segments_iter.AddSegments(from_space_segments, from_space_compaction);

    FreelistHead empty_freelist;
    from_space->freelist_head_.store(empty_freelist, std::memory_order_relaxed);

    for (Address field : from_space->invalidated_fields_)
      space->invalidated_fields_.push_back(field);
    from_space->ClearInvalidatedFields();
  }

  // Sweep top to bottom and rebuild the freelist from newly dead and
  // previously freed entries while also clearing the marking bit on live
  // entries and resolving evacuation entries when compacting the table.
  // This way, the freelist ends up sorted by index, which already makes the
  // table somewhat self-compacting and is required for the compaction
  // algorithm so that evacuated entries are evacuated to the start of a space.
  // This method must run either on the mutator thread or while the mutator is
  // stopped.
  uint32_t current_freelist_head = 0;
  uint32_t current_freelist_length = 0;
  auto AddToFreelist = [&](uint32_t entry_index) {
    at(entry_index).MakeFreelistEntry(current_freelist_head);
    current_freelist_head = entry_index;
    current_freelist_length++;
  };

  std::vector<Segment> segments_to_deallocate;
  while (auto current = segments_iter.Next()) {
    Segment segment = current->first;
    CompactionResult compaction = current->second;

    bool segment_will_be_evacuated =
        compaction.success &&
        segment.first_entry() >= compaction.start_of_evacuation_area;

    // Remember the state of the freelist before this segment in case this
    // segment turns out to be completely empty and we deallocate it.
    uint32_t previous_freelist_head = current_freelist_head;
    uint32_t previous_freelist_length = current_freelist_length;

    // Process every entry in this segment, again going top to bottom.
    for (uint32_t i = segment.last_entry(); i >= segment.first_entry(); i--) {
      auto payload = at(i).GetRawPayload();
      if (payload.ContainsEvacuationEntry()) {
        // Segments that will be evacuated cannot contain evacuation entries
        // into which other entries would be evacuated.
        DCHECK(!segment_will_be_evacuated);

        // An evacuation entry contains the address of the external pointer
        // field that owns the entry that is to be evacuated.
        Address handle_location =
            payload.ExtractEvacuationEntryHandleLocation();
        DCHECK_NE(handle_location, kNullAddress);

        // The external pointer field may have been invalidated in the meantime
        // (for example if the host object has been in-place converted to a
        // different type of object). In that case, the field no longer
        // contains an external pointer handle and we therefore cannot evacuate
        // the old entry. This is fine as the entry is guaranteed to be dead.
        if (space->FieldWasInvalidated(handle_location)) {
          // In this case, we must, however, free the evacuation entry.
          // Otherwise, we would be left with effectively a stale evacuation
          // entry that we'd try to process again during the next GC.
          AddToFreelist(i);
          continue;
        }

        // Resolve the evacuation entry: take the pointer to the handle from
        // the evacuation entry, copy the entry to its new location, and
        // finally update the handle to point to the new entry.
        //
        // While we now know that the entry being evacuated is free, we don't
        // add it to (the start of) the freelist because that would immediately
        // cause new fragmentation when the next entry is allocated. Instead,
        // we assume that the segments out of which entries are evacuated will
        // all be decommitted anyway after this loop, which is usually the case
        // unless compaction was already aborted during marking.
        ResolveEvacuationEntryDuringSweeping(
            i, reinterpret_cast<ExternalPointerHandle*>(handle_location),
            compaction.start_of_evacuation_area);

        // The entry must now contain an external pointer and be unmarked as
        // the entry that was evacuated must have been processed already (it
        // is in an evacuated segment; evacuated segments are processed first
        // as they are at the end of the space). This will have cleared the
        // marking bit.
        DCHECK(at(i).HasExternalPointer(kAnyExternalPointerTagRange));
        DCHECK(!at(i).GetRawPayload().HasMarkBitSet());
      } else if (!payload.HasMarkBitSet()) {
        FreeManagedResourceIfPresent(i);
        AddToFreelist(i);
      } else {
        auto new_payload = payload;
        new_payload.ClearMarkBit();
        at(i).SetRawPayload(new_payload);
      }

      // We must have resolved all evacuation entries. Otherwise, we'll try to
      // process them again during the next GC, which would cause problems.
      DCHECK(!at(i).HasEvacuationEntry());
    }

    // If a segment is completely empty, or if all live entries will be
    // evacuated out of it at the end of this loop, free the segment.
    // Note: for segments that will be evacuated, we could avoid building up a
    // freelist, but it's probably not worth the effort.
    uint32_t free_entries = current_freelist_length - previous_freelist_length;
    bool segment_is_empty = free_entries == kEntriesPerSegment;
    if (segment_is_empty || segment_will_be_evacuated) {
      segments_to_deallocate.push_back(segment);
      // Restore the state of the freelist before this segment.
      current_freelist_head = previous_freelist_head;
      current_freelist_length = previous_freelist_length;
    }
  }

  space->segments_.merge(from_space_segments);

  // We cannot deallocate the segments during the above loop, so do it now.
  for (auto segment : segments_to_deallocate) {
#ifdef DEBUG
    // There should not be any live entries in the segments we are freeing.
    // TODO(saelo): we should be able to assert that we're not freeing any
    // entries here. Otherwise, we'd have to FreeManagedResourceIfPresent.
    // for (uint32_t i = segment.last_entry(); i >= segment.first_entry(); i--)
    // {
    //   CHECK(!at(i).HasExternalPointer(kAnyExternalPointerTag));
    // }
#endif
    FreeTableSegment(segment);
    space->segments_.erase(segment);
  }

  space->ClearInvalidatedFields();

  FreelistHead new_freelist(current_freelist_head, current_freelist_length);
  space->freelist_head_.store(new_freelist, std::memory_order_release);
  DCHECK_EQ(space->freelist_length(), current_freelist_length);

  uint32_t num_live_entries = space->capacity() - current_freelist_length;
  counters->external_pointers_count()->AddSample(num_live_entries);
  return num_live_entries;
}
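
The sweep above threads the freelist through the table itself: AddToFreelist makes each newly freed entry point at the previous head, so visiting entries from the highest index down leaves a list whose head is the lowest free index. A standalone sketch of that structure (toy names, not the V8 entry layout; it only assumes <vector> and <cstdint>):

  // Toy table whose free slots store the index of the next free slot.
  struct ToyFreelistTable {
    explicit ToyFreelistTable(uint32_t capacity) : next_free(capacity, 0) {}

    // Mirrors the AddToFreelist lambda: the freed index becomes the new head
    // and records the previous head as its successor.
    void AddToFreelist(uint32_t index) {
      next_free[index] = head;
      head = index;
      length++;
    }

    std::vector<uint32_t> next_free;  // per-entry "next free index" payload
    uint32_t head = 0;                // 0 doubles as the empty-list sentinel here
    uint32_t length = 0;
  };

Freeing indices 5, 3, 2 in that (descending) order yields head == 2 and the chain 2 -> 3 -> 5, so subsequent allocations hand out the lowest free indices first, which is what keeps the table somewhat self-compacting.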

uint32_t ExternalPointerTable::SweepAndCompact(Space* space,
                                               Counters* counters) {
  return EvacuateAndSweepAndCompact(space, nullptr, counters);
}

uint32_t ExternalPointerTable::Sweep(Space* space, Counters* counters) {
  DCHECK(!space->IsCompacting());
  return SweepAndCompact(space, counters);
}

void ExternalPointerTable::ResolveEvacuationEntryDuringSweeping(
    uint32_t new_index, ExternalPointerHandle* handle_location,
    uint32_t start_of_evacuation_area) {
  // We must have a valid handle here. If this fails, it might mean that an
  // object with external pointers was in-place converted to another type of
  // object without informing the external pointer table.
  ExternalPointerHandle old_handle = *handle_location;
  CHECK(IsValidHandle(old_handle));

  uint32_t old_index = HandleToIndex(old_handle);
  ExternalPointerHandle new_handle = IndexToHandle(new_index);

  // The compaction algorithm always moves an entry from the evacuation area to
  // the front of the table. These DCHECKs verify this invariant.
  DCHECK_GE(old_index, start_of_evacuation_area);
  DCHECK_LT(new_index, start_of_evacuation_area);
  auto& new_entry = at(new_index);
  at(old_index).Evacuate(new_entry, EvacuateMarkMode::kLeaveUnmarked);
  *handle_location = new_handle;

  // If this entry references a managed resource, update the resource to
  // reference the new entry.
  if (Address addr = at(new_index).ExtractManagedResourceOrNull()) {
    ManagedResource* resource = reinterpret_cast<ManagedResource*>(addr);
    DCHECK_EQ(resource->ept_entry_, old_handle);
    resource->ept_entry_ = new_handle;
  }
}
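
In outline, a single resolution step does three things: read the old handle out of the owning object's field, copy the table entry to its new slot in front of the evacuation area, and rewrite the field with the handle of the new slot. A toy version with plain arrays (invented names, not the V8 entry or handle encoding):

  struct ToyEntry { uint64_t payload; };

  // In this toy a handle is simply the table index; the real
  // ExternalPointerHandle encoding lives in the V8 headers.
  uint32_t ToyHandleToIndex(uint32_t handle) { return handle; }
  uint32_t ToyIndexToHandle(uint32_t index) { return index; }

  void ToyResolve(std::vector<ToyEntry>& table, uint32_t new_index,
                  uint32_t* handle_location) {
    uint32_t old_index = ToyHandleToIndex(*handle_location);
    table[new_index] = table[old_index];             // copy entry to its new slot
    *handle_location = ToyIndexToHandle(new_index);  // repoint the owning field
  }

After this, the owning object reaches the same external pointer through a lower index, which is what lets the now-empty evacuated segments at the end of the space be decommitted.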

}  // namespace internal
}  // namespace v8

#endif  // V8_COMPRESS_POINTERS