v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
compactible-external-entity-table-inl.h
1// Copyright 2024 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_SANDBOX_COMPACTIBLE_EXTERNAL_ENTITY_TABLE_INL_H_
6#define V8_SANDBOX_COMPACTIBLE_EXTERNAL_ENTITY_TABLE_INL_H_
7
8#include "src/sandbox/compactible-external-entity-table.h"
9// Include the non-inl header before the rest of the headers.
10
11#include <algorithm>
12
16
17#ifdef V8_COMPRESS_POINTERS
18
19namespace v8 {
20namespace internal {
21
22template <typename Entry, size_t size>
23uint32_t CompactibleExternalEntityTable<Entry, size>::AllocateEntry(
24 Space* space) {
25 uint32_t index = Base::AllocateEntry(space);
26
27 // When we're compacting a space, we're trying to move all entries above a
28 // threshold index (the start of the evacuation area) into segments below
29 // that threshold. However, if the freelist becomes too short and we start
30 // allocating entries inside the area that is supposed to be evacuated, we
31 // need to abort compaction. This is not just an optimization but is also
32 // required for correctness: during sweeping we might otherwise assume that
33 // all entries inside the evacuation area have been moved and that these
34 // segments can therefore be deallocated. In particular, this check will also
35 // make sure that we abort compaction if we extend the space with a new
36 // segment and allocate at least one entry in it (if that segment is located
37 // after the threshold, otherwise it is unproblematic).
38 uint32_t start_of_evacuation_area =
39 space->start_of_evacuation_area_.load(std::memory_order_relaxed);
40 if (V8_UNLIKELY(index >= start_of_evacuation_area)) {
41 space->AbortCompacting(start_of_evacuation_area);
42 }
43
44 return index;
45}
46
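To make the abort condition above concrete, here is a small self-contained sketch with made-up numbers; it is not part of this file, and kEntriesPerSegment below is an illustrative placeholder rather than the real constant:

#include <cstdint>
#include <initializer_list>
#include <iostream>

int main() {
  // Toy layout: 4 segments of 512 entries; compaction wants to free the last
  // segment, so the evacuation area starts at entry 3 * 512 = 1536.
  constexpr uint32_t kEntriesPerSegment = 512;  // placeholder value
  constexpr uint32_t start_of_evacuation_area = 3 * kEntriesPerSegment;

  // Two hypothetical indices handed out by the freelist during marking.
  for (uint32_t index : {100u, 1700u}) {
    if (index >= start_of_evacuation_area) {
      // The entry lives in the area that is supposed to become empty, so
      // sweeping could no longer deallocate those segments: abort compaction.
      std::cout << "index " << index << ": abort compaction\n";
    } else {
      std::cout << "index " << index << ": fine, below the evacuation area\n";
    }
  }
  return 0;
}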
47template <typename Entry, size_t size>
48typename CompactibleExternalEntityTable<Entry, size>::CompactionResult
49CompactibleExternalEntityTable<Entry, size>::FinishCompaction(
50 Space* space, Histogram* counter) {
51 DCHECK(space->BelongsTo(this));
52 DCHECK(!space->is_internal_read_only_space());
53
54 // When compacting, we can compute the number of unused segments at the end of
55 // the table and deallocate those after sweeping.
56 uint32_t start_of_evacuation_area =
57 space->start_of_evacuation_area_.load(std::memory_order_relaxed);
58 bool evacuation_was_successful = false;
59 if (space->IsCompacting()) {
60 auto outcome = ExternalEntityTableCompactionOutcome::kAborted;
61 if (space->CompactingWasAborted()) {
62 // Compaction was aborted during marking because the freelist grew too
63 // short. In this case, it is not guaranteed that any segments will now be
64 // completely free. Extract the original start_of_evacuation_area value.
65 start_of_evacuation_area &= ~Space::kCompactionAbortedMarker;
66 } else {
67 // Entry evacuation was successful so all segments inside the evacuation
68 // area are now guaranteed to be free and so can be deallocated.
69 evacuation_was_successful = true;
70 outcome = ExternalEntityTableCompactionOutcome::kSuccess;
71 }
72 DCHECK(IsAligned(start_of_evacuation_area,
73 Base::kEntriesPerSegment));
74
75 space->StopCompacting();
76 counter->AddSample(static_cast<int>(outcome));
77 }
78
79 return {start_of_evacuation_area, evacuation_was_successful};
80}
81
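The masking above relies on start_of_evacuation_area_ doubling as a state word: kNotCompactingMarker means no compaction, a plain index is the evacuation threshold, and the threshold with kCompactionAbortedMarker OR-ed in records an aborted attempt. A standalone sketch of that encoding follows; the two marker constants are placeholders chosen for illustration, not the values declared in the real Space class:

#include <cassert>
#include <cstdint>

// Placeholder encodings, for illustration only.
constexpr uint32_t kNotCompactingMarker = 0;
constexpr uint32_t kCompactionAbortedMarker = 1u << 31;

int main() {
  uint32_t state = kNotCompactingMarker;  // idle: IsCompacting() == false

  // StartCompacting(threshold): store the first entry of the evacuation area.
  uint32_t start_of_evacuation_area = 1536;
  state = start_of_evacuation_area;

  // AbortCompacting(threshold): keep the threshold but tag it as aborted.
  state = start_of_evacuation_area | kCompactionAbortedMarker;
  assert((state & kCompactionAbortedMarker) != 0);  // CompactingWasAborted()

  // FinishCompaction: strip the tag to recover the original threshold.
  uint32_t recovered = state & ~kCompactionAbortedMarker;
  assert(recovered == start_of_evacuation_area);

  // StopCompacting: back to the idle state.
  state = kNotCompactingMarker;
  return static_cast<int>(state);  // 0
}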
82template <typename Entry, size_t size>
83void CompactibleExternalEntityTable<Entry, size>::MaybeCreateEvacuationEntry(
84 Space* space, uint32_t index, Address handle_location) {
85 // Check if the entry should be evacuated for table compaction.
86 // The current value of the start of the evacuation area is cached in a local
87 // variable here as it otherwise may be changed by another marking thread
88 // while this method runs, causing non-optimal behaviour (for example, the
89 // allocation of an evacuation entry _after_ the entry that is evacuated).
90 uint32_t start_of_evacuation_area =
91 space->start_of_evacuation_area_.load(std::memory_order_relaxed);
92 if (index >= start_of_evacuation_area) {
93 DCHECK(space->IsCompacting());
94 uint32_t new_index =
95 Base::AllocateEntryBelow(space, start_of_evacuation_area);
96 if (new_index) {
97 DCHECK_LT(new_index, start_of_evacuation_area);
98 DCHECK(space->Contains(new_index));
99 // Even though the new entry will only be accessed during sweeping, this
100 // still needs to be an atomic write as another thread may attempt (and
101 // fail) to allocate the same table entry, thereby causing a read from
102 // this memory location. Without an atomic store here, TSan would then
103 // complain about a data race.
104 Base::at(new_index).MakeEvacuationEntry(handle_location);
105 } else {
106 // In this case, the application has allocated a sufficiently large
107 // number of entries from the freelist so that new entries would now be
108 // allocated inside the area that is being compacted. While it would be
109 // possible to shrink that area and continue compacting, we probably do
110 // not want to put more pressure on the freelist and so instead simply
111 // abort compaction here. Entries that have already been visited will
112 // still be compacted during Sweep, but there is no guarantee that any
113 // blocks at the end of the table will now be completely free.
114 space->AbortCompacting(start_of_evacuation_area);
115 }
116 }
117}
118
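As a rough mental model of what an evacuation entry buys us, the toy program below keeps only the part that matters here: a slot below the threshold that remembers where the referring handle is stored, so that the entry can be relocated and the handle rewritten later. Everything in it (the ToyEntry type, the addresses, the indices) is invented for illustration and does not mirror the real entry layout or the sweeping code:

#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

// Invented stand-in for a table entry: either a payload or an evacuation
// entry that records the location of the handle referencing the old slot.
struct ToyEntry {
  uint64_t payload = 0;
  std::optional<uintptr_t> evacuation_handle_location;
};

int main() {
  constexpr uint32_t start_of_evacuation_area = 4;  // illustrative threshold
  std::vector<ToyEntry> table(8);

  uint32_t old_index = 6;              // lives inside the evacuation area
  table[old_index].payload = 0x1234;
  uintptr_t handle_location = 0xbeef;  // toy address of the referencing handle

  // Marking: allocate a slot below the threshold and let it point back at the
  // handle, mirroring MakeEvacuationEntry(handle_location) above.
  uint32_t new_index = 2;
  if (old_index >= start_of_evacuation_area) {
    table[new_index].evacuation_handle_location = handle_location;
  }

  // Sweeping (not modelled here) would move the payload from old_index to
  // new_index and update the handle stored at handle_location.
  std::cout << "evacuation entry " << new_index << " records handle at 0x"
            << std::hex << *table[new_index].evacuation_handle_location << "\n";
  return 0;
}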
119template <typename Entry, size_t size>
120void CompactibleExternalEntityTable<Entry, size>::Space::StartCompacting(
121 uint32_t start_of_evacuation_area) {
122 DCHECK_EQ(invalidated_fields_.size(), 0);
123 start_of_evacuation_area_.store(start_of_evacuation_area,
124 std::memory_order_relaxed);
125}
126
127template <typename Entry, size_t size>
128void CompactibleExternalEntityTable<Entry, size>::Space::StopCompacting() {
129 start_of_evacuation_area_.store(kNotCompactingMarker,
130 std::memory_order_relaxed);
131}
132
133template <typename Entry, size_t size>
134void CompactibleExternalEntityTable<Entry, size>::Space::AbortCompacting(
135 uint32_t start_of_evacuation_area) {
136 uint32_t compaction_aborted_marker =
137 start_of_evacuation_area | kCompactionAbortedMarker;
138 DCHECK_NE(compaction_aborted_marker, kNotCompactingMarker);
139 start_of_evacuation_area_.store(compaction_aborted_marker,
140 std::memory_order_relaxed);
141}
142
143template <typename Entry, size_t size>
144bool CompactibleExternalEntityTable<Entry, size>::Space::IsCompacting() {
145 return start_of_evacuation_area_.load(std::memory_order_relaxed) !=
146 kNotCompactingMarker;
147}
148
149template <typename Entry, size_t size>
150bool CompactibleExternalEntityTable<Entry,
151 size>::Space::CompactingWasAborted() {
152 auto value = start_of_evacuation_area_.load(std::memory_order_relaxed);
153 return (value & kCompactionAbortedMarker) == kCompactionAbortedMarker;
154}
155
156template <typename Entry, size_t size>
157bool CompactibleExternalEntityTable<Entry, size>::Space::FieldWasInvalidated(
158 Address field_address) const {
159 invalidated_fields_mutex_.AssertHeld();
160 return std::find(invalidated_fields_.begin(), invalidated_fields_.end(),
161 field_address) != invalidated_fields_.end();
162}
163
164template <typename Entry, size_t size>
165void CompactibleExternalEntityTable<Entry,
166 size>::Space::ClearInvalidatedFields() {
167 invalidated_fields_mutex_.AssertHeld();
168 invalidated_fields_.clear();
169}
170
171template <typename Entry, size_t size>
172void CompactibleExternalEntityTable<Entry, size>::Space::AddInvalidatedField(
173 Address field_address) {
174 if (IsCompacting()) {
175 base::MutexGuard guard(&invalidated_fields_mutex_);
176 invalidated_fields_.push_back(field_address);
177 }
178}
179
180template <typename Entry, size_t size>
181void CompactibleExternalEntityTable<Entry,
182 size>::Space::StartCompactingIfNeeded() {
183 // Take the lock so that we can be sure that no other thread modifies the
184 // segments set concurrently.
185 base::MutexGuard guard(&this->mutex_);
186
187 // This method may be executed while other threads allocate entries from the
188 // freelist. In that case, this method may use incorrect data to determine if
189 // table compaction is necessary. That's fine however since in the worst
190 // case, compaction will simply be aborted right away if the freelist became
191 // too small.
192 uint32_t num_free_entries = this->freelist_length();
193 uint32_t num_total_entries = this->capacity();
194
195 // Current (somewhat arbitrary) heuristic: need compacting if the space is
196 // more than 1MB in size, is at least 10% empty, and if at least one segment
197 // can be freed after successful compaction.
198 double free_ratio = static_cast<double>(num_free_entries) /
199 static_cast<double>(num_total_entries);
200 uint32_t num_segments_to_evacuate =
201 (num_free_entries / 2) / Base::kEntriesPerSegment;
202 uint32_t space_size = num_total_entries * Base::kEntrySize;
203 bool should_compact = (space_size >= 1 * MB) && (free_ratio >= 0.10) &&
204 (num_segments_to_evacuate >= 1);
205
206 // However, if --stress-compaction is enabled, we compact whenever possible:
207 // whenever we have at least two segments, one to evacuate entries into and
208 // the other to evacuate entries from.
209 if (v8_flags.stress_compaction) {
210 should_compact = this->num_segments() > 1;
211 num_segments_to_evacuate = std::max(1u, num_segments_to_evacuate);
212 }
213
214 if (should_compact) {
215 // If we're compacting, attempt to free up the last N segments so that they
216 // can be decommitted afterwards.
217 auto first_segment_to_evacuate =
218 *std::prev(this->segments_.end(), num_segments_to_evacuate);
219 uint32_t start_of_evacuation_area = first_segment_to_evacuate.first_entry();
220 StartCompacting(start_of_evacuation_area);
221 }
222}
223
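The heuristic is easiest to follow with numbers plugged in. The standalone sketch below reproduces the arithmetic with illustrative constants; kEntrySize and kEntriesPerSegment are placeholders, not the parameters of any concrete V8 table:

#include <cstdint>
#include <iostream>

int main() {
  // Illustrative table parameters.
  constexpr uint32_t kEntrySize = 8;             // bytes per entry (placeholder)
  constexpr uint32_t kEntriesPerSegment = 8192;  // entries per segment (placeholder)
  constexpr uint32_t MB = 1024 * 1024;

  // Hypothetical snapshot of a space: 40 segments, 5 segments' worth free.
  uint32_t num_total_entries = 40 * kEntriesPerSegment;
  uint32_t num_free_entries = 5 * kEntriesPerSegment;

  double free_ratio = static_cast<double>(num_free_entries) /
                      static_cast<double>(num_total_entries);      // 0.125
  // Only half of the free entries are earmarked as evacuation targets, which
  // leaves slack on the freelist; rounded down to whole segments.
  uint32_t num_segments_to_evacuate =
      (num_free_entries / 2) / kEntriesPerSegment;                 // 2
  uint32_t space_size = num_total_entries * kEntrySize;            // 2.5 MB

  bool should_compact = (space_size >= 1 * MB) && (free_ratio >= 0.10) &&
                        (num_segments_to_evacuate >= 1);

  std::cout << "free_ratio=" << free_ratio
            << " segments_to_evacuate=" << num_segments_to_evacuate
            << " should_compact=" << std::boolalpha << should_compact << "\n";
  return 0;
}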
224} // namespace internal
225} // namespace v8
226
227#endif // V8_COMPRESS_POINTERS
228
229#endif // V8_SANDBOX_COMPACTIBLE_EXTERNAL_ENTITY_TABLE_INL_H_
static constexpr size_t kEntriesPerSegment
base::Mutex & mutex_
LockGuard< Mutex > MutexGuard
Definition mutex.h:219
V8_EXPORT_PRIVATE FlagValues v8_flags
Definition flags.cc:2197
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
constexpr bool IsAligned(T value, U alignment)
Definition macros.h:403
#define V8_UNLIKELY(condition)
Definition v8config.h:660