v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
stack-guard.cc
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/execution/stack-guard.h"

#include "src/base/atomicops.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/execution/interrupts-scope.h"
#include "src/execution/isolate.h"
#include "src/execution/protectors-inl.h"
#include "src/execution/simulator.h"
#include "src/logging/counters.h"
#include "src/objects/backing-store.h"
#include "src/roots/roots-inl.h"
#include "src/tracing/trace-event.h"
#include "src/utils/memcopy.h"

#ifdef V8_ENABLE_SPARKPLUG
#include "src/baseline/baseline-batch-compiler.h"
#endif

#ifdef V8_ENABLE_MAGLEV
#include "src/maglev/maglev-concurrent-dispatcher.h"
#endif  // V8_ENABLE_MAGLEV

#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-engine.h"
#endif  // V8_ENABLE_WEBASSEMBLY

namespace v8 {
namespace internal {
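
// Interrupts are delivered through the stack limit: while any interrupt bit
// is set, the active jslimit is replaced by the kInterruptLimit sentinel, so
// the next stack check in generated code fails and execution enters the
// runtime, eventually reaching StackGuard::HandleInterrupts() below.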

void StackGuard::update_interrupt_requests_and_stack_limits(
    const ExecutionAccess& lock) {
  DCHECK_NOT_NULL(isolate_);
  if (has_pending_interrupts(lock)) {
    thread_local_.set_jslimit(kInterruptLimit);
  } else {
    thread_local_.set_jslimit(thread_local_.real_jslimit_);
  }
  for (InterruptLevel level :
       {InterruptLevel::kNoGC, InterruptLevel::kNoHeapWrites,
        InterruptLevel::kAnyEffect}) {
    thread_local_.set_interrupt_requested(
        level, InterruptLevelMask(level) & thread_local_.interrupt_flags_);
  }
}

void StackGuard::SetStackLimit(uintptr_t limit) {
  ExecutionAccess access(isolate_);
  SetStackLimitInternal(access, limit,
                        SimulatorStack::JsLimitFromCLimit(isolate_, limit));
}

void StackGuard::SetStackLimitInternal(const ExecutionAccess& lock,
                                       uintptr_t limit, uintptr_t jslimit) {
  // If the current limits are special (e.g. due to a pending interrupt) then
  // leave them alone.
  if (thread_local_.jslimit() == thread_local_.real_jslimit_) {
    thread_local_.set_jslimit(jslimit);
  }
  thread_local_.real_jslimit_ = jslimit;
#ifdef USE_SIMULATOR
  if (thread_local_.climit() == thread_local_.real_climit_) {
    thread_local_.set_climit(limit);
  }
  thread_local_.real_climit_ = limit;
#endif
}

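// Used by wasm stack switching, which moves the limit to another stack's
// bounds without taking the ExecutionAccess lock; hence the compare-and-swap.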
void StackGuard::SetStackLimitForStackSwitching(uintptr_t limit) {
  // Try to compare and swap the new jslimit without the ExecutionAccess lock.
  uintptr_t old_jslimit = base::Relaxed_CompareAndSwap(
      &thread_local_.jslimit_, thread_local_.real_jslimit_, limit);
  USE(old_jslimit);
  DCHECK_IMPLIES(old_jslimit != thread_local_.real_jslimit_,
                 old_jslimit == kInterruptLimit);
  // Either way, set the real limit. This does not require synchronization.
  thread_local_.real_jslimit_ = limit;
}

#ifdef USE_SIMULATOR
void StackGuard::AdjustStackLimitForSimulator() {
  ExecutionAccess access(isolate_);
  uintptr_t climit = thread_local_.real_climit_;
  // If the current limits are special (e.g. due to a pending interrupt) then
  // leave them alone.
  uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, climit);
  if (thread_local_.jslimit() == thread_local_.real_jslimit_) {
    thread_local_.set_jslimit(jslimit);
  }
}

void StackGuard::ResetStackLimitForSimulator() {
  ExecutionAccess access(isolate_);
  // If the current limits are special due to a pending interrupt then
  // leave them alone.
  if (thread_local_.jslimit() != kInterruptLimit) {
    thread_local_.set_jslimit(thread_local_.real_jslimit_);
  }
}
#endif
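
// InterruptsScopes postpone or force-run classes of interrupts while they are
// on the stack; they form a chain linked through prev_ and rooted at
// thread_local_.interrupt_scopes_, maintained by Push/PopInterruptsScope.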
void StackGuard::PushInterruptsScope(InterruptsScope* scope) {
  ExecutionAccess access(isolate_);
  DCHECK_NE(scope->mode_, InterruptsScope::kNoop);
  if (scope->mode_ == InterruptsScope::kPostponeInterrupts) {
    // Intercept already requested interrupts.
    uint32_t intercepted =
        thread_local_.interrupt_flags_ & scope->intercept_mask_;
    scope->intercepted_flags_ = intercepted;
    thread_local_.interrupt_flags_ &= ~intercepted;
  } else {
    DCHECK_EQ(scope->mode_, InterruptsScope::kRunInterrupts);
    // Restore postponed interrupts.
    uint32_t restored_flags = 0;
    for (InterruptsScope* current = thread_local_.interrupt_scopes_;
         current != nullptr; current = current->prev_) {
      restored_flags |= (current->intercepted_flags_ & scope->intercept_mask_);
      current->intercepted_flags_ &= ~scope->intercept_mask_;
    }
    thread_local_.interrupt_flags_ |= restored_flags;
  }
  update_interrupt_requests_and_stack_limits(access);
  // Add scope to the chain.
  scope->prev_ = thread_local_.interrupt_scopes_;
  thread_local_.interrupt_scopes_ = scope;
}

void StackGuard::PopInterruptsScope() {
  ExecutionAccess access(isolate_);
  InterruptsScope* top = thread_local_.interrupt_scopes_;
  DCHECK_NE(top->mode_, InterruptsScope::kNoop);
  if (top->mode_ == InterruptsScope::kPostponeInterrupts) {
    // Make intercepted interrupts active.
    DCHECK_EQ(thread_local_.interrupt_flags_ & top->intercept_mask_, 0);
    thread_local_.interrupt_flags_ |= top->intercepted_flags_;
  } else {
    DCHECK_EQ(top->mode_, InterruptsScope::kRunInterrupts);
    // Postpone existing interrupts if needed.
    if (top->prev_) {
      for (uint32_t interrupt = 1; interrupt < ALL_INTERRUPTS;
           interrupt = interrupt << 1) {
        InterruptFlag flag = static_cast<InterruptFlag>(interrupt);
        if ((thread_local_.interrupt_flags_ & flag) &&
            top->prev_->Intercept(flag)) {
          thread_local_.interrupt_flags_ &= ~flag;
        }
      }
    }
  }
  update_interrupt_requests_and_stack_limits(access);
  // Remove scope from chain.
  thread_local_.interrupt_scopes_ = top->prev_;
}
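
// Check/Request/ClearInterrupt may be called from threads other than the one
// running the isolate; each takes the ExecutionAccess lock and funnels limit
// updates through update_interrupt_requests_and_stack_limits().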
bool StackGuard::CheckInterrupt(InterruptFlag flag) {
  ExecutionAccess access(isolate_);
  return (thread_local_.interrupt_flags_ & flag) != 0;
}

void StackGuard::RequestInterrupt(InterruptFlag flag) {
  ExecutionAccess access(isolate_);
  // Check the chain of InterruptsScope for interception.
  if (thread_local_.interrupt_scopes_ &&
      thread_local_.interrupt_scopes_->Intercept(flag)) {
    return;
  }

  // Not intercepted. Set as active interrupt flag.
  thread_local_.interrupt_flags_ |= flag;
  update_interrupt_requests_and_stack_limits(access);

  // If this isolate is waiting in a futex, notify it to wake up.
  isolate_->futex_wait_list_node()->NotifyWake();
}

void StackGuard::ClearInterrupt(InterruptFlag flag) {
  ExecutionAccess access(isolate_);
  // Clear the interrupt flag from the chain of InterruptsScope.
  for (InterruptsScope* current = thread_local_.interrupt_scopes_;
       current != nullptr; current = current->prev_) {
    current->intercepted_flags_ &= ~flag;
  }

  // Clear the interrupt flag from the active interrupt flags.
  thread_local_.interrupt_flags_ &= ~flag;
  update_interrupt_requests_and_stack_limits(access);
}
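
// Lock-free fast path: only take ExecutionAccess if the interrupt_requested
// bit for InterruptLevel::kNoGC hints at a possible pending termination.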
bool StackGuard::HasTerminationRequest() {
  if (!thread_local_.has_interrupt_requested(InterruptLevel::kNoGC)) {
    return false;
  }
  ExecutionAccess access(isolate_);
  if ((thread_local_.interrupt_flags_ & TERMINATE_EXECUTION) != 0) {
    thread_local_.interrupt_flags_ &= ~TERMINATE_EXECUTION;
    update_interrupt_requests_and_stack_limits(access);
    return true;
  }
  return false;
}

int StackGuard::FetchAndClearInterrupts(InterruptLevel level) {
  ExecutionAccess access(isolate_);
  InterruptFlag mask = InterruptLevelMask(level);
  if ((thread_local_.interrupt_flags_ & TERMINATE_EXECUTION) != 0) {
    // The TERMINATE_EXECUTION interrupt is special, since it terminates
    // execution but should leave V8 in a resumable state. If it exists, we only
    // fetch and clear that bit. On resume, V8 can continue processing other
    // interrupts.
    mask = TERMINATE_EXECUTION;
  }

  int result = static_cast<int>(thread_local_.interrupt_flags_ & mask);
  thread_local_.interrupt_flags_ &= ~mask;
  update_interrupt_requests_and_stack_limits(access);
  return result;
}

char* StackGuard::ArchiveStackGuard(char* to) {
  ExecutionAccess access(isolate_);
  MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
  thread_local_ = {};
  return to + sizeof(ThreadLocal);
}

char* StackGuard::RestoreStackGuard(char* from) {
  ExecutionAccess access(isolate_);
  MemCopy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
  return from + sizeof(ThreadLocal);
}
void StackGuard::ThreadLocal::Initialize(Isolate* isolate,
                                         const ExecutionAccess& lock) {
  const uintptr_t kLimitSize = v8_flags.stack_size * KB;
  DCHECK_GT(base::Stack::GetStackStart(), kLimitSize);
  uintptr_t limit = base::Stack::GetStackStart() - kLimitSize;
  real_jslimit_ = SimulatorStack::JsLimitFromCLimit(isolate, limit);
  set_jslimit(SimulatorStack::JsLimitFromCLimit(isolate, limit));
#ifdef USE_SIMULATOR
  real_climit_ = limit;
  set_climit(limit);
#endif
  interrupt_scopes_ = nullptr;
  interrupt_flags_ = 0;
}

void StackGuard::InitThread(const ExecutionAccess& lock) {
  thread_local_.Initialize(isolate_, lock);
  Isolate::PerIsolateThreadData* per_thread =
      isolate_->FindOrAllocatePerThreadDataForThisThread();
  uintptr_t stored_limit = per_thread->stack_limit();
  // You should hold the ExecutionAccess lock when you call this.
  if (stored_limit != 0) {
    SetStackLimit(stored_limit);
  }
}

// --- C a l l s   t o   n a t i v e s ---

namespace {

bool TestAndClear(int* bitfield, int mask) {
  bool result = (*bitfield & mask);
  *bitfield &= ~mask;
  return result;
}

class V8_NODISCARD ShouldBeZeroOnReturnScope final {
 public:
#ifndef DEBUG
  explicit ShouldBeZeroOnReturnScope(int*) {}
#else   // DEBUG
  explicit ShouldBeZeroOnReturnScope(int* v) : v_(v) {}
  ~ShouldBeZeroOnReturnScope() { DCHECK_EQ(*v_, 0); }

 private:
  int* v_;
#endif  // DEBUG
};

}  // namespace

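// Runs on the JS thread once a stack check has failed: dispatches every
// pending interrupt fetched by FetchAndClearInterrupts() to its handler,
// returning the termination sentinel for TERMINATE_EXECUTION and undefined
// otherwise.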
Tagged<Object> StackGuard::HandleInterrupts(InterruptLevel level) {
  TRACE_EVENT0("v8.execute", "V8.HandleInterrupts");

#if DEBUG
  isolate_->heap()->VerifyNewSpaceTop();
#endif

  if (v8_flags.verify_predictable) {
    // Advance synthetic time by making a time request.
    isolate_->heap()->MonotonicallyIncreasingTimeInMs();
  }

  // Fetch and clear interrupt bits in one go. See comments inside the method
  // for special handling of TERMINATE_EXECUTION.
  int interrupt_flags = FetchAndClearInterrupts(level);

  // All interrupts should be fully processed when returning from this method.
  ShouldBeZeroOnReturnScope should_be_zero_on_return(&interrupt_flags);

  if (TestAndClear(&interrupt_flags, TERMINATE_EXECUTION)) {
    TRACE_EVENT0("v8.execute", "V8.TerminateExecution");
    return isolate_->TerminateExecution();
  }

  if (TestAndClear(&interrupt_flags, GC_REQUEST)) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "V8.GCHandleGCRequest");
    isolate_->heap()->HandleGCRequest();
  }

  if (TestAndClear(&interrupt_flags, START_INCREMENTAL_MARKING)) {
    isolate_->heap()->StartIncrementalMarkingOnInterrupt();
  }

  if (TestAndClear(&interrupt_flags, GLOBAL_SAFEPOINT)) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "V8.GlobalSafepoint");
    isolate_->main_thread_local_heap()->Safepoint();
  }

#if V8_ENABLE_WEBASSEMBLY
  if (TestAndClear(&interrupt_flags, GROW_SHARED_MEMORY)) {
    TRACE_EVENT0("v8.wasm", "V8.WasmGrowSharedMemory");
    BackingStore::UpdateSharedWasmMemoryObjects(isolate_);
  }

  if (TestAndClear(&interrupt_flags, LOG_WASM_CODE)) {
    TRACE_EVENT0("v8.wasm", "V8.LogCode");
    wasm::GetWasmEngine()->LogOutstandingCodesForIsolate(isolate_);
  }

  if (TestAndClear(&interrupt_flags, WASM_CODE_GC)) {
    TRACE_EVENT0("v8.wasm", "V8.WasmCodeGC");
    wasm::GetWasmEngine()->ReportLiveCodeFromStackForGC(isolate_);
  }
#endif  // V8_ENABLE_WEBASSEMBLY

  if (TestAndClear(&interrupt_flags, DEOPT_MARKED_ALLOCATION_SITES)) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                 "V8.GCDeoptMarkedAllocationSites");
    isolate_->heap()->DeoptMarkedAllocationSites();
  }

  if (TestAndClear(&interrupt_flags, INSTALL_CODE)) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
                 "V8.InstallOptimizedFunctions");
    DCHECK(isolate_->concurrent_recompilation_enabled());
    isolate_->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
  }

#ifdef V8_ENABLE_SPARKPLUG
  if (TestAndClear(&interrupt_flags, INSTALL_BASELINE_CODE)) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
                 "V8.FinalizeBaselineConcurrentCompilation");
    isolate_->baseline_batch_compiler()->InstallBatch();
  }
#endif  // V8_ENABLE_SPARKPLUG

#ifdef V8_ENABLE_MAGLEV
  if (TestAndClear(&interrupt_flags, INSTALL_MAGLEV_CODE)) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
                 "V8.FinalizeMaglevConcurrentCompilation");
    isolate_->maglev_concurrent_dispatcher()->FinalizeFinishedJobs();
  }
#endif  // V8_ENABLE_MAGLEV

  if (TestAndClear(&interrupt_flags, API_INTERRUPT)) {
    TRACE_EVENT0("v8.execute", "V8.InvokeApiInterruptCallbacks");
    // Callbacks must be invoked outside of ExecutionAccess lock.
    isolate_->InvokeApiInterruptCallbacks();
  }

#ifdef V8_RUNTIME_CALL_STATS
  // Runtime call stats can be enabled at any time via Chrome tracing and since
  // there's no global list of active Isolates this seems to be the only
  // simple way to invalidate the protector.
  if (TracingFlags::is_runtime_stats_enabled() &&
      Protectors::IsNoProfilingIntact(isolate_)) {
    Protectors::InvalidateNoProfiling(isolate_);
  }
#endif

  isolate_->counters()->stack_interrupts()->Increment();

  return ReadOnlyRoots(isolate_).undefined_value();
}

}  // namespace internal
}  // namespace v8
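
How this machinery looks from the embedder's side, as a minimal sketch against the public V8 API (not part of this file; isolate setup and error handling omitted, function names illustrative): v8::Isolate::TerminateExecution() sets the TERMINATE_EXECUTION bit handled above, and v8::Isolate::RequestInterrupt() queues a callback that HandleInterrupts() later runs via InvokeApiInterruptCallbacks().

#include <v8.h>

// Callable from any thread: requests termination of the JavaScript currently
// running in |isolate|; the JS thread observes the TERMINATE_EXECUTION bit at
// its next stack check and unwinds.
void Watchdog(v8::Isolate* isolate) { isolate->TerminateExecution(); }

// Callable from any thread: the callback runs later on the JS thread,
// outside the ExecutionAccess lock (see the API_INTERRUPT handler above).
void QueueInterrupt(v8::Isolate* isolate) {
  isolate->RequestInterrupt(
      [](v8::Isolate* /*isolate*/, void* /*data*/) {
        // Inspect state or stop the running script here.
      },
      nullptr);
}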