v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
simulator-arm.h
Go to the documentation of this file.
1// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_EXECUTION_ARM_SIMULATOR_ARM_H_
6#define V8_EXECUTION_ARM_SIMULATOR_ARM_H_
7
8// Declares a Simulator for ARM instructions if we are not generating a native
9// ARM binary. This Simulator allows us to run and debug ARM code generation on
10// regular desktop machines.
11// V8 calls into generated code by using the GeneratedCode class,
12// which will start execution in the Simulator or forward to the real entry
13// on an ARM HW platform.
14
15// globals.h defines USE_SIMULATOR.
16#include "src/common/globals.h"
17
18#if defined(USE_SIMULATOR)
19// Running with a simulator.
20
21#include "src/base/hashmap.h"
28
29namespace heap::base {
30class StackVisitor;
31}
32
33namespace v8 {
34namespace internal {
35
// One page of cached code plus a one-byte validity flag per cache line.
// Backing storage for the simulator's ICache, used to detect when generated
// code has changed underneath a previously fetched copy.
class CachePage {
 public:
  static const int LINE_VALID = 0;
  static const int LINE_INVALID = 1;

  static const int kPageShift = 12;
  static const int kPageSize = 1 << kPageShift;
  static const int kPageMask = kPageSize - 1;
  static const int kLineShift = 2;  // The cache line is only 4 bytes right now.
  static const int kLineLength = 1 << kLineShift;
  static const int kLineMask = kLineLength - 1;

  // Every line starts out invalid.
  CachePage() { memset(validity_map_, LINE_INVALID, sizeof(validity_map_)); }

  // Pointer to the cached byte at |offset| within this page.
  char* CachedData(int offset) { return data_ + offset; }

  // Pointer to the validity byte covering the line that contains |offset|.
  char* ValidityByte(int offset) {
    return validity_map_ + (offset >> kLineShift);
  }

 private:
  char data_[kPageSize];  // The cached data.
  static const int kValidityMapSize = kPageSize >> kLineShift;
  char validity_map_[kValidityMapSize];  // One byte per line.
};
61
// Simulates execution of ARM machine code so that generated code can be run
// and debugged on non-ARM host machines. Potentially one instance exists per
// native thread; see current().
class Simulator : public SimulatorBase {
 public:
  friend class ArmDebugger;

  // Register codes. Core registers (r0-r15), VFP single-precision (s0-s31),
  // VFP double-precision (d0-d31) and NEON quad (q0-q15) registers share this
  // enum; fp/ip/sp/lr/pc alias the conventional roles of r11-r15.
  enum Register {
    no_reg = -1,
    r0 = 0,
    r1,
    r2,
    r3,
    r4,
    r5,
    r6,
    r7,
    r8,
    r9,
    r10,
    r11,
    r12,
    r13,
    r14,
    r15,
    num_registers,
    // Aliases for core registers with dedicated roles.
    fp = 11,
    ip = 12,
    sp = 13,
    lr = 14,
    pc = 15,
    // VFP single-precision registers.
    s0 = 0,
    s1,
    s2,
    s3,
    s4,
    s5,
    s6,
    s7,
    s8,
    s9,
    s10,
    s11,
    s12,
    s13,
    s14,
    s15,
    s16,
    s17,
    s18,
    s19,
    s20,
    s21,
    s22,
    s23,
    s24,
    s25,
    s26,
    s27,
    s28,
    s29,
    s30,
    s31,
    num_s_registers = 32,
    // VFP double-precision registers.
    d0 = 0,
    d1,
    d2,
    d3,
    d4,
    d5,
    d6,
    d7,
    d8,
    d9,
    d10,
    d11,
    d12,
    d13,
    d14,
    d15,
    d16,
    d17,
    d18,
    d19,
    d20,
    d21,
    d22,
    d23,
    d24,
    d25,
    d26,
    d27,
    d28,
    d29,
    d30,
    d31,
    num_d_registers = 32,
    // NEON quad registers (each overlaps two d registers).
    q0 = 0,
    q1,
    q2,
    q3,
    q4,
    q5,
    q6,
    q7,
    q8,
    q9,
    q10,
    q11,
    q12,
    q13,
    q14,
    q15,
    num_q_registers = 16
  };

  explicit Simulator(Isolate* isolate);
  ~Simulator();

  // The currently executing Simulator instance. Potentially there can be one
  // for each native thread.
  V8_EXPORT_PRIVATE static Simulator* current(v8::internal::Isolate* isolate);

  // Accessors for register state. Reading the pc value adheres to the ARM
  // architecture specification and is off by 8 from the currently executing
  // instruction.
  void set_register(int reg, int32_t value);
  V8_EXPORT_PRIVATE int32_t get_register(int reg) const;
  double get_double_from_register_pair(int reg);
  void set_register_pair_from_double(int reg, double* value);
  void set_dw_register(int dreg, const int* dbl);

  // Support for VFP.
  void get_d_register(int dreg, uint64_t* value);
  void set_d_register(int dreg, const uint64_t* value);
  void get_d_register(int dreg, uint32_t* value);
  void set_d_register(int dreg, const uint32_t* value);
  // Support for NEON. The array-reference parameter fixes the element count
  // to SIZE / sizeof(T) at compile time.
  template <typename T, int SIZE = kSimd128Size>
  void get_neon_register(int reg, T (&value)[SIZE / sizeof(T)]);
  template <typename T, int SIZE = kSimd128Size>
  void set_neon_register(int reg, const T (&value)[SIZE / sizeof(T)]);

  void set_s_register(int reg, unsigned int value);
  unsigned int get_s_register(int reg) const;

  // Typed wrappers around the raw VFP register array. The <Type, 2> variants
  // address double-width (d) registers, the <Type, 1> variants single-width
  // (s) registers.
  void set_d_register_from_double(int dreg, const Float64 dbl) {
    SetVFPRegister<Float64, 2>(dreg, dbl);
  }
  void set_d_register_from_double(int dreg, const double dbl) {
    SetVFPRegister<double, 2>(dreg, dbl);
  }

  Float64 get_double_from_d_register(int dreg) {
    return GetFromVFPRegister<Float64, 2>(dreg);
  }

  void set_s_register_from_float(int sreg, const Float32 flt) {
    SetVFPRegister<Float32, 1>(sreg, flt);
  }
  void set_s_register_from_float(int sreg, const float flt) {
    SetVFPRegister<float, 1>(sreg, flt);
  }

  Float32 get_float_from_s_register(int sreg) {
    return GetFromVFPRegister<Float32, 1>(sreg);
  }

  void set_s_register_from_sinteger(int sreg, const int sint) {
    SetVFPRegister<int, 1>(sreg, sint);
  }

  int get_sinteger_from_s_register(int sreg) {
    return GetFromVFPRegister<int, 1>(sreg);
  }

  // Special case of set_register and get_register to access the raw PC value.
  void set_pc(int32_t value);
  V8_EXPORT_PRIVATE int32_t get_pc() const;

  Address get_sp() const { return static_cast<Address>(get_register(sp)); }

  // Accessor to the internal simulator stack area. Adds a safety
  // margin to prevent overflows (kAdditionalStackMargin).
  uintptr_t StackLimit(uintptr_t c_limit) const;

  uintptr_t StackBase() const;

  // Return central stack view, without additional safety margins.
  // Users, for example wasm::StackMemory, can add their own.
  base::Vector<uint8_t> GetCentralStackView() const;
  static constexpr int JSStackLimitMargin() { return kAdditionalStackMargin; }

  void IterateRegistersAndStack(::heap::base::StackVisitor* visitor);

  // Executes ARM instructions until the PC reaches end_sim_pc.
  void Execute();

  // Call generated code at |entry|, forwarding |args| through the
  // SimulatorBase variadic-call machinery.
  template <typename Return, typename... Args>
  Return Call(Address entry, Args... args) {
    return VariadicCall<Return>(this, &Simulator::CallImpl, entry, args...);
  }

  // Alternative: call a 2-argument double function.
  template <typename Return>
  Return CallFP(Address entry, double d0, double d1) {
    return ConvertReturn<Return>(CallFPImpl(entry, d0, d1));
  }

  // Push an address onto the JS stack.
  V8_EXPORT_PRIVATE uintptr_t PushAddress(uintptr_t address);

  // Pop an address from the JS stack.
  V8_EXPORT_PRIVATE uintptr_t PopAddress();

  // Debugger input.
  void set_last_debugger_input(ArrayUniquePtr<char> input) {
    last_debugger_input_ = std::move(input);
  }
  const char* last_debugger_input() { return last_debugger_input_.get(); }

  // Redirection support.
  static void SetRedirectInstruction(Instruction* instruction);

  // ICache checking.
  static bool ICacheMatch(void* one, void* two);
  static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
                          size_t size);

  // Returns true if pc register contains one of the 'special_values' defined
  // below (bad_lr, end_sim_pc).
  bool has_bad_pc() const;

  // EABI variant for double arguments in use.
  bool use_eabi_hardfloat() {
#if USE_EABI_HARDFLOAT
    return true;
#else
    return false;
#endif
  }

  // Manage instruction tracing.
  bool InstructionTracingEnabled();

  void ToggleInstructionTracing();

 private:
  enum special_values {
    // Known bad pc value to ensure that the simulator does not execute
    // without being properly setup.
    bad_lr = -1,
    // A pc value used to signal the simulator to stop execution. Generally
    // the lr is set to this value on transition from native C code to
    // simulated execution, so that the simulator can "return" to the native
    // C code.
    end_sim_pc = -2
  };

  V8_EXPORT_PRIVATE intptr_t CallImpl(Address entry, int argument_count,
                                      const intptr_t* arguments);
  intptr_t CallFPImpl(Address entry, double d0, double d1);

  // Unsupported instructions use Format to print an error and stop execution.
  void Format(Instruction* instr, const char* format);

  // Checks if the current instruction should be executed based on its
  // condition bits.
  inline bool ConditionallyExecute(Instruction* instr);

  // Helper functions to set the conditional flags in the architecture state.
  void SetNZFlags(int32_t val);
  void SetCFlag(bool val);
  void SetVFlag(bool val);
  bool CarryFrom(int32_t left, int32_t right, int32_t carry = 0);
  bool BorrowFrom(int32_t left, int32_t right, int32_t carry = 1);
  bool OverflowFrom(int32_t alu_out, int32_t left, int32_t right,
                    bool addition);

  inline int GetCarry() { return c_flag_ ? 1 : 0; }

  // Support for VFP.
  void Compute_FPSCR_Flags(float val1, float val2);
  void Compute_FPSCR_Flags(double val1, double val2);
  void Copy_FPSCR_to_APSR();
  inline float canonicalizeNaN(float value);
  inline double canonicalizeNaN(double value);
  inline Float32 canonicalizeNaN(Float32 value);
  inline Float64 canonicalizeNaN(Float64 value);

  // Helper functions to decode common "addressing" modes.
  int32_t GetShiftRm(Instruction* instr, bool* carry_out);
  int32_t GetImm(Instruction* instr, bool* carry_out);
  int32_t ProcessPU(Instruction* instr, int num_regs, int operand_size,
                    intptr_t* start_address, intptr_t* end_address);
  void HandleRList(Instruction* instr, bool load);
  void HandleVList(Instruction* inst);
  void SoftwareInterrupt(Instruction* instr);
  void DebugAtNextPC();

  // Take a copy of v8 simulator tracing flag because flags are frozen after
  // start.
  bool instruction_tracing_ = v8_flags.trace_sim;

  // Helper to write back values to register.
  void AdvancedSIMDElementOrStructureLoadStoreWriteback(int Rn, int Rm,
                                                        int ebytes);

  // Stop helper functions.
  inline bool isWatchedStop(uint32_t bkpt_code);
  inline bool isEnabledStop(uint32_t bkpt_code);
  inline void EnableStop(uint32_t bkpt_code);
  inline void DisableStop(uint32_t bkpt_code);
  inline void IncreaseStopCounter(uint32_t bkpt_code);
  void PrintStopInfo(uint32_t code);

  // Read and write memory.
  // The *Ex functions are exclusive access. The writes return the strex status:
  // 0 if the write succeeds, and 1 if the write fails.
  inline uint8_t ReadBU(int32_t addr);
  inline int8_t ReadB(int32_t addr);
  uint8_t ReadExBU(int32_t addr);
  inline void WriteB(int32_t addr, uint8_t value);
  inline void WriteB(int32_t addr, int8_t value);
  int WriteExB(int32_t addr, uint8_t value);

  inline uint16_t ReadHU(int32_t addr);
  inline int16_t ReadH(int32_t addr);
  uint16_t ReadExHU(int32_t addr);
  // Note: Overloaded on the sign of the value.
  inline void WriteH(int32_t addr, uint16_t value);
  inline void WriteH(int32_t addr, int16_t value);
  int WriteExH(int32_t addr, uint16_t value);

  inline int ReadW(int32_t addr);
  int ReadExW(int32_t addr);
  inline void WriteW(int32_t addr, int value);
  int WriteExW(int32_t addr, int value);

  int32_t* ReadDW(int32_t addr);
  void WriteDW(int32_t addr, int32_t value1, int32_t value2);
  int32_t* ReadExDW(int32_t addr);
  int WriteExDW(int32_t addr, int32_t value1, int32_t value2);

  // Executing is handled based on the instruction type.
  // Both type 0 and type 1 rolled into one.
  void DecodeType01(Instruction* instr);
  void DecodeType2(Instruction* instr);
  void DecodeType3(Instruction* instr);
  void DecodeType4(Instruction* instr);
  void DecodeType5(Instruction* instr);
  void DecodeType6(Instruction* instr);
  void DecodeType7(Instruction* instr);

  // CP15 coprocessor instructions.
  void DecodeTypeCP15(Instruction* instr);

  // Support for VFP.
  void DecodeTypeVFP(Instruction* instr);
  void DecodeType6CoprocessorIns(Instruction* instr);
  void DecodeSpecialCondition(Instruction* instr);

  void DecodeFloatingPointDataProcessing(Instruction* instr);
  void DecodeUnconditional(Instruction* instr);
  void DecodeAdvancedSIMDDataProcessing(Instruction* instr);
  void DecodeMemoryHintsAndBarriers(Instruction* instr);
  void DecodeAdvancedSIMDElementOrStructureLoadStore(Instruction* instr);
  void DecodeAdvancedSIMDLoadStoreMultipleStructures(Instruction* instr);
  void DecodeAdvancedSIMDLoadSingleStructureToAllLanes(Instruction* instr);
  void DecodeAdvancedSIMDLoadStoreSingleStructureToOneLane(Instruction* instr);
  void DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr);

  void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
  void DecodeVCMP(Instruction* instr);
  void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
  int32_t ConvertDoubleToInt(double val, bool unsigned_integer,
                             VFPRoundingMode mode);
  void DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr);

  // Executes one instruction.
  void InstructionDecode(Instruction* instr);

  // ICache.
  static void CheckICache(base::CustomMatcherHashMap* i_cache,
                          Instruction* instr);
  static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start,
                           int size);
  static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
                                 void* page);

  // Handle arguments and return value for runtime FP functions.
  void GetFpArgs(double* x, double* y, int32_t* z);
  void SetFpResult(const double& result);
  void TrashCallerSaveRegisters();

  // Raw typed access to the VFP register file; register_size is in units of
  // 32-bit words (1 for s registers, 2 for d registers).
  template <class ReturnType, int register_size>
  ReturnType GetFromVFPRegister(int reg_index);

  template <class InputType, int register_size>
  void SetVFPRegister(int reg_index, const InputType& value);

  void SetSpecialRegister(SRegisterFieldMask reg_and_mask, uint32_t value);
  uint32_t GetFromSpecialRegister(SRegister reg);

  void CallInternal(Address entry);

  // Architecture state.
  // Saturating instructions require a Q flag to indicate saturation.
  // There is currently no way to read the CPSR directly, and thus read the Q
  // flag, so this is left unimplemented.
  bool n_flag_;
  bool z_flag_;
  bool c_flag_;
  bool v_flag_;

  // VFP architecture state. The array holds 32-bit words; a d register
  // occupies two consecutive entries.
  unsigned int vfp_registers_[num_d_registers * 2];
  bool n_flag_FPSCR_;
  bool z_flag_FPSCR_;
  bool c_flag_FPSCR_;
  bool v_flag_FPSCR_;

  // VFP rounding mode. See ARM DDI 0406B Page A2-29.
  VFPRoundingMode FPSCR_rounding_mode_;
  bool FPSCR_default_NaN_mode_;

  // VFP FP exception flags architecture state.
  bool inv_op_vfp_flag_;
  bool div_zero_vfp_flag_;
  bool overflow_vfp_flag_;
  bool underflow_vfp_flag_;
  bool inexact_vfp_flag_;

  // Simulator support for the stack.
  uint8_t* stack_;
  static const size_t kAllocatedStackSize = 1 * MB;
  // We leave a small buffer below the usable stack to protect against potential
  // stack underflows.
  static const int kStackMargin = 64;
  // Added in Simulator::StackLimit().
  static const int kAdditionalStackMargin = 4 * KB;
  static const size_t kUsableStackSize = kAllocatedStackSize - kStackMargin;
  bool pc_modified_;
  int icount_;

  // Debugger input.
  ArrayUniquePtr<char> last_debugger_input_;

  // Registered breakpoints.
  Instruction* break_pc_;
  Instr break_instr_;

  // A stop is watched if its code is less than kNumOfWatchedStops.
  // Only watched stops support enabling/disabling and the counter feature.
  static const uint32_t kNumOfWatchedStops = 256;

  // Breakpoint is disabled if bit 31 is set.
  static const uint32_t kStopDisabledBit = 1 << 31;

  // A stop is enabled, meaning the simulator will stop when meeting the
  // instruction, if bit 31 of watched_stops_[code].count is unset.
  // The value watched_stops_[code].count & ~(1 << 31) indicates how many times
  // the breakpoint was hit or gone through.
  struct StopCountAndDesc {
    uint32_t count;
    char* desc;
  };
  StopCountAndDesc watched_stops_[kNumOfWatchedStops];

  // Synchronization primitives. See ARM DDI 0406C.b, A2.9.
  enum class MonitorAccess {
    Open,
    Exclusive,
  };

  enum class TransactionSize {
    None = 0,
    Byte = 1,
    HalfWord = 2,
    Word = 4,
    DoubleWord = 8,
  };

  // The least-significant bits of the address are ignored. The number of bits
  // is implementation-defined, between 3 and 11. See ARM DDI 0406C.b, A3.4.3.
  static const int32_t kExclusiveTaggedAddrMask = ~((1 << 11) - 1);

  // Per-processor exclusive monitor; tracks the address tagged by the most
  // recent exclusive load.
  class LocalMonitor {
   public:
    LocalMonitor();

    // These functions manage the state machine for the local monitor, but do
    // not actually perform loads and stores. NotifyStoreExcl only returns
    // true if the exclusive store is allowed; the global monitor will still
    // have to be checked to see whether the memory should be updated.
    void NotifyLoad(int32_t addr);
    void NotifyLoadExcl(int32_t addr, TransactionSize size);
    void NotifyStore(int32_t addr);
    bool NotifyStoreExcl(int32_t addr, TransactionSize size);

   private:
    void Clear();

    MonitorAccess access_state_;
    int32_t tagged_addr_;
    TransactionSize size_;
  };

  // Process-wide exclusive monitor shared by all simulator instances
  // (singleton, see Get()).
  class GlobalMonitor {
   public:
    // Locks the global monitor's mutex, but only when more than one
    // simulator thread exists; single-threaded use skips the lock.
    class SimulatorMutex final {
     public:
      explicit SimulatorMutex(GlobalMonitor* global_monitor) {
        if (!global_monitor->IsSingleThreaded()) {
          guard.emplace(global_monitor->mutex_);
        }
      }

     private:
      std::optional<base::MutexGuard> guard;
    };

    // Per-simulator node in the global monitor's doubly-linked processor
    // list. Only GlobalMonitor manipulates its state.
    class Processor {
     public:
      Processor();

     private:
      friend class GlobalMonitor;
      // These functions manage the state machine for the global monitor, but do
      // not actually perform loads and stores.
      void Clear_Locked();
      void NotifyLoadExcl_Locked(int32_t addr);
      void NotifyStore_Locked(int32_t addr, bool is_requesting_processor);
      bool NotifyStoreExcl_Locked(int32_t addr, bool is_requesting_processor);

      MonitorAccess access_state_;
      int32_t tagged_addr_;
      Processor* next_;
      Processor* prev_;
      // A strex can fail due to background cache evictions. Rather than
      // simulating this, we'll just occasionally introduce cases where an
      // exclusive store fails. This will happen once after every
      // kMaxFailureCounter exclusive stores.
      static const int kMaxFailureCounter = 5;
      int failure_counter_;
    };

    void NotifyLoadExcl_Locked(int32_t addr, Processor* processor);
    void NotifyStore_Locked(int32_t addr, Processor* processor);
    bool NotifyStoreExcl_Locked(int32_t addr, Processor* processor);

    // Called when the simulator is constructed.
    void PrependProcessor(Processor* processor);
    // Called when the simulator is destroyed.
    void RemoveProcessor(Processor* processor);

    static GlobalMonitor* Get();

   private:
    bool IsSingleThreaded() const { return num_processors_ == 1; }

    // Private constructor. Call {GlobalMonitor::Get()} to get the singleton.
    GlobalMonitor() = default;
    friend class base::LeakyObject<GlobalMonitor>;

    Processor* head_ = nullptr;
    std::atomic<uint32_t> num_processors_ = 0;
    base::Mutex mutex_;
  };

  LocalMonitor local_monitor_;
  GlobalMonitor::Processor global_monitor_processor_;
  GlobalMonitor* global_monitor_;
};
635
636} // namespace internal
637} // namespace v8
638
639#endif // defined(USE_SIMULATOR)
640#endif // V8_EXECUTION_ARM_SIMULATOR_ARM_H_
Isolate * isolate_
#define one
uint8_t data_[MAX_STACK_LENGTH]
base::Mutex & mutex_
const int size_
Definition assembler.cc:132
int start
uint32_t count
LineAndColumn current
#define SIZE(Type, type, TYPE, ctype)
base::Vector< const DirectHandle< Object > > args
Definition execution.cc:74
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage * MB
refactor address components for immediate indexing make OptimizeMaglevOnNextCall optimize to turbofan instead of maglev filter for tracing turbofan compilation trace turbo cfg trace TurboFan s graph trimmer trace TurboFan s control equivalence trace TurboFan s register allocator trace stack load store counters for optimized code in run fuzzing &&concurrent_recompilation trace_turbo trace_turbo_scheduled trace_turbo_stack_accesses verify TurboFan machine graph of code stubs enable FixedArray bounds checks print TurboFan statistics of wasm compilations maximum cumulative size of bytecode considered for inlining scale factor of bytecode size used to calculate the inlining budget * KB
int32_t offset
Instruction * instr
ZoneVector< RpoNumber > & result
LiftoffRegister reg
int y
int x
constexpr size_t kPageSize
Definition globals.h:42
int int32_t
Definition unicode.cc:40
unsigned short uint16_t
Definition unicode.cc:39
signed short int16_t
Definition unicode.cc:38
uintptr_t Address
Definition memory.h:13
V8_INLINE const Operation & Get(const Graph &graph, OpIndex index)
Definition graph.h:1231
FloatWithBits< 32 > Float32
Definition index.h:233
UntaggedUnion< Word32, Word64 > Word
Definition index.h:535
FloatWithBits< 64 > Float64
Definition index.h:234
uint32_t WasmInterpreterRuntime int64_t r0
constexpr Register no_reg
V8_EXPORT_PRIVATE FlagValues v8_flags
constexpr Register r11
@ None
Definition v8-object.h:141
base::SmallVector< RegisterT, kStaticCapacity > registers_
Node * prev_
#define V8_EXPORT_PRIVATE
Definition macros.h:460