v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
simulator-ppc.h
Go to the documentation of this file.
1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_EXECUTION_PPC_SIMULATOR_PPC_H_
6#define V8_EXECUTION_PPC_SIMULATOR_PPC_H_
7
8// Declares a Simulator for PPC instructions if we are not generating a native
9// PPC binary. This Simulator allows us to run and debug PPC code generation on
10// regular desktop machines.
11// V8 calls into generated code via the GeneratedCode wrapper,
12// which will start execution in the Simulator or forward to the real entry
13// on a PPC HW platform.
14
15// globals.h defines USE_SIMULATOR.
16#include "src/common/globals.h"
17
18#if defined(USE_SIMULATOR)
19// Running with a simulator.
20
21#include "src/base/hashmap.h"
28
29namespace heap::base {
30class StackVisitor;
31}
32
33namespace v8 {
34namespace internal {
35
// One page of cached instruction data, plus a per-line validity map that the
// simulator's ICache-consistency checks consult.
class CachePage {
 public:
  static const int LINE_VALID = 0;
  static const int LINE_INVALID = 1;

  static const int kPageShift = 12;
  static const int kPageSize = 1 << kPageShift;
  static const int kPageMask = kPageSize - 1;
  static const int kLineShift = 2;  // The cache line is only 4 bytes right now.
  static const int kLineLength = 1 << kLineShift;
  static const int kLineMask = kLineLength - 1;

  // Every line starts out invalid; lines become valid as code is cached.
  CachePage() { memset(validity_map_, LINE_INVALID, sizeof(validity_map_)); }

  // Returns the validity byte covering the line containing `offset`.
  char* ValidityByte(int offset) { return &validity_map_[offset >> kLineShift]; }

  // Returns a pointer into the cached copy of this page at `offset`.
  char* CachedData(int offset) { return &data_[offset]; }

 private:
  char data_[kPageSize];  // The cached data.
  static const int kValidityMapSize = kPageSize >> kLineShift;
  char validity_map_[kValidityMapSize];  // One byte per line.
};
61
62class Simulator : public SimulatorBase {
63 public:
64 friend class PPCDebugger;
65 enum Register {
66 no_reg = -1,
67 r0 = 0,
68 sp,
69 r2,
70 r3,
71 r4,
72 r5,
73 r6,
74 r7,
75 r8,
76 r9,
77 r10,
78 r11,
79 r12,
80 r13,
81 r14,
82 r15,
83 r16,
84 r17,
85 r18,
86 r19,
87 r20,
88 r21,
89 r22,
90 r23,
91 r24,
92 r25,
93 r26,
94 r27,
95 r28,
96 r29,
97 r30,
98 fp,
99 kNumGPRs = 32,
100 d0 = 0,
101 d1,
102 d2,
103 d3,
104 d4,
105 d5,
106 d6,
107 d7,
108 d8,
109 d9,
110 d10,
111 d11,
112 d12,
113 d13,
114 d14,
115 d15,
116 d16,
117 d17,
118 d18,
119 d19,
120 d20,
121 d21,
122 d22,
123 d23,
124 d24,
125 d25,
126 d26,
127 d27,
128 d28,
129 d29,
130 d30,
131 d31,
132 kNumFPRs = 32,
133 // PPC Simd registers are a serapre set from Floating Point registers. Refer
134 // to register-ppc.h for more details.
135 v0 = 0,
136 v1,
137 v2,
138 v3,
139 v4,
140 v5,
141 v6,
142 v7,
143 v8,
144 v9,
145 v10,
146 v11,
147 v12,
148 v13,
149 v14,
150 v15,
151 v16,
152 v17,
153 v18,
154 v19,
155 v20,
156 v21,
157 v22,
158 v23,
159 v24,
160 v25,
161 v26,
162 v27,
163 v28,
164 v29,
165 v30,
166 v31,
167 kNumSIMDRs = 32
168 };
169
170 explicit Simulator(Isolate* isolate);
171 ~Simulator();
172
173 // The currently executing Simulator instance. Potentially there can be one
174 // for each native thread.
175 static Simulator* current(v8::internal::Isolate* isolate);
176
177 // Accessors for register state.
178 void set_register(int reg, intptr_t value);
179 intptr_t get_register(int reg) const;
180 double get_double_from_register_pair(int reg);
181 void set_d_register_from_double(int dreg, const double dbl) {
182 DCHECK(dreg >= 0 && dreg < kNumFPRs);
183 fp_registers_[dreg] = base::bit_cast<int64_t>(dbl);
184 }
185 double get_double_from_d_register(int dreg) {
186 DCHECK(dreg >= 0 && dreg < kNumFPRs);
187 return base::bit_cast<double>(fp_registers_[dreg]);
188 }
189 void set_d_register(int dreg, int64_t value) {
190 DCHECK(dreg >= 0 && dreg < kNumFPRs);
191 fp_registers_[dreg] = value;
192 }
193 int64_t get_d_register(int dreg) {
194 DCHECK(dreg >= 0 && dreg < kNumFPRs);
195 return fp_registers_[dreg];
196 }
197
198 // Special case of set_register and get_register to access the raw PC value.
199 void set_pc(intptr_t value);
200 intptr_t get_pc() const;
201
202 Address get_sp() const { return static_cast<Address>(get_register(sp)); }
203
204 // Accessor to the internal Link Register
205 intptr_t get_lr() const;
206
207 // Accessor to the internal simulator stack area. Adds a safety
208 // margin to prevent overflows.
209 uintptr_t StackLimit(uintptr_t c_limit) const;
210
211 uintptr_t StackBase() const;
212
213 // Return central stack view, without additional safety margins.
214 // Users, for example wasm::StackMemory, can add their own.
215 base::Vector<uint8_t> GetCentralStackView() const;
216 static constexpr int JSStackLimitMargin() { return kStackProtectionSize; }
217
218 void IterateRegistersAndStack(::heap::base::StackVisitor* visitor);
219
220 // Executes PPC instructions until the PC reaches end_sim_pc.
221 void Execute();
222
223 template <typename Return, typename... Args>
224 Return Call(Address entry, Args... args) {
225 return VariadicCall<Return>(this, &Simulator::CallImpl, entry, args...);
226 }
227
228 // Alternative: call a 2-argument double function.
229 void CallFP(Address entry, double d0, double d1);
230 int32_t CallFPReturnsInt(Address entry, double d0, double d1);
231 double CallFPReturnsDouble(Address entry, double d0, double d1);
232
233 // Push an address onto the JS stack.
234 V8_EXPORT_PRIVATE uintptr_t PushAddress(uintptr_t address);
235
236 // Pop an address from the JS stack.
237 V8_EXPORT_PRIVATE uintptr_t PopAddress();
238
239 // Debugger input.
240 void set_last_debugger_input(char* input);
241 char* last_debugger_input() { return last_debugger_input_; }
242
243 // Redirection support.
244 static void SetRedirectInstruction(Instruction* instruction);
245
246 // ICache checking.
247 static bool ICacheMatch(void* one, void* two);
248 static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
249 size_t size);
250
251 // Returns true if pc register contains one of the 'special_values' defined
252 // below (bad_lr, end_sim_pc).
253 bool has_bad_pc() const;
254
255 // Manage instruction tracing.
256 bool InstructionTracingEnabled();
257
258 void ToggleInstructionTracing();
259
260 enum special_values {
261 // Known bad pc value to ensure that the simulator does not execute
262 // without being properly setup.
263 bad_lr = -1,
264 // A pc value used to signal the simulator to stop execution. Generally
265 // the lr is set to this value on transition from native C code to
266 // simulated execution, so that the simulator can "return" to the native
267 // C code.
268 end_sim_pc = -2
269 };
270
271 intptr_t CallImpl(Address entry, int argument_count,
272 const intptr_t* arguments);
273
274 enum BCType { BC_OFFSET, BC_LINK_REG, BC_CTR_REG };
275
276 // Unsupported instructions use Format to print an error and stop execution.
277 void Format(Instruction* instr, const char* format);
278
279 // Helper functions to set the conditional flags in the architecture state.
280 bool CarryFrom(int32_t left, int32_t right, int32_t carry = 0);
281 bool BorrowFrom(int32_t left, int32_t right);
282 bool OverflowFrom(int32_t alu_out, int32_t left, int32_t right,
283 bool addition);
284
285 // Helper functions to decode common "addressing" modes
286 int32_t GetShiftRm(Instruction* instr, bool* carry_out);
287 int32_t GetImm(Instruction* instr, bool* carry_out);
288 void ProcessPUW(Instruction* instr, int num_regs, int operand_size,
289 intptr_t* start_address, intptr_t* end_address);
290 void HandleRList(Instruction* instr, bool load);
291 void HandleVList(Instruction* inst);
292 void SoftwareInterrupt(Instruction* instr);
293 void DebugAtNextPC();
294
295 // Take a copy of v8 simulator tracing flag because flags are frozen after
296 // start.
297 bool instruction_tracing_ = v8_flags.trace_sim;
298
299 // Stop helper functions.
300 inline bool isStopInstruction(Instruction* instr);
301 inline bool isWatchedStop(uint32_t bkpt_code);
302 inline bool isEnabledStop(uint32_t bkpt_code);
303 inline void EnableStop(uint32_t bkpt_code);
304 inline void DisableStop(uint32_t bkpt_code);
305 inline void IncreaseStopCounter(uint32_t bkpt_code);
306 void PrintStopInfo(uint32_t code);
307
308 // Read and write memory.
309 template <typename T>
310 inline void Read(uintptr_t address, T* value) {
311 base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
312 memcpy(value, reinterpret_cast<const char*>(address), sizeof(T));
313 }
314
315 template <typename T>
316 inline void ReadEx(uintptr_t address, T* value) {
317 base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
318 GlobalMonitor::Get()->NotifyLoadExcl(
319 address, static_cast<TransactionSize>(sizeof(T)),
320 isolate_->thread_id());
321 memcpy(value, reinterpret_cast<const char*>(address), sizeof(T));
322 }
323
324 template <typename T>
325 inline void Write(uintptr_t address, T value) {
326 base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
327 GlobalMonitor::Get()->NotifyStore(address,
328 static_cast<TransactionSize>(sizeof(T)),
329 isolate_->thread_id());
330 memcpy(reinterpret_cast<char*>(address), &value, sizeof(T));
331 }
332
333 template <typename T>
334 inline int32_t WriteEx(uintptr_t address, T value) {
335 base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
336 if (GlobalMonitor::Get()->NotifyStoreExcl(
337 address, static_cast<TransactionSize>(sizeof(T)),
338 isolate_->thread_id())) {
339 memcpy(reinterpret_cast<char*>(address), &value, sizeof(T));
340 return 0;
341 } else {
342 return 1;
343 }
344 }
345
346 // Byte Reverse.
347 static inline __uint128_t __builtin_bswap128(__uint128_t v) {
348 union {
349 uint64_t u64[2];
350 __uint128_t u128;
351 } res, val;
352 val.u128 = v;
353 res.u64[0] = ByteReverse<int64_t>(val.u64[1]);
354 res.u64[1] = ByteReverse<int64_t>(val.u64[0]);
355 return res.u128;
356 }
357
358#define RW_VAR_LIST(V) \
359 V(QWU, unsigned __int128) \
360 V(QW, __int128) \
361 V(DWU, uint64_t) \
362 V(DW, int64_t) \
363 V(WU, uint32_t) \
364 V(W, int32_t) V(HU, uint16_t) V(H, int16_t) V(BU, uint8_t) V(B, int8_t)
365
366#define GENERATE_RW_FUNC(size, type) \
367 inline type Read##size(uintptr_t addr); \
368 inline type ReadEx##size(uintptr_t addr); \
369 inline void Write##size(uintptr_t addr, type value); \
370 inline int32_t WriteEx##size(uintptr_t addr, type value);
371
372 RW_VAR_LIST(GENERATE_RW_FUNC)
373#undef GENERATE_RW_FUNC
374
375 void Trace(Instruction* instr);
376 void SetCR0(intptr_t result, bool setSO = false);
377 void SetCR6(bool true_for_all);
378 void ExecuteBranchConditional(Instruction* instr, BCType type);
379 void ExecuteGeneric(Instruction* instr);
380
381 void SetFPSCR(int bit) { fp_condition_reg_ |= (1 << (31 - bit)); }
382 void ClearFPSCR(int bit) { fp_condition_reg_ &= ~(1 << (31 - bit)); }
383
384 // Executes one instruction.
385 void ExecuteInstruction(Instruction* instr);
386
387 // ICache.
388 static void CheckICache(base::CustomMatcherHashMap* i_cache,
389 Instruction* instr);
390 static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start,
391 int size);
392 static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
393 void* page);
394
395 // Handle arguments and return value for runtime FP functions.
396 void GetFpArgs(double* x, double* y, intptr_t* z);
397 void SetFpResult(const double& result);
398 void TrashCallerSaveRegisters();
399
400 void CallInternal(Address entry);
401
402 // Architecture state.
403 // Saturating instructions require a Q flag to indicate saturation.
404 // There is currently no way to read the CPSR directly, and thus read the Q
405 // flag, so this is left unimplemented.
406 intptr_t registers_[kNumGPRs];
407 int32_t condition_reg_;
408 int32_t fp_condition_reg_;
409 intptr_t special_reg_lr_;
410 intptr_t special_reg_pc_;
411 intptr_t special_reg_ctr_;
412 int32_t special_reg_xer_;
413
414 int64_t fp_registers_[kNumFPRs];
415
416 // Simd registers.
417 union simdr_t {
418 int8_t int8[16];
419 uint8_t uint8[16];
420 int16_t int16[8];
421 uint16_t uint16[8];
422 int32_t int32[4];
423 uint32_t uint32[4];
424 int64_t int64[2];
425 uint64_t uint64[2];
426 float f32[4];
427 double f64[2];
428 };
429 simdr_t simd_registers_[kNumSIMDRs];
430
431 // Vector register lane numbers on IBM machines are reversed compared to
432 // x64. For example, doing an I32x4 extract_lane with lane number 0 on x64
433 // will be equal to lane number 3 on IBM machines. Vector registers are only
434 // used for compiling Wasm code at the moment. To keep the Wasm
435 // simulation accurate, we need to make sure accessing a lane is correctly
436 // simulated and as such we reverse the lane number on the getters and setters
437 // below. We need to be careful when getting/setting values on the Low or High
438 // side of a simulated register. In the simulation, "Low" is equal to the MSB
439 // and "High" is equal to the LSB in memory. "force_ibm_lane_numbering" could
440 // be used to disabled automatic lane number reversal and help with accessing
441 // the Low or High side of a simulated register.
442 template <class T>
443 T get_simd_register_by_lane(int reg, int lane,
444 bool force_ibm_lane_numbering = true) {
445 if (force_ibm_lane_numbering) {
446 lane = (kSimd128Size / sizeof(T)) - 1 - lane;
447 }
448 CHECK_LE(lane, kSimd128Size / sizeof(T));
449 CHECK_LT(reg, kNumSIMDRs);
450 CHECK_GE(lane, 0);
451 CHECK_GE(reg, 0);
452 return (reinterpret_cast<T*>(&simd_registers_[reg]))[lane];
453 }
454
455 template <class T>
456 T get_simd_register_bytes(int reg, int byte_from) {
457 // Byte location is reversed in memory.
458 int from = kSimd128Size - 1 - (byte_from + sizeof(T) - 1);
459 void* src = reinterpret_cast<uint8_t*>(&simd_registers_[reg]) + from;
460 T dst;
461 memcpy(&dst, src, sizeof(T));
462 return dst;
463 }
464
465 template <class T>
466 void set_simd_register_by_lane(int reg, int lane, const T& value,
467 bool force_ibm_lane_numbering = true) {
468 if (force_ibm_lane_numbering) {
469 lane = (kSimd128Size / sizeof(T)) - 1 - lane;
470 }
471 CHECK_LE(lane, kSimd128Size / sizeof(T));
472 CHECK_LT(reg, kNumSIMDRs);
473 CHECK_GE(lane, 0);
474 CHECK_GE(reg, 0);
475 (reinterpret_cast<T*>(&simd_registers_[reg]))[lane] = value;
476 }
477
478 template <class T>
479 void set_simd_register_bytes(int reg, int byte_from, T value) {
480 // Byte location is reversed in memory.
481 int from = kSimd128Size - 1 - (byte_from + sizeof(T) - 1);
482 void* dst = reinterpret_cast<uint8_t*>(&simd_registers_[reg]) + from;
483 memcpy(dst, &value, sizeof(T));
484 }
485
486 simdr_t& get_simd_register(int reg) { return simd_registers_[reg]; }
487
488 void set_simd_register(int reg, const simdr_t& value) {
489 simd_registers_[reg] = value;
490 }
491
492 // Simulator support for the stack.
493 uint8_t* stack_;
494 static const size_t kStackProtectionSize = 256 * kSystemPointerSize;
495 // This includes a protection margin at each end of the stack area.
496 static size_t AllocatedStackSize() {
497 size_t stack_size = v8_flags.sim_stack_size * KB;
498 return stack_size + (2 * kStackProtectionSize);
499 }
500 static size_t UsableStackSize() {
501 return AllocatedStackSize() - kStackProtectionSize;
502 }
503 bool pc_modified_;
504 int icount_;
505
506 // Debugger input.
507 char* last_debugger_input_;
508
509 // Registered breakpoints.
510 Instruction* break_pc_;
511 Instr break_instr_;
512
514
515 // A stop is watched if its code is less than kNumOfWatchedStops.
516 // Only watched stops support enabling/disabling and the counter feature.
517 static const uint32_t kNumOfWatchedStops = 256;
518
519 // Breakpoint is disabled if bit 31 is set.
520 static const uint32_t kStopDisabledBit = 1 << 31;
521
522 // A stop is enabled, meaning the simulator will stop when meeting the
523 // instruction, if bit 31 of watched_stops_[code].count is unset.
524 // The value watched_stops_[code].count & ~(1 << 31) indicates how many times
525 // the breakpoint was hit or gone through.
526 struct StopCountAndDesc {
527 uint32_t count;
528 char* desc;
529 };
530 StopCountAndDesc watched_stops_[kNumOfWatchedStops];
531
532 // Synchronization primitives. See ARM DDI 0406C.b, A2.9.
533 enum class MonitorAccess {
534 Open,
535 Exclusive,
536 };
537
538 enum class TransactionSize {
539 None = 0,
540 Byte = 1,
541 HalfWord = 2,
542 Word = 4,
543 DWord = 8,
544 };
545
546 class GlobalMonitor {
547 public:
548 // Exposed so it can be accessed by Simulator::{Read,Write}Ex*.
549 base::Mutex mutex;
550
551 void NotifyLoadExcl(uintptr_t addr, TransactionSize size,
552 ThreadId thread_id);
553 void NotifyStore(uintptr_t addr, TransactionSize size, ThreadId thread_id);
554 bool NotifyStoreExcl(uintptr_t addr, TransactionSize size,
555 ThreadId thread_id);
556
557 static GlobalMonitor* Get();
558
559 private:
560 // Private constructor. Call {GlobalMonitor::Get()} to get the singleton.
561 GlobalMonitor() = default;
562 friend class base::LeakyObject<GlobalMonitor>;
563
564 void Clear();
565
566 MonitorAccess access_state_ = MonitorAccess::Open;
567 uintptr_t tagged_addr_ = 0;
568 TransactionSize size_ = TransactionSize::None;
569 ThreadId thread_id_ = ThreadId::Invalid();
570 };
571};
572
573} // namespace internal
574} // namespace v8
575
576#endif // defined(USE_SIMULATOR)
577#endif // V8_EXECUTION_PPC_SIMULATOR_PPC_H_
Isolate * isolate_
#define T
#define one
uint8_t data_[MAX_STACK_LENGTH]
const int size_
Definition assembler.cc:132
int start
uint32_t count
LineAndColumn current
base::Vector< const DirectHandle< Object > > args
Definition execution.cc:74
refactor address components for immediate indexing make OptimizeMaglevOnNextCall optimize to turbofan instead of maglev filter for tracing turbofan compilation trace turbo cfg trace TurboFan s graph trimmer trace TurboFan s control equivalence trace TurboFan s register allocator trace stack load store counters for optimized code in run fuzzing &&concurrent_recompilation trace_turbo trace_turbo_scheduled trace_turbo_stack_accesses verify TurboFan machine graph of code stubs enable FixedArray bounds checks print TurboFan statistics of wasm compilations maximum cumulative size of bytecode considered for inlining scale factor of bytecode size used to calculate the inlining budget * KB
int32_t offset
Instruction * instr
ZoneVector< RpoNumber > & result
LiftoffRegister reg
int y
int x
base::Mutex mutex
constexpr size_t kPageSize
Definition globals.h:42
int int32_t
Definition unicode.cc:40
unsigned short uint16_t
Definition unicode.cc:39
signed short int16_t
Definition unicode.cc:38
uintptr_t Address
Definition memory.h:13
V8_INLINE const Operation & Get(const Graph &graph, OpIndex index)
Definition graph.h:1231
UntaggedUnion< Word32, Word64 > Word
Definition index.h:535
uint32_t WasmInterpreterRuntime int64_t r0
constexpr Register no_reg
constexpr int kSimd128Size
Definition globals.h:706
constexpr int kSystemPointerSize
Definition globals.h:410
V8_EXPORT_PRIVATE FlagValues v8_flags
constexpr Register r11
@ None
Definition v8-object.h:141
base::SmallVector< RegisterT, kStaticCapacity > registers_
#define CHECK_GE(lhs, rhs)
#define CHECK_LT(lhs, rhs)
#define CHECK_LE(lhs, rhs)
#define DCHECK(condition)
Definition logging.h:482
#define V8_EXPORT_PRIVATE
Definition macros.h:460
std::unique_ptr< ValueMirror > value