#ifndef V8_EXECUTION_PPC_SIMULATOR_PPC_H_
#define V8_EXECUTION_PPC_SIMULATOR_PPC_H_

#if defined(USE_SIMULATOR)

class CachePage {
 public:
  static const int LINE_VALID = 0;
  static const int LINE_INVALID = 1;

  static const int kPageShift = 12;
  static const int kPageSize = 1 << kPageShift;
  static const int kPageMask = kPageSize - 1;
  static const int kLineShift = 2;  // The cache line is 4 bytes.
  static const int kLineLength = 1 << kLineShift;
  static const int kLineMask = kLineLength - 1;
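
  // Sketch of how an address maps onto this cache (derived from the
  // constants above): page offset = addr & kPageMask, and the line index
  // within the page = offset >> kLineShift, i.e. 4-byte lines in 4 KiB pages.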

  CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }

  char* ValidityByte(int offset) {
    return &validity_map_[offset >> kLineShift];
  }

  static const int kValidityMapSize = kPageSize >> kLineShift;
  char validity_map_[kValidityMapSize];  // One byte per line.
};

class Simulator : public SimulatorBase {
 public:
  friend class PPCDebugger;

  explicit Simulator(Isolate* isolate);

  // Accessors for register state.
  void set_register(int reg, intptr_t value);
  intptr_t get_register(int reg) const;
  double get_double_from_register_pair(int reg);
  void set_d_register_from_double(int dreg, const double dbl) {
    DCHECK(dreg >= 0 && dreg < kNumFPRs);
    fp_registers_[dreg] = base::bit_cast<int64_t>(dbl);
  }
  double get_double_from_d_register(int dreg) {
    DCHECK(dreg >= 0 && dreg < kNumFPRs);
    return base::bit_cast<double>(fp_registers_[dreg]);
  }
  void set_d_register(int dreg, int64_t value) {
    DCHECK(dreg >= 0 && dreg < kNumFPRs);
    fp_registers_[dreg] = value;
  }
  int64_t get_d_register(int dreg) {
    DCHECK(dreg >= 0 && dreg < kNumFPRs);
    return fp_registers_[dreg];
  }

  // Special case of set_register and get_register to access the raw PC value.
  void set_pc(intptr_t value);
  intptr_t get_pc() const;

  Address get_sp() const { return static_cast<Address>(get_register(sp)); }

  intptr_t get_lr() const;

  // Accessors for the simulator stack.
  uintptr_t StackLimit(uintptr_t c_limit) const;
  uintptr_t StackBase() const;
  base::Vector<uint8_t> GetCentralStackView() const;
  static constexpr int JSStackLimitMargin() { return kStackProtectionSize; }

  template <typename Return, typename... Args>
  Return Call(Address entry, Args... args) {
    return VariadicCall<Return>(this, &Simulator::CallImpl, entry, args...);
  }
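
  // Usage sketch (illustrative only; 'entry', 'arg0' and 'arg1' are
  // placeholders, not names from this header):
  //   intptr_t result = simulator->Call<intptr_t>(entry, arg0, arg1);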

  // Alternative: call a generated-code entry point that takes two doubles.
  void CallFP(Address entry, double d0, double d1);
  int32_t CallFPReturnsInt(Address entry, double d0, double d1);
  double CallFPReturnsDouble(Address entry, double d0, double d1);

  // Debugger input.
  void set_last_debugger_input(char* input);
  char* last_debugger_input() { return last_debugger_input_; }

  // Redirection support.
  static void SetRedirectInstruction(Instruction* instruction);

  // ICache checking.
  static bool ICacheMatch(void* one, void* two);
  static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
                          size_t size);

  // Returns true if pc register contains one of the 'special_values' defined
  // below (bad_lr, end_sim_pc).
  bool has_bad_pc() const;

  bool InstructionTracingEnabled();
  void ToggleInstructionTracing();

 private:
  enum special_values {
    // Known bad pc value to make sure the simulator does not execute
    // without being properly set up.
    bad_lr = -1,
    // A pc value used to signal the simulator to stop execution; calls into
    // C++ return to this address to exit the simulation loop.
    end_sim_pc = -2
  };

  intptr_t CallImpl(Address entry, int argument_count,
                    const intptr_t* arguments);

  enum BCType { BC_OFFSET, BC_LINK_REG, BC_CTR_REG };
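  // BC_OFFSET is a branch to a relative offset (bc), BC_LINK_REG branches via
  // the link register (bclr), and BC_CTR_REG via the count register (bcctr).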

  // Unsupported instructions use Format to print an error and stop execution.
  void Format(Instruction* instr, const char* format);

  // Helper functions to set the conditional flags in the architecture state.
  bool CarryFrom(int32_t left, int32_t right, int32_t carry = 0);
  bool BorrowFrom(int32_t left, int32_t right);
  bool OverflowFrom(int32_t alu_out, int32_t left, int32_t right,
                    bool addition);

  // Helper functions to decode common "addressing" modes.
  int32_t GetShiftRm(Instruction* instr, bool* carry_out);
  void ProcessPUW(Instruction* instr, int num_regs, int operand_size,
                  intptr_t* start_address, intptr_t* end_address);
  void HandleRList(Instruction* instr, bool load);
  void HandleVList(Instruction* inst);
  void SoftwareInterrupt(Instruction* instr);
  void DebugAtNextPC();

  // Copy of v8_flags.trace_sim so tracing can be toggled at runtime.
  bool instruction_tracing_ = v8_flags.trace_sim;

  // Stop helper functions.
  inline bool isStopInstruction(Instruction* instr);
  inline bool isWatchedStop(uint32_t bkpt_code);
  inline bool isEnabledStop(uint32_t bkpt_code);
  inline void EnableStop(uint32_t bkpt_code);
  inline void DisableStop(uint32_t bkpt_code);
  inline void IncreaseStopCounter(uint32_t bkpt_code);
  void PrintStopInfo(uint32_t code);
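
  // Memory accessors. Every access takes the global monitor's mutex so that
  // the exclusive-reservation bookkeeping below stays consistent across
  // simulator threads.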
  template <typename T>
  inline void Read(uintptr_t address, T* value) {
    base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
    memcpy(value, reinterpret_cast<const char*>(address), sizeof(T));
  }

  template <typename T>
  inline void ReadEx(uintptr_t address, T* value) {
    base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
    GlobalMonitor::Get()->NotifyLoadExcl(
        address, static_cast<TransactionSize>(sizeof(T)),
        isolate_->thread_id());
    memcpy(value, reinterpret_cast<const char*>(address), sizeof(T));
  }

  template <typename T>
  inline void Write(uintptr_t address, T value) {
    base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
    GlobalMonitor::Get()->NotifyStore(address,
                                      static_cast<TransactionSize>(sizeof(T)),
                                      isolate_->thread_id());
    memcpy(reinterpret_cast<char*>(address), &value, sizeof(T));
  }

  template <typename T>
  inline int32_t WriteEx(uintptr_t address, T value) {
    base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
    if (GlobalMonitor::Get()->NotifyStoreExcl(
            address, static_cast<TransactionSize>(sizeof(T)),
            isolate_->thread_id())) {
      memcpy(reinterpret_cast<char*>(address), &value, sizeof(T));
      return 0;  // Exclusive store succeeded.
    }
    return 1;  // Reservation was lost; the store was not performed.
  }

  // Byte-reverse a 128-bit value by swapping its two byte-reversed halves.
  static inline __uint128_t __builtin_bswap128(__uint128_t v) {
    union {
      uint64_t u64[2];
      __uint128_t u128;
    } res, val;
    val.u128 = v;
    res.u64[0] = ByteReverse<int64_t>(val.u64[1]);
    res.u64[1] = ByteReverse<int64_t>(val.u64[0]);
    return res.u128;
  }

#define RW_VAR_LIST(V)      \
  V(QWU, unsigned __int128) \
  V(QW, __int128)           \
  V(DWU, uint64_t)          \
  V(DW, int64_t)            \
  V(WU, uint32_t)           \
  V(W, int32_t) V(HU, uint16_t) V(H, int16_t) V(BU, uint8_t) V(B, int8_t)

#define GENERATE_RW_FUNC(size, type)                   \
  inline type Read##size(uintptr_t addr);              \
  inline type ReadEx##size(uintptr_t addr);            \
  inline void Write##size(uintptr_t addr, type value); \
  inline int32_t WriteEx##size(uintptr_t addr, type value);

  RW_VAR_LIST(GENERATE_RW_FUNC)
#undef GENERATE_RW_FUNC
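
  // For instance, the V(W, int32_t) entry above expands to:
  //   inline int32_t ReadW(uintptr_t addr);
  //   inline int32_t ReadExW(uintptr_t addr);
  //   inline void WriteW(uintptr_t addr, int32_t value);
  //   inline int32_t WriteExW(uintptr_t addr, int32_t value);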

  void Trace(Instruction* instr);
  void SetCR0(intptr_t result, bool setSO = false);
  void SetCR6(bool true_for_all);
  void ExecuteBranchConditional(Instruction* instr, BCType type);
  void ExecuteGeneric(Instruction* instr);

  void SetFPSCR(int bit) { fp_condition_reg_ |= (1 << (31 - bit)); }
  void ClearFPSCR(int bit) { fp_condition_reg_ &= ~(1 << (31 - bit)); }
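  // PPC numbers bits from the most-significant end, so FPSCR bit N maps to
  // host shift amount (31 - N) in the two helpers above.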

  // Executes one instruction.
  void ExecuteInstruction(Instruction* instr);

  // ICache.
  static void CheckICache(base::CustomMatcherHashMap* i_cache,
                          Instruction* instr);
  static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start,
                           int size);
  static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
                                 void* page);

  // Runtime call support for floating-point arguments and results.
  void GetFpArgs(double* x, double* y, intptr_t* z);
  void SetFpResult(const double& result);
  void TrashCallerSaveRegisters();

  void CallInternal(Address entry);

  // Architecture state.
  int32_t fp_condition_reg_;
  intptr_t special_reg_lr_;
  intptr_t special_reg_pc_;
  intptr_t special_reg_ctr_;

  int64_t fp_registers_[kNumFPRs];

  simdr_t simd_registers_[kNumSIMDRs];
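
  // IBM (big-endian) lane numbering is the reverse of the little-endian
  // numbering used by Wasm/x64: lane 0 is the most significant element. The
  // accessors below translate when force_ibm_lane_numbering is set.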
  template <class T>
  T get_simd_register_by_lane(int reg, int lane,
                              bool force_ibm_lane_numbering = true) {
    if (force_ibm_lane_numbering) {
      lane = (kSimd128Size / sizeof(T)) - 1 - lane;
    }
    CHECK_LE(lane, kSimd128Size / sizeof(T));
    return (reinterpret_cast<T*>(&simd_registers_[reg]))[lane];
  }

  template <class T>
  T get_simd_register_bytes(int reg, int byte_from) {
    // Byte location is reversed on IBM machines.
    int from = kSimd128Size - 1 - (byte_from + sizeof(T) - 1);
    void* src = reinterpret_cast<uint8_t*>(&simd_registers_[reg]) + from;
    T dst;
    memcpy(&dst, src, sizeof(T));
    return dst;
  }

  template <class T>
  void set_simd_register_by_lane(int reg, int lane, const T& value,
                                 bool force_ibm_lane_numbering = true) {
    if (force_ibm_lane_numbering) {
      lane = (kSimd128Size / sizeof(T)) - 1 - lane;
    }
    CHECK_LE(lane, kSimd128Size / sizeof(T));
    (reinterpret_cast<T*>(&simd_registers_[reg]))[lane] = value;
  }

  template <class T>
  void set_simd_register_bytes(int reg, int byte_from, T value) {
    // Byte location is reversed on IBM machines.
    int from = kSimd128Size - 1 - (byte_from + sizeof(T) - 1);
    void* dst = reinterpret_cast<uint8_t*>(&simd_registers_[reg]) + from;
    memcpy(dst, &value, sizeof(T));
  }

  simdr_t& get_simd_register(int reg) { return simd_registers_[reg]; }

  void set_simd_register(int reg, const simdr_t& value) {
    simd_registers_[reg] = value;
  }

  // Simulator support for the stack.
  static size_t AllocatedStackSize() {
    size_t stack_size = v8_flags.sim_stack_size * KB;
    return stack_size + (2 * kStackProtectionSize);
  }
  static size_t UsableStackSize() {
    return AllocatedStackSize() - kStackProtectionSize;
  }
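  // AllocatedStackSize pads the flag-controlled stack size with a
  // kStackProtectionSize guard region at each end; UsableStackSize subtracts
  // one of those margins (see JSStackLimitMargin above).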

  // Debugger input.
  char* last_debugger_input_;

  // Registered breakpoints.
  Instruction* break_pc_;

  v8::internal::Isolate* isolate_;

  // A stop is watched if its code is less than kNumOfWatchedStops.
  // Only watched stops support enabling/disabling and the counter feature.
  static const uint32_t kNumOfWatchedStops = 256;

  // A stop is disabled if bit 31 of its count is set.
  static const uint32_t kStopDisabledBit = 1 << 31;

  struct StopCountAndDesc {
    uint32_t count;
    char* desc;
  };
  StopCountAndDesc watched_stops_[kNumOfWatchedStops];

  // Synchronization primitives for the simulated exclusive accesses.
  enum class MonitorAccess {
    Open,
    Exclusive,
  };

  enum class TransactionSize {
    None = 0,
    Byte = 1,
    HalfWord = 2,
    Word = 4,
    DWord = 8,
  };
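
  // The global monitor models PPC load-and-reserve / store-conditional
  // (lwarx/stwcx.) semantics: a store-exclusive succeeds only if the
  // reservation established by the matching load-exclusive is still intact.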
  class GlobalMonitor {
   public:
    // Exposed so the memory accessors above can hold the lock across an
    // access plus its monitor notification.
    base::Mutex mutex;

    void NotifyLoadExcl(uintptr_t addr, TransactionSize size,
                        ThreadId thread_id);
    void NotifyStore(uintptr_t addr, TransactionSize size, ThreadId thread_id);
    bool NotifyStoreExcl(uintptr_t addr, TransactionSize size,
                         ThreadId thread_id);

    static GlobalMonitor* Get();

   private:
    // Private constructor; use GlobalMonitor::Get() for the process-wide
    // singleton.
    GlobalMonitor() = default;
    friend class base::LeakyObject<GlobalMonitor>;

    MonitorAccess access_state_ = MonitorAccess::Open;
    uintptr_t tagged_addr_ = 0;
    TransactionSize size_ = TransactionSize::None;
    ThreadId thread_id_ = ThreadId::Invalid();
  };
};

#endif  // defined(USE_SIMULATOR)
#endif  // V8_EXECUTION_PPC_SIMULATOR_PPC_H_