#ifndef V8_EXECUTION_S390_SIMULATOR_S390_H_
#define V8_EXECUTION_S390_SIMULATOR_S390_H_

#if defined(USE_SIMULATOR)
  static const int LINE_VALID = 0;
  static const int LINE_INVALID = 1;

  static const int kPageShift = 12;
  static const int kPageSize = 1 << kPageShift;
  static const int kPageMask = kPageSize - 1;
  static const int kLineShift = 2;
  static const int kLineLength = 1 << kLineShift;
  static const int kLineMask = kLineLength - 1;

  CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }

  char* ValidityByte(int offset) {
    return &validity_map_[offset >> kLineShift];
  }

  static const int kValidityMapSize = kPageSize >> kLineShift;
  char validity_map_[kValidityMapSize];
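  // One validity byte per cache line: a 4 KB (1 << kPageShift) page tracked at
  // 4-byte (1 << kLineShift) granularity gives kPageSize >> kLineShift == 1024
  // entries, each holding LINE_VALID or LINE_INVALID.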
template <class T>
static T ComputeRounding(T a, int mode) {
  return std::nearbyint(a);
}
class Simulator : public SimulatorBase {
 public:
  friend class S390Debugger;
  explicit Simulator(Isolate* isolate);

  void set_register(int reg, uint64_t value);
  const uint64_t& get_register(int reg) const;
  uint64_t& get_register(int reg);
  template <typename T>
  T get_low_register(int reg) const;
  template <typename T>
  T get_high_register(int reg) const;
  void set_low_register(int reg, uint32_t value);
  void set_high_register(int reg, uint32_t value);

  double get_double_from_register_pair(int reg);
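  // Descriptive note: the general registers are simulated as 64-bit values
  // (set_register/get_register), while the *_low_register/*_high_register
  // accessors read or write one 32-bit half of a register, matching the
  // 32-bit forms of the instructions.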
  template <class T>
  T get_fpr(int dreg) {
    DCHECK(dreg >= 0 && dreg < kNumFPRs);
    return get_simd_register_by_lane<T>(dreg, 0);
  }

  template <class T>
  void set_fpr(int dreg, const T val) {
    DCHECK(dreg >= 0 && dreg < kNumFPRs);
    set_simd_register_by_lane<T>(dreg, 0, val);
  }
  void set_pc(intptr_t value);
  intptr_t get_pc() const;

  Address get_sp() const { return static_cast<Address>(get_register(sp)); }

  uintptr_t StackLimit(uintptr_t c_limit) const;

  uintptr_t StackBase() const;

  base::Vector<uint8_t> GetCentralStackView() const;
  static constexpr int JSStackLimitMargin() { return kStackProtectionSize; }
  template <typename Return, typename... Args>
  Return Call(Address entry, Args... args) {
    return VariadicCall<Return>(this, &Simulator::CallImpl, entry, args...);
  }
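  // Illustrative use (caller-side names are hypothetical): a test harness
  // running generated code through the simulator would do something like
  //   int64_t result = sim->Call<int64_t>(code_entry, arg0, arg1);
  // which forwards the arguments to CallImpl via VariadicCall.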
  void CallFP(Address entry, double d0, double d1);
  int32_t CallFPReturnsInt(Address entry, double d0, double d1);
  double CallFPReturnsDouble(Address entry, double d0, double d1);

  void set_last_debugger_input(char* input);
  char* last_debugger_input() { return last_debugger_input_; }
  static void SetRedirectInstruction(Instruction* instruction);

  static bool ICacheMatch(void* one, void* two);
  static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
                          size_t size);
  bool has_bad_pc() const;

  bool InstructionTracingEnabled();
  void ToggleInstructionTracing();
  enum special_values { bad_lr = -1, end_sim_pc = -2 };
  intptr_t CallImpl(Address entry, int argument_count,
                    const intptr_t* arguments);
  void Format(Instruction* instr, const char* format);
  bool CarryFrom(int32_t left, int32_t right, int32_t carry = 0);
  bool BorrowFrom(int32_t left, int32_t right);
  template <typename T1>
  inline bool OverflowFromSigned(T1 alu_out, T1 left, T1 right, bool addition);
  int32_t GetShiftRm(Instruction* instr, bool* carry_out);
  void ProcessPUW(Instruction* instr, int num_regs, int operand_size,
                  intptr_t* start_address, intptr_t* end_address);
  void HandleRList(Instruction* instr, bool load);
  void HandleVList(Instruction* inst);
  void SoftwareInterrupt(Instruction* instr);
  void DebugAtNextPC();
  bool instruction_tracing_ = v8_flags.trace_sim;
  inline bool isStopInstruction(Instruction* instr);
  inline bool isWatchedStop(uint32_t bkpt_code);
  inline bool isEnabledStop(uint32_t bkpt_code);
  inline void EnableStop(uint32_t bkpt_code);
  inline void DisableStop(uint32_t bkpt_code);
  inline void IncreaseStopCounter(uint32_t bkpt_code);
  void PrintStopInfo(uint32_t code);
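  // Descriptive note: stop codes below kNumOfWatchedStops (declared further
  // down alongside watched_stops_) can be individually enabled, disabled and
  // counted through the helpers above; kStopDisabledBit marks a watched stop
  // as disabled.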
  inline uint8_t ReadBU(intptr_t addr);
  inline int8_t ReadB(intptr_t addr);
  inline void WriteB(intptr_t addr, uint8_t value);
  inline void WriteB(intptr_t addr, int8_t value);

  inline uint16_t ReadHU(intptr_t addr);
  inline int16_t ReadH(intptr_t addr);
  inline void WriteH(intptr_t addr, uint16_t value);
  inline void WriteH(intptr_t addr, int16_t value);

  inline uint32_t ReadWU(intptr_t addr);
  inline int32_t ReadW(intptr_t addr);
  inline int64_t ReadW64(intptr_t addr);
  inline void WriteW(intptr_t addr, uint32_t value);
  inline void WriteW(intptr_t addr, int32_t value);

  inline int64_t ReadDW(intptr_t addr);
  inline double ReadDouble(intptr_t addr);
  inline float ReadFloat(intptr_t addr);
  inline void WriteDW(intptr_t addr, int64_t value);
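  // Naming convention in the accessors above: B/H/W/DW operate on byte,
  // halfword, word and doubleword values respectively, and the U-suffixed
  // readers return the unsigned (zero-extended) variant of the same width.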
  void Trace(Instruction* instr);
  template <typename T>
  void SetS390ConditionCode(T lhs, T rhs) {
    condition_reg_ = 0;
    if (lhs == rhs) {
      condition_reg_ |= CC_EQ;
    } else if (lhs < rhs) {
      condition_reg_ |= CC_LT;
    } else if (lhs > rhs) {
      condition_reg_ |= CC_GT;
    }
    if (condition_reg_ == 0) condition_reg_ = unordered;
  }
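  // Worked example of the comparison above: SetS390ConditionCode(1, 2) sets
  // only CC_LT; equal operands set CC_EQ; for floating-point operands where
  // either side is NaN, none of the three branches is taken, so the condition
  // register is left at the unordered value.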
  template <typename T>
  void SetS390ConditionCodeCarry(T result, bool overflow) {
    bool zero_result = (result == static_cast<T>(0));
    if (zero_result && !overflow) {
    } else if (!zero_result && !overflow) {
    } else if (zero_result && overflow) {
    } else if (!zero_result && overflow) {
    }
  }
  bool isNaN(double value) { return (value != value); }
  template <typename T>
  void SetS390BitWiseConditionCode(T value) {
    if (value == 0)
      condition_reg_ |= CC_EQ;
    else if (value < 0)
      condition_reg_ |= CC_LT;
  }
  void SetS390OverflowCode(bool isOF) {
    if (isOF) condition_reg_ = CC_OF;
  }
  bool TestConditionCode(Condition mask) {
    if (mask == 0xf) return true;
    return (condition_reg_ & mask) != 0;
  }
  void ExecuteInstruction(Instruction* instr, bool auto_incr_pc = true);
  static void CheckICache(base::CustomMatcherHashMap* i_cache,
                          Instruction* instr);
  static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start,
                           int size);
  static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
                                 void* page);
  void GetFpArgs(double* x, double* y, intptr_t* z);
  void SetFpResult(const double& result);
  void TrashCallerSaveRegisters();

  void CallInternal(Address entry, int reg_arg_count = 3);
  fpr_t fp_registers_[kNumFPRs];

  static constexpr fpr_t fp_zero = {{0}};

  fpr_t get_simd_register(int reg) { return fp_registers_[reg]; }
  void set_simd_register(int reg, const fpr_t& value) {
    fp_registers_[reg] = value;
  }
  template <class T>
  T get_simd_register_by_lane(int reg, int lane,
                              bool force_ibm_lane_numbering = true) {
    if (force_ibm_lane_numbering) {
      lane = (kSimd128Size / sizeof(T)) - 1 - lane;
    }
    CHECK_LE(lane, kSimd128Size / sizeof(T));
    return (reinterpret_cast<T*>(&fp_registers_[reg]))[lane];
  }
  template <class T>
  void set_simd_register_by_lane(int reg, int lane, const T& value,
                                 bool force_ibm_lane_numbering = true) {
    if (force_ibm_lane_numbering) {
      lane = (kSimd128Size / sizeof(T)) - 1 - lane;
    }
    CHECK_LE(lane, kSimd128Size / sizeof(T));
    (reinterpret_cast<T*>(&fp_registers_[reg]))[lane] = value;
  }
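  // Note on the two lane accessors above: with force_ibm_lane_numbering the
  // lane index is mirrored (lane -> element_count - 1 - lane), letting callers
  // use big-endian ("IBM") lane numbering regardless of how fpr_t stores the
  // 128-bit value in host memory.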
  intptr_t special_reg_pc_;

  static size_t AllocatedStackSize() {
    size_t stack_size = v8_flags.sim_stack_size * KB;
    return stack_size + (2 * kStackProtectionSize);
  }
  static size_t UsableStackSize() {
    return AllocatedStackSize() - kStackProtectionSize;
  }
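  // From the arithmetic above: the simulator reserves v8_flags.sim_stack_size
  // KB of stack plus two kStackProtectionSize guard regions, and
  // UsableStackSize() reports that allocation minus a single guard region.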
  char* last_debugger_input_;

  Instruction* break_pc_;

  static const uint32_t kNumOfWatchedStops = 256;
  static const uint32_t kStopDisabledBit = 1 << 31;

  struct StopCountAndDesc {
    uint32_t count;
    char* desc;
  };
  StopCountAndDesc watched_stops_[kNumOfWatchedStops];
  int DecodeInstructionOriginal(Instruction* instr);
  int DecodeInstruction(Instruction* instr);
  int Evaluate_Unknown(Instruction* instr);
#define MAX_NUM_OPCODES (1 << 16)
  using EvaluateFuncType = int (Simulator::*)(Instruction*);

  static EvaluateFuncType EvalTable[MAX_NUM_OPCODES];
  static void EvalTableInit();
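  // Dispatch sketch, inferred from the declarations above: EvalTableInit()
  // populates EvalTable with one Evaluate_<Name> member-function pointer per
  // 16-bit opcode slot, and DecodeInstruction() invokes the handler for the
  // decoded opcode along the lines of (this->*EvalTable[opcode])(instr).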
#define EVALUATE(name) int Evaluate_##name(Instruction* instr)
#define EVALUATE_VR_INSTRUCTIONS(name, op_name, op_value) EVALUATE(op_name);
  S390_VRR_A_OPCODE_LIST(EVALUATE_VR_INSTRUCTIONS)
  S390_VRR_B_OPCODE_LIST(EVALUATE_VR_INSTRUCTIONS)
  S390_VRR_C_OPCODE_LIST(EVALUATE_VR_INSTRUCTIONS)
  S390_VRR_E_OPCODE_LIST(EVALUATE_VR_INSTRUCTIONS)
  S390_VRR_F_OPCODE_LIST(EVALUATE_VR_INSTRUCTIONS)
  S390_VRI_A_OPCODE_LIST(EVALUATE_VR_INSTRUCTIONS)
  S390_VRI_C_OPCODE_LIST(EVALUATE_VR_INSTRUCTIONS)
  S390_VRS_A_OPCODE_LIST(EVALUATE_VR_INSTRUCTIONS)
  S390_VRS_B_OPCODE_LIST(EVALUATE_VR_INSTRUCTIONS)
  S390_VRS_C_OPCODE_LIST(EVALUATE_VR_INSTRUCTIONS)
  S390_VRX_OPCODE_LIST(EVALUATE_VR_INSTRUCTIONS)
#undef EVALUATE_VR_INSTRUCTIONS