v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
simulator-arm64.cc
Go to the documentation of this file.
1// Copyright 2013 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
8
9#if defined(USE_SIMULATOR)
10
11#include <stdlib.h>
12
13#include <cmath>
14#include <cstdarg>
15#include <type_traits>
16
25#include "src/heap/base/stack.h"
30#include "src/utils/ostreams.h"
31
32#if V8_OS_WIN
33#include <windows.h>
34#endif
35
36#if V8_ENABLE_WEBASSEMBLY
38#endif // V8_ENABLE_WEBASSEMBLY
39
40namespace v8 {
41namespace internal {
42
43// This macro provides a platform independent use of sscanf. The reason for
44// SScanF not being implemented in a platform independent way through
45// ::v8::internal::OS in the same way as SNPrintF is that the
46// Windows C Run-Time Library does not provide vsscanf.
47#define SScanF sscanf
48
49// Helpers for colors.
50#define COLOUR(colour_code) "\033[0;" colour_code "m"
51#define COLOUR_BOLD(colour_code) "\033[1;" colour_code "m"
52#define NORMAL ""
53#define GREY "30"
54#define RED "31"
55#define GREEN "32"
56#define YELLOW "33"
57#define BLUE "34"
58#define MAGENTA "35"
59#define CYAN "36"
60#define WHITE "37"
61
62using TEXT_COLOUR = char const* const;
63TEXT_COLOUR clr_normal = v8_flags.log_colour ? COLOUR(NORMAL) : "";
64TEXT_COLOUR clr_flag_name = v8_flags.log_colour ? COLOUR_BOLD(WHITE) : "";
65TEXT_COLOUR clr_flag_value = v8_flags.log_colour ? COLOUR(NORMAL) : "";
66TEXT_COLOUR clr_reg_name = v8_flags.log_colour ? COLOUR_BOLD(CYAN) : "";
67TEXT_COLOUR clr_reg_value = v8_flags.log_colour ? COLOUR(CYAN) : "";
68TEXT_COLOUR clr_vreg_name = v8_flags.log_colour ? COLOUR_BOLD(MAGENTA) : "";
69TEXT_COLOUR clr_vreg_value = v8_flags.log_colour ? COLOUR(MAGENTA) : "";
70TEXT_COLOUR clr_memory_address = v8_flags.log_colour ? COLOUR_BOLD(BLUE) : "";
71TEXT_COLOUR clr_debug_number = v8_flags.log_colour ? COLOUR_BOLD(YELLOW) : "";
72TEXT_COLOUR clr_debug_message = v8_flags.log_colour ? COLOUR(YELLOW) : "";
73TEXT_COLOUR clr_printf = v8_flags.log_colour ? COLOUR(GREEN) : "";
74
75DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor,
76 Simulator::GlobalMonitor::Get)
77
78bool Simulator::ProbeMemory(uintptr_t address, uintptr_t access_size) {
79#if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED
80 uintptr_t last_accessed_byte = address + access_size - 1;
81 uintptr_t current_pc = reinterpret_cast<uintptr_t>(pc_);
82 uintptr_t landing_pad =
83 trap_handler::ProbeMemory(last_accessed_byte, current_pc);
84 if (!landing_pad) return true;
85 set_pc(landing_pad);
86 set_reg(kWasmTrapHandlerFaultAddressRegister.code(), current_pc);
87 return false;
88#else
89 return true;
90#endif
91}
92
93// This is basically the same as PrintF, with a guard for v8_flags.trace_sim.
94void Simulator::TraceSim(const char* format, ...) {
95 if (v8_flags.trace_sim) {
96 va_list arguments;
97 va_start(arguments, format);
98 base::OS::VFPrint(stream_, format, arguments);
99 va_end(arguments);
100 }
101}
102
// Sentinel PC/LR value: when simulated execution returns to this address,
// Run() terminates (see ResetState and Run).
const Instruction* Simulator::kEndOfSimAddress = nullptr;
104
105void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) {
106 int width = msb - lsb + 1;
107 DCHECK(is_uintn(bits, width) || is_intn(bits, width));
108
109 bits <<= lsb;
110 uint32_t mask = ((1 << width) - 1) << lsb;
111 DCHECK_EQ(mask & write_ignore_mask_, 0);
112
113 value_ = (value_ & ~mask) | (bits & mask);
114}
115
116SimSystemRegister SimSystemRegister::DefaultValueFor(SystemRegister id) {
117 switch (id) {
118 case NZCV:
119 return SimSystemRegister(0x00000000, NZCVWriteIgnoreMask);
120 case FPCR:
121 return SimSystemRegister(0x00000000, FPCRWriteIgnoreMask);
122 default:
123 UNREACHABLE();
124 }
125}
126
127// Get the active Simulator for the current thread.
// Get the active Simulator for the current thread.
Simulator* Simulator::current(Isolate* isolate) {
  Isolate::PerIsolateThreadData* isolate_data =
      isolate->FindOrAllocatePerThreadDataForThisThread();
  DCHECK_NOT_NULL(isolate_data);

  Simulator* sim = isolate_data->simulator();
  if (sim == nullptr) {
    // Lazily create the simulator the first time this thread needs it.
    if (v8_flags.trace_sim || v8_flags.debug_sim) {
      // Tracing/debugging: use a dispatching decoder so a disassembler
      // visitor can be chained in front of execution.
      sim = new Simulator(new Decoder<DispatchingDecoderVisitor>(), isolate);
    } else {
      // Fast path: Decoder<Simulator> is-a Simulator (the assignment below
      // relies on that), fusing decode and execute without visitor dispatch.
      sim = new Decoder<Simulator>();
      sim->isolate_ = isolate;
    }
    isolate_data->set_simulator(sim);
  }
  return sim;
}
145
146void Simulator::CallImpl(Address entry, CallArgument* args) {
147 int index_x = 0;
148 int index_d = 0;
149
150 std::vector<int64_t> stack_args(0);
151 for (int i = 0; !args[i].IsEnd(); i++) {
152 CallArgument arg = args[i];
153 if (arg.IsX() && (index_x < 8)) {
154 set_xreg(index_x++, arg.bits());
155 } else if (arg.IsD() && (index_d < 8)) {
156 set_dreg_bits(index_d++, arg.bits());
157 } else {
158 DCHECK(arg.IsD() || arg.IsX());
159 stack_args.push_back(arg.bits());
160 }
161 }
162
163 // Process stack arguments, and make sure the stack is suitably aligned.
164 uintptr_t original_stack = sp();
165 uintptr_t entry_stack =
166 original_stack - stack_args.size() * sizeof(stack_args[0]);
168 entry_stack &= -base::OS::ActivationFrameAlignment();
169 }
170 char* stack = reinterpret_cast<char*>(entry_stack);
171 std::vector<int64_t>::const_iterator it;
172 for (it = stack_args.begin(); it != stack_args.end(); it++) {
173 memcpy(stack, &(*it), sizeof(*it));
174 stack += sizeof(*it);
175 }
176
177 DCHECK(reinterpret_cast<uintptr_t>(stack) <= original_stack);
178 set_sp(entry_stack);
179
180 // Call the generated code.
181 set_pc(entry);
182 set_lr(kEndOfSimAddress);
183 CheckPCSComplianceAndRun();
184
185 set_sp(original_stack);
186}
187
188#ifdef DEBUG
189namespace {
190int PopLowestIndexAsCode(CPURegList* list) {
191 if (list->IsEmpty()) {
192 return -1;
193 }
194 uint64_t reg_list = list->bits();
195 int index = base::bits::CountTrailingZeros(reg_list);
196 DCHECK((1LL << index) & reg_list);
197 list->Remove(index);
198
199 return index;
200}
201} // namespace
202#endif
203
// Runs the simulation and, in DEBUG builds, verifies the Procedure Call
// Standard (AAPCS64): callee-saved registers, SP and FP must be preserved
// across the call; caller-saved registers are then deliberately corrupted so
// that code relying on them after a call fails fast.
void Simulator::CheckPCSComplianceAndRun() {
  // Adjust JS-based stack limit to C-based stack limit.
  isolate_->stack_guard()->AdjustStackLimitForSimulator();

#ifdef DEBUG

  int64_t saved_registers[kNumberOfCalleeSavedRegisters];
  uint64_t saved_fpregisters[kNumberOfCalleeSavedVRegisters];

  // Local copies: PopLowestIndexAsCode drains the list it is given.
  CPURegList register_list = kCalleeSaved;
  CPURegList fpregister_list = kCalleeSavedV;

  for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) {
    // x31 is not a caller saved register, so no need to specify if we want
    // the stack or zero.
    saved_registers[i] = xreg(PopLowestIndexAsCode(&register_list));
  }
  for (int i = 0; i < kNumberOfCalleeSavedVRegisters; i++) {
    saved_fpregisters[i] = dreg_bits(PopLowestIndexAsCode(&fpregister_list));
  }
  int64_t original_stack = sp();
  int64_t original_fp = fp();
#endif
  // Start the simulation!
  Run();
#ifdef DEBUG
  DCHECK_EQ(original_stack, sp());
  DCHECK_EQ(original_fp, fp());
  // Check that callee-saved registers have been preserved.
  register_list = kCalleeSaved;
  fpregister_list = kCalleeSavedV;
  for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) {
    DCHECK_EQ(saved_registers[i], xreg(PopLowestIndexAsCode(&register_list)));
  }
  for (int i = 0; i < kNumberOfCalleeSavedVRegisters; i++) {
    DCHECK(saved_fpregisters[i] ==
           dreg_bits(PopLowestIndexAsCode(&fpregister_list)));
  }

  // Corrupt caller saved registers minus the return registers.

  // In theory x0 to x7 can be used for return values, but V8 only uses x0, x1
  // for now.
  register_list = kCallerSaved;
  register_list.Remove(x0);
  register_list.Remove(x1);

  // In theory d0 to d7 can be used for return values, but V8 only uses d0
  // for now.
  fpregister_list = kCallerSavedV;
  fpregister_list.Remove(d0);

  CorruptRegisters(&register_list, kCallerSavedRegisterCorruptionValue);
  CorruptRegisters(&fpregister_list, kCallerSavedVRegisterCorruptionValue);
#endif
}
262
263#ifdef DEBUG
264// The least significant byte of the curruption value holds the corresponding
265// register's code.
266void Simulator::CorruptRegisters(CPURegList* list, uint64_t value) {
267 if (list->type() == CPURegister::kRegister) {
268 while (!list->IsEmpty()) {
269 unsigned code = PopLowestIndexAsCode(list);
270 set_xreg(code, value | code);
271 }
272 } else {
273 DCHECK_EQ(list->type(), CPURegister::kVRegister);
274 while (!list->IsEmpty()) {
275 unsigned code = PopLowestIndexAsCode(list);
276 set_dreg_bits(code, value | code);
277 }
278 }
279}
280
281void Simulator::CorruptAllCallerSavedCPURegisters() {
282 // Corrupt alters its parameter so copy them first.
283 CPURegList register_list = kCallerSaved;
284 CPURegList fpregister_list = kCallerSavedV;
285
286 CorruptRegisters(&register_list, kCallerSavedRegisterCorruptionValue);
287 CorruptRegisters(&fpregister_list, kCallerSavedVRegisterCorruptionValue);
288}
289#endif
290
291// Extending the stack by 2 * 64 bits is required for stack alignment purposes.
292uintptr_t Simulator::PushAddress(uintptr_t address) {
293 DCHECK(sizeof(uintptr_t) < 2 * kXRegSize);
294 intptr_t new_sp = sp() - 2 * kXRegSize;
295 uintptr_t* alignment_slot = reinterpret_cast<uintptr_t*>(new_sp + kXRegSize);
296 memcpy(alignment_slot, &kSlotsZapValue, kSystemPointerSize);
297 uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
298 memcpy(stack_slot, &address, kSystemPointerSize);
299 set_sp(new_sp);
300 return new_sp;
301}
302
303uintptr_t Simulator::PopAddress() {
304 intptr_t current_sp = sp();
305 uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
306 uintptr_t address = *stack_slot;
307 DCHECK_LT(sizeof(uintptr_t), 2 * kXRegSize);
308 set_sp(current_sp + 2 * kXRegSize);
309 return address;
310}
311
312// Returns the limit of the stack area to enable checking for stack overflows.
313uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
314 // The simulator uses a separate JS stack. If we have exhausted the C stack,
315 // we also drop down the JS limit to reflect the exhaustion on the JS stack.
316 if (base::Stack::GetCurrentStackPosition() < c_limit) {
317 return get_sp();
318 }
319
320 // Otherwise the limit is the JS stack. Leave a safety margin to prevent
321 // overrunning the stack when pushing values.
322 return stack_limit_ + kAdditionalStackMargin;
323}
324
325uintptr_t Simulator::StackBase() const {
326 uintptr_t result = stack_ + AllocatedStackSize() - kStackProtectionSize;
327 // The stack base is 16-byte aligned.
328 return result & ~0xFULL;
329}
330
331void Simulator::SetStackLimit(uintptr_t limit) {
332 stack_limit_ = static_cast<uintptr_t>(limit - kAdditionalStackMargin);
333}
334
335base::Vector<uint8_t> Simulator::GetCentralStackView() const {
336 // We do not add an additional safety margin as above in
337 // Simulator::StackLimit, as users of this method are expected to add their
338 // own margin.
339 return base::VectorOf(
340 reinterpret_cast<uint8_t*>(stack_ + kStackProtectionSize),
341 UsableStackSize());
342}
343
344// We touch the stack, which may or may not have been initialized properly. Msan
345// reports here are not interesting.
346DISABLE_MSAN void Simulator::IterateRegistersAndStack(
348 for (int i = 0; i < kNumberOfRegisters; ++i) {
349 visitor->VisitPointer(reinterpret_cast<const void*>(xreg(i)));
350 }
351 for (const void* const* current =
352 reinterpret_cast<const void* const*>(get_sp());
354 const void* address = *current;
355 if (address == nullptr) {
356 continue;
357 }
358 visitor->VisitPointer(address);
359 }
360}
361
362void Simulator::SetRedirectInstruction(Instruction* instruction) {
363 instruction->SetInstructionBits(
365}
366
// Constructs a simulator that executes via the dispatching `decoder`,
// allowing extra visitors (e.g. a disassembler) to run before execution.
// Used when trace_sim/debug_sim is enabled (see Simulator::current).
Simulator::Simulator(Decoder<DispatchingDecoderVisitor>* decoder,
                     Isolate* isolate, FILE* stream)
    : decoder_(decoder),
      last_debugger_input_(nullptr),
      log_parameters_(NO_PARAM),
      icount_for_stop_sim_at_(0),
      isolate_(isolate) {
  // Setup the decoder.
  decoder_->AppendVisitor(this);

  Init(stream);

  if (v8_flags.trace_sim) {
    // Disassemble each instruction just before this simulator executes it.
    decoder_->InsertVisitorBefore(print_disasm_, this);
    log_parameters_ = LOG_ALL;
  }
}
385
// Default constructor used on the fast, non-tracing path where the
// Decoder<Simulator> subclass is the simulator itself (no dispatching
// decoder; decoder_ stays null).
Simulator::Simulator()
    : decoder_(nullptr),
      last_debugger_input_(nullptr),
      log_parameters_(NO_PARAM),
      isolate_(nullptr) {
  Init(stdout);
  // Tracing needs the dispatching-decoder constructor above.
  CHECK(!v8_flags.trace_sim);
}
395
// Shared initialization for both constructors: resets register state,
// allocates the simulated stack, and sets up tracing/disassembly and the
// global exclusive-access monitor. Order matters: the stack must exist
// before set_sp(StackBase()).
void Simulator::Init(FILE* stream) {
  ResetState();

  // Allocate and setup the simulator stack.
  size_t stack_size = AllocatedStackSize();

  // new uint8_t[n]() value-initializes, so the stack starts zeroed.
  stack_ = reinterpret_cast<uintptr_t>(new uint8_t[stack_size]());
  stack_limit_ = stack_ + kStackProtectionSize;
  set_sp(StackBase());

  stream_ = stream;
  print_disasm_ = new PrintDisassembler(stream_);

  // The debugger needs to disassemble code without the simulator executing an
  // instruction, so we create a dedicated decoder.
  disassembler_decoder_ = new Decoder<DispatchingDecoderVisitor>();
  disassembler_decoder_->AppendVisitor(print_disasm_);

  global_monitor_ = GlobalMonitor::Get();
  global_monitor_->PrependProcessor(&global_monitor_processor_);

  // Enabling deadlock detection while simulating is too slow.
  SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kIgnore);
}
420
// Resets all simulated CPU state: system registers to their architectural
// defaults, GP/FP registers to recognizable garbage, and debug bookkeeping.
void Simulator::ResetState() {
  // Reset the system registers.
  nzcv_ = SimSystemRegister::DefaultValueFor(NZCV);
  fpcr_ = SimSystemRegister::DefaultValueFor(FPCR);

  // Fill general-purpose registers with a recognizable garbage value
  // (0xBADBEEF) so reads of never-written registers are easy to spot.
  pc_ = nullptr;
  for (unsigned i = 0; i < kNumberOfRegisters; i++) {
    set_xreg(i, 0xBADBEEF);
  }
  for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
    // Set FP registers to a value that is NaN in both 32-bit and 64-bit FP.
    set_dreg_bits(i, 0x7FF000007F800001UL);
  }
  // Returning to address 0 exits the Simulator.
  set_lr(kEndOfSimAddress);

  // Reset debug helpers.
  breakpoints_.clear();
  break_on_next_ = false;

  btype_ = DefaultBType;
}
444
// Tears down everything Init() created: unregisters from the global
// exclusive-access monitor, then releases the stack and the owned
// decoder/disassembler objects.
Simulator::~Simulator() {
  global_monitor_->RemoveProcessor(&global_monitor_processor_);
  delete[] reinterpret_cast<uint8_t*>(stack_);
  delete disassembler_decoder_;
  delete print_disasm_;
  // decoder_ may be null (default constructor); delete handles that.
  delete decoder_;
}
452
453void Simulator::Run() {
454 // Flush any written registers before executing anything, so that
455 // manually-set registers are logged _before_ the first instruction.
456 LogAllWrittenRegisters();
457
458 pc_modified_ = false;
459
460 if (v8_flags.stop_sim_at == 0) {
461 // Fast version of the dispatch loop without checking whether the simulator
462 // should be stopping at a particular executed instruction.
463 while (pc_ != kEndOfSimAddress) {
464 ExecuteInstruction();
465 }
466 } else {
467 // v8_flags.stop_sim_at is at the non-default value. Stop in the debugger
468 // when we reach the particular instruction count.
469 while (pc_ != kEndOfSimAddress) {
470 icount_for_stop_sim_at_ =
471 base::AddWithWraparound(icount_for_stop_sim_at_, 1);
472 if (icount_for_stop_sim_at_ == v8_flags.stop_sim_at) {
473 Debug();
474 }
475 ExecuteInstruction();
476 }
477 }
478}
479
// Sets the PC to `start` and simulates until execution returns to
// kEndOfSimAddress.
void Simulator::RunFrom(Instruction* start) {
  set_pc(start);
  Run();
}
484
// Calls into the V8 runtime are based on this very simple interface.
// Note: To be able to return two values from some calls the code in runtime.cc
// uses the ObjectPair structure.
// The simulator assumes all runtime calls return two 64-bits values. If they
// don't, register x1 is clobbered. This is fine because x1 is caller-saved.
#if defined(V8_OS_WIN)
// On Windows hosts, a function returning an aggregate (ObjectPair) uses a
// different calling convention than one returning a primitive, so a separate
// signature returning a plain int64_t is needed (see BUILTIN_CALL handling).
using SimulatorRuntimeCall_ReturnPtr = int64_t (*)(
    int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3, int64_t arg4,
    int64_t arg5, int64_t arg6, int64_t arg7, int64_t arg8, int64_t arg9,
    int64_t arg10, int64_t arg11, int64_t arg12, int64_t arg13, int64_t arg14,
    int64_t arg15, int64_t arg16, int64_t arg17, int64_t arg18, int64_t arg19);
#endif

// Generic runtime call: 20 integer arguments with a two-value (ObjectPair)
// return delivered in x0/x1.
using SimulatorRuntimeCall = ObjectPair (*)(
    int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3, int64_t arg4,
    int64_t arg5, int64_t arg6, int64_t arg7, int64_t arg8, int64_t arg9,
    int64_t arg10, int64_t arg11, int64_t arg12, int64_t arg13, int64_t arg14,
    int64_t arg15, int64_t arg16, int64_t arg17, int64_t arg18, int64_t arg19);

// Specialized signatures for the typed builtin call kinds dispatched in
// DoRuntimeCall.
using SimulatorRuntimeCompareCall = int64_t (*)(double arg1, double arg2);
using SimulatorRuntimeFPFPCall = double (*)(double arg1, double arg2);
using SimulatorRuntimeFPCall = double (*)(double arg1);
using SimulatorRuntimeFPIntCall = double (*)(double arg1, int32_t arg2);
using SimulatorRuntimeIntFPCall = int32_t (*)(double darg0);
// Define four args for future flexibility; at the time of this writing only
// one is ever used.
using SimulatorRuntimeFPTaggedCall = double (*)(int64_t arg0, int64_t arg1,
                                                int64_t arg2, int64_t arg3);

// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
using SimulatorRuntimeDirectApiCall = void (*)(int64_t arg0);

// This signature supports direct call to accessor getter callback.
using SimulatorRuntimeDirectGetterCall = void (*)(int64_t arg0, int64_t arg1);
520
// Separate for fine-grained UBSan blocklisting. Casting any given C++
// function to {SimulatorRuntimeCall} is undefined behavior; but since
// the target function can indeed be any function that's exposed via
// the "fast C call" mechanism, we can't reconstruct its signature here.
ObjectPair UnsafeGenericFunctionCall(
    int64_t function, int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3,
    int64_t arg4, int64_t arg5, int64_t arg6, int64_t arg7, int64_t arg8,
    int64_t arg9, int64_t arg10, int64_t arg11, int64_t arg12, int64_t arg13,
    int64_t arg14, int64_t arg15, int64_t arg16, int64_t arg17, int64_t arg18,
    int64_t arg19) {
  // Reinterpret the raw address as the generic 20-argument signature and
  // forward all arguments unchanged; unused trailing args are ignored by
  // the callee.
  SimulatorRuntimeCall target =
      reinterpret_cast<SimulatorRuntimeCall>(function);
  return target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9,
                arg10, arg11, arg12, arg13, arg14, arg15, arg16, arg17, arg18,
                arg19);
}
537
// Zero-argument variant of the mixed-type runtime call signatures generated
// by the macro machinery below.
using MixedRuntimeCall_0 = AnyCType (*)();

// FMT helper for REP_N: expands to `ident[N]` (indexes the args array).
#define BRACKETS(ident, N) ident[N]

// REP_N(expr, FMT) expands to the comma-separated list
// FMT(expr, 0), ..., FMT(expr, N-1); used to stamp out both parameter lists
// and argument lists for every supported arity.
#define REP_0(expr, FMT)
#define REP_1(expr, FMT) FMT(expr, 0)
#define REP_2(expr, FMT) REP_1(expr, FMT), FMT(expr, 1)
#define REP_3(expr, FMT) REP_2(expr, FMT), FMT(expr, 2)
#define REP_4(expr, FMT) REP_3(expr, FMT), FMT(expr, 3)
#define REP_5(expr, FMT) REP_4(expr, FMT), FMT(expr, 4)
#define REP_6(expr, FMT) REP_5(expr, FMT), FMT(expr, 5)
#define REP_7(expr, FMT) REP_6(expr, FMT), FMT(expr, 6)
#define REP_8(expr, FMT) REP_7(expr, FMT), FMT(expr, 7)
#define REP_9(expr, FMT) REP_8(expr, FMT), FMT(expr, 8)
#define REP_10(expr, FMT) REP_9(expr, FMT), FMT(expr, 9)
#define REP_11(expr, FMT) REP_10(expr, FMT), FMT(expr, 10)
#define REP_12(expr, FMT) REP_11(expr, FMT), FMT(expr, 11)
#define REP_13(expr, FMT) REP_12(expr, FMT), FMT(expr, 12)
#define REP_14(expr, FMT) REP_13(expr, FMT), FMT(expr, 13)
#define REP_15(expr, FMT) REP_14(expr, FMT), FMT(expr, 14)
#define REP_16(expr, FMT) REP_15(expr, FMT), FMT(expr, 15)
#define REP_17(expr, FMT) REP_16(expr, FMT), FMT(expr, 16)
#define REP_18(expr, FMT) REP_17(expr, FMT), FMT(expr, 17)
#define REP_19(expr, FMT) REP_18(expr, FMT), FMT(expr, 18)
#define REP_20(expr, FMT) REP_19(expr, FMT), FMT(expr, 19)

// Applies V to every supported parameter count, 0..20 (== kMaxCParameters;
// a static_assert in CallAnyCTypeFunction keeps the two in sync).
#define GEN_MAX_PARAM_COUNT(V) \
  V(0)                         \
  V(1)                         \
  V(2)                         \
  V(3)                         \
  V(4)                         \
  V(5)                         \
  V(6)                         \
  V(7)                         \
  V(8)                         \
  V(9)                         \
  V(10)                        \
  V(11)                        \
  V(12)                        \
  V(13)                        \
  V(14)                        \
  V(15)                        \
  V(16)                        \
  V(17)                        \
  V(18)                        \
  V(19)                        \
  V(20)

// Declares MixedRuntimeCall_N: a function pointer taking N AnyCType args.
#define MIXED_RUNTIME_CALL(N) \
  using MixedRuntimeCall_##N = AnyCType (*)(REP_##N(AnyCType arg, CONCAT));

GEN_MAX_PARAM_COUNT(MIXED_RUNTIME_CALL)
#undef MIXED_RUNTIME_CALL

// Expands to args[0], ..., args[N-1].
#define CALL_ARGS(N) REP_##N(args, BRACKETS)
// One arm of an if/else-if chain: when the signature has exactly N
// parameters, cast target_address to the N-ary signature and call it.
#define CALL_TARGET_VARARG(N)                                   \
  if (signature.ParameterCount() == N) { /* NOLINT */           \
    MixedRuntimeCall_##N target =                               \
        reinterpret_cast<MixedRuntimeCall_##N>(target_address); \
    result = target(CALL_ARGS(N));                              \
  } else /* NOLINT */
600
// Calls a "fast C call" target whose per-parameter types (int vs. float) are
// described by `signature`, gathering arguments from the simulated GP/FP
// registers and stack, then writes the result back to x0 or d0.
void Simulator::CallAnyCTypeFunction(Address target_address,
                                     const EncodedCSignature& signature) {
  TraceSim("Type: mixed types BUILTIN_CALL\n");

  // Two typed views of the same simulated stack memory; slots are consumed
  // via the shared num_stack_params index below.
  const int64_t* stack_pointer = reinterpret_cast<int64_t*>(sp());
  const double* double_stack_pointer = reinterpret_cast<double*>(sp());
  int num_gp_params = 0, num_fp_params = 0, num_stack_params = 0;

  CHECK_LE(signature.ParameterCount(), kMaxCParameters);
  static_assert(sizeof(AnyCType) == 8, "AnyCType is assumed to be 64-bit.");
  AnyCType args[kMaxCParameters];
  // The first 8 parameters of each type (GP or FP) are placed in corresponding
  // registers. The rest are expected to be on the stack, where each parameter
  // type counts on its own. For example a function like:
  // foo(int i1, ..., int i9, float f1, float f2) will use up all 8 GP
  // registers, place i9 on the stack, and place f1 and f2 in FP registers.
  // Source: https://developer.arm.com/documentation/ihi0055/d/, section
  // "Parameter Passing".
  // NOTE(review): num_stack_params is shared between the GP and FP stack
  // views above, so spilled parameters consume stack slots in declaration
  // order regardless of type — verify against the callers' stack layout.
  for (int i = 0; i < signature.ParameterCount(); ++i) {
    if (signature.IsFloat(i)) {
      if (num_fp_params < 8) {
        args[i].double_value = dreg(num_fp_params++);
      } else {
        args[i].double_value = double_stack_pointer[num_stack_params++];
      }
    } else {
      if (num_gp_params < 8) {
        args[i].int64_value = xreg(num_gp_params++);
      } else {
        args[i].int64_value = stack_pointer[num_stack_params++];
      }
    }
  }
  AnyCType result;
  // Expands to an if/else-if chain over all arities 0..20 that performs the
  // actual call (see CALL_TARGET_VARARG above).
  GEN_MAX_PARAM_COUNT(CALL_TARGET_VARARG)
  /* else */ {
    UNREACHABLE();
  }
  static_assert(20 == kMaxCParameters,
                "If you've changed kMaxCParameters, please change the "
                "GEN_MAX_PARAM_COUNT macro.");

#undef CALL_TARGET_VARARG
#undef CALL_ARGS
#undef GEN_MAX_PARAM_COUNT

#ifdef DEBUG
  CorruptAllCallerSavedCPURegisters();
#endif

  // Deliver the return value in the register dictated by its type.
  if (signature.IsReturnFloat()) {
    set_dreg(0, result.double_value);
  } else {
    set_xreg(0, result.int64_value);
  }
}
657
658void Simulator::DoRuntimeCall(Instruction* instr) {
659 Redirection* redirection = Redirection::FromInstruction(instr);
660
661 // The called C code might itself call simulated code, so any
662 // caller-saved registers (including lr) could still be clobbered by a
663 // redirected call.
664 Instruction* return_address = lr();
665
666 int64_t external =
667 reinterpret_cast<int64_t>(redirection->external_function());
668
669 TraceSim("Call to host function at %p\n", redirection->external_function());
670
671 // SP must be 16-byte-aligned at the call interface.
672 bool stack_alignment_exception = ((sp() & 0xF) != 0);
673 if (stack_alignment_exception) {
674 TraceSim(" with unaligned stack 0x%016" PRIx64 ".\n", sp());
675 FATAL("ALIGNMENT EXCEPTION");
676 }
677
678 Address func_addr =
679 reinterpret_cast<Address>(redirection->external_function());
680 SimulatorData* simulator_data = isolate_->simulator_data();
681 DCHECK_NOT_NULL(simulator_data);
682 const EncodedCSignature& signature =
683 simulator_data->GetSignatureForTarget(func_addr);
684 if (signature.IsValid()) {
685 CHECK(redirection->type() == ExternalReference::FAST_C_CALL);
686 CallAnyCTypeFunction(external, signature);
687 set_lr(return_address);
688 set_pc(return_address);
689 return;
690 }
691
692 int64_t* stack_pointer = reinterpret_cast<int64_t*>(sp());
693
694 const int64_t arg0 = xreg(0);
695 const int64_t arg1 = xreg(1);
696 const int64_t arg2 = xreg(2);
697 const int64_t arg3 = xreg(3);
698 const int64_t arg4 = xreg(4);
699 const int64_t arg5 = xreg(5);
700 const int64_t arg6 = xreg(6);
701 const int64_t arg7 = xreg(7);
702 const int64_t arg8 = stack_pointer[0];
703 const int64_t arg9 = stack_pointer[1];
704 const int64_t arg10 = stack_pointer[2];
705 const int64_t arg11 = stack_pointer[3];
706 const int64_t arg12 = stack_pointer[4];
707 const int64_t arg13 = stack_pointer[5];
708 const int64_t arg14 = stack_pointer[6];
709 const int64_t arg15 = stack_pointer[7];
710 const int64_t arg16 = stack_pointer[8];
711 const int64_t arg17 = stack_pointer[9];
712 const int64_t arg18 = stack_pointer[10];
713 const int64_t arg19 = stack_pointer[11];
714 static_assert(kMaxCParameters == 20);
715
716#ifdef V8_USE_MEMORY_SANITIZER
717 // `UnsafeGenericFunctionCall()` dispatches calls to functions with
718 // varying signatures and relies on the fact that the mismatched prototype
719 // used by the caller and the prototype used by the callee (defined using
720 // the `RUNTIME_FUNCTION*()` macros happen to line up so that things more
721 // or less work out [1].
722 //
723 // Unfortunately, this confuses MSan's uninit tracking with eager checks
724 // enabled; it's unclear if these are all false positives or if there are
725 // legitimate reports. For now, unconditionally unpoison args to
726 // unblock finding and fixing more violations with MSan eager checks.
727 //
728 // TODO(crbug.com/v8/14712): Fix the MSan violations and migrate to
729 // something like crrev.com/c/5422076 instead.
730 //
731 // [1] Yes, this is undefined behaviour. 🙈🙉🙊
732 MSAN_MEMORY_IS_INITIALIZED(&arg0, sizeof(arg0));
733 MSAN_MEMORY_IS_INITIALIZED(&arg1, sizeof(arg1));
734 MSAN_MEMORY_IS_INITIALIZED(&arg2, sizeof(arg2));
735 MSAN_MEMORY_IS_INITIALIZED(&arg3, sizeof(arg3));
736 MSAN_MEMORY_IS_INITIALIZED(&arg4, sizeof(arg4));
737 MSAN_MEMORY_IS_INITIALIZED(&arg5, sizeof(arg5));
738 MSAN_MEMORY_IS_INITIALIZED(&arg6, sizeof(arg6));
739 MSAN_MEMORY_IS_INITIALIZED(&arg7, sizeof(arg7));
740 MSAN_MEMORY_IS_INITIALIZED(&arg8, sizeof(arg8));
741 MSAN_MEMORY_IS_INITIALIZED(&arg9, sizeof(arg9));
742 MSAN_MEMORY_IS_INITIALIZED(&arg10, sizeof(arg10));
743 MSAN_MEMORY_IS_INITIALIZED(&arg11, sizeof(arg11));
744 MSAN_MEMORY_IS_INITIALIZED(&arg12, sizeof(arg12));
745 MSAN_MEMORY_IS_INITIALIZED(&arg13, sizeof(arg13));
746 MSAN_MEMORY_IS_INITIALIZED(&arg14, sizeof(arg14));
747 MSAN_MEMORY_IS_INITIALIZED(&arg15, sizeof(arg15));
748 MSAN_MEMORY_IS_INITIALIZED(&arg16, sizeof(arg16));
749 MSAN_MEMORY_IS_INITIALIZED(&arg17, sizeof(arg17));
750 MSAN_MEMORY_IS_INITIALIZED(&arg18, sizeof(arg18));
751 MSAN_MEMORY_IS_INITIALIZED(&arg19, sizeof(arg19));
752#endif // V8_USE_MEMORY_SANITIZER
753
754 switch (redirection->type()) {
755 default:
756 TraceSim("Type: Unknown.\n");
757 UNREACHABLE();
758
759 case ExternalReference::BUILTIN_CALL:
760#if defined(V8_OS_WIN)
761 {
762 // Object f(v8::internal::Arguments).
763 TraceSim("Type: BUILTIN_CALL\n");
764
765 // When this simulator runs on Windows x64 host, function with ObjectPair
766 // return type accepts an implicit pointer to caller allocated memory for
767 // ObjectPair as return value. This diverges the calling convention from
768 // function which returns primitive type, so function returns ObjectPair
769 // and primitive type cannot share implementation.
770
771 // We don't know how many arguments are being passed, but we can
772 // pass 8 without touching the stack. They will be ignored by the
773 // host function if they aren't used.
774 TraceSim(
775 "Arguments: "
776 "0x%016" PRIx64 ", 0x%016" PRIx64
777 ", "
778 "0x%016" PRIx64 ", 0x%016" PRIx64
779 ", "
780 "0x%016" PRIx64 ", 0x%016" PRIx64
781 ", "
782 "0x%016" PRIx64 ", 0x%016" PRIx64
783 ", "
784 "0x%016" PRIx64 ", 0x%016" PRIx64
785 ", "
786 "0x%016" PRIx64 ", 0x%016" PRIx64
787 ", "
788 "0x%016" PRIx64 ", 0x%016" PRIx64
789 ", "
790 "0x%016" PRIx64 ", 0x%016" PRIx64
791 ", "
792 "0x%016" PRIx64 ", 0x%016" PRIx64
793 ", "
794 "0x%016" PRIx64 ", 0x%016" PRIx64,
795 arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10,
796 arg11, arg12, arg13, arg14, arg15, arg16, arg17, arg18, arg19);
797
798 SimulatorRuntimeCall_ReturnPtr target =
799 reinterpret_cast<SimulatorRuntimeCall_ReturnPtr>(external);
800
801 int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7,
802 arg8, arg9, arg10, arg11, arg12, arg13, arg14,
803 arg15, arg16, arg17, arg18, arg19);
804 TraceSim("Returned: 0x%16\n", result);
805#ifdef DEBUG
806 CorruptAllCallerSavedCPURegisters();
807#endif
808 set_xreg(0, result);
809
810 break;
811 }
812#endif
813 case ExternalReference::BUILTIN_CALL_PAIR: {
814 // Object f(v8::internal::Arguments) or
815 // ObjectPair f(v8::internal::Arguments).
816 TraceSim("Type: BUILTIN_CALL\n");
817
818 // We don't know how many arguments are being passed, but we can
819 // pass 8 without touching the stack. They will be ignored by the
820 // host function if they aren't used.
821 TraceSim(
822 "Arguments: "
823 "0x%016" PRIx64 ", 0x%016" PRIx64
824 ", "
825 "0x%016" PRIx64 ", 0x%016" PRIx64
826 ", "
827 "0x%016" PRIx64 ", 0x%016" PRIx64
828 ", "
829 "0x%016" PRIx64 ", 0x%016" PRIx64
830 ", "
831 "0x%016" PRIx64 ", 0x%016" PRIx64
832 ", "
833 "0x%016" PRIx64 ", 0x%016" PRIx64
834 ", "
835 "0x%016" PRIx64 ", 0x%016" PRIx64
836 ", "
837 "0x%016" PRIx64 ", 0x%016" PRIx64
838 ", "
839 "0x%016" PRIx64 ", 0x%016" PRIx64
840 ", "
841 "0x%016" PRIx64 ", 0x%016" PRIx64,
842 arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10,
843 arg11, arg12, arg13, arg14, arg15, arg16, arg17, arg18, arg19);
844
845 ObjectPair result = UnsafeGenericFunctionCall(
846 external, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9,
847 arg10, arg11, arg12, arg13, arg14, arg15, arg16, arg17, arg18, arg19);
848#ifdef V8_USE_MEMORY_SANITIZER
849 // `UnsafeGenericFunctionCall()` dispatches calls to functions with
850 // varying signatures and relies on the fact that the mismatched prototype
851 // used by the caller and the prototype used by the callee (defined using
852 // the `RUNTIME_FUNCTION*()` macros happen to line up so that things more
853 // or less work out [1].
854 //
855 // Unfortunately, this confuses MSan's uninit tracking with eager checks
856 // enabled; it's unclear if these are all false positives or if there are
857 // legitimate reports. For now, unconditionally unpoison `result` to
858 // unblock finding and fixing more violations with MSan eager checks.
859 //
860 // TODO(crbug.com/v8/14712): Fix the MSan violations and migrate to
861 // something like crrev.com/c/5422076 instead.
862 //
863 // [1] Yes, this is undefined behaviour. 🙈🙉🙊
865#endif
866 TraceSim("Returned: {%p, %p}\n", reinterpret_cast<void*>(result.x),
867 reinterpret_cast<void*>(result.y));
868#ifdef DEBUG
869 CorruptAllCallerSavedCPURegisters();
870#endif
871 set_xreg(0, static_cast<int64_t>(result.x));
872 set_xreg(1, static_cast<int64_t>(result.y));
873 break;
874 }
875
876 case ExternalReference::BUILTIN_COMPARE_CALL: {
877 // int f(double, double)
878 TraceSim("Type: BUILTIN_COMPARE_CALL\n");
879 SimulatorRuntimeCompareCall target =
880 reinterpret_cast<SimulatorRuntimeCompareCall>(external);
881 TraceSim("Arguments: %f, %f\n", dreg(0), dreg(1));
882 int64_t result = target(dreg(0), dreg(1));
883 TraceSim("Returned: %" PRId64 "\n", result);
884#ifdef DEBUG
885 CorruptAllCallerSavedCPURegisters();
886#endif
887 set_xreg(0, result);
888 break;
889 }
890
891 case ExternalReference::BUILTIN_FP_CALL: {
892 // double f(double)
893 TraceSim("Type: BUILTIN_FP_CALL\n");
894 SimulatorRuntimeFPCall target =
895 reinterpret_cast<SimulatorRuntimeFPCall>(external);
896 TraceSim("Argument: %f\n", dreg(0));
897 double result = target(dreg(0));
898 TraceSim("Returned: %f\n", result);
899#ifdef DEBUG
900 CorruptAllCallerSavedCPURegisters();
901#endif
902 set_dreg(0, result);
903 break;
904 }
905
906 case ExternalReference::BUILTIN_FP_FP_CALL: {
907 // double f(double, double)
908 TraceSim("Type: BUILTIN_FP_FP_CALL\n");
909 SimulatorRuntimeFPFPCall target =
910 reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
911 TraceSim("Arguments: %f, %f\n", dreg(0), dreg(1));
912 double result = target(dreg(0), dreg(1));
913 TraceSim("Returned: %f\n", result);
914#ifdef DEBUG
915 CorruptAllCallerSavedCPURegisters();
916#endif
917 set_dreg(0, result);
918 break;
919 }
920
921 case ExternalReference::BUILTIN_FP_INT_CALL: {
922 // double f(double, int)
923 TraceSim("Type: BUILTIN_FP_INT_CALL\n");
924 SimulatorRuntimeFPIntCall target =
925 reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
926 TraceSim("Arguments: %f, %d\n", dreg(0), wreg(0));
927 double result = target(dreg(0), wreg(0));
928 TraceSim("Returned: %f\n", result);
929#ifdef DEBUG
930 CorruptAllCallerSavedCPURegisters();
931#endif
932 set_dreg(0, result);
933 break;
934 }
935
936 case ExternalReference::BUILTIN_INT_FP_CALL: {
937 // int f(double)
938 TraceSim("Type: BUILTIN_INT_FP_CALL\n");
939 SimulatorRuntimeIntFPCall target =
940 reinterpret_cast<SimulatorRuntimeIntFPCall>(external);
941 TraceSim("Argument: %f", dreg(0));
942 int32_t result = target(dreg(0));
943 TraceSim("Returned: %d\n", result);
944#ifdef DEBUG
945 CorruptAllCallerSavedCPURegisters();
946#endif
947 set_xreg(0, result);
948 break;
949 }
950
951 case ExternalReference::BUILTIN_FP_POINTER_CALL: {
952 // double f(Address tagged_ptr)
953 TraceSim("Type: BUILTIN_FP_POINTER_CALL\n");
954 SimulatorRuntimeFPTaggedCall target =
955 reinterpret_cast<SimulatorRuntimeFPTaggedCall>(external);
956 TraceSim(
957 "Arguments: "
958 "0x%016" PRIx64 ", 0x%016" PRIx64 ", 0x%016" PRIx64 ", 0x%016" PRIx64,
959 arg0, arg1, arg2, arg3);
960 double result = target(arg0, arg1, arg2, arg3);
961 TraceSim("Returned: %f\n", result);
962#ifdef DEBUG
963 CorruptAllCallerSavedCPURegisters();
964#endif
965 set_dreg(0, result);
966 break;
967 }
968
969 case ExternalReference::DIRECT_API_CALL: {
970 // void f(v8::FunctionCallbackInfo&)
971 TraceSim("Type: DIRECT_API_CALL\n");
972 TraceSim("Arguments: 0x%016" PRIx64 "\n", arg0);
973 SimulatorRuntimeDirectApiCall target =
974 reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
975 target(arg0);
976 TraceSim("No return value.");
977#ifdef DEBUG
978 CorruptAllCallerSavedCPURegisters();
979#endif
980 break;
981 }
982
983 case ExternalReference::DIRECT_GETTER_CALL: {
984 // void f(v8::Local<String> property, v8::PropertyCallbackInfo& info)
985 TraceSim("Type: DIRECT_GETTER_CALL\n");
986 TraceSim("Arguments: 0x%016" PRIx64 ", 0x%016" PRIx64 "\n", arg0, arg1);
987 SimulatorRuntimeDirectGetterCall target =
988 reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
989 target(arg0, arg1);
990 TraceSim("No return value.");
991#ifdef DEBUG
992 CorruptAllCallerSavedCPURegisters();
993#endif
994 break;
995 }
996 }
997
998 set_lr(return_address);
999 set_pc(return_address);
1000}
1001
// Register name tables, indexed by register code. Registers with conventional
// roles use their aliases: ip0/ip1 (x16/x17), cp (x27), fp (x29) and lr (x30).
// The integer tables carry one extra trailing entry so that code 31 can name
// either the zero register or the stack pointer (see {W,X}RegNameForCode).
const char* Simulator::xreg_names[] = {
    "x0",  "x1",  "x2",  "x3",  "x4",  "x5",  "x6",  "x7",  "x8",  "x9",  "x10",
    "x11", "x12", "x13", "x14", "x15", "ip0", "ip1", "x18", "x19", "x20", "x21",
    "x22", "x23", "x24", "x25", "x26", "cp",  "x28", "fp",  "lr",  "xzr", "sp"};

const char* Simulator::wreg_names[] = {
    "w0",  "w1",  "w2",  "w3",  "w4",  "w5",  "w6",  "w7",  "w8",
    "w9",  "w10", "w11", "w12", "w13", "w14", "w15", "w16", "w17",
    "w18", "w19", "w20", "w21", "w22", "w23", "w24", "w25", "w26",
    "wcp", "w28", "wfp", "wlr", "wzr", "wsp"};

// Vector register views: 32 entries each, one per V register.
const char* Simulator::sreg_names[] = {
    "s0",  "s1",  "s2",  "s3",  "s4",  "s5",  "s6",  "s7",  "s8",  "s9",  "s10",
    "s11", "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21",
    "s22", "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31"};

const char* Simulator::dreg_names[] = {
    "d0",  "d1",  "d2",  "d3",  "d4",  "d5",  "d6",  "d7",  "d8",  "d9",  "d10",
    "d11", "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21",
    "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};

const char* Simulator::vreg_names[] = {
    "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",  "v7",  "v8",  "v9",  "v10",
    "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21",
    "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"};
1027
1028const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
1029 static_assert(arraysize(Simulator::wreg_names) == (kNumberOfRegisters + 1),
1030 "Array must be large enough to hold all register names.");
1031 DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
1032 // The modulo operator has no effect here, but it silences a broken GCC
1033 // warning about out-of-bounds array accesses.
1034 code %= kNumberOfRegisters;
1035
1036 // If the code represents the stack pointer, index the name after zr.
1037 if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
1038 code = kZeroRegCode + 1;
1039 }
1040 return wreg_names[code];
1041}
1042
1043const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
1044 static_assert(arraysize(Simulator::xreg_names) == (kNumberOfRegisters + 1),
1045 "Array must be large enough to hold all register names.");
1046 DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
1047 code %= kNumberOfRegisters;
1048
1049 // If the code represents the stack pointer, index the name after zr.
1050 if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
1051 code = kZeroRegCode + 1;
1052 }
1053 return xreg_names[code];
1054}
1055
1056const char* Simulator::SRegNameForCode(unsigned code) {
1057 static_assert(arraysize(Simulator::sreg_names) == kNumberOfVRegisters,
1058 "Array must be large enough to hold all register names.");
1059 DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
1060 return sreg_names[code % kNumberOfVRegisters];
1061}
1062
1063const char* Simulator::DRegNameForCode(unsigned code) {
1064 static_assert(arraysize(Simulator::dreg_names) == kNumberOfVRegisters,
1065 "Array must be large enough to hold all register names.");
1066 DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
1067 return dreg_names[code % kNumberOfVRegisters];
1068}
1069
1070const char* Simulator::VRegNameForCode(unsigned code) {
1071 static_assert(arraysize(Simulator::vreg_names) == kNumberOfVRegisters,
1072 "Array must be large enough to hold all register names.");
1073 DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
1074 return vreg_names[code % kNumberOfVRegisters];
1075}
1076
// Loads one lane of the width implied by `vform` from memory at `addr` and
// inserts it, zero-extended, into lane `index` of the wrapped register.
void LogicVRegister::ReadUintFromMem(VectorFormat vform, int index,
                                     uint64_t addr) const {
  switch (LaneSizeInBitsFromFormat(vform)) {
    case 8:
      register_.Insert(index, SimMemory::Read<uint8_t>(addr));
      break;
    case 16:
      register_.Insert(index, SimMemory::Read<uint16_t>(addr));
      break;
    case 32:
      register_.Insert(index, SimMemory::Read<uint32_t>(addr));
      break;
    case 64:
      register_.Insert(index, SimMemory::Read<uint64_t>(addr));
      break;
    default:
      UNREACHABLE();
  }
}
1096
// Stores lane `index` of the wrapped register, truncated to the lane width
// implied by `vform`, to memory at `addr`.
void LogicVRegister::WriteUintToMem(VectorFormat vform, int index,
                                    uint64_t addr) const {
  switch (LaneSizeInBitsFromFormat(vform)) {
    case 8:
      SimMemory::Write<uint8_t>(addr, static_cast<uint8_t>(Uint(vform, index)));
      break;
    case 16:
      SimMemory::Write<uint16_t>(addr,
                                 static_cast<uint16_t>(Uint(vform, index)));
      break;
    case 32:
      SimMemory::Write<uint32_t>(addr,
                                 static_cast<uint32_t>(Uint(vform, index)));
      break;
    case 64:
      SimMemory::Write<uint64_t>(addr, Uint(vform, index));
      break;
    default:
      UNREACHABLE();
  }
}
1118
1119int Simulator::CodeFromName(const char* name) {
1120 for (unsigned i = 0; i < kNumberOfRegisters; i++) {
1121 if ((strcmp(xreg_names[i], name) == 0) ||
1122 (strcmp(wreg_names[i], name) == 0)) {
1123 return i;
1124 }
1125 }
1126 for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
1127 if ((strcmp(vreg_names[i], name) == 0) ||
1128 (strcmp(dreg_names[i], name) == 0) ||
1129 (strcmp(sreg_names[i], name) == 0)) {
1130 return i;
1131 }
1132 }
1133 if ((strcmp("sp", name) == 0) || (strcmp("wsp", name) == 0)) {
1134 return kSPRegInternalCode;
1135 }
1136 if (strcmp("x16", name) == 0) return CodeFromName("ip0");
1137 if (strcmp("x17", name) == 0) return CodeFromName("ip1");
1138 if (strcmp("x29", name) == 0) return CodeFromName("fp");
1139 if (strcmp("x30", name) == 0) return CodeFromName("lr");
1140 return -1;
1141}
1142
1143// Helpers ---------------------------------------------------------------------
// Computes left + right + carry_in with AArch64 ADC/ADCS semantics. When
// set_flags is true, NZCV is updated to reflect the result and logged.
template <typename T>
T Simulator::AddWithCarry(bool set_flags, T left, T right, int carry_in) {
  // Use unsigned types to avoid implementation-defined overflow behaviour.
  static_assert(std::is_unsigned_v<T>, "operands must be unsigned");
  static_assert((sizeof(T) == kWRegSize) || (sizeof(T) == kXRegSize),
                "Only W- or X-sized operands are tested");

  DCHECK((carry_in == 0) || (carry_in == 1));
  T result = left + right + carry_in;

  if (set_flags) {
    nzcv().SetN(CalcNFlag(result));
    nzcv().SetZ(CalcZFlag(result));

    // Compute the C flag by comparing the result to the max unsigned integer.
    // C is set iff the true sum does not fit in T: either `left` alone exceeds
    // max - carry_in, or `right` exceeds the remaining headroom.
    T max_uint_2op = std::numeric_limits<T>::max() - carry_in;
    nzcv().SetC((left > max_uint_2op) || ((max_uint_2op - left) < right));

    // Overflow iff the sign bit is the same for the two inputs and different
    // for the result.
    T sign_mask = T(1) << (sizeof(T) * 8 - 1);
    T left_sign = left & sign_mask;
    T right_sign = right & sign_mask;
    T result_sign = result & sign_mask;
    nzcv().SetV((left_sign == right_sign) && (left_sign != result_sign));

    LogSystemRegister(NZCV);
  }
  return result;
}
1174
1175template <typename T>
1176void Simulator::AddSubWithCarry(Instruction* instr) {
1177 // Use unsigned types to avoid implementation-defined overflow behaviour.
1178 static_assert(std::is_unsigned_v<T>, "operands must be unsigned");
1179
1180 T op2 = reg<T>(instr->Rm());
1181 T new_val;
1182
1183 if ((instr->Mask(AddSubOpMask) == SUB) || instr->Mask(AddSubOpMask) == SUBS) {
1184 op2 = ~op2;
1185 }
1186
1187 new_val = AddWithCarry<T>(instr->FlagsUpdate(), reg<T>(instr->Rn()), op2,
1188 nzcv().C());
1189
1190 set_reg<T>(instr->Rd(), new_val);
1191}
1192
1193sim_uint128_t Simulator::PolynomialMult128(uint64_t op1, uint64_t op2,
1194 int lane_size_in_bits) const {
1195 DCHECK_LE(static_cast<unsigned>(lane_size_in_bits), kDRegSizeInBits);
1196 sim_uint128_t result = std::make_pair(0, 0);
1197 sim_uint128_t op2q = std::make_pair(0, op2);
1198 for (int i = 0; i < lane_size_in_bits; i++) {
1199 if ((op1 >> i) & 1) {
1200 result = Eor128(result, Lsl128(op2q, i));
1201 }
1202 }
1203 return result;
1204}
1205
1206sim_uint128_t Simulator::Lsl128(sim_uint128_t x, unsigned shift) const {
1207 DCHECK_LE(shift, 64);
1208 if (shift == 0) return x;
1209 if (shift == 64) return std::make_pair(x.second, 0);
1210 uint64_t lo = x.second << shift;
1211 uint64_t hi = (x.first << shift) | (x.second >> (64 - shift));
1212 return std::make_pair(hi, lo);
1213}
1214
1215sim_uint128_t Simulator::Eor128(sim_uint128_t x, sim_uint128_t y) const {
1216 return std::make_pair(x.first ^ y.first, x.second ^ y.second);
1217}
1218
// Applies an LSL/LSR/ASR/ROR shift of `amount` bits to `value`. Logical
// shifts are performed on the unsigned representation to avoid
// implementation-defined behaviour.
template <typename T>
T Simulator::ShiftOperand(T value, Shift shift_type, unsigned amount) {
  using unsignedT = std::make_unsigned_t<T>;

  if (amount == 0) {
    return value;
  }
  // Larger shift {amount}s would be undefined behavior in C++.
  DCHECK(amount < sizeof(value) * kBitsPerByte);

  switch (shift_type) {
    case LSL:
      return static_cast<unsignedT>(value) << amount;
    case LSR:
      return static_cast<unsignedT>(value) >> amount;
    case ASR:
      // NOTE(review): relies on the host compiler implementing signed right
      // shift as arithmetic shift when T is signed (guaranteed only since
      // C++20, but true of all supported toolchains).
      return value >> amount;
    case ROR: {
      // Rotate right: the low `amount` bits wrap around to the top. The
      // `value & mask` operand is converted to unsignedT by the usual
      // arithmetic conversions, so the left shift is well defined.
      unsignedT mask = (static_cast<unsignedT>(1) << amount) - 1;
      return (static_cast<unsignedT>(value) >> amount) |
             ((value & mask) << (sizeof(mask) * 8 - amount));
    }
    default:
      UNIMPLEMENTED();
      return 0;
  }
}
1246
// Applies a UXT*/SXT* extend to `value`, then shifts it left by `left_shift`.
// Sign extension is implemented by shifting the relevant sign bit up to the
// top of the register and arithmetically shifting it back down.
template <typename T>
T Simulator::ExtendValue(T value, Extend extend_type, unsigned left_shift) {
  // Shift distances that move bit 7 / bit 15 / bit 31 to the top bit of T.
  const unsigned kSignExtendBShift = (sizeof(T) - 1) * 8;
  const unsigned kSignExtendHShift = (sizeof(T) - 2) * 8;
  const unsigned kSignExtendWShift = (sizeof(T) - 4) * 8;
  using unsignedT = std::make_unsigned_t<T>;

  switch (extend_type) {
    case UXTB:
      // Zero-extend from bit 7.
      value &= kByteMask;
      break;
    case UXTH:
      // Zero-extend from bit 15.
      value &= kHalfWordMask;
      break;
    case UXTW:
      // Zero-extend from bit 31.
      value &= kWordMask;
      break;
    case SXTB:
      value =
          static_cast<T>(static_cast<unsignedT>(value) << kSignExtendBShift) >>
          kSignExtendBShift;
      break;
    case SXTH:
      value =
          static_cast<T>(static_cast<unsignedT>(value) << kSignExtendHShift) >>
          kSignExtendHShift;
      break;
    case SXTW:
      value =
          static_cast<T>(static_cast<unsignedT>(value) << kSignExtendWShift) >>
          kSignExtendWShift;
      break;
    case UXTX:
    case SXTX:
      // Full-width extends are no-ops.
      break;
    default:
      UNREACHABLE();
  }
  // The unsigned cast keeps the final left shift well defined for negative
  // values.
  return static_cast<T>(static_cast<unsignedT>(value) << left_shift);
}
1287
1288template <typename T>
1289void Simulator::Extract(Instruction* instr) {
1290 unsigned lsb = instr->ImmS();
1291 T op2 = reg<T>(instr->Rm());
1292 T result = op2;
1293
1294 if (lsb) {
1295 T op1 = reg<T>(instr->Rn());
1296 result = op2 >> lsb | (op1 << ((sizeof(T) * 8) - lsb));
1297 }
1298 set_reg<T>(instr->Rd(), result);
1299}
1300
1301void Simulator::FPCompare(double val0, double val1) {
1302 AssertSupportedFPCR();
1303
1304 // TODO(jbramley): This assumes that the C++ implementation handles
1305 // comparisons in the way that we expect (as per AssertSupportedFPCR()).
1306 if ((std::isnan(val0) != 0) || (std::isnan(val1) != 0)) {
1307 nzcv().SetRawValue(FPUnorderedFlag);
1308 } else if (val0 < val1) {
1309 nzcv().SetRawValue(FPLessThanFlag);
1310 } else if (val0 > val1) {
1311 nzcv().SetRawValue(FPGreaterThanFlag);
1312 } else if (val0 == val1) {
1313 nzcv().SetRawValue(FPEqualFlag);
1314 } else {
1315 UNREACHABLE();
1316 }
1317 LogSystemRegister(NZCV);
1318}
1319
// Builds a PrintRegisterFormat for a register of `reg_size` bytes traced as
// lanes of `lane_size` bytes. Equal sizes produce a scalar format; otherwise
// a Q- or D-sized vector annotation is added.
Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormatForSize(
    size_t reg_size, size_t lane_size) {
  DCHECK_GE(reg_size, lane_size);

  // Choose the vector annotation (if any) from the total register size.
  uint32_t format = 0;
  if (reg_size != lane_size) {
    switch (reg_size) {
      default:
        UNREACHABLE();
      case kQRegSize:
        format = kPrintRegAsQVector;
        break;
      case kDRegSize:
        format = kPrintRegAsDVector;
        break;
    }
  }

  // Encode the lane size.
  switch (lane_size) {
    default:
      UNREACHABLE();
    case kQRegSize:
      format |= kPrintReg1Q;
      break;
    case kDRegSize:
      format |= kPrintReg1D;
      break;
    case kSRegSize:
      format |= kPrintReg1S;
      break;
    case kHRegSize:
      format |= kPrintReg1H;
      break;
    case kBRegSize:
      format |= kPrintReg1B;
      break;
  }

  // These sizes would be duplicate case labels.
  static_assert(kXRegSize == kDRegSize, "X and D registers must be same size.");
  static_assert(kWRegSize == kSRegSize, "W and S registers must be same size.");
  static_assert(kPrintXReg == kPrintReg1D,
                "X and D register printing code is shared.");
  static_assert(kPrintWReg == kPrintReg1S,
                "W and S register printing code is shared.");

  return static_cast<PrintRegisterFormat>(format);
}
1368
// Maps a vector format to the print format used by the tracing helpers.
// Scalar formats (kFormatB etc.) map to the single-lane print formats.
Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormat(
    VectorFormat vform) {
  switch (vform) {
    default:
      UNREACHABLE();
    case kFormat16B:
      return kPrintReg16B;
    case kFormat8B:
      return kPrintReg8B;
    case kFormat8H:
      return kPrintReg8H;
    case kFormat4H:
      return kPrintReg4H;
    case kFormat4S:
      return kPrintReg4S;
    case kFormat2S:
      return kPrintReg2S;
    case kFormat2D:
      return kPrintReg2D;
    case kFormat1D:
      return kPrintReg1D;

    case kFormatB:
      return kPrintReg1B;
    case kFormatH:
      return kPrintReg1H;
    case kFormatS:
      return kPrintReg1S;
    case kFormatD:
      return kPrintReg1D;
  }
}
1401
// Like GetPrintRegisterFormat, but for formats whose lanes should be traced
// with a floating-point interpretation (S and D lanes only).
Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormatFP(
    VectorFormat vform) {
  switch (vform) {
    default:
      UNREACHABLE();
    case kFormat4S:
      return kPrintReg4SFP;
    case kFormat2S:
      return kPrintReg2SFP;
    case kFormat2D:
      return kPrintReg2DFP;
    case kFormat1D:
      return kPrintReg1DFP;

    case kFormatS:
      return kPrintReg1SFP;
    case kFormatD:
      return kPrintReg1DFP;
  }
}
1422
1423void Simulator::SetBreakpoint(Instruction* location) {
1424 for (unsigned i = 0; i < breakpoints_.size(); i++) {
1425 if (breakpoints_.at(i).location == location) {
1426 PrintF(stream_, "Existing breakpoint at %p was %s\n",
1427 reinterpret_cast<void*>(location),
1428 breakpoints_.at(i).enabled ? "disabled" : "enabled");
1429 breakpoints_.at(i).enabled = !breakpoints_.at(i).enabled;
1430 return;
1431 }
1432 }
1433 Breakpoint new_breakpoint = {location, true};
1434 breakpoints_.push_back(new_breakpoint);
1435 PrintF(stream_, "Set a breakpoint at %p\n",
1436 reinterpret_cast<void*>(location));
1437}
1438
1439void Simulator::ListBreakpoints() {
1440 PrintF(stream_, "Breakpoints:\n");
1441 for (unsigned i = 0; i < breakpoints_.size(); i++) {
1442 PrintF(stream_, "%p : %s\n",
1443 reinterpret_cast<void*>(breakpoints_.at(i).location),
1444 breakpoints_.at(i).enabled ? "enabled" : "disabled");
1445 }
1446}
1447
1448void Simulator::CheckBreakpoints() {
1449 bool hit_a_breakpoint = false;
1450 for (unsigned i = 0; i < breakpoints_.size(); i++) {
1451 if ((breakpoints_.at(i).location == pc_) && breakpoints_.at(i).enabled) {
1452 hit_a_breakpoint = true;
1453 // Disable this breakpoint.
1454 breakpoints_.at(i).enabled = false;
1455 }
1456 }
1457 if (hit_a_breakpoint) {
1458 PrintF(stream_, "Hit and disabled a breakpoint at %p.\n",
1459 reinterpret_cast<void*>(pc_));
1460 Debug();
1461 }
1462}
1463
void Simulator::CheckBreakNext() {
  // If the current instruction is a branch-and-link to a register (BLR),
  // insert a breakpoint just after it, so that execution stops when the
  // callee returns.
  if (break_on_next_ && pc_->IsBranchAndLinkToRegister()) {
    SetBreakpoint(pc_->following());
    break_on_next_ = false;
  }
}
1471
1472void Simulator::PrintInstructionsAt(Instruction* start, uint64_t count) {
1473 Instruction* end = start->InstructionAtOffset(count * kInstrSize);
1474 for (Instruction* pc = start; pc < end; pc = pc->following()) {
1475 disassembler_decoder_->Decode(pc);
1476 }
1477}
1478
1479void Simulator::PrintWrittenRegisters() {
1480 for (unsigned i = 0; i < kNumberOfRegisters; i++) {
1481 if (registers_[i].WrittenSinceLastLog()) PrintRegister(i);
1482 }
1483}
1484
1485void Simulator::PrintWrittenVRegisters() {
1486 for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
1487 // At this point there is no type information, so print as a raw 1Q.
1488 if (vregisters_[i].WrittenSinceLastLog()) PrintVRegister(i, kPrintReg1Q);
1489 }
1490}
1491
// Prints the current NZCV and FPCR values.
void Simulator::PrintSystemRegisters() {
  PrintSystemRegister(NZCV);
  PrintSystemRegister(FPCR);
}
1496
1497void Simulator::PrintRegisters() {
1498 for (unsigned i = 0; i < kNumberOfRegisters; i++) {
1499 PrintRegister(i);
1500 }
1501}
1502
1503void Simulator::PrintVRegisters() {
1504 for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
1505 // At this point there is no type information, so print as a raw 1Q.
1506 PrintVRegister(i, kPrintReg1Q);
1507 }
1508}
1509
1510void Simulator::PrintRegister(unsigned code, Reg31Mode r31mode) {
1511 registers_[code].NotifyRegisterLogged();
1512
1513 // Don't print writes into xzr.
1514 if ((code == kZeroRegCode) && (r31mode == Reg31IsZeroRegister)) {
1515 return;
1516 }
1517
1518 // The template for all x and w registers:
1519 // "# x{code}: 0x{value}"
1520 // "# w{code}: 0x{value}"
1521
1522 PrintRegisterRawHelper(code, r31mode);
1523 fprintf(stream_, "\n");
1524}
1525
// Print a vector register's name and raw value, most significant byte first.
//
// The `bytes` and `lsb` arguments can be used to limit the bytes that are
// printed. These arguments are intended for use in cases where the register
// hasn't actually been updated (such as in PrintVWrite); bytes outside the
// requested range are rendered as spaces so columns stay aligned.
//
// No newline is printed. This allows the caller to print more details (such as
// a floating-point interpretation or a memory access annotation).
void Simulator::PrintVRegisterRawHelper(unsigned code, int bytes, int lsb) {
  // The template for vector types:
  //   "# v{code}: 0xFFEEDDCCBBAA99887766554433221100".
  // An example with bytes=4 and lsb=8:
  //   "# v{code}:         0xBBAA9988                ".
  fprintf(stream_, "# %s%5s: %s", clr_vreg_name, VRegNameForCode(code),
          clr_vreg_value);

  int msb = lsb + bytes - 1;
  int byte = kQRegSize - 1;

  // Print leading padding spaces. (Two spaces per byte.)
  while (byte > msb) {
    fprintf(stream_, "  ");
    byte--;
  }

  // Print the specified part of the value, byte by byte.
  qreg_t rawbits = qreg(code);
  fprintf(stream_, "0x");
  while (byte >= lsb) {
    fprintf(stream_, "%02x", rawbits.val[byte]);
    byte--;
  }

  // Print trailing padding spaces.
  while (byte >= 0) {
    fprintf(stream_, "  ");
    byte--;
  }
  fprintf(stream_, "%s", clr_normal);
}
1566
// Print each of the specified lanes of a register as a float or double value.
//
// The `lane_count` and `rightmost_lane` arguments can be used to limit the
// lanes that are printed. These arguments are intended for use in cases where
// the register hasn't actually been updated (such as in PrintVWrite).
//
// No newline is printed. This allows the caller to print more details (such as
// a memory access annotation).
void Simulator::PrintVRegisterFPHelper(unsigned code,
                                       unsigned lane_size_in_bytes,
                                       int lane_count, int rightmost_lane) {
  DCHECK((lane_size_in_bytes == kSRegSize) ||
         (lane_size_in_bytes == kDRegSize));

  unsigned msb = (lane_count + rightmost_lane) * lane_size_in_bytes;
  DCHECK_LE(msb, static_cast<unsigned>(kQRegSize));

  // For scalar types ((lane_count == 1) && (rightmost_lane == 0)), a register
  // name is used:
  //   " (s{code}: {value})"
  //   " (d{code}: {value})"
  // For vector types, "..." is used to represent one or more omitted lanes.
  //   " (..., {value}, {value}, ...)"
  if ((lane_count == 1) && (rightmost_lane == 0)) {
    const char* name = (lane_size_in_bytes == kSRegSize)
                           ? SRegNameForCode(code)
                           : DRegNameForCode(code);
    fprintf(stream_, " (%s%s: ", clr_vreg_name, name);
  } else {
    if (msb < (kQRegSize - 1)) {
      fprintf(stream_, " (..., ");
    } else {
      fprintf(stream_, " (");
    }
  }

  // Print the list of values, most significant lane first.
  const char* separator = "";
  int leftmost_lane = rightmost_lane + lane_count - 1;
  for (int lane = leftmost_lane; lane >= rightmost_lane; lane--) {
    double value = (lane_size_in_bytes == kSRegSize)
                       ? vreg(code).Get<float>(lane)
                       : vreg(code).Get<double>(lane);
    fprintf(stream_, "%s%s%#g%s", separator, clr_vreg_value, value, clr_normal);
    separator = ", ";
  }

  if (rightmost_lane > 0) {
    fprintf(stream_, ", ...");
  }
  fprintf(stream_, ")");
}
1619
// Print an integer register's name and raw value.
//
// Only the least-significant `size_in_bytes` bytes of the register are
// printed, but the value is aligned as if the whole register had been printed.
//
// For typical register updates, size_in_bytes should be set to kXRegSize
// -- the default -- so that the whole register is printed. Other values of
// size_in_bytes are intended for use when the register hasn't actually been
// updated (such as in PrintWrite).
//
// No newline is printed. This allows the caller to print more details (such as
// a memory access annotation).
void Simulator::PrintRegisterRawHelper(unsigned code, Reg31Mode r31mode,
                                       int size_in_bytes) {
  // The template for all supported sizes.
  //   "# x{code}: 0xFFEEDDCCBBAA9988"
  //   "# w{code}: 0xBBAA9988"
  //   "# w{code}<15:0>: 0x9988"
  //   "# w{code}<7:0>: 0x88"
  unsigned padding_chars = (kXRegSize - size_in_bytes) * 2;

  // Sub-word sizes add a bit-range suffix to the W register name, and shorten
  // the leading padding by the suffix length to keep columns aligned.
  const char* name = "";
  const char* suffix = "";
  switch (size_in_bytes) {
    case kXRegSize:
      name = XRegNameForCode(code, r31mode);
      break;
    case kWRegSize:
      name = WRegNameForCode(code, r31mode);
      break;
    case 2:
      name = WRegNameForCode(code, r31mode);
      suffix = "<15:0>";
      padding_chars -= strlen(suffix);
      break;
    case 1:
      name = WRegNameForCode(code, r31mode);
      suffix = "<7:0>";
      padding_chars -= strlen(suffix);
      break;
    default:
      UNREACHABLE();
  }
  fprintf(stream_, "# %s%5s%s: ", clr_reg_name, name, suffix);

  // Print leading padding spaces.
  DCHECK_LT(padding_chars, kXRegSize * 2U);
  for (unsigned i = 0; i < padding_chars; i++) {
    putc(' ', stream_);
  }

  // Print the specified bits in hexadecimal format.
  uint64_t bits = reg<uint64_t>(code, r31mode);
  bits &= kXRegMask >> ((kXRegSize - size_in_bytes) * 8);
  static_assert(sizeof(bits) == kXRegSize,
                "X registers and uint64_t must be the same size.");

  int chars = size_in_bytes * 2;
  fprintf(stream_, "%s0x%0*" PRIx64 "%s", clr_reg_value, chars, bits,
          clr_normal);
}
1681
// Prints a vector register according to `format` and marks it as logged.
void Simulator::PrintVRegister(unsigned code, PrintRegisterFormat format) {
  vregisters_[code].NotifyRegisterLogged();

  int lane_size_log2 = format & kPrintRegLaneSizeMask;

  // Derive the total printed size from the vector annotation; without one,
  // the value is a scalar of one lane.
  int reg_size_log2;
  if (format & kPrintRegAsQVector) {
    reg_size_log2 = kQRegSizeLog2;
  } else if (format & kPrintRegAsDVector) {
    reg_size_log2 = kDRegSizeLog2;
  } else {
    // Scalar types.
    reg_size_log2 = lane_size_log2;
  }

  int lane_count = 1 << (reg_size_log2 - lane_size_log2);
  int lane_size = 1 << lane_size_log2;

  // The template for vector types:
  //   "# v{code}: 0x{rawbits} (..., {value}, ...)".
  // The template for scalar types:
  //   "# v{code}: 0x{rawbits} ({reg}:{value})".
  // The values in parentheses after the bit representations are floating-point
  // interpretations. They are displayed only if the kPrintVRegAsFP bit is set.

  PrintVRegisterRawHelper(code);
  if (format & kPrintRegAsFP) {
    PrintVRegisterFPHelper(code, lane_size, lane_count);
  }

  fprintf(stream_, "\n");
}
1714
// Prints the named system register (NZCV or FPCR) field by field.
void Simulator::PrintSystemRegister(SystemRegister id) {
  switch (id) {
    case NZCV:
      fprintf(stream_, "# %sNZCV: %sN:%d Z:%d C:%d V:%d%s\n", clr_flag_name,
              clr_flag_value, nzcv().N(), nzcv().Z(), nzcv().C(), nzcv().V(),
              clr_normal);
      break;
    case FPCR: {
      // Human-readable names for the FPCR rounding mode, indexed by the
      // two-bit RMode field.
      static const char* rmode[] = {
          "0b00 (Round to Nearest)", "0b01 (Round towards Plus Infinity)",
          "0b10 (Round towards Minus Infinity)", "0b11 (Round towards Zero)"};
      DCHECK(fpcr().RMode() < arraysize(rmode));
      fprintf(stream_, "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n",
              clr_flag_name, clr_flag_value, fpcr().AHP(), fpcr().DN(),
              fpcr().FZ(), rmode[fpcr().RMode()], clr_normal);
      break;
    }
    default:
      UNREACHABLE();
  }
}
1736
1737void Simulator::PrintRead(uintptr_t address, unsigned reg_code,
1738 PrintRegisterFormat format) {
1739 registers_[reg_code].NotifyRegisterLogged();
1740
1741 USE(format);
1742
1743 // The template is "# {reg}: 0x{value} <- {address}".
1744 PrintRegisterRawHelper(reg_code, Reg31IsZeroRegister);
1745 fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n", clr_memory_address, address,
1746 clr_normal);
1747}
1748
// Traces a load into a vector register, optionally with a floating-point
// interpretation of the loaded lane(s).
void Simulator::PrintVRead(uintptr_t address, unsigned reg_code,
                           PrintRegisterFormat format, unsigned lane) {
  vregisters_[reg_code].NotifyRegisterLogged();

  // The template is "# v{code}: 0x{rawbits} <- address".
  PrintVRegisterRawHelper(reg_code);
  if (format & kPrintRegAsFP) {
    PrintVRegisterFPHelper(reg_code, GetPrintRegLaneSizeInBytes(format),
                           GetPrintRegLaneCount(format), lane);
  }
  fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n", clr_memory_address, address,
          clr_normal);
}
1762
// Traces a store from an integer register:
//   "# {reg}: 0x{value} -> {address}"
void Simulator::PrintWrite(uintptr_t address, unsigned reg_code,
                           PrintRegisterFormat format) {
  DCHECK_EQ(GetPrintRegLaneCount(format), 1U);

  // The template is "# v{code}: 0x{value} -> {address}". To keep the trace tidy
  // and readable, the value is aligned with the values in the register trace.
  PrintRegisterRawHelper(reg_code, Reg31IsZeroRegister,
                         GetPrintRegSizeInBytes(format));
  fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n", clr_memory_address, address,
          clr_normal);
}
1774
// Traces a store from a vector register; only the stored part of the value is
// shown, padded to stay aligned with the full-register trace lines.
void Simulator::PrintVWrite(uintptr_t address, unsigned reg_code,
                            PrintRegisterFormat format, unsigned lane) {
  // The templates:
  //   "# v{code}: 0x{rawbits} -> {address}"
  //   "# v{code}: 0x{rawbits} (..., {value}, ...) -> {address}".
  //   "# v{code}: 0x{rawbits} ({reg}:{value}) -> {address}"
  // Because this trace doesn't represent a change to the source register's
  // value, only the relevant part of the value is printed. To keep the trace
  // tidy and readable, the raw value is aligned with the other values in the
  // register trace.
  int lane_count = GetPrintRegLaneCount(format);
  int lane_size = GetPrintRegLaneSizeInBytes(format);
  int reg_size = GetPrintRegSizeInBytes(format);
  PrintVRegisterRawHelper(reg_code, reg_size, lane_size * lane);
  if (format & kPrintRegAsFP) {
    PrintVRegisterFPHelper(reg_code, lane_size, lane_count, lane);
  }
  fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n", clr_memory_address, address,
          clr_normal);
}
1795
1796// Visitors---------------------------------------------------------------------
1797
// Decoder fallback for instructions the simulator does not implement; logs
// the offending instruction and aborts.
void Simulator::VisitUnimplemented(Instruction* instr) {
  fprintf(stream_, "Unimplemented instruction at %p: 0x%08" PRIx32 "\n",
          reinterpret_cast<void*>(instr), instr->InstructionBits());
  UNIMPLEMENTED();
}
1803
// Decoder fallback for architecturally unallocated encodings; logs the
// offending instruction and aborts.
void Simulator::VisitUnallocated(Instruction* instr) {
  fprintf(stream_, "Unallocated instruction at %p: 0x%08" PRIx32 "\n",
          reinterpret_cast<void*>(instr), instr->InstructionBits());
  UNIMPLEMENTED();
}
1809
// ADR: writes a PC-relative address to Rd. ADRP is never emitted by the
// assembler and is not implemented.
void Simulator::VisitPCRelAddressing(Instruction* instr) {
  switch (instr->Mask(PCRelAddressingMask)) {
    case ADR:
      set_reg(instr->Rd(), instr->ImmPCOffsetTarget());
      break;
    case ADRP:  // Not implemented in the assembler.
      UNIMPLEMENTED();
    default:
      UNREACHABLE();
  }
}
1821
// B and BL: unconditional PC-relative branches. BL additionally writes the
// return address (the following instruction) to the link register before
// branching.
void Simulator::VisitUnconditionalBranch(Instruction* instr) {
  switch (instr->Mask(UnconditionalBranchMask)) {
    case BL:
      set_lr(instr->following());
      [[fallthrough]];
    case B:
      set_pc(instr->ImmPCOffsetTarget());
      break;
    default:
      UNREACHABLE();
  }
}
1834
1835void Simulator::VisitConditionalBranch(Instruction* instr) {
1836 DCHECK(instr->Mask(ConditionalBranchMask) == B_cond);
1837 if (ConditionPassed(static_cast<Condition>(instr->ConditionBranch()))) {
1838 set_pc(instr->ImmPCOffsetTarget());
1839 }
1840}
1841
// Derives the branch-type state produced by executing `instr`, used for the
// branch-target-identification (BTI) checks.
Simulator::BType Simulator::GetBTypeFromInstruction(
    const Instruction* instr) const {
  switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
    case BLR:
      return BranchAndLink;
    case BR:
      // A BR through x16/x17 is classified together with branches from
      // unguarded pages, as the enum name indicates.
      if (!PcIsInGuardedPage() || (instr->Rn() == 16) || (instr->Rn() == 17)) {
        return BranchFromUnguardedOrToIP;
      }
      return BranchFromGuardedNotToIP;
  }
  // Other encodings (e.g. RET) leave the default branch type.
  return DefaultBType;
}
1855
// BLR/BR/RET: branches to the address held in Rn. BLR also writes the return
// address to lr. The BType state for BTI checking is updated afterwards.
void Simulator::VisitUnconditionalBranchToRegister(Instruction* instr) {
  // Read the target before set_lr below: for BLR with Rn == lr, the branch
  // must use the original lr value.
  Instruction* target = reg<Instruction*>(instr->Rn());
  switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
    case BLR: {
      set_lr(instr->following());
      if (instr->Rn() == 31) {
        // BLR XZR is used as a guard for the constant pool. We should never hit
        // this, but if we do trap to allow debugging.
        Debug();
      }
      [[fallthrough]];
    }
    case BR:
    case RET:
      set_pc(target);
      break;
    default:
      UNIMPLEMENTED();
  }
  set_btype(GetBTypeFromInstruction(instr));
}
1877
1878void Simulator::VisitTestBranch(Instruction* instr) {
1879 unsigned bit_pos =
1880 (instr->ImmTestBranchBit5() << 5) | instr->ImmTestBranchBit40();
1881 bool take_branch = ((xreg(instr->Rt()) & (1ULL << bit_pos)) == 0);
1882 switch (instr->Mask(TestBranchMask)) {
1883 case TBZ:
1884 break;
1885 case TBNZ:
1886 take_branch = !take_branch;
1887 break;
1888 default:
1889 UNIMPLEMENTED();
1890 }
1891 if (take_branch) {
1892 set_pc(instr->ImmPCOffsetTarget());
1893 }
1894}
1895
1896void Simulator::VisitCompareBranch(Instruction* instr) {
1897 unsigned rt = instr->Rt();
1898 bool take_branch = false;
1899 switch (instr->Mask(CompareBranchMask)) {
1900 case CBZ_w:
1901 take_branch = (wreg(rt) == 0);
1902 break;
1903 case CBZ_x:
1904 take_branch = (xreg(rt) == 0);
1905 break;
1906 case CBNZ_w:
1907 take_branch = (wreg(rt) != 0);
1908 break;
1909 case CBNZ_x:
1910 take_branch = (xreg(rt) != 0);
1911 break;
1912 default:
1913 UNIMPLEMENTED();
1914 }
1915 if (take_branch) {
1916 set_pc(instr->ImmPCOffsetTarget());
1917 }
1918}
1919
// Common implementation of ADD/ADDS/SUB/SUBS: combine Rn with the
// pre-shifted/extended second operand op2 and write the result to Rd.
// Subtraction is expressed as Rn + ~op2 + 1 so that AddWithCarry computes the
// NZCV flags uniformly for both operations when FlagsUpdate is set.
template <typename T>
void Simulator::AddSubHelper(Instruction* instr, T op2) {
  // Use unsigned types to avoid implementation-defined overflow behaviour.
  static_assert(std::is_unsigned_v<T>, "operands must be unsigned");

  bool set_flags = instr->FlagsUpdate();
  T new_val = 0;
  Instr operation = instr->Mask(AddSubOpMask);

  switch (operation) {
    case ADD:
    case ADDS: {
      new_val =
          AddWithCarry<T>(set_flags, reg<T>(instr->Rn(), instr->RnMode()), op2);
      break;
    }
    case SUB:
    case SUBS: {
      // SUB is implemented as ADD with the operand inverted and carry-in 1.
      new_val = AddWithCarry<T>(set_flags, reg<T>(instr->Rn(), instr->RnMode()),
                                ~op2, 1);
      break;
    }
    default:
      UNREACHABLE();
  }

  set_reg<T>(instr->Rd(), new_val, instr->RdMode());
}
1948
1949void Simulator::VisitAddSubShifted(Instruction* instr) {
1950 Shift shift_type = static_cast<Shift>(instr->ShiftDP());
1951 unsigned shift_amount = instr->ImmDPShift();
1952
1953 if (instr->SixtyFourBits()) {
1954 uint64_t op2 = ShiftOperand(xreg(instr->Rm()), shift_type, shift_amount);
1955 AddSubHelper(instr, op2);
1956 } else {
1957 uint32_t op2 = ShiftOperand(wreg(instr->Rm()), shift_type, shift_amount);
1958 AddSubHelper(instr, op2);
1959 }
1960}
1961
1962void Simulator::VisitAddSubImmediate(Instruction* instr) {
1963 int64_t op2 = instr->ImmAddSub() << ((instr->ShiftAddSub() == 1) ? 12 : 0);
1964 if (instr->SixtyFourBits()) {
1965 AddSubHelper(instr, static_cast<uint64_t>(op2));
1966 } else {
1967 AddSubHelper(instr, static_cast<uint32_t>(op2));
1968 }
1969}
1970
1971void Simulator::VisitAddSubExtended(Instruction* instr) {
1972 Extend ext = static_cast<Extend>(instr->ExtendMode());
1973 unsigned left_shift = instr->ImmExtendShift();
1974 if (instr->SixtyFourBits()) {
1975 uint64_t op2 = ExtendValue(xreg(instr->Rm()), ext, left_shift);
1976 AddSubHelper(instr, op2);
1977 } else {
1978 uint32_t op2 = ExtendValue(wreg(instr->Rm()), ext, left_shift);
1979 AddSubHelper(instr, op2);
1980 }
1981}
1982
1983void Simulator::VisitAddSubWithCarry(Instruction* instr) {
1984 if (instr->SixtyFourBits()) {
1985 AddSubWithCarry<uint64_t>(instr);
1986 } else {
1987 AddSubWithCarry<uint32_t>(instr);
1988 }
1989}
1990
1991void Simulator::VisitLogicalShifted(Instruction* instr) {
1992 Shift shift_type = static_cast<Shift>(instr->ShiftDP());
1993 unsigned shift_amount = instr->ImmDPShift();
1994
1995 if (instr->SixtyFourBits()) {
1996 uint64_t op2 = ShiftOperand(xreg(instr->Rm()), shift_type, shift_amount);
1997 op2 = (instr->Mask(NOT) == NOT) ? ~op2 : op2;
1998 LogicalHelper(instr, op2);
1999 } else {
2000 uint32_t op2 = ShiftOperand(wreg(instr->Rm()), shift_type, shift_amount);
2001 op2 = (instr->Mask(NOT) == NOT) ? ~op2 : op2;
2002 LogicalHelper(instr, op2);
2003 }
2004}
2005
2006void Simulator::VisitLogicalImmediate(Instruction* instr) {
2007 if (instr->SixtyFourBits()) {
2008 LogicalHelper(instr, static_cast<uint64_t>(instr->ImmLogical()));
2009 } else {
2010 LogicalHelper(instr, static_cast<uint32_t>(instr->ImmLogical()));
2011 }
2012}
2013
// Common implementation of AND/ANDS/ORR/EOR (register and immediate forms):
// combine Rn with op2 and write the result to Rd. Only ANDS updates the flags;
// C and V are always cleared by logical flag-setting instructions.
template <typename T>
void Simulator::LogicalHelper(Instruction* instr, T op2) {
  T op1 = reg<T>(instr->Rn());
  T result = 0;
  bool update_flags = false;

  // Switch on the logical operation, stripping out the NOT bit, as it has a
  // different meaning for logical immediate instructions.
  switch (instr->Mask(LogicalOpMask & ~NOT)) {
    case ANDS:
      update_flags = true;
      [[fallthrough]];
    case AND:
      result = op1 & op2;
      break;
    case ORR:
      result = op1 | op2;
      break;
    case EOR:
      result = op1 ^ op2;
      break;
    default:
      UNIMPLEMENTED();
  }

  if (update_flags) {
    nzcv().SetN(CalcNFlag(result));
    nzcv().SetZ(CalcZFlag(result));
    nzcv().SetC(0);
    nzcv().SetV(0);
    LogSystemRegister(NZCV);
  }

  set_reg<T>(instr->Rd(), result, instr->RdMode());
}
2049
2050void Simulator::VisitConditionalCompareRegister(Instruction* instr) {
2051 if (instr->SixtyFourBits()) {
2052 ConditionalCompareHelper(instr, static_cast<uint64_t>(xreg(instr->Rm())));
2053 } else {
2054 ConditionalCompareHelper(instr, static_cast<uint32_t>(wreg(instr->Rm())));
2055 }
2056}
2057
2058void Simulator::VisitConditionalCompareImmediate(Instruction* instr) {
2059 if (instr->SixtyFourBits()) {
2060 ConditionalCompareHelper(instr, static_cast<uint64_t>(instr->ImmCondCmp()));
2061 } else {
2062 ConditionalCompareHelper(instr, static_cast<uint32_t>(instr->ImmCondCmp()));
2063 }
2064}
2065
// Common implementation of CCMP/CCMN: if the condition holds, set NZCV from
// comparing Rn with op2 (CCMP compares via Rn + ~op2 + 1, CCMN via Rn + op2);
// otherwise load NZCV directly from the instruction's 4-bit immediate.
template <typename T>
void Simulator::ConditionalCompareHelper(Instruction* instr, T op2) {
  // Use unsigned types to avoid implementation-defined overflow behaviour.
  static_assert(std::is_unsigned_v<T>, "operands must be unsigned");

  T op1 = reg<T>(instr->Rn());

  if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
    // If the condition passes, set the status flags to the result of comparing
    // the operands.
    if (instr->Mask(ConditionalCompareMask) == CCMP) {
      AddWithCarry<T>(true, op1, ~op2, 1);
    } else {
      DCHECK(instr->Mask(ConditionalCompareMask) == CCMN);
      AddWithCarry<T>(true, op1, op2, 0);
    }
  } else {
    // If the condition fails, set the status flags to the nzcv immediate.
    nzcv().SetFlags(instr->Nzcv());
    LogSystemRegister(NZCV);
  }
}
2088
2089void Simulator::VisitLoadStoreUnsignedOffset(Instruction* instr) {
2090 int offset = instr->ImmLSUnsigned() << instr->SizeLS();
2091 LoadStoreHelper(instr, offset, Offset);
2092}
2093
// Load/store with an unscaled 9-bit signed immediate offset (LDUR/STUR etc.).
void Simulator::VisitLoadStoreUnscaledOffset(Instruction* instr) {
  LoadStoreHelper(instr, instr->ImmLS(), Offset);
}
2097
// Load/store with pre-index writeback: the base register is updated before
// the access.
void Simulator::VisitLoadStorePreIndex(Instruction* instr) {
  LoadStoreHelper(instr, instr->ImmLS(), PreIndex);
}
2101
// Load/store with post-index writeback: the base register is updated after
// the access.
void Simulator::VisitLoadStorePostIndex(Instruction* instr) {
  LoadStoreHelper(instr, instr->ImmLS(), PostIndex);
}
2105
// Load/store with a register offset: Rm is extended (UXTW/UXTX/SXTW/SXTX)
// and optionally shifted by the access-size log2 before being added to Rn.
void Simulator::VisitLoadStoreRegisterOffset(Instruction* instr) {
  Extend ext = static_cast<Extend>(instr->ExtendMode());
  DCHECK((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));
  unsigned shift_amount = instr->ImmShiftLS() * instr->SizeLS();

  int64_t offset = ExtendValue(xreg(instr->Rm()), ext, shift_amount);
  LoadStoreHelper(instr, offset, Offset);
}
2114
// Common implementation for single-register loads and stores. Computes the
// effective address, notifies the exclusive monitors, performs the access,
// logs it, and applies any pre/post-index writeback. The writeback ordering
// relative to the memory access is deliberate so that a profiler interrupting
// the simulator never observes a stack slot outside [sp, fp].
void Simulator::LoadStoreHelper(Instruction* instr, int64_t offset,
                                AddrMode addrmode) {
  unsigned srcdst = instr->Rt();
  unsigned addr_reg = instr->Rn();
  uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode);
  uintptr_t stack = 0;

  unsigned access_size = 1 << instr->SizeLS();
  // First, check whether the memory is accessible (for wasm trap handling).
  if (!ProbeMemory(address, access_size)) return;

  {
    // Any plain access clears this thread's exclusive reservation and, for
    // stores, other processors' reservations on this address.
    GlobalMonitor::SimulatorMutex lock_guard(global_monitor_);
    if (instr->IsLoad()) {
      local_monitor_.NotifyLoad();
    } else {
      local_monitor_.NotifyStore();
      global_monitor_->NotifyStore_Locked(&global_monitor_processor_);
    }
  }

  // Handle the writeback for stores before the store. On a CPU the writeback
  // and the store are atomic, but when running on the simulator it is possible
  // to be interrupted in between. The simulator is not thread safe and V8 does
  // not require it to be to run JavaScript therefore the profiler may sample
  // the "simulated" CPU in the middle of load/store with writeback. The code
  // below ensures that push operations are safe even when interrupted: the
  // stack pointer will be decremented before adding an element to the stack.
  if (instr->IsStore()) {
    LoadStoreWriteBack(addr_reg, offset, addrmode);

    // For store the address post writeback is used to check access below the
    // stack.
    stack = sp();
  }

  LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreMask));
  switch (op) {
    // Use _no_log variants to suppress the register trace (LOG_REGS,
    // LOG_VREGS). We will print a more detailed log.
    case LDRB_w:
      set_wreg_no_log(srcdst, MemoryRead<uint8_t>(address));
      break;
    case LDRH_w:
      set_wreg_no_log(srcdst, MemoryRead<uint16_t>(address));
      break;
    case LDR_w:
      set_wreg_no_log(srcdst, MemoryRead<uint32_t>(address));
      break;
    case LDR_x:
      set_xreg_no_log(srcdst, MemoryRead<uint64_t>(address));
      break;
    case LDRSB_w:
      set_wreg_no_log(srcdst, MemoryRead<int8_t>(address));
      break;
    case LDRSH_w:
      set_wreg_no_log(srcdst, MemoryRead<int16_t>(address));
      break;
    case LDRSB_x:
      set_xreg_no_log(srcdst, MemoryRead<int8_t>(address));
      break;
    case LDRSH_x:
      set_xreg_no_log(srcdst, MemoryRead<int16_t>(address));
      break;
    case LDRSW_x:
      set_xreg_no_log(srcdst, MemoryRead<int32_t>(address));
      break;
    case LDR_b:
      set_breg_no_log(srcdst, MemoryRead<uint8_t>(address));
      break;
    case LDR_h:
      set_hreg_no_log(srcdst, MemoryRead<uint16_t>(address));
      break;
    case LDR_s:
      set_sreg_no_log(srcdst, MemoryRead<float>(address));
      break;
    case LDR_d:
      set_dreg_no_log(srcdst, MemoryRead<double>(address));
      break;
    case LDR_q:
      set_qreg_no_log(srcdst, MemoryRead<qreg_t>(address));
      break;

    case STRB_w:
      MemoryWrite<uint8_t>(address, wreg(srcdst));
      break;
    case STRH_w:
      MemoryWrite<uint16_t>(address, wreg(srcdst));
      break;
    case STR_w:
      MemoryWrite<uint32_t>(address, wreg(srcdst));
      break;
    case STR_x:
      MemoryWrite<uint64_t>(address, xreg(srcdst));
      break;
    case STR_b:
      MemoryWrite<uint8_t>(address, breg(srcdst));
      break;
    case STR_h:
      MemoryWrite<uint16_t>(address, hreg(srcdst));
      break;
    case STR_s:
      MemoryWrite<float>(address, sreg(srcdst));
      break;
    case STR_d:
      MemoryWrite<double>(address, dreg(srcdst));
      break;
    case STR_q:
      MemoryWrite<qreg_t>(address, qreg(srcdst));
      break;

    default:
      UNIMPLEMENTED();
  }

  // Print a detailed trace (including the memory address) instead of the basic
  // register:value trace generated by set_*reg().
  if (instr->IsLoad()) {
    if ((op == LDR_s) || (op == LDR_d)) {
      LogVRead(address, srcdst, GetPrintRegisterFormatForSizeFP(access_size));
    } else if ((op == LDR_b) || (op == LDR_h) || (op == LDR_q)) {
      LogVRead(address, srcdst, GetPrintRegisterFormatForSize(access_size));
    } else {
      LogRead(address, srcdst, GetPrintRegisterFormatForSize(access_size));
    }
  } else {
    if ((op == STR_s) || (op == STR_d)) {
      LogVWrite(address, srcdst, GetPrintRegisterFormatForSizeFP(access_size));
    } else if ((op == STR_b) || (op == STR_h) || (op == STR_q)) {
      LogVWrite(address, srcdst, GetPrintRegisterFormatForSize(access_size));
    } else {
      LogWrite(address, srcdst, GetPrintRegisterFormatForSize(access_size));
    }
  }

  // Handle the writeback for loads after the load to ensure safe pop
  // operation even when interrupted in the middle of it. The stack pointer
  // is only updated after the load so pop(fp) will never break the invariant
  // sp <= fp expected while walking the stack in the sampler.
  if (instr->IsLoad()) {
    // For loads the address pre writeback is used to check access below the
    // stack.
    stack = sp();

    LoadStoreWriteBack(addr_reg, offset, addrmode);
  }

  // Accesses below the stack pointer (but above the platform stack limit) are
  // not allowed in the ABI.
  CheckMemoryAccess(address, stack);
}
2266
// Load/store pair with a plain (non-writeback) immediate offset.
void Simulator::VisitLoadStorePairOffset(Instruction* instr) {
  LoadStorePairHelper(instr, Offset);
}
2270
// Load/store pair with pre-index writeback of the base register.
void Simulator::VisitLoadStorePairPreIndex(Instruction* instr) {
  LoadStorePairHelper(instr, PreIndex);
}
2274
// Load/store pair with post-index writeback of the base register.
void Simulator::VisitLoadStorePairPostIndex(Instruction* instr) {
  LoadStorePairHelper(instr, PostIndex);
}
2278
2279void Simulator::LoadStorePairHelper(Instruction* instr, AddrMode addrmode) {
2280 unsigned rt = instr->Rt();
2281 unsigned rt2 = instr->Rt2();
2282 unsigned addr_reg = instr->Rn();
2283 size_t access_size = 1ULL << instr->SizeLSPair();
2284 int64_t offset = instr->ImmLSPair() * access_size;
2285 uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode);
2286 uintptr_t address2 = address + access_size;
2287 uintptr_t stack = 0;
2288
2289 {
2290 GlobalMonitor::SimulatorMutex lock_guard(global_monitor_);
2291 if (instr->IsLoad()) {
2292 local_monitor_.NotifyLoad();
2293 } else {
2294 local_monitor_.NotifyStore();
2295 global_monitor_->NotifyStore_Locked(&global_monitor_processor_);
2296 }
2297 }
2298
2299 // Handle the writeback for stores before the store. On a CPU the writeback
2300 // and the store are atomic, but when running on the simulator it is possible
2301 // to be interrupted in between. The simulator is not thread safe and V8 does
2302 // not require it to be to run JavaScript therefore the profiler may sample
2303 // the "simulated" CPU in the middle of load/store with writeback. The code
2304 // below ensures that push operations are safe even when interrupted: the
2305 // stack pointer will be decremented before adding an element to the stack.
2306 if (instr->IsStore()) {
2307 LoadStoreWriteBack(addr_reg, offset, addrmode);
2308
2309 // For store the address post writeback is used to check access below the
2310 // stack.
2311 stack = sp();
2312 }
2313
2314 LoadStorePairOp op =
2315 static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask));
2316
2317 // 'rt' and 'rt2' can only be aliased for stores.
2318 DCHECK(((op & LoadStorePairLBit) == 0) || (rt != rt2));
2319
2320 switch (op) {
2321 // Use _no_log variants to suppress the register trace (LOG_REGS,
2322 // LOG_VREGS). We will print a more detailed log.
2323 case LDP_w: {
2324 DCHECK_EQ(access_size, static_cast<unsigned>(kWRegSize));
2325 set_wreg_no_log(rt, MemoryRead<uint32_t>(address));
2326 set_wreg_no_log(rt2, MemoryRead<uint32_t>(address2));
2327 break;
2328 }
2329 case LDP_s: {
2330 DCHECK_EQ(access_size, static_cast<unsigned>(kSRegSize));
2331 set_sreg_no_log(rt, MemoryRead<float>(address));
2332 set_sreg_no_log(rt2, MemoryRead<float>(address2));
2333 break;
2334 }
2335 case LDP_x: {
2336 DCHECK_EQ(access_size, static_cast<unsigned>(kXRegSize));
2337 set_xreg_no_log(rt, MemoryRead<uint64_t>(address));
2338 set_xreg_no_log(rt2, MemoryRead<uint64_t>(address2));
2339 break;
2340 }
2341 case LDP_d: {
2342 DCHECK_EQ(access_size, static_cast<unsigned>(kDRegSize));
2343 set_dreg_no_log(rt, MemoryRead<double>(address));
2344 set_dreg_no_log(rt2, MemoryRead<double>(address2));
2345 break;
2346 }
2347 case LDP_q: {
2348 DCHECK_EQ(access_size, static_cast<unsigned>(kQRegSize));
2349 set_qreg(rt, MemoryRead<qreg_t>(address), NoRegLog);
2350 set_qreg(rt2, MemoryRead<qreg_t>(address2), NoRegLog);
2351 break;
2352 }
2353 case LDPSW_x: {
2354 DCHECK_EQ(access_size, static_cast<unsigned>(kWRegSize));
2355 set_xreg_no_log(rt, MemoryRead<int32_t>(address));
2356 set_xreg_no_log(rt2, MemoryRead<int32_t>(address2));
2357 break;
2358 }
2359 case STP_w: {
2360 DCHECK_EQ(access_size, static_cast<unsigned>(kWRegSize));
2361 MemoryWrite<uint32_t>(address, wreg(rt));
2362 MemoryWrite<uint32_t>(address2, wreg(rt2));
2363 break;
2364 }
2365 case STP_s: {
2366 DCHECK_EQ(access_size, static_cast<unsigned>(kSRegSize));
2367 MemoryWrite<float>(address, sreg(rt));
2368 MemoryWrite<float>(address2, sreg(rt2));
2369 break;
2370 }
2371 case STP_x: {
2372 DCHECK_EQ(access_size, static_cast<unsigned>(kXRegSize));
2373 MemoryWrite<uint64_t>(address, xreg(rt));
2374 MemoryWrite<uint64_t>(address2, xreg(rt2));
2375 break;
2376 }
2377 case STP_d: {
2378 DCHECK_EQ(access_size, static_cast<unsigned>(kDRegSize));
2379 MemoryWrite<double>(address, dreg(rt));
2380 MemoryWrite<double>(address2, dreg(rt2));
2381 break;
2382 }
2383 case STP_q: {
2384 DCHECK_EQ(access_size, static_cast<unsigned>(kQRegSize));
2385 MemoryWrite<qreg_t>(address, qreg(rt));
2386 MemoryWrite<qreg_t>(address2, qreg(rt2));
2387 break;
2388 }
2389 default:
2390 UNREACHABLE();
2391 }
2392
2393 // Print a detailed trace (including the memory address) instead of the basic
2394 // register:value trace generated by set_*reg().
2395 if (instr->IsLoad()) {
2396 if ((op == LDP_s) || (op == LDP_d)) {
2397 LogVRead(address, rt, GetPrintRegisterFormatForSizeFP(access_size));
2398 LogVRead(address2, rt2, GetPrintRegisterFormatForSizeFP(access_size));
2399 } else if (op == LDP_q) {
2400 LogVRead(address, rt, GetPrintRegisterFormatForSize(access_size));
2401 LogVRead(address2, rt2, GetPrintRegisterFormatForSize(access_size));
2402 } else {
2403 LogRead(address, rt, GetPrintRegisterFormatForSize(access_size));
2404 LogRead(address2, rt2, GetPrintRegisterFormatForSize(access_size));
2405 }
2406 } else {
2407 if ((op == STP_s) || (op == STP_d)) {
2408 LogVWrite(address, rt, GetPrintRegisterFormatForSizeFP(access_size));
2409 LogVWrite(address2, rt2, GetPrintRegisterFormatForSizeFP(access_size));
2410 } else if (op == STP_q) {
2411 LogVWrite(address, rt, GetPrintRegisterFormatForSize(access_size));
2412 LogVWrite(address2, rt2, GetPrintRegisterFormatForSize(access_size));
2413 } else {
2414 LogWrite(address, rt, GetPrintRegisterFormatForSize(access_size));
2415 LogWrite(address2, rt2, GetPrintRegisterFormatForSize(access_size));
2416 }
2417 }
2418
2419 // Handle the writeback for loads after the load to ensure safe pop
2420 // operation even when interrupted in the middle of it. The stack pointer
2421 // is only updated after the load so pop(fp) will never break the invariant
2422 // sp <= fp expected while walking the stack in the sampler.
2423 if (instr->IsLoad()) {
2424 // For loads the address pre writeback is used to check access below the
2425 // stack.
2426 stack = sp();
2427
2428 LoadStoreWriteBack(addr_reg, offset, addrmode);
2429 }
2430
2431 // Accesses below the stack pointer (but above the platform stack limit) are
2432 // not allowed in the ABI.
2433 CheckMemoryAccess(address, stack);
2434}
2435
// Simulates PC-relative literal loads (LDR (literal)) for W, X, S and D
// destinations. Literal loads are plain loads, so they only clear the local
// exclusive reservation.
void Simulator::VisitLoadLiteral(Instruction* instr) {
  uintptr_t address = instr->LiteralAddress();
  unsigned rt = instr->Rt();

  {
    GlobalMonitor::SimulatorMutex lock_guard(global_monitor_);
    local_monitor_.NotifyLoad();
  }

  switch (instr->Mask(LoadLiteralMask)) {
    // Use _no_log variants to suppress the register trace (LOG_REGS,
    // LOG_VREGS), then print a more detailed log.
    case LDR_w_lit:
      set_wreg_no_log(rt, MemoryRead<uint32_t>(address));
      LogRead(address, rt, kPrintWReg);
      break;
    case LDR_x_lit:
      set_xreg_no_log(rt, MemoryRead<uint64_t>(address));
      LogRead(address, rt, kPrintXReg);
      break;
    case LDR_s_lit:
      set_sreg_no_log(rt, MemoryRead<float>(address));
      LogVRead(address, rt, kPrintSReg);
      break;
    case LDR_d_lit:
      set_dreg_no_log(rt, MemoryRead<double>(address));
      LogVRead(address, rt, kPrintDReg);
      break;
    default:
      UNREACHABLE();
  }
}
2468
// Computes the effective address for a load/store. Pre-index and plain-offset
// modes apply the offset up front; post-index modes access the unmodified
// base (the offset is applied later by LoadStoreWriteBack). Using SP as the
// base requires 16-byte alignment, as on hardware with SP alignment checking.
uintptr_t Simulator::LoadStoreAddress(unsigned addr_reg, int64_t offset,
                                      AddrMode addrmode) {
  const unsigned kSPRegCode = kSPRegInternalCode & kRegCodeMask;
  uint64_t address = xreg(addr_reg, Reg31IsStackPointer);
  if ((addr_reg == kSPRegCode) && ((address % 16) != 0)) {
    // When the base register is SP the stack pointer is required to be
    // quadword aligned prior to the address calculation and write-backs.
    // Misalignment will cause a stack alignment fault.
    FATAL("ALIGNMENT EXCEPTION");
  }

  if ((addrmode == Offset) || (addrmode == PreIndex)) {
    address += offset;
  }

  return address;
}
2486
2487void Simulator::LoadStoreWriteBack(unsigned addr_reg, int64_t offset,
2488 AddrMode addrmode) {
2489 if ((addrmode == PreIndex) || (addrmode == PostIndex)) {
2490 DCHECK_NE(offset, 0);
2491 uint64_t address = xreg(addr_reg, Reg31IsStackPointer);
2492 set_reg(addr_reg, address + offset, Reg31IsStackPointer);
2493 }
2494}
2495
2496Simulator::TransactionSize Simulator::get_transaction_size(unsigned size) {
2497 switch (size) {
2498 case 0:
2499 return TransactionSize::None;
2500 case 1:
2501 return TransactionSize::Byte;
2502 case 2:
2503 return TransactionSize::HalfWord;
2504 case 4:
2505 return TransactionSize::Word;
2506 case 8:
2507 return TransactionSize::DoubleWord;
2508 default:
2509 UNREACHABLE();
2510 }
2511}
2512
2513void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
2514 unsigned rt = instr->Rt();
2515 unsigned rn = instr->Rn();
2517 instr->Mask(LoadStoreAcquireReleaseMask));
2518
2519 switch (op) {
2520 case CAS_w:
2521 case CASA_w:
2522 case CASL_w:
2523 case CASAL_w:
2524 CompareAndSwapHelper<uint32_t>(instr);
2525 return;
2526 case CAS_x:
2527 case CASA_x:
2528 case CASL_x:
2529 case CASAL_x:
2530 CompareAndSwapHelper<uint64_t>(instr);
2531 return;
2532 case CASB:
2533 case CASAB:
2534 case CASLB:
2535 case CASALB:
2536 CompareAndSwapHelper<uint8_t>(instr);
2537 return;
2538 case CASH:
2539 case CASAH:
2540 case CASLH:
2541 case CASALH:
2542 CompareAndSwapHelper<uint16_t>(instr);
2543 return;
2544 case CASP_w:
2545 case CASPA_w:
2546 case CASPL_w:
2547 case CASPAL_w:
2548 CompareAndSwapPairHelper<uint32_t>(instr);
2549 return;
2550 case CASP_x:
2551 case CASPA_x:
2552 case CASPL_x:
2553 case CASPAL_x:
2554 CompareAndSwapPairHelper<uint64_t>(instr);
2555 return;
2556 default:
2557 break;
2558 }
2559
2560 int32_t is_acquire_release = instr->LoadStoreXAcquireRelease();
2561 int32_t is_exclusive = (instr->LoadStoreXNotExclusive() == 0);
2562 int32_t is_load = instr->LoadStoreXLoad();
2563 int32_t is_pair = instr->LoadStoreXPair();
2564 USE(is_acquire_release);
2565 USE(is_pair);
2566 DCHECK_NE(is_acquire_release, 0); // Non-acquire/release unimplemented.
2567 DCHECK_EQ(is_pair, 0); // Pair unimplemented.
2568 unsigned access_size = 1 << instr->LoadStoreXSizeLog2();
2569 uintptr_t address = LoadStoreAddress(rn, 0, AddrMode::Offset);
2570 DCHECK_EQ(address % access_size, 0);
2571 // First, check whether the memory is accessible (for wasm trap handling).
2572 if (!ProbeMemory(address, access_size)) return;
2573 GlobalMonitor::SimulatorMutex lock_guard(global_monitor_);
2574
2575 if (is_load != 0) {
2576 if (is_exclusive) {
2577 local_monitor_.NotifyLoadExcl(address, get_transaction_size(access_size));
2578 global_monitor_->NotifyLoadExcl_Locked(address,
2579 &global_monitor_processor_);
2580 } else {
2581 local_monitor_.NotifyLoad();
2582 }
2583 switch (op) {
2584 case LDAR_b:
2585 case LDAXR_b:
2586 set_wreg_no_log(rt, MemoryRead<uint8_t>(address));
2587 break;
2588 case LDAR_h:
2589 case LDAXR_h:
2590 set_wreg_no_log(rt, MemoryRead<uint16_t>(address));
2591 break;
2592 case LDAR_w:
2593 case LDAXR_w:
2594 set_wreg_no_log(rt, MemoryRead<uint32_t>(address));
2595 break;
2596 case LDAR_x:
2597 case LDAXR_x:
2598 set_xreg_no_log(rt, MemoryRead<uint64_t>(address));
2599 break;
2600 default:
2601 UNIMPLEMENTED();
2602 }
2603 LogRead(address, rt, GetPrintRegisterFormatForSize(access_size));
2604 } else {
2605 if (is_exclusive) {
2606 unsigned rs = instr->Rs();
2607 DCHECK_NE(rs, rt);
2608 DCHECK_NE(rs, rn);
2609 if (local_monitor_.NotifyStoreExcl(address,
2610 get_transaction_size(access_size)) &&
2611 global_monitor_->NotifyStoreExcl_Locked(address,
2612 &global_monitor_processor_)) {
2613 switch (op) {
2614 case STLXR_b:
2615 MemoryWrite<uint8_t>(address, wreg(rt));
2616 break;
2617 case STLXR_h:
2618 MemoryWrite<uint16_t>(address, wreg(rt));
2619 break;
2620 case STLXR_w:
2621 MemoryWrite<uint32_t>(address, wreg(rt));
2622 break;
2623 case STLXR_x:
2624 MemoryWrite<uint64_t>(address, xreg(rt));
2625 break;
2626 default:
2627 UNIMPLEMENTED();
2628 }
2629 LogWrite(address, rt, GetPrintRegisterFormatForSize(access_size));
2630 set_wreg(rs, 0);
2631 } else {
2632 set_wreg(rs, 1);
2633 }
2634 } else {
2635 local_monitor_.NotifyStore();
2636 global_monitor_->NotifyStore_Locked(&global_monitor_processor_);
2637 switch (op) {
2638 case STLR_b:
2639 MemoryWrite<uint8_t>(address, wreg(rt));
2640 break;
2641 case STLR_h:
2642 MemoryWrite<uint16_t>(address, wreg(rt));
2643 break;
2644 case STLR_w:
2645 MemoryWrite<uint32_t>(address, wreg(rt));
2646 break;
2647 case STLR_x:
2648 MemoryWrite<uint64_t>(address, xreg(rt));
2649 break;
2650 default:
2651 UNIMPLEMENTED();
2652 }
2653 }
2654 }
2655}
2656
// Simulates CAS/CASA/CASL/CASAL (and the B/H variants via T): atomically
// compare [Rn] with Rs and, on match, store Rt; Rs always receives the value
// read. Acquire/release semantics are approximated with full fences.
template <typename T>
void Simulator::CompareAndSwapHelper(const Instruction* instr) {
  unsigned rs = instr->Rs();
  unsigned rt = instr->Rt();
  unsigned rn = instr->Rn();

  unsigned element_size = sizeof(T);
  uint64_t address = reg<uint64_t>(rn, Reg31IsStackPointer);

  // First, check whether the memory is accessible (for wasm trap handling).
  if (!ProbeMemory(address, element_size)) return;

  // Bit 22 selects the acquire forms (CASA*), bit 15 the release forms
  // (CASL*); CASAL* sets both.
  bool is_acquire = instr->Bit(22) == 1;
  bool is_release = instr->Bit(15) == 1;

  T comparevalue = reg<T>(rs);
  T newvalue = reg<T>(rt);

  // The architecture permits that the data read clears any exclusive monitors
  // associated with that location, even if the compare subsequently fails.
  local_monitor_.NotifyLoad();

  T data = MemoryRead<T>(address);
  if (is_acquire) {
    // Approximate load-acquire by issuing a full barrier after the load.
    std::atomic_thread_fence(std::memory_order_seq_cst);
  }

  if (data == comparevalue) {
    GlobalMonitor::SimulatorMutex lock_guard(global_monitor_);

    if (is_release) {
      local_monitor_.NotifyStore();
      global_monitor_->NotifyStore_Locked(&global_monitor_processor_);
      // Approximate store-release by issuing a full barrier before the store.
      std::atomic_thread_fence(std::memory_order_seq_cst);
    }

    MemoryWrite<T>(address, newvalue);
    LogWrite(address, rt, GetPrintRegisterFormatForSize(element_size));
  }

  // Rs receives the value read from memory whether or not the swap happened.
  set_reg<T>(rs, data);
  LogRead(address, rs, GetPrintRegisterFormatForSize(element_size));
}
2702
// Simulates CASP/CASPA/CASPL/CASPAL: compare the pair {Rs, Rs+1} against the
// two adjacent memory words at [Rn] and, on a full match, store {Rt, Rt+1}.
// {Rs, Rs+1} always receive the values read. T is uint32_t or uint64_t.
template <typename T>
void Simulator::CompareAndSwapPairHelper(const Instruction* instr) {
  DCHECK((sizeof(T) == 4) || (sizeof(T) == 8));
  unsigned rs = instr->Rs();
  unsigned rt = instr->Rt();
  unsigned rn = instr->Rn();

  // The pair forms require even-numbered Rs and Rt.
  DCHECK((rs % 2 == 0) && (rt % 2 == 0));

  unsigned element_size = sizeof(T);
  uint64_t address = reg<uint64_t>(rn, Reg31IsStackPointer);

  uint64_t address2 = address + element_size;

  // First, check whether the memory is accessible (for wasm trap handling).
  if (!ProbeMemory(address, element_size)) return;
  if (!ProbeMemory(address2, element_size)) return;

  // Bit 22 selects the acquire forms, bit 15 the release forms.
  bool is_acquire = instr->Bit(22) == 1;
  bool is_release = instr->Bit(15) == 1;

  T comparevalue_high = reg<T>(rs + 1);
  T comparevalue_low = reg<T>(rs);
  T newvalue_high = reg<T>(rt + 1);
  T newvalue_low = reg<T>(rt);

  // The architecture permits that the data read clears any exclusive monitors
  // associated with that location, even if the compare subsequently fails.
  local_monitor_.NotifyLoad();

  T data_low = MemoryRead<T>(address);
  T data_high = MemoryRead<T>(address2);

  if (is_acquire) {
    // Approximate load-acquire by issuing a full barrier after the load.
    std::atomic_thread_fence(std::memory_order_seq_cst);
  }

  // The swap only happens when both halves match.
  bool same =
      (data_high == comparevalue_high) && (data_low == comparevalue_low);
  if (same) {
    GlobalMonitor::SimulatorMutex lock_guard(global_monitor_);

    if (is_release) {
      local_monitor_.NotifyStore();
      global_monitor_->NotifyStore_Locked(&global_monitor_processor_);
      // Approximate store-release by issuing a full barrier before the store.
      std::atomic_thread_fence(std::memory_order_seq_cst);
    }

    MemoryWrite<T>(address, newvalue_low);
    MemoryWrite<T>(address2, newvalue_high);
  }

  set_reg<T>(rs + 1, data_high);
  set_reg<T>(rs, data_low);

  PrintRegisterFormat format = GetPrintRegisterFormatForSize(element_size);
  LogRead(address, rs, format);
  LogRead(address2, rs + 1, format);

  if (same) {
    LogWrite(address, rt, format);
    LogWrite(address2, rt + 1, format);
  }
}
2769
// Simulates the single-operand atomic read-modify-write instructions
// (LDADD/LDCLR/LDEOR/LDSET/LDSMAX/LDSMIN/LDUMAX/LDUMIN and their
// acquire/release variants): Rt receives the old memory value, and the
// combined result is written back. Signed vs. unsigned min/max is selected by
// instantiating T accordingly.
template <typename T>
void Simulator::AtomicMemorySimpleHelper(const Instruction* instr) {
  unsigned rs = instr->Rs();
  unsigned rt = instr->Rt();
  unsigned rn = instr->Rn();

  // Bit 23 selects acquire semantics (ignored when the destination is XZR,
  // i.e. the ST* alias); bit 22 selects release semantics.
  bool is_acquire = (instr->Bit(23) == 1) && (rt != kZeroRegCode);
  bool is_release = instr->Bit(22) == 1;

  unsigned element_size = sizeof(T);
  uint64_t address = xreg(rn, Reg31IsStackPointer);
  DCHECK_EQ(address % element_size, 0);

  // First, check whether the memory is accessible (for wasm trap handling).
  if (!ProbeMemory(address, element_size)) return;

  local_monitor_.NotifyLoad();

  T value = reg<T>(rs);

  T data = MemoryRead<T>(address);

  if (is_acquire) {
    // Approximate load-acquire by issuing a full barrier after the load.
    std::atomic_thread_fence(std::memory_order_seq_cst);
  }

  T result = 0;
  switch (instr->Mask(AtomicMemorySimpleOpMask)) {
    case LDADDOp:
      result = data + value;
      break;
    case LDCLROp:
      DCHECK(!std::numeric_limits<T>::is_signed);
      result = data & ~value;
      break;
    case LDEOROp:
      DCHECK(!std::numeric_limits<T>::is_signed);
      result = data ^ value;
      break;
    case LDSETOp:
      DCHECK(!std::numeric_limits<T>::is_signed);
      result = data | value;
      break;

    // Signed/Unsigned difference is done via the templated type T.
    case LDSMAXOp:
    case LDUMAXOp:
      result = (data > value) ? data : value;
      break;
    case LDSMINOp:
    case LDUMINOp:
      result = (data > value) ? value : data;
      break;
  }

  if (is_release) {
    GlobalMonitor::SimulatorMutex lock_guard(global_monitor_);
    local_monitor_.NotifyStore();
    global_monitor_->NotifyStore_Locked(&global_monitor_processor_);
    // Approximate store-release by issuing a full barrier before the store.
    std::atomic_thread_fence(std::memory_order_seq_cst);
  }

  MemoryWrite<T>(address, result);
  set_reg<T>(rt, data);

  PrintRegisterFormat format = GetPrintRegisterFormatForSize(element_size);
  LogRead(address, rt, format);
  LogWrite(address, rs, format);
}
2841
// Simulates the atomic swap family (SWP{A}{L} and its B/H/W/X variants):
// atomically loads the old value at [Xn|SP], stores the value of register rs
// to the same address, and writes the loaded value to register rt.
// T selects the access width (uint8_t/uint16_t/uint32_t/uint64_t).
template <typename T>
void Simulator::AtomicMemorySwapHelper(const Instruction* instr) {
  unsigned rs = instr->Rs();  // Holds the new value to store.
  unsigned rt = instr->Rt();  // Receives the previous memory contents.
  unsigned rn = instr->Rn();  // Base address register.

  // Bit 23 is the 'A' (acquire) flag; acquire semantics do not apply when the
  // loaded value is discarded (rt is the zero register).
  bool is_acquire = (instr->Bit(23) == 1) && (rt != kZeroRegCode);
  // Bit 22 is the 'L' (release) flag.
  bool is_release = instr->Bit(22) == 1;

  unsigned element_size = sizeof(T);
  uint64_t address = xreg(rn, Reg31IsStackPointer);

  // First, check whether the memory is accessible (for wasm trap handling).
  if (!ProbeMemory(address, element_size)) return;

  local_monitor_.NotifyLoad();

  T data = MemoryRead<T>(address);
  if (is_acquire) {
    // Approximate load-acquire by issuing a full barrier after the load.
    std::atomic_thread_fence(std::memory_order_seq_cst);
  }

  if (is_release) {
    // The store side must be coordinated with the exclusive monitors.
    GlobalMonitor::SimulatorMutex lock_guard(global_monitor_);
    local_monitor_.NotifyStore();
    global_monitor_->NotifyStore_Locked(&global_monitor_processor_);
    // Approximate store-release by issuing a full barrier before the store.
    std::atomic_thread_fence(std::memory_order_seq_cst);
  }
  MemoryWrite<T>(address, reg<T>(rs));

  set_reg<T>(rt, data);

  // Log both halves of the swap with a register format matching the width.
  PrintRegisterFormat format = GetPrintRegisterFormatForSize(element_size);
  LogRead(address, rt, format);
  LogWrite(address, rs, format);
}
2880
// Atomic read-modify-write operations whose combine step is sign-agnostic or
// operates on unsigned values.
#define ATOMIC_MEMORY_SIMPLE_UINT_LIST(V) \
  V(LDADD)                                \
  V(LDCLR)                                \
  V(LDEOR)                                \
  V(LDSET)                                \
  V(LDUMAX)                               \
  V(LDUMIN)

// Atomic read-modify-write operations whose min/max comparison is signed.
#define ATOMIC_MEMORY_SIMPLE_INT_LIST(V) \
  V(LDSMAX)                              \
  V(LDSMIN)

// Decodes the atomic-memory instruction group and dispatches to the helper
// instantiated with the right signedness and element width (B/H/w/x).
void Simulator::VisitAtomicMemory(Instruction* instr) {
  switch (instr->Mask(AtomicMemoryMask)) {
// clang-format off
// Each SIM_FUNC_* macro expands to the four ordering variants (plain,
// acquire, release, acquire-release) of one operation at one width, so a
// single break-terminated call covers all of them.
#define SIM_FUNC_B(A) \
    case A##B:        \
    case A##AB:       \
    case A##LB:       \
    case A##ALB:
#define SIM_FUNC_H(A) \
    case A##H:        \
    case A##AH:       \
    case A##LH:       \
    case A##ALH:
#define SIM_FUNC_w(A) \
    case A##_w:       \
    case A##A_w:      \
    case A##L_w:      \
    case A##AL_w:
#define SIM_FUNC_x(A) \
    case A##_x:       \
    case A##A_x:      \
    case A##L_x:      \
    case A##AL_x:

    ATOMIC_MEMORY_SIMPLE_UINT_LIST(SIM_FUNC_B)
      AtomicMemorySimpleHelper<uint8_t>(instr);
      break;
    ATOMIC_MEMORY_SIMPLE_INT_LIST(SIM_FUNC_B)
      AtomicMemorySimpleHelper<int8_t>(instr);
      break;
    ATOMIC_MEMORY_SIMPLE_UINT_LIST(SIM_FUNC_H)
      AtomicMemorySimpleHelper<uint16_t>(instr);
      break;
    ATOMIC_MEMORY_SIMPLE_INT_LIST(SIM_FUNC_H)
      AtomicMemorySimpleHelper<int16_t>(instr);
      break;
    ATOMIC_MEMORY_SIMPLE_UINT_LIST(SIM_FUNC_w)
      AtomicMemorySimpleHelper<uint32_t>(instr);
      break;
    ATOMIC_MEMORY_SIMPLE_INT_LIST(SIM_FUNC_w)
      AtomicMemorySimpleHelper<int32_t>(instr);
      break;
    ATOMIC_MEMORY_SIMPLE_UINT_LIST(SIM_FUNC_x)
      AtomicMemorySimpleHelper<uint64_t>(instr);
      break;
    ATOMIC_MEMORY_SIMPLE_INT_LIST(SIM_FUNC_x)
      AtomicMemorySimpleHelper<int64_t>(instr);
      break;
      // clang-format on

    // SWP (swap) has its own helper since it has no combine step.
    case SWPB:
    case SWPAB:
    case SWPLB:
    case SWPALB:
      AtomicMemorySwapHelper<uint8_t>(instr);
      break;
    case SWPH:
    case SWPAH:
    case SWPLH:
    case SWPALH:
      AtomicMemorySwapHelper<uint16_t>(instr);
      break;
    case SWP_w:
    case SWPA_w:
    case SWPL_w:
    case SWPAL_w:
      AtomicMemorySwapHelper<uint32_t>(instr);
      break;
    case SWP_x:
    case SWPA_x:
    case SWPL_x:
    case SWPAL_x:
      AtomicMemorySwapHelper<uint64_t>(instr);
      break;
  }
}
2969
2970void Simulator::CheckMemoryAccess(uintptr_t address, uintptr_t stack) {
2971 if ((address >= stack_limit_) && (address < stack)) {
2972 fprintf(stream_, "ACCESS BELOW STACK POINTER:\n");
2973 fprintf(stream_, " sp is here: 0x%016" PRIx64 "\n",
2974 static_cast<uint64_t>(stack));
2975 fprintf(stream_, " access was here: 0x%016" PRIx64 "\n",
2976 static_cast<uint64_t>(address));
2977 fprintf(stream_, " stack limit is here: 0x%016" PRIx64 "\n",
2978 static_cast<uint64_t>(stack_limit_));
2979 fprintf(stream_, "\n");
2980 FATAL("ACCESS BELOW STACK POINTER");
2981 }
2982}
2983
2984void Simulator::VisitMoveWideImmediate(Instruction* instr) {
2985 MoveWideImmediateOp mov_op =
2986 static_cast<MoveWideImmediateOp>(instr->Mask(MoveWideImmediateMask));
2987 int64_t new_xn_val = 0;
2988
2989 bool is_64_bits = instr->SixtyFourBits() == 1;
2990 // Shift is limited for W operations.
2991 DCHECK(is_64_bits || (instr->ShiftMoveWide() < 2));
2992
2993 // Get the shifted immediate.
2994 int64_t shift = instr->ShiftMoveWide() * 16;
2995 int64_t shifted_imm16 = static_cast<int64_t>(instr->ImmMoveWide()) << shift;
2996
2997 // Compute the new value.
2998 switch (mov_op) {
2999 case MOVN_w:
3000 case MOVN_x: {
3001 new_xn_val = ~shifted_imm16;
3002 if (!is_64_bits) new_xn_val &= kWRegMask;
3003 break;
3004 }
3005 case MOVK_w:
3006 case MOVK_x: {
3007 unsigned reg_code = instr->Rd();
3008 int64_t prev_xn_val = is_64_bits ? xreg(reg_code) : wreg(reg_code);
3009 new_xn_val = (prev_xn_val & ~(INT64_C(0xFFFF) << shift)) | shifted_imm16;
3010 break;
3011 }
3012 case MOVZ_w:
3013 case MOVZ_x: {
3014 new_xn_val = shifted_imm16;
3015 break;
3016 }
3017 default:
3018 UNREACHABLE();
3019 }
3020
3021 // Update the destination register.
3022 set_xreg(instr->Rd(), new_xn_val);
3023}
3024
3025void Simulator::VisitConditionalSelect(Instruction* instr) {
3026 uint64_t new_val = xreg(instr->Rn());
3027 if (ConditionFailed(static_cast<Condition>(instr->Condition()))) {
3028 new_val = xreg(instr->Rm());
3029 switch (instr->Mask(ConditionalSelectMask)) {
3030 case CSEL_w:
3031 case CSEL_x:
3032 break;
3033 case CSINC_w:
3034 case CSINC_x:
3035 new_val++;
3036 break;
3037 case CSINV_w:
3038 case CSINV_x:
3039 new_val = ~new_val;
3040 break;
3041 case CSNEG_w:
3042 case CSNEG_x:
3043 // Simulate two's complement (instead of casting to signed and negating)
3044 // to avoid undefined behavior on signed overflow.
3045 new_val = (~new_val) + 1;
3046 break;
3047 default:
3048 UNIMPLEMENTED();
3049 }
3050 }
3051 if (instr->SixtyFourBits()) {
3052 set_xreg(instr->Rd(), new_val);
3053 } else {
3054 set_wreg(instr->Rd(), static_cast<uint32_t>(new_val));
3055 }
3056}
3057
3058void Simulator::VisitDataProcessing1Source(Instruction* instr) {
3059 unsigned dst = instr->Rd();
3060 unsigned src = instr->Rn();
3061
3062 switch (instr->Mask(DataProcessing1SourceMask)) {
3063 case RBIT_w:
3064 set_wreg(dst, base::bits::ReverseBits(wreg(src)));
3065 break;
3066 case RBIT_x:
3067 set_xreg(dst, base::bits::ReverseBits(xreg(src)));
3068 break;
3069 case REV16_w:
3070 set_wreg(dst, ReverseBytes(wreg(src), 1));
3071 break;
3072 case REV16_x:
3073 set_xreg(dst, ReverseBytes(xreg(src), 1));
3074 break;
3075 case REV_w:
3076 set_wreg(dst, ReverseBytes(wreg(src), 2));
3077 break;
3078 case REV32_x:
3079 set_xreg(dst, ReverseBytes(xreg(src), 2));
3080 break;
3081 case REV_x:
3082 set_xreg(dst, ReverseBytes(xreg(src), 3));
3083 break;
3084 case CLZ_w:
3085 set_wreg(dst, CountLeadingZeros(wreg(src), kWRegSizeInBits));
3086 break;
3087 case CLZ_x:
3088 set_xreg(dst, CountLeadingZeros(xreg(src), kXRegSizeInBits));
3089 break;
3090 case CLS_w: {
3091 set_wreg(dst, CountLeadingSignBits(wreg(src), kWRegSizeInBits));
3092 break;
3093 }
3094 case CLS_x: {
3095 set_xreg(dst, CountLeadingSignBits(xreg(src), kXRegSizeInBits));
3096 break;
3097 }
3098 default:
3099 UNIMPLEMENTED();
3100 }
3101}
3102
// Simulates the two-source data-processing group for one register size T
// (int32_t for W forms, int64_t for X forms): signed/unsigned division and
// variable shifts/rotates.
template <typename T>
void Simulator::DataProcessing2Source(Instruction* instr) {
  Shift shift_op = NO_SHIFT;
  T result = 0;
  switch (instr->Mask(DataProcessing2SourceMask)) {
    case SDIV_w:
    case SDIV_x: {
      T rn = reg<T>(instr->Rn());
      T rm = reg<T>(instr->Rm());
      // MIN / -1 would overflow; the architecture defines the result as MIN
      // rather than trapping.
      if ((rn == std::numeric_limits<T>::min()) && (rm == -1)) {
        result = std::numeric_limits<T>::min();
      } else if (rm == 0) {
        // Division by zero can be trapped, but not on A-class processors.
        result = 0;
      } else {
        result = rn / rm;
      }
      break;
    }
    case UDIV_w:
    case UDIV_x: {
      // Divide on the unsigned bit patterns; the result is stored back
      // through the signed T unchanged bit-for-bit.
      using unsignedT = std::make_unsigned_t<T>;
      unsignedT rn = static_cast<unsignedT>(reg<T>(instr->Rn()));
      unsignedT rm = static_cast<unsignedT>(reg<T>(instr->Rm()));
      if (rm == 0) {
        // Division by zero can be trapped, but not on A-class processors.
        result = 0;
      } else {
        result = rn / rm;
      }
      break;
    }
    case LSLV_w:
    case LSLV_x:
      shift_op = LSL;
      break;
    case LSRV_w:
    case LSRV_x:
      shift_op = LSR;
      break;
    case ASRV_w:
    case ASRV_x:
      shift_op = ASR;
      break;
    case RORV_w:
    case RORV_x:
      shift_op = ROR;
      break;
    default:
      UNIMPLEMENTED();
  }

  if (shift_op != NO_SHIFT) {
    // Shift distance encoded in the least-significant five/six bits of the
    // register.
    unsigned shift = wreg(instr->Rm());
    if (sizeof(T) == kWRegSize) {
      shift &= kShiftAmountWRegMask;
    } else {
      shift &= kShiftAmountXRegMask;
    }
    result = ShiftOperand(reg<T>(instr->Rn()), shift_op, shift);
  }
  set_reg<T>(instr->Rd(), result);
}
3168
3169void Simulator::VisitDataProcessing2Source(Instruction* instr) {
3170 if (instr->SixtyFourBits()) {
3171 DataProcessing2Source<int64_t>(instr);
3172 } else {
3173 DataProcessing2Source<int32_t>(instr);
3174 }
3175}
3176
// Simulates the three-source data-processing group: multiply-add/subtract
// (MADD/MSUB), the 32x32->64 widening variants (SMADDL/SMSUBL/UMADDL/UMSUBL)
// and the high-half multiplies (SMULH/UMULH). The base:: wraparound helpers
// keep the simulator itself free of signed-overflow UB.
void Simulator::VisitDataProcessing3Source(Instruction* instr) {
  int64_t result = 0;
  // Extract and sign- or zero-extend 32-bit arguments for widening operations.
  uint64_t rn_u32 = reg<uint32_t>(instr->Rn());
  uint64_t rm_u32 = reg<uint32_t>(instr->Rm());
  int64_t rn_s32 = reg<int32_t>(instr->Rn());
  int64_t rm_s32 = reg<int32_t>(instr->Rm());
  switch (instr->Mask(DataProcessing3SourceMask)) {
    case MADD_w:
    case MADD_x:
      // Ra + Rn * Rm; the W form is truncated on write-back below.
      result = base::AddWithWraparound(
          xreg(instr->Ra()),
          base::MulWithWraparound(xreg(instr->Rn()), xreg(instr->Rm())));
      break;
    case MSUB_w:
    case MSUB_x:
      result = base::SubWithWraparound(
          xreg(instr->Ra()),
          base::MulWithWraparound(xreg(instr->Rn()), xreg(instr->Rm())));
      break;
    case SMADDL_x:
      // Xa + (Wn * Wm) with sign-extended 32-bit operands; the product of
      // two sign-extended int32s cannot overflow int64.
      result = base::AddWithWraparound(xreg(instr->Ra()), (rn_s32 * rm_s32));
      break;
    case SMSUBL_x:
      result = base::SubWithWraparound(xreg(instr->Ra()), (rn_s32 * rm_s32));
      break;
    case UMADDL_x:
      // Xa + (Wn * Wm) with zero-extended operands; unsigned arithmetic
      // wraps harmlessly.
      result = static_cast<uint64_t>(xreg(instr->Ra())) + (rn_u32 * rm_u32);
      break;
    case UMSUBL_x:
      result = static_cast<uint64_t>(xreg(instr->Ra())) - (rn_u32 * rm_u32);
      break;
    case SMULH_x:
      // Upper 64 bits of the 128-bit signed product.
      DCHECK_EQ(instr->Ra(), kZeroRegCode);
      result =
          base::bits::SignedMulHigh64(xreg(instr->Rn()), xreg(instr->Rm()));
      break;
    case UMULH_x:
      // Upper 64 bits of the 128-bit unsigned product.
      DCHECK_EQ(instr->Ra(), kZeroRegCode);
      result =
          base::bits::UnsignedMulHigh64(xreg(instr->Rn()), xreg(instr->Rm()));
      break;
    default:
      UNIMPLEMENTED();
  }

  if (instr->SixtyFourBits()) {
    set_xreg(instr->Rd(), result);
  } else {
    set_wreg(instr->Rd(), static_cast<int32_t>(result));
  }
}
3229
// Simulates BFM/SBFM/UBFM for register type T (int32_t or int64_t). The
// immediates R and S select the bitfield: for S >= R the field src[S:R] is
// moved down to bit 0; for S < R the source is effectively rotated right by
// R, placing src[S:0] at the top of the result.
template <typename T>
void Simulator::BitfieldHelper(Instruction* instr) {
  using unsignedT = std::make_unsigned_t<T>;
  T reg_size = sizeof(T) * 8;
  T R = instr->ImmR();
  T S = instr->ImmS();
  T diff = S - R;
  T mask;
  if (diff >= 0) {
    // Low-order mask of (diff + 1) bits; guard the shift amount when the
    // field spans the whole register.
    mask = diff < reg_size - 1 ? (static_cast<unsignedT>(1) << (diff + 1)) - 1
                               : static_cast<T>(-1);
  } else {
    // Wrapping case: build the mask for bits [S:0] and rotate it right by R.
    uint64_t umask = ((1ULL << (S + 1)) - 1);
    umask = (umask >> R) | (umask << (reg_size - R));
    mask = static_cast<T>(umask);
    diff += reg_size;  // diff is now the (positive) top bit of the field.
  }

  // inzero indicates if the extracted bitfield is inserted into the
  // destination register value or in zero.
  // If extend is true, extend the sign of the extracted bitfield.
  bool inzero = false;
  bool extend = false;
  switch (instr->Mask(BitfieldMask)) {
    case BFM_x:
    case BFM_w:
      break;
    case SBFM_x:
    case SBFM_w:
      inzero = true;
      extend = true;
      break;
    case UBFM_x:
    case UBFM_w:
      inzero = true;
      break;
    default:
      UNIMPLEMENTED();
  }

  T dst = inzero ? 0 : reg<T>(instr->Rd());
  T src = reg<T>(instr->Rn());
  // Rotate source bitfield into place.
  T result = R == 0 ? src
                    : (static_cast<unsignedT>(src) >> R) |
                          (static_cast<unsignedT>(src) << (reg_size - R));
  // Determine the sign extension: replicate src[S] above the field when
  // extending (SBFM); zero otherwise.
  T topbits_preshift = (static_cast<unsignedT>(1) << (reg_size - diff - 1)) - 1;
  T signbits =
      diff >= reg_size - 1
          ? 0
          : ((extend && ((src >> S) & 1) ? topbits_preshift : 0) << (diff + 1));

  // Merge sign extension, dest/zero and bitfield.
  result = signbits | (result & mask) | (dst & ~mask);

  set_reg<T>(instr->Rd(), result);
}
3288
3289void Simulator::VisitBitfield(Instruction* instr) {
3290 if (instr->SixtyFourBits()) {
3291 BitfieldHelper<int64_t>(instr);
3292 } else {
3293 BitfieldHelper<int32_t>(instr);
3294 }
3295}
3296
3297void Simulator::VisitExtract(Instruction* instr) {
3298 if (instr->SixtyFourBits()) {
3299 Extract<uint64_t>(instr);
3300 } else {
3301 Extract<uint32_t>(instr);
3302 }
3303}
3304
3305void Simulator::VisitFPImmediate(Instruction* instr) {
3306 AssertSupportedFPCR();
3307
3308 unsigned dest = instr->Rd();
3309 switch (instr->Mask(FPImmediateMask)) {
3310 case FMOV_s_imm:
3311 set_sreg(dest, instr->ImmFP32());
3312 break;
3313 case FMOV_d_imm:
3314 set_dreg(dest, instr->ImmFP64());
3315 break;
3316 default:
3317 UNREACHABLE();
3318 }
3319}
3320
// Simulates FP<->integer conversions and FP<->GP register bit moves. For the
// FCVT* instructions the rounding mode is encoded in the mnemonic (A: ties
// away from zero, M: toward minus infinity, N: ties to even, Z: toward
// zero); SCVTF/UCVTF use the dynamic FPCR rounding mode.
void Simulator::VisitFPIntegerConvert(Instruction* instr) {
  AssertSupportedFPCR();

  unsigned dst = instr->Rd();
  unsigned src = instr->Rn();

  // Dynamic rounding mode; only used by the SCVTF/UCVTF cases below.
  FPRounding round = fpcr().RMode();

  switch (instr->Mask(FPIntegerConvertMask)) {
    // FCVTA*: round to nearest, ties away from zero.
    case FCVTAS_ws:
      set_wreg(dst, FPToInt32(sreg(src), FPTieAway));
      break;
    case FCVTAS_xs:
      set_xreg(dst, FPToInt64(sreg(src), FPTieAway));
      break;
    case FCVTAS_wd:
      set_wreg(dst, FPToInt32(dreg(src), FPTieAway));
      break;
    case FCVTAS_xd:
      set_xreg(dst, FPToInt64(dreg(src), FPTieAway));
      break;
    case FCVTAU_ws:
      set_wreg(dst, FPToUInt32(sreg(src), FPTieAway));
      break;
    case FCVTAU_xs:
      set_xreg(dst, FPToUInt64(sreg(src), FPTieAway));
      break;
    case FCVTAU_wd:
      set_wreg(dst, FPToUInt32(dreg(src), FPTieAway));
      break;
    case FCVTAU_xd:
      set_xreg(dst, FPToUInt64(dreg(src), FPTieAway));
      break;
    // FCVTM*: round toward minus infinity (floor).
    case FCVTMS_ws:
      set_wreg(dst, FPToInt32(sreg(src), FPNegativeInfinity));
      break;
    case FCVTMS_xs:
      set_xreg(dst, FPToInt64(sreg(src), FPNegativeInfinity));
      break;
    case FCVTMS_wd:
      set_wreg(dst, FPToInt32(dreg(src), FPNegativeInfinity));
      break;
    case FCVTMS_xd:
      set_xreg(dst, FPToInt64(dreg(src), FPNegativeInfinity));
      break;
    case FCVTMU_ws:
      set_wreg(dst, FPToUInt32(sreg(src), FPNegativeInfinity));
      break;
    case FCVTMU_xs:
      set_xreg(dst, FPToUInt64(sreg(src), FPNegativeInfinity));
      break;
    case FCVTMU_wd:
      set_wreg(dst, FPToUInt32(dreg(src), FPNegativeInfinity));
      break;
    case FCVTMU_xd:
      set_xreg(dst, FPToUInt64(dreg(src), FPNegativeInfinity));
      break;
    // FCVTN*: round to nearest, ties to even.
    case FCVTNS_ws:
      set_wreg(dst, FPToInt32(sreg(src), FPTieEven));
      break;
    case FCVTNS_xs:
      set_xreg(dst, FPToInt64(sreg(src), FPTieEven));
      break;
    case FCVTNS_wd:
      set_wreg(dst, FPToInt32(dreg(src), FPTieEven));
      break;
    case FCVTNS_xd:
      set_xreg(dst, FPToInt64(dreg(src), FPTieEven));
      break;
    case FCVTNU_ws:
      set_wreg(dst, FPToUInt32(sreg(src), FPTieEven));
      break;
    case FCVTNU_xs:
      set_xreg(dst, FPToUInt64(sreg(src), FPTieEven));
      break;
    case FCVTNU_wd:
      set_wreg(dst, FPToUInt32(dreg(src), FPTieEven));
      break;
    case FCVTNU_xd:
      set_xreg(dst, FPToUInt64(dreg(src), FPTieEven));
      break;
    // FCVTZ*: round toward zero (truncate).
    case FCVTZS_ws:
      set_wreg(dst, FPToInt32(sreg(src), FPZero));
      break;
    case FCVTZS_xs:
      set_xreg(dst, FPToInt64(sreg(src), FPZero));
      break;
    case FCVTZS_wd:
      set_wreg(dst, FPToInt32(dreg(src), FPZero));
      break;
    case FCVTZS_xd:
      set_xreg(dst, FPToInt64(dreg(src), FPZero));
      break;
    case FCVTZU_ws:
      set_wreg(dst, FPToUInt32(sreg(src), FPZero));
      break;
    case FCVTZU_xs:
      set_xreg(dst, FPToUInt64(sreg(src), FPZero));
      break;
    case FCVTZU_wd:
      set_wreg(dst, FPToUInt32(dreg(src), FPZero));
      break;
    case FCVTZU_xd:
      set_xreg(dst, FPToUInt64(dreg(src), FPZero));
      break;
    // FJCVTZS: JavaScript convert-to-int32 semantics.
    case FJCVTZS:
      set_wreg(dst, FPToFixedJS(dreg(src)));
      break;
    // FMOV between FP and general registers is a raw bit move.
    case FMOV_ws:
      set_wreg(dst, sreg_bits(src));
      break;
    case FMOV_xd:
      set_xreg(dst, dreg_bits(src));
      break;
    case FMOV_sw:
      set_sreg_bits(dst, wreg(src));
      break;
    case FMOV_dx:
      set_dreg_bits(dst, xreg(src));
      break;

    // A 32-bit input can be handled in the same way as a 64-bit input, since
    // the sign- or zero-extension will not affect the conversion.
    case SCVTF_dx:
      set_dreg(dst, FixedToDouble(xreg(src), 0, round));
      break;
    case SCVTF_dw:
      set_dreg(dst, FixedToDouble(wreg(src), 0, round));
      break;
    case UCVTF_dx:
      set_dreg(dst, UFixedToDouble(xreg(src), 0, round));
      break;
    case UCVTF_dw: {
      set_dreg(dst, UFixedToDouble(reg<uint32_t>(src), 0, round));
      break;
    }
    case SCVTF_sx:
      set_sreg(dst, FixedToFloat(xreg(src), 0, round));
      break;
    case SCVTF_sw:
      set_sreg(dst, FixedToFloat(wreg(src), 0, round));
      break;
    case UCVTF_sx:
      set_sreg(dst, UFixedToFloat(xreg(src), 0, round));
      break;
    case UCVTF_sw: {
      set_sreg(dst, UFixedToFloat(reg<uint32_t>(src), 0, round));
      break;
    }

    default:
      UNREACHABLE();
  }
}
3475
// Simulates fixed-point SCVTF/UCVTF: the integer source is interpreted as
// having 'fbits' fractional bits before conversion to float or double, using
// the dynamic FPCR rounding mode.
void Simulator::VisitFPFixedPointConvert(Instruction* instr) {
  AssertSupportedFPCR();

  unsigned dst = instr->Rd();
  unsigned src = instr->Rn();
  // The scale field encodes (64 - fbits).
  int fbits = 64 - instr->FPScale();

  FPRounding round = fpcr().RMode();

  switch (instr->Mask(FPFixedPointConvertMask)) {
    // A 32-bit input can be handled in the same way as a 64-bit input, since
    // the sign- or zero-extension will not affect the conversion.
    case SCVTF_dx_fixed:
      set_dreg(dst, FixedToDouble(xreg(src), fbits, round));
      break;
    case SCVTF_dw_fixed:
      set_dreg(dst, FixedToDouble(wreg(src), fbits, round));
      break;
    case UCVTF_dx_fixed:
      set_dreg(dst, UFixedToDouble(xreg(src), fbits, round));
      break;
    case UCVTF_dw_fixed: {
      set_dreg(dst, UFixedToDouble(reg<uint32_t>(src), fbits, round));
      break;
    }
    case SCVTF_sx_fixed:
      set_sreg(dst, FixedToFloat(xreg(src), fbits, round));
      break;
    case SCVTF_sw_fixed:
      set_sreg(dst, FixedToFloat(wreg(src), fbits, round));
      break;
    case UCVTF_sx_fixed:
      set_sreg(dst, UFixedToFloat(xreg(src), fbits, round));
      break;
    case UCVTF_sw_fixed: {
      set_sreg(dst, UFixedToFloat(reg<uint32_t>(src), fbits, round));
      break;
    }
    default:
      UNREACHABLE();
  }
}
3518
3519void Simulator::VisitFPCompare(Instruction* instr) {
3520 AssertSupportedFPCR();
3521
3522 switch (instr->Mask(FPCompareMask)) {
3523 case FCMP_s:
3524 FPCompare(sreg(instr->Rn()), sreg(instr->Rm()));
3525 break;
3526 case FCMP_d:
3527 FPCompare(dreg(instr->Rn()), dreg(instr->Rm()));
3528 break;
3529 case FCMP_s_zero:
3530 FPCompare(sreg(instr->Rn()), 0.0f);
3531 break;
3532 case FCMP_d_zero:
3533 FPCompare(dreg(instr->Rn()), 0.0);
3534 break;
3535 default:
3536 UNIMPLEMENTED();
3537 }
3538}
3539
3540void Simulator::VisitFPConditionalCompare(Instruction* instr) {
3541 AssertSupportedFPCR();
3542
3543 switch (instr->Mask(FPConditionalCompareMask)) {
3544 case FCCMP_s:
3545 if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
3546 FPCompare(sreg(instr->Rn()), sreg(instr->Rm()));
3547 } else {
3548 nzcv().SetFlags(instr->Nzcv());
3549 LogSystemRegister(NZCV);
3550 }
3551 break;
3552 case FCCMP_d: {
3553 if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
3554 FPCompare(dreg(instr->Rn()), dreg(instr->Rm()));
3555 } else {
3556 // If the condition fails, set the status flags to the nzcv immediate.
3557 nzcv().SetFlags(instr->Nzcv());
3558 LogSystemRegister(NZCV);
3559 }
3560 break;
3561 }
3562 default:
3563 UNIMPLEMENTED();
3564 }
3565}
3566
3567void Simulator::VisitFPConditionalSelect(Instruction* instr) {
3568 AssertSupportedFPCR();
3569
3570 Instr selected;
3571 if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
3572 selected = instr->Rn();
3573 } else {
3574 selected = instr->Rm();
3575 }
3576
3577 switch (instr->Mask(FPConditionalSelectMask)) {
3578 case FCSEL_s:
3579 set_sreg(instr->Rd(), sreg(selected));
3580 break;
3581 case FCSEL_d:
3582 set_dreg(instr->Rd(), dreg(selected));
3583 break;
3584 default:
3585 UNIMPLEMENTED();
3586 }
3587}
3588
// Simulates single-operand FP instructions. The moves, conversions,
// abs/neg and sqrt cases handle themselves and return early; the FRINT*
// cases only select a rounding mode and fall through to the shared frint()
// call at the bottom.
void Simulator::VisitFPDataProcessing1Source(Instruction* instr) {
  AssertSupportedFPCR();

  FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
  VectorFormat vform = (instr->Mask(FP64) == FP64) ? kFormatD : kFormatS;
  SimVRegister& rd = vreg(instr->Rd());
  SimVRegister& rn = vreg(instr->Rn());
  bool inexact_exception = false;

  unsigned fd = instr->Rd();
  unsigned fn = instr->Rn();

  switch (instr->Mask(FPDataProcessing1SourceMask)) {
    case FMOV_s:
      set_sreg(fd, sreg(fn));
      return;
    case FMOV_d:
      set_dreg(fd, dreg(fn));
      return;
    case FABS_s:
    case FABS_d:
      fabs_(vform, vreg(fd), vreg(fn));
      // Explicitly log the register update whilst we have type information.
      LogVRegister(fd, GetPrintRegisterFormatFP(vform));
      return;
    case FNEG_s:
    case FNEG_d:
      fneg(vform, vreg(fd), vreg(fn));
      // Explicitly log the register update whilst we have type information.
      LogVRegister(fd, GetPrintRegisterFormatFP(vform));
      return;
    // FCVT between precisions (h = half, s = single, d = double).
    case FCVT_ds:
      set_dreg(fd, FPToDouble(sreg(fn)));
      return;
    case FCVT_sd:
      set_sreg(fd, FPToFloat(dreg(fn), FPTieEven));
      return;
    case FCVT_hs:
      set_hreg(fd, FPToFloat16(sreg(fn), FPTieEven));
      return;
    case FCVT_sh:
      set_sreg(fd, FPToFloat(hreg(fn)));
      return;
    case FCVT_dh:
      // Half-to-double goes via single; half-to-single is exact.
      set_dreg(fd, FPToDouble(FPToFloat(hreg(fn))));
      return;
    case FCVT_hd:
      set_hreg(fd, FPToFloat16(dreg(fn), FPTieEven));
      return;
    case FSQRT_s:
    case FSQRT_d:
      fsqrt(vform, rd, rn);
      // Explicitly log the register update whilst we have type information.
      LogVRegister(fd, GetPrintRegisterFormatFP(vform));
      return;
    // FRINT*: round to integral in FP format; only the rounding mode (and
    // the inexact flag for FRINTX) differs between variants.
    case FRINTI_s:
    case FRINTI_d:
      break;  // Use FPCR rounding mode.
    case FRINTX_s:
    case FRINTX_d:
      inexact_exception = true;
      break;
    case FRINTA_s:
    case FRINTA_d:
      fpcr_rounding = FPTieAway;
      break;
    case FRINTM_s:
    case FRINTM_d:
      fpcr_rounding = FPNegativeInfinity;
      break;
    case FRINTN_s:
    case FRINTN_d:
      fpcr_rounding = FPTieEven;
      break;
    case FRINTP_s:
    case FRINTP_d:
      fpcr_rounding = FPPositiveInfinity;
      break;
    case FRINTZ_s:
    case FRINTZ_d:
      fpcr_rounding = FPZero;
      break;
    default:
      UNIMPLEMENTED();
  }

  // Only FRINT* instructions fall through the switch above.
  frint(vform, rd, rn, fpcr_rounding, inexact_exception);
  // Explicitly log the register update whilst we have type information
  LogVRegister(fd, GetPrintRegisterFormatFP(vform));
}
3680
// Simulates the two-source FP arithmetic group (FADD/FSUB/FMUL/FNMUL/FDIV
// and the min/max variants) by delegating to the vector helpers with a
// scalar S or D format.
void Simulator::VisitFPDataProcessing2Source(Instruction* instr) {
  AssertSupportedFPCR();

  VectorFormat vform = (instr->Mask(FP64) == FP64) ? kFormatD : kFormatS;
  SimVRegister& rd = vreg(instr->Rd());
  SimVRegister& rn = vreg(instr->Rn());
  SimVRegister& rm = vreg(instr->Rm());

  switch (instr->Mask(FPDataProcessing2SourceMask)) {
    case FADD_s:
    case FADD_d:
      fadd(vform, rd, rn, rm);
      break;
    case FSUB_s:
    case FSUB_d:
      fsub(vform, rd, rn, rm);
      break;
    case FMUL_s:
    case FMUL_d:
      fmul(vform, rd, rn, rm);
      break;
    case FNMUL_s:
    case FNMUL_d:
      fnmul(vform, rd, rn, rm);
      break;
    case FDIV_s:
    case FDIV_d:
      fdiv(vform, rd, rn, rm);
      break;
    case FMAX_s:
    case FMAX_d:
      fmax(vform, rd, rn, rm);
      break;
    case FMIN_s:
    case FMIN_d:
      fmin(vform, rd, rn, rm);
      break;
    // The *NM variants return the number when exactly one operand is NaN.
    case FMAXNM_s:
    case FMAXNM_d:
      fmaxnm(vform, rd, rn, rm);
      break;
    case FMINNM_s:
    case FMINNM_d:
      fminnm(vform, rd, rn, rm);
      break;
    default:
      UNREACHABLE();
  }
  // Explicitly log the register update whilst we have type information.
  LogVRegister(instr->Rd(), GetPrintRegisterFormatFP(vform));
}
3732
3733void Simulator::VisitFPDataProcessing3Source(Instruction* instr) {
3734 AssertSupportedFPCR();
3735
3736 unsigned fd = instr->Rd();
3737 unsigned fn = instr->Rn();
3738 unsigned fm = instr->Rm();
3739 unsigned fa = instr->Ra();
3740
3741 switch (instr->Mask(FPDataProcessing3SourceMask)) {
3742 // fd = fa +/- (fn * fm)
3743 case FMADD_s:
3744 set_sreg(fd, FPMulAdd(sreg(fa), sreg(fn), sreg(fm)));
3745 break;
3746 case FMSUB_s:
3747 set_sreg(fd, FPMulAdd(sreg(fa), -sreg(fn), sreg(fm)));
3748 break;
3749 case FMADD_d:
3750 set_dreg(fd, FPMulAdd(dreg(fa), dreg(fn), dreg(fm)));
3751 break;
3752 case FMSUB_d:
3753 set_dreg(fd, FPMulAdd(dreg(fa), -dreg(fn), dreg(fm)));
3754 break;
3755 // Negated variants of the above.
3756 case FNMADD_s:
3757 set_sreg(fd, FPMulAdd(-sreg(fa), -sreg(fn), sreg(fm)));
3758 break;
3759 case FNMSUB_s:
3760 set_sreg(fd, FPMulAdd(-sreg(fa), sreg(fn), sreg(fm)));
3761 break;
3762 case FNMADD_d:
3763 set_dreg(fd, FPMulAdd(-dreg(fa), -dreg(fn), dreg(fm)));
3764 break;
3765 case FNMSUB_d:
3766 set_dreg(fd, FPMulAdd(-dreg(fa), dreg(fn), dreg(fm)));
3767 break;
3768 default:
3769 UNIMPLEMENTED();
3770 }
3771}
3772
3773bool Simulator::FPProcessNaNs(Instruction* instr) {
3774 unsigned fd = instr->Rd();
3775 unsigned fn = instr->Rn();
3776 unsigned fm = instr->Rm();
3777 bool done = false;
3778
3779 if (instr->Mask(FP64) == FP64) {
3780 double result = FPProcessNaNs(dreg(fn), dreg(fm));
3781 if (std::isnan(result)) {
3782 set_dreg(fd, result);
3783 done = true;
3784 }
3785 } else {
3786 float result = FPProcessNaNs(sreg(fn), sreg(fm));
3787 if (std::isnan(result)) {
3788 set_sreg(fd, result);
3789 done = true;
3790 }
3791 }
3792
3793 return done;
3794}
3795
// clang-format off
// Maps each supported pointer-authentication instruction suffix to its
// destination register, modifier and key: *B1716 signs/authenticates x17
// with x16 as modifier; *BSP operates on x30 (LR) with SP as modifier. Both
// use the IB key.
#define PAUTH_SYSTEM_MODES(V) \
  V(B1716, 17, xreg(16), kPACKeyIB) \
  V(BSP, 30, xreg(31, Reg31IsStackPointer), kPACKeyIB)
// clang-format on

// Simulates the system-instruction space: PAuth (PACI*/AUTI*), MRS/MSR for
// NZCV and FPCR, hint instructions (NOP/YIELD/CSDB/BTI*), and memory
// barriers.
void Simulator::VisitSystem(Instruction* instr) {
  // Some system instructions hijack their Op and Cp fields to represent a
  // range of immediates instead of indicating a different instruction. This
  // makes the decoding tricky.
  if (instr->Mask(SystemPAuthFMask) == SystemPAuthFixed) {
    // The BType check for PACIBSP happens in CheckBType().
    switch (instr->Mask(SystemPAuthMask)) {
// PACI* adds a pointer-authentication code to the register; AUTI* strips and
// verifies it.
#define DEFINE_PAUTH_FUNCS(SUFFIX, DST, MOD, KEY)                     \
  case PACI##SUFFIX:                                                  \
    set_xreg(DST, AddPAC(xreg(DST), MOD, KEY, kInstructionPointer));  \
    break;                                                            \
  case AUTI##SUFFIX:                                                  \
    set_xreg(DST, AuthPAC(xreg(DST), MOD, KEY, kInstructionPointer)); \
    break;

      PAUTH_SYSTEM_MODES(DEFINE_PAUTH_FUNCS)
#undef DEFINE_PAUTH_FUNCS
#undef PAUTH_SYSTEM_MODES
    }
  } else if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
    switch (instr->Mask(SystemSysRegMask)) {
      case MRS: {
        // Read a system register into Xt.
        switch (instr->ImmSystemRegister()) {
          case NZCV:
            set_xreg(instr->Rt(), nzcv().RawValue());
            break;
          case FPCR:
            set_xreg(instr->Rt(), fpcr().RawValue());
            break;
          default:
            UNIMPLEMENTED();
        }
        break;
      }
      case MSR: {
        // Write Wt into a system register.
        switch (instr->ImmSystemRegister()) {
          case NZCV:
            nzcv().SetRawValue(wreg(instr->Rt()));
            LogSystemRegister(NZCV);
            break;
          case FPCR:
            fpcr().SetRawValue(wreg(instr->Rt()));
            LogSystemRegister(FPCR);
            break;
          default:
            UNIMPLEMENTED();
        }
        break;
      }
    }
  } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
    DCHECK(instr->Mask(SystemHintMask) == HINT);
    switch (instr->ImmHint()) {
      case NOP:
      case YIELD:
      case CSDB:
      case BTI_jc:
      case BTI:
      case BTI_c:
      case BTI_j:
        // The BType checks happen in CheckBType().
        break;
      default:
        UNIMPLEMENTED();
    }
  } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
    // Model all barriers (DMB/DSB/ISB) as a full sequentially-consistent
    // fence.
    std::atomic_thread_fence(std::memory_order_seq_cst);
  } else {
    UNIMPLEMENTED();
  }
}
3873
3874bool Simulator::GetValue(const char* desc, int64_t* value) {
3875 int regnum = CodeFromName(desc);
3876 if (regnum >= 0) {
3877 unsigned code = regnum;
3878 if (code == kZeroRegCode) {
3879 // Catch the zero register and return 0.
3880 *value = 0;
3881 return true;
3882 } else if (code == kSPRegInternalCode) {
3883 // Translate the stack pointer code to 31, for Reg31IsStackPointer.
3884 code = 31;
3885 }
3886 if (desc[0] == 'w') {
3887 *value = wreg(code, Reg31IsStackPointer);
3888 } else {
3889 *value = xreg(code, Reg31IsStackPointer);
3890 }
3891 return true;
3892 } else if (strncmp(desc, "0x", 2) == 0) {
3893 return SScanF(desc + 2, "%" SCNx64, reinterpret_cast<uint64_t*>(value)) ==
3894 1;
3895 } else {
3896 return SScanF(desc, "%" SCNu64, reinterpret_cast<uint64_t*>(value)) == 1;
3897 }
3898}
3899
// Prints the register named by |desc| (e.g. "x0", "w1", "s2", "d3", "v4",
// "sp", "wsp") to the debugger stream in an appropriate format. Returns
// false if |desc| does not name a register.
bool Simulator::PrintValue(const char* desc) {
  // The stack pointer aliases need special handling: their code is internal.
  if (strcmp(desc, "sp") == 0) {
    DCHECK(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
    PrintF(stream_, "%s sp:%s 0x%016" PRIx64 "%s\n", clr_reg_name,
           clr_reg_value, xreg(31, Reg31IsStackPointer), clr_normal);
    return true;
  } else if (strcmp(desc, "wsp") == 0) {
    DCHECK(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
    PrintF(stream_, "%s wsp:%s 0x%08" PRIx32 "%s\n", clr_reg_name,
           clr_reg_value, wreg(31, Reg31IsStackPointer), clr_normal);
    return true;
  }

  int i = CodeFromName(desc);
  static_assert(kNumberOfRegisters == kNumberOfVRegisters,
                "Must be same number of Registers as VRegisters.");
  if (i < 0 || static_cast<unsigned>(i) >= kNumberOfVRegisters) return false;

  if (desc[0] == 'v') {
    // Vector registers are dumped byte by byte.
    struct qreg_t reg = qreg(i);
    PrintF(stream_, "%s %s:%s (%s0x%02x%s", clr_vreg_name, VRegNameForCode(i),
           clr_normal, clr_vreg_value, reg.val[0], clr_normal);
    for (int b = 1; b < kQRegSize; b++) {
      PrintF(stream_, ", %s0x%02x%s", clr_vreg_value, reg.val[b], clr_normal);
    }
    PrintF(stream_, ")\n");
    return true;
  } else if (desc[0] == 'd') {
    PrintF(stream_, "%s %s:%s %g%s\n", clr_vreg_name, DRegNameForCode(i),
           clr_vreg_value, dreg(i), clr_normal);
    return true;
  } else if (desc[0] == 's') {
    PrintF(stream_, "%s %s:%s %g%s\n", clr_vreg_name, SRegNameForCode(i),
           clr_vreg_value, sreg(i), clr_normal);
    return true;
  } else if (desc[0] == 'w') {
    PrintF(stream_, "%s %s:%s 0x%08" PRIx32 "%s\n", clr_reg_name,
           WRegNameForCode(i), clr_reg_value, wreg(i), clr_normal);
    return true;
  } else {
    // X register names have a wide variety of starting characters, but anything
    // else will be an X register.
    PrintF(stream_, "%s %s:%s 0x%016" PRIx64 "%s\n", clr_reg_name,
           XRegNameForCode(i), clr_reg_value, xreg(i), clr_normal);
    return true;
  }
}
3947
3948void Simulator::Debug() {
3949 if (v8_flags.correctness_fuzzer_suppressions) {
3950 PrintF("Debugger disabled for differential fuzzing.\n");
3951 return;
3952 }
3953 bool done = false;
3954 while (!done) {
3955 // Disassemble the next instruction to execute before doing anything else.
3956 PrintInstructionsAt(pc_, 1);
3957 // Read the command line.
3958 ArrayUniquePtr<char> line(ReadLine("sim> "));
3959 done = ExecDebugCommand(std::move(line));
3960 }
3961}
3962
3963bool Simulator::ExecDebugCommand(ArrayUniquePtr<char> line_ptr) {
3964#define COMMAND_SIZE 63
3965#define ARG_SIZE 255
3966
3967#define STR(a) #a
3968#define XSTR(a) STR(a)
3969
3970 char cmd[COMMAND_SIZE + 1];
3971 char arg1[ARG_SIZE + 1];
3972 char arg2[ARG_SIZE + 1];
3973 char* argv[3] = {cmd, arg1, arg2};
3974
3975 // Make sure to have a proper terminating character if reaching the limit.
3976 cmd[COMMAND_SIZE] = 0;
3977 arg1[ARG_SIZE] = 0;
3978 arg2[ARG_SIZE] = 0;
3979
3980 bool cleared_log_disasm_bit = false;
3981
3982 if (line_ptr == nullptr) return false;
3983
3984 // Repeat last command by default.
3985 const char* line = line_ptr.get();
3986 const char* last_input = last_debugger_input();
3987 if (strcmp(line, "\n") == 0 && (last_input != nullptr)) {
3988 line_ptr.reset();
3989 line = last_input;
3990 } else {
3991 // Update the latest command ran
3992 set_last_debugger_input(std::move(line_ptr));
3993 }
3994
3995 // Use sscanf to parse the individual parts of the command line. At the
3996 // moment no command expects more than two parameters.
3997 int argc = SScanF(line,
3998 "%" XSTR(COMMAND_SIZE) "s "
3999 "%" XSTR(ARG_SIZE) "s "
4000 "%" XSTR(ARG_SIZE) "s",
4001 cmd, arg1, arg2);
4002
4003 // stepi / si ------------------------------------------------------------
4004 if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
4005 // We are about to execute instructions, after which by default we
4006 // should increment the pc_. If it was set when reaching this debug
4007 // instruction, it has not been cleared because this instruction has not
4008 // completed yet. So clear it manually.
4009 pc_modified_ = false;
4010
4011 if (argc == 1) {
4012 ExecuteInstruction();
4013 } else {
4014 int64_t number_of_instructions_to_execute = 1;
4015 GetValue(arg1, &number_of_instructions_to_execute);
4016
4017 set_log_parameters(log_parameters() | LOG_DISASM);
4018 while (number_of_instructions_to_execute-- > 0) {
4019 ExecuteInstruction();
4020 }
4021 set_log_parameters(log_parameters() & ~LOG_DISASM);
4022 PrintF("\n");
4023 }
4024
4025 // If it was necessary, the pc has already been updated or incremented
4026 // when executing the instruction. So we do not want it to be updated
4027 // again. It will be cleared when exiting.
4028 pc_modified_ = true;
4029
4030 // next / n
4031 // --------------------------------------------------------------
4032 } else if ((strcmp(cmd, "next") == 0) || (strcmp(cmd, "n") == 0)) {
4033 // Tell the simulator to break after the next executed BL.
4034 break_on_next_ = true;
4035 // Continue.
4036 return true;
4037
4038 // continue / cont / c
4039 // ---------------------------------------------------
4040 } else if ((strcmp(cmd, "continue") == 0) || (strcmp(cmd, "cont") == 0) ||
4041 (strcmp(cmd, "c") == 0)) {
4042 // Leave the debugger shell.
4043 return true;
4044
4045 // disassemble / disasm / di
4046 // ---------------------------------------------
4047 } else if (strcmp(cmd, "disassemble") == 0 || strcmp(cmd, "disasm") == 0 ||
4048 strcmp(cmd, "di") == 0) {
4049 int64_t n_of_instrs_to_disasm = 10; // default value.
4050 int64_t address = reinterpret_cast<int64_t>(pc_); // default value.
4051 if (argc >= 2) { // disasm <n of instrs>
4052 GetValue(arg1, &n_of_instrs_to_disasm);
4053 }
4054 if (argc >= 3) { // disasm <n of instrs> <address>
4055 GetValue(arg2, &address);
4056 }
4057
4058 // Disassemble.
4059 PrintInstructionsAt(reinterpret_cast<Instruction*>(address),
4060 n_of_instrs_to_disasm);
4061 PrintF("\n");
4062
4063 // print / p
4064 // -------------------------------------------------------------
4065 } else if ((strcmp(cmd, "print") == 0) || (strcmp(cmd, "p") == 0)) {
4066 if (argc == 2) {
4067 if (strcmp(arg1, "all") == 0) {
4068 PrintRegisters();
4069 PrintVRegisters();
4070 } else {
4071 if (!PrintValue(arg1)) {
4072 PrintF("%s unrecognized\n", arg1);
4073 }
4074 }
4075 } else {
4076 PrintF(
4077 "print <register>\n"
4078 " Print the content of a register. (alias 'p')\n"
4079 " 'print all' will print all registers.\n"
4080 " Use 'printobject' to get more details about the value.\n");
4081 }
4082
4083 // printobject / po
4084 // ------------------------------------------------------
4085 } else if ((strcmp(cmd, "printobject") == 0) || (strcmp(cmd, "po") == 0)) {
4086 if (argc == 2) {
4087 int64_t value;
4088 StdoutStream os;
4089 if (GetValue(arg1, &value)) {
4090 Tagged<Object> obj(value);
4091 os << arg1 << ": \n";
4092#ifdef DEBUG
4093 Print(obj, os);
4094 os << "\n";
4095#else
4096 os << Brief(obj) << "\n";
4097#endif
4098 } else {
4099 os << arg1 << " unrecognized\n";
4100 }
4101 } else {
4102 PrintF(
4103 "printobject <value>\n"
4104 "printobject <register>\n"
4105 " Print details about the value. (alias 'po')\n");
4106 }
4107
4108 // stack / mem
4109 // ----------------------------------------------------------
4110 } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0 ||
4111 strcmp(cmd, "dump") == 0) {
4112 int64_t* cur = nullptr;
4113 int64_t* end = nullptr;
4114 int next_arg = 1;
4115
4116 if (strcmp(cmd, "stack") == 0) {
4117 cur = reinterpret_cast<int64_t*>(sp());
4118
4119 } else { // "mem"
4120 int64_t value;
4121 if (!GetValue(arg1, &value)) {
4122 PrintF("%s unrecognized\n", arg1);
4123 return false;
4124 }
4125 cur = reinterpret_cast<int64_t*>(value);
4126 next_arg++;
4127 }
4128
4129 int64_t words = 0;
4130 if (argc == next_arg) {
4131 words = 10;
4132 } else if (argc == next_arg + 1) {
4133 if (!GetValue(argv[next_arg], &words)) {
4134 PrintF("%s unrecognized\n", argv[next_arg]);
4135 PrintF("Printing 10 double words by default");
4136 words = 10;
4137 }
4138 } else {
4139 UNREACHABLE();
4140 }
4141 end = cur + words;
4142
4143 bool skip_obj_print = (strcmp(cmd, "dump") == 0);
4144 while (cur < end) {
4145 PrintF(" 0x%016" PRIx64 ": 0x%016" PRIx64 " %10" PRId64,
4146 reinterpret_cast<uint64_t>(cur), *cur, *cur);
4147 if (!skip_obj_print) {
4148 Tagged<Object> obj(*cur);
4149 Heap* current_heap = isolate_->heap();
4150 if (IsSmi(obj) ||
4151 IsValidHeapObject(current_heap, Cast<HeapObject>(obj))) {
4152 PrintF(" (");
4153 if (IsSmi(obj)) {
4154 PrintF("smi %" PRId32, Smi::ToInt(obj));
4155 } else {
4156 ShortPrint(obj);
4157 }
4158 PrintF(")");
4159 }
4160 }
4161 PrintF("\n");
4162 cur++;
4163 }
4164
4165 // trace / t
4166 // -------------------------------------------------------------
4167 } else if (strcmp(cmd, "trace") == 0 || strcmp(cmd, "t") == 0) {
4168 if ((log_parameters() & LOG_ALL) != LOG_ALL) {
4169 PrintF("Enabling disassembly, registers and memory write tracing\n");
4170 set_log_parameters(log_parameters() | LOG_ALL);
4171 } else {
4172 PrintF("Disabling disassembly, registers and memory write tracing\n");
4173 set_log_parameters(log_parameters() & ~LOG_ALL);
4174 }
4175
4176 // break / b
4177 // -------------------------------------------------------------
4178 } else if (strcmp(cmd, "break") == 0 || strcmp(cmd, "b") == 0) {
4179 if (argc == 2) {
4180 int64_t value;
4181 if (GetValue(arg1, &value)) {
4182 SetBreakpoint(reinterpret_cast<Instruction*>(value));
4183 } else {
4184 PrintF("%s unrecognized\n", arg1);
4185 }
4186 } else {
4187 ListBreakpoints();
4188 PrintF("Use `break <address>` to set or disable a breakpoint\n");
4189 }
4190
4191 // backtrace / bt
4192 // ---------------------------------------------------------------
4193 } else if (strcmp(cmd, "backtrace") == 0 || strcmp(cmd, "bt") == 0) {
4194 Address pc = reinterpret_cast<Address>(pc_);
4195 Address lr = reinterpret_cast<Address>(this->lr());
4196 Address sp = static_cast<Address>(this->sp());
4197 Address fp = static_cast<Address>(this->fp());
4198
4199 int i = 0;
4200 while (true) {
4201 PrintF("#%d: " V8PRIxPTR_FMT " (sp=" V8PRIxPTR_FMT ", fp=" V8PRIxPTR_FMT
4202 ")\n",
4203 i, pc, sp, fp);
4204 pc = lr;
4205 sp = fp;
4206 if (pc == reinterpret_cast<Address>(kEndOfSimAddress)) {
4207 break;
4208 }
4209 lr = *(reinterpret_cast<Address*>(fp) + 1);
4210 fp = *reinterpret_cast<Address*>(fp);
4211 i++;
4212 if (i > 100) {
4213 PrintF("Too many frames\n");
4214 break;
4215 }
4216 }
4217
4218 // gdb
4219 // -------------------------------------------------------------------
4220 } else if (strcmp(cmd, "gdb") == 0) {
4221 PrintF("Relinquishing control to gdb.\n");
4222 base::OS::DebugBreak();
4223 PrintF("Regaining control from gdb.\n");
4224
4225 // sysregs
4226 // ---------------------------------------------------------------
4227 } else if (strcmp(cmd, "sysregs") == 0) {
4228 PrintSystemRegisters();
4229
4230 // help / h
4231 // --------------------------------------------------------------
4232 } else if (strcmp(cmd, "help") == 0 || strcmp(cmd, "h") == 0) {
4233 PrintF(
4234 "stepi / si\n"
4235 " stepi <n>\n"
4236 " Step <n> instructions.\n"
4237 "next / n\n"
4238 " Continue execution until a BL instruction is reached.\n"
4239 " At this point a breakpoint is set just after this BL.\n"
4240 " Then execution is resumed. It will probably later hit the\n"
4241 " breakpoint just set.\n"
4242 "continue / cont / c\n"
4243 " Continue execution from here.\n"
4244 "disassemble / disasm / di\n"
4245 " disassemble <n> <address>\n"
4246 " Disassemble <n> instructions from current <address>.\n"
4247 " By default <n> is 20 and <address> is the current pc.\n"
4248 "print / p\n"
4249 " print <register>\n"
4250 " Print the content of a register.\n"
4251 " 'print all' will print all registers.\n"
4252 " Use 'printobject' to get more details about the value.\n"
4253 "printobject / po\n"
4254 " printobject <value>\n"
4255 " printobject <register>\n"
4256 " Print details about the value.\n"
4257 "stack\n"
4258 " stack [<words>]\n"
4259 " Dump stack content, default dump 10 words\n"
4260 "mem\n"
4261 " mem <address> [<words>]\n"
4262 " Dump memory content, default dump 10 words\n"
4263 "dump\n"
4264 " dump <address> [<words>]\n"
4265 " Dump memory content without pretty printing JS objects, "
4266 "default dump 10 words\n"
4267 "trace / t\n"
4268 " Toggle disassembly and register tracing\n"
4269 "break / b\n"
4270 " break : list all breakpoints\n"
4271 " break <address> : set / enable / disable a breakpoint.\n"
4272 "backtrace / bt\n"
4273 " Walk the frame pointers, dumping the pc/sp/fp for each frame.\n"
4274 "gdb\n"
4275 " Enter gdb.\n"
4276 "sysregs\n"
4277 " Print all system registers (including NZCV).\n");
4278 } else {
4279 PrintF("Unknown command: %s\n", cmd);
4280 PrintF("Use 'help' for more information.\n");
4281 }
4282
4283 if (cleared_log_disasm_bit == true) {
4284 set_log_parameters(log_parameters_ | LOG_DISASM);
4285 }
4286 return false;
4287}
4288
// Handles exception-generating instructions (HLT/BRK). HLT is used by the
// simulator as a pseudo-instruction carrying an immediate that selects a
// simulator service: debug breakpoints, redirected runtime calls, printf,
// stack-limit switches and unreachable markers.
void Simulator::VisitException(Instruction* instr) {
  switch (instr->Mask(ExceptionMask)) {
    case HLT: {
      if (instr->ImmException() == kImmExceptionIsDebug) {
        // Read the arguments encoded inline in the instruction stream.
        uint32_t code;
        uint32_t parameters;

        memcpy(&code, pc_->InstructionAtOffset(kDebugCodeOffset), sizeof(code));
        memcpy(&parameters, pc_->InstructionAtOffset(kDebugParamsOffset),
               sizeof(parameters));
        char const* message = reinterpret_cast<char const*>(
            pc_->InstructionAtOffset(kDebugMessageOffset));

        // Always print something when we hit a debug point that breaks.
        // We are going to break, so printing something is not an issue in
        // terms of speed.
        if (v8_flags.trace_sim_messages || v8_flags.trace_sim ||
            (parameters & BREAK)) {
          if (message != nullptr) {
            PrintF(stream_, "# %sDebugger hit %d: %s%s%s\n", clr_debug_number,
                   code, clr_debug_message, message, clr_normal);
          } else {
            PrintF(stream_, "# %sDebugger hit %d.%s\n", clr_debug_number, code,
                   clr_normal);
          }
          // If the breakpoint is inside an embedded builtin, report which one.
          Builtin maybe_builtin = OffHeapInstructionStream::TryLookupCode(
              Isolate::Current(), reinterpret_cast<Address>(pc_));
          if (Builtins::IsBuiltinId(maybe_builtin)) {
            char const* name = Builtins::name(maybe_builtin);
            PrintF(stream_, "# %s                %sLOCATION: %s%s\n",
                   clr_debug_number, clr_debug_message, name, clr_normal);
          }
        }

        // Other options.
        switch (parameters & kDebuggerTracingDirectivesMask) {
          case TRACE_ENABLE:
            set_log_parameters(log_parameters() | parameters);
            if (parameters & LOG_SYS_REGS) {
              PrintSystemRegisters();
            }
            if (parameters & LOG_REGS) {
              PrintRegisters();
            }
            if (parameters & LOG_VREGS) {
              PrintVRegisters();
            }
            break;
          case TRACE_DISABLE:
            set_log_parameters(log_parameters() & ~parameters);
            break;
          case TRACE_OVERRIDE:
            set_log_parameters(parameters);
            break;
          default:
            // We don't support a one-shot LOG_DISASM.
            DCHECK_EQ(parameters & LOG_DISASM, 0);
            // Don't print information that is already being traced.
            parameters &= ~log_parameters();
            // Print the requested information.
            if (parameters & LOG_SYS_REGS) PrintSystemRegisters();
            if (parameters & LOG_REGS) PrintRegisters();
            if (parameters & LOG_VREGS) PrintVRegisters();
        }

        // The stop parameters are inlined in the code. Skip them:
        //  - Skip to the end of the message string.
        size_t size = kDebugMessageOffset + strlen(message) + 1;
        pc_ = pc_->InstructionAtOffset(RoundUp(size, kInstrSize));
        //  - Verify that the unreachable marker is present.
        DCHECK(pc_->Mask(ExceptionMask) == HLT);
        DCHECK_EQ(pc_->ImmException(), kImmExceptionIsUnreachable);
        //  - Skip past the unreachable marker.
        set_pc(pc_->following());

        // Check if the debugger should break.
        if (parameters & BREAK) Debug();

      } else if (instr->ImmException() == kImmExceptionIsRedirectedCall) {
        DoRuntimeCall(instr);
      } else if (instr->ImmException() == kImmExceptionIsPrintf) {
        DoPrintf(instr);
      } else if (instr->ImmException() == kImmExceptionIsSwitchStackLimit) {
        DoSwitchStackLimit(instr);
      } else if (instr->ImmException() == kImmExceptionIsUnreachable) {
        fprintf(stream_, "Hit UNREACHABLE marker at PC=%p.\n",
                reinterpret_cast<void*>(pc_));
        abort();

      } else {
        // Unrecognized HLT immediate: hand control to an attached debugger.
        base::OS::DebugBreak();
      }
      break;
    }
    case BRK:
      base::OS::DebugBreak();
      break;
    default:
      UNIMPLEMENTED();
  }
}
4391
// Simulates the NEON "2-register miscellaneous" instruction group:
// element reversals, bit counts, absolute value/negate, compares against
// zero, pairwise long adds, narrowing/lengthening moves, and the FP unary,
// convert and round operations.
void Simulator::VisitNEON2RegMisc(Instruction* instr) {
  NEONFormatDecoder nfd(instr);
  VectorFormat vf = nfd.GetVectorFormat();

  // Format mapping for "long pair" instructions, [su]addlp, [su]adalp.
  static const NEONFormatMap map_lp = {
      {23, 22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}};
  VectorFormat vf_lp = nfd.GetVectorFormat(&map_lp);

  static const NEONFormatMap map_fcvtl = {{22}, {NF_4S, NF_2D}};
  VectorFormat vf_fcvtl = nfd.GetVectorFormat(&map_fcvtl);

  static const NEONFormatMap map_fcvtn = {{22, 30},
                                          {NF_4H, NF_8H, NF_2S, NF_4S}};
  VectorFormat vf_fcvtn = nfd.GetVectorFormat(&map_fcvtn);

  SimVRegister& rd = vreg(instr->Rd());
  SimVRegister& rn = vreg(instr->Rn());

  if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_opcode) {
    // These instructions all use a two bit size field, except NOT and RBIT,
    // which use the field to encode the operation.
    switch (instr->Mask(NEON2RegMiscMask)) {
      case NEON_REV64:
        rev64(vf, rd, rn);
        break;
      case NEON_REV32:
        rev32(vf, rd, rn);
        break;
      case NEON_REV16:
        rev16(vf, rd, rn);
        break;
      case NEON_SUQADD:
        suqadd(vf, rd, rn);
        break;
      case NEON_USQADD:
        usqadd(vf, rd, rn);
        break;
      case NEON_CLS:
        cls(vf, rd, rn);
        break;
      case NEON_CLZ:
        clz(vf, rd, rn);
        break;
      case NEON_CNT:
        cnt(vf, rd, rn);
        break;
      case NEON_SQABS:
        abs(vf, rd, rn).SignedSaturate(vf);
        break;
      case NEON_SQNEG:
        neg(vf, rd, rn).SignedSaturate(vf);
        break;
      case NEON_CMGT_zero:
        cmp(vf, rd, rn, 0, gt);
        break;
      case NEON_CMGE_zero:
        cmp(vf, rd, rn, 0, ge);
        break;
      case NEON_CMEQ_zero:
        cmp(vf, rd, rn, 0, eq);
        break;
      case NEON_CMLE_zero:
        cmp(vf, rd, rn, 0, le);
        break;
      case NEON_CMLT_zero:
        cmp(vf, rd, rn, 0, lt);
        break;
      case NEON_ABS:
        abs(vf, rd, rn);
        break;
      case NEON_NEG:
        neg(vf, rd, rn);
        break;
      case NEON_SADDLP:
        saddlp(vf_lp, rd, rn);
        break;
      case NEON_UADDLP:
        uaddlp(vf_lp, rd, rn);
        break;
      case NEON_SADALP:
        sadalp(vf_lp, rd, rn);
        break;
      case NEON_UADALP:
        uadalp(vf_lp, rd, rn);
        break;
      case NEON_RBIT_NOT:
        // NOT and RBIT share an opcode; FPType() distinguishes them.
        vf = nfd.GetVectorFormat(nfd.LogicalFormatMap());
        switch (instr->FPType()) {
          case 0:
            not_(vf, rd, rn);
            break;
          case 1:
            rbit(vf, rd, rn);
            break;
          default:
            UNIMPLEMENTED();
        }
        break;
    }
  } else {
    // Floating-point portion of the group.
    VectorFormat fpf = nfd.GetVectorFormat(instr->Mask(NEON2RegMiscHPFixed) ==
                                                   NEON2RegMiscHPFixed
                                               ? nfd.FPHPFormatMap()
                                               : nfd.FPFormatMap());
    FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
    bool inexact_exception = false;

    // These instructions all use a one bit size field, except XTN, SQXTUN,
    // SHLL, SQXTN and UQXTN, which use a two bit size field.
    switch (instr->Mask(NEON2RegMiscFPMask ^ NEON2RegMiscHPFixed)) {
      case NEON_FABS:
        fabs_(fpf, rd, rn);
        return;
      case NEON_FNEG:
        fneg(fpf, rd, rn);
        return;
      case NEON_FSQRT:
        fsqrt(fpf, rd, rn);
        return;
      case NEON_FCVTL:
        if (instr->Mask(NEON_Q)) {
          fcvtl2(vf_fcvtl, rd, rn);
        } else {
          fcvtl(vf_fcvtl, rd, rn);
        }
        return;
      case NEON_FCVTN:
        if (instr->Mask(NEON_Q)) {
          fcvtn2(vf_fcvtn, rd, rn);
        } else {
          fcvtn(vf_fcvtn, rd, rn);
        }
        return;
      case NEON_FCVTXN:
        if (instr->Mask(NEON_Q)) {
          fcvtxn2(vf_fcvtn, rd, rn);
        } else {
          fcvtxn(vf_fcvtn, rd, rn);
        }
        return;

      // The following instructions break from the switch statement, rather
      // than return.
      case NEON_FRINTI:
        break;  // Use FPCR rounding mode.
      case NEON_FRINTX:
        inexact_exception = true;
        break;
      case NEON_FRINTA:
        fpcr_rounding = FPTieAway;
        break;
      case NEON_FRINTM:
        fpcr_rounding = FPNegativeInfinity;
        break;
      case NEON_FRINTN:
        fpcr_rounding = FPTieEven;
        break;
      case NEON_FRINTP:
        fpcr_rounding = FPPositiveInfinity;
        break;
      case NEON_FRINTZ:
        fpcr_rounding = FPZero;
        break;

      // The remaining cases return to the caller.
      case NEON_FCVTNS:
        fcvts(fpf, rd, rn, FPTieEven);
        return;
      case NEON_FCVTNU:
        fcvtu(fpf, rd, rn, FPTieEven);
        return;
      case NEON_FCVTPS:
        fcvts(fpf, rd, rn, FPPositiveInfinity);
        return;
      case NEON_FCVTPU:
        fcvtu(fpf, rd, rn, FPPositiveInfinity);
        return;
      case NEON_FCVTMS:
        fcvts(fpf, rd, rn, FPNegativeInfinity);
        return;
      case NEON_FCVTMU:
        fcvtu(fpf, rd, rn, FPNegativeInfinity);
        return;
      case NEON_FCVTZS:
        fcvts(fpf, rd, rn, FPZero);
        return;
      case NEON_FCVTZU:
        fcvtu(fpf, rd, rn, FPZero);
        return;
      case NEON_FCVTAS:
        fcvts(fpf, rd, rn, FPTieAway);
        return;
      case NEON_FCVTAU:
        fcvtu(fpf, rd, rn, FPTieAway);
        return;
      case NEON_SCVTF:
        scvtf(fpf, rd, rn, 0, fpcr_rounding);
        return;
      case NEON_UCVTF:
        ucvtf(fpf, rd, rn, 0, fpcr_rounding);
        return;
      case NEON_URSQRTE:
        ursqrte(fpf, rd, rn);
        return;
      case NEON_URECPE:
        urecpe(fpf, rd, rn);
        return;
      case NEON_FRSQRTE:
        frsqrte(fpf, rd, rn);
        return;
      case NEON_FRECPE:
        frecpe(fpf, rd, rn, fpcr_rounding);
        return;
      case NEON_FCMGT_zero:
        fcmp_zero(fpf, rd, rn, gt);
        return;
      case NEON_FCMGE_zero:
        fcmp_zero(fpf, rd, rn, ge);
        return;
      case NEON_FCMEQ_zero:
        fcmp_zero(fpf, rd, rn, eq);
        return;
      case NEON_FCMLE_zero:
        fcmp_zero(fpf, rd, rn, le);
        return;
      case NEON_FCMLT_zero:
        fcmp_zero(fpf, rd, rn, lt);
        return;
      default:
        if ((NEON_XTN_opcode <= instr->Mask(NEON2RegMiscOpcode)) &&
            (instr->Mask(NEON2RegMiscOpcode) <= NEON_UQXTN_opcode)) {
          switch (instr->Mask(NEON2RegMiscMask)) {
            case NEON_XTN:
              xtn(vf, rd, rn);
              return;
            case NEON_SQXTN:
              sqxtn(vf, rd, rn);
              return;
            case NEON_UQXTN:
              uqxtn(vf, rd, rn);
              return;
            case NEON_SQXTUN:
              sqxtun(vf, rd, rn);
              return;
            case NEON_SHLL:
              vf = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());
              if (instr->Mask(NEON_Q)) {
                shll2(vf, rd, rn);
              } else {
                shll(vf, rd, rn);
              }
              return;
            default:
              UNIMPLEMENTED();
          }
        } else {
          UNIMPLEMENTED();
        }
    }

    // Only FRINT* instructions fall through the switch above.
    frint(fpf, rd, rn, fpcr_rounding, inexact_exception);
  }
}
4657
// Dispatches a NEON "3 same" floating-point operation: rd = op(rn, rm) with
// vector format vf. Shared by the single/double-precision path
// (VisitNEON3Same) and the half-precision path (VisitNEON3SameHP).
void Simulator::VisitNEON3SameFP(NEON3SameOp op, VectorFormat vf,
                                 SimVRegister& rd, SimVRegister& rn,
                                 SimVRegister& rm) {
  switch (op) {
    case NEON_FADD:
      fadd(vf, rd, rn, rm);
      break;
    case NEON_FSUB:
      fsub(vf, rd, rn, rm);
      break;
    case NEON_FMUL:
      fmul(vf, rd, rn, rm);
      break;
    case NEON_FDIV:
      fdiv(vf, rd, rn, rm);
      break;
    case NEON_FMAX:
      fmax(vf, rd, rn, rm);
      break;
    case NEON_FMIN:
      fmin(vf, rd, rn, rm);
      break;
    case NEON_FMAXNM:
      fmaxnm(vf, rd, rn, rm);
      break;
    case NEON_FMINNM:
      fminnm(vf, rd, rn, rm);
      break;
    case NEON_FMLA:
      fmla(vf, rd, rn, rm);
      break;
    case NEON_FMLS:
      fmls(vf, rd, rn, rm);
      break;
    case NEON_FMULX:
      fmulx(vf, rd, rn, rm);
      break;
    case NEON_FACGE:
      fabscmp(vf, rd, rn, rm, ge);
      break;
    case NEON_FACGT:
      fabscmp(vf, rd, rn, rm, gt);
      break;
    case NEON_FCMEQ:
      fcmp(vf, rd, rn, rm, eq);
      break;
    case NEON_FCMGE:
      fcmp(vf, rd, rn, rm, ge);
      break;
    case NEON_FCMGT:
      fcmp(vf, rd, rn, rm, gt);
      break;
    case NEON_FRECPS:
      frecps(vf, rd, rn, rm);
      break;
    case NEON_FRSQRTS:
      frsqrts(vf, rd, rn, rm);
      break;
    case NEON_FABD:
      fabd(vf, rd, rn, rm);
      break;
    case NEON_FADDP:
      faddp(vf, rd, rn, rm);
      break;
    case NEON_FMAXP:
      fmaxp(vf, rd, rn, rm);
      break;
    case NEON_FMAXNMP:
      fmaxnmp(vf, rd, rn, rm);
      break;
    case NEON_FMINP:
      fminp(vf, rd, rn, rm);
      break;
    case NEON_FMINNMP:
      fminnmp(vf, rd, rn, rm);
      break;
    default:
      UNIMPLEMENTED();
  }
}
4738
// Simulates the NEON "3 same" instruction group, where destination and both
// sources share one vector arrangement: bitwise logical operations, integer
// arithmetic/compares/shifts, and (delegated) floating-point operations.
void Simulator::VisitNEON3Same(Instruction* instr) {
  NEONFormatDecoder nfd(instr);
  SimVRegister& rd = vreg(instr->Rd());
  SimVRegister& rn = vreg(instr->Rn());
  SimVRegister& rm = vreg(instr->Rm());

  if (instr->Mask(NEON3SameLogicalFMask) == NEON3SameLogicalFixed) {
    // Bitwise logical sub-group (AND/ORR/EOR/BIC/BSL/...).
    VectorFormat vf = nfd.GetVectorFormat(nfd.LogicalFormatMap());
    switch (instr->Mask(NEON3SameLogicalMask)) {
      case NEON_AND:
        and_(vf, rd, rn, rm);
        break;
      case NEON_ORR:
        orr(vf, rd, rn, rm);
        break;
      case NEON_ORN:
        orn(vf, rd, rn, rm);
        break;
      case NEON_EOR:
        eor(vf, rd, rn, rm);
        break;
      case NEON_BIC:
        bic(vf, rd, rn, rm);
        break;
      case NEON_BIF:
        bif(vf, rd, rn, rm);
        break;
      case NEON_BIT:
        bit(vf, rd, rn, rm);
        break;
      case NEON_BSL:
        bsl(vf, rd, rn, rm);
        break;
      default:
        UNIMPLEMENTED();
    }
  } else if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) {
    // Single/double-precision FP sub-group; shared handler with the
    // half-precision visitor.
    VectorFormat vf = nfd.GetVectorFormat(nfd.FPFormatMap());
    VisitNEON3SameFP(instr->Mask(NEON3SameFPMask), vf, rd, rn, rm);
  } else {
    // Integer sub-group.
    VectorFormat vf = nfd.GetVectorFormat();
    switch (instr->Mask(NEON3SameMask)) {
      case NEON_ADD:
        add(vf, rd, rn, rm);
        break;
      case NEON_ADDP:
        addp(vf, rd, rn, rm);
        break;
      case NEON_CMEQ:
        cmp(vf, rd, rn, rm, eq);
        break;
      case NEON_CMGE:
        cmp(vf, rd, rn, rm, ge);
        break;
      case NEON_CMGT:
        cmp(vf, rd, rn, rm, gt);
        break;
      case NEON_CMHI:
        cmp(vf, rd, rn, rm, hi);
        break;
      case NEON_CMHS:
        cmp(vf, rd, rn, rm, hs);
        break;
      case NEON_CMTST:
        cmptst(vf, rd, rn, rm);
        break;
      case NEON_MLS:
        mls(vf, rd, rn, rm);
        break;
      case NEON_MLA:
        mla(vf, rd, rn, rm);
        break;
      case NEON_MUL:
        mul(vf, rd, rn, rm);
        break;
      case NEON_PMUL:
        pmul(vf, rd, rn, rm);
        break;
      case NEON_SMAX:
        smax(vf, rd, rn, rm);
        break;
      case NEON_SMAXP:
        smaxp(vf, rd, rn, rm);
        break;
      case NEON_SMIN:
        smin(vf, rd, rn, rm);
        break;
      case NEON_SMINP:
        sminp(vf, rd, rn, rm);
        break;
      case NEON_SUB:
        sub(vf, rd, rn, rm);
        break;
      case NEON_UMAX:
        umax(vf, rd, rn, rm);
        break;
      case NEON_UMAXP:
        umaxp(vf, rd, rn, rm);
        break;
      case NEON_UMIN:
        umin(vf, rd, rn, rm);
        break;
      case NEON_UMINP:
        uminp(vf, rd, rn, rm);
        break;
      case NEON_SSHL:
        sshl(vf, rd, rn, rm);
        break;
      case NEON_USHL:
        ushl(vf, rd, rn, rm);
        break;
      case NEON_SABD:
        AbsDiff(vf, rd, rn, rm, true);
        break;
      case NEON_UABD:
        AbsDiff(vf, rd, rn, rm, false);
        break;
      case NEON_SABA:
        saba(vf, rd, rn, rm);
        break;
      case NEON_UABA:
        uaba(vf, rd, rn, rm);
        break;
      // Saturating, rounding and halving variants are composed from the base
      // operation followed by the appropriate post-processing step(s).
      case NEON_UQADD:
        add(vf, rd, rn, rm).UnsignedSaturate(vf);
        break;
      case NEON_SQADD:
        add(vf, rd, rn, rm).SignedSaturate(vf);
        break;
      case NEON_UQSUB:
        sub(vf, rd, rn, rm).UnsignedSaturate(vf);
        break;
      case NEON_SQSUB:
        sub(vf, rd, rn, rm).SignedSaturate(vf);
        break;
      case NEON_SQDMULH:
        sqdmulh(vf, rd, rn, rm);
        break;
      case NEON_SQRDMULH:
        sqrdmulh(vf, rd, rn, rm);
        break;
      case NEON_UQSHL:
        ushl(vf, rd, rn, rm).UnsignedSaturate(vf);
        break;
      case NEON_SQSHL:
        sshl(vf, rd, rn, rm).SignedSaturate(vf);
        break;
      case NEON_URSHL:
        ushl(vf, rd, rn, rm).Round(vf);
        break;
      case NEON_SRSHL:
        sshl(vf, rd, rn, rm).Round(vf);
        break;
      case NEON_UQRSHL:
        ushl(vf, rd, rn, rm).Round(vf).UnsignedSaturate(vf);
        break;
      case NEON_SQRSHL:
        sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf);
        break;
      case NEON_UHADD:
        add(vf, rd, rn, rm).Uhalve(vf);
        break;
      case NEON_URHADD:
        add(vf, rd, rn, rm).Uhalve(vf).Round(vf);
        break;
      case NEON_SHADD:
        add(vf, rd, rn, rm).Halve(vf);
        break;
      case NEON_SRHADD:
        add(vf, rd, rn, rm).Halve(vf).Round(vf);
        break;
      case NEON_UHSUB:
        sub(vf, rd, rn, rm).Uhalve(vf);
        break;
      case NEON_SHSUB:
        sub(vf, rd, rn, rm).Halve(vf);
        break;
      default:
        UNIMPLEMENTED();
    }
  }
}
4921
4922void Simulator::VisitNEON3SameHP(Instruction* instr) {
4923 NEONFormatDecoder nfd(instr);
4924 SimVRegister& rd = vreg(instr->Rd());
4925 SimVRegister& rn = vreg(instr->Rn());
4926 SimVRegister& rm = vreg(instr->Rm());
4927 VectorFormat vf = nfd.GetVectorFormat(nfd.FPHPFormatMap());
4928 VisitNEON3SameFP(instr->Mask(NEON3SameFPMask) | NEON3SameHPMask, vf, rd, rn,
4929 rm);
4930}
4931
// Simulates the NEON "3 different" instruction group, where the destination
// element width differs from the sources: widening ("long", e.g. SMULL),
// narrowing ("narrow", e.g. ADDHN) and mixed-width ("wide", e.g. SADDW)
// operations, plus the polynomial multiplies.
void Simulator::VisitNEON3Different(Instruction* instr) {
  NEONFormatDecoder nfd(instr);
  VectorFormat vf = nfd.GetVectorFormat();
  VectorFormat vf_l = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());

  SimVRegister& rd = vreg(instr->Rd());
  SimVRegister& rn = vreg(instr->Rn());
  SimVRegister& rm = vreg(instr->Rm());
  int size = instr->NEONSize();

  switch (instr->Mask(NEON3DifferentMask)) {
    case NEON_PMULL:
      if ((size == 1) || (size == 2)) {  // S/D reserved.
        VisitUnallocated(instr);
      } else {
        // The 64x64->128 bit variant produces a single 1Q element.
        if (size == 3) vf_l = kFormat1Q;
        pmull(vf_l, rd, rn, rm);
      }
      break;
    case NEON_PMULL2:
      if ((size == 1) || (size == 2)) {  // S/D reserved.
        VisitUnallocated(instr);
      } else {
        if (size == 3) vf_l = kFormat1Q;
        pmull2(vf_l, rd, rn, rm);
      }
      break;
    case NEON_UADDL:
      uaddl(vf_l, rd, rn, rm);
      break;
    case NEON_UADDL2:
      uaddl2(vf_l, rd, rn, rm);
      break;
    case NEON_SADDL:
      saddl(vf_l, rd, rn, rm);
      break;
    case NEON_SADDL2:
      saddl2(vf_l, rd, rn, rm);
      break;
    case NEON_USUBL:
      usubl(vf_l, rd, rn, rm);
      break;
    case NEON_USUBL2:
      usubl2(vf_l, rd, rn, rm);
      break;
    case NEON_SSUBL:
      ssubl(vf_l, rd, rn, rm);
      break;
    case NEON_SSUBL2:
      ssubl2(vf_l, rd, rn, rm);
      break;
    case NEON_SABAL:
      sabal(vf_l, rd, rn, rm);
      break;
    case NEON_SABAL2:
      sabal2(vf_l, rd, rn, rm);
      break;
    case NEON_UABAL:
      uabal(vf_l, rd, rn, rm);
      break;
    case NEON_UABAL2:
      uabal2(vf_l, rd, rn, rm);
      break;
    case NEON_SABDL:
      sabdl(vf_l, rd, rn, rm);
      break;
    case NEON_SABDL2:
      sabdl2(vf_l, rd, rn, rm);
      break;
    case NEON_UABDL:
      uabdl(vf_l, rd, rn, rm);
      break;
    case NEON_UABDL2:
      uabdl2(vf_l, rd, rn, rm);
      break;
    case NEON_SMLAL:
      smlal(vf_l, rd, rn, rm);
      break;
    case NEON_SMLAL2:
      smlal2(vf_l, rd, rn, rm);
      break;
    case NEON_UMLAL:
      umlal(vf_l, rd, rn, rm);
      break;
    case NEON_UMLAL2:
      umlal2(vf_l, rd, rn, rm);
      break;
    case NEON_SMLSL:
      smlsl(vf_l, rd, rn, rm);
      break;
    case NEON_SMLSL2:
      smlsl2(vf_l, rd, rn, rm);
      break;
    case NEON_UMLSL:
      umlsl(vf_l, rd, rn, rm);
      break;
    case NEON_UMLSL2:
      umlsl2(vf_l, rd, rn, rm);
      break;
    case NEON_SMULL:
      smull(vf_l, rd, rn, rm);
      break;
    case NEON_SMULL2:
      smull2(vf_l, rd, rn, rm);
      break;
    case NEON_UMULL:
      umull(vf_l, rd, rn, rm);
      break;
    case NEON_UMULL2:
      umull2(vf_l, rd, rn, rm);
      break;
    case NEON_SQDMLAL:
      sqdmlal(vf_l, rd, rn, rm);
      break;
    case NEON_SQDMLAL2:
      sqdmlal2(vf_l, rd, rn, rm);
      break;
    case NEON_SQDMLSL:
      sqdmlsl(vf_l, rd, rn, rm);
      break;
    case NEON_SQDMLSL2:
      sqdmlsl2(vf_l, rd, rn, rm);
      break;
    case NEON_SQDMULL:
      sqdmull(vf_l, rd, rn, rm);
      break;
    case NEON_SQDMULL2:
      sqdmull2(vf_l, rd, rn, rm);
      break;
    case NEON_UADDW:
      uaddw(vf_l, rd, rn, rm);
      break;
    case NEON_UADDW2:
      uaddw2(vf_l, rd, rn, rm);
      break;
    case NEON_SADDW:
      saddw(vf_l, rd, rn, rm);
      break;
    case NEON_SADDW2:
      saddw2(vf_l, rd, rn, rm);
      break;
    case NEON_USUBW:
      usubw(vf_l, rd, rn, rm);
      break;
    case NEON_USUBW2:
      usubw2(vf_l, rd, rn, rm);
      break;
    case NEON_SSUBW:
      ssubw(vf_l, rd, rn, rm);
      break;
    case NEON_SSUBW2:
      ssubw2(vf_l, rd, rn, rm);
      break;
    case NEON_ADDHN:
      addhn(vf, rd, rn, rm);
      break;
    case NEON_ADDHN2:
      addhn2(vf, rd, rn, rm);
      break;
    case NEON_RADDHN:
      raddhn(vf, rd, rn, rm);
      break;
    case NEON_RADDHN2:
      raddhn2(vf, rd, rn, rm);
      break;
    case NEON_SUBHN:
      subhn(vf, rd, rn, rm);
      break;
    case NEON_SUBHN2:
      subhn2(vf, rd, rn, rm);
      break;
    case NEON_RSUBHN:
      rsubhn(vf, rd, rn, rm);
      break;
    case NEON_RSUBHN2:
      rsubhn2(vf, rd, rn, rm);
      break;
    default:
      UNIMPLEMENTED();
  }
}
5113
5114void Simulator::VisitNEON3Extension(Instruction* instr) {
5115 NEONFormatDecoder nfd(instr);
5116 SimVRegister& rd = vreg(instr->Rd());
5117 SimVRegister& rm = vreg(instr->Rm());
5118 SimVRegister& rn = vreg(instr->Rn());
5119 VectorFormat vf = nfd.GetVectorFormat();
5120
5121 switch (instr->Mask(NEON3ExtensionMask)) {
5122 case NEON_SDOT:
5123 if (vf == kFormat4S || vf == kFormat2S) {
5124 sdot(vf, rd, rn, rm);
5125 } else {
5126 VisitUnallocated(instr);
5127 }
5128
5129 break;
5130 default:
5131 UNIMPLEMENTED();
5132 }
5133}
5134
5135void Simulator::VisitNEONAcrossLanes(Instruction* instr) {
5136 NEONFormatDecoder nfd(instr);
5137
5138 SimVRegister& rd = vreg(instr->Rd());
5139 SimVRegister& rn = vreg(instr->Rn());
5140
5141 // The input operand's VectorFormat is passed for these instructions.
5142 if (instr->Mask(NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) {
5143 VectorFormat vf = nfd.GetVectorFormat(nfd.FPFormatMap());
5144
5145 switch (instr->Mask(NEONAcrossLanesFPMask)) {
5146 case NEON_FMAXV:
5147 fmaxv(vf, rd, rn);
5148 break;
5149 case NEON_FMINV:
5150 fminv(vf, rd, rn);
5151 break;
5152 case NEON_FMAXNMV:
5153 fmaxnmv(vf, rd, rn);
5154 break;
5155 case NEON_FMINNMV:
5156 fminnmv(vf, rd, rn);
5157 break;
5158 default:
5159 UNIMPLEMENTED();
5160 }
5161 } else {
5162 VectorFormat vf = nfd.GetVectorFormat();
5163
5164 switch (instr->Mask(NEONAcrossLanesMask)) {
5165 case NEON_ADDV:
5166 addv(vf, rd, rn);
5167 break;
5168 case NEON_SMAXV:
5169 smaxv(vf, rd, rn);
5170 break;
5171 case NEON_SMINV:
5172 sminv(vf, rd, rn);
5173 break;
5174 case NEON_UMAXV:
5175 umaxv(vf, rd, rn);
5176 break;
5177 case NEON_UMINV:
5178 uminv(vf, rd, rn);
5179 break;
5180 case NEON_SADDLV:
5181 saddlv(vf, rd, rn);
5182 break;
5183 case NEON_UADDLV:
5184 uaddlv(vf, rd, rn);
5185 break;
5186 default:
5187 UNIMPLEMENTED();
5188 }
5189 }
5190}
5191
5192void Simulator::VisitNEONByIndexedElement(Instruction* instr) {
5193 NEONFormatDecoder nfd(instr);
5194 VectorFormat vf_r = nfd.GetVectorFormat();
5195 VectorFormat vf = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());
5196
5197 SimVRegister& rd = vreg(instr->Rd());
5198 SimVRegister& rn = vreg(instr->Rn());
5199
5200 ByElementOp Op = nullptr;
5201
5202 int rm_reg = instr->Rm();
5203 int index = (instr->NEONH() << 1) | instr->NEONL();
5204 if (instr->NEONSize() == 1) {
5205 rm_reg &= 0xF;
5206 index = (index << 1) | instr->NEONM();
5207 }
5208
5209 switch (instr->Mask(NEONByIndexedElementMask)) {
5210 case NEON_MUL_byelement:
5211 Op = &Simulator::mul;
5212 vf = vf_r;
5213 break;
5214 case NEON_MLA_byelement:
5215 Op = &Simulator::mla;
5216 vf = vf_r;
5217 break;
5218 case NEON_MLS_byelement:
5219 Op = &Simulator::mls;
5220 vf = vf_r;
5221 break;
5223 Op = &Simulator::sqdmulh;
5224 vf = vf_r;
5225 break;
5227 Op = &Simulator::sqrdmulh;
5228 vf = vf_r;
5229 break;
5231 if (instr->Mask(NEON_Q)) {
5232 Op = &Simulator::smull2;
5233 } else {
5234 Op = &Simulator::smull;
5235 }
5236 break;
5238 if (instr->Mask(NEON_Q)) {
5239 Op = &Simulator::umull2;
5240 } else {
5241 Op = &Simulator::umull;
5242 }
5243 break;
5245 if (instr->Mask(NEON_Q)) {
5246 Op = &Simulator::smlal2;
5247 } else {
5248 Op = &Simulator::smlal;
5249 }
5250 break;
5252 if (instr->Mask(NEON_Q)) {
5253 Op = &Simulator::umlal2;
5254 } else {
5255 Op = &Simulator::umlal;
5256 }
5257 break;
5259 if (instr->Mask(NEON_Q)) {
5260 Op = &Simulator::smlsl2;
5261 } else {
5262 Op = &Simulator::smlsl;
5263 }
5264 break;
5266 if (instr->Mask(NEON_Q)) {
5267 Op = &Simulator::umlsl2;
5268 } else {
5269 Op = &Simulator::umlsl;
5270 }
5271 break;
5273 if (instr->Mask(NEON_Q)) {
5274 Op = &Simulator::sqdmull2;
5275 } else {
5276 Op = &Simulator::sqdmull;
5277 }
5278 break;
5280 if (instr->Mask(NEON_Q)) {
5281 Op = &Simulator::sqdmlal2;
5282 } else {
5283 Op = &Simulator::sqdmlal;
5284 }
5285 break;
5287 if (instr->Mask(NEON_Q)) {
5288 Op = &Simulator::sqdmlsl2;
5289 } else {
5290 Op = &Simulator::sqdmlsl;
5291 }
5292 break;
5293 default:
5294 index = instr->NEONH();
5295 if ((instr->FPType() & 1) == 0) {
5296 index = (index << 1) | instr->NEONL();
5297 }
5298
5299 vf = nfd.GetVectorFormat(nfd.FPFormatMap());
5300
5301 switch (instr->Mask(NEONByIndexedElementFPMask)) {
5303 Op = &Simulator::fmul;
5304 break;
5306 Op = &Simulator::fmla;
5307 break;
5309 Op = &Simulator::fmls;
5310 break;
5312 Op = &Simulator::fmulx;
5313 break;
5314 default:
5315 UNIMPLEMENTED();
5316 }
5317 }
5318
5319 (this->*Op)(vf, rd, rn, vreg(rm_reg), index);
5320}
5321
5322void Simulator::VisitNEONCopy(Instruction* instr) {
5323 NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularFormatMap());
5324 VectorFormat vf = nfd.GetVectorFormat();
5325
5326 SimVRegister& rd = vreg(instr->Rd());
5327 SimVRegister& rn = vreg(instr->Rn());
5328 int imm5 = instr->ImmNEON5();
5329 int lsb = LowestSetBitPosition(imm5);
5330 int reg_index = imm5 >> lsb;
5331
5332 if (instr->Mask(NEONCopyInsElementMask) == NEON_INS_ELEMENT) {
5333 int imm4 = instr->ImmNEON4();
5334 DCHECK_GE(lsb, 1);
5335 int rn_index = imm4 >> (lsb - 1);
5336 ins_element(vf, rd, reg_index, rn, rn_index);
5337 } else if (instr->Mask(NEONCopyInsGeneralMask) == NEON_INS_GENERAL) {
5338 ins_immediate(vf, rd, reg_index, xreg(instr->Rn()));
5339 } else if (instr->Mask(NEONCopyUmovMask) == NEON_UMOV) {
5340 uint64_t value = LogicVRegister(rn).Uint(vf, reg_index);
5341 value &= MaxUintFromFormat(vf);
5342 set_xreg(instr->Rd(), value);
5343 } else if (instr->Mask(NEONCopyUmovMask) == NEON_SMOV) {
5344 int64_t value = LogicVRegister(rn).Int(vf, reg_index);
5345 if (instr->NEONQ()) {
5346 set_xreg(instr->Rd(), value);
5347 } else {
5348 DCHECK(is_int32(value));
5349 set_wreg(instr->Rd(), static_cast<int32_t>(value));
5350 }
5351 } else if (instr->Mask(NEONCopyDupElementMask) == NEON_DUP_ELEMENT) {
5352 dup_element(vf, rd, rn, reg_index);
5353 } else if (instr->Mask(NEONCopyDupGeneralMask) == NEON_DUP_GENERAL) {
5354 dup_immediate(vf, rd, xreg(instr->Rn()));
5355 } else {
5356 UNIMPLEMENTED();
5357 }
5358}
5359
5360void Simulator::VisitNEONExtract(Instruction* instr) {
5361 NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap());
5362 VectorFormat vf = nfd.GetVectorFormat();
5363 SimVRegister& rd = vreg(instr->Rd());
5364 SimVRegister& rn = vreg(instr->Rn());
5365 SimVRegister& rm = vreg(instr->Rm());
5366 if (instr->Mask(NEONExtractMask) == NEON_EXT) {
5367 int index = instr->ImmNEONExt();
5368 ext(vf, rd, rn, rm, index);
5369 } else {
5370 UNIMPLEMENTED();
5371 }
5372}
5373
// Simulates the NEON "load/store multiple structures" family: LD1-LD4 and
// ST1-ST4 (with de-interleaving for LD2/LD3/LD4), in both Offset and
// PostIndex addressing modes. The multi-register LD1/ST1 forms share code
// via the deliberate fall-through ladders below.
void Simulator::NEONLoadStoreMultiStructHelper(const Instruction* instr,
                                               AddrMode addr_mode) {
  NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
  VectorFormat vf = nfd.GetVectorFormat();

  uint64_t addr_base = xreg(instr->Rn(), Reg31IsStackPointer);
  int reg_size = RegisterSizeInBytesFromFormat(vf);

  // Up to four consecutive vector registers (wrapping at the register-file
  // boundary) and the address each register's data starts at.
  int reg[4];
  uint64_t addr[4];
  for (int i = 0; i < 4; i++) {
    reg[i] = (instr->Rt() + i) % kNumberOfVRegisters;
    addr[i] = addr_base + (i * reg_size);
  }
  // Number of registers actually transferred; adjusted by each case below.
  int count = 1;
  // True for loads (log register reads); cleared by the store cases.
  bool log_read = true;

  // Bit 23 determines whether this is an offset or post-index addressing mode.
  // In offset mode, bits 20 to 16 should be zero; these bits encode the
  // register of immediate in post-index mode.
  if ((instr->Bit(23) == 0) && (instr->Bits(20, 16) != 0)) {
    UNREACHABLE();
  }

  // We use the PostIndex mask here, as it works in this case for both Offset
  // and PostIndex addressing.
  switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) {
    // LD1 {Vt..Vt+n}: each wider form transfers its highest register and
    // falls through to the next narrower form for the remaining ones.
    case NEON_LD1_4v:
    case NEON_LD1_4v_post:
      ld1(vf, vreg(reg[3]), addr[3]);
      count++;
      [[fallthrough]];
    case NEON_LD1_3v:
    case NEON_LD1_3v_post:
      ld1(vf, vreg(reg[2]), addr[2]);
      count++;
      [[fallthrough]];
    case NEON_LD1_2v:
    case NEON_LD1_2v_post:
      ld1(vf, vreg(reg[1]), addr[1]);
      count++;
      [[fallthrough]];
    case NEON_LD1_1v:
    case NEON_LD1_1v_post:
      ld1(vf, vreg(reg[0]), addr[0]);
      break;
    // ST1 {Vt..Vt+n}: same fall-through structure as the LD1 ladder above.
    case NEON_ST1_4v:
    case NEON_ST1_4v_post:
      st1(vf, vreg(reg[3]), addr[3]);
      count++;
      [[fallthrough]];
    case NEON_ST1_3v:
    case NEON_ST1_3v_post:
      st1(vf, vreg(reg[2]), addr[2]);
      count++;
      [[fallthrough]];
    case NEON_ST1_2v:
    case NEON_ST1_2v_post:
      st1(vf, vreg(reg[1]), addr[1]);
      count++;
      [[fallthrough]];
    case NEON_ST1_1v:
    case NEON_ST1_1v_post:
      st1(vf, vreg(reg[0]), addr[0]);
      log_read = false;
      break;
    // LD2/ST2, LD3/ST3, LD4/ST4: interleaving transfers of 2-4 registers.
    case NEON_LD2_post:
    case NEON_LD2:
      ld2(vf, vreg(reg[0]), vreg(reg[1]), addr[0]);
      count = 2;
      break;
    case NEON_ST2:
    case NEON_ST2_post:
      st2(vf, vreg(reg[0]), vreg(reg[1]), addr[0]);
      count = 2;
      log_read = false;
      break;
    case NEON_LD3_post:
    case NEON_LD3:
      ld3(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), addr[0]);
      count = 3;
      break;
    case NEON_ST3:
    case NEON_ST3_post:
      st3(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), addr[0]);
      count = 3;
      log_read = false;
      break;
    case NEON_LD4_post:
    case NEON_LD4:
      ld4(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), vreg(reg[3]), addr[0]);
      count = 4;
      break;
    case NEON_ST4:
    case NEON_ST4_post:
      st4(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), vreg(reg[3]), addr[0]);
      count = 4;
      log_read = false;
      break;
    default:
      UNIMPLEMENTED();
  }

  // Notify the exclusive-access monitors about the load or store.
  {
    GlobalMonitor::SimulatorMutex lock_guard(global_monitor_);
    if (log_read) {
      local_monitor_.NotifyLoad();
    } else {
      local_monitor_.NotifyStore();
      global_monitor_->NotifyStore_Locked(&global_monitor_processor_);
    }
  }

  // Explicitly log the register update whilst we have type information.
  for (int i = 0; i < count; i++) {
    // For de-interleaving loads, only print the base address.
    int lane_size = LaneSizeInBytesFromFormat(vf);
    PrintRegisterFormat format = GetPrintRegisterFormatTryFP(
        GetPrintRegisterFormatForSize(reg_size, lane_size));
    if (log_read) {
      LogVRead(addr_base, reg[i], format);
    } else {
      LogVWrite(addr_base, reg[i], format);
    }
  }

  if (addr_mode == PostIndex) {
    int rm = instr->Rm();
    // The immediate post index addressing mode is indicated by rm = 31.
    // The immediate is implied by the number of vector registers used.
    addr_base +=
        (rm == 31) ? RegisterSizeInBytesFromFormat(vf) * count : xreg(rm);
    set_xreg(instr->Rn(), addr_base);
  } else {
    DCHECK_EQ(addr_mode, Offset);
  }
}
5511
// LD1-LD4 / ST1-ST4 (multiple structures), no-writeback (offset) form.
void Simulator::VisitNEONLoadStoreMultiStruct(Instruction* instr) {
  NEONLoadStoreMultiStructHelper(instr, Offset);
}
5515
// LD1-LD4 / ST1-ST4 (multiple structures), post-index (writeback) form.
void Simulator::VisitNEONLoadStoreMultiStructPostIndex(Instruction* instr) {
  NEONLoadStoreMultiStructHelper(instr, PostIndex);
}
5519
5520void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
5521 AddrMode addr_mode) {
5522 uint64_t addr = xreg(instr->Rn(), Reg31IsStackPointer);
5523 int rt = instr->Rt();
5524
5525 // Bit 23 determines whether this is an offset or post-index addressing mode.
5526 // In offset mode, bits 20 to 16 should be zero; these bits encode the
5527 // register of immediate in post-index mode.
5528 DCHECK_IMPLIES(instr->Bit(23) == 0, instr->Bits(20, 16) == 0);
5529
5530 bool do_load = false;
5531
5532 NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
5533 VectorFormat vf_t = nfd.GetVectorFormat();
5534
5536 // We use the PostIndex mask here, as it works in this case for both Offset
5537 // and PostIndex addressing.
5538 switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) {
5539 case NEON_LD1_b:
5540 case NEON_LD1_b_post:
5541 case NEON_LD2_b:
5542 case NEON_LD2_b_post:
5543 case NEON_LD3_b:
5544 case NEON_LD3_b_post:
5545 case NEON_LD4_b:
5546 case NEON_LD4_b_post:
5547 do_load = true;
5548 [[fallthrough]];
5549 case NEON_ST1_b:
5550 case NEON_ST1_b_post:
5551 case NEON_ST2_b:
5552 case NEON_ST2_b_post:
5553 case NEON_ST3_b:
5554 case NEON_ST3_b_post:
5555 case NEON_ST4_b:
5556 case NEON_ST4_b_post:
5557 break;
5558
5559 case NEON_LD1_h:
5560 case NEON_LD1_h_post:
5561 case NEON_LD2_h:
5562 case NEON_LD2_h_post:
5563 case NEON_LD3_h:
5564 case NEON_LD3_h_post:
5565 case NEON_LD4_h:
5566 case NEON_LD4_h_post:
5567 do_load = true;
5568 [[fallthrough]];
5569 case NEON_ST1_h:
5570 case NEON_ST1_h_post:
5571 case NEON_ST2_h:
5572 case NEON_ST2_h_post:
5573 case NEON_ST3_h:
5574 case NEON_ST3_h_post:
5575 case NEON_ST4_h:
5576 case NEON_ST4_h_post:
5577 vf = kFormat8H;
5578 break;
5579
5580 case NEON_LD1_s:
5581 case NEON_LD1_s_post:
5582 case NEON_LD2_s:
5583 case NEON_LD2_s_post:
5584 case NEON_LD3_s:
5585 case NEON_LD3_s_post:
5586 case NEON_LD4_s:
5587 case NEON_LD4_s_post:
5588 do_load = true;
5589 [[fallthrough]];
5590 case NEON_ST1_s:
5591 case NEON_ST1_s_post:
5592 case NEON_ST2_s:
5593 case NEON_ST2_s_post:
5594 case NEON_ST3_s:
5595 case NEON_ST3_s_post:
5596 case NEON_ST4_s:
5597 case NEON_ST4_s_post: {
5598 static_assert((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d,
5599 "LSB of size distinguishes S and D registers.");
5600 static_assert(
5601 (NEON_LD1_s_post | (1 << NEONLSSize_offset)) == NEON_LD1_d_post,
5602 "LSB of size distinguishes S and D registers.");
5603 static_assert((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d,
5604 "LSB of size distinguishes S and D registers.");
5605 static_assert(
5606 (NEON_ST1_s_post | (1 << NEONLSSize_offset)) == NEON_ST1_d_post,
5607 "LSB of size distinguishes S and D registers.");
5608 vf = ((instr->NEONLSSize() & 1) == 0) ? kFormat4S : kFormat2D;
5609 break;
5610 }
5611
5612 case NEON_LD1R:
5613 case NEON_LD1R_post: {
5614 vf = vf_t;
5615 if (!ProbeMemory(addr, LaneSizeInBytesFromFormat(vf))) return;
5616 ld1r(vf, vreg(rt), addr);
5617 do_load = true;
5618 break;
5619 }
5620
5621 case NEON_LD2R:
5622 case NEON_LD2R_post: {
5623 vf = vf_t;
5624 if (!ProbeMemory(addr, 2 * LaneSizeInBytesFromFormat(vf))) return;
5625 int rt2 = (rt + 1) % kNumberOfVRegisters;
5626 ld2r(vf, vreg(rt), vreg(rt2), addr);
5627 do_load = true;
5628 break;
5629 }
5630
5631 case NEON_LD3R:
5632 case NEON_LD3R_post: {
5633 vf = vf_t;
5634 if (!ProbeMemory(addr, 3 * LaneSizeInBytesFromFormat(vf))) return;
5635 int rt2 = (rt + 1) % kNumberOfVRegisters;
5636 int rt3 = (rt2 + 1) % kNumberOfVRegisters;
5637 ld3r(vf, vreg(rt), vreg(rt2), vreg(rt3), addr);
5638 do_load = true;
5639 break;
5640 }
5641
5642 case NEON_LD4R:
5643 case NEON_LD4R_post: {
5644 vf = vf_t;
5645 if (!ProbeMemory(addr, 4 * LaneSizeInBytesFromFormat(vf))) return;
5646 int rt2 = (rt + 1) % kNumberOfVRegisters;
5647 int rt3 = (rt2 + 1) % kNumberOfVRegisters;
5648 int rt4 = (rt3 + 1) % kNumberOfVRegisters;
5649 ld4r(vf, vreg(rt), vreg(rt2), vreg(rt3), vreg(rt4), addr);
5650 do_load = true;
5651 break;
5652 }
5653 default:
5654 UNIMPLEMENTED();
5655 }
5656
5657 PrintRegisterFormat print_format =
5658 GetPrintRegisterFormatTryFP(GetPrintRegisterFormat(vf));
5659 // Make sure that the print_format only includes a single lane.
5660 print_format =
5661 static_cast<PrintRegisterFormat>(print_format & ~kPrintRegAsVectorMask);
5662
5663 int esize = LaneSizeInBytesFromFormat(vf);
5664 int index_shift = LaneSizeInBytesLog2FromFormat(vf);
5665 int lane = instr->NEONLSIndex(index_shift);
5666 int scale = 0;
5667 int rt2 = (rt + 1) % kNumberOfVRegisters;
5668 int rt3 = (rt2 + 1) % kNumberOfVRegisters;
5669 int rt4 = (rt3 + 1) % kNumberOfVRegisters;
5670 switch (instr->Mask(NEONLoadStoreSingleLenMask)) {
5672 scale = 1;
5673 if (!ProbeMemory(addr, scale * esize)) return;
5674 if (do_load) {
5675 ld1(vf, vreg(rt), lane, addr);
5676 LogVRead(addr, rt, print_format, lane);
5677 } else {
5678 st1(vf, vreg(rt), lane, addr);
5679 LogVWrite(addr, rt, print_format, lane);
5680 }
5681 break;
5683 scale = 2;
5684 if (!ProbeMemory(addr, scale * esize)) return;
5685 if (do_load) {
5686 ld2(vf, vreg(rt), vreg(rt2), lane, addr);
5687 LogVRead(addr, rt, print_format, lane);
5688 LogVRead(addr + esize, rt2, print_format, lane);
5689 } else {
5690 st2(vf, vreg(rt), vreg(rt2), lane, addr);
5691 LogVWrite(addr, rt, print_format, lane);
5692 LogVWrite(addr + esize, rt2, print_format, lane);
5693 }
5694 break;
5696 scale = 3;
5697 if (!ProbeMemory(addr, scale * esize)) return;
5698 if (do_load) {
5699 ld3(vf, vreg(rt), vreg(rt2), vreg(rt3), lane, addr);
5700 LogVRead(addr, rt, print_format, lane);
5701 LogVRead(addr + esize, rt2, print_format, lane);
5702 LogVRead(addr + (2 * esize), rt3, print_format, lane);
5703 } else {
5704 st3(vf, vreg(rt), vreg(rt2), vreg(rt3), lane, addr);
5705 LogVWrite(addr, rt, print_format, lane);
5706 LogVWrite(addr + esize, rt2, print_format, lane);
5707 LogVWrite(addr + (2 * esize), rt3, print_format, lane);
5708 }
5709 break;
5711 scale = 4;
5712 if (!ProbeMemory(addr, scale * esize)) return;
5713 if (do_load) {
5714 ld4(vf, vreg(rt), vreg(rt2), vreg(rt3), vreg(rt4), lane, addr);
5715 LogVRead(addr, rt, print_format, lane);
5716 LogVRead(addr + esize, rt2, print_format, lane);
5717 LogVRead(addr + (2 * esize), rt3, print_format, lane);
5718 LogVRead(addr + (3 * esize), rt4, print_format, lane);
5719 } else {
5720 st4(vf, vreg(rt), vreg(rt2), vreg(rt3), vreg(rt4), lane, addr);
5721 LogVWrite(addr, rt, print_format, lane);
5722 LogVWrite(addr + esize, rt2, print_format, lane);
5723 LogVWrite(addr + (2 * esize), rt3, print_format, lane);
5724 LogVWrite(addr + (3 * esize), rt4, print_format, lane);
5725 }
5726 break;
5727 default:
5728 UNIMPLEMENTED();
5729 }
5730
5731 {
5732 GlobalMonitor::SimulatorMutex lock_guard(global_monitor_);
5733 if (do_load) {
5734 local_monitor_.NotifyLoad();
5735 } else {
5736 local_monitor_.NotifyStore();
5737 global_monitor_->NotifyStore_Locked(&global_monitor_processor_);
5738 }
5739 }
5740
5741 if (addr_mode == PostIndex) {
5742 int rm = instr->Rm();
5743 int lane_size = LaneSizeInBytesFromFormat(vf);
5744 set_xreg(instr->Rn(), addr + ((rm == 31) ? (scale * lane_size) : xreg(rm)));
5745 }
5746}
5747
// LD1-LD4 / ST1-ST4 (single structure) and LD1R-LD4R, no-writeback form.
void Simulator::VisitNEONLoadStoreSingleStruct(Instruction* instr) {
  NEONLoadStoreSingleStructHelper(instr, Offset);
}
5751
// LD1-LD4 / ST1-ST4 (single structure) and LD1R-LD4R, post-index form.
void Simulator::VisitNEONLoadStoreSingleStructPostIndex(Instruction* instr) {
  NEONLoadStoreSingleStructHelper(instr, PostIndex);
}
5755
5756void Simulator::VisitNEONModifiedImmediate(Instruction* instr) {
5757 SimVRegister& rd = vreg(instr->Rd());
5758 int cmode = instr->NEONCmode();
5759 int cmode_3_1 = (cmode >> 1) & 7;
5760 int cmode_3 = (cmode >> 3) & 1;
5761 int cmode_2 = (cmode >> 2) & 1;
5762 int cmode_1 = (cmode >> 1) & 1;
5763 int cmode_0 = cmode & 1;
5764 int q = instr->NEONQ();
5765 int op_bit = instr->NEONModImmOp();
5766 uint64_t imm8 = instr->ImmNEONabcdefgh();
5767
5768 // Find the format and immediate value
5769 uint64_t imm = 0;
5771 switch (cmode_3_1) {
5772 case 0x0:
5773 case 0x1:
5774 case 0x2:
5775 case 0x3:
5776 vform = (q == 1) ? kFormat4S : kFormat2S;
5777 imm = imm8 << (8 * cmode_3_1);
5778 break;
5779 case 0x4:
5780 case 0x5:
5781 vform = (q == 1) ? kFormat8H : kFormat4H;
5782 imm = imm8 << (8 * cmode_1);
5783 break;
5784 case 0x6:
5785 vform = (q == 1) ? kFormat4S : kFormat2S;
5786 if (cmode_0 == 0) {
5787 imm = imm8 << 8 | 0x000000FF;
5788 } else {
5789 imm = imm8 << 16 | 0x0000FFFF;
5790 }
5791 break;
5792 case 0x7:
5793 if (cmode_0 == 0 && op_bit == 0) {
5794 vform = q ? kFormat16B : kFormat8B;
5795 imm = imm8;
5796 } else if (cmode_0 == 0 && op_bit == 1) {
5797 vform = q ? kFormat2D : kFormat1D;
5798 imm = 0;
5799 for (int i = 0; i < 8; ++i) {
5800 if (imm8 & (1ULL << i)) {
5801 imm |= (UINT64_C(0xFF) << (8 * i));
5802 }
5803 }
5804 } else { // cmode_0 == 1, cmode == 0xF.
5805 if (op_bit == 0) {
5806 vform = q ? kFormat4S : kFormat2S;
5807 imm = base::bit_cast<uint32_t>(instr->ImmNEONFP32());
5808 } else if (q == 1) {
5809 vform = kFormat2D;
5810 imm = base::bit_cast<uint64_t>(instr->ImmNEONFP64());
5811 } else {
5812 DCHECK((q == 0) && (op_bit == 1) && (cmode == 0xF));
5813 VisitUnallocated(instr);
5814 }
5815 }
5816 break;
5817 default:
5818 UNREACHABLE();
5819 }
5820
5821 // Find the operation.
5823 if (cmode_3 == 0) {
5824 if (cmode_0 == 0) {
5826 } else { // cmode<0> == '1'
5828 }
5829 } else { // cmode<3> == '1'
5830 if (cmode_2 == 0) {
5831 if (cmode_0 == 0) {
5833 } else { // cmode<0> == '1'
5835 }
5836 } else { // cmode<2> == '1'
5837 if (cmode_1 == 0) {
5839 } else { // cmode<1> == '1'
5840 if (cmode_0 == 0) {
5842 } else { // cmode<0> == '1'
5844 }
5845 }
5846 }
5847 }
5848
5849 // Call the logic function.
5850 switch (op) {
5852 orr(vform, rd, rd, imm);
5853 break;
5855 bic(vform, rd, rd, imm);
5856 break;
5858 movi(vform, rd, imm);
5859 break;
5861 mvni(vform, rd, imm);
5862 break;
5863 default:
5864 VisitUnimplemented(instr);
5865 }
5866}
5867
5868void Simulator::VisitNEONScalar2RegMisc(Instruction* instr) {
5869 NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
5870 VectorFormat vf = nfd.GetVectorFormat();
5871
5872 SimVRegister& rd = vreg(instr->Rd());
5873 SimVRegister& rn = vreg(instr->Rn());
5874
5875 if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_scalar_opcode) {
5876 // These instructions all use a two bit size field, except NOT and RBIT,
5877 // which use the field to encode the operation.
5878 switch (instr->Mask(NEONScalar2RegMiscMask)) {
5880 cmp(vf, rd, rn, 0, eq);
5881 break;
5883 cmp(vf, rd, rn, 0, ge);
5884 break;
5886 cmp(vf, rd, rn, 0, gt);
5887 break;
5889 cmp(vf, rd, rn, 0, lt);
5890 break;
5892 cmp(vf, rd, rn, 0, le);
5893 break;
5894 case NEON_ABS_scalar:
5895 abs(vf, rd, rn);
5896 break;
5897 case NEON_SQABS_scalar:
5898 abs(vf, rd, rn).SignedSaturate(vf);
5899 break;
5900 case NEON_NEG_scalar:
5901 neg(vf, rd, rn);
5902 break;
5903 case NEON_SQNEG_scalar:
5904 neg(vf, rd, rn).SignedSaturate(vf);
5905 break;
5906 case NEON_SUQADD_scalar:
5907 suqadd(vf, rd, rn);
5908 break;
5909 case NEON_USQADD_scalar:
5910 usqadd(vf, rd, rn);
5911 break;
5912 default:
5913 UNIMPLEMENTED();
5914 }
5915 } else {
5916 VectorFormat fpf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
5917 FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
5918
5919 // These instructions all use a one bit size field, except SQXTUN, SQXTN
5920 // and UQXTN, which use a two bit size field.
5921 switch (instr->Mask(NEONScalar2RegMiscFPMask)) {
5922 case NEON_FRECPE_scalar:
5923 frecpe(fpf, rd, rn, fpcr_rounding);
5924 break;
5925 case NEON_FRECPX_scalar:
5926 frecpx(fpf, rd, rn);
5927 break;
5929 frsqrte(fpf, rd, rn);
5930 break;
5932 fcmp_zero(fpf, rd, rn, gt);
5933 break;
5935 fcmp_zero(fpf, rd, rn, ge);
5936 break;
5938 fcmp_zero(fpf, rd, rn, eq);
5939 break;
5941 fcmp_zero(fpf, rd, rn, le);
5942 break;
5944 fcmp_zero(fpf, rd, rn, lt);
5945 break;
5946 case NEON_SCVTF_scalar:
5947 scvtf(fpf, rd, rn, 0, fpcr_rounding);
5948 break;
5949 case NEON_UCVTF_scalar:
5950 ucvtf(fpf, rd, rn, 0, fpcr_rounding);
5951 break;
5952 case NEON_FCVTNS_scalar:
5953 fcvts(fpf, rd, rn, FPTieEven);
5954 break;
5955 case NEON_FCVTNU_scalar:
5956 fcvtu(fpf, rd, rn, FPTieEven);
5957 break;
5958 case NEON_FCVTPS_scalar:
5959 fcvts(fpf, rd, rn, FPPositiveInfinity);
5960 break;
5961 case NEON_FCVTPU_scalar:
5962 fcvtu(fpf, rd, rn, FPPositiveInfinity);
5963 break;
5964 case NEON_FCVTMS_scalar:
5965 fcvts(fpf, rd, rn, FPNegativeInfinity);
5966 break;
5967 case NEON_FCVTMU_scalar:
5968 fcvtu(fpf, rd, rn, FPNegativeInfinity);
5969 break;
5970 case NEON_FCVTZS_scalar:
5971 fcvts(fpf, rd, rn, FPZero);
5972 break;
5973 case NEON_FCVTZU_scalar:
5974 fcvtu(fpf, rd, rn, FPZero);
5975 break;
5976 case NEON_FCVTAS_scalar:
5977 fcvts(fpf, rd, rn, FPTieAway);
5978 break;
5979 case NEON_FCVTAU_scalar:
5980 fcvtu(fpf, rd, rn, FPTieAway);
5981 break;
5982 case NEON_FCVTXN_scalar:
5983 // Unlike all of the other FP instructions above, fcvtxn encodes dest
5984 // size S as size<0>=1. There's only one case, so we ignore the form.
5985 DCHECK_EQ(instr->Bit(22), 1);
5986 fcvtxn(kFormatS, rd, rn);
5987 break;
5988 default:
5989 switch (instr->Mask(NEONScalar2RegMiscMask)) {
5990 case NEON_SQXTN_scalar:
5991 sqxtn(vf, rd, rn);
5992 break;
5993 case NEON_UQXTN_scalar:
5994 uqxtn(vf, rd, rn);
5995 break;
5996 case NEON_SQXTUN_scalar:
5997 sqxtun(vf, rd, rn);
5998 break;
5999 default:
6000 UNIMPLEMENTED();
6001 }
6002 }
6003 }
6004}
6005
6006void Simulator::VisitNEONScalar3Diff(Instruction* instr) {
6007 NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap());
6008 VectorFormat vf = nfd.GetVectorFormat();
6009
6010 SimVRegister& rd = vreg(instr->Rd());
6011 SimVRegister& rn = vreg(instr->Rn());
6012 SimVRegister& rm = vreg(instr->Rm());
6013 switch (instr->Mask(NEONScalar3DiffMask)) {
6015 sqdmlal(vf, rd, rn, rm);
6016 break;
6018 sqdmlsl(vf, rd, rn, rm);
6019 break;
6021 sqdmull(vf, rd, rn, rm);
6022 break;
6023 default:
6024 UNIMPLEMENTED();
6025 }
6026}
6027
6028void Simulator::VisitNEONScalar3Same(Instruction* instr) {
6029 NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
6030 VectorFormat vf = nfd.GetVectorFormat();
6031
6032 SimVRegister& rd = vreg(instr->Rd());
6033 SimVRegister& rn = vreg(instr->Rn());
6034 SimVRegister& rm = vreg(instr->Rm());
6035
6036 if (instr->Mask(NEONScalar3SameFPFMask) == NEONScalar3SameFPFixed) {
6037 vf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
6038 switch (instr->Mask(NEONScalar3SameFPMask)) {
6039 case NEON_FMULX_scalar:
6040 fmulx(vf, rd, rn, rm);
6041 break;
6042 case NEON_FACGE_scalar:
6043 fabscmp(vf, rd, rn, rm, ge);
6044 break;
6045 case NEON_FACGT_scalar:
6046 fabscmp(vf, rd, rn, rm, gt);
6047 break;
6048 case NEON_FCMEQ_scalar:
6049 fcmp(vf, rd, rn, rm, eq);
6050 break;
6051 case NEON_FCMGE_scalar:
6052 fcmp(vf, rd, rn, rm, ge);
6053 break;
6054 case NEON_FCMGT_scalar:
6055 fcmp(vf, rd, rn, rm, gt);
6056 break;
6057 case NEON_FRECPS_scalar:
6058 frecps(vf, rd, rn, rm);
6059 break;
6061 frsqrts(vf, rd, rn, rm);
6062 break;
6063 case NEON_FABD_scalar:
6064 fabd(vf, rd, rn, rm);
6065 break;
6066 default:
6067 UNIMPLEMENTED();
6068 }
6069 } else {
6070 switch (instr->Mask(NEONScalar3SameMask)) {
6071 case NEON_ADD_scalar:
6072 add(vf, rd, rn, rm);
6073 break;
6074 case NEON_SUB_scalar:
6075 sub(vf, rd, rn, rm);
6076 break;
6077 case NEON_CMEQ_scalar:
6078 cmp(vf, rd, rn, rm, eq);
6079 break;
6080 case NEON_CMGE_scalar:
6081 cmp(vf, rd, rn, rm, ge);
6082 break;
6083 case NEON_CMGT_scalar:
6084 cmp(vf, rd, rn, rm, gt);
6085 break;
6086 case NEON_CMHI_scalar:
6087 cmp(vf, rd, rn, rm, hi);
6088 break;
6089 case NEON_CMHS_scalar:
6090 cmp(vf, rd, rn, rm, hs);
6091 break;
6092 case NEON_CMTST_scalar:
6093 cmptst(vf, rd, rn, rm);
6094 break;
6095 case NEON_USHL_scalar:
6096 ushl(vf, rd, rn, rm);
6097 break;
6098 case NEON_SSHL_scalar:
6099 sshl(vf, rd, rn, rm);
6100 break;
6102 sqdmulh(vf, rd, rn, rm);
6103 break;
6105 sqrdmulh(vf, rd, rn, rm);
6106 break;
6107 case NEON_UQADD_scalar:
6108 add(vf, rd, rn, rm).UnsignedSaturate(vf);
6109 break;
6110 case NEON_SQADD_scalar:
6111 add(vf, rd, rn, rm).SignedSaturate(vf);
6112 break;
6113 case NEON_UQSUB_scalar:
6114 sub(vf, rd, rn, rm).UnsignedSaturate(vf);
6115 break;
6116 case NEON_SQSUB_scalar:
6117 sub(vf, rd, rn, rm).SignedSaturate(vf);
6118 break;
6119 case NEON_UQSHL_scalar:
6120 ushl(vf, rd, rn, rm).UnsignedSaturate(vf);
6121 break;
6122 case NEON_SQSHL_scalar:
6123 sshl(vf, rd, rn, rm).SignedSaturate(vf);
6124 break;
6125 case NEON_URSHL_scalar:
6126 ushl(vf, rd, rn, rm).Round(vf);
6127 break;
6128 case NEON_SRSHL_scalar:
6129 sshl(vf, rd, rn, rm).Round(vf);
6130 break;
6131 case NEON_UQRSHL_scalar:
6132 ushl(vf, rd, rn, rm).Round(vf).UnsignedSaturate(vf);
6133 break;
6134 case NEON_SQRSHL_scalar:
6135 sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf);
6136 break;
6137 default:
6138 UNIMPLEMENTED();
6139 }
6140 }
6141}
6142
6143void Simulator::VisitNEONScalarByIndexedElement(Instruction* instr) {
6144 NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap());
6145 VectorFormat vf = nfd.GetVectorFormat();
6146 VectorFormat vf_r = nfd.GetVectorFormat(nfd.ScalarFormatMap());
6147
6148 SimVRegister& rd = vreg(instr->Rd());
6149 SimVRegister& rn = vreg(instr->Rn());
6150 ByElementOp Op = nullptr;
6151
6152 int rm_reg = instr->Rm();
6153 int index = (instr->NEONH() << 1) | instr->NEONL();
6154 if (instr->NEONSize() == 1) {
6155 rm_reg &= 0xF;
6156 index = (index << 1) | instr->NEONM();
6157 }
6158
6159 switch (instr->Mask(NEONScalarByIndexedElementMask)) {
6161 Op = &Simulator::sqdmull;
6162 break;
6164 Op = &Simulator::sqdmlal;
6165 break;
6167 Op = &Simulator::sqdmlsl;
6168 break;
6170 Op = &Simulator::sqdmulh;
6171 vf = vf_r;
6172 break;
6174 Op = &Simulator::sqrdmulh;
6175 vf = vf_r;
6176 break;
6177 default:
6178 vf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
6179 index = instr->NEONH();
6180 if ((instr->FPType() & 1) == 0) {
6181 index = (index << 1) | instr->NEONL();
6182 }
6183 switch (instr->Mask(NEONScalarByIndexedElementFPMask)) {
6185 Op = &Simulator::fmul;
6186 break;
6188 Op = &Simulator::fmla;
6189 break;
6191 Op = &Simulator::fmls;
6192 break;
6194 Op = &Simulator::fmulx;
6195 break;
6196 default:
6197 UNIMPLEMENTED();
6198 }
6199 }
6200
6201 (this->*Op)(vf, rd, rn, vreg(rm_reg), index);
6202}
6203
6204void Simulator::VisitNEONScalarCopy(Instruction* instr) {
6205 NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularScalarFormatMap());
6206 VectorFormat vf = nfd.GetVectorFormat();
6207
6208 SimVRegister& rd = vreg(instr->Rd());
6209 SimVRegister& rn = vreg(instr->Rn());
6210
6211 if (instr->Mask(NEONScalarCopyMask) == NEON_DUP_ELEMENT_scalar) {
6212 int imm5 = instr->ImmNEON5();
6213 int lsb = LowestSetBitPosition(imm5);
6214 int rn_index = imm5 >> lsb;
6215 dup_element(vf, rd, rn, rn_index);
6216 } else {
6217 UNIMPLEMENTED();
6218 }
6219}
6220
6221void Simulator::VisitNEONScalarPairwise(Instruction* instr) {
6222 NEONFormatDecoder nfd(instr, NEONFormatDecoder::FPScalarFormatMap());
6223 VectorFormat vf = nfd.GetVectorFormat();
6224
6225 SimVRegister& rd = vreg(instr->Rd());
6226 SimVRegister& rn = vreg(instr->Rn());
6227 switch (instr->Mask(NEONScalarPairwiseMask)) {
6228 case NEON_ADDP_scalar:
6229 addp(vf, rd, rn);
6230 break;
6231 case NEON_FADDP_scalar:
6232 faddp(vf, rd, rn);
6233 break;
6234 case NEON_FMAXP_scalar:
6235 fmaxp(vf, rd, rn);
6236 break;
6238 fmaxnmp(vf, rd, rn);
6239 break;
6240 case NEON_FMINP_scalar:
6241 fminp(vf, rd, rn);
6242 break;
6244 fminnmp(vf, rd, rn);
6245 break;
6246 default:
6247 UNIMPLEMENTED();
6248 }
6249}
6250
6251void Simulator::VisitNEONScalarShiftImmediate(Instruction* instr) {
6252 SimVRegister& rd = vreg(instr->Rd());
6253 SimVRegister& rn = vreg(instr->Rn());
6254 FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
6255
6256 static const NEONFormatMap map = {
6257 {22, 21, 20, 19},
6259 NF_D, NF_D, NF_D, NF_D, NF_D}};
6260 NEONFormatDecoder nfd(instr, &map);
6261 VectorFormat vf = nfd.GetVectorFormat();
6262
6263 int highestSetBit = HighestSetBitPosition(instr->ImmNEONImmh());
6264 int immhimmb = instr->ImmNEONImmhImmb();
6265 int right_shift = (16 << highestSetBit) - immhimmb;
6266 int left_shift = immhimmb - (8 << highestSetBit);
6267 switch (instr->Mask(NEONScalarShiftImmediateMask)) {
6268 case NEON_SHL_scalar:
6269 shl(vf, rd, rn, left_shift);
6270 break;
6271 case NEON_SLI_scalar:
6272 sli(vf, rd, rn, left_shift);
6273 break;
6275 sqshl(vf, rd, rn, left_shift);
6276 break;
6278 uqshl(vf, rd, rn, left_shift);
6279 break;
6280 case NEON_SQSHLU_scalar:
6281 sqshlu(vf, rd, rn, left_shift);
6282 break;
6283 case NEON_SRI_scalar:
6284 sri(vf, rd, rn, right_shift);
6285 break;
6286 case NEON_SSHR_scalar:
6287 sshr(vf, rd, rn, right_shift);
6288 break;
6289 case NEON_USHR_scalar:
6290 ushr(vf, rd, rn, right_shift);
6291 break;
6292 case NEON_SRSHR_scalar:
6293 sshr(vf, rd, rn, right_shift).Round(vf);
6294 break;
6295 case NEON_URSHR_scalar:
6296 ushr(vf, rd, rn, right_shift).Round(vf);
6297 break;
6298 case NEON_SSRA_scalar:
6299 ssra(vf, rd, rn, right_shift);
6300 break;
6301 case NEON_USRA_scalar:
6302 usra(vf, rd, rn, right_shift);
6303 break;
6304 case NEON_SRSRA_scalar:
6305 srsra(vf, rd, rn, right_shift);
6306 break;
6307 case NEON_URSRA_scalar:
6308 ursra(vf, rd, rn, right_shift);
6309 break;
6310 case NEON_UQSHRN_scalar:
6311 uqshrn(vf, rd, rn, right_shift);
6312 break;
6314 uqrshrn(vf, rd, rn, right_shift);
6315 break;
6316 case NEON_SQSHRN_scalar:
6317 sqshrn(vf, rd, rn, right_shift);
6318 break;
6320 sqrshrn(vf, rd, rn, right_shift);
6321 break;
6323 sqshrun(vf, rd, rn, right_shift);
6324 break;
6326 sqrshrun(vf, rd, rn, right_shift);
6327 break;
6329 fcvts(vf, rd, rn, FPZero, right_shift);
6330 break;
6332 fcvtu(vf, rd, rn, FPZero, right_shift);
6333 break;
6335 scvtf(vf, rd, rn, right_shift, fpcr_rounding);
6336 break;
6338 ucvtf(vf, rd, rn, right_shift, fpcr_rounding);
6339 break;
6340 default:
6341 UNIMPLEMENTED();
6342 }
6343}
6344
6345void Simulator::VisitNEONShiftImmediate(Instruction* instr) {
6346 SimVRegister& rd = vreg(instr->Rd());
6347 SimVRegister& rn = vreg(instr->Rn());
6348 FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
6349
6350 // 00010->8B, 00011->16B, 001x0->4H, 001x1->8H,
6351 // 01xx0->2S, 01xx1->4S, 1xxx1->2D, all others undefined.
6352 static const NEONFormatMap map = {
6353 {22, 21, 20, 19, 30},
6358 NEONFormatDecoder nfd(instr, &map);
6359 VectorFormat vf = nfd.GetVectorFormat();
6360
6361 // 0001->8H, 001x->4S, 01xx->2D, all others undefined.
6362 static const NEONFormatMap map_l = {
6363 {22, 21, 20, 19},
6365 VectorFormat vf_l = nfd.GetVectorFormat(&map_l);
6366
6367 int highestSetBit = HighestSetBitPosition(instr->ImmNEONImmh());
6368 int immhimmb = instr->ImmNEONImmhImmb();
6369 int right_shift = (16 << highestSetBit) - immhimmb;
6370 int left_shift = immhimmb - (8 << highestSetBit);
6371
6372 switch (instr->Mask(NEONShiftImmediateMask)) {
6373 case NEON_SHL:
6374 shl(vf, rd, rn, left_shift);
6375 break;
6376 case NEON_SLI:
6377 sli(vf, rd, rn, left_shift);
6378 break;
6379 case NEON_SQSHLU:
6380 sqshlu(vf, rd, rn, left_shift);
6381 break;
6382 case NEON_SRI:
6383 sri(vf, rd, rn, right_shift);
6384 break;
6385 case NEON_SSHR:
6386 sshr(vf, rd, rn, right_shift);
6387 break;
6388 case NEON_USHR:
6389 ushr(vf, rd, rn, right_shift);
6390 break;
6391 case NEON_SRSHR:
6392 sshr(vf, rd, rn, right_shift).Round(vf);
6393 break;
6394 case NEON_URSHR:
6395 ushr(vf, rd, rn, right_shift).Round(vf);
6396 break;
6397 case NEON_SSRA:
6398 ssra(vf, rd, rn, right_shift);
6399 break;
6400 case NEON_USRA:
6401 usra(vf, rd, rn, right_shift);
6402 break;
6403 case NEON_SRSRA:
6404 srsra(vf, rd, rn, right_shift);
6405 break;
6406 case NEON_URSRA:
6407 ursra(vf, rd, rn, right_shift);
6408 break;
6409 case NEON_SQSHL_imm:
6410 sqshl(vf, rd, rn, left_shift);
6411 break;
6412 case NEON_UQSHL_imm:
6413 uqshl(vf, rd, rn, left_shift);
6414 break;
6415 case NEON_SCVTF_imm:
6416 scvtf(vf, rd, rn, right_shift, fpcr_rounding);
6417 break;
6418 case NEON_UCVTF_imm:
6419 ucvtf(vf, rd, rn, right_shift, fpcr_rounding);
6420 break;
6421 case NEON_FCVTZS_imm:
6422 fcvts(vf, rd, rn, FPZero, right_shift);
6423 break;
6424 case NEON_FCVTZU_imm:
6425 fcvtu(vf, rd, rn, FPZero, right_shift);
6426 break;
6427 case NEON_SSHLL:
6428 vf = vf_l;
6429 if (instr->Mask(NEON_Q)) {
6430 sshll2(vf, rd, rn, left_shift);
6431 } else {
6432 sshll(vf, rd, rn, left_shift);
6433 }
6434 break;
6435 case NEON_USHLL:
6436 vf = vf_l;
6437 if (instr->Mask(NEON_Q)) {
6438 ushll2(vf, rd, rn, left_shift);
6439 } else {
6440 ushll(vf, rd, rn, left_shift);
6441 }
6442 break;
6443 case NEON_SHRN:
6444 if (instr->Mask(NEON_Q)) {
6445 shrn2(vf, rd, rn, right_shift);
6446 } else {
6447 shrn(vf, rd, rn, right_shift);
6448 }
6449 break;
6450 case NEON_RSHRN:
6451 if (instr->Mask(NEON_Q)) {
6452 rshrn2(vf, rd, rn, right_shift);
6453 } else {
6454 rshrn(vf, rd, rn, right_shift);
6455 }
6456 break;
6457 case NEON_UQSHRN:
6458 if (instr->Mask(NEON_Q)) {
6459 uqshrn2(vf, rd, rn, right_shift);
6460 } else {
6461 uqshrn(vf, rd, rn, right_shift);
6462 }
6463 break;
6464 case NEON_UQRSHRN:
6465 if (instr->Mask(NEON_Q)) {
6466 uqrshrn2(vf, rd, rn, right_shift);
6467 } else {
6468 uqrshrn(vf, rd, rn, right_shift);
6469 }
6470 break;
6471 case NEON_SQSHRN:
6472 if (instr->Mask(NEON_Q)) {
6473 sqshrn2(vf, rd, rn, right_shift);
6474 } else {
6475 sqshrn(vf, rd, rn, right_shift);
6476 }
6477 break;
6478 case NEON_SQRSHRN:
6479 if (instr->Mask(NEON_Q)) {
6480 sqrshrn2(vf, rd, rn, right_shift);
6481 } else {
6482 sqrshrn(vf, rd, rn, right_shift);
6483 }
6484 break;
6485 case NEON_SQSHRUN:
6486 if (instr->Mask(NEON_Q)) {
6487 sqshrun2(vf, rd, rn, right_shift);
6488 } else {
6489 sqshrun(vf, rd, rn, right_shift);
6490 }
6491 break;
6492 case NEON_SQRSHRUN:
6493 if (instr->Mask(NEON_Q)) {
6494 sqrshrun2(vf, rd, rn, right_shift);
6495 } else {
6496 sqrshrun(vf, rd, rn, right_shift);
6497 }
6498 break;
6499 default:
6500 UNIMPLEMENTED();
6501 }
6502}
6503
6504void Simulator::VisitNEONTable(Instruction* instr) {
6505 NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap());
6506 VectorFormat vf = nfd.GetVectorFormat();
6507
6508 SimVRegister& rd = vreg(instr->Rd());
6509 SimVRegister& rn = vreg(instr->Rn());
6510 SimVRegister& rn2 = vreg((instr->Rn() + 1) % kNumberOfVRegisters);
6511 SimVRegister& rn3 = vreg((instr->Rn() + 2) % kNumberOfVRegisters);
6512 SimVRegister& rn4 = vreg((instr->Rn() + 3) % kNumberOfVRegisters);
6513 SimVRegister& rm = vreg(instr->Rm());
6514
6515 switch (instr->Mask(NEONTableMask)) {
6516 case NEON_TBL_1v:
6517 tbl(vf, rd, rn, rm);
6518 break;
6519 case NEON_TBL_2v:
6520 tbl(vf, rd, rn, rn2, rm);
6521 break;
6522 case NEON_TBL_3v:
6523 tbl(vf, rd, rn, rn2, rn3, rm);
6524 break;
6525 case NEON_TBL_4v:
6526 tbl(vf, rd, rn, rn2, rn3, rn4, rm);
6527 break;
6528 case NEON_TBX_1v:
6529 tbx(vf, rd, rn, rm);
6530 break;
6531 case NEON_TBX_2v:
6532 tbx(vf, rd, rn, rn2, rm);
6533 break;
6534 case NEON_TBX_3v:
6535 tbx(vf, rd, rn, rn2, rn3, rm);
6536 break;
6537 case NEON_TBX_4v:
6538 tbx(vf, rd, rn, rn2, rn3, rn4, rm);
6539 break;
6540 default:
6541 UNIMPLEMENTED();
6542 }
6543}
6544
6545void Simulator::VisitNEONSHA3(Instruction* instr) {
6546 NEONFormatDecoder nfd(instr);
6547 VectorFormat vf = nfd.GetVectorFormat();
6548
6549 SimVRegister& rd = vreg(instr->Rd());
6550 SimVRegister& rn = vreg(instr->Rn());
6551 SimVRegister& rm = vreg(instr->Rm());
6552 SimVRegister& ra = vreg(instr->Ra());
6553 SimVRegister temp;
6554
6555 switch (instr->Mask(NEONSHA3Mask)) {
6556 case NEON_BCAX:
6557 bic(vf, temp, rm, ra);
6558 eor(vf, rd, rn, temp);
6559 break;
6560 case NEON_EOR3:
6561 eor(vf, temp, rm, ra);
6562 eor(vf, rd, rn, temp);
6563 break;
6564 default:
6565 UNIMPLEMENTED();
6566 }
6567}
6568void Simulator::VisitNEONPerm(Instruction* instr) {
6569 NEONFormatDecoder nfd(instr);
6570 VectorFormat vf = nfd.GetVectorFormat();
6571
6572 SimVRegister& rd = vreg(instr->Rd());
6573 SimVRegister& rn = vreg(instr->Rn());
6574 SimVRegister& rm = vreg(instr->Rm());
6575
6576 switch (instr->Mask(NEONPermMask)) {
6577 case NEON_TRN1:
6578 trn1(vf, rd, rn, rm);
6579 break;
6580 case NEON_TRN2:
6581 trn2(vf, rd, rn, rm);
6582 break;
6583 case NEON_UZP1:
6584 uzp1(vf, rd, rn, rm);
6585 break;
6586 case NEON_UZP2:
6587 uzp2(vf, rd, rn, rm);
6588 break;
6589 case NEON_ZIP1:
6590 zip1(vf, rd, rn, rm);
6591 break;
6592 case NEON_ZIP2:
6593 zip2(vf, rd, rn, rm);
6594 break;
6595 default:
6596 UNIMPLEMENTED();
6597 }
6598}
6599
// Handles the switch-stack-limit host call: reads the new JS stack limit
// from x16 and installs it as the simulator's stack limit. |instr| is
// intentionally unused.
void Simulator::DoSwitchStackLimit(Instruction* instr) {
  const int64_t stack_limit = xreg(16);
  // stack_limit represents js limit and adjusted by extra runaway gap.
  // Also, stack switching code reads js_limit generated by
  // {Simulator::StackLimit} and then resets it back here.
  // So without adjusting back incoming value by safety gap
  // {stack_limit_} will be shortened by kAdditionalStackMargin yielding
  // positive feedback loop.
  SetStackLimit(stack_limit);
}
6610
// Emulates a printf call planted in the instruction stream as a HLT with
// kImmExceptionIsPrintf. The argument count and per-argument type patterns
// are encoded inline immediately after the HLT; the format string pointer is
// in x0. The accumulated fprintf result is returned in x0, and the PC is
// advanced past the inlined metadata.
void Simulator::DoPrintf(Instruction* instr) {
  DCHECK((instr->Mask(ExceptionMask) == HLT) &&
         (instr->ImmException() == kImmExceptionIsPrintf));

  // Read the arguments encoded inline in the instruction stream.
  uint32_t arg_count;
  uint32_t arg_pattern_list;
  // Instruction is byte-sized, so pointer arithmetic on |instr| advances in
  // bytes; the offsets below are byte offsets from the HLT.
  static_assert(sizeof(*instr) == 1);
  memcpy(&arg_count, instr + kPrintfArgCountOffset, sizeof(arg_count));
  memcpy(&arg_pattern_list, instr + kPrintfArgPatternListOffset,
         sizeof(arg_pattern_list));

  DCHECK_LE(arg_count, kPrintfMaxArgCount);
  // Every pattern bit above the encoded argument count must be zero.
  DCHECK_EQ(arg_pattern_list >> (kPrintfArgPatternBits * arg_count), 0);

  // We need to call the host printf function with a set of arguments defined by
  // arg_pattern_list. Because we don't know the types and sizes of the
  // arguments, this is very difficult to do in a robust and portable way. To
  // work around the problem, we pick apart the format string, and print one
  // format placeholder at a time.

  // Allocate space for the format string. We take a copy, so we can modify it.
  // Leave enough space for one extra character per expected argument (plus the
  // '\0' termination).
  const char* format_base = reg<const char*>(0);
  DCHECK_NOT_NULL(format_base);
  size_t length = strlen(format_base) + 1;
  char* const format = new char[length + arg_count];

  // A list of chunks, each with exactly one format placeholder.
  const char* chunks[kPrintfMaxArgCount];

  // Copy the format string and search for format placeholders.
  uint32_t placeholder_count = 0;
  char* format_scratch = format;
  for (size_t i = 0; i < length; i++) {
    if (format_base[i] != '%') {
      *format_scratch++ = format_base[i];
    } else {
      if (format_base[i + 1] == '%') {
        // Ignore explicit "%%" sequences.
        *format_scratch++ = format_base[i];

        if (placeholder_count == 0) {
          // The first chunk is passed to printf using "%s", so we need to
          // unescape "%%" sequences in this chunk. (Just skip the next '%'.)
          i++;
        } else {
          // Otherwise, pass through "%%" unchanged.
          *format_scratch++ = format_base[++i];
        }
      } else {
        CHECK(placeholder_count < arg_count);
        // Insert '\0' before placeholders, and store their locations.
        *format_scratch++ = '\0';
        chunks[placeholder_count++] = format_scratch;
        *format_scratch++ = format_base[i];
      }
    }
  }
  DCHECK(format_scratch <= (format + length + arg_count));
  CHECK(placeholder_count == arg_count);

  // Finally, call printf with each chunk, passing the appropriate register
  // argument. Normally, printf returns the number of bytes transmitted, so we
  // can emulate a single printf call by adding the result from each chunk. If
  // any call returns a negative (error) value, though, just return that value.

  fprintf(stream_, "%s", clr_printf);

  // Because '\0' is inserted before each placeholder, the first string in
  // 'format' contains no format placeholders and should be printed literally.
  int result = fprintf(stream_, "%s", format);
  int pcs_r = 1;  // Start at x1. x0 holds the format string.
  int pcs_f = 0;  // Start at d0.
  if (result >= 0) {
    for (uint32_t i = 0; i < placeholder_count; i++) {
      int part_result = -1;

      // Extract this argument's type pattern from the packed pattern list.
      uint32_t arg_pattern = arg_pattern_list >> (i * kPrintfArgPatternBits);
      arg_pattern &= (1 << kPrintfArgPatternBits) - 1;
      switch (arg_pattern) {
        case kPrintfArgW:
          part_result = fprintf(stream_, chunks[i], wreg(pcs_r++));
          break;
        case kPrintfArgX:
          part_result = fprintf(stream_, chunks[i], xreg(pcs_r++));
          break;
        case kPrintfArgD:
          part_result = fprintf(stream_, chunks[i], dreg(pcs_f++));
          break;
        default:
          UNREACHABLE();
      }

      if (part_result < 0) {
        // Handle error values.
        result = part_result;
        break;
      }

      result += part_result;
    }
  }

  fprintf(stream_, "%s", clr_normal);

#ifdef DEBUG
  CorruptAllCallerSavedCPURegisters();
#endif

  // Printf returns its result in x0 (just like the C library's printf).
  set_xreg(0, result);

  // The printf parameters are inlined in the code, so skip them.
  set_pc(instr->InstructionAtOffset(kPrintfLength));

  // Set LR as if we'd just called a native printf function.
  set_lr(pc());

  delete[] format;
}
6733
// The local monitor models this simulated processor's exclusive-access
// reservation (as used by load-/store-exclusive). It starts open, i.e. with
// no reservation held.
Simulator::LocalMonitor::LocalMonitor()
    : access_state_(MonitorAccess::Open),
      tagged_addr_(0),
      size_(TransactionSize::None) {}
6738
// Drops any exclusive reservation held by the local monitor, returning it to
// the open state.
void Simulator::LocalMonitor::Clear() {
  access_state_ = MonitorAccess::Open;
  tagged_addr_ = 0;
  size_ = TransactionSize::None;
}
6744
// Called on every ordinary (non-exclusive) load executed by this processor.
void Simulator::LocalMonitor::NotifyLoad() {
  if (access_state_ == MonitorAccess::Exclusive) {
    // A non exclusive load could clear the local monitor. As a result, it's
    // most strict to unconditionally clear the local monitor on load.
    Clear();
  }
}
6752
// Called on an exclusive load: records the reservation's address and
// transaction size for the later store-exclusive check.
void Simulator::LocalMonitor::NotifyLoadExcl(uintptr_t addr,
                                             TransactionSize size) {
  access_state_ = MonitorAccess::Exclusive;
  tagged_addr_ = addr;
  size_ = size;
}
6759
// Called on every ordinary (non-exclusive) store executed by this processor.
void Simulator::LocalMonitor::NotifyStore() {
  if (access_state_ == MonitorAccess::Exclusive) {
    // A non exclusive store could clear the local monitor. As a result, it's
    // most strict to unconditionally clear the local monitor on store.
    Clear();
  }
}
6767
6768bool Simulator::LocalMonitor::NotifyStoreExcl(uintptr_t addr,
6769 TransactionSize size) {
6770 if (access_state_ == MonitorAccess::Exclusive) {
6771 // It is allowed for a processor to require that the address matches
6772 // exactly (B2.10.1), so this comparison does not mask addr.
6773 if (addr == tagged_addr_ && size_ == size) {
6774 Clear();
6775 return true;
6776 } else {
6777 // It is implementation-defined whether an exclusive store to a
6778 // non-tagged address will update memory. As a result, it's most strict
6779 // to unconditionally clear the local monitor.
6780 Clear();
6781 return false;
6782 }
6783 } else {
6784 DCHECK(access_state_ == MonitorAccess::Open);
6785 return false;
6786 }
6787}
6788
// A Processor is one simulated core's node in the global monitor's intrusive
// doubly-linked list. It starts unlinked, with no reservation and a zeroed
// simulated-failure counter.
Simulator::GlobalMonitor::Processor::Processor()
    : access_state_(MonitorAccess::Open),
      tagged_addr_(0),
      next_(nullptr),
      prev_(nullptr),
      failure_counter_(0) {}
6795
// Drops this processor's exclusive reservation. The _Locked suffix indicates
// the caller is expected to hold the global monitor's mutex.
void Simulator::GlobalMonitor::Processor::Clear_Locked() {
  access_state_ = MonitorAccess::Open;
  tagged_addr_ = 0;
}
6800
// Records an exclusive reservation at |addr| for this processor. Unlike the
// local monitor, only the address is tracked (no transaction size).
void Simulator::GlobalMonitor::Processor::NotifyLoadExcl_Locked(
    uintptr_t addr) {
  access_state_ = MonitorAccess::Exclusive;
  tagged_addr_ = addr;
}
6806
// Called for every ordinary store seen by the global monitor. The
// reservation is dropped regardless of which processor performed the store
// (|is_requesting_processor| is currently unused here).
void Simulator::GlobalMonitor::Processor::NotifyStore_Locked(
    bool is_requesting_processor) {
  if (access_state_ == MonitorAccess::Exclusive) {
    // A non exclusive store could clear the global monitor. As a result, it's
    // most strict to unconditionally clear global monitors on store.
    Clear_Locked();
  }
}
6815
6816bool Simulator::GlobalMonitor::Processor::NotifyStoreExcl_Locked(
6817 uintptr_t addr, bool is_requesting_processor) {
6818 if (access_state_ == MonitorAccess::Exclusive) {
6819 if (is_requesting_processor) {
6820 // It is allowed for a processor to require that the address matches
6821 // exactly (B2.10.2), so this comparison does not mask addr.
6822 if (addr == tagged_addr_) {
6823 Clear_Locked();
6824 // Introduce occasional stxr failures. This is to simulate the
6825 // behavior of hardware, which can randomly fail due to background
6826 // cache evictions.
6827 if (failure_counter_++ >= kMaxFailureCounter) {
6828 failure_counter_ = 0;
6829 return false;
6830 } else {
6831 return true;
6832 }
6833 }
6834 } else if ((addr & kExclusiveTaggedAddrMask) ==
6835 (tagged_addr_ & kExclusiveTaggedAddrMask)) {
6836 // Check the masked addresses when responding to a successful lock by
6837 // another processor so the implementation is more conservative (i.e. the
6838 // granularity of locking is as large as possible.)
6839 Clear_Locked();
6840 return false;
6841 }
6842 }
6843 return false;
6844}
6845
// Registers an exclusive reservation at |addr| for |processor|.
void Simulator::GlobalMonitor::NotifyLoadExcl_Locked(uintptr_t addr,
                                                     Processor* processor) {
  processor->NotifyLoadExcl_Locked(addr);
}
6850
6851void Simulator::GlobalMonitor::NotifyStore_Locked(Processor* processor) {
6852 // Notify each processor of the store operation.
6853 for (Processor* iter = head_; iter; iter = iter->next_) {
6854 bool is_requesting_processor = iter == processor;
6855 iter->NotifyStore_Locked(is_requesting_processor);
6856 }
6857}
6858
6859bool Simulator::GlobalMonitor::NotifyStoreExcl_Locked(uintptr_t addr,
6860 Processor* processor) {
6861 if (processor->NotifyStoreExcl_Locked(addr, true)) {
6862 // Notify the other processors that this StoreExcl succeeded.
6863 for (Processor* iter = head_; iter; iter = iter->next_) {
6864 if (iter != processor) {
6865 iter->NotifyStoreExcl_Locked(addr, false);
6866 }
6867 }
6868 return true;
6869 } else {
6870 return false;
6871 }
6872}
6873
6874void Simulator::GlobalMonitor::PrependProcessor(Processor* processor) {
6875 base::MutexGuard lock_guard(&mutex_);
6876 if (head_) {
6877 head_->prev_ = processor;
6878 }
6879 processor->prev_ = nullptr;
6880 processor->next_ = head_;
6881 head_ = processor;
6882 num_processors_++;
6883}
6884
6885void Simulator::GlobalMonitor::RemoveProcessor(Processor* processor) {
6886 base::MutexGuard lock_guard(&mutex_);
6887 if (processor->prev_) {
6888 processor->prev_->next_ = processor->next_;
6889 } else {
6890 head_ = processor->next_;
6891 }
6892 if (processor->next_) {
6893 processor->next_->prev_ = processor->prev_;
6894 }
6895 processor->prev_ = nullptr;
6896 processor->next_ = nullptr;
6897 num_processors_--;
6898}
6899
6900#undef SScanF
6901#undef COLOUR
6902#undef COLOUR_BOLD
6903#undef NORMAL
6904#undef GREY
6905#undef RED
6906#undef GREEN
6907#undef YELLOW
6908#undef BLUE
6909#undef MAGENTA
6910#undef CYAN
6911#undef WHITE
6912#undef COMMAND_SIZE
6913#undef ARG_SIZE
6914#undef STR
6915#undef XSTR
6916
6917} // namespace internal
6918} // namespace v8
6919
6920//
6921// The following functions are used by our gdb macros.
6922//
6923V8_DEBUGGING_EXPORT extern bool _v8_internal_Simulator_ExecDebugCommand(
6924 const char* command) {
6925 i::Isolate* isolate = i::Isolate::Current();
6926 if (!isolate) {
6927 fprintf(stderr, "No V8 Isolate found\n");
6928 return false;
6929 }
6930 i::Simulator* simulator = i::Simulator::current(isolate);
6931 if (!simulator) {
6932 fprintf(stderr, "No Arm64 simulator found\n");
6933 return false;
6934 }
6935 // Copy the command so that the simulator can take ownership of it.
6936 size_t len = strlen(command);
6937 i::ArrayUniquePtr<char> command_copy(i::NewArray<char>(len + 1));
6938 i::MemCopy(command_copy.get(), command, len + 1);
6939 return simulator->ExecDebugCommand(std::move(command_copy));
6940}
6941
6942#undef BRACKETS
6943
6944#endif // USE_SIMULATOR
Isolate * isolate_
#define V(Name)
#define T
#define BREAK
interpreter::OperandScale scale
Definition builtins.cc:44
virtual void VisitPointer(const void *address)=0
static int ActivationFrameAlignment()
static V8_NOINLINE StackSlot GetCurrentStackPosition()
static Instr ImmException(int imm16)
StackGuard * stack_guard()
Definition isolate.h:1198
constexpr int8_t code() const
Register const value_
base::Mutex & mutex_
Handle< Code > code
const int size_
Definition assembler.cc:132
#define ENABLE_CONTROL_FLOW_INTEGRITY_BOOL
Definition globals.h:167
int start
uint32_t count
int end
LineAndColumn current
base::Vector< const DirectHandle< Object > > args
Definition execution.cc:74
Isolate * isolate
int32_t offset
#define XSTR(s)
TNode< Object > target
Instruction * instr
ZoneVector< RpoNumber > & result
#define DEFINE_LAZY_LEAKY_OBJECT_GETTER(T, FunctionName,...)
LiftoffRegister reg
EmitFn fn
int y
int x
uint32_t const mask
#define MSAN_MEMORY_IS_INITIALIZED(start, size)
Definition msan.h:37
#define DISABLE_MSAN
Definition msan.h:40
int int32_t
Definition unicode.cc:40
unsigned short uint16_t
Definition unicode.cc:39
constexpr unsigned CountLeadingZeros(T value)
Definition bits.h:100
constexpr unsigned CountTrailingZeros(T value)
Definition bits.h:144
T ReverseBytes(T value)
Definition bits.h:74
constexpr unsigned CountLeadingSignBits(T value)
Definition bits.h:132
constexpr Vector< T > VectorOf(T *start, size_t size)
Definition vector.h:360
uintptr_t Address
Definition memory.h:13
V8_INLINE const Operation & Get(const Graph &graph, OpIndex index)
Definition graph.h:1231
constexpr NEONLoadStoreMultiStructPostIndexOp NEON_LD3_post
constexpr DataProcessing1SourceOp RBIT_w
constexpr NEONLoadStoreSingleOp NEONLoadStoreSingle2
constexpr NEONScalarPairwiseOp NEON_FMINNMP_scalar
constexpr NEON2RegMiscOp NEON_FNEG
constexpr FPDataProcessing1SourceOp FSQRT_d
constexpr FPDataProcessing2SourceOp FMUL_s
constexpr FPFixedPointConvertOp SCVTF_dw_fixed
constexpr NEONTableOp NEON_TBX_4v
constexpr NEON3SameOp NEON_FMULX
constexpr DataProcessing1SourceOp REV_x
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_ST4_s_post
constexpr FPConditionalCompareOp FCCMP_d
constexpr Opcode ADD
constexpr BitfieldOp BFM_x
constexpr LoadStoreAcquireReleaseOp LDAR_h
constexpr NEONShiftImmediateOp NEON_SSRA
constexpr NEONScalar3SameOp NEON_UQADD_scalar
constexpr NEONLoadStoreSingleOp NEONLoadStoreSingle4
constexpr NEONScalar2RegMiscOp NEON_FCVTNS_scalar
constexpr NEONScalar3SameOp NEON_CMGE_scalar
constexpr FPIntegerConvertOp FCVTNU_xd
constexpr UnconditionalBranchOp BL
constexpr NEONLoadStoreMultiStructOp NEON_LD3
constexpr Opcode ORR
constexpr NEONLoadStoreSingleStructOp NEON_LD3_h
constexpr FPIntegerConvertOp FCVTZS_wd
constexpr NEON3SameOp NEON_SMAX
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_ST3_b_post
constexpr NEONScalarPairwiseOp NEON_FMAXP_scalar
constexpr NEON3SameOp NEON_SABD
constexpr NEONLoadStoreSingleStructOp NEON_ST1_b
constexpr FPCompareOp FCMP_d_zero
constexpr NEON2RegMiscOp NEON_FCMLT_zero
constexpr FPIntegerConvertOp FCVTAS_ws
constexpr NEONScalar2RegMiscOp NEON_FRECPE_scalar
constexpr NEONScalar2RegMiscOp NEON_FCVTAU_scalar
constexpr NEONLoadStoreSingleStructOp NEON_LD1_s
constexpr NEON3SameOp NEON_FCMGE
constexpr BitfieldOp UBFM_w
constexpr ExceptionOp BRK
constexpr NEONLoadStoreMultiStructOp NEON_ST3
constexpr int64_t kByteMask
constexpr FPIntegerConvertOp FCVTZU_xd
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_LD4_s_post
constexpr NEON2RegMiscOp NEON_UADDLP
constexpr DataProcessing3SourceOp MSUB_x
constexpr NEONScalarByIndexedElementOp NEON_SQDMLSL_byelement_scalar
constexpr NEONScalarPairwiseOp NEON_FADDP_scalar
constexpr NEONLoadStoreSingleStructOp NEON_LD2_h
constexpr NEONSHA3Op NEON_EOR3
constexpr NEON3SameOp NEON_FADD
constexpr NEONScalar2RegMiscOp NEON_FCVTMS_scalar
constexpr FPFixedPointConvertOp SCVTF_sx_fixed
constexpr Opcode AND
constexpr DataProcessing2SourceOp LSLV_w
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_LD1_s_post
constexpr DataProcessing3SourceOp SMSUBL_x
constexpr NEON2RegMiscOp NEON_CMGT_zero
constexpr FPDataProcessing3SourceOp FNMSUB_d
constexpr NEON3SameOp NEON_FRECPS
constexpr FPDataProcessing1SourceOp FRINTX_d
constexpr BitfieldOp SBFM_w
constexpr FPIntegerConvertOp UCVTF_sx
constexpr NEON3SameOp NEON_SQADD
constexpr FPDataProcessing3SourceOp FNMSUB_s
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_LD1_h_post
constexpr FPDataProcessing1SourceOp FMOV_s
constexpr NEON3DifferentOp NEON_SABAL
constexpr FPDataProcessing1SourceOp FRINTM_d
constexpr NEONScalar3SameOp NEON_SSHL_scalar
constexpr FPDataProcessing2SourceOp FMIN_s
constexpr NEONScalar3SameOp NEON_UQSUB_scalar
constexpr NEONLoadStoreSingleStructOp NEON_LD2_s
constexpr NEONScalar2RegMiscOp NEON_FCVTNU_scalar
constexpr NEONLoadStoreSingleStructOp NEON_ST3_h
int LowestSetBitPosition(uint64_t value)
constexpr NEONLoadStoreMultiStructOp NEON_ST1_4v
uint32_t NEONModifiedImmediateOp
constexpr NEONModifiedImmediateOp NEONModifiedImmediate_BIC
constexpr NEONShiftImmediateOp NEON_RSHRN
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_LD3_b_post
constexpr FPDataProcessing2SourceOp FSUB_d
constexpr NEON2RegMiscOp NEON_FCVTMU
constexpr AddSubOp ADDS
constexpr NEONShiftImmediateOp NEON_URSHR
constexpr LoadStoreAcquireReleaseOp CASB
constexpr NEONScalar3SameOp NEON_UQSHL_scalar
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_ST3_s_post
const Instr kImmExceptionIsRedirectedCall
constexpr NEONScalar2RegMiscOp NEON_SUQADD_scalar
constexpr LoadStoreAcquireReleaseOp STLR_h
constexpr LoadStoreAcquireReleaseOp CASPA_w
constexpr NEONScalar2RegMiscOp NEON_FCVTPU_scalar
constexpr UnconditionalBranchToRegisterOp BLR
constexpr AtomicMemoryOp SWPALB
constexpr FPDataProcessing1SourceOp FCVT_hs
constexpr NEON2RegMiscOp NEON_ABS
constexpr NEONShiftImmediateOp NEON_FCVTZS_imm
constexpr NEON2RegMiscOp NEON_FCVTAS
constexpr NEONLoadStoreSingleStructOp NEON_LD4_b
constexpr NEONScalarShiftImmediateOp NEON_SQRSHRN_scalar
constexpr NEONLoadStoreSingleStructOp NEON_LD1_b
constexpr NEONScalarShiftImmediateOp NEON_SQSHLU_scalar
constexpr FPCompareOp FCMP_d
constexpr NEONScalarByIndexedElementOp NEON_FMUL_byelement_scalar
constexpr FPDataProcessing1SourceOp FRINTN_d
constexpr LoadStoreAcquireReleaseOp CAS_x
constexpr NEONScalarPairwiseOp NEON_ADDP_scalar
constexpr NEONShiftImmediateOp NEON_UQSHRN
constexpr FPDataProcessing1SourceOp FRINTX_s
constexpr NEON3SameOp NEON_UMAXP
constexpr NEONShiftImmediateOp NEON_SSHR
constexpr NEON3SameOp NEON_PMUL
constexpr FPIntegerConvertOp FCVTMU_ws
constexpr NEON3DifferentOp NEON_SMLSL2
constexpr NEON3SameOp NEON_CMGE
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_LD4_h_post
constexpr NEONScalar3SameOp NEON_SQADD_scalar
constexpr LoadStoreAcquireReleaseOp CASA_w
constexpr NEON3SameOp NEON_SUB
constexpr NEON3SameOp NEON_BSL
constexpr FPIntegerConvertOp FCVTZU_xs
constexpr NEON3SameOp NEON_UHADD
constexpr NEON3SameOp NEON_FABD
constexpr AtomicMemoryOp SWPLB
constexpr FPIntegerConvertOp FCVTMU_xs
constexpr LoadStoreAcquireReleaseOp CASALH
constexpr NEON3SameOp NEON_UHSUB
constexpr NEONTableOp NEON_TBL_2v
constexpr NEONScalarShiftImmediateOp NEON_FCVTZS_imm_scalar
constexpr FPCompareOp FCMP_s_zero
constexpr NEONLoadStoreMultiStructOp NEON_LD2
constexpr LoadStoreAcquireReleaseOp STLXR_x
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_LD3R_post
constexpr NEONAcrossLanesOp NEON_SMINV
constexpr NEONScalarShiftImmediateOp NEON_URSHR_scalar
constexpr int64_t kWRegMask
constexpr DataProcessing3SourceOp MSUB_w
constexpr NEONScalarShiftImmediateOp NEON_UCVTF_imm_scalar
constexpr NEONLoadStoreSingleStructOp NEON_LD1_h
constexpr NEONLoadStoreMultiStructPostIndexOp NEON_LD4_post
constexpr LoadStoreAcquireReleaseOp CASH
constexpr FPDataProcessing3SourceOp FMADD_d
constexpr NEONShiftImmediateOp NEON_SRSHR
constexpr NEONScalar3SameOp NEON_ADD_scalar
constexpr NEON3SameOp NEON_FACGE
constexpr NEON3SameOp NEON_UMIN
constexpr NEON2RegMiscOp NEON_SADALP
constexpr LoadStoreAcquireReleaseOp LDAR_x
void PrintF(const char *format,...)
Definition utils.cc:39
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_LD3_h_post
constexpr ConditionalSelectOp CSINC_w
char * ReadLine(const char *prompt)
Definition utils.cc:69
constexpr NEON3SameOp NEON_FMLS
constexpr NEON3SameOp NEON_ORN
constexpr FPFixedPointConvertOp UCVTF_dx_fixed
constexpr BitfieldOp SBFM_x
constexpr FPDataProcessing1SourceOp FRINTZ_d
constexpr FPFixedPointConvertOp UCVTF_dw_fixed
constexpr DataProcessing2SourceOp LSLV_x
constexpr FPDataProcessing1SourceOp FNEG_d
constexpr NEONScalarShiftImmediateOp NEON_URSRA_scalar
constexpr NEON3DifferentOp NEON_SADDL2
constexpr ConditionalSelectOp CSEL_x
constexpr TestBranchOp TBZ
constexpr NEON2RegMiscOp NEON_CMLT_zero
constexpr LoadStoreAcquireReleaseOp CASPAL_x
constexpr FPIntegerConvertOp FCVTMS_xd
constexpr NEONScalar2RegMiscOp NEON_ABS_scalar
constexpr NEONLoadStoreMultiStructOp NEON_ST1_3v
constexpr NEON3SameOp NEON_CMGT
const RegList kCalleeSaved
Definition reglist-arm.h:31
constexpr NEONLoadStoreMultiStructOp NEON_LD1_3v
constexpr int kSRegSize
constexpr NEON2RegMiscOp NEON_FCVTZS
constexpr LoadStoreAcquireReleaseOp CASA_x
constexpr ShiftOp LSR
constexpr AtomicMemoryOp SWPAL_w
constexpr NEONScalar2RegMiscOp NEON_USQADD_scalar
constexpr PCRelAddressingOp ADRP
constexpr LoadStoreAcquireReleaseOp CASLH
constexpr NEON2RegMiscOp NEON_RBIT_NOT
constexpr LoadStoreAcquireReleaseOp LDAR_w
constexpr LoadStoreAcquireReleaseOp CASAH
constexpr int kNumberOfCalleeSavedVRegisters
constexpr AddSubOp SUBS
constexpr FPIntegerConvertOp FCVTNS_xs
constexpr NEONAcrossLanesOp NEON_UMINV
constexpr NEON3DifferentOp NEON_SQDMULL2
constexpr FPDataProcessing3SourceOp FMADD_s
constexpr FPDataProcessing1SourceOp FCVT_dh
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_ST1_s_post
constexpr NEONTableOp NEON_TBL_1v
constexpr NEON2RegMiscOp NEON_FCVTN
constexpr NEONScalar3SameOp NEON_SQRDMULH_scalar
constexpr NEONLoadStoreSingleStructOp NEON_LD1R
constexpr NEONScalar2RegMiscOp NEON_FRECPX_scalar
constexpr NEON3SameOp NEON_URHADD
constexpr NEON3SameOp NEON_CMHI
constexpr NEON3DifferentOp NEON_SSUBW
constexpr NEON3SameOp NEON_URSHL
constexpr NEONByIndexedElementOp NEON_SQDMLSL_byelement
constexpr NEONScalar3SameOp NEON_FRECPS_scalar
constexpr NEONLoadStoreSingleStructOp NEON_LD4_h
constexpr FPIntegerConvertOp FCVTMU_wd
constexpr int kHRegSize
constexpr NEON3DifferentOp NEON_SMULL2
constexpr LoadStoreAcquireReleaseOp CASAL_x
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_LD2_s_post
constexpr NEONLoadStoreMultiStructPostIndexOp NEON_ST2_post
constexpr FPDataProcessing2SourceOp FMAX_d
constexpr unsigned kShiftAmountXRegMask
constexpr NEONPermOp NEON_ZIP1
constexpr int kZeroRegCode
constexpr NEON3DifferentOp NEON_SSUBL
constexpr int kNumberOfRegisters
constexpr NEON3DifferentOp NEON_SADDW
constexpr ShiftOp ASR
constexpr NEONScalar3SameOp NEON_FACGT_scalar
constexpr NEON3SameOp NEON_UABD
constexpr FPDataProcessing3SourceOp FNMADD_d
constexpr NEON2RegMiscOp NEON_FCVTZU
constexpr NEON3SameOp NEON_FMAX
constexpr NEONLoadStoreSingleStructOp NEON_ST2_h
constexpr NEON3SameOp NEON_ADD
constexpr NEON2RegMiscOp NEON_FCVTMS
constexpr NEON2RegMiscOp NEON_FRINTM
constexpr NEONShiftImmediateOp NEON_URSRA
constexpr NEON3SameOp NEON_UMAX
constexpr FPDataProcessing1SourceOp FRINTM_s
constexpr ShiftOp LSL
constexpr int B
constexpr NEONShiftImmediateOp NEON_SQSHRUN
constexpr FPDataProcessing1SourceOp FRINTA_s
uint64_t MaxUintFromFormat(VectorFormat vform)
constexpr NEON3SameOp NEON_UQADD
constexpr DataProcessing1SourceOp RBIT_x
constexpr NEONLoadStoreMultiStructOp NEON_ST2
constexpr NEONAcrossLanesOp NEON_UMAXV
constexpr NEONScalar2RegMiscOp NEON_CMEQ_zero_scalar
constexpr NEONScalar2RegMiscOp NEON_UQXTN_scalar
constexpr FPDataProcessing2SourceOp FMAXNM_s
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_ST2_b_post
constexpr FPDataProcessing2SourceOp FADD_s
constexpr NEON3SameOp NEON_SHSUB
constexpr NEON3DifferentOp NEON_SMLAL2
constexpr DataProcessing1SourceOp REV32_x
constexpr NEON2RegMiscOp NEON_XTN
constexpr FPConditionalSelectOp FCSEL_d
constexpr NEON3SameOp NEON_SSHL
constexpr NEONByIndexedElementOp NEON_SQDMULH_byelement
V8_INLINE constexpr bool IsSmi(TaggedImpl< kRefType, StorageType > obj)
Definition objects.h:665
constexpr NEONScalar2RegMiscOp NEON_FCVTZS_scalar
constexpr NEON2RegMiscOp NEON_FCMGE_zero
constexpr BitfieldOp BFM_w
constexpr FPDataProcessing2SourceOp FNMUL_s
constexpr NEONScalar2RegMiscOp NEON_FRSQRTE_scalar
constexpr NEONScalarShiftImmediateOp NEON_SRSRA_scalar
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_LD4_b_post
constexpr NEONScalarShiftImmediateOp NEON_SHL_scalar
constexpr NEON3SameOp NEON_FMINNM
constexpr FPFixedPointConvertOp SCVTF_dx_fixed
constexpr NEON2RegMiscOp NEON_URECPE
constexpr LoadStoreAcquireReleaseOp CASP_x
constexpr NEON3DifferentOp NEON_SUBHN2
constexpr NEON3DifferentOp NEON_ADDHN
constexpr NEON3SameOp NEON_FRSQRTS
constexpr FPIntegerConvertOp FCVTAU_ws
constexpr AtomicMemoryOp SWPA_x
constexpr FPIntegerConvertOp FCVTMS_ws
int LaneSizeInBytesLog2FromFormat(VectorFormat vform)
constexpr NEON3SameOp NEON_UABA
constexpr NEON2RegMiscOp NEON_URSQRTE
constexpr NEON2RegMiscOp NEON_FCVTAU
constexpr FPConditionalCompareOp FCCMP_s
constexpr int N
constexpr NEON3SameOp NEON_FADDP
constexpr NEONModifiedImmediateOp NEONModifiedImmediate_ORR
constexpr LoadStoreAcquireReleaseOp STLXR_b
constexpr NEONScalar2RegMiscOp NEON_SQNEG_scalar
constexpr FPIntegerConvertOp UCVTF_dx
constexpr LoadStoreAcquireReleaseOp LDAXR_h
constexpr NEONLoadStoreMultiStructOp NEON_LD1_2v
constexpr int64_t kWordMask
constexpr LoadStoreAcquireReleaseOp CASAL_w
constexpr NEON3SameOp NEON_FMIN
constexpr NEONScalarShiftImmediateOp NEON_SQSHRN_scalar
constexpr NEON3SameOp NEON_UQRSHL
constexpr NEON3DifferentOp NEON_UABAL
constexpr FPIntegerConvertOp FCVTAS_xd
constexpr NEONScalar2RegMiscOp NEON_UCVTF_scalar
constexpr NEON2RegMiscOp NEON_FCVTL
constexpr FPDataProcessing3SourceOp FMSUB_d
constexpr NEON2RegMiscOp NEON_FRINTI
constexpr NEON2RegMiscOp NEON_SQNEG
constexpr NEONByIndexedElementOp NEON_SQDMLAL_byelement
constexpr BitfieldOp UBFM_x
constexpr NEONTableOp NEON_TBX_1v
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_ST4_b_post
constexpr FPDataProcessing1SourceOp FCVT_sh
constexpr NEONScalar3SameOp NEON_FRSQRTS_scalar
constexpr NEONLoadStoreSingleStructOp NEON_LD3_s
constexpr NEON2RegMiscOp NEON_UCVTF
constexpr NEON3DifferentOp NEON_PMULL2
constexpr SystemSysRegOp MRS
constexpr FPIntegerConvertOp FCVTMS_xs
constexpr NEON2RegMiscOp NEON_REV16
std::unique_ptr< T, ArrayDeleter< T > > ArrayUniquePtr
Definition allocation.h:73
const unsigned kPrintfMaxArgCount
constexpr NEONScalar3SameOp NEON_URSHL_scalar
constexpr NEON3DifferentOp NEON_SQDMULL
constexpr NEON2RegMiscOp NEON_NEG
constexpr NEONLoadStoreSingleStructOp NEON_ST4_h
constexpr NEON3SameOp NEON_BIF
constexpr DataProcessing1SourceOp CLZ_w
constexpr NEON2RegMiscOp NEON_REV64
constexpr FPIntegerConvertOp FCVTMS_wd
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_LD2_b_post
constexpr NEONShiftImmediateOp NEON_UQSHL_imm
constexpr NEON3SameOp NEON_CMHS
constexpr NEON3SameOp NEON_UQSUB
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_ST1_d_post
constexpr FPDataProcessing2SourceOp FDIV_d
constexpr NEONScalar3SameOp NEON_SQSUB_scalar
constexpr NEON3DifferentOp NEON_UADDL
constexpr NEONLoadStoreMultiStructPostIndexOp NEON_ST1_3v_post
constexpr NEONLoadStoreSingleOp NEONLoadStoreSingle1
constexpr NEON3SameOp NEON_FDIV
constexpr SystemSysRegOp MSR
constexpr NEONScalar3SameOp NEON_SQRSHL_scalar
constexpr NEONLoadStoreSingleStructOp NEON_ST1_h
constexpr NEONTableOp NEON_TBX_3v
constexpr AtomicMemoryOp SWPA_w
constexpr NEONByIndexedElementOp NEON_UMULL_byelement
constexpr FPDataProcessing1SourceOp FCVT_hd
constexpr NEONScalarShiftImmediateOp NEON_SQSHRUN_scalar
constexpr LoadStoreAcquireReleaseOp STLR_x
constexpr NEONShiftImmediateOp NEON_SLI
constexpr NEONScalarByIndexedElementOp NEON_SQRDMULH_byelement_scalar
constexpr DataProcessing3SourceOp SMADDL_x
constexpr NEONLoadStoreSingleStructOp NEON_LD4_s
constexpr NEON2RegMiscOp NEON_FRINTA
constexpr NEON3SameOp NEON_SMAXP
constexpr NEONLoadStoreMultiStructPostIndexOp NEON_LD1_1v_post
constexpr NEONScalarShiftImmediateOp NEON_SSRA_scalar
int HighestSetBitPosition(uint64_t value)
constexpr CompareBranchOp CBZ_w
constexpr int kSystemPointerSize
Definition globals.h:410
constexpr NEONAcrossLanesOp NEON_FMAXV
constexpr NEON3SameOp NEON_SHADD
const RegList kCallerSaved
Definition reglist-arm.h:42
constexpr unsigned kRegCodeMask
constexpr MoveWideImmediateOp MOVZ_x
constexpr NEON3SameOp NEON_ADDP
constexpr int kQRegSizeLog2
constexpr NEONScalar2RegMiscOp NEON_SQXTN_scalar
constexpr NEON2RegMiscOp NEON_SADDLP
constexpr int64_t kXRegMask
constexpr FPIntegerConvertOp SCVTF_dw
constexpr AtomicMemoryOp SWPL_w
constexpr FPIntegerConvertOp FCVTZS_xs
constexpr NEON3DifferentOp NEON_SADDL
constexpr DataProcessing1SourceOp REV_w
constexpr NEON3SameOp NEON_CMEQ
constexpr NEON3DifferentOp NEON_SABAL2
constexpr NEONLoadStoreMultiStructPostIndexOp NEON_ST1_1v_post
constexpr NEONScalar3DiffOp NEON_SQDMLAL_scalar
constexpr NEONPermOp NEON_TRN1
constexpr NEONByIndexedElementOp NEON_MUL_byelement
constexpr NEONShiftImmediateOp NEON_SQSHLU
constexpr NEONAcrossLanesOp NEON_FMINNMV
constexpr NEONScalar3SameOp NEON_CMTST_scalar
constexpr NEONScalar3SameOp NEON_FACGE_scalar
constexpr NEON3SameOp NEON_MLA
constexpr AtomicMemoryOp SWPAH
constexpr NEONShiftImmediateOp NEON_USHLL
constexpr NEONScalar3SameOp NEON_SQSHL_scalar
constexpr NEON2RegMiscOp NEON_REV32
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_LD1R_post
constexpr DataProcessing3SourceOp MADD_w
constexpr FPIntegerConvertOp FCVTNU_wd
constexpr NEONScalarShiftImmediateOp NEON_USHR_scalar
constexpr DataProcessing2SourceOp LSRV_w
constexpr NEON2RegMiscOp NEON_FCMEQ_zero
constexpr DataProcessing3SourceOp UMADDL_x
constexpr NEON3DifferentOp NEON_SABDL2
constexpr NEONScalarShiftImmediateOp NEON_FCVTZU_imm_scalar
constexpr FPDataProcessing2SourceOp FMIN_d
constexpr NEONShiftImmediateOp NEON_SQRSHRUN
constexpr NEONLoadStoreMultiStructOp NEON_LD4
constexpr int S
constexpr NEON3SameOp NEON_FSUB
constexpr DataProcessing2SourceOp SDIV_w
constexpr NEON3SameOp NEON_SQDMULH
constexpr NEON3DifferentOp NEON_SQDMLAL2
constexpr FPIntegerConvertOp FCVTMU_xd
constexpr NEONScalar2RegMiscOp NEON_SQXTUN_scalar
constexpr NEON2RegMiscOp NEON_CNT
constexpr NEONPermOp NEON_ZIP2
constexpr NEONScalar3SameOp NEON_CMHI_scalar
constexpr NEON3SameOp NEON_SMINP
constexpr NEONPermOp NEON_TRN2
constexpr UnconditionalBranchToRegisterOp BR
constexpr DataProcessing1SourceOp CLS_w
constexpr LoadLiteralOp LDR_d_lit
constexpr int kDRegSizeLog2
constexpr NEONScalar3SameOp NEON_UQRSHL_scalar
constexpr NEON2RegMiscOp NEON_FRSQRTE
constexpr FPDataProcessing2SourceOp FMINNM_d
constexpr FPIntegerConvertOp FCVTAS_wd
constexpr NEON3DifferentOp NEON_UMLSL2
constexpr FPIntegerConvertOp FCVTNU_ws
constexpr LogicalOp ANDS
constexpr NEONShiftImmediateOp NEON_USRA
constexpr NEONScalar3SameOp NEON_FCMEQ_scalar
void ShortPrint(Tagged< Object > obj, FILE *out)
Definition objects.cc:1865
constexpr NEON3DifferentOp NEON_UMLSL
constexpr NEONScalar3DiffOp NEON_SQDMLSL_scalar
constexpr AtomicMemoryOp SWPB
constexpr NEON3SameOp NEON_FMUL
constexpr NEON3SameOp NEON_FMINNMP
constexpr NEON2RegMiscOp NEON_FCVTXN
constexpr FPIntegerConvertOp SCVTF_sx
constexpr FPDataProcessing1SourceOp FABS_d
constexpr ConditionalSelectOp CSINV_x
constexpr bool is_intn(int64_t x, unsigned n)
Definition utils.h:568
constexpr NEONScalarShiftImmediateOp NEON_SSHR_scalar
constexpr int kBRegSize
constexpr FPIntegerConvertOp FCVTAS_xs
constexpr NEON2RegMiscOp NEON_SUQADD
constexpr NEONScalarShiftImmediateOp NEON_SQSHL_imm_scalar
constexpr NEONScalarShiftImmediateOp NEON_SLI_scalar
constexpr DataProcessing1SourceOp REV16_x
constexpr NEONScalar3DiffOp NEON_SQDMULL_scalar
constexpr NEONScalarByIndexedElementOp NEON_FMULX_byelement_scalar
constexpr NEONScalarByIndexedElementOp NEON_SQDMLAL_byelement_scalar
constexpr FPIntegerConvertOp FJCVTZS
constexpr NEONShiftImmediateOp NEON_UCVTF_imm
constexpr NEONScalar3SameOp NEON_FCMGT_scalar
constexpr NEON2RegMiscOp NEON_FRINTZ
constexpr ConditionalSelectOp CSINC_x
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_ST3_h_post
constexpr NEONByIndexedElementOp NEON_FMULX_byelement
constexpr NEONScalarPairwiseOp NEON_FMAXNMP_scalar
constexpr CompareBranchOp CBZ_x
constexpr NEON3SameOp NEON_ORR
constexpr NEON3DifferentOp NEON_SADDW2
constexpr NEONScalar3SameOp NEON_FCMGE_scalar
constexpr NEON3SameOp NEON_FCMEQ
uint32_t LoadStoreAcquireReleaseOp
constexpr NEONLoadStoreSingleStructOp NEON_ST4_b
constexpr DataProcessing1SourceOp REV16_w
constexpr NEONScalarShiftImmediateOp NEON_SCVTF_imm_scalar
constexpr DataProcessing2SourceOp UDIV_x
constexpr NEONScalar2RegMiscOp NEON_SQABS_scalar
constexpr FPIntegerConvertOp FCVTAU_xd
uint32_t LoadStorePairOp
constexpr DataProcessing2SourceOp LSRV_x
constexpr NEONAcrossLanesOp NEON_SADDLV
constexpr NEON2RegMiscOp NEON_CMLE_zero
constexpr FPDataProcessing1SourceOp FRINTI_s
constexpr FPIntegerConvertOp UCVTF_sw
constexpr NEON3DifferentOp NEON_UADDL2
constexpr AtomicMemoryOp SWP_x
constexpr NEONScalar2RegMiscOp NEON_FCMGT_zero_scalar
constexpr NEONScalar2RegMiscOp NEON_FCMLT_zero_scalar
constexpr NEONShiftImmediateOp NEON_SQRSHRN
constexpr NEONLoadStoreSingleStructOp NEON_ST1_d
constexpr NEON2RegMiscOp NEON_CMEQ_zero
constexpr NEONLoadStoreMultiStructPostIndexOp NEON_ST1_2v_post
uint32_t MoveWideImmediateOp
constexpr NEON3SameOp NEON_UQSHL
constexpr FPIntegerConvertOp FCVTNS_wd
constexpr MoveWideImmediateOp MOVK_w
constexpr NEON2RegMiscOp NEON_CLS
constexpr NEONAcrossLanesOp NEON_UADDLV
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_ST1_h_post
constexpr NEON3SameOp NEON_SMIN
constexpr NEONScalar2RegMiscOp NEON_FCMLE_zero_scalar
constexpr NEONLoadStoreSingleStructOp NEON_LD3R
constexpr NEONLoadStoreMultiStructPostIndexOp NEON_LD2_post
constexpr LoadStoreAcquireReleaseOp CASP_w
constexpr NEON3SameOp NEON_SQRDMULH
constexpr int kQRegSize
constexpr NEONLoadStoreSingleOp NEONLoadStoreSingle3
constexpr NEONScalar2RegMiscOp NEON_CMGT_zero_scalar
constexpr FPIntegerConvertOp FCVTNS_xd
constexpr NEON3SameOp NEON_AND
constexpr LoadStoreAcquireReleaseOp LDAXR_w
unsigned LaneSizeInBitsFromFormat(VectorFormat vform)
constexpr NEONSHA3Op NEON_BCAX
constexpr Register kWasmTrapHandlerFaultAddressRegister
constexpr NEONScalar2RegMiscOp NEON_CMGE_zero_scalar
constexpr LoadStoreAcquireReleaseOp CASPL_x
constexpr NEON3SameOp NEON_FMINP
constexpr NEONByIndexedElementOp NEON_SMLAL_byelement
constexpr NEONAcrossLanesOp NEON_SMAXV
constexpr NEON3SameOp NEON_FMAXNM
constexpr NEON3ExtensionOp NEON_SDOT
constexpr NEONScalar2RegMiscOp NEON_CMLE_zero_scalar
constexpr FPDataProcessing3SourceOp FNMADD_s
constexpr NEON3DifferentOp NEON_UABDL
constexpr FPDataProcessing2SourceOp FNMUL_d
constexpr LoadLiteralOp LDR_w_lit
constexpr NEON3SameOp NEON_USHL
constexpr DataProcessing3SourceOp MADD_x
constexpr NEONScalarShiftImmediateOp NEON_SQRSHRUN_scalar
constexpr NEON3DifferentOp NEON_UABDL2
constexpr NEONLoadStoreMultiStructPostIndexOp NEON_ST1_4v_post
constexpr NEONPermOp NEON_UZP1
V8_EXPORT_PRIVATE FlagValues v8_flags
constexpr DataProcessing1SourceOp CLZ_x
constexpr int kWRegSize
constexpr NEONLoadStoreSingleStructOp NEON_LD1_d
constexpr NEONShiftImmediateOp NEON_SRSRA
constexpr NEON3SameOp NEON_SRSHL
constexpr LoadStoreAcquireReleaseOp CASPA_x
constexpr NEONAcrossLanesOp NEON_ADDV
constexpr FPDataProcessing1SourceOp FNEG_s
constexpr FPIntegerConvertOp FCVTAU_wd
constexpr NEONByIndexedElementOp NEON_FMLA_byelement
constexpr FPConditionalSelectOp FCSEL_s
constexpr NEON3SameOp NEON_SQSHL
constexpr NEONByIndexedElementOp NEON_SMLSL_byelement
constexpr NEONScalar3SameOp NEON_USHL_scalar
constexpr NEONShiftImmediateOp NEON_SSHLL
constexpr NEONScalarPairwiseOp NEON_FMINP_scalar
constexpr NEONShiftImmediateOp NEON_SQSHRN
constexpr DataProcessing3SourceOp SMULH_x
constexpr NEONScalarShiftImmediateOp NEON_USRA_scalar
constexpr Opcode SUB
constexpr FPIntegerConvertOp FMOV_dx
constexpr FPDataProcessing1SourceOp FRINTA_d
constexpr NEONScalarShiftImmediateOp NEON_UQSHRN_scalar
constexpr DataProcessing3SourceOp UMULH_x
constexpr NEONScalar2RegMiscOp NEON_CMLT_zero_scalar
constexpr NEONLoadStoreSingleStructOp NEON_ST2_s
constexpr NEONScalar2RegMiscOp NEON_FCMGE_zero_scalar
constexpr NEONShiftImmediateOp NEON_SRI
constexpr NEONByIndexedElementOp NEON_FMLS_byelement
constexpr NEON3DifferentOp NEON_SQDMLSL2
constexpr NEON3DifferentOp NEON_UMULL
constexpr NEON3DifferentOp NEON_SQDMLAL
constexpr DataProcessing2SourceOp ASRV_w
constexpr LoadStoreAcquireReleaseOp LDAXR_b
constexpr NEONLoadStoreSingleStructOp NEON_ST1_s
constexpr FPDataProcessing1SourceOp FMOV_d
constexpr NEON3DifferentOp NEON_USUBW2
constexpr NEON2RegMiscOp NEON_SQABS
constexpr NEON3DifferentOp NEON_USUBW
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_LD3_s_post
constexpr NEON2RegMiscOp NEON_UADALP
constexpr NEON3DifferentOp NEON_UADDW
constexpr NEON3SameOp NEON_FACGT
constexpr ConditionalSelectOp CSEL_w
constexpr NEONScalarShiftImmediateOp NEON_SRI_scalar
constexpr NEONShiftImmediateOp NEON_SHRN
constexpr AtomicMemoryOp SWP_w
constexpr NEONScalar2RegMiscOp NEON_FCVTXN_scalar
constexpr NEON3SameOp NEON_BIC
constexpr LoadStoreAcquireReleaseOp STLR_w
constexpr FPDataProcessing2SourceOp FADD_d
constexpr TestBranchOp TBNZ
constexpr NEONScalar3SameOp NEON_SQDMULH_scalar
constexpr unsigned kShiftAmountWRegMask
constexpr NEON3SameOp NEON_CMTST
constexpr NEON3SameOp NEON_FMAXNMP
constexpr FPCompareOp FCMP_s
constexpr DataProcessing1SourceOp CLS_x
constexpr ShiftOp ROR
constexpr NEON3DifferentOp NEON_RSUBHN
constexpr NEONAcrossLanesOp NEON_FMAXNMV
constexpr LoadStoreAcquireReleaseOp CASPL_w
constexpr NEONLoadStoreMultiStructOp NEON_LD1_4v
constexpr NEONLoadStoreMultiStructPostIndexOp NEON_ST3_post
constexpr AtomicMemoryOp SWPL_x
constexpr FPIntegerConvertOp FCVTZS_xd
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_ST1_b_post
uint64_t ObjectPair
constexpr FPDataProcessing2SourceOp FMAXNM_d
constexpr NEON2RegMiscOp NEON_FABS
constexpr NEON2RegMiscOp NEON_FCVTPS
constexpr LoadStoreAcquireReleaseOp STLXR_w
constexpr NEONLoadStoreMultiStructPostIndexOp NEON_ST4_post
constexpr ExceptionOp HLT
constexpr NEONScalarShiftImmediateOp NEON_UQSHL_imm_scalar
constexpr MoveWideImmediateOp MOVN_x
constexpr NEON3DifferentOp NEON_UMLAL
constexpr CompareBranchOp CBNZ_w
constexpr NEON2RegMiscOp NEON_SQXTUN
constexpr NEONLoadStoreSingleStructOp NEON_ST3_s
constexpr NEONByIndexedElementOp NEON_SQRDMULH_byelement
constexpr NEONScalar2RegMiscOp NEON_FCVTAS_scalar
constexpr NEON3SameOp NEON_MUL
constexpr NEON2RegMiscOp NEON_FCMGT_zero
constexpr NEONShiftImmediateOp NEON_SQSHL_imm
constexpr NEON3DifferentOp NEON_SSUBL2
constexpr NEONScalar3SameOp NEON_SRSHL_scalar
constexpr NEONByIndexedElementOp NEON_UMLAL_byelement
constexpr int kNumberOfVRegisters
constexpr NEONTableOp NEON_TBL_3v
constexpr FPDataProcessing1SourceOp FCVT_sd
constexpr NEON3SameOp NEON_BIT
constexpr NEON2RegMiscOp NEON_SHLL
constexpr NEON3SameOp NEON_SQSUB
constexpr FPIntegerConvertOp FCVTZS_ws
constexpr LoadStoreAcquireReleaseOp LDAR_b
constexpr NEON3DifferentOp NEON_SABDL
constexpr NEONScalar2RegMiscOp NEON_FCVTMU_scalar
constexpr NEON3DifferentOp NEON_SSUBW2
constexpr NEON3DifferentOp NEON_UMLAL2
constexpr NEONLoadStoreMultiStructOp NEON_ST4
constexpr NEON2RegMiscOp NEON_FCMLE_zero
constexpr NEON3DifferentOp NEON_PMULL
constexpr int kSPRegInternalCode
constexpr FPDataProcessing1SourceOp FRINTN_s
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_LD4R_post
constexpr FPDataProcessing1SourceOp FRINTP_d
constexpr NEON3SameOp NEON_SQRSHL
constexpr NEON2RegMiscOp NEON_CMGE_zero
constexpr NEONAcrossLanesOp NEON_FMINV
constexpr FPDataProcessing1SourceOp FRINTP_s
constexpr FPIntegerConvertOp SCVTF_dx
constexpr FPFixedPointConvertOp SCVTF_sw_fixed
constexpr LoadLiteralOp LDR_s_lit
constexpr NEON3SameOp NEON_FMLA
constexpr NEON3DifferentOp NEON_SUBHN
constexpr NEONShiftImmediateOp NEON_UQRSHRN
constexpr NEON3SameOp NEON_FMAXP
constexpr MoveWideImmediateOp MOVK_x
constexpr DataProcessing2SourceOp UDIV_w
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_LD1_b_post
constexpr NEONShiftImmediateOp NEON_FCVTZU_imm
constexpr NEONByIndexedElementOp NEON_FMUL_byelement
constexpr NEON2RegMiscOp NEON_FRINTP
constexpr NEON3SameOp NEON_MLS
constexpr NEONScalar3SameOp NEON_FABD_scalar
constexpr NEONByIndexedElementOp NEON_SQDMULL_byelement
constexpr NEONLoadStoreMultiStructOp NEON_ST1_2v
constexpr NEON3SameOp NEON_SABA
constexpr FPIntegerConvertOp FMOV_ws
constexpr NEON3DifferentOp NEON_RADDHN2
constexpr NEONLoadStoreSingleStructOp NEON_ST3_b
constexpr Opcode EOR
constexpr LoadStoreAcquireReleaseOp CASAB
constexpr NEON3DifferentOp NEON_USUBL2
constexpr NEONLoadStoreSingleStructOp NEON_ST2_b
constexpr int kDRegSize
constexpr MoveWideImmediateOp MOVN_w
constexpr LoadStoreAcquireReleaseOp CASL_w
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_LD2R_post
constexpr FPIntegerConvertOp FCVTNU_xs
constexpr NEON3DifferentOp NEON_SQDMLSL
constexpr NEONScalar3SameOp NEON_CMEQ_scalar
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_ST2_s_post
constexpr NEON3SameOp NEON_SRHADD
constexpr NEON3DifferentOp NEON_SMLSL
constexpr LoadLiteralOp LDR_x_lit
constexpr NEONLoadStoreSingleStructOp NEON_ST4_s
constexpr NEONScalar3SameOp NEON_SUB_scalar
constexpr NEON3DifferentOp NEON_RADDHN
constexpr NEONShiftImmediateOp NEON_SCVTF_imm
constexpr LoadStoreAcquireReleaseOp LDAXR_x
int LaneSizeInBytesFromFormat(VectorFormat vform)
constexpr NEON2RegMiscOp NEON_UQXTN
constexpr bool is_uintn(int64_t x, unsigned n)
Definition utils.h:574
constexpr NEONByIndexedElementOp NEON_MLA_byelement
constexpr NEONTableOp NEON_TBL_4v
constexpr NEONTableOp NEON_TBX_2v
uint32_t LoadStoreOp
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_LD1_d_post
constexpr NEON3DifferentOp NEON_UABAL2
constexpr DataProcessing2SourceOp RORV_x
constexpr NEONLoadStoreMultiStructPostIndexOp NEON_LD1_2v_post
constexpr ConditionalSelectOp CSNEG_x
constexpr UnconditionalBranchToRegisterOp RET
constexpr NEONScalarByIndexedElementOp NEON_FMLA_byelement_scalar
void MemCopy(void *dest, const void *src, size_t size)
Definition memcopy.h:124
constexpr NEONScalarByIndexedElementOp NEON_SQDMULL_byelement_scalar
constexpr NEON3DifferentOp NEON_ADDHN2
constexpr NEON2RegMiscOp NEON_FRECPE
constexpr CompareBranchOp CBNZ_x
constexpr FPIntegerConvertOp UCVTF_dw
constexpr AtomicMemoryOp SWPAL_x
constexpr NEONScalar2RegMiscOp NEON_NEG_scalar
constexpr FPDataProcessing2SourceOp FMAX_s
constexpr NEONScalar3SameOp NEON_CMHS_scalar
constexpr LoadStoreAcquireReleaseOp CAS_w
constexpr NEON2RegMiscOp NEON_USQADD
constexpr int kNumberOfCalleeSavedRegisters
constexpr NEONShiftImmediateOp NEON_SHL
constexpr FPImmediateOp FMOV_s_imm
constexpr uint32_t kSlotsZapValue
Definition globals.h:1014
constexpr NEONModifiedImmediateOp NEONModifiedImmediate_MVNI
constexpr NEONScalarShiftImmediateOp NEON_UQRSHRN_scalar
constexpr FPIntegerConvertOp FCVTZU_ws
constexpr AtomicMemoryOp SWPH
constexpr LoadStoreAcquireReleaseOp STLXR_h
constexpr int kXRegSize
constexpr NEONScalar3SameOp NEON_FMULX_scalar
constexpr NEONByIndexedElementOp NEON_MLS_byelement
constexpr NEONScalar2RegMiscOp NEON_FCMEQ_zero_scalar
constexpr FPDataProcessing1SourceOp FABS_s
constexpr int64_t kHalfWordMask
constexpr ConditionalSelectOp CSINV_w
constexpr NEONLoadStoreMultiStructPostIndexOp NEON_LD1_4v_post
constexpr NEONScalar2RegMiscOp NEON_FCVTZU_scalar
constexpr NEON3SameOp NEON_UMINP
constexpr MoveWideImmediateOp MOVZ_w
constexpr NEONLoadStoreSingleStructOp NEON_LD4R
constexpr FPDataProcessing2SourceOp FSUB_s
constexpr DataProcessing3SourceOp UMSUBL_x
constexpr NEON2RegMiscOp NEON_FRINTN
constexpr NEON2RegMiscOp NEON_SCVTF
constexpr AtomicMemoryOp SWPLH
constexpr NEONPermOp NEON_UZP2
constexpr FPDataProcessing2SourceOp FMUL_d
constexpr NEONScalarByIndexedElementOp NEON_SQDMULH_byelement_scalar
T * NewArray(size_t size)
Definition allocation.h:43
constexpr NEONShiftImmediateOp NEON_USHR
constexpr NEONLoadStoreMultiStructOp NEON_LD1_1v
constexpr DataProcessing2SourceOp RORV_w
constexpr NEONScalarShiftImmediateOp NEON_SRSHR_scalar
constexpr FPIntegerConvertOp SCVTF_sw
constexpr NEONModifiedImmediateOp NEONModifiedImmediate_MOVI
constexpr NEONScalarByIndexedElementOp NEON_FMLS_byelement_scalar
constexpr FPFixedPointConvertOp UCVTF_sx_fixed
constexpr NEONLoadStoreSingleStructOp NEON_LD3_b
constexpr NEON3DifferentOp NEON_USUBL
unsigned RegisterSizeInBytesFromFormat(VectorFormat vform)
constexpr FPFixedPointConvertOp UCVTF_sw_fixed
constexpr NEON3DifferentOp NEON_RSUBHN2
constexpr LoadStoreAcquireReleaseOp CASALB
constexpr NEON2RegMiscOp NEON_FSQRT
constexpr AtomicMemoryOp SWPAB
constexpr LoadStoreAcquireReleaseOp STLR_b
constexpr NEONScalar2RegMiscOp NEON_FCVTPS_scalar
constexpr AtomicMemoryOp SWPALH
constexpr NEON3DifferentOp NEON_SMULL
constexpr NEON3DifferentOp NEON_UADDW2
V8_WARN_UNUSED_RESULT bool IsValidHeapObject(Heap *heap, Tagged< HeapObject > object)
constexpr ConditionalSelectOp CSNEG_w
constexpr NEONByIndexedElementOp NEON_UMLSL_byelement
constexpr LoadStoreAcquireReleaseOp CASPAL_w
constexpr LoadStoreAcquireReleaseOp CASLB
constexpr FPIntegerConvertOp FMOV_sw
constexpr FPImmediateOp FMOV_d_imm
constexpr DataProcessing2SourceOp ASRV_x
constexpr FPIntegerConvertOp FMOV_xd
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_ST4_h_post
constexpr FPDataProcessing2SourceOp FDIV_s
constexpr NEONScalar2RegMiscOp NEON_SCVTF_scalar
constexpr FPDataProcessing1SourceOp FSQRT_s
constexpr FPDataProcessing1SourceOp FCVT_ds
constexpr NEON3SameOp NEON_EOR
constexpr FPDataProcessing2SourceOp FMINNM_s
constexpr FPDataProcessing1SourceOp FRINTI_d
constexpr NEONLoadStoreMultiStructOp NEON_ST1_1v
constexpr NEONLoadStoreSingleStructOp NEON_LD2R
constexpr PCRelAddressingOp ADR
constexpr FPIntegerConvertOp FCVTNS_ws
constexpr NEON2RegMiscOp NEON_FRINTX
constexpr NEON2RegMiscOp NEON_FCVTNS
constexpr NEONLoadStoreSingleStructOp NEON_LD2_b
constexpr NEONLoadStoreMultiStructPostIndexOp NEON_LD1_3v_post
constexpr NEON3SameOp NEON_FCMGT
constexpr FPDataProcessing1SourceOp FRINTZ_s
constexpr NEON3DifferentOp NEON_SMLAL
constexpr FPIntegerConvertOp FCVTAU_xs
const unsigned kDebugMessageOffset
constexpr FPIntegerConvertOp FCVTZU_wd
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_ST2_h_post
constexpr NEON2RegMiscOp NEON_SQXTN
constexpr NEONLoadStoreSingleStructPostIndexOp NEON_LD2_h_post
constexpr NEON2RegMiscOp NEON_FCVTPU
constexpr NEON2RegMiscOp NEON_CLZ
constexpr NEON2RegMiscOp NEON_FCVTNU
constexpr NEON3DifferentOp NEON_UMULL2
constexpr DataProcessing2SourceOp SDIV_x
constexpr LoadStoreAcquireReleaseOp CASL_x
constexpr NEONByIndexedElementOp NEON_SMULL_byelement
constexpr NEONScalar3SameOp NEON_CMGT_scalar
constexpr FPDataProcessing3SourceOp FMSUB_s
@ None
Definition v8-object.h:141
base::SmallVector< RegisterT, kStaticCapacity > registers_
const uintptr_t stack_limit_
#define kCalleeSavedV
#define kCallerSavedV
Node * prev_
#define NOP(...)
#define V8_DEBUGGING_EXPORT
#define UNREACHABLE()
Definition logging.h:67
#define FATAL(...)
Definition logging.h:47
#define DCHECK_LE(v1, v2)
Definition logging.h:490
#define CHECK(condition)
Definition logging.h:124
#define CHECK_LE(lhs, rhs)
#define DCHECK_NOT_NULL(val)
Definition logging.h:492
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define UNIMPLEMENTED()
Definition logging.h:66
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define USE(...)
Definition macros.h:293
constexpr T RoundUp(T x, intptr_t m)
Definition macros.h:387
#define arraysize(array)
Definition macros.h:67
#define V8PRIxPTR_FMT
Definition macros.h:340
std::unique_ptr< ValueMirror > value