wasm-interpreter.cc
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
// Copyright 2024 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/wasm/interpreter/wasm-interpreter.h"

#include <atomic>
#include <limits>
#include <optional>
#include <type_traits>

#include "include/v8-metrics.h"
// ...
#include "src/wasm/decoder.h"
// ...

namespace v8 {
namespace internal {
namespace wasm {
#define EMIT_INSTR_HANDLER(name) EmitFnId(k_##name);
#define EMIT_INSTR_HANDLER_WITH_PC(name, pc) EmitFnId(k_##name, pc);

#define START_EMIT_INSTR_HANDLER()                       \
  {                                                      \
    size_t _current_code_offset = code_.size();          \
    size_t _current_slots_size = slots_.size();          \
    DCHECK(!no_nested_emit_instr_handler_guard_);        \
    no_nested_emit_instr_handler_guard_ = true;          \
    stack_.clear_history();                              \
    if (v8_flags.drumbrake_compact_bytecode) {           \
      handler_size_ = InstrHandlerSize::Small;           \
    } else {                                             \
      DCHECK_EQ(handler_size_, InstrHandlerSize::Large); \
    }                                                    \
    while (true) {                                       \
      current_instr_encoding_failed_ = false;

#define START_EMIT_INSTR_HANDLER_WITH_ID(name) \
  START_EMIT_INSTR_HANDLER()                   \
  EMIT_INSTR_HANDLER(name)

#define START_EMIT_INSTR_HANDLER_WITH_PC(name, pc) \
  START_EMIT_INSTR_HANDLER()                       \
  EMIT_INSTR_HANDLER_WITH_PC(name, pc)

#define END_EMIT_INSTR_HANDLER()                                               \
  if (v8_flags.drumbrake_compact_bytecode && current_instr_encoding_failed_) { \
    code_.resize(_current_code_offset);                                        \
    slots_.resize(_current_slots_size);                                        \
    stack_.rollback();                                                         \
    current_instr_encoding_failed_ = false;                                    \
    handler_size_ = InstrHandlerSize::Large;                                   \
    continue;                                                                  \
  }                                                                            \
  break;                                                                       \
  }                                                                            \
  DCHECK(!current_instr_encoding_failed_);                                     \
  no_nested_emit_instr_handler_guard_ = false;                                 \
  }
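
// A minimal usage sketch of the macros above (EmitI32Operand/EmitSlotOffset
// are hypothetical helper names, used only for this illustration): the
// generator first tries the compact (Small) encoding; if any operand does not
// fit, END_EMIT_INSTR_HANDLER rolls back code_, slots_ and the stack history,
// and the enclosing while(true) retries the same instruction once with the
// Large encoding.
//
//   START_EMIT_INSTR_HANDLER_WITH_ID(s2s_I32LoadMem)
//   EmitI32Operand(offset);   // may set current_instr_encoding_failed_
//   EmitSlotOffset(slot);     // ditto
//   END_EMIT_INSTR_HANDLER()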

////////////////////////////////////////////////////////////////////////////
// Memory64 macros

#define EMIT_MEM64_INSTR_HANDLER(name, mem64_name, is_memory64) \
  if (V8_UNLIKELY(is_memory64)) {                               \
    EMIT_INSTR_HANDLER(mem64_name);                             \
  } else {                                                      \
    EMIT_INSTR_HANDLER(name);                                   \
  }

#define EMIT_MEM64_INSTR_HANDLER_WITH_PC(name, mem64_name, is_memory64, pc) \
  if (V8_UNLIKELY(is_memory64)) {                                           \
    EMIT_INSTR_HANDLER_WITH_PC(mem64_name, pc);                             \
  } else {                                                                  \
    EMIT_INSTR_HANDLER_WITH_PC(name, pc);                                   \
  }
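
// Usage sketch (illustrative only): when lowering a memory instruction the
// generator selects the 64-bit-memory variant of a handler once, at emit
// time, e.g.
//
//   EMIT_MEM64_INSTR_HANDLER_WITH_PC(s2s_I32LoadMem, s2s_I32LoadMem_Idx64,
//                                    is_memory64, pc)
//
// so the interpreter never needs to test the memory type per executed
// instruction.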

WasmInterpreter::CodeMap::CodeMap(Isolate* isolate, const WasmModule* module,
                                  const uint8_t* module_start, Zone* zone)
    : zone_(zone),
      isolate_(isolate),
      module_(module),
      interpreter_code_(zone),
      bytecode_generation_time_(),
      generated_code_size_(0) {
  if (module == nullptr) return;
  interpreter_code_.reserve(module->functions.size());
  for (const WasmFunction& function : module->functions) {
    if (function.imported) {
      DCHECK(!function.code.is_set());
      AddFunction(&function, nullptr, nullptr);
    } else {
      AddFunction(&function, module_start + function.code.offset(),
                  module_start + function.code.end_offset());
    }
  }
}

void WasmInterpreter::CodeMap::Preprocess(uint32_t function_index) {
  InterpreterCode* code = &interpreter_code_[function_index];
  DCHECK_EQ(code->function->imported, code->start == nullptr);
  DCHECK(!code->bytecode && code->start);

  base::TimeTicks start_time = base::TimeTicks::Now();

  // Compute the control targets map and the local declarations.
  BytecodeIterator it(code->start, code->end, &code->locals, zone_);

  WasmBytecodeGenerator bytecode_generator(function_index, code, module_);
  code->bytecode = bytecode_generator.GenerateBytecode();

  // Generate histogram sample to measure the time spent generating the
  // bytecode. Reuse the WasmCompileModuleMicroSeconds.wasm that is currently
  // obsolete.
  if (base::TimeTicks::IsHighResolution()) {
    base::TimeDelta duration = base::TimeTicks::Now() - start_time;
    bytecode_generation_time_ += duration;
    int bytecode_generation_time_usecs =
        static_cast<int>(bytecode_generation_time_.InMicroseconds());

    // TODO(paolosev@microsoft.com) Do not add a sample for each function!
    isolate_->counters()->wasm_compile_wasm_module_time()->AddSample(
        bytecode_generation_time_usecs);
  }

  // Generate histogram sample to measure the bytecode size. Reuse the
  // V8.WasmModuleCodeSizeMiB (see {NativeModule::SampleCodeSize}).
  int prev_code_size_mb = generated_code_size_ == 0
                              ? -1
                              : static_cast<int>(generated_code_size_ / MB);
  generated_code_size_.fetch_add(code->bytecode->GetCodeSize());
  int code_size_mb = static_cast<int>(generated_code_size_ / MB);
  if (prev_code_size_mb < code_size_mb) {
    Histogram* histogram = isolate_->counters()->wasm_module_code_size_mb();
    histogram->AddSample(code_size_mb);
  }
}

// static
WasmInterpreterThreadMap* WasmInterpreterThread::thread_interpreter_map_s =
    nullptr;

WasmInterpreterThread* WasmInterpreterThreadMap::GetCurrentInterpreterThread(
    Isolate* isolate) {
  const int current_thread_id = ThreadId::Current().ToInteger();
  {
    base::MutexGuard guard(&mutex_);

    auto it = map_.find(current_thread_id);
    if (it == map_.end()) {
      map_[current_thread_id] =
          std::make_unique<WasmInterpreterThread>(isolate);
      it = map_.find(current_thread_id);
    }
    return it->second.get();
  }
}

void WasmInterpreterThreadMap::NotifyIsolateDisposal(Isolate* isolate) {
  base::MutexGuard guard(&mutex_);

  auto it = map_.begin();
  while (it != map_.end()) {
    WasmInterpreterThread* thread = it->second.get();
    if (thread->GetIsolate() == isolate) {
      thread->TerminateExecutionTimers();
      it = map_.erase(it);
    } else {
      ++it;
    }
  }
}

void WasmInterpreterThread::Activation::SetCaughtException(
    Isolate* isolate, uint32_t catch_block_index,
    DirectHandle<Object> exception) {
  if (caught_exceptions_.is_null()) {
    DCHECK_NOT_NULL(current_function_);
    uint32_t blocks_count = current_function_->GetBlocksCount();
    DirectHandle<FixedArray> caught_exceptions =
        isolate->factory()->NewFixedArrayWithHoles(blocks_count);
    caught_exceptions_ = isolate->global_handles()->Create(*caught_exceptions);
  }
  caught_exceptions_->set(catch_block_index, *exception);
}

DirectHandle<Object> WasmInterpreterThread::Activation::GetCaughtException(
    Isolate* isolate, uint32_t catch_block_index) const {
  DirectHandle<Object> exception(caught_exceptions_->get(catch_block_index),
                                 isolate);
  DCHECK(!IsTheHole(*exception));
  return exception;
}

void WasmInterpreterThread::Activation::DisposeCaughtExceptionsArray(
    Isolate* isolate) {
  if (!caught_exceptions_.is_null()) {
    isolate->global_handles()->Destroy(caught_exceptions_.location());
    caught_exceptions_ = Handle<FixedArray>::null();
  }
}

WasmExecutionTimer::WasmExecutionTimer(Isolate* isolate,
                                       bool track_jitless_wasm)
    : execute_ratio_histogram_(
          track_jitless_wasm
              ? isolate->counters()->wasm_jitless_execution_ratio()
              : isolate->counters()->wasm_jit_execution_ratio()),
      slow_wasm_histogram_(
          track_jitless_wasm
              ? isolate->counters()->wasm_jitless_execution_too_slow()
              : isolate->counters()->wasm_jit_execution_too_slow()),
      window_has_started_(false),
      next_interval_time_(),
      start_interval_time_(),
      window_running_time_(),
      sample_duration_(base::TimeDelta::FromMilliseconds(std::max(
          0, v8_flags.wasm_exec_time_histogram_sample_duration.value()))),
      slow_threshold_(v8_flags.wasm_exec_time_histogram_slow_threshold.value()),
      slow_threshold_samples_count_(std::max(
          1, v8_flags.wasm_exec_time_slow_threshold_samples_count.value())),
      isolate_(isolate) {
  int cooldown_interval_in_msec = std::max(
      0, v8_flags.wasm_exec_time_histogram_sample_period.value() -
             v8_flags.wasm_exec_time_histogram_sample_duration.value());
  cooldown_interval_ =
      base::TimeDelta::FromMilliseconds(cooldown_interval_in_msec);
}
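
// Sampling scheme, as implied by the fields above: Wasm execution time is
// accumulated in windows of sample_duration_ milliseconds; after a window
// closes, a cooldown of (sample_period - sample_duration) milliseconds must
// elapse before the next window begins. With, e.g., a 10000 ms period and a
// 1000 ms duration (illustrative values, not defaults), roughly one second
// out of every ten of Wasm execution is sampled into the histograms.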

// ...

void WasmExecutionTimer::AddSample(int running_ratio) {
  DCHECK(v8_flags.wasm_enable_exec_time_histograms && v8_flags.slow_histograms);

  execute_ratio_histogram_->AddSample(running_ratio);

  // Emit a Jit[less]WasmExecutionTooSlow sample if the average of the last
  // {v8_flags.wasm_exec_time_slow_threshold_samples_count} samples is above
  // {v8_flags.wasm_exec_time_histogram_slow_threshold}.
  samples_.push_back(running_ratio);
  if (static_cast<int>(samples_.size()) == slow_threshold_samples_count_) {
    int sum = 0;
    for (int sample : samples_) sum += sample;
    int average = sum / slow_threshold_samples_count_;
    if (average >= slow_threshold_) {
      slow_wasm_histogram_->AddSample(average);

      if (isolate_ && !isolate_->context().is_null()) {
        // Skip this event because it is not (yet) supported by Chromium.

        // HandleScope scope(isolate_);
        // v8::metrics::WasmInterpreterSlowExecution event;
        // event.slow_execution = true;
        // event.jitless = v8_flags.wasm_jitless;
        // event.cpu_percentage = average;
        // v8::metrics::Recorder::ContextId context_id =
        //     isolate_->GetOrRegisterRecorderContextId(
        //         isolate_->native_context());
        // isolate_->metrics_recorder()->DelayMainThreadEvent(event,
        //                                                    context_id);
      }
    }

    samples_.clear();
  }
}

void WasmExecutionTimer::Start() {
  DCHECK(v8_flags.wasm_enable_exec_time_histograms && v8_flags.slow_histograms);

  base::TimeTicks now = base::TimeTicks::Now();
  if (window_has_started_) {
    if (now - start_interval_time_ > sample_duration_) {
      EndInterval();
    } else {
      // ...
    }
  } else {
    if (now >= next_interval_time_) {
      BeginInterval(true);
    } else {
      // Ignore this start event.
    }
  }
}

void WasmExecutionTimer::Stop() {
  DCHECK(v8_flags.wasm_enable_exec_time_histograms && v8_flags.slow_histograms);

  base::TimeTicks now = base::TimeTicks::Now();
  if (window_has_started_) {
    base::TimeDelta elapsed = now - start_interval_time_;
    window_running_time_ += elapsed;
    if (window_running_time_ >= sample_duration_) {
      EndInterval();
    }
  } else {
    if (now >= next_interval_time_) {
      BeginInterval(false);
    } else {
      // Ignore this stop event.
    }
  }
}

// ...

namespace {
void NopFinalizer(const v8::WeakCallbackInfo<void>& data) {
  Address* global_handle_location =
      reinterpret_cast<Address*>(data.GetParameter());
  GlobalHandles::Destroy(global_handle_location);
}

Handle<WasmInstanceObject> MakeWeak(
    Isolate* isolate, Handle<WasmInstanceObject> instance_object) {
  Handle<WasmInstanceObject> weak_instance =
      isolate->global_handles()->Create<WasmInstanceObject>(*instance_object);
  Address* global_handle_location = weak_instance.location();
  GlobalHandles::MakeWeak(global_handle_location, global_handle_location,
                          &NopFinalizer, v8::WeakCallbackType::kParameter);
  return weak_instance;
}

std::optional<wasm::ValueType> GetWasmReturnTypeFromSignature(
    const FunctionSig* wasm_signature) {
  if (wasm_signature->return_count() == 0) return {};

  DCHECK_EQ(wasm_signature->return_count(), 1);
  return wasm_signature->GetReturn(0);
}

}  // namespace

// Build the interpreter call stack for the current activation. For each stack
// frame we need to calculate the Wasm function index and the original Wasm
// bytecode location, calculated from the current WasmBytecode offset.
std::vector<WasmInterpreterStackEntry>
WasmInterpreterThread::Activation::CaptureStackTrace(
    const TrapStatus* trap_status) const {
  std::vector<WasmInterpreterStackEntry> stack_trace;
  const FrameState* frame_state = &current_frame_state_;
  DCHECK_NOT_NULL(frame_state);

  if (trap_status) {
    stack_trace.push_back(WasmInterpreterStackEntry{
        trap_status->trap_function_index, trap_status->trap_pc});
  } else {
    if (frame_state->current_function_) {
      stack_trace.push_back(WasmInterpreterStackEntry{
          frame_state->current_function_->GetFunctionIndex(),
          frame_state->current_bytecode_
              ? static_cast<int>(
                    frame_state->current_function_->GetPcFromTrapCode(
                        frame_state->current_bytecode_))
              : 0});
    }
  }

  frame_state = frame_state->previous_frame_;
  while (frame_state && frame_state->current_function_) {
    stack_trace.insert(
        stack_trace.begin(),
        WasmInterpreterStackEntry{
            frame_state->current_function_->GetFunctionIndex(),
            frame_state->current_bytecode_
                ? static_cast<int>(
                      frame_state->current_function_->GetPcFromTrapCode(
                          frame_state->current_bytecode_))
                : 0});
    frame_state = frame_state->previous_frame_;
  }

  // It is possible to have a WasmInterpreterEntryFrame without having a Wasm
  // current_function_. This can happen if from JS we call a JS function that
  // is imported and exported by Wasm. In this case let's add a dummy stack
  // entry.
  if (stack_trace.empty()) {
    stack_trace.push_back(WasmInterpreterStackEntry{0, 0});
  }

  return stack_trace;
}

int WasmInterpreterThread::Activation::GetFunctionIndex(int index) const {
  std::vector<int> function_indexes;
  const FrameState* frame_state = &current_frame_state_;
  // TODO(paolosev@microsoft.com) - Too slow?
  while (frame_state->current_function_) {
    function_indexes.push_back(
        frame_state->current_function_->GetFunctionIndex());
    frame_state = frame_state->previous_frame_;
  }

  if (static_cast<size_t>(index) < function_indexes.size()) {
    return function_indexes[function_indexes.size() - index - 1];
  }
  return -1;
}

WasmInterpreterThread::WasmInterpreterThread(Isolate* isolate)
    : isolate_(isolate),
      trap_reason_(TrapReason::kTrapUnreachable),
      stack_mem_(nullptr),
      reference_stack_(isolate_->global_handles()->Create(
          ReadOnlyRoots(isolate_).empty_fixed_array())),
      execution_timer_(isolate, true) {
  PageAllocator* page_allocator = GetPlatformPageAllocator();
  stack_mem_ = AllocatePages(page_allocator, nullptr, kMaxStackSize,
                             page_allocator->AllocatePageSize(),
                             PageAllocator::kReadWrite);
  if (!stack_mem_ /* ... */) {
    V8::FatalProcessOutOfMemory(
        nullptr, "WasmInterpreterThread::WasmInterpreterThread",
        "Cannot allocate Wasm interpreter stack");
    UNREACHABLE();
  }
}

// ...

void WasmInterpreterThread::EnsureRefStackSpace(size_t new_size) {
  if (V8_LIKELY(current_ref_stack_size_ >= new_size)) return;
  size_t requested_size = base::bits::RoundUpToPowerOfTwo64(new_size);
  new_size = std::max(size_t{8},
                      std::max(2 * current_ref_stack_size_, requested_size));
  int grow_by = static_cast<int>(new_size - current_ref_stack_size_);
  HandleScope handle_scope(isolate_);  // Avoid leaking handles.
  DirectHandle<FixedArray> new_ref_stack =
      isolate_->factory()->CopyFixedArrayAndGrow(reference_stack_, grow_by);
  new_ref_stack->FillWithHoles(static_cast<int>(current_ref_stack_size_),
                               static_cast<int>(new_size));
  isolate_->global_handles()->Destroy(reference_stack_.location());
  reference_stack_ = isolate_->global_handles()->Create(*new_ref_stack);
  current_ref_stack_size_ = new_size;
}

void WasmInterpreterThread::ClearRefStackValues(size_t index, size_t count) {
  reference_stack_->FillWithHoles(static_cast<int>(index),
                                  static_cast<int>(index + count));
}

void WasmInterpreterThread::RaiseException(Isolate* isolate,
                                           MessageTemplate message) {
  if (!isolate->has_exception()) {
    ClearThreadInWasmScope wasm_flag(isolate);
    DirectHandle<JSObject> error_obj =
        isolate->factory()->NewWasmRuntimeError(message);
    JSObject::AddProperty(isolate, error_obj,
                          isolate->factory()->wasm_uncatchable_symbol(),
                          isolate->factory()->true_value(), NONE);
    isolate->Throw(*error_obj);
  }
}

// static
// ...

// static
TrapReason WasmInterpreterThread::GetRuntimeLastWasmError(Isolate* isolate) {
  WasmInterpreterThread* current_thread = GetCurrentInterpreterThread(isolate);
  // TODO(paolosev@microsoft.com): store in new data member?
  return current_thread->trap_reason_;
}

void WasmInterpreterThread::StartExecutionTimer() {
  if (v8_flags.wasm_enable_exec_time_histograms && v8_flags.slow_histograms) {
    execution_timer_.Start();
  }
}

void WasmInterpreterThread::StopExecutionTimer() {
  if (v8_flags.wasm_enable_exec_time_histograms && v8_flags.slow_histograms) {
    execution_timer_.Stop();
  }
}

void WasmInterpreterThread::TerminateExecutionTimers() {
  if (v8_flags.wasm_enable_exec_time_histograms && v8_flags.slow_histograms) {
    execution_timer_.Terminate();
  }
}

#if !defined(V8_DRUMBRAKE_BOUNDS_CHECKS)

enum {
#define ITEM_ENUM_DEFINE(name) name##counter,
  FOREACH_LOAD_STORE_INSTR_HANDLER(ITEM_ENUM_DEFINE)
#undef ITEM_ENUM_DEFINE
      kInstructionCount
};

V8_DECLARE_ONCE(init_instruction_table_once);
V8_DECLARE_ONCE(init_trap_handlers_once);

// A subset of the Wasm instruction handlers is implemented as ASM builtins,
// and not with normal C++ functions. This is done only for the LoadMem and
// StoreMem builtins, which can trap for out-of-bounds accesses.
// V8 already implements out-of-bounds trap handling for compiled Wasm code and
// allocates two large guard pages before and after each Wasm memory region to
// detect out-of-bounds memory accesses. Once an access violation exception
// arises, the V8 exception filter intercepts the exception and checks whether
// it originates from Wasm code.
// The Wasm interpreter reuses the same logic, and
// WasmInterpreter::HandleWasmTrap is called by the SEH exception handler to
// check whether the access violation was caused by an interpreter instruction
// handler. It is necessary that these handlers are Wasm builtins for two
// reasons:
// 1. We want to know precisely the start and end address of each handler to
// verify whether the AV happened inside one of the Load/Store builtins and can
// be handled with a Wasm trap.
// 2. If the exception is handled, we interrupt the execution of
// TrapMemOutOfBounds, which sets the TRAPPED state and breaks the execution of
// the chain of instruction handlers with an x64 'ret'. This only works if
// there is no stack cleanup to do in the handler that caused the failure (no
// registers to pop from the stack before the 'ret'). Therefore we cannot rely
// on the compiler; we can only make sure that this is the case if we implement
// the handlers in assembly.
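
// Sketch of the resulting dispatch-table layout, inferred from the
// registration loops below (illustrative, not a definition from this file):
// for every memory-access handler there are two entries, a Small
// (compact-bytecode) builtin in the first half of kInstructionTable and a
// Large builtin at the same index plus kInstructionCount:
//
//   kInstructionTable[k_s2s_I32LoadMem]                      // Builtin ..._s
//   kInstructionTable[k_s2s_I32LoadMem + kInstructionCount]  // Builtin ..._l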

// Forward declaration.
INSTRUCTION_HANDLER_FUNC Trap(const uint8_t* code, uint32_t* sp,
                              WasmInterpreterRuntime* wasm_runtime,
                              int64_t r0, double fp0);

void InitTrapHandlersOnce(Isolate* isolate) {
  ClearThreadInWasmScope wasm_flag(isolate);

  // Overwrites the instruction handlers that access memory and can cause an
  // out-of-bounds trap with builtin versions that don't have an explicit
  // bounds check but rely on a trap handler to intercept the access violation
  // and transform it into a trap.
  EmbeddedData embedded_data = EmbeddedData::FromBlob();
#define V(name)                                                             \
  if (v8_flags.drumbrake_compact_bytecode) {                                \
    trap_handler::RegisterHandlerData(                                      \
        reinterpret_cast<Address>(kInstructionTable[k_##name]),             \
        embedded_data.InstructionSizeOf(Builtin::k##name##_s), 0, nullptr); \
  }                                                                         \
  trap_handler::RegisterHandlerData(                                        \
      reinterpret_cast<Address>(                                            \
          kInstructionTable[k_##name + kInstructionCount]),                 \
      embedded_data.InstructionSizeOf(Builtin::k##name##_l), 0, nullptr);
  FOREACH_LOAD_STORE_INSTR_HANDLER(V)
#undef V
}

void InitInstructionTableOnce(Isolate* isolate) {
  size_t index = 0;
#define V(name)                                                                \
  if (v8_flags.drumbrake_compact_bytecode) {                                   \
    kInstructionTable[index] = reinterpret_cast<PWasmOp*>(                     \
        isolate->builtins()->code(Builtin::k##name##_s)->instruction_start()); \
  }                                                                            \
  kInstructionTable[kInstructionCount + index++] = reinterpret_cast<PWasmOp*>( \
      isolate->builtins()->code(Builtin::k##name##_l)->instruction_start());

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-calling-convention"
#endif  // __clang__
  FOREACH_LOAD_STORE_INSTR_HANDLER(V)
#ifdef __clang__
#pragma clang diagnostic pop
#endif  // __clang__
#undef V

#ifdef V8_ENABLE_DRUMBRAKE_TRACING
  if (v8_flags.trace_drumbrake_compact_bytecode) {
    index = 0;
#define DEFINE_INSTR_HANDLER(name) kInstructionHandlerNames[index++] = #name;
    FOREACH_INSTR_HANDLER(DEFINE_INSTR_HANDLER)
    FOREACH_TRACE_INSTR_HANDLER(DEFINE_INSTR_HANDLER)
#undef DEFINE_INSTR_HANDLER
  }
#endif  // V8_ENABLE_DRUMBRAKE_TRACING
}
#endif  // !V8_DRUMBRAKE_BOUNDS_CHECKS

WasmInterpreter::WasmInterpreter(Isolate* isolate, const WasmModule* module,
                                 const ModuleWireBytes& wire_bytes,
                                 Handle<WasmInstanceObject> instance_object)
    : zone_(isolate->allocator(), ZONE_NAME),
      instance_object_(MakeWeak(isolate, instance_object)),
      module_bytes_(wire_bytes.start(), wire_bytes.end(), &zone_),
      codemap_(isolate, module, module_bytes_.data(), &zone_) {
  wasm_runtime_ = std::make_shared<WasmInterpreterRuntime>(
      module, isolate, instance_object_, &codemap_);
  module->SetWasmInterpreter(wasm_runtime_);

#if !defined(V8_DRUMBRAKE_BOUNDS_CHECKS)
  // TODO(paolosev@microsoft.com) - For modules that have 64-bit Wasm memory we
  // need to use explicit bounds checks; memory guard pages only work with
  // 32-bit memories. This could be implemented by allocating a different
  // dispatch table for each instance (probably in the WasmInterpreterRuntime
  // object) and patching the entries of Load/Store instructions with builtin
  // handlers only for instances related to modules that have 32-bit memories.
  // 64-bit memories are not supported yet by DrumBrake.
  base::CallOnce(&init_instruction_table_once, &InitInstructionTableOnce,
                 isolate);
  base::CallOnce(&init_trap_handlers_once, &InitTrapHandlersOnce, isolate);
#endif  // !V8_DRUMBRAKE_BOUNDS_CHECKS
}

WasmInterpreterThread::State WasmInterpreter::ContinueExecution(
    WasmInterpreterThread* thread, bool called_from_js) {
  wasm_runtime_->ContinueExecution(thread, called_from_js);
  return thread->state();
}

////////////////////////////////////////////////////////////////////////////
//
// DrumBrake: implementation of an interpreter for WebAssembly.
//
////////////////////////////////////////////////////////////////////////////

constexpr uint32_t kFloat32SignBitMask = uint32_t{1} << 31;
constexpr uint64_t kFloat64SignBitMask = uint64_t{1} << 63;

#ifdef DRUMBRAKE_ENABLE_PROFILING

static const char* prev_op_name_s = nullptr;
static std::map<std::pair<const char*, const char*>, uint64_t>*
    ops_pairs_count_s = nullptr;
static std::map<const char*, uint64_t>* ops_count_s = nullptr;
static void ProfileOp(const char* op_name) {
  if (!ops_pairs_count_s) {
    ops_pairs_count_s =
        new std::map<std::pair<const char*, const char*>, uint64_t>();
    ops_count_s = new std::map<const char*, uint64_t>();
  }
  if (prev_op_name_s) {
    (*ops_pairs_count_s)[{prev_op_name_s, op_name}]++;
  }
  (*ops_count_s)[op_name]++;
  prev_op_name_s = op_name;
}

template <typename A, typename B>
std::pair<B, A> flip_pair(const std::pair<A, B>& p) {
  return std::pair<B, A>(p.second, p.first);
}
template <typename A, typename B>
std::multimap<B, A> flip_map(const std::map<A, B>& src) {
  std::multimap<B, A> dst;
  std::transform(src.begin(), src.end(), std::inserter(dst, dst.begin()),
                 flip_pair<A, B>);
  return dst;
}

static void PrintOpsCount() {
  std::multimap<uint64_t, const char*> count_ops_map = flip_map(*ops_count_s);
  uint64_t total_count = 0;
  for (auto& pair : count_ops_map) {
    printf("%10lld, %s\n", pair.first, pair.second);
    total_count += pair.first;
  }
  printf("Total count: %10lld\n\n", total_count);

  std::multimap<uint64_t, std::pair<const char*, const char*>>
      count_pairs_ops_map = flip_map(*ops_pairs_count_s);
  for (auto& pair : count_pairs_ops_map) {
    printf("%10lld, %s -> %s\n", pair.first, pair.second.first,
           pair.second.second);
  }
}

static void PrintAndClearProfilingData() {
  PrintOpsCount();
  delete ops_count_s;
  ops_count_s = nullptr;
  delete ops_pairs_count_s;
  ops_pairs_count_s = nullptr;
}

#define NextOp()                                                             \
  ProfileOp(__FUNCTION__);                                                   \
  MUSTTAIL return kInstructionTable[ReadFnId(code) & kInstructionTableMask]( \
      code, sp, wasm_runtime, r0, fp0)

#else  // DRUMBRAKE_ENABLE_PROFILING

#define NextOp()                                                             \
  MUSTTAIL return kInstructionTable[ReadFnId(code) & kInstructionTableMask]( \
      code, sp, wasm_runtime, r0, fp0)

#endif  // DRUMBRAKE_ENABLE_PROFILING
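
// Dispatch works by tail calls: every instruction handler ends with NextOp(),
// which reads the id of the next handler from the bytecode stream and
// tail-calls into kInstructionTable (MUSTTAIL guarantees the call does not
// grow the machine stack). A rough sketch of the execution pattern:
//
//   INSTRUCTION_HANDLER_FUNC s2s_SomeOp(const uint8_t* code, uint32_t* sp,
//                                       WasmInterpreterRuntime* wasm_runtime,
//                                       int64_t r0, double fp0) {
//     // ... do the work for this instruction ...
//     NextOp();  // expands to MUSTTAIL return kInstructionTable[...](...)
//   }
//
// s2s_SomeOp is a hypothetical handler name used only for this illustration.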

static int StructFieldOffset(const StructType* struct_type, int field_index) {
  return wasm::ObjectAccess::ToTagged(WasmStruct::kHeaderSize +
                                      struct_type->field_offset(field_index));
}

InstructionHandler s_unwind_code = InstructionHandler::k_s2s_Unwind;

class HandlersBase {
 public:
  INSTRUCTION_HANDLER_FUNC s2s_Unreachable(const uint8_t* code, uint32_t* sp,
                                           WasmInterpreterRuntime* wasm_runtime,
                                           int64_t r0, double fp0) {
    MUSTTAIL return Trap(code, sp, wasm_runtime,
                         TrapReason::kTrapUnreachable, fp0);
  }

  INSTRUCTION_HANDLER_FUNC
  s2s_Unwind(const uint8_t* code, uint32_t* sp,
             WasmInterpreterRuntime* wasm_runtime, int64_t r0, double fp0) {
    // Break the chain of calls.
  }

  INSTRUCTION_HANDLER_FUNC Trap(const uint8_t* code, uint32_t* sp,
                                WasmInterpreterRuntime* wasm_runtime,
                                int64_t r0, double fp0) {
    TrapReason trap_reason = static_cast<TrapReason>(r0);
    wasm_runtime->SetTrap(trap_reason, code);
    MUSTTAIL return s_unwind_func_addr(code, sp, wasm_runtime, trap_reason, .0);
  }

  static constexpr PWasmOp* s_unwind_func_addr = HandlersBase::s2s_Unwind;
};

#define TRAP(trap_reason) \
  MUSTTAIL return HandlersBase::Trap(code, sp, wasm_runtime, trap_reason, fp0);

#define INLINED_TRAP(trap_reason)           \
  wasm_runtime->SetTrap(trap_reason, code); \
  MUSTTAIL return s_unwind_func_addr(code, sp, wasm_runtime, trap_reason, .0);

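// Naming convention, inferred from the handlers below: the prefix encodes
// where an instruction's input comes from and where its output goes, with
// 'r' meaning the scratch registers (r0 for integers, fp0 for floats) and
// 's' meaning stack slots. For example, r2s_Select takes the condition in r0
// and pushes its result to a stack slot, while s2r_I32LoadMem pops its memory
// index from the stack and leaves the loaded value in r0.
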
template <bool Compressed>
class Handlers : public HandlersBase {
  // ...
  using slot_offset_t = traits::slot_offset_t;
  using memory_offset32_t = traits::memory_offset32_t;
  using memory_offset64_t = traits::memory_offset64_t;

 public:
  template <typename T>
  static inline T Read(const uint8_t*& code) {
    T res = base::ReadUnalignedValue<T>(reinterpret_cast<Address>(code));
    code += sizeof(T);
    return res;
  }

  template <>
  inline slot_offset_t Read(const uint8_t*& code) {
    slot_offset_t res = base::ReadUnalignedValue<slot_offset_t>(
        reinterpret_cast<Address>(code));
#if V8_ENABLE_DRUMBRAKE_TRACING
    if (v8_flags.trace_drumbrake_compact_bytecode) {
      printf("Read slot_offset_t %d\n", res);
    }
#endif  // V8_ENABLE_DRUMBRAKE_TRACING
    code += sizeof(slot_offset_t);
    return res;
  }

  // Returns the maximum of the two parameters according to JavaScript
  // semantics.
  template <typename T>
  static inline T JSMax(T x, T y) {
    if (std::isnan(x) || std::isnan(y)) {
      return std::numeric_limits<T>::quiet_NaN();
    }
    if (std::signbit(x) < std::signbit(y)) return x;
    return x > y ? x : y;
  }

  // Returns the minimum of the two parameters according to JavaScript
  // semantics.
  template <typename T>
  static inline T JSMin(T x, T y) {
    if (std::isnan(x) || std::isnan(y)) {
      return std::numeric_limits<T>::quiet_NaN();
    }
    if (std::signbit(x) < std::signbit(y)) return y;
    return x > y ? y : x;
  }
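
  // Illustrative corner cases for the helpers above: JSMax(NaN, 1.0) is NaN
  // (any NaN operand wins); JSMax(-0.0, +0.0) is +0.0 and JSMin(-0.0, +0.0)
  // is -0.0, because std::signbit distinguishes the two zeros even though
  // they compare equal numerically.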

  static inline uint8_t* ReadMemoryAddress(uint8_t*& code) {
    Address res =
        base::ReadUnalignedValue<Address>(reinterpret_cast<Address>(code));
    code += sizeof(Address);
    return reinterpret_cast<uint8_t*>(res);
  }

  static inline uint32_t ReadGlobalIndex(const uint8_t*& code) {
    uint32_t res =
        base::ReadUnalignedValue<uint32_t>(reinterpret_cast<Address>(code));
    code += sizeof(uint32_t);
    return res;
  }

  template <typename T>
  static inline void push(uint32_t*& sp, const uint8_t*& code,
                          WasmInterpreterRuntime* wasm_runtime, T val) {
    slot_offset_t offset = Read<slot_offset_t>(code);
    base::WriteUnalignedValue<T>(reinterpret_cast<Address>(sp + offset), val);
#ifdef V8_ENABLE_DRUMBRAKE_TRACING
    if (v8_flags.trace_drumbrake_execution)
      wasm_runtime->TracePush<T>(offset * kSlotSize);
#endif  // V8_ENABLE_DRUMBRAKE_TRACING
  }

  template <>
  inline void push(uint32_t*& sp, const uint8_t*& code,
                   WasmInterpreterRuntime* wasm_runtime, WasmRef ref) {
    slot_offset_t offset = Read<slot_offset_t>(code);
    uint32_t ref_stack_index = Read<int32_t>(code);
    base::WriteUnalignedValue<uint64_t>(
        reinterpret_cast<Address>(sp + offset), ref_stack_index);
    wasm_runtime->StoreWasmRef(ref_stack_index, ref);
#ifdef V8_ENABLE_DRUMBRAKE_TRACING
    if (v8_flags.trace_drumbrake_execution)
      wasm_runtime->TracePush<WasmRef>(offset * kSlotSize);
#endif  // V8_ENABLE_DRUMBRAKE_TRACING
  }

  template <typename T>
  static inline T pop(uint32_t*& sp, const uint8_t*& code,
                      WasmInterpreterRuntime* wasm_runtime) {
    slot_offset_t offset = Read<slot_offset_t>(code);
#ifdef V8_ENABLE_DRUMBRAKE_TRACING
    if (v8_flags.trace_drumbrake_execution) wasm_runtime->TracePop();
#endif  // V8_ENABLE_DRUMBRAKE_TRACING
    return base::ReadUnalignedValue<T>(reinterpret_cast<Address>(sp + offset));
  }

  template <>
  inline WasmRef pop(uint32_t*& sp, const uint8_t*& code,
                     WasmInterpreterRuntime* wasm_runtime) {
    uint32_t ref_stack_index = Read<int32_t>(code);
#ifdef V8_ENABLE_DRUMBRAKE_TRACING
    if (v8_flags.trace_drumbrake_execution) wasm_runtime->TracePop();
#endif  // V8_ENABLE_DRUMBRAKE_TRACING
    return wasm_runtime->ExtractWasmRef(ref_stack_index);
  }
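
  // Design note, inferred from the specializations above: numeric values live
  // in untagged slot memory addressed through sp, while references are kept
  // in a side FixedArray managed by the WasmInterpreterRuntime
  // (StoreWasmRef/ExtractWasmRef) so that the GC can trace and update them;
  // the bytecode stream carries the ref-stack index for these slots.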

  template <typename T>
  static inline T ExecuteRemS(T lval, T rval) {
    if (rval == -1) return 0;
    return lval % rval;
  }

  template <typename T>
  static inline T ExecuteRemU(T lval, T rval) {
    return lval % rval;
  }
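
  // Note on the rval == -1 special case in ExecuteRemS: Wasm defines
  // INT32_MIN rem -1 (and the 64-bit equivalent) to be 0, whereas in C++ the
  // expression lval % -1 overflows and is undefined behavior, so the guard
  // returns 0 explicitly; e.g. ExecuteRemS<int32_t>(INT32_MIN, -1) == 0.
  // Division by zero is not handled here; the callers are expected to have
  // already trapped on a zero divisor (not shown in these helpers).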

  ////////////////////////////////////////////////////////////////////////////
  // GlobalGet

  template <typename IntT>
  INSTRUCTION_HANDLER_FUNC r2r_GlobalGetI(const uint8_t* code, uint32_t* sp,
                                          WasmInterpreterRuntime* wasm_runtime,
                                          int64_t r0, double fp0) {
    uint32_t index = ReadGlobalIndex(code);
    uint8_t* src_addr = wasm_runtime->GetGlobalAddress(index);
    r0 = base::ReadUnalignedValue<IntT>(reinterpret_cast<Address>(src_addr));

    NextOp();
  }
  static auto constexpr r2r_I32GlobalGet = r2r_GlobalGetI<int32_t>;
  static auto constexpr r2r_I64GlobalGet = r2r_GlobalGetI<int64_t>;

  template <typename FloatT>
  INSTRUCTION_HANDLER_FUNC r2r_GlobalGetF(const uint8_t* code, uint32_t* sp,
                                          WasmInterpreterRuntime* wasm_runtime,
                                          int64_t r0, double fp0) {
    uint32_t index = ReadGlobalIndex(code);
    uint8_t* src_addr = wasm_runtime->GetGlobalAddress(index);
    fp0 = base::ReadUnalignedValue<FloatT>(reinterpret_cast<Address>(src_addr));

    NextOp();
  }
  static auto constexpr r2r_F32GlobalGet = r2r_GlobalGetF<float>;
  static auto constexpr r2r_F64GlobalGet = r2r_GlobalGetF<double>;

  template <typename T>
  INSTRUCTION_HANDLER_FUNC s2s_GlobalGet(const uint8_t* code, uint32_t* sp,
                                         WasmInterpreterRuntime* wasm_runtime,
                                         int64_t r0, double fp0) {
    uint32_t index = ReadGlobalIndex(code);
    uint8_t* src_addr = wasm_runtime->GetGlobalAddress(index);
    push<T>(sp, code, wasm_runtime,
            base::ReadUnalignedValue<T>(reinterpret_cast<Address>(src_addr)));

    NextOp();
  }
  static auto constexpr s2s_I32GlobalGet = s2s_GlobalGet<int32_t>;
  static auto constexpr s2s_I64GlobalGet = s2s_GlobalGet<int64_t>;
  static auto constexpr s2s_F32GlobalGet = s2s_GlobalGet<float>;
  static auto constexpr s2s_F64GlobalGet = s2s_GlobalGet<double>;
  static auto constexpr s2s_S128GlobalGet = s2s_GlobalGet<Simd128>;

  INSTRUCTION_HANDLER_FUNC s2s_RefGlobalGet(
      const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
      int64_t r0, double fp0) {
    uint32_t index = ReadGlobalIndex(code);
    push<WasmRef>(sp, code, wasm_runtime, wasm_runtime->GetGlobalRef(index));

    NextOp();
  }

  ////////////////////////////////////////////////////////////////////////////
  // GlobalSet

  template <typename IntT>
  INSTRUCTION_HANDLER_FUNC r2s_GlobalSetI(const uint8_t* code, uint32_t* sp,
                                          WasmInterpreterRuntime* wasm_runtime,
                                          int64_t r0, double fp0) {
    uint32_t index = ReadGlobalIndex(code);
    uint8_t* dst_addr = wasm_runtime->GetGlobalAddress(index);
    base::WriteUnalignedValue<IntT>(reinterpret_cast<Address>(dst_addr),
                                    static_cast<IntT>(r0));  // r0: value
    NextOp();
  }
  static auto constexpr r2s_I32GlobalSet = r2s_GlobalSetI<int32_t>;
  static auto constexpr r2s_I64GlobalSet = r2s_GlobalSetI<int64_t>;

  template <typename FloatT>
  INSTRUCTION_HANDLER_FUNC r2s_GlobalSetF(const uint8_t* code, uint32_t* sp,
                                          WasmInterpreterRuntime* wasm_runtime,
                                          int64_t r0, double fp0) {
    uint32_t index = ReadGlobalIndex(code);
    uint8_t* dst_addr = wasm_runtime->GetGlobalAddress(index);
    base::WriteUnalignedValue<FloatT>(reinterpret_cast<Address>(dst_addr),
                                      static_cast<FloatT>(fp0));  // fp0: value
    NextOp();
  }
  static auto constexpr r2s_F32GlobalSet = r2s_GlobalSetF<float>;
  static auto constexpr r2s_F64GlobalSet = r2s_GlobalSetF<double>;

  template <typename T>
  INSTRUCTION_HANDLER_FUNC s2s_GlobalSet(const uint8_t* code, uint32_t* sp,
                                         WasmInterpreterRuntime* wasm_runtime,
                                         int64_t r0, double fp0) {
    uint32_t index = ReadGlobalIndex(code);
    uint8_t* dst_addr = wasm_runtime->GetGlobalAddress(index);
    base::WriteUnalignedValue<T>(reinterpret_cast<Address>(dst_addr),
                                 pop<T>(sp, code, wasm_runtime));
    NextOp();
  }
  static auto constexpr s2s_I32GlobalSet = s2s_GlobalSet<int32_t>;
  static auto constexpr s2s_I64GlobalSet = s2s_GlobalSet<int64_t>;
  static auto constexpr s2s_F32GlobalSet = s2s_GlobalSet<float>;
  static auto constexpr s2s_F64GlobalSet = s2s_GlobalSet<double>;
  static auto constexpr s2s_S128GlobalSet = s2s_GlobalSet<Simd128>;

  INSTRUCTION_HANDLER_FUNC s2s_RefGlobalSet(
      const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
      int64_t r0, double fp0) {
    uint32_t index = ReadGlobalIndex(code);
    wasm_runtime->SetGlobalRef(index, pop<WasmRef>(sp, code, wasm_runtime));

    NextOp();
  }

  ////////////////////////////////////////////////////////////////////////////
  // Drop

  template <typename T>
  INSTRUCTION_HANDLER_FUNC r2s_Drop(const uint8_t* code, uint32_t* sp,
                                    WasmInterpreterRuntime* wasm_runtime,
                                    int64_t r0, double fp0) {
    NextOp();
  }
  static auto constexpr r2s_I32Drop = r2s_Drop<int32_t>;
  static auto constexpr r2s_I64Drop = r2s_Drop<int64_t>;
  static auto constexpr r2s_F32Drop = r2s_Drop<float>;
  static auto constexpr r2s_F64Drop = r2s_Drop<double>;

  INSTRUCTION_HANDLER_FUNC r2s_RefDrop(const uint8_t* code, uint32_t* sp,
                                       WasmInterpreterRuntime* wasm_runtime,
                                       int64_t r0, double fp0) {
    UNREACHABLE();
  }

  template <typename T>
  INSTRUCTION_HANDLER_FUNC s2s_Drop(const uint8_t* code, uint32_t* sp,
                                    WasmInterpreterRuntime* wasm_runtime,
                                    int64_t r0, double fp0) {
    pop<T>(sp, code, wasm_runtime);

    NextOp();
  }
  static auto constexpr s2s_I32Drop = s2s_Drop<int32_t>;
  static auto constexpr s2s_I64Drop = s2s_Drop<int64_t>;
  static auto constexpr s2s_F32Drop = s2s_Drop<float>;
  static auto constexpr s2s_F64Drop = s2s_Drop<double>;
  static auto constexpr s2s_S128Drop = s2s_Drop<Simd128>;

  INSTRUCTION_HANDLER_FUNC s2s_RefDrop(const uint8_t* code, uint32_t* sp,
                                       WasmInterpreterRuntime* wasm_runtime,
                                       int64_t r0, double fp0) {
    pop<WasmRef>(sp, code, wasm_runtime);

    NextOp();
  }

  ////////////////////////////////////////////////////////////////////////////
  // LoadMem

  template <typename IntT, typename IntU = IntT, typename MemIdx = uint32_t,
            typename MemOffsetT = memory_offset32_t>
  INSTRUCTION_HANDLER_FUNC r2r_LoadMemI(const uint8_t* code, uint32_t* sp,
                                        WasmInterpreterRuntime* wasm_runtime,
                                        int64_t r0, double fp0) {
    uint8_t* memory_start = wasm_runtime->GetMemoryStart();
    uint64_t offset = Read<MemOffsetT>(code);
    MemIdx index = static_cast<MemIdx>(r0);
    uint64_t effective_index = offset + index;

    if (V8_UNLIKELY(
            effective_index < index ||
            !base::IsInBounds<uint64_t>(effective_index, sizeof(IntU),
                                        wasm_runtime->GetMemorySize()))) {
      TRAP(TrapReason::kTrapMemOutOfBounds)
    }

    uint8_t* address = memory_start + effective_index;

    IntU value =
        base::ReadUnalignedValue<IntU>(reinterpret_cast<Address>(address));
    r0 = static_cast<IntT>(value);

    NextOp();
  }
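  // How the bounds check above works, with an illustrative example: the sum
  // offset + index is computed in uint64_t, so it can only wrap around when
  // 64-bit operands are involved; effective_index < index detects that wrap
  // (e.g. offset == UINT64_MAX and index == 2 yield effective_index == 1,
  // which is < 2), and IsInBounds then verifies that
  // effective_index + sizeof(value) does not exceed the memory size.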
  static auto constexpr r2r_I32LoadMem8S_Idx64 =
      r2r_LoadMemI<int32_t, int8_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2r_I32LoadMem8U_Idx64 =
      r2r_LoadMemI<int32_t, uint8_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2r_I32LoadMem16S_Idx64 =
      r2r_LoadMemI<int32_t, int16_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2r_I32LoadMem16U_Idx64 =
      r2r_LoadMemI<int32_t, uint16_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2r_I64LoadMem8S_Idx64 =
      r2r_LoadMemI<int64_t, int8_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2r_I64LoadMem8U_Idx64 =
      r2r_LoadMemI<int64_t, uint8_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2r_I64LoadMem16S_Idx64 =
      r2r_LoadMemI<int64_t, int16_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2r_I64LoadMem16U_Idx64 =
      r2r_LoadMemI<int64_t, uint16_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2r_I64LoadMem32S_Idx64 =
      r2r_LoadMemI<int64_t, int32_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2r_I64LoadMem32U_Idx64 =
      r2r_LoadMemI<int64_t, uint32_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2r_I32LoadMem_Idx64 =
      r2r_LoadMemI<int32_t, int32_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2r_I64LoadMem_Idx64 =
      r2r_LoadMemI<int64_t, int64_t, uint64_t, memory_offset64_t>;

  template <typename FloatT, typename MemIdx = uint32_t,
            typename MemOffsetT = memory_offset32_t>
  INSTRUCTION_HANDLER_FUNC r2r_LoadMemF(const uint8_t* code, uint32_t* sp,
                                        WasmInterpreterRuntime* wasm_runtime,
                                        int64_t r0, double fp0) {
    uint8_t* memory_start = wasm_runtime->GetMemoryStart();
    MemOffsetT offset = Read<MemOffsetT>(code);
    MemIdx index = static_cast<MemIdx>(r0);
    uint64_t effective_index = offset + index;

    if (V8_UNLIKELY(
            effective_index < index ||
            !base::IsInBounds<uint64_t>(effective_index, sizeof(FloatT),
                                        wasm_runtime->GetMemorySize()))) {
      TRAP(TrapReason::kTrapMemOutOfBounds)
    }

    uint8_t* address = memory_start + effective_index;

    fp0 = base::ReadUnalignedValue<FloatT>(reinterpret_cast<Address>(address));

    NextOp();
  }
  static auto constexpr r2r_F32LoadMem_Idx64 =
      r2r_LoadMemF<float, uint64_t, memory_offset64_t>;
  static auto constexpr r2r_F64LoadMem_Idx64 =
      r2r_LoadMemF<double, uint64_t, memory_offset64_t>;

  template <typename T, typename U = T, typename MemIdx = uint32_t,
            typename MemOffsetT = memory_offset32_t>
  INSTRUCTION_HANDLER_FUNC r2s_LoadMem(const uint8_t* code, uint32_t* sp,
                                       WasmInterpreterRuntime* wasm_runtime,
                                       int64_t r0, double fp0) {
    uint8_t* memory_start = wasm_runtime->GetMemoryStart();
    MemOffsetT offset = Read<MemOffsetT>(code);
    MemIdx index = static_cast<MemIdx>(r0);
    uint64_t effective_index = offset + index;

    if (V8_UNLIKELY(
            effective_index < index ||
            !base::IsInBounds<uint64_t>(effective_index, sizeof(U),
                                        wasm_runtime->GetMemorySize()))) {
      TRAP(TrapReason::kTrapMemOutOfBounds)
    }

    uint8_t* address = memory_start + effective_index;

    U value = base::ReadUnalignedValue<U>(reinterpret_cast<Address>(address));

    push<T>(sp, code, wasm_runtime, value);

    NextOp();
  }
  static auto constexpr r2s_I32LoadMem8S_Idx64 =
      r2s_LoadMem<int32_t, int8_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2s_I32LoadMem8U_Idx64 =
      r2s_LoadMem<int32_t, uint8_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2s_I32LoadMem16S_Idx64 =
      r2s_LoadMem<int32_t, int16_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2s_I32LoadMem16U_Idx64 =
      r2s_LoadMem<int32_t, uint16_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2s_I64LoadMem8S_Idx64 =
      r2s_LoadMem<int64_t, int8_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2s_I64LoadMem8U_Idx64 =
      r2s_LoadMem<int64_t, uint8_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2s_I64LoadMem16S_Idx64 =
      r2s_LoadMem<int64_t, int16_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2s_I64LoadMem16U_Idx64 =
      r2s_LoadMem<int64_t, uint16_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2s_I64LoadMem32S_Idx64 =
      r2s_LoadMem<int64_t, int32_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2s_I64LoadMem32U_Idx64 =
      r2s_LoadMem<int64_t, uint32_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2s_I32LoadMem_Idx64 =
      r2s_LoadMem<int32_t, int32_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2s_I64LoadMem_Idx64 =
      r2s_LoadMem<int64_t, int64_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2s_F32LoadMem_Idx64 =
      r2s_LoadMem<float, float, uint64_t, memory_offset64_t>;
  static auto constexpr r2s_F64LoadMem_Idx64 =
      r2s_LoadMem<double, double, uint64_t, memory_offset64_t>;

  template <typename IntT, typename IntU = IntT, typename MemIdx = uint32_t,
            typename MemOffsetT = memory_offset32_t>
  INSTRUCTION_HANDLER_FUNC s2r_LoadMemI(const uint8_t* code, uint32_t* sp,
                                        WasmInterpreterRuntime* wasm_runtime,
                                        int64_t r0, double fp0) {
    uint8_t* memory_start = wasm_runtime->GetMemoryStart();
    MemOffsetT offset = Read<MemOffsetT>(code);
    uint64_t index = pop<MemIdx>(sp, code, wasm_runtime);
    uint64_t effective_index = offset + index;

    if (V8_UNLIKELY(
            effective_index < index ||
            !base::IsInBounds<uint64_t>(effective_index, sizeof(IntU),
                                        wasm_runtime->GetMemorySize()))) {
      TRAP(TrapReason::kTrapMemOutOfBounds)
    }

    uint8_t* address = memory_start + effective_index;

    r0 = static_cast<IntT>(
        base::ReadUnalignedValue<IntU>(reinterpret_cast<Address>(address)));

    NextOp();
  }
  static auto constexpr s2r_I32LoadMem8S_Idx64 =
      s2r_LoadMemI<int32_t, int8_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2r_I32LoadMem8U_Idx64 =
      s2r_LoadMemI<int32_t, uint8_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2r_I32LoadMem16S_Idx64 =
      s2r_LoadMemI<int32_t, int16_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2r_I32LoadMem16U_Idx64 =
      s2r_LoadMemI<int32_t, uint16_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2r_I64LoadMem8S_Idx64 =
      s2r_LoadMemI<int64_t, int8_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2r_I64LoadMem8U_Idx64 =
      s2r_LoadMemI<int64_t, uint8_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2r_I64LoadMem16S_Idx64 =
      s2r_LoadMemI<int64_t, int16_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2r_I64LoadMem16U_Idx64 =
      s2r_LoadMemI<int64_t, uint16_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2r_I64LoadMem32S_Idx64 =
      s2r_LoadMemI<int64_t, int32_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2r_I64LoadMem32U_Idx64 =
      s2r_LoadMemI<int64_t, uint32_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2r_I32LoadMem_Idx64 =
      s2r_LoadMemI<int32_t, int32_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2r_I64LoadMem_Idx64 =
      s2r_LoadMemI<int64_t, int64_t, uint64_t, memory_offset64_t>;

  template <typename FloatT, typename MemIdx = uint32_t,
            typename MemOffsetT = memory_offset32_t>
  INSTRUCTION_HANDLER_FUNC s2r_LoadMemF(const uint8_t* code, uint32_t* sp,
                                        WasmInterpreterRuntime* wasm_runtime,
                                        int64_t r0, double fp0) {
    uint8_t* memory_start = wasm_runtime->GetMemoryStart();
    MemOffsetT offset = Read<MemOffsetT>(code);
    uint64_t index = pop<MemIdx>(sp, code, wasm_runtime);
    uint64_t effective_index = offset + index;

    if (V8_UNLIKELY(
            effective_index < index ||
            !base::IsInBounds<uint64_t>(effective_index, sizeof(FloatT),
                                        wasm_runtime->GetMemorySize()))) {
      TRAP(TrapReason::kTrapMemOutOfBounds)
    }

    uint8_t* address = memory_start + effective_index;

    fp0 = static_cast<FloatT>(
        base::ReadUnalignedValue<FloatT>(reinterpret_cast<Address>(address)));

    NextOp();
  }
  static auto constexpr s2r_F32LoadMem_Idx64 =
      s2r_LoadMemF<float, uint64_t, memory_offset64_t>;
  static auto constexpr s2r_F64LoadMem_Idx64 =
      s2r_LoadMemF<double, uint64_t, memory_offset64_t>;

  template <typename T, typename U = T, typename MemIdx = uint32_t,
            typename MemOffsetT = memory_offset32_t>
  INSTRUCTION_HANDLER_FUNC s2s_LoadMem(const uint8_t* code, uint32_t* sp,
                                       WasmInterpreterRuntime* wasm_runtime,
                                       int64_t r0, double fp0) {
    uint8_t* memory_start = wasm_runtime->GetMemoryStart();
    MemOffsetT offset = Read<MemOffsetT>(code);
    uint64_t index = pop<MemIdx>(sp, code, wasm_runtime);
    uint64_t effective_index = offset + index;

    if (V8_UNLIKELY(
            effective_index < index ||
            !base::IsInBounds<uint64_t>(effective_index, sizeof(U),
                                        wasm_runtime->GetMemorySize()))) {
      TRAP(TrapReason::kTrapMemOutOfBounds)
    }

    uint8_t* address = memory_start + effective_index;

    U value = base::ReadUnalignedValue<U>(reinterpret_cast<Address>(address));

    push<T>(sp, code, wasm_runtime, value);

    NextOp();
  }
  static auto constexpr s2s_I32LoadMem8S_Idx64 =
      s2s_LoadMem<int32_t, int8_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I32LoadMem8U_Idx64 =
      s2s_LoadMem<int32_t, uint8_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I32LoadMem16S_Idx64 =
      s2s_LoadMem<int32_t, int16_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I32LoadMem16U_Idx64 =
      s2s_LoadMem<int32_t, uint16_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I64LoadMem8S_Idx64 =
      s2s_LoadMem<int64_t, int8_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I64LoadMem8U_Idx64 =
      s2s_LoadMem<int64_t, uint8_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I64LoadMem16S_Idx64 =
      s2s_LoadMem<int64_t, int16_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I64LoadMem16U_Idx64 =
      s2s_LoadMem<int64_t, uint16_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I64LoadMem32S_Idx64 =
      s2s_LoadMem<int64_t, int32_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I64LoadMem32U_Idx64 =
      s2s_LoadMem<int64_t, uint32_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I32LoadMem_Idx64 =
      s2s_LoadMem<int32_t, int32_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I64LoadMem_Idx64 =
      s2s_LoadMem<int64_t, int64_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_F32LoadMem_Idx64 =
      s2s_LoadMem<float, float, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_F64LoadMem_Idx64 =
      s2s_LoadMem<double, double, uint64_t, memory_offset64_t>;

  // LoadMem_LocalSet
  template <typename T, typename U = T, typename MemIdx = uint32_t,
            typename MemOffsetT = memory_offset32_t>
  INSTRUCTION_HANDLER_FUNC s2s_LoadMem_LocalSet(
      const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
      int64_t r0, double fp0) {
    uint8_t* memory_start = wasm_runtime->GetMemoryStart();
    MemOffsetT offset = Read<MemOffsetT>(code);
    uint64_t index = pop<MemIdx>(sp, code, wasm_runtime);
    uint64_t effective_index = offset + index;

    if (V8_UNLIKELY(
            effective_index < index ||
            !base::IsInBounds<uint64_t>(effective_index, sizeof(U),
                                        wasm_runtime->GetMemorySize()))) {
      TRAP(TrapReason::kTrapMemOutOfBounds)
    }

    uint8_t* address = memory_start + effective_index;

    U value = base::ReadUnalignedValue<U>(reinterpret_cast<Address>(address));

    slot_offset_t to = Read<slot_offset_t>(code);
    base::WriteUnalignedValue<T>(reinterpret_cast<Address>(sp + to),
                                 static_cast<T>(value));

    NextOp();
  }
  static auto constexpr s2s_I32LoadMem8S_LocalSet_Idx64 =
      s2s_LoadMem_LocalSet<int32_t, int8_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I32LoadMem8U_LocalSet_Idx64 =
      s2s_LoadMem_LocalSet<int32_t, uint8_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I32LoadMem16S_LocalSet_Idx64 =
      s2s_LoadMem_LocalSet<int32_t, int16_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I32LoadMem16U_LocalSet_Idx64 =
      s2s_LoadMem_LocalSet<int32_t, uint16_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I64LoadMem8S_LocalSet_Idx64 =
      s2s_LoadMem_LocalSet<int64_t, int8_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I64LoadMem8U_LocalSet_Idx64 =
      s2s_LoadMem_LocalSet<int64_t, uint8_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I64LoadMem16S_LocalSet_Idx64 =
      s2s_LoadMem_LocalSet<int64_t, int16_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I64LoadMem16U_LocalSet_Idx64 =
      s2s_LoadMem_LocalSet<int64_t, uint16_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I64LoadMem32S_LocalSet_Idx64 =
      s2s_LoadMem_LocalSet<int64_t, int32_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I64LoadMem32U_LocalSet_Idx64 =
      s2s_LoadMem_LocalSet<int64_t, uint32_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I32LoadMem_LocalSet_Idx64 =
      s2s_LoadMem_LocalSet<int32_t, int32_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I64LoadMem_LocalSet_Idx64 =
      s2s_LoadMem_LocalSet<int64_t, int64_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_F32LoadMem_LocalSet_Idx64 =
      s2s_LoadMem_LocalSet<float, float, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_F64LoadMem_LocalSet_Idx64 =
      s2s_LoadMem_LocalSet<double, double, uint64_t, memory_offset64_t>;

  // StoreMem
  template <typename IntT, typename IntU = IntT, typename MemIdx = uint32_t,
            typename MemOffsetT = memory_offset32_t>
  INSTRUCTION_HANDLER_FUNC r2s_StoreMemI(const uint8_t* code, uint32_t* sp,
                                         WasmInterpreterRuntime* wasm_runtime,
                                         int64_t r0, double fp0) {
    IntT value = static_cast<IntT>(r0);

    uint8_t* memory_start = wasm_runtime->GetMemoryStart();
    MemOffsetT offset = Read<MemOffsetT>(code);
    uint64_t index = pop<MemIdx>(sp, code, wasm_runtime);
    uint64_t effective_index = offset + index;

    if (V8_UNLIKELY(
            effective_index < index ||
            !base::IsInBounds<uint64_t>(effective_index, sizeof(IntU),
                                        wasm_runtime->GetMemorySize()))) {
      TRAP(TrapReason::kTrapMemOutOfBounds)
    }

    uint8_t* address = memory_start + effective_index;

    base::WriteUnalignedValue<IntU>(
        reinterpret_cast<Address>(address),
        base::ReadUnalignedValue<IntU>(reinterpret_cast<Address>(&value)));

    NextOp();
  }
  static auto constexpr r2s_I32StoreMem8_Idx64 =
      r2s_StoreMemI<int32_t, int8_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2s_I32StoreMem16_Idx64 =
      r2s_StoreMemI<int32_t, int16_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2s_I64StoreMem8_Idx64 =
      r2s_StoreMemI<int64_t, int8_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2s_I64StoreMem16_Idx64 =
      r2s_StoreMemI<int64_t, int16_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2s_I64StoreMem32_Idx64 =
      r2s_StoreMemI<int64_t, int32_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2s_I32StoreMem_Idx64 =
      r2s_StoreMemI<int32_t, int32_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2s_I64StoreMem_Idx64 =
      r2s_StoreMemI<int64_t, int64_t, uint64_t, memory_offset64_t>;

  template <typename FloatT, typename MemIdx = uint32_t,
            typename MemOffsetT = memory_offset32_t>
  INSTRUCTION_HANDLER_FUNC r2s_StoreMemF(const uint8_t* code, uint32_t* sp,
                                         WasmInterpreterRuntime* wasm_runtime,
                                         int64_t r0, double fp0) {
    FloatT value = static_cast<FloatT>(fp0);

    uint8_t* memory_start = wasm_runtime->GetMemoryStart();
    MemOffsetT offset = Read<MemOffsetT>(code);
    uint64_t index = pop<MemIdx>(sp, code, wasm_runtime);
    uint64_t effective_index = offset + index;

    if (V8_UNLIKELY(
            effective_index < index ||
            !base::IsInBounds<uint64_t>(effective_index, sizeof(FloatT),
                                        wasm_runtime->GetMemorySize()))) {
      TRAP(TrapReason::kTrapMemOutOfBounds)
    }

    uint8_t* address = memory_start + effective_index;

    base::WriteUnalignedValue<FloatT>(
        reinterpret_cast<Address>(address),
        base::ReadUnalignedValue<FloatT>(reinterpret_cast<Address>(&value)));

    NextOp();
  }
  static auto constexpr r2s_F32StoreMem_Idx64 =
      r2s_StoreMemF<float, uint64_t, memory_offset64_t>;
  static auto constexpr r2s_F64StoreMem_Idx64 =
      r2s_StoreMemF<double, uint64_t, memory_offset64_t>;

  template <typename T, typename U = T, typename MemIdx = uint32_t,
            typename MemOffsetT = memory_offset32_t>
  INSTRUCTION_HANDLER_FUNC s2s_StoreMem(const uint8_t* code, uint32_t* sp,
                                        WasmInterpreterRuntime* wasm_runtime,
                                        int64_t r0, double fp0) {
    T value = pop<T>(sp, code, wasm_runtime);

    uint8_t* memory_start = wasm_runtime->GetMemoryStart();
    MemOffsetT offset = Read<MemOffsetT>(code);
    uint64_t index = pop<MemIdx>(sp, code, wasm_runtime);
    uint64_t effective_index = offset + index;

    if (V8_UNLIKELY(
            effective_index < index ||
            !base::IsInBounds<uint64_t>(effective_index, sizeof(U),
                                        wasm_runtime->GetMemorySize()))) {
      TRAP(TrapReason::kTrapMemOutOfBounds)
    }

    uint8_t* address = memory_start + effective_index;

    base::WriteUnalignedValue<U>(
        reinterpret_cast<Address>(address),
        base::ReadUnalignedValue<U>(reinterpret_cast<Address>(&value)));

    NextOp();
  }
  static auto constexpr s2s_I32StoreMem8_Idx64 =
      s2s_StoreMem<int32_t, int8_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I32StoreMem16_Idx64 =
      s2s_StoreMem<int32_t, int16_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I64StoreMem8_Idx64 =
      s2s_StoreMem<int64_t, int8_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I64StoreMem16_Idx64 =
      s2s_StoreMem<int64_t, int16_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I64StoreMem32_Idx64 =
      s2s_StoreMem<int64_t, int32_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I32StoreMem_Idx64 =
      s2s_StoreMem<int32_t, int32_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I64StoreMem_Idx64 =
      s2s_StoreMem<int64_t, int64_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_F32StoreMem_Idx64 =
      s2s_StoreMem<float, float, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_F64StoreMem_Idx64 =
      s2s_StoreMem<double, double, uint64_t, memory_offset64_t>;

  // LoadStoreMem
  template <typename T, typename MemIdx = uint32_t,
            typename MemOffsetT = memory_offset32_t>
  INSTRUCTION_HANDLER_FUNC r2s_LoadStoreMem(
      const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
      int64_t r0, double fp0) {
    uint8_t* memory_start = wasm_runtime->GetMemoryStart();

    MemOffsetT load_offset = Read<MemOffsetT>(code);
    uint64_t load_index = r0;
    uint64_t effective_load_index = load_offset + load_index;

    MemOffsetT store_offset = Read<MemOffsetT>(code);
    uint64_t store_index = pop<MemIdx>(sp, code, wasm_runtime);
    uint64_t effective_store_index = store_offset + store_index;

    if (V8_UNLIKELY(
            effective_load_index < load_index ||
            !base::IsInBounds<uint64_t>(effective_load_index, sizeof(T),
                                        wasm_runtime->GetMemorySize()) ||
            effective_store_index < store_offset ||
            !base::IsInBounds<uint64_t>(effective_store_index, sizeof(T),
                                        wasm_runtime->GetMemorySize()))) {
      TRAP(TrapReason::kTrapMemOutOfBounds)
    }

    uint8_t* load_address = memory_start + effective_load_index;
    uint8_t* store_address = memory_start + effective_store_index;

    base::WriteUnalignedValue<T>(
        reinterpret_cast<Address>(store_address),
        base::ReadUnalignedValue<T>(reinterpret_cast<Address>(load_address)));

    NextOp();
  }
  static auto constexpr r2s_I32LoadStoreMem_Idx64 =
      r2s_LoadStoreMem<int32_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2s_I64LoadStoreMem_Idx64 =
      r2s_LoadStoreMem<int64_t, uint64_t, memory_offset64_t>;
  static auto constexpr r2s_F32LoadStoreMem_Idx64 =
      r2s_LoadStoreMem<float, uint64_t, memory_offset64_t>;
  static auto constexpr r2s_F64LoadStoreMem_Idx64 =
      r2s_LoadStoreMem<double, uint64_t, memory_offset64_t>;

  template <typename T, typename MemIdx = uint32_t,
            typename MemOffsetT = memory_offset32_t>
  INSTRUCTION_HANDLER_FUNC s2s_LoadStoreMem(
      const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
      int64_t r0, double fp0) {
    uint8_t* memory_start = wasm_runtime->GetMemoryStart();

    MemOffsetT load_offset = Read<MemOffsetT>(code);
    uint64_t load_index = pop<MemIdx>(sp, code, wasm_runtime);
    uint64_t effective_load_index = load_offset + load_index;

    MemOffsetT store_offset = Read<MemOffsetT>(code);
    uint64_t store_index = pop<MemIdx>(sp, code, wasm_runtime);
    uint64_t effective_store_index = store_offset + store_index;

    if (V8_UNLIKELY(
            effective_load_index < load_index ||
            !base::IsInBounds<uint64_t>(effective_load_index, sizeof(T),
                                        wasm_runtime->GetMemorySize()) ||
            effective_store_index < store_offset ||
            !base::IsInBounds<uint64_t>(effective_store_index, sizeof(T),
                                        wasm_runtime->GetMemorySize()))) {
      TRAP(TrapReason::kTrapMemOutOfBounds)
    }

    uint8_t* load_address = memory_start + effective_load_index;
    uint8_t* store_address = memory_start + effective_store_index;

    base::WriteUnalignedValue<T>(
        reinterpret_cast<Address>(store_address),
        base::ReadUnalignedValue<T>(reinterpret_cast<Address>(load_address)));

    NextOp();
  }
  static auto constexpr s2s_I32LoadStoreMem_Idx64 =
      s2s_LoadStoreMem<int32_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_I64LoadStoreMem_Idx64 =
      s2s_LoadStoreMem<int64_t, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_F32LoadStoreMem_Idx64 =
      s2s_LoadStoreMem<float, uint64_t, memory_offset64_t>;
  static auto constexpr s2s_F64LoadStoreMem_Idx64 =
      s2s_LoadStoreMem<double, uint64_t, memory_offset64_t>;

#if defined(V8_DRUMBRAKE_BOUNDS_CHECKS)
  static auto constexpr r2r_I32LoadMem8S = r2r_LoadMemI<int32_t, int8_t>;
  static auto constexpr r2r_I32LoadMem8U = r2r_LoadMemI<int32_t, uint8_t>;
  static auto constexpr r2r_I32LoadMem16S = r2r_LoadMemI<int32_t, int16_t>;
  static auto constexpr r2r_I32LoadMem16U = r2r_LoadMemI<int32_t, uint16_t>;
  static auto constexpr r2r_I64LoadMem8S = r2r_LoadMemI<int64_t, int8_t>;
  static auto constexpr r2r_I64LoadMem8U = r2r_LoadMemI<int64_t, uint8_t>;
  static auto constexpr r2r_I64LoadMem16S = r2r_LoadMemI<int64_t, int16_t>;
  static auto constexpr r2r_I64LoadMem16U = r2r_LoadMemI<int64_t, uint16_t>;
  static auto constexpr r2r_I64LoadMem32S = r2r_LoadMemI<int64_t, int32_t>;
  static auto constexpr r2r_I64LoadMem32U = r2r_LoadMemI<int64_t, uint32_t>;
  static auto constexpr r2r_I32LoadMem = r2r_LoadMemI<int32_t>;
  static auto constexpr r2r_I64LoadMem = r2r_LoadMemI<int64_t>;

  static auto constexpr r2r_F32LoadMem = r2r_LoadMemF<float>;
  static auto constexpr r2r_F64LoadMem = r2r_LoadMemF<double>;

  static auto constexpr r2s_I32LoadMem8S = r2s_LoadMem<int32_t, int8_t>;
  static auto constexpr r2s_I32LoadMem8U = r2s_LoadMem<int32_t, uint8_t>;
  static auto constexpr r2s_I32LoadMem16S = r2s_LoadMem<int32_t, int16_t>;
  static auto constexpr r2s_I32LoadMem16U = r2s_LoadMem<int32_t, uint16_t>;
  static auto constexpr r2s_I64LoadMem8S = r2s_LoadMem<int64_t, int8_t>;
  static auto constexpr r2s_I64LoadMem8U = r2s_LoadMem<int64_t, uint8_t>;
  static auto constexpr r2s_I64LoadMem16S = r2s_LoadMem<int64_t, int16_t>;
  static auto constexpr r2s_I64LoadMem16U = r2s_LoadMem<int64_t, uint16_t>;
  static auto constexpr r2s_I64LoadMem32S = r2s_LoadMem<int64_t, int32_t>;
  static auto constexpr r2s_I64LoadMem32U = r2s_LoadMem<int64_t, uint32_t>;
  static auto constexpr r2s_I32LoadMem = r2s_LoadMem<int32_t>;
  static auto constexpr r2s_I64LoadMem = r2s_LoadMem<int64_t>;
  static auto constexpr r2s_F32LoadMem = r2s_LoadMem<float>;
  static auto constexpr r2s_F64LoadMem = r2s_LoadMem<double>;

  static auto constexpr s2r_I32LoadMem8S = s2r_LoadMemI<int32_t, int8_t>;
  static auto constexpr s2r_I32LoadMem8U = s2r_LoadMemI<int32_t, uint8_t>;
  static auto constexpr s2r_I32LoadMem16S = s2r_LoadMemI<int32_t, int16_t>;
  static auto constexpr s2r_I32LoadMem16U = s2r_LoadMemI<int32_t, uint16_t>;
  static auto constexpr s2r_I64LoadMem8S = s2r_LoadMemI<int64_t, int8_t>;
  static auto constexpr s2r_I64LoadMem8U = s2r_LoadMemI<int64_t, uint8_t>;
  static auto constexpr s2r_I64LoadMem16S = s2r_LoadMemI<int64_t, int16_t>;
  static auto constexpr s2r_I64LoadMem16U = s2r_LoadMemI<int64_t, uint16_t>;
  static auto constexpr s2r_I64LoadMem32S = s2r_LoadMemI<int64_t, int32_t>;
  static auto constexpr s2r_I64LoadMem32U = s2r_LoadMemI<int64_t, uint32_t>;
  static auto constexpr s2r_I32LoadMem = s2r_LoadMemI<int32_t>;
  static auto constexpr s2r_I64LoadMem = s2r_LoadMemI<int64_t>;

  static auto constexpr s2r_F32LoadMem = s2r_LoadMemF<float>;
  static auto constexpr s2r_F64LoadMem = s2r_LoadMemF<double>;

  static auto constexpr s2s_I32LoadMem8S = s2s_LoadMem<int32_t, int8_t>;
  static auto constexpr s2s_I32LoadMem8U = s2s_LoadMem<int32_t, uint8_t>;
  static auto constexpr s2s_I32LoadMem16S = s2s_LoadMem<int32_t, int16_t>;
  static auto constexpr s2s_I32LoadMem16U = s2s_LoadMem<int32_t, uint16_t>;
  static auto constexpr s2s_I64LoadMem8S = s2s_LoadMem<int64_t, int8_t>;
  static auto constexpr s2s_I64LoadMem8U = s2s_LoadMem<int64_t, uint8_t>;
  static auto constexpr s2s_I64LoadMem16S = s2s_LoadMem<int64_t, int16_t>;
  static auto constexpr s2s_I64LoadMem16U = s2s_LoadMem<int64_t, uint16_t>;
  static auto constexpr s2s_I64LoadMem32S = s2s_LoadMem<int64_t, int32_t>;
  static auto constexpr s2s_I64LoadMem32U = s2s_LoadMem<int64_t, uint32_t>;
  static auto constexpr s2s_I32LoadMem = s2s_LoadMem<int32_t>;
  static auto constexpr s2s_I64LoadMem = s2s_LoadMem<int64_t>;
  static auto constexpr s2s_F32LoadMem = s2s_LoadMem<float>;
  static auto constexpr s2s_F64LoadMem = s2s_LoadMem<double>;

  ////////////////////////////////////////////////////////////////////////////
  // LoadMem_LocalSet

  template <typename T, typename U = T>
  INSTRUCTION_HANDLER_FUNC r2s_LoadMem_LocalSet(
      const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
      int64_t r0, double fp0) {
    uint8_t* memory_start = wasm_runtime->GetMemoryStart();
    uint64_t offset = Read<memory_offset32_t>(code);
    uint64_t index = static_cast<uint32_t>(r0);
    uint64_t effective_index = offset + index;

    if (V8_UNLIKELY(
            effective_index < index ||
            !base::IsInBounds<uint64_t>(effective_index, sizeof(U),
                                        wasm_runtime->GetMemorySize()))) {
      TRAP(TrapReason::kTrapMemOutOfBounds)
    }

    uint8_t* address = memory_start + effective_index;

    U value = base::ReadUnalignedValue<U>(reinterpret_cast<Address>(address));

    slot_offset_t to = Read<slot_offset_t>(code);
    base::WriteUnalignedValue<T>(reinterpret_cast<Address>(sp + to),
                                 static_cast<T>(value));

    NextOp();
  }
  static auto constexpr r2s_I32LoadMem8S_LocalSet =
      r2s_LoadMem_LocalSet<int32_t, int8_t>;
  static auto constexpr r2s_I32LoadMem8U_LocalSet =
      r2s_LoadMem_LocalSet<int32_t, uint8_t>;
  static auto constexpr r2s_I32LoadMem16S_LocalSet =
      r2s_LoadMem_LocalSet<int32_t, int16_t>;
  static auto constexpr r2s_I32LoadMem16U_LocalSet =
      r2s_LoadMem_LocalSet<int32_t, uint16_t>;
  static auto constexpr r2s_I64LoadMem8S_LocalSet =
      r2s_LoadMem_LocalSet<int64_t, int8_t>;
  static auto constexpr r2s_I64LoadMem8U_LocalSet =
      r2s_LoadMem_LocalSet<int64_t, uint8_t>;
  static auto constexpr r2s_I64LoadMem16S_LocalSet =
      r2s_LoadMem_LocalSet<int64_t, int16_t>;
  static auto constexpr r2s_I64LoadMem16U_LocalSet =
      r2s_LoadMem_LocalSet<int64_t, uint16_t>;
  static auto constexpr r2s_I64LoadMem32S_LocalSet =
      r2s_LoadMem_LocalSet<int64_t, int32_t>;
  static auto constexpr r2s_I64LoadMem32U_LocalSet =
      r2s_LoadMem_LocalSet<int64_t, uint32_t>;
  static auto constexpr r2s_I32LoadMem_LocalSet = r2s_LoadMem_LocalSet<int32_t>;
  static auto constexpr r2s_I64LoadMem_LocalSet = r2s_LoadMem_LocalSet<int64_t>;
  static auto constexpr r2s_F32LoadMem_LocalSet = r2s_LoadMem_LocalSet<float>;
  static auto constexpr r2s_F64LoadMem_LocalSet = r2s_LoadMem_LocalSet<double>;

  static auto constexpr s2s_I32LoadMem8S_LocalSet =
      s2s_LoadMem_LocalSet<int32_t, int8_t>;
  static auto constexpr s2s_I32LoadMem8U_LocalSet =
      s2s_LoadMem_LocalSet<int32_t, uint8_t>;
  static auto constexpr s2s_I32LoadMem16S_LocalSet =
      s2s_LoadMem_LocalSet<int32_t, int16_t>;
  static auto constexpr s2s_I32LoadMem16U_LocalSet =
      s2s_LoadMem_LocalSet<int32_t, uint16_t>;
  static auto constexpr s2s_I64LoadMem8S_LocalSet =
      s2s_LoadMem_LocalSet<int64_t, int8_t>;
  static auto constexpr s2s_I64LoadMem8U_LocalSet =
      s2s_LoadMem_LocalSet<int64_t, uint8_t>;
  static auto constexpr s2s_I64LoadMem16S_LocalSet =
      s2s_LoadMem_LocalSet<int64_t, int16_t>;
  static auto constexpr s2s_I64LoadMem16U_LocalSet =
      s2s_LoadMem_LocalSet<int64_t, uint16_t>;
  static auto constexpr s2s_I64LoadMem32S_LocalSet =
      s2s_LoadMem_LocalSet<int64_t, int32_t>;
  static auto constexpr s2s_I64LoadMem32U_LocalSet =
      s2s_LoadMem_LocalSet<int64_t, uint32_t>;
  static auto constexpr s2s_I32LoadMem_LocalSet = s2s_LoadMem_LocalSet<int32_t>;
  static auto constexpr s2s_I64LoadMem_LocalSet = s2s_LoadMem_LocalSet<int64_t>;
  static auto constexpr s2s_F32LoadMem_LocalSet = s2s_LoadMem_LocalSet<float>;
  static auto constexpr s2s_F64LoadMem_LocalSet = s2s_LoadMem_LocalSet<double>;

  ////////////////////////////////////////////////////////////////////////////
  // StoreMem
  static auto constexpr r2s_I32StoreMem8 = r2s_StoreMemI<int32_t, int8_t>;
  static auto constexpr r2s_I32StoreMem16 = r2s_StoreMemI<int32_t, int16_t>;
  static auto constexpr r2s_I64StoreMem8 = r2s_StoreMemI<int64_t, int8_t>;
  static auto constexpr r2s_I64StoreMem16 = r2s_StoreMemI<int64_t, int16_t>;
  static auto constexpr r2s_I64StoreMem32 = r2s_StoreMemI<int64_t, int32_t>;
  static auto constexpr r2s_I32StoreMem = r2s_StoreMemI<int32_t>;
  static auto constexpr r2s_I64StoreMem = r2s_StoreMemI<int64_t>;

  static auto constexpr r2s_F32StoreMem = r2s_StoreMemF<float>;
  static auto constexpr r2s_F64StoreMem = r2s_StoreMemF<double>;

  static auto constexpr s2s_I32StoreMem8 = s2s_StoreMem<int32_t, int8_t>;
  static auto constexpr s2s_I32StoreMem16 = s2s_StoreMem<int32_t, int16_t>;
  static auto constexpr s2s_I64StoreMem8 = s2s_StoreMem<int64_t, int8_t>;
  static auto constexpr s2s_I64StoreMem16 = s2s_StoreMem<int64_t, int16_t>;
  static auto constexpr s2s_I64StoreMem32 = s2s_StoreMem<int64_t, int32_t>;
  static auto constexpr s2s_I32StoreMem = s2s_StoreMem<int32_t>;
  static auto constexpr s2s_I64StoreMem = s2s_StoreMem<int64_t>;
  static auto constexpr s2s_F32StoreMem = s2s_StoreMem<float>;
  static auto constexpr s2s_F64StoreMem = s2s_StoreMem<double>;

  ////////////////////////////////////////////////////////////////////////////
  // LocalGet_StoreMem

  template <typename T, typename U = T>
  INSTRUCTION_HANDLER_FUNC s2s_LocalGet_StoreMem(
      const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
      int64_t r0, double fp0) {
    slot_offset_t from = Read<slot_offset_t>(code);
    T value = base::ReadUnalignedValue<T>(reinterpret_cast<Address>(sp + from));

    uint8_t* memory_start = wasm_runtime->GetMemoryStart();
    uint64_t offset = Read<memory_offset32_t>(code);
    uint64_t index = pop<uint32_t>(sp, code, wasm_runtime);
    uint64_t effective_index = offset + index;

    if (V8_UNLIKELY(
            effective_index < index ||
            !base::IsInBounds<uint64_t>(effective_index, sizeof(U),
                                        wasm_runtime->GetMemorySize()))) {
      TRAP(TrapReason::kTrapMemOutOfBounds)
    }

    uint8_t* address = memory_start + effective_index;

    base::WriteUnalignedValue<U>(
        reinterpret_cast<Address>(address),
        base::ReadUnalignedValue<U>(reinterpret_cast<Address>(&value)));

    NextOp();
  }
  static auto constexpr s2s_LocalGet_I32StoreMem8 =
      s2s_LocalGet_StoreMem<int32_t, int8_t>;
  static auto constexpr s2s_LocalGet_I32StoreMem16 =
      s2s_LocalGet_StoreMem<int32_t, int16_t>;
  static auto constexpr s2s_LocalGet_I64StoreMem8 =
      s2s_LocalGet_StoreMem<int64_t, int8_t>;
  static auto constexpr s2s_LocalGet_I64StoreMem16 =
      s2s_LocalGet_StoreMem<int64_t, int16_t>;
  static auto constexpr s2s_LocalGet_I64StoreMem32 =
      s2s_LocalGet_StoreMem<int64_t, int32_t>;
  static auto constexpr s2s_LocalGet_I32StoreMem =
      s2s_LocalGet_StoreMem<int32_t>;
  static auto constexpr s2s_LocalGet_I64StoreMem =
      s2s_LocalGet_StoreMem<int64_t>;
  static auto constexpr s2s_LocalGet_F32StoreMem = s2s_LocalGet_StoreMem<float>;
  static auto constexpr s2s_LocalGet_F64StoreMem =
      s2s_LocalGet_StoreMem<double>;

  ////////////////////////////////////////////////////////////////////////////
  // LoadStoreMem
  static auto constexpr r2s_I32LoadStoreMem = r2s_LoadStoreMem<int32_t>;
  static auto constexpr r2s_I64LoadStoreMem = r2s_LoadStoreMem<int64_t>;
  static auto constexpr r2s_F32LoadStoreMem = r2s_LoadStoreMem<float>;
  static auto constexpr r2s_F64LoadStoreMem = r2s_LoadStoreMem<double>;

  static auto constexpr s2s_I32LoadStoreMem = s2s_LoadStoreMem<int32_t>;
  static auto constexpr s2s_I64LoadStoreMem = s2s_LoadStoreMem<int64_t>;
  static auto constexpr s2s_F32LoadStoreMem = s2s_LoadStoreMem<float>;
  static auto constexpr s2s_F64LoadStoreMem = s2s_LoadStoreMem<double>;

#endif  // V8_DRUMBRAKE_BOUNDS_CHECKS
1814
1816 // Select
1817
1818 template <typename IntT>
1819 INSTRUCTION_HANDLER_FUNC r2r_SelectI(const uint8_t* code, uint32_t* sp,
1820 WasmInterpreterRuntime* wasm_runtime,
1821 int64_t r0, double fp0) {
1822 IntT val2 = pop<IntT>(sp, code, wasm_runtime);
1823 IntT val1 = pop<IntT>(sp, code, wasm_runtime);
1824
1825 // r0: condition
1826 r0 = r0 ? val1 : val2;
1827
1828 NextOp();
1829 }
1830 static auto constexpr r2r_I32Select = r2r_SelectI<int32_t>;
1831 static auto constexpr r2r_I64Select = r2r_SelectI<int64_t>;
1832
1833 template <typename FloatT>
1834 INSTRUCTION_HANDLER_FUNC r2r_SelectF(const uint8_t* code, uint32_t* sp,
1835 WasmInterpreterRuntime* wasm_runtime,
1836 int64_t r0, double fp0) {
1837 FloatT val2 = pop<FloatT>(sp, code, wasm_runtime);
1838 FloatT val1 = pop<FloatT>(sp, code, wasm_runtime);
1839
1840 // r0: condition
1841 fp0 = r0 ? val1 : val2;
1842
1843 NextOp();
1844 }
1845 static auto constexpr r2r_F32Select = r2r_SelectF<float>;
1846 static auto constexpr r2r_F64Select = r2r_SelectF<double>;
1847
1848 template <typename T>
1849 INSTRUCTION_HANDLER_FUNC r2s_Select(const uint8_t* code, uint32_t* sp,
1850 WasmInterpreterRuntime* wasm_runtime,
1851 int64_t r0, double fp0) {
1852 T val2 = pop<T>(sp, code, wasm_runtime);
1853 T val1 = pop<T>(sp, code, wasm_runtime);
1854
1855 push<T>(sp, code, wasm_runtime, r0 ? val1 : val2);
1856
1857 NextOp();
1858 }
1859 static auto constexpr r2s_I32Select = r2s_Select<int32_t>;
1860 static auto constexpr r2s_I64Select = r2s_Select<int64_t>;
1861 static auto constexpr r2s_F32Select = r2s_Select<float>;
1862 static auto constexpr r2s_F64Select = r2s_Select<double>;
1863 static auto constexpr r2s_S128Select = r2s_Select<Simd128>;
1864
1865 INSTRUCTION_HANDLER_FUNC r2s_RefSelect(const uint8_t* code, uint32_t* sp,
1866 WasmInterpreterRuntime* wasm_runtime,
1867 int64_t r0, double fp0) {
1868 WasmRef val2 = pop<WasmRef>(sp, code, wasm_runtime);
1869 WasmRef val1 = pop<WasmRef>(sp, code, wasm_runtime);
1870 push<WasmRef>(sp, code, wasm_runtime, r0 ? val1 : val2);
1871
1872 NextOp();
1873 }
1874
1875 template <typename IntT>
1876 INSTRUCTION_HANDLER_FUNC s2r_SelectI(const uint8_t* code, uint32_t* sp,
1877 WasmInterpreterRuntime* wasm_runtime,
1878 int64_t r0, double fp0) {
1879 int32_t cond = pop<int32_t>(sp, code, wasm_runtime);
1880 IntT val2 = pop<IntT>(sp, code, wasm_runtime);
1881 IntT val1 = pop<IntT>(sp, code, wasm_runtime);
1882
1883 r0 = cond ? val1 : val2;
1884
1885 NextOp();
1886 }
1887 static auto constexpr s2r_I32Select = s2r_SelectI<int32_t>;
1888 static auto constexpr s2r_I64Select = s2r_SelectI<int64_t>;
1889
1890 template <typename FloatT>
1891 INSTRUCTION_HANDLER_FUNC s2r_SelectF(const uint8_t* code, uint32_t* sp,
1892 WasmInterpreterRuntime* wasm_runtime,
1893 int64_t r0, double fp0) {
1894 int32_t cond = pop<int32_t>(sp, code, wasm_runtime);
1895 FloatT val2 = pop<FloatT>(sp, code, wasm_runtime);
1896 FloatT val1 = pop<FloatT>(sp, code, wasm_runtime);
1897
1898 fp0 = cond ? val1 : val2;
1899
1900 NextOp();
1901 }
1902 static auto constexpr s2r_F32Select = s2r_SelectF<float>;
1903 static auto constexpr s2r_F64Select = s2r_SelectF<double>;
1904
1905 template <typename T>
1906 INSTRUCTION_HANDLER_FUNC s2s_Select(const uint8_t* code, uint32_t* sp,
1907 WasmInterpreterRuntime* wasm_runtime,
1908 int64_t r0, double fp0) {
1909 int32_t cond = pop<int32_t>(sp, code, wasm_runtime);
1910 T val2 = pop<T>(sp, code, wasm_runtime);
1911 T val1 = pop<T>(sp, code, wasm_runtime);
1912
1913 push<T>(sp, code, wasm_runtime, cond ? val1 : val2);
1914
1915 NextOp();
1916 }
1917 static auto constexpr s2s_I32Select = s2s_Select<int32_t>;
1918 static auto constexpr s2s_I64Select = s2s_Select<int64_t>;
1919 static auto constexpr s2s_F32Select = s2s_Select<float>;
1920 static auto constexpr s2s_F64Select = s2s_Select<double>;
1921 static auto constexpr s2s_S128Select = s2s_Select<Simd128>;
1922
1923 INSTRUCTION_HANDLER_FUNC s2s_RefSelect(const uint8_t* code, uint32_t* sp,
1924 WasmInterpreterRuntime* wasm_runtime,
1925 int64_t r0, double fp0) {
1926 int32_t cond = pop<int32_t>(sp, code, wasm_runtime);
1927 WasmRef val2 = pop<WasmRef>(sp, code, wasm_runtime);
1928 WasmRef val1 = pop<WasmRef>(sp, code, wasm_runtime);
1929 push<WasmRef>(sp, code, wasm_runtime, cond ? val1 : val2);
1930
1931 NextOp();
1932 }
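// The Select handlers come in the four register-allocation flavors used
// throughout this file: r2r keeps the condition in r0 and writes the result
// back to a register, r2s takes the condition from r0 but pushes the result,
// s2r pops all operands and leaves the result in a register, and s2s is
// fully stack-based. The WasmRef variants above push their result in both
// cases, since references do not travel through r0/fp0.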
1933
1935 // Binary arithmetic operators
1936
1937#define FOREACH_ARITHMETIC_BINOP(V) \
1938 V(I32Add, uint32_t, r0, +, I32) \
1939 V(I32Sub, uint32_t, r0, -, I32) \
1940 V(I32Mul, uint32_t, r0, *, I32) \
1941 V(I32And, uint32_t, r0, &, I32) \
1942 V(I32Ior, uint32_t, r0, |, I32) \
1943 V(I32Xor, uint32_t, r0, ^, I32) \
1944 V(I64Add, uint64_t, r0, +, I64) \
1945 V(I64Sub, uint64_t, r0, -, I64) \
1946 V(I64Mul, uint64_t, r0, *, I64) \
1947 V(I64And, uint64_t, r0, &, I64) \
1948 V(I64Ior, uint64_t, r0, |, I64) \
1949 V(I64Xor, uint64_t, r0, ^, I64) \
1950 V(F32Add, float, fp0, +, F32) \
1951 V(F32Sub, float, fp0, -, F32) \
1952 V(F32Mul, float, fp0, *, F32) \
1953 V(F32Div, float, fp0, /, F32) \
1954 V(F64Add, double, fp0, +, F64) \
1955 V(F64Sub, double, fp0, -, F64) \
1956 V(F64Mul, double, fp0, *, F64) \
1957 V(F64Div, double, fp0, /, F64)
1958
1959#define DEFINE_BINOP(name, ctype, reg, op, type) \
1960 INSTRUCTION_HANDLER_FUNC r2r_##name(const uint8_t* code, uint32_t* sp, \
1961 WasmInterpreterRuntime* wasm_runtime, \
1962 int64_t r0, double fp0) { \
1963 ctype rval = static_cast<ctype>(reg); \
1964 ctype lval = pop<ctype>(sp, code, wasm_runtime); \
1965 reg = static_cast<ctype>(lval op rval); \
1966 NextOp(); \
1967 } \
1968 \
1969 INSTRUCTION_HANDLER_FUNC r2s_##name(const uint8_t* code, uint32_t* sp, \
1970 WasmInterpreterRuntime* wasm_runtime, \
1971 int64_t r0, double fp0) { \
1972 ctype rval = static_cast<ctype>(reg); \
1973 ctype lval = pop<ctype>(sp, code, wasm_runtime); \
1974 push<ctype>(sp, code, wasm_runtime, lval op rval); \
1975 NextOp(); \
1976 } \
1977 \
1978 INSTRUCTION_HANDLER_FUNC s2r_##name(const uint8_t* code, uint32_t* sp, \
1979 WasmInterpreterRuntime* wasm_runtime, \
1980 int64_t r0, double fp0) { \
1981 ctype rval = pop<ctype>(sp, code, wasm_runtime); \
1982 ctype lval = pop<ctype>(sp, code, wasm_runtime); \
1983 reg = static_cast<ctype>(lval op rval); \
1984 NextOp(); \
1985 } \
1986 \
1987 INSTRUCTION_HANDLER_FUNC s2s_##name(const uint8_t* code, uint32_t* sp, \
1988 WasmInterpreterRuntime* wasm_runtime, \
1989 int64_t r0, double fp0) { \
1990 ctype rval = pop<ctype>(sp, code, wasm_runtime); \
1991 ctype lval = pop<ctype>(sp, code, wasm_runtime); \
1992 push<ctype>(sp, code, wasm_runtime, lval op rval); \
1993 NextOp(); \
1994 }
1995 FOREACH_ARITHMETIC_BINOP(DEFINE_BINOP)
1996#undef DEFINE_BINOP
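// As an illustration, the I32Add row of FOREACH_ARITHMETIC_BINOP expands
// (via DEFINE_BINOP) into four handlers; the fully stack-based one is
// equivalent to:
//
//   INSTRUCTION_HANDLER_FUNC s2s_I32Add(const uint8_t* code, uint32_t* sp,
//                                       WasmInterpreterRuntime* wasm_runtime,
//                                       int64_t r0, double fp0) {
//     uint32_t rval = pop<uint32_t>(sp, code, wasm_runtime);
//     uint32_t lval = pop<uint32_t>(sp, code, wasm_runtime);
//     push<uint32_t>(sp, code, wasm_runtime, lval + rval);
//     NextOp();
//   }
//
// Unsigned types are used for the integer rows so that + - * wrap modulo
// 2^32 / 2^64, matching Wasm semantics rather than C++ signed-overflow UB.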
1997
1999 // Binary arithmetic operators that can trap
2000
2001#define FOREACH_SIGNED_DIV_BINOP(V) \
2002 V(I32DivS, int32_t, r0, /, I32) \
2003 V(I64DivS, int64_t, r0, /, I64)
2004
2005#define FOREACH_UNSIGNED_DIV_BINOP(V) \
2006 V(I32DivU, uint32_t, r0, /, I32) \
2007 V(I64DivU, uint64_t, r0, /, I64)
2008
2009#define FOREACH_REM_BINOP(V) \
2010 V(I32RemS, int32_t, r0, ExecuteRemS, I32) \
2011 V(I64RemS, int64_t, r0, ExecuteRemS, I64) \
2012 V(I32RemU, uint32_t, r0, ExecuteRemU, I32) \
2013 V(I64RemU, uint64_t, r0, ExecuteRemU, I64)
2014
2015#define FOREACH_TRAPPING_BINOP(V) \
2016 FOREACH_SIGNED_DIV_BINOP(V) \
2017 FOREACH_UNSIGNED_DIV_BINOP(V) \
2018 FOREACH_REM_BINOP(V)
2019
2020#define DEFINE_BINOP(name, ctype, reg, op, type) \
2021 INSTRUCTION_HANDLER_FUNC r2r_##name(const uint8_t* code, uint32_t* sp, \
2022 WasmInterpreterRuntime* wasm_runtime, \
2023 int64_t r0, double fp0) { \
2024 ctype rval = static_cast<ctype>(reg); \
2025 ctype lval = pop<ctype>(sp, code, wasm_runtime); \
2026 if (rval == 0) { \
2027 TRAP(TrapReason::kTrapDivByZero) \
2028 } else if (rval == -1 && lval == std::numeric_limits<ctype>::min()) { \
2029 TRAP(TrapReason::kTrapDivUnrepresentable) \
2030 } else { \
2031 reg = static_cast<ctype>(lval op rval); \
2032 } \
2033 NextOp(); \
2034 } \
2035 \
2036 INSTRUCTION_HANDLER_FUNC r2s_##name(const uint8_t* code, uint32_t* sp, \
2037 WasmInterpreterRuntime* wasm_runtime, \
2038 int64_t r0, double fp0) { \
2039 ctype rval = static_cast<ctype>(reg); \
2040 ctype lval = pop<ctype>(sp, code, wasm_runtime); \
2041 if (rval == 0) { \
2042 TRAP(TrapReason::kTrapDivByZero) \
2043 } else if (rval == -1 && lval == std::numeric_limits<ctype>::min()) { \
2044 TRAP(TrapReason::kTrapDivUnrepresentable) \
2045 } else { \
2046 push<ctype>(sp, code, wasm_runtime, lval op rval); \
2047 } \
2048 NextOp(); \
2049 } \
2050 \
2051 INSTRUCTION_HANDLER_FUNC s2r_##name(const uint8_t* code, uint32_t* sp, \
2052 WasmInterpreterRuntime* wasm_runtime, \
2053 int64_t r0, double fp0) { \
2054 ctype rval = pop<ctype>(sp, code, wasm_runtime); \
2055 ctype lval = pop<ctype>(sp, code, wasm_runtime); \
2056 if (rval == 0) { \
2057 TRAP(TrapReason::kTrapDivByZero) \
2058 } else if (rval == -1 && lval == std::numeric_limits<ctype>::min()) { \
2059 TRAP(TrapReason::kTrapDivUnrepresentable) \
2060 } else { \
2061 reg = static_cast<ctype>(lval op rval); \
2062 } \
2063 NextOp(); \
2064 } \
2065 \
2066 INSTRUCTION_HANDLER_FUNC s2s_##name(const uint8_t* code, uint32_t* sp, \
2067 WasmInterpreterRuntime* wasm_runtime, \
2068 int64_t r0, double fp0) { \
2069 ctype rval = pop<ctype>(sp, code, wasm_runtime); \
2070 ctype lval = pop<ctype>(sp, code, wasm_runtime); \
2071 if (rval == 0) { \
2072 TRAP(TrapReason::kTrapDivByZero) \
2073 } else if (rval == -1 && lval == std::numeric_limits<ctype>::min()) { \
2074 TRAP(TrapReason::kTrapDivUnrepresentable) \
2075 } else { \
2076 push<ctype>(sp, code, wasm_runtime, lval op rval); \
2077 } \
2078 NextOp(); \
2079 }
2080 FOREACH_SIGNED_DIV_BINOP(DEFINE_BINOP)
2081#undef DEFINE_BINOP
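// Signed division needs the extra lval == min / rval == -1 check because
// e.g. INT32_MIN / -1 is 2^31, which is not representable in int32_t: Wasm
// requires kTrapDivUnrepresentable here, and in C++ the expression would be
// undefined behavior.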
2082
2083#define DEFINE_BINOP(name, ctype, reg, op, type) \
2084 INSTRUCTION_HANDLER_FUNC r2r_##name(const uint8_t* code, uint32_t* sp, \
2085 WasmInterpreterRuntime* wasm_runtime, \
2086 int64_t r0, double fp0) { \
2087 ctype rval = static_cast<ctype>(reg); \
2088 ctype lval = pop<ctype>(sp, code, wasm_runtime); \
2089 if (rval == 0) { \
2090 TRAP(TrapReason::kTrapDivByZero) \
2091 } else { \
2092 reg = static_cast<ctype>(lval op rval); \
2093 } \
2094 NextOp(); \
2095 } \
2096 \
2097 INSTRUCTION_HANDLER_FUNC r2s_##name(const uint8_t* code, uint32_t* sp, \
2098 WasmInterpreterRuntime* wasm_runtime, \
2099 int64_t r0, double fp0) { \
2100 ctype rval = static_cast<ctype>(reg); \
2101 ctype lval = pop<ctype>(sp, code, wasm_runtime); \
2102 if (rval == 0) { \
2103 TRAP(TrapReason::kTrapDivByZero) \
2104 } else { \
2105 push<ctype>(sp, code, wasm_runtime, lval op rval); \
2106 } \
2107 NextOp(); \
2108 } \
2109 \
2110 INSTRUCTION_HANDLER_FUNC s2r_##name(const uint8_t* code, uint32_t* sp, \
2111 WasmInterpreterRuntime* wasm_runtime, \
2112 int64_t r0, double fp0) { \
2113 ctype rval = pop<ctype>(sp, code, wasm_runtime); \
2114 ctype lval = pop<ctype>(sp, code, wasm_runtime); \
2115 if (rval == 0) { \
2116 TRAP(TrapReason::kTrapDivByZero) \
2117 } else { \
2118 reg = static_cast<ctype>(lval op rval); \
2119 } \
2120 NextOp(); \
2121 } \
2122 \
2123 INSTRUCTION_HANDLER_FUNC s2s_##name(const uint8_t* code, uint32_t* sp, \
2124 WasmInterpreterRuntime* wasm_runtime, \
2125 int64_t r0, double fp0) { \
2126 ctype rval = pop<ctype>(sp, code, wasm_runtime); \
2127 ctype lval = pop<ctype>(sp, code, wasm_runtime); \
2128 if (rval == 0) { \
2129 TRAP(TrapReason::kTrapDivByZero) \
2130 } else { \
2131 push<ctype>(sp, code, wasm_runtime, lval op rval); \
2132 } \
2133 NextOp(); \
2134 }
2135 FOREACH_UNSIGNED_DIV_BINOP(DEFINE_BINOP)
2136#undef DEFINE_BINOP
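// The unsigned variants only need the divide-by-zero check: unsigned
// division cannot overflow, so there is no kTrapDivUnrepresentable case.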
2137
2138#define DEFINE_BINOP(name, ctype, reg, op, type) \
2139 INSTRUCTION_HANDLER_FUNC r2r_##name(const uint8_t* code, uint32_t* sp, \
2140 WasmInterpreterRuntime* wasm_runtime, \
2141 int64_t r0, double fp0) { \
2142 ctype rval = static_cast<ctype>(reg); \
2143 ctype lval = pop<ctype>(sp, code, wasm_runtime); \
2144 if (rval == 0) { \
2145 TRAP(TrapReason::kTrapRemByZero) \
2146 } else { \
2147 reg = static_cast<ctype>(op(lval, rval)); \
2148 } \
2149 NextOp(); \
2150 } \
2151 \
2152 INSTRUCTION_HANDLER_FUNC r2s_##name(const uint8_t* code, uint32_t* sp, \
2153 WasmInterpreterRuntime* wasm_runtime, \
2154 int64_t r0, double fp0) { \
2155 ctype rval = static_cast<ctype>(reg); \
2156 ctype lval = pop<ctype>(sp, code, wasm_runtime); \
2157 if (rval == 0) { \
2158 TRAP(TrapReason::kTrapRemByZero) \
2159 } else { \
2160 push<ctype>(sp, code, wasm_runtime, op(lval, rval)); \
2161 } \
2162 NextOp(); \
2163 } \
2164 \
2165 INSTRUCTION_HANDLER_FUNC s2r_##name(const uint8_t* code, uint32_t* sp, \
2166 WasmInterpreterRuntime* wasm_runtime, \
2167 int64_t r0, double fp0) { \
2168 ctype rval = pop<ctype>(sp, code, wasm_runtime); \
2169 ctype lval = pop<ctype>(sp, code, wasm_runtime); \
2170 if (rval == 0) { \
2171 TRAP(TrapReason::kTrapRemByZero); \
2172 } else { \
2173 reg = static_cast<ctype>(op(lval, rval)); \
2174 } \
2175 NextOp(); \
2176 } \
2177 \
2178 INSTRUCTION_HANDLER_FUNC s2s_##name(const uint8_t* code, uint32_t* sp, \
2179 WasmInterpreterRuntime* wasm_runtime, \
2180 int64_t r0, double fp0) { \
2181 ctype rval = pop<ctype>(sp, code, wasm_runtime); \
2182 ctype lval = pop<ctype>(sp, code, wasm_runtime); \
2183 if (rval == 0) { \
2184 TRAP(TrapReason::kTrapRemByZero) \
2185 } else { \
2186 push<ctype>(sp, code, wasm_runtime, op(lval, rval)); \
2187 } \
2188 NextOp(); \
2189 }
2190 FOREACH_REM_BINOP(DEFINE_BINOP)
2191#undef DEFINE_BINOP
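// Remainder delegates to the ExecuteRemS/ExecuteRemU helpers rather than
// using % directly: INT32_MIN % -1 is undefined behavior in C++, while Wasm
// defines i32.rem_s of those operands to be 0 (rem_s only traps on a zero
// divisor).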
2192
2194 // Comparison operators
2195
2196#define FOREACH_COMPARISON_BINOP(V) \
2197 V(I32Eq, uint32_t, r0, ==, I32) \
2198 V(I32Ne, uint32_t, r0, !=, I32) \
2199 V(I32LtU, uint32_t, r0, <, I32) \
2200 V(I32LeU, uint32_t, r0, <=, I32) \
2201 V(I32GtU, uint32_t, r0, >, I32) \
2202 V(I32GeU, uint32_t, r0, >=, I32) \
2203 V(I32LtS, int32_t, r0, <, I32) \
2204 V(I32LeS, int32_t, r0, <=, I32) \
2205 V(I32GtS, int32_t, r0, >, I32) \
2206 V(I32GeS, int32_t, r0, >=, I32) \
2207 V(I64Eq, uint64_t, r0, ==, I64) \
2208 V(I64Ne, uint64_t, r0, !=, I64) \
2209 V(I64LtU, uint64_t, r0, <, I64) \
2210 V(I64LeU, uint64_t, r0, <=, I64) \
2211 V(I64GtU, uint64_t, r0, >, I64) \
2212 V(I64GeU, uint64_t, r0, >=, I64) \
2213 V(I64LtS, int64_t, r0, <, I64) \
2214 V(I64LeS, int64_t, r0, <=, I64) \
2215 V(I64GtS, int64_t, r0, >, I64) \
2216 V(I64GeS, int64_t, r0, >=, I64) \
2217 V(F32Eq, float, fp0, ==, F32) \
2218 V(F32Ne, float, fp0, !=, F32) \
2219 V(F32Lt, float, fp0, <, F32) \
2220 V(F32Le, float, fp0, <=, F32) \
2221 V(F32Gt, float, fp0, >, F32) \
2222 V(F32Ge, float, fp0, >=, F32) \
2223 V(F64Eq, double, fp0, ==, F64) \
2224 V(F64Ne, double, fp0, !=, F64) \
2225 V(F64Lt, double, fp0, <, F64) \
2226 V(F64Le, double, fp0, <=, F64) \
2227 V(F64Gt, double, fp0, >, F64) \
2228 V(F64Ge, double, fp0, >=, F64)
2229
2230#define DEFINE_BINOP(name, ctype, reg, op, type) \
2231 INSTRUCTION_HANDLER_FUNC r2r_##name(const uint8_t* code, uint32_t* sp, \
2232 WasmInterpreterRuntime* wasm_runtime, \
2233 int64_t r0, double fp0) { \
2234 ctype rval = static_cast<ctype>(reg); \
2235 ctype lval = pop<ctype>(sp, code, wasm_runtime); \
2236 r0 = (lval op rval) ? 1 : 0; \
2237 NextOp(); \
2238 } \
2239 \
2240 INSTRUCTION_HANDLER_FUNC r2s_##name(const uint8_t* code, uint32_t* sp, \
2241 WasmInterpreterRuntime* wasm_runtime, \
2242 int64_t r0, double fp0) { \
2243 ctype rval = static_cast<ctype>(reg); \
2244 ctype lval = pop<ctype>(sp, code, wasm_runtime); \
2245 push<int32_t>(sp, code, wasm_runtime, lval op rval ? 1 : 0); \
2246 NextOp(); \
2247 } \
2248 \
2249 INSTRUCTION_HANDLER_FUNC s2r_##name(const uint8_t* code, uint32_t* sp, \
2250 WasmInterpreterRuntime* wasm_runtime, \
2251 int64_t r0, double fp0) { \
2252 ctype rval = pop<ctype>(sp, code, wasm_runtime); \
2253 ctype lval = pop<ctype>(sp, code, wasm_runtime); \
2254 r0 = (lval op rval) ? 1 : 0; \
2255 NextOp(); \
2256 } \
2257 \
2258 INSTRUCTION_HANDLER_FUNC s2s_##name(const uint8_t* code, uint32_t* sp, \
2259 WasmInterpreterRuntime* wasm_runtime, \
2260 int64_t r0, double fp0) { \
2261 ctype rval = pop<ctype>(sp, code, wasm_runtime); \
2262 ctype lval = pop<ctype>(sp, code, wasm_runtime); \
2263 push<int32_t>(sp, code, wasm_runtime, lval op rval ? 1 : 0); \
2264 NextOp(); \
2265 }
2266 FOREACH_COMPARISON_BINOP(DEFINE_BINOP)
2267#undef DEFINE_BINOP
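// Whatever the operand type, every comparison materializes its result as an
// i32 0/1: the r2r/s2r forms write it to r0 and the r2s/s2s forms push it
// with push<int32_t>, matching the i32 result type of all Wasm comparisons.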
2268
2270 // More binary operators
2271
2272#define FOREACH_MORE_BINOP(V) \
2273 V(I32Shl, uint32_t, r0, (lval << (rval & 31)), I32) \
2274 V(I32ShrU, uint32_t, r0, (lval >> (rval & 31)), I32) \
2275 V(I32ShrS, int32_t, r0, (lval >> (rval & 31)), I32) \
2276 V(I64Shl, uint64_t, r0, (lval << (rval & 63)), I64) \
2277 V(I64ShrU, uint64_t, r0, (lval >> (rval & 63)), I64) \
2278 V(I64ShrS, int64_t, r0, (lval >> (rval & 63)), I64) \
2279 V(I32Rol, uint32_t, r0, (base::bits::RotateLeft32(lval, rval & 31)), I32) \
2280 V(I32Ror, uint32_t, r0, (base::bits::RotateRight32(lval, rval & 31)), I32) \
2281 V(I64Rol, uint64_t, r0, (base::bits::RotateLeft64(lval, rval & 63)), I64) \
2282 V(I64Ror, uint64_t, r0, (base::bits::RotateRight64(lval, rval & 63)), I64) \
2283 V(F32Min, float, fp0, (JSMin<float>(lval, rval)), F32) \
2284 V(F32Max, float, fp0, (JSMax<float>(lval, rval)), F32) \
2285 V(F64Min, double, fp0, (JSMin<double>(lval, rval)), F64) \
2286 V(F64Max, double, fp0, (JSMax<double>(lval, rval)), F64) \
2287 V(F32CopySign, float, fp0, \
2288 Float32::FromBits((base::ReadUnalignedValue<uint32_t>( \
2289 reinterpret_cast<Address>(&lval)) & \
2290 ~kFloat32SignBitMask) | \
2291 (base::ReadUnalignedValue<uint32_t>( \
2292 reinterpret_cast<Address>(&rval)) & \
2293 kFloat32SignBitMask)) \
2294 .get_scalar(), \
2295 F32) \
2296 V(F64CopySign, double, fp0, \
2297 Float64::FromBits((base::ReadUnalignedValue<uint64_t>( \
2298 reinterpret_cast<Address>(&lval)) & \
2299 ~kFloat64SignBitMask) | \
2300 (base::ReadUnalignedValue<uint64_t>( \
2301 reinterpret_cast<Address>(&rval)) & \
2302 kFloat64SignBitMask)) \
2303 .get_scalar(), \
2304 F64)
2305
2306#define DEFINE_BINOP(name, ctype, reg, op, type) \
2307 INSTRUCTION_HANDLER_FUNC r2r_##name(const uint8_t* code, uint32_t* sp, \
2308 WasmInterpreterRuntime* wasm_runtime, \
2309 int64_t r0, double fp0) { \
2310 ctype rval = static_cast<ctype>(reg); \
2311 ctype lval = pop<ctype>(sp, code, wasm_runtime); \
2312 reg = static_cast<ctype>(op); \
2313 NextOp(); \
2314 } \
2315 \
2316 INSTRUCTION_HANDLER_FUNC r2s_##name(const uint8_t* code, uint32_t* sp, \
2317 WasmInterpreterRuntime* wasm_runtime, \
2318 int64_t r0, double fp0) { \
2319 ctype rval = static_cast<ctype>(reg); \
2320 ctype lval = pop<ctype>(sp, code, wasm_runtime); \
2321 push<ctype>(sp, code, wasm_runtime, op); \
2322 NextOp(); \
2323 } \
2324 \
2325 INSTRUCTION_HANDLER_FUNC s2r_##name(const uint8_t* code, uint32_t* sp, \
2326 WasmInterpreterRuntime* wasm_runtime, \
2327 int64_t r0, double fp0) { \
2328 ctype rval = pop<ctype>(sp, code, wasm_runtime); \
2329 ctype lval = pop<ctype>(sp, code, wasm_runtime); \
2330 reg = static_cast<ctype>(op); \
2331 NextOp(); \
2332 } \
2333 \
2334 INSTRUCTION_HANDLER_FUNC s2s_##name(const uint8_t* code, uint32_t* sp, \
2335 WasmInterpreterRuntime* wasm_runtime, \
2336 int64_t r0, double fp0) { \
2337 ctype rval = pop<ctype>(sp, code, wasm_runtime); \
2338 ctype lval = pop<ctype>(sp, code, wasm_runtime); \
2339 push<ctype>(sp, code, wasm_runtime, op); \
2340 NextOp(); \
2341 }
2342 FOREACH_MORE_BINOP(DEFINE_BINOP)
2343#undef DEFINE_BINOP
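// The shift and rotate rows mask the shift count (rval & 31 or rval & 63)
// because Wasm defines shifts modulo the bit width, whereas an out-of-range
// shift count would be undefined behavior in C++.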
2344
2346 // Unary operators
2347
2348#define FOREACH_SIMPLE_UNOP(V) \
2349 V(F32Abs, float, fp0, abs(val), F32) \
2350 V(F32Neg, float, fp0, -val, F32) \
2351 V(F32Ceil, float, fp0, ceilf(val), F32) \
2352 V(F32Floor, float, fp0, floorf(val), F32) \
2353 V(F32Trunc, float, fp0, truncf(val), F32) \
2354 V(F32NearestInt, float, fp0, nearbyintf(val), F32) \
2355 V(F32Sqrt, float, fp0, sqrt(val), F32) \
2356 V(F64Abs, double, fp0, abs(val), F64) \
2357 V(F64Neg, double, fp0, (-val), F64) \
2358 V(F64Ceil, double, fp0, ceil(val), F64) \
2359 V(F64Floor, double, fp0, floor(val), F64) \
2360 V(F64Trunc, double, fp0, trunc(val), F64) \
2361 V(F64NearestInt, double, fp0, nearbyint(val), F64) \
2362 V(F64Sqrt, double, fp0, sqrt(val), F64)
2363
2364#define DEFINE_UNOP(name, ctype, reg, op, type) \
2365 INSTRUCTION_HANDLER_FUNC r2r_##name(const uint8_t* code, uint32_t* sp, \
2366 WasmInterpreterRuntime* wasm_runtime, \
2367 int64_t r0, double fp0) { \
2368 ctype val = static_cast<ctype>(reg); \
2369 reg = static_cast<ctype>(op); \
2370 NextOp(); \
2371 } \
2372 \
2373 INSTRUCTION_HANDLER_FUNC r2s_##name(const uint8_t* code, uint32_t* sp, \
2374 WasmInterpreterRuntime* wasm_runtime, \
2375 int64_t r0, double fp0) { \
2376 ctype val = static_cast<ctype>(reg); \
2377 push<ctype>(sp, code, wasm_runtime, op); \
2378 NextOp(); \
2379 } \
2380 \
2381 INSTRUCTION_HANDLER_FUNC s2r_##name(const uint8_t* code, uint32_t* sp, \
2382 WasmInterpreterRuntime* wasm_runtime, \
2383 int64_t r0, double fp0) { \
2384 ctype val = pop<ctype>(sp, code, wasm_runtime); \
2385 reg = static_cast<ctype>(op); \
2386 NextOp(); \
2387 } \
2388 \
2389 INSTRUCTION_HANDLER_FUNC s2s_##name(const uint8_t* code, uint32_t* sp, \
2390 WasmInterpreterRuntime* wasm_runtime, \
2391 int64_t r0, double fp0) { \
2392 ctype val = pop<ctype>(sp, code, wasm_runtime); \
2393 push<ctype>(sp, code, wasm_runtime, op); \
2394 NextOp(); \
2395 }
2396 FOREACH_SIMPLE_UNOP(DEFINE_UNOP)
2397#undef DEFINE_UNOP
2398
2400 // Numeric conversion operators
2401
2402#define FOREACH_ADDITIONAL_CONVERT_UNOP(V) \
2403 V(I32ConvertI64, int64_t, I64, r0, int32_t, I32, r0)
2404
2405 INSTRUCTION_HANDLER_FUNC r2r_I32ConvertI64(
2406 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
2407 int64_t r0, double fp0) {
2408 r0 &= 0xffffffff;
2409 NextOp();
2410 }
2411 INSTRUCTION_HANDLER_FUNC r2s_I32ConvertI64(
2412 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
2413 int64_t r0, double fp0) {
2414 push<int32_t>(sp, code, wasm_runtime, r0 & 0xffffffff);
2415 NextOp();
2416 }
2417 INSTRUCTION_HANDLER_FUNC s2r_I32ConvertI64(
2418 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
2419 int64_t r0, double fp0) {
2420 r0 = 0xffffffff & pop<int64_t>(sp, code, wasm_runtime);
2421 NextOp();
2422 }
2423 INSTRUCTION_HANDLER_FUNC s2s_I32ConvertI64(
2424 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
2425 int64_t r0, double fp0) {
2426 push<int32_t>(sp, code, wasm_runtime,
2427 0xffffffff & pop<int64_t>(sp, code, wasm_runtime));
2428 NextOp();
2429 }
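// i32.wrap_i64 cannot fail, so unlike the float-to-int conversions below it
// is implemented as a plain mask of the low 32 bits.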
2430
2431#define FOREACH_I64_CONVERT_FROM_FLOAT_UNOP(V) \
2432 V(I64SConvertF32, float, F32, fp0, int64_t, I64, r0) \
2433 V(I64SConvertF64, double, F64, fp0, int64_t, I64, r0) \
2434 V(I64UConvertF32, float, F32, fp0, uint64_t, I64, r0) \
2435 V(I64UConvertF64, double, F64, fp0, uint64_t, I64, r0)
2436
2437#define FOREACH_I32_CONVERT_FROM_FLOAT_UNOP(V) \
2438 V(I32SConvertF32, float, F32, fp0, int32_t, I32, r0) \
2439 V(I32UConvertF32, float, F32, fp0, uint32_t, I32, r0) \
2440 V(I32SConvertF64, double, F64, fp0, int32_t, I32, r0) \
2441 V(I32UConvertF64, double, F64, fp0, uint32_t, I32, r0)
2442
2443#define FOREACH_OTHER_CONVERT_UNOP(V) \
2444 V(I64SConvertI32, int32_t, I32, r0, int64_t, I64, r0) \
2445 V(I64UConvertI32, uint32_t, I32, r0, uint64_t, I64, r0) \
2446 V(F32SConvertI32, int32_t, I32, r0, float, F32, fp0) \
2447 V(F32UConvertI32, uint32_t, I32, r0, float, F32, fp0) \
2448 V(F32SConvertI64, int64_t, I64, r0, float, F32, fp0) \
2449 V(F32UConvertI64, uint64_t, I64, r0, float, F32, fp0) \
2450 V(F32ConvertF64, double, F64, fp0, float, F32, fp0) \
2451 V(F64SConvertI32, int32_t, I32, r0, double, F64, fp0) \
2452 V(F64UConvertI32, uint32_t, I32, r0, double, F64, fp0) \
2453 V(F64SConvertI64, int64_t, I64, r0, double, F64, fp0) \
2454 V(F64UConvertI64, uint64_t, I64, r0, double, F64, fp0) \
2455 V(F64ConvertF32, float, F32, fp0, double, F64, fp0)
2456
2457#define FOREACH_CONVERT_UNOP(V) \
2458 FOREACH_I64_CONVERT_FROM_FLOAT_UNOP(V) \
2459 FOREACH_I32_CONVERT_FROM_FLOAT_UNOP(V) \
2460 FOREACH_OTHER_CONVERT_UNOP(V)
2461
2462#define DEFINE_UNOP(name, from_ctype, from_type, from_reg, to_ctype, to_type, \
2463 to_reg) \
2464 INSTRUCTION_HANDLER_FUNC r2r_##name(const uint8_t* code, uint32_t* sp, \
2465 WasmInterpreterRuntime* wasm_runtime, \
2466 int64_t r0, double fp0) { \
2467 if (!base::IsValueInRangeForNumericType<to_ctype>(from_reg)) { \
2468 TRAP(TrapReason::kTrapFloatUnrepresentable) \
2469 } else { \
2470 to_reg = static_cast<to_ctype>(static_cast<from_ctype>(from_reg)); \
2471 } \
2472 NextOp(); \
2473 } \
2474 \
2475 INSTRUCTION_HANDLER_FUNC r2s_##name(const uint8_t* code, uint32_t* sp, \
2476 WasmInterpreterRuntime* wasm_runtime, \
2477 int64_t r0, double fp0) { \
2478 if (!base::IsValueInRangeForNumericType<to_ctype>(from_reg)) { \
2479 TRAP(TrapReason::kTrapFloatUnrepresentable) \
2480 } else { \
2481 to_ctype val = static_cast<from_ctype>(from_reg); \
2482 push<to_ctype>(sp, code, wasm_runtime, val); \
2483 } \
2484 NextOp(); \
2485 } \
2486 \
2487 INSTRUCTION_HANDLER_FUNC s2r_##name(const uint8_t* code, uint32_t* sp, \
2488 WasmInterpreterRuntime* wasm_runtime, \
2489 int64_t r0, double fp0) { \
2490 from_ctype from_val = pop<from_ctype>(sp, code, wasm_runtime); \
2491 if (!base::IsValueInRangeForNumericType<to_ctype>(from_val)) { \
2492 TRAP(TrapReason::kTrapFloatUnrepresentable) \
2493 } else { \
2494 to_reg = static_cast<to_ctype>(from_val); \
2495 } \
2496 NextOp(); \
2497 } \
2498 \
2499 INSTRUCTION_HANDLER_FUNC s2s_##name(const uint8_t* code, uint32_t* sp, \
2500 WasmInterpreterRuntime* wasm_runtime, \
2501 int64_t r0, double fp0) { \
2502 from_ctype from_val = pop<from_ctype>(sp, code, wasm_runtime); \
2503 if (!base::IsValueInRangeForNumericType<to_ctype>(from_val)) { \
2504 TRAP(TrapReason::kTrapFloatUnrepresentable) \
2505 } else { \
2506 to_ctype val = static_cast<to_ctype>(from_val); \
2507 push<to_ctype>(sp, code, wasm_runtime, val); \
2508 } \
2509 NextOp(); \
2510 }
2511 FOREACH_I64_CONVERT_FROM_FLOAT_UNOP(DEFINE_UNOP)
2512#undef DEFINE_UNOP
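// The trapping float-to-int conversions reject NaN and any value outside
// the target range up front (kTrapFloatUnrepresentable); only in-range
// values reach the static_cast, so the cast itself is always well-defined.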
2513
2514#define DEFINE_UNOP(name, from_ctype, from_type, from_reg, to_ctype, to_type, \
2515 to_reg) \
2516 INSTRUCTION_HANDLER_FUNC r2r_##name(const uint8_t* code, uint32_t* sp, \
2517 WasmInterpreterRuntime* wasm_runtime, \
2518 int64_t r0, double fp0) { \
2519 if (!is_inbounds<to_ctype>(from_reg)) { \
2520 TRAP(TrapReason::kTrapFloatUnrepresentable) \
2521 } else { \
2522 to_reg = static_cast<to_ctype>(static_cast<from_ctype>(from_reg)); \
2523 } \
2524 NextOp(); \
2525 } \
2526 \
2527 INSTRUCTION_HANDLER_FUNC r2s_##name(const uint8_t* code, uint32_t* sp, \
2528 WasmInterpreterRuntime* wasm_runtime, \
2529 int64_t r0, double fp0) { \
2530 if (!is_inbounds<to_ctype>(from_reg)) { \
2531 TRAP(TrapReason::kTrapFloatUnrepresentable) \
2532 } else { \
2533 to_ctype val = static_cast<from_ctype>(from_reg); \
2534 push<to_ctype>(sp, code, wasm_runtime, val); \
2535 } \
2536 NextOp(); \
2537 } \
2538 \
2539 INSTRUCTION_HANDLER_FUNC s2r_##name(const uint8_t* code, uint32_t* sp, \
2540 WasmInterpreterRuntime* wasm_runtime, \
2541 int64_t r0, double fp0) { \
2542 from_ctype from_val = pop<from_ctype>(sp, code, wasm_runtime); \
2543 if (!is_inbounds<to_ctype>(from_val)) { \
2544 TRAP(TrapReason::kTrapFloatUnrepresentable) \
2545 } else { \
2546 to_reg = static_cast<to_ctype>(from_val); \
2547 } \
2548 NextOp(); \
2549 } \
2550 \
2551 INSTRUCTION_HANDLER_FUNC s2s_##name(const uint8_t* code, uint32_t* sp, \
2552 WasmInterpreterRuntime* wasm_runtime, \
2553 int64_t r0, double fp0) { \
2554 from_ctype from_val = pop<from_ctype>(sp, code, wasm_runtime); \
2555 if (!is_inbounds<to_ctype>(from_val)) { \
2556 TRAP(TrapReason::kTrapFloatUnrepresentable) \
2557 } else { \
2558 to_ctype val = static_cast<to_ctype>(from_val); \
2559 push<to_ctype>(sp, code, wasm_runtime, val); \
2560 } \
2561 NextOp(); \
2562 }
2563 FOREACH_I32_CONVERT_FROM_FLOAT_UNOP(DEFINE_UNOP)
2564#undef DEFINE_UNOP
2565
2566#define DEFINE_UNOP(name, from_ctype, from_type, from_reg, to_ctype, to_type, \
2567 to_reg) \
2568 INSTRUCTION_HANDLER_FUNC r2r_##name(const uint8_t* code, uint32_t* sp, \
2569 WasmInterpreterRuntime* wasm_runtime, \
2570 int64_t r0, double fp0) { \
2571 to_reg = static_cast<to_ctype>(static_cast<from_ctype>(from_reg)); \
2572 NextOp(); \
2573 } \
2574 \
2575 INSTRUCTION_HANDLER_FUNC r2s_##name(const uint8_t* code, uint32_t* sp, \
2576 WasmInterpreterRuntime* wasm_runtime, \
2577 int64_t r0, double fp0) { \
2578 to_ctype val = static_cast<from_ctype>(from_reg); \
2579 push<to_ctype>(sp, code, wasm_runtime, val); \
2580 NextOp(); \
2581 } \
2582 \
2583 INSTRUCTION_HANDLER_FUNC s2r_##name(const uint8_t* code, uint32_t* sp, \
2584 WasmInterpreterRuntime* wasm_runtime, \
2585 int64_t r0, double fp0) { \
2586 to_reg = static_cast<to_ctype>(pop<from_ctype>(sp, code, wasm_runtime)); \
2587 NextOp(); \
2588 } \
2589 \
2590 INSTRUCTION_HANDLER_FUNC s2s_##name(const uint8_t* code, uint32_t* sp, \
2591 WasmInterpreterRuntime* wasm_runtime, \
2592 int64_t r0, double fp0) { \
2593 to_ctype val = pop<from_ctype>(sp, code, wasm_runtime); \
2594 push<to_ctype>(sp, code, wasm_runtime, val); \
2595 NextOp(); \
2596 }
2597 FOREACH_OTHER_CONVERT_UNOP(DEFINE_UNOP)
2598#undef DEFINE_UNOP
2599
2601 // Numeric reinterpret operators
2602
2603#define FOREACH_REINTERPRET_UNOP(V) \
2604 V(F32ReinterpretI32, int32_t, I32, r0, float, F32, fp0) \
2605 V(F64ReinterpretI64, int64_t, I64, r0, double, F64, fp0) \
2606 V(I32ReinterpretF32, float, F32, fp0, int32_t, I32, r0) \
2607 V(I64ReinterpretF64, double, F64, fp0, int64_t, I64, r0)
2608
2609#define DEFINE_UNOP(name, from_ctype, from_type, from_reg, to_ctype, to_type, \
2610 to_reg) \
2611 INSTRUCTION_HANDLER_FUNC r2r_##name(const uint8_t* code, uint32_t* sp, \
2612 WasmInterpreterRuntime* wasm_runtime, \
2613 int64_t r0, double fp0) { \
2614 from_ctype value = static_cast<from_ctype>(from_reg); \
2615 to_reg = \
2616 base::ReadUnalignedValue<to_ctype>(reinterpret_cast<Address>(&value)); \
2617 NextOp(); \
2618 } \
2619 \
2620 INSTRUCTION_HANDLER_FUNC r2s_##name(const uint8_t* code, uint32_t* sp, \
2621 WasmInterpreterRuntime* wasm_runtime, \
2622 int64_t r0, double fp0) { \
2623 from_ctype val = static_cast<from_ctype>(from_reg); \
2624 push<to_ctype>( \
2625 sp, code, wasm_runtime, \
2626 base::ReadUnalignedValue<to_ctype>(reinterpret_cast<Address>(&val))); \
2627 NextOp(); \
2628 } \
2629 \
2630 INSTRUCTION_HANDLER_FUNC s2r_##name(const uint8_t* code, uint32_t* sp, \
2631 WasmInterpreterRuntime* wasm_runtime, \
2632 int64_t r0, double fp0) { \
2633 from_ctype val = pop<from_ctype>(sp, code, wasm_runtime); \
2634 to_reg = \
2635 base::ReadUnalignedValue<to_ctype>(reinterpret_cast<Address>(&val)); \
2636 NextOp(); \
2637 } \
2638 \
2639 INSTRUCTION_HANDLER_FUNC s2s_##name(const uint8_t* code, uint32_t* sp, \
2640 WasmInterpreterRuntime* wasm_runtime, \
2641 int64_t r0, double fp0) { \
2642 from_ctype val = pop<from_ctype>(sp, code, wasm_runtime); \
2643 push<to_ctype>( \
2644 sp, code, wasm_runtime, \
2645 base::ReadUnalignedValue<to_ctype>(reinterpret_cast<Address>(&val))); \
2646 NextOp(); \
2647 }
2648 FOREACH_REINTERPRET_UNOP(DEFINE_UNOP)
2649#undef DEFINE_UNOP
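// Reinterpret ops must preserve the exact bit pattern, so they go through
// ReadUnalignedValue on the address of the value instead of a static_cast,
// which would perform a numeric (value-changing) conversion.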
2650
2652 // Bit operators
2653
2654#define FOREACH_BITS_UNOP(V) \
2655 V(I32Clz, uint32_t, I32, uint32_t, I32, base::bits::CountLeadingZeros(val)) \
2656 V(I32Ctz, uint32_t, I32, uint32_t, I32, base::bits::CountTrailingZeros(val)) \
2657 V(I32Popcnt, uint32_t, I32, uint32_t, I32, base::bits::CountPopulation(val)) \
2658 V(I32Eqz, uint32_t, I32, int32_t, I32, val == 0 ? 1 : 0) \
2659 V(I64Clz, uint64_t, I64, uint64_t, I64, base::bits::CountLeadingZeros(val)) \
2660 V(I64Ctz, uint64_t, I64, uint64_t, I64, base::bits::CountTrailingZeros(val)) \
2661 V(I64Popcnt, uint64_t, I64, uint64_t, I64, base::bits::CountPopulation(val)) \
2662 V(I64Eqz, uint64_t, I64, int32_t, I32, val == 0 ? 1 : 0)
2663
2664#define DEFINE_REG_BINOP(name, from_ctype, from_type, to_ctype, to_type, op) \
2665 INSTRUCTION_HANDLER_FUNC r2r_##name(const uint8_t* code, uint32_t* sp, \
2666 WasmInterpreterRuntime* wasm_runtime, \
2667 int64_t r0, double fp0) { \
2668 from_ctype val = static_cast<from_ctype>(r0); \
2669 r0 = static_cast<to_ctype>(op); \
2670 NextOp(); \
2671 } \
2672 \
2673 INSTRUCTION_HANDLER_FUNC r2s_##name(const uint8_t* code, uint32_t* sp, \
2674 WasmInterpreterRuntime* wasm_runtime, \
2675 int64_t r0, double fp0) { \
2676 from_ctype val = static_cast<from_ctype>(r0); \
2677 push<to_ctype>(sp, code, wasm_runtime, op); \
2678 NextOp(); \
2679 } \
2680 \
2681 INSTRUCTION_HANDLER_FUNC s2r_##name(const uint8_t* code, uint32_t* sp, \
2682 WasmInterpreterRuntime* wasm_runtime, \
2683 int64_t r0, double fp0) { \
2684 from_ctype val = pop<from_ctype>(sp, code, wasm_runtime); \
2685 r0 = op; \
2686 NextOp(); \
2687 } \
2688 \
2689 INSTRUCTION_HANDLER_FUNC s2s_##name(const uint8_t* code, uint32_t* sp, \
2690 WasmInterpreterRuntime* wasm_runtime, \
2691 int64_t r0, double fp0) { \
2692 from_ctype val = pop<from_ctype>(sp, code, wasm_runtime); \
2693 push<to_ctype>(sp, code, wasm_runtime, op); \
2694 NextOp(); \
2695 }
2696 FOREACH_BITS_UNOP(DEFINE_REG_BINOP)
2697#undef DEFINE_REG_BINOP
2698
2700 // Sign extension operators
2701
2702#define FOREACH_EXTENSION_UNOP(V) \
2703 V(I32SExtendI8, int8_t, I32, int32_t, I32) \
2704 V(I32SExtendI16, int16_t, I32, int32_t, I32) \
2705 V(I64SExtendI8, int8_t, I64, int64_t, I64) \
2706 V(I64SExtendI16, int16_t, I64, int64_t, I64) \
2707 V(I64SExtendI32, int32_t, I64, int64_t, I64)
2708
2709#define DEFINE_UNOP(name, from_ctype, from_type, to_ctype, to_type) \
2710 INSTRUCTION_HANDLER_FUNC r2r_##name(const uint8_t* code, uint32_t* sp, \
2711 WasmInterpreterRuntime* wasm_runtime, \
2712 int64_t r0, double fp0) { \
2713 from_ctype val = static_cast<from_ctype>(static_cast<to_ctype>(r0)); \
2714 r0 = static_cast<to_ctype>(val); \
2715 NextOp(); \
2716 } \
2717 \
2718 INSTRUCTION_HANDLER_FUNC r2s_##name(const uint8_t* code, uint32_t* sp, \
2719 WasmInterpreterRuntime* wasm_runtime, \
2720 int64_t r0, double fp0) { \
2721 from_ctype val = static_cast<from_ctype>(static_cast<to_ctype>(r0)); \
2722 push<to_ctype>(sp, code, wasm_runtime, val); \
2723 NextOp(); \
2724 } \
2725 \
2726 INSTRUCTION_HANDLER_FUNC s2r_##name(const uint8_t* code, uint32_t* sp, \
2727 WasmInterpreterRuntime* wasm_runtime, \
2728 int64_t r0, double fp0) { \
2729 from_ctype val = \
2730 static_cast<from_ctype>(pop<to_ctype>(sp, code, wasm_runtime)); \
2731 r0 = static_cast<to_ctype>(val); \
2732 NextOp(); \
2733 } \
2734 \
2735 INSTRUCTION_HANDLER_FUNC s2s_##name(const uint8_t* code, uint32_t* sp, \
2736 WasmInterpreterRuntime* wasm_runtime, \
2737 int64_t r0, double fp0) { \
2738 from_ctype val = \
2739 static_cast<from_ctype>(pop<to_ctype>(sp, code, wasm_runtime)); \
2740 push<to_ctype>(sp, code, wasm_runtime, val); \
2741 NextOp(); \
2742 }
2743 FOREACH_EXTENSION_UNOP(DEFINE_UNOP)
2744#undef DEFINE_UNOP
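// Each sign-extension handler narrows to the small type and widens again:
// e.g. for I32SExtendI8 the value is truncated to int8_t and the cast back
// to int32_t replicates the sign bit, which is exactly i32.extend8_s.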
2745
2747 // Saturated truncation operators
2748
2749#define FOREACH_TRUNCSAT_UNOP(V) \
2750 V(I32SConvertSatF32, float, F32, fp0, int32_t, I32, r0) \
2751 V(I32UConvertSatF32, float, F32, fp0, uint32_t, I32, r0) \
2752 V(I32SConvertSatF64, double, F64, fp0, int32_t, I32, r0) \
2753 V(I32UConvertSatF64, double, F64, fp0, uint32_t, I32, r0) \
2754 V(I64SConvertSatF32, float, F32, fp0, int64_t, I64, r0) \
2755 V(I64UConvertSatF32, float, F32, fp0, uint64_t, I64, r0) \
2756 V(I64SConvertSatF64, double, F64, fp0, int64_t, I64, r0) \
2757 V(I64UConvertSatF64, double, F64, fp0, uint64_t, I64, r0)
2758
2759#define DEFINE_UNOP(name, from_ctype, from_type, from_reg, to_ctype, to_type, \
2760 to_reg) \
2761 INSTRUCTION_HANDLER_FUNC r2r_##name(const uint8_t* code, uint32_t* sp, \
2762 WasmInterpreterRuntime* wasm_runtime, \
2763 int64_t r0, double fp0) { \
2764 to_reg = \
2765 base::saturated_cast<to_ctype>(static_cast<from_ctype>(from_reg)); \
2766 NextOp(); \
2767 } \
2768 \
2769 INSTRUCTION_HANDLER_FUNC r2s_##name(const uint8_t* code, uint32_t* sp, \
2770 WasmInterpreterRuntime* wasm_runtime, \
2771 int64_t r0, double fp0) { \
2772 to_ctype val = \
2773 base::saturated_cast<to_ctype>(static_cast<from_ctype>(from_reg)); \
2774 push<to_ctype>(sp, code, wasm_runtime, val); \
2775 NextOp(); \
2776 } \
2777 \
2778 INSTRUCTION_HANDLER_FUNC s2r_##name(const uint8_t* code, uint32_t* sp, \
2779 WasmInterpreterRuntime* wasm_runtime, \
2780 int64_t r0, double fp0) { \
2781 to_reg = base::saturated_cast<to_ctype>( \
2782 pop<from_ctype>(sp, code, wasm_runtime)); \
2783 NextOp(); \
2784 } \
2785 \
2786 INSTRUCTION_HANDLER_FUNC s2s_##name(const uint8_t* code, uint32_t* sp, \
2787 WasmInterpreterRuntime* wasm_runtime, \
2788 int64_t r0, double fp0) { \
2789 to_ctype val = base::saturated_cast<to_ctype>( \
2790 pop<from_ctype>(sp, code, wasm_runtime)); \
2791 push<to_ctype>(sp, code, wasm_runtime, val); \
2792 NextOp(); \
2793 }
2794 FOREACH_TRUNCSAT_UNOP(DEFINE_UNOP)
2795#undef DEFINE_UNOP
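// The non-trapping (saturating) truncations rely on base::saturated_cast,
// which clamps out-of-range values to the target type's min/max and maps
// NaN to 0, matching the *.trunc_sat_* opcode semantics.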
2796
2798
2799 INSTRUCTION_HANDLER_FUNC s2s_MemoryGrow(const uint8_t* code, uint32_t* sp,
2800 WasmInterpreterRuntime* wasm_runtime,
2801 int64_t r0, double fp0) {
2802 uint32_t delta_pages = pop<uint32_t>(sp, code, wasm_runtime);
2803
2804 int32_t result = wasm_runtime->MemoryGrow(delta_pages);
2805
2806 push<int32_t>(sp, code, wasm_runtime, result);
2807
2808 NextOp();
2809 }
2810
2811 INSTRUCTION_HANDLER_FUNC s2s_Memory64Grow(
2812 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
2813 int64_t r0, double fp0) {
2814 int64_t result = -1;
2815
2816 uint64_t delta_pages = pop<uint64_t>(sp, code, wasm_runtime);
2817
2818 if (delta_pages <= std::numeric_limits<uint32_t>::max()) {
2819 result = wasm_runtime->MemoryGrow(static_cast<uint32_t>(delta_pages));
2820 }
2821
2822 push<int64_t>(sp, code, wasm_runtime, result);
2823
2824 NextOp();
2825 }
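// For memory64 the page delta arrives as a u64; anything above the u32
// range cannot possibly succeed, so MemoryGrow is only consulted for small
// deltas and the result otherwise stays -1 (the Wasm failure value).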
2826
2827 INSTRUCTION_HANDLER_FUNC s2s_MemorySize(const uint8_t* code, uint32_t* sp,
2828 WasmInterpreterRuntime* wasm_runtime,
2829 int64_t r0, double fp0) {
2830 uint64_t result = wasm_runtime->MemorySize();
2831 push<uint32_t>(sp, code, wasm_runtime, static_cast<uint32_t>(result));
2832
2833 NextOp();
2834 }
2835
2836 INSTRUCTION_HANDLER_FUNC s2s_Memory64Size(
2837 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
2838 int64_t r0, double fp0) {
2839 uint64_t result = wasm_runtime->MemorySize();
2840 push<uint64_t>(sp, code, wasm_runtime, result);
2841
2842 NextOp();
2843 }
2844
2845 INSTRUCTION_HANDLER_FUNC s2s_Return(const uint8_t* code, uint32_t* sp,
2846 WasmInterpreterRuntime* wasm_runtime,
2847 int64_t r0, double fp0) {
2848 // Break the chain of calls.
2849 Read<int32_t>(code);
2850 }
2851
2852 INSTRUCTION_HANDLER_FUNC s2s_Branch(const uint8_t* code, uint32_t* sp,
2853 WasmInterpreterRuntime* wasm_runtime,
2854 int64_t r0, double fp0) {
2855 int32_t target_offset = Read<int32_t>(code);
2856 code += (target_offset - kCodeOffsetSize);
2857
2858 NextOp();
2859 }
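// Branches are encoded as relative offsets into the generated bytecode.
// Read<int32_t> has already advanced `code` past the offset field, so the
// jump adds (target_offset - kCodeOffsetSize) to land on the target
// relative to the start of the offset slot.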
2860
2861 INSTRUCTION_HANDLER_FUNC r2s_BranchIf(const uint8_t* code, uint32_t* sp,
2862 WasmInterpreterRuntime* wasm_runtime,
2863 int64_t r0, double fp0) {
2864 int64_t cond = r0;
2865
2866 int32_t if_true_offset = Read<int32_t>(code);
2867 if (cond) {
2868 // If condition is true, jump to the target branch.
2869 code += (if_true_offset - kCodeOffsetSize);
2870 }
2871
2872 NextOp();
2873 }
2874
2875 INSTRUCTION_HANDLER_FUNC s2s_BranchIf(const uint8_t* code, uint32_t* sp,
2876 WasmInterpreterRuntime* wasm_runtime,
2877 int64_t r0, double fp0) {
2878 int32_t cond = pop<int32_t>(sp, code, wasm_runtime);
2879
2880 int32_t if_true_offset = Read<int32_t>(code);
2881 if (cond) {
2882 // If condition is true, jump to the target branch.
2883 code += (if_true_offset - kCodeOffsetSize);
2884 }
2885
2886 NextOp();
2887 }
2888
2889 INSTRUCTION_HANDLER_FUNC r2s_BranchIfWithParams(
2890 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
2891 int64_t r0, double fp0) {
2892 int64_t cond = r0;
2893
2894 int32_t if_false_offset = Read<int32_t>(code);
2895 if (!cond) {
2896 // If condition is not true, jump to the false branch.
2897 code += (if_false_offset - kCodeOffsetSize);
2898 }
2899
2900 NextOp();
2901 }
2902
2903 INSTRUCTION_HANDLER_FUNC s2s_BranchIfWithParams(
2904 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
2905 int64_t r0, double fp0) {
2906 int32_t cond = pop<int32_t>(sp, code, wasm_runtime);
2907
2908 int32_t if_false_offset = Read<int32_t>(code);
2909 if (!cond) {
2910 // If condition is not true, jump to the false branch.
2911 code += (if_false_offset - kCodeOffsetSize);
2912 }
2913
2914 NextOp();
2915 }
2916
2917 INSTRUCTION_HANDLER_FUNC r2s_If(const uint8_t* code, uint32_t* sp,
2918 WasmInterpreterRuntime* wasm_runtime,
2919 int64_t r0, double fp0) {
2920 int64_t cond = r0;
2921
2922 int32_t target_offset = Read<int32_t>(code);
2923 if (!cond) {
2924 code += (target_offset - kCodeOffsetSize);
2925 }
2926
2927 NextOp();
2928 }
2929
2930 INSTRUCTION_HANDLER_FUNC s2s_If(const uint8_t* code, uint32_t* sp,
2931 WasmInterpreterRuntime* wasm_runtime,
2932 int64_t r0, double fp0) {
2933 int32_t cond = pop<int32_t>(sp, code, wasm_runtime);
2934
2935 int32_t target_offset = Read<int32_t>(code);
2936 if (!cond) {
2937 code += (target_offset - kCodeOffsetSize);
2938 }
2939
2940 NextOp();
2941 }
2942
2943 INSTRUCTION_HANDLER_FUNC s2s_Else(const uint8_t* code, uint32_t* sp,
2944 WasmInterpreterRuntime* wasm_runtime,
2945 int64_t r0, double fp0) {
2946 int32_t target_offset = Read<int32_t>(code);
2947 code += (target_offset - kCodeOffsetSize);
2948
2949 NextOp();
2950 }
2951
2952 INSTRUCTION_HANDLER_FUNC s2s_Catch(const uint8_t* code, uint32_t* sp,
2953 WasmInterpreterRuntime* wasm_runtime,
2954 int64_t r0, double fp0) {
2955 int32_t target_offset = Read<int32_t>(code);
2956 code += (target_offset - kCodeOffsetSize);
2957
2958 NextOp();
2959 }
2960
2961 INSTRUCTION_HANDLER_FUNC s2s_CallFunction(
2962 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
2963 int64_t r0, double fp0) {
2964 uint32_t function_index = Read<int32_t>(code);
2965 uint32_t stack_pos = Read<int32_t>(code);
2966 slot_offset_t slot_offset = Read<slot_offset_t>(code);
2967 uint32_t ref_stack_fp_offset = Read<int32_t>(code);
2968 slot_offset_t return_slot_offset = 0;
2969#ifdef V8_ENABLE_DRUMBRAKE_TRACING
2970 if (v8_flags.trace_drumbrake_execution) {
2971 return_slot_offset = Read<slot_offset_t>(code);
2972 }
2973#endif // V8_ENABLE_DRUMBRAKE_TRACING
2974
2975 wasm_runtime->ExecuteFunction(code, function_index, stack_pos,
2976 ref_stack_fp_offset, slot_offset,
2977 return_slot_offset);
2978 NextOp();
2979 }
2980
2981 INSTRUCTION_HANDLER_FUNC s2s_ReturnCall(const uint8_t* code, uint32_t* sp,
2982 WasmInterpreterRuntime* wasm_runtime,
2983 int64_t r0, double fp0) {
2984 slot_offset_t rets_size = Read<slot_offset_t>(code);
2985 slot_offset_t args_size = Read<slot_offset_t>(code);
2986 uint32_t rets_refs = Read<int32_t>(code);
2987 uint32_t args_refs = Read<int32_t>(code);
2988 uint32_t function_index = Read<int32_t>(code);
2989 uint32_t stack_pos = Read<int32_t>(code);
2990 slot_offset_t slot_offset = Read<slot_offset_t>(code);
2991 uint32_t ref_stack_fp_offset = Read<int32_t>(code);
2992 slot_offset_t return_slot_offset = 0;
2993#ifdef V8_ENABLE_DRUMBRAKE_TRACING
2994 if (v8_flags.trace_drumbrake_execution) {
2995 return_slot_offset = Read<slot_offset_t>(code);
2996 }
2997#endif // V8_ENABLE_DRUMBRAKE_TRACING
2998
2999 // Moves back the stack frame to the caller stack frame.
3000 wasm_runtime->UnwindCurrentStackFrame(sp, slot_offset, rets_size, args_size,
3001 rets_refs, args_refs,
3002 ref_stack_fp_offset);
3003
3004 // Do not call wasm_runtime->ExecuteFunction(), which would add a
3005 // new C++ stack frame.
3006 wasm_runtime->PrepareTailCall(code, function_index, stack_pos,
3007 return_slot_offset);
3008 NextOp();
3009 }
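// Tail calls first unwind the current frame and then re-enter the target
// through PrepareTailCall instead of ExecuteFunction: reusing the caller's
// C++ stack frame is what gives return_call the constant-stack-space
// guarantee the Wasm tail-call proposal requires.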
3010
3011 INSTRUCTION_HANDLER_FUNC s2s_CallImportedFunction(
3012 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3013 int64_t r0, double fp0) {
3014 uint32_t function_index = Read<int32_t>(code);
3015 uint32_t stack_pos = Read<int32_t>(code);
3016 slot_offset_t slot_offset = Read<slot_offset_t>(code);
3017 uint32_t ref_stack_fp_offset = Read<int32_t>(code);
3018 slot_offset_t return_slot_offset = 0;
3019#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3020 if (v8_flags.trace_drumbrake_execution) {
3021 return_slot_offset = Read<slot_offset_t>(code);
3022 }
3023#endif // V8_ENABLE_DRUMBRAKE_TRACING
3024
3025 wasm_runtime->ExecuteImportedFunction(code, function_index, stack_pos,
3026 ref_stack_fp_offset, slot_offset,
3027 return_slot_offset);
3028 NextOp();
3029 }
3030
3031 INSTRUCTION_HANDLER_FUNC s2s_ReturnCallImportedFunction(
3032 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3033 int64_t r0, double fp0) {
3034 slot_offset_t rets_size = Read<slot_offset_t>(code);
3035 slot_offset_t args_size = Read<slot_offset_t>(code);
3036 uint32_t rets_refs = Read<int32_t>(code);
3037 uint32_t args_refs = Read<int32_t>(code);
3038 uint32_t function_index = Read<int32_t>(code);
3039 uint32_t stack_pos = Read<int32_t>(code);
3040 slot_offset_t slot_offset = Read<slot_offset_t>(code);
3041 uint32_t ref_stack_fp_offset = Read<int32_t>(code);
3042 slot_offset_t return_slot_offset = 0;
3043#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3044 if (v8_flags.trace_drumbrake_execution) {
3045 return_slot_offset = Read<slot_offset_t>(code);
3046 }
3047#endif // V8_ENABLE_DRUMBRAKE_TRACING
3048
3049 // Moves back the stack frame to the caller stack frame.
3050 wasm_runtime->UnwindCurrentStackFrame(sp, slot_offset, rets_size, args_size,
3051 rets_refs, args_refs,
3052 ref_stack_fp_offset);
3053
3054 wasm_runtime->ExecuteImportedFunction(code, function_index, stack_pos, 0, 0,
3055 return_slot_offset, true);
3056
3057 NextOp();
3058 }
3059
3060 INSTRUCTION_HANDLER_FUNC s2s_CallIndirect(
3061 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3062 int64_t r0, double fp0) {
3063 uint32_t entry_index = pop<uint32_t>(sp, code, wasm_runtime);
3064 uint32_t table_index = Read<int32_t>(code);
3065 uint32_t sig_index = Read<int32_t>(code);
3066 uint32_t stack_pos = Read<int32_t>(code);
3067 slot_offset_t slot_offset = Read<slot_offset_t>(code);
3068 uint32_t ref_stack_fp_offset = Read<int32_t>(code);
3069 slot_offset_t return_slot_offset = 0;
3070#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3071 if (v8_flags.trace_drumbrake_execution) {
3072 return_slot_offset = Read<slot_offset_t>(code);
3073 }
3074#endif // V8_ENABLE_DRUMBRAKE_TRACING
3075
3076 // This function can trap.
3077 wasm_runtime->ExecuteIndirectCall(code, table_index, sig_index, entry_index,
3078 stack_pos, sp, ref_stack_fp_offset,
3079 slot_offset, return_slot_offset, false);
3080 NextOp();
3081 }
3082
3083 INSTRUCTION_HANDLER_FUNC s2s_CallIndirect64(
3084 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3085 int64_t r0, double fp0) {
3086 uint64_t entry_index_64 = pop<uint64_t>(sp, code, wasm_runtime);
3087 if (entry_index_64 > std::numeric_limits<uint32_t>::max()) {
3088 TRAP(TrapReason::kTrapTableOutOfBounds)
3089 }
3090 uint32_t entry_index = static_cast<uint32_t>(entry_index_64);
3091 uint32_t table_index = Read<int32_t>(code);
3092 uint32_t sig_index = Read<int32_t>(code);
3093 uint32_t stack_pos = Read<int32_t>(code);
3094 slot_offset_t slot_offset = Read<slot_offset_t>(code);
3095 uint32_t ref_stack_fp_offset = Read<int32_t>(code);
3096 slot_offset_t return_slot_offset = 0;
3097#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3098 if (v8_flags.trace_drumbrake_execution) {
3099 return_slot_offset = Read<slot_offset_t>(code);
3100 }
3101#endif // V8_ENABLE_DRUMBRAKE_TRACING
3102
3103 // This function can trap.
3104 wasm_runtime->ExecuteIndirectCall(code, table_index, sig_index, entry_index,
3105 stack_pos, sp, ref_stack_fp_offset,
3106 slot_offset, return_slot_offset, false);
3107 NextOp();
3108 }
3109
3110 INSTRUCTION_HANDLER_FUNC s2s_ReturnCallIndirect(
3111 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3112 int64_t r0, double fp0) {
3113 slot_offset_t rets_size = Read<slot_offset_t>(code);
3114 slot_offset_t args_size = Read<slot_offset_t>(code);
3115 uint32_t rets_refs = Read<int32_t>(code);
3116 uint32_t args_refs = Read<int32_t>(code);
3117 uint32_t entry_index = pop<uint32_t>(sp, code, wasm_runtime);
3118 uint32_t table_index = Read<int32_t>(code);
3119 uint32_t sig_index = Read<int32_t>(code);
3120 uint32_t stack_pos = Read<int32_t>(code);
3121 slot_offset_t slot_offset = Read<slot_offset_t>(code);
3122 uint32_t ref_stack_fp_offset = Read<int32_t>(code);
3123 slot_offset_t return_slot_offset = 0;
3124#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3125 if (v8_flags.trace_drumbrake_execution) {
3126 return_slot_offset = Read<slot_offset_t>(code);
3127 }
3128#endif // V8_ENABLE_DRUMBRAKE_TRACING
3129
3130 // Moves back the stack frame to the caller stack frame.
3131 wasm_runtime->UnwindCurrentStackFrame(sp, slot_offset, rets_size, args_size,
3132 rets_refs, args_refs,
3133 ref_stack_fp_offset);
3134
3135 // This function can trap.
3136 wasm_runtime->ExecuteIndirectCall(code, table_index, sig_index, entry_index,
3137 stack_pos, sp, 0, 0, return_slot_offset,
3138 true);
3139 NextOp();
3140 }
3141
3142 INSTRUCTION_HANDLER_FUNC s2s_ReturnCallIndirect64(
3143 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3144 int64_t r0, double fp0) {
3145 slot_offset_t rets_size = Read<slot_offset_t>(code);
3146 slot_offset_t args_size = Read<slot_offset_t>(code);
3147 uint32_t rets_refs = Read<int32_t>(code);
3148 uint32_t args_refs = Read<int32_t>(code);
3149 uint64_t entry_index_64 = pop<uint64_t>(sp, code, wasm_runtime);
3150 if (entry_index_64 > std::numeric_limits<uint32_t>::max()) {
3151 TRAP(TrapReason::kTrapTableOutOfBounds)
3152 }
3153 uint32_t entry_index = static_cast<uint32_t>(entry_index_64);
3154 uint32_t table_index = Read<int32_t>(code);
3155 uint32_t sig_index = Read<int32_t>(code);
3156 uint32_t stack_pos = Read<int32_t>(code);
3157 slot_offset_t slot_offset = Read<slot_offset_t>(code);
3158 uint32_t ref_stack_fp_offset = Read<int32_t>(code);
3159 slot_offset_t return_slot_offset = 0;
3160#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3161 if (v8_flags.trace_drumbrake_execution) {
3162 return_slot_offset = Read<slot_offset_t>(code);
3163 }
3164#endif // V8_ENABLE_DRUMBRAKE_TRACING
3165
3166 // Moves back the stack frame to the caller stack frame.
3167 wasm_runtime->UnwindCurrentStackFrame(sp, slot_offset, rets_size, args_size,
3168 rets_refs, args_refs,
3169 ref_stack_fp_offset);
3170
3171 // This function can trap.
3172 wasm_runtime->ExecuteIndirectCall(code, table_index, sig_index, entry_index,
3173 stack_pos, sp, 0, 0, return_slot_offset,
3174 true);
3175 NextOp();
3176 }
3177
3178 INSTRUCTION_HANDLER_FUNC r2s_BrTable(const uint8_t* code, uint32_t* sp,
3179 WasmInterpreterRuntime* wasm_runtime,
3180 int64_t r0, double fp0) {
3181 uint32_t cond = static_cast<int32_t>(r0);
3182
3183 uint32_t table_length = Read<int32_t>(code);
3184 uint32_t index = cond < table_length ? cond : table_length;
3185
3186 int32_t target_offset = base::ReadUnalignedValue<int32_t>(
3187 reinterpret_cast<Address>(code + index * kCodeOffsetSize));
3188 code += (target_offset + index * kCodeOffsetSize);
3189
3190 NextOp();
3191 }
3192
3193 INSTRUCTION_HANDLER_FUNC s2s_BrTable(const uint8_t* code, uint32_t* sp,
3194 WasmInterpreterRuntime* wasm_runtime,
3195 int64_t r0, double fp0) {
3196 uint32_t cond = pop<uint32_t>(sp, code, wasm_runtime);
3197
3198 uint32_t table_length = Read<int32_t>(code);
3199 uint32_t index = cond < table_length ? cond : table_length;
3200
3201 int32_t target_offset = base::ReadUnalignedValue<int32_t>(
3202 reinterpret_cast<Address>(code + index * kCodeOffsetSize));
3203 code += (target_offset + index * kCodeOffsetSize);
3204
3205 NextOp();
3206 }
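// br_table clamps the selector with `cond < table_length ? cond :
// table_length`, so indices past the end all select the entry at
// table_length, where the bytecode generator places the default target.
// Each entry is a kCodeOffsetSize-wide relative offset, read directly out
// of the bytecode stream.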
3207
3208 INSTRUCTION_HANDLER_FUNC s2s_CopySlotMulti(
3209 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3210 int64_t r0, double fp0) {
3211 uint32_t params_count = Read<int32_t>(code);
3212 DCHECK(params_count > 1 && params_count < 32);
3213
3214 uint32_t arg_size_mask = Read<int32_t>(code);
3215
3216 slot_offset_t to = Read<slot_offset_t>(code);
3217 for (uint32_t i = 0; i < params_count; i++) {
3218 slot_offset_t from = Read<slot_offset_t>(code);
3219 bool is_64 = arg_size_mask & (1 << i);
3220 if (is_64) {
3221 base::WriteUnalignedValue<uint64_t>(
3222 reinterpret_cast<Address>(sp + to),
3223 base::ReadUnalignedValue<uint64_t>(
3224 reinterpret_cast<Address>(sp + from)));
3225
3226#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3227 if (v8_flags.trace_drumbrake_execution &&
3228 v8_flags.trace_drumbrake_execution_verbose) {
3229 wasm_runtime->Trace("COPYSLOT64 %d %d %" PRIx64 "\n", from, to,
3230 base::ReadUnalignedValue<uint64_t>(
3231 reinterpret_cast<Address>(sp + to)));
3232 }
3233#endif // V8_ENABLE_DRUMBRAKE_TRACING
3234
3235 to += sizeof(uint64_t) / sizeof(uint32_t);
3236 } else {
3237 base::WriteUnalignedValue<uint32_t>(
3238 reinterpret_cast<Address>(sp + to),
3239 base::ReadUnalignedValue<uint32_t>(
3240 reinterpret_cast<Address>(sp + from)));
3241
3242#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3243 if (v8_flags.trace_drumbrake_execution &&
3244 v8_flags.trace_drumbrake_execution_verbose) {
3245 wasm_runtime->Trace("COPYSLOT32 %d %d %08x\n", from, to,
3246 *reinterpret_cast<int32_t*>(sp + to));
3247 }
3248#endif // V8_ENABLE_DRUMBRAKE_TRACING
3249
3250 to += sizeof(uint32_t) / sizeof(uint32_t);
3251 }
3252 }
3253
3254 NextOp();
3255 }
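// CopySlotMulti copies up to 31 parameter slots in one dispatch:
// arg_size_mask has one bit per parameter (hence the params_count < 32
// DCHECK), with bit i selecting a 64-bit copy for parameter i and the
// destination cursor advancing by one or two 32-bit slots accordingly.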
3256
3259 int64_t r0, double fp0) {
3260 slot_offset_t to = Read<slot_offset_t>(code);
3261 slot_offset_t from0 = Read<slot_offset_t>(code);
3262 slot_offset_t from1 = Read<slot_offset_t>(code);
3263
3264 base::WriteUnalignedValue<uint32_t>(
3265 reinterpret_cast<Address>(sp + to),
3266 base::ReadUnalignedValue<uint32_t>(
3267 reinterpret_cast<Address>(sp + from0)));
3268
3269#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3270 if (v8_flags.trace_drumbrake_execution &&
3271 v8_flags.trace_drumbrake_execution_verbose) {
3272 wasm_runtime->Trace("COPYSLOT32 %d %d %08x\n", from0, to,
3273 *reinterpret_cast<int32_t*>(sp + to));
3274 }
3275#endif // V8_ENABLE_DRUMBRAKE_TRACING
3276
3277 to += sizeof(uint32_t) / sizeof(uint32_t);
3278
3279 base::WriteUnalignedValue<uint32_t>(
3280 reinterpret_cast<Address>(sp + to),
3281 base::ReadUnalignedValue<uint32_t>(
3282 reinterpret_cast<Address>(sp + from1)));
3283
3284#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3285 if (v8_flags.trace_drumbrake_execution &&
3286 v8_flags.trace_drumbrake_execution_verbose) {
3287 wasm_runtime->Trace("COPYSLOT32 %d %d %08x\n", from1, to,
3288 *reinterpret_cast<int32_t*>(sp + to));
3289 }
3290#endif // V8_ENABLE_DRUMBRAKE_TRACING
3291
3292 NextOp();
3293 }
3294
3297 int64_t r0, double fp0) {
3298 slot_offset_t to = Read<slot_offset_t>(code);
3299 slot_offset_t from0 = Read<slot_offset_t>(code);
3300 slot_offset_t from1 = Read<slot_offset_t>(code);
3301
3302 base::WriteUnalignedValue<uint64_t>(
3303 reinterpret_cast<Address>(sp + to),
3304 base::ReadUnalignedValue<uint64_t>(
3305 reinterpret_cast<Address>(sp + from0)));
3306
3307#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3308 if (v8_flags.trace_drumbrake_execution &&
3309 v8_flags.trace_drumbrake_execution_verbose) {
3310 wasm_runtime->Trace("COPYSLOT64 %d %d %" PRIx64 "\n", from0, to,
3311 base::ReadUnalignedValue<uint64_t>(
3312 reinterpret_cast<Address>(sp + to)));
3313 }
3314#endif // V8_ENABLE_DRUMBRAKE_TRACING
3315
3316 to += sizeof(uint64_t) / sizeof(uint32_t);
3317
3318 base::WriteUnalignedValue<uint32_t>(
3319 reinterpret_cast<Address>(sp + to),
3320 base::ReadUnalignedValue<uint32_t>(
3321 reinterpret_cast<Address>(sp + from1)));
3322
3323#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3324 if (v8_flags.trace_drumbrake_execution &&
3325 v8_flags.trace_drumbrake_execution_verbose) {
3326 wasm_runtime->Trace("COPYSLOT32 %d %d %08x\n", from1, to,
3327 *reinterpret_cast<int32_t*>(sp + to));
3328 }
3329#endif // V8_ENABLE_DRUMBRAKE_TRACING
3330
3331 NextOp();
3332 }
3333
3336 int64_t r0, double fp0) {
3337 slot_offset_t to = Read<slot_offset_t>(code);
3338 slot_offset_t from0 = Read<slot_offset_t>(code);
3339 slot_offset_t from1 = Read<slot_offset_t>(code);
3340
3341 base::WriteUnalignedValue<uint32_t>(
3342 reinterpret_cast<Address>(sp + to),
3343 base::ReadUnalignedValue<uint32_t>(
3344 reinterpret_cast<Address>(sp + from0)));
3345
3346#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3347 if (v8_flags.trace_drumbrake_execution &&
3348 v8_flags.trace_drumbrake_execution_verbose) {
3349 wasm_runtime->Trace("COPYSLOT32 %d %d %08x\n", from0, to,
3350 *reinterpret_cast<int32_t*>(sp + to));
3351 }
3352#endif // V8_ENABLE_DRUMBRAKE_TRACING
3353
3354 to += sizeof(uint32_t) / sizeof(uint32_t);
3355
3356 base::WriteUnalignedValue<uint64_t>(
3357 reinterpret_cast<Address>(sp + to),
3358 base::ReadUnalignedValue<uint64_t>(
3359 reinterpret_cast<Address>(sp + from1)));
3360
3361#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3362 if (v8_flags.trace_drumbrake_execution &&
3363 v8_flags.trace_drumbrake_execution_verbose) {
3364 wasm_runtime->Trace("COPYSLOT64 %d %d %" PRIx64 "\n", from1, to,
3365 base::ReadUnalignedValue<uint64_t>(
3366 reinterpret_cast<Address>(sp + to)));
3367 }
3368#endif // V8_ENABLE_DRUMBRAKE_TRACING
3369
3370 NextOp();
3371 }
3372
3375 int64_t r0, double fp0) {
3376 slot_offset_t to = Read<slot_offset_t>(code);
3377 slot_offset_t from0 = Read<slot_offset_t>(code);
3378 slot_offset_t from1 = Read<slot_offset_t>(code);
3379
3380 base::WriteUnalignedValue<uint64_t>(
3381 reinterpret_cast<Address>(sp + to),
3382 base::ReadUnalignedValue<uint64_t>(
3383 reinterpret_cast<Address>(sp + from0)));
3384
3385#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3386 if (v8_flags.trace_drumbrake_execution &&
3387 v8_flags.trace_drumbrake_execution_verbose) {
3388 wasm_runtime->Trace("COPYSLOT64 %d %d %" PRIx64 "\n", from0, to,
3389 base::ReadUnalignedValue<uint64_t>(
3390 reinterpret_cast<Address>(sp + to)));
3391 }
3392#endif // V8_ENABLE_DRUMBRAKE_TRACING
3393
3394 to += sizeof(uint64_t) / sizeof(uint32_t);
3395
3396 base::WriteUnalignedValue<uint64_t>(
3397 reinterpret_cast<Address>(sp + to),
3398 base::ReadUnalignedValue<uint64_t>(
3399 reinterpret_cast<Address>(sp + from1)));
3400
3401#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3402 if (v8_flags.trace_drumbrake_execution &&
3403 v8_flags.trace_drumbrake_execution_verbose) {
3404 wasm_runtime->Trace("COPYSLOT64 %d %d %" PRIx64 "\n", from1, to,
3405 base::ReadUnalignedValue<uint64_t>(
3406 reinterpret_cast<Address>(sp + to)));
3407 }
3408#endif // V8_ENABLE_DRUMBRAKE_TRACING
3409
3410 NextOp();
3411 }
3412
3413 INSTRUCTION_HANDLER_FUNC s2s_CopySlot32(const uint8_t* code, uint32_t* sp,
3414 WasmInterpreterRuntime* wasm_runtime,
3415 int64_t r0, double fp0) {
3416 slot_offset_t from = Read<slot_offset_t>(code);
3417 slot_offset_t to = Read<slot_offset_t>(code);
3418 base::WriteUnalignedValue<uint32_t>(
3419 reinterpret_cast<Address>(sp + to),
3420 base::ReadUnalignedValue<uint32_t>(
3421 reinterpret_cast<Address>(sp + from)));
3422
3423#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3424 if (v8_flags.trace_drumbrake_execution &&
3425 v8_flags.trace_drumbrake_execution_verbose) {
3426 wasm_runtime->Trace("COPYSLOT32 %d %d %08x\n", from, to,
3427 *reinterpret_cast<int32_t*>(sp + to));
3428 }
3429#endif // V8_ENABLE_DRUMBRAKE_TRACING
3430
3431 NextOp();
3432 }
3433
3434 INSTRUCTION_HANDLER_FUNC s2s_CopySlot32x2(
3435 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3436 int64_t r0, double fp0) {
3437 slot_offset_t from = Read<slot_offset_t>(code);
3438 slot_offset_t to = Read<slot_offset_t>(code);
3439 base::WriteUnalignedValue<uint32_t>(
3440 reinterpret_cast<Address>(sp + to),
3441 base::ReadUnalignedValue<uint32_t>(
3442 reinterpret_cast<Address>(sp + from)));
3443#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3444 if (v8_flags.trace_drumbrake_execution &&
3445 v8_flags.trace_drumbrake_execution_verbose) {
3446 wasm_runtime->Trace("COPYSLOT32 %d %d %08x\n", from, to,
3447 base::ReadUnalignedValue<uint32_t>(
3448 reinterpret_cast<Address>(sp + to)));
3449 }
3450#endif // V8_ENABLE_DRUMBRAKE_TRACING
3451
3452 from = Read<slot_offset_t>(code);
3453 to = Read<slot_offset_t>(code);
3454 base::WriteUnalignedValue<uint32_t>(
3455 reinterpret_cast<Address>(sp + to),
3456 base::ReadUnalignedValue<uint32_t>(
3457 reinterpret_cast<Address>(sp + from)));
3458#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3459 if (v8_flags.trace_drumbrake_execution &&
3460 v8_flags.trace_drumbrake_execution_verbose) {
3461 wasm_runtime->Trace("COPYSLOT32 %d %d %08x\n", from, to,
3462 base::ReadUnalignedValue<uint32_t>(
3463 reinterpret_cast<Address>(sp + to)));
3464 }
3465#endif // V8_ENABLE_DRUMBRAKE_TRACING
3466
3467 NextOp();
3468 }
3469
3470 INSTRUCTION_HANDLER_FUNC s2s_CopySlot64(const uint8_t* code, uint32_t* sp,
3471 WasmInterpreterRuntime* wasm_runtime,
3472 int64_t r0, double fp0) {
3473 slot_offset_t from = Read<slot_offset_t>(code);
3474 slot_offset_t to = Read<slot_offset_t>(code);
3475 base::WriteUnalignedValue<uint64_t>(
3476 reinterpret_cast<Address>(sp + to),
3477 base::ReadUnalignedValue<uint64_t>(
3478 reinterpret_cast<Address>(sp + from)));
3479
3480#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3481 if (v8_flags.trace_drumbrake_execution &&
3482 v8_flags.trace_drumbrake_execution_verbose) {
3483 wasm_runtime->Trace("COPYSLOT64 %d %d %" PRIx64 "\n", from, to,
3484 base::ReadUnalignedValue<uint64_t>(
3485 reinterpret_cast<Address>(sp + to)));
3486 }
3487#endif // V8_ENABLE_DRUMBRAKE_TRACING
3488
3489 NextOp();
3490 }
3491
3492 INSTRUCTION_HANDLER_FUNC s2s_CopySlot128(const uint8_t* code, uint32_t* sp,
3493 WasmInterpreterRuntime* wasm_runtime,
3494 int64_t r0, double fp0) {
3495 slot_offset_t from = Read<slot_offset_t>(code);
3496 slot_offset_t to = Read<slot_offset_t>(code);
3497 base::WriteUnalignedValue<Simd128>(
3498 reinterpret_cast<Address>(sp + to),
3499 base::ReadUnalignedValue<Simd128>(
3500 reinterpret_cast<Address>(sp + from)));
3501
3502#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3503 if (v8_flags.trace_drumbrake_execution &&
3504 v8_flags.trace_drumbrake_execution_verbose) {
3505 wasm_runtime->Trace(
3506 "COPYSLOT128 %d %d %" PRIx64 "`%" PRIx64 "\n", from, to,
3507 base::ReadUnalignedValue<uint64_t>(
3508 reinterpret_cast<Address>(sp + to)),
3509 base::ReadUnalignedValue<uint64_t>(
3510 reinterpret_cast<Address>(sp + to + sizeof(uint64_t))));
3511 }
3512#endif // V8_ENABLE_DRUMBRAKE_TRACING
3513
3514 NextOp();
3515 }
3516
3517 INSTRUCTION_HANDLER_FUNC s2s_CopySlot64x2(
3518 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3519 int64_t r0, double fp0) {
3520 slot_offset_t from = Read<slot_offset_t>(code);
3521 slot_offset_t to = Read<slot_offset_t>(code);
3522 base::WriteUnalignedValue<uint64_t>(
3523 reinterpret_cast<Address>(sp + to),
3524 base::ReadUnalignedValue<uint64_t>(
3525 reinterpret_cast<Address>(sp + from)));
3526#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3527 if (v8_flags.trace_drumbrake_execution &&
3528 v8_flags.trace_drumbrake_execution_verbose) {
3529 wasm_runtime->Trace("COPYSLOT64 %d %d %" PRIx64 "\n", from, to,
3530 base::ReadUnalignedValue<uint64_t>(
3531 reinterpret_cast<Address>(sp + to)));
3532 }
3533#endif // V8_ENABLE_DRUMBRAKE_TRACING
3534
3535 from = Read<slot_offset_t>(code);
3536 to = Read<slot_offset_t>(code);
3537 base::WriteUnalignedValue<uint64_t>(
3538 reinterpret_cast<Address>(sp + to),
3539 base::ReadUnalignedValue<uint64_t>(
3540 reinterpret_cast<Address>(sp + from)));
3541#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3542 if (v8_flags.trace_drumbrake_execution &&
3543 v8_flags.trace_drumbrake_execution_verbose) {
3544 wasm_runtime->Trace("COPYSLOT64 %d %d %" PRIx64 "\n", from, to,
3545 base::ReadUnalignedValue<uint64_t>(
3546 reinterpret_cast<Address>(sp + to)));
3547 }
3548#endif // V8_ENABLE_DRUMBRAKE_TRACING
3549
3550 NextOp();
3551 }
3552
3553 INSTRUCTION_HANDLER_FUNC s2s_CopySlotRef(
3554 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3555 int64_t r0, double fp0) {
3556 uint32_t from = Read<int32_t>(code);
3557 uint32_t to = Read<int32_t>(code);
3558 wasm_runtime->StoreWasmRef(to, wasm_runtime->ExtractWasmRef(from));
3559
3560#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3561 if (v8_flags.trace_drumbrake_execution &&
3562 v8_flags.trace_drumbrake_execution_verbose) {
3563 wasm_runtime->Trace("COPYSLOTREF %d %d\n", from, to);
3564 }
3565#endif // V8_ENABLE_DRUMBRAKE_TRACING
3566
3567 NextOp();
3568 }
3569
3570 INSTRUCTION_HANDLER_FUNC s2s_PreserveCopySlot32(
3571 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3572 int64_t r0, double fp0) {
3573 slot_offset_t from = Read<slot_offset_t>(code);
3574 slot_offset_t to = Read<slot_offset_t>(code);
3575 slot_offset_t preserve = Read<slot_offset_t>(code);
3576
3577 base::WriteUnalignedValue<uint32_t>(
3578 reinterpret_cast<Address>(sp + preserve),
3579 base::ReadUnalignedValue<uint32_t>(reinterpret_cast<Address>(sp + to)));
3580 base::WriteUnalignedValue<uint32_t>(
3581 reinterpret_cast<Address>(sp + to),
3582 base::ReadUnalignedValue<uint32_t>(
3583 reinterpret_cast<Address>(sp + from)));
3584
3585#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3586 if (v8_flags.trace_drumbrake_execution &&
3587 v8_flags.trace_drumbrake_execution_verbose) {
3588 wasm_runtime->Trace("PRESERVECOPYSLOT32 %d %d %08x\n", from, to,
3589 base::ReadUnalignedValue<uint32_t>(
3590 reinterpret_cast<Address>(sp + to)));
3591 }
3592#endif // V8_ENABLE_DRUMBRAKE_TRACING
3593
3594 NextOp();
3595 }
3596
3597 INSTRUCTION_HANDLER_FUNC s2s_PreserveCopySlot64(
3598 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3599 int64_t r0, double fp0) {
3600 slot_offset_t from = Read<slot_offset_t>(code);
3601 slot_offset_t to = Read<slot_offset_t>(code);
3602 slot_offset_t preserve = Read<slot_offset_t>(code);
3603
3604 base::WriteUnalignedValue<uint64_t>(
3605 reinterpret_cast<Address>(sp + preserve),
3606 base::ReadUnalignedValue<uint64_t>(reinterpret_cast<Address>(sp + to)));
3607 base::WriteUnalignedValue<uint64_t>(
3608 reinterpret_cast<Address>(sp + to),
3609 base::ReadUnalignedValue<uint64_t>(
3610 reinterpret_cast<Address>(sp + from)));
3611
3612#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3613 if (v8_flags.trace_drumbrake_execution &&
3614 v8_flags.trace_drumbrake_execution_verbose) {
3615 wasm_runtime->Trace("PRESERVECOPYSLOT64 %d %d %" PRIx64 "\n", from, to,
3616 base::ReadUnalignedValue<uint64_t>(
3617 reinterpret_cast<Address>(sp + to)));
3618 }
3619#endif // V8_ENABLE_DRUMBRAKE_TRACING
3620
3621 NextOp();
3622 }
3623
3624 INSTRUCTION_HANDLER_FUNC s2s_PreserveCopySlot128(
3625 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3626 int64_t r0, double fp0) {
3627 slot_offset_t from = Read<slot_offset_t>(code);
3628 slot_offset_t to = Read<slot_offset_t>(code);
3629 slot_offset_t preserve = Read<slot_offset_t>(code);
3630
3631 base::WriteUnalignedValue<Simd128>(
3632 reinterpret_cast<Address>(sp + preserve),
3633 base::ReadUnalignedValue<Simd128>(reinterpret_cast<Address>(sp + to)));
3634 base::WriteUnalignedValue<Simd128>(
3635 reinterpret_cast<Address>(sp + to),
3636 base::ReadUnalignedValue<Simd128>(
3637 reinterpret_cast<Address>(sp + from)));
3638
3639#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3640 if (v8_flags.trace_drumbrake_execution &&
3641 v8_flags.trace_drumbrake_execution_verbose) {
3642 wasm_runtime->Trace(
3643 "PRESERVECOPYSLOT128 %d %d %" PRIx64 "`%" PRIx64 "\n", from, to,
3644 base::ReadUnalignedValue<uint64_t>(
3645 reinterpret_cast<Address>(sp + to)),
3646 base::ReadUnalignedValue<uint64_t>(
3647 reinterpret_cast<Address>(sp + to + sizeof(uint64_t))));
3648 }
3649#endif // V8_ENABLE_DRUMBRAKE_TRACING
3650
3651 NextOp();
3652 }
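// Note: the PreserveCopySlot* variants take a third inline operand. Before
// the destination slot is overwritten, its old value is first saved into the
// `preserve` slot -- presumably so that a later instruction which still
// references the old contents of `to` can find them, where the plain
// CopySlot* handlers above would simply clobber them.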
3653
3654 INSTRUCTION_HANDLER_FUNC s2s_CopyR0ToSlot32(
3655 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3656 int64_t r0, double fp0) {
3657 slot_offset_t to = Read<slot_offset_t>(code);
3658 base::WriteUnalignedValue<int32_t>(reinterpret_cast<Address>(sp + to),
3659 static_cast<int32_t>(r0));
3660
3661#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3662 if (v8_flags.trace_drumbrake_execution &&
3663 v8_flags.trace_drumbrake_execution_verbose) {
3664 wasm_runtime->Trace("COPYR0TOSLOT32 %d %08x\n", to,
3665 base::ReadUnalignedValue<int32_t>(
3666 reinterpret_cast<Address>(sp + to)));
3667 }
3668#endif // V8_ENABLE_DRUMBRAKE_TRACING
3669
3670 NextOp();
3671 }
3672
3673 INSTRUCTION_HANDLER_FUNC s2s_CopyR0ToSlot64(
3674 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3675 int64_t r0, double fp0) {
3676 slot_offset_t to = Read<slot_offset_t>(code);
3677 base::WriteUnalignedValue<int64_t>(reinterpret_cast<Address>(sp + to), r0);
3678
3679#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3680 if (v8_flags.trace_drumbrake_execution &&
3681 v8_flags.trace_drumbrake_execution_verbose) {
3682 wasm_runtime->Trace("COPYR0TOSLOT64 %d %" PRIx64 "\n", to,
3683 base::ReadUnalignedValue<int64_t>(
3684 reinterpret_cast<Address>(sp + to)));
3685 }
3686#endif // V8_ENABLE_DRUMBRAKE_TRACING
3687
3688 NextOp();
3689 }
3690
3691 INSTRUCTION_HANDLER_FUNC s2s_CopyFp0ToSlot32(
3692 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3693 int64_t r0, double fp0) {
3694 slot_offset_t to = Read<slot_offset_t>(code);
3695 base::WriteUnalignedValue<float>(reinterpret_cast<Address>(sp + to),
3696 static_cast<float>(fp0));
3697
3698#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3699 if (v8_flags.trace_drumbrake_execution &&
3700 v8_flags.trace_drumbrake_execution_verbose) {
3701 wasm_runtime->Trace("COPYFP0TOSLOT32 %d %08x\n", to,
3702 base::ReadUnalignedValue<int32_t>(
3703 reinterpret_cast<Address>(sp + to)));
3704 }
3705#endif // V8_ENABLE_DRUMBRAKE_TRACING
3706
3707 NextOp();
3708 }
3709
3710 INSTRUCTION_HANDLER_FUNC s2s_CopyFp0ToSlot64(
3711 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3712 int64_t r0, double fp0) {
3713 slot_offset_t to = Read<slot_offset_t>(code);
3714 base::WriteUnalignedValue<double>(reinterpret_cast<Address>(sp + to), fp0);
3715
3716#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3717 if (v8_flags.trace_drumbrake_execution &&
3718 v8_flags.trace_drumbrake_execution_verbose) {
3719 wasm_runtime->Trace("COPYFP0TOSLOT64 %d %" PRIx64 "\n", to,
3720 base::ReadUnalignedValue<int64_t>(
3721 reinterpret_cast<Address>(sp + to)));
3722 }
3723#endif // V8_ENABLE_DRUMBRAKE_TRACING
3724
3725 NextOp();
3726 }
3727
3728 INSTRUCTION_HANDLER_FUNC s2s_PreserveCopyR0ToSlot32(
3729 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3730 int64_t r0, double fp0) {
3731 slot_offset_t to = Read<slot_offset_t>(code);
3732 slot_offset_t preserve = Read<slot_offset_t>(code);
3733 base::WriteUnalignedValue<int32_t>(
3734 reinterpret_cast<Address>(sp + preserve),
3735 base::ReadUnalignedValue<int32_t>(reinterpret_cast<Address>(sp + to)));
3736 base::WriteUnalignedValue<int32_t>(reinterpret_cast<Address>(sp + to),
3737 static_cast<int32_t>(r0));
3738
3739#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3740 if (v8_flags.trace_drumbrake_execution &&
3741 v8_flags.trace_drumbrake_execution_verbose) {
3742 wasm_runtime->Trace("PRESERVECOPYR0TOSLOT32 %d %d %08x\n", to, preserve,
3743 base::ReadUnalignedValue<int32_t>(
3744 reinterpret_cast<Address>(sp + to)));
3745 }
3746#endif // V8_ENABLE_DRUMBRAKE_TRACING
3747
3748 NextOp();
3749 }
3750
3751 INSTRUCTION_HANDLER_FUNC s2s_PreserveCopyR0ToSlot64(
3752 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3753 int64_t r0, double fp0) {
3754 slot_offset_t to = Read<slot_offset_t>(code);
3755 slot_offset_t preserve = Read<slot_offset_t>(code);
3756 base::WriteUnalignedValue<int64_t>(
3757 reinterpret_cast<Address>(sp + preserve),
3758 base::ReadUnalignedValue<int64_t>(reinterpret_cast<Address>(sp + to)));
3759 base::WriteUnalignedValue<int64_t>(reinterpret_cast<Address>(sp + to), r0);
3760
3761#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3762 if (v8_flags.trace_drumbrake_execution &&
3763 v8_flags.trace_drumbrake_execution_verbose) {
3764 wasm_runtime->Trace("PRESERVECOPYR0TOSLOT64 %d %d %" PRIx64 "\n", to,
3765 preserve,
3766 base::ReadUnalignedValue<int64_t>(
3767 reinterpret_cast<Address>(sp + to)));
3768 }
3769#endif // V8_ENABLE_DRUMBRAKE_TRACING
3770
3771 NextOp();
3772 }
3773
3774 INSTRUCTION_HANDLER_FUNC s2s_PreserveCopyFp0ToSlot32(
3775 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3776 int64_t r0, double fp0) {
3777 slot_offset_t to = Read<slot_offset_t>(code);
3778 slot_offset_t preserve = Read<slot_offset_t>(code);
3779 base::WriteUnalignedValue<float>(
3780 reinterpret_cast<Address>(sp + preserve),
3781 base::ReadUnalignedValue<float>(reinterpret_cast<Address>(sp + to)));
3782 base::WriteUnalignedValue<float>(reinterpret_cast<Address>(sp + to),
3783 static_cast<float>(fp0));
3784
3785#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3786 if (v8_flags.trace_drumbrake_execution &&
3787 v8_flags.trace_drumbrake_execution_verbose) {
3788 wasm_runtime->Trace("PRESERVECOPYFP0TOSLOT32 %d %d %08x\n", to, preserve,
3789 base::ReadUnalignedValue<int32_t>(
3790 reinterpret_cast<Address>(sp + to)));
3791 }
3792#endif // V8_ENABLE_DRUMBRAKE_TRACING
3793
3794 NextOp();
3795 }
3796
3797 INSTRUCTION_HANDLER_FUNC s2s_PreserveCopyFp0ToSlot64(
3798 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3799 int64_t r0, double fp0) {
3800 slot_offset_t to = Read<slot_offset_t>(code);
3801 slot_offset_t preserve = Read<slot_offset_t>(code);
3802 base::WriteUnalignedValue<double>(
3803 reinterpret_cast<Address>(sp + preserve),
3804 base::ReadUnalignedValue<double>(reinterpret_cast<Address>(sp + to)));
3805 base::WriteUnalignedValue<double>(reinterpret_cast<Address>(sp + to), fp0);
3806
3807#ifdef V8_ENABLE_DRUMBRAKE_TRACING
3808 if (v8_flags.trace_drumbrake_execution &&
3809 v8_flags.trace_drumbrake_execution_verbose) {
3810 wasm_runtime->Trace("PRESERVECOPYFP0TOSLOT64 %d %d %" PRIx64 "\n", to,
3811 preserve,
3812 base::ReadUnalignedValue<int64_t>(
3813 reinterpret_cast<Address>(sp + to)));
3814 }
3815#endif // V8_ENABLE_DRUMBRAKE_TRACING
3816
3817 NextOp();
3818 }
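// Note: in the CopyR0ToSlot*/CopyFp0ToSlot* handlers, r0 evidently serves as
// a cached integer register (holding i32/i64 values) and fp0 as a cached
// floating-point register (holding f32/f64 values), as the static_casts
// above show; these handlers spill the cached value into an ordinary stack
// slot, with the Preserve* variants additionally saving the slot's old value.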
3819
3820 INSTRUCTION_HANDLER_FUNC s2s_RefNull(
3821 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3822 int64_t r0, double fp0) {
3823 const uint32_t ref_bitfield = Read<int32_t>(code);
3824 ValueType ref_type = ValueType::FromRawBitField(ref_bitfield);
3825
3826 push<WasmRef>(sp, code, wasm_runtime,
3827 handle(wasm_runtime->GetNullValue(ref_type),
3828 wasm_runtime->GetIsolate()));
3829
3830 NextOp();
3831 }
3832
3833 INSTRUCTION_HANDLER_FUNC s2s_RefIsNull(
3834 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3835 int64_t r0, double fp0) {
3836 WasmRef ref = pop<WasmRef>(sp, code, wasm_runtime);
3837 push<int32_t>(sp, code, wasm_runtime, wasm_runtime->IsRefNull(ref) ? 1 : 0);
3838
3839 NextOp();
3840 }
3841
3842 INSTRUCTION_HANDLER_FUNC s2s_RefFunc(
3843 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3844 int64_t r0, double fp0) {
3845 uint32_t index = Read<int32_t>(code);
3846 push<WasmRef>(sp, code, wasm_runtime, wasm_runtime->GetFunctionRef(index));
3847
3848 NextOp();
3849 }
3850
3851 INSTRUCTION_HANDLER_FUNC s2s_RefEq(
3852 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3853 int64_t r0, double fp0) {
3854 WasmRef lhs = pop<WasmRef>(sp, code, wasm_runtime);
3855 WasmRef rhs = pop<WasmRef>(sp, code, wasm_runtime);
3856 push<int32_t>(sp, code, wasm_runtime, lhs.is_identical_to(rhs) ? 1 : 0);
3857
3858 NextOp();
3859 }
3860
3861 INSTRUCTION_HANDLER_FUNC s2s_MemoryInit(
3862 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3863 int64_t r0, double fp0) {
3864 uint32_t data_segment_index = Read<int32_t>(code);
3865 uint64_t size = pop<uint32_t>(sp, code, wasm_runtime);
3866 uint64_t src = pop<uint32_t>(sp, code, wasm_runtime);
3867 uint64_t dst = pop<uint32_t>(sp, code, wasm_runtime);
3868
3869 // This function can trap.
3870 wasm_runtime->MemoryInit(code, data_segment_index, dst, src, size);
3871
3872 NextOp();
3873 }
3874
3875 INSTRUCTION_HANDLER_FUNC s2s_Memory64Init(
3876 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3877 int64_t r0, double fp0) {
3878 uint32_t data_segment_index = Read<int32_t>(code);
3879 uint64_t size = pop<uint32_t>(sp, code, wasm_runtime);
3880 uint64_t src = pop<uint32_t>(sp, code, wasm_runtime);
3881 uint64_t dst = pop<uint64_t>(sp, code, wasm_runtime);
3882
3883 // This function can trap.
3884 wasm_runtime->MemoryInit(code, data_segment_index, dst, src, size);
3885
3886 NextOp();
3887 }
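// Note: the 32-bit variant above pops its operands as uint32_t and widens
// them to uint64_t, so both memory32 and memory64 share the same 64-bit
// MemoryInit interface on the runtime side; only the type of the popped
// destination offset differs between the two handlers.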
3888
3889 INSTRUCTION_HANDLER_FUNC s2s_DataDrop(
3890 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3891 int64_t r0, double fp0) {
3892 uint32_t index = Read<int32_t>(code);
3893
3894 wasm_runtime->DataDrop(index);
3895
3896 NextOp();
3897 }
3898
3899 INSTRUCTION_HANDLER_FUNC s2s_MemoryCopy(
3900 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3901 int64_t r0, double fp0) {
3902 uint64_t size = pop<uint32_t>(sp, code, wasm_runtime);
3903 uint64_t src = pop<uint32_t>(sp, code, wasm_runtime);
3904 uint64_t dst = pop<uint32_t>(sp, code, wasm_runtime);
3905
3906 // This function can trap.
3907 wasm_runtime->MemoryCopy(code, dst, src, size);
3908
3909 NextOp();
3910 }
3911
3912 INSTRUCTION_HANDLER_FUNC s2s_Memory64Copy(
3913 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3914 int64_t r0, double fp0) {
3915 uint64_t size = pop<uint64_t>(sp, code, wasm_runtime);
3916 uint64_t src = pop<uint64_t>(sp, code, wasm_runtime);
3917 uint64_t dst = pop<uint64_t>(sp, code, wasm_runtime);
3918
3919 // This function can trap.
3920 wasm_runtime->MemoryCopy(code, dst, src, size);
3921
3922 NextOp();
3923 }
3924
3925 INSTRUCTION_HANDLER_FUNC s2s_MemoryFill(
3926 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3927 int64_t r0, double fp0) {
3928 uint64_t size = pop<uint32_t>(sp, code, wasm_runtime);
3929 uint32_t value = pop<uint32_t>(sp, code, wasm_runtime);
3930 uint64_t dst = pop<uint32_t>(sp, code, wasm_runtime);
3931
3932 // This function can trap.
3933 wasm_runtime->MemoryFill(code, dst, value, size);
3934
3935 NextOp();
3936 }
3937
3938 INSTRUCTION_HANDLER_FUNC s2s_Memory64Fill(
3939 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3940 int64_t r0, double fp0) {
3941 uint64_t size = pop<uint64_t>(sp, code, wasm_runtime);
3942 uint32_t value = pop<uint32_t>(sp, code, wasm_runtime);
3943 uint64_t dst = pop<uint64_t>(sp, code, wasm_runtime);
3944
3945 // This function can trap.
3946 wasm_runtime->MemoryFill(code, dst, value, size);
3947
3948 NextOp();
3949 }
3950
3951 INSTRUCTION_HANDLER_FUNC s2s_TableGet(
3952 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3953 int64_t r0, double fp0) {
3954 uint32_t table_index = Read<int32_t>(code);
3955 uint32_t entry_index = pop<uint32_t>(sp, code, wasm_runtime);
3956
3957 // This function can trap.
3958 WasmRef ref;
3959 if (wasm_runtime->TableGet(code, table_index, entry_index, &ref)) {
3960 push<WasmRef>(sp, code, wasm_runtime, ref);
3961 }
3962
3963 NextOp();
3964 }
3965
3966 INSTRUCTION_HANDLER_FUNC s2s_Table64Get(
3967 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3968 int64_t r0, double fp0) {
3969 uint32_t table_index = Read<int32_t>(code);
3970 uint64_t entry_index_64 = pop<uint64_t>(sp, code, wasm_runtime);
3971
3972 if (entry_index_64 > std::numeric_limits<uint32_t>::max()) {
3973 TRAP(TrapReason::kTrapTableOutOfBounds)
3974 }
3975
3976 uint32_t entry_index = static_cast<uint32_t>(entry_index_64);
3977
3978 // This function can trap.
3979 WasmRef ref;
3980 if (wasm_runtime->TableGet(code, table_index, entry_index, &ref)) {
3981 push<WasmRef>(sp, code, wasm_runtime, ref);
3982 }
3983
3984 NextOp();
3985 }
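// Note: tables keep a 32-bit size even when indexed with 64-bit values under
// table64, so an entry index above uint32_t::max() can never be in bounds;
// the handlers above therefore trap with kTrapTableOutOfBounds before
// narrowing the index to 32 bits.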
3986
3987 INSTRUCTION_HANDLER_FUNC s2s_TableSet(
3988 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
3989 int64_t r0, double fp0) {
3990 uint32_t table_index = Read<int32_t>(code);
3991 WasmRef ref = pop<WasmRef>(sp, code, wasm_runtime);
3992 uint32_t entry_index = pop<uint32_t>(sp, code, wasm_runtime);
3993
3994 // This function can trap.
3995 wasm_runtime->TableSet(code, table_index, entry_index, ref);
3996
3997 NextOp();
3998 }
3999
4000 INSTRUCTION_HANDLER_FUNC s2s_Table64Set(
4001 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
4002 int64_t r0, double fp0) {
4003 uint32_t table_index = Read<int32_t>(code);
4004 WasmRef ref = pop<WasmRef>(sp, code, wasm_runtime);
4005 uint64_t entry_index_64 = pop<uint64_t>(sp, code, wasm_runtime);
4006
4007 if (entry_index_64 > std::numeric_limits<uint32_t>::max()) {
4008 TRAP(TrapReason::kTrapTableOutOfBounds)
4009 }
4010
4011 uint32_t entry_index = static_cast<uint32_t>(entry_index_64);
4012
4013 // This function can trap.
4014 wasm_runtime->TableSet(code, table_index, entry_index, ref);
4015
4016 NextOp();
4017 }
4018
4019 INSTRUCTION_HANDLER_FUNC s2s_TableInit(
4020 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
4021 int64_t r0, double fp0) {
4022 uint32_t table_index = Read<int32_t>(code);
4023 uint32_t element_segment_index = Read<int32_t>(code);
4024 uint32_t size = pop<uint32_t>(sp, code, wasm_runtime);
4025 uint32_t src = pop<uint32_t>(sp, code, wasm_runtime);
4026 uint32_t dst = pop<uint32_t>(sp, code, wasm_runtime);
4027
4028 // This function can trap.
4029 wasm_runtime->TableInit(code, table_index, element_segment_index, dst, src,
4030 size);
4031
4032 NextOp();
4033 }
4034
4035 INSTRUCTION_HANDLER_FUNC s2s_Table64Init(
4036 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
4037 int64_t r0, double fp0) {
4038 uint32_t table_index = Read<int32_t>(code);
4039 uint32_t element_segment_index = Read<int32_t>(code);
4040 uint32_t size = pop<uint32_t>(sp, code, wasm_runtime);
4041 uint32_t src = pop<uint32_t>(sp, code, wasm_runtime);
4042 uint64_t dst_64 = pop<uint64_t>(sp, code, wasm_runtime);
4043
4044 if (dst_64 > std::numeric_limits<uint32_t>::max()) {
4045 TRAP(TrapReason::kTrapTableOutOfBounds)
4046 }
4047
4048 uint32_t dst = static_cast<uint32_t>(dst_64);
4049
4050 // This function can trap.
4051 wasm_runtime->TableInit(code, table_index, element_segment_index, dst, src,
4052 size);
4053
4054 NextOp();
4055 }
4056
4057 INSTRUCTION_HANDLER_FUNC s2s_ElemDrop(
4058 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
4059 int64_t r0, double fp0) {
4060 uint32_t index = Read<int32_t>(code);
4061
4062 wasm_runtime->ElemDrop(index);
4063
4064 NextOp();
4065 }
4066
4067 INSTRUCTION_HANDLER_FUNC s2s_TableCopy(
4068 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
4069 int64_t r0, double fp0) {
4070 uint32_t dst_table_index = Read<int32_t>(code);
4071 uint32_t src_table_index = Read<int32_t>(code);
4072 auto size = pop<uint32_t>(sp, code, wasm_runtime);
4073 auto src = pop<uint32_t>(sp, code, wasm_runtime);
4074 auto dst = pop<uint32_t>(sp, code, wasm_runtime);
4075
4076 // This function can trap.
4077 wasm_runtime->TableCopy(code, dst_table_index, src_table_index, dst, src,
4078 size);
4079
4080 NextOp();
4081 }
4082
4083 template <typename IntN, typename IntM, typename IntK>
4084 INSTRUCTION_HANDLER_FUNC s2s_Table64CopyI(
4085 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
4086 int64_t r0, double fp0) {
4087 uint32_t dst_table_index = Read<int32_t>(code);
4088 uint32_t src_table_index = Read<int32_t>(code);
4089 auto size_64 = pop<IntK>(sp, code, wasm_runtime);
4090 auto src_64 = pop<IntM>(sp, code, wasm_runtime);
4091 auto dst_64 = pop<IntN>(sp, code, wasm_runtime);
4092
4093 if (src_64 > std::numeric_limits<uint32_t>::max() ||
4094 dst_64 > std::numeric_limits<uint32_t>::max() ||
4095 size_64 > std::numeric_limits<uint32_t>::max()) {
4096 TRAP(TrapReason::kTrapTableOutOfBounds)
4097 }
4098
4099 uint32_t size = static_cast<uint32_t>(size_64);
4100 uint32_t src = static_cast<uint32_t>(src_64);
4101 uint32_t dst = static_cast<uint32_t>(dst_64);
4102
4103 // This function can trap.
4104 wasm_runtime->TableCopy(code, dst_table_index, src_table_index, dst, src,
4105 size);
4106
4107 NextOp();
4108 }
4109 static auto constexpr s2s_Table64Copy_32_64_32 =
4110 s2s_Table64CopyI<uint32_t, uint64_t, uint32_t>;
4111 static auto constexpr s2s_Table64Copy_64_32_32 =
4112 s2s_Table64CopyI<uint64_t, uint32_t, uint32_t>;
4113 static auto constexpr s2s_Table64Copy_64_64_64 =
4114 s2s_Table64CopyI<uint64_t, uint64_t, uint64_t>;
4115
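// Note: the alias suffixes encode the widths of the popped operands in
// (dst, src, size) order; e.g. s2s_Table64Copy_32_64_32 pops a 32-bit
// destination index, a 64-bit source index, and a 32-bit count, covering the
// mixed table32/table64 combinations of table.copy.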
4116 INSTRUCTION_HANDLER_FUNC s2s_TableGrow(
4117 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
4118 int64_t r0, double fp0) {
4119 uint32_t table_index = Read<int32_t>(code);
4120 uint32_t delta = pop<uint32_t>(sp, code, wasm_runtime);
4121 WasmRef value = pop<WasmRef>(sp, code, wasm_runtime);
4122
4123 uint32_t result = wasm_runtime->TableGrow(table_index, delta, value);
4124 push<int32_t>(sp, code, wasm_runtime, result);
4125
4126 NextOp();
4127 }
4128
4129 INSTRUCTION_HANDLER_FUNC s2s_Table64Grow(
4130 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
4131 int64_t r0, double fp0) {
4132 uint32_t table_index = Read<int32_t>(code);
4133 uint64_t delta_64 = pop<uint64_t>(sp, code, wasm_runtime);
4134 WasmRef value = pop<WasmRef>(sp, code, wasm_runtime);
4135
4136 if (delta_64 > std::numeric_limits<uint32_t>::max()) {
4137 push<int64_t>(sp, code, wasm_runtime, -1);
4138 } else {
4139 uint32_t delta = static_cast<uint32_t>(delta_64);
4140 uint32_t result = wasm_runtime->TableGrow(table_index, delta, value);
4141 push<int64_t>(sp, code, wasm_runtime,
4142 static_cast<int64_t>(static_cast<int32_t>(result)));
4143 }
4144
4145 NextOp();
4146 }
4147
4148 INSTRUCTION_HANDLER_FUNC s2s_TableSize(
4149 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
4150 int64_t r0, double fp0) {
4151 uint32_t table_index = Read<int32_t>(code);
4152
4153 uint32_t size = wasm_runtime->TableSize(table_index);
4154 push<int32_t>(sp, code, wasm_runtime, size);
4155
4156 NextOp();
4157 }
4158
4159 INSTRUCTION_HANDLER_FUNC s2s_Table64Size(
4160 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
4161 int64_t r0, double fp0) {
4162 uint32_t table_index = Read<int32_t>(code);
4163
4164 uint64_t size = wasm_runtime->TableSize(table_index);
4165 push<uint64_t>(sp, code, wasm_runtime, size);
4166
4167 NextOp();
4168 }
4169
4170 INSTRUCTION_HANDLER_FUNC s2s_TableFill(
4171 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
4172 int64_t r0, double fp0) {
4173 uint32_t table_index = Read<int32_t>(code);
4174 uint32_t count = pop<uint32_t>(sp, code, wasm_runtime);
4175 WasmRef value = pop<WasmRef>(sp, code, wasm_runtime);
4176 uint32_t start = pop<uint32_t>(sp, code, wasm_runtime);
4177
4178 // This function can trap.
4179 wasm_runtime->TableFill(code, table_index, count, value, start);
4180
4181 NextOp();
4182 }
4183
4184 INSTRUCTION_HANDLER_FUNC s2s_Table64Fill(
4185 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
4186 int64_t r0, double fp0) {
4187 uint32_t table_index = Read<int32_t>(code);
4188 uint64_t count_64 = pop<uint64_t>(sp, code, wasm_runtime);
4189 WasmRef value = pop<WasmRef>(sp, code, wasm_runtime);
4190 uint64_t start_64 = pop<uint64_t>(sp, code, wasm_runtime);
4191
4192 if (count_64 > std::numeric_limits<uint32_t>::max() ||
4193 start_64 > std::numeric_limits<uint32_t>::max()) {
4194 TRAP(TrapReason::kTrapTableOutOfBounds)
4195 }
4196
4197 uint32_t count = static_cast<uint32_t>(count_64);
4198 uint32_t start = static_cast<uint32_t>(start_64);
4199
4200 // This function can trap.
4201 wasm_runtime->TableFill(code, table_index, count, value, start);
4202
4203 NextOp();
4204 }
4205
4206 INSTRUCTION_HANDLER_FUNC s2s_OnLoopBegin(
4207 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
4208 int64_t r0, double fp0) {
4209 wasm_runtime->WasmStackCheck(code, code);
4210 wasm_runtime->ResetCurrentHandleScope();
4211
4212 NextOp();
4213 }
4214
4215 INSTRUCTION_HANDLER_FUNC s2s_OnLoopBeginNoRefSlots(
4216 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
4217 int64_t r0, double fp0) {
4218 wasm_runtime->WasmStackCheck(code, code);
4219
4220 NextOp();
4221 }
4222
4223 ////////////////////////////////////////////////////////////////////////////
4224 // Atomics operators
4225
4226 template <typename MemIdx = uint32_t, typename MemOffsetT = memory_offset32_t>
4228 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
4229 int64_t r0, double fp0) {
4230 int32_t val = pop<int32_t>(sp, code, wasm_runtime);
4231
4232 uint64_t offset = Read<MemOffsetT>(code);
4233 uint64_t index = pop<MemIdx>(sp, code, wasm_runtime);
4234 uint64_t effective_index = offset + index;
4235 // Check alignment.
4236 const uint32_t align_mask = sizeof(int32_t) - 1;
4237 if (V8_UNLIKELY((effective_index & align_mask) != 0)) {
4238 TRAP(TrapReason::kTrapUnalignedAccess)
4239 }
4240 // Check bounds.
4241 if (V8_UNLIKELY(
4242 effective_index < index ||
4243 !base::IsInBounds<uint64_t>(effective_index, sizeof(uint64_t),
4244 wasm_runtime->GetMemorySize()))) {
4245 TRAP(TrapReason::kTrapMemOutOfBounds)
4246 }
4247
4248 int32_t result = wasm_runtime->AtomicNotify(effective_index, val);
4249 push<int32_t>(sp, code, wasm_runtime, result);
4250
4251 NextOp();
4252 }
4253 static auto constexpr s2s_AtomicNotify_Idx64 =
4254 s2s_AtomicNotify<uint64_t, memory_offset64_t>;
4255
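// Note: the `effective_index < index` test above catches uint64_t wraparound
// in `offset + index`; together with base::IsInBounds it rejects every
// out-of-range access. E.g. offset == ~0ull with index == 1 wraps to
// effective_index == 0, which this check traps as out of bounds.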
4256 template <typename MemIdx = uint32_t, typename MemOffsetT = memory_offset32_t>
4257 INSTRUCTION_HANDLER_FUNC s2s_I32AtomicWait(
4258 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
4259 int64_t r0, double fp0) {
4260 int64_t timeout = pop<int64_t>(sp, code, wasm_runtime);
4261 int32_t val = pop<int32_t>(sp, code, wasm_runtime);
4262
4263 uint64_t offset = Read<MemOffsetT>(code);
4264 uint64_t index = pop<MemIdx>(sp, code, wasm_runtime);
4265 uint64_t effective_index = offset + index;
4266 // Check alignment.
4267 const uint32_t align_mask = sizeof(int32_t) - 1;
4268 if (V8_UNLIKELY((effective_index & align_mask) != 0)) {
4269 TRAP(TrapReason::kTrapUnalignedAccess)
4270 }
4271 // Check bounds.
4272 if (V8_UNLIKELY(
4273 effective_index < index ||
4274 !base::IsInBounds<uint64_t>(effective_index, sizeof(uint64_t),
4275 wasm_runtime->GetMemorySize()))) {
4276 TRAP(TrapReason::kTrapMemOutOfBounds)
4277 }
4278 // Check atomics wait allowed.
4279 if (!wasm_runtime->AllowsAtomicsWait()) {
4280 TRAP(TrapReason::kTrapUnreachable)
4281 }
4282
4283 int32_t result = wasm_runtime->I32AtomicWait(effective_index, val, timeout);
4284 push<int32_t>(sp, code, wasm_runtime, result);
4285
4286 NextOp();
4287 }
4288 static auto constexpr s2s_I32AtomicWait_Idx64 =
4289 s2s_I32AtomicWait<uint64_t, memory_offset64_t>;
4290
4291 template <typename MemIdx = uint32_t, typename MemOffsetT = memory_offset32_t>
4292 INSTRUCTION_HANDLER_FUNC s2s_I64AtomicWait(
4293 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
4294 int64_t r0, double fp0) {
4295 int64_t timeout = pop<int64_t>(sp, code, wasm_runtime);
4296 int64_t val = pop<int64_t>(sp, code, wasm_runtime);
4297
4298 uint64_t offset = Read<MemOffsetT>(code);
4299 uint64_t index = pop<MemIdx>(sp, code, wasm_runtime);
4300 uint64_t effective_index = offset + index;
4301 // Check alignment.
4302 const uint32_t align_mask = sizeof(int64_t) - 1;
4303 if (V8_UNLIKELY((effective_index & align_mask) != 0)) {
4304 TRAP(TrapReason::kTrapUnalignedAccess)
4305 }
4306 // Check bounds.
4307 if (V8_UNLIKELY(
4308 effective_index < index ||
4309 !base::IsInBounds<uint64_t>(effective_index, sizeof(uint64_t),
4310 wasm_runtime->GetMemorySize()))) {
4311 TRAP(TrapReason::kTrapMemOutOfBounds)
4312 }
4313 // Check atomics wait allowed.
4314 if (!wasm_runtime->AllowsAtomicsWait()) {
4315 TRAP(TrapReason::kTrapUnreachable)
4316 }
4317
4318 int32_t result = wasm_runtime->I64AtomicWait(effective_index, val, timeout);
4319 push<int32_t>(sp, code, wasm_runtime, result);
4320
4321 NextOp();
4322 }
4323 static auto constexpr s2s_I64AtomicWait_Idx64 =
4324 s2s_I64AtomicWait<uint64_t, memory_offset64_t>;
4325
4326 INSTRUCTION_HANDLER_FUNC s2s_AtomicFence(
4327 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
4328 int64_t r0, double fp0) {
4329 std::atomic_thread_fence(std::memory_order_seq_cst);
4330 NextOp();
4331 }
4332
4333#define FOREACH_ATOMIC_BINOP(V) \
4334 V(I32AtomicAdd, Uint32, uint32_t, I32, uint32_t, I32, std::atomic_fetch_add) \
4335 V(I32AtomicAdd8U, Uint8, uint8_t, I32, uint32_t, I32, std::atomic_fetch_add) \
4336 V(I32AtomicAdd16U, Uint16, uint16_t, I32, uint32_t, I32, \
4337 std::atomic_fetch_add) \
4338 V(I32AtomicSub, Uint32, uint32_t, I32, uint32_t, I32, std::atomic_fetch_sub) \
4339 V(I32AtomicSub8U, Uint8, uint8_t, I32, uint32_t, I32, std::atomic_fetch_sub) \
4340 V(I32AtomicSub16U, Uint16, uint16_t, I32, uint32_t, I32, \
4341 std::atomic_fetch_sub) \
4342 V(I32AtomicAnd, Uint32, uint32_t, I32, uint32_t, I32, std::atomic_fetch_and) \
4343 V(I32AtomicAnd8U, Uint8, uint8_t, I32, uint32_t, I32, std::atomic_fetch_and) \
4344 V(I32AtomicAnd16U, Uint16, uint16_t, I32, uint32_t, I32, \
4345 std::atomic_fetch_and) \
4346 V(I32AtomicOr, Uint32, uint32_t, I32, uint32_t, I32, std::atomic_fetch_or) \
4347 V(I32AtomicOr8U, Uint8, uint8_t, I32, uint32_t, I32, std::atomic_fetch_or) \
4348 V(I32AtomicOr16U, Uint16, uint16_t, I32, uint32_t, I32, \
4349 std::atomic_fetch_or) \
4350 V(I32AtomicXor, Uint32, uint32_t, I32, uint32_t, I32, std::atomic_fetch_xor) \
4351 V(I32AtomicXor8U, Uint8, uint8_t, I32, uint32_t, I32, std::atomic_fetch_xor) \
4352 V(I32AtomicXor16U, Uint16, uint16_t, I32, uint32_t, I32, \
4353 std::atomic_fetch_xor) \
4354 V(I32AtomicExchange, Uint32, uint32_t, I32, uint32_t, I32, \
4355 std::atomic_exchange) \
4356 V(I32AtomicExchange8U, Uint8, uint8_t, I32, uint32_t, I32, \
4357 std::atomic_exchange) \
4358 V(I32AtomicExchange16U, Uint16, uint16_t, I32, uint32_t, I32, \
4359 std::atomic_exchange) \
4360 V(I64AtomicAdd, Uint64, uint64_t, I64, uint64_t, I64, std::atomic_fetch_add) \
4361 V(I64AtomicAdd8U, Uint8, uint8_t, I32, uint64_t, I64, std::atomic_fetch_add) \
4362 V(I64AtomicAdd16U, Uint16, uint16_t, I32, uint64_t, I64, \
4363 std::atomic_fetch_add) \
4364 V(I64AtomicAdd32U, Uint32, uint32_t, I32, uint64_t, I64, \
4365 std::atomic_fetch_add) \
4366 V(I64AtomicSub, Uint64, uint64_t, I64, uint64_t, I64, std::atomic_fetch_sub) \
4367 V(I64AtomicSub8U, Uint8, uint8_t, I32, uint64_t, I64, std::atomic_fetch_sub) \
4368 V(I64AtomicSub16U, Uint16, uint16_t, I32, uint64_t, I64, \
4369 std::atomic_fetch_sub) \
4370 V(I64AtomicSub32U, Uint32, uint32_t, I32, uint64_t, I64, \
4371 std::atomic_fetch_sub) \
4372 V(I64AtomicAnd, Uint64, uint64_t, I64, uint64_t, I64, std::atomic_fetch_and) \
4373 V(I64AtomicAnd8U, Uint8, uint8_t, I32, uint64_t, I64, std::atomic_fetch_and) \
4374 V(I64AtomicAnd16U, Uint16, uint16_t, I32, uint64_t, I64, \
4375 std::atomic_fetch_and) \
4376 V(I64AtomicAnd32U, Uint32, uint32_t, I32, uint64_t, I64, \
4377 std::atomic_fetch_and) \
4378 V(I64AtomicOr, Uint64, uint64_t, I64, uint64_t, I64, std::atomic_fetch_or) \
4379 V(I64AtomicOr8U, Uint8, uint8_t, I32, uint64_t, I64, std::atomic_fetch_or) \
4380 V(I64AtomicOr16U, Uint16, uint16_t, I32, uint64_t, I64, \
4381 std::atomic_fetch_or) \
4382 V(I64AtomicOr32U, Uint32, uint32_t, I32, uint64_t, I64, \
4383 std::atomic_fetch_or) \
4384 V(I64AtomicXor, Uint64, uint64_t, I64, uint64_t, I64, std::atomic_fetch_xor) \
4385 V(I64AtomicXor8U, Uint8, uint8_t, I32, uint64_t, I64, std::atomic_fetch_xor) \
4386 V(I64AtomicXor16U, Uint16, uint16_t, I32, uint64_t, I64, \
4387 std::atomic_fetch_xor) \
4388 V(I64AtomicXor32U, Uint32, uint32_t, I32, uint64_t, I64, \
4389 std::atomic_fetch_xor) \
4390 V(I64AtomicExchange, Uint64, uint64_t, I64, uint64_t, I64, \
4391 std::atomic_exchange) \
4392 V(I64AtomicExchange8U, Uint8, uint8_t, I32, uint64_t, I64, \
4393 std::atomic_exchange) \
4394 V(I64AtomicExchange16U, Uint16, uint16_t, I32, uint64_t, I64, \
4395 std::atomic_exchange) \
4396 V(I64AtomicExchange32U, Uint32, uint32_t, I32, uint64_t, I64, \
4397 std::atomic_exchange)
4398
4399#define ATOMIC_BINOP(name, Type, ctype, type, op_ctype, op_type, operation) \
4400 template <typename MemIdx, typename MemOffsetT> \
4401 INSTRUCTION_HANDLER_FUNC s2s_##name##I(const uint8_t* code, uint32_t* sp, \
4402 WasmInterpreterRuntime* wasm_runtime, \
4403 int64_t r0, double fp0) { \
4404 ctype val = static_cast<ctype>(pop<op_ctype>(sp, code, wasm_runtime)); \
4405 \
4406 uint64_t offset = Read<MemOffsetT>(code); \
4407 uint64_t index = pop<MemIdx>(sp, code, wasm_runtime); \
4408 uint64_t effective_index = offset + index; \
4409 /* Check alignment. */ \
4410 if (V8_UNLIKELY(!IsAligned(effective_index, sizeof(ctype)))) { \
4411 TRAP(TrapReason::kTrapUnalignedAccess) \
4412 } \
4413 /* Check bounds. */ \
4414 if (V8_UNLIKELY( \
4415 effective_index < index || \
4416 !base::IsInBounds<uint64_t>(effective_index, sizeof(ctype), \
4417 wasm_runtime->GetMemorySize()))) { \
4418 TRAP(TrapReason::kTrapMemOutOfBounds) \
4419 } \
4420 static_assert(sizeof(std::atomic<ctype>) == sizeof(ctype), \
4421 "Size mismatch for types std::atomic<" #ctype \
4422 ">, and " #ctype); \
4423 \
4424 uint8_t* memory_start = wasm_runtime->GetMemoryStart(); \
4425 uint8_t* address = memory_start + effective_index; \
4426 op_ctype result = static_cast<op_ctype>( \
4427 operation(reinterpret_cast<std::atomic<ctype>*>(address), val)); \
4428 push<op_ctype>(sp, code, wasm_runtime, result); \
4429 NextOp(); \
4430 } \
4431 static auto constexpr s2s_##name = \
4432 s2s_##name##I<uint32_t, memory_offset32_t>; \
4433 static auto constexpr s2s_##name##_Idx64 = \
4434 s2s_##name##I<uint64_t, memory_offset64_t>;
4435 FOREACH_ATOMIC_BINOP(ATOMIC_BINOP)
4436#undef ATOMIC_BINOP
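// Note, as a concrete example of the expansion: the row
//   V(I32AtomicAdd, Uint32, uint32_t, I32, uint32_t, I32, std::atomic_fetch_add)
// defines s2s_I32AtomicAdd (and its _Idx64 twin), which pops the operand and
// the memory index, checks alignment and bounds, performs
// std::atomic_fetch_add on the effective memory address, and pushes the old
// value, matching the wasm i32.atomic.rmw.add semantics.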
4437
4438#define FOREACH_ATOMIC_COMPARE_EXCHANGE_OP(V) \
4439 V(I32AtomicCompareExchange, Uint32, uint32_t, I32, uint32_t, I32) \
4440 V(I32AtomicCompareExchange8U, Uint8, uint8_t, I32, uint32_t, I32) \
4441 V(I32AtomicCompareExchange16U, Uint16, uint16_t, I32, uint32_t, I32) \
4442 V(I64AtomicCompareExchange, Uint64, uint64_t, I64, uint64_t, I64) \
4443 V(I64AtomicCompareExchange8U, Uint8, uint8_t, I32, uint64_t, I64) \
4444 V(I64AtomicCompareExchange16U, Uint16, uint16_t, I32, uint64_t, I64) \
4445 V(I64AtomicCompareExchange32U, Uint32, uint32_t, I32, uint64_t, I64)
4446
4447#define ATOMIC_COMPARE_EXCHANGE_OP(name, Type, ctype, type, op_ctype, op_type) \
4448 template <typename MemIdx = uint32_t, typename MemOffsetT> \
4449 INSTRUCTION_HANDLER_FUNC s2s_##name##I(const uint8_t* code, uint32_t* sp, \
4450 WasmInterpreterRuntime* wasm_runtime, \
4451 int64_t r0, double fp0) { \
4452 ctype new_val = static_cast<ctype>(pop<op_ctype>(sp, code, wasm_runtime)); \
4453 ctype old_val = static_cast<ctype>(pop<op_ctype>(sp, code, wasm_runtime)); \
4454 \
4455 uint64_t offset = Read<MemOffsetT>(code); \
4456 uint64_t index = pop<MemIdx>(sp, code, wasm_runtime); \
4457 uint64_t effective_index = offset + index; \
4458 /* Check alignment. */ \
4459 if (V8_UNLIKELY(!IsAligned(effective_index, sizeof(ctype)))) { \
4460 TRAP(TrapReason::kTrapUnalignedAccess) \
4461 } \
4462 /* Check bounds. */ \
4463 if (V8_UNLIKELY( \
4464 effective_index < index || \
4465 !base::IsInBounds<uint64_t>(effective_index, sizeof(ctype), \
4466 wasm_runtime->GetMemorySize()))) { \
4467 TRAP(TrapReason::kTrapMemOutOfBounds) \
4468 } \
4469 static_assert(sizeof(std::atomic<ctype>) == sizeof(ctype), \
4470 "Size mismatch for types std::atomic<" #ctype \
4471 ">, and " #ctype); \
4472 \
4473 uint8_t* memory_start = wasm_runtime->GetMemoryStart(); \
4474 uint8_t* address = memory_start + effective_index; \
4475 \
4476 std::atomic_compare_exchange_strong( \
4477 reinterpret_cast<std::atomic<ctype>*>(address), &old_val, new_val); \
4478 push<op_ctype>(sp, code, wasm_runtime, static_cast<op_ctype>(old_val)); \
4479 NextOp(); \
4480 } \
4481 static auto constexpr s2s_##name = \
4482 s2s_##name##I<uint32_t, memory_offset32_t>; \
4483 static auto constexpr s2s_##name##_Idx64 = \
4484 s2s_##name##I<uint64_t, memory_offset64_t>;
4485 FOREACH_ATOMIC_COMPARE_EXCHANGE_OP(ATOMIC_COMPARE_EXCHANGE_OP)
4486#undef ATOMIC_COMPARE_EXCHANGE_OP
4487
4488#define FOREACH_ATOMIC_LOAD_OP(V) \
4489 V(I32AtomicLoad, Uint32, uint32_t, I32, uint32_t, I32) \
4490 V(I32AtomicLoad8U, Uint8, uint8_t, I32, uint32_t, I32) \
4491 V(I32AtomicLoad16U, Uint16, uint16_t, I32, uint32_t, I32) \
4492 V(I64AtomicLoad, Uint64, uint64_t, I64, uint64_t, I64) \
4493 V(I64AtomicLoad8U, Uint8, uint8_t, I32, uint64_t, I64) \
4494 V(I64AtomicLoad16U, Uint16, uint16_t, I32, uint64_t, I64) \
4495 V(I64AtomicLoad32U, Uint32, uint32_t, I32, uint64_t, I64)
4496
4497#define ATOMIC_LOAD_OP(name, Type, ctype, type, op_ctype, op_type) \
4498 template <typename MemIdx, typename MemOffsetT> \
4499 INSTRUCTION_HANDLER_FUNC s2s_##name##I(const uint8_t* code, uint32_t* sp, \
4500 WasmInterpreterRuntime* wasm_runtime, \
4501 int64_t r0, double fp0) { \
4502 uint64_t offset = Read<MemOffsetT>(code); \
4503 uint64_t index = pop<MemIdx>(sp, code, wasm_runtime); \
4504 uint64_t effective_index = offset + index; \
4505 /* Check alignment. */ \
4506 if (V8_UNLIKELY(!IsAligned(effective_index, sizeof(ctype)))) { \
4507 TRAP(TrapReason::kTrapUnalignedAccess) \
4508 } \
4509 /* Check bounds. */ \
4510 if (V8_UNLIKELY( \
4511 effective_index < index || \
4512 !base::IsInBounds<uint64_t>(effective_index, sizeof(ctype), \
4513 wasm_runtime->GetMemorySize()))) { \
4514 TRAP(TrapReason::kTrapMemOutOfBounds) \
4515 } \
4516 static_assert(sizeof(std::atomic<ctype>) == sizeof(ctype), \
4517 "Size mismatch for types std::atomic<" #ctype \
4518 ">, and " #ctype); \
4519 \
4520 uint8_t* memory_start = wasm_runtime->GetMemoryStart(); \
4521 uint8_t* address = memory_start + effective_index; \
4522 \
4523 ctype val = \
4524 std::atomic_load(reinterpret_cast<std::atomic<ctype>*>(address)); \
4525 push<op_ctype>(sp, code, wasm_runtime, static_cast<op_ctype>(val)); \
4526 NextOp(); \
4527 } \
4528 static auto constexpr s2s_##name = \
4529 s2s_##name##I<uint32_t, memory_offset32_t>; \
4530 static auto constexpr s2s_##name##_Idx64 = \
4531 s2s_##name##I<uint64_t, memory_offset64_t>;
4532 FOREACH_ATOMIC_LOAD_OP(ATOMIC_LOAD_OP)
4533#undef ATOMIC_LOAD_OP
4534
4535#define FOREACH_ATOMIC_STORE_OP(V) \
4536 V(I32AtomicStore, Uint32, uint32_t, I32, uint32_t, I32) \
4537 V(I32AtomicStore8U, Uint8, uint8_t, I32, uint32_t, I32) \
4538 V(I32AtomicStore16U, Uint16, uint16_t, I32, uint32_t, I32) \
4539 V(I64AtomicStore, Uint64, uint64_t, I64, uint64_t, I64) \
4540 V(I64AtomicStore8U, Uint8, uint8_t, I32, uint64_t, I64) \
4541 V(I64AtomicStore16U, Uint16, uint16_t, I32, uint64_t, I64) \
4542 V(I64AtomicStore32U, Uint32, uint32_t, I32, uint64_t, I64)
4543
4544#define ATOMIC_STORE_OP(name, Type, ctype, type, op_ctype, op_type) \
4545 template <typename MemIdx = uint32_t, typename MemOffsetT> \
4546 INSTRUCTION_HANDLER_FUNC s2s_##name##I(const uint8_t* code, uint32_t* sp, \
4547 WasmInterpreterRuntime* wasm_runtime, \
4548 int64_t r0, double fp0) { \
4549 ctype val = static_cast<ctype>(pop<op_ctype>(sp, code, wasm_runtime)); \
4550 \
4551 uint64_t offset = Read<MemOffsetT>(code); \
4552 uint64_t index = pop<MemIdx>(sp, code, wasm_runtime); \
4553 uint64_t effective_index = offset + index; \
4554 /* Check alignment. */ \
4555 if (V8_UNLIKELY(!IsAligned(effective_index, sizeof(ctype)))) { \
4556 TRAP(TrapReason::kTrapUnalignedAccess) \
4557 } \
4558 /* Check bounds. */ \
4559 if (V8_UNLIKELY( \
4560 effective_index < index || \
4561 !base::IsInBounds<uint64_t>(effective_index, sizeof(ctype), \
4562 wasm_runtime->GetMemorySize()))) { \
4563 TRAP(TrapReason::kTrapMemOutOfBounds) \
4564 } \
4565 static_assert(sizeof(std::atomic<ctype>) == sizeof(ctype), \
4566 "Size mismatch for types std::atomic<" #ctype \
4567 ">, and " #ctype); \
4568 \
4569 uint8_t* memory_start = wasm_runtime->GetMemoryStart(); \
4570 uint8_t* address = memory_start + effective_index; \
4571 \
4572 std::atomic_store(reinterpret_cast<std::atomic<ctype>*>(address), val); \
4573 NextOp(); \
4574 } \
4575 static auto constexpr s2s_##name = \
4576 s2s_##name##I<uint32_t, memory_offset32_t>; \
4577 static auto constexpr s2s_##name##_Idx64 = \
4578 s2s_##name##I<uint64_t, memory_offset64_t>;
4579 FOREACH_ATOMIC_STORE_OP(ATOMIC_STORE_OP)
4580#undef ATOMIC_STORE_OP
4581
4582 ////////////////////////////////////////////////////////////////////////////
4583 // SIMD instructions.
4584
4585#if V8_TARGET_BIG_ENDIAN
4586#define LANE(i, type) ((sizeof(type.val) / sizeof(type.val[0])) - (i)-1)
4587#else
4588#define LANE(i, type) (i)
4589#endif
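// Note: LANE is the identity on little-endian targets. On big-endian targets
// the elements of the val arrays are stored in reverse, so the macro mirrors
// the index -- e.g. for an int32x4 value, LANE(0, s) == 3 -- making a given
// wasm lane number refer to the same bytes on both endiannesses.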
4590
4591#define SPLAT_CASE(format, stype, valType, op_type, num) \
4592 INSTRUCTION_HANDLER_FUNC s2s_Simd##format##Splat( \
4593 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime, \
4594 int64_t r0, double fp0) { \
4595 valType v = pop<valType>(sp, code, wasm_runtime); \
4596 stype s; \
4597 for (int i = 0; i < num; i++) s.val[i] = v; \
4598 push<Simd128>(sp, code, wasm_runtime, Simd128(s)); \
4599 NextOp(); \
4600 }
4601 SPLAT_CASE(F64x2, float64x2, double, F64, 2)
4602 SPLAT_CASE(F32x4, float32x4, float, F32, 4)
4603 SPLAT_CASE(I64x2, int64x2, int64_t, I64, 2)
4604 SPLAT_CASE(I32x4, int32x4, int32_t, I32, 4)
4605 SPLAT_CASE(I16x8, int16x8, int32_t, I32, 8)
4606 SPLAT_CASE(I8x16, int8x16, int32_t, I32, 16)
4607#undef SPLAT_CASE
4608
4609#define EXTRACT_LANE_CASE(format, stype, op_type, name) \
4610 INSTRUCTION_HANDLER_FUNC s2s_Simd##format##ExtractLane( \
4611 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime, \
4612 int64_t r0, double fp0) { \
4613 uint16_t lane = Read<int16_t>(code); \
4614 DCHECK_LT(lane, 4); \
4615 Simd128 v = pop<Simd128>(sp, code, wasm_runtime); \
4616 stype s = v.to_##name(); \
4617 push(sp, code, wasm_runtime, s.val[LANE(lane, s)]); \
4618 NextOp(); \
4619 }
4620 EXTRACT_LANE_CASE(F64x2, float64x2, F64, f64x2)
4621 EXTRACT_LANE_CASE(F32x4, float32x4, F32, f32x4)
4622 EXTRACT_LANE_CASE(I64x2, int64x2, I64, i64x2)
4623 EXTRACT_LANE_CASE(I32x4, int32x4, I32, i32x4)
4624#undef EXTRACT_LANE_CASE
4625
4626// Unsigned extracts require a bit more care. The underlying array in Simd128 is
4627// signed (see wasm-value.h), so when casted to uint32_t it will be signed
4628// extended, e.g. int8_t -> int32_t -> uint32_t. So for unsigned extracts, we
4629// will cast it int8_t -> uint8_t -> uint32_t. We add the DCHECK to ensure that
4630// if the array type changes, we know to change this function.
4631#define EXTRACT_LANE_EXTEND_CASE(format, stype, name, sign, extended_type) \
4632 INSTRUCTION_HANDLER_FUNC s2s_Simd##format##ExtractLane##sign( \
4633 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime, \
4634 int64_t r0, double fp0) { \
4635 uint16_t lane = Read<int16_t>(code); \
4636 DCHECK_LT(lane, 16); \
4637 Simd128 s = pop<Simd128>(sp, code, wasm_runtime); \
4638 stype ss = s.to_##name(); \
4639 auto res = ss.val[LANE(lane, ss)]; \
4640 DCHECK(std::is_signed<decltype(res)>::value); \
4641 if (std::is_unsigned<extended_type>::value) { \
4642 using unsigned_type = std::make_unsigned<decltype(res)>::type; \
4643 push(sp, code, wasm_runtime, \
4644 static_cast<extended_type>(static_cast<unsigned_type>(res))); \
4645 } else { \
4646 push(sp, code, wasm_runtime, static_cast<extended_type>(res)); \
4647 } \
4648 NextOp(); \
4649 }
4650 EXTRACT_LANE_EXTEND_CASE(I16x8, int16x8, i16x8, S, int32_t)
4651 EXTRACT_LANE_EXTEND_CASE(I16x8, int16x8, i16x8, U, uint32_t)
4652 EXTRACT_LANE_EXTEND_CASE(I8x16, int8x16, i8x16, S, int32_t)
4653 EXTRACT_LANE_EXTEND_CASE(I8x16, int8x16, i8x16, U, uint32_t)
4654#undef EXTRACT_LANE_EXTEND_CASE
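// Worked example of the cast chain described above: extracting the int8_t
// lane value -1 as unsigned must zero-extend. A direct int8_t -> uint32_t
// cast sign-extends to 0xFFFFFFFF, while int8_t -> uint8_t -> uint32_t
// yields 0xFF, which is the result I8x16ExtractLaneU requires.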
4655
4656#define BINOP_CASE(op, name, stype, count, expr) \
4657 INSTRUCTION_HANDLER_FUNC s2s_Simd##op(const uint8_t* code, uint32_t* sp, \
4658 WasmInterpreterRuntime* wasm_runtime, \
4659 int64_t r0, double fp0) { \
4660 stype s2 = pop<Simd128>(sp, code, wasm_runtime).to_##name(); \
4661 stype s1 = pop<Simd128>(sp, code, wasm_runtime).to_##name(); \
4662 stype res; \
4663 for (size_t i = 0; i < count; ++i) { \
4664 auto a = s1.val[LANE(i, s1)]; \
4665 auto b = s2.val[LANE(i, s2)]; \
4666 res.val[LANE(i, res)] = expr; \
4667 } \
4668 push<Simd128>(sp, code, wasm_runtime, Simd128(res)); \
4669 NextOp(); \
4670 }
4671 BINOP_CASE(F64x2Add, f64x2, float64x2, 2, a + b)
4672 BINOP_CASE(F64x2Sub, f64x2, float64x2, 2, a - b)
4673 BINOP_CASE(F64x2Mul, f64x2, float64x2, 2, a* b)
4674 BINOP_CASE(F64x2Div, f64x2, float64x2, 2, base::Divide(a, b))
4675 BINOP_CASE(F64x2Min, f64x2, float64x2, 2, JSMin(a, b))
4676 BINOP_CASE(F64x2Max, f64x2, float64x2, 2, JSMax(a, b))
4677 BINOP_CASE(F64x2Pmin, f64x2, float64x2, 2, std::min(a, b))
4678 BINOP_CASE(F64x2Pmax, f64x2, float64x2, 2, std::max(a, b))
4679 BINOP_CASE(F32x4RelaxedMin, f32x4, float32x4, 4, std::min(a, b))
4680 BINOP_CASE(F32x4RelaxedMax, f32x4, float32x4, 4, std::max(a, b))
4681 BINOP_CASE(F64x2RelaxedMin, f64x2, float64x2, 2, std::min(a, b))
4682 BINOP_CASE(F64x2RelaxedMax, f64x2, float64x2, 2, std::max(a, b))
4683 BINOP_CASE(F32x4Add, f32x4, float32x4, 4, a + b)
4684 BINOP_CASE(F32x4Sub, f32x4, float32x4, 4, a - b)
4685 BINOP_CASE(F32x4Mul, f32x4, float32x4, 4, a* b)
4686 BINOP_CASE(F32x4Div, f32x4, float32x4, 4, a / b)
4687 BINOP_CASE(F32x4Min, f32x4, float32x4, 4, JSMin(a, b))
4688 BINOP_CASE(F32x4Max, f32x4, float32x4, 4, JSMax(a, b))
4689 BINOP_CASE(F32x4Pmin, f32x4, float32x4, 4, std::min(a, b))
4690 BINOP_CASE(F32x4Pmax, f32x4, float32x4, 4, std::max(a, b))
4691 BINOP_CASE(I64x2Add, i64x2, int64x2, 2, base::AddWithWraparound(a, b))
4692 BINOP_CASE(I64x2Sub, i64x2, int64x2, 2, base::SubWithWraparound(a, b))
4693 BINOP_CASE(I64x2Mul, i64x2, int64x2, 2, base::MulWithWraparound(a, b))
4694 BINOP_CASE(I32x4Add, i32x4, int32x4, 4, base::AddWithWraparound(a, b))
4695 BINOP_CASE(I32x4Sub, i32x4, int32x4, 4, base::SubWithWraparound(a, b))
4696 BINOP_CASE(I32x4Mul, i32x4, int32x4, 4, base::MulWithWraparound(a, b))
4697 BINOP_CASE(I32x4MinS, i32x4, int32x4, 4, a < b ? a : b)
4698 BINOP_CASE(I32x4MinU, i32x4, int32x4, 4,
4699 static_cast<uint32_t>(a) < static_cast<uint32_t>(b) ? a : b)
4700 BINOP_CASE(I32x4MaxS, i32x4, int32x4, 4, a > b ? a : b)
4701 BINOP_CASE(I32x4MaxU, i32x4, int32x4, 4,
4702 static_cast<uint32_t>(a) > static_cast<uint32_t>(b) ? a : b)
4703 BINOP_CASE(S128And, i32x4, int32x4, 4, a& b)
4704 BINOP_CASE(S128Or, i32x4, int32x4, 4, a | b)
4705 BINOP_CASE(S128Xor, i32x4, int32x4, 4, a ^ b)
4706 BINOP_CASE(S128AndNot, i32x4, int32x4, 4, a & ~b)
4707 BINOP_CASE(I16x8Add, i16x8, int16x8, 8, base::AddWithWraparound(a, b))
4708 BINOP_CASE(I16x8Sub, i16x8, int16x8, 8, base::SubWithWraparound(a, b))
4709 BINOP_CASE(I16x8Mul, i16x8, int16x8, 8, base::MulWithWraparound(a, b))
4710 BINOP_CASE(I16x8MinS, i16x8, int16x8, 8, a < b ? a : b)
4711 BINOP_CASE(I16x8MinU, i16x8, int16x8, 8,
4712 static_cast<uint16_t>(a) < static_cast<uint16_t>(b) ? a : b)
4713 BINOP_CASE(I16x8MaxS, i16x8, int16x8, 8, a > b ? a : b)
4714 BINOP_CASE(I16x8MaxU, i16x8, int16x8, 8,
4715 static_cast<uint16_t>(a) > static_cast<uint16_t>(b) ? a : b)
4716 BINOP_CASE(I16x8AddSatS, i16x8, int16x8, 8, SaturateAdd<int16_t>(a, b))
4717 BINOP_CASE(I16x8AddSatU, i16x8, int16x8, 8, SaturateAdd<uint16_t>(a, b))
4718 BINOP_CASE(I16x8SubSatS, i16x8, int16x8, 8, SaturateSub<int16_t>(a, b))
4719 BINOP_CASE(I16x8SubSatU, i16x8, int16x8, 8, SaturateSub<uint16_t>(a, b))
4720 BINOP_CASE(I16x8RoundingAverageU, i16x8, int16x8, 8,
4721 RoundingAverageUnsigned<uint16_t>(a, b))
4722 BINOP_CASE(I16x8Q15MulRSatS, i16x8, int16x8, 8,
4723 SaturateRoundingQMul<int16_t>(a, b))
4724 BINOP_CASE(I16x8RelaxedQ15MulRS, i16x8, int16x8, 8,
4725 SaturateRoundingQMul<int16_t>(a, b))
4726 BINOP_CASE(I8x16Add, i8x16, int8x16, 16, base::AddWithWraparound(a, b))
4727 BINOP_CASE(I8x16Sub, i8x16, int8x16, 16, base::SubWithWraparound(a, b))
4728 BINOP_CASE(I8x16MinS, i8x16, int8x16, 16, a < b ? a : b)
4729 BINOP_CASE(I8x16MinU, i8x16, int8x16, 16,
4730 static_cast<uint8_t>(a) < static_cast<uint8_t>(b) ? a : b)
4731 BINOP_CASE(I8x16MaxS, i8x16, int8x16, 16, a > b ? a : b)
4732 BINOP_CASE(I8x16MaxU, i8x16, int8x16, 16,
4733 static_cast<uint8_t>(a) > static_cast<uint8_t>(b) ? a : b)
4734 BINOP_CASE(I8x16AddSatS, i8x16, int8x16, 16, SaturateAdd<int8_t>(a, b))
4735 BINOP_CASE(I8x16AddSatU, i8x16, int8x16, 16, SaturateAdd<uint8_t>(a, b))
4736 BINOP_CASE(I8x16SubSatS, i8x16, int8x16, 16, SaturateSub<int8_t>(a, b))
4737 BINOP_CASE(I8x16SubSatU, i8x16, int8x16, 16, SaturateSub<uint8_t>(a, b))
4738 BINOP_CASE(I8x16RoundingAverageU, i8x16, int8x16, 16,
4739 RoundingAverageUnsigned<uint8_t>(a, b))
4740#undef BINOP_CASE
4741
4742#define UNOP_CASE(op, name, stype, count, expr) \
4743 INSTRUCTION_HANDLER_FUNC s2s_Simd##op(const uint8_t* code, uint32_t* sp, \
4744 WasmInterpreterRuntime* wasm_runtime, \
4745 int64_t r0, double fp0) { \
4746 stype s = pop<Simd128>(sp, code, wasm_runtime).to_##name(); \
4747 stype res; \
4748 for (size_t i = 0; i < count; ++i) { \
4749 auto a = s.val[LANE(i, s)]; \
4750 res.val[LANE(i, res)] = expr; \
4751 } \
4752 push<Simd128>(sp, code, wasm_runtime, Simd128(res)); \
4753 NextOp(); \
4754 }
4755 UNOP_CASE(F64x2Abs, f64x2, float64x2, 2, std::abs(a))
4756 UNOP_CASE(F64x2Neg, f64x2, float64x2, 2, -a)
4757 UNOP_CASE(F64x2Sqrt, f64x2, float64x2, 2, std::sqrt(a))
4758 UNOP_CASE(F64x2Ceil, f64x2, float64x2, 2, ceil(a))
4759 UNOP_CASE(F64x2Floor, f64x2, float64x2, 2, floor(a))
4760 UNOP_CASE(F64x2Trunc, f64x2, float64x2, 2, trunc(a))
4761 UNOP_CASE(F64x2NearestInt, f64x2, float64x2, 2, nearbyint(a))
4762 UNOP_CASE(F32x4Abs, f32x4, float32x4, 4, std::abs(a))
4763 UNOP_CASE(F32x4Neg, f32x4, float32x4, 4, -a)
4764 UNOP_CASE(F32x4Sqrt, f32x4, float32x4, 4, std::sqrt(a))
4765 UNOP_CASE(F32x4Ceil, f32x4, float32x4, 4, ceilf(a))
4766 UNOP_CASE(F32x4Floor, f32x4, float32x4, 4, floorf(a))
4767 UNOP_CASE(F32x4Trunc, f32x4, float32x4, 4, truncf(a))
4768 UNOP_CASE(F32x4NearestInt, f32x4, float32x4, 4, nearbyintf(a))
4769 UNOP_CASE(I64x2Neg, i64x2, int64x2, 2, base::NegateWithWraparound(a))
4770 UNOP_CASE(I32x4Neg, i32x4, int32x4, 4, base::NegateWithWraparound(a))
4771 // Use llabs which will work correctly on both 64-bit and 32-bit.
4772 UNOP_CASE(I64x2Abs, i64x2, int64x2, 2, std::llabs(a))
4773 UNOP_CASE(I32x4Abs, i32x4, int32x4, 4, std::abs(a))
4774 UNOP_CASE(S128Not, i32x4, int32x4, 4, ~a)
4775 UNOP_CASE(I16x8Neg, i16x8, int16x8, 8, base::NegateWithWraparound(a))
4776 UNOP_CASE(I16x8Abs, i16x8, int16x8, 8, std::abs(a))
4777 UNOP_CASE(I8x16Neg, i8x16, int8x16, 16, base::NegateWithWraparound(a))
4778 UNOP_CASE(I8x16Abs, i8x16, int8x16, 16, std::abs(a))
4779 UNOP_CASE(I8x16Popcnt, i8x16, int8x16, 16,
4780 base::bits::CountPopulation<uint8_t>(a))
4781#undef UNOP_CASE
4782
4783#define BITMASK_CASE(op, name, stype, count) \
4784 INSTRUCTION_HANDLER_FUNC s2s_Simd##op(const uint8_t* code, uint32_t* sp, \
4785 WasmInterpreterRuntime* wasm_runtime, \
4786 int64_t r0, double fp0) { \
4787 stype s = pop<Simd128>(sp, code, wasm_runtime).to_##name(); \
4788 int32_t res = 0; \
4789 for (size_t i = 0; i < count; ++i) { \
4790 bool sign = std::signbit(static_cast<double>(s.val[LANE(i, s)])); \
4791 res |= (sign << i); \
4792 } \
4793 push<int32_t>(sp, code, wasm_runtime, res); \
4794 NextOp(); \
4795 }
4796 BITMASK_CASE(I8x16BitMask, i8x16, int8x16, 16)
4797 BITMASK_CASE(I16x8BitMask, i16x8, int16x8, 8)
4798 BITMASK_CASE(I32x4BitMask, i32x4, int32x4, 4)
4799 BITMASK_CASE(I64x2BitMask, i64x2, int64x2, 2)
4800#undef BITMASK_CASE
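// Note: each BitMask op gathers the per-lane sign bits into the low bits of
// an i32 result, with lane i contributing bit i; e.g. an i8x16 value whose
// lanes are all negative yields 0xFFFF.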
4801
4802#define CMPOP_CASE(op, name, stype, out_stype, count, expr) \
4803 INSTRUCTION_HANDLER_FUNC s2s_Simd##op(const uint8_t* code, uint32_t* sp, \
4804 WasmInterpreterRuntime* wasm_runtime, \
4805 int64_t r0, double fp0) { \
4806 stype s2 = pop<Simd128>(sp, code, wasm_runtime).to_##name(); \
4807 stype s1 = pop<Simd128>(sp, code, wasm_runtime).to_##name(); \
4808 out_stype res; \
4809 for (size_t i = 0; i < count; ++i) { \
4810 auto a = s1.val[LANE(i, s1)]; \
4811 auto b = s2.val[LANE(i, s2)]; \
4812 auto result = expr; \
4813 res.val[LANE(i, res)] = result ? -1 : 0; \
4814 } \
4815 push<Simd128>(sp, code, wasm_runtime, Simd128(res)); \
4816 NextOp(); \
4817 }
4818
4819 CMPOP_CASE(F64x2Eq, f64x2, float64x2, int64x2, 2, a == b)
4820 CMPOP_CASE(F64x2Ne, f64x2, float64x2, int64x2, 2, a != b)
4821 CMPOP_CASE(F64x2Gt, f64x2, float64x2, int64x2, 2, a > b)
4822 CMPOP_CASE(F64x2Ge, f64x2, float64x2, int64x2, 2, a >= b)
4823 CMPOP_CASE(F64x2Lt, f64x2, float64x2, int64x2, 2, a < b)
4824 CMPOP_CASE(F64x2Le, f64x2, float64x2, int64x2, 2, a <= b)
4825 CMPOP_CASE(F32x4Eq, f32x4, float32x4, int32x4, 4, a == b)
4826 CMPOP_CASE(F32x4Ne, f32x4, float32x4, int32x4, 4, a != b)
4827 CMPOP_CASE(F32x4Gt, f32x4, float32x4, int32x4, 4, a > b)
4828 CMPOP_CASE(F32x4Ge, f32x4, float32x4, int32x4, 4, a >= b)
4829 CMPOP_CASE(F32x4Lt, f32x4, float32x4, int32x4, 4, a < b)
4830 CMPOP_CASE(F32x4Le, f32x4, float32x4, int32x4, 4, a <= b)
4831 CMPOP_CASE(I64x2Eq, i64x2, int64x2, int64x2, 2, a == b)
4832 CMPOP_CASE(I64x2Ne, i64x2, int64x2, int64x2, 2, a != b)
4833 CMPOP_CASE(I64x2LtS, i64x2, int64x2, int64x2, 2, a < b)
4834 CMPOP_CASE(I64x2GtS, i64x2, int64x2, int64x2, 2, a > b)
4835 CMPOP_CASE(I64x2LeS, i64x2, int64x2, int64x2, 2, a <= b)
4836 CMPOP_CASE(I64x2GeS, i64x2, int64x2, int64x2, 2, a >= b)
4837 CMPOP_CASE(I32x4Eq, i32x4, int32x4, int32x4, 4, a == b)
4838 CMPOP_CASE(I32x4Ne, i32x4, int32x4, int32x4, 4, a != b)
4839 CMPOP_CASE(I32x4GtS, i32x4, int32x4, int32x4, 4, a > b)
4840 CMPOP_CASE(I32x4GeS, i32x4, int32x4, int32x4, 4, a >= b)
4841 CMPOP_CASE(I32x4LtS, i32x4, int32x4, int32x4, 4, a < b)
4842 CMPOP_CASE(I32x4LeS, i32x4, int32x4, int32x4, 4, a <= b)
4843 CMPOP_CASE(I32x4GtU, i32x4, int32x4, int32x4, 4,
4844 static_cast<uint32_t>(a) > static_cast<uint32_t>(b))
4845 CMPOP_CASE(I32x4GeU, i32x4, int32x4, int32x4, 4,
4846 static_cast<uint32_t>(a) >= static_cast<uint32_t>(b))
4847 CMPOP_CASE(I32x4LtU, i32x4, int32x4, int32x4, 4,
4848 static_cast<uint32_t>(a) < static_cast<uint32_t>(b))
4849 CMPOP_CASE(I32x4LeU, i32x4, int32x4, int32x4, 4,
4850 static_cast<uint32_t>(a) <= static_cast<uint32_t>(b))
4851 CMPOP_CASE(I16x8Eq, i16x8, int16x8, int16x8, 8, a == b)
4852 CMPOP_CASE(I16x8Ne, i16x8, int16x8, int16x8, 8, a != b)
4853 CMPOP_CASE(I16x8GtS, i16x8, int16x8, int16x8, 8, a > b)
4854 CMPOP_CASE(I16x8GeS, i16x8, int16x8, int16x8, 8, a >= b)
4855 CMPOP_CASE(I16x8LtS, i16x8, int16x8, int16x8, 8, a < b)
4856 CMPOP_CASE(I16x8LeS, i16x8, int16x8, int16x8, 8, a <= b)
4857 CMPOP_CASE(I16x8GtU, i16x8, int16x8, int16x8, 8,
4858 static_cast<uint16_t>(a) > static_cast<uint16_t>(b))
4859 CMPOP_CASE(I16x8GeU, i16x8, int16x8, int16x8, 8,
4860 static_cast<uint16_t>(a) >= static_cast<uint16_t>(b))
4861 CMPOP_CASE(I16x8LtU, i16x8, int16x8, int16x8, 8,
4862 static_cast<uint16_t>(a) < static_cast<uint16_t>(b))
4863 CMPOP_CASE(I16x8LeU, i16x8, int16x8, int16x8, 8,
4864 static_cast<uint16_t>(a) <= static_cast<uint16_t>(b))
4865 CMPOP_CASE(I8x16Eq, i8x16, int8x16, int8x16, 16, a == b)
4866 CMPOP_CASE(I8x16Ne, i8x16, int8x16, int8x16, 16, a != b)
4867 CMPOP_CASE(I8x16GtS, i8x16, int8x16, int8x16, 16, a > b)
4868 CMPOP_CASE(I8x16GeS, i8x16, int8x16, int8x16, 16, a >= b)
4869 CMPOP_CASE(I8x16LtS, i8x16, int8x16, int8x16, 16, a < b)
4870 CMPOP_CASE(I8x16LeS, i8x16, int8x16, int8x16, 16, a <= b)
4871 CMPOP_CASE(I8x16GtU, i8x16, int8x16, int8x16, 16,
4872 static_cast<uint8_t>(a) > static_cast<uint8_t>(b))
4873 CMPOP_CASE(I8x16GeU, i8x16, int8x16, int8x16, 16,
4874 static_cast<uint8_t>(a) >= static_cast<uint8_t>(b))
4875 CMPOP_CASE(I8x16LtU, i8x16, int8x16, int8x16, 16,
4876 static_cast<uint8_t>(a) < static_cast<uint8_t>(b))
4877 CMPOP_CASE(I8x16LeU, i8x16, int8x16, int8x16, 16,
4878 static_cast<uint8_t>(a) <= static_cast<uint8_t>(b))
4879#undef CMPOP_CASE
4880
4881#define REPLACE_LANE_CASE(format, name, stype, ctype, op_type) \
4882 INSTRUCTION_HANDLER_FUNC s2s_Simd##format##ReplaceLane( \
4883 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime, \
4884 int64_t r0, double fp0) { \
4885 uint16_t lane = Read<int16_t>(code); \
4886 DCHECK_LT(lane, 16); \
4887 ctype new_val = pop<ctype>(sp, code, wasm_runtime); \
4888 Simd128 simd_val = pop<Simd128>(sp, code, wasm_runtime); \
4889 stype s = simd_val.to_##name(); \
4890 s.val[LANE(lane, s)] = new_val; \
4891 push<Simd128>(sp, code, wasm_runtime, Simd128(s)); \
4892 NextOp(); \
4893 }
4894 REPLACE_LANE_CASE(F64x2, f64x2, float64x2, double, F64)
4895 REPLACE_LANE_CASE(F32x4, f32x4, float32x4, float, F32)
4896 REPLACE_LANE_CASE(I64x2, i64x2, int64x2, int64_t, I64)
4897 REPLACE_LANE_CASE(I32x4, i32x4, int32x4, int32_t, I32)
4898 REPLACE_LANE_CASE(I16x8, i16x8, int16x8, int32_t, I32)
4899 REPLACE_LANE_CASE(I8x16, i8x16, int8x16, int32_t, I32)
4900#undef REPLACE_LANE_CASE
4901
4902 template <typename MemIdx, typename MemOffsetT>
4903 INSTRUCTION_HANDLER_FUNC s2s_SimdS128LoadMemI(
4904 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
4905 int64_t r0, double fp0) {
4906 uint8_t* memory_start = wasm_runtime->GetMemoryStart();
4907 uint64_t offset = Read<MemOffsetT>(code);
4908
4909 uint64_t index = pop<MemIdx>(sp, code, wasm_runtime);
4910 uint64_t effective_index = offset + index;
4911
4912 if (V8_UNLIKELY(
4913 effective_index < index ||
4914 !base::IsInBounds<uint64_t>(effective_index, sizeof(Simd128),
4915 wasm_runtime->GetMemorySize()))) {
4916 TRAP(TrapReason::kTrapMemOutOfBounds)
4917 }
4918
4919 uint8_t* address = memory_start + effective_index;
4920 Simd128 s =
4921 base::ReadUnalignedValue<Simd128>(reinterpret_cast<Address>(address));
4922 push<Simd128>(sp, code, wasm_runtime, s);
4923
4924 NextOp();
4925 }
4926 static auto constexpr s2s_SimdS128LoadMem =
4927 s2s_SimdS128LoadMemI<uint32_t, memory_offset32_t>;
4928 static auto constexpr s2s_SimdS128LoadMem_Idx64 =
4929 s2s_SimdS128LoadMemI<uint64_t, memory_offset64_t>;
4930
4931 template <typename MemIdx, typename MemOffsetT>
4932 INSTRUCTION_HANDLER_FUNC s2s_SimdS128StoreMemI(
4933 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
4934 int64_t r0, double fp0) {
4935 Simd128 val = pop<Simd128>(sp, code, wasm_runtime);
4936
4937 uint8_t* memory_start = wasm_runtime->GetMemoryStart();
4938 uint64_t offset = Read<MemOffsetT>(code);
4939
4940 uint64_t index = pop<MemIdx>(sp, code, wasm_runtime);
4941 uint64_t effective_index = offset + index;
4942
4943 if (V8_UNLIKELY(
4944 effective_index < index ||
4945 !base::IsInBounds<uint64_t>(effective_index, sizeof(Simd128),
4946 wasm_runtime->GetMemorySize()))) {
4947 TRAP(TrapReason::kTrapMemOutOfBounds)
4948 }
4949
4950 uint8_t* address = memory_start + effective_index;
4951 base::WriteUnalignedValue<Simd128>(reinterpret_cast<Address>(address), val);
4952
4953 NextOp();
4954 }
4955 static auto constexpr s2s_SimdS128StoreMem =
4956 s2s_SimdS128StoreMemI<uint32_t, memory_offset32_t>;
4957 static auto constexpr s2s_SimdS128StoreMem_Idx64 =
4958 s2s_SimdS128StoreMemI<uint64_t, memory_offset64_t>;
4959
4960#define SHIFT_CASE(op, name, stype, count, expr) \
4961 INSTRUCTION_HANDLER_FUNC s2s_Simd##op(const uint8_t* code, uint32_t* sp, \
4962 WasmInterpreterRuntime* wasm_runtime, \
4963 int64_t r0, double fp0) { \
4964 uint32_t shift = pop<uint32_t>(sp, code, wasm_runtime); \
4965 stype s = pop<Simd128>(sp, code, wasm_runtime).to_##name(); \
4966 stype res; \
4967 for (size_t i = 0; i < count; ++i) { \
4968 auto a = s.val[LANE(i, s)]; \
4969 res.val[LANE(i, res)] = expr; \
4970 } \
4971 push<Simd128>(sp, code, wasm_runtime, Simd128(res)); \
4972 NextOp(); \
4973 }
4974 SHIFT_CASE(I64x2Shl, i64x2, int64x2, 2,
4975 static_cast<uint64_t>(a) << (shift % 64))
4976 SHIFT_CASE(I64x2ShrS, i64x2, int64x2, 2, a >> (shift % 64))
4977 SHIFT_CASE(I64x2ShrU, i64x2, int64x2, 2,
4978 static_cast<uint64_t>(a) >> (shift % 64))
4979 SHIFT_CASE(I32x4Shl, i32x4, int32x4, 4,
4980 static_cast<uint32_t>(a) << (shift % 32))
4981 SHIFT_CASE(I32x4ShrS, i32x4, int32x4, 4, a >> (shift % 32))
4982 SHIFT_CASE(I32x4ShrU, i32x4, int32x4, 4,
4983 static_cast<uint32_t>(a) >> (shift % 32))
4984 SHIFT_CASE(I16x8Shl, i16x8, int16x8, 8,
4985 static_cast<uint16_t>(a) << (shift % 16))
4986 SHIFT_CASE(I16x8ShrS, i16x8, int16x8, 8, a >> (shift % 16))
4987 SHIFT_CASE(I16x8ShrU, i16x8, int16x8, 8,
4988 static_cast<uint16_t>(a) >> (shift % 16))
4989 SHIFT_CASE(I8x16Shl, i8x16, int8x16, 16,
4990 static_cast<uint8_t>(a) << (shift % 8))
4991 SHIFT_CASE(I8x16ShrS, i8x16, int8x16, 16, a >> (shift % 8))
4992 SHIFT_CASE(I8x16ShrU, i8x16, int8x16, 16,
4993 static_cast<uint8_t>(a) >> (shift % 8))
4994#undef SHIFT_CASE
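// Note: shift counts are reduced modulo the lane width (shift % 8/16/32/64),
// as Wasm SIMD semantics require; e.g. I32x4Shl with shift == 33 shifts each
// lane left by 1.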
4995
4996 template <typename s_type, typename d_type, typename narrow, typename wide,
4997 uint32_t start>
4998 INSTRUCTION_HANDLER_FUNC s2s_DoSimdExtMul(
4999 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
5000 int64_t r0, double fp0) {
5001 s_type s2 = pop<Simd128>(sp, code, wasm_runtime).template to<s_type>();
5002 s_type s1 = pop<Simd128>(sp, code, wasm_runtime).template to<s_type>();
5003 auto end = start + (kSimd128Size / sizeof(wide));
5004 d_type res;
5005 uint32_t i = start;
5006 for (size_t dst = 0; i < end; ++i, ++dst) {
5007 // Need static_cast for unsigned narrow types.
5008 res.val[LANE(dst, res)] =
5009 MultiplyLong<wide>(static_cast<narrow>(s1.val[LANE(i, s1)]),
5010 static_cast<narrow>(s2.val[LANE(i, s2)]));
5011 }
5012 push<Simd128>(sp, code, wasm_runtime, Simd128(res));
5013 NextOp();
5014 }
5015 static auto constexpr s2s_SimdI16x8ExtMulLowI8x16S =
5016 s2s_DoSimdExtMul<int8x16, int16x8, int8_t, int16_t, 0>;
5017 static auto constexpr s2s_SimdI16x8ExtMulHighI8x16S =
5018 s2s_DoSimdExtMul<int8x16, int16x8, int8_t, int16_t, 8>;
5019 static auto constexpr s2s_SimdI16x8ExtMulLowI8x16U =
5020 s2s_DoSimdExtMul<int8x16, int16x8, uint8_t, uint16_t, 0>;
5021 static auto constexpr s2s_SimdI16x8ExtMulHighI8x16U =
5022 s2s_DoSimdExtMul<int8x16, int16x8, uint8_t, uint16_t, 8>;
5023 static auto constexpr s2s_SimdI32x4ExtMulLowI16x8S =
5024 s2s_DoSimdExtMul<int16x8, int32x4, int16_t, int32_t, 0>;
5025 static auto constexpr s2s_SimdI32x4ExtMulHighI16x8S =
5026 s2s_DoSimdExtMul<int16x8, int32x4, int16_t, int32_t, 4>;
5027 static auto constexpr s2s_SimdI32x4ExtMulLowI16x8U =
5028 s2s_DoSimdExtMul<int16x8, int32x4, uint16_t, uint32_t, 0>;
5029 static auto constexpr s2s_SimdI32x4ExtMulHighI16x8U =
5030 s2s_DoSimdExtMul<int16x8, int32x4, uint16_t, uint32_t, 4>;
5031 static auto constexpr s2s_SimdI64x2ExtMulLowI32x4S =
5032 s2s_DoSimdExtMul<int32x4, int64x2, int32_t, int64_t, 0>;
5033 static auto constexpr s2s_SimdI64x2ExtMulHighI32x4S =
5034 s2s_DoSimdExtMul<int32x4, int64x2, int32_t, int64_t, 2>;
5035 static auto constexpr s2s_SimdI64x2ExtMulLowI32x4U =
5036 s2s_DoSimdExtMul<int32x4, int64x2, uint32_t, uint64_t, 0>;
5037 static auto constexpr s2s_SimdI64x2ExtMulHighI32x4U =
5038 s2s_DoSimdExtMul<int32x4, int64x2, uint32_t, uint64_t, 2>;
5039#undef EXT_MUL_CASE
5040
5041#define CONVERT_CASE(op, src_type, name, dst_type, count, start_index, ctype, \
5042 expr) \
5043 INSTRUCTION_HANDLER_FUNC s2s_Simd##op(const uint8_t* code, uint32_t* sp, \
5044 WasmInterpreterRuntime* wasm_runtime, \
5045 int64_t r0, double fp0) { \
5046 src_type s = pop<Simd128>(sp, code, wasm_runtime).to_##name(); \
5047 dst_type res = {0}; \
5048 for (size_t i = 0; i < count; ++i) { \
5049 ctype a = s.val[LANE(start_index + i, s)]; \
5050 res.val[LANE(i, res)] = expr; \
5051 } \
5052 push<Simd128>(sp, code, wasm_runtime, Simd128(res)); \
5053 NextOp(); \
5054 }
5055 CONVERT_CASE(F32x4SConvertI32x4, int32x4, i32x4, float32x4, 4, 0, int32_t,
5056 static_cast<float>(a))
5057 CONVERT_CASE(F32x4UConvertI32x4, int32x4, i32x4, float32x4, 4, 0, uint32_t,
5058 static_cast<float>(a))
5059 CONVERT_CASE(I32x4SConvertF32x4, float32x4, f32x4, int32x4, 4, 0, float,
5060 base::saturated_cast<int32_t>(a))
5061 CONVERT_CASE(I32x4UConvertF32x4, float32x4, f32x4, int32x4, 4, 0, float,
5062 base::saturated_cast<uint32_t>(a))
5063 CONVERT_CASE(I32x4RelaxedTruncF32x4S, float32x4, f32x4, int32x4, 4, 0, float,
5064 base::saturated_cast<int32_t>(a))
5065 CONVERT_CASE(I32x4RelaxedTruncF32x4U, float32x4, f32x4, int32x4, 4, 0, float,
5066 base::saturated_cast<uint32_t>(a))
5067 CONVERT_CASE(I64x2SConvertI32x4Low, int32x4, i32x4, int64x2, 2, 0, int32_t, a)
5068 CONVERT_CASE(I64x2SConvertI32x4High, int32x4, i32x4, int64x2, 2, 2, int32_t,
5069 a)
5070 CONVERT_CASE(I64x2UConvertI32x4Low, int32x4, i32x4, int64x2, 2, 0, uint32_t,
5071 a)
5072 CONVERT_CASE(I64x2UConvertI32x4High, int32x4, i32x4, int64x2, 2, 2, uint32_t,
5073 a)
5074 CONVERT_CASE(I32x4SConvertI16x8High, int16x8, i16x8, int32x4, 4, 4, int16_t,
5075 a)
5076 CONVERT_CASE(I32x4UConvertI16x8High, int16x8, i16x8, int32x4, 4, 4, uint16_t,
5077 a)
5078 CONVERT_CASE(I32x4SConvertI16x8Low, int16x8, i16x8, int32x4, 4, 0, int16_t, a)
5079 CONVERT_CASE(I32x4UConvertI16x8Low, int16x8, i16x8, int32x4, 4, 0, uint16_t,
5080 a)
5081 CONVERT_CASE(I16x8SConvertI8x16High, int8x16, i8x16, int16x8, 8, 8, int8_t, a)
5082 CONVERT_CASE(I16x8UConvertI8x16High, int8x16, i8x16, int16x8, 8, 8, uint8_t,
5083 a)
5084 CONVERT_CASE(I16x8SConvertI8x16Low, int8x16, i8x16, int16x8, 8, 0, int8_t, a)
5085 CONVERT_CASE(I16x8UConvertI8x16Low, int8x16, i8x16, int16x8, 8, 0, uint8_t, a)
5086 CONVERT_CASE(F64x2ConvertLowI32x4S, int32x4, i32x4, float64x2, 2, 0, int32_t,
5087 static_cast<double>(a))
5088 CONVERT_CASE(F64x2ConvertLowI32x4U, int32x4, i32x4, float64x2, 2, 0, uint32_t,
5089 static_cast<double>(a))
5090 CONVERT_CASE(I32x4TruncSatF64x2SZero, float64x2, f64x2, int32x4, 2, 0, double,
5091 base::saturated_cast<int32_t>(a))
5092 CONVERT_CASE(I32x4TruncSatF64x2UZero, float64x2, f64x2, int32x4, 2, 0, double,
5093 base::saturated_cast<uint32_t>(a))
5094 CONVERT_CASE(I32x4RelaxedTruncF64x2SZero, float64x2, f64x2, int32x4, 2, 0,
5095 double, base::saturated_cast<int32_t>(a))
5096 CONVERT_CASE(I32x4RelaxedTruncF64x2UZero, float64x2, f64x2, int32x4, 2, 0,
5097 double, base::saturated_cast<uint32_t>(a))
5098 CONVERT_CASE(F32x4DemoteF64x2Zero, float64x2, f64x2, float32x4, 2, 0, float,
5099 DoubleToFloat32(a))
5100 CONVERT_CASE(F64x2PromoteLowF32x4, float32x4, f32x4, float64x2, 2, 0, float,
5101 static_cast<double>(a))
5102#undef CONVERT_CASE
5103
5104#define PACK_CASE(op, src_type, name, dst_type, count, dst_ctype) \
5105 INSTRUCTION_HANDLER_FUNC s2s_Simd##op(const uint8_t* code, uint32_t* sp, \
5106 WasmInterpreterRuntime* wasm_runtime, \
5107 int64_t r0, double fp0) { \
5108 src_type s2 = pop<Simd128>(sp, code, wasm_runtime).to_##name(); \
5109 src_type s1 = pop<Simd128>(sp, code, wasm_runtime).to_##name(); \
5110 dst_type res; \
5111 for (size_t i = 0; i < count; ++i) { \
5112 int64_t v = i < count / 2 ? s1.val[LANE(i, s1)] \
5113 : s2.val[LANE(i - count / 2, s2)]; \
5114 res.val[LANE(i, res)] = base::saturated_cast<dst_ctype>(v); \
5115 } \
5116 push<Simd128>(sp, code, wasm_runtime, Simd128(res)); \
5117 NextOp(); \
5118 }
5119 PACK_CASE(I16x8SConvertI32x4, int32x4, i32x4, int16x8, 8, int16_t)
5120 PACK_CASE(I16x8UConvertI32x4, int32x4, i32x4, int16x8, 8, uint16_t)
5121 PACK_CASE(I8x16SConvertI16x8, int16x8, i16x8, int8x16, 16, int8_t)
5122 PACK_CASE(I8x16UConvertI16x8, int16x8, i16x8, int8x16, 16, uint8_t)
5123#undef PACK_CASE
5124
5125 INSTRUCTION_HANDLER_FUNC s2s_DoSimdSelect(
5126 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
5127 int64_t r0, double fp0) {
5128 int32x4 bool_val = pop<Simd128>(sp, code, wasm_runtime).to_i32x4();
5129 int32x4 v2 = pop<Simd128>(sp, code, wasm_runtime).to_i32x4();
5130 int32x4 v1 = pop<Simd128>(sp, code, wasm_runtime).to_i32x4();
5131 int32x4 res;
5132 for (size_t i = 0; i < 4; ++i) {
5133 res.val[LANE(i, res)] =
5134 v2.val[LANE(i, v2)] ^ ((v1.val[LANE(i, v1)] ^ v2.val[LANE(i, v2)]) &
5135 bool_val.val[LANE(i, bool_val)]);
5136 }
5137 push<Simd128>(sp, code, wasm_runtime, Simd128(res));
5138 NextOp();
5139 }
5140 // Do these 5 instructions really have the same implementation?
5141 static constexpr auto s2s_SimdI8x16RelaxedLaneSelect = s2s_DoSimdSelect;
5142 static constexpr auto s2s_SimdI16x8RelaxedLaneSelect = s2s_DoSimdSelect;
5143 static constexpr auto s2s_SimdI32x4RelaxedLaneSelect = s2s_DoSimdSelect;
5144 static constexpr auto s2s_SimdI64x2RelaxedLaneSelect = s2s_DoSimdSelect;
5145 static constexpr auto s2s_SimdS128Select = s2s_DoSimdSelect;
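// [Editorial note, not part of the V8 source] Answering the question in
// the comment above: they do. The loop is the branchless bitwise select
//   v2 ^ ((v1 ^ v2) & mask) == (v1 & mask) | (v2 & ~mask),
// so each result bit comes from v1 where the mask bit is set and from v2
// otherwise. That is exactly s128.select, and it is also a conforming
// implementation of the relaxed lane-select opcodes, which allow (but do
// not require) a purely bitwise selection.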
5146
5147 INSTRUCTION_HANDLER_FUNC s2s_SimdI32x4DotI16x8S(
5148 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
5149 int64_t r0, double fp0) {
5150 int16x8 v2 = pop<Simd128>(sp, code, wasm_runtime).to_i16x8();
5151 int16x8 v1 = pop<Simd128>(sp, code, wasm_runtime).to_i16x8();
5152 int32x4 res;
5153 for (size_t i = 0; i < 4; i++) {
5154 int32_t lo = (v1.val[LANE(i * 2, v1)] * v2.val[LANE(i * 2, v2)]);
5155 int32_t hi = (v1.val[LANE(i * 2 + 1, v1)] * v2.val[LANE(i * 2 + 1, v2)]);
5156 res.val[LANE(i, res)] = base::AddWithWraparound(lo, hi);
5157 }
5158 push<Simd128>(sp, code, wasm_runtime, Simd128(res));
5159 NextOp();
5160 }
5161
5162 INSTRUCTION_HANDLER_FUNC s2s_SimdI16x8DotI8x16I7x16S(
5163 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
5164 int64_t r0, double fp0) {
5165 int8x16 v2 = pop<Simd128>(sp, code, wasm_runtime).to_i8x16();
5166 int8x16 v1 = pop<Simd128>(sp, code, wasm_runtime).to_i8x16();
5167 int16x8 res;
5168 for (size_t i = 0; i < 8; i++) {
5169 int16_t lo = (v1.val[LANE(i * 2, v1)] * v2.val[LANE(i * 2, v2)]);
5170 int16_t hi = (v1.val[LANE(i * 2 + 1, v1)] * v2.val[LANE(i * 2 + 1, v2)]);
5171 res.val[LANE(i, res)] = base::AddWithWraparound(lo, hi);
5172 }
5173 push<Simd128>(sp, code, wasm_runtime, Simd128(res));
5174 NextOp();
5175 }
5176
5177 INSTRUCTION_HANDLER_FUNC s2s_SimdI32x4DotI8x16I7x16AddS(
5178 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
5179 int64_t r0, double fp0) {
5180 int32x4 v3 = pop<Simd128>(sp, code, wasm_runtime).to_i32x4();
5181 int8x16 v2 = pop<Simd128>(sp, code, wasm_runtime).to_i8x16();
5182 int8x16 v1 = pop<Simd128>(sp, code, wasm_runtime).to_i8x16();
5183 int32x4 res;
5184 for (size_t i = 0; i < 4; i++) {
5185 int32_t a = (v1.val[LANE(i * 4, v1)] * v2.val[LANE(i * 4, v2)]);
5186 int32_t b = (v1.val[LANE(i * 4 + 1, v1)] * v2.val[LANE(i * 4 + 1, v2)]);
5187 int32_t c = (v1.val[LANE(i * 4 + 2, v1)] * v2.val[LANE(i * 4 + 2, v2)]);
5188 int32_t d = (v1.val[LANE(i * 4 + 3, v1)] * v2.val[LANE(i * 4 + 3, v2)]);
5189 int32_t acc = v3.val[LANE(i, v3)];
5190 // a + b + c + d should not wrap
5191 res.val[LANE(i, res)] = base::AddWithWraparound(a + b + c + d, acc);
5192 }
5193 push<Simd128>(sp, code, wasm_runtime, Simd128(res));
5194 NextOp();
5195 }
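// [Editorial note, not part of the V8 source] Why "a + b + c + d" cannot
// wrap above: each factor is an int8_t in [-128, 127], so every product
// lies in [-16256, 16384] and the sum of four products lies in
// [-65024, 65536], comfortably inside int32_t. Only the final addition of
// the i32 accumulator can overflow, hence the single AddWithWraparound.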
5196
5197 INSTRUCTION_HANDLER_FUNC s2s_SimdI8x16Swizzle(
5198 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
5199 int64_t r0, double fp0) {
5200 int8x16 v2 = pop<Simd128>(sp, code, wasm_runtime).to_i8x16();
5201 int8x16 v1 = pop<Simd128>(sp, code, wasm_runtime).to_i8x16();
5202 int8x16 res;
5203 for (size_t i = 0; i < kSimd128Size; ++i) {
5204 int lane = v2.val[LANE(i, v2)];
5205 res.val[LANE(i, res)] =
5206 lane < kSimd128Size && lane >= 0 ? v1.val[LANE(lane, v1)] : 0;
5207 }
5208 push<Simd128>(sp, code, wasm_runtime, Simd128(res));
5209 NextOp();
5210 }
5211 static auto constexpr s2s_SimdI8x16RelaxedSwizzle = s2s_SimdI8x16Swizzle;
5212
5213 INSTRUCTION_HANDLER_FUNC s2s_SimdI8x16Shuffle(
5214 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
5215 int64_t r0, double fp0) {
5216 int8x16 value = pop<Simd128>(sp, code, wasm_runtime).to_i8x16();
5217 int8x16 v2 = pop<Simd128>(sp, code, wasm_runtime).to_i8x16();
5218 int8x16 v1 = pop<Simd128>(sp, code, wasm_runtime).to_i8x16();
5219 int8x16 res;
5220 for (size_t i = 0; i < kSimd128Size; ++i) {
5221 int lane = value.val[i];
5222 res.val[LANE(i, res)] = lane < kSimd128Size
5223 ? v1.val[LANE(lane, v1)]
5224 : v2.val[LANE(lane - kSimd128Size, v2)];
5225 }
5226 push<Simd128>(sp, code, wasm_runtime, Simd128(res));
5227 NextOp();
5228 }
5229
5230 INSTRUCTION_HANDLER_FUNC s2s_SimdV128AnyTrue(
5231 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
5232 int64_t r0, double fp0) {
5233 int32x4 s = pop<Simd128>(sp, code, wasm_runtime).to_i32x4();
5234 bool res = s.val[LANE(0, s)] | s.val[LANE(1, s)] | s.val[LANE(2, s)] |
5235 s.val[LANE(3, s)];
5236 push<int32_t>(sp, code, wasm_runtime, res);
5237 NextOp();
5238 }
5239
5240#define REDUCTION_CASE(op, name, stype, count) \
5241 INSTRUCTION_HANDLER_FUNC s2s_Simd##op(const uint8_t* code, uint32_t* sp, \
5242 WasmInterpreterRuntime* wasm_runtime, \
5243 int64_t r0, double fp0) { \
5244 stype s = pop<Simd128>(sp, code, wasm_runtime).to_##name(); \
5245 bool res = true; \
5246 for (size_t i = 0; i < count; ++i) { \
5247 res = res & static_cast<bool>(s.val[LANE(i, s)]); \
5248 } \
5249 push<int32_t>(sp, code, wasm_runtime, res); \
5250 NextOp(); \
5251 }
5252 REDUCTION_CASE(I64x2AllTrue, i64x2, int64x2, 2)
5253 REDUCTION_CASE(I32x4AllTrue, i32x4, int32x4, 4)
5254 REDUCTION_CASE(I16x8AllTrue, i16x8, int16x8, 8)
5255 REDUCTION_CASE(I8x16AllTrue, i8x16, int8x16, 16)
5256#undef REDUCTION_CASE
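// [Editorial note, not part of the V8 source] Contrast with
// s2s_SimdV128AnyTrue above: any_true only asks whether any bit of the
// v128 is set, so OR-ing the four 32-bit lanes is enough, while the
// all_true reductions must test every lane at its own width, e.g.
// i8x16.all_true is 0 as soon as any single byte lane is zero.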
5257
5258#define QFM_CASE(op, name, stype, count, operation) \
5259 INSTRUCTION_HANDLER_FUNC s2s_Simd##op(const uint8_t* code, uint32_t* sp, \
5260 WasmInterpreterRuntime* wasm_runtime, \
5261 int64_t r0, double fp0) { \
5262 stype c = pop<Simd128>(sp, code, wasm_runtime).to_##name(); \
5263 stype b = pop<Simd128>(sp, code, wasm_runtime).to_##name(); \
5264 stype a = pop<Simd128>(sp, code, wasm_runtime).to_##name(); \
5265 stype res; \
5266 for (size_t i = 0; i < count; i++) { \
5267 res.val[LANE(i, res)] = \
5268 operation(a.val[LANE(i, a)] * b.val[LANE(i, b)]) + \
5269 c.val[LANE(i, c)]; \
5270 } \
5271 push<Simd128>(sp, code, wasm_runtime, Simd128(res)); \
5272 NextOp(); \
5273 }
5274 QFM_CASE(F32x4Qfma, f32x4, float32x4, 4, +)
5275 QFM_CASE(F32x4Qfms, f32x4, float32x4, 4, -)
5276 QFM_CASE(F64x2Qfma, f64x2, float64x2, 2, +)
5277 QFM_CASE(F64x2Qfms, f64x2, float64x2, 2, -)
5278#undef QFM_CASE
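// [Editorial note, not part of the V8 source] With the macro above, Qfma
// expands to res[i] = +(a[i] * b[i]) + c[i] and Qfms to
// res[i] = -(a[i] * b[i]) + c[i]. Computing the product and the addition
// as two separately rounded operations is permitted: the relaxed
// madd/nmadd opcodes allow either fused (single-rounding) or unfused
// results.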
5279
5280 template <typename s_type, typename load_type, typename MemIdx,
5281 typename MemOffsetT>
5282 INSTRUCTION_HANDLER_FUNC s2s_DoSimdLoadSplat(
5283 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
5284 int64_t r0, double fp0) {
5285 uint8_t* memory_start = wasm_runtime->GetMemoryStart();
5286 uint64_t offset = Read<MemOffsetT>(code);
5287
5288 uint64_t index = pop<MemIdx>(sp, code, wasm_runtime);
5289 uint64_t effective_index = offset + index;
5290
5291 if (V8_UNLIKELY(
5292 effective_index < index ||
5293 !base::IsInBounds<uint64_t>(effective_index, sizeof(load_type),
5294 wasm_runtime->GetMemorySize()))) {
5295 TRAP(TrapReason::kTrapMemOutOfBounds)
5296 }
5297
5298 uint8_t* address = memory_start + effective_index;
5299 load_type v =
5300 base::ReadUnalignedValue<load_type>(reinterpret_cast<Address>(address));
5301 s_type s;
5302 for (size_t i = 0; i < arraysize(s.val); i++) {
5303 s.val[LANE(i, s)] = v;
5304 }
5305 push<Simd128>(sp, code, wasm_runtime, Simd128(s));
5306
5307 NextOp();
5308 }
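// [Editorial note, not part of the V8 source] The bounds check above uses
// a standard overflow-safe idiom: "offset + index" is computed in
// uint64_t, and "effective_index < index" is true exactly when the
// addition wrapped around, e.g.:
//   uint64_t offset = ~uint64_t{0};        // 2^64 - 1
//   uint64_t effective_index = offset + 2; // wraps to 1, and 1 < 2
// IsInBounds then rejects any access extending past the memory size.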
5309 static auto constexpr s2s_SimdS128Load8Splat =
5310 s2s_DoSimdLoadSplat<int8x16, int8_t, uint32_t, memory_offset32_t>;
5311 static auto constexpr s2s_SimdS128Load8Splat_Idx64 =
5312 s2s_DoSimdLoadSplat<int8x16, int8_t, uint64_t, memory_offset64_t>;
5313 static auto constexpr s2s_SimdS128Load16Splat =
5314 s2s_DoSimdLoadSplat<int16x8, int16_t, uint32_t, memory_offset32_t>;
5315 static auto constexpr s2s_SimdS128Load16Splat_Idx64 =
5316 s2s_DoSimdLoadSplat<int16x8, int16_t, uint64_t, memory_offset64_t>;
5317 static auto constexpr s2s_SimdS128Load32Splat =
5318 s2s_DoSimdLoadSplat<int32x4, int32_t, uint32_t, memory_offset32_t>;
5319 static auto constexpr s2s_SimdS128Load32Splat_Idx64 =
5320 s2s_DoSimdLoadSplat<int32x4, int32_t, uint64_t, memory_offset64_t>;
5321 static auto constexpr s2s_SimdS128Load64Splat =
5322 s2s_DoSimdLoadSplat<int64x2, int64_t, uint32_t, memory_offset32_t>;
5323 static auto constexpr s2s_SimdS128Load64Splat_Idx64 =
5324 s2s_DoSimdLoadSplat<int64x2, int64_t, uint64_t, memory_offset64_t>;
5325
5326 template <typename s_type, typename wide_type, typename narrow_type,
5327 typename MemIdx, typename MemOffsetT>
5328 INSTRUCTION_HANDLER_FUNC s2s_DoSimdLoadExtend(
5329 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
5330 int64_t r0, double fp0) {
5331 static_assert(sizeof(wide_type) == sizeof(narrow_type) * 2,
5332 "size mismatch for wide and narrow types");
5333 uint8_t* memory_start = wasm_runtime->GetMemoryStart();
5334 uint64_t offset = Read<MemOffsetT>(code);
5335
5336 uint64_t index = pop<MemIdx>(sp, code, wasm_runtime);
5337 uint64_t effective_index = offset + index;
5338
5339 // Load 8 bytes and sign/zero extend to 16 bytes.
5340 if (V8_UNLIKELY(
5341 effective_index < index ||
5342 !base::IsInBounds<uint64_t>(effective_index, sizeof(uint64_t),
5343 wasm_runtime->GetMemorySize()))) {
5344 TRAP(TrapReason::kTrapMemOutOfBounds)
5345 }
5346
5347 uint8_t* address = memory_start + effective_index;
5348 uint64_t v =
5349 base::ReadUnalignedValue<uint64_t>(reinterpret_cast<Address>(address));
5350 constexpr int lanes = kSimd128Size / sizeof(wide_type);
5351 s_type s;
5352 for (int i = 0; i < lanes; i++) {
5353 uint8_t shift = i * (sizeof(narrow_type) * 8);
5354 narrow_type el = static_cast<narrow_type>(v >> shift);
5355 s.val[LANE(i, s)] = static_cast<wide_type>(el);
5356 }
5357 push<Simd128>(sp, code, wasm_runtime, Simd128(s));
5358
5359 NextOp();
5360 }
5361 static auto constexpr s2s_SimdS128Load8x8S =
5362 s2s_DoSimdLoadExtend<int16x8, int16_t, int8_t, uint32_t,
5363 memory_offset32_t>;
5364 static auto constexpr s2s_SimdS128Load8x8S_Idx64 =
5365 s2s_DoSimdLoadExtend<int16x8, int16_t, int8_t, uint64_t,
5366 memory_offset64_t>;
5367 static auto constexpr s2s_SimdS128Load8x8U =
5368 s2s_DoSimdLoadExtend<int16x8, uint16_t, uint8_t, uint32_t,
5369 memory_offset32_t>;
5370 static auto constexpr s2s_SimdS128Load8x8U_Idx64 =
5371 s2s_DoSimdLoadExtend<int16x8, uint16_t, uint8_t, uint64_t,
5372 memory_offset64_t>;
5373 static auto constexpr s2s_SimdS128Load16x4S =
5374 s2s_DoSimdLoadExtend<int32x4, int32_t, int16_t, uint32_t,
5375 memory_offset32_t>;
5376 static auto constexpr s2s_SimdS128Load16x4S_Idx64 =
5377 s2s_DoSimdLoadExtend<int32x4, int32_t, int16_t, uint64_t,
5378 memory_offset64_t>;
5379 static auto constexpr s2s_SimdS128Load16x4U =
5380 s2s_DoSimdLoadExtend<int32x4, uint32_t, uint16_t, uint32_t,
5381 memory_offset32_t>;
5382 static auto constexpr s2s_SimdS128Load16x4U_Idx64 =
5383 s2s_DoSimdLoadExtend<int32x4, uint32_t, uint16_t, uint64_t,
5384 memory_offset64_t>;
5385 static auto constexpr s2s_SimdS128Load32x2S =
5386 s2s_DoSimdLoadExtend<int64x2, int64_t, int32_t, uint32_t,
5387 memory_offset32_t>;
5388 static auto constexpr s2s_SimdS128Load32x2S_Idx64 =
5389 s2s_DoSimdLoadExtend<int64x2, int64_t, int32_t, uint64_t,
5390 memory_offset64_t>;
5391 static auto constexpr s2s_SimdS128Load32x2U =
5392 s2s_DoSimdLoadExtend<int64x2, uint64_t, uint32_t, uint32_t,
5393 memory_offset32_t>;
5394 static auto constexpr s2s_SimdS128Load32x2U_Idx64 =
5395 s2s_DoSimdLoadExtend<int64x2, uint64_t, uint32_t, uint64_t,
5396 memory_offset64_t>;
5397
5398 template <typename s_type, typename load_type, typename MemIdx,
5399 typename MemOffsetT>
5400 INSTRUCTION_HANDLER_FUNC s2s_DoSimdLoadZeroExtend(
5401 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
5402 int64_t r0, double fp0) {
5403 uint8_t* memory_start = wasm_runtime->GetMemoryStart();
5404 uint64_t offset = Read<MemOffsetT>(code);
5405
5406 uint64_t index = pop<MemIdx>(sp, code, wasm_runtime);
5407 uint64_t effective_index = offset + index;
5408
5409 // Load a single 32-bit or 64-bit element into the lowest bits of a v128
5410 // vector, and initialize all other bits of the v128 vector to zero.
5411 if (V8_UNLIKELY(
5412 effective_index < index ||
5413 !base::IsInBounds<uint64_t>(effective_index, sizeof(load_type),
5414 wasm_runtime->GetMemorySize()))) {
5415 TRAP(TrapReason::kTrapMemOutOfBounds)
5416 }
5417
5418 uint8_t* address = memory_start + effective_index;
5419 load_type v =
5420 base::ReadUnalignedValue<load_type>(reinterpret_cast<Address>(address));
5421 s_type s;
5422 // All lanes are 0.
5423 for (size_t i = 0; i < arraysize(s.val); i++) {
5424 s.val[LANE(i, s)] = 0;
5425 }
5426 // Lane 0 is set to the loaded value.
5427 s.val[LANE(0, s)] = v;
5428 push<Simd128>(sp, code, wasm_runtime, Simd128(s));
5429
5430 NextOp();
5431 }
5432 static auto constexpr s2s_SimdS128Load32Zero =
5433 s2s_DoSimdLoadZeroExtend<int32x4, uint32_t, uint32_t, memory_offset32_t>;
5434 static auto constexpr s2s_SimdS128Load32Zero_Idx64 =
5435 s2s_DoSimdLoadZeroExtend<int32x4, uint32_t, uint64_t, memory_offset64_t>;
5436 static auto constexpr s2s_SimdS128Load64Zero =
5437 s2s_DoSimdLoadZeroExtend<int64x2, uint64_t, uint32_t, memory_offset32_t>;
5438 static auto constexpr s2s_SimdS128Load64Zero_Idx64 =
5439 s2s_DoSimdLoadZeroExtend<int64x2, uint64_t, uint64_t, memory_offset64_t>;
5440
5441 template <typename s_type, typename memory_type, typename MemIdx = uint32_t,
5442 typename MemOffsetT = memory_offset32_t>
5443 INSTRUCTION_HANDLER_FUNC s2s_DoSimdLoadLane(
5444 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
5445 int64_t r0, double fp0) {
5446 s_type value = pop<Simd128>(sp, code, wasm_runtime).template to<s_type>();
5447
5448 uint8_t* memory_start = wasm_runtime->GetMemoryStart();
5449 uint64_t offset = Read<MemOffsetT>(code);
5450
5451 uint64_t index = pop<MemIdx>(sp, code, wasm_runtime);
5452 uint64_t effective_index = offset + index;
5453
5454 if (V8_UNLIKELY(
5455 effective_index < index ||
5456 !base::IsInBounds<uint64_t>(effective_index, sizeof(memory_type),
5457 wasm_runtime->GetMemorySize()))) {
5458 TRAP(TrapReason::kTrapMemOutOfBounds)
5459 }
5460
5461 uint8_t* address = memory_start + effective_index;
5462 memory_type loaded = base::ReadUnalignedValue<memory_type>(
5463 reinterpret_cast<Address>(address));
5464 uint16_t lane = Read<uint16_t>(code);
5465 value.val[LANE(lane, value)] = loaded;
5466 push<Simd128>(sp, code, wasm_runtime, Simd128(value));
5467
5468 NextOp();
5469 }
5470 static auto constexpr s2s_SimdS128Load8Lane =
5471 s2s_DoSimdLoadLane<int8x16, int8_t>;
5472 static auto constexpr s2s_SimdS128Load16Lane =
5473 s2s_DoSimdLoadLane<int16x8, int16_t>;
5474 static auto constexpr s2s_SimdS128Load32Lane =
5475 s2s_DoSimdLoadLane<int32x4, int32_t>;
5476 static auto constexpr s2s_SimdS128Load64Lane =
5477 s2s_DoSimdLoadLane<int64x2, int64_t>;
5478 static auto constexpr s2s_SimdS128Load8Lane_Idx64 =
5479 s2s_DoSimdLoadLane<int8x16, int8_t, uint64_t, memory_offset64_t>;
5480 static auto constexpr s2s_SimdS128Load16Lane_Idx64 =
5481 s2s_DoSimdLoadLane<int16x8, int16_t, uint64_t, memory_offset64_t>;
5482 static auto constexpr s2s_SimdS128Load32Lane_Idx64 =
5483 s2s_DoSimdLoadLane<int32x4, int32_t, uint64_t, memory_offset64_t>;
5484 static auto constexpr s2s_SimdS128Load64Lane_Idx64 =
5485 s2s_DoSimdLoadLane<int64x2, int64_t, uint64_t, memory_offset64_t>;
5486
5487 template <typename s_type, typename memory_type, typename MemIdx = uint32_t,
5488 typename MemOffsetT = memory_offset32_t>
5489 INSTRUCTION_HANDLER_FUNC s2s_DoSimdStoreLane(
5490 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
5491 int64_t r0, double fp0) {
5492 // Pop the vector operand, extract the requested lane, then store it to
5493 // memory.
5493 s_type value = pop<Simd128>(sp, code, wasm_runtime).template to<s_type>();
5494
5495 uint8_t* memory_start = wasm_runtime->GetMemoryStart();
5496 uint64_t offset = Read<MemOffsetT>(code);
5497
5498 uint64_t index = pop<MemIdx>(sp, code, wasm_runtime);
5499 uint64_t effective_index = offset + index;
5500
5501 if (V8_UNLIKELY(
5502 effective_index < index ||
5503 !base::IsInBounds<uint64_t>(effective_index, sizeof(memory_type),
5504 wasm_runtime->GetMemorySize()))) {
5505 TRAP(TrapReason::kTrapMemOutOfBounds)
5506 }
5507 uint8_t* address = memory_start + effective_index;
5508
5509 uint16_t lane = Read<uint16_t>(code);
5510 memory_type res = value.val[LANE(lane, value)];
5511 base::WriteUnalignedValue<memory_type>(reinterpret_cast<Address>(address),
5512 res);
5513
5514 NextOp();
5515 }
5516 static auto constexpr s2s_SimdS128Store8Lane =
5517 s2s_DoSimdStoreLane<int8x16, int8_t>;
5518 static auto constexpr s2s_SimdS128Store16Lane =
5519 s2s_DoSimdStoreLane<int16x8, int16_t>;
5520 static auto constexpr s2s_SimdS128Store32Lane =
5521 s2s_DoSimdStoreLane<int32x4, int32_t>;
5522 static auto constexpr s2s_SimdS128Store64Lane =
5523 s2s_DoSimdStoreLane<int64x2, int64_t>;
5524 static auto constexpr s2s_SimdS128Store8Lane_Idx64 =
5525 s2s_DoSimdStoreLane<int8x16, int8_t, uint64_t, memory_offset64_t>;
5526 static auto constexpr s2s_SimdS128Store16Lane_Idx64 =
5527 s2s_DoSimdStoreLane<int16x8, int16_t, uint64_t, memory_offset64_t>;
5528 static auto constexpr s2s_SimdS128Store32Lane_Idx64 =
5529 s2s_DoSimdStoreLane<int32x4, int32_t, uint64_t, memory_offset64_t>;
5530 static auto constexpr s2s_SimdS128Store64Lane_Idx64 =
5531 s2s_DoSimdStoreLane<int64x2, int64_t, uint64_t, memory_offset64_t>;
5532
5533 template <typename DstSimdType, typename SrcSimdType, typename Wide,
5534 typename Narrow>
5535 INSTRUCTION_HANDLER_FUNC s2s_DoSimdExtAddPairwise(
5536 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
5537 int64_t r0, double fp0) {
5538 constexpr int lanes = kSimd128Size / sizeof(DstSimdType::val[0]);
5539 auto v = pop<Simd128>(sp, code, wasm_runtime).template to<SrcSimdType>();
5540 DstSimdType res;
5541 for (int i = 0; i < lanes; ++i) {
5542 res.val[LANE(i, res)] =
5543 AddLong<Wide>(static_cast<Narrow>(v.val[LANE(i * 2, v)]),
5544 static_cast<Narrow>(v.val[LANE(i * 2 + 1, v)]));
5545 }
5546 push<Simd128>(sp, code, wasm_runtime, Simd128(res));
5547
5548 NextOp();
5549 }
5550 static auto constexpr s2s_SimdI32x4ExtAddPairwiseI16x8S =
5551 s2s_DoSimdExtAddPairwise<int32x4, int16x8, int32_t, int16_t>;
5552 static auto constexpr s2s_SimdI32x4ExtAddPairwiseI16x8U =
5553 s2s_DoSimdExtAddPairwise<int32x4, int16x8, uint32_t, uint16_t>;
5554 static auto constexpr s2s_SimdI16x8ExtAddPairwiseI8x16S =
5555 s2s_DoSimdExtAddPairwise<int16x8, int8x16, int16_t, int8_t>;
5556 static auto constexpr s2s_SimdI16x8ExtAddPairwiseI8x16U =
5557 s2s_DoSimdExtAddPairwise<int16x8, int8x16, uint16_t, uint8_t>;
5558
5559 ////////////////////////////////////////////////////////////////////////////
5560
5561 // Allocate, initialize and throw a new exception. The exception values are
5562 // being popped off the operand stack.
5563 INSTRUCTION_HANDLER_FUNC s2s_Throw(const uint8_t* code, uint32_t* sp,
5564 WasmInterpreterRuntime* wasm_runtime,
5565 int64_t r0, double fp0) {
5566 Isolate* isolate = wasm_runtime->GetIsolate();
5567 {
5568 HandleScope handle_scope(isolate); // Avoid leaking handles.
5569
5570 uint32_t tag_index = Read<int32_t>(code);
5571
5572 DirectHandle<WasmExceptionPackage> exception_object =
5573 wasm_runtime->CreateWasmExceptionPackage(tag_index);
5574 DirectHandle<FixedArray> encoded_values = Cast<FixedArray>(
5575 WasmExceptionPackage::GetExceptionValues(isolate, exception_object));
5576
5577 // Encode the exception values on the operand stack into the exception
5578 // package allocated above.
5579 const WasmTagSig* sig = wasm_runtime->GetWasmTag(tag_index).sig;
5580 uint32_t encoded_index = 0;
5581 for (size_t index = 0; index < sig->parameter_count(); index++) {
5582 switch (sig->GetParam(index).kind()) {
5583 case kI32: {
5584 uint32_t u32 = pop<uint32_t>(sp, code, wasm_runtime);
5585 EncodeI32ExceptionValue(encoded_values, &encoded_index, u32);
5586 break;
5587 }
5588 case kF32: {
5589 float f32 = pop<float>(sp, code, wasm_runtime);
5590 EncodeI32ExceptionValue(encoded_values, &encoded_index,
5591 *reinterpret_cast<uint32_t*>(&f32));
5592 break;
5593 }
5594 case kI64: {
5595 uint64_t u64 = pop<uint64_t>(sp, code, wasm_runtime);
5596 EncodeI64ExceptionValue(encoded_values, &encoded_index, u64);
5597 break;
5598 }
5599 case kF64: {
5600 double f64 = pop<double>(sp, code, wasm_runtime);
5601 EncodeI64ExceptionValue(encoded_values, &encoded_index,
5602 *reinterpret_cast<uint64_t*>(&f64));
5603 break;
5604 }
5605 case kS128: {
5606 int32x4 s128 = pop<Simd128>(sp, code, wasm_runtime).to_i32x4();
5607 EncodeI32ExceptionValue(encoded_values, &encoded_index,
5608 s128.val[0]);
5609 EncodeI32ExceptionValue(encoded_values, &encoded_index,
5610 s128.val[1]);
5611 EncodeI32ExceptionValue(encoded_values, &encoded_index,
5612 s128.val[2]);
5613 EncodeI32ExceptionValue(encoded_values, &encoded_index,
5614 s128.val[3]);
5615 break;
5616 }
5617 case kRef:
5618 case kRefNull: {
5619 WasmRef ref = pop<WasmRef>(sp, code, wasm_runtime);
5620 if (IsWasmNull(*ref, isolate)) {
5621 ref = direct_handle(ReadOnlyRoots(isolate).null_value(), isolate);
5622 }
5623 encoded_values->set(encoded_index++, *ref);
5624 break;
5625 }
5626 default:
5627 UNREACHABLE();
5628 }
5629 }
5630
5631 wasm_runtime->ThrowException(code, sp, *exception_object);
5632 }
5633 NextOp();
5634 }
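// [Editorial sketch, not part of the V8 source] The Encode*ExceptionValue
// helpers pack values into the FixedArray as Smis, assuming the 16-bits-
// per-slot layout used by WasmExceptionPackage elsewhere in V8:
//   i32 -> 2 slots, i64 -> 4 slots, f32/f64 via their raw bit patterns,
//   s128 -> 8 slots (four i32 lanes, two slots each),
// while kRef/kRefNull values are stored directly as tagged pointers.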
5635
5636 INSTRUCTION_HANDLER_FUNC s2s_Rethrow(const uint8_t* code, uint32_t* sp,
5637 WasmInterpreterRuntime* wasm_runtime,
5638 int64_t r0, double fp0) {
5639 uint32_t catch_block_index = Read<int32_t>(code);
5640 wasm_runtime->RethrowException(code, sp, catch_block_index);
5641
5642 NextOp();
5643 }
5644
5645 ////////////////////////////////////////////////////////////////////////////
5646 // GC instruction handlers.
5647
5648 INSTRUCTION_HANDLER_FUNC s2s_BranchOnNull(
5649 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
5650 int64_t r0, double fp0) {
5651 // TODO(paolosev@microsoft.com): Implement peek<T>?
5652 WasmRef ref = pop<WasmRef>(sp, code, wasm_runtime);
5653
5654 const uint32_t ref_bitfield = Read<int32_t>(code);
5655 ValueType ref_type = ValueType::FromRawBitField(ref_bitfield);
5656
5657 push<WasmRef>(sp, code, wasm_runtime, ref);
5658
5659 int32_t if_null_offset = Read<int32_t>(code);
5660 if (wasm_runtime->IsNullTypecheck(ref, ref_type)) {
5661 // If condition is true (ref is null), jump to the target branch.
5662 code += (if_null_offset - kCodeOffsetSize);
5663 }
5664
5665 NextOp();
5666 }
5667
5668 /*
5669 * Notice that in s2s_BranchOnNullWithParams the branch happens when the
5670 * condition is false, not true, as follows:
5671 *
5672 * > s2s_BranchOnNullWithParams
5673 * pop - ref
5674 * i32: ref value_type
5675 * push - ref
5676 * branch_offset (if NOT NULL) ----+
5677 * > s2s_CopySlot |
5678 * .... |
5679 * > s2s_Branch (gets here if NULL) |
5680 * branch_offset |
5681 * > (next instruction) <---------------+
5682 */
5683 INSTRUCTION_HANDLER_FUNC s2s_BranchOnNullWithParams(
5684 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
5685 int64_t r0, double fp0) {
5686 // TODO(paolosev@microsoft.com): Implement peek<T>?
5687 WasmRef ref = pop<WasmRef>(sp, code, wasm_runtime);
5688
5689 const uint32_t ref_bitfield = Read<int32_t>(code);
5690 ValueType ref_type = ValueType::FromRawBitField(ref_bitfield);
5691
5692 push<WasmRef>(sp, code, wasm_runtime, ref);
5693
5694 int32_t if_null_offset = Read<int32_t>(code);
5695 if (!wasm_runtime->IsNullTypecheck(ref, ref_type)) {
5696 // If condition is false (ref is not null), jump to the false branch.
5697 code += (if_null_offset - kCodeOffsetSize);
5698 }
5699
5700 NextOp();
5701 }
5702
5703 INSTRUCTION_HANDLER_FUNC s2s_BranchOnNonNull(
5704 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
5705 int64_t r0, double fp0) {
5706 // TODO(paolosev@microsoft.com): Implement peek<T>?
5707 WasmRef ref = pop<WasmRef>(sp, code, wasm_runtime);
5708
5709 const uint32_t ref_bitfield = Read<int32_t>(code);
5710 ValueType ref_type = ValueType::FromRawBitField(ref_bitfield);
5711
5712 push<WasmRef>(sp, code, wasm_runtime, ref);
5713
5714 int32_t if_non_null_offset = Read<int32_t>(code);
5715 if (!wasm_runtime->IsNullTypecheck(ref, ref_type)) {
5716 // If condition is true (ref is not null), jump to the target branch.
5717 code += (if_non_null_offset - kCodeOffsetSize);
5718 }
5719
5720 NextOp();
5721 }
5722
5723 INSTRUCTION_HANDLER_FUNC s2s_BranchOnNonNullWithParams(
5724 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
5725 int64_t r0, double fp0) {
5726 // TODO(paolosev@microsoft.com): Implement peek<T>?
5727 WasmRef ref = pop<WasmRef>(sp, code, wasm_runtime);
5728
5729 const uint32_t ref_bitfield = Read<int32_t>(code);
5730 ValueType ref_type = ValueType::FromRawBitField(ref_bitfield);
5731
5732 push<WasmRef>(sp, code, wasm_runtime, ref);
5733
5734 int32_t if_non_null_offset = Read<int32_t>(code);
5735 if (wasm_runtime->IsNullTypecheck(ref, ref_type)) {
5736 // If condition is false (ref is null), jump to the false branch.
5737 code += (if_non_null_offset - kCodeOffsetSize);
5738 }
5739
5740 NextOp();
5741 }
5742
5743 static bool DoRefCast(WasmRef ref, ValueType ref_type, HeapType target_type,
5744 bool null_succeeds,
5745 WasmInterpreterRuntime* wasm_runtime) {
5746 if (target_type.is_index()) {
5747 DirectHandle<Map> rtt =
5748 wasm_runtime->RttCanon(target_type.ref_index().index);
5749 return wasm_runtime->SubtypeCheck(ref, ref_type, rtt,
5750 target_type.ref_index(), null_succeeds);
5751 } else {
5752 switch (target_type.representation()) {
5753 case HeapType::kEq:
5754 return wasm_runtime->RefIsEq(ref, ref_type, null_succeeds);
5755 case HeapType::kI31:
5756 return wasm_runtime->RefIsI31(ref, ref_type, null_succeeds);
5757 case HeapType::kStruct:
5758 return wasm_runtime->RefIsStruct(ref, ref_type, null_succeeds);
5759 case HeapType::kArray:
5760 return wasm_runtime->RefIsArray(ref, ref_type, null_succeeds);
5761 case HeapType::kString:
5762 return wasm_runtime->RefIsString(ref, ref_type, null_succeeds);
5763 case HeapType::kNone:
5764 case HeapType::kNoExtern:
5765 case HeapType::kNoFunc:
5766 case HeapType::kNoExn:
5767 return wasm_runtime->IsNullTypecheck(ref, ref_type);
5768 case HeapType::kAny:
5769 // Any may never need a cast as it is either implicitly convertible or
5770 // never convertible for any given type.
5771 default:
5772 UNREACHABLE();
5773 }
5774 }
5775 }
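// [Editorial note, not part of the V8 source] DoRefCast splits the cast
// logic in two: casts to a concrete (indexed) type fetch the canonical
// RTT and run a subtype check, while casts to abstract heap types map to
// dedicated predicates (eq/i31/struct/array/string), and the bottom types
// (none/noextern/nofunc/...) only accept null. The null_succeeds flag
// implements the "ref.cast null" / "ref.test null" variants, which treat
// a null reference as passing instead of failing.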
5776
5777 /*
5778 * Notice that in s2s_BranchOnCast the branch happens when the condition is
5779 * false, not true, as follows:
5780 *
5781 * > s2s_BranchOnCast
5782 * i32: null_succeeds
5783 * i32: target_type HeapType representation
5784 * pop - ref
5785 * i32: ref value_type
5786 * push - ref
5787 * branch_offset (if CAST FAILS) --------+
5788 * > s2s_CopySlot |
5789 * .... |
5790 * > s2s_Branch (gets here if CAST SUCCEEDS) |
5791 * branch_offset |
5792 * > (next instruction) <--------------------+
5793 */
5794 INSTRUCTION_HANDLER_FUNC s2s_BranchOnCast(
5795 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
5796 int64_t r0, double fp0) {
5797 bool null_succeeds = Read<int32_t>(code);
5798
5799 HeapType target_type =
5800 HeapType::FromBits(static_cast<uint32_t>(Read<int32_t>(code)));
5801
5802 WasmRef ref = pop<WasmRef>(sp, code, wasm_runtime);
5803 const uint32_t ref_bitfield = Read<int32_t>(code);
5804 ValueType ref_type = ValueType::FromRawBitField(ref_bitfield);
5805 push<WasmRef>(sp, code, wasm_runtime, ref);
5806 int32_t no_branch_offset = Read<int32_t>(code);
5807
5808 if (!DoRefCast(ref, ref_type, target_type, null_succeeds, wasm_runtime)) {
5809 // If condition is not true, jump to the 'false' branch.
5810 code += (no_branch_offset - kCodeOffsetSize);
5811 }
5812
5813 NextOp();
5814 }
5815
5816 /*
5817 * Notice that in s2s_BranchOnCastFail the branch happens when the condition
5818 * is false, not true, as follows:
5819 *
5820 * > s2s_BranchOnCastFail
5821 * i32: null_succeeds
5822 * i32: target_type HeapType representation
5823 * pop - ref
5825 * i32: ref value_type
5825 * push - ref
5826 * branch_offset (if CAST SUCCEEDS) --+
5827 * > s2s_CopySlot |
5828 * .... |
5829 * > s2s_Branch (gets here if CAST FAILS) |
5830 * branch_offset |
5831 * > (next instruction) <-----------------+
5832 */
5833 INSTRUCTION_HANDLER_FUNC s2s_BranchOnCastFail(
5834 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
5835 int64_t r0, double fp0) {
5836 bool null_succeeds = Read<int32_t>(code);
5837
5838 HeapType target_type =
5839 HeapType::FromBits(static_cast<uint32_t>(Read<int32_t>(code)));
5840
5841 WasmRef ref = pop<WasmRef>(sp, code, wasm_runtime);
5842 const uint32_t ref_bitfield = Read<int32_t>(code);
5843 ValueType ref_type = ValueType::FromRawBitField(ref_bitfield);
5844 push<WasmRef>(sp, code, wasm_runtime, ref);
5845 int32_t branch_offset = Read<int32_t>(code);
5846
5847 if (DoRefCast(ref, ref_type, target_type, null_succeeds, wasm_runtime)) {
5848 // If condition is true, jump to the 'true' branch.
5849 code += (branch_offset - kCodeOffsetSize);
5850 }
5851
5852 NextOp();
5853 }
5854
5855 INSTRUCTION_HANDLER_FUNC s2s_CallRef(const uint8_t* code, uint32_t* sp,
5856 WasmInterpreterRuntime* wasm_runtime,
5857 int64_t r0, double fp0) {
5858 WasmRef func_ref = pop<WasmRef>(sp, code, wasm_runtime);
5859 uint32_t sig_index = Read<int32_t>(code);
5860 uint32_t stack_pos = Read<int32_t>(code);
5861 slot_offset_t slot_offset = Read<slot_offset_t>(code);
5862 uint32_t ref_stack_fp_offset = Read<int32_t>(code);
5863 slot_offset_t return_slot_offset = 0;
5864#ifdef V8_ENABLE_DRUMBRAKE_TRACING
5865 if (v8_flags.trace_drumbrake_execution) {
5866 return_slot_offset = Read<slot_offset_t>(code);
5867 }
5868#endif // V8_ENABLE_DRUMBRAKE_TRACING
5869
5870 if (V8_UNLIKELY(wasm_runtime->IsRefNull(func_ref))) {
5871 TRAP(TrapReason::kTrapNullDereference)
5872 }
5873
5874 // This can trap.
5875 wasm_runtime->ExecuteCallRef(code, func_ref, sig_index, stack_pos, sp,
5876 ref_stack_fp_offset, slot_offset,
5877 return_slot_offset, false);
5878 NextOp();
5879 }
5880
5881 INSTRUCTION_HANDLER_FUNC s2s_ReturnCallRef(
5882 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
5883 int64_t r0, double fp0) {
5884 slot_offset_t rets_size = Read<slot_offset_t>(code);
5885 slot_offset_t args_size = Read<slot_offset_t>(code);
5886 uint32_t rets_refs = Read<int32_t>(code);
5887 uint32_t args_refs = Read<int32_t>(code);
5888
5889 WasmRef func_ref = pop<WasmRef>(sp, code, wasm_runtime);
5890 uint32_t sig_index = Read<int32_t>(code);
5891 uint32_t stack_pos = Read<int32_t>(code);
5892 slot_offset_t slot_offset = Read<slot_offset_t>(code);
5893 uint32_t ref_stack_fp_offset = Read<int32_t>(code);
5894 slot_offset_t return_slot_offset = 0;
5895#ifdef V8_ENABLE_DRUMBRAKE_TRACING
5896 if (v8_flags.trace_drumbrake_execution) {
5897 return_slot_offset = Read<slot_offset_t>(code);
5898 }
5899#endif // V8_ENABLE_DRUMBRAKE_TRACING
5900
5901 if (V8_UNLIKELY(wasm_runtime->IsRefNull(func_ref))) {
5902 TRAP(TrapReason::kTrapNullDereference)
5903 }
5904
5905 // Moves back the stack frame to the caller stack frame.
5906 wasm_runtime->UnwindCurrentStackFrame(sp, slot_offset, rets_size, args_size,
5907 rets_refs, args_refs,
5908 ref_stack_fp_offset);
5909
5910 // TODO(paolosev@microsoft.com) - This call adds a new C++ stack frame,
5911 // which is not ideal in a tail-call.
5912 wasm_runtime->ExecuteCallRef(code, func_ref, sig_index, stack_pos, sp, 0, 0,
5913 return_slot_offset, true);
5914
5915 NextOp();
5916 }
5917
5918 static void StoreRefIntoMemory(Tagged<HeapObject> host, Address dst_addr,
5919 uint32_t offset, Tagged<Object> value,
5920 WriteBarrierMode mode) {
5921 DCHECK_EQ(dst_addr, host.ptr() + offset - kHeapObjectTag);
5922
5923 // Only stores the lower 32 bits.
5924 base::WriteUnalignedValue<Tagged_t>(
5925 dst_addr, V8HeapCompressionScheme::CompressObject(value.ptr()));
5926
5927 // Need to generate a GC write barrier.
5928 CONDITIONAL_WRITE_BARRIER(host, offset, value, mode);
5929 }
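// [Editorial note, not part of the V8 source] Under pointer compression a
// tagged field is a 32-bit offset from the heap cage base, which is why a
// single Tagged_t store suffices above. The conditional write barrier
// keeps the GC's remembered sets correct; callers initializing a freshly
// allocated object that nothing can observe yet pass SKIP_WRITE_BARRIER,
// as in the struct/array constructors below.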
5930
5931 INSTRUCTION_HANDLER_FUNC s2s_StructNew(const uint8_t* code, uint32_t* sp,
5932 WasmInterpreterRuntime* wasm_runtime,
5933 int64_t r0, double fp0) {
5934 uint32_t index = Read<int32_t>(code);
5935 std::pair<DirectHandle<WasmStruct>, const StructType*> struct_new_result =
5936 wasm_runtime->StructNewUninitialized(index);
5937 DirectHandle<HeapObject> struct_obj = struct_new_result.first;
5938 const StructType* struct_type = struct_new_result.second;
5939
5940 {
5941 // The new struct is uninitialized, which means GC might fail until
5942 // initialization.
5943 DisallowGarbageCollection no_gc;
5944
5945 for (uint32_t i = struct_type->field_count(); i > 0;) {
5946 i--;
5947 int field_offset = StructFieldOffset(struct_type, i);
5948 Address field_addr = (*struct_obj).ptr() + field_offset;
5949
5950 ValueKind kind = struct_type->field(i).kind();
5951 switch (kind) {
5952 case kI8:
5953 *reinterpret_cast<int8_t*>(field_addr) =
5954 pop<int32_t>(sp, code, wasm_runtime);
5955 break;
5956 case kI16:
5957 base::WriteUnalignedValue<int16_t>(
5958 field_addr, pop<int32_t>(sp, code, wasm_runtime));
5959 break;
5960 case kI32:
5961 base::WriteUnalignedValue<int32_t>(
5962 field_addr, pop<int32_t>(sp, code, wasm_runtime));
5963 break;
5964 case kI64:
5965 base::WriteUnalignedValue<int64_t>(
5966 field_addr, pop<int64_t>(sp, code, wasm_runtime));
5967 break;
5968 case kF32:
5969 base::WriteUnalignedValue<float>(
5970 field_addr, pop<float>(sp, code, wasm_runtime));
5971 break;
5972 case kF64:
5973 base::WriteUnalignedValue<double>(
5974 field_addr, pop<double>(sp, code, wasm_runtime));
5975 break;
5976 case kS128:
5977 base::WriteUnalignedValue<Simd128>(
5978 field_addr, pop<Simd128>(sp, code, wasm_runtime));
5979 break;
5980 case kRef:
5981 case kRefNull: {
5982 WasmRef ref = pop<WasmRef>(sp, code, wasm_runtime);
5983 StoreRefIntoMemory(
5984 *struct_obj, field_addr,
5985 field_offset + kHeapObjectTag, // field_offset is offset into
5986 // tagged object.
5987 *ref, SKIP_WRITE_BARRIER);
5988 break;
5989 }
5990 default:
5991 UNREACHABLE();
5992 }
5993 }
5994 }
5995
5996 push<WasmRef>(sp, code, wasm_runtime, struct_obj);
5997
5998 NextOp();
5999 }
6000
6001 INSTRUCTION_HANDLER_FUNC s2s_StructNewDefault(
6002 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
6003 int64_t r0, double fp0) {
6004 uint32_t index = Read<int32_t>(code);
6005 std::pair<DirectHandle<WasmStruct>, const StructType*> struct_new_result =
6006 wasm_runtime->StructNewUninitialized(index);
6007 DirectHandle<HeapObject> struct_obj = struct_new_result.first;
6008 const StructType* struct_type = struct_new_result.second;
6009
6010 {
6011 // The new struct is uninitialized, which means GC might fail until
6012 // initialization.
6013 DisallowGarbageCollection no_gc;
6014
6015 for (uint32_t i = struct_type->field_count(); i > 0;) {
6016 i--;
6017 int field_offset = StructFieldOffset(struct_type, i);
6018 Address field_addr = (*struct_obj).ptr() + field_offset;
6019
6020 const ValueType value_type = struct_type->field(i);
6021 const ValueKind kind = value_type.kind();
6022 switch (kind) {
6023 case kI8:
6024 *reinterpret_cast<int8_t*>(field_addr) = int8_t{};
6025 break;
6026 case kI16:
6027 base::WriteUnalignedValue<int16_t>(field_addr, int16_t{});
6028 break;
6029 case kI32:
6030 base::WriteUnalignedValue<int32_t>(field_addr, int32_t{});
6031 break;
6032 case kI64:
6033 base::WriteUnalignedValue<int64_t>(field_addr, int64_t{});
6034 break;
6035 case kF32:
6036 base::WriteUnalignedValue<float>(field_addr, float{});
6037 break;
6038 case kF64:
6039 base::WriteUnalignedValue<double>(field_addr, double{});
6040 break;
6041 case kS128:
6042 base::WriteUnalignedValue<Simd128>(field_addr, Simd128{});
6043 break;
6044 case kRef:
6045 case kRefNull:
6046 StoreRefIntoMemory(
6047 *struct_obj, field_addr,
6048 field_offset + kHeapObjectTag, // field_offset is offset into
6049 // tagged object.
6050 wasm_runtime->GetNullValue(value_type), SKIP_WRITE_BARRIER);
6051 break;
6052 default:
6053 UNREACHABLE();
6054 }
6055 }
6056 }
6057
6058 push<WasmRef>(sp, code, wasm_runtime, struct_obj);
6059
6060 NextOp();
6061 }
6062
6063 template <typename T, typename U = T>
6064 INSTRUCTION_HANDLER_FUNC s2s_StructGet(const uint8_t* code, uint32_t* sp,
6065 WasmInterpreterRuntime* wasm_runtime,
6066 int64_t r0, double fp0) {
6067 WasmRef struct_obj = pop<WasmRef>(sp, code, wasm_runtime);
6068
6069 if (V8_UNLIKELY(wasm_runtime->IsRefNull(struct_obj))) {
6070 TRAP(TrapReason::kTrapNullDereference)
6071 }
6072 int offset = Read<int32_t>(code);
6073 Address field_addr = (*struct_obj).ptr() + offset;
6074 push<T>(sp, code, wasm_runtime, base::ReadUnalignedValue<U>(field_addr));
6075
6076 NextOp();
6077 }
6078 static auto constexpr s2s_I8SStructGet = s2s_StructGet<int32_t, int8_t>;
6079 static auto constexpr s2s_I8UStructGet = s2s_StructGet<uint32_t, uint8_t>;
6080 static auto constexpr s2s_I16SStructGet = s2s_StructGet<int32_t, int16_t>;
6081 static auto constexpr s2s_I16UStructGet = s2s_StructGet<uint32_t, uint16_t>;
6082 static auto constexpr s2s_I32StructGet = s2s_StructGet<int32_t>;
6083 static auto constexpr s2s_I64StructGet = s2s_StructGet<int64_t>;
6084 static auto constexpr s2s_F32StructGet = s2s_StructGet<float>;
6085 static auto constexpr s2s_F64StructGet = s2s_StructGet<double>;
6086 static auto constexpr s2s_S128StructGet = s2s_StructGet<Simd128>;
6087
6088 INSTRUCTION_HANDLER_FUNC s2s_RefStructGet(
6089 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
6090 int64_t r0, double fp0) {
6091 WasmRef struct_obj = pop<WasmRef>(sp, code, wasm_runtime);
6092 if (V8_UNLIKELY(wasm_runtime->IsRefNull(struct_obj))) {
6093 TRAP(TrapReason::kTrapNullDereference)
6094 }
6095 int offset = Read<int32_t>(code);
6096 Address field_addr = (*struct_obj).ptr() + offset;
6097 // DrumBrake expects pointer compression.
6098 Tagged_t ref_tagged = base::ReadUnalignedValue<uint32_t>(field_addr);
6099 Isolate* isolate = wasm_runtime->GetIsolate();
6100 Tagged<Object> ref_uncompressed(
6101 V8HeapCompressionScheme::DecompressTagged(isolate, ref_tagged));
6102 WasmRef ref_handle = handle(ref_uncompressed, isolate);
6103 push<WasmRef>(sp, code, wasm_runtime, ref_handle);
6104
6105 NextOp();
6106 }
6107
6108 template <typename T, typename U = T>
6109 INSTRUCTION_HANDLER_FUNC s2s_StructSet(const uint8_t* code, uint32_t* sp,
6110 WasmInterpreterRuntime* wasm_runtime,
6111 int64_t r0, double fp0) {
6112 int offset = Read<int32_t>(code);
6113 T value = pop<T>(sp, code, wasm_runtime);
6114 WasmRef struct_obj = pop<WasmRef>(sp, code, wasm_runtime);
6115 if (V8_UNLIKELY(wasm_runtime->IsRefNull(struct_obj))) {
6116 TRAP(TrapReason::kTrapNullDereference)
6117 }
6118 Address field_addr = (*struct_obj).ptr() + offset;
6119 base::WriteUnalignedValue<U>(field_addr, value);
6120
6121 NextOp();
6122 }
6123 static auto constexpr s2s_I8StructSet = s2s_StructSet<int32_t, int8_t>;
6124 static auto constexpr s2s_I16StructSet = s2s_StructSet<int32_t, int16_t>;
6125 static auto constexpr s2s_I32StructSet = s2s_StructSet<int32_t>;
6126 static auto constexpr s2s_I64StructSet = s2s_StructSet<int64_t>;
6127 static auto constexpr s2s_F32StructSet = s2s_StructSet<float>;
6128 static auto constexpr s2s_F64StructSet = s2s_StructSet<double>;
6129 static auto constexpr s2s_S128StructSet = s2s_StructSet<Simd128>;
6130
6131 INSTRUCTION_HANDLER_FUNC s2s_RefStructSet(
6132 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
6133 int64_t r0, double fp0) {
6134 int field_offset = Read<int32_t>(code);
6135 WasmRef ref = pop<WasmRef>(sp, code, wasm_runtime);
6136 WasmRef struct_obj = pop<WasmRef>(sp, code, wasm_runtime);
6137 if (V8_UNLIKELY(wasm_runtime->IsRefNull(struct_obj))) {
6138 TRAP(TrapReason::kTrapNullDereference)
6139 }
6140 Address field_addr = (*struct_obj).ptr() + field_offset;
6141 StoreRefIntoMemory(
6142 Cast<HeapObject>(*struct_obj), field_addr,
6143 field_offset +
6144 kHeapObjectTag, // field_offset is offset into tagged object.
6145 *ref, UPDATE_WRITE_BARRIER);
6146
6147 NextOp();
6148 }
6149
6150 template <typename T, typename U = T>
6151 INSTRUCTION_HANDLER_FUNC s2s_ArrayNew(const uint8_t* code, uint32_t* sp,
6152 WasmInterpreterRuntime* wasm_runtime,
6153 int64_t r0, double fp0) {
6154 const uint32_t array_index = Read<int32_t>(code);
6155 const uint32_t elem_count = pop<int32_t>(sp, code, wasm_runtime);
6156 const T value = pop<T>(sp, code, wasm_runtime);
6157
6158 std::pair<DirectHandle<WasmArray>, const ArrayType*> array_new_result =
6159 wasm_runtime->ArrayNewUninitialized(elem_count, array_index);
6160 DirectHandle<WasmArray> array = array_new_result.first;
6161 if (V8_UNLIKELY(array.is_null())) {
6162 TRAP(TrapReason::kTrapArrayTooLarge)
6163 }
6164
6165 {
6166 // The new array is uninitialized, which means GC might fail until
6167 // initialization.
6168 DisallowGarbageCollection no_gc;
6169
6170 const ArrayType* array_type = array_new_result.second;
6171 const ValueKind kind = array_type->element_type().kind();
6172 const uint32_t element_size = value_kind_size(kind);
6173 DCHECK_EQ(element_size, sizeof(U));
6174
6175 Address element_addr = array->ElementAddress(0);
6176 for (uint32_t i = 0; i < elem_count; i++) {
6177 base::WriteUnalignedValue<U>(element_addr, value);
6178 element_addr += element_size;
6179 }
6180 }
6181
6182 push<WasmRef>(sp, code, wasm_runtime, array);
6183
6184 NextOp();
6185 }
6186 static auto constexpr s2s_I8ArrayNew = s2s_ArrayNew<int32_t, int8_t>;
6187 static auto constexpr s2s_I16ArrayNew = s2s_ArrayNew<int32_t, int16_t>;
6188 static auto constexpr s2s_I32ArrayNew = s2s_ArrayNew<int32_t>;
6189 static auto constexpr s2s_I64ArrayNew = s2s_ArrayNew<int64_t>;
6190 static auto constexpr s2s_F32ArrayNew = s2s_ArrayNew<float>;
6191 static auto constexpr s2s_F64ArrayNew = s2s_ArrayNew<double>;
6192 static auto constexpr s2s_S128ArrayNew = s2s_ArrayNew<Simd128>;
6193
6194 INSTRUCTION_HANDLER_FUNC s2s_RefArrayNew(
6195 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
6196 int64_t r0, double fp0) {
6197 const uint32_t array_index = Read<int32_t>(code);
6198 const uint32_t elem_count = pop<int32_t>(sp, code, wasm_runtime);
6199 const WasmRef value = pop<WasmRef>(sp, code, wasm_runtime);
6200
6201 std::pair<DirectHandle<WasmArray>, const ArrayType*> array_new_result =
6202 wasm_runtime->ArrayNewUninitialized(elem_count, array_index);
6203 DirectHandle<WasmArray> array = array_new_result.first;
6204 if (V8_UNLIKELY(array.is_null())) {
6205 TRAP(TrapReason::kTrapArrayTooLarge)
6206 }
6207
6208#if DEBUG
6209 const ArrayType* array_type = array_new_result.second;
6210 DCHECK_EQ(value_kind_size(array_type->element_type().kind()),
6211 sizeof(Tagged_t));
6212#endif
6213
6214 {
6215 // The new array is uninitialized, which means GC might fail until
6216 // initialization.
6217 DisallowGarbageCollection no_gc;
6218
6219 Address element_addr = array->ElementAddress(0);
6220 uint32_t element_offset = array->element_offset(0);
6221 for (uint32_t i = 0; i < elem_count; i++) {
6222 StoreRefIntoMemory(Cast<HeapObject>(*array), element_addr,
6223 element_offset, *value, SKIP_WRITE_BARRIER);
6224 element_addr += sizeof(Tagged_t);
6225 element_offset += sizeof(Tagged_t);
6226 }
6227 }
6228
6229 push<WasmRef>(sp, code, wasm_runtime, array);
6230
6231 NextOp();
6232 }
6233
6234 INSTRUCTION_HANDLER_FUNC s2s_ArrayNewFixed(
6235 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
6236 int64_t r0, double fp0) {
6237 const uint32_t array_index = Read<int32_t>(code);
6238 const uint32_t elem_count = Read<int32_t>(code);
6239
6240 std::pair<DirectHandle<WasmArray>, const ArrayType*> array_new_result =
6241 wasm_runtime->ArrayNewUninitialized(elem_count, array_index);
6242 DirectHandle<WasmArray> array = array_new_result.first;
6243 if (V8_UNLIKELY(array.is_null())) {
6244 TRAP(TrapReason::kTrapArrayTooLarge)
6245 }
6246
6247 {
6248 // The new array is uninitialized, which means GC might fail until
6249 // initialization.
6251
6252 if (elem_count > 0) {
6253 const ArrayType* array_type = array_new_result.second;
6254 const ValueKind kind = array_type->element_type().kind();
6255 const uint32_t element_size = value_kind_size(kind);
6256
6257 Address element_addr = array->ElementAddress(elem_count - 1);
6258 uint32_t element_offset = array->element_offset(elem_count - 1);
6259 for (uint32_t i = 0; i < elem_count; i++) {
6260 switch (kind) {
6261 case kI8:
6262 *reinterpret_cast<int8_t*>(element_addr) =
6263 pop<int32_t>(sp, code, wasm_runtime);
6264 break;
6265 case kI16:
6266 base::WriteUnalignedValue<int16_t>(
6267 element_addr, pop<int32_t>(sp, code, wasm_runtime));
6268 break;
6269 case kI32:
6270 base::WriteUnalignedValue<int32_t>(
6271 element_addr, pop<int32_t>(sp, code, wasm_runtime));
6272 break;
6273 case kI64:
6274 base::WriteUnalignedValue<int64_t>(
6275 element_addr, pop<int64_t>(sp, code, wasm_runtime));
6276 break;
6277 case kF32:
6278 base::WriteUnalignedValue<float>(
6279 element_addr, pop<float>(sp, code, wasm_runtime));
6280 break;
6281 case kF64:
6282 base::WriteUnalignedValue<double>(
6283 element_addr, pop<double>(sp, code, wasm_runtime));
6284 break;
6285 case kS128:
6286 base::WriteUnalignedValue<Simd128>(
6287 element_addr, pop<Simd128>(sp, code, wasm_runtime));
6288 break;
6289 case kRef:
6290 case kRefNull: {
6291 WasmRef ref = pop<WasmRef>(sp, code, wasm_runtime);
6292 StoreRefIntoMemory(Cast<HeapObject>(*array), element_addr,
6293 element_offset, *ref, SKIP_WRITE_BARRIER);
6294 break;
6295 }
6296 default:
6297 UNREACHABLE();
6298 }
6299 element_addr -= element_size;
6300 element_offset -= element_size;
6301 }
6302 }
6303 }
6304
6305 push<WasmRef>(sp, code, wasm_runtime, array);
6306
6307 NextOp();
6308 }
6309
6310 INSTRUCTION_HANDLER_FUNC
6311 s2s_ArrayNewDefault(const uint8_t* code, uint32_t* sp,
6312 WasmInterpreterRuntime* wasm_runtime, int64_t r0,
6313 double fp0) {
6314 const uint32_t array_index = Read<int32_t>(code);
6315 const uint32_t elem_count = pop<int32_t>(sp, code, wasm_runtime);
6316
6317 std::pair<DirectHandle<WasmArray>, const ArrayType*> array_new_result =
6318 wasm_runtime->ArrayNewUninitialized(elem_count, array_index);
6319 DirectHandle<WasmArray> array = array_new_result.first;
6320 if (V8_UNLIKELY(array.is_null())) {
6321 TRAP(TrapReason::kTrapArrayTooLarge)
6322 }
6323
6324 {
6325 // The new array is uninitialized, which means GC might fail until
6326 // initialization.
6327 DisallowGarbageCollection no_gc;
6328
6329 const ArrayType* array_type = array_new_result.second;
6330 const ValueType element_type = array_type->element_type();
6331 const ValueKind kind = element_type.kind();
6332 const uint32_t element_size = value_kind_size(kind);
6333
6334 Address element_addr = array->ElementAddress(0);
6335 uint32_t element_offset = array->element_offset(0);
6336 for (uint32_t i = 0; i < elem_count; i++) {
6337 switch (kind) {
6338 case kI8:
6339 *reinterpret_cast<int8_t*>(element_addr) = int8_t{};
6340 break;
6341 case kI16:
6342 base::WriteUnalignedValue<int16_t>(element_addr, int16_t{});
6343 break;
6344 case kI32:
6345 base::WriteUnalignedValue<int32_t>(element_addr, int32_t{});
6346 break;
6347 case kI64:
6348 base::WriteUnalignedValue<int64_t>(element_addr, int64_t{});
6349 break;
6350 case kF32:
6351 base::WriteUnalignedValue<float>(element_addr, float{});
6352 break;
6353 case kF64:
6354 base::WriteUnalignedValue<double>(element_addr, double{});
6355 break;
6356 case kS128:
6357 base::WriteUnalignedValue<Simd128>(element_addr, Simd128{});
6358 break;
6359 case kRef:
6360 case kRefNull:
6361 StoreRefIntoMemory(
6362 Cast<HeapObject>(*array), element_addr, element_offset,
6363 wasm_runtime->GetNullValue(element_type), SKIP_WRITE_BARRIER);
6364 break;
6365 default:
6366 UNREACHABLE();
6367 }
6368 element_addr += element_size;
6369 element_offset += element_size;
6370 }
6371 }
6372
6373 push<WasmRef>(sp, code, wasm_runtime, array);
6374
6375 NextOp();
6376 }
6377
6378 template <TrapReason OutOfBoundsError>
6379 INSTRUCTION_HANDLER_FUNC s2s_ArrayNewSegment(
6380 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
6381 int64_t r0, double fp0) {
6382 const uint32_t array_index = Read<int32_t>(code);
6383 // TODO(paolosev@microsoft.com): already validated?
6384 if (V8_UNLIKELY(!Smi::IsValid(array_index))) {
6385 TRAP(TrapReason::kTrapArrayOutOfBounds)
6386 }
6387
6388 const uint32_t data_index = Read<int32_t>(code);
6389 // TODO(paolosev@microsoft.com): already validated?
6390 if (V8_UNLIKELY(!Smi::IsValid(data_index))) {
6391 TRAP(OutOfBoundsError)
6392 }
6393
6394 uint32_t length = pop<int32_t>(sp, code, wasm_runtime);
6395 uint32_t offset = pop<int32_t>(sp, code, wasm_runtime);
6396 if (V8_UNLIKELY(!Smi::IsValid(offset) || !Smi::IsValid(length))) {
6397 TRAP(OutOfBoundsError)
6398 }
6399 if (V8_UNLIKELY(length >= static_cast<uint32_t>(WasmArray::MaxLength(
6400 wasm_runtime->GetArrayType(array_index))))) {
6401 TRAP(TrapReason::kTrapArrayTooLarge)
6402 }
6403
6404 WasmRef result = wasm_runtime->WasmArrayNewSegment(array_index, data_index,
6405 offset, length);
6406 if (V8_UNLIKELY(result.is_null())) {
6407 TrapReason reason = WasmInterpreterThread::GetRuntimeLastWasmError(
6408 wasm_runtime->GetIsolate());
6409 INLINED_TRAP(reason)
6410 }
6411 push<WasmRef>(sp, code, wasm_runtime, result);
6412
6413 NextOp();
6414 }
6415 // The instructions array.new_data and array.new_elem have the same
6416 // implementation after validation. The only difference is that
6417 // array.new_elem is used with arrays that contain elements of reference
6418 // types, and array.new_data with arrays that contain elements of numeric
6419 // types.
6420 static auto constexpr s2s_ArrayNewData =
6421 s2s_ArrayNewSegment<TrapReason::kTrapDataSegmentOutOfBounds>;
6422 static auto constexpr s2s_ArrayNewElem =
6423 s2s_ArrayNewSegment<TrapReason::kTrapElementSegmentOutOfBounds>;
6424
6425 template <bool init_data>
6426 INSTRUCTION_HANDLER_FUNC s2s_ArrayInitSegment(
6427 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
6428 int64_t r0, double fp0) {
6429 const uint32_t array_index = Read<int32_t>(code);
6430 // TODO(paolosev@microsoft.com): already validated?
6431 if (V8_UNLIKELY(!Smi::IsValid(array_index))) {
6432 TRAP(TrapReason::kTrapArrayOutOfBounds)
6433 }
6434
6435 const uint32_t data_index = Read<int32_t>(code);
6436 // TODO(paolosev@microsoft.com): already validated?
6437 if (V8_UNLIKELY(!Smi::IsValid(data_index))) {
6438 TRAP(TrapReason::kTrapElementSegmentOutOfBounds)
6439 }
6440
6441 uint32_t size = pop<int32_t>(sp, code, wasm_runtime);
6442 uint32_t src_offset = pop<int32_t>(sp, code, wasm_runtime);
6443 uint32_t dest_offset = pop<int32_t>(sp, code, wasm_runtime);
6444 if (V8_UNLIKELY(!Smi::IsValid(size)) || !Smi::IsValid(dest_offset)) {
6445 TRAP(TrapReason::kTrapArrayOutOfBounds)
6446 }
6447 if (V8_UNLIKELY(!Smi::IsValid(src_offset))) {
6448 TrapReason reason = init_data
6449 ? TrapReason::kTrapDataSegmentOutOfBounds
6450 : TrapReason::kTrapElementSegmentOutOfBounds;
6451 INLINED_TRAP(reason);
6452 }
6453
6454 WasmRef array = pop<WasmRef>(sp, code, wasm_runtime);
6455 if (V8_UNLIKELY(wasm_runtime->IsRefNull(array))) {
6456 TRAP(TrapReason::kTrapNullDereference)
6457 }
6458
6459 bool ok = wasm_runtime->WasmArrayInitSegment(data_index, array, dest_offset,
6460 src_offset, size);
6461 if (V8_UNLIKELY(!ok)) {
6462 TrapReason reason = WasmInterpreterThread::GetRuntimeLastWasmError(
6463 wasm_runtime->GetIsolate());
6464 INLINED_TRAP(reason)
6465 }
6466
6467 NextOp();
6468 }
6469 // The instructions array.init_data and array.init_elem have the same
6470 // implementation after validation. The only difference is that
6471 // array.init_elem is used with arrays that contain elements of reference
6472 // types, and array.init_data with arrays that contain elements of numeric
6473 // types.
6474 static auto constexpr s2s_ArrayInitData = s2s_ArrayInitSegment<true>;
6475 static auto constexpr s2s_ArrayInitElem = s2s_ArrayInitSegment<false>;
6476
6477 INSTRUCTION_HANDLER_FUNC s2s_ArrayLen(const uint8_t* code, uint32_t* sp,
6478 WasmInterpreterRuntime* wasm_runtime,
6479 int64_t r0, double fp0) {
6480 WasmRef array_obj = pop<WasmRef>(sp, code, wasm_runtime);
6481 if (V8_UNLIKELY(wasm_runtime->IsRefNull(array_obj))) {
6482 TRAP(TrapReason::kTrapNullDereference)
6483 }
6484 DCHECK(IsWasmArray(*array_obj));
6485
6486 Tagged<WasmArray> array = Cast<WasmArray>(*array_obj);
6487 push<int32_t>(sp, code, wasm_runtime, array->length());
6488
6489 NextOp();
6490 }
6491
6492 INSTRUCTION_HANDLER_FUNC s2s_ArrayCopy(const uint8_t* code, uint32_t* sp,
6493 WasmInterpreterRuntime* wasm_runtime,
6494 int64_t r0, double fp0) {
6495 const uint32_t dest_array_index = Read<int32_t>(code);
6496 const uint32_t src_array_index = Read<int32_t>(code);
6497 // TODO(paolosev@microsoft.com): already validated?
6498 if (V8_UNLIKELY(!Smi::IsValid(dest_array_index) ||
6499 !Smi::IsValid(src_array_index))) {
6500 TRAP(TrapReason::kTrapArrayOutOfBounds)
6501 }
6502
6503 uint32_t size = pop<int32_t>(sp, code, wasm_runtime);
6504 uint32_t src_offset = pop<int32_t>(sp, code, wasm_runtime);
6505 WasmRef src_array = pop<WasmRef>(sp, code, wasm_runtime);
6506 uint32_t dest_offset = pop<int32_t>(sp, code, wasm_runtime);
6507 WasmRef dest_array = pop<WasmRef>(sp, code, wasm_runtime);
6508
6509 if (V8_UNLIKELY(!Smi::IsValid(size) || !Smi::IsValid(src_offset) ||
6510 !Smi::IsValid(dest_offset))) {
6511 TRAP(TrapReason::kTrapArrayOutOfBounds)
6512 } else if (V8_UNLIKELY(wasm_runtime->IsRefNull(dest_array))) {
6513 TRAP(TrapReason::kTrapNullDereference)
6514 } else if (V8_UNLIKELY(dest_offset + size >
6515 Cast<WasmArray>(*dest_array)->length())) {
6516 TRAP(TrapReason::kTrapArrayOutOfBounds)
6517 } else if (V8_UNLIKELY(wasm_runtime->IsRefNull(src_array))) {
6518 TRAP(TrapReason::kTrapNullDereference)
6519 } else if (V8_UNLIKELY(src_offset + size >
6520 Cast<WasmArray>(*src_array)->length())) {
6521 TRAP(TrapReason::kTrapArrayOutOfBounds)
6522 }
6523
6524 bool ok = true;
6525 if (size > 0) {
6526 ok = wasm_runtime->WasmArrayCopy(dest_array, dest_offset, src_array,
6527 src_offset, size);
6528 }
6529
6530 if (V8_UNLIKELY(!ok)) {
6531 TrapReason reason = WasmInterpreterThread::GetRuntimeLastWasmError(
6532 wasm_runtime->GetIsolate());
6533 INLINED_TRAP(reason)
6534 }
6535
6536 NextOp();
6537 }
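// [Editorial note, not part of the V8 source] The "dest_offset + size"
// and "src_offset + size" sums above cannot wrap in uint32_t: all three
// operands were just validated with Smi::IsValid, which (with the 31-bit
// Smis implied by pointer compression) bounds each of them below 2^30, so
// the sums stay below 2^31.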
6538
6539 template <typename T, typename U = T>
6540 INSTRUCTION_HANDLER_FUNC s2s_ArrayGet(const uint8_t* code, uint32_t* sp,
6541 WasmInterpreterRuntime* wasm_runtime,
6542 int64_t r0, double fp0) {
6543 uint32_t index = pop<uint32_t>(sp, code, wasm_runtime);
6544 WasmRef array_obj = pop<WasmRef>(sp, code, wasm_runtime);
6545 if (V8_UNLIKELY(wasm_runtime->IsRefNull(array_obj))) {
6546 TRAP(TrapReason::kTrapNullDereference)
6547 }
6548 DCHECK(IsWasmArray(*array_obj));
6549
6550 Tagged<WasmArray> array = Cast<WasmArray>(*array_obj);
6551 if (V8_UNLIKELY(index >= array->length())) {
6552 TRAP(TrapReason::kTrapArrayOutOfBounds)
6553 }
6554
6555 Address element_addr = array->ElementAddress(index);
6556 push<T>(sp, code, wasm_runtime, base::ReadUnalignedValue<U>(element_addr));
6557
6558 NextOp();
6559 }
6560 static auto constexpr s2s_I8SArrayGet = s2s_ArrayGet<int32_t, int8_t>;
6561 static auto constexpr s2s_I8UArrayGet = s2s_ArrayGet<uint32_t, uint8_t>;
6562 static auto constexpr s2s_I16SArrayGet = s2s_ArrayGet<int32_t, int16_t>;
6563 static auto constexpr s2s_I16UArrayGet = s2s_ArrayGet<uint32_t, uint16_t>;
6564 static auto constexpr s2s_I32ArrayGet = s2s_ArrayGet<int32_t>;
6565 static auto constexpr s2s_I64ArrayGet = s2s_ArrayGet<int64_t>;
6566 static auto constexpr s2s_F32ArrayGet = s2s_ArrayGet<float>;
6567 static auto constexpr s2s_F64ArrayGet = s2s_ArrayGet<double>;
6568 static auto constexpr s2s_S128ArrayGet = s2s_ArrayGet<Simd128>;
6569
6570 INSTRUCTION_HANDLER_FUNC s2s_RefArrayGet(
6571 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
6572 int64_t r0, double fp0) {
6573 uint32_t index = pop<uint32_t>(sp, code, wasm_runtime);
6574 WasmRef array_obj = pop<WasmRef>(sp, code, wasm_runtime);
6575 if (V8_UNLIKELY(wasm_runtime->IsRefNull(array_obj))) {
6576 TRAP(TrapReason::kTrapNullDereference)
6577 }
6578 DCHECK(IsWasmArray(*array_obj));
6579
6580 Tagged<WasmArray> array = Cast<WasmArray>(*array_obj);
6581 if (V8_UNLIKELY(index >= array->length())) {
6582 TRAP(TrapReason::kTrapArrayOutOfBounds)
6583 }
6584
6585 WasmRef element =
6586 Handle<Object>(*wasm_runtime->GetWasmArrayRefElement(array, index),
6587 wasm_runtime->GetIsolate());
6588 push<WasmRef>(sp, code, wasm_runtime, element);
6589
6590 NextOp();
6591 }
6592
6593 template <typename T, typename U = T>
6594 INSTRUCTION_HANDLER_FUNC s2s_ArraySet(const uint8_t* code, uint32_t* sp,
6595 WasmInterpreterRuntime* wasm_runtime,
6596 int64_t r0, double fp0) {
6597 const T value = pop<T>(sp, code, wasm_runtime);
6598 const uint32_t index = pop<uint32_t>(sp, code, wasm_runtime);
6599 WasmRef array_obj = pop<WasmRef>(sp, code, wasm_runtime);
6600 if (V8_UNLIKELY(wasm_runtime->IsRefNull(array_obj))) {
6601 TRAP(TrapReason::kTrapNullDereference)
6602 }
6603 DCHECK(IsWasmArray(*array_obj));
6604
6605 Tagged<WasmArray> array = Cast<WasmArray>(*array_obj);
6606 if (V8_UNLIKELY(index >= array->length())) {
6607 TRAP(TrapReason::kTrapArrayOutOfBounds)
6608 }
6609
6610 Address element_addr = array->ElementAddress(index);
6611 base::WriteUnalignedValue<U>(element_addr, value);
6612
6613 NextOp();
6614 }
6615 static auto constexpr s2s_I8ArraySet = s2s_ArraySet<int32_t, int8_t>;
6616 static auto constexpr s2s_I16ArraySet = s2s_ArraySet<int32_t, int16_t>;
6617 static auto constexpr s2s_I32ArraySet = s2s_ArraySet<int32_t>;
6618 static auto constexpr s2s_I64ArraySet = s2s_ArraySet<int64_t>;
6619 static auto constexpr s2s_F32ArraySet = s2s_ArraySet<float>;
6620 static auto constexpr s2s_F64ArraySet = s2s_ArraySet<double>;
6621 static auto constexpr s2s_S128ArraySet = s2s_ArraySet<Simd128>;
6622
6623 INSTRUCTION_HANDLER_FUNC s2s_RefArraySet(
6624 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
6625 int64_t r0, double fp0) {
6626 WasmRef ref = pop<WasmRef>(sp, code, wasm_runtime);
6627 const uint32_t index = pop<uint32_t>(sp, code, wasm_runtime);
6628 WasmRef array_obj = pop<WasmRef>(sp, code, wasm_runtime);
6629 if (V8_UNLIKELY(wasm_runtime->IsRefNull(array_obj))) {
6630 TRAP(TrapReason::kTrapNullDereference)
6631 }
6632 DCHECK(IsWasmArray(*array_obj));
6633
6634 Tagged<WasmArray> array = Cast<WasmArray>(*array_obj);
6635 if (V8_UNLIKELY(index >= array->length())) {
6636 TRAP(TrapReason::kTrapArrayOutOfBounds)
6637 }
6638
6639 Address element_addr = array->ElementAddress(index);
6640 uint32_t element_offset = array->element_offset(index);
6641 StoreRefIntoMemory(array, element_addr, element_offset, *ref,
6642 UPDATE_WRITE_BARRIER);
6643
6644 NextOp();
6645 }
6646
6647 template <typename T, typename U = T>
6648 INSTRUCTION_HANDLER_FUNC s2s_ArrayFill(const uint8_t* code, uint32_t* sp,
6649 WasmInterpreterRuntime* wasm_runtime,
6650 int64_t r0, double fp0) {
6651 uint32_t size = pop<uint32_t>(sp, code, wasm_runtime);
6652 T value = pop<U>(sp, code, wasm_runtime);
6653 uint32_t offset = pop<uint32_t>(sp, code, wasm_runtime);
6654
6655 WasmRef array_obj = pop<WasmRef>(sp, code, wasm_runtime);
6656 if (V8_UNLIKELY(wasm_runtime->IsRefNull(array_obj))) {
6657 TRAP(TrapReason::kTrapNullDereference)
6658 }
6659 DCHECK(IsWasmArray(*array_obj));
6660
6661 Tagged<WasmArray> array = Cast<WasmArray>(*array_obj);
6662 if (V8_UNLIKELY(static_cast<uint64_t>(offset) + size > array->length())) {
6663 TRAP(TrapReason::kTrapArrayOutOfBounds)
6664 }
6665
6666 Address element_addr = array->ElementAddress(offset);
6667 for (uint32_t i = 0; i < size; i++) {
6668 base::WriteUnalignedValue<T>(element_addr, value);
6669 element_addr += sizeof(T);
6670 }
6671
6672 NextOp();
6673 }
6674 static auto constexpr s2s_I8ArrayFill = s2s_ArrayFill<int8_t, int32_t>;
6675 static auto constexpr s2s_I16ArrayFill = s2s_ArrayFill<int16_t, int32_t>;
6676 static auto constexpr s2s_I32ArrayFill = s2s_ArrayFill<int32_t>;
6677 static auto constexpr s2s_I64ArrayFill = s2s_ArrayFill<int64_t>;
6678 static auto constexpr s2s_F32ArrayFill = s2s_ArrayFill<float>;
6679 static auto constexpr s2s_F64ArrayFill = s2s_ArrayFill<double>;
6680 static auto constexpr s2s_S128ArrayFill = s2s_ArrayFill<Simd128>;
6681
6682 INSTRUCTION_HANDLER_FUNC s2s_RefArrayFill(
6683 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
6684 int64_t r0, double fp0) {
6685 // DrumBrake currently only works with pointer compression.
6686 static_assert(COMPRESS_POINTERS_BOOL);
6687
6688 uint32_t size = pop<uint32_t>(sp, code, wasm_runtime);
6689 WasmRef value = pop<WasmRef>(sp, code, wasm_runtime);
6690 Tagged<Object> tagged_value = *value;
6691 uint32_t offset = pop<uint32_t>(sp, code, wasm_runtime);
6692
6693 WasmRef array_obj = pop<WasmRef>(sp, code, wasm_runtime);
6694 if (V8_UNLIKELY(wasm_runtime->IsRefNull(array_obj))) {
6695 TRAP(TrapReason::kTrapNullDereference)
6696 }
6697 DCHECK(IsWasmArray(*array_obj));
6698
6699 Tagged<WasmArray> array = Cast<WasmArray>(*array_obj);
6700 if (V8_UNLIKELY(static_cast<uint64_t>(offset) + size > array->length())) {
6701 TRAP(TrapReason::kTrapArrayOutOfBounds)
6702 }
6703
6704 Address element_addr = array->ElementAddress(offset);
6705 uint32_t element_offset = array->element_offset(offset);
6706 for (uint32_t i = 0; i < size; i++) {
6707 StoreRefIntoMemory(array, element_addr, element_offset, tagged_value,
6708 UPDATE_WRITE_BARRIER);
6709 element_addr += kTaggedSize;
6710 element_offset += kTaggedSize;
6711 }
6712
6713 NextOp();
6714 }
6715
6716 INSTRUCTION_HANDLER_FUNC s2s_RefI31(const uint8_t* code, uint32_t* sp,
6717 WasmInterpreterRuntime* wasm_runtime,
6718 int64_t r0, double fp0) {
6719 uint32_t value = pop<int32_t>(sp, code, wasm_runtime);
6720
6721 // Truncate high bit.
6722 Tagged<Smi> smi(Internals::IntToSmi(value & 0x7fffffff));
6723 push<WasmRef>(sp, code, wasm_runtime,
6724 handle(smi, wasm_runtime->GetIsolate()));
6725
6726 NextOp();
6727 }
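// [Editorial note, not part of the V8 source] ref.i31 keeps only the low
// 31 bits because an i31ref payload is exactly 31 bits wide and is
// represented as a Smi here. Bit 30 of the payload ends up as the Smi's
// sign, so s2s_I31GetS below recovers a sign-extended value via
// Smi::ToInt, while s2s_I31GetU masks with 0x7fffffff to zero-extend.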
6728
6729 INSTRUCTION_HANDLER_FUNC s2s_I31GetS(const uint8_t* code, uint32_t* sp,
6730 WasmInterpreterRuntime* wasm_runtime,
6731 int64_t r0, double fp0) {
6732 WasmRef ref = pop<WasmRef>(sp, code, wasm_runtime);
6733 if (V8_UNLIKELY(wasm_runtime->IsRefNull(ref))) {
6734 TRAP(TrapReason::kTrapNullDereference)
6735 }
6736 DCHECK(IsSmi(*ref));
6737 push<int32_t>(sp, code, wasm_runtime, i::Smi::ToInt(*ref));
6738
6739 NextOp();
6740 }
6741
6742 INSTRUCTION_HANDLER_FUNC s2s_I31GetU(const uint8_t* code, uint32_t* sp,
6743 WasmInterpreterRuntime* wasm_runtime,
6744 int64_t r0, double fp0) {
6745 WasmRef ref = pop<WasmRef>(sp, code, wasm_runtime);
6746 if (V8_UNLIKELY(wasm_runtime->IsRefNull(ref))) {
6747 TRAP(TrapReason::kTrapNullDereference)
6748 }
6749 DCHECK(IsSmi(*ref));
6750 push<uint32_t>(sp, code, wasm_runtime,
6751 0x7fffffff & static_cast<uint32_t>(i::Smi::ToInt(*ref)));
6752
6753 NextOp();
6754 }
6755
6756 template <bool null_succeeds>
6757 INSTRUCTION_HANDLER_FUNC RefCast(const uint8_t* code, uint32_t* sp,
6758 WasmInterpreterRuntime* wasm_runtime,
6759 int64_t r0, double fp0) {
6760 HeapType target_type =
6761 HeapType::FromBits(static_cast<uint32_t>(Read<int32_t>(code)));
6762
6763 WasmRef ref = pop<WasmRef>(sp, code, wasm_runtime);
6764
6765 const uint32_t ref_bitfield = Read<int32_t>(code);
6766 ValueType ref_type = ValueType::FromRawBitField(ref_bitfield);
6767
6768 if (!DoRefCast(ref, ref_type, target_type, null_succeeds, wasm_runtime)) {
6769 TRAP(TrapReason::kTrapIllegalCast)
6770 }
6771
6772 push<WasmRef>(sp, code, wasm_runtime, ref);
6773
6774 NextOp();
6775 }
6776 static auto constexpr s2s_RefCast = RefCast<false>;
6777 static auto constexpr s2s_RefCastNull = RefCast<true>;
6778
6779 template <bool null_succeeds>
6780 INSTRUCTION_HANDLER_FUNC RefTest(const uint8_t* code, uint32_t* sp,
6781 WasmInterpreterRuntime* wasm_runtime,
6782 int64_t r0, double fp0) {
6783 HeapType target_type =
6784 HeapType::FromBits(static_cast<uint32_t>(Read<int32_t>(code)));
6785
6786 WasmRef ref = pop<WasmRef>(sp, code, wasm_runtime);
6787
6788 const uint32_t ref_bitfield = Read<int32_t>(code);
6789 ValueType ref_type = ValueType::FromRawBitField(ref_bitfield);
6790
6791 bool cast_succeeds =
6792 DoRefCast(ref, ref_type, target_type, null_succeeds, wasm_runtime);
6793 push<int32_t>(sp, code, wasm_runtime, cast_succeeds ? 1 : 0);
6794
6795 NextOp();
6796 }
6797 static auto constexpr s2s_RefTest = RefTest<false>;
6798 static auto constexpr s2s_RefTestNull = RefTest<true>;
6799
6800 INSTRUCTION_HANDLER_FUNC s2s_AssertNullTypecheck(
6801 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
6802 int64_t r0, double fp0) {
6803 WasmRef ref = pop<WasmRef>(sp, code, wasm_runtime);
6804
6805 const uint32_t ref_bitfield = Read<int32_t>(code);
6806 ValueType ref_type = ValueType::FromRawBitField(ref_bitfield);
6807 if (!wasm_runtime->IsNullTypecheck(ref, ref_type)) {
6808 TRAP(TrapReason::kTrapIllegalCast)
6809 }
6810 push<WasmRef>(sp, code, wasm_runtime, ref);
6811
6812 NextOp();
6813 }
6814
6815 INSTRUCTION_HANDLER_FUNC s2s_AssertNotNullTypecheck(
6816 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
6817 int64_t r0, double fp0) {
6818 WasmRef ref = pop<WasmRef>(sp, code, wasm_runtime);
6819
6820 const uint32_t ref_bitfield = Read<int32_t>(code);
6821 ValueType ref_type = ValueType::FromRawBitField(ref_bitfield);
6822 if (wasm_runtime->IsNullTypecheck(ref, ref_type)) {
6823 TRAP(TrapReason::kTrapIllegalCast)
6824 }
6825 push<WasmRef>(sp, code, wasm_runtime, ref);
6826
6827 NextOp();
6828 }
6829
6830 INSTRUCTION_HANDLER_FUNC s2s_TrapIllegalCast(
6831 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
6832 int64_t r0, double fp0){TRAP(TrapReason::kTrapIllegalCast)}
6833
6834 INSTRUCTION_HANDLER_FUNC
6835 s2s_RefTestSucceeds(const uint8_t* code, uint32_t* sp,
6836 WasmInterpreterRuntime* wasm_runtime, int64_t r0,
6837 double fp0) {
6838 pop<WasmRef>(sp, code, wasm_runtime);
6839 push<int32_t>(sp, code, wasm_runtime, 1); // true
6840
6841 NextOp();
6842 }
6843
6844 INSTRUCTION_HANDLER_FUNC
6845 s2s_RefTestFails(const uint8_t* code, uint32_t* sp,
6846 WasmInterpreterRuntime* wasm_runtime, int64_t r0,
6847 double fp0) {
6848 pop<WasmRef>(sp, code, wasm_runtime);
6849 push<int32_t>(sp, code, wasm_runtime, 0); // false
6850
6851 NextOp();
6852 }
6853
6854 INSTRUCTION_HANDLER_FUNC s2s_RefIsNonNull(
6855 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
6856 int64_t r0, double fp0) {
6857 WasmRef ref = pop<WasmRef>(sp, code, wasm_runtime);
6858 push<int32_t>(sp, code, wasm_runtime, wasm_runtime->IsRefNull(ref) ? 0 : 1);
6859
6860 NextOp();
6861 }
6862
6863 INSTRUCTION_HANDLER_FUNC s2s_RefAsNonNull(
6864 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
6865 int64_t r0, double fp0) {
6866 WasmRef ref = pop<WasmRef>(sp, code, wasm_runtime);
6867 if (V8_UNLIKELY(wasm_runtime->IsRefNull(ref))) {
6868 TRAP(TrapReason::kTrapNullDereference)
6869 }
6870 push<WasmRef>(sp, code, wasm_runtime, ref);
6871
6872 NextOp();
6873 }
6874
6875 INSTRUCTION_HANDLER_FUNC s2s_AnyConvertExtern(
6876 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
6877 int64_t r0, double fp0) {
6878 WasmRef extern_ref = pop<WasmRef>(sp, code, wasm_runtime);
6879 // Pass 0 as canonical type index; see implementation of builtin
6880 // WasmAnyConvertExtern.
6881 WasmRef result = wasm_runtime->WasmJSToWasmObject(
6882 extern_ref, kWasmAnyRef, 0 /* canonical type index */);
6883 if (V8_UNLIKELY(result.is_null())) {
6884 TrapReason reason = WasmInterpreterThread::GetRuntimeLastWasmError(
6885 wasm_runtime->GetIsolate());
6886 INLINED_TRAP(reason)
6887 }
6888 push<WasmRef>(sp, code, wasm_runtime, result);
6889
6890 NextOp();
6891 }
6892
6893 INSTRUCTION_HANDLER_FUNC s2s_ExternConvertAny(
6894 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
6895 int64_t r0, double fp0) {
6896 WasmRef ref = pop<WasmRef>(sp, code, wasm_runtime);
6897
6898 if (wasm_runtime->IsNullTypecheck(ref, kWasmAnyRef)) {
6899 ref = handle(wasm_runtime->GetNullValue(kWasmExternRef),
6900 wasm_runtime->GetIsolate());
6901 }
6902 push<WasmRef>(sp, code, wasm_runtime, ref);
6903
6904 NextOp();
6905 }
6906
6907#ifdef V8_ENABLE_DRUMBRAKE_TRACING
6908
6909 INSTRUCTION_HANDLER_FUNC s2s_TraceInstruction(
6910 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
6911 int64_t r0, double fp0) {
6912 uint32_t pc = Read<int32_t>(code);
6913 uint32_t opcode = Read<int32_t>(code);
6914 uint32_t reg_mode = Read<int32_t>(code);
6915
6916 if (v8_flags.trace_drumbrake_execution) {
6917 wasm_runtime->Trace(
6918 "@%-3u: %-24s: ", pc,
6919 wasm::WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(opcode)));
6920 wasm_runtime->PrintStack(sp, static_cast<RegMode>(reg_mode), r0, fp0);
6921 }
6922
6923 NextOp();
6924 }
6925
6926 INSTRUCTION_HANDLER_FUNC trace_UpdateStack(
6927 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
6928 int64_t r0, double fp0) {
6929 uint32_t stack_index = Read<int32_t>(code);
6930 slot_offset_t slot_offset = Read<slot_offset_t>(code);
6931 wasm_runtime->TraceUpdate(stack_index, slot_offset);
6932
6933 NextOp();
6934 }
6935
6936 template <typename T>
6937 INSTRUCTION_HANDLER_FUNC trace_PushConstSlot(
6938 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
6939 int64_t r0, double fp0) {
6940 slot_offset_t slot_offset = Read<slot_offset_t>(code);
6941 wasm_runtime->TracePush<T>(slot_offset);
6942
6943 NextOp();
6944 }
6945 static auto constexpr trace_PushConstI32Slot = trace_PushConstSlot<int32_t>;
6946 static auto constexpr trace_PushConstI64Slot = trace_PushConstSlot<int64_t>;
6947 static auto constexpr trace_PushConstF32Slot = trace_PushConstSlot<float>;
6948 static auto constexpr trace_PushConstF64Slot = trace_PushConstSlot<double>;
6949 static auto constexpr trace_PushConstS128Slot = trace_PushConstSlot<Simd128>;
6950 static auto constexpr trace_PushConstRefSlot = trace_PushConstSlot<WasmRef>;
6951
6952 INSTRUCTION_HANDLER_FUNC trace_PushCopySlot(
6953 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
6954 int64_t r0, double fp0) {
6955 uint32_t stack_index = Read<int32_t>(code);
6956
6957 wasm_runtime->TracePushCopy(stack_index);
6958
6959 NextOp();
6960 }
6961
6962 INSTRUCTION_HANDLER_FUNC trace_PopSlot(const uint8_t* code, uint32_t* sp,
6963 WasmInterpreterRuntime* wasm_runtime,
6964 int64_t r0, double fp0) {
6965 wasm_runtime->TracePop();
6966
6967 NextOp();
6968 }
6969
6970 INSTRUCTION_HANDLER_FUNC trace_SetSlotType(
6971 const uint8_t* code, uint32_t* sp, WasmInterpreterRuntime* wasm_runtime,
6972 int64_t r0, double fp0) {
6973 uint32_t stack_index = Read<int32_t>(code);
6974 uint32_t type = Read<int32_t>(code);
6975 wasm_runtime->TraceSetSlotType(stack_index, type);
6976
6977 NextOp();
6978 }
6979
6980#endif // V8_ENABLE_DRUMBRAKE_TRACING
6981}; // class Handlers<Compressed>
6982
6983#ifdef V8_ENABLE_DRUMBRAKE_TRACING
6984
6985void WasmBytecodeGenerator::TracePushConstSlot(uint32_t slot_index) {
6986 if (v8_flags.trace_drumbrake_execution) {
6987 START_EMIT_INSTR_HANDLER() {
6988 switch (slots_[slot_index].kind()) {
6989 case kI32:
6990 EMIT_INSTR_HANDLER(trace_PushConstI32Slot);
6991 break;
6992 case kI64:
6993 EMIT_INSTR_HANDLER(trace_PushConstI64Slot);
6994 break;
6995 case kF32:
6996 EMIT_INSTR_HANDLER(trace_PushConstF32Slot);
6997 break;
6998 case kF64:
6999 EMIT_INSTR_HANDLER(trace_PushConstF64Slot);
7000 break;
7001 case kS128:
7002 EMIT_INSTR_HANDLER(trace_PushConstS128Slot);
7003 break;
7004 case kRef:
7005 case kRefNull:
7006 EMIT_INSTR_HANDLER(trace_PushConstRefSlot);
7007 break;
7008 default:
7009 UNREACHABLE();
7010 }
7011 EmitSlotOffset(slots_[slot_index].slot_offset * kSlotSize);
7012 }
7013 END_EMIT_INSTR_HANDLER()
7014 }
7015}
7016
7017void WasmBytecodeGenerator::TracePushCopySlot(uint32_t from_stack_index) {
7018 if (v8_flags.trace_drumbrake_execution) {
7019 START_EMIT_INSTR_HANDLER_WITH_ID(trace_PushCopySlot) {
7020 EmitI32Const(from_stack_index);
7021 }
7022 END_EMIT_INSTR_HANDLER()
7023 }
7024}
7025
7026void WasmBytecodeGenerator::TraceSetSlotType(uint32_t stack_index,
7027 ValueType type) {
7028 if (v8_flags.trace_drumbrake_execution) {
7029 START_EMIT_INSTR_HANDLER_WITH_ID(trace_SetSlotType) {
7030 EmitI32Const(stack_index);
7031 EmitRefValueType(type.raw_bit_field());
7032 }
7033 END_EMIT_INSTR_HANDLER()
7034 }
7035}
7036
7037void ShadowStack::Print(WasmInterpreterRuntime* wasm_runtime,
7038 const uint32_t* sp, size_t start_params,
7039 size_t start_locals, size_t start_stack,
7040 RegMode reg_mode, int64_t r0, double fp0) const {
7041 for (size_t i = 0; i < stack_.size(); i++) {
7042 char slot_kind = i < start_locals - start_params ? 'p'
7043 : i < start_stack - start_params ? 'l'
7044 : 's';
7045 const uint8_t* addr =
7046 reinterpret_cast<const uint8_t*>(sp) + stack_[i].slot_offset_;
7047 stack_[i].Print(wasm_runtime, start_params + i, slot_kind, addr);
7048 }
7049
7050 switch (reg_mode) {
7051 case RegMode::kI32Reg:
7052 ShadowStack::Slot::Print(wasm_runtime, kWasmI32,
7053 start_params + stack_.size(), 'R',
7054 reinterpret_cast<const uint8_t*>(&r0));
7055 break;
7056 case RegMode::kI64Reg:
7057 ShadowStack::Slot::Print(wasm_runtime, kWasmI64,
7058 start_params + stack_.size(), 'R',
7059 reinterpret_cast<const uint8_t*>(&r0));
7060 break;
7061 case RegMode::kF32Reg: {
7062 float f = static_cast<float>(fp0);
7063 ShadowStack::Slot::Print(wasm_runtime, kWasmF32,
7064 start_params + stack_.size(), 'R',
7065 reinterpret_cast<const uint8_t*>(&f));
7066 } break;
7067 case RegMode::kF64Reg:
7068 ShadowStack::Slot::Print(wasm_runtime, kWasmF64,
7069 start_params + stack_.size(), 'R',
7070 reinterpret_cast<const uint8_t*>(&fp0));
7071 break;
7072 default:
7073 break;
7074 }
7075
7076 wasm_runtime->Trace("\n");
7077}
7078
7079// static
7080void ShadowStack::Slot::Print(WasmInterpreterRuntime* wasm_runtime,
7081 ValueType type, size_t index, char kind,
7082 const uint8_t* addr) {
7083 switch (type.kind()) {
7084 case kI32:
7085 wasm_runtime->Trace(
7086 "%c%zu:i32:%d ", kind, index,
7087 base::ReadUnalignedValue<int32_t>(reinterpret_cast<Address>(addr)));
7088 break;
7089 case kI64:
7090 wasm_runtime->Trace(
7091 "%c%zu:i64:%" PRId64, kind, index,
7092 base::ReadUnalignedValue<int64_t>(reinterpret_cast<Address>(addr)));
7093 break;
7094 case kF32: {
7095 float f =
7096 base::ReadUnalignedValue<float>(reinterpret_cast<Address>(addr));
7097 wasm_runtime->Trace("%c%zu:f32:%f ", kind, index, static_cast<double>(f));
7098 } break;
7099 case kF64:
7100 wasm_runtime->Trace(
7101 "%c%zu:f64:%f ", kind, index,
7102 base::ReadUnalignedValue<double>(reinterpret_cast<Address>(addr)));
7103 break;
7104 case kS128: {
7105 // For now, all S128 values are traced as i32x4. Once enough type
7106 // information is available to know which SIMD shape is actually on the
7107 // stack, the right format should be printed here.
7108 int32x4 s;
7109 s.val[0] =
7110 base::ReadUnalignedValue<uint32_t>(reinterpret_cast<Address>(addr));
7111 s.val[1] = base::ReadUnalignedValue<uint32_t>(
7112 reinterpret_cast<Address>(addr + 4));
7113 s.val[2] = base::ReadUnalignedValue<uint32_t>(
7114 reinterpret_cast<Address>(addr + 8));
7115 s.val[3] = base::ReadUnalignedValue<uint32_t>(
7116 reinterpret_cast<Address>(addr + 12));
7117 wasm_runtime->Trace("%c%zu:s128:%08x,%08x,%08x,%08x ", kind, index,
7118 s.val[0], s.val[1], s.val[2], s.val[3]);
7119 break;
7120 }
7121 case kRef:
7122 case kRefNull:
7123 DCHECK_EQ(sizeof(uint64_t), sizeof(WasmRef));
7124 // TODO(paolosev@microsoft.com): Extract actual ref value from the
7125 // thread's reference_stack_.
7126 wasm_runtime->Trace(
7127 "%c%zu:ref:%" PRIx64, kind, index,
7128 base::ReadUnalignedValue<uint64_t>(reinterpret_cast<Address>(addr)));
7129 break;
7130 default:
7131 UNREACHABLE();
7132 }
7133}
7134
7135char const* kInstructionHandlerNames[kInstructionTableSize];
7136
7137#endif // V8_ENABLE_DRUMBRAKE_TRACING
7138
7139 PWasmOp* kInstructionTable[kInstructionTableSize] = {
7140
7141// 1. Add "small" (compressed) instruction handlers.
7142
7143#if !V8_DRUMBRAKE_BOUNDS_CHECKS
7144// For this case, this table will be initialized in
7145// InitInstructionTableOnce.
7146#define V(_) nullptr,
7147 FOREACH_LOAD_STORE_INSTR_HANDLER(V)
7148 #undef V
7149
7150#else // !V8_DRUMBRAKE_BOUNDS_CHECKS
7151#define V(name) Handlers<true>::name,
7152 FOREACH_LOAD_STORE_INSTR_HANDLER(V)
7153 FOREACH_LOAD_STORE_DUPLICATED_INSTR_HANDLER(V)
7154 #undef V
7155
7156#endif // !V8_DRUMBRAKE_BOUNDS_CHECKS
7157
7158#define V(name) Handlers<true>::name,
7159 FOREACH_INSTR_HANDLER(V)
7160 #ifdef V8_ENABLE_DRUMBRAKE_TRACING
7161 FOREACH_TRACE_INSTR_HANDLER(V)
7162#endif // V8_ENABLE_DRUMBRAKE_TRACING
7163#undef V
7164
7165// 2. Add "large" instruction handlers.
7166
7167#if !V8_DRUMBRAKE_BOUNDS_CHECKS
7168// For this case, this table will be initialized in
7169// InitInstructionTableOnce.
7170#define V(_) nullptr,
7171 FOREACH_LOAD_STORE_INSTR_HANDLER(V)
7172 #undef V
7173
7174#else // !V8_DRUMBRAKE_BOUNDS_CHECKS
7175#define V(name) Handlers<false>::name,
7176 FOREACH_LOAD_STORE_INSTR_HANDLER(V)
7177 FOREACH_LOAD_STORE_DUPLICATED_INSTR_HANDLER(V)
7178 #undef V
7179#endif // !V8_DRUMBRAKE_BOUNDS_CHECKS
7180
7181#define V(name) Handlers<false>::name,
7182 FOREACH_INSTR_HANDLER(V)
7183 #ifdef V8_ENABLE_DRUMBRAKE_TRACING
7184 FOREACH_TRACE_INSTR_HANDLER(V)
7185#endif // V8_ENABLE_DRUMBRAKE_TRACING
7186#undef V
7187};
7188
7191
7192 const WasmEHData::TryBlock* WasmEHData::GetTryBlock(
7193 CodeOffset code_offset) const {
7194 const auto& catch_it = code_trycatch_map_.find(code_offset);
7195 if (catch_it == code_trycatch_map_.end()) return nullptr;
7196 BlockIndex try_block_index = catch_it->second;
7197
7198 const auto& try_it = try_blocks_.find(try_block_index);
7199 DCHECK_NE(try_it, try_blocks_.end());
7200 const WasmEHData::TryBlock* try_block = &try_it->second;
7201 if (try_block->IsTryDelegate()) {
7202 try_block = GetDelegateTryBlock(try_block);
7203 }
7204 return try_block;
7205}
7206
7207 const WasmEHData::TryBlock* WasmEHData::GetParentTryBlock(
7208 const WasmEHData::TryBlock* try_block) const {
7209 const auto& try_it =
7210 try_blocks_.find(try_block->parent_or_matching_try_block);
7211 return try_it != try_blocks_.end() ? &try_it->second : nullptr;
7212}
7213
7214 const WasmEHData::TryBlock* WasmEHData::GetDelegateTryBlock(
7215 const WasmEHData::TryBlock* try_block) const {
7216 DCHECK_GE(try_block->delegate_try_index, 0);
7217 if (try_block->delegate_try_index == kDelegateToCallerIndex) {
7218 return nullptr;
7219 }
7220 const auto& try_it = try_blocks_.find(try_block->delegate_try_index);
7221 DCHECK_NE(try_it, try_blocks_.end());
7222 return &try_it->second;
7223}
7224
7225 CodeOffset WasmEHData::GetEndInstructionOffsetFor(
7226 WasmEHData::BlockIndex catch_block_index) const {
7227 int try_block_index = GetTryBranchOf(catch_block_index);
7228 DCHECK_GE(try_block_index, 0);
7229
7230 const auto& it = try_blocks_.find(try_block_index);
7231 DCHECK_NE(it, try_blocks_.end());
7232 return it->second.end_instruction_code_offset;
7233}
7234
7235 WasmEHData::ExceptionPayloadSlotOffsets
7236 WasmEHData::GetExceptionPayloadStartSlotOffsets(
7237 WasmEHData::BlockIndex catch_block_index) const {
7238 const auto& it = catch_blocks_.find(catch_block_index);
7239 DCHECK_NE(it, catch_blocks_.end());
7240 return {it->second.first_param_slot_offset,
7241 it->second.first_param_ref_stack_index};
7242}
7243
7244 WasmEHData::BlockIndex WasmEHData::GetTryBranchOf(
7245 WasmEHData::BlockIndex catch_block_index) const {
7246 const auto& it = catch_blocks_.find(catch_block_index);
7247 if (it == catch_blocks_.end()) return -1;
7248 return it->second.try_block_index;
7249}
7250
7251 void WasmEHDataGenerator::AddTryBlock(
7252 BlockIndex try_block_index, BlockIndex parent_or_matching_try_block_index,
7253 BlockIndex ancestor_try_block_index) {
7254 DCHECK_EQ(try_blocks_.find(try_block_index), try_blocks_.end());
7255 try_blocks_.insert(
7256 {try_block_index,
7257 TryBlock{parent_or_matching_try_block_index, ancestor_try_block_index}});
7258 current_try_block_index_ = try_block_index;
7259}
7260
7261 void WasmEHDataGenerator::AddCatchBlock(BlockIndex catch_block_index,
7262 int tag_index,
7263 uint32_t first_param_slot_offset,
7264 uint32_t first_param_ref_stack_index,
7265 CodeOffset code_offset) {
7266 DCHECK_EQ(catch_blocks_.find(catch_block_index), catch_blocks_.end());
7267 catch_blocks_.insert(
7268 {catch_block_index,
7269 CatchBlock{current_try_block_index_, first_param_slot_offset,
7270 first_param_ref_stack_index}});
7271
7272 auto it = try_blocks_.find(current_try_block_index_);
7273 DCHECK_NE(it, try_blocks_.end());
7274 it->second.catch_handlers.emplace_back(
7275 CatchHandler{catch_block_index, tag_index, code_offset});
7276}
7277
7278 void WasmEHDataGenerator::AddDelegatedBlock(
7279 BlockIndex delegate_try_block_index) {
7280 auto it = try_blocks_.find(current_try_block_index_);
7281 DCHECK_NE(it, try_blocks_.end());
7282 TryBlock& try_block = it->second;
7283 DCHECK(try_block.catch_handlers.empty());
7284 try_block.SetDelegated(delegate_try_block_index);
7285}
7286
7287 WasmEHData::BlockIndex WasmEHDataGenerator::EndTryCatchBlocks(
7288 WasmEHData::BlockIndex block_index, CodeOffset code_offset) {
7289 WasmEHData::BlockIndex try_block_index = GetTryBranchOf(block_index);
7290 if (try_block_index < 0) {
7291 // No catch/catch_all blocks.
7292 try_block_index = block_index;
7293 }
7294
7295 const auto& try_it = try_blocks_.find(try_block_index);
7296 DCHECK_NE(try_it, try_blocks_.end());
7297 try_it->second.end_instruction_code_offset = code_offset;
7298 current_try_block_index_ = try_it->second.parent_or_matching_try_block;
7299 return try_block_index;
7300}
7301
7302 void WasmEHDataGenerator::RecordPotentialExceptionThrowingInstruction(
7303 WasmOpcode opcode, CodeOffset code_offset) {
7304 if (current_try_block_index_ < 0) {
7305 return; // Not inside a try block.
7306 }
7307
7308 BlockIndex try_block_index = current_try_block_index_;
7309 const auto& try_it = try_blocks_.find(current_try_block_index_);
7310 DCHECK_NE(try_it, try_blocks_.end());
7311 const TryBlock& try_block = try_it->second;
7312
7313 bool inside_catch_handler = !try_block.catch_handlers.empty();
7314 if (inside_catch_handler) {
7315 // If we are throwing from inside a catch block, the exception should only
7316 // be caught by the catch handler of an ancestor try block.
7317 try_block_index = try_block.ancestor_try_index;
7318 if (try_block_index < 0) return;
7319 }
7320
7321 code_trycatch_map_[code_offset] = try_block_index;
7322}
7323
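// Worked example: for the sequence try ... throw 0 ... catch 0 ... throw 1
// ... end, the first throw is recorded against the enclosing try block; the
// second throw happens inside a catch handler, so it is recorded against the
// ancestor try block instead (or not recorded at all if there is none).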
7324WasmBytecode::WasmBytecode(int func_index, const uint8_t* code_data,
7325 size_t code_length, uint32_t stack_frame_size,
7326 const FunctionSig* signature,
7327 const CanonicalSig* canonical_signature,
7328 const InterpreterCode* interpreter_code,
7329 size_t blocks_count, const uint8_t* const_slots_data,
7330 size_t const_slots_length, uint32_t ref_slots_count,
7331 const WasmEHData&& eh_data,
7332 const std::map<CodeOffset, pc_t>&& code_pc_map)
7333 : code_(code_data, code_data + code_length),
7334 code_bytes_(code_.data()),
7335 signature_(signature),
7336 canonical_signature_(canonical_signature),
7337 interpreter_code_(interpreter_code),
7338 const_slots_values_(const_slots_data,
7339 const_slots_data + const_slots_length),
7340 func_index_(func_index),
7341 blocks_count_(static_cast<uint32_t>(blocks_count)),
7342 args_count_(static_cast<uint32_t>(signature_->parameter_count())),
7343 args_slots_size_(ArgsSizeInSlots(signature_)),
7344 return_count_(static_cast<uint32_t>(signature_->return_count())),
7345 rets_slots_size_(RetsSizeInSlots(signature_)),
7346 locals_count_(
7347 static_cast<uint32_t>(interpreter_code_->locals.num_locals)),
7348 locals_slots_size_(LocalsSizeInSlots(interpreter_code_)),
7349 total_frame_size_in_bytes_(stack_frame_size * kSlotSize +
7350 args_slots_size_ * kSlotSize +
7351 rets_slots_size_ * kSlotSize),
7352 ref_args_count_(RefArgsCount(signature_)),
7353 ref_rets_count_(RefRetsCount(signature_)),
7354 ref_locals_count_(RefLocalsCount(interpreter_code)),
7355 ref_slots_count_(ref_slots_count),
7356 eh_data_(eh_data),
7357 code_pc_map_(code_pc_map) {}
7358
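// For illustration: with code_pc_map_ = {{0, 0}, {16, 5}, {40, 9}}, a trap
// raised at code offset 30 finds lower_bound == 40, steps back one entry to
// offset 16, and reports pc 5.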
7359pc_t WasmBytecode::GetPcFromTrapCode(const uint8_t* current_code) const {
7360 DCHECK_GE(current_code, code_bytes_);
7361 size_t code_offset = current_code - code_bytes_;
7362
7363 auto it = code_pc_map_.lower_bound(code_offset);
7364 if (it == code_pc_map_.begin()) return 0;
7365 it--;
7366
7367 return it->second;
7368}
7369
7370// static
7371std::atomic<size_t> WasmBytecodeGenerator::total_bytecode_size_ = 0;
7372 // static
7373 std::atomic<size_t> WasmBytecodeGenerator::emitted_short_slot_offset_count_ = 0;
7374 // static
7375 std::atomic<size_t> WasmBytecodeGenerator::emitted_short_memory_offset_count_ =
7376 0;
7377
7378 WasmBytecodeGenerator::WasmBytecodeGenerator(uint32_t function_index,
7379 InterpreterCode* wasm_code,
7380 const WasmModule* module)
7381 : const_slot_offset_(0),
7382 slot_offset_(0),
7383 ref_slots_count_(0),
7384 function_index_(function_index),
7385 wasm_code_(wasm_code),
7386 args_count_(0),
7387 args_slots_size_(0),
7388 return_count_(0),
7389 rets_slots_size_(0),
7390 locals_count_(0),
7391 current_block_index_(-1),
7392 is_instruction_reachable_(true),
7393 unreachable_block_count_(0),
7394#ifdef DEBUG
7395 was_current_instruction_reachable_(true),
7396#endif // DEBUG
7397 module_(module),
7398 last_instr_offset_(kInvalidCodeOffset),
7399 handler_size_(InstrHandlerSize::Large),
7400 current_instr_encoding_failed_(false)
7401#ifdef DEBUG
7402 ,
7403 no_nested_emit_instr_handler_guard_(false)
7404#endif // DEBUG
7405{
7406 DCHECK(v8_flags.wasm_jitless);
7407
7408 // Multiple memories not supported.
7409 DCHECK_LE(module->memories.size(), 1);
7410
7411 size_t wasm_code_size = wasm_code_->end - wasm_code_->start;
7412 code_.reserve(wasm_code_size * 6);
7413 slots_.reserve(wasm_code_size / 2);
7414 stack_.reserve(wasm_code_size / 4);
7415 blocks_.reserve(wasm_code_size / 8);
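// Illustrative sizing: a 1000-byte function body pre-reserves ~6000 bytes of
// bytecode, 500 slot descriptors, a 250-entry stack and 125 block
// descriptors; these are heuristic ratios, not hard limits.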
7416
7417 const FunctionSig* sig = module_->functions[function_index].sig;
7418 args_count_ = static_cast<uint32_t>(sig->parameter_count());
7419 args_slots_size_ = ArgsSizeInSlots(sig);
7420 return_count_ = static_cast<uint32_t>(sig->return_count());
7421 rets_slots_size_ = RetsSizeInSlots(sig);
7422 locals_count_ = static_cast<uint32_t>(wasm_code->locals.num_locals);
7423
7425 if (is_memory64_) {
7428 } else {
7431 }
7432}
7433
7434 size_t WasmBytecodeGenerator::Simd128Hash::operator()(
7435 const Simd128& s128) const {
7436 static_assert(sizeof(size_t) == sizeof(uint64_t));
7437 const int64x2 s = s128.to_i64x2();
7438 return s.val[0] ^ s.val[1];
7439}
7440
7441 // Check whether the slot that holds the value at {stack_index} is shared with
7442 // other slots. This can happen if there are multiple local.get operations that
7443 // copy from the same local.
7444bool WasmBytecodeGenerator::HasSharedSlot(uint32_t stack_index) const {
7445 // Only consider stack entries added in the current block.
7446 // We don't need to consider ancestor blocks because if a block has a
7447 // non-empty signature we always pass arguments and results into separate
7448 // slots, emitting CopySlot operations.
7449 uint32_t start_slot_index = blocks_[current_block_index_].stack_size_;
7450
7451 for (uint32_t i = start_slot_index; i < stack_.size(); i++) {
7452 if (stack_[i] == stack_[stack_index]) {
7453 return true;
7454 }
7455 }
7456 return false;
7457}
7458
7459 // Check whether the slot that holds the value at {stack_index} is shared with
7460 // other slots. This can happen if there are multiple local.get operations that
7461 // copy from the same local. In this case, when we modify the value of the slot
7462 // with a local.set or local.tee, we need to first duplicate the slot to make
7463 // sure that the old value is preserved in the other shared slots.
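// For example, after: local.get 0 ; local.get 0, both stack entries alias
// the slot of local 0; a following local.set 0 must first copy the old value
// into a fresh slot so that the other aliased entries still observe it.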
7464 bool WasmBytecodeGenerator::FindSharedSlot(uint32_t stack_index,
7465 uint32_t* new_slot_index) {
7466 *new_slot_index = UINT_MAX;
7467 ValueType value_type = slots_[stack_[stack_index]].value_type;
7468 if (value_type.is_reference()) return false;
7469
7470 // Only consider stack entries added in the current block.
7471 // We don't need to consider ancestor blocks because if a block has a
7472 // non-empty signature we always pass arguments and results into separate
7473 // slots, emitting CopySlot operations.
7474 uint32_t start_slot_index = blocks_[current_block_index_].stack_size_;
7475
7476 for (uint32_t i = start_slot_index; i < stack_.size(); i++) {
7477 if (stack_[i] == stack_[stack_index]) {
7478 // Allocate new slot to preserve the old value of a shared slot.
7479 *new_slot_index = CreateSlot(value_type);
7480 break;
7481 }
7482 }
7483
7484 if (*new_slot_index == UINT_MAX) return false;
7485
7486 // If there was a collision and we allocated a new slot to preserve the old
7487 // value, we need to do two things to keep the state up to date:
7488 // 1. For each shared slot, we update the stack value to refer to the new
7489 // slot. This tracks the change at bytecode generation time.
7490 // 2. We return {true} to indicate that the slot was shared and the caller
7491 // should emit a 's2s_PreserveCopySlot...' instruction to copy the old slot
7492 // value into the new slot, at runtime.
7493
7494 // This loop works because stack_index is always greater than or equal to
7495 // the index of args/globals.
7496 DCHECK_GT(start_slot_index, stack_index);
7497 for (uint32_t i = start_slot_index; i < stack_.size(); i++) {
7498 if (stack_[i] == stack_[stack_index]) {
7499 // Copy value into the new slot.
7500 UpdateStack(i, *new_slot_index);
7501#ifdef V8_ENABLE_DRUMBRAKE_TRACING
7502 if (v8_flags.trace_drumbrake_execution &&
7503 v8_flags.trace_drumbrake_execution_verbose) {
7504 START_EMIT_INSTR_HANDLER_WITH_ID(trace_UpdateStack) {
7505 EmitStackIndex(i);
7506 EmitSlotOffset(slots_[*new_slot_index].slot_offset * kSlotSize);
7507 printf("Preserve UpdateStack: [%d] = %d\n", i,
7508 slots_[*new_slot_index].slot_offset);
7509 }
7510 END_EMIT_INSTR_HANDLER()
7511 }
7512#endif // V8_ENABLE_DRUMBRAKE_TRACING
7513 }
7514 }
7515
7516 return true;
7517}
7518
7519 void WasmBytecodeGenerator::EmitCopySlot(ValueType value_type,
7520 uint32_t from_slot_index,
7521 uint32_t to_slot_index,
7522 bool copy_from_reg) {
7523 START_EMIT_INSTR_HANDLER() {
7524 const ValueKind kind = value_type.kind();
7525 switch (kind) {
7526 case kI32:
7527 if (copy_from_reg) {
7528 EMIT_INSTR_HANDLER(r2s_CopyR0ToSlot32);
7529 } else {
7530 EMIT_INSTR_HANDLER(s2s_CopySlot32);
7531 }
7532 break;
7533 case kI64:
7534 if (copy_from_reg) {
7535 EMIT_INSTR_HANDLER(r2s_CopyR0ToSlot64);
7536 } else {
7537 EMIT_INSTR_HANDLER(s2s_CopySlot64);
7538 }
7539 break;
7540 case kF32:
7541 if (copy_from_reg) {
7542 EMIT_INSTR_HANDLER(r2s_CopyFp0ToSlot32);
7543 } else {
7544 EMIT_INSTR_HANDLER(s2s_CopySlot32);
7545 }
7546 break;
7547 case kF64:
7548 if (copy_from_reg) {
7549 EMIT_INSTR_HANDLER(r2s_CopyFp0ToSlot64);
7550 } else {
7551 EMIT_INSTR_HANDLER(s2s_CopySlot64);
7552 }
7553 break;
7554 case kS128:
7555 DCHECK(!copy_from_reg);
7556 EMIT_INSTR_HANDLER(s2s_CopySlot128);
7557 break;
7558 case kRef:
7559 case kRefNull:
7560 DCHECK(!copy_from_reg);
7561 EMIT_INSTR_HANDLER(s2s_CopySlotRef);
7562 break;
7563 default:
7564 UNREACHABLE();
7565 }
7566
7567 if (kind == kRefNull || kind == kRef) {
7568 DCHECK(!copy_from_reg);
7569 EmitRefStackIndex(slots_[from_slot_index].ref_stack_index);
7570 EmitRefStackIndex(slots_[to_slot_index].ref_stack_index);
7571 } else {
7572 if (!copy_from_reg) {
7573 EmitSlotOffset(slots_[from_slot_index].slot_offset);
7574 }
7575 EmitSlotOffset(slots_[to_slot_index].slot_offset);
7576 }
7577 }
7578 END_EMIT_INSTR_HANDLER()
7579
7580#ifdef V8_ENABLE_DRUMBRAKE_TRACING
7581 if (v8_flags.trace_drumbrake_bytecode_generator &&
7582 v8_flags.trace_drumbrake_execution_verbose) {
7583 printf("emit CopySlot: %d(%d) -> %d(%d)\n", from_slot_index,
7584 slots_[from_slot_index].slot_offset, to_slot_index,
7585 slots_[to_slot_index].slot_offset);
7586 }
7587#endif // V8_ENABLE_DRUMBRAKE_TRACING
7588}
7589
7590 // When a Wasm function starts, the values for the function args and locals are
7591// already present in the Wasm stack. The stack entries for args and locals
7592// can be directly accessed with {local.get} and modified with {local.set} and
7593// {local.tee}, but they can never be popped, they are always present until
7594// the function returns.
7595// During the execution of the function other values are then pushed/popped
7596// into/from the stack, but these other entries are only accessible indirectly
7597// as operands/results of operations, not directly with local.get/set
7598// instructions.
7599//
7600 // DrumBrake implements an "args/locals propagation" optimization that allows
7601 // the stack slots for "local" stack entries to be shared with other stack
7602 // entries (using the {stack_} and {slots_} arrays), in order to avoid emitting
7603 // calls to 'local.get' instruction handlers.
7604
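// For example, in: (local.get 0) (local.get 0) (i32.add), neither local.get
// emits a copy: both stack entries simply alias the slot of local 0 until
// one of them needs to be written.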
7605// When an arg/local value is modified, and its slot is shared with other
7606// entries in the stack, we need to preserve the old value of the stack entry in
7607// a new slot.
7608 void WasmBytecodeGenerator::CopyToSlot(ValueType value_type,
7609 uint32_t from_slot_index,
7610 uint32_t to_stack_index,
7611 bool copy_from_reg) {
7612 const ValueKind kind = value_type.kind();
7613 uint32_t to_slot_index = stack_[to_stack_index];
7614 DCHECK(copy_from_reg || CheckEqualKind(kind, slots_[from_slot_index].kind()));
7615 DCHECK(CheckEqualKind(slots_[to_slot_index].kind(), kind));
7616
7617 uint32_t new_slot_index;
7618 // If the slot is shared {FindSharedSlot} creates a new slot and makes all the
7619 // 'non-locals' stack entries that shared the old slot point to this new slot.
7620 // We need to emit a {PreserveCopySlot} instruction to dynamically copy the
7621 // old value into the new slot.
7622 if (FindSharedSlot(to_stack_index, &new_slot_index)) {
7623 START_EMIT_INSTR_HANDLER() {
7624 switch (kind) {
7625 case kI32:
7626 if (copy_from_reg) {
7627 EMIT_INSTR_HANDLER(r2s_PreserveCopyR0ToSlot32);
7628 } else {
7629 EMIT_INSTR_HANDLER(s2s_PreserveCopySlot32);
7630 }
7631 break;
7632 case kI64:
7633 if (copy_from_reg) {
7634 EMIT_INSTR_HANDLER(r2s_PreserveCopyR0ToSlot64);
7635 } else {
7636 EMIT_INSTR_HANDLER(s2s_PreserveCopySlot64);
7637 }
7638 break;
7639 case kF32:
7640 if (copy_from_reg) {
7641 EMIT_INSTR_HANDLER(r2s_PreserveCopyFp0ToSlot32);
7642 } else {
7643 EMIT_INSTR_HANDLER(s2s_PreserveCopySlot32);
7644 }
7645 break;
7646 case kF64:
7647 if (copy_from_reg) {
7648 EMIT_INSTR_HANDLER(r2s_PreserveCopyFp0ToSlot64);
7649 } else {
7650 EMIT_INSTR_HANDLER(s2s_PreserveCopySlot64);
7651 }
7652 break;
7653 case kS128:
7654 DCHECK(!copy_from_reg);
7655 EMIT_INSTR_HANDLER(s2s_PreserveCopySlot128);
7656 break;
7657 case kRef:
7658 case kRefNull:
7659 default:
7660 UNREACHABLE();
7661 }
7662
7663 if (kind == kRefNull || kind == kRef) {
7664 DCHECK(!copy_from_reg);
7665 EmitRefStackIndex(slots_[from_slot_index].ref_stack_index);
7666 EmitRefStackIndex(slots_[to_slot_index].ref_stack_index);
7667 EmitRefStackIndex(slots_[new_slot_index].ref_stack_index);
7668 } else {
7669 if (!copy_from_reg) {
7670 EmitSlotOffset(slots_[from_slot_index].slot_offset);
7671 }
7672 EmitSlotOffset(slots_[to_slot_index].slot_offset);
7673 EmitSlotOffset(slots_[new_slot_index].slot_offset);
7674 }
7675
7676#ifdef V8_ENABLE_DRUMBRAKE_TRACING
7677 if (v8_flags.trace_drumbrake_execution &&
7678 v8_flags.trace_drumbrake_execution_verbose) {
7679 printf("emit s2s_PreserveCopySlot: %d %d %d\n",
7680 slots_[from_slot_index].slot_offset,
7681 slots_[to_slot_index].slot_offset,
7682 slots_[new_slot_index].slot_offset);
7683 }
7684#endif // V8_ENABLE_DRUMBRAKE_TRACING
7685 }
7686 END_EMIT_INSTR_HANDLER()
7687 } else {
7688 EmitCopySlot(value_type, from_slot_index, to_slot_index, copy_from_reg);
7689 }
7690}
7691
7692// Used for 'local.tee' and 'local.set' instructions.
7693 void WasmBytecodeGenerator::CopyToSlotAndPop(ValueType value_type,
7694 uint32_t to_stack_index,
7695 bool is_tee, bool copy_from_reg) {
7696 DCHECK(!stack_.empty());
7697 DCHECK_LT(to_stack_index, stack_.size() - (copy_from_reg ? 0 : 1));
7698
7699 // LocalGet uses a "copy-on-write" mechanism: the arg/local value is not
7700 // copied and instead the stack entry references the same slot. When the
7701 // arg/local value is modified, we need to preserve the old value of the stack
7702 // entry in a new slot.
7703 CopyToSlot(value_type, stack_.back(), to_stack_index, copy_from_reg);
7704
7705 if (!is_tee && !copy_from_reg) {
7706 PopSlot();
7707
7708#ifdef V8_ENABLE_DRUMBRAKE_TRACING
7709 if (v8_flags.trace_drumbrake_execution) {
7710 START_EMIT_INSTR_HANDLER_WITH_ID(trace_PopSlot) {}
7711 END_EMIT_INSTR_HANDLER()
7712 }
7713#endif // V8_ENABLE_DRUMBRAKE_TRACING
7714 }
7715}
7716
7717// This function is called when we enter a new 'block', 'loop' or 'if' block
7718// statement. Checks whether any of the 'non-locals' stack entries share a slot
7719 // with an arg/local stack entry. In that case, make sure the local stack
7720 // entry gets its own slot. This is necessary because at runtime we could
7721 // jump to the block after having modified the local value in some other code
7722// path.
7723// TODO(paolosev@microsoft.com) - Understand why this is not required only for
7724// 'loop' blocks.
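// Conceptual example: after a local.get 0, entering a block while that stack
// entry still aliases local 0 would be unsafe, because code jumping to the
// block may have modified the local through another path first.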
7725 void WasmBytecodeGenerator::PreserveArgsAndLocals() {
7726 uint32_t num_args_and_locals = args_count_ + locals_count_;
7727
7728 // If there are only args/locals entries in the stack, nothing to do.
7729 if (num_args_and_locals >= stack_size()) return;
7730
7731 for (uint32_t local_index = 0; local_index < num_args_and_locals;
7732 ++local_index) {
7733 uint32_t new_slot_index;
7734 if (FindSharedSlot(local_index, &new_slot_index)) {
7735 ValueType value_type = slots_[stack_[local_index]].value_type;
7736 EmitCopySlot(value_type, stack_[local_index], new_slot_index);
7737 }
7738 }
7739}
7740
7741 uint32_t WasmBytecodeGenerator::ReserveBlockSlots(
7742 uint8_t opcode, const WasmInstruction::Optional::Block& block_data,
7743 size_t* rets_slots_count, size_t* params_slots_count) {
7744 uint32_t first_slot_index = 0;
7745 *rets_slots_count = 0;
7746 *params_slots_count = 0;
7747 bool first_slot_found = false;
7748 const ValueType value_type = block_data.value_type();
7749 if (value_type == kWasmBottom) {
7750 const FunctionSig* sig = module_->signature(block_data.sig_index);
7751 *rets_slots_count = sig->return_count();
7752 for (uint32_t i = 0; i < *rets_slots_count; i++) {
7753 uint32_t slot_index = CreateSlot(sig->GetReturn(i));
7754 if (!first_slot_found) {
7755 first_slot_index = slot_index;
7756 first_slot_found = true;
7757 }
7758 }
7759 *params_slots_count = sig->parameter_count();
7760 for (uint32_t i = 0; i < *params_slots_count; i++) {
7761 uint32_t slot_index = CreateSlot(sig->GetParam(i));
7762 if (!first_slot_found) {
7763 first_slot_index = slot_index;
7764 first_slot_found = true;
7765 }
7766 }
7767 } else if (value_type != kWasmVoid) {
7768 *rets_slots_count = 1;
7769 first_slot_index = CreateSlot(value_type);
7770 }
7771 return first_slot_index;
7772}
7773
7774 void WasmBytecodeGenerator::StoreBlockParamsIntoSlots(
7775 uint32_t target_block_index, bool update_stack) {
7776 const WasmBytecodeGenerator::BlockData& target_block_data =
7777 blocks_[target_block_index];
7778 DCHECK_EQ(target_block_data.opcode_, kExprLoop);
7779
7780 uint32_t params_count = ParamsCount(target_block_data);
7781 uint32_t rets_count = ReturnsCount(target_block_data);
7782 uint32_t first_param_slot_index =
7783 target_block_data.first_block_index_ + rets_count;
7784 for (uint32_t i = 0; i < params_count; i++) {
7785 uint32_t from_slot_index =
7786 stack_[stack_top_index() - (params_count - 1) + i];
7787 uint32_t to_slot_index = first_param_slot_index + i;
7788 if (from_slot_index != to_slot_index) {
7789 EmitCopySlot(GetParamType(target_block_data, i), from_slot_index,
7790 to_slot_index);
7791 if (update_stack) {
7792 DCHECK_EQ(GetParamType(target_block_data, i),
7793 slots_[first_param_slot_index + i].value_type);
7796 UpdateStack(stack_top_index() - (params_count - 1) + i,
7797 first_param_slot_index + i);
7798
7799#ifdef V8_ENABLE_DRUMBRAKE_TRACING
7800 if (v8_flags.trace_drumbrake_execution) {
7801 START_EMIT_INSTR_HANDLER_WITH_ID(trace_UpdateStack) {
7802 EmitStackIndex(stack_top_index() - (params_count - 1) + i);
7803 EmitSlotOffset(slots_[first_param_slot_index + i].slot_offset *
7804 kSlotSize);
7805 }
7806 END_EMIT_INSTR_HANDLER()
7807 }
7808#endif // V8_ENABLE_DRUMBRAKE_TRACING
7809 }
7810 }
7811 }
7812}
7813
7814 void WasmBytecodeGenerator::StoreBlockParamsAndResultsIntoSlots(
7815 uint32_t target_block_index, WasmOpcode opcode) {
7816 bool is_branch = kExprBr == opcode || kExprBrIf == opcode ||
7817 kExprBrTable == opcode || kExprBrOnNull == opcode ||
7818 kExprBrOnNonNull == opcode || kExprBrOnCast == opcode;
7819 const WasmBytecodeGenerator::BlockData& target_block_data =
7820 blocks_[target_block_index];
7821 bool is_target_loop_block = target_block_data.opcode_ == kExprLoop;
7822 if (is_target_loop_block && is_branch) {
7823 StoreBlockParamsIntoSlots(target_block_index, false);
7824 }
7825
7826 // Ignore params if this is the function main block.
7827 uint32_t params_count =
7828 target_block_index == 0 ? 0 : ParamsCount(target_block_data);
7829 uint32_t rets_count = ReturnsCount(target_block_data);
7830
7831 // If we are branching to a loop block we go back to the beginning of the
7832 // block, therefore we don't need to store the block results.
7833 if (!is_target_loop_block || !is_branch) {
7834 // There could be valid code where there are not enough elements in the
7835 // stack if some code is unreachable (for example, if an 'i32.const 0' is
7836 // followed by a 'br_if', the branch is never taken).
7837 uint32_t count = std::min(static_cast<uint32_t>(stack_.size()), rets_count);
7838 for (uint32_t i = 0; i < count; i++) {
7839 uint32_t from_slot_index = stack_[stack_top_index() - (count - 1) + i];
7840 uint32_t to_slot_index = target_block_data.first_block_index_ + i;
7841 if (from_slot_index != to_slot_index) {
7842 EmitCopySlot(GetReturnType(target_block_data, i), from_slot_index,
7843 to_slot_index);
7844 }
7845 }
7846 }
7847
7848 bool is_else = (kExprElse == opcode);
7849 bool is_return = (kExprReturn == opcode);
7850 bool is_catch = (kExprCatch == opcode || kExprCatchAll == opcode);
7851 if (!is_branch && !is_return && !is_else && !is_catch) {
7852 uint32_t new_stack_height =
7853 target_block_data.stack_size_ - params_count + rets_count;
7854 DCHECK(new_stack_height <= stack_.size() ||
7855 !was_current_instruction_reachable_);
7856 stack_.resize(new_stack_height);
7857
7858 for (uint32_t i = 0; i < rets_count; i++) {
7859 DCHECK_EQ(GetReturnType(target_block_data, i),
7860 slots_[target_block_data.first_block_index_ + i].value_type);
7863 UpdateStack(target_block_data.stack_size_ - params_count + i,
7864 target_block_data.first_block_index_ + i);
7865
7866#ifdef V8_ENABLE_DRUMBRAKE_TRACING
7867 if (v8_flags.trace_drumbrake_execution) {
7868 START_EMIT_INSTR_HANDLER_WITH_ID(trace_UpdateStack) {
7869 EmitStackIndex(target_block_data.stack_size_ - params_count + i);
7870 EmitSlotOffset(
7871 slots_[target_block_data.first_block_index_ + i].slot_offset *
7872 kSlotSize);
7873 }
7874 END_EMIT_INSTR_HANDLER()
7875 }
7876#endif // V8_ENABLE_DRUMBRAKE_TRACING
7877 }
7878 }
7879}
7880
7881void WasmBytecodeGenerator::RestoreIfElseParams(uint32_t if_block_index) {
7882 const WasmBytecodeGenerator::BlockData& if_block_data =
7883 blocks_[if_block_index];
7884 DCHECK_EQ(if_block_data.opcode_, kExprIf);
7885
7886 stack_.resize(blocks_[if_block_index].stack_size_);
7887 uint32_t params_count = if_block_index == 0 ? 0 : ParamsCount(if_block_data);
7888 for (uint32_t i = 0; i < params_count; i++) {
7889 UpdateStack(if_block_data.stack_size_ - params_count + i,
7890 if_block_data.GetParam(i), GetParamType(if_block_data, i));
7891#ifdef V8_ENABLE_DRUMBRAKE_TRACING
7892 if (v8_flags.trace_drumbrake_execution) {
7893 START_EMIT_INSTR_HANDLER_WITH_ID(trace_UpdateStack) {
7894 EmitStackIndex(if_block_data.stack_size_ - params_count + i);
7895 EmitSlotOffset(slots_[if_block_data.GetParam(i)].slot_offset *
7896 kSlotSize);
7897 }
7898 END_EMIT_INSTR_HANDLER()
7899 }
7900#endif // V8_ENABLE_DRUMBRAKE_TRACING
7901 }
7902}
7903
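// Worked example: a body with two i32.const and one f64.const reserves
// 2 * (4 / kSlotSize) + (8 / kSlotSize) const slots; S128 constants
// (s128.const, i8x16.shuffle) add sizeof(Simd128) / kSlotSize each.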
7904 uint32_t WasmBytecodeGenerator::ScanConstInstructions() const {
7905 Decoder decoder(wasm_code_->start, wasm_code_->end);
7906 uint32_t const_slots_size = 0;
7907 pc_t pc = 0;
7908 pc_t limit = wasm_code_->end - wasm_code_->start;
7909 while (pc < limit) {
7910 uint32_t opcode = wasm_code_->start[pc];
7911 if (opcode == kExprI32Const || opcode == kExprF32Const) {
7912 const_slots_size += sizeof(uint32_t) / kSlotSize;
7913 } else if (opcode == kExprI64Const || opcode == kExprF64Const) {
7914 const_slots_size += sizeof(uint64_t) / kSlotSize;
7915 } else if (opcode == kSimdPrefix) {
7916 auto [opcode_index, opcode_len] =
7917 decoder.read_u32v<Decoder::NoValidationTag>(
7918 wasm_code_->start + pc + 1, "prefixed opcode index");
7919 opcode = (kSimdPrefix << 8) | opcode_index;
7920 if (opcode == kExprS128Const || opcode == kExprI8x16Shuffle) {
7921 const_slots_size += sizeof(Simd128) / kSlotSize;
7922 }
7923 }
7924 pc++;
7925 }
7926 return const_slots_size;
7927}
7928
7929 int32_t WasmBytecodeGenerator::EndBlock(WasmOpcode opcode) {
7930 WasmBytecodeGenerator::BlockData& block_data = blocks_[current_block_index_];
7931 bool is_try_catch =
7932 block_data.IsTry() || block_data.IsCatch() || block_data.IsCatchAll();
7933
7935
7936 block_data.end_code_offset_ = CurrentCodePos();
7937 if (opcode == kExprEnd && block_data.IsElse()) {
7938 DCHECK_GT(block_data.if_else_block_index_, 0);
7939 blocks_[block_data.if_else_block_index_].end_code_offset_ =
7940 CurrentCodePos();
7941 }
7942
7943 if (!is_try_catch) {
7944 current_block_index_ = blocks_[current_block_index_].parent_block_index_;
7945 }
7946
7947 if (is_try_catch && (opcode == kExprEnd || opcode == kExprDelegate)) {
7948 int32_t try_block_index =
7949 eh_data_.EndTryCatchBlocks(current_block_index_, CurrentCodePos());
7950 DCHECK_GE(try_block_index, 0);
7951 current_block_index_ = blocks_[try_block_index].parent_block_index_;
7952 }
7953
7955
7956 return current_block_index_;
7957}
7958
7959 void WasmBytecodeGenerator::Return() {
7960 if (current_block_index_ >= 0) {
7961 StoreBlockParamsAndResultsIntoSlots(0, kExprReturn);
7962 }
7963
7964 START_EMIT_INSTR_HANDLER_WITH_ID(s2s_Return) {
7965 const WasmBytecodeGenerator::BlockData& target_block_data = blocks_[0];
7966 uint32_t final_stack_size =
7967 target_block_data.stack_size_ + ReturnsCount(target_block_data);
7968 EmitStackIndex(final_stack_size);
7969 }
7971}
7972
7973 WasmInstruction WasmBytecodeGenerator::DecodeInstruction(pc_t pc,
7974 Decoder& decoder) {
7975 pc_t limit = wasm_code_->end - wasm_code_->start;
7976 if (pc >= limit) return WasmInstruction();
7977
7978 int len = 1;
7979 uint8_t orig = wasm_code_->start[pc];
7980 WasmOpcode opcode = static_cast<WasmOpcode>(orig);
7981 if (WasmOpcodes::IsPrefixOpcode(opcode)) {
7982 uint32_t prefixed_opcode_length;
7983 std::tie(opcode, prefixed_opcode_length) =
7984 decoder.read_prefixed_opcode<Decoder::NoValidationTag>(
7985 wasm_code_->at(pc));
7986 // skip breakpoint by switching on original code.
7987 len = prefixed_opcode_length;
7988 }
7989
7990 WasmInstruction::Optional optional;
7991 switch (orig) {
7992 case kExprUnreachable:
7993 break;
7994 case kExprNop:
7995 break;
7996 case kExprBlock:
7997 case kExprLoop:
7998 case kExprIf:
7999 case kExprTry: {
8000 BlockTypeImmediate imm(WasmEnabledFeatures::All(), &decoder,
8001 wasm_code_->at(pc + 1), Decoder::kNoValidation);
8002 if (imm.sig_index.valid()) {
8003 // The block has at least one argument or at least two results, its
8004 // signature is identified by sig_index.
8005 optional.block.sig_index = imm.sig_index;
8006 optional.block.value_type_bitfield = kWasmBottom.raw_bit_field();
8007 } else if (imm.sig.return_count() + imm.sig.parameter_count() == 0) {
8008 // Void signature: no arguments and no results.
8011 } else {
8012 // No arguments and one result.
8014 std::optional<wasm::ValueType> wasm_return_type =
8015 GetWasmReturnTypeFromSignature(&imm.sig);
8016 DCHECK(wasm_return_type.has_value());
8017 optional.block.value_type_bitfield =
8018 wasm_return_type.value().raw_bit_field();
8019 }
8020 len = 1 + imm.length;
8021 break;
8022 }
8023 case kExprElse:
8024 break;
8025 case kExprCatch: {
8026 TagIndexImmediate imm(&decoder, wasm_code_->at(pc + 1),
8027 Decoder::kNoValidation);
8028 optional.index = imm.index;
8029 len = 1 + imm.length;
8030 break;
8031 }
8032 case kExprCatchAll:
8033 break;
8034 case kExprEnd:
8035 break;
8036 case kExprThrow: {
8037 TagIndexImmediate imm(&decoder, wasm_code_->at(pc + 1),
8038 Decoder::kNoValidation);
8039 len = 1 + imm.length;
8040 optional.index = imm.index;
8041 break;
8042 }
8043 case kExprRethrow:
8044 case kExprBr:
8045 case kExprBrIf:
8046 case kExprBrOnNull:
8047 case kExprBrOnNonNull:
8048 case kExprDelegate: {
8049 BranchDepthImmediate imm(&decoder, wasm_code_->at(pc + 1),
8050 Decoder::kNoValidation);
8051 len = 1 + imm.length;
8052 optional.depth = imm.depth;
8053 break;
8054 }
8055 case kExprBrTable: {
8056 BranchTableImmediate imm(&decoder, wasm_code_->at(pc + 1),
8057 Decoder::kNoValidation);
8058 BranchTableIterator<Decoder::NoValidationTag> iterator(&decoder, imm);
8059 optional.br_table.table_count = imm.table_count;
8060 optional.br_table.labels_index =
8061 static_cast<uint32_t>(br_table_labels_.size());
8062 for (uint32_t i = 0; i <= imm.table_count; i++) {
8063 DCHECK(iterator.has_next());
8064 br_table_labels_.emplace_back(iterator.next());
8065 }
8066 len = static_cast<int>(1 + iterator.pc() - imm.start);
8067 break;
8068 }
8069 case kExprReturn:
8070 break;
8071 case kExprCallFunction:
8072 case kExprReturnCall: {
8073 CallFunctionImmediate imm(&decoder, wasm_code_->at(pc + 1),
8074 Decoder::kNoValidation);
8075 len = 1 + imm.length;
8076 optional.index = imm.index;
8077 break;
8078 }
8079 case kExprCallIndirect:
8080 case kExprReturnCallIndirect: {
8081 CallIndirectImmediate imm(&decoder, wasm_code_->at(pc + 1),
8082 Decoder::kNoValidation);
8083 len = 1 + imm.length;
8084 optional.indirect_call.table_index = imm.table_imm.index;
8085 optional.indirect_call.sig_index = imm.sig_imm.index.index;
8086 break;
8087 }
8088 case kExprDrop:
8089 break;
8090 case kExprSelect:
8091 break;
8092 case kExprSelectWithType: {
8093 SelectTypeImmediate imm(WasmEnabledFeatures::All(), &decoder,
8094 wasm_code_->at(pc + 1), Decoder::kNoValidation);
8095 len = 1 + imm.length;
8096 break;
8097 }
8098 case kExprLocalGet: {
8099 IndexImmediate imm(&decoder, wasm_code_->at(pc + 1), "local index",
8100 Decoder::kNoValidation);
8101 len = 1 + imm.length;
8102 optional.index = imm.index;
8103 break;
8104 }
8105 case kExprLocalSet: {
8106 IndexImmediate imm(&decoder, wasm_code_->at(pc + 1), "local index",
8107 Decoder::kNoValidation);
8108 len = 1 + imm.length;
8109 optional.index = imm.index;
8110 break;
8111 }
8112 case kExprLocalTee: {
8113 IndexImmediate imm(&decoder, wasm_code_->at(pc + 1), "local index",
8114 Decoder::kNoValidation);
8115 len = 1 + imm.length;
8116 optional.index = imm.index;
8117 break;
8118 }
8119 case kExprGlobalGet: {
8120 GlobalIndexImmediate imm(&decoder, wasm_code_->at(pc + 1),
8121 Decoder::kNoValidation);
8122 len = 1 + imm.length;
8123 optional.index = imm.index;
8124 break;
8125 }
8126 case kExprGlobalSet: {
8127 GlobalIndexImmediate imm(&decoder, wasm_code_->at(pc + 1),
8128 Decoder::kNoValidation);
8129 len = 1 + imm.length;
8130 optional.index = imm.index;
8131 break;
8132 }
8133 case kExprTableGet: {
8134 IndexImmediate imm(&decoder, wasm_code_->at(pc + 1), "table index",
8135 Decoder::kNoValidation);
8136 len = 1 + imm.length;
8137 optional.index = imm.index;
8138 break;
8139 }
8140 case kExprTableSet: {
8141 IndexImmediate imm(&decoder, wasm_code_->at(pc + 1), "table index",
8142 Decoder::kNoValidation);
8143 len = 1 + imm.length;
8144 optional.index = imm.index;
8145 break;
8146 }
8147
8148#define LOAD_CASE(name, ctype, mtype, rep, type) \
8149 case kExpr##name: { \
8150 MemoryAccessImmediate imm( \
8151 &decoder, wasm_code_->at(pc + 1), sizeof(ctype), \
8152 Decoder::kNoValidation); \
8153 len = 1 + imm.length; \
8154 optional.offset = imm.offset; \
8155 break; \
8156 }
8157 LOAD_CASE(I32LoadMem8S, int32_t, int8_t, kWord8, I32);
8158 LOAD_CASE(I32LoadMem8U, int32_t, uint8_t, kWord8, I32);
8159 LOAD_CASE(I32LoadMem16S, int32_t, int16_t, kWord16, I32);
8160 LOAD_CASE(I32LoadMem16U, int32_t, uint16_t, kWord16, I32);
8161 LOAD_CASE(I64LoadMem8S, int64_t, int8_t, kWord8, I64);
8162 LOAD_CASE(I64LoadMem8U, int64_t, uint8_t, kWord8, I64);
8163 LOAD_CASE(I64LoadMem16S, int64_t, int16_t, kWord16, I64);
8164 LOAD_CASE(I64LoadMem16U, int64_t, uint16_t, kWord16, I64);
8165 LOAD_CASE(I64LoadMem32S, int64_t, int32_t, kWord32, I64);
8166 LOAD_CASE(I64LoadMem32U, int64_t, uint32_t, kWord32, I64);
8167 LOAD_CASE(I32LoadMem, int32_t, int32_t, kWord32, I32);
8168 LOAD_CASE(I64LoadMem, int64_t, int64_t, kWord64, I64);
8169 LOAD_CASE(F32LoadMem, Float32, uint32_t, kFloat32, F32);
8170 LOAD_CASE(F64LoadMem, Float64, uint64_t, kFloat64, F64);
8171#undef LOAD_CASE
8172
8173#define STORE_CASE(name, ctype, mtype, rep, type) \
8174 case kExpr##name: { \
8175 MemoryAccessImmediate imm( \
8176 &decoder, wasm_code_->at(pc + 1), sizeof(ctype), \
8177 Decoder::kNoValidation); \
8178 len = 1 + imm.length; \
8179 optional.offset = imm.offset; \
8180 break; \
8181 }
8182 STORE_CASE(I32StoreMem8, int32_t, int8_t, kWord8, I32);
8183 STORE_CASE(I32StoreMem16, int32_t, int16_t, kWord16, I32);
8184 STORE_CASE(I64StoreMem8, int64_t, int8_t, kWord8, I64);
8185 STORE_CASE(I64StoreMem16, int64_t, int16_t, kWord16, I64);
8186 STORE_CASE(I64StoreMem32, int64_t, int32_t, kWord32, I64);
8187 STORE_CASE(I32StoreMem, int32_t, int32_t, kWord32, I32);
8188 STORE_CASE(I64StoreMem, int64_t, int64_t, kWord64, I64);
8189 STORE_CASE(F32StoreMem, Float32, uint32_t, kFloat32, F32);
8190 STORE_CASE(F64StoreMem, Float64, uint64_t, kFloat64, F64);
8191#undef STORE_CASE
8192
8193 case kExprMemorySize: {
8194 MemoryIndexImmediate imm(&decoder, wasm_code_->at(pc + 1),
8195 Decoder::kNoValidation);
8196 len = 1 + imm.length;
8197 break;
8198 }
8199 case kExprMemoryGrow: {
8200 MemoryIndexImmediate imm(&decoder, wasm_code_->at(pc + 1),
8201 Decoder::kNoValidation);
8202 len = 1 + imm.length;
8203 break;
8204 }
8205 case kExprI32Const: {
8206 ImmI32Immediate imm(&decoder, wasm_code_->at(pc + 1),
8207 Decoder::kNoValidation);
8208 len = 1 + imm.length;
8209 optional.i32 = imm.value;
8210 break;
8211 }
8212 case kExprI64Const: {
8213 ImmI64Immediate imm(&decoder, wasm_code_->at(pc + 1),
8214 Decoder::kNoValidation);
8215 len = 1 + imm.length;
8216 optional.i64 = imm.value;
8217 break;
8218 }
8219 case kExprF32Const: {
8220 ImmF32Immediate imm(&decoder, wasm_code_->at(pc + 1),
8221 Decoder::kNoValidation);
8222 len = 1 + imm.length;
8223 optional.f32 = imm.value;
8224 break;
8225 }
8226 case kExprF64Const: {
8227 ImmF64Immediate imm(&decoder, wasm_code_->at(pc + 1),
8228 Decoder::kNoValidation);
8229 len = 1 + imm.length;
8230 optional.f64 = imm.value;
8231 break;
8232 }
8233
8234#define EXECUTE_BINOP(name, ctype, reg, op, type) \
8235 case kExpr##name: \
8236 break;
8237
8242#undef EXECUTE_BINOP
8243
8244#define EXECUTE_UNOP(name, ctype, reg, op, type) \
8245 case kExpr##name: \
8246 break;
8247
8249#undef EXECUTE_UNOP
8250
8251#define EXECUTE_UNOP(name, from_ctype, from_type, from_reg, to_ctype, to_type, \
8252 to_reg) \
8253 case kExpr##name: \
8254 break;
8255
8259#undef EXECUTE_UNOP
8260
8261#define EXECUTE_UNOP(name, from_ctype, from_type, to_ctype, to_type, op) \
8262 case kExpr##name: \
8263 break;
8264
8266#undef EXECUTE_UNOP
8267
8268#define EXECUTE_UNOP(name, from_ctype, from_type, to_ctype, to_type) \
8269 case kExpr##name: \
8270 break;
8271
8273#undef EXECUTE_UNOP
8274
8275 case kExprRefNull: {
8276 HeapTypeImmediate imm(WasmEnabledFeatures::All(), &decoder,
8277 wasm_code_->at(pc + 1),
8278 Decoder::kNoValidation);
8279 optional.ref_type_bit_field = imm.type.raw_bit_field();
8280 len = 1 + imm.length;
8281 break;
8282 }
8283 case kExprRefIsNull:
8284 case kExprRefEq:
8285 case kExprRefAsNonNull: {
8286 len = 1;
8287 break;
8288 }
8289 case kExprRefFunc: {
8290 IndexImmediate imm(&decoder, wasm_code_->at(pc + 1), "function index",
8291 Decoder::kNoValidation);
8292 optional.index = imm.index;
8293 len = 1 + imm.length;
8294 break;
8295 }
8296
8297 case kGCPrefix:
8298 DecodeGCOp(opcode, &optional, &decoder, wasm_code_, pc, &len);
8299 break;
8300
8301 case kNumericPrefix:
8302 DecodeNumericOp(opcode, &optional, &decoder, wasm_code_, pc, &len);
8303 break;
8304
8305 case kAtomicPrefix:
8306 DecodeAtomicOp(opcode, &optional, &decoder, wasm_code_, pc, &len);
8307 break;
8308
8309 case kSimdPrefix: {
8310 bool is_valid_simd_op =
8311 DecodeSimdOp(opcode, &optional, &decoder, wasm_code_, pc, &len);
8312 if (V8_UNLIKELY(!is_valid_simd_op)) {
8313 UNREACHABLE();
8314 }
8315 break;
8316 }
8317
8318 case kExprCallRef:
8319 case kExprReturnCallRef: {
8320 SigIndexImmediate imm(&decoder, wasm_code_->at(pc + 1),
8321 Decoder::kNoValidation);
8322 optional.index = imm.index.index;
8323 len = 1 + imm.length;
8324 break;
8325 }
8326
8327 default:
8328 // Not implemented yet
8329 UNREACHABLE();
8330 }
8331
8332 return WasmInstruction{orig, opcode, len, static_cast<uint32_t>(pc),
8333 optional};
8334}
8335
8337 WasmInstruction::Optional* optional,
8338 Decoder* decoder, InterpreterCode* code,
8339 pc_t pc, int* const len) {
8340 switch (opcode) {
8341 case kExprStructNew:
8342 case kExprStructNewDefault: {
8343 StructIndexImmediate imm(decoder, code->at(pc + *len),
8344 Decoder::kNoValidation);
8345 optional->index = imm.index.index;
8346 *len += imm.length;
8347 break;
8348 }
8349 case kExprStructGet:
8350 case kExprStructGetS:
8351 case kExprStructGetU:
8352 case kExprStructSet: {
8353 FieldImmediate imm(decoder, code->at(pc + *len), Decoder::kNoValidation);
8354 optional->gc_field_immediate = {imm.struct_imm.index.index,
8355 imm.field_imm.index};
8356 *len += imm.length;
8357 break;
8358 }
8359 case kExprArrayNew:
8360 case kExprArrayNewDefault:
8361 case kExprArrayGet:
8362 case kExprArrayGetS:
8363 case kExprArrayGetU:
8364 case kExprArraySet:
8365 case kExprArrayFill: {
8366 ArrayIndexImmediate imm(decoder, code->at(pc + *len),
8367 Decoder::kNoValidation);
8368 optional->index = imm.index.index;
8369 *len += imm.length;
8370 break;
8371 }
8372
8373 case kExprArrayNewFixed: {
8374 ArrayIndexImmediate array_imm(decoder, code->at(pc + *len),
8375 Decoder::kNoValidation);
8376 optional->gc_array_new_fixed.array_index = array_imm.index.index;
8377 *len += array_imm.length;
8378 IndexImmediate data_imm(decoder, code->at(pc + *len), "array length",
8379 Decoder::kNoValidation);
8380 optional->gc_array_new_fixed.length = data_imm.index;
8381 *len += data_imm.length;
8382 break;
8383 }
8384
8385 case kExprArrayNewData:
8386 case kExprArrayNewElem:
8387 case kExprArrayInitData:
8388 case kExprArrayInitElem: {
8389 ArrayIndexImmediate array_imm(decoder, code->at(pc + *len),
8390 Decoder::kNoValidation);
8391 optional->gc_array_new_or_init_data.array_index = array_imm.index.index;
8392 *len += array_imm.length;
8393 IndexImmediate data_imm(decoder, code->at(pc + *len), "segment index",
8394 Decoder::kNoValidation);
8395 optional->gc_array_new_or_init_data.data_index = data_imm.index;
8396 *len += data_imm.length;
8397 break;
8398 }
8399
8400 case kExprArrayCopy: {
8401 ArrayIndexImmediate dest_array_imm(decoder, code->at(pc + *len),
8402 Decoder::kNoValidation);
8403 optional->gc_array_copy.dest_array_index = dest_array_imm.index.index;
8404 *len += dest_array_imm.length;
8405 ArrayIndexImmediate src_array_imm(decoder, code->at(pc + *len),
8406 Decoder::kNoValidation);
8407 optional->gc_array_copy.src_array_index = src_array_imm.index.index;
8408 *len += src_array_imm.length;
8409 break;
8410 }
8411
8412 case kExprRefI31:
8413 case kExprI31GetS:
8414 case kExprI31GetU:
8415 case kExprAnyConvertExtern:
8416 case kExprExternConvertAny:
8417 case kExprArrayLen:
8418 break;
8419
8420 case kExprRefCast:
8421 case kExprRefCastNull:
8422 case kExprRefTest:
8423 case kExprRefTestNull: {
8424 HeapTypeImmediate imm(WasmEnabledFeatures::All(), decoder,
8425 code->at(pc + *len), Decoder::kNoValidation);
8427 optional->gc_heap_type_immediate.length = imm.length;
8428 optional->gc_heap_type_immediate.type_bit_field =
8429 imm.type.raw_bit_field();
8430 *len += imm.length;
8431 break;
8432 }
8433
8434 case kExprBrOnCast:
8435 case kExprBrOnCastFail: {
8436 BrOnCastImmediate flags_imm(decoder, code->at(pc + *len),
8437 Decoder::kNoValidation);
8438 *len += flags_imm.length;
8439 BranchDepthImmediate branch(decoder, code->at(pc + *len),
8440 Decoder::kNoValidation);
8441 *len += branch.length;
8442 HeapTypeImmediate source_imm(WasmEnabledFeatures::All(), decoder,
8443 code->at(pc + *len), Decoder::kNoValidation);
8445 *len += source_imm.length;
8446 HeapTypeImmediate target_imm(WasmEnabledFeatures::All(), decoder,
8447 code->at(pc + *len), Decoder::kNoValidation);
8449 *len += target_imm.length;
8450 DCHECK(target_imm.type.raw_bit_field() <
8453 branch.depth, flags_imm.flags.src_is_null,
8454 flags_imm.flags.res_is_null, target_imm.type.raw_bit_field()};
8455 break;
8456 }
8457
8458 default:
8459 FATAL("Unknown or unimplemented opcode #%d:%s", code->start[pc],
8460 WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(code->start[pc])));
8461 UNREACHABLE();
8462 }
8463}
8464
8466 WasmInstruction::Optional* optional,
8467 Decoder* decoder,
8468 InterpreterCode* code, pc_t pc,
8469 int* const len) {
8470 switch (opcode) {
8471#define DECODE_UNOP(name, from_ctype, from_type, from_reg, to_ctype, to_type, \
8472 to_reg) \
8473 case kExpr##name: \
8474 break;
8475
8477#undef DECODE_UNOP
8478
8479 case kExprMemoryInit: {
8480 MemoryInitImmediate imm(decoder, code->at(pc + *len),
8481 Decoder::kNoValidation);
8482 DCHECK_LT(imm.data_segment.index, module_->num_declared_data_segments);
8483 optional->index = imm.data_segment.index;
8484 *len += imm.length;
8485 break;
8486 }
8487 case kExprDataDrop: {
8488 IndexImmediate imm(decoder, code->at(pc + *len), "data segment index",
8489 Decoder::kNoValidation);
8490 DCHECK_LT(imm.index, module_->num_declared_data_segments);
8491 optional->index = imm.index;
8492 *len += imm.length;
8493 break;
8494 }
8495 case kExprMemoryCopy: {
8496 MemoryCopyImmediate imm(decoder, code->at(pc + *len),
8497 Decoder::kNoValidation);
8498 *len += imm.length;
8499 break;
8500 }
8501 case kExprMemoryFill: {
8502 MemoryIndexImmediate imm(decoder, code->at(pc + *len),
8503 Decoder::kNoValidation);
8504 *len += imm.length;
8505 break;
8506 }
8507 case kExprTableInit: {
8508 TableInitImmediate imm(decoder, code->at(pc + *len),
8509 Decoder::kNoValidation);
8510 optional->table_init.table_index = imm.table.index;
8511 optional->table_init.element_segment_index = imm.element_segment.index;
8512 *len += imm.length;
8513 break;
8514 }
8515 case kExprElemDrop: {
8516 IndexImmediate imm(decoder, code->at(pc + *len), "element segment index",
8517 Decoder::kNoValidation);
8518 optional->index = imm.index;
8519 *len += imm.length;
8520 break;
8521 }
8522 case kExprTableCopy: {
8523 TableCopyImmediate imm(decoder, code->at(pc + *len),
8524 Decoder::kNoValidation);
8525 optional->table_copy.dst_table_index = imm.table_dst.index;
8526 optional->table_copy.src_table_index = imm.table_src.index;
8527 *len += imm.length;
8528 break;
8529 }
8530 case kExprTableGrow: {
8531 IndexImmediate imm(decoder, code->at(pc + *len), "table index",
8532 Decoder::kNoValidation);
8533 optional->index = imm.index;
8534 *len += imm.length;
8535 break;
8536 }
8537 case kExprTableSize: {
8538 IndexImmediate imm(decoder, code->at(pc + *len), "table index",
8539 Decoder::kNoValidation);
8540 optional->index = imm.index;
8541 *len += imm.length;
8542 break;
8543 }
8544 case kExprTableFill: {
8545 IndexImmediate imm(decoder, code->at(pc + *len), "table index",
8546 Decoder::kNoValidation);
8547 optional->index = imm.index;
8548 *len += imm.length;
8549 break;
8550 }
8551 default:
8552 FATAL("Unknown or unimplemented opcode #%d:%s", code->start[pc],
8553 WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(code->start[pc])));
8554 UNREACHABLE();
8555 }
8556}
8557
8559 WasmInstruction::Optional* optional,
8560 Decoder* decoder,
8561 InterpreterCode* code, pc_t pc,
8562 int* const len) {
8563 switch (opcode) {
8564 case kExprAtomicNotify:
8565 case kExprI32AtomicWait: {
8566 MachineType memtype = MachineType::Uint32();
8567 MemoryAccessImmediate imm(decoder, code->at(pc + *len),
8568 ElementSizeLog2Of(memtype.representation()),
8569 Decoder::kNoValidation);
8570 optional->offset = imm.offset;
8571 *len += imm.length;
8572 break;
8573 }
8574 case kExprI64AtomicWait: {
8575 MachineType memtype = MachineType::Uint64();
8576 MemoryAccessImmediate imm(decoder, code->at(pc + *len),
8577 ElementSizeLog2Of(memtype.representation()),
8578 Decoder::kNoValidation);
8579 optional->offset = imm.offset;
8580 *len += imm.length;
8581 break;
8582 }
8583 case kExprAtomicFence:
8584 *len += 1;
8585 break;
8586
8587#define ATOMIC_BINOP(name, Type, ctype, type, op_ctype, op_type, operation) \
8588 case kExpr##name: { \
8589 MachineType memtype = MachineType::Type(); \
8590 MemoryAccessImmediate imm(decoder, code->at(pc + *len), \
8591 ElementSizeLog2Of(memtype.representation()), \
8592 Decoder::kNoValidation); \
8593 optional->offset = imm.offset; \
8594 *len += imm.length; \
8595 break; \
8596 }
8597 FOREACH_ATOMIC_BINOP(ATOMIC_BINOP)
8598 #undef ATOMIC_BINOP
8599
8600#define ATOMIC_OP(name, Type, ctype, type, op_ctype, op_type) \
8601 case kExpr##name: { \
8602 MachineType memtype = MachineType::Type(); \
8603 MemoryAccessImmediate imm(decoder, code->at(pc + *len), \
8604 ElementSizeLog2Of(memtype.representation()), \
8605 Decoder::kNoValidation); \
8606 optional->offset = imm.offset; \
8607 *len += imm.length; \
8608 break; \
8609 }
8610 FOREACH_ATOMIC_COMPARE_EXCHANGE_OP(ATOMIC_OP)
8611 FOREACH_ATOMIC_LOAD_OP(ATOMIC_OP)
8612 FOREACH_ATOMIC_STORE_OP(ATOMIC_OP)
8613 #undef ATOMIC_OP
8614
8615 default:
8616 FATAL("Unknown or unimplemented opcode #%d:%s", code->start[pc],
8617 WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(code->start[pc])));
8618 UNREACHABLE();
8619 }
8620}
8621
8622const char* GetRegModeString(RegMode reg_mode) {
8623 switch (reg_mode) {
8624 case RegMode::kNoReg:
8625 return "NoReg";
8626 case RegMode::kAnyReg:
8627 return "AnyReg";
8628 case RegMode::kI32Reg:
8629 return "I32Reg";
8630 case RegMode::kI64Reg:
8631 return "I64Reg";
8632 case RegMode::kF32Reg:
8633 return "F32Reg";
8634 case RegMode::kF64Reg:
8635 return "F64Reg";
8636 default:
8637 UNREACHABLE();
8638 }
8639}
8640
8641 const char* GetOperatorModeString(OperatorMode mode) {
8642 switch (mode) {
8643 case kR2R:
8644 return "R2R";
8645 case kR2S:
8646 return "R2S";
8647 case kS2R:
8648 return "S2R";
8649 case kS2S:
8650 return "S2S";
8651 default:
8652 UNREACHABLE();
8653 }
8654}
8655
8656#if !defined(V8_DRUMBRAKE_BOUNDS_CHECKS)
8657 INSTRUCTION_HANDLER_FUNC
8658 TrapMemOutOfBounds(const uint8_t* code, uint32_t* sp,
8659 WasmInterpreterRuntime* wasm_runtime, int64_t r0,
8660 double fp0) {
8661 TRAP(TrapReason::kTrapMemOutOfBounds)
8662}
8663#endif // !defined(V8_DRUMBRAKE_BOUNDS_CHECKS)
8664
8665// static
8669
8670// static
8671 void WasmInterpreter::GlobalTearDown() {
8672 // TODO(paolosev@microsoft.com): Support multithreading.
8673
8674#ifdef DRUMBRAKE_ENABLE_PROFILING
8675 PrintAndClearProfilingData();
8676#endif // DRUMBRAKE_ENABLE_PROFILING
8677
8678 if (v8_flags.drumbrake_compact_bytecode) {
8679 WasmBytecodeGenerator::PrintBytecodeCompressionStats();
8680 }
8681
8683}
8684
8685// static
8686 void WasmBytecodeGenerator::PrintBytecodeCompressionStats() {
8687 size_t total_bytecode_size = std::atomic_load(&total_bytecode_size_);
8688 printf("Total bytecode size: %zu bytes.\n", total_bytecode_size);
8689 size_t space_saved_in_bytes =
8690 2 * std::atomic_load(&emitted_short_slot_offset_count_) +
8691 4 * std::atomic_load(&emitted_short_memory_offset_count_);
8692 double saved_pct = (total_bytecode_size + space_saved_in_bytes == 0)
8693 ? .0
8694 : 100.0 * space_saved_in_bytes /
8695 (total_bytecode_size + space_saved_in_bytes);
8696 printf("Bytes saved: %zu (%.1f%%).\n", space_saved_in_bytes, saved_pct);
8697}
8698
8700 bool is_indirect_call) {
8701 size_t stack_index;
8702 if (is_indirect_call) {
8703 // Subtract one to discard the function index on the top of the stack.
8704 DCHECK_LE(sig->parameter_count(), stack_.size() - 1);
8705 stack_index = stack_.size() - sig->parameter_count() - 1;
8706 } else {
8707 DCHECK_LE(sig->parameter_count(), stack_.size());
8708 stack_index = stack_.size() - sig->parameter_count();
8709 }
8710
8711 bool fast_path = sig->parameter_count() > 1 && sig->parameter_count() < 32 &&
8712 !WasmBytecode::HasRefOrSimdArgs(sig);
8713 if (fast_path) {
8714 if (sig->parameter_count() == 2) {
8715 const ValueType type0 = sig->GetParam(0);
8716 const ValueKind kind0 = type0.kind();
8717 ValueType type1 = sig->GetParam(1);
8718 const ValueKind kind1 = type1.kind();
8719 uint32_t to = CreateSlot(type0);
8720 CreateSlot(type1);
8721
8722 uint32_t copyslot32_two_args_func_id =
8723 ((kind0 == kI64 || kind0 == kF64) ? 0x01 : 0x00) |
8724 ((kind1 == kI64 || kind1 == kF64) ? 0x02 : 0x00);
8725 static const InstructionHandler kCopySlot32TwoArgFuncs[4] = {
8726 k_s2s_CopySlot_ll, k_s2s_CopySlot_lq, k_s2s_CopySlot_ql,
8727 k_s2s_CopySlot_qq};
8728
8729 START_EMIT_INSTR_HANDLER() {
8730 EmitFnId(kCopySlot32TwoArgFuncs[copyslot32_two_args_func_id]);
8731 EmitSlotOffset(slots_[to].slot_offset);
8732 EmitSlotOffset(slots_[stack_[stack_index]].slot_offset);
8733 stack_index++;
8734 EmitSlotOffset(slots_[stack_[stack_index]].slot_offset);
8735 stack_index++;
8736 }
8737 END_EMIT_INSTR_HANDLER()
8738 } else {
8739 START_EMIT_INSTR_HANDLER_WITH_ID(s2s_CopySlotMulti) {
8740 EmitI32Const(static_cast<uint32_t>(sig->parameter_count()));
8741
8742 uint32_t arg_size_mask = 0;
8743 for (size_t index = 0; index < sig->parameter_count(); index++) {
8744 const ValueType value_type = sig->GetParam(index);
8745 const ValueKind kind = value_type.kind();
8746 if (kind == kI64 || kind == kF64) {
8747 arg_size_mask |= (1 << index);
8748 }
8749 }
8750 EmitI32Const(arg_size_mask);
8751
8752 uint32_t to = 0;
8753 for (size_t index = 0; index < sig->parameter_count(); index++) {
8754 const ValueType value_type = sig->GetParam(index);
8755 to = CreateSlot(value_type);
8756 if (index == 0) {
8757 EmitSlotOffset(slots_[to].slot_offset);
8758 }
8759 EmitSlotOffset(slots_[stack_[stack_index]].slot_offset);
8760 stack_index++;
8761 }
8762 }
8763 END_EMIT_INSTR_HANDLER()
8764 }
8765 } else {
8766 // Slow path.
8767 for (size_t index = 0; index < sig->parameter_count(); index++) {
8768 ValueType value_type = sig->GetParam(index);
8769 uint32_t to = CreateSlot(value_type);
8770 EmitCopySlot(value_type, stack_[stack_index], to);
8771 stack_index++;
8772 }
8773 }
8774}
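// [Worked example, not part of the original source] On the fast path above,
// a two-parameter signature (i64, i32) yields kind0 == kI64 (bit 0 set) and
// kind1 == kI32 (bit 1 clear), so copyslot32_two_args_func_id == 0x01 and a
// single fused handler, kCopySlot32TwoArgFuncs[1] == k_s2s_CopySlot_lq, is
// emitted with three slot offsets (destination, arg0, arg1) instead of two
// separate copy instructions as on the slow path.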
8775
8776// static
8780
8781// Returns true if {obj_type} is a subtype of {expected_type}; in that case
8782// the type check will always succeed.
8783bool WasmBytecodeGenerator::TypeCheckAlwaysSucceeds(
8784 ValueType obj_type, HeapType expected_type) const {
8785 return IsSubtypeOf(obj_type, ValueType::RefNull(expected_type), module_);
8786}
8787
8788// Returns true if type checking will always fail, either because the types
8789// are unrelated or because the target_type is one of the null sentinels and
8790// conversion to null does not succeed.
8791bool WasmBytecodeGenerator::TypeCheckAlwaysFails(ValueType obj_type,
8792 HeapType expected_type,
8793 bool null_succeeds) const {
8794 bool types_unrelated =
8795 !IsSubtypeOf(ValueType::Ref(expected_type), obj_type, module_) &&
8796 !IsSubtypeOf(obj_type, ValueType::RefNull(expected_type), module_);
8797 // (Comment copied from function-body-decoder-impl.h).
8798 // For "unrelated" types the check can still succeed for the null value on
8799 // instructions treating null as a successful check.
8800 // TODO(12868): For string views, this implementation anticipates that
8801 // https://github.com/WebAssembly/stringref/issues/40 will be resolved
8802 // by making the views standalone types.
8803 return (types_unrelated &&
8804 (!null_succeeds || !obj_type.is_nullable() ||
8805 obj_type.is_string_view() || expected_type.is_string_view())) ||
8806 (!null_succeeds &&
8807 (expected_type.representation() == HeapType::kNone ||
8808 expected_type.representation() == HeapType::kNoFunc ||
8809 expected_type.representation() == HeapType::kNoExtern));
8810}
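// [Worked example, not part of the original source] For obj_type =
// (ref null extern) and expected_type = func, the two hierarchies are
// unrelated, so types_unrelated is true. With null_succeeds == false the
// function returns true and the check is statically dead: kExprBrOnCastFail
// below can emit an unconditional s2s_Branch. With null_succeeds == true
// the nullable input can still pass the check with a null value, so the
// function returns false and a runtime check must be emitted.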
8811
8812#ifdef DEBUG
8813// static
8814bool WasmBytecodeGenerator::HasSideEffects(WasmOpcode opcode) {
8815 switch (opcode) {
8816 case kExprBlock:
8817 case kExprLoop:
8818 case kExprTry:
8819 case kExprIf:
8820 case kExprElse:
8821 case kExprCatch:
8822 case kExprCatchAll:
8823 case kExprDelegate:
8824 case kExprEnd:
8825 case kExprBr:
8826 case kExprBrIf:
8827 case kExprBrOnNull:
8828 case kExprBrOnNonNull:
8829 case kExprBrOnCast:
8830 case kExprBrOnCastFail:
8831 case kExprBrTable:
8832 case kExprReturn:
8833 case kExprCallFunction:
8834 case kExprReturnCall:
8835 case kExprCallIndirect:
8836 case kExprReturnCallIndirect:
8837 case kExprCallRef:
8838 case kExprReturnCallRef:
8839 case kExprLocalSet:
8840 case kExprLocalTee:
8841 case kExprI32Const:
8842 case kExprI64Const:
8843 case kExprF32Const:
8844 case kExprF64Const:
8845 case kExprS128Const:
8846 case kExprI8x16Shuffle:
8847 return true;
8848
8849 case kExprUnreachable:
8850 case kExprNop:
8851 case kExprThrow:
8852 case kExprRethrow:
8853 case kExprDrop:
8854 case kExprSelect:
8855 case kExprSelectWithType:
8856 case kExprLocalGet:
8857 case kExprGlobalGet:
8858 case kExprGlobalSet:
8859 case kExprTableGet:
8860 case kExprTableSet:
8861 case kExprI32LoadMem:
8862 case kExprI32LoadMem8S:
8863 case kExprI32LoadMem8U:
8864 case kExprI32LoadMem16S:
8865 case kExprI32LoadMem16U:
8866 case kExprI64LoadMem:
8867 case kExprI64LoadMem8S:
8868 case kExprI64LoadMem8U:
8869 case kExprI64LoadMem16S:
8870 case kExprI64LoadMem16U:
8871 case kExprI64LoadMem32S:
8872 case kExprI64LoadMem32U: // 0x28 - 0x35
8873 case kExprI32StoreMem:
8874 case kExprI32StoreMem8:
8875 case kExprI32StoreMem16:
8876 case kExprI64StoreMem:
8877 case kExprI64StoreMem8:
8878 case kExprI64StoreMem16:
8879 case kExprI64StoreMem32: // 0x36 - 0x3e
8880 case kExprMemoryGrow:
8881 case kExprMemorySize:
8882 case kExprI32Eqz:
8883 case kExprI32Eq:
8884 case kExprI32Ne:
8885 case kExprI32LtS:
8886 case kExprI32LtU:
8887 case kExprI32GtS:
8888 case kExprI32GtU:
8889 case kExprI32LeS:
8890 case kExprI32LeU:
8891 case kExprI32GeS:
8892 case kExprI32GeU:
8893 case kExprI32Clz:
8894 case kExprI32Ctz:
8895 case kExprI32Popcnt: // 0x45 - 0x69
8896 case kExprI32Add:
8897 case kExprI32Sub:
8898 case kExprI32Mul: // 0x6a - 0x6c
8899 case kExprI32DivS:
8900 case kExprI32DivU:
8901 case kExprI32RemS:
8902 case kExprI32RemU:
8903 case kExprI32And:
8904 case kExprI32Ior:
8905 case kExprI32Xor:
8906 case kExprI32Shl:
8907 case kExprI32ShrS:
8908 case kExprI32ShrU:
8909 case kExprI32Rol:
8910 case kExprI32Ror: // 0x6d - 0x78
8911 case kExprI64Clz:
8912 case kExprI64Ctz:
8913 case kExprI64Popcnt: // 0x79 - 0x7b
8914 case kExprI64Add:
8915 case kExprI64Sub:
8916 case kExprI64Mul: // 0x7c - 0x7e
8917 case kExprI64DivS:
8918 case kExprI64DivU:
8919 case kExprI64RemS:
8920 case kExprI64RemU:
8921 case kExprI64And:
8922 case kExprI64Ior:
8923 case kExprI64Xor:
8924 case kExprI64Shl:
8925 case kExprI64ShrS:
8926 case kExprI64ShrU:
8927 case kExprI64Rol:
8928 case kExprI64Ror: // 0x7f - 0x8a
8929 case kExprF32Abs:
8930 case kExprF32Neg:
8931 case kExprF32Ceil:
8932 case kExprF32Floor:
8933 case kExprF32Trunc:
8934 case kExprF32NearestInt:
8935 case kExprF32Sqrt: // 0x8b - 0x91
8936 case kExprF32Add:
8937 case kExprF32Sub:
8938 case kExprF32Mul:
8939 case kExprF32Div:
8940 case kExprF32Min:
8941 case kExprF32Max:
8942 case kExprF32CopySign: // 0x92 - 0x98
8943 case kExprF64Abs:
8944 case kExprF64Neg:
8945 case kExprF64Ceil:
8946 case kExprF64Floor:
8947 case kExprF64Trunc:
8948 case kExprF64NearestInt:
8949 case kExprF64Sqrt: // 0x99 - 0x9f
8950 case kExprF64Add:
8951 case kExprF64Sub:
8952 case kExprF64Mul:
8953 case kExprF64Div:
8954 case kExprF64Min:
8955 case kExprF64Max:
8956 case kExprF64CopySign: // 0xa0 - 0xa6
8957 case kExprI32ConvertI64:
8958 case kExprI32SConvertF32:
8959 case kExprI32UConvertF32:
8960 case kExprI32SConvertF64:
8961 case kExprI32UConvertF64:
8962 case kExprI64SConvertI32:
8963 case kExprI64UConvertI32:
8964 case kExprI64SConvertF32:
8965 case kExprI64UConvertF32:
8966 case kExprI64SConvertF64:
8967 case kExprI64UConvertF64:
8968 case kExprF32SConvertI32:
8969 case kExprF32UConvertI32:
8970 case kExprF32SConvertI64:
8971 case kExprF32UConvertI64:
8972 case kExprF32ConvertF64:
8973 case kExprF64SConvertI32:
8974 case kExprF64UConvertI32:
8975 case kExprF64SConvertI64:
8976 case kExprF64UConvertI64:
8977 case kExprF64ConvertF32:
8978 case kExprI32ReinterpretF32:
8979 case kExprI64ReinterpretF64:
8980 case kExprF32ReinterpretI32:
8981 case kExprF64ReinterpretI64:
8982 case kExprI32SExtendI8:
8983 case kExprI32SExtendI16:
8984 case kExprI64SExtendI8:
8985 case kExprI64SExtendI16:
8986 case kExprI64SExtendI32: // 0xa7 - 0xc4
8987 case kExprRefNull:
8988 case kExprRefIsNull:
8989 case kExprRefFunc:
8990 case kExprRefEq:
8991 case kExprRefAsNonNull: // 0xd0 - 0xd4
8992 // WasmGC
8993 case kGCPrefix:
8994 case kExprStructNew:
8995 case kExprStructNewDefault:
8996 case kExprStructGet:
8997 case kExprStructGetS:
8998 case kExprStructGetU:
8999 case kExprStructSet:
9000 case kExprArrayNew:
9001 case kExprArrayNewDefault:
9002 case kExprArrayGet:
9003 case kExprArrayGetS:
9004 case kExprArrayGetU:
9005 case kExprArraySet:
9006 case kExprArrayFill:
9007 case kExprRefI31:
9008 case kExprI31GetS:
9009 case kExprI31GetU:
9010 case kExprRefCast:
9011 case kExprRefCastNull:
9012 case kExprAnyConvertExtern:
9013 case kExprExternConvertAny:
9014 case kExprArrayLen:
9015 case kExprRefTest:
9016 case kExprRefTestNull:
9017 // Numeric
9018 case kNumericPrefix:
9019 case kExprI32SConvertSatF32:
9020 case kExprI32UConvertSatF32:
9021 case kExprI32SConvertSatF64:
9022 case kExprI32UConvertSatF64:
9023 case kExprI64SConvertSatF32:
9024 case kExprI64UConvertSatF32:
9025 case kExprI64SConvertSatF64:
9026 case kExprI64UConvertSatF64:
9027 case kExprMemoryInit:
9028 case kExprDataDrop:
9029 case kExprMemoryCopy:
9030 case kExprMemoryFill:
9031 case kExprTableInit:
9032 case kExprElemDrop:
9033 case kExprTableCopy:
9034 case kExprTableGrow:
9035 case kExprTableSize:
9036 case kExprTableFill:
9037 // Atomics
9038 case kAtomicPrefix:
9039 case kExprAtomicNotify:
9040 case kExprI32AtomicWait:
9041 case kExprI64AtomicWait:
9042 case kExprAtomicFence: // 0xfe00 - 0xfe03
9043 case kExprI32AtomicLoad:
9044 case kExprI64AtomicLoad:
9045 case kExprI32AtomicStore:
9046 case kExprI64AtomicStore:
9047 case kExprI32AtomicAdd:
9048 case kExprI64AtomicAdd:
9049 case kExprI32AtomicSub:
9050 case kExprI64AtomicSub:
9051 case kExprI32AtomicAnd:
9052 case kExprI64AtomicAnd:
9053 case kExprI32AtomicOr:
9054 case kExprI64AtomicOr:
9055 case kExprI32AtomicXor:
9056 case kExprI64AtomicXor:
9057 case kExprI32AtomicExchange:
9058 case kExprI64AtomicExchange:
9059 case kExprI32AtomicCompareExchange:
9060 case kExprI64AtomicCompareExchange: // 0xfe10 - 0xfe4e
9061
9062 // SIMD
9063 case kSimdPrefix:
9064 case kExprS128LoadMem:
9065 case kExprS128Load8Splat:
9066 case kExprS128Load16Splat:
9067 case kExprS128Load32Splat:
9068 case kExprS128Load64Splat:
9069 case kExprS128StoreMem:
9070 case kExprI8x16Swizzle:
9071 case kExprI8x16Splat:
9072 case kExprI16x8Splat:
9073 case kExprI32x4Splat:
9074 case kExprI64x2Splat:
9075 case kExprF32x4Splat:
9076 case kExprF64x2Splat:
9077 case kExprI8x16ExtractLaneS:
9078 case kExprI8x16ExtractLaneU:
9079 case kExprI16x8ExtractLaneS:
9080 case kExprI16x8ExtractLaneU:
9081 case kExprI32x4ExtractLane:
9082 case kExprI64x2ExtractLane:
9083 case kExprF32x4ExtractLane:
9084 case kExprF64x2ExtractLane:
9085 case kExprI8x16ReplaceLane:
9086 case kExprI16x8ReplaceLane:
9087 case kExprI32x4ReplaceLane:
9088 case kExprI64x2ReplaceLane:
9089 case kExprF32x4ReplaceLane:
9090 case kExprF64x2ReplaceLane:
9091 case kExprI8x16Eq:
9092 case kExprI8x16Ne:
9093 case kExprI8x16LtS:
9094 case kExprI8x16LtU:
9095 case kExprI8x16GtS:
9096 case kExprI8x16GtU:
9097 case kExprI8x16LeS:
9098 case kExprI8x16LeU:
9099 case kExprI8x16GeS:
9100 case kExprI8x16GeU:
9101 case kExprI16x8Eq:
9102 case kExprI16x8Ne:
9103 case kExprI16x8LtS:
9104 case kExprI16x8LtU:
9105 case kExprI16x8GtS:
9106 case kExprI16x8GtU:
9107 case kExprI16x8LeS:
9108 case kExprI16x8LeU:
9109 case kExprI16x8GeS:
9110 case kExprI16x8GeU:
9111 case kExprI32x4Eq:
9112 case kExprI32x4Ne:
9113 case kExprI32x4LtS:
9114 case kExprI32x4LtU:
9115 case kExprI32x4GtS:
9116 case kExprI32x4GtU:
9117 case kExprI32x4LeS:
9118 case kExprI32x4LeU:
9119 case kExprI32x4GeS:
9120 case kExprI32x4GeU:
9121 case kExprI64x2Eq:
9122 case kExprI64x2Ne:
9123 case kExprI64x2LtS:
9124 case kExprI64x2GtS:
9125 case kExprI64x2LeS:
9126 case kExprI64x2GeS:
9127 case kExprF32x4Eq:
9128 case kExprF32x4Ne:
9129 case kExprF32x4Lt:
9130 case kExprF32x4Gt:
9131 case kExprF32x4Le:
9132 case kExprF32x4Ge:
9133 case kExprF64x2Eq:
9134 case kExprF64x2Ne:
9135 case kExprF64x2Lt:
9136 case kExprF64x2Gt:
9137 case kExprF64x2Le:
9138 case kExprF64x2Ge:
9139 case kExprS128Not:
9140 case kExprS128And:
9141 case kExprS128AndNot:
9142 case kExprS128Or:
9143 case kExprS128Xor:
9144 case kExprS128Select:
9145 case kExprV128AnyTrue:
9146 case kExprS128Load8Lane: // 0xfd54
9147 case kExprS128Load16Lane: // 0xfd55
9148 case kExprS128Load32Lane: // 0xfd56
9149 case kExprS128Load64Lane: // 0xfd57
9150 case kExprS128Store8Lane: // 0xfd58
9151 case kExprS128Store16Lane: // 0xfd59
9152 case kExprS128Store32Lane: // 0xfd5a
9153 case kExprS128Store64Lane: // 0xfd5b
9154 case kExprS128Load32Zero: // 0xfd5c
9155 case kExprS128Load64Zero: // 0xfd5d
9156 case kExprF32x4DemoteF64x2Zero: // 0xfd5e
9157 case kExprI32x4Neg: // 0xfda1
9158 case kExprI32x4AllTrue: // 0xfda3
9159 case kExprI32x4BitMask: // 0xfda4
9160 case kExprI32x4SConvertI16x8Low: // 0xfda7
9161 case kExprI32x4Add: // 0xfdae
9162 case kExprI32x4Sub: // 0xfdb1
9163 case kExprI32x4Mul: // 0xfdb5
9164 case kExprI32x4ExtMulLowI16x8S: // 0xfdbc
9165 case kExprI32x4ExtMulHighI16x8S: // 0xfdbd
9166 case kExprI32x4ExtMulLowI16x8U: // 0xfdbe
9167 case kExprI32x4ExtMulHighI16x8U: // 0xfdbf
9168 case kExprI64x2Neg: // 0xfdc1
9169 case kExprI64x2AllTrue: // 0xfdc3
9170 case kExprI64x2BitMask: // 0xfdc4
9171 case kExprI64x2SConvertI32x4Low: // 0xfdc7
9172 case kExprI64x2SConvertI32x4High: // 0xfdc8
9173 case kExprI64x2UConvertI32x4Low: // 0xfdc9
9174 case kExprI64x2UConvertI32x4High: // 0xfdca
9175 case kExprI64x2Add: // 0xfdce
9176 case kExprI64x2Sub: // 0xfdd1
9177 case kExprI64x2Mul: // 0xfdd5
9178 case kExprF32x4Neg: // 0xfde1
9179 case kExprF32x4Sqrt: // 0xfde3
9180 case kExprF64x2ConvertLowI32x4S: // 0xfde6
9181 case kExprF64x2ConvertLowI32x4U: // 0xfde7
9182
9183 // Relaxed SIMD
9184 case kExprI8x16RelaxedSwizzle:
9185 case kExprI32x4RelaxedTruncF32x4S:
9186 case kExprI32x4RelaxedTruncF32x4U:
9187 case kExprI32x4RelaxedTruncF64x2SZero:
9188 case kExprI32x4RelaxedTruncF64x2UZero:
9189 case kExprF32x4Qfma:
9190 case kExprF32x4Qfms:
9191 case kExprF64x2Qfma:
9192 case kExprF64x2Qfms:
9193 case kExprI8x16RelaxedLaneSelect:
9194 case kExprI16x8RelaxedLaneSelect:
9195 case kExprI32x4RelaxedLaneSelect:
9196 case kExprI64x2RelaxedLaneSelect:
9197 case kExprF32x4RelaxedMin:
9198 case kExprF32x4RelaxedMax:
9199 case kExprF64x2RelaxedMin:
9200 case kExprF64x2RelaxedMax:
9201 case kExprI16x8RelaxedQ15MulRS:
9202 case kExprI16x8DotI8x16I7x16S:
9203 case kExprI32x4DotI8x16I7x16AddS:
9204
9205 // FP16 SIMD
9206 case kExprF16x8Splat:
9207 case kExprF16x8ExtractLane:
9208 case kExprF16x8ReplaceLane:
9209 case kExprF16x8Abs:
9210 case kExprF16x8Neg:
9211 case kExprF16x8Sqrt:
9212 case kExprF16x8Ceil:
9213 case kExprF16x8Floor:
9214 case kExprF16x8Trunc:
9215 case kExprF16x8NearestInt:
9216 case kExprF16x8Eq:
9217 case kExprF16x8Ne:
9218 case kExprF16x8Lt:
9219 case kExprF16x8Gt:
9220 case kExprF16x8Le:
9221 case kExprF16x8Ge:
9222 case kExprF16x8Add:
9223 case kExprF16x8Sub:
9224 case kExprF16x8Mul:
9225 case kExprF16x8Div:
9226 case kExprF16x8Min:
9227 case kExprF16x8Max:
9228 case kExprF16x8Pmin:
9229 case kExprF16x8Pmax:
9230 case kExprI16x8SConvertF16x8:
9231 case kExprI16x8UConvertF16x8:
9232 case kExprF16x8SConvertI16x8:
9233 case kExprF16x8UConvertI16x8:
9234 case kExprF16x8DemoteF32x4Zero:
9235 case kExprF16x8DemoteF64x2Zero:
9236 case kExprF32x4PromoteLowF16x8:
9237 case kExprF16x8Qfma:
9238 case kExprF16x8Qfms: // 0xfd100 - 0xfd14f
9239
9240 // Not handled by DrumBrake
9241
9242 case kExprNopForTestingUnsupportedInLiftoff:
9243 case kExprTryTable:
9244 case kExprThrowRef:
9245 case kExprF64Acos:
9246 case kExprF64Asin:
9247 case kExprF64Atan:
9248 case kExprF64Atan2:
9249 case kExprF64Cos:
9250 case kExprF64Sin:
9251 case kExprF64Tan:
9252 case kExprF64Exp:
9253 case kExprF64Log:
9254 case kExprF64Pow: // 0xdc - 0xe6
9255 case kExprI32AsmjsDivS:
9256 case kExprI32AsmjsDivU:
9257 case kExprI32AsmjsRemS:
9258 case kExprI32AsmjsRemU:
9259 case kExprI32AsmjsSConvertF32:
9260 case kExprI32AsmjsUConvertF32:
9261 case kExprI32AsmjsSConvertF64:
9262 case kExprI32AsmjsUConvertF64: // 0xe7 - 0xfa
9263 case kExprRefCastNop:
9264
9265 // StringRef
9266 case kExprStringNewUtf8:
9267 case kExprStringNewWtf16:
9268 case kExprStringConst:
9269 case kExprStringMeasureUtf8:
9270 case kExprStringMeasureWtf8:
9271 case kExprStringMeasureWtf16:
9272 case kExprStringEncodeUtf8:
9273 case kExprStringEncodeWtf16:
9274 case kExprStringConcat:
9275 case kExprStringEq:
9276 case kExprStringIsUSVSequence:
9277 case kExprStringNewLossyUtf8:
9278 case kExprStringNewWtf8:
9279 case kExprStringEncodeLossyUtf8:
9280 case kExprStringEncodeWtf8:
9281 case kExprStringNewUtf8Try:
9282 case kExprStringAsWtf8:
9283 case kExprStringViewWtf8Advance:
9284 case kExprStringViewWtf8EncodeUtf8:
9285 case kExprStringViewWtf8Slice:
9286 case kExprStringViewWtf8EncodeLossyUtf8:
9287 case kExprStringViewWtf8EncodeWtf8: // 0xfb80 - 0xfb95
9288 case kExprStringAsWtf16:
9289 case kExprStringViewWtf16Length:
9290 case kExprStringViewWtf16GetCodeunit:
9291 case kExprStringViewWtf16Encode:
9292 case kExprStringViewWtf16Slice:
9293 case kExprStringAsIter:
9294 case kExprStringViewIterNext:
9295 case kExprStringViewIterAdvance:
9296 case kExprStringViewIterRewind:
9297 case kExprStringViewIterSlice:
9298 case kExprStringCompare:
9299 case kExprStringFromCodePoint:
9300 case kExprStringHash:
9301 case kExprStringNewUtf8Array:
9302 case kExprStringNewWtf16Array:
9303 case kExprStringEncodeUtf8Array:
9304 case kExprStringEncodeWtf16Array:
9305 case kExprStringNewLossyUtf8Array:
9306 case kExprStringNewWtf8Array:
9307 case kExprStringEncodeLossyUtf8Array:
9308 case kExprStringEncodeWtf8Array:
9309 case kExprStringNewUtf8ArrayTry:
9310
9311 // FP16
9312 case kExprF32LoadMemF16:
9313 case kExprF32StoreMemF16:
9314 default:
9315 return false;
9316 }
9317}
9318#endif // DEBUG
9319
9320RegMode WasmBytecodeGenerator::EncodeInstruction(const WasmInstruction& instr,
9321 RegMode curr_reg_mode,
9322 RegMode next_reg_mode) {
9323 if (!v8_flags.drumbrake_compact_bytecode) {
9324 DCHECK_EQ(handler_size_, InstrHandlerSize::Large);
9325 return DoEncodeInstruction(instr, curr_reg_mode, next_reg_mode);
9326 }
9327
9328 size_t current_instr_code_offset = code_.size();
9329 size_t current_slots_size = slots_.size();
9333
9334 RegMode reg_mode = DoEncodeInstruction(instr, curr_reg_mode, next_reg_mode);
9335 if (V8_UNLIKELY(current_instr_encoding_failed_)) {
9336 DCHECK(!HasSideEffects(instr.opcode));
9337 code_.resize(current_instr_code_offset);
9338 slots_.resize(current_slots_size);
9339 stack_.rollback();
9340 current_instr_encoding_failed_ = false;
9341 handler_size_ = InstrHandlerSize::Large;
9342 reg_mode = DoEncodeInstruction(instr, curr_reg_mode, next_reg_mode);
9343 DCHECK(!current_instr_encoding_failed_);
9344 }
9345
9346 return reg_mode;
9347}
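// [Illustrative sketch, not part of the original source; all names are
// placeholders] EncodeInstruction() above is a speculative encoder: it
// first tries the compact (small) handler encoding and, if an operand did
// not fit, rolls the emission state back and retries with the large
// encoding, which always fits:
//
//   Result EncodeWithFallback(const Instr& instr) {
//     Snapshot snapshot = SaveEmissionState();     // code size, slot count
//     Result result = TryEncode(instr, kSmall);    // may set failure flag
//     if (V8_UNLIKELY(EncodingFailed())) {
//       RestoreEmissionState(snapshot);            // undo partial emission
//       result = TryEncode(instr, kLarge);         // wide operands fit
//     }
//     return result;
//   }
//
// The DCHECK in the fallback path relies on HasSideEffects() above: rolling
// back and re-encoding is only sound for opcodes whose encoding touches
// nothing beyond code_, slots_ and the value-stack history.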
9348
9349RegMode WasmBytecodeGenerator::DoEncodeInstruction(const WasmInstruction& instr,
9350 RegMode curr_reg_mode,
9351 RegMode next_reg_mode) {
9352 DCHECK(curr_reg_mode != RegMode::kAnyReg);
9353
9354#ifdef DEBUG
9355 was_current_instruction_reachable_ = is_instruction_reachable_;
9356#endif // DEBUG
9357 if (!is_instruction_reachable_) {
9358 if (instr.opcode == kExprBlock || instr.opcode == kExprLoop ||
9359 instr.opcode == kExprIf || instr.opcode == kExprTry) {
9360 unreachable_block_count_++;
9361 } else if (instr.opcode == kExprEnd || instr.opcode == kExprDelegate) {
9362 DCHECK_GT(unreachable_block_count_, 0);
9363 if (0 == --unreachable_block_count_) {
9364 is_instruction_reachable_ = true;
9365 }
9366 } else if (instr.opcode == kExprElse || instr.opcode == kExprCatch ||
9367 instr.opcode == kExprCatchAll) {
9368 if (1 == unreachable_block_count_) {
9369 is_instruction_reachable_ = true;
9370 unreachable_block_count_ = 0;
9371 }
9372 }
9373 }
9374 if (!is_instruction_reachable_) return RegMode::kNoReg;
9375
9376 ValueKind top_stack_slot_type = GetTopStackType(curr_reg_mode);
9377
9378 OperatorMode mode = kS2S;
9379 if (v8_flags.drumbrake_register_optimization) {
9380 switch (next_reg_mode) {
9381 case RegMode::kNoReg:
9382 if (curr_reg_mode != RegMode::kNoReg) {
9383 mode = kR2S;
9384 }
9385 break;
9386 case RegMode::kAnyReg:
9387 default: // kI32Reg|kI64Reg|kF32Reg|kF64Reg
9388 if (curr_reg_mode == RegMode::kNoReg) {
9390 mode = kS2R;
9391 } else {
9392 mode = kS2S;
9393 }
9394 } else {
9396 mode = kR2R;
9397 } else {
9398 mode = kR2S;
9399 }
9400 }
9401 break;
9402 }
9403 }
9404
9405#ifdef V8_ENABLE_DRUMBRAKE_TRACING
9406 if (v8_flags.trace_drumbrake_bytecode_generator) {
9407 printf("PRE @%-3u: %-24s: %3s %-7s -> %-7s\n", instr.pc,
9408 wasm::WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(instr.opcode)),
9409 GetOperatorModeString(mode), GetRegModeString(curr_reg_mode),
9410 GetRegModeString(next_reg_mode));
9411 }
9412
9413 if (v8_flags.trace_drumbrake_execution) {
9414 EMIT_INSTR_HANDLER(s2s_TraceInstruction);
9415 EmitI32Const(instr.pc);
9416 EmitI32Const(instr.opcode);
9417 EmitI32Const(static_cast<int>(curr_reg_mode));
9418 }
9419#endif // V8_ENABLE_DRUMBRAKE_TRACING
9420
9421 switch (instr.opcode) {
9422 case kExprUnreachable: {
9423 EMIT_INSTR_HANDLER_WITH_PC(s2s_Unreachable, instr.pc);
9424 SetUnreachableMode();
9425 break;
9426 }
9427 case kExprNop:
9428 break;
9429 case kExprBlock:
9430 case kExprLoop: {
9432 BeginBlock(instr.opcode, instr.optional.block);
9433 break;
9434 }
9435 case kExprTry: {
9437 int parent_or_matching_try_block_index = GetCurrentTryBlockIndex(true);
9438 int ancestor_try_block_index = GetCurrentTryBlockIndex(false);
9439 int try_block_index = BeginBlock(instr.opcode, instr.optional.block);
9440 eh_data_.AddTryBlock(try_block_index, parent_or_matching_try_block_index,
9441 ancestor_try_block_index);
9442 break;
9443 }
9444 case kExprIf: {
9446 if (mode == kR2S) {
9447 EMIT_INSTR_HANDLER(r2s_If);
9448 } else {
9449 DCHECK_EQ(mode, kS2S);
9450 EMIT_INSTR_HANDLER(s2s_If);
9451 I32Pop(); // cond
9452 }
9453 BeginBlock(instr.opcode, instr.optional.block);
9454 EmitIfElseBranchOffset(); // Jumps to the 'else' branch.
9455 break;
9456 }
9457 case kExprElse: {
9458 DCHECK(blocks_[current_block_index_].IsIf());
9459 uint32_t if_block_index = current_block_index_;
9460 BeginElseBlock(if_block_index, false);
9461 EMIT_INSTR_HANDLER(s2s_Else);
9462 EmitIfElseBranchOffset(); // Jumps to the end of the 'else' block.
9463 break;
9464 }
9465 case kExprCatch:
9466 case kExprCatchAll: {
9468
9469 int try_block_index = eh_data_.GetCurrentTryBlockIndex();
9470 DCHECK_GT(try_block_index, 0);
9471
9472 EndBlock(instr.opcode); // End previous try or catch.
9473
9474 stack_.resize(blocks_[try_block_index].stack_size_);
9475 int32_t catch_block_index =
9476 BeginBlock(instr.opcode, blocks_[try_block_index].signature_);
9477
9478 EMIT_INSTR_HANDLER(s2s_Catch);
9479 EmitTryCatchBranchOffset(); // Jumps to the end of the try/catch blocks.
9480
9481 uint32_t first_param_slot_index = UINT_MAX;
9482 uint32_t first_ref_param_slot_index = UINT_MAX;
9483 if (instr.opcode == kExprCatch) {
9484 // Exception arguments are pushed into the stack.
9485 const WasmTag& tag = module_->tags[instr.optional.index];
9486 const FunctionSig* sig = tag.sig;
9487 for (size_t i = 0; i < sig->parameter_count(); ++i) {
9488 const ValueType value_type = sig->GetParam(i);
9489 const ValueKind kind = value_type.kind();
9490 switch (kind) {
9491 case kI32:
9492 case kI64:
9493 case kF32:
9494 case kF64:
9495 case kS128:
9496 case kRef:
9497 case kRefNull: {
9498 uint32_t slot_index = CreateSlot(value_type);
9499 if (first_param_slot_index == UINT_MAX) {
9500 first_param_slot_index = slot_index;
9501 }
9502 if ((kind == kRefNull || kind == kRef) &&
9503 first_ref_param_slot_index == UINT_MAX) {
9504 first_ref_param_slot_index = slot_index;
9505 }
9506 PushSlot(slot_index);
9507 slots_[slot_index].value_type = value_type;
9508 break;
9509 }
9510 default:
9511 UNREACHABLE();
9512 }
9513 }
9514 }
9515
9516 blocks_[catch_block_index].first_block_index_ =
9517 blocks_[try_block_index].first_block_index_;
9518
9519 if (instr.opcode == kExprCatch) {
9521 current_block_index_, instr.optional.index,
9522 first_param_slot_index == UINT_MAX
9523 ? 0
9524 : slots_[first_param_slot_index].slot_offset,
9525 first_ref_param_slot_index == UINT_MAX
9526 ? 0
9527 : slots_[first_ref_param_slot_index].ref_stack_index,
9528 static_cast<int>(code_.size()));
9529 } else { // kExprCatchAll
9532 static_cast<int>(code_.size()));
9533 }
9534
9535 break;
9536 }
9537 case kExprDelegate: {
9538 int32_t target_block_index = GetTargetBranch(instr.optional.depth + 1);
9539 DCHECK_LT(target_block_index, blocks_.size());
9540 int32_t delegated_try_block_index = WasmEHData::kDelegateToCallerIndex;
9541 if (target_block_index > 0) {
9542 const BlockData& target_block = blocks_[target_block_index];
9543 delegated_try_block_index = target_block.IsTry()
9544 ? target_block_index
9545 : target_block.parent_try_block_index_;
9546 }
9547 eh_data_.AddDelegatedBlock(delegated_try_block_index);
9548 EndBlock(kExprDelegate);
9549 break;
9550 }
9551 case kExprThrow: {
9552 EMIT_INSTR_HANDLER(s2s_Throw);
9553 EmitI32Const(instr.optional.index);
9554
9555 // Exception arguments are popped from the stack (in reverse order!)
9556 const WasmTag& tag = module_->tags[instr.optional.index];
9557 const WasmTagSig* sig = tag.sig;
9558 DCHECK_GE(stack_.size(), sig->parameter_count());
9559 size_t stack_index = stack_.size() - sig->parameter_count();
9560 for (size_t index = 0; index < sig->parameter_count();
9561 index++, stack_index++) {
9562 ValueKind kind = sig->GetParam(index).kind();
9563 DCHECK(CheckEqualKind(kind, slots_[stack_[stack_index]].kind()));
9564 switch (kind) {
9565 case kI32:
9566 case kI64:
9567 case kF32:
9568 case kF64:
9569 case kS128: {
9570 EmitSlotOffset(slots_[stack_[stack_index]].slot_offset);
9571 break;
9572 }
9573 case kRef:
9574 case kRefNull: {
9575 uint32_t ref_index = slots_[stack_[stack_index]].ref_stack_index;
9576 Emit(&ref_index, sizeof(uint32_t));
9577 break;
9578 }
9579 default:
9580 UNREACHABLE();
9581 }
9582 }
9583
9584 stack_.resize(stack_.size() - sig->parameter_count());
9586 CurrentCodePos());
9587 SetUnreachableMode();
9588 break;
9589 }
9590 case kExprRethrow: {
9591 EMIT_INSTR_HANDLER(s2s_Rethrow);
9592 int32_t target_branch_index = GetTargetBranch(instr.optional.depth);
9593 DCHECK(blocks_[target_branch_index].IsCatch() ||
9594 blocks_[target_branch_index].IsCatchAll());
9595 Emit(&target_branch_index, sizeof(int32_t));
9597 CurrentCodePos());
9598 SetUnreachableMode();
9599 break;
9600 }
9601 case kExprEnd: {
9602 // If there is an 'if...end' statement without an 'else' branch, create
9603 // a dummy else branch used to store results.
9604 if (blocks_[current_block_index_].IsIf()) {
9605 uint32_t if_block_index = current_block_index_;
9606 DCHECK(!blocks_[if_block_index].HasElseBranch());
9607 uint32_t params_count = ParamsCount(blocks_[if_block_index]);
9608 if (params_count > 0) {
9609 BeginElseBlock(if_block_index, true);
9610 EMIT_INSTR_HANDLER(s2s_Else);
9611 EmitIfElseBranchOffset(); // Jumps to the end of the 'else' block.
9612 }
9613 }
9614
9615 if (EndBlock(kExprEnd) < 0) {
9616 Return();
9617 }
9618 break;
9619 }
9620 case kExprBr: {
9621 int32_t target_branch_index = GetTargetBranch(instr.optional.depth);
9622 StoreBlockParamsAndResultsIntoSlots(target_branch_index, kExprBr);
9623
9624 EMIT_INSTR_HANDLER(s2s_Branch);
9625 EmitBranchOffset(instr.optional.depth);
9626 SetUnreachableMode();
9627 break;
9628 }
9629 case kExprBrIf: {
9630 int32_t target_branch_index = GetTargetBranch(instr.optional.depth);
9631 const WasmBytecodeGenerator::BlockData& target_block_data =
9632 blocks_[target_branch_index];
9633 if (HasVoidSignature(target_block_data)) {
9634 if (mode == kR2S) {
9635 EMIT_INSTR_HANDLER(r2s_BranchIf);
9636 } else {
9637 DCHECK_EQ(mode, kS2S);
9638 EMIT_INSTR_HANDLER(s2s_BranchIf);
9639 I32Pop(); // condition
9640 }
9641 // Emit code offset to branch to if the condition is true.
9642 EmitBranchOffset(instr.optional.depth);
9643 } else {
9644 if (mode == kR2S) {
9645 EMIT_INSTR_HANDLER(r2s_BranchIfWithParams);
9646 } else {
9647 DCHECK_EQ(mode, kS2S);
9648 EMIT_INSTR_HANDLER(s2s_BranchIfWithParams);
9649 I32Pop(); // condition
9650 }
9651
9652 // Emit code offset to branch to if the condition is not true.
9653 const uint32_t if_false_code_offset = CurrentCodePos();
9654 Emit(&if_false_code_offset, sizeof(if_false_code_offset));
9655
9656 StoreBlockParamsAndResultsIntoSlots(target_branch_index, kExprBrIf);
9657
9658 EMIT_INSTR_HANDLER(s2s_Branch);
9659 EmitBranchOffset(instr.optional.depth);
9660
9661 // Patch the 'if-false' offset with the correct jump offset.
9662 int32_t delta = CurrentCodePos() - if_false_code_offset;
9663 base::WriteUnalignedValue<int32_t>(
9664 reinterpret_cast<Address>(code_.data() + if_false_code_offset),
9665 delta);
9666 }
9667 break;
9668 }
9669 case kExprBrOnNull: {
9670 DCHECK_EQ(mode, kS2S);
9671 int32_t target_branch_index = GetTargetBranch(instr.optional.depth);
9672 const WasmBytecodeGenerator::BlockData& target_block_data =
9673 blocks_[target_branch_index];
9674 if (HasVoidSignature(target_block_data)) {
9675 EMIT_INSTR_HANDLER(s2s_BranchOnNull);
9676 ValueType value_type = RefPop(); // pop condition
9677 EmitRefValueType(value_type.raw_bit_field());
9678 // Remove nullability.
9679 if (value_type.kind() == kRefNull) {
9680 value_type = ValueType::Ref(value_type.heap_type());
9681 }
9682 RefPush(value_type); // re-push condition value
9683 // Emit code offset to branch to if the condition is true.
9684 EmitBranchOffset(instr.optional.depth);
9685 } else {
9686 EMIT_INSTR_HANDLER(s2s_BranchOnNullWithParams);
9687 ValueType value_type = RefPop(); // pop condition
9688 EmitRefValueType(value_type.raw_bit_field());
9689 // Remove nullability.
9690 if (value_type.kind() == kRefNull) {
9691 value_type = ValueType::Ref(value_type.heap_type());
9692 }
9693 RefPush(value_type); // re-push condition value
9694
9695 // Emit code offset to branch to if the condition is not true.
9696 const uint32_t if_false_code_offset = CurrentCodePos();
9697 Emit(&if_false_code_offset, sizeof(if_false_code_offset));
9698
9699 uint32_t stack_top = stack_.back();
9700 RefPop(false); // Drop the null reference.
9701
9702 StoreBlockParamsAndResultsIntoSlots(target_branch_index, kExprBrIf);
9703
9704 EMIT_INSTR_HANDLER(s2s_Branch);
9705 EmitBranchOffset(instr.optional.depth);
9706
9707 stack_.push_back(stack_top); // re-push non-null ref on top of stack
9708
9709 // Patch the 'if-false' offset with the correct jump offset.
9710 int32_t delta = CurrentCodePos() - if_false_code_offset;
9711 base::WriteUnalignedValue<int32_t>(
9712 reinterpret_cast<Address>(code_.data() + if_false_code_offset),
9713 delta);
9714 }
9715 break;
9716 }
9717 case kExprBrOnNonNull: {
9718 DCHECK_EQ(mode, kS2S);
9719 int32_t target_branch_index = GetTargetBranch(instr.optional.depth);
9720 const WasmBytecodeGenerator::BlockData& target_block_data =
9721 blocks_[target_branch_index];
9722 if (HasVoidSignature(target_block_data)) {
9723 EMIT_INSTR_HANDLER(s2s_BranchOnNonNull);
9724 ValueType value_type = RefPop(); // pop condition
9725 EmitRefValueType(value_type.raw_bit_field());
9726 RefPush(value_type); // re-push condition value
9727 // Emit code offset to branch to if the condition is true.
9728 EmitBranchOffset(instr.optional.depth);
9729
9730 RefPop(false); // Drop the null reference.
9731 } else {
9732 EMIT_INSTR_HANDLER(s2s_BranchOnNonNullWithParams);
9733 ValueType value_type = RefPop(); // pop condition
9734 EmitRefValueType(value_type.raw_bit_field());
9735 RefPush(value_type); // re-push condition value
9736
9737 // Emit code offset to branch to if the condition is not true.
9738 const uint32_t if_false_code_offset = CurrentCodePos();
9739 Emit(&if_false_code_offset, sizeof(if_false_code_offset));
9740
9741 StoreBlockParamsAndResultsIntoSlots(target_branch_index, kExprBrIf);
9742
9743 EMIT_INSTR_HANDLER(s2s_Branch);
9744 EmitBranchOffset(instr.optional.depth);
9745
9746 // Patch the 'if-false' offset with the correct jump offset.
9747 int32_t delta = CurrentCodePos() - if_false_code_offset;
9748 base::WriteUnalignedValue<int32_t>(
9749 reinterpret_cast<Address>(code_.data() + if_false_code_offset),
9750 delta);
9751
9752 RefPop(false); // Drop the null reference.
9753 }
9754 break;
9755 }
9756 case kExprBrOnCast: {
9757 const BranchOnCastData& br_on_cast_data = instr.optional.br_on_cast_data;
9758 const int32_t target_branch_index =
9759 GetTargetBranch(br_on_cast_data.label_depth);
9760 bool null_succeeds = br_on_cast_data.res_is_null;
9761 const ValueType target_type = ValueType::RefMaybeNull(
9764
9765 const ValueType obj_type = slots_[stack_.back()].value_type;
9766 DCHECK(obj_type.is_object_reference());
9767
9768 // This logic ensures that code generation can assume that functions can
9769 // only be cast to function types, and data objects to data types.
9770 if (V8_UNLIKELY(
9771 TypeCheckAlwaysSucceeds(obj_type, target_type.heap_type()))) {
9772 StoreBlockParamsAndResultsIntoSlots(target_branch_index, kExprBrOnCast);
9773 // The branch will still not be taken on null if not {null_succeeds}.
9774 if (obj_type.is_nullable() && !null_succeeds) {
9775 EMIT_INSTR_HANDLER(s2s_BranchOnNull);
9776 RefPop(); // pop condition
9777 EmitRefValueType(obj_type.raw_bit_field());
9778 RefPush(target_type); // re-push condition value with a new HeapType.
9779 EmitBranchOffset(br_on_cast_data.label_depth);
9780 } else {
9781 EMIT_INSTR_HANDLER(s2s_Branch);
9782 EmitBranchOffset(br_on_cast_data.label_depth);
9783 }
9784 } else if (V8_LIKELY(!TypeCheckAlwaysFails(
9785 obj_type, target_type.heap_type(), null_succeeds))) {
9786 EMIT_INSTR_HANDLER(s2s_BranchOnCast);
9788 HeapType br_on_cast_data_target_type(
9790 EmitI32Const(br_on_cast_data_target_type.is_index()
9791 ? br_on_cast_data_target_type.raw_bit_field()
9792 : target_type.heap_type().raw_bit_field());
9796 // Emit code offset to branch to if the condition is not true.
9797 const uint32_t no_branch_code_offset = CurrentCodePos();
9798 Emit(&no_branch_code_offset, sizeof(no_branch_code_offset));
9799 StoreBlockParamsAndResultsIntoSlots(target_branch_index, kExprBrOnCast);
9800 EMIT_INSTR_HANDLER(s2s_Branch);
9801 EmitBranchOffset(br_on_cast_data.label_depth);
9802 // Patch the 'if-false' offset with the correct jump offset.
9803 int32_t delta = CurrentCodePos() - no_branch_code_offset;
9804 base::WriteUnalignedValue<int32_t>(
9805 reinterpret_cast<Address>(code_.data() + no_branch_code_offset),
9806 delta);
9807 }
9808 break;
9809 }
9810 case kExprBrOnCastFail: {
9811 const BranchOnCastData& br_on_cast_data = instr.optional.br_on_cast_data;
9812 int32_t target_branch_index =
9813 GetTargetBranch(br_on_cast_data.label_depth);
9814 bool null_succeeds = br_on_cast_data.res_is_null;
9815 HeapType br_on_cast_data_target_type =
9817 const ValueType target_type =
9818 ValueType::RefMaybeNull(br_on_cast_data_target_type,
9819 null_succeeds ? kNullable : kNonNullable);
9820
9821 const ValueType obj_type = slots_[stack_.back()].value_type;
9822 DCHECK(obj_type.is_object_reference());
9823
9824 // This logic ensures that code generation can assume that functions can
9825 // only be cast to function types, and data objects to data types.
9826 if (V8_UNLIKELY(TypeCheckAlwaysFails(obj_type, target_type.heap_type(),
9827 null_succeeds))) {
9828 StoreBlockParamsAndResultsIntoSlots(target_branch_index, kExprBrOnCast);
9829 EMIT_INSTR_HANDLER(s2s_Branch);
9830 EmitBranchOffset(br_on_cast_data.label_depth);
9831 } else if (V8_UNLIKELY(TypeCheckAlwaysSucceeds(
9832 obj_type, target_type.heap_type()))) {
9833 // The branch can still be taken on null.
9834 if (obj_type.is_nullable() && !null_succeeds) {
9835 StoreBlockParamsAndResultsIntoSlots(target_branch_index,
9836 kExprBrOnCast);
9837 EMIT_INSTR_HANDLER(s2s_BranchOnNull);
9838 RefPop(); // pop condition
9839 EmitRefValueType(obj_type.raw_bit_field());
9840 RefPush(target_type); // re-push condition value with a new HeapType.
9841 EmitBranchOffset(br_on_cast_data.label_depth);
9842 } else {
9843 // Fallthrough.
9844 }
9845 } else {
9846 EMIT_INSTR_HANDLER(s2s_BranchOnCastFail);
9848 EmitI32Const(br_on_cast_data_target_type.is_index()
9849 ? br_on_cast_data_target_type.raw_bit_field()
9850 : target_type.heap_type().raw_bit_field());
9854 // Emit code offset to branch to if the condition is not true.
9855 const uint32_t no_branch_code_offset = CurrentCodePos();
9856 Emit(&no_branch_code_offset, sizeof(no_branch_code_offset));
9857 StoreBlockParamsAndResultsIntoSlots(target_branch_index, kExprBrOnCast);
9858 EMIT_INSTR_HANDLER(s2s_Branch);
9859 EmitBranchOffset(br_on_cast_data.label_depth);
9860 // Patch the 'if-false' offset with the correct jump offset.
9861 int32_t delta = CurrentCodePos() - no_branch_code_offset;
9862 base::WriteUnalignedValue<int32_t>(
9863 reinterpret_cast<Address>(code_.data() + no_branch_code_offset),
9864 delta);
9865 }
9866 break;
9867 }
9868 case kExprBrTable: {
9869 if (mode == kR2S) {
9870 EMIT_INSTR_HANDLER(r2s_BrTable);
9871 } else {
9872 DCHECK_EQ(mode, kS2S);
9873 EMIT_INSTR_HANDLER(s2s_BrTable);
9874 I32Pop(); // branch label
9875 }
9876
9877 // We emit the following bytecode for a br_table instruction:
9878 // s2s_BrTable handler id
9879 // (uint32) labels_count
9880 // (uint32) offset branch 0
9881 // (uint32) offset branch 1
9882 // ...
9883 // (uint32) offset branch labels_count - 1
9884 // (uint32) offset branch labels_count (default branch)
9885 // { Branch 0 slots }
9886 // { Branch 1 slots }
9887 // ...
9888 // { Branch labels_count slots }
9889 //
9890 // Where each {Branch i slots} contains the slots to execute a Branch
9891 // instruction:
9892 // { CopySlots for branch results, if present }
9893 // s2s_Branch handler id
9894 // (uint32) branch_offset (to be patched later)
9895 //
9896 const uint32_t labels_count = instr.optional.br_table.table_count;
9897 EmitI32Const(labels_count);
9898 uint32_t labels_offset_start = CurrentCodePos();
9899 for (uint32_t i = 0; i <= labels_count; i++) {
9900 // At this point we don't yet know the offset of this branch, so we
9901 // emit the current bytecode position as a placeholder. It will be
9902 // overwritten with the real delta in the next loop.
9903 const uint32_t label_offset = CurrentCodePos();
9904 Emit(&label_offset, sizeof(label_offset));
9905 }
9906 for (uint32_t i = 0; i <= labels_count; i++) {
9907 uint32_t label =
9908 br_table_labels_[instr.optional.br_table.labels_index + i];
9909 int32_t target_branch_index = GetTargetBranch(label);
9910 uint32_t branch_code_start = CurrentCodePos();
9911 StoreBlockParamsAndResultsIntoSlots(target_branch_index, kExprBrTable);
9912
9913 EMIT_INSTR_HANDLER(s2s_Branch);
9914 EmitBranchOffset(label);
9915
9916 // Patch the branch offset with the correct jump offset.
9917 uint32_t label_offset = labels_offset_start + i * sizeof(uint32_t);
9918 int32_t delta = branch_code_start - label_offset;
9919 base::WriteUnalignedValue<int32_t>(
9920 reinterpret_cast<Address>(code_.data() + label_offset), delta);
9921 }
9922 SetUnreachableMode();
9923 break;
9924 }
9925 case kExprReturn: {
9926 Return();
9927 SetUnreachableMode();
9928 break;
9929 }
9930 case kExprCallFunction:
9931 case kExprReturnCall: {
9932 uint32_t function_index = instr.optional.index;
9933 const FunctionSig* sig = module_->functions[function_index].sig;
9934
9935 // Layout of a frame:
9936 // ------------------
9937 // stack slot #N-1 ‾\
9938 // ... |
9939 // stack slot #0 _/
9940 // local #L-1 ‾\
9941 // ... |
9942 // local #0 _/
9943 // const #C-1 ‾\
9944 // ... |
9945 // const #0 _/
9946 // param #P-1 ‾\
9947 // ... |
9948 // param #0 _/
9949 // return #R-1 ‾\
9950 // ... |
9951 // return #0 _/
9952 // ------------------
9953
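 // [Worked example, not a comment from the original source] For a callee
 // of type (i32, i32) -> i64 with one local, the frame is laid out from
 // the bottom up as: return #0 (i64), param #0, param #1, consts (if any),
 // local #0, and finally the operand stack slots that grow during
 // execution. This matches the rets_slots logic below: a tail call reuses
 // the caller's return slots (rets_slots[index] = index, i.e. the frame
 // bottom), while a regular call creates fresh slots past the live frame
 // and passes slot_offset, the current frame size in bytes, so that the
 // callee's frame starts right after the caller's.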
9954 const bool is_imported = (module_->functions[function_index].imported);
9955 const bool is_tail_call = (instr.opcode == kExprReturnCall);
9956 uint32_t slot_offset = GetStackFrameSize() * kSlotSize;
9957 uint32_t ref_stack_fp_offset = ref_slots_count_;
9958
9959 std::vector<uint32_t> rets_slots;
9960 rets_slots.resize(sig->return_count());
9961 for (size_t index = 0; index < sig->return_count(); index++) {
9962 rets_slots[index] = is_tail_call ? static_cast<uint32_t>(index)
9963 : CreateSlot(sig->GetReturn(index));
9964 }
9965
9967
9968 if (is_imported) {
9969 if (is_tail_call) {
9970 EMIT_INSTR_HANDLER_WITH_PC(s2s_ReturnCallImportedFunction, instr.pc);
9975 } else {
9976 EMIT_INSTR_HANDLER_WITH_PC(s2s_CallImportedFunction, instr.pc);
9977 }
9978 } else {
9979 if (is_tail_call) {
9980 EMIT_INSTR_HANDLER_WITH_PC(s2s_ReturnCall, instr.pc);
9985 } else {
9986 EMIT_INSTR_HANDLER_WITH_PC(s2s_CallFunction, instr.pc);
9987 }
9988 }
9989 EmitI32Const(function_index);
9990 EmitStackIndex(static_cast<uint32_t>(stack_.size()));
9991 EmitSlotOffset(slot_offset);
9992 EmitRefStackIndex(ref_stack_fp_offset);
9993
9994 // Function arguments are popped from the stack.
9995 for (size_t index = sig->parameter_count(); index > 0; index--) {
9996 Pop(sig->GetParam(index - 1).kind(), false);
9997 }
9998
9999#ifdef V8_ENABLE_DRUMBRAKE_TRACING
10000 if (v8_flags.trace_drumbrake_execution) {
10001 EmitSlotOffset(rets_slots.empty()
10002 ? 0
10003 : slots_[rets_slots[0]].slot_offset * kSlotSize);
10004 }
10005#endif // V8_ENABLE_DRUMBRAKE_TRACING
10006
10007 if (!is_tail_call) {
10009 CurrentCodePos());
10010 }
10011
10012 // Function results are pushed to the stack.
10013 for (size_t index = 0; index < sig->return_count(); index++) {
10014 const ValueType value_type = sig->GetReturn(index);
10015 const ValueKind kind = value_type.kind();
10016 switch (kind) {
10017 case kI32:
10018 case kI64:
10019 case kF32:
10020 case kF64:
10021 case kS128:
10022 case kRef:
10023 case kRefNull:
10024 PushSlot(rets_slots[index]);
10026 break;
10027 default:
10028 UNREACHABLE();
10029 }
10030 }
10031
10032 // If this is a tail call, the following instructions in this block are
10033 // unreachable.
10034 if (is_tail_call) {
10035 SetUnreachableMode();
10036 }
10037
10038 return RegMode::kNoReg;
10039 }
10040 case kExprCallIndirect:
10041 case kExprReturnCallIndirect: {
10042 const FunctionSig* sig = module_->signature(
10043 ModuleTypeIndex({instr.optional.indirect_call.sig_index}));
10044
10045 const bool is_tail_call = (instr.opcode == kExprReturnCallIndirect);
10046 uint32_t slot_offset = GetStackFrameSize() * kSlotSize;
10047 uint32_t ref_stack_fp_offset = ref_slots_count_;
10048
10049 // Reserve space for return values.
10050 std::vector<uint32_t> rets_slots;
10051 rets_slots.resize(sig->return_count());
10052 for (size_t index = 0; index < sig->return_count(); index++) {
10053 rets_slots[index] = is_tail_call ? static_cast<uint32_t>(index)
10054 : CreateSlot(sig->GetReturn(index));
10055 }
10056
10058
10059 bool is_table64 =
10060 module_->tables[instr.optional.indirect_call.table_index]
10061 .is_table64();
10062
10063 if (is_tail_call) {
10064 EMIT_MEM64_INSTR_HANDLER_WITH_PC(s2s_ReturnCallIndirect,
10065 s2s_ReturnCallIndirect64, is_table64,
10066 instr.pc);
10071 } else {
10072 EMIT_MEM64_INSTR_HANDLER_WITH_PC(s2s_CallIndirect, s2s_CallIndirect64,
10073 is_table64, instr.pc);
10074 }
10075
10076 // Pops the index of the function to call.
10077 is_table64 ? I64Pop() : I32Pop();
10078
10079 EmitI32Const(instr.optional.indirect_call.table_index);
10080 EmitI32Const(instr.optional.indirect_call.sig_index);
10081
10082 EmitStackIndex(static_cast<uint32_t>(stack_.size()));
10083 EmitSlotOffset(slot_offset);
10084 EmitRefStackIndex(ref_stack_fp_offset);
10085
10086 // Function arguments are popped from the stack.
10087 for (size_t index = sig->parameter_count(); index > 0; index--) {
10088 Pop(sig->GetParam(index - 1).kind(), false);
10089 }
10090
10091#ifdef V8_ENABLE_DRUMBRAKE_TRACING
10092 if (v8_flags.trace_drumbrake_execution) {
10093 EmitSlotOffset(rets_slots.empty()
10094 ? 0
10095 : slots_[rets_slots[0]].slot_offset * kSlotSize);
10096 }
10097#endif // V8_ENABLE_DRUMBRAKE_TRACING
10098
10099 if (!is_tail_call) {
10101 CurrentCodePos());
10102 }
10103
10104 // Function result is pushed to the stack.
10105 for (size_t index = 0; index < sig->return_count(); index++) {
10106 ValueType value_type = sig->GetReturn(index);
10107 switch (value_type.kind()) {
10108 case kI32:
10109 case kI64:
10110 case kF32:
10111 case kF64:
10112 case kS128:
10113 case kRef:
10114 case kRefNull:
10115 PushSlot(rets_slots[index]);
10117 break;
10118 default:
10119 UNREACHABLE();
10120 }
10121 }
10122
10123 // If this is a tail call, the following instructions in this block are
10124 // unreachable.
10125 if (is_tail_call) {
10126 SetUnreachableMode();
10127 }
10128
10129 return RegMode::kNoReg;
10130 }
10131
10132 case kExprCallRef:
10133 case kExprReturnCallRef: {
10134 const FunctionSig* sig =
10135 module_->signature(ModuleTypeIndex({instr.optional.index}));
10136 const bool is_tail_call = (instr.opcode == kExprReturnCallRef);
10137 uint32_t slot_offset = GetStackFrameSize() * kSlotSize;
10138 uint32_t ref_stack_fp_offset = ref_slots_count_;
10139
10140 // Reserve space for return values.
10141 std::vector<uint32_t> rets_slots;
10142 rets_slots.resize(sig->return_count());
10143 for (size_t index = 0; index < sig->return_count(); index++) {
10144 rets_slots[index] = is_tail_call ? static_cast<uint32_t>(index)
10145 : CreateSlot(sig->GetReturn(index));
10146 }
10147
10149
10150 if (is_tail_call) {
10151 EMIT_INSTR_HANDLER_WITH_PC(s2s_ReturnCallRef, instr.pc);
10156 } else {
10157 EMIT_INSTR_HANDLER_WITH_PC(s2s_CallRef, instr.pc);
10158 }
10159
10160 // Pops the function to call.
10161 RefPop();
10162
10163 EmitI32Const(instr.optional.index); // Signature index.
10164 EmitStackIndex(static_cast<uint32_t>(stack_.size()));
10165 EmitSlotOffset(slot_offset);
10166 EmitRefStackIndex(ref_stack_fp_offset);
10167
10168 // Function arguments are popped from the stack.
10169 for (size_t index = sig->parameter_count(); index > 0; index--) {
10170 Pop(sig->GetParam(index - 1).kind(), false);
10171 }
10172
10173#ifdef V8_ENABLE_DRUMBRAKE_TRACING
10174 if (v8_flags.trace_drumbrake_execution) {
10175 EmitSlotOffset(rets_slots.empty()
10176 ? 0
10177 : slots_[rets_slots[0]].slot_offset * kSlotSize);
10178 }
10179#endif // V8_ENABLE_DRUMBRAKE_TRACING
10180
10181 if (!is_tail_call) {
10183 CurrentCodePos());
10184 }
10185
10186 // Function result is pushed to the stack.
10187 for (size_t index = 0; index < sig->return_count(); index++) {
10188 const ValueType value_type = sig->GetReturn(index);
10189 const ValueKind kind = value_type.kind();
10190 switch (kind) {
10191 case kI32:
10192 case kI64:
10193 case kF32:
10194 case kF64:
10195 case kS128:
10196 case kRef:
10197 case kRefNull:
10198 PushSlot(rets_slots[index]);
10200 break;
10201 default:
10202 UNREACHABLE();
10203 }
10204 }
10205
10206 // If this is a tail call, the following instructions in this block are
10207 // unreachable.
10208 if (is_tail_call) {
10209 SetUnreachableMode();
10210 }
10211
10212 return RegMode::kNoReg;
10213 }
10214
10215 case kExprDrop: {
10216 switch (top_stack_slot_type) {
10217 case kI32:
10218 switch (mode) {
10219 case kR2R:
10220 case kS2R:
10221 UNREACHABLE();
10222 case kR2S:
10223 EMIT_INSTR_HANDLER(r2s_I32Drop);
10224 return RegMode::kNoReg;
10225 case kS2S:
10226 EMIT_INSTR_HANDLER(s2s_I32Drop);
10227 I32Pop();
10228 return RegMode::kNoReg;
10229 }
10230 break;
10231 case kI64:
10232 switch (mode) {
10233 case kR2R:
10234 case kS2R:
10235 UNREACHABLE();
10236 case kR2S:
10237 EMIT_INSTR_HANDLER(r2s_I64Drop);
10238 return RegMode::kNoReg;
10239 case kS2S:
10240 EMIT_INSTR_HANDLER(s2s_I64Drop);
10241 I64Pop();
10242 return RegMode::kNoReg;
10243 }
10244 break;
10245 case kF32:
10246 switch (mode) {
10247 case kR2R:
10248 case kS2R:
10249 UNREACHABLE();
10250 case kR2S:
10251 EMIT_INSTR_HANDLER(r2s_F32Drop);
10252 return RegMode::kNoReg;
10253 case kS2S:
10254 EMIT_INSTR_HANDLER(s2s_F32Drop);
10255 F32Pop();
10256 return RegMode::kNoReg;
10257 }
10258 break;
10259 case kF64:
10260 switch (mode) {
10261 case kR2R:
10262 case kS2R:
10263 UNREACHABLE();
10264 case kR2S:
10265 EMIT_INSTR_HANDLER(r2s_F64Drop);
10266 return RegMode::kNoReg;
10267 case kS2S:
10268 EMIT_INSTR_HANDLER(s2s_F64Drop);
10269 F64Pop();
10270 return RegMode::kNoReg;
10271 }
10272 break;
10273 case kS128:
10274 switch (mode) {
10275 case kR2R:
10276 case kR2S:
10277 case kS2R:
10278 UNREACHABLE();
10279 case kS2S:
10280 EMIT_INSTR_HANDLER(s2s_S128Drop);
10281 S128Pop();
10282 return RegMode::kNoReg;
10283 }
10284 break;
10285 case kRef:
10286 case kRefNull:
10287 switch (mode) {
10288 case kR2R:
10289 case kS2R:
10290 UNREACHABLE();
10291 case kR2S:
10292 EMIT_INSTR_HANDLER(r2s_RefDrop);
10293 return RegMode::kNoReg;
10294 case kS2S:
10295 EMIT_INSTR_HANDLER(s2s_RefDrop);
10296 RefPop();
10297 return RegMode::kNoReg;
10298 }
10299 break;
10300 default:
10301 UNREACHABLE();
10302 }
10303 break;
10304 }
10305 case kExprSelect:
10306 case kExprSelectWithType: {
10307 DCHECK_GE(stack_size(), 2);
10308 switch (slots_[stack_[stack_size() - 2]].kind()) {
10309 case kI32:
10310 switch (mode) {
10311 case kR2R:
10312 EMIT_INSTR_HANDLER(r2r_I32Select);
10313 I32Pop(); // val2
10314 I32Pop(); // val1
10315 return RegMode::kI32Reg;
10316 case kR2S:
10317 EMIT_INSTR_HANDLER(r2s_I32Select);
10318 I32Pop(); // val2
10319 I32Pop(); // val1
10320 I32Push(); // result
10321 return RegMode::kNoReg;
10322 case kS2R:
10323 EMIT_INSTR_HANDLER(s2r_I32Select);
10324 I32Pop(); // condition
10325 I32Pop(); // val2
10326 I32Pop(); // val1
10327 return RegMode::kI32Reg;
10328 case kS2S:
10329 EMIT_INSTR_HANDLER(s2s_I32Select);
10330 I32Pop(); // condition
10331 I32Pop(); // val2
10332 I32Pop(); // val1
10333 I32Push(); // result
10334 return RegMode::kNoReg;
10335 }
10336 break;
10337 case kI64:
10338 switch (mode) {
10339 case kR2R:
10340 EMIT_INSTR_HANDLER(r2r_I64Select);
10341 I64Pop(); // val2
10342 I64Pop(); // val1
10343 return RegMode::kI64Reg;
10344 case kR2S:
10345 EMIT_INSTR_HANDLER(r2s_I64Select);
10346 I64Pop(); // val2
10347 I64Pop(); // val1
10348 I64Push(); // result
10349 return RegMode::kNoReg;
10350 case kS2R:
10351 EMIT_INSTR_HANDLER(s2r_I64Select);
10352 I32Pop(); // condition
10353 I64Pop(); // val2
10354 I64Pop(); // val1
10355 return RegMode::kI64Reg;
10356 case kS2S:
10357 EMIT_INSTR_HANDLER(s2s_I64Select);
10358 I32Pop(); // condition
10359 I64Pop();
10360 I64Pop();
10361 I64Push();
10362 return RegMode::kNoReg;
10363 }
10364 break;
10365 case kF32:
10366 switch (mode) {
10367 case kR2R:
10368 EMIT_INSTR_HANDLER(r2r_F32Select);
10369 F32Pop(); // val2
10370 F32Pop(); // val1
10371 return RegMode::kF32Reg;
10372 case kR2S:
10373 EMIT_INSTR_HANDLER(r2s_F32Select);
10374 F32Pop(); // val2
10375 F32Pop(); // val1
10376 F32Push(); // result
10377 return RegMode::kNoReg;
10378 case kS2R:
10379 EMIT_INSTR_HANDLER(s2r_F32Select);
10380 I32Pop(); // condition
10381 F32Pop(); // val2
10382 F32Pop(); // val1
10383 return RegMode::kF32Reg;
10384 case kS2S:
10385 EMIT_INSTR_HANDLER(s2s_F32Select);
10386 I32Pop(); // condition
10387 F32Pop();
10388 F32Pop();
10389 F32Push();
10390 return RegMode::kNoReg;
10391 }
10392 break;
10393 case kF64:
10394 switch (mode) {
10395 case kR2R:
10396 EMIT_INSTR_HANDLER(r2r_F64Select);
10397 F64Pop(); // val2
10398 F64Pop(); // val1
10399 return RegMode::kF64Reg;
10400 case kR2S:
10401 EMIT_INSTR_HANDLER(r2s_F64Select);
10402 F64Pop(); // val2
10403 F64Pop(); // val1
10404 F64Push(); // result
10405 return RegMode::kNoReg;
10406 case kS2R:
10407 EMIT_INSTR_HANDLER(s2r_F64Select);
10408 I32Pop(); // condition
10409 F64Pop(); // val2
10410 F64Pop(); // val1
10411 return RegMode::kF64Reg;
10412 case kS2S:
10413 EMIT_INSTR_HANDLER(s2s_F64Select);
10414 I32Pop(); // condition
10415 F64Pop();
10416 F64Pop();
10417 F64Push();
10418 return RegMode::kNoReg;
10419 }
10420 break;
10421 case kS128:
10422 switch (mode) {
10423 case kR2R:
10424 case kS2R:
10425 UNREACHABLE();
10426 case kR2S:
10427 EMIT_INSTR_HANDLER(r2s_S128Select);
10428 S128Pop();
10429 S128Pop();
10430 S128Push();
10431 return RegMode::kNoReg;
10432 case kS2S:
10433 EMIT_INSTR_HANDLER(s2s_S128Select);
10434 I32Pop(); // condition
10435 S128Pop();
10436 S128Pop();
10437 S128Push();
10438 return RegMode::kNoReg;
10439 }
10440 break;
10441 case kRef:
10442 case kRefNull:
10443 switch (mode) {
10444 case kR2R:
10445 case kS2R:
10446 UNREACHABLE();
10447 case kR2S: {
10448 EMIT_INSTR_HANDLER(r2s_RefSelect);
10449 RefPop(); // val2
10450 ValueType type = RefPop(); // val1
10451 RefPush(type); // result
10452 return RegMode::kNoReg;
10453 }
10454 case kS2S: {
10455 EMIT_INSTR_HANDLER(s2s_RefSelect);
10456 I32Pop(); // condition
10457 RefPop();
10458 ValueType type = RefPop();
10459 RefPush(type);
10460 return RegMode::kNoReg;
10461 }
10462 }
10463 break;
10464 default:
10465 UNREACHABLE();
10466 }
10467 break;
10468 }
10469
10470 case kExprLocalGet: {
10471 switch (slots_[stack_[instr.optional.index]].kind()) {
10472 case kI32:
10473 case kI64:
10474 case kF32:
10475 case kF64:
10476 case kS128:
10477 case kRef:
10478 case kRefNull:
10479 switch (mode) {
10480 case kR2R:
10481 case kR2S:
10482 case kS2R:
10483 UNREACHABLE();
10484 case kS2S:
10485 PushCopySlot(instr.optional.index);
10486 return RegMode::kNoReg;
10487 }
10488 break;
10489 default:
10490 UNREACHABLE();
10491 }
10492 break;
10493 }
10494 case kExprLocalSet: {
10495 DCHECK_LE(instr.optional.index, stack_size());
10496 // Validation ensures that the target slot type must be the same as the
10497 // stack top slot type.
10498 const ValueType value_type =
10499 slots_[stack_[instr.optional.index]].value_type;
10500 const ValueKind kind = value_type.kind();
10501 DCHECK(CheckEqualKind(kind, top_stack_slot_type));
10502 switch (kind) {
10503 case kI32:
10504 case kI64:
10505 case kF32:
10506 case kF64:
10507 switch (mode) {
10508 case kR2R:
10509 case kS2R:
10510 UNREACHABLE();
10511 case kR2S:
10512 CopyToSlotAndPop(value_type, instr.optional.index, false, true);
10513 return RegMode::kNoReg;
10514 case kS2S:
10515 CopyToSlotAndPop(value_type, instr.optional.index, false, false);
10516 return RegMode::kNoReg;
10517 }
10518 break;
10519 case kS128:
10520 switch (mode) {
10521 case kR2R:
10522 case kR2S:
10523 case kS2R:
10524 UNREACHABLE();
10525 case kS2S:
10526 CopyToSlotAndPop(value_type, instr.optional.index, false, false);
10527 return RegMode::kNoReg;
10528 }
10529 break;
10530 case kRef:
10531 case kRefNull:
10532 switch (mode) {
10533 case kR2R:
10534 case kR2S:
10535 case kS2R:
10536 UNREACHABLE();
10537 case kS2S:
10538 CopyToSlotAndPop(slots_[stack_.back()].value_type,
10539 instr.optional.index, false, false);
10540 return RegMode::kNoReg;
10541 }
10542 break;
10543 default:
10544 UNREACHABLE();
10545 }
10546 break;
10547 }
10548 case kExprLocalTee: {
10549 DCHECK_LE(instr.optional.index, stack_size());
10550 // Validation ensures that the target slot type must be the same as the
10551 // stack top slot type.
10552 const ValueType value_type =
10553 slots_[stack_[instr.optional.index]].value_type;
10554 const ValueKind kind = value_type.kind();
10555 DCHECK(CheckEqualKind(kind, top_stack_slot_type));
10556 switch (kind) {
10557 case kI32:
10558 case kI64:
10559 case kF32:
10560 case kF64:
10561 switch (mode) {
10562 case kR2R:
10563 CopyToSlotAndPop(value_type, instr.optional.index, true, true);
10564 return GetRegMode(value_type.kind());
10565 case kR2S:
10566 UNREACHABLE();
10567 case kS2R:
10568 UNREACHABLE();
10569 case kS2S:
10570 CopyToSlotAndPop(value_type, instr.optional.index, true, false);
10571 return RegMode::kNoReg;
10572 }
10573 break;
10574 case kS128:
10575 switch (mode) {
10576 case kR2R:
10577 case kR2S:
10578 case kS2R:
10579 UNREACHABLE();
10580 case kS2S:
10581 CopyToSlotAndPop(value_type, instr.optional.index, true, false);
10582 return RegMode::kNoReg;
10583 }
10584 break;
10585 case kRef:
10586 case kRefNull:
10587 switch (mode) {
10588 case kR2R:
10589 case kR2S:
10590 case kS2R:
10591 UNREACHABLE();
10592 case kS2S:
10593 CopyToSlotAndPop(slots_[stack_.back()].value_type,
10594 instr.optional.index, true, false);
10595 return RegMode::kNoReg;
10596 }
10597 break;
10598 default:
10599 UNREACHABLE();
10600 }
10601 break;
10602 }
10603 case kExprGlobalGet: {
10604 switch (GetGlobalType(instr.optional.index)) {
10605 case kI32:
10606 switch (mode) {
10607 case kR2R:
10608 case kR2S:
10609 UNREACHABLE();
10610 case kS2R:
10611 EMIT_INSTR_HANDLER(s2r_I32GlobalGet);
10612 EmitGlobalIndex(instr.optional.index);
10613 return RegMode::kI32Reg;
10614 case kS2S:
10615 EMIT_INSTR_HANDLER(s2s_I32GlobalGet);
10616 EmitGlobalIndex(instr.optional.index);
10617 I32Push();
10618 return RegMode::kNoReg;
10619 }
10620 break;
10621 case kI64:
10622 switch (mode) {
10623 case kR2R:
10624 case kR2S:
10625 UNREACHABLE();
10626 case kS2R:
10627 EMIT_INSTR_HANDLER(s2r_I64GlobalGet);
10628 EmitGlobalIndex(instr.optional.index);
10629 return RegMode::kI64Reg;
10630 case kS2S:
10631 EMIT_INSTR_HANDLER(s2s_I64GlobalGet);
10632 EmitGlobalIndex(instr.optional.index);
10633 I64Push();
10634 return RegMode::kNoReg;
10635 }
10636 break;
10637 case kF32:
10638 switch (mode) {
10639 case kR2R:
10640 case kR2S:
10641 UNREACHABLE();
10642 case kS2R:
10643 EMIT_INSTR_HANDLER(s2r_F32GlobalGet);
10644 EmitGlobalIndex(instr.optional.index);
10645 return RegMode::kF32Reg;
10646 case kS2S:
10647 EMIT_INSTR_HANDLER(s2s_F32GlobalGet);
10648 EmitGlobalIndex(instr.optional.index);
10649 F32Push();
10650 return RegMode::kNoReg;
10651 }
10652 break;
10653 case kF64:
10654 switch (mode) {
10655 case kR2R:
10656 case kR2S:
10657 UNREACHABLE();
10658 case kS2R:
10659 EMIT_INSTR_HANDLER(s2r_F64GlobalGet);
10660 EmitGlobalIndex(instr.optional.index);
10661 return RegMode::kF64Reg;
10662 case kS2S:
10663 EMIT_INSTR_HANDLER(s2s_F64GlobalGet);
10664 EmitGlobalIndex(instr.optional.index);
10665 F64Push();
10666 return RegMode::kNoReg;
10667 }
10668 break;
10669 case kS128:
10670 switch (mode) {
10671 case kR2R:
10672 case kR2S:
10673 case kS2R:
10674 UNREACHABLE();
10675 case kS2S:
10676 EMIT_INSTR_HANDLER(s2s_S128GlobalGet);
10677 EmitGlobalIndex(instr.optional.index);
10678 S128Push();
10679 return RegMode::kNoReg;
10680 }
10681 break;
10682 case kRef:
10683 case kRefNull:
10684 switch (mode) {
10685 case kR2R:
10686 case kR2S:
10687 case kS2R:
10688 UNREACHABLE();
10689 case kS2S:
10690 EMIT_INSTR_HANDLER(s2s_RefGlobalGet);
10691 EmitGlobalIndex(instr.optional.index);
10692 RefPush(module_->globals[instr.optional.index].type);
10693 return RegMode::kNoReg;
10694 }
10695 break;
10696 default:
10697 UNREACHABLE();
10698 }
10699 break;
10700 }
10701 case kExprGlobalSet: {
10702 switch (top_stack_slot_type) {
10703 case kI32:
10704 switch (mode) {
10705 case kR2R:
10706 case kS2R:
10707 UNREACHABLE();
10708 case kR2S:
10709 EMIT_INSTR_HANDLER(r2s_I32GlobalSet);
10710 EmitGlobalIndex(instr.optional.index);
10711 return RegMode::kNoReg;
10712 case kS2S:
10713 EMIT_INSTR_HANDLER(s2s_I32GlobalSet);
10714 EmitGlobalIndex(instr.optional.index);
10715 I32Pop();
10716 return RegMode::kNoReg;
10717 }
10718 break;
10719 case kI64:
10720 switch (mode) {
10721 case kR2R:
10722 case kS2R:
10723 UNREACHABLE();
10724 case kR2S:
10725 EMIT_INSTR_HANDLER(r2s_I64GlobalSet);
10726 EmitGlobalIndex(instr.optional.index);
10727 return RegMode::kNoReg;
10728 case kS2S:
10729 EMIT_INSTR_HANDLER(s2s_I64GlobalSet);
10730 EmitGlobalIndex(instr.optional.index);
10731 I64Pop();
10732 return RegMode::kNoReg;
10733 }
10734 break;
10735 case kF32:
10736 switch (mode) {
10737 case kR2R:
10738 case kS2R:
10739 UNREACHABLE();
10740 case kR2S:
10741 EMIT_INSTR_HANDLER(r2s_F32GlobalSet);
10742 EmitGlobalIndex(instr.optional.index);
10743 return RegMode::kNoReg;
10744 case kS2S:
10745 EMIT_INSTR_HANDLER(s2s_F32GlobalSet);
10746 EmitGlobalIndex(instr.optional.index);
10747 F32Pop();
10748 return RegMode::kNoReg;
10749 }
10750 break;
10751 case kF64:
10752 switch (mode) {
10753 case kR2R:
10754 case kS2R:
10755 UNREACHABLE();
10756 case kR2S:
10757 EMIT_INSTR_HANDLER(r2s_F64GlobalSet);
10758 EmitGlobalIndex(instr.optional.index);
10759 return RegMode::kNoReg;
10760 case kS2S:
10761 EMIT_INSTR_HANDLER(s2s_F64GlobalSet);
10762 EmitGlobalIndex(instr.optional.index);
10763 F64Pop();
10764 return RegMode::kNoReg;
10765 }
10766 break;
10767 case kS128:
10768 switch (mode) {
10769 case kR2R:
10770 case kR2S:
10771 case kS2R:
10772 UNREACHABLE();
10773 case kS2S:
10774 EMIT_INSTR_HANDLER(s2s_S128GlobalSet);
10775 EmitGlobalIndex(instr.optional.index);
10776 S128Pop();
10777 return RegMode::kNoReg;
10778 }
10779 break;
10780 case kRef:
10781 case kRefNull:
10782 switch (mode) {
10783 case kR2R:
10784 case kR2S:
10785 case kS2R:
10786 UNREACHABLE();
10787 case kS2S:
10788 EMIT_INSTR_HANDLER(s2s_RefGlobalSet);
10789 EmitGlobalIndex(instr.optional.index);
10790 RefPop();
10791 return RegMode::kNoReg;
10792 }
10793 break;
10794 default:
10795 UNREACHABLE();
10796 }
10797 break;
10798 }
10799
10800 case kExprTableGet: {
10801 bool is_table64 = module_->tables[instr.optional.index].is_table64();
10802 EMIT_MEM64_INSTR_HANDLER_WITH_PC(s2s_TableGet, s2s_Table64Get, is_table64,
10803 instr.pc);
10804 EmitI32Const(instr.optional.index);
10805 is_table64 ? I64Pop() : I32Pop();
10806 RefPush(module_->tables[instr.optional.index].type);
10807 break;
10808 }
10809
10810 case kExprTableSet: {
10811 bool is_table64 = module_->tables[instr.optional.index].is_table64();
10812 EMIT_MEM64_INSTR_HANDLER_WITH_PC(s2s_TableSet, s2s_Table64Set, is_table64,
10813 instr.pc);
10814 EmitI32Const(instr.optional.index);
10815 RefPop();
10816 is_table64 ? I64Pop() : I32Pop();
10817 break;
10818 }
10819
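// Memory loads are emitted in one of four register/stack modes: r2* keeps
// the memory index in the register file (no MemIndexPop), while *2r leaves
// the loaded value in the register (no type##Push). The _Idx64 handler
// variants are selected when is_memory64_ is set and the memory index on
// the stack is a 64-bit value.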
10820#define LOAD_CASE(name, ctype, mtype, rep, type) \
10821 case kExpr##name: { \
10822 switch (mode) { \
10823 case kR2R: \
10824 EMIT_MEM64_INSTR_HANDLER_WITH_PC(r2r_##name, r2r_##name##_Idx64, \
10825 is_memory64_, instr.pc); \
10826 EmitMemoryOffset(instr.optional.offset); \
10827 return RegMode::k##type##Reg; \
10828 case kR2S: \
10829 EMIT_MEM64_INSTR_HANDLER_WITH_PC(r2s_##name, r2s_##name##_Idx64, \
10830 is_memory64_, instr.pc); \
10831 EmitMemoryOffset(instr.optional.offset); \
10832 type##Push(); \
10833 return RegMode::kNoReg; \
10834 case kS2R: \
10835 EMIT_MEM64_INSTR_HANDLER_WITH_PC(s2r_##name, s2r_##name##_Idx64, \
10836 is_memory64_, instr.pc); \
10837 EmitMemoryOffset(instr.optional.offset); \
10838 MemIndexPop(); \
10839 return RegMode::k##type##Reg; \
10840 case kS2S: \
10841 EMIT_MEM64_INSTR_HANDLER_WITH_PC(s2s_##name, s2s_##name##_Idx64, \
10842 is_memory64_, instr.pc); \
10843 EmitMemoryOffset(instr.optional.offset); \
10844 MemIndexPop(); \
10845 type##Push(); \
10846 return RegMode::kNoReg; \
10847 } \
10848 break; \
10849 }
10850 LOAD_CASE(I32LoadMem8S, int32_t, int8_t, kWord8, I32);
10851 LOAD_CASE(I32LoadMem8U, int32_t, uint8_t, kWord8, I32);
10852 LOAD_CASE(I32LoadMem16S, int32_t, int16_t, kWord16, I32);
10853 LOAD_CASE(I32LoadMem16U, int32_t, uint16_t, kWord16, I32);
10854 LOAD_CASE(I64LoadMem8S, int64_t, int8_t, kWord8, I64);
10856 LOAD_CASE(I64LoadMem8U, int64_t, uint8_t, kWord8, I64);
10856 LOAD_CASE(I64LoadMem16S, int64_t, int16_t, kWord16, I64);
10857 LOAD_CASE(I64LoadMem16U, int64_t, uint16_t, kWord16, I64);
10858 LOAD_CASE(I64LoadMem32S, int64_t, int32_t, kWord32, I64);
10859 LOAD_CASE(I64LoadMem32U, int64_t, uint32_t, kWord32, I64);
10860 LOAD_CASE(I32LoadMem, int32_t, int32_t, kWord32, I32);
10861 LOAD_CASE(I64LoadMem, int64_t, int64_t, kWord64, I64);
10862 LOAD_CASE(F32LoadMem, Float32, uint32_t, kFloat32, F32);
10863 LOAD_CASE(F64LoadMem, Float64, uint64_t, kFloat64, F64);
10864#undef LOAD_CASE
10865
10866#define STORE_CASE(name, ctype, mtype, rep, type) \
10867 case kExpr##name: { \
10868 switch (mode) { \
10869 case kR2R: \
10870 case kS2R: \
10871 UNREACHABLE(); \
10872 break; \
10873 case kR2S: \
10874 EMIT_MEM64_INSTR_HANDLER_WITH_PC(r2s_##name, r2s_##name##_Idx64, \
10875 is_memory64_, instr.pc); \
10876 EmitMemoryOffset(instr.optional.offset); \
10877 MemIndexPop(); \
10878 return RegMode::kNoReg; \
10879 case kS2S: \
10880 EMIT_MEM64_INSTR_HANDLER_WITH_PC(s2s_##name, s2s_##name##_Idx64, \
10881 is_memory64_, instr.pc); \
10882 type##Pop(); \
10883 EmitMemoryOffset(instr.optional.offset); \
10884 MemIndexPop(); \
10885 return RegMode::kNoReg; \
10886 } \
10887 break; \
10888 }
10889 STORE_CASE(I32StoreMem8, int32_t, int8_t, kWord8, I32);
10890 STORE_CASE(I32StoreMem16, int32_t, int16_t, kWord16, I32);
10891 STORE_CASE(I64StoreMem8, int64_t, int8_t, kWord8, I64);
10892 STORE_CASE(I64StoreMem16, int64_t, int16_t, kWord16, I64);
10893 STORE_CASE(I64StoreMem32, int64_t, int32_t, kWord32, I64);
10894 STORE_CASE(I32StoreMem, int32_t, int32_t, kWord32, I32);
10895 STORE_CASE(I64StoreMem, int64_t, int64_t, kWord64, I64);
10896 STORE_CASE(F32StoreMem, Float32, uint32_t, kFloat32, F32);
10897 STORE_CASE(F64StoreMem, Float64, uint64_t, kFloat64, F64);
10898#undef STORE_CASE
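// Note the s2s store order above: the value to store is popped first, then
// the static memory offset is emitted and the memory index is popped,
// matching the order in which the store handlers decode their operands.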
10899
10900 case kExprMemoryGrow: {
10901 EMIT_MEM64_INSTR_HANDLER(s2s_MemoryGrow, s2s_Memory64Grow, is_memory64_);
10902 MemIndexPop();
10903 MemIndexPush();
10904 break;
10905 }
10906 case kExprMemorySize:
10907 EMIT_MEM64_INSTR_HANDLER(s2s_MemorySize, s2s_Memory64Size, is_memory64_);
10908 MemIndexPush();
10909 break;
10910
10911 case kExprI32Const: {
10912 switch (mode) {
10913 case kR2R:
10914 case kR2S:
10915 case kS2R:
10916 UNREACHABLE();
10917 case kS2S:
10918 PushConstSlot<int32_t>(instr.optional.i32);
10919 return RegMode::kNoReg;
10920 }
10921 break;
10922 }
10923 case kExprI64Const: {
10924 switch (mode) {
10925 case kR2R:
10926 case kR2S:
10927 case kS2R:
10928 UNREACHABLE();
10929 case kS2S:
10930 PushConstSlot<int64_t>(instr.optional.i64);
10931 return RegMode::kNoReg;
10932 }
10933 break;
10934 }
10935 case kExprF32Const: {
10936 switch (mode) {
10937 case kR2R:
10938 case kR2S:
10939 case kS2R:
10940 UNREACHABLE();
10941 case kS2S:
10942 PushConstSlot<float>(instr.optional.f32);
10943 return RegMode::kNoReg;
10944 }
10945 break;
10946 }
10947 case kExprF64Const: {
10948 switch (mode) {
10949 case kR2R:
10950 case kR2S:
10951 case kS2R:
10952 UNREACHABLE();
10953 case kS2S:
10954 PushConstSlot<double>(instr.optional.f64);
10955 return RegMode::kNoReg;
10956 }
10957 break;
10958 }
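// The const cases above emit no instruction handler in s2s mode: the value
// is stored in a constant slot that is simply pushed on the simulated
// stack, and later instructions reference that slot directly.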
10959
10960#define EXECUTE_BINOP(name, ctype, reg, op, type) \
10961 case kExpr##name: { \
10962 switch (mode) { \
10963 case kR2R: \
10964 EMIT_INSTR_HANDLER(r2r_##name); \
10965 type##Pop(); \
10966 return RegMode::kI32Reg; \
10967 case kR2S: \
10968 EMIT_INSTR_HANDLER(r2s_##name); \
10969 type##Pop(); \
10970 I32Push(); \
10971 return RegMode::kNoReg; \
10972 case kS2R: \
10973 EMIT_INSTR_HANDLER(s2r_##name); \
10974 type##Pop(); \
10975 type##Pop(); \
10976 return RegMode::kI32Reg; \
10977 case kS2S: \
10978 EMIT_INSTR_HANDLER(s2s_##name); \
10979 type##Pop(); \
10980 type##Pop(); \
10981 I32Push(); \
10982 return RegMode::kNoReg; \
10983 } \
10984 break; \
10985 }
10986 FOREACH_COMPARISON_BINOP(EXECUTE_BINOP)
10987#undef EXECUTE_BINOP
10988
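// The comparison binops above always produce an i32 boolean (I32Push or
// kI32Reg), regardless of the type of the compared operands.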
10989#define EXECUTE_BINOP(name, ctype, reg, op, type) \
10990 case kExpr##name: { \
10991 switch (mode) { \
10992 case kR2R: \
10993 EMIT_INSTR_HANDLER(r2r_##name); \
10994 type##Pop(); \
10995 return RegMode::k##type##Reg; \
10996 case kR2S: \
10997 EMIT_INSTR_HANDLER(r2s_##name); \
10998 type##Pop(); \
10999 type##Push(); \
11000 return RegMode::kNoReg; \
11001 case kS2R: \
11002 EMIT_INSTR_HANDLER(s2r_##name); \
11003 type##Pop(); \
11004 type##Pop(); \
11005 return RegMode::k##type##Reg; \
11006 case kS2S: \
11007 EMIT_INSTR_HANDLER(s2s_##name); \
11008 type##Pop(); \
11009 type##Pop(); \
11010 type##Push(); \
11011 return RegMode::kNoReg; \
11012 } \
11013 break; \
11014 }
11015 FOREACH_ARITHMETIC_BINOP(EXECUTE_BINOP)
11016 FOREACH_MORE_BINOP(EXECUTE_BINOP)
11017#undef EXECUTE_BINOP
11018
11019#define EXECUTE_BINOP(name, ctype, reg, op, type) \
11020 case kExpr##name: { \
11021 switch (mode) { \
11022 case kR2R: \
11023 EMIT_INSTR_HANDLER_WITH_PC(r2r_##name, instr.pc); \
11024 type##Pop(); \
11025 return RegMode::k##type##Reg; \
11026 case kR2S: \
11027 EMIT_INSTR_HANDLER_WITH_PC(r2s_##name, instr.pc); \
11028 type##Pop(); \
11029 type##Push(); \
11030 return RegMode::kNoReg; \
11031 case kS2R: \
11032 EMIT_INSTR_HANDLER_WITH_PC(s2r_##name, instr.pc); \
11033 type##Pop(); \
11034 type##Pop(); \
11035 return RegMode::k##type##Reg; \
11036 case kS2S: \
11037 EMIT_INSTR_HANDLER_WITH_PC(s2s_##name, instr.pc); \
11038 type##Pop(); \
11039 type##Pop(); \
11040 type##Push(); \
11041 return RegMode::kNoReg; \
11042 } \
11043 break; \
11044 }
11045 FOREACH_TRAPPING_BINOP(EXECUTE_BINOP)
11046#undef EXECUTE_BINOP
11047
11048#define EXECUTE_UNOP(name, ctype, reg, op, type) \
11049 case kExpr##name: { \
11050 switch (mode) { \
11051 case kR2R: \
11052 EMIT_INSTR_HANDLER(r2r_##name); \
11053 return RegMode::k##type##Reg; \
11054 case kR2S: \
11055 EMIT_INSTR_HANDLER(r2s_##name); \
11056 type##Push(); \
11057 return RegMode::kNoReg; \
11058 case kS2R: \
11059 EMIT_INSTR_HANDLER(s2r_##name); \
11060 type##Pop(); \
11061 return RegMode::k##type##Reg; \
11062 case kS2S: \
11063 EMIT_INSTR_HANDLER(s2s_##name); \
11064 type##Pop(); \
11065 type##Push(); \
11066 return RegMode::kNoReg; \
11067 } \
11068 break; \
11069 }
11070 FOREACH_SIMPLE_UNOP(EXECUTE_UNOP)
11071#undef EXECUTE_UNOP
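// Unary ops follow the same mode scheme with a single operand: in r2*
// modes the input already lives in the register file, so no stack slot is
// popped.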
11072
11073#define EXECUTE_UNOP(name, from_ctype, from_type, from_reg, to_ctype, to_type, \
11074 to_reg) \
11075 case kExpr##name: { \
11076 switch (mode) { \
11077 case kR2R: \
11078 EMIT_INSTR_HANDLER(r2r_##name); \
11079 return RegMode::k##to_type##Reg; \
11080 case kR2S: \
11081 EMIT_INSTR_HANDLER(r2s_##name); \
11082 to_type##Push(); \
11083 return RegMode::kNoReg; \
11084 case kS2R: \
11085 EMIT_INSTR_HANDLER(s2r_##name); \
11086 from_type##Pop(); \
11087 return RegMode::k##to_type##Reg; \
11088 case kS2S: \
11089 EMIT_INSTR_HANDLER(s2s_##name); \
11090 from_type##Pop(); \
11091 to_type##Push(); \
11092 return RegMode::kNoReg; \
11093 } \
11094 break; \
11095 }
11100#undef EXECUTE_UNOP
11101
11102#define EXECUTE_UNOP(name, from_ctype, from_type, from_reg, to_ctype, to_type, \
11103 to_reg) \
11104 case kExpr##name: { \
11105 switch (mode) { \
11106 case kR2R: \
11107 EMIT_INSTR_HANDLER_WITH_PC(r2r_##name, instr.pc); \
11108 return RegMode::k##to_type##Reg; \
11109 case kR2S: \
11110 EMIT_INSTR_HANDLER_WITH_PC(r2s_##name, instr.pc); \
11111 to_type##Push(); \
11112 return RegMode::kNoReg; \
11113 case kS2R: \
11114 EMIT_INSTR_HANDLER_WITH_PC(s2r_##name, instr.pc); \
11115 from_type##Pop(); \
11116 return RegMode::k##to_type##Reg; \
11117 case kS2S: \
11118 EMIT_INSTR_HANDLER_WITH_PC(s2s_##name, instr.pc); \
11119 from_type##Pop(); \
11120 to_type##Push(); \
11121 return RegMode::kNoReg; \
11122 } \
11123 break; \
11124 }
11127#undef EXECUTE_UNOP
11128
11129#define EXECUTE_UNOP(name, from_ctype, from_type, to_ctype, to_type, op) \
11130 case kExpr##name: { \
11131 switch (mode) { \
11132 case kR2R: \
11133 EMIT_INSTR_HANDLER(r2r_##name); \
11134 return RegMode::k##to_type##Reg; \
11135 case kR2S: \
11136 EMIT_INSTR_HANDLER(r2s_##name); \
11137 to_type##Push(); \
11138 return RegMode::kNoReg; \
11139 case kS2R: \
11140 EMIT_INSTR_HANDLER(s2r_##name); \
11141 from_type##Pop(); \
11142 return RegMode::k##to_type##Reg; \
11143 case kS2S: \
11144 EMIT_INSTR_HANDLER(s2s_##name); \
11145 from_type##Pop(); \
11146 to_type##Push(); \
11147 return RegMode::kNoReg; \
11148 } \
11149 break; \
11150 }
11152#undef EXECUTE_UNOP
11153
11154#define EXECUTE_UNOP(name, from_ctype, from_type, to_ctype, to_type) \
11155 case kExpr##name: { \
11156 switch (mode) { \
11157 case kR2R: \
11158 EMIT_INSTR_HANDLER(r2r_##name); \
11159 return RegMode::k##to_type##Reg; \
11160 case kR2S: \
11161 EMIT_INSTR_HANDLER(r2s_##name); \
11162 to_type##Push(); \
11163 return RegMode::kNoReg; \
11164 case kS2R: \
11165 EMIT_INSTR_HANDLER(s2r_##name); \
11166 from_type##Pop(); \
11167 return RegMode::k##to_type##Reg; \
11168 case kS2S: \
11169 EMIT_INSTR_HANDLER(s2s_##name); \
11170 from_type##Pop(); \
11171 to_type##Push(); \
11172 return RegMode::kNoReg; \
11173 } \
11174 break; \
11175 }
11177#undef EXECUTE_UNOP
11178
11179 case kExprRefNull: {
11180 EMIT_INSTR_HANDLER(s2s_RefNull);
11181 ValueType value_type = ValueType::RefNull(
11182 HeapType::FromBits(instr.optional.ref_type_bit_field));
11183 EmitI32Const(value_type.raw_bit_field());
11184 RefPush(value_type);
11185 break;
11186 }
11187
11188 case kExprRefIsNull:
11189 EMIT_INSTR_HANDLER(s2s_RefIsNull);
11190 RefPop();
11191 I32Push();
11192 break;
11193
11194 case kExprRefFunc: {
11195 EMIT_INSTR_HANDLER(s2s_RefFunc);
11196 EmitI32Const(instr.optional.index);
11197 ModuleTypeIndex sig_index =
11198 module_->functions[instr.optional.index].sig_index;
11199 ValueType value_type = ValueType::Ref(module_->heap_type(sig_index));
11200 RefPush(value_type);
11201 break;
11202 }
11203
11204 case kExprRefEq:
11205 EMIT_INSTR_HANDLER(s2s_RefEq);
11206 RefPop();
11207 RefPop();
11208 I32Push();
11209 break;
11210
11211 case kExprRefAsNonNull: {
11212 EMIT_INSTR_HANDLER_WITH_PC(s2s_RefAsNonNull, instr.pc);
11213 ValueType value_type = RefPop();
11214 RefPush(value_type.AsNonNull());
11215 break;
11216 }
11217
11218 case kExprStructNew: {
11219 EMIT_INSTR_HANDLER(s2s_StructNew);
11220 EmitI32Const(instr.optional.index);
11221 // Pop the field values, in reverse declaration order.
11222 const StructType* struct_type = module_->struct_type(
11223 ModuleTypeIndex({instr.optional.gc_field_immediate.struct_index}));
11224 for (uint32_t i = struct_type->field_count(); i > 0;) {
11225 i--;
11226 ValueKind kind = struct_type->field(i).kind();
11227 Pop(kind);
11228 }
11229
11230 ModuleTypeIndex type_index{instr.optional.index};
11231 RefPush(ValueType::Ref(module_->heap_type(type_index)));
11232 break;
11233 }
11234
11235 case kExprStructNewDefault: {
11236 EMIT_INSTR_HANDLER(s2s_StructNewDefault);
11237 EmitI32Const(instr.optional.index);
11238 ModuleTypeIndex type_index{instr.optional.index};
11239 RefPush(ValueType::Ref(module_->heap_type(type_index)));
11240 break;
11241 }
11242
11243 case kExprStructGet:
11244 case kExprStructGetS:
11245 case kExprStructGetU: {
11246 bool is_signed = (instr.opcode == wasm::kExprStructGetS);
11247 const StructType* struct_type = module_->struct_type(
11248 ModuleTypeIndex({instr.optional.gc_field_immediate.struct_index}));
11249 uint32_t field_index = instr.optional.gc_field_immediate.field_index;
11250 ValueType value_type = struct_type->field(field_index);
11251 ValueKind kind = value_type.kind();
11252 int offset = StructFieldOffset(struct_type, field_index);
11253 switch (kind) {
11254 case kI8:
11255 if (is_signed) {
11256 EMIT_INSTR_HANDLER_WITH_PC(s2s_I8SStructGet, instr.pc);
11257 } else {
11258 EMIT_INSTR_HANDLER_WITH_PC(s2s_I8UStructGet, instr.pc);
11259 }
11260 RefPop();
11261 EmitI32Const(offset);
11262 I32Push();
11263 break;
11264 case kI16:
11265 if (is_signed) {
11266 EMIT_INSTR_HANDLER_WITH_PC(s2s_I16SStructGet, instr.pc);
11267 } else {
11268 EMIT_INSTR_HANDLER_WITH_PC(s2s_I16UStructGet, instr.pc);
11269 }
11270 RefPop();
11271 EmitI32Const(offset);
11272 I32Push();
11273 break;
11274 case kI32:
11275 EMIT_INSTR_HANDLER_WITH_PC(s2s_I32StructGet, instr.pc);
11276 RefPop();
11277 EmitI32Const(offset);
11278 I32Push();
11279 break;
11280 case kI64:
11281 EMIT_INSTR_HANDLER_WITH_PC(s2s_I64StructGet, instr.pc);
11282 RefPop();
11283 EmitI32Const(offset);
11284 I64Push();
11285 break;
11286 case kF32:
11287 EMIT_INSTR_HANDLER_WITH_PC(s2s_F32StructGet, instr.pc);
11288 RefPop();
11289 EmitI32Const(offset);
11290 F32Push();
11291 break;
11292 case kF64:
11293 EMIT_INSTR_HANDLER_WITH_PC(s2s_F64StructGet, instr.pc);
11294 RefPop();
11295 EmitI32Const(offset);
11296 F64Push();
11297 break;
11298 case kS128:
11299 EMIT_INSTR_HANDLER_WITH_PC(s2s_S128StructGet, instr.pc);
11300 RefPop();
11301 EmitI32Const(offset);
11302 S128Push();
11303 break;
11304 case kRef:
11305 case kRefNull:
11306 EMIT_INSTR_HANDLER_WITH_PC(s2s_RefStructGet, instr.pc);
11307 RefPop();
11308 EmitI32Const(offset);
11309 RefPush(value_type);
11310 break;
11311 default:
11312 UNREACHABLE();
11313 }
11314 break;
11315 }
11316
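// struct.set pops the new field value (typed per the field kind), then the
// struct reference; the field offset is passed as an i32 immediate, as in
// the struct.get cases above.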
11317 case kExprStructSet: {
11318 const StructType* struct_type = module_->struct_type(
11319 ModuleTypeIndex({instr.optional.gc_field_immediate.struct_index}));
11320 uint32_t field_index = instr.optional.gc_field_immediate.field_index;
11321 int offset = StructFieldOffset(struct_type, field_index);
11322 ValueKind kind = struct_type->field(field_index).kind();
11323 switch (kind) {
11324 case kI8:
11325 EMIT_INSTR_HANDLER_WITH_PC(s2s_I8StructSet, instr.pc);
11326 EmitI32Const(offset);
11327 I32Pop();
11328 break;
11329 case kI16:
11330 EMIT_INSTR_HANDLER_WITH_PC(s2s_I16StructSet, instr.pc);
11331 EmitI32Const(offset);
11332 I32Pop();
11333 break;
11334 case kI32:
11335 EMIT_INSTR_HANDLER_WITH_PC(s2s_I32StructSet, instr.pc);
11336 EmitI32Const(offset);
11337 I32Pop();
11338 break;
11339 case kI64:
11340 EMIT_INSTR_HANDLER_WITH_PC(s2s_I64StructSet, instr.pc);
11341 EmitI32Const(offset);
11342 I64Pop();
11343 break;
11344 case kF32:
11345 EMIT_INSTR_HANDLER_WITH_PC(s2s_F32StructSet, instr.pc);
11346 EmitI32Const(offset);
11347 F32Pop();
11348 break;
11349 case kF64:
11350 EMIT_INSTR_HANDLER_WITH_PC(s2s_F64StructSet, instr.pc);
11351 EmitI32Const(offset);
11352 F64Pop();
11353 break;
11354 case kS128:
11355 EMIT_INSTR_HANDLER_WITH_PC(s2s_S128StructSet, instr.pc);
11356 EmitI32Const(offset);
11357 S128Pop();
11358 break;
11359 case kRef:
11360 case kRefNull:
11361 EMIT_INSTR_HANDLER_WITH_PC(s2s_RefStructSet, instr.pc);
11362 EmitI32Const(offset);
11363 RefPop();
11364 break;
11365 default:
11366 UNREACHABLE();
11367 }
11368 RefPop(); // The object to set the field to.
11369 break;
11370 }
11371
11372 case kExprArrayNew: {
11373 uint32_t array_index = instr.optional.gc_array_new_fixed.array_index;
11374 const ArrayType* array_type =
11375 module_->array_type(ModuleTypeIndex({array_index}));
11376 ValueType element_type = array_type->element_type();
11377 ValueKind kind = element_type.kind();
11378
11379 // Pop a single value to be used to initialize the array.
11380 switch (kind) {
11381 case kI8:
11382 EMIT_INSTR_HANDLER_WITH_PC(s2s_I8ArrayNew, instr.pc);
11383 EmitI32Const(array_index);
11384 I32Pop(); // Array length.
11385 I32Pop(); // Initialization value.
11386 break;
11387 case kI16:
11388 EMIT_INSTR_HANDLER_WITH_PC(s2s_I16ArrayNew, instr.pc);
11389 EmitI32Const(array_index);
11390 I32Pop();
11391 I32Pop();
11392 break;
11393 case kI32:
11394 EMIT_INSTR_HANDLER_WITH_PC(s2s_I32ArrayNew, instr.pc);
11395 EmitI32Const(array_index);
11396 I32Pop();
11397 I32Pop();
11398 break;
11399 case kI64:
11400 EMIT_INSTR_HANDLER_WITH_PC(s2s_I64ArrayNew, instr.pc);
11401 EmitI32Const(array_index);
11402 I32Pop();
11403 I64Pop();
11404 break;
11405 case kF32:
11406 EMIT_INSTR_HANDLER_WITH_PC(s2s_F32ArrayNew, instr.pc);
11407 EmitI32Const(array_index);
11408 I32Pop();
11409 F32Pop();
11410 break;
11411 case kF64:
11412 EMIT_INSTR_HANDLER_WITH_PC(s2s_F64ArrayNew, instr.pc);
11413 EmitI32Const(array_index);
11414 I32Pop();
11415 F64Pop();
11416 break;
11417 case kS128:
11418 EMIT_INSTR_HANDLER_WITH_PC(s2s_S128ArrayNew, instr.pc);
11419 EmitI32Const(array_index);
11420 I32Pop();
11421 S128Pop();
11422 break;
11423 case kRef:
11424 case kRefNull:
11425 EMIT_INSTR_HANDLER_WITH_PC(s2s_RefArrayNew, instr.pc);
11426 EmitI32Const(array_index);
11427 I32Pop();
11428 RefPop();
11429 break;
11430 default:
11431 UNREACHABLE();
11432 }
11433 // Push the new array.
11434 RefPush(
11435 ValueType::Ref(module_->heap_type(ModuleTypeIndex({array_index}))));
11436 break;
11437 }
11438
11439 case kExprArrayNewFixed: {
11440 EMIT_INSTR_HANDLER_WITH_PC(s2s_ArrayNewFixed, instr.pc);
11441 uint32_t length = instr.optional.gc_array_new_fixed.length;
11442 uint32_t array_index = instr.optional.gc_array_new_fixed.array_index;
11443 EmitI32Const(array_index);
11444 EmitI32Const(length);
11445 const ArrayType* array_type =
11446 module_->array_type(ModuleTypeIndex({array_index}));
11447 ValueType element_type = array_type->element_type();
11448 ValueKind kind = element_type.kind();
11449 // Pop values to initialize the array.
11450 for (uint32_t i = 0; i < length; i++) {
11451 switch (kind) {
11452 case kI8:
11453 case kI16:
11454 case kI32:
11455 I32Pop();
11456 break;
11457 case kI64:
11458 I64Pop();
11459 break;
11460 case kF32:
11461 F32Pop();
11462 break;
11463 case kF64:
11464 F64Pop();
11465 break;
11466 case kS128:
11467 S128Pop();
11468 break;
11469 case kRef:
11470 case kRefNull:
11471 RefPop();
11472 break;
11473 default:
11474 UNREACHABLE();
11475 }
11476 }
11477 // Push the new array.
11478 RefPush(
11479 ValueType::Ref(module_->heap_type(ModuleTypeIndex({array_index}))));
11480 break;
11481 }
11482
11483 case kExprArrayNewDefault: {
11484 EMIT_INSTR_HANDLER_WITH_PC(s2s_ArrayNewDefault, instr.pc);
11485 EmitI32Const(instr.optional.index);
11486 I32Pop();
11487 // Push the new array.
11488 ModuleTypeIndex array_index{instr.optional.index};
11489 RefPush(
11490 ValueType::Ref(module_->heap_type(array_index)));
11491 break;
11492 }
11493
11494 case kExprArrayNewData: {
11495 EMIT_INSTR_HANDLER_WITH_PC(s2s_ArrayNewData, instr.pc);
11496 uint32_t array_index =
11497 instr.optional.gc_array_new_or_init_data.array_index;
11498 EmitI32Const(array_index);
11499 uint32_t data_index = instr.optional.gc_array_new_or_init_data.data_index;
11500 EmitI32Const(data_index);
11501 I32Pop();
11502 I32Pop();
11503 // Push the new array.
11504 RefPush(
11505 ValueType::Ref(module_->heap_type(ModuleTypeIndex({array_index}))));
11506 break;
11507 }
11508
11509 case kExprArrayNewElem: {
11510 EMIT_INSTR_HANDLER_WITH_PC(s2s_ArrayNewElem, instr.pc);
11511 uint32_t array_index =
11512 instr.optional.gc_array_new_or_init_data.array_index;
11513 EmitI32Const(array_index);
11514 uint32_t data_index = instr.optional.gc_array_new_or_init_data.data_index;
11515 EmitI32Const(data_index);
11516 I32Pop();
11517 I32Pop();
11518 // Push the new array.
11519 RefPush(
11520 ValueType::Ref(module_->heap_type(ModuleTypeIndex({array_index}))));
11521 break;
11522 }
11523
11524 case kExprArrayInitData: {
11525 EMIT_INSTR_HANDLER_WITH_PC(s2s_ArrayInitData, instr.pc);
11526 uint32_t array_index =
11527 instr.optional.gc_array_new_or_init_data.array_index;
11528 EmitI32Const(array_index);
11529 uint32_t data_index = instr.optional.gc_array_new_or_init_data.data_index;
11530 EmitI32Const(data_index);
11531 I32Pop(); // size
11532 I32Pop(); // src offset
11533 I32Pop(); // dest offset
11534 RefPop(); // array to initialize
11535 break;
11536 }
11537
11538 case kExprArrayInitElem: {
11539 EMIT_INSTR_HANDLER_WITH_PC(s2s_ArrayInitElem, instr.pc);
11540 uint32_t array_index =
11541 instr.optional.gc_array_new_or_init_data.array_index;
11542 EmitI32Const(array_index);
11543 uint32_t data_index = instr.optional.gc_array_new_or_init_data.data_index;
11544 EmitI32Const(data_index);
11545 I32Pop(); // size
11546 I32Pop(); // src offset
11547 I32Pop(); // dest offset
11548 RefPop(); // array to initialize
11549 break;
11550 }
11551
11552 case kExprArrayLen: {
11553 EMIT_INSTR_HANDLER_WITH_PC(s2s_ArrayLen, instr.pc);
11554 RefPop();
11555 I32Push();
11556 break;
11557 }
11558
11559 case kExprArrayCopy: {
11560 EMIT_INSTR_HANDLER_WITH_PC(s2s_ArrayCopy, instr.pc);
11561 EmitI32Const(instr.optional.gc_array_copy.dest_array_index);
11562 EmitI32Const(instr.optional.gc_array_copy.src_array_index);
11563 I32Pop(); // size
11564 I32Pop(); // src offset
11565 RefPop(); // src array
11566 I32Pop(); // dest offset
11567 RefPop(); // dest array
11568 break;
11569 }
11570
11571 case kExprArrayGet:
11572 case kExprArrayGetS:
11573 case kExprArrayGetU: {
11574 bool is_signed = (instr.opcode == wasm::kExprArrayGetS);
11575 const ArrayType* array_type =
11576 module_->array_type(ModuleTypeIndex({instr.optional.index}));
11577 ValueType element_type = array_type->element_type();
11578 ValueKind kind = element_type.kind();
11579 switch (kind) {
11580 case kI8:
11581 if (is_signed) {
11582 EMIT_INSTR_HANDLER_WITH_PC(s2s_I8SArrayGet, instr.pc);
11583 } else {
11584 EMIT_INSTR_HANDLER_WITH_PC(s2s_I8UArrayGet, instr.pc);
11585 }
11586 I32Pop();
11587 RefPop();
11588 I32Push();
11589 break;
11590 case kI16:
11591 if (is_signed) {
11592 EMIT_INSTR_HANDLER_WITH_PC(s2s_I16SArrayGet, instr.pc);
11593 } else {
11594 EMIT_INSTR_HANDLER_WITH_PC(s2s_I16UArrayGet, instr.pc);
11595 }
11596 I32Pop();
11597 RefPop();
11598 I32Push();
11599 break;
11600 case kI32:
11601 EMIT_INSTR_HANDLER_WITH_PC(s2s_I32ArrayGet, instr.pc);
11602 I32Pop();
11603 RefPop();
11604 I32Push();
11605 break;
11606 case kI64:
11607 EMIT_INSTR_HANDLER_WITH_PC(s2s_I64ArrayGet, instr.pc);
11608 I32Pop();
11609 RefPop();
11610 I64Push();
11611 break;
11612 case kF32:
11613 EMIT_INSTR_HANDLER_WITH_PC(s2s_F32ArrayGet, instr.pc);
11614 I32Pop();
11615 RefPop();
11616 F32Push();
11617 break;
11618 case kF64:
11619 EMIT_INSTR_HANDLER_WITH_PC(s2s_F64ArrayGet, instr.pc);
11620 I32Pop();
11621 RefPop();
11622 F64Push();
11623 break;
11624 case kS128:
11625 EMIT_INSTR_HANDLER_WITH_PC(s2s_S128ArrayGet, instr.pc);
11626 I32Pop();
11627 RefPop();
11628 S128Push();
11629 break;
11630 case kRef:
11631 case kRefNull:
11632 EMIT_INSTR_HANDLER_WITH_PC(s2s_RefArrayGet, instr.pc);
11633 I32Pop();
11634 RefPop();
11635 RefPush(element_type);
11636 break;
11637 default:
11638 UNREACHABLE();
11639 }
11640 break;
11641 }
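// In all array.get variants the element index is popped before the array
// reference, i.e. in reverse of the wasm operand order in which they were
// pushed.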
11642
11643 case kExprArraySet: {
11644 const ArrayType* array_type =
11645 module_->array_type(ModuleTypeIndex({instr.optional.index}));
11646 ValueKind kind = array_type->element_type().kind();
11647 switch (kind) {
11648 case kI8:
11649 EMIT_INSTR_HANDLER_WITH_PC(s2s_I8ArraySet, instr.pc);
11650 I32Pop();
11651 I32Pop();
11652 RefPop();
11653 break;
11654 case kI16:
11655 EMIT_INSTR_HANDLER_WITH_PC(s2s_I16ArraySet, instr.pc);
11656 I32Pop();
11657 I32Pop();
11658 RefPop();
11659 break;
11660 case kI32:
11661 EMIT_INSTR_HANDLER_WITH_PC(s2s_I32ArraySet, instr.pc);
11662 I32Pop();
11663 I32Pop();
11664 RefPop();
11665 break;
11666 case kI64:
11667 EMIT_INSTR_HANDLER_WITH_PC(s2s_I64ArraySet, instr.pc);
11668 I64Pop();
11669 I32Pop();
11670 RefPop();
11671 break;
11672 case kF32:
11673 EMIT_INSTR_HANDLER_WITH_PC(s2s_F32ArraySet, instr.pc);
11674 F32Pop();
11675 I32Pop();
11676 RefPop();
11677 break;
11678 case kF64:
11679 EMIT_INSTR_HANDLER_WITH_PC(s2s_F64ArraySet, instr.pc);
11680 F64Pop();
11681 I32Pop();
11682 RefPop();
11683 break;
11684 case kS128:
11685 EMIT_INSTR_HANDLER_WITH_PC(s2s_S128ArraySet, instr.pc);
11686 S128Pop();
11687 I32Pop();
11688 RefPop();
11689 break;
11690 case kRef:
11691 case kRefNull:
11692 EMIT_INSTR_HANDLER_WITH_PC(s2s_RefArraySet, instr.pc);
11693 RefPop();
11694 I32Pop();
11695 RefPop();
11696 break;
11697 default:
11698 UNREACHABLE();
11699 }
11700 break;
11701 }
11702
11703 case kExprArrayFill: {
11704 const ArrayType* array_type =
11705 module_->array_type(ModuleTypeIndex({instr.optional.index}));
11706 ValueKind kind = array_type->element_type().kind();
11707 switch (kind) {
11708 case kI8:
11709 EMIT_INSTR_HANDLER_WITH_PC(s2s_I8ArrayFill, instr.pc);
11710 I32Pop(); // The size of the filled slice.
11711 I32Pop(); // The value with which to fill the array.
11712 I32Pop(); // The offset at which to begin filling.
11713 RefPop(); // The array to fill.
11714 break;
11715 case kI16:
11716 EMIT_INSTR_HANDLER_WITH_PC(s2s_I16ArrayFill, instr.pc);
11717 I32Pop();
11718 I32Pop();
11719 I32Pop();
11720 RefPop();
11721 break;
11722 case kI32:
11723 EMIT_INSTR_HANDLER_WITH_PC(s2s_I32ArrayFill, instr.pc);
11724 I32Pop();
11725 I32Pop();
11726 I32Pop();
11727 RefPop();
11728 break;
11729 case kI64:
11730 EMIT_INSTR_HANDLER_WITH_PC(s2s_I64ArrayFill, instr.pc);
11731 I32Pop();
11732 I64Pop();
11733 I32Pop();
11734 RefPop();
11735 break;
11736 case kF32:
11737 EMIT_INSTR_HANDLER_WITH_PC(s2s_F32ArrayFill, instr.pc);
11738 I32Pop();
11739 F32Pop();
11740 I32Pop();
11741 RefPop();
11742 break;
11743 case kF64:
11744 EMIT_INSTR_HANDLER_WITH_PC(s2s_F64ArrayFill, instr.pc);
11745 I32Pop();
11746 F64Pop();
11747 I32Pop();
11748 RefPop();
11749 break;
11750 case kS128:
11751 EMIT_INSTR_HANDLER_WITH_PC(s2s_S128ArrayFill, instr.pc);
11752 I32Pop();
11753 S128Pop();
11754 I32Pop();
11755 RefPop();
11756 break;
11757 case kRef:
11758 case kRefNull:
11759 EMIT_INSTR_HANDLER_WITH_PC(s2s_RefArrayFill, instr.pc);
11760 I32Pop();
11761 RefPop();
11762 I32Pop();
11763 RefPop();
11764 break;
11765 default:
11766 UNREACHABLE();
11767 }
11768 break;
11769 }
11770
11771 case kExprRefI31: {
11772 EMIT_INSTR_HANDLER(s2s_RefI31);
11773 I32Pop();
11774 RefPush(kWasmRefI31);
11775 break;
11776 }
11777
11778 case kExprI31GetS: {
11779 EMIT_INSTR_HANDLER_WITH_PC(s2s_I31GetS, instr.pc);
11780 RefPop();
11781 I32Push();
11782 break;
11783 }
11784
11785 case kExprI31GetU: {
11786 EMIT_INSTR_HANDLER_WITH_PC(s2s_I31GetU, instr.pc);
11787 RefPop();
11788 I32Push();
11789 break;
11790 }
11791
11792 case kExprRefCast:
11793 case kExprRefCastNull: {
11794 bool null_succeeds = (instr.opcode == kExprRefCastNull);
11795 HeapType target_type = HeapType::FromBits(
11796 instr.optional.gc_heap_type_immediate.heap_type_bit_field);
11797 ValueType resulting_value_type = ValueType::RefMaybeNull(
11798 target_type, null_succeeds ? kNullable : kNonNullable);
11799
11800 ValueType obj_type = slots_[stack_.back()].value_type;
11801 DCHECK(obj_type.is_object_reference());
11802
11803 // This logic ensures that code generation can assume that functions
11804 // can only be cast to function types, and data objects to data types.
11805 if (V8_UNLIKELY(TypeCheckAlwaysSucceeds(obj_type, target_type))) {
11806 if (obj_type.is_nullable() && !null_succeeds) {
11807 EMIT_INSTR_HANDLER_WITH_PC(s2s_AssertNotNullTypecheck, instr.pc);
11808 ValueType value_type = RefPop();
11809 EmitI32Const(value_type.raw_bit_field());
11810 RefPush(resulting_value_type);
11811 } else {
11812 // Just forward the ref object.
11813 }
11814 } else if (V8_UNLIKELY(TypeCheckAlwaysFails(obj_type, target_type,
11815 null_succeeds))) {
11816 // Unrelated types. The only way this will not trap is if the object
11817 // is null.
11818 if (obj_type.is_nullable() && null_succeeds) {
11819 EMIT_INSTR_HANDLER_WITH_PC(s2s_AssertNullTypecheck, instr.pc);
11820 ValueType value_type = RefPop();
11821 EmitI32Const(value_type.raw_bit_field());
11822 RefPush(resulting_value_type);
11823 } else {
11824 // In this case we just trap.
11825 EMIT_INSTR_HANDLER_WITH_PC(s2s_TrapIllegalCast, instr.pc);
11826 }
11827 } else {
11828 if (instr.opcode == kExprRefCast) {
11829 EMIT_INSTR_HANDLER_WITH_PC(s2s_RefCast, instr.pc);
11830 } else {
11831 EMIT_INSTR_HANDLER_WITH_PC(s2s_RefCastNull, instr.pc);
11832 }
11833 EmitI32Const(instr.optional.gc_heap_type_immediate.heap_type_bit_field);
11834 ValueType value_type = RefPop();
11835 EmitI32Const(value_type.raw_bit_field());
11836 RefPush(resulting_value_type);
11837 }
11838 break;
11839 }
11840
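// ref.test follows the same three-way split as ref.cast above, but instead
// of trapping it materializes the outcome as an i32 boolean: statically
// known results emit RefTestSucceeds/RefTestFails (or a plain null check),
// and only the remaining cases emit a full runtime type check.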
11841 case kExprRefTest:
11842 case kExprRefTestNull: {
11843 bool null_succeeds = (instr.opcode == kExprRefTestNull);
11844 HeapType target_type = HeapType::FromBits(
11845 instr.optional.gc_heap_type_immediate.heap_type_bit_field);
11846
11847 ValueType obj_type = slots_[stack_.back()].value_type;
11848 DCHECK(obj_type.is_object_reference());
11849
11850 // This logic ensures that code generation can assume that functions
11851 // can only be cast to function types, and data objects to data types.
11852 if (V8_UNLIKELY(TypeCheckAlwaysSucceeds(obj_type, target_type))) {
11853 // Type checking can still fail for null.
11854 if (obj_type.is_nullable() && !null_succeeds) {
11855 EMIT_INSTR_HANDLER(s2s_RefIsNonNull);
11856 RefPop();
11857 I32Push(); // bool
11858 } else {
11859 EMIT_INSTR_HANDLER(s2s_RefTestSucceeds);
11860 RefPop();
11861 I32Push(); // bool=true
11862 }
11863 } else if (V8_UNLIKELY(TypeCheckAlwaysFails(obj_type, target_type,
11864 null_succeeds))) {
11865 EMIT_INSTR_HANDLER(s2s_RefTestFails);
11866 RefPop();
11867 I32Push(); // bool=false
11868 } else {
11869 if (instr.opcode == kExprRefTest) {
11870 EMIT_INSTR_HANDLER(s2s_RefTest);
11871 } else {
11872 EMIT_INSTR_HANDLER(s2s_RefTestNull);
11873 }
11874 EmitI32Const(instr.optional.gc_heap_type_immediate.heap_type_bit_field);
11875 ValueType value_type = RefPop();
11876 EmitI32Const(value_type.raw_bit_field());
11877 I32Push(); // bool
11878 }
11879 break;
11880 }
11881
11882 case kExprAnyConvertExtern: {
11883 EMIT_INSTR_HANDLER_WITH_PC(s2s_AnyConvertExtern, instr.pc);
11884 ValueType extern_val = RefPop();
11885 ValueType intern_type = ValueType::RefMaybeNull(
11886 kWasmAnyRef, Nullability(extern_val.is_nullable()));
11887 RefPush(intern_type);
11888 break;
11889 }
11890
11891 case kExprExternConvertAny: {
11892 EMIT_INSTR_HANDLER(s2s_ExternConvertAny);
11893 ValueType any_val = RefPop();
11894 ValueType extern_type = ValueType::RefMaybeNull(
11895 kWasmExternRef, Nullability(any_val.is_nullable()));
11896 RefPush(extern_type);
11897 break;
11898 }
11899
11900 case kExprMemoryInit:
11901 EMIT_MEM64_INSTR_HANDLER_WITH_PC(s2s_MemoryInit, s2s_Memory64Init,
11902 is_memory64_, instr.pc);
11903 EmitI32Const(instr.optional.index);
11904 I32Pop();
11905 I32Pop();
11906 MemIndexPop();
11907 break;
11908
11909 case kExprDataDrop:
11910 EMIT_INSTR_HANDLER(s2s_DataDrop);
11911 EmitI32Const(instr.optional.index);
11912 break;
11913
11914 case kExprMemoryCopy:
11915 EMIT_MEM64_INSTR_HANDLER_WITH_PC(s2s_MemoryCopy, s2s_Memory64Copy,
11916 is_memory64_, instr.pc);
11917 MemIndexPop();
11918 MemIndexPop();
11919 MemIndexPop();
11920 break;
11921
11922 case kExprMemoryFill:
11923 EMIT_MEM64_INSTR_HANDLER_WITH_PC(s2s_MemoryFill, s2s_Memory64Fill,
11924 is_memory64_, instr.pc);
11925 MemIndexPop();
11926 I32Pop();
11927 MemIndexPop();
11928 break;
11929
11930 case kExprTableInit: {
11931 bool is_table64 = module_->tables[instr.optional.index].is_table64();
11932 EMIT_MEM64_INSTR_HANDLER_WITH_PC(s2s_TableInit, s2s_Table64Init,
11933 is_table64, instr.pc);
11934
11935 EmitI32Const(instr.optional.table_init.table_index);
11936 EmitI32Const(instr.optional.table_init.element_segment_index);
11937 I32Pop();
11938 I32Pop();
11939 is_table64 ? I64Pop() : I32Pop();
11940 } break;
11941
11942 case kExprElemDrop:
11943 EMIT_INSTR_HANDLER(s2s_ElemDrop);
11944 EmitI32Const(instr.optional.index);
11945 break;
11946
11947 case kExprTableCopy: {
11948 bool is_src_table64 =
11949 module_->tables[instr.optional.table_copy.src_table_index]
11950 .is_table64();
11951 bool is_dst_table64 =
11952 module_->tables[instr.optional.table_copy.dst_table_index]
11953 .is_table64();
11954
11955 if (is_src_table64) {
11956 if (is_dst_table64) {
11957 EMIT_INSTR_HANDLER_WITH_PC(s2s_Table64Copy_64_64_64, instr.pc);
11958 } else {
11959 EMIT_INSTR_HANDLER_WITH_PC(s2s_Table64Copy_32_64_32, instr.pc);
11960 }
11961 } else {
11962 if (is_dst_table64) {
11963 EMIT_INSTR_HANDLER_WITH_PC(s2s_Table64Copy_64_32_32, instr.pc);
11964 } else {
11965 EMIT_INSTR_HANDLER_WITH_PC(s2s_TableCopy, instr.pc);
11966 }
11967 }
11968 EmitI32Const(instr.optional.table_copy.dst_table_index);
11969 EmitI32Const(instr.optional.table_copy.src_table_index);
11970 is_dst_table64 && is_src_table64 ? I64Pop() : I32Pop();
11971 is_src_table64 ? I64Pop() : I32Pop();
11972 is_dst_table64 ? I64Pop() : I32Pop();
11973 } break;
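// The four table.copy handlers encode which of the destination, source and
// length operands are 64-bit. As a sketch of the operand order (wasm-level,
// not from this file):
//   table.copy $dst $src   ;; stack: [.., d, s, n] -> [..]
// pops n first, then s, then d; n is popped as an i64 only when both
// tables are table64.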
11974
11975 case kExprTableGrow: {
11976 bool is_table64 = module_->tables[instr.optional.index].is_table64();
11977 if (is_table64) {
11978 EMIT_INSTR_HANDLER(s2s_Table64Grow);
11979 EmitI32Const(instr.optional.index);
11980 I64Pop();
11981 RefPop();
11982 I64Push();
11983 } else {
11984 EMIT_INSTR_HANDLER(s2s_TableGrow);
11985 EmitI32Const(instr.optional.index);
11986 I32Pop();
11987 RefPop();
11988 I32Push();
11989 }
11990 } break;
11991
11992 case kExprTableSize: {
11993 bool is_table64 = module_->tables[instr.optional.index].is_table64();
11994 if (is_table64) {
11995 EMIT_INSTR_HANDLER(s2s_Table64Size);
11996 EmitI32Const(instr.optional.index);
11997 I64Push();
11998 } else {
11999 EMIT_INSTR_HANDLER(s2s_TableSize);
12000 EmitI32Const(instr.optional.index);
12001 I32Push();
12002 }
12003 } break;
12004
12005 case kExprTableFill: {
12006 bool is_table64 = module_->tables[instr.optional.index].is_table64();
12007 if (is_table64) {
12008 EMIT_INSTR_HANDLER_WITH_PC(s2s_Table64Fill, instr.pc);
12009 EmitI32Const(instr.optional.index);
12010 I64Pop();
12011 RefPop();
12012 I64Pop();
12013 } else {
12014 EMIT_INSTR_HANDLER_WITH_PC(s2s_TableFill, instr.pc);
12015 EmitI32Const(instr.optional.index);
12016 I32Pop();
12017 RefPop();
12018 I32Pop();
12019 }
12020 } break;
12021
12022 case kExprAtomicNotify:
12023 EMIT_MEM64_INSTR_HANDLER_WITH_PC(s2s_AtomicNotify, s2s_AtomicNotify_Idx64,
12024 is_memory64_, instr.pc);
12025 I32Pop(); // val
12026 EmitMemoryOffset(instr.optional.offset);
12027 MemIndexPop(); // memory index
12028 I32Push();
12029 break;
12030
12031 case kExprI32AtomicWait:
12032 EMIT_MEM64_INSTR_HANDLER_WITH_PC(
12033 s2s_I32AtomicWait, s2s_I32AtomicWait_Idx64, is_memory64_, instr.pc);
12034 I64Pop(); // timeout
12035 I32Pop(); // val
12036 EmitMemoryOffset(instr.optional.offset);
12037 MemIndexPop(); // memory index
12038 I32Push();
12039 break;
12040
12041 case kExprI64AtomicWait:
12042 EMIT_MEM64_INSTR_HANDLER_WITH_PC(
12043 s2s_I64AtomicWait, s2s_I64AtomicWait_Idx64, is_memory64_, instr.pc);
12044 I64Pop(); // timeout
12045 I64Pop(); // val
12046 EmitMemoryOffset(instr.optional.offset);
12047 MemIndexPop(); // memory index
12048 I32Push();
12049 break;
12050
12051 case kExprAtomicFence:
12052 EMIT_INSTR_HANDLER(s2s_AtomicFence);
12053 break;
12054
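// The atomic access macros below share one emission shape: pop the value
// operand(s), if any, emit the static memory offset, pop the memory index
// (via the _Idx64 variants for memory64), and push the result (stores push
// nothing).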
12055#define ATOMIC_BINOP(name, Type, ctype, type, op_ctype, op_type, operation) \
12056 case kExpr##name: { \
12057 EMIT_MEM64_INSTR_HANDLER_WITH_PC(s2s_##name, s2s_##name##_Idx64, \
12058 is_memory64_, instr.pc); \
12059 op_type##Pop(); \
12060 EmitMemoryOffset(instr.optional.offset); \
12061 MemIndexPop(); \
12062 op_type##Push(); \
12063 return RegMode::kNoReg; \
12064 }
12065 FOREACH_ATOMIC_BINOP(ATOMIC_BINOP)
12066#undef ATOMIC_BINOP
12067
12068#define ATOMIC_COMPARE_EXCHANGE_OP(name, Type, ctype, type, op_ctype, op_type) \
12069 case kExpr##name: { \
12070 EMIT_MEM64_INSTR_HANDLER_WITH_PC(s2s_##name, s2s_##name##_Idx64, \
12071 is_memory64_, instr.pc); \
12072 op_type##Pop(); \
12073 op_type##Pop(); \
12074 EmitMemoryOffset(instr.optional.offset); \
12075 MemIndexPop(); \
12076 op_type##Push(); \
12077 return RegMode::kNoReg; \
12078 }
12079 FOREACH_ATOMIC_COMPARE_EXCHANGE_OP(ATOMIC_COMPARE_EXCHANGE_OP)
12080#undef ATOMIC_COMPARE_EXCHANGE_OP
12081
12082#define ATOMIC_LOAD_OP(name, Type, ctype, type, op_ctype, op_type) \
12083 case kExpr##name: { \
12084 EMIT_MEM64_INSTR_HANDLER_WITH_PC(s2s_##name, s2s_##name##_Idx64, \
12085 is_memory64_, instr.pc); \
12086 EmitMemoryOffset(instr.optional.offset); \
12087 MemIndexPop(); \
12088 op_type##Push(); \
12089 return RegMode::kNoReg; \
12090 }
12091 FOREACH_ATOMIC_LOAD_OP(ATOMIC_LOAD_OP)
12092#undef ATOMIC_LOAD_OP
12093
12094#define ATOMIC_STORE_OP(name, Type, ctype, type, op_ctype, op_type) \
12095 case kExpr##name: { \
12096 EMIT_MEM64_INSTR_HANDLER_WITH_PC(s2s_##name, s2s_##name##_Idx64, \
12097 is_memory64_, instr.pc); \
12098 op_type##Pop(); \
12099 EmitMemoryOffset(instr.optional.offset); \
12100 MemIndexPop(); \
12101 return RegMode::kNoReg; \
12102 }
12103 FOREACH_ATOMIC_STORE_OP(ATOMIC_STORE_OP)
12104#undef ATOMIC_STORE_OP
12105
12106#define SPLAT_CASE(format, stype, valType, op_type, num) \
12107 case kExpr##format##Splat: { \
12108 EMIT_INSTR_HANDLER(s2s_Simd##format##Splat); \
12109 op_type##Pop(); \
12110 S128Push(); \
12111 return RegMode::kNoReg; \
12112 }
12113 SPLAT_CASE(F64x2, float64x2, double, F64, 2)
12114 SPLAT_CASE(F32x4, float32x4, float, F32, 4)
12115 SPLAT_CASE(I64x2, int64x2, int64_t, I64, 2)
12116 SPLAT_CASE(I32x4, int32x4, int32_t, I32, 4)
12117 SPLAT_CASE(I16x8, int16x8, int32_t, I32, 8)
12118 SPLAT_CASE(I8x16, int8x16, int32_t, I32, 16)
12119#undef SPLAT_CASE
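// Lane immediates below are emitted as 16-bit constants even though wasm
// encodes the lane index in a single byte (see the "emit 8 bits ?" notes).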
12120
12121#define EXTRACT_LANE_CASE(format, stype, op_type, name) \
12122 case kExpr##format##ExtractLane: { \
12123 EMIT_INSTR_HANDLER(s2s_Simd##format##ExtractLane); \
12124 /* emit 8 bits ? */ \
12125 EmitI16Const(instr.optional.simd_lane); \
12126 S128Pop(); \
12127 op_type##Push(); \
12128 return RegMode::kNoReg; \
12129 }
12130 EXTRACT_LANE_CASE(F64x2, float64x2, F64, f64x2)
12131 EXTRACT_LANE_CASE(F32x4, float32x4, F32, f32x4)
12132 EXTRACT_LANE_CASE(I64x2, int64x2, I64, i64x2)
12133 EXTRACT_LANE_CASE(I32x4, int32x4, I32, i32x4)
12134#undef EXTRACT_LANE_CASE
12135
12136#define EXTRACT_LANE_EXTEND_CASE(format, stype, name, sign, extended_type) \
12137 case kExpr##format##ExtractLane##sign: { \
12138 EMIT_INSTR_HANDLER(s2s_Simd##format##ExtractLane##sign); \
12139 /* emit 8 bits ? */ \
12140 EmitI16Const(instr.optional.simd_lane); \
12141 S128Pop(); \
12142 I32Push(); \
12143 return RegMode::kNoReg; \
12144 }
12145 EXTRACT_LANE_EXTEND_CASE(I16x8, int16x8, i16x8, S, int32_t)
12146 EXTRACT_LANE_EXTEND_CASE(I16x8, int16x8, i16x8, U, uint32_t)
12147 EXTRACT_LANE_EXTEND_CASE(I8x16, int8x16, i8x16, S, int32_t)
12148 EXTRACT_LANE_EXTEND_CASE(I8x16, int8x16, i8x16, U, uint32_t)
12149#undef EXTRACT_LANE_EXTEND_CASE
12150
12151#define BINOP_CASE(op, name, stype, count, expr) \
12152 case kExpr##op: { \
12153 EMIT_INSTR_HANDLER(s2s_Simd##op); \
12154 S128Pop(); \
12155 S128Pop(); \
12156 S128Push(); \
12157 return RegMode::kNoReg; \
12158 }
12159 BINOP_CASE(F64x2Add, f64x2, float64x2, 2, a + b)
12160 BINOP_CASE(F64x2Sub, f64x2, float64x2, 2, a - b)
12161 BINOP_CASE(F64x2Mul, f64x2, float64x2, 2, a * b)
12162 BINOP_CASE(F64x2Div, f64x2, float64x2, 2, base::Divide(a, b))
12163 BINOP_CASE(F64x2Min, f64x2, float64x2, 2, JSMin(a, b))
12164 BINOP_CASE(F64x2Max, f64x2, float64x2, 2, JSMax(a, b))
12165 BINOP_CASE(F64x2Pmin, f64x2, float64x2, 2, std::min(a, b))
12166 BINOP_CASE(F64x2Pmax, f64x2, float64x2, 2, std::max(a, b))
12167 BINOP_CASE(F32x4RelaxedMin, f32x4, float32x4, 4, std::min(a, b))
12168 BINOP_CASE(F32x4RelaxedMax, f32x4, float32x4, 4, std::max(a, b))
12169 BINOP_CASE(F64x2RelaxedMin, f64x2, float64x2, 2, std::min(a, b))
12170 BINOP_CASE(F64x2RelaxedMax, f64x2, float64x2, 2, std::max(a, b))
12171 BINOP_CASE(F32x4Add, f32x4, float32x4, 4, a + b)
12172 BINOP_CASE(F32x4Sub, f32x4, float32x4, 4, a - b)
12173 BINOP_CASE(F32x4Mul, f32x4, float32x4, 4, a * b)
12174 BINOP_CASE(F32x4Div, f32x4, float32x4, 4, a / b)
12175 BINOP_CASE(F32x4Min, f32x4, float32x4, 4, JSMin(a, b))
12176 BINOP_CASE(F32x4Max, f32x4, float32x4, 4, JSMax(a, b))
12177 BINOP_CASE(F32x4Pmin, f32x4, float32x4, 4, std::min(a, b))
12178 BINOP_CASE(F32x4Pmax, f32x4, float32x4, 4, std::max(a, b))
12179 BINOP_CASE(I64x2Add, i64x2, int64x2, 2, base::AddWithWraparound(a, b))
12180 BINOP_CASE(I64x2Sub, i64x2, int64x2, 2, base::SubWithWraparound(a, b))
12181 BINOP_CASE(I64x2Mul, i64x2, int64x2, 2, base::MulWithWraparound(a, b))
12182 BINOP_CASE(I32x4Add, i32x4, int32x4, 4, base::AddWithWraparound(a, b))
12183 BINOP_CASE(I32x4Sub, i32x4, int32x4, 4, base::SubWithWraparound(a, b))
12184 BINOP_CASE(I32x4Mul, i32x4, int32x4, 4, base::MulWithWraparound(a, b))
12185 BINOP_CASE(I32x4MinS, i32x4, int32x4, 4, a < b ? a : b)
12186 BINOP_CASE(I32x4MinU, i32x4, int32x4, 4,
12187 static_cast<uint32_t>(a) < static_cast<uint32_t>(b) ? a : b)
12188 BINOP_CASE(I32x4MaxS, i32x4, int32x4, 4, a > b ? a : b)
12189 BINOP_CASE(I32x4MaxU, i32x4, int32x4, 4,
12190 static_cast<uint32_t>(a) > static_cast<uint32_t>(b) ? a : b)
12191 BINOP_CASE(S128And, i32x4, int32x4, 4, a & b)
12192 BINOP_CASE(S128Or, i32x4, int32x4, 4, a | b)
12193 BINOP_CASE(S128Xor, i32x4, int32x4, 4, a ^ b)
12194 BINOP_CASE(S128AndNot, i32x4, int32x4, 4, a & ~b)
12195 BINOP_CASE(I16x8Add, i16x8, int16x8, 8, base::AddWithWraparound(a, b))
12196 BINOP_CASE(I16x8Sub, i16x8, int16x8, 8, base::SubWithWraparound(a, b))
12197 BINOP_CASE(I16x8Mul, i16x8, int16x8, 8, base::MulWithWraparound(a, b))
12198 BINOP_CASE(I16x8MinS, i16x8, int16x8, 8, a < b ? a : b)
12199 BINOP_CASE(I16x8MinU, i16x8, int16x8, 8,
12200 static_cast<uint16_t>(a) < static_cast<uint16_t>(b) ? a : b)
12201 BINOP_CASE(I16x8MaxS, i16x8, int16x8, 8, a > b ? a : b)
12202 BINOP_CASE(I16x8MaxU, i16x8, int16x8, 8,
12203 static_cast<uint16_t>(a) > static_cast<uint16_t>(b) ? a : b)
12204 BINOP_CASE(I16x8AddSatS, i16x8, int16x8, 8, SaturateAdd<int16_t>(a, b))
12205 BINOP_CASE(I16x8AddSatU, i16x8, int16x8, 8, SaturateAdd<uint16_t>(a, b))
12206 BINOP_CASE(I16x8SubSatS, i16x8, int16x8, 8, SaturateSub<int16_t>(a, b))
12207 BINOP_CASE(I16x8SubSatU, i16x8, int16x8, 8, SaturateSub<uint16_t>(a, b))
12208 BINOP_CASE(I16x8RoundingAverageU, i16x8, int16x8, 8,
12209 RoundingAverageUnsigned<uint16_t>(a, b))
12210 BINOP_CASE(I16x8Q15MulRSatS, i16x8, int16x8, 8,
12211 SaturateRoundingQMul<int16_t>(a, b))
12212 BINOP_CASE(I16x8RelaxedQ15MulRS, i16x8, int16x8, 8,
12213 SaturateRoundingQMul<int16_t>(a, b))
12214 BINOP_CASE(I8x16Add, i8x16, int8x16, 16, base::AddWithWraparound(a, b))
12215 BINOP_CASE(I8x16Sub, i8x16, int8x16, 16, base::SubWithWraparound(a, b))
12216 BINOP_CASE(I8x16MinS, i8x16, int8x16, 16, a < b ? a : b)
12217 BINOP_CASE(I8x16MinU, i8x16, int8x16, 16,
12218 static_cast<uint8_t>(a) < static_cast<uint8_t>(b) ? a : b)
12219 BINOP_CASE(I8x16MaxS, i8x16, int8x16, 16, a > b ? a : b)
12220 BINOP_CASE(I8x16MaxU, i8x16, int8x16, 16,
12221 static_cast<uint8_t>(a) > static_cast<uint8_t>(b) ? a : b)
12222 BINOP_CASE(I8x16AddSatS, i8x16, int8x16, 16, SaturateAdd<int8_t>(a, b))
12223 BINOP_CASE(I8x16AddSatU, i8x16, int8x16, 16, SaturateAdd<uint8_t>(a, b))
12224 BINOP_CASE(I8x16SubSatS, i8x16, int8x16, 16, SaturateSub<int8_t>(a, b))
12225 BINOP_CASE(I8x16SubSatU, i8x16, int8x16, 16, SaturateSub<uint8_t>(a, b))
12226 BINOP_CASE(I8x16RoundingAverageU, i8x16, int8x16, 16,
12227 RoundingAverageUnsigned<uint8_t>(a, b))
12228#undef BINOP_CASE
12229
12230#define UNOP_CASE(op, name, stype, count, expr) \
12231 case kExpr##op: { \
12232 EMIT_INSTR_HANDLER(s2s_Simd##op); \
12233 S128Pop(); \
12234 S128Push(); \
12235 return RegMode::kNoReg; \
12236 }
12237 UNOP_CASE(F64x2Abs, f64x2, float64x2, 2, std::abs(a))
12238 UNOP_CASE(F64x2Neg, f64x2, float64x2, 2, -a)
12239 UNOP_CASE(F64x2Sqrt, f64x2, float64x2, 2, std::sqrt(a))
12240 UNOP_CASE(F64x2Ceil, f64x2, float64x2, 2,
12241 (AixFpOpWorkaround<double, &ceil>(a)))
12242 UNOP_CASE(F64x2Floor, f64x2, float64x2, 2,
12243 (AixFpOpWorkaround<double, &floor>(a)))
12244 UNOP_CASE(F64x2Trunc, f64x2, float64x2, 2,
12245 (AixFpOpWorkaround<double, &trunc>(a)))
12246 UNOP_CASE(F64x2NearestInt, f64x2, float64x2, 2,
12247 (AixFpOpWorkaround<double, &nearbyint>(a)))
12248 UNOP_CASE(F32x4Abs, f32x4, float32x4, 4, std::abs(a))
12249 UNOP_CASE(F32x4Neg, f32x4, float32x4, 4, -a)
12250 UNOP_CASE(F32x4Sqrt, f32x4, float32x4, 4, std::sqrt(a))
12251 UNOP_CASE(F32x4Ceil, f32x4, float32x4, 4,
12252 (AixFpOpWorkaround<float, &ceilf>(a)))
12253 UNOP_CASE(F32x4Floor, f32x4, float32x4, 4,
12254 (AixFpOpWorkaround<float, &floorf>(a)))
12255 UNOP_CASE(F32x4Trunc, f32x4, float32x4, 4,
12256 (AixFpOpWorkaround<float, &truncf>(a)))
12257 UNOP_CASE(F32x4NearestInt, f32x4, float32x4, 4,
12258 (AixFpOpWorkaround<float, &nearbyintf>(a)))
12259 UNOP_CASE(I64x2Neg, i64x2, int64x2, 2, base::NegateWithWraparound(a))
12260 UNOP_CASE(I32x4Neg, i32x4, int32x4, 4, base::NegateWithWraparound(a))
12261 // Use llabs which will work correctly on both 64-bit and 32-bit.
12262 UNOP_CASE(I64x2Abs, i64x2, int64x2, 2, std::llabs(a))
12263 UNOP_CASE(I32x4Abs, i32x4, int32x4, 4, std::abs(a))
12264 UNOP_CASE(S128Not, i32x4, int32x4, 4, ~a)
12265 UNOP_CASE(I16x8Neg, i16x8, int16x8, 8, base::NegateWithWraparound(a))
12266 UNOP_CASE(I16x8Abs, i16x8, int16x8, 8, std::abs(a))
12267 UNOP_CASE(I8x16Neg, i8x16, int8x16, 16, base::NegateWithWraparound(a))
12268 UNOP_CASE(I8x16Abs, i8x16, int8x16, 16, std::abs(a))
12269 UNOP_CASE(I8x16Popcnt, i8x16, int8x16, 16,
12270 base::bits::CountPopulation(a))
12271#undef UNOP_CASE
12272
12273#define BITMASK_CASE(op, name, stype, count) \
12274 case kExpr##op: { \
12275 EMIT_INSTR_HANDLER(s2s_Simd##op); \
12276 S128Pop(); \
12277 I32Push(); \
12278 return RegMode::kNoReg; \
12279 }
12280 BITMASK_CASE(I8x16BitMask, i8x16, int8x16, 16)
12281 BITMASK_CASE(I16x8BitMask, i16x8, int16x8, 8)
12282 BITMASK_CASE(I32x4BitMask, i32x4, int32x4, 4)
12283 BITMASK_CASE(I64x2BitMask, i64x2, int64x2, 2)
12284#undef BITMASK_CASE
12285
12286#define CMPOP_CASE(op, name, stype, out_stype, count, expr) \
12287 case kExpr##op: { \
12288 EMIT_INSTR_HANDLER(s2s_Simd##op); \
12289 S128Pop(); \
12290 S128Pop(); \
12291 S128Push(); \
12292 return RegMode::kNoReg; \
12293 }
12294 CMPOP_CASE(F64x2Eq, f64x2, float64x2, int64x2, 2, a == b)
12295 CMPOP_CASE(F64x2Ne, f64x2, float64x2, int64x2, 2, a != b)
12296 CMPOP_CASE(F64x2Gt, f64x2, float64x2, int64x2, 2, a > b)
12297 CMPOP_CASE(F64x2Ge, f64x2, float64x2, int64x2, 2, a >= b)
12298 CMPOP_CASE(F64x2Lt, f64x2, float64x2, int64x2, 2, a < b)
12299 CMPOP_CASE(F64x2Le, f64x2, float64x2, int64x2, 2, a <= b)
12300 CMPOP_CASE(F32x4Eq, f32x4, float32x4, int32x4, 4, a == b)
12301 CMPOP_CASE(F32x4Ne, f32x4, float32x4, int32x4, 4, a != b)
12302 CMPOP_CASE(F32x4Gt, f32x4, float32x4, int32x4, 4, a > b)
12303 CMPOP_CASE(F32x4Ge, f32x4, float32x4, int32x4, 4, a >= b)
12304 CMPOP_CASE(F32x4Lt, f32x4, float32x4, int32x4, 4, a < b)
12305 CMPOP_CASE(F32x4Le, f32x4, float32x4, int32x4, 4, a <= b)
12306 CMPOP_CASE(I64x2Eq, i64x2, int64x2, int64x2, 2, a == b)
12307 CMPOP_CASE(I64x2Ne, i64x2, int64x2, int64x2, 2, a != b)
12308 CMPOP_CASE(I64x2LtS, i64x2, int64x2, int64x2, 2, a < b)
12309 CMPOP_CASE(I64x2GtS, i64x2, int64x2, int64x2, 2, a > b)
12310 CMPOP_CASE(I64x2LeS, i64x2, int64x2, int64x2, 2, a <= b)
12311 CMPOP_CASE(I64x2GeS, i64x2, int64x2, int64x2, 2, a >= b)
12312 CMPOP_CASE(I32x4Eq, i32x4, int32x4, int32x4, 4, a == b)
12313 CMPOP_CASE(I32x4Ne, i32x4, int32x4, int32x4, 4, a != b)
12314 CMPOP_CASE(I32x4GtS, i32x4, int32x4, int32x4, 4, a > b)
12315 CMPOP_CASE(I32x4GeS, i32x4, int32x4, int32x4, 4, a >= b)
12316 CMPOP_CASE(I32x4LtS, i32x4, int32x4, int32x4, 4, a < b)
12317 CMPOP_CASE(I32x4LeS, i32x4, int32x4, int32x4, 4, a <= b)
12318 CMPOP_CASE(I32x4GtU, i32x4, int32x4, int32x4, 4,
12319 static_cast<uint32_t>(a) > static_cast<uint32_t>(b))
12320 CMPOP_CASE(I32x4GeU, i32x4, int32x4, int32x4, 4,
12321 static_cast<uint32_t>(a) >= static_cast<uint32_t>(b))
12322 CMPOP_CASE(I32x4LtU, i32x4, int32x4, int32x4, 4,
12323 static_cast<uint32_t>(a) < static_cast<uint32_t>(b))
12324 CMPOP_CASE(I32x4LeU, i32x4, int32x4, int32x4, 4,
12325 static_cast<uint32_t>(a) <= static_cast<uint32_t>(b))
12326 CMPOP_CASE(I16x8Eq, i16x8, int16x8, int16x8, 8, a == b)
12327 CMPOP_CASE(I16x8Ne, i16x8, int16x8, int16x8, 8, a != b)
12328 CMPOP_CASE(I16x8GtS, i16x8, int16x8, int16x8, 8, a > b)
12329 CMPOP_CASE(I16x8GeS, i16x8, int16x8, int16x8, 8, a >= b)
12330 CMPOP_CASE(I16x8LtS, i16x8, int16x8, int16x8, 8, a < b)
12331 CMPOP_CASE(I16x8LeS, i16x8, int16x8, int16x8, 8, a <= b)
12332 CMPOP_CASE(I16x8GtU, i16x8, int16x8, int16x8, 8,
12333 static_cast<uint16_t>(a) > static_cast<uint16_t>(b))
12334 CMPOP_CASE(I16x8GeU, i16x8, int16x8, int16x8, 8,
12335 static_cast<uint16_t>(a) >= static_cast<uint16_t>(b))
12336 CMPOP_CASE(I16x8LtU, i16x8, int16x8, int16x8, 8,
12337 static_cast<uint16_t>(a) < static_cast<uint16_t>(b))
12338 CMPOP_CASE(I16x8LeU, i16x8, int16x8, int16x8, 8,
12339 static_cast<uint16_t>(a) <= static_cast<uint16_t>(b))
12340 CMPOP_CASE(I8x16Eq, i8x16, int8x16, int8x16, 16, a == b)
12341 CMPOP_CASE(I8x16Ne, i8x16, int8x16, int8x16, 16, a != b)
12342 CMPOP_CASE(I8x16GtS, i8x16, int8x16, int8x16, 16, a > b)
12343 CMPOP_CASE(I8x16GeS, i8x16, int8x16, int8x16, 16, a >= b)
12344 CMPOP_CASE(I8x16LtS, i8x16, int8x16, int8x16, 16, a < b)
12345 CMPOP_CASE(I8x16LeS, i8x16, int8x16, int8x16, 16, a <= b)
12346 CMPOP_CASE(I8x16GtU, i8x16, int8x16, int8x16, 16,
12347 static_cast<uint8_t>(a) > static_cast<uint8_t>(b))
12348 CMPOP_CASE(I8x16GeU, i8x16, int8x16, int8x16, 16,
12349 static_cast<uint8_t>(a) >= static_cast<uint8_t>(b))
12350 CMPOP_CASE(I8x16LtU, i8x16, int8x16, int8x16, 16,
12351 static_cast<uint8_t>(a) < static_cast<uint8_t>(b))
12352 CMPOP_CASE(I8x16LeU, i8x16, int8x16, int8x16, 16,
12353 static_cast<uint8_t>(a) <= static_cast<uint8_t>(b))
12354#undef CMPOP_CASE
12355
12356#define REPLACE_LANE_CASE(format, name, stype, ctype, op_type) \
12357 case kExpr##format##ReplaceLane: { \
12358 EMIT_INSTR_HANDLER(s2s_Simd##format##ReplaceLane); \
12359 /* emit 8 bits ? */ \
12360 EmitI16Const(instr.optional.simd_lane); \
12361 op_type##Pop(); \
12362 S128Pop(); \
12363 S128Push(); \
12364 return RegMode::kNoReg; \
12365 }
12366 REPLACE_LANE_CASE(F64x2, f64x2, float64x2, double, F64)
12367 REPLACE_LANE_CASE(F32x4, f32x4, float32x4, float, F32)
12368 REPLACE_LANE_CASE(I64x2, i64x2, int64x2, int64_t, I64)
12369 REPLACE_LANE_CASE(I32x4, i32x4, int32x4, int32_t, I32)
12370 REPLACE_LANE_CASE(I16x8, i16x8, int16x8, int32_t, I32)
12371 REPLACE_LANE_CASE(I8x16, i8x16, int8x16, int32_t, I32)
12372#undef REPLACE_LANE_CASE
12373
12374 case kExprS128LoadMem: {
12375 EMIT_MEM64_INSTR_HANDLER_WITH_PC(s2s_SimdS128LoadMem,
12376 s2s_SimdS128LoadMem_Idx64, is_memory64_,
12377 instr.pc);
12378 EmitMemoryOffset(instr.optional.offset);
12379 MemIndexPop();
12380 S128Push();
12381 return RegMode::kNoReg;
12382 }
12383
12384 case kExprS128StoreMem: {
12385 EMIT_MEM64_INSTR_HANDLER_WITH_PC(s2s_SimdS128StoreMem,
12386 s2s_SimdS128StoreMem_Idx64, is_memory64_,
12387 instr.pc);
12388 S128Pop();
12389 EmitMemoryOffset(instr.optional.offset);
12390 MemIndexPop();
12391 return RegMode::kNoReg;
12392 }
12393
12394#define SHIFT_CASE(op, name, stype, count, expr) \
12395 case kExpr##op: { \
12396 EMIT_INSTR_HANDLER(s2s_Simd##op); \
12397 I32Pop(); \
12398 S128Pop(); \
12399 S128Push(); \
12400 return RegMode::kNoReg; \
12401 }
12402 SHIFT_CASE(I64x2Shl, i64x2, int64x2, 2,
12403 static_cast<uint64_t>(a) << (shift % 64))
12404 SHIFT_CASE(I64x2ShrS, i64x2, int64x2, 2, a >> (shift % 64))
12405 SHIFT_CASE(I64x2ShrU, i64x2, int64x2, 2,
12406 static_cast<uint64_t>(a) >> (shift % 64))
12407 SHIFT_CASE(I32x4Shl, i32x4, int32x4, 4,
12408 static_cast<uint32_t>(a) << (shift % 32))
12409 SHIFT_CASE(I32x4ShrS, i32x4, int32x4, 4, a >> (shift % 32))
12410 SHIFT_CASE(I32x4ShrU, i32x4, int32x4, 4,
12411 static_cast<uint32_t>(a) >> (shift % 32))
12412 SHIFT_CASE(I16x8Shl, i16x8, int16x8, 8,
12413 static_cast<uint16_t>(a) << (shift % 16))
12414 SHIFT_CASE(I16x8ShrS, i16x8, int16x8, 8, a >> (shift % 16))
12415 SHIFT_CASE(I16x8ShrU, i16x8, int16x8, 8,
12416 static_cast<uint16_t>(a) >> (shift % 16))
12417 SHIFT_CASE(I8x16Shl, i8x16, int8x16, 16,
12418 static_cast<uint8_t>(a) << (shift % 8))
12419 SHIFT_CASE(I8x16ShrS, i8x16, int8x16, 16, a >> (shift % 8))
12420 SHIFT_CASE(I8x16ShrU, i8x16, int8x16, 16,
12421 static_cast<uint8_t>(a) >> (shift % 8))
12422#undef SHIFT_CASE
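// SIMD shifts pop the i32 shift amount first, then the vector operand; as
// the expressions above show, the shift count is taken modulo the lane
// width, as wasm requires.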
12423
12424#define EXT_MUL_CASE(op) \
12425 case kExpr##op: { \
12426 EMIT_INSTR_HANDLER(s2s_Simd##op); \
12427 S128Pop(); \
12428 S128Pop(); \
12429 S128Push(); \
12430 return RegMode::kNoReg; \
12431 }
12432 EXT_MUL_CASE(I16x8ExtMulLowI8x16S)
12433 EXT_MUL_CASE(I16x8ExtMulHighI8x16S)
12434 EXT_MUL_CASE(I16x8ExtMulLowI8x16U)
12435 EXT_MUL_CASE(I16x8ExtMulHighI8x16U)
12436 EXT_MUL_CASE(I32x4ExtMulLowI16x8S)
12437 EXT_MUL_CASE(I32x4ExtMulHighI16x8S)
12438 EXT_MUL_CASE(I32x4ExtMulLowI16x8U)
12439 EXT_MUL_CASE(I32x4ExtMulHighI16x8U)
12440 EXT_MUL_CASE(I64x2ExtMulLowI32x4S)
12441 EXT_MUL_CASE(I64x2ExtMulHighI32x4S)
12442 EXT_MUL_CASE(I64x2ExtMulLowI32x4U)
12443 EXT_MUL_CASE(I64x2ExtMulHighI32x4U)
12444#undef EXT_MUL_CASE
12445
12446#define CONVERT_CASE(op, src_type, name, dst_type, count, start_index, ctype, \
12447 expr) \
12448 case kExpr##op: { \
12449 EMIT_INSTR_HANDLER(s2s_Simd##op); \
12450 S128Pop(); \
12451 S128Push(); \
12452 return RegMode::kNoReg; \
12453 }
12454 CONVERT_CASE(F32x4SConvertI32x4, int32x4, i32x4, float32x4, 4, 0, int32_t,
12455 static_cast<float>(a))
12456 CONVERT_CASE(F32x4UConvertI32x4, int32x4, i32x4, float32x4, 4, 0,
12457 uint32_t, static_cast<float>(a))
12458 CONVERT_CASE(I32x4SConvertF32x4, float32x4, f32x4, int32x4, 4, 0, float,
12459 base::saturated_cast<int32_t>(a))
12460 CONVERT_CASE(I32x4UConvertF32x4, float32x4, f32x4, int32x4, 4, 0, float,
12461 base::saturated_cast<uint32_t>(a))
12462 CONVERT_CASE(I32x4RelaxedTruncF32x4S, float32x4, f32x4, int32x4, 4, 0,
12463 float, base::saturated_cast<int32_t>(a))
12464 CONVERT_CASE(I32x4RelaxedTruncF32x4U, float32x4, f32x4, int32x4, 4, 0,
12465 float, base::saturated_cast<uint32_t>(a))
12466 CONVERT_CASE(I64x2SConvertI32x4Low, int32x4, i32x4, int64x2, 2, 0,
12467 int32_t, a)
12468 CONVERT_CASE(I64x2SConvertI32x4High, int32x4, i32x4, int64x2, 2, 2,
12469 int32_t, a)
12470 CONVERT_CASE(I64x2UConvertI32x4Low, int32x4, i32x4, int64x2, 2, 0,
12471 uint32_t, a)
12472 CONVERT_CASE(I64x2UConvertI32x4High, int32x4, i32x4, int64x2, 2, 2,
12473 uint32_t, a)
12474 CONVERT_CASE(I32x4SConvertI16x8High, int16x8, i16x8, int32x4, 4, 4,
12475 int16_t, a)
12476 CONVERT_CASE(I32x4UConvertI16x8High, int16x8, i16x8, int32x4, 4, 4,
12477 uint16_t, a)
12478 CONVERT_CASE(I32x4SConvertI16x8Low, int16x8, i16x8, int32x4, 4, 0,
12479 int16_t, a)
12480 CONVERT_CASE(I32x4UConvertI16x8Low, int16x8, i16x8, int32x4, 4, 0,
12481 uint16_t, a)
12482 CONVERT_CASE(I16x8SConvertI8x16High, int8x16, i8x16, int16x8, 8, 8,
12483 int8_t, a)
12484 CONVERT_CASE(I16x8UConvertI8x16High, int8x16, i8x16, int16x8, 8, 8,
12485 uint8_t, a)
12486 CONVERT_CASE(I16x8SConvertI8x16Low, int8x16, i8x16, int16x8, 8, 0, int8_t,
12487 a)
12488 CONVERT_CASE(I16x8UConvertI8x16Low, int8x16, i8x16, int16x8, 8, 0,
12489 uint8_t, a)
12490 CONVERT_CASE(F64x2ConvertLowI32x4S, int32x4, i32x4, float64x2, 2, 0,
12491 int32_t, static_cast<double>(a))
12492 CONVERT_CASE(F64x2ConvertLowI32x4U, int32x4, i32x4, float64x2, 2, 0,
12493 uint32_t, static_cast<double>(a))
12494 CONVERT_CASE(I32x4TruncSatF64x2SZero, float64x2, f64x2, int32x4, 2, 0,
12495 double, base::saturated_cast<int32_t>(a))
12496 CONVERT_CASE(I32x4TruncSatF64x2UZero, float64x2, f64x2, int32x4, 2, 0,
12497 double, base::saturated_cast<uint32_t>(a))
12498 CONVERT_CASE(I32x4RelaxedTruncF64x2SZero, float64x2, f64x2, int32x4, 2, 0,
12499 double, base::saturated_cast<int32_t>(a))
12500 CONVERT_CASE(I32x4RelaxedTruncF64x2UZero, float64x2, f64x2, int32x4, 2, 0,
12501 double, base::saturated_cast<uint32_t>(a))
12502 CONVERT_CASE(F32x4DemoteF64x2Zero, float64x2, f64x2, float32x4, 2, 0,
12503 float, DoubleToFloat32(a))
12504 CONVERT_CASE(F64x2PromoteLowF32x4, float32x4, f32x4, float64x2, 2, 0,
12505 float, static_cast<double>(a))
12506#undef CONVERT_CASE
12507
12508#define PACK_CASE(op, src_type, name, dst_type, count, dst_ctype) \
12509 case kExpr##op: { \
12510 EMIT_INSTR_HANDLER(s2s_Simd##op); \
12511 S128Pop(); \
12512 S128Pop(); \
12513 S128Push(); \
12514 return RegMode::kNoReg; \
12515 }
12516 PACK_CASE(I16x8SConvertI32x4, int32x4, i32x4, int16x8, 8, int16_t)
12517 PACK_CASE(I16x8UConvertI32x4, int32x4, i32x4, int16x8, 8, uint16_t)
12518 PACK_CASE(I8x16SConvertI16x8, int16x8, i16x8, int8x16, 16, int8_t)
12519 PACK_CASE(I8x16UConvertI16x8, int16x8, i16x8, int8x16, 16, uint8_t)
12520#undef PACK_CASE
12521
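// Lane-select opcodes (including the relaxed variants) consume three vector
// operands, two inputs plus the selection mask, and produce a single vector.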
12522#define SELECT_CASE(op) \
12523 case kExpr##op: { \
12524 EMIT_INSTR_HANDLER(s2s_Simd##op); \
12525 S128Pop(); \
12526 S128Pop(); \
12527 S128Pop(); \
12528 S128Push(); \
12529 return RegMode::kNoReg; \
12530 }
12531 SELECT_CASE(I8x16RelaxedLaneSelect)
12532 SELECT_CASE(I16x8RelaxedLaneSelect)
12533 SELECT_CASE(I32x4RelaxedLaneSelect)
12534 SELECT_CASE(I64x2RelaxedLaneSelect)
12535 SELECT_CASE(S128Select)
12536#undef SELECT_CASE
12537
12538 case kExprI32x4DotI16x8S: {
12539 EMIT_INSTR_HANDLER(s2s_SimdI32x4DotI16x8S);
12540 S128Pop();
12541 S128Pop();
12542 S128Push();
12543 return RegMode::kNoReg;
12544 }
12545
12546 case kExprS128Const: {
12547 PushConstSlot<Simd128>(
12548 simd_immediates_[instr.optional.simd_immediate_index]);
12549 return RegMode::kNoReg;
12550 }
12551
12552 case kExprI16x8DotI8x16I7x16S: {
12553 EMIT_INSTR_HANDLER(s2s_SimdI16x8DotI8x16I7x16S);
12554 S128Pop();
12555 S128Pop();
12556 S128Push();
12557 return RegMode::kNoReg;
12558 }
12559
12560 case kExprI32x4DotI8x16I7x16AddS: {
12561 EMIT_INSTR_HANDLER(s2s_SimdI32x4DotI8x16I7x16AddS);
12562 S128Pop();
12563 S128Pop();
12564 S128Pop();
12565 S128Push();
12566 return RegMode::kNoReg;
12567 }
12568
12569 case kExprI8x16RelaxedSwizzle: {
12570 EMIT_INSTR_HANDLER(s2s_SimdI8x16RelaxedSwizzle);
12571 S128Pop();
12572 S128Pop();
12573 S128Push();
12574 return RegMode::kNoReg;
12575 }
12576
12577 case kExprI8x16Swizzle: {
12578 EMIT_INSTR_HANDLER(s2s_SimdI8x16Swizzle);
12579 S128Pop();
12580 S128Pop();
12581 S128Push();
12582 return RegMode::kNoReg;
12583 }
12584
12585 case kExprI8x16Shuffle: {
12586 uint32_t slot_index = CreateConstSlot(
12587 simd_immediates_[instr.optional.simd_immediate_index]);
12588#ifdef V8_ENABLE_DRUMBRAKE_TRACING
12589 TracePushConstSlot(slot_index);
12590#endif // V8_ENABLE_DRUMBRAKE_TRACING
12591 EMIT_INSTR_HANDLER(s2s_SimdI8x16Shuffle);
12592 PushSlot(slot_index);
12593 S128Pop();
12594 S128Pop();
12595 S128Pop();
12596 S128Push();
12597 return RegMode::kNoReg;
12598 }
12599
12600 case kExprV128AnyTrue: {
12601 EMIT_INSTR_HANDLER(s2s_SimdV128AnyTrue);
12602 S128Pop();
12603 I32Push();
12604 return RegMode::kNoReg;
12605 }
12606
12607#define REDUCTION_CASE(op, name, stype, count, operation) \
12608 case kExpr##op: { \
12609 EMIT_INSTR_HANDLER(s2s_Simd##op); \
12610 S128Pop(); \
12611 I32Push(); \
12612 return RegMode::kNoReg; \
12613 }
12614 REDUCTION_CASE(I64x2AllTrue, i64x2, int64x2, 2, &)
12615 REDUCTION_CASE(I32x4AllTrue, i32x4, int32x4, 4, &)
12616 REDUCTION_CASE(I16x8AllTrue, i16x8, int16x8, 8, &)
12617 REDUCTION_CASE(I8x16AllTrue, i8x16, int8x16, 16, &)
12618#undef REDUCTION_CASE
12619
12620#define QFM_CASE(op, name, stype, count, operation) \
12621 case kExpr##op: { \
12622 EMIT_INSTR_HANDLER(s2s_Simd##op); \
12623 S128Pop(); \
12624 S128Pop(); \
12625 S128Pop(); \
12626 S128Push(); \
12627 return RegMode::kNoReg; \
12628 }
12629 QFM_CASE(F32x4Qfma, f32x4, float32x4, 4, +)
12630 QFM_CASE(F32x4Qfms, f32x4, float32x4, 4, -)
12631 QFM_CASE(F64x2Qfma, f64x2, float64x2, 2, +)
12632 QFM_CASE(F64x2Qfms, f64x2, float64x2, 2, -)
12633#undef QFM_CASE
12634
12635#define LOAD_SPLAT_CASE(op) \
12636 case kExprS128##op: { \
12637 EMIT_MEM64_INSTR_HANDLER_WITH_PC( \
12638 s2s_SimdS128##op, s2s_SimdS128##op##_Idx64, is_memory64_, instr.pc); \
12639 EmitMemoryOffset(instr.optional.offset); \
12640 MemIndexPop(); \
12641 S128Push(); \
12642 return RegMode::kNoReg; \
12643 }
12644 LOAD_SPLAT_CASE(Load8Splat)
12645 LOAD_SPLAT_CASE(Load16Splat)
12646 LOAD_SPLAT_CASE(Load32Splat)
12647 LOAD_SPLAT_CASE(Load64Splat)
12648#undef LOAD_SPLAT_CASE
12649
12650#define LOAD_EXTEND_CASE(op) \
12651 case kExprS128##op: { \
12652 EMIT_MEM64_INSTR_HANDLER_WITH_PC( \
12653 s2s_SimdS128##op, s2s_SimdS128##op##_Idx64, is_memory64_, instr.pc); \
12654 EmitMemoryOffset(instr.optional.offset); \
12655 MemIndexPop(); \
12656 S128Push(); \
12657 return RegMode::kNoReg; \
12658 }
12659 LOAD_EXTEND_CASE(Load8x8S)
12660 LOAD_EXTEND_CASE(Load8x8U)
12661 LOAD_EXTEND_CASE(Load16x4S)
12662 LOAD_EXTEND_CASE(Load16x4U)
12663 LOAD_EXTEND_CASE(Load32x2S)
12664 LOAD_EXTEND_CASE(Load32x2U)
12665#undef LOAD_EXTEND_CASE
12666
12667#define LOAD_ZERO_EXTEND_CASE(op, load_type) \
12668 case kExprS128##op: { \
12669 EMIT_MEM64_INSTR_HANDLER_WITH_PC( \
12670 s2s_SimdS128##op, s2s_SimdS128##op##_Idx64, is_memory64_, instr.pc); \
12671 EmitMemoryOffset(instr.optional.offset); \
12672 MemIndexPop(); \
12673 S128Push(); \
12674 return RegMode::kNoReg; \
12675 }
12676 LOAD_ZERO_EXTEND_CASE(Load32Zero, I32)
12677 LOAD_ZERO_EXTEND_CASE(Load64Zero, I64)
12678#undef LOAD_ZERO_EXTEND_CASE
12679
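// For lane loads the emitted operands are, in order: the vector operand
// (popped), the memory offset, the memory index (popped) and the lane index,
// which is emitted as a 16-bit constant.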
12680#define LOAD_LANE_CASE(op) \
12681 case kExprS128##op: { \
12682 EMIT_MEM64_INSTR_HANDLER_WITH_PC( \
12683 s2s_SimdS128##op, s2s_SimdS128##op##_Idx64, is_memory64_, instr.pc); \
12684 S128Pop(); \
12685 EmitMemoryOffset(instr.optional.simd_loadstore_lane.offset); \
12686 MemIndexPop(); \
12687 /* Note: the lane index would fit in 8 bits, but is emitted as 16 bits. */ \
12688 EmitI16Const(instr.optional.simd_loadstore_lane.lane); \
12689 S128Push(); \
12690 return RegMode::kNoReg; \
12691 }
12692 LOAD_LANE_CASE(Load8Lane)
12693 LOAD_LANE_CASE(Load16Lane)
12694 LOAD_LANE_CASE(Load32Lane)
12695 LOAD_LANE_CASE(Load64Lane)
12696#undef LOAD_LANE_CASE
12697
12698#define STORE_LANE_CASE(op) \
12699 case kExprS128##op: { \
12700 EMIT_MEM64_INSTR_HANDLER_WITH_PC( \
12701 s2s_SimdS128##op, s2s_SimdS128##op##_Idx64, is_memory64_, instr.pc); \
12702 S128Pop(); \
12703 EmitMemoryOffset(instr.optional.simd_loadstore_lane.offset); \
12704 MemIndexPop(); \
12705 /* Note: the lane index would fit in 8 bits, but is emitted as 16 bits. */ \
12706 EmitI16Const(instr.optional.simd_loadstore_lane.lane); \
12707 return RegMode::kNoReg; \
12708 }
12709 STORE_LANE_CASE(Store8Lane)
12710 STORE_LANE_CASE(Store16Lane)
12711 STORE_LANE_CASE(Store32Lane)
12712 STORE_LANE_CASE(Store64Lane)
12713#undef STORE_LANE_CASE
12714
12715#define EXT_ADD_PAIRWISE_CASE(op) \
12716 case kExpr##op: { \
12717 EMIT_INSTR_HANDLER(s2s_Simd##op); \
12718 S128Pop(); \
12719 S128Push(); \
12720 return RegMode::kNoReg; \
12721 }
12722 EXT_ADD_PAIRWISE_CASE(I32x4ExtAddPairwiseI16x8S)
12723 EXT_ADD_PAIRWISE_CASE(I32x4ExtAddPairwiseI16x8U)
12724 EXT_ADD_PAIRWISE_CASE(I16x8ExtAddPairwiseI8x16S)
12725 EXT_ADD_PAIRWISE_CASE(I16x8ExtAddPairwiseI8x16U)
12726#undef EXT_ADD_PAIRWISE_CASE
12727
12728 default:
12729 FATAL("Unknown or unimplemented opcode #%d:%s",
12730 wasm_code_->start[instr.pc],
12731 WasmOpcodes::OpcodeName(
12732 static_cast<WasmOpcode>(wasm_code_->start[instr.pc])));
12733 UNREACHABLE();
12734 }
12735
12736 return RegMode::kNoReg;
12737}
12738
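// Super-instructions fuse frequent two-instruction sequences (load+local.set,
// load+store, const+local.set, local.get+store) into a single instruction
// handler, saving one dispatch per fused pair. For example, 'i32.load'
// followed by 'local.set' is emitted as one s2s_I32LoadMem_LocalSet handler
// with the memory offset and the destination slot offset as operands. With
// compact bytecode enabled, encoding is first attempted with small handlers
// and, on failure, rolled back and retried with large handlers.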
12739bool WasmBytecodeGenerator::EncodeSuperInstruction(
12740 RegMode& reg_mode, const WasmInstruction& curr_instr,
12741 const WasmInstruction& next_instr) {
12742 if (!v8_flags.drumbrake_compact_bytecode) {
12743 DCHECK_EQ(handler_size_, InstrHandlerSize::Large);
12744 return DoEncodeSuperInstruction(reg_mode, curr_instr, next_instr);
12745 }
12746
12747 size_t current_instr_code_offset = code_.size();
12748 size_t current_slots_size = slots_.size();
12749 stack_.clear_history();
12750 DCHECK(!current_instr_encoding_failed_);
12751 handler_size_ = InstrHandlerSize::Small;
12752
12753 bool result = DoEncodeSuperInstruction(reg_mode, curr_instr, next_instr);
12754 if (V8_UNLIKELY(current_instr_encoding_failed_)) {
12755 code_.resize(current_instr_code_offset);
12756 slots_.resize(current_slots_size);
12757 stack_.rollback();
12758 current_instr_encoding_failed_ = false;
12759 handler_size_ = InstrHandlerSize::Large;
12760 result = DoEncodeSuperInstruction(reg_mode, curr_instr, next_instr);
12761 DCHECK(!current_instr_encoding_failed_);
12762 }
12763
12764 return result;
12765}
12766
12767bool WasmBytecodeGenerator::DoEncodeSuperInstruction(
12768 RegMode& reg_mode, const WasmInstruction& curr_instr,
12769 const WasmInstruction& next_instr) {
12770 if (curr_instr.orig >= kExprI32LoadMem &&
12771 curr_instr.orig <= kExprI64LoadMem32U &&
12772 next_instr.orig == kExprLocalSet) {
12773 // Do not optimize if we are updating a shared slot.
12774 uint32_t to_stack_index = next_instr.optional.index;
12775 if (HasSharedSlot(to_stack_index)) return false;
12776
12777 switch (curr_instr.orig) {
12778// The implementation of r2s_LoadMem_LocalSet is identical to the
12779// implementation of r2s_LoadMem, so we can reuse the same builtin.
12780#define LOAD_CASE(name, ctype, mtype, rep, type) \
12781 case kExpr##name: { \
12782 if (reg_mode == RegMode::kNoReg) { \
12783 EMIT_MEM64_INSTR_HANDLER_WITH_PC(s2s_##name##_LocalSet, \
12784 s2s_##name##_LocalSet_Idx64, \
12785 is_memory64_, curr_instr.pc); \
12786 EmitMemoryOffset(curr_instr.optional.offset); \
12787 MemIndexPop(); \
12788 EmitSlotOffset(slots_[stack_[to_stack_index]].slot_offset); \
12789 reg_mode = RegMode::kNoReg; \
12790 } else { \
12791 EMIT_MEM64_INSTR_HANDLER_WITH_PC(r2s_##name, r2s_##name##_Idx64, \
12792 is_memory64_, curr_instr.pc); \
12793 EmitMemoryOffset(static_cast<uint64_t>(curr_instr.optional.offset)); \
12794 EmitSlotOffset(slots_[stack_[to_stack_index]].slot_offset); \
12795 reg_mode = RegMode::kNoReg; \
12796 } \
12797 return true; \
12798 }
12799 LOAD_CASE(I32LoadMem8S, int32_t, int8_t, kWord8, I32);
12800 LOAD_CASE(I32LoadMem8U, int32_t, uint8_t, kWord8, I32);
12801 LOAD_CASE(I32LoadMem16S, int32_t, int16_t, kWord16, I32);
12802 LOAD_CASE(I32LoadMem16U, int32_t, uint16_t, kWord16, I32);
12803 LOAD_CASE(I64LoadMem8S, int64_t, int8_t, kWord8, I64);
12804 LOAD_CASE(I64LoadMem8U, int64_t, uint8_t, kWord8, I64);
12805 LOAD_CASE(I64LoadMem16S, int64_t, int16_t, kWord16, I64);
12806 LOAD_CASE(I64LoadMem16U, int64_t, uint16_t, kWord16, I64);
12807 LOAD_CASE(I64LoadMem32S, int64_t, int32_t, kWord32, I64);
12808 LOAD_CASE(I64LoadMem32U, int64_t, uint32_t, kWord32, I64);
12809 LOAD_CASE(I32LoadMem, int32_t, int32_t, kWord32, I32);
12810 LOAD_CASE(I64LoadMem, int64_t, int64_t, kWord64, I64);
12811 LOAD_CASE(F32LoadMem, Float32, uint32_t, kFloat32, F32);
12812 LOAD_CASE(F64LoadMem, Float64, uint64_t, kFloat64, F64);
12813#undef LOAD_CASE
12814
12815 default:
12816 return false;
12817 }
12818 } else if (curr_instr.orig == kExprI32LoadMem &&
12819 next_instr.orig == kExprI32StoreMem) {
12820 if (reg_mode == RegMode::kNoReg) {
12821 EMIT_MEM64_INSTR_HANDLER_WITH_PC(s2s_I32LoadStoreMem,
12822 s2s_I32LoadStoreMem_Idx64, is_memory64_,
12823 curr_instr.pc);
12824 EmitMemoryOffset(curr_instr.optional.offset); // load_offset
12825 MemIndexPop(); // load_index
12826 } else {
12827 EMIT_MEM64_INSTR_HANDLER_WITH_PC(r2s_I32LoadStoreMem,
12828 r2s_I32LoadStoreMem_Idx64, is_memory64_,
12829 curr_instr.pc);
12830 EmitMemoryOffset(curr_instr.optional.offset); // load_offset
12831 }
12832 EmitMemoryOffset(next_instr.optional.offset); // store_offset
12833 MemIndexPop(); // store_index
12834 reg_mode = RegMode::kNoReg;
12835 return true;
12836 } else if (curr_instr.orig == kExprI64LoadMem &&
12837 next_instr.orig == kExprI64StoreMem) {
12838 if (reg_mode == RegMode::kNoReg) {
12839 EMIT_MEM64_INSTR_HANDLER_WITH_PC(s2s_I64LoadStoreMem,
12840 s2s_I64LoadStoreMem_Idx64, is_memory64_,
12841 curr_instr.pc);
12842 EmitMemoryOffset(curr_instr.optional.offset);
12843 MemIndexPop();
12844 } else {
12845 EMIT_MEM64_INSTR_HANDLER_WITH_PC(r2s_I64LoadStoreMem,
12846 r2s_I64LoadStoreMem_Idx64, is_memory64_,
12847 curr_instr.pc);
12848 EmitMemoryOffset(curr_instr.optional.offset);
12849 }
12850 EmitMemoryOffset(next_instr.optional.offset);
12851 MemIndexPop();
12852 reg_mode = RegMode::kNoReg;
12853 return true;
12854 } else if (curr_instr.orig == kExprF32LoadMem &&
12855 next_instr.orig == kExprF32StoreMem) {
12856 if (reg_mode == RegMode::kNoReg) {
12857 EMIT_MEM64_INSTR_HANDLER_WITH_PC(s2s_F32LoadStoreMem,
12858 s2s_F32LoadStoreMem_Idx64, is_memory64_,
12859 curr_instr.pc);
12860 EmitMemoryOffset(curr_instr.optional.offset);
12861 MemIndexPop();
12862 } else {
12863 EMIT_MEM64_INSTR_HANDLER_WITH_PC(r2s_F32LoadStoreMem,
12864 r2s_F32LoadStoreMem_Idx64, is_memory64_,
12865 curr_instr.pc);
12866 EmitMemoryOffset(curr_instr.optional.offset);
12867 }
12868 EmitMemoryOffset(next_instr.optional.offset);
12869 MemIndexPop();
12870 reg_mode = RegMode::kNoReg;
12871 return true;
12872 } else if (curr_instr.orig == kExprF64LoadMem &&
12873 next_instr.orig == kExprF64StoreMem) {
12874 if (reg_mode == RegMode::kNoReg) {
12875 EMIT_MEM64_INSTR_HANDLER_WITH_PC(s2s_F64LoadStoreMem,
12876 s2s_F64LoadStoreMem_Idx64, is_memory64_,
12877 curr_instr.pc);
12878 EmitMemoryOffset(curr_instr.optional.offset);
12879 MemIndexPop();
12880 } else {
12881 EMIT_MEM64_INSTR_HANDLER_WITH_PC(r2s_F64LoadStoreMem,
12882 r2s_F64LoadStoreMem_Idx64, is_memory64_,
12883 curr_instr.pc);
12884 EmitMemoryOffset(curr_instr.optional.offset);
12885 }
12886 EmitMemoryOffset(next_instr.optional.offset);
12887 MemIndexPop();
12888 reg_mode = RegMode::kNoReg;
12889 return true;
12890 } else if (curr_instr.orig >= kExprI32Const &&
12891 curr_instr.orig <= kExprF64Const &&
12892 next_instr.orig == kExprLocalSet) {
12893 uint32_t to_stack_index = next_instr.optional.index;
12894 switch (curr_instr.orig) {
12895 case kExprI32Const: {
12896 uint32_t from_slot_index =
12897 CreateConstSlot<int32_t>(curr_instr.optional.i32);
12898 CopyToSlot(kWasmI32, from_slot_index, to_stack_index, false);
12899 reg_mode = RegMode::kNoReg;
12900 return true;
12901 }
12902 case kExprI64Const: {
12903 uint32_t from_slot_index =
12904 CreateConstSlot<int64_t>(curr_instr.optional.i64);
12905 CopyToSlot(kWasmI64, from_slot_index, to_stack_index, false);
12906 reg_mode = RegMode::kNoReg;
12907 return true;
12908 }
12909 case kExprF32Const: {
12910 uint32_t from_slot_index =
12911 CreateConstSlot<float>(curr_instr.optional.f32);
12912 CopyToSlot(kWasmF32, from_slot_index, to_stack_index, false);
12913 reg_mode = RegMode::kNoReg;
12914 return true;
12915 }
12916 case kExprF64Const: {
12917 uint32_t from_slot_index =
12918 CreateConstSlot<double>(curr_instr.optional.f64);
12919 CopyToSlot(kWasmF64, from_slot_index, to_stack_index, false);
12920 reg_mode = RegMode::kNoReg;
12921 return true;
12922 }
12923 default:
12924 return false;
12925 }
12926 } else if (curr_instr.orig == kExprLocalGet &&
12927 next_instr.orig >= kExprI32StoreMem &&
12928 next_instr.orig <= kExprI64StoreMem32) {
12929 switch (next_instr.orig) {
12930// The implementation of r2s_LocalGet_StoreMem is identical to the
12931// implementation of r2s_StoreMem, so we can reuse the same builtin.
12932#define STORE_CASE(name, ctype, mtype, rep, type) \
12933 case kExpr##name: { \
12934 EMIT_MEM64_INSTR_HANDLER_WITH_PC(s2s_##name, s2s_##name##_Idx64, \
12935 is_memory64_, curr_instr.pc); \
12936 EmitSlotOffset(slots_[stack_[curr_instr.optional.index]].slot_offset); \
12937 EmitMemoryOffset(next_instr.optional.offset); \
12938 MemIndexPop(); \
12939 reg_mode = RegMode::kNoReg; \
12940 return true; \
12941 }
12942 STORE_CASE(I32StoreMem8, int32_t, int8_t, kWord8, I32);
12943 STORE_CASE(I32StoreMem16, int32_t, int16_t, kWord16, I32);
12944 STORE_CASE(I64StoreMem8, int64_t, int8_t, kWord8, I64);
12945 STORE_CASE(I64StoreMem16, int64_t, int16_t, kWord16, I64);
12946 STORE_CASE(I64StoreMem32, int64_t, int32_t, kWord32, I64);
12947 STORE_CASE(I32StoreMem, int32_t, int32_t, kWord32, I32);
12948 STORE_CASE(I64StoreMem, int64_t, int64_t, kWord64, I64);
12949 STORE_CASE(F32StoreMem, Float32, uint32_t, kFloat32, F32);
12950 STORE_CASE(F64StoreMem, Float64, uint64_t, kFloat64, F64);
12951#undef STORE_CASE
12952
12953 default:
12954 return false;
12955 }
12956 }
12957
12958 return false;
12959}
12960
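// Decodes the function body with a one-instruction lookahead, so that each
// (curr_instr, next_instr) pair can first be offered to the super-instruction
// encoder; when no fusion is possible, curr_instr is encoded on its own and
// next_instr becomes the new current instruction.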
12961std::unique_ptr<WasmBytecode> WasmBytecodeGenerator::GenerateBytecode() {
12962#ifdef V8_ENABLE_DRUMBRAKE_TRACING
12963 if (v8_flags.trace_drumbrake_bytecode_generator) {
12964 printf("\nGenerate bytecode for function: %d\n", function_index_);
12965 }
12966#endif // V8_ENABLE_DRUMBRAKE_TRACING
12967
12968 uint32_t const_slots = ScanConstInstructions();
12969 const_slots_values_.resize(const_slots * kSlotSize);
12970
12971 pc_t pc = 0;
12972 RegMode reg_mode = RegMode::kNoReg;
12973
12974 Decoder decoder(wasm_code_->start, wasm_code_->end);
12975
12977
12978 // Init stack_ with return values, args and local types.
12979
12980 for (uint32_t index = 0; index < return_count_; index++) {
12981 CreateSlot(wasm_code_->function->sig->GetReturn(index));
12982 }
12983
12984 for (uint32_t index = 0; index < args_count_; index++) {
12985 _PushSlot(wasm_code_->function->sig->GetParam(index));
12986 }
12987
12988 // Reserve space for const slots
12989 slot_offset_ += const_slots;
12990
12991 for (uint32_t index = 0; index < wasm_code_->locals.num_locals; index++) {
12992 _PushSlot(wasm_code_->locals.local_types[index]);
12993 }
12994
12995 BeginBlock(
12996 kExprBlock,
12997 {wasm_code_->function->sig_index, kWasmBottom.raw_bit_field()});
12998
12999 WasmInstruction curr_instr;
13000 WasmInstruction next_instr;
13001
13002 pc_t limit = wasm_code_->end - wasm_code_->start;
13003 while (pc < limit) {
13005
13006 if (!curr_instr) {
13007 curr_instr = DecodeInstruction(pc, decoder);
13008 if (curr_instr) pc += curr_instr.length;
13009 }
13010 if (!curr_instr) break;
13011 DCHECK(!next_instr);
13012 next_instr = DecodeInstruction(pc, decoder);
13013 if (next_instr) pc += next_instr.length;
13014
13015 if (next_instr) {
13016 if (v8_flags.drumbrake_super_instructions && is_instruction_reachable_ &&
13017 EncodeSuperInstruction(reg_mode, curr_instr, next_instr)) {
13018 curr_instr = {};
13019 next_instr = {};
13020 } else {
13021 reg_mode =
13022 EncodeInstruction(curr_instr, reg_mode, next_instr.InputRegMode());
13023 curr_instr = next_instr;
13024 next_instr = {};
13025 }
13026 } else {
13027 reg_mode = EncodeInstruction(curr_instr, reg_mode, RegMode::kNoReg);
13028 curr_instr = {};
13029 }
13030
13031 if (pc == limit && curr_instr) {
13032 reg_mode = EncodeInstruction(curr_instr, reg_mode, RegMode::kNoReg);
13033 }
13034 }
13035
13036 PatchLoopBeginInstructions();
13037 PatchBranchOffsets();
13038
13039 total_bytecode_size_ += code_.size();
13040
13041 CanonicalTypeIndex canonical_sig_index =
13042 module_->canonical_sig_id(module_->functions[function_index_].sig_index);
13043 const CanonicalSig* canonicalized_sig =
13044 GetTypeCanonicalizer()->LookupFunctionSignature(canonical_sig_index);
13045 return std::make_unique<WasmBytecode>(
13046 function_index_, code_.data(), code_.size(), slot_offset_,
13047 module_->functions[function_index_].sig, canonicalized_sig, wasm_code_,
13048 blocks_.size(), const_slots_values_.data(), const_slots_values_.size(),
13049 ref_slots_count_, std::move(eh_data_), std::move(code_pc_map_));
13050}
13051
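// Opens a new block scope: reserves slots for the block's results and
// parameters, links the block to its parent (for catch/catch_all blocks, to
// the parent of the matching try block) and, for 'if' blocks with parameters,
// saves the parameter stack entries.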
13052int32_t WasmBytecodeGenerator::BeginBlock(
13053 WasmOpcode opcode, const WasmInstruction::Optional::Block signature) {
13054 int32_t block_index = static_cast<int32_t>(blocks_.size());
13055 uint32_t stack_size = this->stack_size();
13056
13057 uint32_t first_block_index = 0;
13058 size_t rets_slots_count = 0;
13059 size_t params_slots_count = 0;
13060 if (block_index > 0 && (opcode != kExprElse && opcode != kExprCatch &&
13061 opcode != kExprCatchAll)) {
13062 first_block_index = ReserveBlockSlots(opcode, signature, &rets_slots_count,
13063 &params_slots_count);
13064 }
13065
13066 uint32_t parent_block_index = current_block_index_;
13067 if (opcode == kExprCatch || opcode == kExprCatchAll) {
13068 parent_block_index =
13069 blocks_[eh_data_.GetCurrentTryBlockIndex()].parent_block_index_;
13070 }
13071
13072 blocks_.emplace_back(opcode, CurrentCodePos(), parent_block_index, stack_size,
13073 signature, first_block_index, rets_slots_count,
13074 params_slots_count, eh_data_.GetCurrentTryBlockIndex());
13075 current_block_index_ = block_index;
13076
13077 if (opcode == kExprIf && params_slots_count > 0) {
13078 DCHECK_GE(stack_size, params_slots_count);
13079 blocks_.back().SaveParams(&stack_[stack_size - params_slots_count],
13080 params_slots_count);
13081 }
13082
13083 if (opcode == kExprLoop) {
13086 blocks_[current_block_index_].begin_code_offset_ = CurrentCodePos();
13087 loop_begin_code_offsets_.push_back(CurrentCodePos());
13088
13089 START_EMIT_INSTR_HANDLER_WITH_ID(s2s_OnLoopBegin) {}
13090 END_EMIT_INSTR_HANDLER()
13091 }
13092 return current_block_index_;
13093}
13094
13095int WasmBytecodeGenerator::GetCurrentTryBlockIndex(
13096 bool return_matching_try_for_catch_blocks) const {
13098 int index = current_block_index_;
13099 while (index >= 0) {
13100 const auto& block = blocks_[index];
13101 if (block.IsTry()) return index;
13102 if (return_matching_try_for_catch_blocks &&
13103 (block.IsCatch() || block.IsCatchAll())) {
13104 return block.parent_try_block_index_;
13105 }
13106 index = blocks_[index].parent_block_index_;
13107 }
13108 return -1;
13109}
13110
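// If the function makes no use of reference slots, the s2s_OnLoopBegin
// handlers recorded in loop_begin_code_offsets_ are patched to the
// s2s_OnLoopBeginNoRefSlots variant.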
13111void WasmBytecodeGenerator::PatchLoopBeginInstructions() {
13112 if (ref_slots_count_ == 0) {
13113 for (size_t i = 0; i < loop_begin_code_offsets_.size(); i++) {
13114 base::WriteUnalignedValue<InstructionHandler>(
13115 reinterpret_cast<Address>(code_.data() + loop_begin_code_offsets_[i]),
13116 k_s2s_OnLoopBeginNoRefSlots);
13117 }
13118 }
13119}
13120
13121void WasmBytecodeGenerator::PatchBranchOffsets() {
13122 static const uint32_t kElseBlockStartOffset =
13123 sizeof(InstructionHandler) + sizeof(uint32_t);
13124
13125 for (int block_index = 0; block_index < static_cast<int>(blocks_.size());
13126 block_index++) {
13127 const BlockData block_data = blocks_[block_index];
13128 for (size_t i = 0; i < block_data.branch_code_offsets_.size(); i++) {
13129 uint32_t current_code_offset = block_data.branch_code_offsets_[i];
13130 uint32_t target_offset = block_data.end_code_offset_;
13131 if (block_data.IsLoop()) {
13132 target_offset = block_data.begin_code_offset_;
13133 } else if (block_data.IsIf() && block_data.if_else_block_index_ >= 0 &&
13134 current_code_offset == block_data.begin_code_offset_) {
13135 // Jumps to the 'else' branch.
13136 target_offset =
13137 blocks_[block_data.if_else_block_index_].begin_code_offset_ +
13138 kElseBlockStartOffset;
13139 } else if ((block_data.IsCatch() || block_data.IsCatchAll()) &&
13140 current_code_offset == block_data.begin_code_offset_ +
13141 sizeof(InstructionHandler)) {
13142 // Jumps to the end of a sequence of 'try'/'catch' branches.
13143 target_offset = static_cast<uint32_t>(
13144 eh_data_.GetEndInstructionOffsetFor(block_data.parent_try_block_index_));
13145 }
13146
13147 int32_t delta = target_offset - current_code_offset;
13148 base::WriteUnalignedValue<int32_t>(
13149 reinterpret_cast<Address>(code_.data() + current_code_offset), delta);
13150 }
13151 }
13152}
13153
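// Peephole optimization: merges two consecutive CopySlot32 (or CopySlot64)
// handlers into a single CopySlot32x2 (CopySlot64x2) handler that performs
// both copies with one dispatch.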
13154bool WasmBytecodeGenerator::TryCompactInstructionHandler(
13155 InstructionHandler func_id) {
13156 if (last_instr_offset_ == kInvalidCodeOffset) return false;
13157 InstructionHandler* prev_instr_addr =
13158 reinterpret_cast<InstructionHandler*>(code_.data() + last_instr_offset_);
13159 InstructionHandler prev_instr_handler = *prev_instr_addr;
13160 if (func_id == k_s2s_CopySlot32 && prev_instr_handler == k_s2s_CopySlot32) {
13161 // Transforms:
13162 // [CopySlot32: InstrId][from: slot_offset_t][to: slot_offset_t]
13163 // into:
13164 // [CopySlot32x2: InstrId][from0: slot_offset_t][to0: slot_offset_t][from1:
13165 // slot_offset_t][to1: slot_offset_t]
13167 v8_flags.drumbrake_compact_bytecode);
13168 base::WriteUnalignedValue<InstructionHandler>(
13169 reinterpret_cast<Address>(prev_instr_addr), k_s2s_CopySlot32x2);
13170 return true;
13171 } else if (func_id == k_s2s_CopySlot64 &&
13172 prev_instr_handler == k_s2s_CopySlot64) {
13174 v8_flags.drumbrake_compact_bytecode);
13175 base::WriteUnalignedValue<InstructionHandler>(
13176 reinterpret_cast<Address>(prev_instr_addr), k_s2s_CopySlot64x2);
13177 return true;
13178 }
13179 return false;
13180}
13181
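// Sets the thread-in-wasm flag again when the scope is left, unless an
// exception is pending; in that case the flag is only restored if the
// exception is caught in wasm (handled by the unwinder).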
13188
13189ClearThreadInWasmScope::~ClearThreadInWasmScope() {
13192 if (!isolate_->has_exception()) {
13193 trap_handler::SetThreadInWasm();
13194 }
13195 // Otherwise we only want to set the flag if the exception is caught in
13196 // wasm. This is handled by the unwinder.
13197}
13198
13199} // namespace wasm
13200} // namespace internal
13201} // namespace v8
static auto constexpr s2s_I64ArraySet
INSTRUCTION_HANDLER_FUNC s2s_RefArrayNew(const uint8_t *code, uint32_t *sp, WasmInterpreterRuntime *wasm_runtime, int64_t r0, double fp0)
static auto constexpr r2r_I64LoadMem32S_Idx64
static auto constexpr s2s_I32LoadMem8U_LocalSet_Idx64
static auto constexpr s2s_F32LoadStoreMem_Idx64
static auto constexpr s2s_SimdI16x8ExtMulHighI8x16U
static auto constexpr s2s_I32Select
static auto constexpr s2s_F32LoadMem_Idx64
INSTRUCTION_HANDLER_FUNC r2s_LoadStoreMem(const uint8_t *code, uint32_t *sp, WasmInterpreterRuntime *wasm_runtime, int64_t r0, double fp0)
static auto constexpr s2s_I64LoadMem_LocalSet_Idx64
INSTRUCTION_HANDLER_FUNC s2s_CopySlot32(const uint8_t *code, uint32_t *sp, WasmInterpreterRuntime *wasm_runtime, int64_t r0, double fp0)
static auto constexpr r2r_I32Select
static auto constexpr s2s_SimdS128Load32x2S
INSTRUCTION_HANDLER_FUNC s2s_RefArraySet(const uint8_t *code, uint32_t *sp, WasmInterpreterRuntime *wasm_runtime, int64_t r0, double fp0)
traits::slot_offset_t slot_offset_t
INSTRUCTION_HANDLER_FUNC s2s_MemoryCopy(const uint8_t *code, uint32_t *sp, WasmInterpreterRuntime *wasm_runtime, int64_t r0, double fp0)
INSTRUCTION_HANDLER_FUNC s2s_PreserveCopySlot128(const uint8_t *code, uint32_t *sp, WasmInterpreterRuntime *wasm_runtime, int64_t r0, double fp0)
static auto constexpr s2s_I64StoreMem8_Idx64
static auto constexpr r2s_I64StoreMem_Idx64
static auto constexpr s2s_ArrayInitElem
INSTRUCTION_HANDLER_FUNC s2s_BranchOnNull(const uint8_t *code, uint32_t *sp, WasmInterpreterRuntime *wasm_runtime, int64_t r0, double fp0)
INSTRUCTION_HANDLER_FUNC s2s_I32AtomicWait(const uint8_t *code, uint32_t *sp, WasmInterpreterRuntime *wasm_runtime, int64_t r0, double fp0)
static auto constexpr s2s_I64ArrayGet
static auto constexpr r2s_I64GlobalSet
static auto constexpr s2s_SimdS128Load16x4S_Idx64
static auto constexpr s2r_I32GlobalGet
static auto constexpr s2s_ArrayInitData
INSTRUCTION_HANDLER_FUNC s2s_RefNull(const uint8_t *code, uint32_t *sp, WasmInterpreterRuntime *wasm_runtime, int64_t r0, double fp0)
static auto constexpr s2s_S128GlobalSet
static auto constexpr r2s_I64StoreMem16_Idx64
INSTRUCTION_HANDLER_FUNC r2r_I32ConvertI64(const uint8_t *code, uint32_t *sp, WasmInterpreterRuntime *wasm_runtime, int64_t r0, double fp0)
INSTRUCTION_HANDLER_FUNC s2s_StructGet(const uint8_t *code, uint32_t *sp, WasmInterpreterRuntime *wasm_runtime, int64_t r0, double fp0)
static auto constexpr s2s_SimdI64x2ExtMulHighI32x4U
static auto constexpr s2s_I64LoadMem32S_LocalSet_Idx64
INSTRUCTION_HANDLER_FUNC s2s_RefTestFails(const uint8_t *code, uint32_t *sp, WasmInterpreterRuntime *wasm_runtime, int64_t r0, double fp0)
static auto constexpr s2s_I64LoadMem16S_Idx64
static auto constexpr s2s_S128ArrayNew
static auto constexpr s2s_I64LoadMem16U_Idx64
static auto constexpr s2r_F64Select
INSTRUCTION_HANDLER_FUNC s2s_StructNew(const uint8_t *code, uint32_t *sp, WasmInterpreterRuntime *wasm_runtime, int64_t r0, double fp0)
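All of the INSTRUCTION_HANDLER_FUNC declarations above share one five-argument signature: the bytecode pointer, the slot stack pointer, the interpreter runtime, and two scratch registers, r0 (integer) and fp0 (floating point). The s2s/r2s/s2r/r2r prefixes appear to name where a handler reads its input and writes its output (s = a stack slot, r = the r0/fp0 register arguments), and the _Idx64 suffix marks the memory64 variants that the EMIT_MEM64_INSTR_HANDLER macros listed further down select at emit time. A minimal standalone sketch of that calling convention (FakeRuntime and both handlers are invented for illustration; this is not V8 code):

#include <cstdint>
#include <cstdio>

struct FakeRuntime {};  // stand-in for WasmInterpreterRuntime (assumption)

using Handler = void (*)(const uint8_t* code, uint32_t* sp,
                         FakeRuntime* runtime, int64_t r0, double fp0);

// "s2s": reads its operand from a stack slot, writes the result to a slot.
void s2s_AddOne(const uint8_t* code, uint32_t* sp, FakeRuntime* rt,
                int64_t r0, double fp0) {
  sp[0] = sp[0] + 1;
  std::printf("s2s result in slot: %u\n", sp[0]);
}

// "s2r": reads from a slot but leaves the result in the r0 register argument,
// which a real handler would pass straight to the next handler.
void s2r_AddOne(const uint8_t* code, uint32_t* sp, FakeRuntime* rt,
                int64_t r0, double fp0) {
  r0 = static_cast<int64_t>(sp[0]) + 1;
  std::printf("s2r result in r0: %lld\n", static_cast<long long>(r0));
}

int main() {
  uint32_t stack[4] = {41, 0, 0, 0};
  FakeRuntime rt;
  Handler h = s2s_AddOne;
  h(nullptr, stack, &rt, 0, 0.0);
  h = s2r_AddOne;
  h(nullptr, stack, &rt, 0, 0.0);
}

Keeping the hot top-of-stack value in r0/fp0 lets adjacent handlers skip a slot store and reload, which is presumably why both slot and register variants of the same operation exist.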
constexpr Representation representation() const
Definition value-type.h:837
constexpr bool is_index() const
Definition value-type.h:840
constexpr ModuleTypeIndex ref_index() const
Definition value-type.h:762
static constexpr HeapType FromBits(uint32_t bits)
Definition value-type.h:721
static constexpr int ToTagged(int offset)
uint32_t field_offset(uint32_t index) const
ValueType field(uint32_t index) const
V8_EXPORT_PRIVATE const CanonicalSig * LookupFunctionSignature(CanonicalTypeIndex index) const
constexpr ValueKind kind() const
Definition value-type.h:631
constexpr bool is_reference() const
Definition value-type.h:600
constexpr bool is_string_view() const
Definition value-type.h:438
constexpr bool is_nullable() const
Definition value-type.h:393
constexpr uint32_t raw_bit_field() const
Definition value-type.h:594
static constexpr ValueType FromRawBitField(uint32_t bits)
Definition value-type.h:913
constexpr HeapType heap_type() const
static constexpr ValueType RefNull(ModuleTypeIndex index, bool shared, RefTypeKind kind)
Definition value-type.h:895
static constexpr ValueType Ref(ModuleTypeIndex index, bool shared, RefTypeKind kind)
Definition value-type.h:887
static constexpr ValueType RefMaybeNull(ModuleTypeIndex index, Nullability nullable, bool shared, RefTypeKind kind)
Definition value-type.h:903
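FromRawBitField() and raw_bit_field() above imply that a ValueType round-trips losslessly through a plain uint32_t, which is what lets the bytecode generator embed types directly in the emitted bytecode stream. A standalone miniature of that round trip (the real layout lives in value-type.h; the kinds and field widths here are invented):

#include <cassert>
#include <cstdint>

enum class Kind : uint32_t { kI32, kI64, kRef, kRefNull };

class MiniValueType {
 public:
  static constexpr MiniValueType FromRawBitField(uint32_t bits) {
    return MiniValueType(bits);
  }
  constexpr uint32_t raw_bit_field() const { return bits_; }
  constexpr Kind kind() const { return static_cast<Kind>(bits_ & 0x3); }
  constexpr bool is_nullable() const { return kind() == Kind::kRefNull; }
  constexpr uint32_t ref_index() const { return bits_ >> 2; }  // invented width

  static constexpr MiniValueType RefNull(uint32_t index) {
    return MiniValueType((index << 2) | static_cast<uint32_t>(Kind::kRefNull));
  }

 private:
  constexpr explicit MiniValueType(uint32_t bits) : bits_(bits) {}
  uint32_t bits_;
};

int main() {
  MiniValueType t = MiniValueType::RefNull(7);
  uint32_t raw = t.raw_bit_field();                          // store in bytecode
  MiniValueType back = MiniValueType::FromRawBitField(raw);  // decode again
  assert(back.is_nullable() && back.ref_index() == 7);
}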
static std::atomic< size_t > emitted_short_memory_offset_count_
ValueType GetParamType(const WasmBytecodeGenerator::BlockData &block_data, size_t index) const
void BeginElseBlock(uint32_t if_block_index, bool dummy)
void Pop(ValueKind kind, bool emit=true)
void EmitFnId(InstructionHandler func_id, uint32_t pc=UINT_MAX)
void CopyToSlotAndPop(ValueType value_type, uint32_t to, bool is_tee, bool copy_from_reg)
void EmitBranchTableOffset(uint32_t delta, uint32_t code_pos)
void EmitCopySlot(ValueType value_type, uint32_t from_slot_index, uint32_t to_slot_index, bool copy_from_reg=false)
uint32_t _PushSlot(ValueType value_type)
void SetSlotType(uint32_t stack_index, ValueType type)
base::SmallVector< uint32_t, 8 > br_table_labels_
bool TypeCheckAlwaysSucceeds(ValueType obj_type, HeapType type) const
const FunctionSig * GetFunctionSignature(uint32_t function_index) const
bool HasVoidSignature(const WasmBytecodeGenerator::BlockData &block_data) const
void DecodeAtomicOp(WasmOpcode opcode, WasmInstruction::Optional *optional, Decoder *decoder, InterpreterCode *code, pc_t pc, int *const len)
base::SmallVector< uint32_t, 16 > loop_begin_code_offsets_
ValueKind GetTopStackType(RegMode reg_mode) const
RegMode DoEncodeInstruction(const WasmInstruction &instr, RegMode curr_reg_mode, RegMode next_reg_mode)
void DecodeGCOp(WasmOpcode opcode, WasmInstruction::Optional *optional, Decoder *decoder, InterpreterCode *code, pc_t pc, int *const len)
void PushCopySlot(uint32_t from_stack_index)
std::map< CodeOffset, pc_t > code_pc_map_
ValueType GetReturnType(const WasmBytecodeGenerator::BlockData &block_data, size_t index) const
void StoreBlockParamsAndResultsIntoSlots(uint32_t target_block_index, WasmOpcode opcode)
static std::atomic< size_t > total_bytecode_size_
uint32_t ReturnsCount(const WasmBytecodeGenerator::BlockData &block_data) const
WasmBytecodeGenerator(uint32_t function_index, InterpreterCode *wasm_code, const WasmModule *module)
uint32_t ParamsCount(const WasmBytecodeGenerator::BlockData &block_data) const
bool DoEncodeSuperInstruction(RegMode &reg_mode, const WasmInstruction &curr_instr, const WasmInstruction &next_instr)
int GetCurrentTryBlockIndex(bool return_matching_try_for_catch_blocks) const
void RestoreIfElseParams(uint32_t if_block_index)
WasmInstruction DecodeInstruction(pc_t pc, Decoder &decoder)
void StoreBlockParamsIntoSlots(uint32_t target_block_index, bool update_stack)
void UpdateStack(uint32_t index, uint32_t slot_index)
bool EncodeSuperInstruction(RegMode &reg_mode, const WasmInstruction &curr_instr, const WasmInstruction &next_instr)
bool HasSharedSlot(uint32_t stack_index) const
ValueKind GetGlobalType(uint32_t index) const
void InitSlotsForFunctionArgs(const FunctionSig *sig, bool is_indirect_call)
uint32_t ReserveBlockSlots(uint8_t opcode, const WasmInstruction::Optional::Block &block_data, size_t *rets_slots_count, size_t *params_slots_count)
bool TypeCheckAlwaysFails(ValueType obj_type, HeapType expected_type, bool null_succeeds) const
int32_t BeginBlock(WasmOpcode opcode, const WasmInstruction::Optional::Block signature)
bool DecodeSimdOp(WasmOpcode opcode, WasmInstruction::Optional *optional, Decoder *decoder, InterpreterCode *code, pc_t pc, int *const len)
void RefPush(ValueType type, bool emit=true)
uint32_t CreateSlot(ValueType value_type)
bool TryCompactInstructionHandler(InstructionHandler func_addr)
RegMode EncodeInstruction(const WasmInstruction &instr, RegMode curr_reg_mode, RegMode next_reg_mode)
void DecodeNumericOp(WasmOpcode opcode, WasmInstruction::Optional *optional, Decoder *decoder, InterpreterCode *code, pc_t pc, int *const len)
int32_t GetTargetBranch(uint32_t delta) const
bool FindSharedSlot(uint32_t stack_index, uint32_t *new_slot_index)
static std::atomic< size_t > emitted_short_slot_offset_count_
void Emit(const void *buff, size_t len)
bool ToRegisterIsAllowed(const WasmInstruction &instr)
void CopyToSlot(ValueType value_type, uint32_t from_slot_index, uint32_t to_stack_index, bool copy_from_reg)
std::unique_ptr< WasmBytecode > GenerateBytecode()
static bool HasRefOrSimdArgs(const FunctionSig *sig)
WasmBytecode(int func_index, const uint8_t *code_data, size_t code_length, uint32_t stack_frame_size, const FunctionSig *signature, const CanonicalSig *canonical_signature, const InterpreterCode *interpreter_code, size_t blocks_count, const uint8_t *const_slots_data, size_t const_slots_length, uint32_t ref_slots_count, const WasmEHData &&eh_data, const std::map< CodeOffset, pc_t > &&code_pc_map)
static uint32_t RetsSizeInSlots(const FunctionSig *sig)
pc_t GetPcFromTrapCode(const uint8_t *current_code) const
static uint32_t ArgsSizeInSlots(const FunctionSig *sig)
std::map< CodeOffset, pc_t > code_pc_map_
static uint32_t RefArgsCount(const FunctionSig *sig)
static uint32_t RefRetsCount(const FunctionSig *sig)
void AddCatchBlock(BlockIndex catch_block_index, int tag_index, uint32_t first_param_slot_offset, uint32_t first_param_ref_stack_index, CodeOffset code_offset)
void AddTryBlock(BlockIndex try_block_index, BlockIndex parent_or_matching_try_block_index, BlockIndex ancestor_try_block_index)
void AddDelegatedBlock(BlockIndex delegated_try_block_index)
void RecordPotentialExceptionThrowingInstruction(WasmOpcode opcode, CodeOffset code_offset)
BlockIndex EndTryCatchBlocks(BlockIndex block_index, CodeOffset code_offset)
ExceptionPayloadSlotOffsets GetExceptionPayloadStartSlotOffsets(BlockIndex catch_block_index) const
std::unordered_map< BlockIndex, CatchBlock > catch_blocks_
std::unordered_map< CodeOffset, BlockIndex > code_trycatch_map_
const TryBlock * GetTryBlock(CodeOffset code_offset) const
std::unordered_map< BlockIndex, TryBlock > try_blocks_
size_t GetEndInstructionOffsetFor(BlockIndex catch_block_index) const
BlockIndex GetTryBranchOf(BlockIndex catch_block_index) const
const TryBlock * GetParentTryBlock(const TryBlock *try_block) const
const TryBlock * GetDelegateTryBlock(const TryBlock *try_block) const
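Read together, DecodeInstruction(), EncodeInstruction(), and GenerateBytecode() sketch the generator pipeline: decode one wasm instruction at a time, emit the corresponding handler id plus operands, and thread a RegMode through so a value produced by one instruction can stay cached in r0/fp0 for the next. A hypothetical miniature of that loop shape (all types here are invented; the real generator also manages blocks, slots, super-instructions, and the exception bookkeeping above):

#include <cstdint>
#include <vector>

enum class RegMode { kNoReg, kI64Reg };

struct Instr { uint8_t opcode; };

struct MiniGenerator {
  std::vector<uint8_t> code;

  Instr Decode(size_t pc, const std::vector<uint8_t>& wire) {
    return Instr{wire[pc]};
  }

  // Returns the register mode left behind for the next instruction, which is
  // how a result can stay cached in r0/fp0 across handlers.
  RegMode Encode(const Instr& instr, RegMode curr) {
    code.push_back(instr.opcode);  // the real code emits a handler id + operands
    return RegMode::kNoReg;
  }

  std::vector<uint8_t> Generate(const std::vector<uint8_t>& wire) {
    RegMode mode = RegMode::kNoReg;
    for (size_t pc = 0; pc < wire.size(); ++pc) {
      mode = Encode(Decode(pc, wire), mode);
    }
    return code;
  }
};

int main() {
  MiniGenerator gen;
  std::vector<uint8_t> wire = {0x20, 0x41, 0x6a};  // arbitrary bytes
  auto bytecode = gen.Generate(wire);
  return bytecode.size() == 3 ? 0 : 1;
}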
static constexpr WasmEnabledFeatures All()
WasmExecutionTimer(Isolate *isolate, bool track_jitless_wasm)
WasmInterpreterThread * GetCurrentInterpreterThread(Isolate *isolate)
std::vector< WasmInterpreterStackEntry > CaptureStackTrace(const TrapStatus *trap_status=nullptr) const
static void NotifyIsolateDisposal(Isolate *isolate)
static WasmInterpreterThreadMap * thread_interpreter_map_s
void RaiseException(Isolate *isolate, MessageTemplate message)
void ClearRefStackValues(size_t index, size_t count)
static WasmInterpreterThread * GetCurrentInterpreterThread(Isolate *isolate)
static void SetRuntimeLastWasmError(Isolate *isolate, MessageTemplate message)
static TrapReason GetRuntimeLastWasmError(Isolate *isolate)
ZoneVector< InterpreterCode > interpreter_code_
CodeMap(Isolate *isolate, const WasmModule *module, const uint8_t *module_start, Zone *zone)
static void NotifyIsolateDisposal(Isolate *isolate)
WasmInterpreterThread::State ContinueExecution(WasmInterpreterThread *thread, bool called_from_js)
std::shared_ptr< WasmInterpreterRuntime > wasm_runtime_
IndirectHandle< WasmInstanceObject > instance_object_
WasmInterpreter(Isolate *isolate, const WasmModule *module, const ModuleWireBytes &wire_bytes, DirectHandle< WasmInstanceObject > instance)
static constexpr TrapReason MessageIdToTrapReason(MessageTemplate message)
static constexpr const char * OpcodeName(WasmOpcode)
static constexpr bool IsPrefixOpcode(WasmOpcode)
Zone * zone_
base::Mutex & mutex_
#define COMPRESS_POINTERS_BOOL
Definition globals.h:99
#define BINOP_CASE(opcode, assembler_op)
#define FOREACH_LOAD_STORE_DUPLICATED_INSTR_HANDLER(V)
#define FOREACH_LOAD_STORE_INSTR_HANDLER(V,...)
#define FOREACH_NO_BOUNDSCHECK_INSTR_HANDLER(V)
#define FOREACH_INSTR_HANDLER(V)
int32_t offset
#define ATOMIC_LOAD_OP(name, type)
bool null_succeeds
ValueType obj_type
#define ATOMIC_COMPARE_EXCHANGE_OP(name, type)
const int func_index_
#define ATOMIC_STORE_OP(name, type)
#define ATOMIC_OP(op, type, kind)
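The FOREACH_*(V) lists and the *_OP(name, ...) macros above are the two halves of an X-macro pattern: one macro enumerates the operations once, and each call site supplies a V that stamps out a handler, an enum value, or a table row per entry. A self-contained miniature (all names invented):

#include <cstdio>

#define FOREACH_MINI_BINOP(V) \
  V(Add, +)                   \
  V(Sub, -)                   \
  V(Mul, *)

// Expansion 1: one function per list entry.
#define DEFINE_MINI_BINOP(name, op) \
  int Mini##name(int a, int b) { return a op b; }
FOREACH_MINI_BINOP(DEFINE_MINI_BINOP)
#undef DEFINE_MINI_BINOP

// Expansion 2: a dispatch table built from the same list.
using MiniFn = int (*)(int, int);
#define TABLE_ENTRY(name, op) Mini##name,
constexpr MiniFn kMiniTable[] = {FOREACH_MINI_BINOP(TABLE_ENTRY)};
#undef TABLE_ENTRY

int main() {
  for (MiniFn fn : kMiniTable) std::printf("%d\n", fn(6, 3));
}

This keeps the opcode list, the handler definitions, and the dispatch table in lockstep from a single source of truth.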
V8_BASE_EXPORT constexpr uint64_t RoundUpToPowerOfTwo64(uint64_t value)
Definition bits.h:235
constexpr unsigned CountPopulation(T value)
Definition bits.h:26
static V ReadUnalignedValue(Address p)
Definition memory.h:28
int16_t MulWithWraparound(int16_t a, int16_t b)
void CallOnce(OnceType *once, std::function< void()> init_func)
Definition once.h:90
constexpr bool IsInBounds(T index, T length, T max)
Definition bounds.h:49
signed_type NegateWithWraparound(signed_type a)
static void WriteUnalignedValue(Address p, V value)
Definition memory.h:41
T Divide(T x, T y)
WordWithBits< 128 > Simd128
Definition index.h:236
void SetLandingPad(uintptr_t landing_pad)
TH_DISABLE_ASAN bool IsThreadInWasm()
static void Populate(HeapType *unfinished_type, const WasmModule *module)
RegMode GetRegMode(ValueKind kind)
static ValueType value_type()
static const size_t kSlotSize
const char * GetOperatorModeString(OperatorMode mode)
PWasmOp * kInstructionTable[kInstructionTableSize]
INSTRUCTION_HANDLER_FUNC TrapMemOutOfBounds(const uint8_t *code, uint32_t *sp, WasmInterpreterRuntime *wasm_runtime, int64_t r0, double fp0)
constexpr uint32_t kBranchOnCastDataTargetTypeBitSize
int64_t r0
constexpr uint64_t kFloat64SignBitMask
constexpr IndependentValueType kWasmF32
constexpr IndependentHeapType kWasmAnyRef
void InitInstructionTableOnce(Isolate *isolate)
TypeCanonicalizer * GetTypeCanonicalizer()
WasmInterpreterRuntime * wasm_runtime
constexpr IndependentHeapType kWasmExternRef
constexpr IndependentValueType kWasmI32
double fp0
DirectHandle< Object > WasmRef
static const ptrdiff_t kCodeOffsetSize
constexpr IndependentHeapType kWasmRefI31
InstructionHandler s_unwind_code
constexpr IndependentHeapType kWasmVoid
constexpr uint32_t kFloat32SignBitMask
V8_INLINE bool IsSubtypeOf(ValueType subtype, ValueType supertype, const WasmModule *sub_module, const WasmModule *super_module)
constexpr int value_kind_size(ValueKind kind)
constexpr IndependentHeapType kWasmBottom
static constexpr uint32_t kInstructionTableSize
void InitTrapHandlersOnce(Isolate *isolate)
const char * GetRegModeString(RegMode reg_mode)
Signature< ValueType > FunctionSig
constexpr IndependentValueType kWasmF64
constexpr IndependentValueType kWasmI64
static int StructFieldOffset(const StructType *struct_type, int field_index)
V8_INLINE IndirectHandle< T > handle(Tagged< T > object, Isolate *isolate)
Definition handles-inl.h:72
bool SetPermissions(v8::PageAllocator *page_allocator, void *address, size_t size, PageAllocator::Permission access)
constexpr int kTaggedSize
Definition globals.h:542
Wide MultiplyLong(Narrow a, Narrow b)
Definition utils.h:193
constexpr int kSimd128Size
Definition globals.h:706
v8::PageAllocator * GetPlatformPageAllocator()
Definition allocation.cc:66
@ SKIP_WRITE_BARRIER
Definition objects.h:52
@ UPDATE_WRITE_BARRIER
Definition objects.h:55
void EncodeI32ExceptionValue(DirectHandle< FixedArray > encoded_values, uint32_t *encoded_index, uint32_t value)
void * AllocatePages(v8::PageAllocator *page_allocator, void *hint, size_t size, size_t alignment, PageAllocator::Permission access)
T JSMax(T x, T y)
Definition utils.h:75
V8_INLINE constexpr bool IsSmi(TaggedImpl< kRefType, StorageType > obj)
Definition objects.h:665
Address Tagged_t
Definition globals.h:547
V8_INLINE DirectHandle< T > direct_handle(Tagged< T > object, Isolate *isolate)
T SaturateRoundingQMul(T a, T b)
Definition utils.h:173
constexpr int U
constexpr int S
Tagged< MaybeWeak< T > > MakeWeak(Tagged< T > value)
Definition tagged.h:893
Wide AddLong(Narrow a, Narrow b)
Definition utils.h:208
const int kHeapObjectTag
Definition v8-internal.h:72
V8_EXPORT_PRIVATE FlagValues v8_flags
V8_EXPORT_PRIVATE constexpr int ElementSizeLog2Of(MachineRepresentation)
float DoubleToFloat32(double x)
T SaturateAdd(T a, T b)
Definition utils.h:131
T RoundingAverageUnsigned(T a, T b)
Definition utils.h:220
T SaturateSub(T a, T b)
Definition utils.h:152
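SaturateAdd() and SaturateSub() above clamp to the destination type's range instead of wrapping, which is the behavior the saturating SIMD opcodes (e.g. i8x16.add_sat_s) require. A sketch of the signed case, assuming two's complement and a wider intermediate type (an illustration, not the utils.h implementation):

#include <cstdint>
#include <cstdio>
#include <limits>

template <typename T>
T SaturateAddSketch(T a, T b) {
  using Wide = int64_t;  // wide enough for the narrow SIMD lane types
  Wide sum = static_cast<Wide>(a) + static_cast<Wide>(b);
  if (sum > std::numeric_limits<T>::max()) return std::numeric_limits<T>::max();
  if (sum < std::numeric_limits<T>::min()) return std::numeric_limits<T>::min();
  return static_cast<T>(sum);
}

int main() {
  std::printf("%d\n", SaturateAddSketch<int8_t>(120, 20));    // 127, not -116
  std::printf("%d\n", SaturateAddSketch<int8_t>(-120, -20));  // -128
}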
constexpr uint32_t kSlotsZapValue
Definition globals.h:1014
bool is_signed(Condition cond)
T JSMin(T x, T y)
Definition utils.h:84
void FreePages(v8::PageAllocator *page_allocator, void *address, const size_t size)
void EncodeI64ExceptionValue(DirectHandle< FixedArray > encoded_values, uint32_t *encoded_index, uint64_t value)
Tagged< To > Cast(Tagged< From > value, const v8::SourceLocation &loc=INIT_SOURCE_LOCATION_IN_DEBUG)
Definition casting.h:150
#define CONDITIONAL_WRITE_BARRIER(object, offset, value, mode)
#define V8_DECLARE_ONCE(NAME)
Definition once.h:72
#define SHIFT_CASE(from, to)
#define SPLAT_CASE(from, to)
#define FATAL(...)
Definition logging.h:47
#define DCHECK_LE(v1, v2)
Definition logging.h:490
#define CHECK_LE(lhs, rhs)
#define DCHECK_NOT_NULL(val)
Definition logging.h:492
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define DCHECK_GT(v1, v2)
Definition logging.h:487
#define arraysize(array)
Definition macros.h:67
DirectHandle< Object > GetCaughtException(Isolate *isolate, uint32_t catch_block_index) const
void SetCaughtException(Isolate *isolate, uint32_t catch_block_index, DirectHandle< Object > exception)
void DisposeCaughtExceptionsArray(Isolate *isolate)
static constexpr ModuleTypeIndex Invalid()
Definition value-type.h:73
constexpr bool valid() const
Definition value-type.h:58
void SetDelegated(BlockIndex delegate_try_idx)
std::vector< CatchHandler > catch_handlers
std::vector< WasmMemory > memories
const WasmTagSig * sig
struct v8::internal::wasm::WasmInstruction::Optional::Block block
struct v8::internal::wasm::WasmInstruction::Optional::BrTable br_table
struct v8::internal::wasm::WasmInstruction::Optional::TableInit table_init
struct v8::internal::wasm::WasmInstruction::Optional::GC_ArrayNewFixed gc_array_new_fixed
struct v8::internal::wasm::WasmInstruction::Optional::IndirectCall indirect_call
struct v8::internal::wasm::WasmInstruction::Optional::GC_FieldImmediate gc_field_immediate
struct v8::internal::wasm::WasmInstruction::Optional::GC_ArrayCopy gc_array_copy
struct v8::internal::wasm::WasmInstruction::Optional::GC_HeapTypeImmediate gc_heap_type_immediate
struct v8::internal::wasm::WasmInstruction::Optional::TableCopy table_copy
struct v8::internal::wasm::WasmInstruction::Optional::GC_ArrayNewOrInitData gc_array_new_or_init_data
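The Optional members listed above (block, br_table, indirect_call, ...) suggest that each decoded WasmInstruction carries a per-opcode immediate payload in a union, so an instruction pays for only one payload regardless of opcode. A sketch of that shape (fields invented):

#include <cstdint>
#include <cstdio>

struct MiniInstruction {
  uint8_t opcode;
  union Optional {
    struct Block { uint32_t sig_index; } block;
    struct BrTable { uint32_t table_count; } br_table;
    uint32_t index;  // plain immediate for get/set-style opcodes
  } optional;
};

int main() {
  MiniInstruction instr{/*opcode=*/0x0e, {}};  // e.g. br_table
  instr.optional.br_table = {3};
  std::printf("%u\n", instr.optional.br_table.table_count);
}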
#define V8_LIKELY(condition)
Definition v8config.h:661
#define V8_UNLIKELY(condition)
Definition v8config.h:660
const wasm::WasmModule * module_
#define DEFINE_UNOP(name, ctype, reg, op, type)
#define FOREACH_I64_CONVERT_FROM_FLOAT_UNOP(V)
#define FOREACH_ATOMIC_COMPARE_EXCHANGE_OP(V)
#define CONVERT_CASE(op, src_type, name, dst_type, count, start_index, ctype, expr)
#define DEFINE_BINOP(name, ctype, reg, op, type)
#define EXTRACT_LANE_EXTEND_CASE(format, stype, name, sign, extended_type)
#define LANE(i, type)
#define INLINED_TRAP(trap_reason)
#define EXECUTE_UNOP(name, ctype, reg, op, type)
#define FOREACH_ATOMIC_BINOP(V)
#define FOREACH_REM_BINOP(V)
#define FOREACH_ATOMIC_STORE_OP(V)
#define PACK_CASE(op, src_type, name, dst_type, count, dst_ctype)
#define EMIT_MEM64_INSTR_HANDLER(name, mem64_name, is_memory64)
#define FOREACH_UNSIGNED_DIV_BINOP(V)
#define START_EMIT_INSTR_HANDLER()
#define EXT_ADD_PAIRWISE_CASE(op)
#define FOREACH_ADDITIONAL_CONVERT_UNOP(V)
#define FOREACH_ATOMIC_LOAD_OP(V)
#define REDUCTION_CASE(op, name, stype, count)
#define STORE_CASE(name, ctype, mtype, rep, type)
#define FOREACH_OTHER_CONVERT_UNOP(V)
#define FOREACH_CONVERT_UNOP(V)
#define QFM_CASE(op, name, stype, count, operation)
#define FOREACH_REINTERPRET_UNOP(V)
#define FOREACH_I32_CONVERT_FROM_FLOAT_UNOP(V)
#define EXT_MUL_CASE(op)
#define SELECT_CASE(op)
#define FOREACH_BITS_UNOP(V)
#define TRAP(trap_reason)
#define EMIT_INSTR_HANDLER(name)
#define LOAD_SPLAT_CASE(op)
#define DEFINE_REG_BINOP(name, from_ctype, from_type, to_ctype, to_type, op)
#define START_EMIT_INSTR_HANDLER_WITH_ID(name)
#define FOREACH_SIGNED_DIV_BINOP(V)
#define CMPOP_CASE(op, name, stype, out_stype, count, expr)
#define END_EMIT_INSTR_HANDLER()
#define FOREACH_TRUNCSAT_UNOP(V)
#define EMIT_INSTR_HANDLER_WITH_PC(name, pc)
#define STORE_LANE_CASE(op)
#define LOAD_LANE_CASE(op)
#define EXTRACT_LANE_CASE(format, stype, op_type, name)
#define BITMASK_CASE(op, name, stype, count)
#define LOAD_ZERO_EXTEND_CASE(op, load_type)
#define FOREACH_ARITHMETIC_BINOP(V)
#define DECODE_UNOP(name, from_ctype, from_type, from_reg, to_ctype, to_type, to_reg)
#define EXECUTE_BINOP(name, ctype, reg, op, type)
#define FOREACH_TRAPPING_BINOP(V)
#define ITEM_ENUM_DEFINE(name)
#define FOREACH_SIMPLE_UNOP(V)
#define FOREACH_MORE_BINOP(V)
#define EMIT_MEM64_INSTR_HANDLER_WITH_PC(name, mem64_name, is_memory64, pc)
#define FOREACH_COMPARISON_BINOP(V)
#define ATOMIC_BINOP(name, Type, ctype, type, op_ctype, op_type, operation)
#define FOREACH_EXTENSION_UNOP(V)
#define LOAD_CASE(name, ctype, mtype, rep, type)
#define LOAD_EXTEND_CASE(op)
#define REPLACE_LANE_CASE(format, name, stype, ctype, op_type)
#define UNOP_CASE(op, name, stype, count, expr)
#define DEFINE_INSTR_HANDLER(name)
#define MUSTTAIL
#define INSTRUCTION_HANDLER_FUNC
#define ZONE_NAME
Definition zone.h:22
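Finally, MUSTTAIL, INSTRUCTION_HANDLER_FUNC, and kInstructionTable point at the dispatch technique that ties the handler list together: every handler finishes by tail-calling the next handler out of the instruction table, so execution threads through the bytecode without ever returning or growing the native stack. A compilable miniature (the real macros expand differently, and [[clang::musttail]] requires clang):

#include <cstdint>
#include <cstdio>

struct Rt {};
using Op = void (*)(const uint8_t* code, uint32_t* sp, Rt* rt, int64_t r0,
                    double fp0);
extern Op kTable[];

// Each handler does its work, then dispatches on the next bytecode byte with
// a guaranteed tail call.
#define NEXT(code, sp, rt, r0, fp0) \
  [[clang::musttail]] return kTable[*(code)]((code) + 1, sp, rt, r0, fp0)

void op_Halt(const uint8_t* code, uint32_t* sp, Rt* rt, int64_t r0,
             double fp0) {
  std::printf("top of stack: %u\n", sp[0]);
}

void op_Inc(const uint8_t* code, uint32_t* sp, Rt* rt, int64_t r0,
            double fp0) {
  sp[0] += 1;
  NEXT(code, sp, rt, r0, fp0);
}

Op kTable[] = {op_Halt, op_Inc};

int main() {
  const uint8_t bytecode[] = {1, 1, 0};  // Inc, Inc, Halt
  uint32_t stack[1] = {40};
  Rt rt;
  kTable[bytecode[0]](bytecode + 1, stack, &rt, 0, 0.0);  // prints 42
}

Guaranteed tail calls are what make the one-function-per-opcode design viable: without them, each dispatched handler would add a native stack frame.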