v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
liftoff-assembler-loong64-inl.h
1// Copyright 2021 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_WASM_BASELINE_LOONG64_LIFTOFF_ASSEMBLER_LOONG64_INL_H_
6#define V8_WASM_BASELINE_LOONG64_LIFTOFF_ASSEMBLER_LOONG64_INL_H_
7
18
19namespace v8::internal::wasm {
20
21namespace liftoff {
22
23// Liftoff Frames.
24//
25// slot Frame
26// +--------------------+---------------------------
27// n+4 | optional padding slot to keep the stack 16 byte aligned.
28// n+3 | parameter n |
29// ... | ... |
30// 4 | parameter 1 | or parameter 2
31// 3 | parameter 0 | or parameter 1
32// 2 | (result address) | or parameter 0
33// -----+--------------------+---------------------------
34// 1 | return addr (ra) |
35// 0 | previous frame (fp)|
36// -----+--------------------+ <-- frame ptr (fp)
37// -1 | StackFrame::WASM |
38// -2 | instance |
39// -3 | feedback vector |
40// -----+--------------------+---------------------------
41// -4 | slot 0 | ^
42// -5 | slot 1 | |
43// | | Frame slots
44// | | |
45// | | v
46// | optional padding slot to keep the stack 16 byte aligned.
47// -----+--------------------+ <-- stack ptr (sp)
48//
49
50inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
51
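The diagram and GetStackSlot() above encode the rule that Liftoff spill slots live at negative byte offsets from fp. Below is a minimal standalone sketch of that addressing rule; it is plain C++, not V8 code, and the four-slot fixed frame is simply read off the diagram (marker, instance, feedback vector, then Wasm slot 0 at frame index -4):

    #include <cstdint>
    #include <cstdio>

    // Per the diagram: fp sits above the {marker, instance, feedback vector}
    // slots, and Wasm spill slot 0 is at frame index -4. GetStackSlot simply
    // negates a positive byte offset: address = fp - offset.
    constexpr int kSlotSize = 8;  // one system pointer per frame index

    uint64_t SlotAddress(uint64_t fp, int byte_offset) { return fp - byte_offset; }

    int main() {
      const uint64_t fp = 0x7fff0000;  // pretend frame pointer value
      for (int wasm_slot = 0; wasm_slot < 3; ++wasm_slot) {
        int byte_offset = (4 + wasm_slot) * kSlotSize;  // frame index -4, -5, -6
        std::printf("wasm slot %d -> fp - %d = %#llx\n", wasm_slot, byte_offset,
                    static_cast<unsigned long long>(SlotAddress(fp, byte_offset)));
      }
      return 0;
    }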
54}
55
56template <typename T>
57inline MemOperand GetMemOp(LiftoffAssembler* assm, Register addr,
58 Register offset, T offset_imm,
59 bool i64_offset = false, unsigned shift_amount = 0) {
60 if (offset != no_reg) {
61 if (!i64_offset) {
62 assm->bstrpick_d(kScratchReg, offset, 31, 0);
63 offset = kScratchReg;
64 }
65 if (shift_amount != 0) {
66 assm->alsl_d(kScratchReg, offset, addr, shift_amount);
67 } else {
68 assm->add_d(kScratchReg, offset, addr);
69 }
70 addr = kScratchReg;
71 }
72 if (is_int31(offset_imm)) {
73 int32_t offset_imm32 = static_cast<int32_t>(offset_imm);
74 return MemOperand(addr, offset_imm32);
75 } else {
76 assm->li(kScratchReg2, Operand(offset_imm));
77 assm->add_d(kScratchReg2, addr, kScratchReg2);
78 return MemOperand(kScratchReg2, 0);
79 }
80}
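GetMemOp folds an optional index register (zero-extended when it holds a 32-bit offset, and optionally scaled) plus an immediate into a single base-plus-immediate operand, materializing the immediate through a scratch register when it does not fit. A rough standalone model of that decision, using the same is_int31 cutoff (ordinary C++, no real registers; MemOp is a stand-in for MemOperand):

    #include <cstdint>
    #include <iostream>

    struct MemOp { uint64_t base; int32_t imm; };  // toy stand-in for MemOperand

    // Matches is_int31: the immediate must fit a signed 31-bit range to stay in
    // the operand; otherwise it is added to the base up front.
    bool FitsSmallImm(int64_t imm) {
      return imm >= -(int64_t{1} << 30) && imm < (int64_t{1} << 30);
    }

    MemOp MakeMemOp(uint64_t base, uint64_t index, unsigned shift, int64_t imm) {
      if (index != 0) base += index << shift;         // the alsl_d/add_d step
      if (FitsSmallImm(imm)) return {base, static_cast<int32_t>(imm)};
      return {base + imm, 0};                         // folded via a scratch add
    }

    int main() {
      MemOp a = MakeMemOp(0x1000, 0x20, 3, 16);             // small imm kept
      MemOp b = MakeMemOp(0x1000, 0, 0, int64_t{1} << 40);  // large imm folded
      std::cout << std::hex << a.base << "+" << a.imm << "  "
                << b.base << "+" << b.imm << "\n";
    }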
81
82inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
83 ValueKind kind) {
84 switch (kind) {
85 case kI16:
86 assm->Ld_h(dst.gp(), src);
87 break;
88 case kI32:
89 assm->Ld_w(dst.gp(), src);
90 break;
91 case kI64:
92 case kRef:
93 case kRefNull:
94 assm->Ld_d(dst.gp(), src);
95 break;
96 case kF32:
97 assm->Fld_s(dst.fp(), src);
98 break;
99 case kF64:
100 assm->Fld_d(dst.fp(), src);
101 break;
102 case kS128:
103 UNREACHABLE();
104 break;
105 default:
106 UNREACHABLE();
107 }
108}
109
110inline void Store(LiftoffAssembler* assm, MemOperand dst, LiftoffRegister src,
111 ValueKind kind) {
112 switch (kind) {
113 case kI16:
114 assm->St_h(src.gp(), dst);
115 break;
116 case kI32:
117 assm->St_w(src.gp(), dst);
118 break;
119 case kI64:
120 case kRefNull:
121 case kRef:
122 assm->St_d(src.gp(), dst);
123 break;
124 case kF32:
125 assm->Fst_s(src.fp(), dst);
126 break;
127 case kF64:
128 assm->Fst_d(src.fp(), dst);
129 break;
130 default:
131 UNREACHABLE();
132 }
133}
134
135inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
136 LiftoffRegister src, ValueKind kind) {
137 MemOperand dst(base, offset);
138 Store(assm, dst, src, kind);
139}
140
141inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind) {
142 switch (kind) {
143 case kI32:
144 assm->addi_d(sp, sp, -kSystemPointerSize);
145 assm->St_w(reg.gp(), MemOperand(sp, 0));
146 break;
147 case kI64:
148 case kRefNull:
149 case kRef:
150 assm->Push(reg.gp());
151 break;
152 case kF32:
153 assm->addi_d(sp, sp, -kSystemPointerSize);
154 assm->Fst_s(reg.fp(), MemOperand(sp, 0));
155 break;
156 case kF64:
157 assm->addi_d(sp, sp, -kSystemPointerSize);
158 assm->Fst_d(reg.fp(), MemOperand(sp, 0));
159 break;
160 case kS128:
161 UNREACHABLE();
162 break;
163 default:
164 UNREACHABLE();
165 }
166}
167
168inline void StoreToMemory(LiftoffAssembler* assm, MemOperand dst,
169 const LiftoffAssembler::VarState& src) {
170 if (src.is_reg()) {
171 Store(assm, dst, src.reg(), src.kind());
172 return;
173 }
174
175 UseScratchRegisterScope temps(assm);
176 Register temp = temps.Acquire();
177 if (src.is_const()) {
178 if (src.i32_const() == 0) {
179 temp = zero_reg;
180 } else {
181 assm->li(temp, static_cast<int64_t>(src.i32_const()));
182 }
183 } else {
184 DCHECK(src.is_stack());
185 if (value_kind_size(src.kind()) == 4) {
186 assm->Ld_w(temp, liftoff::GetStackSlot(src.offset()));
187 } else {
188 assm->Ld_d(temp, liftoff::GetStackSlot(src.offset()));
189 }
190 }
191
192 if (value_kind_size(src.kind()) == 4) {
193 assm->St_w(temp, dst);
194 } else {
195 DCHECK_EQ(8, value_kind_size(src.kind()));
196 assm->St_d(temp, dst);
197 }
198}
199
202
203} // namespace liftoff
204
205int LiftoffAssembler::PrepareStackFrame() {
206 int offset = pc_offset();
207 // When the constant that represents the size of the stack frame can't be
208 // represented as a 16-bit immediate, we need three instructions to add it to
209 // sp, so we reserve space for this case.
210 addi_d(sp, sp, 0);
211 nop();
212 nop();
213 return offset;
214}
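PrepareStackFrame only reserves space: it records the current pc and emits a placeholder addi_d plus two nops, so that PatchPrepareStackFrame (further down) can later rewrite up to three instructions once the final frame size is known. A toy illustration of that reserve-then-patch idea over a plain instruction buffer (all names and "encodings" below are invented for illustration):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    std::vector<uint32_t> code;  // toy instruction stream, one word per instruction

    int PrepareFrameSketch() {
      int offset = static_cast<int>(code.size());  // like pc_offset()
      code.push_back(0);  // placeholder for "addi_d sp, sp, -frame_size"
      code.push_back(0);  // reserved slot (nop), in case three instructions are needed
      code.push_back(0);  // reserved slot (nop)
      return offset;
    }

    void PatchFrameSketch(int offset, int frame_size) {
      // Once the frame size is known, overwrite the reserved words in place.
      // A real implementation picks 1-3 instructions depending on whether the
      // size fits an immediate; here we just record the size symbolically.
      code[offset] = static_cast<uint32_t>(frame_size);
    }

    int main() {
      int off = PrepareFrameSketch();
      PatchFrameSketch(off, 128);
      std::printf("patched word at offset %d: %u\n", off, code[off]);
    }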
215
216void LiftoffAssembler::CallFrameSetupStub(int declared_function_index) {
217// The standard library used by gcc tryjobs does not consider `std::find` to be
218// `constexpr`, so wrap it in a `#ifdef __clang__` block.
219#ifdef __clang__
220 static_assert(std::find(std::begin(wasm::kGpParamRegisters),
221 std::end(wasm::kGpParamRegisters),
222 kLiftoffFrameSetupFunctionReg) ==
223 std::end(wasm::kGpParamRegisters));
224#endif
225
226 // On LOONG64, we must push at least {ra} before calling the stub, otherwise
227 // it would get clobbered with no possibility to recover it. So just set
228 // up the frame here.
229 EnterFrame(StackFrame::WASM);
230 LoadConstant(LiftoffRegister(kLiftoffFrameSetupFunctionReg),
231 WasmValue(declared_function_index));
232 CallBuiltin(Builtin::kWasmLiftoffFrameSetup);
233}
234
235void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
236 int stack_param_delta) {
237 UseScratchRegisterScope temps(this);
238 Register scratch = temps.Acquire();
239
240 // Push the return address and frame pointer to complete the stack frame.
241 Ld_d(scratch, MemOperand(fp, 8));
242 Push(scratch);
243 Ld_d(scratch, MemOperand(fp, 0));
244 Push(scratch);
245
246 // Shift the whole frame upwards.
247 int slot_count = num_callee_stack_params + 2;
248 for (int i = slot_count - 1; i >= 0; --i) {
249 Ld_d(scratch, MemOperand(sp, i * 8));
250 St_d(scratch, MemOperand(fp, (i - stack_param_delta) * 8));
251 }
252
253 // Set the new stack and frame pointer.
254 addi_d(sp, fp, -stack_param_delta * 8);
255 Pop(ra, fp);
256}
257
259
260void LiftoffAssembler::PatchPrepareStackFrame(
261 int offset, SafepointTableBuilder* safepoint_table_builder,
262 bool feedback_vector_slot, size_t stack_param_slots) {
263 // The frame_size includes the frame marker and the instance slot. Both are
264 // pushed as part of frame construction, so we don't need to allocate memory
265 // for them anymore.
266 int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
267 // The frame setup builtin also pushes the feedback vector.
268 if (feedback_vector_slot) {
269 frame_size -= kSystemPointerSize;
270 }
271
272 // We can't run out of space; just pass anything big enough to not cause the
273 // assembler to try to grow the buffer.
274 constexpr int kAvailableSpace = 256;
275 MacroAssembler patching_assembler(
276 nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
277 ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
278
279 if (V8_LIKELY(frame_size < 4 * KB)) {
280 // This is the standard case for small frames: just subtract from SP and be
281 // done with it.
282 patching_assembler.Add_d(sp, sp, Operand(-frame_size));
283 return;
284 }
285
286 // The frame size is bigger than 4KB, so we might overflow the available stack
287 // space if we first allocate the frame and then do the stack check (we will
288 // need some remaining stack space for throwing the exception). That's why we
289 // check the available stack space before we allocate the frame. To do this we
290 // replace the {__ Add_d(sp, sp, -frame_size)} with a jump to OOL code that
291 // does this "extended stack check".
292 //
293 // The OOL code can simply be generated here with the normal assembler,
294 // because all other code generation, including OOL code, has already finished
295 // when {PatchPrepareStackFrame} is called. The function prologue then jumps
296 // to the current {pc_offset()} to execute the OOL code for allocating the
297 // large frame.
298 // Emit the unconditional branch in the function prologue (from {offset} to
299 // {pc_offset()}).
300
301 int imm32 = pc_offset() - offset;
302 CHECK(is_int26(imm32));
303 patching_assembler.b(imm32 >> 2);
304
305 // If the frame is bigger than the stack, we throw the stack overflow
306 // exception unconditionally. Thereby we can avoid the integer overflow
307 // check in the condition code.
308 RecordComment("OOL: stack check for large frame");
309 Label continuation;
310 if (frame_size < v8_flags.stack_size * 1024) {
311 Register stack_limit = kScratchReg;
313 Add_d(stack_limit, stack_limit, Operand(frame_size));
314 Branch(&continuation, uge, sp, Operand(stack_limit));
315 }
316
317 if (v8_flags.experimental_wasm_growable_stacks) {
318 LiftoffRegList regs_to_save;
320 regs_to_save.set(WasmHandleStackOverflowDescriptor::FrameBaseRegister());
321 for (auto reg : kGpParamRegisters) regs_to_save.set(reg);
322 for (auto reg : kFpParamRegisters) regs_to_save.set(reg);
325 Add_d(WasmHandleStackOverflowDescriptor::FrameBaseRegister(), fp,
326 Operand(stack_param_slots * kStackSlotSize +
328 CallBuiltin(Builtin::kWasmHandleStackOverflow);
330 } else {
331 Call(static_cast<Address>(Builtin::kWasmStackOverflow),
333 // The call will not return; just define an empty safepoint.
334 safepoint_table_builder->DefineSafepoint(this);
335 if (v8_flags.debug_code) stop();
336 }
337
339
340 // Now allocate the stack space. Note that this might do more than just
341 // decrementing the SP;
342 Add_d(sp, sp, Operand(-frame_size));
343
344 // Jump back to the start of the function, from {pc_offset()} to
345 // right after the reserved space for the {__ Add_d(sp, sp, -framesize)}
346 // (which is a Branch now).
347 int func_start_offset = offset + 3 * kInstrSize;
348 imm32 = func_start_offset - pc_offset();
349 CHECK(is_int26(imm32));
350 b(imm32 >> 2);
351}
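For frames of 4 KB or more, the out-of-line path above checks that enough stack remains (and otherwise throws or grows the stack) before it finally moves sp. The ordering is the whole point: the check runs while there is still headroom for the failure path. A purely illustrative C++ rendering of that ordering (limits and sizes are made up):

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <stdexcept>

    // Check the remaining stack *before* allocating a large frame, so there is
    // still room left to run the stack-overflow handling itself.
    void AllocateLargeFrame(uintptr_t& sp, uintptr_t stack_limit, size_t frame_size) {
      if (sp < stack_limit + frame_size) {
        throw std::runtime_error("wasm stack overflow");  // the OOL trap path
      }
      sp -= frame_size;  // safe: the equivalent of Add_d(sp, sp, -frame_size)
    }

    int main() {
      uintptr_t sp = 1u << 20, limit = 1u << 16;
      AllocateLargeFrame(sp, limit, 8 * 1024);
      std::cout << "sp after allocation: " << sp << "\n";
      try {
        AllocateLargeFrame(sp, limit, 2u << 20);
      } catch (const std::exception& e) {
        std::cout << e.what() << "\n";
      }
    }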
352
354
356
357// static
360}
361
362int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
363 switch (kind) {
364 case kS128:
365 return value_kind_size(kind);
366 default:
367 return kStackSlotSize;
368 }
369}
370
371bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
372 return kind == kS128 || is_reference(kind);
373}
374
375void LiftoffAssembler::CheckTierUp(int declared_func_index, int budget_used,
376 Label* ool_label,
377 const FreezeCacheState& frozen) {
378 Register budget_array = kScratchReg;
379
381 if (instance_data == no_reg) {
382 instance_data = budget_array; // Reuse the scratch register.
384 }
385
386 constexpr int kArrayOffset = wasm::ObjectAccess::ToTagged(
387 WasmTrustedInstanceData::kTieringBudgetArrayOffset);
388 Ld_d(budget_array, MemOperand(instance_data, kArrayOffset));
389
390 int budget_arr_offset = kInt32Size * declared_func_index;
391
392 Register budget = kScratchReg2;
393 MemOperand budget_addr(budget_array, budget_arr_offset);
394 Ld_w(budget, budget_addr);
395 Sub_w(budget, budget, budget_used);
396 St_w(budget, budget_addr);
397
398 Branch(ool_label, less, budget, Operand(zero_reg));
399}
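CheckTierUp loads the per-function tiering budget from the instance data, subtracts the budget consumed so far, stores it back, and branches to the out-of-line label once the value drops below zero. A minimal standalone model of that counter logic (plain C++; the array and the boolean trigger are stand-ins, not V8 APIs):

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // One signed budget per declared function, like the tiering budget array.
    std::vector<int32_t> tiering_budget = {1000, 1000};

    // Returns true when the caller should take the tier-up (OOL) path.
    bool CheckTierUpSketch(int declared_func_index, int budget_used) {
      int32_t& budget = tiering_budget[declared_func_index];
      budget -= budget_used;  // the Sub_w / St_w pair above
      return budget < 0;      // Branch(ool_label, less, budget, Operand(zero_reg))
    }

    int main() {
      int checks = 0;
      while (!CheckTierUpSketch(0, 120)) ++checks;
      std::cout << "function 0 requested tier-up after " << checks + 1 << " checks\n";
    }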
400
402 if (!v8_flags.experimental_wasm_growable_stacks) {
403 return fp;
404 }
405
406 LiftoffRegister old_fp = GetUnusedRegister(RegClass::kGpReg, {});
407 Label done, call_runtime;
410 &call_runtime, eq, old_fp.gp(),
411 Operand(StackFrame::TypeToMarker(StackFrame::WASM_SEGMENT_START)));
412 mov(old_fp.gp(), fp);
413 jmp(&done);
414
415 bind(&call_runtime);
416 LiftoffRegList regs_to_save = cache_state()->used_registers;
420 CallCFunction(ExternalReference::wasm_load_old_fp(), 1);
421 if (old_fp.gp() != kReturnRegister0) {
422 mov(old_fp.gp(), kReturnRegister0);
423 }
425
426 bind(&done);
427 return old_fp.gp();
428}
429
431 Label done;
432 {
433 UseScratchRegisterScope temps{this};
434 Register scratch = temps.Acquire();
437 &done, ne, scratch,
438 Operand(StackFrame::TypeToMarker(StackFrame::WASM_SEGMENT_START)));
439 }
440 LiftoffRegList regs_to_save;
441 for (auto reg : kGpReturnRegisters) regs_to_save.set(reg);
442 for (auto reg : kFpReturnRegisters) regs_to_save.set(reg);
446 CallCFunction(ExternalReference::wasm_shrink_stack(), 1);
449 bind(&done);
450}
451
452void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
453 switch (value.type().kind()) {
454 case kI32:
455 MacroAssembler::li(reg.gp(), Operand(value.to_i32()));
456 break;
457 case kI64:
458 MacroAssembler::li(reg.gp(), Operand(value.to_i64()));
459 break;
460 case kF32:
461 MacroAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
462 break;
463 case kF64:
464 MacroAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
465 break;
466 default:
467 UNREACHABLE();
468 }
469}
470
473}
474
475void LiftoffAssembler::LoadTrustedPointer(Register dst, Register src_addr,
476 int offset, IndirectPointerTag tag) {
477 MemOperand src{src_addr, offset};
478 LoadTrustedPointerField(dst, src, tag);
479}
480
481void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
482 int offset, int size) {
483 DCHECK_LE(0, offset);
484 switch (size) {
485 case 1:
486 Ld_b(dst, MemOperand(instance, offset));
487 break;
488 case 4:
489 Ld_w(dst, MemOperand(instance, offset));
490 break;
491 case 8:
492 Ld_d(dst, MemOperand(instance, offset));
493 break;
494 default:
496 }
497}
498
499void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
500 Register instance,
501 int32_t offset) {
502 LoadTaggedField(dst, MemOperand(instance, offset));
503}
504
505void LiftoffAssembler::SpillInstanceData(Register instance) {
507}
508
510
511void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
512 Register offset_reg,
513 int32_t offset_imm,
514 uint32_t* protected_load_pc,
515 bool needs_shift) {
516 unsigned shift_amount = !needs_shift ? 0 : COMPRESS_POINTERS_BOOL ? 2 : 3;
517 MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm,
518 false, shift_amount);
519 LoadTaggedField(dst, src_op);
520
521 // Since LoadTaggedField might start with an instruction loading an immediate
522 // argument to a register, we have to compute the {protected_load_pc} after
523 // calling it.
524 // In case of compressed pointers, there is an additional instruction
525 // (pointer decompression) after the load.
526 uint8_t protected_instruction_offset_bias =
528 if (protected_load_pc) {
529 *protected_load_pc = pc_offset() - protected_instruction_offset_bias;
530 }
531}
532
533void LiftoffAssembler::LoadProtectedPointer(Register dst, Register src_addr,
534 int32_t offset_imm) {
535 LoadProtectedPointerField(dst, MemOperand{src_addr, offset_imm});
536}
537
538void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
539 int32_t offset_imm) {
540 MemOperand src_op = liftoff::GetMemOp(this, src_addr, no_reg, offset_imm);
541 Ld_d(dst, src_op);
542}
543
544#ifdef V8_ENABLE_SANDBOX
545void LiftoffAssembler::LoadCodeEntrypointViaCodePointer(Register dst,
546 Register src_addr,
547 int32_t offset_imm) {
548 MemOperand src_op = liftoff::GetMemOp(this, src_addr, no_reg, offset_imm);
549 MacroAssembler::LoadCodeEntrypointViaCodePointer(dst, src_op,
551}
552#endif
553
554void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
555 Register offset_reg,
556 int32_t offset_imm, Register src,
557 LiftoffRegList pinned,
558 uint32_t* protected_store_pc,
559 SkipWriteBarrier skip_write_barrier) {
560 UseScratchRegisterScope temps(this);
561 Operand offset_op =
562 offset_reg.is_valid() ? Operand(offset_reg) : Operand(offset_imm);
563 // For the write barrier (below), we cannot have both an offset register and
564 // an immediate offset. Add them to a 32-bit offset initially, but in a 64-bit
565 // register, because that's needed in the MemOperand below.
566 if (offset_reg.is_valid() && offset_imm) {
567 Register effective_offset = temps.Acquire();
568 Add_d(effective_offset, offset_reg, Operand(offset_imm));
569 offset_op = Operand(effective_offset);
570 }
571
572 if (offset_op.is_reg()) {
573 StoreTaggedField(src, MemOperand(dst_addr, offset_op.rm()));
574 } else {
575 StoreTaggedField(src, MemOperand(dst_addr, offset_imm));
576 }
577
578 // Since StoreTaggedField might start with an instruction loading an immediate
579 // argument to a register, we have to compute the {protected_store_pc} after
580 // calling it.
581 if (protected_store_pc) {
582 *protected_store_pc = pc_offset() - kInstrSize;
583 }
584
585 if (skip_write_barrier || v8_flags.disable_write_barriers) return;
586
587 Label exit;
589 kZero, &exit);
590 JumpIfSmi(src, &exit);
593 StubCallMode::kCallWasmRuntimeStub);
594 bind(&exit);
595}
596
597void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
598 Register offset_reg, uintptr_t offset_imm,
599 LoadType type, uint32_t* protected_load_pc,
600 bool is_load_mem, bool i64_offset,
601 bool needs_shift) {
602 BlockTrampolinePoolScope block_trampoline_pool(this);
603 unsigned shift_amount = needs_shift ? type.size_log_2() : 0;
604 MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm,
605 i64_offset, shift_amount);
606
607 switch (type.value()) {
608 case LoadType::kI32Load8U:
609 case LoadType::kI64Load8U:
610 Ld_bu(dst.gp(), src_op);
611 break;
612 case LoadType::kI32Load8S:
613 case LoadType::kI64Load8S:
614 Ld_b(dst.gp(), src_op);
615 break;
616 case LoadType::kI32Load16U:
617 case LoadType::kI64Load16U:
618 MacroAssembler::Ld_hu(dst.gp(), src_op);
619 break;
620 case LoadType::kI32Load16S:
621 case LoadType::kI64Load16S:
622 MacroAssembler::Ld_h(dst.gp(), src_op);
623 break;
624 case LoadType::kI64Load32U:
625 MacroAssembler::Ld_wu(dst.gp(), src_op);
626 break;
627 case LoadType::kI32Load:
628 case LoadType::kI64Load32S:
629 MacroAssembler::Ld_w(dst.gp(), src_op);
630 break;
631 case LoadType::kI64Load:
632 MacroAssembler::Ld_d(dst.gp(), src_op);
633 break;
634 case LoadType::kF32Load:
635 MacroAssembler::Fld_s(dst.fp(), src_op);
636 break;
637 case LoadType::kF32LoadF16:
639 break;
640 case LoadType::kF64Load:
641 MacroAssembler::Fld_d(dst.fp(), src_op);
642 break;
643 case LoadType::kS128Load:
644 UNREACHABLE();
645 break;
646 default:
647 UNREACHABLE();
648 }
649 // protected_load_pc should be the address of the load/store instruction.
650 // The MacroAssembler load/store may emit extra instructions to adjust the
651 // MemOperand, so use pc_offset() - kInstrSize to locate the memory instruction.
652 if (protected_load_pc) *protected_load_pc = pc_offset() - kInstrSize;
653}
654
655void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
656 uintptr_t offset_imm, LiftoffRegister src,
657 StoreType type, LiftoffRegList pinned,
658 uint32_t* protected_store_pc, bool is_store_mem,
659 bool i64_offset) {
660 BlockTrampolinePoolScope block_trampoline_pool(this);
661 MemOperand dst_op =
662 liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, i64_offset);
663
664 switch (type.value()) {
665 case StoreType::kI32Store8:
666 case StoreType::kI64Store8:
667 St_b(src.gp(), dst_op);
668 break;
669 case StoreType::kI32Store16:
670 case StoreType::kI64Store16:
671 MacroAssembler::St_h(src.gp(), dst_op);
672 break;
673 case StoreType::kI32Store:
674 case StoreType::kI64Store32:
675 MacroAssembler::St_w(src.gp(), dst_op);
676 break;
677 case StoreType::kI64Store:
678 MacroAssembler::St_d(src.gp(), dst_op);
679 break;
680 case StoreType::kF32Store:
681 MacroAssembler::Fst_s(src.fp(), dst_op);
682 break;
683 case StoreType::kF32StoreF16:
685 break;
686 case StoreType::kF64Store:
687 MacroAssembler::Fst_d(src.fp(), dst_op);
688 break;
689 case StoreType::kS128Store:
690 UNREACHABLE();
691 break;
692 default:
693 UNREACHABLE();
694 }
695 // protected_store_pc should be the address of the load/store instruction.
696 // The MacroAssembler load/store may emit extra instructions to adjust the
697 // MemOperand, so use pc_offset() - kInstrSize to locate the memory instruction.
698 if (protected_store_pc) *protected_store_pc = pc_offset() - kInstrSize;
699}
700
701void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
702 Register offset_reg, uintptr_t offset_imm,
703 LoadType type, LiftoffRegList pinned,
704 bool i64_offset) {
705 UseScratchRegisterScope temps(this);
706 MemOperand src_op =
707 liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm, i64_offset);
708 switch (type.value()) {
709 case LoadType::kI32Load8U:
710 case LoadType::kI64Load8U: {
711 Ld_bu(dst.gp(), src_op);
712 dbar(0);
713 return;
714 }
715 case LoadType::kI32Load16U:
716 case LoadType::kI64Load16U: {
717 Ld_hu(dst.gp(), src_op);
718 dbar(0);
719 return;
720 }
721 case LoadType::kI32Load: {
722 Ld_w(dst.gp(), src_op);
723 dbar(0);
724 return;
725 }
726 case LoadType::kI64Load32U: {
727 Ld_wu(dst.gp(), src_op);
728 dbar(0);
729 return;
730 }
731 case LoadType::kI64Load: {
732 Ld_d(dst.gp(), src_op);
733 dbar(0);
734 return;
735 }
736 default:
737 UNREACHABLE();
738 }
739}
740
741void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
742 uintptr_t offset_imm, LiftoffRegister src,
743 StoreType type, LiftoffRegList pinned,
744 bool i64_offset) {
745 UseScratchRegisterScope temps(this);
746 MemOperand dst_op =
747 liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, i64_offset);
748 switch (type.value()) {
749 case StoreType::kI64Store8:
750 case StoreType::kI32Store8: {
751 dbar(0);
752 St_b(src.gp(), dst_op);
753 return;
754 }
755 case StoreType::kI64Store16:
756 case StoreType::kI32Store16: {
757 dbar(0);
758 St_h(src.gp(), dst_op);
759 return;
760 }
761 case StoreType::kI64Store32:
762 case StoreType::kI32Store: {
763 dbar(0);
764 St_w(src.gp(), dst_op);
765 return;
766 }
767 case StoreType::kI64Store: {
768 dbar(0);
769 St_d(src.gp(), dst_op);
770 return;
771 }
772 default:
773 UNREACHABLE();
774 }
775}
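AtomicLoad and AtomicStore implement Wasm's sequentially consistent accesses with ordinary loads and stores bracketed by dbar(0) full barriers: a barrier after the load and a barrier before the store. The same shape written with portable C++ fences, as a sketch of the pattern rather than of the generated code:

    #include <atomic>
    #include <cstdint>
    #include <iostream>

    std::atomic<uint32_t> cell{0};

    uint32_t AtomicLoadSketch() {
      uint32_t v = cell.load(std::memory_order_relaxed);     // Ld_w
      std::atomic_thread_fence(std::memory_order_seq_cst);   // dbar(0) after the load
      return v;
    }

    void AtomicStoreSketch(uint32_t v) {
      std::atomic_thread_fence(std::memory_order_seq_cst);   // dbar(0) before the store
      cell.store(v, std::memory_order_relaxed);               // St_w
    }

    int main() {
      AtomicStoreSketch(42);
      std::cout << AtomicLoadSketch() << "\n";
    }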
776
777#define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, size, \
778 bin_instr, aligned) \
779 do { \
780 Label binop; \
781 andi(temp3, temp0, aligned); \
782 Sub_d(temp0, temp0, Operand(temp3)); \
783 slli_w(temp3, temp3, 3); \
784 dbar(0); \
785 bind(&binop); \
786 load_linked(temp1, MemOperand(temp0, 0)); \
787 ExtractBits(result.gp(), temp1, temp3, size, false); \
788 bin_instr(temp2, result.gp(), Operand(value.gp())); \
789 InsertBits(temp1, temp2, temp3, size); \
790 store_conditional(temp1, MemOperand(temp0, 0)); \
791 BranchShort(&binop, eq, temp1, Operand(zero_reg)); \
792 dbar(0); \
793 } while (0)
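ASSEMBLE_ATOMIC_BINOP_EXT handles 8- and 16-bit atomics that LL/SC cannot address directly: it aligns the address down to a word, loads the whole word, extracts the subword, applies the operation, re-inserts the bits, and retries the store-conditional until it succeeds. The same idea in portable C++, using a 32-bit compare-exchange loop in place of LL/SC (a sketch of the technique, not the V8 code path):

    #include <atomic>
    #include <cstdint>
    #include <iostream>

    std::atomic<uint32_t> word{0};  // the aligned word containing the subword

    // Atomically add `delta` to byte `byte_index` (0..3) of `word` using only
    // word-sized atomics - the software analogue of LL/SC plus
    // ExtractBits/InsertBits in the macro above. Returns the old byte value.
    uint8_t AtomicAddByte(unsigned byte_index, uint8_t delta) {
      const unsigned shift = byte_index * 8;  // like slli_w(temp3, temp3, 3)
      uint32_t old_word = word.load(std::memory_order_relaxed);
      for (;;) {
        uint8_t old_byte = static_cast<uint8_t>(old_word >> shift);
        uint8_t new_byte = static_cast<uint8_t>(old_byte + delta);
        uint32_t new_word = (old_word & ~(0xFFu << shift)) |
                            (static_cast<uint32_t>(new_byte) << shift);
        if (word.compare_exchange_weak(old_word, new_word,
                                       std::memory_order_seq_cst)) {
          return old_byte;  // the extracted pre-update value, like result.gp()
        }
        // old_word was refreshed by the failed CAS; retry, like the Sc_w loop.
      }
    }

    int main() {
      AtomicAddByte(1, 7);
      std::cout << std::hex << word.load() << "\n";  // prints 700
    }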
794
795#define ATOMIC_BINOP_CASE(name, inst32, inst64, opcode) \
796 void LiftoffAssembler::Atomic##name( \
797 Register dst_addr, Register offset_reg, uintptr_t offset_imm, \
798 LiftoffRegister value, LiftoffRegister result, StoreType type, \
799 bool i64_offset) { \
800 LiftoffRegList pinned{dst_addr, value, result}; \
801 if (offset_reg != no_reg) pinned.set(offset_reg); \
802 Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
803 Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
804 Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
805 Register temp3 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
806 MemOperand dst_op = \
807 liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, i64_offset); \
808 Add_d(temp0, dst_op.base(), dst_op.offset()); \
809 switch (type.value()) { \
810 case StoreType::kI64Store8: \
811 ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, 8, inst64, 7); \
812 break; \
813 case StoreType::kI32Store8: \
814 ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, 8, inst32, 3); \
815 break; \
816 case StoreType::kI64Store16: \
817 ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, 16, inst64, 7); \
818 break; \
819 case StoreType::kI32Store16: \
820 ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, 16, inst32, 3); \
821 break; \
822 case StoreType::kI64Store32: \
823 ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, 32, inst64, 7); \
824 break; \
825 case StoreType::kI32Store: \
826 am##opcode##_db_w(result.gp(), value.gp(), temp0); \
827 break; \
828 case StoreType::kI64Store: \
829 am##opcode##_db_d(result.gp(), value.gp(), temp0); \
830 break; \
831 default: \
832 UNREACHABLE(); \
833 } \
834 }
835
836ATOMIC_BINOP_CASE(Add, Add_w, Add_d, add)
837ATOMIC_BINOP_CASE(And, And, And, and)
838ATOMIC_BINOP_CASE(Or, Or, Or, or)
839ATOMIC_BINOP_CASE(Xor, Xor, Xor, xor)
840
841#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr) \
842 do { \
843 Label binop; \
844 dbar(0); \
845 bind(&binop); \
846 load_linked(result.gp(), MemOperand(temp0, 0)); \
847 bin_instr(temp1, result.gp(), Operand(value.gp())); \
848 store_conditional(temp1, MemOperand(temp0, 0)); \
849 BranchShort(&binop, eq, temp1, Operand(zero_reg)); \
850 dbar(0); \
851 } while (0)
852
853void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
854 uintptr_t offset_imm, LiftoffRegister value,
855 LiftoffRegister result, StoreType type,
856 bool i64_offset) {
857 LiftoffRegList pinned{dst_addr, value, result};
858 if (offset_reg != no_reg) pinned.set(offset_reg);
859 Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
860 Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
861 Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
862 Register temp3 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
863 MemOperand dst_op =
864 liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, i64_offset);
865 Add_d(temp0, dst_op.base(), dst_op.offset());
866 switch (type.value()) {
867 case StoreType::kI64Store8:
868 ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, 8, Sub_d, 7);
869 break;
870 case StoreType::kI32Store8:
871 ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, 8, Sub_w, 3);
872 break;
873 case StoreType::kI64Store16:
874 ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, 16, Sub_d, 7);
875 break;
876 case StoreType::kI32Store16:
877 ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, 16, Sub_w, 3);
878 break;
879 case StoreType::kI64Store32:
880 ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, 32, Sub_d, 7);
881 break;
882 case StoreType::kI32Store:
884 break;
885 case StoreType::kI64Store:
887 break;
888 default:
889 UNREACHABLE();
890 }
891}
892#undef ASSEMBLE_ATOMIC_BINOP
893#undef ASSEMBLE_ATOMIC_BINOP_EXT
894#undef ATOMIC_BINOP_CASE
895
896#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(load_linked, store_conditional, \
897 size, aligned) \
898 do { \
899 Label exchange; \
900 andi(temp1, temp0, aligned); \
901 Sub_d(temp0, temp0, Operand(temp1)); \
902 slli_w(temp1, temp1, 3); \
903 dbar(0); \
904 bind(&exchange); \
905 load_linked(temp2, MemOperand(temp0, 0)); \
906 ExtractBits(result.gp(), temp2, temp1, size, false); \
907 InsertBits(temp2, value.gp(), temp1, size); \
908 store_conditional(temp2, MemOperand(temp0, 0)); \
909 BranchShort(&exchange, eq, temp2, Operand(zero_reg)); \
910 dbar(0); \
911 } while (0)
912
913void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
914 uintptr_t offset_imm,
915 LiftoffRegister value,
916 LiftoffRegister result, StoreType type,
917 bool i64_offset) {
918 LiftoffRegList pinned{dst_addr, value, result};
919 if (offset_reg != no_reg) pinned.set(offset_reg);
920 Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
921 Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
922 Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
923 MemOperand dst_op =
924 liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, i64_offset);
925 Add_d(temp0, dst_op.base(), dst_op.offset());
926 switch (type.value()) {
927 case StoreType::kI64Store8:
929 break;
930 case StoreType::kI32Store8:
932 break;
933 case StoreType::kI64Store16:
935 break;
936 case StoreType::kI32Store16:
938 break;
939 case StoreType::kI64Store32:
941 break;
942 case StoreType::kI32Store:
943 amswap_db_w(result.gp(), value.gp(), temp0);
944 break;
945 case StoreType::kI64Store:
946 amswap_db_d(result.gp(), value.gp(), temp0);
947 break;
948 default:
949 UNREACHABLE();
950 }
951}
952#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT
953
954#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked, \
955 store_conditional) \
956 do { \
957 Label compareExchange; \
958 Label exit; \
959 dbar(0); \
960 bind(&compareExchange); \
961 load_linked(result.gp(), MemOperand(temp0, 0)); \
962 BranchShort(&exit, ne, expected.gp(), Operand(result.gp())); \
963 mov(temp2, new_value.gp()); \
964 store_conditional(temp2, MemOperand(temp0, 0)); \
965 BranchShort(&compareExchange, eq, temp2, Operand(zero_reg)); \
966 bind(&exit); \
967 dbar(0); \
968 } while (0)
969
970#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT( \
971 load_linked, store_conditional, size, aligned) \
972 do { \
973 Label compareExchange; \
974 Label exit; \
975 andi(temp1, temp0, aligned); \
976 Sub_d(temp0, temp0, Operand(temp1)); \
977 slli_w(temp1, temp1, 3); \
978 dbar(0); \
979 bind(&compareExchange); \
980 load_linked(temp2, MemOperand(temp0, 0)); \
981 ExtractBits(result.gp(), temp2, temp1, size, false); \
982 ExtractBits(temp2, expected.gp(), zero_reg, size, false); \
983 BranchShort(&exit, ne, temp2, Operand(result.gp())); \
984 InsertBits(temp2, new_value.gp(), temp1, size); \
985 store_conditional(temp2, MemOperand(temp0, 0)); \
986 BranchShort(&compareExchange, eq, temp2, Operand(zero_reg)); \
987 bind(&exit); \
988 dbar(0); \
989 } while (0)
990
991void LiftoffAssembler::AtomicCompareExchange(
992 Register dst_addr, Register offset_reg, uintptr_t offset_imm,
993 LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
994 StoreType type, bool i64_offset) {
995 LiftoffRegList pinned{dst_addr, expected, new_value, result};
996 if (offset_reg != no_reg) pinned.set(offset_reg);
997 Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
998 Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
999 Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
1000 MemOperand dst_op =
1001 liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, i64_offset);
1002 Add_d(temp0, dst_op.base(), dst_op.offset());
1003 switch (type.value()) {
1004 case StoreType::kI64Store8:
1006 break;
1007 case StoreType::kI32Store8:
1009 break;
1010 case StoreType::kI64Store16:
1012 break;
1013 case StoreType::kI32Store16:
1015 break;
1016 case StoreType::kI64Store32:
1018 break;
1019 case StoreType::kI32Store:
1021 break;
1022 case StoreType::kI64Store:
1024 break;
1025 default:
1026 UNREACHABLE();
1027 }
1028}
1029#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
1030#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT
1031
1033
1034void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
1035 uint32_t caller_slot_idx,
1036 ValueKind kind) {
1037 MemOperand src(fp, kSystemPointerSize * (caller_slot_idx + 1));
1038 liftoff::Load(this, dst, src, kind);
1039}
1040
1041void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
1042 uint32_t caller_slot_idx,
1044 Register frame_pointer) {
1045 int32_t offset = kSystemPointerSize * (caller_slot_idx + 1);
1046 liftoff::Store(this, frame_pointer, offset, src, kind);
1047}
1048
1049void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
1050 ValueKind kind) {
1051 liftoff::Load(this, dst, MemOperand(sp, offset), kind);
1052}
1053
1054void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
1055 ValueKind kind) {
1056 DCHECK_NE(dst_offset, src_offset);
1057 UseScratchRegisterScope temps(this);
1058 Register scratch = temps.Acquire();
1059
1060 switch (kind) {
1061 case kI32:
1062 case kF32:
1063 Ld_w(scratch, liftoff::GetStackSlot(src_offset));
1064 St_w(scratch, liftoff::GetStackSlot(dst_offset));
1065 break;
1066 case kI64:
1067 case kRefNull:
1068 case kRef:
1069 case kF64:
1070 Ld_d(scratch, liftoff::GetStackSlot(src_offset));
1071 St_d(scratch, liftoff::GetStackSlot(dst_offset));
1072 break;
1073 case kS128:
1074 default:
1075 UNREACHABLE();
1076 }
1077}
1078
1079void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
1080 DCHECK_NE(dst, src);
1081 // TODO(ksreten): Handle different sizes here.
1082 MacroAssembler::Move(dst, src);
1083}
1084
1085void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
1086 ValueKind kind) {
1087 DCHECK_NE(dst, src);
1088 if (kind != kS128) {
1089 MacroAssembler::Move(dst, src);
1090 } else {
1091 UNREACHABLE();
1092 }
1093}
1094
1095void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
1098 switch (kind) {
1099 case kI32:
1100 St_w(reg.gp(), dst);
1101 break;
1102 case kI64:
1103 case kRef:
1104 case kRefNull:
1105 St_d(reg.gp(), dst);
1106 break;
1107 case kF32:
1108 Fst_s(reg.fp(), dst);
1109 break;
1110 case kF64:
1111 MacroAssembler::Fst_d(reg.fp(), dst);
1112 break;
1113 case kS128:
1114 UNREACHABLE();
1115 break;
1116 default:
1117 UNREACHABLE();
1118 }
1119}
1120
1121void LiftoffAssembler::Spill(int offset, WasmValue value) {
1124 switch (value.type().kind()) {
1125 case kI32: {
1126 UseScratchRegisterScope temps(this);
1127 Register scratch = temps.Acquire();
1128 MacroAssembler::li(scratch, Operand(value.to_i32()));
1129 St_w(scratch, dst);
1130 break;
1131 }
1132 case kI64:
1133 case kRef:
1134 case kRefNull: {
1135 UseScratchRegisterScope temps(this);
1136 Register scratch = temps.Acquire();
1137 MacroAssembler::li(scratch, value.to_i64());
1138 St_d(scratch, dst);
1139 break;
1140 }
1141 default:
1142 // kWasmF32 and kWasmF64 are unreachable, since those
1143 // constants are not tracked.
1144 UNREACHABLE();
1145 }
1146}
1147
1148void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
1150 switch (kind) {
1151 case kI32:
1152 Ld_w(reg.gp(), src);
1153 break;
1154 case kI64:
1155 case kRef:
1156 case kRefNull:
1157 Ld_d(reg.gp(), src);
1158 break;
1159 case kF32:
1160 Fld_s(reg.fp(), src);
1161 break;
1162 case kF64:
1163 MacroAssembler::Fld_d(reg.fp(), src);
1164 break;
1165 case kS128:
1166 UNREACHABLE();
1167 break;
1168 default:
1169 UNREACHABLE();
1170 }
1171}
1172
1174 UNREACHABLE();
1175}
1176
1178 DCHECK_LT(0, size);
1180
1181 if (size <= 12 * kStackSlotSize) {
1182 // Special straight-line code for up to 12 slots. Generates one
1183 // instruction per slot (<= 12 instructions total).
1184 uint32_t remainder = size;
1187 }
1188 DCHECK(remainder == 4 || remainder == 0);
1189 if (remainder) {
1191 }
1192 } else {
1193 // General case for bigger counts (12 instructions).
1194 // Use a0 for start address (inclusive), a1 for end address (exclusive).
1195 Push(a1, a0);
1196 Add_d(a0, fp, Operand(-start - size));
1197 Add_d(a1, fp, Operand(-start));
1198
1199 Label loop;
1200 bind(&loop);
1201 St_d(zero_reg, MemOperand(a0, 0));
1202 addi_d(a0, a0, kSystemPointerSize);
1203 BranchShort(&loop, ne, a0, Operand(a1));
1204
1205 Pop(a1, a0);
1206 }
1207}
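FillStackSlotsWithZero trades code size against instruction count: small regions get one store per slot in straight-line code, larger ones a two-pointer loop. A toy version of that cutoff in ordinary C++, modelling the frame as a byte array with fp at its top (illustrative only; the 12-slot threshold mirrors the comment above):

    #include <cstdint>
    #include <cstring>
    #include <iostream>
    #include <vector>

    constexpr int kSlotSize = 8;
    constexpr int kUnrollLimit = 12;  // "up to 12 slots" straight-line case

    void FillSlotsWithZeroSketch(uint8_t* fp, int start, int size) {
      uint8_t* begin = fp - start - size;  // lowest slot address, like a0
      if (size <= kUnrollLimit * kSlotSize) {
        // Straight-line: one fixed-size store per slot, no loop bookkeeping.
        for (int off = 0; off < size; off += kSlotSize) {
          uint64_t zero = 0;
          std::memcpy(begin + off, &zero, kSlotSize);  // one St_d each
        }
      } else {
        // General case: walk [begin, begin + size), like the a0/a1 loop above.
        std::memset(begin, 0, size);
      }
    }

    int main() {
      std::vector<uint8_t> frame(256, 0xAB);
      FillSlotsWithZeroSketch(frame.data() + frame.size(), 64, 64);
      std::cout << "first cleared byte: " << int(frame[128]) << "\n";  // 0
    }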
1208
1209void LiftoffAssembler::LoadSpillAddress(Register dst, int offset,
1210 ValueKind /* kind */) {
1211 Sub_d(dst, fp, Operand(offset));
1212}
1213
1214void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) {
1215 MacroAssembler::Clz_d(dst.gp(), src.gp());
1216}
1217
1218void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
1219 MacroAssembler::Ctz_d(dst.gp(), src.gp());
1220}
1221
1222bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
1223 LiftoffRegister src) {
1224 MacroAssembler::Popcnt_d(dst.gp(), src.gp());
1225 return true;
1226}
1227
1228void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) {
1229 UseScratchRegisterScope temps(this);
1230 Register scratch = temps.Acquire();
1233 Ld_w(scratch, MemOperand(dst.gp(), offset));
1234 Add_w(scratch, scratch, Operand(Smi::FromInt(1)));
1235 St_w(scratch, MemOperand(dst.gp(), offset));
1236 } else {
1237 SmiUntag(scratch, MemOperand(dst.gp(), offset));
1238 Add_d(scratch, scratch, Operand(1));
1239 SmiTag(scratch);
1240 St_d(scratch, MemOperand(dst.gp(), offset));
1241 }
1242}
1243
1244void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
1245 MacroAssembler::Mul_w(dst, lhs, rhs);
1246}
1247
1248void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
1249 Label* trap_div_by_zero,
1250 Label* trap_div_unrepresentable) {
1251 MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
1252
1253 // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable.
1254 rotri_w(kScratchReg, lhs, 31);
1256 // If lhs == kMinInt, move rhs to kScratchReg.
1259 MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
1260 Operand(zero_reg));
1261
1262 MacroAssembler::Div_w(dst, lhs, rhs);
1263}
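emit_i32_divs guards the two cases Wasm defines as traps: a zero divisor, and kMinInt / -1, whose mathematical result 2^31 does not fit in i32. The same checks in plain C++ terms, as a sketch of the semantics rather than of the rotri_w/masknez register trick used above:

    #include <cstdint>
    #include <iostream>
    #include <limits>
    #include <stdexcept>

    int32_t WasmI32DivS(int32_t lhs, int32_t rhs) {
      if (rhs == 0) {
        throw std::runtime_error("divide by zero");    // trap_div_by_zero
      }
      if (lhs == std::numeric_limits<int32_t>::min() && rhs == -1) {
        throw std::runtime_error("integer overflow");  // trap_div_unrepresentable
      }
      return lhs / rhs;  // safe now: no undefined-behavior cases remain
    }

    int main() {
      std::cout << WasmI32DivS(-7, 2) << "\n";  // -3, truncated toward zero
      try {
        WasmI32DivS(std::numeric_limits<int32_t>::min(), -1);
      } catch (const std::exception& e) {
        std::cout << e.what() << "\n";
      }
    }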
1264
1265void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
1266 Label* trap_div_by_zero) {
1267 MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
1268 MacroAssembler::Div_wu(dst, lhs, rhs);
1269}
1270
1271void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
1272 Label* trap_div_by_zero) {
1273 MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
1274 MacroAssembler::Mod_w(dst, lhs, rhs);
1275}
1276
1277void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
1278 Label* trap_div_by_zero) {
1279 MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
1280 MacroAssembler::Mod_wu(dst, lhs, rhs);
1281}
1282
1283#define I32_BINOP(name, instruction) \
1284 void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
1285 Register rhs) { \
1286 instruction(dst, lhs, rhs); \
1287 }
1288
1289// clang-format off
1290I32_BINOP(add, add_w)
1291I32_BINOP(sub, sub_w)
1292I32_BINOP(and, and_)
1293I32_BINOP(or, or_)
1294I32_BINOP(xor, xor_)
1295// clang-format on
1296
1297#undef I32_BINOP
1298
1299#define I32_BINOP_I(name, instruction) \
1300 void LiftoffAssembler::emit_i32_##name##i(Register dst, Register lhs, \
1301 int32_t imm) { \
1302 instruction(dst, lhs, Operand(imm)); \
1303 }
1304
1305// clang-format off
1306I32_BINOP_I(add, Add_w)
1307I32_BINOP_I(sub, Sub_w)
1308I32_BINOP_I(and, And)
1309I32_BINOP_I(or, Or)
1310I32_BINOP_I(xor, Xor)
1311// clang-format on
1312
1313#undef I32_BINOP_I
1314
1315void LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
1316 MacroAssembler::Clz_w(dst, src);
1317}
1318
1319void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
1320 MacroAssembler::Ctz_w(dst, src);
1321}
1322
1323bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
1324 MacroAssembler::Popcnt_w(dst, src);
1325 return true;
1326}
1327
1328#define I32_SHIFTOP(name, instruction) \
1329 void LiftoffAssembler::emit_i32_##name(Register dst, Register src, \
1330 Register amount) { \
1331 instruction(dst, src, amount); \
1332 }
1333#define I32_SHIFTOP_I(name, instruction, instruction1) \
1334 I32_SHIFTOP(name, instruction) \
1335 void LiftoffAssembler::emit_i32_##name##i(Register dst, Register src, \
1336 int amount) { \
1337 instruction1(dst, src, amount & 0x1f); \
1338 }
1339
1340I32_SHIFTOP_I(shl, sll_w, slli_w)
1341I32_SHIFTOP_I(sar, sra_w, srai_w)
1342I32_SHIFTOP_I(shr, srl_w, srli_w)
1343
1344#undef I32_SHIFTOP
1345#undef I32_SHIFTOP_I
1346
1347void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
1348 int64_t imm) {
1349 MacroAssembler::Add_d(dst.gp(), lhs.gp(), Operand(imm));
1350}
1351
1352void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
1353 LiftoffRegister rhs) {
1354 MacroAssembler::Mul_d(dst.gp(), lhs.gp(), rhs.gp());
1355}
1356
1357void LiftoffAssembler::emit_i64_muli(LiftoffRegister dst, LiftoffRegister lhs,
1358 int32_t imm) {
1359 if (base::bits::IsPowerOfTwo(imm)) {
1361 return;
1362 }
1363 UseScratchRegisterScope temps(this);
1364 Register scratch = temps.Acquire();
1365 MacroAssembler::li(scratch, Operand(imm));
1366 MacroAssembler::Mul_d(dst.gp(), lhs.gp(), scratch);
1367}
1368
1369bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
1370 LiftoffRegister rhs,
1371 Label* trap_div_by_zero,
1372 Label* trap_div_unrepresentable) {
1373 MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
1374
1375 // Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable.
1376 rotri_d(kScratchReg, lhs.gp(), 63);
1378 // If lhs == MinInt64, move rhs to kScratchReg.
1379 masknez(kScratchReg, rhs.gp(), kScratchReg);
1381 MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
1382 Operand(zero_reg));
1383
1384 MacroAssembler::Div_d(dst.gp(), lhs.gp(), rhs.gp());
1385 return true;
1386}
1387
1388bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
1389 LiftoffRegister rhs,
1390 Label* trap_div_by_zero) {
1391 MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
1392 MacroAssembler::Div_du(dst.gp(), lhs.gp(), rhs.gp());
1393 return true;
1394}
1395
1396bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
1397 LiftoffRegister rhs,
1398 Label* trap_div_by_zero) {
1399 MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
1400 MacroAssembler::Mod_d(dst.gp(), lhs.gp(), rhs.gp());
1401 return true;
1402}
1403
1404bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
1405 LiftoffRegister rhs,
1406 Label* trap_div_by_zero) {
1407 MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
1408 MacroAssembler::Mod_du(dst.gp(), lhs.gp(), rhs.gp());
1409 return true;
1410}
1411
1412#define I64_BINOP(name, instruction) \
1413 void LiftoffAssembler::emit_i64_##name( \
1414 LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
1415 instruction(dst.gp(), lhs.gp(), rhs.gp()); \
1416 }
1417
1418// clang-format off
1419I64_BINOP(add, Add_d)
1420I64_BINOP(sub, Sub_d)
1421I64_BINOP(and, and_)
1422I64_BINOP(or, or_)
1423I64_BINOP(xor, xor_)
1424// clang-format on
1425
1426#undef I64_BINOP
1427
1428#define I64_BINOP_I(name, instruction) \
1429 void LiftoffAssembler::emit_i64_##name##i( \
1430 LiftoffRegister dst, LiftoffRegister lhs, int32_t imm) { \
1431 instruction(dst.gp(), lhs.gp(), Operand(imm)); \
1432 }
1433
1434// clang-format off
1435I64_BINOP_I(and, And)
1436I64_BINOP_I(or, Or)
1437I64_BINOP_I(xor, Xor)
1438// clang-format on
1439
1440#undef I64_BINOP_I
1441
1442#define I64_SHIFTOP(name, instruction) \
1443 void LiftoffAssembler::emit_i64_##name( \
1444 LiftoffRegister dst, LiftoffRegister src, Register amount) { \
1445 instruction(dst.gp(), src.gp(), amount); \
1446 }
1447#define I64_SHIFTOP_I(name, instruction, instructioni) \
1448 I64_SHIFTOP(name, instruction) \
1449 void LiftoffAssembler::emit_i64_##name##i(LiftoffRegister dst, \
1450 LiftoffRegister src, int amount) { \
1451 instructioni(dst.gp(), src.gp(), amount & 63); \
1452 }
1453
1454I64_SHIFTOP_I(shl, sll_d, slli_d)
1455I64_SHIFTOP_I(sar, sra_d, srai_d)
1456I64_SHIFTOP_I(shr, srl_d, srli_d)
1457
1458#undef I64_SHIFTOP
1459#undef I64_SHIFTOP_I
1460
1461void LiftoffAssembler::emit_u32_to_uintptr(Register dst, Register src) {
1462 bstrpick_d(dst, src, 31, 0);
1463}
1464
1465void LiftoffAssembler::clear_i32_upper_half(Register dst) {
1466 // Don't need to clear the upper halves of i32 values for sandbox on
1467 // LoongArch64, because we'll explicitly zero-extend their lower halves before
1468 // using them for memory accesses anyway.
1469}
1470
1472 MacroAssembler::Neg_s(dst, src);
1473}
1474
1476 MacroAssembler::Neg_d(dst, src);
1477}
1478
1480 DoubleRegister rhs) {
1481 Label ool, done;
1482 MacroAssembler::Float32Min(dst, lhs, rhs, &ool);
1483 Branch(&done);
1484
1485 bind(&ool);
1487 bind(&done);
1488}
1489
1491 DoubleRegister rhs) {
1492 Label ool, done;
1493 MacroAssembler::Float32Max(dst, lhs, rhs, &ool);
1494 Branch(&done);
1495
1496 bind(&ool);
1498 bind(&done);
1499}
1500
1502 DoubleRegister rhs) {
1503 fcopysign_s(dst, lhs, rhs);
1504}
1505
1507 DoubleRegister rhs) {
1508 Label ool, done;
1509 MacroAssembler::Float64Min(dst, lhs, rhs, &ool);
1510 Branch(&done);
1511
1512 bind(&ool);
1514 bind(&done);
1515}
1516
1518 DoubleRegister rhs) {
1519 Label ool, done;
1520 MacroAssembler::Float64Max(dst, lhs, rhs, &ool);
1521 Branch(&done);
1522
1523 bind(&ool);
1525 bind(&done);
1526}
1527
1529 DoubleRegister rhs) {
1530 fcopysign_d(dst, lhs, rhs);
1531}
1532
1533#define FP_BINOP(name, instruction) \
1534 void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
1535 DoubleRegister rhs) { \
1536 instruction(dst, lhs, rhs); \
1537 }
1538#define FP_UNOP(name, instruction) \
1539 void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
1540 instruction(dst, src); \
1541 }
1542#define FP_UNOP_RETURN_TRUE(name, instruction) \
1543 bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
1544 instruction(dst, src); \
1545 return true; \
1546 }
1547
1548FP_BINOP(f32_add, fadd_s)
1549FP_BINOP(f32_sub, fsub_s)
1550FP_BINOP(f32_mul, fmul_s)
1551FP_BINOP(f32_div, fdiv_s)
1552FP_UNOP(f32_abs, fabs_s)
1553FP_UNOP_RETURN_TRUE(f32_ceil, Ceil_s)
1554FP_UNOP_RETURN_TRUE(f32_floor, Floor_s)
1555FP_UNOP_RETURN_TRUE(f32_trunc, Trunc_s)
1556FP_UNOP_RETURN_TRUE(f32_nearest_int, Round_s)
1557FP_UNOP(f32_sqrt, fsqrt_s)
1558FP_BINOP(f64_add, fadd_d)
1559FP_BINOP(f64_sub, fsub_d)
1560FP_BINOP(f64_mul, fmul_d)
1561FP_BINOP(f64_div, fdiv_d)
1562FP_UNOP(f64_abs, fabs_d)
1563FP_UNOP_RETURN_TRUE(f64_ceil, Ceil_d)
1564FP_UNOP_RETURN_TRUE(f64_floor, Floor_d)
1565FP_UNOP_RETURN_TRUE(f64_trunc, Trunc_d)
1566FP_UNOP_RETURN_TRUE(f64_nearest_int, Round_d)
1567FP_UNOP(f64_sqrt, fsqrt_d)
1568
1569#undef FP_BINOP
1570#undef FP_UNOP
1571#undef FP_UNOP_RETURN_TRUE
1572
1573bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
1574 LiftoffRegister dst,
1575 LiftoffRegister src, Label* trap) {
1576 switch (opcode) {
1577 case kExprI32ConvertI64:
1578 MacroAssembler::bstrpick_w(dst.gp(), src.gp(), 31, 0);
1579 return true;
1580 case kExprI32SConvertF32: {
1582
1583 // Real conversion.
1584 MacroAssembler::Trunc_s(rounded, src.fp());
1586 movfr2gr_s(dst.gp(), kScratchDoubleReg2);
1587 // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
1588 // because INT32_MIN allows easier out-of-bounds detection.
1589 MacroAssembler::Add_w(kScratchReg, dst.gp(), 1);
1590 MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
1592
1593 // Checking if trap.
1594 movgr2fr_w(kScratchDoubleReg2, dst.gp());
1598 return true;
1599 }
1600 case kExprI32UConvertF32: {
1602
1603 // Real conversion.
1604 MacroAssembler::Trunc_s(rounded, src.fp());
1606 // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
1607 // because 0 allows easier out-of-bounds detection.
1608 MacroAssembler::Add_w(kScratchReg, dst.gp(), 1);
1609 MacroAssembler::Movz(dst.gp(), zero_reg, kScratchReg);
1610
1611 // Checking if trap.
1616 return true;
1617 }
1618 case kExprI32SConvertF64: {
1620
1621 // Real conversion.
1622 MacroAssembler::Trunc_d(rounded, src.fp());
1624 movfr2gr_s(dst.gp(), kScratchDoubleReg2);
1625
1626 // Checking if trap.
1630 return true;
1631 }
1632 case kExprI32UConvertF64: {
1634
1635 // Real conversion.
1636 MacroAssembler::Trunc_d(rounded, src.fp());
1638
1639 // Checking if trap.
1643 return true;
1644 }
1645 case kExprI32ReinterpretF32:
1646 MacroAssembler::FmoveLow(dst.gp(), src.fp());
1647 return true;
1648 case kExprI64SConvertI32:
1649 slli_w(dst.gp(), src.gp(), 0);
1650 return true;
1651 case kExprI64UConvertI32:
1652 MacroAssembler::bstrpick_d(dst.gp(), src.gp(), 31, 0);
1653 return true;
1654 case kExprI64SConvertF32: {
1656
1657 // Real conversion.
1658 MacroAssembler::Trunc_s(rounded, src.fp());
1660 movfr2gr_d(dst.gp(), kScratchDoubleReg2);
1661 // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
1662 // because INT64_MIN allows easier out-of-bounds detection.
1663 MacroAssembler::Add_d(kScratchReg, dst.gp(), 1);
1664 MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
1666
1667 // Checking if trap.
1668 movgr2fr_d(kScratchDoubleReg2, dst.gp());
1672 return true;
1673 }
1674 case kExprI64UConvertF32: {
1675 // Real conversion.
1677 kScratchReg);
1678
1679 // Checking if trap.
1680 MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
1681 return true;
1682 }
1683 case kExprI64SConvertF64: {
1685
1686 // Real conversion.
1687 MacroAssembler::Trunc_d(rounded, src.fp());
1689 movfr2gr_d(dst.gp(), kScratchDoubleReg2);
1690 // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
1691 // because INT64_MIN allows easier out-of-bounds detection.
1692 MacroAssembler::Add_d(kScratchReg, dst.gp(), 1);
1693 MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
1695
1696 // Checking if trap.
1697 movgr2fr_d(kScratchDoubleReg2, dst.gp());
1701 return true;
1702 }
1703 case kExprI64UConvertF64: {
1704 // Real conversion.
1706 kScratchReg);
1707
1708 // Checking if trap.
1709 MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
1710 return true;
1711 }
1712 case kExprI64ReinterpretF64:
1713 movfr2gr_d(dst.gp(), src.fp());
1714 return true;
1715 case kExprF32SConvertI32: {
1716 LiftoffRegister scratch = GetUnusedRegister(kFpReg, LiftoffRegList{dst});
1717 movgr2fr_w(scratch.fp(), src.gp());
1718 ffint_s_w(dst.fp(), scratch.fp());
1719 return true;
1720 }
1721 case kExprF32UConvertI32:
1722 MacroAssembler::Ffint_s_uw(dst.fp(), src.gp());
1723 return true;
1724 case kExprF32ConvertF64:
1725 fcvt_s_d(dst.fp(), src.fp());
1726 return true;
1727 case kExprF32ReinterpretI32:
1728 MacroAssembler::FmoveLow(dst.fp(), src.gp());
1729 return true;
1730 case kExprF64SConvertI32: {
1731 LiftoffRegister scratch = GetUnusedRegister(kFpReg, LiftoffRegList{dst});
1732 movgr2fr_w(scratch.fp(), src.gp());
1733 ffint_d_w(dst.fp(), scratch.fp());
1734 return true;
1735 }
1736 case kExprF64UConvertI32:
1737 MacroAssembler::Ffint_d_uw(dst.fp(), src.gp());
1738 return true;
1739 case kExprF64ConvertF32:
1740 fcvt_d_s(dst.fp(), src.fp());
1741 return true;
1742 case kExprF64ReinterpretI64:
1743 movgr2fr_d(dst.fp(), src.gp());
1744 return true;
1745 case kExprI32SConvertSatF32:
1746 ftintrz_w_s(kScratchDoubleReg, src.fp());
1747 movfr2gr_s(dst.gp(), kScratchDoubleReg);
1748 return true;
1749 case kExprI32UConvertSatF32: {
1750 Label isnan_or_lessthan_or_equal_zero;
1751 mov(dst.gp(), zero_reg);
1752 MacroAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0));
1753 CompareF32(src.fp(), kScratchDoubleReg, CULE);
1754 BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
1755 Ftintrz_uw_s(dst.gp(), src.fp(), kScratchDoubleReg);
1756 bind(&isnan_or_lessthan_or_equal_zero);
1757 return true;
1758 }
1759 case kExprI32SConvertSatF64:
1760 ftintrz_w_d(kScratchDoubleReg, src.fp());
1761 movfr2gr_s(dst.gp(), kScratchDoubleReg);
1762 return true;
1763 case kExprI32UConvertSatF64: {
1764 Label isnan_or_lessthan_or_equal_zero;
1765 mov(dst.gp(), zero_reg);
1766 MacroAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0));
1767 CompareF64(src.fp(), kScratchDoubleReg, CULE);
1768 BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
1769 Ftintrz_uw_d(dst.gp(), src.fp(), kScratchDoubleReg);
1770 bind(&isnan_or_lessthan_or_equal_zero);
1771 return true;
1772 }
1773 case kExprI64SConvertSatF32:
1774 ftintrz_l_s(kScratchDoubleReg, src.fp());
1775 movfr2gr_d(dst.gp(), kScratchDoubleReg);
1776 return true;
1777 case kExprI64UConvertSatF32: {
1778 Label isnan_or_lessthan_or_equal_zero;
1779 mov(dst.gp(), zero_reg);
1780 MacroAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0));
1781 CompareF32(src.fp(), kScratchDoubleReg, CULE);
1782 BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
1783 Ftintrz_ul_s(dst.gp(), src.fp(), kScratchDoubleReg);
1784 bind(&isnan_or_lessthan_or_equal_zero);
1785 return true;
1786 }
1787 case kExprI64SConvertSatF64:
1788 ftintrz_l_d(kScratchDoubleReg, src.fp());
1789 movfr2gr_d(dst.gp(), kScratchDoubleReg);
1790 return true;
1791 case kExprI64UConvertSatF64: {
1792 Label isnan_or_lessthan_or_equal_zero;
1793 mov(dst.gp(), zero_reg);
1794 MacroAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0));
1795 CompareF64(src.fp(), kScratchDoubleReg, CULE);
1796 BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
1797 Ftintrz_ul_d(dst.gp(), src.fp(), kScratchDoubleReg);
1798 bind(&isnan_or_lessthan_or_equal_zero);
1799 return true;
1800 }
1801 default:
1802 return false;
1803 }
1804}
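The trapping conversion cases above share one plan: truncate, use an easy-to-test overflow indicator (hence INT32_MIN / INT64_MIN rather than the MAX values), convert back, and trap unless the round-trip agrees. At the Wasm semantics level this amounts to a NaN-and-range check; here it is spelled out in portable C++ for the i32.trunc_f64_s case only (an illustrative sketch, not the emitted sequence):

    #include <cmath>
    #include <cstdint>
    #include <iostream>
    #include <stdexcept>

    int32_t WasmI32TruncF64S(double x) {
      if (std::isnan(x)) {
        throw std::runtime_error("invalid conversion to integer");  // trap
      }
      double t = std::trunc(x);  // the Trunc_d step
      // i32 range is [-2^31, 2^31 - 1]; both bounds are exact doubles here.
      if (t < -2147483648.0 || t > 2147483647.0) {
        throw std::runtime_error("integer overflow");  // trap
      }
      return static_cast<int32_t>(t);
    }

    int main() {
      std::cout << WasmI32TruncF64S(-3.9) << "\n";  // -3
      try {
        WasmI32TruncF64S(3.0e10);
      } catch (const std::exception& e) {
        std::cout << e.what() << "\n";
      }
    }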
1805
1806void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
1807 ext_w_b(dst, src);
1808}
1809
1810void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
1811 ext_w_h(dst, src);
1812}
1813
1814void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
1815 LiftoffRegister src) {
1816 ext_w_b(dst.gp(), src.gp());
1817}
1818
1819void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
1820 LiftoffRegister src) {
1821 ext_w_h(dst.gp(), src.gp());
1822}
1823
1824void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
1825 LiftoffRegister src) {
1826 slli_w(dst.gp(), src.gp(), 0);
1827}
1828
1831}
1832
1833void LiftoffAssembler::emit_jump(Register target) {
1834 MacroAssembler::Jump(target);
1835}
1836
1837void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
1838 ValueKind kind, Register lhs,
1839 Register rhs,
1840 const FreezeCacheState& frozen) {
1841 if (rhs == no_reg) {
1842 if (kind == kI32) {
1843 UseScratchRegisterScope temps(this);
1844 Register scratch0 = temps.Acquire();
1845 slli_w(scratch0, lhs, 0);
1846 MacroAssembler::Branch(label, cond, scratch0, Operand(zero_reg));
1847 } else {
1848 DCHECK(kind == kI64);
1849 MacroAssembler::Branch(label, cond, lhs, Operand(zero_reg));
1850 }
1851 } else {
1852 if (kind == kI64) {
1853 MacroAssembler::Branch(label, cond, lhs, Operand(rhs));
1854 } else {
1855 DCHECK((kind == kI32) || (kind == kRef) || (kind == kRefNull));
1856 MacroAssembler::CompareTaggedAndBranch(label, cond, lhs, Operand(rhs));
1857 }
1858 }
1859}
1860
1862 Register lhs, int32_t imm,
1863 const FreezeCacheState& frozen) {
1864 MacroAssembler::CompareTaggedAndBranch(label, cond, lhs, Operand(imm));
1865}
1866
1868 Register lhs, int32_t imm,
1869 const FreezeCacheState& frozen) {
1870 MacroAssembler::Branch(label, cond, lhs, Operand(imm));
1871}
1872
1873void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
1874 slli_w(dst, src, 0);
1875 sltui(dst, dst, 1);
1876}
1877
1878void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
1879 Register lhs, Register rhs) {
1880 UseScratchRegisterScope temps(this);
1881 Register scratch0 = temps.Acquire();
1882 Register scratch1 = kScratchReg;
1883
1884 slli_w(scratch0, lhs, 0);
1885 slli_w(scratch1, rhs, 0);
1886
1887 CompareWord(cond, dst, scratch0, Operand(scratch1));
1888}
1889
1890void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
1891 sltui(dst, src.gp(), 1);
1892}
1893
1894void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
1895 LiftoffRegister lhs,
1896 LiftoffRegister rhs) {
1897 CompareWord(cond, dst, lhs.gp(), Operand(rhs.gp()));
1898}
1899
1900namespace liftoff {
1901
1902inline FPUCondition ConditionToConditionCmpFPU(Condition condition,
1903 bool* predicate) {
1904 switch (condition) {
1905 case kEqual:
1906 *predicate = true;
1907 return CEQ;
1908 case kNotEqual:
1909 *predicate = false;
1910 return CEQ;
1911 case kUnsignedLessThan:
1912 *predicate = true;
1913 return CLT;
1915 *predicate = false;
1916 return CLT;
1918 *predicate = true;
1919 return CLE;
1921 *predicate = false;
1922 return CLE;
1923 default:
1924 *predicate = true;
1925 break;
1926 }
1927 UNREACHABLE();
1928}
1929
1930} // namespace liftoff
1931
1932void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
1933 DoubleRegister lhs,
1934 DoubleRegister rhs) {
1935 Label not_nan, cont;
1938 // If one of the operands is NaN, return 1 for f32.ne, else 0.
1939 if (cond == ne) {
1940 MacroAssembler::li(dst, 1);
1941 } else {
1942 MacroAssembler::Move(dst, zero_reg);
1943 }
1945
1946 bind(&not_nan);
1947
1948 MacroAssembler::li(dst, 1);
1949 bool predicate;
1950 FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
1951 MacroAssembler::CompareF32(lhs, rhs, fcond);
1952 if (predicate) {
1954 } else {
1956 }
1957
1958 bind(&cont);
1959}
1960
1961void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
1962 DoubleRegister lhs,
1963 DoubleRegister rhs) {
1964 Label not_nan, cont;
1967 // If one of the operands is NaN, return 1 for f64.ne, else 0.
1968 if (cond == ne) {
1969 MacroAssembler::li(dst, 1);
1970 } else {
1971 MacroAssembler::Move(dst, zero_reg);
1972 }
1974
1975 bind(&not_nan);
1976
1977 MacroAssembler::li(dst, 1);
1978 bool predicate;
1979 FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
1980 MacroAssembler::CompareF64(lhs, rhs, fcond);
1981 if (predicate) {
1983 } else {
1985 }
1986
1987 bind(&cont);
1988}
1989
1990bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
1991 LiftoffRegister true_value,
1992 LiftoffRegister false_value,
1993 ValueKind kind) {
1994 return false;
1995}
1996
1997void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
1998 SmiCheckMode mode,
1999 const FreezeCacheState& frozen) {
2000 UseScratchRegisterScope temps(this);
2001 Register scratch = temps.Acquire();
2002 And(scratch, obj, Operand(kSmiTagMask));
2003 Condition condition = mode == kJumpOnSmi ? eq : ne;
2004 Branch(target, condition, scratch, Operand(zero_reg));
2005}
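// emit_smi_check tests the low tag bits of an object word: Smis have the tag
// bits cleared, heap object pointers have them set. A host-side model of the
// test, assuming the usual 64-bit configuration where kSmiTagMask == 1; the
// helper name is hypothetical:
inline bool Model_IsSmi(uint64_t obj, uint64_t smi_tag_mask = 1) {
  // And(scratch, obj, Operand(kSmiTagMask)); branch on scratch == 0 for kJumpOnSmi.
  return (obj & smi_tag_mask) == 0;
}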
2006
2007void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
2008 Register offset_reg, uintptr_t offset_imm,
2009 LoadType type,
2010 LoadTransformationKind transform,
2011 uint32_t* protected_load_pc,
2012 bool i64_offset) {
2013 bailout(kSimd, "load extend and load splat unimplemented");
2014}
2015
2016void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
2017 Register addr, Register offset_reg,
2018 uintptr_t offset_imm, LoadType type,
2019 uint8_t laneidx, uint32_t* protected_load_pc,
2020 bool i64_offset) {
2021 bailout(kSimd, "loadlane");
2022}
2023
2024void LiftoffAssembler::StoreLane(Register dst, Register offset,
2025 uintptr_t offset_imm, LiftoffRegister src,
2026 StoreType type, uint8_t lane,
2027 uint32_t* protected_store_pc,
2028 bool i64_offset) {
2029 bailout(kSimd, "storelane");
2030}
2031
2032void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
2033 LiftoffRegister lhs,
2034 LiftoffRegister rhs,
2035 const uint8_t shuffle[16],
2036 bool is_swizzle) {
2037 bailout(kSimd, "emit_i8x16_shuffle");
2038}
2039
2040void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
2041 LiftoffRegister lhs,
2042 LiftoffRegister rhs) {
2043 bailout(kSimd, "emit_i8x16_swizzle");
2044}
2045
2046void LiftoffAssembler::emit_i8x16_relaxed_swizzle(LiftoffRegister dst,
2047 LiftoffRegister lhs,
2048 LiftoffRegister rhs) {
2049 bailout(kRelaxedSimd, "emit_i8x16_relaxed_swizzle");
2050}
2051
2052void LiftoffAssembler::emit_i32x4_relaxed_trunc_f32x4_s(LiftoffRegister dst,
2053                                                        LiftoffRegister src) {
2054 bailout(kRelaxedSimd, "emit_i32x4_relaxed_trunc_f32x4_s");
2055}
2056
2057void LiftoffAssembler::emit_i32x4_relaxed_trunc_f32x4_u(LiftoffRegister dst,
2058                                                        LiftoffRegister src) {
2059 bailout(kRelaxedSimd, "emit_i32x4_relaxed_trunc_f32x4_u");
2060}
2061
2062void LiftoffAssembler::emit_i32x4_relaxed_trunc_f64x2_s_zero(
2063    LiftoffRegister dst, LiftoffRegister src) {
2064 bailout(kRelaxedSimd, "emit_i32x4_relaxed_trunc_f64x2_s_zero");
2065}
2066
2067void LiftoffAssembler::emit_i32x4_relaxed_trunc_f64x2_u_zero(
2068    LiftoffRegister dst, LiftoffRegister src) {
2069 bailout(kRelaxedSimd, "emit_i32x4_relaxed_trunc_f64x2_u_zero");
2070}
2071
2072void LiftoffAssembler::emit_s128_relaxed_laneselect(LiftoffRegister dst,
2073 LiftoffRegister src1,
2074 LiftoffRegister src2,
2075 LiftoffRegister mask,
2076 int lane_width) {
2077 bailout(kRelaxedSimd, "emit_s128_relaxed_laneselect");
2078}
2079
2080void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
2081 LiftoffRegister src) {
2082 bailout(kSimd, "emit_i8x16_splat");
2083}
2084
2085void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
2086 LiftoffRegister src) {
2087 bailout(kSimd, "emit_i16x8_splat");
2088}
2089
2090void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
2091 LiftoffRegister src) {
2092 bailout(kSimd, "emit_i32x4_splat");
2093}
2094
2095void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
2096 LiftoffRegister src) {
2097 bailout(kSimd, "emit_i64x2_splat");
2098}
2099
2100void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
2101 LiftoffRegister src) {
2102 bailout(kSimd, "emit_f32x4_splat");
2103}
2104
2105void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
2106 LiftoffRegister src) {
2107 bailout(kSimd, "emit_f64x2_splat");
2108}
2109
2110#define SIMD_BINOP(name1, name2) \
2111 void LiftoffAssembler::emit_##name1##_extmul_low_##name2( \
2112 LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
2113 bailout(kSimd, "emit_" #name1 "_extmul_low_" #name2); \
2114 } \
2115 void LiftoffAssembler::emit_##name1##_extmul_high_##name2( \
2116 LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
2117 bailout(kSimd, "emit_" #name1 "_extmul_high_" #name2); \
2118 }
2119
2120SIMD_BINOP(i16x8, i8x16_s)
2121SIMD_BINOP(i16x8, i8x16_u)
2122
2123SIMD_BINOP(i32x4, i16x8_s)
2124SIMD_BINOP(i32x4, i16x8_u)
2125
2126SIMD_BINOP(i64x2, i32x4_s)
2127SIMD_BINOP(i64x2, i32x4_u)
2128
2129#undef SIMD_BINOP
2130
2131#define SIMD_BINOP(name1, name2) \
2132 void LiftoffAssembler::emit_##name1##_extadd_pairwise_##name2( \
2133 LiftoffRegister dst, LiftoffRegister src) { \
2134 bailout(kSimd, "emit_" #name1 "_extadd_pairwise_" #name2); \
2135 }
2136
2137SIMD_BINOP(i16x8, i8x16_s)
2138SIMD_BINOP(i16x8, i8x16_u)
2139SIMD_BINOP(i32x4, i16x8_s)
2140SIMD_BINOP(i32x4, i16x8_u)
2141#undef SIMD_BINOP
2142
2143void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
2144 LiftoffRegister src1,
2145 LiftoffRegister src2) {
2146 bailout(kSimd, "emit_i16x8_q15mulr_sat_s");
2147}
2148
2149void LiftoffAssembler::emit_i16x8_relaxed_q15mulr_s(LiftoffRegister dst,
2150 LiftoffRegister src1,
2151 LiftoffRegister src2) {
2152 bailout(kRelaxedSimd, "emit_i16x8_relaxed_q15mulr_s");
2153}
2154
2155void LiftoffAssembler::emit_i16x8_dot_i8x16_i7x16_s(LiftoffRegister dst,
2156 LiftoffRegister lhs,
2157 LiftoffRegister rhs) {
2158 bailout(kSimd, "emit_i16x8_dot_i8x16_i7x16_s");
2159}
2160
2161void LiftoffAssembler::emit_i32x4_dot_i8x16_i7x16_add_s(LiftoffRegister dst,
2162                                                        LiftoffRegister lhs,
2163 LiftoffRegister rhs,
2164 LiftoffRegister acc) {
2165 bailout(kSimd, "emit_i32x4_dot_i8x16_i7x16_add_s");
2166}
2167
2168void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
2169 LiftoffRegister rhs) {
2170 bailout(kSimd, "emit_i8x16_eq");
2171}
2172
2173void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs,
2174 LiftoffRegister rhs) {
2175 bailout(kSimd, "emit_i8x16_ne");
2176}
2177
2178void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
2179 LiftoffRegister rhs) {
2180 bailout(kSimd, "emit_i8x16_gt_s");
2181}
2182
2183void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
2184 LiftoffRegister rhs) {
2185 bailout(kSimd, "emit_i8x16_gt_u");
2186}
2187
2188void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
2189 LiftoffRegister rhs) {
2190 bailout(kSimd, "emit_i8x16_ge_s");
2191}
2192
2193void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
2194 LiftoffRegister rhs) {
2195 bailout(kSimd, "emit_i8x16_ge_u");
2196}
2197
2198void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
2199 LiftoffRegister rhs) {
2200 bailout(kSimd, "emit_i16x8_eq");
2201}
2202
2203void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
2204 LiftoffRegister rhs) {
2205 bailout(kSimd, "emit_i16x8_ne");
2206}
2207
2208void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
2209 LiftoffRegister rhs) {
2210 bailout(kSimd, "emit_i16x8_gt_s");
2211}
2212
2213void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
2214 LiftoffRegister rhs) {
2215 bailout(kSimd, "emit_i16x8_gt_u");
2216}
2217
2218void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
2219 LiftoffRegister rhs) {
2220 bailout(kSimd, "emit_i16x8_ge_s");
2221}
2222
2223void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
2224 LiftoffRegister rhs) {
2225 bailout(kSimd, "emit_i16x8_ge_u");
2226}
2227
2228void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
2229 LiftoffRegister rhs) {
2230 bailout(kSimd, "emit_i32x4_eq");
2231}
2232
2233void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
2234 LiftoffRegister rhs) {
2235 bailout(kSimd, "emit_i32x4_ne");
2236}
2237
2238void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
2239 LiftoffRegister rhs) {
2240 bailout(kSimd, "emit_i32x4_gt_s");
2241}
2242
2243void LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
2244 LiftoffRegister rhs) {
2245 bailout(kSimd, "emit_i32x4_gt_u");
2246}
2247
2248void LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
2249 LiftoffRegister rhs) {
2250 bailout(kSimd, "emit_i32x4_ge_s");
2251}
2252
2253void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
2254 LiftoffRegister rhs) {
2255 bailout(kSimd, "emit_i32x4_ge_u");
2256}
2257
2258void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
2259 LiftoffRegister rhs) {
2260 bailout(kSimd, "emit_f32x4_eq");
2261}
2262
2263void LiftoffAssembler::emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
2264 LiftoffRegister rhs) {
2265 bailout(kSimd, "emit_f32x4_ne");
2266}
2267
2268void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs,
2269 LiftoffRegister rhs) {
2270 bailout(kSimd, "emit_f32x4_lt");
2271}
2272
2273void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
2274 LiftoffRegister rhs) {
2275 bailout(kSimd, "emit_f32x4_le");
2276}
2277
2278void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
2279 LiftoffRegister rhs) {
2280 bailout(kSimd, "emit_i64x2_eq");
2281}
2282
2283void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
2284 LiftoffRegister rhs) {
2285 bailout(kSimd, "emit_i64x2_ne");
2286}
2287
2288void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
2289 LiftoffRegister src) {
2290 bailout(kSimd, "emit_i64x2_abs");
2291}
2292
2293void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
2294 LiftoffRegister rhs) {
2295 bailout(kSimd, "emit_f64x2_eq");
2296}
2297
2298void LiftoffAssembler::emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
2299 LiftoffRegister rhs) {
2300 bailout(kSimd, "emit_f64x2_ne");
2301}
2302
2303void LiftoffAssembler::emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs,
2304 LiftoffRegister rhs) {
2305 bailout(kSimd, "emit_f64x2_lt");
2306}
2307
2308void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
2309 LiftoffRegister rhs) {
2310 bailout(kSimd, "emit_f64x2_le");
2311}
2312
2313void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
2314 const uint8_t imms[16]) {
2315 bailout(kSimd, "emit_s128_const");
2316}
2317
2318void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
2319 bailout(kSimd, "emit_s128_not");
2320}
2321
2322void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
2323 LiftoffRegister rhs) {
2324 bailout(kSimd, "emit_s128_and");
2325}
2326
2327void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
2328 LiftoffRegister rhs) {
2329 bailout(kSimd, "emit_s128_or");
2330}
2331
2332void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
2333 LiftoffRegister rhs) {
2334 bailout(kSimd, "emit_s128_xor");
2335}
2336
2337void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
2338 LiftoffRegister lhs,
2339 LiftoffRegister rhs) {
2340 bailout(kSimd, "emit_s128_and_not");
2341}
2342
2343void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
2344 LiftoffRegister src1,
2345 LiftoffRegister src2,
2346 LiftoffRegister mask) {
2347 bailout(kSimd, "emit_s128_select");
2348}
2349
2350void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
2351 LiftoffRegister src) {
2352 bailout(kSimd, "emit_i8x16_neg");
2353}
2354
2355void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
2356 LiftoffRegister src) {
2357 bailout(kSimd, "emit_v128_anytrue");
2358}
2359
2360void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
2361 LiftoffRegister src) {
2362 bailout(kSimd, "emit_i8x16_alltrue");
2363}
2364
2365void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
2366 LiftoffRegister src) {
2367 bailout(kSimd, "emit_i8x16_bitmask");
2368}
2369
2370void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
2371 LiftoffRegister rhs) {
2372 bailout(kSimd, "emit_i8x16_shl");
2373}
2374
2375void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
2376 int32_t rhs) {
2377 bailout(kSimd, "emit_i8x16_shli");
2378}
2379
2380void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
2381 LiftoffRegister lhs,
2382 LiftoffRegister rhs) {
2383 bailout(kSimd, "emit_i8x16_shr_s");
2384}
2385
2386void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
2387 LiftoffRegister lhs, int32_t rhs) {
2388 bailout(kSimd, "emit_i8x16_shri_s");
2389}
2390
2391void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
2392 LiftoffRegister lhs,
2393 LiftoffRegister rhs) {
2394 bailout(kSimd, "emit_i8x16_shr_u");
2395}
2396
2397void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
2398 LiftoffRegister lhs, int32_t rhs) {
2399 bailout(kSimd, "emit_i8x16_shri_u");
2400}
2401
2402void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
2403 LiftoffRegister rhs) {
2404 bailout(kSimd, "emit_i8x16_add");
2405}
2406
2407void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
2408 LiftoffRegister lhs,
2409 LiftoffRegister rhs) {
2410 bailout(kSimd, "emit_i8x16_add_sat_s");
2411}
2412
2413void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
2414 LiftoffRegister lhs,
2415 LiftoffRegister rhs) {
2416 bailout(kSimd, "emit_i8x16_add_sat_u");
2417}
2418
2419void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
2420 LiftoffRegister rhs) {
2421 bailout(kSimd, "emit_i8x16_sub");
2422}
2423
2424void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
2425 LiftoffRegister lhs,
2426 LiftoffRegister rhs) {
2427 bailout(kSimd, "emit_i8x16_sub_sat_s");
2428}
2429
2430void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
2431 LiftoffRegister lhs,
2432 LiftoffRegister rhs) {
2433 bailout(kSimd, "emit_i8x16_sub_sat_u");
2434}
2435
2436void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
2437 LiftoffRegister lhs,
2438 LiftoffRegister rhs) {
2439 bailout(kSimd, "emit_i8x16_min_s");
2440}
2441
2442void LiftoffAssembler::emit_i8x16_min_u(LiftoffRegister dst,
2443 LiftoffRegister lhs,
2444 LiftoffRegister rhs) {
2445 bailout(kSimd, "emit_i8x16_min_u");
2446}
2447
2448void LiftoffAssembler::emit_i8x16_max_s(LiftoffRegister dst,
2449 LiftoffRegister lhs,
2450 LiftoffRegister rhs) {
2451 bailout(kSimd, "emit_i8x16_max_s");
2452}
2453
2454void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst,
2455 LiftoffRegister lhs,
2456 LiftoffRegister rhs) {
2457 bailout(kSimd, "emit_i8x16_max_u");
2458}
2459
2460void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
2461 LiftoffRegister src) {
2462 bailout(kSimd, "emit_i8x16_popcnt");
2463}
2464
2465void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
2466 LiftoffRegister src) {
2467 bailout(kSimd, "emit_i16x8_neg");
2468}
2469
2470void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
2471 LiftoffRegister src) {
2472 bailout(kSimd, "emit_i16x8_alltrue");
2473}
2474
2475void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
2476 LiftoffRegister src) {
2477 bailout(kSimd, "emit_i16x8_bitmask");
2478}
2479
2480void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
2481 LiftoffRegister rhs) {
2482 bailout(kSimd, "emit_i16x8_shl");
2483}
2484
2485void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
2486 int32_t rhs) {
2487 bailout(kSimd, "emit_i16x8_shli");
2488}
2489
2490void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
2491 LiftoffRegister lhs,
2492 LiftoffRegister rhs) {
2493 bailout(kSimd, "emit_i16x8_shr_s");
2494}
2495
2496void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
2497 LiftoffRegister lhs, int32_t rhs) {
2498 bailout(kSimd, "emit_i16x8_shri_s");
2499}
2500
2501void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
2502 LiftoffRegister lhs,
2503 LiftoffRegister rhs) {
2504 bailout(kSimd, "emit_i16x8_shr_u");
2505}
2506
2507void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
2508 LiftoffRegister lhs, int32_t rhs) {
2509 bailout(kSimd, "emit_i16x8_shri_u");
2510}
2511
2512void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
2513 LiftoffRegister rhs) {
2514 bailout(kSimd, "emit_i16x8_add");
2515}
2516
2517void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
2518 LiftoffRegister lhs,
2519 LiftoffRegister rhs) {
2520 bailout(kSimd, "emit_i16x8_add_sat_s");
2521}
2522
2523void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
2524 LiftoffRegister lhs,
2525 LiftoffRegister rhs) {
2526 bailout(kSimd, "emit_i16x8_add_sat_u");
2527}
2528
2529void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
2530 LiftoffRegister rhs) {
2531 bailout(kSimd, "emit_i16x8_sub");
2532}
2533
2534void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
2535 LiftoffRegister lhs,
2536 LiftoffRegister rhs) {
2537 bailout(kSimd, "emit_i16x8_sub_sat_s");
2538}
2539
2540void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
2541 LiftoffRegister lhs,
2542 LiftoffRegister rhs) {
2543 bailout(kSimd, "emit_i16x8_sub_sat_u");
2544}
2545
2546void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
2547 LiftoffRegister rhs) {
2548 bailout(kSimd, "emit_i16x8_mul");
2549}
2550
2551void LiftoffAssembler::emit_i16x8_min_s(LiftoffRegister dst,
2552 LiftoffRegister lhs,
2553 LiftoffRegister rhs) {
2554 bailout(kSimd, "emit_i16x8_min_s");
2555}
2556
2557void LiftoffAssembler::emit_i16x8_min_u(LiftoffRegister dst,
2558 LiftoffRegister lhs,
2559 LiftoffRegister rhs) {
2560 bailout(kSimd, "emit_i16x8_min_u");
2561}
2562
2563void LiftoffAssembler::emit_i16x8_max_s(LiftoffRegister dst,
2564 LiftoffRegister lhs,
2565 LiftoffRegister rhs) {
2566 bailout(kSimd, "emit_i16x8_max_s");
2567}
2568
2569void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
2570 LiftoffRegister lhs,
2571 LiftoffRegister rhs) {
2572 bailout(kSimd, "emit_i16x8_max_u");
2573}
2574
2575void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
2576 LiftoffRegister src) {
2577 bailout(kSimd, "emit_i32x4_neg");
2578}
2579
2580void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
2581 LiftoffRegister src) {
2582 bailout(kSimd, "emit_i32x4_alltrue");
2583}
2584
2585void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
2586 LiftoffRegister src) {
2587 bailout(kSimd, "emit_i32x4_bitmask");
2588}
2589
2590void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
2591 LiftoffRegister rhs) {
2592 bailout(kSimd, "emit_i32x4_shl");
2593}
2594
2595void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
2596 int32_t rhs) {
2597 bailout(kSimd, "emit_i32x4_shli");
2598}
2599
2600void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
2601 LiftoffRegister lhs,
2602 LiftoffRegister rhs) {
2603 bailout(kSimd, "emit_i32x4_shr_s");
2604}
2605
2606void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
2607 LiftoffRegister lhs, int32_t rhs) {
2608 bailout(kSimd, "emit_i32x4_shri_s");
2609}
2610
2611void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
2612 LiftoffRegister lhs,
2613 LiftoffRegister rhs) {
2614 bailout(kSimd, "emit_i32x4_shr_u");
2615}
2616
2617void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
2618 LiftoffRegister lhs, int32_t rhs) {
2619 bailout(kSimd, "emit_i32x4_shri_u");
2620}
2621
2622void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
2623 LiftoffRegister rhs) {
2624 bailout(kSimd, "emit_i32x4_add");
2625}
2626
2627void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
2628 LiftoffRegister rhs) {
2629 bailout(kSimd, "emit_i32x4_sub");
2630}
2631
2632void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
2633 LiftoffRegister rhs) {
2634 bailout(kSimd, "emit_i32x4_mul");
2635}
2636
2637void LiftoffAssembler::emit_i32x4_min_s(LiftoffRegister dst,
2638 LiftoffRegister lhs,
2639 LiftoffRegister rhs) {
2640 bailout(kSimd, "emit_i32x4_min_s");
2641}
2642
2643void LiftoffAssembler::emit_i32x4_min_u(LiftoffRegister dst,
2644 LiftoffRegister lhs,
2645 LiftoffRegister rhs) {
2646 bailout(kSimd, "emit_i32x4_min_u");
2647}
2648
2649void LiftoffAssembler::emit_i32x4_max_s(LiftoffRegister dst,
2650 LiftoffRegister lhs,
2651 LiftoffRegister rhs) {
2652 bailout(kSimd, "emit_i32x4_max_s");
2653}
2654
2655void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
2656 LiftoffRegister lhs,
2657 LiftoffRegister rhs) {
2658 bailout(kSimd, "emit_i32x4_max_u");
2659}
2660
2661void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
2662 LiftoffRegister lhs,
2663 LiftoffRegister rhs) {
2664 bailout(kSimd, "emit_i32x4_dot_i16x8_s");
2665}
2666
2667void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
2668 LiftoffRegister src) {
2669 bailout(kSimd, "emit_i64x2_neg");
2670}
2671
2672void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
2673 LiftoffRegister src) {
2674 bailout(kSimd, "emit_i64x2_alltrue");
2675}
2676
2677void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
2678 LiftoffRegister src) {
2679 bailout(kSimd, "emit_i64x2_bitmask");
2680}
2681
2682void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
2683 LiftoffRegister rhs) {
2684 bailout(kSimd, "emit_i64x2_shl");
2685}
2686
2687void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
2688 int32_t rhs) {
2689 bailout(kSimd, "emit_i64x2_shli");
2690}
2691
2692void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
2693 LiftoffRegister lhs,
2694 LiftoffRegister rhs) {
2695 bailout(kSimd, "emit_i64x2_shr_s");
2696}
2697
2698void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
2699 LiftoffRegister lhs, int32_t rhs) {
2700 bailout(kSimd, "emit_i64x2_shri_s");
2701}
2702
2703void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
2704 LiftoffRegister lhs,
2705 LiftoffRegister rhs) {
2706 bailout(kSimd, "emit_i64x2_shr_u");
2707}
2708
2709void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
2710 LiftoffRegister lhs, int32_t rhs) {
2711 bailout(kSimd, "emit_i64x2_shri_u");
2712}
2713
2714void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
2715 LiftoffRegister rhs) {
2716 bailout(kSimd, "emit_i64x2_add");
2717}
2718
2719void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
2720 LiftoffRegister rhs) {
2721 bailout(kSimd, "emit_i64x2_sub");
2722}
2723
2724void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
2725 LiftoffRegister rhs) {
2726 bailout(kSimd, "emit_i64x2_mul");
2727}
2728
2729void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
2730 LiftoffRegister rhs) {
2731 bailout(kSimd, "emit_i64x2_gt_s");
2732}
2733
2734void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
2735 LiftoffRegister rhs) {
2736 bailout(kSimd, "emit_i64x2_ge_s");
2737}
2738
2739void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
2740 LiftoffRegister src) {
2741 bailout(kSimd, "emit_f32x4_abs");
2742}
2743
2744void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
2745 LiftoffRegister src) {
2746 bailout(kSimd, "emit_f32x4_neg");
2747}
2748
2749void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
2750 LiftoffRegister src) {
2751 bailout(kSimd, "emit_f32x4_sqrt");
2752}
2753
2754bool LiftoffAssembler::emit_f32x4_ceil(LiftoffRegister dst,
2755 LiftoffRegister src) {
2756 bailout(kSimd, "emit_f32x4_ceil");
2757 return true;
2758}
2759
2760bool LiftoffAssembler::emit_f32x4_floor(LiftoffRegister dst,
2761 LiftoffRegister src) {
2762 bailout(kSimd, "emit_f32x4_floor");
2763 return true;
2764}
2765
2766bool LiftoffAssembler::emit_f32x4_trunc(LiftoffRegister dst,
2767 LiftoffRegister src) {
2768 bailout(kSimd, "emit_f32x4_trunc");
2769 return true;
2770}
2771
2772bool LiftoffAssembler::emit_f32x4_nearest_int(LiftoffRegister dst,
2773 LiftoffRegister src) {
2774 bailout(kSimd, "emit_f32x4_nearest_int");
2775 return true;
2776}
2777
2778void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
2779 LiftoffRegister rhs) {
2780 bailout(kSimd, "emit_f32x4_add");
2781}
2782
2783void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
2784 LiftoffRegister rhs) {
2785 bailout(kSimd, "emit_f32x4_sub");
2786}
2787
2788void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
2789 LiftoffRegister rhs) {
2790 bailout(kSimd, "emit_f32x4_mul");
2791}
2792
2793void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
2794 LiftoffRegister rhs) {
2795 bailout(kSimd, "emit_f32x4_div");
2796}
2797
2798void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
2799 LiftoffRegister rhs) {
2800 bailout(kSimd, "emit_f32x4_min");
2801}
2802
2803void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
2804 LiftoffRegister rhs) {
2805 bailout(kSimd, "emit_f32x4_max");
2806}
2807
2808void LiftoffAssembler::emit_f32x4_relaxed_min(LiftoffRegister dst,
2809 LiftoffRegister lhs,
2810 LiftoffRegister rhs) {
2811 bailout(kSimd, "emit_f32x4_relaxed_min");
2812}
2813
2814void LiftoffAssembler::emit_f32x4_relaxed_max(LiftoffRegister dst,
2815 LiftoffRegister lhs,
2816 LiftoffRegister rhs) {
2817 bailout(kSimd, "emit_f32x4_relaxed_max");
2818}
2819
2820void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs,
2821 LiftoffRegister rhs) {
2822 bailout(kSimd, "emit_f32x4_pmin");
2823}
2824
2825void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs,
2826 LiftoffRegister rhs) {
2827 bailout(kSimd, "emit_f32x4_pmax");
2828}
2829
2830void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
2831 LiftoffRegister src) {
2832 bailout(kSimd, "emit_f64x2_abs");
2833}
2834
2835void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
2836 LiftoffRegister src) {
2837 bailout(kSimd, "emit_f64x2_neg");
2838}
2839
2840void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
2841 LiftoffRegister src) {
2842 bailout(kSimd, "emit_f64x2_sqrt");
2843}
2844
2845bool LiftoffAssembler::emit_f64x2_ceil(LiftoffRegister dst,
2846 LiftoffRegister src) {
2847 bailout(kSimd, "emit_f64x2_ceil");
2848 return true;
2849}
2850
2851bool LiftoffAssembler::emit_f64x2_floor(LiftoffRegister dst,
2852 LiftoffRegister src) {
2853 bailout(kSimd, "emit_f64x2_floor");
2854 return true;
2855}
2856
2857bool LiftoffAssembler::emit_f64x2_trunc(LiftoffRegister dst,
2858 LiftoffRegister src) {
2859 bailout(kSimd, "emit_f64x2_trunc");
2860 return true;
2861}
2862
2863bool LiftoffAssembler::emit_f64x2_nearest_int(LiftoffRegister dst,
2864 LiftoffRegister src) {
2865 bailout(kSimd, "emit_f64x2_nearest_int");
2866 return true;
2867}
2868
2869void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
2870 LiftoffRegister rhs) {
2871 bailout(kSimd, "emit_f64x2_add");
2872}
2873
2874void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
2875 LiftoffRegister rhs) {
2876 bailout(kSimd, "emit_f64x2_sub");
2877}
2878
2879void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
2880 LiftoffRegister rhs) {
2881 bailout(kSimd, "emit_f64x2_mul");
2882}
2883
2884void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs,
2885 LiftoffRegister rhs) {
2886 bailout(kSimd, "emit_f64x2_div");
2887}
2888
2889void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
2890 LiftoffRegister rhs) {
2891 bailout(kSimd, "emit_f64x2_min");
2892}
2893
2894void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
2895 LiftoffRegister rhs) {
2896 bailout(kSimd, "emit_f64x2_max");
2897}
2898
2899void LiftoffAssembler::emit_f64x2_pmin(LiftoffRegister dst, LiftoffRegister lhs,
2900 LiftoffRegister rhs) {
2901 bailout(kSimd, "emit_f64x2_pmin");
2902}
2903
2904void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
2905 LiftoffRegister rhs) {
2906 bailout(kSimd, "emit_f64x2_pmax");
2907}
2908
2909void LiftoffAssembler::emit_f64x2_relaxed_min(LiftoffRegister dst,
2910 LiftoffRegister lhs,
2911 LiftoffRegister rhs) {
2912 bailout(kSimd, "emit_f64x2_relaxed_min");
2913}
2914
2915void LiftoffAssembler::emit_f64x2_relaxed_max(LiftoffRegister dst,
2916 LiftoffRegister lhs,
2917 LiftoffRegister rhs) {
2918 bailout(kSimd, "emit_f64x2_relaxed_max");
2919}
2920
2921void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
2922                                                      LiftoffRegister src) {
2923 bailout(kSimd, "emit_f64x2_convert_low_i32x4_s");
2924}
2925
2926void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
2927                                                      LiftoffRegister src) {
2928 bailout(kSimd, "emit_f64x2_convert_low_i32x4_u");
2929}
2930
2931void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
2932 LiftoffRegister src) {
2933 bailout(kSimd, "emit_f64x2_promote_low_f32x4");
2934}
2935
2936void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
2937 LiftoffRegister src) {
2938 bailout(kSimd, "emit_i32x4_sconvert_f32x4");
2939}
2940
2941void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
2942 LiftoffRegister src) {
2943 bailout(kSimd, "emit_i32x4_uconvert_f32x4");
2944}
2945
2946void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
2947                                                         LiftoffRegister src) {
2948 bailout(kSimd, "emit_i32x4_trunc_sat_f64x2_s_zero");
2949}
2950
2951void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
2952                                                         LiftoffRegister src) {
2953 bailout(kSimd, "emit_i32x4_trunc_sat_f64x2_u_zero");
2954}
2955
2956void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
2957 LiftoffRegister src) {
2958 bailout(kSimd, "emit_f32x4_sconvert_i32x4");
2959}
2960
2961void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
2962 LiftoffRegister src) {
2963 bailout(kSimd, "emit_f32x4_uconvert_i32x4");
2964}
2965
2966void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
2967 LiftoffRegister src) {
2968 bailout(kSimd, "emit_f32x4_demote_f64x2_zero");
2969}
2970
2971void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
2972 LiftoffRegister lhs,
2973 LiftoffRegister rhs) {
2974 bailout(kSimd, "emit_i8x16_sconvert_i16x8");
2975}
2976
2977void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst,
2978 LiftoffRegister lhs,
2979 LiftoffRegister rhs) {
2980 bailout(kSimd, "emit_i8x16_uconvert_i16x8");
2981}
2982
2983void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst,
2984 LiftoffRegister lhs,
2985 LiftoffRegister rhs) {
2986 bailout(kSimd, "emit_i16x8_sconvert_i32x4");
2987}
2988
2989void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
2990 LiftoffRegister lhs,
2991 LiftoffRegister rhs) {
2992 bailout(kSimd, "emit_i16x8_uconvert_i32x4");
2993}
2994
2995void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst,
2996                                                     LiftoffRegister src) {
2997 bailout(kSimd, "emit_i16x8_sconvert_i8x16_low");
2998}
2999
3000void LiftoffAssembler::emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst,
3001                                                      LiftoffRegister src) {
3002 bailout(kSimd, "emit_i16x8_sconvert_i8x16_high");
3003}
3004
3005void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
3006                                                     LiftoffRegister src) {
3007 bailout(kSimd, "emit_i16x8_uconvert_i8x16_low");
3008}
3009
3010void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst,
3011                                                      LiftoffRegister src) {
3012 bailout(kSimd, "emit_i16x8_uconvert_i8x16_high");
3013}
3014
3015void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
3016                                                     LiftoffRegister src) {
3017 bailout(kSimd, "emit_i32x4_sconvert_i16x8_low");
3018}
3019
3020void LiftoffAssembler::emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst,
3021                                                      LiftoffRegister src) {
3022 bailout(kSimd, "emit_i32x4_sconvert_i16x8_high");
3023}
3024
3025void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
3026                                                     LiftoffRegister src) {
3027 bailout(kSimd, "emit_i32x4_uconvert_i16x8_low");
3028}
3029
3030void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
3031                                                      LiftoffRegister src) {
3032 bailout(kSimd, "emit_i32x4_uconvert_i16x8_high");
3033}
3034
3035void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
3036                                                     LiftoffRegister src) {
3037 bailout(kSimd, "emit_i64x2_sconvert_i32x4_low");
3038}
3039
3040void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
3041                                                      LiftoffRegister src) {
3042 bailout(kSimd, "emit_i64x2_sconvert_i32x4_high");
3043}
3044
3045void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
3046                                                     LiftoffRegister src) {
3047 bailout(kSimd, "emit_i64x2_uconvert_i32x4_low");
3048}
3049
3050void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
3051                                                      LiftoffRegister src) {
3052 bailout(kSimd, "emit_i64x2_uconvert_i32x4_high");
3053}
3054
3055void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
3056                                                     LiftoffRegister lhs,
3057 LiftoffRegister rhs) {
3058 bailout(kSimd, "emit_i8x16_rounding_average_u");
3059}
3060
3061void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst,
3062                                                     LiftoffRegister lhs,
3063 LiftoffRegister rhs) {
3064 bailout(kSimd, "emit_i16x8_rounding_average_u");
3065}
3066
3067void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst,
3068 LiftoffRegister src) {
3069 bailout(kSimd, "emit_i8x16_abs");
3070}
3071
3072void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
3073 LiftoffRegister src) {
3074 bailout(kSimd, "emit_i16x8_abs");
3075}
3076
3077void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
3078 LiftoffRegister src) {
3079 bailout(kSimd, "emit_i32x4_abs");
3080}
3081
3082void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
3083 LiftoffRegister lhs,
3084 uint8_t imm_lane_idx) {
3085 bailout(kSimd, "emit_i8x16_extract_lane_s");
3086}
3087
3088void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
3089 LiftoffRegister lhs,
3090 uint8_t imm_lane_idx) {
3091 bailout(kSimd, "emit_i8x16_extract_lane_u");
3092}
3093
3094void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
3095 LiftoffRegister lhs,
3096 uint8_t imm_lane_idx) {
3097 bailout(kSimd, "emit_i16x8_extract_lane_s");
3098}
3099
3100void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
3101 LiftoffRegister lhs,
3102 uint8_t imm_lane_idx) {
3103 bailout(kSimd, "emit_i16x8_extract_lane_u");
3104}
3105
3106void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
3107 LiftoffRegister lhs,
3108 uint8_t imm_lane_idx) {
3109 bailout(kSimd, "emit_i32x4_extract_lane");
3110}
3111
3112void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
3113 LiftoffRegister lhs,
3114 uint8_t imm_lane_idx) {
3115 bailout(kSimd, "emit_i64x2_extract_lane");
3116}
3117
3118void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
3119 LiftoffRegister lhs,
3120 uint8_t imm_lane_idx) {
3121 bailout(kSimd, "emit_f32x4_extract_lane");
3122}
3123
3124void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
3125 LiftoffRegister lhs,
3126 uint8_t imm_lane_idx) {
3127 bailout(kSimd, "emit_f64x2_extract_lane");
3128}
3129
3130void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
3131 LiftoffRegister src1,
3132 LiftoffRegister src2,
3133 uint8_t imm_lane_idx) {
3134 bailout(kSimd, "emit_i8x16_replace_lane");
3135}
3136
3137void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
3138 LiftoffRegister src1,
3139 LiftoffRegister src2,
3140 uint8_t imm_lane_idx) {
3141 bailout(kSimd, "emit_i16x8_replace_lane");
3142}
3143
3144void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
3145 LiftoffRegister src1,
3146 LiftoffRegister src2,
3147 uint8_t imm_lane_idx) {
3148 bailout(kSimd, "emit_i32x4_replace_lane");
3149}
3150
3151void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
3152 LiftoffRegister src1,
3153 LiftoffRegister src2,
3154 uint8_t imm_lane_idx) {
3155 bailout(kSimd, "emit_i64x2_replace_lane");
3156}
3157
3158void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
3159 LiftoffRegister src1,
3160 LiftoffRegister src2,
3161 uint8_t imm_lane_idx) {
3162 bailout(kSimd, "emit_f32x4_replace_lane");
3163}
3164
3165void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
3166 LiftoffRegister src1,
3167 LiftoffRegister src2,
3168 uint8_t imm_lane_idx) {
3169 bailout(kSimd, "emit_f64x2_replace_lane");
3170}
3171
3172void LiftoffAssembler::emit_f32x4_qfma(LiftoffRegister dst,
3173 LiftoffRegister src1,
3174 LiftoffRegister src2,
3175 LiftoffRegister src3) {
3176 bailout(kRelaxedSimd, "emit_f32x4_qfma");
3177}
3178
3179void LiftoffAssembler::emit_f32x4_qfms(LiftoffRegister dst,
3180 LiftoffRegister src1,
3181 LiftoffRegister src2,
3182 LiftoffRegister src3) {
3183 bailout(kRelaxedSimd, "emit_f32x4_qfms");
3184}
3185
3186void LiftoffAssembler::emit_f64x2_qfma(LiftoffRegister dst,
3187 LiftoffRegister src1,
3188 LiftoffRegister src2,
3189 LiftoffRegister src3) {
3190 bailout(kRelaxedSimd, "emit_f64x2_qfma");
3191}
3192
3193void LiftoffAssembler::emit_f64x2_qfms(LiftoffRegister dst,
3194 LiftoffRegister src1,
3195 LiftoffRegister src2,
3196 LiftoffRegister src3) {
3197 bailout(kRelaxedSimd, "emit_f64x2_qfms");
3198}
3199
3200bool LiftoffAssembler::emit_f16x8_splat(LiftoffRegister dst,
3201 LiftoffRegister src) {
3202 return false;
3203}
3204
3205bool LiftoffAssembler::emit_f16x8_extract_lane(LiftoffRegister dst,
3206 LiftoffRegister lhs,
3207 uint8_t imm_lane_idx) {
3208 return false;
3209}
3210
3211bool LiftoffAssembler::emit_f16x8_replace_lane(LiftoffRegister dst,
3212 LiftoffRegister src1,
3213 LiftoffRegister src2,
3214 uint8_t imm_lane_idx) {
3215 return false;
3216}
3217
3218bool LiftoffAssembler::emit_f16x8_abs(LiftoffRegister dst,
3219 LiftoffRegister src) {
3220 return false;
3221}
3222
3223bool LiftoffAssembler::emit_f16x8_neg(LiftoffRegister dst,
3224 LiftoffRegister src) {
3225 return false;
3226}
3227
3228bool LiftoffAssembler::emit_f16x8_sqrt(LiftoffRegister dst,
3229 LiftoffRegister src) {
3230 return false;
3231}
3232
3233bool LiftoffAssembler::emit_f16x8_ceil(LiftoffRegister dst,
3234 LiftoffRegister src) {
3235 return false;
3236}
3237
3238bool LiftoffAssembler::emit_f16x8_floor(LiftoffRegister dst,
3239 LiftoffRegister src) {
3240 return false;
3241}
3242
3243bool LiftoffAssembler::emit_f16x8_trunc(LiftoffRegister dst,
3244 LiftoffRegister src) {
3245 return false;
3246}
3247
3248bool LiftoffAssembler::emit_f16x8_nearest_int(LiftoffRegister dst,
3249 LiftoffRegister src) {
3250 return false;
3251}
3252
3253bool LiftoffAssembler::emit_f16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
3254 LiftoffRegister rhs) {
3255 return false;
3256}
3257
3258bool LiftoffAssembler::emit_f16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
3259 LiftoffRegister rhs) {
3260 return false;
3261}
3262
3263bool LiftoffAssembler::emit_f16x8_lt(LiftoffRegister dst, LiftoffRegister lhs,
3264 LiftoffRegister rhs) {
3265 return false;
3266}
3267
3268bool LiftoffAssembler::emit_f16x8_le(LiftoffRegister dst, LiftoffRegister lhs,
3269 LiftoffRegister rhs) {
3270 return false;
3271}
3272
3273bool LiftoffAssembler::emit_f16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
3274 LiftoffRegister rhs) {
3275 return false;
3276}
3277
3278bool LiftoffAssembler::emit_f16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
3279 LiftoffRegister rhs) {
3280 return false;
3281}
3282
3283bool LiftoffAssembler::emit_f16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
3284 LiftoffRegister rhs) {
3285 return false;
3286}
3287
3288bool LiftoffAssembler::emit_f16x8_div(LiftoffRegister dst, LiftoffRegister lhs,
3289 LiftoffRegister rhs) {
3290 return false;
3291}
3292
3293bool LiftoffAssembler::emit_f16x8_min(LiftoffRegister dst, LiftoffRegister lhs,
3294 LiftoffRegister rhs) {
3295 return false;
3296}
3297
3298bool LiftoffAssembler::emit_f16x8_max(LiftoffRegister dst, LiftoffRegister lhs,
3299 LiftoffRegister rhs) {
3300 return false;
3301}
3302
3303bool LiftoffAssembler::emit_f16x8_pmin(LiftoffRegister dst, LiftoffRegister lhs,
3304 LiftoffRegister rhs) {
3305 return false;
3306}
3307
3308bool LiftoffAssembler::emit_f16x8_pmax(LiftoffRegister dst, LiftoffRegister lhs,
3309 LiftoffRegister rhs) {
3310 return false;
3311}
3312
3313bool LiftoffAssembler::emit_i16x8_sconvert_f16x8(LiftoffRegister dst,
3314 LiftoffRegister src) {
3315 return false;
3316}
3317
3318bool LiftoffAssembler::emit_i16x8_uconvert_f16x8(LiftoffRegister dst,
3319 LiftoffRegister src) {
3320 return false;
3321}
3322
3323bool LiftoffAssembler::emit_f16x8_sconvert_i16x8(LiftoffRegister dst,
3324 LiftoffRegister src) {
3325 return false;
3326}
3327
3328bool LiftoffAssembler::emit_f16x8_uconvert_i16x8(LiftoffRegister dst,
3329 LiftoffRegister src) {
3330 return false;
3331}
3332
3333bool LiftoffAssembler::emit_f16x8_demote_f32x4_zero(LiftoffRegister dst,
3334 LiftoffRegister src) {
3335 return false;
3336}
3337
3338bool LiftoffAssembler::emit_f16x8_demote_f64x2_zero(LiftoffRegister dst,
3339 LiftoffRegister src) {
3340 return false;
3341}
3342
3343bool LiftoffAssembler::emit_f32x4_promote_low_f16x8(LiftoffRegister dst,
3344 LiftoffRegister src) {
3345 return false;
3346}
3347
3348bool LiftoffAssembler::emit_f16x8_qfma(LiftoffRegister dst,
3349 LiftoffRegister src1,
3350 LiftoffRegister src2,
3351 LiftoffRegister src3) {
3352 return false;
3353}
3354
3355bool LiftoffAssembler::emit_f16x8_qfms(LiftoffRegister dst,
3356 LiftoffRegister src1,
3357 LiftoffRegister src2,
3358 LiftoffRegister src3) {
3359 return false;
3360}
3361
3362bool LiftoffAssembler::supports_f16_mem_access() { return false; }
3363
3364void LiftoffAssembler::StackCheck(Label* ool_code) {
3365 Register limit_address = kScratchReg;
3366  LoadStackLimit(limit_address, StackLimitKind::kRealStackLimit);
3367  Branch(ool_code, ule, sp, Operand(limit_address));
3368}
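// StackCheck branches to the out-of-line code whenever the stack pointer has
// grown down to (or past) the loaded limit, i.e. on an unsigned sp <= limit.
// A trivial model of that branch condition; the helper name is hypothetical:
inline bool Model_NeedsStackGuard(uint64_t sp, uint64_t limit) {
  // Branch(ool_code, ule, sp, Operand(limit_address));
  return sp <= limit;
}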
3369
3370void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
3371  MacroAssembler::AssertUnreachable(reason);
3372}
3373
3374void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
3375 LiftoffRegList gp_regs = regs & kGpCacheRegList;
3376 unsigned num_gp_regs = gp_regs.GetNumRegsSet();
3377 if (num_gp_regs) {
3378 unsigned offset = num_gp_regs * kSystemPointerSize;
3379 addi_d(sp, sp, -offset);
3380 while (!gp_regs.is_empty()) {
3381 LiftoffRegister reg = gp_regs.GetFirstRegSet();
3382      offset -= kSystemPointerSize;
3383      St_d(reg.gp(), MemOperand(sp, offset));
3384 gp_regs.clear(reg);
3385 }
3386 DCHECK_EQ(offset, 0);
3387 }
3388 LiftoffRegList fp_regs = regs & kFpCacheRegList;
3389 unsigned num_fp_regs = fp_regs.GetNumRegsSet();
3390 if (num_fp_regs) {
3391 unsigned slot_size = 8;
3392 addi_d(sp, sp, -(num_fp_regs * slot_size));
3393 unsigned offset = 0;
3394 while (!fp_regs.is_empty()) {
3395 LiftoffRegister reg = fp_regs.GetFirstRegSet();
3396      MacroAssembler::Fst_d(reg.fp(), MemOperand(sp, offset));
3397      fp_regs.clear(reg);
3398 offset += slot_size;
3399 }
3400 DCHECK_EQ(offset, num_fp_regs * slot_size);
3401 }
3402}
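// PushRegisters reserves the whole GP save area with a single addi_d and then
// stores registers at descending offsets, so the first register in the set
// ends up at the highest address. A sketch of the offset bookkeeping only,
// assuming kSystemPointerSize == 8; the helper name is hypothetical:
inline void Model_GpPushOffsets(int num_gp_regs, int* offsets_out) {
  int offset = num_gp_regs * 8;  // addi_d(sp, sp, -offset);
  for (int i = 0; i < num_gp_regs; ++i) {
    offset -= 8;              // offset -= kSystemPointerSize;
    offsets_out[i] = offset;  // St_d(reg.gp(), MemOperand(sp, offset));
  }
  // After the loop, offset == 0, matching the DCHECK_EQ above.
}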
3403
3404void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
3405 LiftoffRegList fp_regs = regs & kFpCacheRegList;
3406 unsigned fp_offset = 0;
3407 while (!fp_regs.is_empty()) {
3408 LiftoffRegister reg = fp_regs.GetFirstRegSet();
3409 MacroAssembler::Fld_d(reg.fp(), MemOperand(sp, fp_offset));
3410 fp_regs.clear(reg);
3411 fp_offset += 8;
3412 }
3413 if (fp_offset) addi_d(sp, sp, fp_offset);
3414 LiftoffRegList gp_regs = regs & kGpCacheRegList;
3415 unsigned gp_offset = 0;
3416 while (!gp_regs.is_empty()) {
3417 LiftoffRegister reg = gp_regs.GetLastRegSet();
3418 Ld_d(reg.gp(), MemOperand(sp, gp_offset));
3419 gp_regs.clear(reg);
3420 gp_offset += kSystemPointerSize;
3421 }
3422 addi_d(sp, sp, gp_offset);
3423}
3424
3425void LiftoffAssembler::RecordSpillsInSafepoint(
3426    SafepointTableBuilder::Safepoint& safepoint, LiftoffRegList all_spills,
3427 LiftoffRegList ref_spills, int spill_offset) {
3428 LiftoffRegList fp_spills = all_spills & kFpCacheRegList;
3429 int spill_space_size = fp_spills.GetNumRegsSet() * kSimd128Size;
3430 LiftoffRegList gp_spills = all_spills & kGpCacheRegList;
3431 while (!gp_spills.is_empty()) {
3432 LiftoffRegister reg = gp_spills.GetFirstRegSet();
3433 if (ref_spills.has(reg)) {
3434 safepoint.DefineTaggedStackSlot(spill_offset);
3435 }
3436 gp_spills.clear(reg);
3437 ++spill_offset;
3438 spill_space_size += kSystemPointerSize;
3439 }
3440 // Record the number of additional spill slots.
3441 RecordOolSpillSpaceSize(spill_space_size);
3442}
3443
3444void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
3445 DCHECK_LT(num_stack_slots,
3446 (1 << 16) / kSystemPointerSize); // 16 bit immediate
3447 Drop(static_cast<int>(num_stack_slots));
3448 Ret();
3449}
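// The DCHECK above keeps num_stack_slots small enough that the dropped byte
// count fits the 16-bit branch/immediate encoding: with kSystemPointerSize == 8
// that is (1 << 16) / 8 == 8192 slots, i.e. at most 65536 bytes per return.
// A sketch of the bound; the helper name is hypothetical:
inline bool Model_FitsDropImmediate(uint32_t num_stack_slots) {
  constexpr uint32_t kPointerSize = 8;  // kSystemPointerSize on loong64.
  return num_stack_slots < (1u << 16) / kPointerSize;
}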
3450
3451void LiftoffAssembler::CallCWithStackBuffer(
3452    const std::initializer_list<VarState> args, const LiftoffRegister* rets,
3453 ValueKind return_kind, ValueKind out_argument_kind, int stack_bytes,
3454 ExternalReference ext_ref) {
3455 addi_d(sp, sp, -stack_bytes);
3456
3457 int arg_offset = 0;
3458 for (const VarState& arg : args) {
3459 liftoff::StoreToMemory(this, MemOperand{sp, arg_offset}, arg);
3460 arg_offset += value_kind_size(arg.kind());
3461 }
3462 DCHECK_LE(arg_offset, stack_bytes);
3463
3464 // Pass a pointer to the buffer with the arguments to the C function.
3465 // On LoongArch, the first argument is passed in {a0}.
3466 constexpr Register kFirstArgReg = a0;
3467 mov(kFirstArgReg, sp);
3468
3469 // Now call the C function.
3470 constexpr int kNumCCallArgs = 1;
3471 PrepareCallCFunction(kNumCCallArgs, kScratchReg);
3472 CallCFunction(ext_ref, kNumCCallArgs);
3473
3474 // Move return value to the right register.
3475 const LiftoffRegister* next_result_reg = rets;
3476 if (return_kind != kVoid) {
3477 constexpr Register kReturnReg = a0;
3478#ifdef USE_SIMULATOR
3479 // When calling a host function in the simulator, if the function returns an
3480 // int32 value, the simulator does not sign-extend it to int64 because in
3481 // the simulator we do not know whether the function returns an int32 or
3482 // an int64. So we need to sign extend it here.
3483 if (return_kind == kI32) {
3484 slli_w(next_result_reg->gp(), kReturnReg, 0);
3485 } else if (kReturnReg != next_result_reg->gp()) {
3486 Move(*next_result_reg, LiftoffRegister(kReturnReg), return_kind);
3487 }
3488#else
3489 if (kReturnReg != next_result_reg->gp()) {
3490 Move(*next_result_reg, LiftoffRegister(kReturnReg), return_kind);
3491 }
3492#endif
3493 ++next_result_reg;
3494 }
3495
3496 // Load potential output value from the buffer on the stack.
3497 if (out_argument_kind != kVoid) {
3498 liftoff::Load(this, *next_result_reg, MemOperand(sp, 0), out_argument_kind);
3499 }
3500
3501 addi_d(sp, sp, stack_bytes);
3502}
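// CallCWithStackBuffer packs all wasm arguments into a temporary stack buffer,
// passes the buffer address as the single C argument in a0, and reuses the
// same buffer for an optional out-argument after the call returns. A sketch of
// the argument-offset accounting, assuming each value occupies
// value_kind_size() bytes; the helper name is hypothetical:
inline int Model_ArgBufferBytes(const int* value_sizes, int count) {
  int arg_offset = 0;
  for (int i = 0; i < count; ++i) {
    // liftoff::StoreToMemory(this, MemOperand{sp, arg_offset}, arg);
    arg_offset += value_sizes[i];
  }
  return arg_offset;  // Must stay <= stack_bytes, per the DCHECK_LE above.
}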
3503
3504void LiftoffAssembler::CallC(const std::initializer_list<VarState> args_list,
3505 ExternalReference ext_ref) {
3506 // First, prepare the stack for the C call.
3507 const int num_args = static_cast<int>(args_list.size());
3508  PrepareCallCFunction(num_args, kScratchReg);
3509
3510 // Note: If we ever need more than eight arguments we would need to load the
3511 // stack arguments to registers (via LoadToRegister), then push them to the
3512 // stack.
3513
3514 // Execute the parallel register move for register parameters.
3515 DCHECK_GE(arraysize(kCArgRegs), num_args);
3516 const VarState* const args = args_list.begin();
3517 ParallelMove parallel_move{this};
3518 for (int reg_arg = 0; reg_arg < num_args; ++reg_arg) {
3519 parallel_move.LoadIntoRegister(LiftoffRegister{kCArgRegs[reg_arg]},
3520 args[reg_arg]);
3521 }
3522 parallel_move.Execute();
3523
3524 // Now call the C function.
3525 CallCFunction(ext_ref, num_args);
3526}
3527
3528void LiftoffAssembler::CallNativeWasmCode(Address addr) {
3529  Call(addr, RelocInfo::WASM_CALL);
3530}
3531
3532void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
3533  Jump(addr, RelocInfo::WASM_CALL);
3534}
3535
3536void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
3537                                    compiler::CallDescriptor* call_descriptor,
3538 Register target) {
3539 // For loong64, we have more cache registers than wasm parameters. That means
3540 // that target will always be in a register.
3541 DCHECK(target.is_valid());
3542 CallWasmCodePointer(target, call_descriptor->signature_hash());
3543}
3544
3545void LiftoffAssembler::TailCallIndirect(
3546    compiler::CallDescriptor* call_descriptor, Register target) {
3547 DCHECK(target.is_valid());
3548 CallWasmCodePointer(target, call_descriptor->signature_hash(),
3549                      CallJumpMode::kTailCall);
3550}
3551
3552void LiftoffAssembler::CallBuiltin(Builtin builtin) {
3553  // A direct call to a builtin. Just encode the builtin index. This will be
3554 // patched at relocation.
3555 Call(static_cast<Address>(builtin), RelocInfo::WASM_STUB_CALL);
3556}
3557
3558void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
3559 addi_d(sp, sp, -size);
3560 MacroAssembler::Move(addr, sp);
3561}
3562
3563void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
3564 addi_d(sp, sp, size);
3565}
3566
3568
3569void LiftoffAssembler::emit_store_nonzero_if_nan(Register dst, FPURegister src,
3570 ValueKind kind) {
3571 UseScratchRegisterScope temps(this);
3572 Register scratch = temps.Acquire();
3573 Label not_nan;
3574 if (kind == kF32) {
3575 CompareIsNanF32(src, src);
3576 } else {
3577    DCHECK_EQ(kind, kF64);
3578    CompareIsNanF64(src, src);
3579 }
3580 BranchFalseShortF(&not_nan);
3581 li(scratch, 1);
3582 St_w(scratch, MemOperand(dst, 0));
3583 bind(&not_nan);
3584}
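// emit_store_nonzero_if_nan relies on the IEEE rule that NaN never compares
// equal to itself: CompareIsNanF32/F64 compare src with src, and only when
// that check signals NaN is a 1 stored through dst. Host-side model of the
// predicate; the helper name is hypothetical:
inline bool Model_IsNaN(double value) {
  return value != value;  // True only for NaN, mirrors CompareIsNanF64(src, src).
}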
3585
3586void LiftoffAssembler::emit_s128_store_nonzero_if_nan(Register dst,
3587                                                      LiftoffRegister src,
3588 Register tmp_gp,
3589 LiftoffRegister tmp_s128,
3590 ValueKind lane_kind) {
3591 UNIMPLEMENTED();
3592}
3593
3594void LiftoffAssembler::emit_store_nonzero(Register dst) {
3595 St_d(dst, MemOperand(dst, 0));
3596}
3597
3598void LiftoffStackSlots::Construct(int param_slots) {
3599 DCHECK_LT(0, slots_.size());
3600  SortInPushOrder();
3601  int last_stack_slot = param_slots;
3602 for (auto& slot : slots_) {
3603 const int stack_slot = slot.dst_slot_;
3604 int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
3605 DCHECK_LT(0, stack_decrement);
3606 last_stack_slot = stack_slot;
3607 const LiftoffAssembler::VarState& src = slot.src_;
3608    switch (src.loc()) {
3609      case LiftoffAssembler::VarState::kStack:
3610        if (src.kind() != kS128) {
3611          asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
3612          asm_->Ld_d(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
3613          asm_->Push(kScratchReg);
3614        } else {
3615          asm_->AllocateStackSpace(stack_decrement - kSimd128Size);
3616          asm_->Ld_d(kScratchReg, liftoff::GetStackSlot(slot.src_offset_ - 8));
3617          asm_->Push(kScratchReg);
3618          asm_->Ld_d(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
3619          asm_->Push(kScratchReg);
3620        }
3621        break;
3622      case LiftoffAssembler::VarState::kRegister: {
3623        int pushed_bytes = SlotSizeInBytes(slot);
3624        asm_->AllocateStackSpace(stack_decrement - pushed_bytes);
3625        liftoff::push(asm_, src.reg(), src.kind());
3626        break;
3627      }
3628      case LiftoffAssembler::VarState::kIntConst: {
3629        asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
3630        asm_->li(kScratchReg, Operand(src.i32_const()));
3631        asm_->Push(kScratchReg);
3632        break;
3633      }
3634 }
3635 }
3636}
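// Construct walks the parameter slots from the highest stack slot downwards;
// for each slot it first allocates the padding between the previous push and
// this slot (stack_decrement minus the bytes actually pushed), then pushes the
// value itself. A sketch of the decrement arithmetic, assuming
// kSystemPointerSize == 8; the helper name is hypothetical:
inline int Model_PaddingBytes(int last_stack_slot, int stack_slot,
                              int pushed_bytes) {
  int stack_decrement = (last_stack_slot - stack_slot) * 8;
  return stack_decrement - pushed_bytes;  // Passed to AllocateStackSpace().
}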
3637
3638} // namespace v8::internal::wasm
3639
3640#endif // V8_WASM_BASELINE_LOONG64_LIFTOFF_ASSEMBLER_LOONG64_INL_H_
Builtins::Kind kind
Definition builtins.cc:40
V8_INLINE void RecordComment(const char *comment, const SourceLocation &loc=SourceLocation::Current())
Definition assembler.h:417
void fcvt_d_s(FPURegister fd, FPURegister fj)
void fcvt_s_d(FPURegister fd, FPURegister fj)
void rotri_d(Register rd, Register rj, int32_t ui6)
void rotri_w(Register rd, Register rj, int32_t ui5)
void masknez(Register rd, Register rj, Register rk)
void ftintrz_w_d(FPURegister fd, FPURegister fj)
void slli_w(Register rd, Register rj, int32_t ui5)
void bstrpick_w(Register rd, Register rj, int32_t msbw, int32_t lsbw)
void ext_w_b(Register rd, Register rj)
void movfr2gr_s(Register rd, FPURegister fj)
void b(int branch_offset, Condition cond=al, RelocInfo::Mode rmode=RelocInfo::NO_INFO)
void movfr2gr_d(Register rd, FPURegister fj)
void alsl_d(Register rd, Register rj, Register rk, int32_t sa2)
void movgr2fr_d(FPURegister fd, Register rj)
void fcopysign_s(FPURegister fd, FPURegister fj, FPURegister fk)
Simd128Register Simd128Register ra
void movgr2fr_w(FPURegister fd, Register rj)
void ftintrz_l_s(FPURegister fd, FPURegister fj)
void bstrpick_d(Register rd, Register rj, int32_t msbd, int32_t lsbd)
void xori(Register rd, Register rj, int32_t ui12)
friend class UseScratchRegisterScope
void ffint_s_w(FPURegister fd, FPURegister fj)
void ftintrz_w_s(FPURegister fd, FPURegister fj)
void fcopysign_d(FPURegister fd, FPURegister fj, FPURegister fk)
void amswap_db_d(Register rd, Register rk, Register rj)
void dbar(int32_t hint)
void amswap_db_w(Register rd, Register rk, Register rj)
void ffint_s_l(FPURegister fd, FPURegister fj)
void add_d(Register rd, Register rj, Register rk)
void addi_d(Register rd, Register rj, int32_t si12)
void sltui(Register rd, Register rj, int32_t si12)
void ffint_d_l(FPURegister fd, FPURegister fj)
void ffint_d_w(FPURegister fd, FPURegister fj)
void ftintrz_l_d(FPURegister fd, FPURegister fj)
void addi_w(Register rd, Register rj, int32_t si12)
void ext_w_h(Register rd, Register rj)
void stop(Condition cond=al, int32_t code=kDefaultStopCode)
static constexpr int kFixedFrameSizeAboveFp
static V8_EXPORT_PRIVATE ExternalReference isolate_address()
void Ld_b(Register rd, const MemOperand &rj)
void LoadStackLimit(Register destination, StackLimitKind kind)
void Call(Register target, Condition cond=al)
void St_w(Register rd, const MemOperand &rj)
void Drop(int count, Condition cond=al)
void Fst_d(FPURegister fj, const MemOperand &dst)
void mov(Register rd, Register rj)
void SmiUntag(Register reg, SBit s=LeaveCC)
void Neg_s(FPURegister fd, FPURegister fj)
void Float64Min(FPURegister dst, FPURegister src1, FPURegister src2, Label *out_of_line)
void CompareIsNanF64(FPURegister cmp1, FPURegister cmp2, CFRegister cd=FCC0)
void Sc_d(Register rd, const MemOperand &rj)
void CompareF32(FPURegister cmp1, FPURegister cmp2, FPUCondition cc, CFRegister cd=FCC0)
void BranchFalseF(Label *target, CFRegister cc=FCC0)
void LoadZeroIfNotFPUCondition(Register dest, CFRegister=FCC0)
void Move(Register dst, Tagged< Smi > smi)
void Clz_d(Register rd, Register rj)
void Float64Max(FPURegister dst, FPURegister src1, FPURegister src2, Label *out_of_line)
void LoadTrustedPointerField(Register destination, MemOperand field_operand, IndirectPointerTag tag)
void Movz(Register rd, Register rj, Register rk)
void JumpIfSmi(Register value, Label *smi_label)
void Ld_d(Register rd, const MemOperand &rj)
void Ftintrz_ul_s(FPURegister fd, FPURegister fj, FPURegister scratch, Register result=no_reg)
void BranchShort(Label *label, Condition cond, Register r1, const Operand &r2, bool need_link=false)
void AssertUnreachable(AbortReason reason) NOOP_UNLESS_DEBUG_CODE
void Ll_d(Register rd, const MemOperand &rj)
void Ld_bu(Register rd, const MemOperand &rj)
void BranchFalseShortF(Label *target, CFRegister cc=FCC0)
void Ftintrz_ul_d(FPURegister fd, FPURegister fj, FPURegister scratch, Register result=no_reg)
void Movn(Register rd, Register rj, Register rk)
void Fld_d(FPURegister fd, const MemOperand &src)
void Ll_w(Register rd, const MemOperand &rj)
void CompareTaggedAndBranch(const Register &lhs, const Operand &rhs, Condition cond, Label *label)
void SmiTag(Register reg, SBit s=LeaveCC)
void Ld_h(Register rd, const MemOperand &rj)
void Ctz_w(Register rd, Register rj)
void Ffint_s_uw(FPURegister fd, FPURegister fj)
void Clz_w(Register rd, Register rj)
void CompareWord(Condition cond, Register dst, Register lhs, const Operand &rhs)
void Fst_s(FPURegister fj, const MemOperand &dst)
void Trunc_d(FPURegister fd, FPURegister fj)
void li(Register rd, Operand j, LiFlags mode=OPTIMIZE_SIZE)
void And(Register dst, Register src1, const Operand &src2, Condition cond=al)
void Float64MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2)
void St_d(Register rd, const MemOperand &rj)
void Ffint_d_uw(FPURegister fd, FPURegister fj)
void Trunc_s(FPURegister fd, FPURegister fj)
void Sc_w(Register rd, const MemOperand &rj)
void FmoveLow(Register dst_low, FPURegister src)
void Jump(Register target, Condition cond=al)
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg=false)
void Float64MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2)
void Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2)
void LoadTaggedField(const Register &destination, const MemOperand &field_operand)
void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2, Label *out_of_line)
void LoadProtectedPointerField(Register destination, MemOperand field_operand)
void CheckPageFlag(Register object, int mask, Condition cc, Label *condition_met)
int CallCFunction(ExternalReference function, int num_arguments, SetIsolateDataSlots set_isolate_data_slots=SetIsolateDataSlots::kYes, Label *return_label=nullptr)
void Neg_d(FPURegister fd, FPURegister fk)
void Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2)
void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2, Label *out_of_line)
void AllocateStackSpace(Register bytes)
void Ctz_d(Register rd, Register rj)
void Popcnt_d(Register rd, Register rj)
void CallRecordWriteStubSaveRegisters(Register object, Operand offset, SaveFPRegsMode fp_mode, StubCallMode mode=StubCallMode::kCallBuiltinPointer)
void Branch(Label *label, bool need_link=false)
void Ld_hu(Register rd, const MemOperand &rj)
void PrepareCallCFunction(int num_reg_arguments, int num_double_registers=0, Register scratch=no_reg)
void Ftintrz_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch)
void LoadZeroIfFPUCondition(Register dest, CFRegister=FCC0)
void CompareIsNanF32(FPURegister cmp1, FPURegister cmp2, CFRegister cd=FCC0)
void BranchTrueShortF(Label *target, CFRegister cc=FCC0)
void Popcnt_w(Register rd, Register rj)
void Fld_s(FPURegister fd, const MemOperand &src)
void St_b(Register rd, const MemOperand &rj)
void St_h(Register rd, const MemOperand &rj)
void CompareF64(FPURegister cmp1, FPURegister cmp2, FPUCondition cc, CFRegister cd=FCC0)
void Ftintrz_uw_d(FPURegister fd, FPURegister fj, FPURegister scratch)
void Ld_wu(Register rd, const MemOperand &rj)
void Ld_w(Register rd, const MemOperand &rj)
static constexpr MainThreadFlags kPointersToHereAreInterestingMask
static constexpr MainThreadFlags kPointersFromHereAreInterestingMask
constexpr bool has(RegisterT reg) const
static constexpr Tagged< Smi > FromInt(int value)
Definition smi.h:38
static constexpr int32_t TypeToMarker(Type type)
Definition frames.h:196
static constexpr int kFrameTypeOffset