v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
liftoff-assembler-riscv-inl.h
1// Copyright 2022 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_WASM_BASELINE_RISCV_LIFTOFF_ASSEMBLER_RISCV_INL_H_
6#define V8_WASM_BASELINE_RISCV_LIFTOFF_ASSEMBLER_RISCV_INL_H_
7
16
17namespace v8::internal::wasm {
18
19namespace liftoff {
20
21inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
22
22
23inline MemOperand GetInstanceDataOperand() {
24 return GetStackSlot(WasmLiftoffFrameConstants::kInstanceDataOffset);
25}
26
27} // namespace liftoff
28int LiftoffAssembler::PrepareStackFrame() {
29 int offset = pc_offset();
30 // When the frame size is bigger than 4KB, we need two instructions for
31 // stack checking, so we reserve space for this case.
32 addi(sp, sp, 0);
33 nop();
34 nop();
35 return offset;
36}
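// Sketch (annotation, not part of the original file): PatchPrepareStackFrame
// later overwrites the three reserved instructions above. For a frame smaller
// than 4KB the patch is a single `addi sp, sp, -frame_size` (the two nops
// remain as padding); for a larger frame the slot is rewritten into a
// PC-relative jump to out-of-line code that performs the stack check before
// allocating the frame.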
37
38void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
39 int stack_param_delta) {
40 UseScratchRegisterScope temps(this);
41 Register scratch = temps.Acquire();
42
43 // Push the return address and frame pointer to complete the stack frame.
44 LoadWord(scratch, MemOperand(fp, kSystemPointerSize));
45 Push(scratch);
46 LoadWord(scratch, MemOperand(fp, 0));
47 Push(scratch);
48
49 // Shift the whole frame upwards.
50 int slot_count = num_callee_stack_params + 2;
51 for (int i = slot_count - 1; i >= 0; --i) {
52 LoadWord(scratch, MemOperand(sp, i * kSystemPointerSize));
53 StoreWord(scratch,
54 MemOperand(fp, (i - stack_param_delta) * kSystemPointerSize));
55 }
56
57 // Set the new stack and frame pointer.
58 AddWord(sp, fp, -stack_param_delta * kSystemPointerSize);
59 Pop(ra, fp);
60}
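// Frame sketch for PrepareTailCall (annotation, assuming the standard RISC-V
// Liftoff layout with ra at [fp + kSystemPointerSize] and the caller fp at
// [fp + 0]): the saved ra/fp pair is re-pushed, the
// slot_count = num_callee_stack_params + 2 slots above sp are copied upwards
// by stack_param_delta slots, and sp/fp are rewound so the tail-called
// function runs in place of the current frame.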
61
62void LiftoffAssembler::AlignFrameSize() {}
63
64void LiftoffAssembler::CheckTierUp(int declared_func_index, int budget_used,
65 Label* ool_label,
66 const FreezeCacheState& frozen) {
67 UseScratchRegisterScope temps(this);
68 Register budget_array = temps.Acquire();
69 Register instance_data = cache_state_.cached_instance_data;
70 if (instance_data == no_reg) {
71 instance_data = budget_array; // Reuse the scratch register.
72 LoadInstanceDataFromFrame(instance_data);
73 }
74
75 constexpr int kArrayOffset = wasm::ObjectAccess::ToTagged(
76 WasmTrustedInstanceData::kTieringBudgetArrayOffset);
77 LoadWord(budget_array, MemOperand(instance_data, kArrayOffset));
78
79 int budget_arr_offset = kInt32Size * declared_func_index;
80 // Pick a random register from kLiftoffAssemblerGpCacheRegs.
81 // TODO(miladfarca): Use ScratchRegisterScope when available.
82 Register budget = kScratchReg;
83 MemOperand budget_addr(budget_array, budget_arr_offset);
84 Lw(budget, budget_addr);
85 Sub32(budget, budget, Operand{budget_used});
86 Sw(budget, budget_addr);
87 Branch(ool_label, lt, budget, Operand{0});
88}
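// Equivalent scalar logic for CheckTierUp (illustrative annotation; names
// mirror the accessors used above):
//   int32_t* budgets = instance_data->tiering_budget_array;
//   budgets[declared_func_index] -= budget_used;
//   if (budgets[declared_func_index] < 0) goto ool;  // request tier-up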
89
90Register LiftoffAssembler::LoadOldFramePointer() {
91 if (!v8_flags.experimental_wasm_growable_stacks) {
92 return fp;
93 }
94 LiftoffRegister old_fp = GetUnusedRegister(RegClass::kGpReg, {});
95 Label done, call_runtime;
96 LoadWord(old_fp.gp(), MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
97 BranchShort(
98 &call_runtime, eq, old_fp.gp(),
99 Operand(StackFrame::TypeToMarker(StackFrame::WASM_SEGMENT_START)));
100 mv(old_fp.gp(), fp);
101 jmp(&done);
102 bind(&call_runtime);
103 LiftoffRegList regs_to_save = cache_state()->used_registers;
104 PushRegisters(regs_to_save);
105 li(kCArgRegs[0], ExternalReference::isolate_address(isolate()));
106 PrepareCallCFunction(1, kScratchReg);
107 CallCFunction(ExternalReference::wasm_load_old_fp(), 1);
108 if (old_fp.gp() != kReturnRegister0) {
109 mv(old_fp.gp(), kReturnRegister0);
110 }
111 PopRegisters(regs_to_save);
112 bind(&done);
113 return old_fp.gp();
114}
115
116void LiftoffAssembler::CheckStackShrink() {
117 Label done;
118 {
119 UseScratchRegisterScope temps{this};
120 Register scratch = temps.Acquire();
121 LoadWord(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
122 BranchShort(
123 &done, ne, scratch,
124 Operand(StackFrame::TypeToMarker(StackFrame::WASM_SEGMENT_START)));
125 }
126 LiftoffRegList regs_to_save;
127 for (auto reg : kGpReturnRegisters) regs_to_save.set(reg);
128 for (auto reg : kFpReturnRegisters) regs_to_save.set(reg);
129 PushRegisters(regs_to_save);
130 li(kCArgRegs[0], ExternalReference::isolate_address(isolate()));
131 PrepareCallCFunction(1, kScratchReg);
132 CallCFunction(ExternalReference::wasm_shrink_stack(), 1);
133 mv(fp, kReturnRegister0);
134 PopRegisters(regs_to_save);
135 bind(&done);
136}
137
138void LiftoffAssembler::PatchPrepareStackFrame(
139 int offset, SafepointTableBuilder* safepoint_table_builder,
140 bool feedback_vector_slot, size_t stack_param_slots) {
141 // The frame_size includes the frame marker and the instance slot. Both are
142 // pushed as part of frame construction, so we don't need to allocate memory
143 // for them anymore.
144 int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
145 // The frame setup builtin also pushes the feedback vector.
146 if (feedback_vector_slot) {
147 frame_size -= kSystemPointerSize;
148 }
149 // We can't run out of space, just pass anything big enough to not cause the
150 // assembler to try to grow the buffer.
151 constexpr int kAvailableSpace = 256;
152 MacroAssembler patching_assembler(
153 zone(), AssemblerOptions{}, CodeObjectRequired::kNo,
154 ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
155
156 if (V8_LIKELY(frame_size < 4 * KB)) {
157 // This is the standard case for small frames: just subtract from SP and be
158 // done with it.
159 patching_assembler.AddWord(sp, sp, Operand(-frame_size));
160 return;
161 }
162
163 // The frame size is bigger than 4KB, so we might overflow the available stack
164 // space if we first allocate the frame and then do the stack check (we will
165 // need some remaining stack space for throwing the exception). That's why we
166 // check the available stack space before we allocate the frame. To do this we
167 // replace the {__ AddWord(sp, sp, -frame_size)} with a jump to OOL code that
168 // does this "extended stack check".
169 //
170 // The OOL code can simply be generated here with the normal assembler,
171 // because all other code generation, including OOL code, has already finished
172 // when {PatchPrepareStackFrame} is called. The function prologue then jumps
173 // to the current {pc_offset()} to execute the OOL code for allocating the
174 // large frame.
175 // Emit the unconditional branch in the function prologue (from {offset} to
176 // {pc_offset()}).
177
178 int imm32 = pc_offset() - offset;
179 patching_assembler.GenPCRelativeJump(kScratchReg, imm32);
180
181 // If the frame is bigger than the stack, we throw the stack overflow
182 // exception unconditionally. Thereby we can avoid the integer overflow
183 // check in the condition code.
184 RecordComment("OOL: stack check for large frame");
185 Label continuation;
186 if (frame_size < v8_flags.stack_size * 1024) {
187 Register stack_limit = kScratchReg;
188 LoadStackLimit(stack_limit, StackLimitKind::kRealStackLimit);
189 AddWord(stack_limit, stack_limit, Operand(frame_size));
190 Branch(&continuation, uge, sp, Operand(stack_limit));
191 }
192
193 if (v8_flags.experimental_wasm_growable_stacks) {
194 LiftoffRegList regs_to_save;
195 regs_to_save.set(WasmHandleStackOverflowDescriptor::GapRegister());
196 regs_to_save.set(WasmHandleStackOverflowDescriptor::FrameBaseRegister());
197 for (auto reg : kGpParamRegisters) regs_to_save.set(reg);
198 for (auto reg : kFpParamRegisters) regs_to_save.set(reg);
199 PushRegisters(regs_to_save);
200 li(WasmHandleStackOverflowDescriptor::GapRegister(), frame_size);
201 AddWord(WasmHandleStackOverflowDescriptor::FrameBaseRegister(), fp,
202 Operand(stack_param_slots * kStackSlotSize +
203 CommonFrameConstants::kFixedFrameSizeAboveFp));
204 CallBuiltin(Builtin::kWasmHandleStackOverflow);
205 safepoint_table_builder->DefineSafepoint(this);
206 PopRegisters(regs_to_save);
207 } else {
208 Call(static_cast<Address>(Builtin::kWasmStackOverflow),
209 RelocInfo::WASM_STUB_CALL);
210 // The call will not return; just define an empty safepoint.
211 safepoint_table_builder->DefineSafepoint(this);
212 if (v8_flags.debug_code) stop();
213 }
214
215 bind(&continuation);
216
217 // Now allocate the stack space. Note that this might do more than just
218 // decrementing the SP.
219 AddWord(sp, sp, Operand(-frame_size));
220
221 // Jump back to the start of the function, from {pc_offset()} to
222 // right after the reserved space for the {__ AddWord(sp, sp, -framesize)}
223 // (which is a Branch now).
224 int func_start_offset = offset + 2 * kInstrSize;
225 imm32 = func_start_offset - pc_offset();
226 GenPCRelativeJump(kScratchReg, imm32);
227}
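// Resulting code shape for a large frame (illustrative annotation):
//   offset:      GenPCRelativeJump to the OOL code below (2 instructions)
//   ...          (rest of the function body)
//   pc_offset(): OOL: stack-limit check (throw or grow the stack on
//                overflow), AddWord(sp, sp, -frame_size), then a PC-relative
//                jump back to offset + 2 * kInstrSize.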
228
229void LiftoffAssembler::LoadSpillAddress(Register dst, int offset,
230 ValueKind /* kind */) {
231 SubWord(dst, fp, offset);
232}
233
234void LiftoffAssembler::FinishCode() { ForceConstantPoolEmissionWithoutJump(); }
235
236void LiftoffAssembler::AbortCompilation() { AbortedCodeGeneration(); }
237
238// static
239constexpr int LiftoffAssembler::StaticStackFrameSize() {
240 return WasmLiftoffFrameConstants::kFeedbackVectorOffset;
241}
242
243int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
244 switch (kind) {
245 case kS128:
246 return value_kind_size(kind);
247 default:
248 return kStackSlotSize;
249 }
250}
251
252bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
253 switch (kind) {
254 case kS128:
255 return true;
256 default:
257 // No alignment because all other types are kStackSlotSize.
258 return false;
259 }
260}
261
262void LiftoffAssembler::LoadInstanceDataFromFrame(Register dst) {
263 LoadWord(dst, liftoff::GetInstanceDataOperand());
264}
265
266void LiftoffAssembler::LoadTrustedPointer(Register dst, Register src_addr,
267 int offset, IndirectPointerTag tag) {
268 MemOperand src{src_addr, offset};
269 LoadTrustedPointerField(dst, src, tag);
270}
271
272void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
273 int offset, int size) {
274 DCHECK_LE(0, offset);
275 MemOperand src{instance, offset};
276 switch (size) {
277 case 1:
278 Lb(dst, MemOperand(src));
279 break;
280 case 4:
281 Lw(dst, MemOperand(src));
282 break;
283 case 8:
284 LoadWord(dst, MemOperand(src));
285 break;
286 default:
287 UNIMPLEMENTED();
288 }
289}
290
291void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
292 Register instance,
293 int offset) {
294 DCHECK_LE(0, offset);
295 LoadTaggedField(dst, MemOperand{instance, offset});
296}
297
298
299void LiftoffAssembler::SpillInstanceData(Register instance) {
300 StoreWord(instance, liftoff::GetInstanceDataOperand());
301}
302
303void LiftoffAssembler::ResetOSRTarget() {}
304
305void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
306 MacroAssembler::Neg_s(dst, src);
307}
308
309void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
310 MacroAssembler::Neg_d(dst, src);
311}
312
313void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
314 DoubleRegister rhs) {
315 MacroAssembler::Float32Min(dst, lhs, rhs);
316}
317
318void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
319 DoubleRegister rhs) {
320 MacroAssembler::Float32Max(dst, lhs, rhs);
321}
322
323void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
324 DoubleRegister rhs) {
325 fsgnj_s(dst, lhs, rhs);
326}
327
328void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
329 DoubleRegister rhs) {
330 MacroAssembler::Float64Min(dst, lhs, rhs);
331}
332
333void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
334 DoubleRegister rhs) {
335 MacroAssembler::Float64Max(dst, lhs, rhs);
336}
337
338void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
339 DoubleRegister rhs) {
340 fsgnj_d(dst, lhs, rhs);
341}
342
343#define FP_BINOP(name, instruction) \
344 void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
345 DoubleRegister rhs) { \
346 instruction(dst, lhs, rhs); \
347 }
348#define FP_UNOP(name, instruction) \
349 void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
350 instruction(dst, src); \
351 }
352#define FP_UNOP_RETURN_TRUE(name, instruction) \
353 bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
354 instruction(dst, src, kScratchDoubleReg); \
355 return true; \
356 }
357
358FP_BINOP(f32_add, fadd_s)
359FP_BINOP(f32_sub, fsub_s)
360FP_BINOP(f32_mul, fmul_s)
361FP_BINOP(f32_div, fdiv_s)
362FP_UNOP(f32_abs, fabs_s)
363FP_UNOP_RETURN_TRUE(f32_ceil, Ceil_s_s)
364FP_UNOP_RETURN_TRUE(f32_floor, Floor_s_s)
365FP_UNOP_RETURN_TRUE(f32_trunc, Trunc_s_s)
366FP_UNOP_RETURN_TRUE(f32_nearest_int, Round_s_s)
367FP_UNOP(f32_sqrt, fsqrt_s)
368FP_BINOP(f64_add, fadd_d)
369FP_BINOP(f64_sub, fsub_d)
370FP_BINOP(f64_mul, fmul_d)
371FP_BINOP(f64_div, fdiv_d)
372FP_UNOP(f64_abs, fabs_d)
373FP_UNOP(f64_sqrt, fsqrt_d)
374#undef FP_BINOP
375#undef FP_UNOP
376#undef FP_UNOP_RETURN_TRUE
377
378inline FPUCondition ConditionToConditionCmpFPU(Condition condition) {
379 switch (condition) {
380 case kEqual:
381 return EQ;
382 case kNotEqual:
383 return NE;
384 case kUnsignedLessThan:
385 return LT;
386 case kUnsignedGreaterThanEqual:
387 return GE;
388 case kUnsignedLessThanEqual:
389 return LE;
390 case kUnsignedGreaterThan:
391 return GT;
392 default:
393 break;
394 }
395 UNREACHABLE();
396}
397
398void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
399 DoubleRegister lhs,
400 DoubleRegister rhs) {
401 FPUCondition fcond = ConditionToConditionCmpFPU(cond);
402 MacroAssembler::CompareF32(dst, fcond, lhs, rhs);
403}
404
405void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
406 DoubleRegister lhs,
407 DoubleRegister rhs) {
408 FPUCondition fcond = ConditionToConditionCmpFPU(cond);
409 MacroAssembler::CompareF64(dst, fcond, lhs, rhs);
410}
411
412bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
413 LiftoffRegister true_value,
414 LiftoffRegister false_value,
415 ValueKind kind) {
416 return false;
417}
418
419void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
420 SmiCheckMode mode,
421 const FreezeCacheState& frozen) {
422 UseScratchRegisterScope temps(this);
423 Register scratch = temps.Acquire();
424 And(scratch, obj, Operand(kSmiTagMask));
425 Condition condition = mode == kJumpOnSmi ? eq : ne;
426 Branch(target, condition, scratch, Operand(zero_reg));
427}
428
429// Implements vector popcnt following the scalar dense_popcnt algorithm:
430// int dense_popcnt(uint32_t n)
431// {
432// int count = 32; // sizeof(uint32_t) * CHAR_BIT;
433// n ^= 0xFF'FF'FF'FF;
434// while(n)
435// {
436// --count;
437// n &= n - 1;
438// }
439// return count;
440// }
441void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
442 LiftoffRegister src) {
443 VRegister src_v = src.fp().toV();
444 VRegister dst_v = dst.fp().toV();
445 Label t, done;
446 VU.set(kScratchReg, E8, m1);
447 vmv_vv(kSimd128ScratchReg, src_v);
448 li(kScratchReg, 0xFF);
449 vxor_vx(kSimd128ScratchReg, kSimd128ScratchReg, kScratchReg);
450 vmv_vi(dst_v, 8);
451
452 bind(&t);
453 vmsne_vi(v0, kSimd128ScratchReg, 0);
454 VU.set(kScratchReg, E16, m1);
455 vmv_xs(kScratchReg, v0);
456 beqz(kScratchReg, &done);
457 VU.set(kScratchReg, E8, m1);
458 vadd_vi(dst_v, dst_v, -1, MaskType::Mask);
459 vadd_vi(kSimd128ScratchReg2, kSimd128ScratchReg, -1, MaskType::Mask);
460 vand_vv(kSimd128ScratchReg, kSimd128ScratchReg, kSimd128ScratchReg2,
461 MaskType::Mask);
462 Branch(&t);
463 bind(&done);
464}
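// Worked lane example (annotation): for a byte lane n = 0b0010'1101 the code
// above forms n ^ 0xFF = 0b1101'0010, initializes the count to 8, and clears
// one set bit of the inverted value per loop iteration (n &= n - 1). After
// four iterations the lane is zero and count = 8 - 4 = 4 = popcnt(n).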
465
466void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
467 LiftoffRegister lhs,
468 LiftoffRegister rhs,
469 const uint8_t shuffle[16],
470 bool is_swizzle) {
471 VRegister dst_v = dst.fp().toV();
472 VRegister lhs_v = lhs.fp().toV();
473 VRegister rhs_v = rhs.fp().toV();
474
475 WasmRvvS128const(kSimd128ScratchReg2, shuffle);
476
477 VU.set(kScratchReg, E8, m1);
478 VRegister temp =
479 GetUnusedRegister(kFpReg, LiftoffRegList{lhs, rhs}).fp().toV();
480 if (dst_v == lhs_v) {
481 vmv_vv(temp, lhs_v);
482 lhs_v = temp;
483 } else if (dst_v == rhs_v) {
484 vmv_vv(temp, rhs_v);
485 rhs_v = temp;
486 }
487 vrgather_vv(dst_v, lhs_v, kSimd128ScratchReg2);
488 vadd_vi(kSimd128ScratchReg2, kSimd128ScratchReg2,
489 -16); // The indices in range [16, 31] select the i - 16-th element
490 // of rhs
491 vrgather_vv(kSimd128ScratchReg, rhs_v, kSimd128ScratchReg2);
492 vor_vv(dst_v, dst_v, kSimd128ScratchReg);
493}
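// Index semantics (annotation): each byte of `shuffle` picks one result byte.
// Indices 0..15 gather from lhs; after the vadd_vi(-16) above, indices 16..31
// become 0..15 into rhs, while former lhs indices fall out of range and
// gather zero, so OR-ing the two vrgather results composes the full shuffle.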
494
495void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
496 LiftoffRegister lhs,
497 LiftoffRegister rhs) {
498 VU.set(kScratchReg, E8, m1);
499 if (dst == lhs || dst == rhs) {
500 vrgather_vv(kSimd128ScratchReg, lhs.fp().toV(), rhs.fp().toV());
501 vmv_vv(dst.fp().toV(), kSimd128ScratchReg);
502 } else {
503 vrgather_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
504 }
505}
506
507void LiftoffAssembler::emit_i8x16_relaxed_swizzle(LiftoffRegister dst,
508 LiftoffRegister lhs,
509 LiftoffRegister rhs) {
510 emit_i8x16_swizzle(dst, lhs, rhs);
511}
512
513void LiftoffAssembler::emit_s128_relaxed_laneselect(LiftoffRegister dst,
514 LiftoffRegister src1,
515 LiftoffRegister src2,
516 LiftoffRegister mask,
517 int lane_width) {
518 // RISC-V uses bytewise selection for all lane widths.
519 emit_s128_select(dst, src1, src2, mask);
520}
521
522void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
523 LiftoffRegister src) {
524 VU.set(kScratchReg, E8, m1);
525 vmv_vx(dst.fp().toV(), src.gp());
526}
527
528void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
529 LiftoffRegister src) {
530 VU.set(kScratchReg, E16, m1);
531 vmv_vx(dst.fp().toV(), src.gp());
532}
533
534void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
535 LiftoffRegister src) {
536 VU.set(kScratchReg, E32, m1);
537 vmv_vx(dst.fp().toV(), src.gp());
538}
539
540void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
541 LiftoffRegister rhs) {
542 WasmRvvEq(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E64, m1);
543}
544
545void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
546 LiftoffRegister rhs) {
547 WasmRvvNe(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E64, m1);
548}
549
550void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
551 LiftoffRegister rhs) {
552 WasmRvvGtS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E64, m1);
553}
554
555void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
556 LiftoffRegister rhs) {
557 WasmRvvGeS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E64, m1);
558}
559
560void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
561 LiftoffRegister src) {
562 VU.set(kScratchReg, E32, m1);
563 vfmv_vf(dst.fp().toV(), src.fp());
564}
565
566void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
567 LiftoffRegister src) {
568 VU.set(kScratchReg, E64, m1);
569 vfmv_vf(dst.fp().toV(), src.fp());
570}
571
572void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s(LiftoffRegister dst,
573 LiftoffRegister src1,
574 LiftoffRegister src2) {
575 VU.set(kScratchReg, E32, mf2);
576 VRegister dst_v = dst.fp().toV();
577 if (dst == src1 || dst == src2) {
578 dst_v = kSimd128ScratchReg3;
579 }
580 vwmul_vv(dst_v, src2.fp().toV(), src1.fp().toV());
581 if (dst == src1 || dst == src2) {
582 VU.set(kScratchReg, E64, m1);
583 vmv_vv(dst.fp().toV(), dst_v);
584 }
585}
586
587void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_u(LiftoffRegister dst,
588 LiftoffRegister src1,
589 LiftoffRegister src2) {
590 VU.set(kScratchReg, E32, mf2);
591 VRegister dst_v = dst.fp().toV();
592 if (dst == src1 || dst == src2) {
593 dst_v = kSimd128ScratchReg3;
594 }
595 vwmulu_vv(dst_v, src2.fp().toV(), src1.fp().toV());
596 if (dst == src1 || dst == src2) {
597 VU.set(kScratchReg, E64, m1);
598 vmv_vv(dst.fp().toV(), dst_v);
599 }
600}
601
602void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_s(LiftoffRegister dst,
603 LiftoffRegister src1,
604 LiftoffRegister src2) {
605 VU.set(kScratchReg, E32, m1);
606 vslidedown_vi(kSimd128ScratchReg, src1.fp().toV(), 2);
607 vslidedown_vi(kSimd128ScratchReg2, src2.fp().toV(), 2);
608 VU.set(kScratchReg, E32, mf2);
609 vwmul_vv(dst.fp().toV(), kSimd128ScratchReg, kSimd128ScratchReg2);
610}
611
612void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u(LiftoffRegister dst,
613 LiftoffRegister src1,
614 LiftoffRegister src2) {
615 VU.set(kScratchReg, E32, m1);
616 vslidedown_vi(kSimd128ScratchReg, src1.fp().toV(), 2);
617 vslidedown_vi(kSimd128ScratchReg2, src2.fp().toV(), 2);
618 VU.set(kScratchReg, E32, mf2);
619 vwmulu_vv(dst.fp().toV(), kSimd128ScratchReg, kSimd128ScratchReg2);
620}
621
622void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_s(LiftoffRegister dst,
623 LiftoffRegister src1,
624 LiftoffRegister src2) {
625 VU.set(kScratchReg, E16, mf2);
626 VRegister dst_v = dst.fp().toV();
627 if (dst == src1 || dst == src2) {
628 dst_v = kSimd128ScratchReg3;
629 }
630 vwmul_vv(dst_v, src2.fp().toV(), src1.fp().toV());
631 if (dst == src1 || dst == src2) {
632 VU.set(kScratchReg, E16, m1);
633 vmv_vv(dst.fp().toV(), dst_v);
634 }
635}
636
637void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_u(LiftoffRegister dst,
638 LiftoffRegister src1,
639 LiftoffRegister src2) {
640 VU.set(kScratchReg, E16, mf2);
641 VRegister dst_v = dst.fp().toV();
642 if (dst == src1 || dst == src2) {
643 dst_v = kSimd128ScratchReg3;
644 }
645 vwmulu_vv(dst_v, src2.fp().toV(), src1.fp().toV());
646 if (dst == src1 || dst == src2) {
647 VU.set(kScratchReg, E16, m1);
648 vmv_vv(dst.fp().toV(), dst_v);
649 }
650}
651
652void LiftoffAssembler::emit_i32x4_extmul_high_i16x8_s(LiftoffRegister dst,
653 LiftoffRegister src1,
654 LiftoffRegister src2) {
655 VU.set(kScratchReg, E16, m1);
656 vslidedown_vi(kSimd128ScratchReg, src1.fp().toV(), 4);
657 vslidedown_vi(kSimd128ScratchReg2, src2.fp().toV(), 4);
658 VU.set(kScratchReg, E16, mf2);
659 vwmul_vv(dst.fp().toV(), kSimd128ScratchReg, kSimd128ScratchReg2);
660}
661
662void LiftoffAssembler::emit_i32x4_extmul_high_i16x8_u(LiftoffRegister dst,
663 LiftoffRegister src1,
664 LiftoffRegister src2) {
665 VU.set(kScratchReg, E16, m1);
666 vslidedown_vi(kSimd128ScratchReg, src1.fp().toV(), 4);
667 vslidedown_vi(kSimd128ScratchReg2, src2.fp().toV(), 4);
668 VU.set(kScratchReg, E16, mf2);
669 vwmulu_vv(dst.fp().toV(), kSimd128ScratchReg, kSimd128ScratchReg2);
670}
671
672void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
673 LiftoffRegister src1,
674 LiftoffRegister src2) {
675 VU.set(kScratchReg, E8, mf2);
676 VRegister dst_v = dst.fp().toV();
677 if (dst == src1 || dst == src2) {
678 dst_v = kSimd128ScratchReg3;
679 }
680 vwmul_vv(dst_v, src2.fp().toV(), src1.fp().toV());
681 if (dst == src1 || dst == src2) {
682 VU.set(kScratchReg, E8, m1);
683 vmv_vv(dst.fp().toV(), dst_v);
684 }
685}
686
687void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_u(LiftoffRegister dst,
688 LiftoffRegister src1,
689 LiftoffRegister src2) {
690 VU.set(kScratchReg, E8, mf2);
691 VRegister dst_v = dst.fp().toV();
692 if (dst == src1 || dst == src2) {
693 dst_v = kSimd128ScratchReg3;
694 }
695 vwmulu_vv(dst_v, src2.fp().toV(), src1.fp().toV());
696 if (dst == src1 || dst == src2) {
697 VU.set(kScratchReg, E8, m1);
698 vmv_vv(dst.fp().toV(), dst_v);
699 }
700}
701
702void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_s(LiftoffRegister dst,
703 LiftoffRegister src1,
704 LiftoffRegister src2) {
705 VU.set(kScratchReg, E8, m1);
706 vslidedown_vi(kSimd128ScratchReg, src1.fp().toV(), 8);
707 vslidedown_vi(kSimd128ScratchReg2, src2.fp().toV(), 8);
708 VU.set(kScratchReg, E8, mf2);
709 vwmul_vv(dst.fp().toV(), kSimd128ScratchReg, kSimd128ScratchReg2);
710}
711
712void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
713 LiftoffRegister src1,
714 LiftoffRegister src2) {
715 VU.set(kScratchReg, E8, m1);
716 vslidedown_vi(kSimd128ScratchReg, src1.fp().toV(), 8);
717 vslidedown_vi(kSimd128ScratchReg2, src2.fp().toV(), 8);
718 VU.set(kScratchReg, E8, mf2);
719 vwmulu_vv(dst.fp().toV(), kSimd128ScratchReg, kSimd128ScratchReg2);
720}
721
722#undef SIMD_BINOP
723
724void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
725 LiftoffRegister src1,
726 LiftoffRegister src2) {
727 VU.set(kScratchReg, E16, m1);
728 vsmul_vv(dst.fp().toV(), src1.fp().toV(), src2.fp().toV());
729}
730
731void LiftoffAssembler::emit_i16x8_relaxed_q15mulr_s(LiftoffRegister dst,
732 LiftoffRegister src1,
733 LiftoffRegister src2) {
734 VU.set(kScratchReg, E16, m1);
735 vsmul_vv(dst.fp().toV(), src1.fp().toV(), src2.fp().toV());
736}
737
738void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
739 LiftoffRegister src) {
740 VU.set(kScratchReg, E64, m1);
741 vmv_vx(kSimd128RegZero, zero_reg);
742 vmv_vx(kSimd128ScratchReg, zero_reg);
743 vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
744 VU.set(kScratchReg, E32, m1);
745 vmv_xs(dst.gp(), kSimd128ScratchReg);
746}
747
748void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
749 LiftoffRegister src) {
750 VU.set(kScratchReg, E64, m1);
751 vmv_vv(kSimd128ScratchReg, src.fp().toV());
752 vsext_vf2(dst.fp().toV(), kSimd128ScratchReg);
753}
754
755void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
756 LiftoffRegister src) {
757 VU.set(kScratchReg, E32, m1);
758 vslidedown_vi(kSimd128ScratchReg, src.fp().toV(), 2);
759 VU.set(kScratchReg, E64, m1);
760 vsext_vf2(dst.fp().toV(), kSimd128ScratchReg);
761}
762
763void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
764 LiftoffRegister src) {
765 VU.set(kScratchReg, E64, m1);
766 vmv_vv(kSimd128ScratchReg, src.fp().toV());
767 vzext_vf2(dst.fp().toV(), kSimd128ScratchReg);
768}
769
770void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
771 LiftoffRegister src) {
772 VU.set(kScratchReg, E32, m1);
773 vslidedown_vi(kSimd128ScratchReg, src.fp().toV(), 2);
774 VU.set(kScratchReg, E64, m1);
775 vzext_vf2(dst.fp().toV(), kSimd128ScratchReg);
776}
777
778void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
779 LiftoffRegister rhs) {
780 WasmRvvEq(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
781}
782
783void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs,
784 LiftoffRegister rhs) {
785 WasmRvvNe(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
786}
787
788void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
789 LiftoffRegister rhs) {
790 WasmRvvGtS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
791}
792
793void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
794 LiftoffRegister rhs) {
795 WasmRvvGtU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
796}
797
798void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
799 LiftoffRegister rhs) {
800 WasmRvvGeS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
801}
802
803void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
804 LiftoffRegister rhs) {
805 WasmRvvGeU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
806}
807
808void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
809 LiftoffRegister rhs) {
810 WasmRvvEq(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
811}
812
813void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
814 LiftoffRegister rhs) {
815 WasmRvvNe(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
816}
817
818void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
819 LiftoffRegister rhs) {
820 WasmRvvGtS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
821}
822
823void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
824 LiftoffRegister rhs) {
825 WasmRvvGtU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
826}
827
828void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
829 LiftoffRegister rhs) {
830 WasmRvvGeS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
831}
832
833void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
834 LiftoffRegister rhs) {
835 WasmRvvGeU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
836}
837
838void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
839 LiftoffRegister rhs) {
840 WasmRvvEq(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
841}
842
843void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
844 LiftoffRegister rhs) {
845 WasmRvvNe(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
846}
847
848void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
849 LiftoffRegister rhs) {
850 WasmRvvGtS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
851}
852
853void LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
854 LiftoffRegister rhs) {
855 WasmRvvGtU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
856}
857
858void LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
859 LiftoffRegister rhs) {
860 WasmRvvGeS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
861}
862
863void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
864 LiftoffRegister rhs) {
865 WasmRvvGeU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
866}
867
868void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
869 LiftoffRegister rhs) {
870 VU.set(kScratchReg, E32, m1);
871 vmfeq_vv(v0, rhs.fp().toV(), lhs.fp().toV());
872 vmv_vx(dst.fp().toV(), zero_reg);
873 vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
874}
875
876void LiftoffAssembler::emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
877 LiftoffRegister rhs) {
878 VU.set(kScratchReg, E32, m1);
879 vmfne_vv(v0, rhs.fp().toV(), lhs.fp().toV());
880 vmv_vx(dst.fp().toV(), zero_reg);
881 vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
882}
883
884void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs,
885 LiftoffRegister rhs) {
886 VU.set(kScratchReg, E32, m1);
887 vmflt_vv(v0, lhs.fp().toV(), rhs.fp().toV());
888 vmv_vx(dst.fp().toV(), zero_reg);
889 vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
890}
891
892void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
893 LiftoffRegister rhs) {
894 VU.set(kScratchReg, E32, m1);
895 vmfle_vv(v0, lhs.fp().toV(), rhs.fp().toV());
896 vmv_vx(dst.fp().toV(), zero_reg);
897 vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
898}
899
900void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
901 LiftoffRegister src) {
902 VU.set(kScratchReg, E32, mf2);
903 if (dst.fp().toV() != src.fp().toV()) {
904 vfwcvt_f_x_v(dst.fp().toV(), src.fp().toV());
905 } else {
906 vfwcvt_f_x_v(kSimd128ScratchReg3, src.fp().toV());
907 VU.set(kScratchReg, E64, m1);
908 vmv_vv(dst.fp().toV(), kSimd128ScratchReg3);
909 }
910}
911
912void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
913 LiftoffRegister src) {
914 VU.set(kScratchReg, E32, mf2);
915 if (dst.fp().toV() != src.fp().toV()) {
916 vfwcvt_f_xu_v(dst.fp().toV(), src.fp().toV());
917 } else {
918 vfwcvt_f_xu_v(kSimd128ScratchReg3, src.fp().toV());
919 VU.set(kScratchReg, E64, m1);
920 vmv_vv(dst.fp().toV(), kSimd128ScratchReg3);
921 }
922}
923
924void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
925 LiftoffRegister src) {
926 VU.set(kScratchReg, E32, mf2);
927 if (dst.fp().toV() != src.fp().toV()) {
928 vfwcvt_f_f_v(dst.fp().toV(), src.fp().toV());
929 } else {
930 vfwcvt_f_f_v(kSimd128ScratchReg3, src.fp().toV());
931 VU.set(kScratchReg, E64, m1);
932 vmv_vv(dst.fp().toV(), kSimd128ScratchReg3);
933 }
934}
935
936void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
937 LiftoffRegister src) {
938 VU.set(kScratchReg, E32, mf2);
939 vfncvt_f_f_w(dst.fp().toV(), src.fp().toV());
940 VU.set(kScratchReg, E32, m1);
941 vmv_vi(v0, 12);
942 vmerge_vx(dst.fp().toV(), zero_reg, dst.fp().toV());
943}
944
945void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
946 LiftoffRegister src) {
947 VU.set(kScratchReg, E64, m1);
948 vmv_vx(kSimd128ScratchReg, zero_reg);
949 vmfeq_vv(v0, src.fp().toV(), src.fp().toV());
950 vmv_vv(kSimd128ScratchReg3, src.fp().toV());
951 VU.set(kScratchReg, E32, m1);
952 VU.set(FPURoundingMode::RTZ);
953 vfncvt_x_f_w(kSimd128ScratchReg, kSimd128ScratchReg3, MaskType::Mask);
954 vmv_vv(dst.fp().toV(), kSimd128ScratchReg);
955}
956
957void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
958 LiftoffRegister src) {
959 VU.set(kScratchReg, E64, m1);
960 vmv_vx(kSimd128ScratchReg, zero_reg);
961 vmfeq_vv(v0, src.fp().toV(), src.fp().toV());
962 vmv_vv(kSimd128ScratchReg3, src.fp().toV());
963 VU.set(kScratchReg, E32, m1);
964 VU.set(FPURoundingMode::RTZ);
965 vfncvt_xu_f_w(kSimd128ScratchReg, kSimd128ScratchReg3, MaskType::Mask);
966 vmv_vv(dst.fp().toV(), kSimd128ScratchReg);
967}
968
969void LiftoffAssembler::emit_i32x4_relaxed_trunc_f32x4_s(LiftoffRegister dst,
970 LiftoffRegister src) {
971 VU.set(FPURoundingMode::RTZ);
972 VU.set(kScratchReg, E32, m1);
973 vmfeq_vv(v0, src.fp().toV(), src.fp().toV());
974 vmv_vv(kSimd128ScratchReg, src.fp().toV());
975 vmv_vx(dst.fp().toV(), zero_reg);
976 vfcvt_x_f_v(dst.fp().toV(), kSimd128ScratchReg, MaskType::Mask);
977}
978void LiftoffAssembler::emit_i32x4_relaxed_trunc_f32x4_u(LiftoffRegister dst,
979 LiftoffRegister src) {
980 VU.set(FPURoundingMode::RTZ);
981 VU.set(kScratchReg, E32, m1);
982 vmfeq_vv(v0, src.fp().toV(), src.fp().toV());
983 li(kScratchReg, Operand(-1));
984 vmv_vv(kSimd128ScratchReg, src.fp().toV());
985 vmv_vx(dst.fp().toV(), kScratchReg);
986 vfcvt_xu_f_v(dst.fp().toV(), kSimd128ScratchReg, MaskType::Mask);
987}
988
989void LiftoffAssembler::emit_i32x4_relaxed_trunc_f64x2_s_zero(
990 LiftoffRegister dst, LiftoffRegister src) {
991 VU.set(kScratchReg, E64, m1);
992 vmfeq_vv(v0, src.fp().toV(), src.fp().toV());
993
994 VU.set(kScratchReg, E32, m1);
995 VU.set(FPURoundingMode::RTZ);
996 vmv_vv(kSimd128ScratchReg, src.fp().toV());
997 vmv_vx(dst.fp().toV(), zero_reg);
998 vfncvt_x_f_w(dst.fp().toV(), kSimd128ScratchReg, MaskType::Mask);
999}
1000
1001void LiftoffAssembler::emit_i32x4_relaxed_trunc_f64x2_u_zero(
1002 LiftoffRegister dst, LiftoffRegister src) {
1003 VU.set(kScratchReg, E64, m1);
1004 vmv_vv(kSimd128ScratchReg, v0);
1005 vmv_vv(kSimd128ScratchReg, src.fp().toV());
1006 vmfeq_vv(v0, src.fp().toV(), src.fp().toV());
1007 VU.set(kScratchReg, E32, m1);
1008 VU.set(FPURoundingMode::RTZ);
1009 li(kScratchReg, Operand(-1));
1010 vmv_vv(kSimd128ScratchReg, src.fp().toV());
1011 vmv_vx(dst.fp().toV(), zero_reg);
1012 vmerge_vx(dst.fp().toV(), kScratchReg, dst.fp().toV());
1013 vfncvt_xu_f_w(dst.fp().toV(), kSimd128ScratchReg, MaskType::Mask);
1014}
1015
1016void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
1017 LiftoffRegister rhs) {
1018 VU.set(kScratchReg, E64, m1);
1019 vmfeq_vv(v0, rhs.fp().toV(), lhs.fp().toV());
1020 vmv_vx(dst.fp().toV(), zero_reg);
1021 vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
1022}
1023
1024void LiftoffAssembler::emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
1025 LiftoffRegister rhs) {
1026 VU.set(kScratchReg, E64, m1);
1027 vmfne_vv(v0, rhs.fp().toV(), lhs.fp().toV());
1028 vmv_vx(dst.fp().toV(), zero_reg);
1029 vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
1030}
1031
1032void LiftoffAssembler::emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs,
1033 LiftoffRegister rhs) {
1034 VU.set(kScratchReg, E64, m1);
1035 vmflt_vv(v0, lhs.fp().toV(), rhs.fp().toV());
1036 vmv_vx(dst.fp().toV(), zero_reg);
1037 vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
1038}
1039
1040void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
1041 LiftoffRegister rhs) {
1042 VU.set(kScratchReg, E64, m1);
1043 vmfle_vv(v0, lhs.fp().toV(), rhs.fp().toV());
1044 vmv_vx(dst.fp().toV(), zero_reg);
1045 vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
1046}
1047
1048void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
1049 const uint8_t imms[16]) {
1050 WasmRvvS128const(dst.fp().toV(), imms);
1051}
1052
1053void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
1054 VU.set(kScratchReg, E8, m1);
1055 vnot_vv(dst.fp().toV(), src.fp().toV());
1056}
1057
1058void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
1059 LiftoffRegister rhs) {
1060 VU.set(kScratchReg, E8, m1);
1061 vand_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1062}
1063
1064void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
1065 LiftoffRegister rhs) {
1066 VU.set(kScratchReg, E8, m1);
1067 vor_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1068}
1069
1070void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
1071 LiftoffRegister rhs) {
1072 VU.set(kScratchReg, E8, m1);
1073 vxor_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1074}
1075
1076void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
1077 LiftoffRegister lhs,
1078 LiftoffRegister rhs) {
1079 VU.set(kScratchReg, E8, m1);
1080 vnot_vv(kSimd128ScratchReg, rhs.fp().toV());
1081 vand_vv(dst.fp().toV(), lhs.fp().toV(), kSimd128ScratchReg);
1082}
1083
1084void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
1085 LiftoffRegister src1,
1086 LiftoffRegister src2,
1087 LiftoffRegister mask) {
1088 VU.set(kScratchReg, E8, m1);
1089 vand_vv(kSimd128ScratchReg, src1.fp().toV(), mask.fp().toV());
1090 vnot_vv(kSimd128ScratchReg2, mask.fp().toV());
1091 vand_vv(kSimd128ScratchReg2, src2.fp().toV(), kSimd128ScratchReg2);
1092 vor_vv(dst.fp().toV(), kSimd128ScratchReg, kSimd128ScratchReg2);
1093}
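// Bitwise semantics (annotation): dst = (src1 & mask) | (src2 & ~mask); a set
// mask bit selects the bit from src1, a clear bit selects it from src2.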
1094
1095void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
1096 LiftoffRegister src) {
1097 VU.set(kScratchReg, E8, m1);
1098 vneg_vv(dst.fp().toV(), src.fp().toV());
1099}
1100
1101void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
1102 LiftoffRegister src) {
1103 VU.set(kScratchReg, E8, m1);
1104 Label t;
1105 vmv_sx(kSimd128ScratchReg, zero_reg);
1106 vredmaxu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
1107 vmv_xs(dst.gp(), kSimd128ScratchReg);
1108 beq(dst.gp(), zero_reg, &t);
1109 li(dst.gp(), 1);
1110 bind(&t);
1111}
1112
1113void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
1114 LiftoffRegister src) {
1115 VU.set(kScratchReg, E8, m1);
1116 Label notalltrue;
1117 vmv_vi(kSimd128ScratchReg, -1);
1118 vredminu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
1119 vmv_xs(dst.gp(), kSimd128ScratchReg);
1120 beqz(dst.gp(), &notalltrue);
1121 li(dst.gp(), 1);
1122 bind(&notalltrue);
1123}
1124
1125void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
1126 LiftoffRegister src) {
1127 VU.set(kScratchReg, E8, m1);
1128 vmv_vx(kSimd128RegZero, zero_reg);
1129 vmv_vx(kSimd128ScratchReg, zero_reg);
1130 vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
1131 VU.set(kScratchReg, E32, m1);
1132 vmv_xs(dst.gp(), kSimd128ScratchReg);
1133}
1134
1135void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
1136 LiftoffRegister rhs) {
1137 VU.set(kScratchReg, E8, m1);
1138 andi(rhs.gp(), rhs.gp(), 8 - 1);
1139 vsll_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
1140}
1141
1142void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
1143 int32_t rhs) {
1144 VU.set(kScratchReg, E8, m1);
1145 vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs % 8);
1146}
1147
1148void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
1149 LiftoffRegister lhs,
1150 LiftoffRegister rhs) {
1151 VU.set(kScratchReg, E8, m1);
1152 andi(rhs.gp(), rhs.gp(), 8 - 1);
1153 vsra_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
1154}
1155
1156void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
1157 LiftoffRegister lhs, int32_t rhs) {
1158 VU.set(kScratchReg, E8, m1);
1159 vsra_vi(dst.fp().toV(), lhs.fp().toV(), rhs % 8);
1160}
1161
1162void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
1163 LiftoffRegister lhs,
1164 LiftoffRegister rhs) {
1165 VU.set(kScratchReg, E8, m1);
1166 andi(rhs.gp(), rhs.gp(), 8 - 1);
1167 vsrl_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
1168}
1169
1170void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
1171 LiftoffRegister lhs, int32_t rhs) {
1172 VU.set(kScratchReg, E8, m1);
1173 vsrl_vi(dst.fp().toV(), lhs.fp().toV(), rhs % 8);
1174}
1175
1176void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
1177 LiftoffRegister rhs) {
1178 VU.set(kScratchReg, E8, m1);
1179 vadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1180}
1181
1182void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
1183 LiftoffRegister lhs,
1184 LiftoffRegister rhs) {
1185 VU.set(kScratchReg, E8, m1);
1186 vsadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1187}
1188
1189void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
1190 LiftoffRegister lhs,
1191 LiftoffRegister rhs) {
1192 VU.set(kScratchReg, E8, m1);
1193 vsaddu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1194}
1195
1196void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
1197 LiftoffRegister rhs) {
1198 VU.set(kScratchReg, E8, m1);
1199 vsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1200}
1201
1202void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
1203 LiftoffRegister lhs,
1204 LiftoffRegister rhs) {
1205 VU.set(kScratchReg, E8, m1);
1206 vssub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1207}
1208
1209void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
1210 LiftoffRegister lhs,
1211 LiftoffRegister rhs) {
1212 VU.set(kScratchReg, E8, m1);
1213 vssubu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1214}
1215
1216void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
1217 LiftoffRegister lhs,
1218 LiftoffRegister rhs) {
1219 VU.set(kScratchReg, E8, m1);
1220 vmin_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1221}
1222
1223void LiftoffAssembler::emit_i8x16_min_u(LiftoffRegister dst,
1224 LiftoffRegister lhs,
1225 LiftoffRegister rhs) {
1226 VU.set(kScratchReg, E8, m1);
1227 vminu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1228}
1229
1230void LiftoffAssembler::emit_i8x16_max_s(LiftoffRegister dst,
1231 LiftoffRegister lhs,
1232 LiftoffRegister rhs) {
1233 VU.set(kScratchReg, E8, m1);
1234 vmax_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1235}
1236
1237void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst,
1238 LiftoffRegister lhs,
1239 LiftoffRegister rhs) {
1240 VU.set(kScratchReg, E8, m1);
1241 vmaxu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1242}
1243
1244void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
1245 LiftoffRegister src) {
1246 VU.set(kScratchReg, E16, m1);
1247 vneg_vv(dst.fp().toV(), src.fp().toV());
1248}
1249
1250void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
1251 LiftoffRegister src) {
1252 VU.set(kScratchReg, E16, m1);
1253 Label notalltrue;
1254 vmv_vi(kSimd128ScratchReg, -1);
1255 vredminu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
1256 vmv_xs(dst.gp(), kSimd128ScratchReg);
1257 beqz(dst.gp(), &notalltrue);
1258 li(dst.gp(), 1);
1259 bind(&notalltrue);
1260}
1261
1262void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
1263 LiftoffRegister src) {
1264 VU.set(kScratchReg, E16, m1);
1265 vmv_vx(kSimd128RegZero, zero_reg);
1266 vmv_vx(kSimd128ScratchReg, zero_reg);
1267 vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
1268 VU.set(kScratchReg, E32, m1);
1269 vmv_xs(dst.gp(), kSimd128ScratchReg);
1270}
1271
1272void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
1273 LiftoffRegister rhs) {
1274 VU.set(kScratchReg, E16, m1);
1275 andi(rhs.gp(), rhs.gp(), 16 - 1);
1276 vsll_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
1277}
1278
1279void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
1280 int32_t rhs) {
1281 VU.set(kScratchReg, E16, m1);
1282 vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs % 16);
1283}
1284
1285void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
1286 LiftoffRegister lhs,
1287 LiftoffRegister rhs) {
1288 VU.set(kScratchReg, E16, m1);
1289 andi(rhs.gp(), rhs.gp(), 16 - 1);
1290 vsra_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
1291}
1292
1293void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
1294 LiftoffRegister lhs, int32_t rhs) {
1295 VU.set(kScratchReg, E16, m1);
1296 vsra_vi(dst.fp().toV(), lhs.fp().toV(), rhs % 16);
1297}
1298
1299void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
1300 LiftoffRegister lhs,
1301 LiftoffRegister rhs) {
1302 VU.set(kScratchReg, E16, m1);
1303 andi(rhs.gp(), rhs.gp(), 16 - 1);
1304 vsrl_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
1305}
1306
1307void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
1308 LiftoffRegister lhs, int32_t rhs) {
1309 VU.set(kScratchReg, E16, m1);
1310 vsrl_vi(dst.fp().toV(), lhs.fp().toV(), rhs % 16);
1311}
1312
1313void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
1314 LiftoffRegister rhs) {
1315 VU.set(kScratchReg, E16, m1);
1316 vadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1317}
1318
1319void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
1320 LiftoffRegister lhs,
1321 LiftoffRegister rhs) {
1322 VU.set(kScratchReg, E16, m1);
1323 vsadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1324}
1325
1326void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
1327 LiftoffRegister lhs,
1328 LiftoffRegister rhs) {
1329 VU.set(kScratchReg, E16, m1);
1330 vsaddu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1331}
1332
1333void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
1334 LiftoffRegister rhs) {
1335 VU.set(kScratchReg, E16, m1);
1336 vsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1337}
1338
1339void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
1340 LiftoffRegister lhs,
1341 LiftoffRegister rhs) {
1342 VU.set(kScratchReg, E16, m1);
1343 vssub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1344}
1345
1346void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
1347 LiftoffRegister lhs,
1348 LiftoffRegister rhs) {
1349 VU.set(kScratchReg, E16, m1);
1350 vssubu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1351}
1352
1353void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
1354 LiftoffRegister rhs) {
1355 VU.set(kScratchReg, E16, m1);
1356 vmul_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1357}
1358
1359void LiftoffAssembler::emit_i16x8_min_s(LiftoffRegister dst,
1360 LiftoffRegister lhs,
1361 LiftoffRegister rhs) {
1362 VU.set(kScratchReg, E16, m1);
1363 vmin_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1364}
1365
1366void LiftoffAssembler::emit_i16x8_min_u(LiftoffRegister dst,
1367 LiftoffRegister lhs,
1368 LiftoffRegister rhs) {
1369 VU.set(kScratchReg, E16, m1);
1370 vminu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1371}
1372
1373void LiftoffAssembler::emit_i16x8_max_s(LiftoffRegister dst,
1374 LiftoffRegister lhs,
1375 LiftoffRegister rhs) {
1376 VU.set(kScratchReg, E16, m1);
1377 vmax_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1378}
1379
1380void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
1381 LiftoffRegister lhs,
1382 LiftoffRegister rhs) {
1383 VU.set(kScratchReg, E16, m1);
1384 vmaxu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1385}
1386
1387void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
1388 LiftoffRegister src) {
1389 VU.set(kScratchReg, E32, m1);
1390 vneg_vv(dst.fp().toV(), src.fp().toV());
1391}
1392
1393void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
1394 LiftoffRegister src) {
1395 VU.set(kScratchReg, E32, m1);
1396 Label notalltrue;
1397 vmv_vi(kSimd128ScratchReg, -1);
1398 vredminu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
1399 vmv_xs(dst.gp(), kSimd128ScratchReg);
1400 beqz(dst.gp(), &notalltrue);
1401 li(dst.gp(), 1);
1402 bind(&notalltrue);
1403}
1404
1405void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
1406 LiftoffRegister src) {
1407 VU.set(kScratchReg, E32, m1);
1408 vmv_vx(kSimd128RegZero, zero_reg);
1409 vmv_vx(kSimd128ScratchReg, zero_reg);
1410 vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
1411 vmv_xs(dst.gp(), kSimd128ScratchReg);
1412}
1413
1414void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
1415 LiftoffRegister rhs) {
1416 VU.set(kScratchReg, E32, m1);
1417 andi(rhs.gp(), rhs.gp(), 32 - 1);
1418 vsll_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
1419}
1420
1421void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
1422 int32_t rhs) {
1423 if (is_uint5(rhs % 32)) {
1424 vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs % 32);
1425 } else {
1426 li(kScratchReg, rhs % 32);
1427 vsll_vx(dst.fp().toV(), lhs.fp().toV(), kScratchReg);
1428 }
1429}
1430
1431void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
1432 LiftoffRegister lhs,
1433 LiftoffRegister rhs) {
1434 VU.set(kScratchReg, E32, m1);
1435 andi(rhs.gp(), rhs.gp(), 32 - 1);
1436 vsra_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
1437}
1438
1439void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
1440 LiftoffRegister lhs, int32_t rhs) {
1441 VU.set(kScratchReg, E32, m1);
1442 if (is_uint5(rhs % 32)) {
1443 vsra_vi(dst.fp().toV(), lhs.fp().toV(), rhs % 32);
1444 } else {
1445 li(kScratchReg, rhs % 32);
1446 vsra_vx(dst.fp().toV(), lhs.fp().toV(), kScratchReg);
1447 }
1448}
1449
1450void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
1451 LiftoffRegister lhs,
1452 LiftoffRegister rhs) {
1453 VU.set(kScratchReg, E32, m1);
1454 andi(rhs.gp(), rhs.gp(), 32 - 1);
1455 vsrl_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
1456}
1457
1458void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
1459 LiftoffRegister lhs, int32_t rhs) {
1460 VU.set(kScratchReg, E32, m1);
1461 if (is_uint5(rhs % 32)) {
1462 vsrl_vi(dst.fp().toV(), lhs.fp().toV(), rhs % 32);
1463 } else {
1464 li(kScratchReg, rhs % 32);
1465 vsrl_vx(dst.fp().toV(), lhs.fp().toV(), kScratchReg);
1466 }
1467}
1468
1469void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
1470 LiftoffRegister rhs) {
1471 VU.set(kScratchReg, E32, m1);
1472 vadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1473}
1474
1475void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
1476 LiftoffRegister rhs) {
1477 VU.set(kScratchReg, E32, m1);
1478 vsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1479}
1480
1481void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
1482 LiftoffRegister rhs) {
1483 VU.set(kScratchReg, E32, m1);
1484 vmul_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1485}
1486
1487void LiftoffAssembler::emit_i32x4_min_s(LiftoffRegister dst,
1488 LiftoffRegister lhs,
1489 LiftoffRegister rhs) {
1490 VU.set(kScratchReg, E32, m1);
1491 vmin_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1492}
1493
1494void LiftoffAssembler::emit_i32x4_min_u(LiftoffRegister dst,
1495 LiftoffRegister lhs,
1496 LiftoffRegister rhs) {
1497 VU.set(kScratchReg, E32, m1);
1498 vminu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1499}
1500
1501void LiftoffAssembler::emit_i32x4_max_s(LiftoffRegister dst,
1502 LiftoffRegister lhs,
1503 LiftoffRegister rhs) {
1504 VU.set(kScratchReg, E32, m1);
1505 vmax_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1506}
1507
1508void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
1509 LiftoffRegister lhs,
1510 LiftoffRegister rhs) {
1511 VU.set(kScratchReg, E32, m1);
1512 vmaxu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1513}
1514
1515void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
1516 LiftoffRegister lhs,
1517 LiftoffRegister rhs) {
1518 VU.set(kScratchReg, E16, m1);
1519 vwmul_vv(kSimd128ScratchReg3, lhs.fp().toV(), rhs.fp().toV());
1520 VU.set(kScratchReg, E32, m2);
1521 li(kScratchReg, 0b01010101);
1522 vmv_sx(v0, kScratchReg);
1523 vcompress_vv(kSimd128ScratchReg, kSimd128ScratchReg3, v0);
1524
1525 li(kScratchReg, 0b10101010);
1526 vmv_sx(kSimd128ScratchReg2, kScratchReg);
1527 vcompress_vv(v0, kSimd128ScratchReg3, kSimd128ScratchReg2);
1528 VU.set(kScratchReg, E32, m1);
1529 vadd_vv(dst.fp().toV(), kSimd128ScratchReg, v0);
1530}
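// Lane sketch (annotation): vwmul widens the eight i16 x i16 products into
// eight i32 values p0..p7 spread over a register group; the two vcompress
// steps separate the even lanes {p0, p2, p4, p6} from the odd lanes
// {p1, p3, p5, p7}, and the final vadd yields dst[i] = p[2i] + p[2i+1].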
1531
1532void LiftoffAssembler::emit_i16x8_dot_i8x16_i7x16_s(LiftoffRegister dst,
1533 LiftoffRegister lhs,
1534 LiftoffRegister rhs) {
1535 VU.set(kScratchReg, E8, m1);
1536 vwmul_vv(kSimd128ScratchReg3, lhs.fp().toV(), rhs.fp().toV());
1537 VU.set(kScratchReg, E16, m2);
1538
1539 constexpr int32_t FIRST_INDEX = 0b0101010101010101;
1540 constexpr int32_t SECOND_INDEX = 0b1010101010101010;
1541 li(kScratchReg, FIRST_INDEX);
1542 vmv_sx(v0, kScratchReg);
1543 vcompress_vv(kSimd128ScratchReg, kSimd128ScratchReg3, v0);
1544
1545 li(kScratchReg, SECOND_INDEX);
1546 vmv_sx(kSimd128ScratchReg2, kScratchReg);
1547 vcompress_vv(v0, kSimd128ScratchReg3, kSimd128ScratchReg2);
1548 VU.set(kScratchReg, E16, m1);
1549 vadd_vv(dst.fp().toV(), kSimd128ScratchReg, v0);
1550}
1551
1552void LiftoffAssembler::emit_i16x8_dot_i8x16_i7x16_add_s(LiftoffRegister dst,
1553 LiftoffRegister lhs,
1554 LiftoffRegister rhs,
1555 LiftoffRegister acc) {
1556 DCHECK_NE(dst, acc);
1557 VU.set(kScratchReg, E8, m1);
1558 VRegister intermediate = kSimd128ScratchReg3;
1559 VRegister kSimd128ScratchReg4 =
1560 GetUnusedRegister(LiftoffRegList{LiftoffRegister(ft10)}).fp().toV();
1561 vwmul_vv(intermediate, lhs.fp().toV(), rhs.fp().toV()); // i16*16 v8 v9
1562
1563 constexpr int32_t FIRST_INDEX = 0b0001000100010001;
1564 constexpr int32_t SECOND_INDEX = 0b0010001000100010;
1565 constexpr int32_t THIRD_INDEX = 0b0100010001000100;
1566 constexpr int32_t FOURTH_INDEX = 0b1000100010001000;
1567
1568 VU.set(kScratchReg, E16, m2);
1569 li(kScratchReg, FIRST_INDEX);
1570 vmv_sx(v0, kScratchReg);
1571 vcompress_vv(kSimd128ScratchReg, intermediate, v0); // i16*4 a
1572 li(kScratchReg, SECOND_INDEX);
1573 vmv_sx(kSimd128ScratchReg2, kScratchReg);
1574 vcompress_vv(v0, intermediate, kSimd128ScratchReg2); // i16*4 b
1575
1576 VU.set(kScratchReg, E16, m1);
1577 vwadd_vv(kSimd128ScratchReg4, kSimd128ScratchReg, v0); // i32*4 c
1578
1579 VU.set(kScratchReg, E16, m2);
1580 li(kScratchReg, THIRD_INDEX);
1581 vmv_sx(v0, kScratchReg);
1582 vcompress_vv(kSimd128ScratchReg, intermediate, v0); // i16*4 a
1583
1584 li(kScratchReg, FOURTH_INDEX);
1585 vmv_sx(kSimd128ScratchReg2, kScratchReg);
1586 vcompress_vv(v0, intermediate, kSimd128ScratchReg2); // i16*4 b
1587
1588 VU.set(kScratchReg, E16, m1);
1589 vwadd_vv(kSimd128ScratchReg3, kSimd128ScratchReg, v0); // i32*4 c
1590
1591 VU.set(kScratchReg, E32, m1);
1592 vadd_vv(dst.fp().toV(), kSimd128ScratchReg4, kSimd128ScratchReg3);
1593 vadd_vv(dst.fp().toV(), dst.fp().toV(), acc.fp().toV());
1594}
1595
1596void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
1597 LiftoffRegister src) {
1598 VU.set(kScratchReg, E64, m1);
1599 vneg_vv(dst.fp().toV(), src.fp().toV());
1600}
1601
1602void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
1603 LiftoffRegister src) {
1604 VU.set(kScratchReg, E64, m1);
1605 Label notalltrue;
1606 vmv_vi(kSimd128ScratchReg, -1);
1607 vredminu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
1608 vmv_xs(dst.gp(), kSimd128ScratchReg);
1609 beqz(dst.gp(), &notalltrue);
1610 li(dst.gp(), 1);
1611 bind(&notalltrue);
1612}
1613
1614void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
1615 LiftoffRegister rhs) {
1616 VU.set(kScratchReg, E64, m1);
1617 andi(rhs.gp(), rhs.gp(), 64 - 1);
1618 vsll_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
1619}
1620
1621void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
1622 int32_t rhs) {
1623 VU.set(kScratchReg, E64, m1);
1624 if (is_uint5(rhs % 64)) {
1625 vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs % 64);
1626 } else {
1627 li(kScratchReg, rhs % 64);
1628 vsll_vx(dst.fp().toV(), lhs.fp().toV(), kScratchReg);
1629 }
1630}
1631
1632void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
1633 LiftoffRegister lhs,
1634 LiftoffRegister rhs) {
1635 VU.set(kScratchReg, E64, m1);
1636 andi(rhs.gp(), rhs.gp(), 64 - 1);
1637 vsra_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
1638}
1639
1640void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
1641 LiftoffRegister lhs, int32_t rhs) {
1642 VU.set(kScratchReg, E64, m1);
1643 if (is_uint5(rhs % 64)) {
1644 vsra_vi(dst.fp().toV(), lhs.fp().toV(), rhs % 64);
1645 } else {
1646 li(kScratchReg, rhs % 64);
1647 vsra_vx(dst.fp().toV(), lhs.fp().toV(), kScratchReg);
1648 }
1649}
1650
1651void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
1652 LiftoffRegister lhs,
1653 LiftoffRegister rhs) {
1654 VU.set(kScratchReg, E64, m1);
1655 andi(rhs.gp(), rhs.gp(), 64 - 1);
1656 vsrl_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
1657}
1658
1659void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
1660 LiftoffRegister lhs, int32_t rhs) {
1661 VU.set(kScratchReg, E64, m1);
1662 if (is_uint5(rhs % 64)) {
1663 vsrl_vi(dst.fp().toV(), lhs.fp().toV(), rhs % 64);
1664 } else {
1665 li(kScratchReg, rhs % 64);
1666 vsrl_vx(dst.fp().toV(), lhs.fp().toV(), kScratchReg);
1667 }
1668}
1669
1670void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
1671 LiftoffRegister rhs) {
1672 VU.set(kScratchReg, E64, m1);
1673 vadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1674}
1675
1676void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
1677 LiftoffRegister rhs) {
1678 VU.set(kScratchReg, E64, m1);
1679 vsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1680}
1681
1682void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
1683 LiftoffRegister rhs) {
1684 VU.set(kScratchReg, E64, m1);
1685 vmul_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1686}
1687
1688void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
1689 LiftoffRegister src) {
1690 VU.set(kScratchReg, E32, m1);
1691 vfabs_vv(dst.fp().toV(), src.fp().toV());
1692}
1693
1694void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
1695 LiftoffRegister src) {
1696 VU.set(kScratchReg, E32, m1);
1697 vfneg_vv(dst.fp().toV(), src.fp().toV());
1698}
1699
1700void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
1701 LiftoffRegister src) {
1702 VU.set(kScratchReg, E32, m1);
1703 vfsqrt_v(dst.fp().toV(), src.fp().toV());
1704}
1705
1706bool LiftoffAssembler::emit_f32x4_ceil(LiftoffRegister dst,
1707 LiftoffRegister src) {
1708 Ceil_f(dst.fp().toV(), src.fp().toV(), kScratchReg, kSimd128ScratchReg);
1709 return true;
1710}
1711
1712bool LiftoffAssembler::emit_f32x4_floor(LiftoffRegister dst,
1713 LiftoffRegister src) {
1714 Floor_f(dst.fp().toV(), src.fp().toV(), kScratchReg, kSimd128ScratchReg);
1715 return true;
1716}
1717
1718bool LiftoffAssembler::emit_f32x4_trunc(LiftoffRegister dst,
1719 LiftoffRegister src) {
1720 Trunc_f(dst.fp().toV(), src.fp().toV(), kScratchReg, kSimd128ScratchReg);
1721 return true;
1722}
1723
1724bool LiftoffAssembler::emit_f32x4_nearest_int(LiftoffRegister dst,
1725 LiftoffRegister src) {
1726 Round_f(dst.fp().toV(), src.fp().toV(), kScratchReg, kSimd128ScratchReg);
1727 return true;
1728}
1729
1730void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
1731 LiftoffRegister rhs) {
1732 VU.set(kScratchReg, E32, m1);
1733 vfadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1734}
1735
1736void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
1737 LiftoffRegister rhs) {
1738 VU.set(kScratchReg, E32, m1);
1739 vfsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1740}
1741
1742void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
1743 LiftoffRegister rhs) {
1744 VU.set(kScratchReg, E32, m1);
1745 VU.set(FPURoundingMode::RTZ);
1746 vfmul_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1747}
1748
1749void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
1750 LiftoffRegister rhs) {
1751 VU.set(kScratchReg, E32, m1);
1752 vfdiv_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1753}
1754
1755void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
1756 LiftoffRegister rhs) {
1757 const int32_t kNaN = 0x7FC00000;
1758 VU.set(kScratchReg, E32, m1);
1759 vmfeq_vv(v0, lhs.fp().toV(), lhs.fp().toV());
1760 vmfeq_vv(kSimd128ScratchReg, rhs.fp().toV(), rhs.fp().toV());
1761 vand_vv(v0, v0, kSimd128ScratchReg);
1762 li(kScratchReg, kNaN);
1763 vmv_vx(kSimd128ScratchReg, kScratchReg);
1764 vfmin_vv(kSimd128ScratchReg, rhs.fp().toV(), lhs.fp().toV(), Mask);
1765 vmv_vv(dst.fp().toV(), kSimd128ScratchReg);
1766}
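// NaN handling (annotation, applies to emit_f32x4_max below as well): v0 is
// set only for lanes where both inputs compare equal to themselves, i.e.
// neither is NaN; vfmin/vfmax runs masked on those lanes, while the remaining
// lanes keep the canonical quiet-NaN pattern kNaN preloaded into the scratch.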
1767
1768void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
1769 LiftoffRegister rhs) {
1770 const int32_t kNaN = 0x7FC00000;
1771 VU.set(kScratchReg, E32, m1);
1772 vmfeq_vv(v0, lhs.fp().toV(), lhs.fp().toV());
1773 vmfeq_vv(kSimd128ScratchReg, rhs.fp().toV(), rhs.fp().toV());
1774 vand_vv(v0, v0, kSimd128ScratchReg);
1775 li(kScratchReg, kNaN);
1776 vmv_vx(kSimd128ScratchReg, kScratchReg);
1777 vfmax_vv(kSimd128ScratchReg, rhs.fp().toV(), lhs.fp().toV(), Mask);
1778 vmv_vv(dst.fp().toV(), kSimd128ScratchReg);
1779}
1780
1781void LiftoffAssembler::emit_f32x4_relaxed_min(LiftoffRegister dst,
1782 LiftoffRegister lhs,
1783 LiftoffRegister rhs) {
1784 VU.set(kScratchReg, E32, m1);
1785 vfmin_vv(dst.fp().toV(), rhs.fp().toV(), lhs.fp().toV());
1786}
1787
1788void LiftoffAssembler::emit_f32x4_relaxed_max(LiftoffRegister dst,
1789 LiftoffRegister lhs,
1790 LiftoffRegister rhs) {
1791 VU.set(kScratchReg, E32, m1);
1792 vfmax_vv(dst.fp().toV(), rhs.fp().toV(), lhs.fp().toV());
1793}
1794
1795void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs,
1796 LiftoffRegister rhs) {
1797 VU.set(kScratchReg, E32, m1);
1798 // b < a ? b : a
1799 vmflt_vv(v0, rhs.fp().toV(), lhs.fp().toV());
1800 vmerge_vv(dst.fp().toV(), rhs.fp().toV(), lhs.fp().toV());
1801}
1802
1803void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs,
1804 LiftoffRegister rhs) {
1805 VU.set(kScratchReg, E32, m1);
1806 // a < b ? b : a
1807 vmflt_vv(v0, lhs.fp().toV(), rhs.fp().toV());
1808 vmerge_vv(dst.fp().toV(), rhs.fp().toV(), lhs.fp().toV());
1809}
1810
1811void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
1812 LiftoffRegister src) {
1813 VU.set(kScratchReg, E64, m1);
1814 vfabs_vv(dst.fp().toV(), src.fp().toV());
1815}
1816
1817void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
1818 LiftoffRegister src) {
1819 VU.set(kScratchReg, E64, m1);
1820 vfneg_vv(dst.fp().toV(), src.fp().toV());
1821}
1822
1823void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
1824 LiftoffRegister src) {
1825 VU.set(kScratchReg, E64, m1);
1826 vfsqrt_v(dst.fp().toV(), src.fp().toV());
1827}
1828
1829bool LiftoffAssembler::emit_f64x2_ceil(LiftoffRegister dst,
1830 LiftoffRegister src) {
1831 Ceil_d(dst.fp().toV(), src.fp().toV(), kScratchReg, kSimd128ScratchReg);
1832 return true;
1833}
1834
1835bool LiftoffAssembler::emit_f64x2_floor(LiftoffRegister dst,
1836 LiftoffRegister src) {
1837 Floor_d(dst.fp().toV(), src.fp().toV(), kScratchReg, kSimd128ScratchReg);
1838 return true;
1839}
1840
1841bool LiftoffAssembler::emit_f64x2_trunc(LiftoffRegister dst,
1842 LiftoffRegister src) {
1843 Trunc_d(dst.fp().toV(), src.fp().toV(), kScratchReg, kSimd128ScratchReg);
1844 return true;
1845}
1846
1847bool LiftoffAssembler::emit_f64x2_nearest_int(LiftoffRegister dst,
1848 LiftoffRegister src) {
1849 Round_d(dst.fp().toV(), src.fp().toV(), kScratchReg, kSimd128ScratchReg);
1850 return true;
1851}
1852
1853void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
1854 LiftoffRegister rhs) {
1855 VU.set(kScratchReg, E64, m1);
1856 vfadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1857}
1858
1859void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
1860 LiftoffRegister rhs) {
1861 VU.set(kScratchReg, E64, m1);
1862 vfsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1863}
1864
1865void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
1866 LiftoffRegister rhs) {
1867 VU.set(kScratchReg, E64, m1);
1868 vfmul_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1869}
1870
1871void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs,
1872 LiftoffRegister rhs) {
1873 VU.set(kScratchReg, E64, m1);
1874 vfdiv_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
1875}
1876
1877void LiftoffAssembler::emit_f64x2_relaxed_min(LiftoffRegister dst,
1878 LiftoffRegister lhs,
1879 LiftoffRegister rhs) {
1880 VU.set(kScratchReg, E64, m1);
1881 vfmin_vv(dst.fp().toV(), rhs.fp().toV(), lhs.fp().toV());
1882}
1883
1884void LiftoffAssembler::emit_f64x2_relaxed_max(LiftoffRegister dst,
1885 LiftoffRegister lhs,
1886 LiftoffRegister rhs) {
1887 VU.set(kScratchReg, E64, m1);
1888 vfmax_vv(dst.fp().toV(), rhs.fp().toV(), lhs.fp().toV());
1889}
1890
1891void LiftoffAssembler::emit_f64x2_pmin(LiftoffRegister dst, LiftoffRegister lhs,
1892 LiftoffRegister rhs) {
1893 VU.set(kScratchReg, E64, m1);
1894 // b < a ? b : a
1895 vmflt_vv(v0, rhs.fp().toV(), lhs.fp().toV());
1896 vmerge_vv(dst.fp().toV(), rhs.fp().toV(), lhs.fp().toV());
1897}
1898
1899void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
1900 LiftoffRegister rhs) {
1901 VU.set(kScratchReg, E64, m1);
1902 // a < b ? b : a
1903 vmflt_vv(v0, lhs.fp().toV(), rhs.fp().toV());
1904 vmerge_vv(dst.fp().toV(), rhs.fp().toV(), lhs.fp().toV());
1905}
1906
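// Float-to-int conversions must saturate and map NaN lanes to zero: the
// self-compare puts a not-NaN mask in v0, the destination is pre-filled
// with zeros, and the masked vfcvt (under RTZ rounding) converts only the
// non-NaN lanes, saturating out-of-range values.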
1907void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
1908 LiftoffRegister src) {
1909 VU.set(kScratchReg, E32, m1);
1910 VU.set(FPURoundingMode::RTZ);
1911 vmfeq_vv(v0, src.fp().toV(), src.fp().toV());
1912 vmv_vv(kSimd128ScratchReg, src.fp().toV());
1913 vmv_vx(dst.fp().toV(), zero_reg);
1914 vfcvt_x_f_v(dst.fp().toV(), kSimd128ScratchReg, Mask);
1915}
1916
1917void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
1918 LiftoffRegister src) {
1919 VU.set(kScratchReg, E32, m1);
1920 VU.set(FPURoundingMode::RTZ);
1921 vmfeq_vv(v0, src.fp().toV(), src.fp().toV());
1922 vmv_vv(kSimd128ScratchReg, src.fp().toV());
1923 vmv_vx(dst.fp().toV(), zero_reg);
1924 vfcvt_xu_f_v(dst.fp().toV(), kSimd128ScratchReg, Mask);
1925}
1926
1927void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
1928 LiftoffRegister src) {
1929 VU.set(kScratchReg, E32, m1);
1930 VU.set(FPURoundingMode::RTZ);
1931 vfcvt_f_x_v(dst.fp().toV(), src.fp().toV());
1932}
1933
1934void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
1935 LiftoffRegister src) {
1936 VU.set(kScratchReg, E32, m1);
1937 VU.set(FPURoundingMode::RTZ);
1938 vfcvt_f_xu_v(dst.fp().toV(), src.fp().toV());
1939}
1940
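// The narrowing conversions copy both inputs into the adjacent registers
// v24/v25 (kSimd128ScratchReg and its neighbor) so they form one LMUL=2
// source group, then vnclip(u) with shift amount 0 narrows the group with
// signed resp. unsigned saturation; the unsigned variants first clamp
// negative lanes to zero with vmax_vx.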
1941void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
1942 LiftoffRegister lhs,
1943 LiftoffRegister rhs) {
1944 VU.set(kScratchReg, E16, m1);
1945 vmv_vv(kSimd128ScratchReg, lhs.fp().toV()); // kSimd128ScratchReg v24
1946 vmv_vv(v25, rhs.fp().toV());
1947 VU.set(kScratchReg, E8, m1);
1948 VU.set(FPURoundingMode::RNE);
1949 vnclip_vi(dst.fp().toV(), kSimd128ScratchReg, 0);
1950}
1951
1952void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst,
1953 LiftoffRegister lhs,
1954 LiftoffRegister rhs) {
1955 VU.set(kScratchReg, E16, m1);
1956 vmv_vv(kSimd128ScratchReg, lhs.fp().toV()); // kSimd128ScratchReg v24
1957 vmv_vv(v25, rhs.fp().toV());
1958 VU.set(kScratchReg, E16, m2);
1959 vmax_vx(kSimd128ScratchReg, kSimd128ScratchReg, zero_reg);
1960 VU.set(kScratchReg, E8, m1);
1961 VU.set(FPURoundingMode::RNE);
1962 vnclipu_vi(dst.fp().toV(), kSimd128ScratchReg, 0);
1963}
1964
1965void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst,
1966 LiftoffRegister lhs,
1967 LiftoffRegister rhs) {
1968 VU.set(kScratchReg, E32, m1);
1969 vmv_vv(kSimd128ScratchReg, lhs.fp().toV()); // kSimd128ScratchReg v24
1970 vmv_vv(v25, rhs.fp().toV());
1971 VU.set(kScratchReg, E16, m1);
1972 VU.set(FPURoundingMode::RNE);
1973 vnclip_vi(dst.fp().toV(), kSimd128ScratchReg, 0);
1974}
1975
1976void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
1977 LiftoffRegister lhs,
1978 LiftoffRegister rhs) {
1979 VU.set(kScratchReg, E32, m1);
1980 vmv_vv(kSimd128ScratchReg, lhs.fp().toV()); // kSimd128ScratchReg v24
1981 vmv_vv(v25, rhs.fp().toV());
1982 VU.set(kScratchReg, E32, m2);
1983 vmax_vx(kSimd128ScratchReg, kSimd128ScratchReg, zero_reg);
1984 VU.set(kScratchReg, E16, m1);
1985 VU.set(FPURoundingMode::RNE);
1986 vnclipu_vi(dst.fp().toV(), kSimd128ScratchReg, 0);
1987}
1988
1989void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst,
1990 LiftoffRegister src) {
1991 VU.set(kScratchReg, E16, m1);
1992 vmv_vv(kSimd128ScratchReg, src.fp().toV());
1993 vsext_vf2(dst.fp().toV(), kSimd128ScratchReg);
1994}
1995
1996void LiftoffAssembler::emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst,
1997 LiftoffRegister src) {
1998 VU.set(kScratchReg, E8, m1);
1999 vslidedown_vi(kSimd128ScratchReg, src.fp().toV(), 8);
2000 VU.set(kScratchReg, E16, m1);
2001 vsext_vf2(dst.fp().toV(), kSimd128ScratchReg);
2002}
2003
2004void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
2005 LiftoffRegister src) {
2006 VU.set(kScratchReg, E16, m1);
2007 vmv_vv(kSimd128ScratchReg, src.fp().toV());
2008 vzext_vf2(dst.fp().toV(), kSimd128ScratchReg);
2009}
2010
2011void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst,
2012 LiftoffRegister src) {
2013 VU.set(kScratchReg, E8, m1);
2014 vslidedown_vi(kSimd128ScratchReg, src.fp().toV(), 8);
2015 VU.set(kScratchReg, E16, m1);
2016 vzext_vf2(dst.fp().toV(), kSimd128ScratchReg);
2017}
2018
2019void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
2020 LiftoffRegister src) {
2021 VU.set(kScratchReg, E32, m1);
2022 vmv_vv(kSimd128ScratchReg, src.fp().toV());
2023 vsext_vf2(dst.fp().toV(), kSimd128ScratchReg);
2024}
2025
2026void LiftoffAssembler::emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst,
2027 LiftoffRegister src) {
2028 VU.set(kScratchReg, E16, m1);
2029 vslidedown_vi(kSimd128ScratchReg, src.fp().toV(), 4);
2030 VU.set(kScratchReg, E32, m1);
2031 vsext_vf2(dst.fp().toV(), kSimd128ScratchReg);
2032}
2033
2034void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
2035 LiftoffRegister src) {
2036 VU.set(kScratchReg, E32, m1);
2037 vmv_vv(kSimd128ScratchReg, src.fp().toV());
2038 vzext_vf2(dst.fp().toV(), kSimd128ScratchReg);
2039}
2040
2041void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
2042 LiftoffRegister src) {
2043 VU.set(kScratchReg, E16, m1);
2044 vslidedown_vi(kSimd128ScratchReg, src.fp().toV(), 4);
2045 VU.set(kScratchReg, E32, m1);
2046 vzext_vf2(dst.fp().toV(), kSimd128ScratchReg);
2047}
2048
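// Unsigned rounding average: (a + b + 1) >> 1. The sum is formed at twice
// the element width (vwaddu), 1 is added, the widened result is divided by
// 2, and vnclipu narrows it back to the original lane size.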
2049void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
2050 LiftoffRegister lhs,
2051 LiftoffRegister rhs) {
2052 VU.set(kScratchReg, E8, m1);
2053 vwaddu_vv(kSimd128ScratchReg, lhs.fp().toV(), rhs.fp().toV());
2054 li(kScratchReg, 1);
2055 vwaddu_wx(kSimd128ScratchReg3, kSimd128ScratchReg, kScratchReg);
2056 li(kScratchReg, 2);
2057 VU.set(kScratchReg2, E16, m2);
2058 vdivu_vx(kSimd128ScratchReg3, kSimd128ScratchReg3, kScratchReg);
2059 VU.set(kScratchReg2, E8, m1);
2060 vnclipu_vi(dst.fp().toV(), kSimd128ScratchReg3, 0);
2061}
2062void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst,
2063 LiftoffRegister lhs,
2064 LiftoffRegister rhs) {
2065 VU.set(kScratchReg2, E16, m1);
2066 vwaddu_vv(kSimd128ScratchReg, lhs.fp().toV(), rhs.fp().toV());
2067 li(kScratchReg, 1);
2068 vwaddu_wx(kSimd128ScratchReg3, kSimd128ScratchReg, kScratchReg);
2069 li(kScratchReg, 2);
2070 VU.set(kScratchReg2, E32, m2);
2071 vdivu_vx(kSimd128ScratchReg3, kSimd128ScratchReg3, kScratchReg);
2072 VU.set(kScratchReg2, E16, m1);
2073 vnclipu_vi(dst.fp().toV(), kSimd128ScratchReg3, 0);
2074}
2075
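// Integer abs: build a mask (v0) of negative lanes with vmslt against a
// zero vector, copy src to dst, then negate just the masked lanes.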
2076void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst,
2077 LiftoffRegister src) {
2078 VU.set(kScratchReg, E8, m1);
2079 vmv_vx(kSimd128RegZero, zero_reg);
2080 vmv_vv(dst.fp().toV(), src.fp().toV());
2081 vmv_vv(v0, kSimd128RegZero);
2082 vmslt_vv(v0, src.fp().toV(), kSimd128RegZero);
2083 vneg_vv(dst.fp().toV(), src.fp().toV(), MaskType::Mask);
2084}
2085
2086void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
2087 LiftoffRegister src) {
2088 VU.set(kScratchReg, E16, m1);
2089 vmv_vx(kSimd128RegZero, zero_reg);
2090 vmv_vv(dst.fp().toV(), src.fp().toV());
2091 vmv_vv(v0, kSimd128RegZero);
2092 vmslt_vv(v0, src.fp().toV(), kSimd128RegZero);
2093 vneg_vv(dst.fp().toV(), src.fp().toV(), MaskType::Mask);
2094}
2095
2096void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
2097 LiftoffRegister src) {
2098 VU.set(kScratchReg, E64, m1);
2099 vmv_vx(kSimd128RegZero, zero_reg);
2100 vmv_vv(dst.fp().toV(), src.fp().toV());
2101 vmv_vv(v0, kSimd128RegZero);
2102 vmslt_vv(v0, src.fp().toV(), kSimd128RegZero);
2103 vneg_vv(dst.fp().toV(), src.fp().toV(), MaskType::Mask);
2104}
2105
2106void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
2107 LiftoffRegister src) {
2108 VU.set(kScratchReg, E32, m1);
2109 vmv_vx(kSimd128RegZero, zero_reg);
2110 vmv_vv(dst.fp().toV(), src.fp().toV());
2111 vmv_vv(v0, kSimd128RegZero);
2112 vmslt_vv(v0, src.fp().toV(), kSimd128RegZero);
2113 vneg_vv(dst.fp().toV(), src.fp().toV(), MaskType::Mask);
2114}
2115
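// Lane extraction slides the requested lane down to element 0 and moves it
// to a GP register with vmv_xs, which sign-extends; the unsigned variants
// clear the upper bits again with a shift-left/shift-right pair.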
2116void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
2117 LiftoffRegister lhs,
2118 uint8_t imm_lane_idx) {
2119 VU.set(kScratchReg, E8, m1);
2120 vslidedown_vi(kSimd128ScratchReg, lhs.fp().toV(), imm_lane_idx);
2121 vmv_xs(dst.gp(), kSimd128ScratchReg);
2122 slli(dst.gp(), dst.gp(), sizeof(void*) * 8 - 8);
2123 srli(dst.gp(), dst.gp(), sizeof(void*) * 8 - 8);
2124}
2125
2126void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
2127 LiftoffRegister lhs,
2128 uint8_t imm_lane_idx) {
2129 VU.set(kScratchReg, E8, m1);
2130 vslidedown_vi(kSimd128ScratchReg, lhs.fp().toV(), imm_lane_idx);
2131 vmv_xs(dst.gp(), kSimd128ScratchReg);
2132}
2133
2134void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
2135 LiftoffRegister lhs,
2136 uint8_t imm_lane_idx) {
2137 VU.set(kScratchReg, E16, m1);
2138 vslidedown_vi(kSimd128ScratchReg, lhs.fp().toV(), imm_lane_idx);
2139 vmv_xs(dst.gp(), kSimd128ScratchReg);
2140 slli(dst.gp(), dst.gp(), sizeof(void*) * 8 - 16);
2141 srli(dst.gp(), dst.gp(), sizeof(void*) * 8 - 16);
2142}
2143
2144void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
2145 LiftoffRegister lhs,
2146 uint8_t imm_lane_idx) {
2147 VU.set(kScratchReg, E16, m1);
2148 vslidedown_vi(kSimd128ScratchReg, lhs.fp().toV(), imm_lane_idx);
2149 vmv_xs(dst.gp(), kSimd128ScratchReg);
2150}
2151
2152void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
2153 LiftoffRegister lhs,
2154 uint8_t imm_lane_idx) {
2155 VU.set(kScratchReg, E32, m1);
2156 vslidedown_vi(kSimd128ScratchReg, lhs.fp().toV(), imm_lane_idx);
2157 vmv_xs(dst.gp(), kSimd128ScratchReg);
2158}
2159
2160void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
2161 LiftoffRegister lhs,
2162 uint8_t imm_lane_idx) {
2163 VU.set(kScratchReg, E32, m1);
2164 vslidedown_vi(kSimd128ScratchReg, lhs.fp().toV(), imm_lane_idx);
2165 vfmv_fs(dst.fp(), kSimd128ScratchReg);
2166}
2167
2168void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
2169 LiftoffRegister lhs,
2170 uint8_t imm_lane_idx) {
2171 VU.set(kScratchReg, E64, m1);
2172 vslidedown_vi(kSimd128ScratchReg, lhs.fp().toV(), imm_lane_idx);
2173 vfmv_fs(dst.fp(), kSimd128ScratchReg);
2174}
2175
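// Lane replacement builds a one-hot mask in v0 (li of 1 << lane, moved in
// with vmv_sx) and merges the scalar into only that lane via vmerge_vx /
// vfmerge_vf; f32 values take a detour through a GP register (fmv_x_w)
// because vmerge_vx expects an integer scalar source.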
2176void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
2177 LiftoffRegister src1,
2178 LiftoffRegister src2,
2179 uint8_t imm_lane_idx) {
2180 VU.set(kScratchReg, E64, m1);
2181 li(kScratchReg, 0x1 << imm_lane_idx);
2182 vmv_sx(v0, kScratchReg);
2183 VU.set(kScratchReg, E8, m1);
2184 vmerge_vx(dst.fp().toV(), src2.gp(), src1.fp().toV());
2185}
2186
2187void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
2188 LiftoffRegister src1,
2189 LiftoffRegister src2,
2190 uint8_t imm_lane_idx) {
2191 VU.set(kScratchReg, E16, m1);
2192 li(kScratchReg, 0x1 << imm_lane_idx);
2193 vmv_sx(v0, kScratchReg);
2194 vmerge_vx(dst.fp().toV(), src2.gp(), src1.fp().toV());
2195}
2196
2197void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
2198 LiftoffRegister src1,
2199 LiftoffRegister src2,
2200 uint8_t imm_lane_idx) {
2201 VU.set(kScratchReg, E32, m1);
2202 li(kScratchReg, 0x1 << imm_lane_idx);
2203 vmv_sx(v0, kScratchReg);
2204 vmerge_vx(dst.fp().toV(), src2.gp(), src1.fp().toV());
2205}
2206
2207void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
2208 LiftoffRegister src1,
2209 LiftoffRegister src2,
2210 uint8_t imm_lane_idx) {
2211 VU.set(kScratchReg, E32, m1);
2212 li(kScratchReg, 0x1 << imm_lane_idx);
2213 vmv_sx(v0, kScratchReg);
2214 fmv_x_w(kScratchReg, src2.fp());
2215 vmerge_vx(dst.fp().toV(), kScratchReg, src1.fp().toV());
2216}
2217
2218void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
2219 LiftoffRegister src1,
2220 LiftoffRegister src2,
2221 uint8_t imm_lane_idx) {
2222 VU.set(kScratchReg, E64, m1);
2223 li(kScratchReg, 0x1 << imm_lane_idx);
2224 vmv_sx(v0, kScratchReg);
2225 vfmerge_vf(dst.fp().toV(), src2.fp(), src1.fp().toV());
2226}
2227
2228void LiftoffAssembler::emit_s128_store_nonzero_if_nan(Register dst,
2229 LiftoffRegister src,
2230 Register tmp_gp,
2231 LiftoffRegister tmp_s128,
2232 ValueKind lane_kind) {
2233 ASM_CODE_COMMENT(this);
2234 if (lane_kind == kF32) {
2235 VU.set(kScratchReg, E32, m1);
2236 vmfeq_vv(kSimd128ScratchReg, src.fp().toV(),
2237 src.fp().toV()); // scratch <- !IsNan(src)
2238 } else {
2239 VU.set(kScratchReg, E64, m1);
2240 DCHECK_EQ(lane_kind, kF64);
2241 vmfeq_vv(kSimd128ScratchReg, src.fp().toV(),
2242 src.fp().toV()); // scratch <- !IsNan(src)
2243 }
2244 vmv_xs(kScratchReg, kSimd128ScratchReg);
2245 not_(kScratchReg, kScratchReg);
2246 andi(kScratchReg, kScratchReg, int32_t(lane_kind == kF32 ? 0xF : 0x3));
2247 Sw(kScratchReg, MemOperand(dst));
2248}
2249
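// qfma/qfms lower to the fused vfmadd/vfnmsub forms: dst = src1 * src2 +
// src3 and dst = src3 - src1 * src2. The accumulator is staged in a
// scratch register so that dst may alias any of the three sources.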
2250void LiftoffAssembler::emit_f32x4_qfma(LiftoffRegister dst,
2251 LiftoffRegister src1,
2252 LiftoffRegister src2,
2253 LiftoffRegister src3) {
2254 VU.set(kScratchReg, E32, m1);
2255 vmv_vv(kSimd128ScratchReg, src1.fp().toV());
2256 vfmadd_vv(kSimd128ScratchReg, src2.fp().toV(), src3.fp().toV());
2257 vmv_vv(dst.fp().toV(), kSimd128ScratchReg);
2258}
2259
2260void LiftoffAssembler::emit_f32x4_qfms(LiftoffRegister dst,
2261 LiftoffRegister src1,
2262 LiftoffRegister src2,
2263 LiftoffRegister src3) {
2264 VU.set(kScratchReg, E32, m1);
2265 vmv_vv(kSimd128ScratchReg, src1.fp().toV());
2266 vfnmsub_vv(kSimd128ScratchReg, src2.fp().toV(), src3.fp().toV());
2267 vmv_vv(dst.fp().toV(), kSimd128ScratchReg);
2268}
2269
2270void LiftoffAssembler::emit_f64x2_qfma(LiftoffRegister dst,
2271 LiftoffRegister src1,
2272 LiftoffRegister src2,
2273 LiftoffRegister src3) {
2274 VU.set(kScratchReg, E64, m1);
2275 vmv_vv(kSimd128ScratchReg, src1.fp().toV());
2276 vfmadd_vv(kSimd128ScratchReg, src2.fp().toV(), src3.fp().toV());
2277 vmv_vv(dst.fp().toV(), kSimd128ScratchReg);
2278}
2279
2280void LiftoffAssembler::emit_f64x2_qfms(LiftoffRegister dst,
2281 LiftoffRegister src1,
2282 LiftoffRegister src2,
2283 LiftoffRegister src3) {
2284 VU.set(kScratchReg, E64, m1);
2285 vmv_vv(kSimd128ScratchReg, src1.fp().toV());
2286 vfnmsub_vv(kSimd128ScratchReg, src2.fp().toV(), src3.fp().toV());
2287 vmv_vv(dst.fp().toV(), kSimd128ScratchReg);
2288}
2289
2290void LiftoffAssembler::StackCheck(Label* ool_code) {
2291 UseScratchRegisterScope temps(this);
2292 Register limit_address = temps.Acquire();
2293 LoadStackLimit(limit_address, StackLimitKind::kInterruptStackLimit);
2294 MacroAssembler::Branch(ool_code, ule, sp, Operand(limit_address));
2295}
2296
2297void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
2298 if (v8_flags.debug_code) Abort(reason);
2299}
2300
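// PushRegisters spills the GP registers below sp first (the first register
// in the set at the highest offset), then the FP registers; PopRegisters
// below restores them by walking the sets in the mirrored order.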
2301void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
2302 LiftoffRegList gp_regs = regs & kGpCacheRegList;
2303 int32_t num_gp_regs = gp_regs.GetNumRegsSet();
2304 if (num_gp_regs) {
2305 int32_t offset = num_gp_regs * kSystemPointerSize;
2306 AddWord(sp, sp, Operand(-offset));
2307 while (!gp_regs.is_empty()) {
2308 LiftoffRegister reg = gp_regs.GetFirstRegSet();
2309 offset -= kSystemPointerSize;
2310 StoreWord(reg.gp(), MemOperand(sp, offset));
2311 gp_regs.clear(reg);
2312 }
2313 DCHECK_EQ(offset, 0);
2314 }
2315 LiftoffRegList fp_regs = regs & kFpCacheRegList;
2316 int32_t num_fp_regs = fp_regs.GetNumRegsSet();
2317 if (num_fp_regs) {
2318 AddWord(sp, sp, Operand(-(num_fp_regs * kStackSlotSize)));
2319 int32_t offset = 0;
2320 while (!fp_regs.is_empty()) {
2321 LiftoffRegister reg = fp_regs.GetFirstRegSet();
2322 MacroAssembler::StoreDouble(reg.fp(), MemOperand(sp, offset));
2323 fp_regs.clear(reg);
2324 offset += sizeof(double);
2325 }
2326 DCHECK_EQ(offset, num_fp_regs * sizeof(double));
2327 }
2328}
2329
2330void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
2331 LiftoffRegList fp_regs = regs & kFpCacheRegList;
2332 int32_t fp_offset = 0;
2333 while (!fp_regs.is_empty()) {
2334 LiftoffRegister reg = fp_regs.GetFirstRegSet();
2335 MacroAssembler::LoadDouble(reg.fp(), MemOperand(sp, fp_offset));
2336 fp_regs.clear(reg);
2337 fp_offset += sizeof(double);
2338 }
2339 if (fp_offset) AddWord(sp, sp, Operand(fp_offset));
2340 LiftoffRegList gp_regs = regs & kGpCacheRegList;
2341 int32_t gp_offset = 0;
2342 while (!gp_regs.is_empty()) {
2343 LiftoffRegister reg = gp_regs.GetLastRegSet();
2344 LoadWord(reg.gp(), MemOperand(sp, gp_offset));
2345 gp_regs.clear(reg);
2346 gp_offset += kSystemPointerSize;
2347 }
2348 AddWord(sp, sp, Operand(gp_offset));
2349}
2350
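// Record which spill slots hold tagged (reference) values so the GC can
// visit them: every GP spill advances the slot index, and slots whose
// register is in ref_spills are marked in the safepoint table.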
2351void LiftoffAssembler::RecordSpillsInSafepoint(
2352 SafepointTableBuilder::Safepoint& safepoint, LiftoffRegList all_spills,
2353 LiftoffRegList ref_spills, int spill_offset) {
2354 LiftoffRegList fp_spills = all_spills & kFpCacheRegList;
2355 int spill_space_size = fp_spills.GetNumRegsSet() * kSimd128Size;
2356 LiftoffRegList gp_spills = all_spills & kGpCacheRegList;
2357 while (!gp_spills.is_empty()) {
2358 LiftoffRegister reg = gp_spills.GetFirstRegSet();
2359 if (ref_spills.has(reg)) {
2360 safepoint.DefineTaggedStackSlot(spill_offset);
2361 }
2362 gp_spills.clear(reg);
2363 ++spill_offset;
2364 spill_space_size += kSystemPointerSize;
2365 }
2366 // Record the number of additional spill slots.
2367 RecordOolSpillSpaceSize(spill_space_size);
2368}
2369
2370void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
2371 MacroAssembler::DropAndRet(static_cast<int>(num_stack_slots));
2372}
2373
2374void LiftoffAssembler::CallNativeWasmCode(Address addr) {
2375 Call(addr, RelocInfo::WASM_CALL);
2376}
2377
2378void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
2379 Jump(addr, RelocInfo::WASM_CALL);
2380}
2381
2382void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
2383 compiler::CallDescriptor* call_descriptor,
2384 Register target) {
2385 DCHECK(target.is_valid());
2386 CallWasmCodePointer(target, call_descriptor->signature_hash());
2387}
2388
2389void LiftoffAssembler::TailCallIndirect(
2390 compiler::CallDescriptor* call_descriptor, Register target) {
2391 DCHECK(target.is_valid());
2392 CallWasmCodePointer(target, call_descriptor->signature_hash(),
2393 CallJumpMode::kTailCall);
2394}
2395
2396void LiftoffAssembler::CallBuiltin(Builtin builtin) {
2397 // A direct call to a builtin. Just encode the builtin index. This will be
2398 // patched at relocation.
2399 Call(static_cast<Address>(builtin), RelocInfo::WASM_STUB_CALL);
2400}
2401
2402void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
2403 AddWord(sp, sp, Operand(-size));
2404 MacroAssembler::Move(addr, sp);
2405}
2406
2407void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
2408 AddWord(sp, sp, Operand(size));
2409}
2410
2411void LiftoffAssembler::MaybeOSR() {}
2412
2413void LiftoffAssembler::emit_store_nonzero(Register dst) {
2414 Sw(dst, MemOperand(dst));
2415}
2416
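// feq_s/feq_d yield 1 exactly when src is not NaN; seqz inverts that, so
// the word stored to *dst is nonzero iff src is NaN.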
2417void LiftoffAssembler::emit_store_nonzero_if_nan(Register dst, FPURegister src,
2418 ValueKind kind) {
2419 UseScratchRegisterScope temps(this);
2420 Register scratch = temps.Acquire();
2421 li(scratch, 1);
2422 if (kind == kF32) {
2423 feq_s(scratch, src, src); // rd <- !isNan(src)
2424 } else {
2425 DCHECK_EQ(kind, kF64);
2426 feq_d(scratch, src, src); // rd <- !isNan(src)
2427 }
2428 seqz(scratch, scratch);
2429 Sw(scratch, MemOperand(dst));
2430}
2431
2432void LiftoffAssembler::CallFrameSetupStub(int declared_function_index) {
2433// The standard library used by gcc tryjobs does not consider `std::find` to be
2434// `constexpr`, so wrap it in a `#ifdef __clang__` block.
2435#ifdef __clang__
2436 static_assert(std::find(std::begin(wasm::kGpParamRegisters),
2437 std::end(wasm::kGpParamRegisters),
2438 kLiftoffFrameSetupFunctionReg) ==
2439 std::end(wasm::kGpParamRegisters));
2440#endif
2441
2442 // We must push at least {ra} before calling the stub, otherwise it would
2443 // get clobbered with no possibility to recover it. So just set up the
2444 // frame here.
2445 EnterFrame(StackFrame::WASM);
2446 LoadConstant(LiftoffRegister(kLiftoffFrameSetupFunctionReg),
2447 WasmValue(declared_function_index));
2448 CallBuiltin(Builtin::kWasmLiftoffFrameSetup);
2449}
2450
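// The f16x8 operations are not implemented for RISC-V; returning false
// signals that the operation is unsupported, so Liftoff falls back to the
// generic path for functions that use them.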
2451bool LiftoffAssembler::emit_f16x8_splat(LiftoffRegister dst,
2452 LiftoffRegister src) {
2453 return false;
2454}
2455bool LiftoffAssembler::emit_f16x8_extract_lane(LiftoffRegister dst,
2456 LiftoffRegister lhs,
2457 uint8_t imm_lane_idx) {
2458 return false;
2459}
2460bool LiftoffAssembler::emit_f16x8_replace_lane(LiftoffRegister dst,
2461 LiftoffRegister src1,
2462 LiftoffRegister src2,
2463 uint8_t imm_lane_idx) {
2464 return false;
2465}
2466
2467bool LiftoffAssembler::emit_f16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
2468 LiftoffRegister rhs) {
2469 return false;
2470}
2471bool LiftoffAssembler::emit_f16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
2472 LiftoffRegister rhs) {
2473 return false;
2474}
2475bool LiftoffAssembler::emit_f16x8_lt(LiftoffRegister dst, LiftoffRegister lhs,
2476 LiftoffRegister rhs) {
2477 return false;
2478}
2479bool LiftoffAssembler::emit_f16x8_le(LiftoffRegister dst, LiftoffRegister lhs,
2480 LiftoffRegister rhs) {
2481 return false;
2482}
2483
2484bool LiftoffAssembler::emit_f16x8_abs(LiftoffRegister dst,
2485 LiftoffRegister src) {
2486 return false;
2487}
2488bool LiftoffAssembler::emit_f16x8_neg(LiftoffRegister dst,
2489 LiftoffRegister src) {
2490 return false;
2491}
2492bool LiftoffAssembler::emit_f16x8_sqrt(LiftoffRegister dst,
2493 LiftoffRegister src) {
2494 return false;
2495}
2496bool LiftoffAssembler::emit_f16x8_ceil(LiftoffRegister dst,
2497 LiftoffRegister src) {
2498 return false;
2499}
2500bool LiftoffAssembler::emit_f16x8_floor(LiftoffRegister dst,
2501 LiftoffRegister src) {
2502 return false;
2503}
2504bool LiftoffAssembler::emit_f16x8_trunc(LiftoffRegister dst,
2505 LiftoffRegister src) {
2506 return false;
2507}
2508bool LiftoffAssembler::emit_f16x8_nearest_int(LiftoffRegister dst,
2509 LiftoffRegister src) {
2510 return false;
2511}
2512
2513bool LiftoffAssembler::emit_f16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
2514 LiftoffRegister rhs) {
2515 return false;
2516}
2517bool LiftoffAssembler::emit_f16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
2518 LiftoffRegister rhs) {
2519 return false;
2520}
2521bool LiftoffAssembler::emit_f16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
2522 LiftoffRegister rhs) {
2523 return false;
2524}
2525bool LiftoffAssembler::emit_f16x8_div(LiftoffRegister dst, LiftoffRegister lhs,
2526 LiftoffRegister rhs) {
2527 return false;
2528}
2529bool LiftoffAssembler::emit_f16x8_min(LiftoffRegister dst, LiftoffRegister lhs,
2530 LiftoffRegister rhs) {
2531 return false;
2532}
2533bool LiftoffAssembler::emit_f16x8_max(LiftoffRegister dst, LiftoffRegister lhs,
2534 LiftoffRegister rhs) {
2535 return false;
2536}
2537bool LiftoffAssembler::emit_f16x8_pmin(LiftoffRegister dst, LiftoffRegister lhs,
2538 LiftoffRegister rhs) {
2539 return false;
2540}
2541bool LiftoffAssembler::emit_f16x8_pmax(LiftoffRegister dst, LiftoffRegister lhs,
2542 LiftoffRegister rhs) {
2543 return false;
2544}
2545
2546bool LiftoffAssembler::emit_i16x8_sconvert_f16x8(LiftoffRegister dst,
2547 LiftoffRegister src) {
2548 return false;
2549}
2550bool LiftoffAssembler::emit_i16x8_uconvert_f16x8(LiftoffRegister dst,
2551 LiftoffRegister src) {
2552 return false;
2553}
2554bool LiftoffAssembler::emit_f16x8_sconvert_i16x8(LiftoffRegister dst,
2555 LiftoffRegister src) {
2556 return false;
2557}
2558bool LiftoffAssembler::emit_f16x8_uconvert_i16x8(LiftoffRegister dst,
2559 LiftoffRegister src) {
2560 return false;
2561}
2562bool LiftoffAssembler::emit_f16x8_demote_f32x4_zero(LiftoffRegister dst,
2563 LiftoffRegister src) {
2564 return false;
2565}
2566bool LiftoffAssembler::emit_f16x8_demote_f64x2_zero(LiftoffRegister dst,
2567 LiftoffRegister src) {
2568 return false;
2569}
2570bool LiftoffAssembler::emit_f32x4_promote_low_f16x8(LiftoffRegister dst,
2571 LiftoffRegister src) {
2572 return false;
2573}
2574
2575bool LiftoffAssembler::emit_f16x8_qfma(LiftoffRegister dst,
2576 LiftoffRegister src1,
2577 LiftoffRegister src2,
2578 LiftoffRegister src3) {
2579 return false;
2580}
2581
2582bool LiftoffAssembler::emit_f16x8_qfms(LiftoffRegister dst,
2583 LiftoffRegister src1,
2584 LiftoffRegister src2,
2585 LiftoffRegister src3) {
2586 return false;
2587}
2588
2589} // namespace v8::internal::wasm
2590
2591#endif // V8_WASM_BASELINE_RISCV_LIFTOFF_ASSEMBLER_RISCV_INL_H_