liftoff-assembler-mips64-inl.h
1// Copyright 2017 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_INL_H_
6#define V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_INL_H_
7
16
17namespace v8::internal::wasm {
18
19namespace liftoff {
20
21// Liftoff Frames.
22//
23// slot Frame
24// +--------------------+---------------------------
25// n+4 | optional padding slot to keep the stack 16 byte aligned.
26// n+3 | parameter n |
27// ... | ... |
28// 4 | parameter 1 | or parameter 2
29// 3 | parameter 0 | or parameter 1
30// 2 | (result address) | or parameter 0
31// -----+--------------------+---------------------------
32// 1 | return addr (ra) |
33// 0 | previous frame (fp)|
34// -----+--------------------+ <-- frame ptr (fp)
35// -1 | StackFrame::WASM |
36// -2 | instance |
37// -3 | feedback vector|
38// -----+--------------------+---------------------------
39// -4 | slot 0 | ^
40// -5 | slot 1 | |
41// | | Frame slots
42// | | |
43// | | v
44// | optional padding slot to keep the stack 16 byte aligned.
45// -----+--------------------+ <-- stack ptr (sp)
46//
47
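// Illustrative example (assumes kSystemPointerSize == 8; not part of the
// original V8 source): each slot in the diagram above is 8 bytes, so wasm
// frame slot 0 (index -4) lives at fp - 32 and would be addressed through the
// helper defined just below as
//   MemOperand slot0 = liftoff::GetStackSlot(32);  // == MemOperand(fp, -32)
// while stack parameters (indices 2, 3, ... in the diagram) sit at positive
// offsets from fp, e.g. MemOperand(fp, 2 * kSystemPointerSize).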
48inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
49
52}
53
54template <typename T>
55inline MemOperand GetMemOp(LiftoffAssembler* assm, Register addr,
56 Register offset, T offset_imm,
57 bool i64_offset = false, unsigned shift_amount = 0) {
58 if (offset != no_reg) {
59 if (!i64_offset) {
60 assm->Dext(kScratchReg, offset, 0, 32);
 61 offset = kScratchReg;
 62 }
63 if (shift_amount != 0) {
64 assm->Dlsa(kScratchReg, addr, offset, shift_amount);
65 } else {
66 assm->daddu(kScratchReg, offset, addr);
67 }
68 addr = kScratchReg;
69 }
70 if (is_int31(offset_imm)) {
71 int32_t offset_imm32 = static_cast<int32_t>(offset_imm);
72 return MemOperand(addr, offset_imm32);
73 } else {
74 assm->li(kScratchReg2, Operand(offset_imm));
75 assm->daddu(kScratchReg, addr, kScratchReg2);
76 return MemOperand(kScratchReg, 0);
77 }
78}
79
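// Illustrative example (not part of the original V8 source): for a typical
// wasm memory access with a 32-bit index register and a small constant
// offset, e.g.
//   GetMemOp(assm, /*addr=*/a0, /*offset=*/a1, /*offset_imm=*/16)
// the helper above emits roughly
//   Dext(kScratchReg, a1, 0, 32);          // zero-extend the 32-bit index
//   daddu(kScratchReg, kScratchReg, a0);   // index + base
// and returns MemOperand(kScratchReg, 16), since 16 fits in int31. Only an
// offset_imm that does not fit in 31 bits is materialized into kScratchReg2
// and added with an extra daddu.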
80inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
 81 ValueKind kind) {
 82 switch (kind) {
83 case kI16:
84 assm->Lh(dst.gp(), src);
85 break;
86 case kI32:
87 assm->Lw(dst.gp(), src);
88 break;
89 case kI64:
90 case kRef:
91 case kRefNull:
92 assm->Ld(dst.gp(), src);
93 break;
94 case kF32:
95 assm->Lwc1(dst.fp(), src);
96 break;
97 case kF64:
98 assm->Ldc1(dst.fp(), src);
99 break;
100 case kS128:
101 assm->ld_b(dst.fp().toW(), src);
102 break;
103 default:
104 UNREACHABLE();
105 }
106}
107
108inline void Store(LiftoffAssembler* assm, MemOperand dst, LiftoffRegister src,
109 ValueKind kind) {
110 switch (kind) {
111 case kI16:
112 assm->Ush(src.gp(), dst, t8);
113 break;
114 case kI32:
115 assm->Usw(src.gp(), dst);
116 break;
117 case kI64:
118 case kRefNull:
119 case kRef:
120 assm->Usd(src.gp(), dst);
121 break;
122 case kF32:
123 assm->Uswc1(src.fp(), dst, t8);
124 break;
125 case kF64:
126 assm->Usdc1(src.fp(), dst, t8);
127 break;
128 case kS128:
129 assm->st_b(src.fp().toW(), dst);
130 break;
131 default:
132 UNREACHABLE();
133 }
134}
135
136inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
137 LiftoffRegister src, ValueKind kind) {
138 MemOperand dst(base, offset);
139 Store(assm, dst, src, kind);
140}
141
142inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind) {
143 switch (kind) {
144 case kI32:
145 assm->daddiu(sp, sp, -kSystemPointerSize);
146 assm->sw(reg.gp(), MemOperand(sp, 0));
147 break;
148 case kI64:
149 case kRefNull:
150 case kRef:
151 assm->push(reg.gp());
152 break;
153 case kF32:
154 assm->daddiu(sp, sp, -kSystemPointerSize);
155 assm->swc1(reg.fp(), MemOperand(sp, 0));
156 break;
157 case kF64:
158 assm->daddiu(sp, sp, -kSystemPointerSize);
159 assm->Sdc1(reg.fp(), MemOperand(sp, 0));
160 break;
161 case kS128:
162 assm->daddiu(sp, sp, -kSystemPointerSize * 2);
163 assm->st_b(reg.fp().toW(), MemOperand(sp, 0));
164 break;
165 default:
166 UNREACHABLE();
167 }
168}
169
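// Illustrative note (not part of the original V8 source): push() above always
// reserves a full kSystemPointerSize (8-byte) slot, even for 4-byte kI32/kF32
// values, so sp stays pointer-aligned; kS128 values reserve two such slots
// (16 bytes).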
170#if defined(V8_TARGET_BIG_ENDIAN)
171inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst,
172 LoadType type, LiftoffRegList pinned) {
173 bool is_float = false;
174 LiftoffRegister tmp = dst;
175 switch (type.value()) {
176 case LoadType::kI64Load8U:
177 case LoadType::kI64Load8S:
178 case LoadType::kI32Load8U:
179 case LoadType::kI32Load8S:
180 // No need to change endianness for byte size.
181 return;
182 case LoadType::kF32Load:
183 is_float = true;
184 tmp = assm->GetUnusedRegister(kGpReg, pinned);
185 assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, dst);
186 [[fallthrough]];
187 case LoadType::kI64Load32U:
188 assm->MacroAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 4);
189 break;
190 case LoadType::kI32Load:
191 case LoadType::kI64Load32S:
192 assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
193 break;
194 case LoadType::kI32Load16S:
195 case LoadType::kI64Load16S:
196 assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
197 break;
198 case LoadType::kI32Load16U:
199 case LoadType::kI64Load16U:
200 assm->MacroAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2);
201 break;
202 case LoadType::kF64Load:
203 is_float = true;
204 tmp = assm->GetUnusedRegister(kGpReg, pinned);
205 assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, dst);
206 [[fallthrough]];
207 case LoadType::kI64Load:
208 assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
209 break;
210 default:
211 UNREACHABLE();
212 }
213
214 if (is_float) {
215 switch (type.value()) {
216 case LoadType::kF32Load:
217 assm->emit_type_conversion(kExprF32ReinterpretI32, dst, tmp);
218 break;
219 case LoadType::kF64Load:
220 assm->emit_type_conversion(kExprF64ReinterpretI64, dst, tmp);
221 break;
222 default:
223 UNREACHABLE();
224 }
225 }
226}
227
228inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src,
229 StoreType type, LiftoffRegList pinned) {
230 bool is_float = false;
231 LiftoffRegister tmp = src;
232 switch (type.value()) {
233 case StoreType::kI64Store8:
234 case StoreType::kI32Store8:
235 // No need to change endianness for byte size.
236 return;
237 case StoreType::kF32Store:
238 is_float = true;
239 tmp = assm->GetUnusedRegister(kGpReg, pinned);
240 assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, src);
241 [[fallthrough]];
242 case StoreType::kI32Store:
243 assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
244 break;
245 case StoreType::kI32Store16:
246 assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
247 break;
248 case StoreType::kF64Store:
249 is_float = true;
250 tmp = assm->GetUnusedRegister(kGpReg, pinned);
251 assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, src);
252 [[fallthrough]];
253 case StoreType::kI64Store:
254 assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
255 break;
256 case StoreType::kI64Store32:
257 assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
258 break;
259 case StoreType::kI64Store16:
260 assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
261 break;
262 default:
263 UNREACHABLE();
264 }
265
266 if (is_float) {
267 switch (type.value()) {
268 case StoreType::kF32Store:
269 assm->emit_type_conversion(kExprF32ReinterpretI32, src, tmp);
270 break;
271 case StoreType::kF64Store:
272 assm->emit_type_conversion(kExprF64ReinterpretI64, src, tmp);
273 break;
274 default:
275 UNREACHABLE();
276 }
277 }
278}
279#endif // V8_TARGET_BIG_ENDIAN
280
283
284} // namespace liftoff
285
286int LiftoffAssembler::PrepareStackFrame() {
 287 int offset = pc_offset();
288 // When the frame size is bigger than 4KB, we need seven instructions for
289 // stack checking, so we reserve space for this case.
290 daddiu(sp, sp, 0);
291 nop();
292 nop();
293 nop();
294 nop();
295 nop();
296 nop();
297 return offset;
298}
299
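// Illustrative note (not part of the original V8 source): the seven
// instructions reserved above (one daddiu plus six nops, i.e. 7 * kInstrSize
// bytes) act as a placeholder that PatchPrepareStackFrame() below later
// overwrites in place:
//   frame_size <  4 KB: a single Daddu(sp, sp, -frame_size); the remaining
//                       nops stay and are executed harmlessly.
//   frame_size >= 4 KB: a long branch to out-of-line code that performs the
//                       stack check, allocates the frame, and branches back
//                       to offset + 7 * kInstrSize, just past the placeholder.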
300void LiftoffAssembler::CallFrameSetupStub(int declared_function_index) {
 301// The standard library used by gcc tryjobs does not consider `std::find` to be
302// `constexpr`, so wrap it in a `#ifdef __clang__` block.
303#ifdef __clang__
304 static_assert(std::find(std::begin(wasm::kGpParamRegisters),
305 std::end(wasm::kGpParamRegisters),
306 kLiftoffFrameSetupFunctionReg) ==
307 std::end(wasm::kGpParamRegisters));
308#endif
309
310 // On MIPS64, we must push at least {ra} before calling the stub, otherwise
311 // it would get clobbered with no possibility to recover it. So just set
312 // up the frame here.
313 EnterFrame(StackFrame::WASM);
314 LoadConstant(LiftoffRegister(kLiftoffFrameSetupFunctionReg),
315 WasmValue(declared_function_index));
316 CallBuiltin(Builtin::kWasmLiftoffFrameSetup);
317}
318
319void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
320 int stack_param_delta) {
321 UseScratchRegisterScope temps(this);
322 Register scratch = temps.Acquire();
323
324 // Push the return address and frame pointer to complete the stack frame.
325 Ld(scratch, MemOperand(fp, 8));
326 Push(scratch);
327 Ld(scratch, MemOperand(fp, 0));
328 Push(scratch);
329
330 // Shift the whole frame upwards.
331 int slot_count = num_callee_stack_params + 2;
332 for (int i = slot_count - 1; i >= 0; --i) {
333 Ld(scratch, MemOperand(sp, i * 8));
334 Sd(scratch, MemOperand(fp, (i - stack_param_delta) * 8));
335 }
336
337 // Set the new stack and frame pointer.
338 daddiu(sp, fp, -stack_param_delta * 8);
339 Pop(ra, fp);
340}
341
343
344void LiftoffAssembler::PatchPrepareStackFrame(
 345 int offset, SafepointTableBuilder* safepoint_table_builder,
346 bool feedback_vector_slot, size_t stack_param_slots) {
347 // The frame_size includes the frame marker and the instance slot. Both are
348 // pushed as part of frame construction, so we don't need to allocate memory
349 // for them anymore.
350 int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
351 // The frame setup builtin also pushes the feedback vector.
352 if (feedback_vector_slot) {
353 frame_size -= kSystemPointerSize;
354 }
355
356 // We can't run out of space, just pass anything big enough to not cause the
357 // assembler to try to grow the buffer.
358 constexpr int kAvailableSpace = 256;
359 MacroAssembler patching_assembler(
360 nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
361 ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
362
363 if (V8_LIKELY(frame_size < 4 * KB)) {
364 // This is the standard case for small frames: just subtract from SP and be
365 // done with it.
366 patching_assembler.Daddu(sp, sp, Operand(-frame_size));
367 return;
368 }
369
370 // The frame size is bigger than 4KB, so we might overflow the available stack
371 // space if we first allocate the frame and then do the stack check (we will
372 // need some remaining stack space for throwing the exception). That's why we
373 // check the available stack space before we allocate the frame. To do this we
374 // replace the {__ Daddu(sp, sp, -frame_size)} with a jump to OOL code that
375 // does this "extended stack check".
376 //
377 // The OOL code can simply be generated here with the normal assembler,
378 // because all other code generation, including OOL code, has already finished
379 // when {PatchPrepareStackFrame} is called. The function prologue then jumps
380 // to the current {pc_offset()} to execute the OOL code for allocating the
381 // large frame.
382 // Emit the unconditional branch in the function prologue (from {offset} to
383 // {pc_offset()}).
384
385 int imm32 = pc_offset() - offset - 3 * kInstrSize;
386 patching_assembler.BranchLong(imm32);
387
388 // If the frame is bigger than the stack, we throw the stack overflow
389 // exception unconditionally. Thereby we can avoid the integer overflow
390 // check in the condition code.
391 RecordComment("OOL: stack check for large frame");
392 Label continuation;
393 if (frame_size < v8_flags.stack_size * 1024) {
394 Register stack_limit = kScratchReg;
396 Daddu(stack_limit, stack_limit, Operand(frame_size));
397 Branch(&continuation, uge, sp, Operand(stack_limit));
398 }
399
400 Call(static_cast<Address>(Builtin::kWasmStackOverflow),
402 // The call will not return; just define an empty safepoint.
403 safepoint_table_builder->DefineSafepoint(this);
404 if (v8_flags.debug_code) stop();
405
405
 406 bind(&continuation);
407
408 // Now allocate the stack space. Note that this might do more than just
409 // decrementing the SP;
410 Daddu(sp, sp, Operand(-frame_size));
411
412 // Jump back to the start of the function, from {pc_offset()} to
413 // right after the reserved space for the {__ Daddu(sp, sp, -framesize)}
414 // (which is a Branch now).
415 int func_start_offset = offset + 7 * kInstrSize;
416 imm32 = func_start_offset - pc_offset() - 3 * kInstrSize;
417 BranchLong(imm32);
418}
419
421
423
424// static
427}
428
429int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
 430 switch (kind) {
431 case kS128:
432 return value_kind_size(kind);
433 default:
434 return kStackSlotSize;
435 }
436}
437
438bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
 439 return kind == kS128 || is_reference(kind);
440}
441
442void LiftoffAssembler::CheckTierUp(int declared_func_index, int budget_used,
443 Label* ool_label,
444 const FreezeCacheState& frozen) {
445 Register budget_array = kScratchReg;
446
448 if (instance_data == no_reg) {
449 instance_data = budget_array; // Reuse the scratch register.
451 }
452
453 constexpr int kArrayOffset = wasm::ObjectAccess::ToTagged(
454 WasmTrustedInstanceData::kTieringBudgetArrayOffset);
455 Ld(budget_array, MemOperand(instance_data, kArrayOffset));
456
457 int budget_arr_offset = kInt32Size * declared_func_index;
458
459 Register budget = kScratchReg2;
460 MemOperand budget_addr(budget_array, budget_arr_offset);
461 Lw(budget, budget_addr);
462 Subu(budget, budget, budget_used);
463 Sw(budget, budget_addr);
464
465 Branch(ool_label, less, budget, Operand(zero_reg));
466}
467
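// Illustrative summary (not part of the original V8 source) of the sequence
// emitted by CheckTierUp above:
//   budget_array = instance_data->tiering_budget_array
//   budget       = budget_array[declared_func_index]   // 32-bit entries
//   budget      -= budget_used
//   budget_array[declared_func_index] = budget
//   if (budget < 0) goto ool_label                     // trigger tier-up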
469
471 // TODO(mips64): 42202153
473}
474
475void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
476 switch (value.type().kind()) {
477 case kI32:
478 MacroAssembler::li(reg.gp(), Operand(value.to_i32()));
479 break;
480 case kI64:
481 MacroAssembler::li(reg.gp(), Operand(value.to_i64()));
482 break;
483 case kF32:
484 MacroAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
485 break;
486 case kF64:
487 MacroAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
488 break;
489 default:
490 UNREACHABLE();
491 }
492}
493
496}
497
498void LiftoffAssembler::LoadTrustedPointer(Register dst, Register src_addr,
499 int offset, IndirectPointerTag tag) {
500 static_assert(!V8_ENABLE_SANDBOX_BOOL);
501 static_assert(!COMPRESS_POINTERS_BOOL);
502 Ld(dst, MemOperand{src_addr, offset});
503}
504
505void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
506 int offset, int size) {
507 DCHECK_LE(0, offset);
508 switch (size) {
509 case 1:
510 Lb(dst, MemOperand(instance, offset));
511 break;
512 case 4:
513 Lw(dst, MemOperand(instance, offset));
514 break;
515 case 8:
516 Ld(dst, MemOperand(instance, offset));
517 break;
518 default:
520 }
521}
522
524 Register instance,
525 int32_t offset) {
526 static_assert(kTaggedSize == kSystemPointerSize);
527 Ld(dst, MemOperand(instance, offset));
528}
529
530void LiftoffAssembler::SpillInstanceData(Register instance) {
532}
533
535
536void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
537 Register offset_reg,
538 int32_t offset_imm,
539 uint32_t* protected_load_pc,
540 bool needs_shift) {
541 static_assert(kTaggedSize == kInt64Size);
542 unsigned shift_amount = !needs_shift ? 0 : 3;
543 MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm,
544 false, shift_amount);
545 Ld(dst, src_op);
546
547 // Since LoadTaggedField might start with an instruction loading an immediate
548 // argument to a register, we have to compute the {protected_load_pc} after
549 // calling it.
550 if (protected_load_pc) {
551 *protected_load_pc = pc_offset() - kInstrSize;
552 }
553}
554
555void LiftoffAssembler::LoadProtectedPointer(Register dst, Register src_addr,
556 int32_t offset_imm) {
557 static_assert(!V8_ENABLE_SANDBOX_BOOL);
558 LoadTaggedPointer(dst, src_addr, no_reg, offset_imm);
559}
560
561void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
562 int32_t offset_imm) {
563 MemOperand src_op = liftoff::GetMemOp(this, src_addr, no_reg, offset_imm);
564 Ld(dst, src_op);
565}
566
567void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
568 Register offset_reg,
569 int32_t offset_imm, Register src,
570 LiftoffRegList pinned,
571 uint32_t* protected_store_pc,
572 SkipWriteBarrier skip_write_barrier) {
573 static_assert(kTaggedSize == kInt64Size);
574 Register scratch = kScratchReg2;
575 MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
576 Sd(src, dst_op);
577
578 // Since StoreTaggedField might start with an instruction loading an immediate
 579 // argument to a register, we have to compute the {protected_store_pc} after
580 // calling it.
581 if (protected_store_pc) {
582 *protected_store_pc = pc_offset() - kInstrSize;
583 }
584
585 if (skip_write_barrier || v8_flags.disable_write_barriers) return;
586
587 Label exit;
588 CheckPageFlag(dst_addr, scratch,
590 JumpIfSmi(src, &exit);
592 eq, &exit);
593 Daddu(scratch, dst_op.rm(), dst_op.offset());
595 StubCallMode::kCallWasmRuntimeStub);
596 bind(&exit);
597}
598
599void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
600 Register offset_reg, uintptr_t offset_imm,
601 LoadType type, uint32_t* protected_load_pc,
602 bool is_load_mem, bool i64_offset,
603 bool needs_shift) {
604 unsigned shift_amount = needs_shift ? type.size_log_2() : 0;
605 MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm,
606 i64_offset, shift_amount);
607
608 switch (type.value()) {
609 case LoadType::kI32Load8U:
610 case LoadType::kI64Load8U:
611 Lbu(dst.gp(), src_op);
612 break;
613 case LoadType::kI32Load8S:
614 case LoadType::kI64Load8S:
615 Lb(dst.gp(), src_op);
616 break;
617 case LoadType::kI32Load16U:
618 case LoadType::kI64Load16U:
619 MacroAssembler::Ulhu(dst.gp(), src_op);
620 break;
621 case LoadType::kI32Load16S:
622 case LoadType::kI64Load16S:
623 MacroAssembler::Ulh(dst.gp(), src_op);
624 break;
625 case LoadType::kI64Load32U:
626 MacroAssembler::Ulwu(dst.gp(), src_op);
627 break;
628 case LoadType::kI32Load:
629 case LoadType::kI64Load32S:
630 MacroAssembler::Ulw(dst.gp(), src_op);
631 break;
632 case LoadType::kI64Load:
633 MacroAssembler::Uld(dst.gp(), src_op);
634 break;
635 case LoadType::kF32Load:
636 MacroAssembler::Ulwc1(dst.fp(), src_op, t8);
637 break;
638 case LoadType::kF32LoadF16:
640 break;
641 case LoadType::kF64Load:
642 MacroAssembler::Uldc1(dst.fp(), src_op, t8);
643 break;
644 case LoadType::kS128Load:
645 MacroAssembler::ld_b(dst.fp().toW(), src_op);
646 break;
647 default:
648 UNREACHABLE();
649 }
650
651#if defined(V8_TARGET_BIG_ENDIAN)
652 if (is_load_mem) {
653 pinned.set(src_op.rm());
654 liftoff::ChangeEndiannessLoad(this, dst, type, pinned);
655 }
656#endif
657 // Since load macros might start with an instruction loading an immediate
658 // argument to a register, we have to compute the {protected_load_pc} after
659 // calling them.
660 if (protected_load_pc) {
661 *protected_load_pc = pc_offset() - kInstrSize;
662 }
663}
664
665void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
666 uintptr_t offset_imm, LiftoffRegister src,
667 StoreType type, LiftoffRegList pinned,
668 uint32_t* protected_store_pc, bool is_store_mem,
669 bool i64_offset) {
670 MemOperand dst_op =
671 liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, i64_offset);
672
673#if defined(V8_TARGET_BIG_ENDIAN)
674 if (is_store_mem) {
675 pinned.set(dst_op.rm());
676 LiftoffRegister tmp = kScratchReg2;
677 // Save original value.
678 Move(tmp, src, type.value_type());
679
680 src = tmp;
681 pinned.set(tmp);
682 liftoff::ChangeEndiannessStore(this, src, type, pinned);
683 }
684#endif
685
686 switch (type.value()) {
687 case StoreType::kI32Store8:
688 case StoreType::kI64Store8:
689 Sb(src.gp(), dst_op);
690 break;
691 case StoreType::kI32Store16:
692 case StoreType::kI64Store16:
693 MacroAssembler::Ush(src.gp(), dst_op, t8);
694 break;
695 case StoreType::kI32Store:
696 case StoreType::kI64Store32:
697 MacroAssembler::Usw(src.gp(), dst_op);
698 break;
699 case StoreType::kI64Store:
700 MacroAssembler::Usd(src.gp(), dst_op);
701 break;
702 case StoreType::kF32Store:
703 MacroAssembler::Uswc1(src.fp(), dst_op, t8);
704 break;
705 case StoreType::kF32StoreF16:
707 break;
708 case StoreType::kF64Store:
709 MacroAssembler::Usdc1(src.fp(), dst_op, t8);
710 break;
711 case StoreType::kS128Store:
712 MacroAssembler::st_b(src.fp().toW(), dst_op);
713 break;
714 default:
715 UNREACHABLE();
716 }
717
718 // Since store macros might start with an instruction loading an immediate
719 // argument to a register, we have to compute the {protected_store_pc} after
720 // calling them.
721 if (protected_store_pc) {
722 *protected_store_pc = pc_offset() - kInstrSize;
723 }
724}
725
726void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
727 Register offset_reg, uintptr_t offset_imm,
728 LoadType type, LiftoffRegList pinned,
729 bool i64_offset) {
730 UseScratchRegisterScope temps(this);
731 MemOperand src_op =
732 liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm, i64_offset);
733 switch (type.value()) {
734 case LoadType::kI32Load8U:
735 case LoadType::kI64Load8U: {
736 Lbu(dst.gp(), src_op);
737 sync();
738 return;
739 }
740 case LoadType::kI32Load16U:
741 case LoadType::kI64Load16U: {
742 Lhu(dst.gp(), src_op);
743 sync();
744 return;
745 }
746 case LoadType::kI32Load: {
747 Lw(dst.gp(), src_op);
748 sync();
749 return;
750 }
751 case LoadType::kI64Load32U: {
752 Lwu(dst.gp(), src_op);
753 sync();
754 return;
755 }
756 case LoadType::kI64Load: {
757 Ld(dst.gp(), src_op);
758 sync();
759 return;
760 }
761 default:
762 UNREACHABLE();
763 }
764}
765
766void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
767 uintptr_t offset_imm, LiftoffRegister src,
768 StoreType type, LiftoffRegList pinned,
769 bool i64_offset) {
770 UseScratchRegisterScope temps(this);
771 MemOperand dst_op =
772 liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, i64_offset);
773 switch (type.value()) {
774 case StoreType::kI64Store8:
775 case StoreType::kI32Store8: {
776 sync();
777 Sb(src.gp(), dst_op);
778 return;
779 }
780 case StoreType::kI64Store16:
781 case StoreType::kI32Store16: {
782 sync();
783 Sh(src.gp(), dst_op);
784 return;
785 }
786 case StoreType::kI64Store32:
787 case StoreType::kI32Store: {
788 sync();
789 Sw(src.gp(), dst_op);
790 return;
791 }
792 case StoreType::kI64Store: {
793 sync();
794 Sd(src.gp(), dst_op);
795 return;
796 }
797 default:
798 UNREACHABLE();
799 }
800}
801
802#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr) \
803 do { \
804 Label binop; \
805 sync(); \
806 bind(&binop); \
807 load_linked(result.gp(), MemOperand(temp0, 0)); \
808 bin_instr(temp1, result.gp(), Operand(value.gp())); \
809 store_conditional(temp1, MemOperand(temp0, 0)); \
810 BranchShort(&binop, eq, temp1, Operand(zero_reg)); \
811 sync(); \
812 } while (0)
813
814#define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, size, \
815 bin_instr, aligned) \
816 do { \
817 Label binop; \
818 andi(temp3, temp0, aligned); \
819 Dsubu(temp0, temp0, Operand(temp3)); \
820 sll(temp3, temp3, 3); \
821 sync(); \
822 bind(&binop); \
823 load_linked(temp1, MemOperand(temp0, 0)); \
824 ExtractBits(result.gp(), temp1, temp3, size, false); \
825 bin_instr(temp2, result.gp(), value.gp()); \
826 InsertBits(temp1, temp2, temp3, size); \
827 store_conditional(temp1, MemOperand(temp0, 0)); \
828 BranchShort(&binop, eq, temp1, Operand(zero_reg)); \
829 sync(); \
830 } while (0)
831
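// Illustrative walk-through (not part of the original V8 source) of the
// sub-word LL/SC technique used by ASSEMBLE_ATOMIC_BINOP_EXT above, e.g. an
// 8-bit add on a cell whose address in temp0 ends in ...5 ("aligned" mask 3):
//   andi  temp3, temp0, 3          // temp3 = 1: byte index inside the word
//   Dsubu temp0, temp0, temp3      // temp0 now word-aligned
//   sll   temp3, temp3, 3          // temp3 = 8: bit position of the byte
//   loop: Ll   temp1, 0(temp0)     // load-linked the whole 32-bit word
//         ExtractBits(result, temp1, temp3, 8)   // old byte -> result
//         Addu(temp2, result, value)             // apply the operation
//         InsertBits(temp1, temp2, temp3, 8)     // splice the new byte back
//         Sc   temp1, 0(temp0)     // store-conditional; temp1 == 0 on failure
//         BranchShort(&loop, eq, temp1, zero_reg)  // retry until SC succeeds
// The 64-bit cases use mask 7 with Lld/Scd instead of Ll/Sc.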
832#define ATOMIC_BINOP_CASE(name, inst32, inst64) \
833 void LiftoffAssembler::Atomic##name( \
834 Register dst_addr, Register offset_reg, uintptr_t offset_imm, \
835 LiftoffRegister value, LiftoffRegister result, StoreType type, \
836 bool i64_offset) { \
837 LiftoffRegList pinned{dst_addr, value, result}; \
838 if (offset_reg != no_reg) pinned.set(offset_reg); \
839 Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
840 Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
841 Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
842 Register temp3 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); \
843 MemOperand dst_op = \
844 liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, i64_offset); \
845 Daddu(temp0, dst_op.rm(), dst_op.offset()); \
846 switch (type.value()) { \
847 case StoreType::kI64Store8: \
848 ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, 8, inst64, 7); \
849 break; \
850 case StoreType::kI32Store8: \
851 ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, 8, inst32, 3); \
852 break; \
853 case StoreType::kI64Store16: \
854 ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, 16, inst64, 7); \
855 break; \
856 case StoreType::kI32Store16: \
857 ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, 16, inst32, 3); \
858 break; \
859 case StoreType::kI64Store32: \
860 ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, 32, inst64, 7); \
861 break; \
862 case StoreType::kI32Store: \
863 ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst32); \
864 break; \
865 case StoreType::kI64Store: \
866 ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst64); \
867 break; \
868 default: \
869 UNREACHABLE(); \
870 } \
871 }
872
873ATOMIC_BINOP_CASE(Add, Addu, Daddu)
874ATOMIC_BINOP_CASE(Sub, Subu, Dsubu)
875ATOMIC_BINOP_CASE(And, And, And)
876ATOMIC_BINOP_CASE(Or, Or, Or)
877ATOMIC_BINOP_CASE(Xor, Xor, Xor)
878#undef ASSEMBLE_ATOMIC_BINOP
879#undef ASSEMBLE_ATOMIC_BINOP_EXT
880#undef ATOMIC_BINOP_CASE
881
882#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_linked, store_conditional) \
883 do { \
884 Label exchange; \
885 sync(); \
886 bind(&exchange); \
887 load_linked(result.gp(), MemOperand(temp0, 0)); \
888 mov(temp1, value.gp()); \
889 store_conditional(temp1, MemOperand(temp0, 0)); \
890 BranchShort(&exchange, eq, temp1, Operand(zero_reg)); \
891 sync(); \
892 } while (0)
893
894#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(load_linked, store_conditional, \
895 size, aligned) \
896 do { \
897 Label exchange; \
898 andi(temp1, temp0, aligned); \
899 Dsubu(temp0, temp0, Operand(temp1)); \
900 sll(temp1, temp1, 3); \
901 sync(); \
902 bind(&exchange); \
903 load_linked(temp2, MemOperand(temp0, 0)); \
904 ExtractBits(result.gp(), temp2, temp1, size, false); \
905 InsertBits(temp2, value.gp(), temp1, size); \
906 store_conditional(temp2, MemOperand(temp0, 0)); \
907 BranchShort(&exchange, eq, temp2, Operand(zero_reg)); \
908 sync(); \
909 } while (0)
910
911void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
912 uintptr_t offset_imm,
913 LiftoffRegister value,
914 LiftoffRegister result, StoreType type,
915 bool i64_offset) {
916 LiftoffRegList pinned{dst_addr, value, result};
917 if (offset_reg != no_reg) pinned.set(offset_reg);
918 Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
919 Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
920 Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
921 MemOperand dst_op =
922 liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, i64_offset);
923 Daddu(temp0, dst_op.rm(), dst_op.offset());
924 switch (type.value()) {
925 case StoreType::kI64Store8:
927 break;
928 case StoreType::kI32Store8:
930 break;
931 case StoreType::kI64Store16:
933 break;
934 case StoreType::kI32Store16:
936 break;
937 case StoreType::kI64Store32:
939 break;
940 case StoreType::kI32Store:
942 break;
943 case StoreType::kI64Store:
945 break;
946 default:
947 UNREACHABLE();
948 }
949}
950#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
951#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT
952
953#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked, \
954 store_conditional) \
955 do { \
956 Label compareExchange; \
957 Label exit; \
958 sync(); \
959 bind(&compareExchange); \
960 load_linked(result.gp(), MemOperand(temp0, 0)); \
961 BranchShort(&exit, ne, expected.gp(), Operand(result.gp())); \
962 mov(temp2, new_value.gp()); \
963 store_conditional(temp2, MemOperand(temp0, 0)); \
964 BranchShort(&compareExchange, eq, temp2, Operand(zero_reg)); \
965 bind(&exit); \
966 sync(); \
967 } while (0)
968
969#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT( \
970 load_linked, store_conditional, size, aligned) \
971 do { \
972 Label compareExchange; \
973 Label exit; \
974 andi(temp1, temp0, aligned); \
975 Dsubu(temp0, temp0, Operand(temp1)); \
976 sll(temp1, temp1, 3); \
977 sync(); \
978 bind(&compareExchange); \
979 load_linked(temp2, MemOperand(temp0, 0)); \
980 ExtractBits(result.gp(), temp2, temp1, size, false); \
981 ExtractBits(temp2, expected.gp(), zero_reg, size, false); \
982 BranchShort(&exit, ne, temp2, Operand(result.gp())); \
983 InsertBits(temp2, new_value.gp(), temp1, size); \
984 store_conditional(temp2, MemOperand(temp0, 0)); \
985 BranchShort(&compareExchange, eq, temp2, Operand(zero_reg)); \
986 bind(&exit); \
987 sync(); \
988 } while (0)
989
990void LiftoffAssembler::AtomicCompareExchange(
 991 Register dst_addr, Register offset_reg, uintptr_t offset_imm,
992 LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
993 StoreType type, bool i64_offset) {
994 LiftoffRegList pinned{dst_addr, expected, new_value, result};
995 if (offset_reg != no_reg) pinned.set(offset_reg);
996 Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
997 Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
998 Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
999 MemOperand dst_op =
1000 liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, i64_offset);
1001 Daddu(temp0, dst_op.rm(), dst_op.offset());
1002 switch (type.value()) {
1003 case StoreType::kI64Store8:
1005 break;
1006 case StoreType::kI32Store8:
1008 break;
1009 case StoreType::kI64Store16:
1011 break;
1012 case StoreType::kI32Store16:
1014 break;
1015 case StoreType::kI64Store32:
1017 break;
1018 case StoreType::kI32Store:
1020 break;
1021 case StoreType::kI64Store:
1023 break;
1024 default:
1025 UNREACHABLE();
1026 }
1027}
1028#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
1029#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT
1030
1032
1033void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
1034 uint32_t caller_slot_idx,
1035 ValueKind kind) {
1036 MemOperand src(fp, kSystemPointerSize * (caller_slot_idx + 1));
1037 liftoff::Load(this, dst, src, kind);
1038}
1039
1040void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
1041 uint32_t caller_slot_idx,
 1042 ValueKind kind,
 1043 Register frame_pointer) {
1044 int32_t offset = kSystemPointerSize * (caller_slot_idx + 1);
1045 liftoff::Store(this, frame_pointer, offset, src, kind);
1046}
1047
1048void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
1049 ValueKind kind) {
1050 liftoff::Load(this, dst, MemOperand(sp, offset), kind);
1051}
1052
1053void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
1054 ValueKind kind) {
1055 DCHECK_NE(dst_offset, src_offset);
1056 Register scratch = kScratchReg;
1057
1058 switch (kind) {
1059 case kI32:
1060 case kF32:
1061 Lw(scratch, liftoff::GetStackSlot(src_offset));
1062 Sw(scratch, liftoff::GetStackSlot(dst_offset));
1063 break;
1064 case kI64:
1065 case kRefNull:
1066 case kRef:
1067 case kF64:
1068 Ld(scratch, liftoff::GetStackSlot(src_offset));
1069 Sd(scratch, liftoff::GetStackSlot(dst_offset));
1070 break;
1071 case kS128:
1072 default:
1073 UNREACHABLE();
1074 }
1075}
1076
1077void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
1078 DCHECK_NE(dst, src);
1079 // TODO(ksreten): Handle different sizes here.
1080 MacroAssembler::Move(dst, src);
1081}
1082
1083void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
 1084 ValueKind kind) {
1085 DCHECK_NE(dst, src);
1086 if (kind != kS128) {
1087 MacroAssembler::Move(dst, src);
1088 } else {
1089 MacroAssembler::move_v(dst.toW(), src.toW());
1090 }
1091}
1092
1093void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
 1094 RecordUsedSpillOffset(offset);
 1095 MemOperand dst = liftoff::GetStackSlot(offset);
 1096 switch (kind) {
1097 case kI32:
1098 Sw(reg.gp(), dst);
1099 break;
1100 case kI64:
1101 case kRef:
1102 case kRefNull:
1103 Sd(reg.gp(), dst);
1104 break;
1105 case kF32:
1106 Swc1(reg.fp(), dst);
1107 break;
1108 case kF64:
1109 MacroAssembler::Sdc1(reg.fp(), dst);
1110 break;
1111 case kS128:
1112 MacroAssembler::st_b(reg.fp().toW(), dst);
1113 break;
1114 default:
1115 UNREACHABLE();
1116 }
1117}
1118
1119void LiftoffAssembler::Spill(int offset, WasmValue value) {
 1120 RecordUsedSpillOffset(offset);
 1121 MemOperand dst = liftoff::GetStackSlot(offset);
 1122 switch (value.type().kind()) {
1123 case kI32: {
1124 MacroAssembler::li(kScratchReg, Operand(value.to_i32()));
1125 Sw(kScratchReg, dst);
1126 break;
1127 }
1128 case kI64:
1129 case kRef:
1130 case kRefNull: {
1131 MacroAssembler::li(kScratchReg, value.to_i64());
1132 Sd(kScratchReg, dst);
1133 break;
1134 }
1135 default:
1136 // kWasmF32 and kWasmF64 are unreachable, since those
1137 // constants are not tracked.
1138 UNREACHABLE();
1139 }
1140}
1141
1142void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
 1143 MemOperand src = liftoff::GetStackSlot(offset);
 1144 switch (kind) {
1145 case kI32:
1146 Lw(reg.gp(), src);
1147 break;
1148 case kI64:
1149 case kRef:
1150 case kRefNull:
1151 Ld(reg.gp(), src);
1152 break;
1153 case kF32:
1154 Lwc1(reg.fp(), src);
1155 break;
1156 case kF64:
1157 MacroAssembler::Ldc1(reg.fp(), src);
1158 break;
1159 case kS128:
1160 MacroAssembler::ld_b(reg.fp().toW(), src);
1161 break;
1162 default:
1163 UNREACHABLE();
1164 }
1165}
1166
1168 UNREACHABLE();
1169}
1170
1171void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
 1172 DCHECK_LT(0, size);
1174
1175 if (size <= 12 * kStackSlotSize) {
1176 // Special straight-line code for up to 12 slots. Generates one
1177 // instruction per slot (<= 12 instructions total).
1178 uint32_t remainder = size;
 1179 for (; remainder >= kStackSlotSize; remainder -= kStackSlotSize) {
 1180 Sd(zero_reg, liftoff::GetStackSlot(start + remainder));
 1181 }
1182 DCHECK(remainder == 4 || remainder == 0);
1183 if (remainder) {
 1184 Sw(zero_reg, liftoff::GetStackSlot(start + remainder));
 1185 }
1186 } else {
1187 // General case for bigger counts (12 instructions).
1188 // Use a0 for start address (inclusive), a1 for end address (exclusive).
1189 Push(a1, a0);
1190 Daddu(a0, fp, Operand(-start - size));
1191 Daddu(a1, fp, Operand(-start));
1192
1193 Label loop;
1194 bind(&loop);
1195 Sd(zero_reg, MemOperand(a0));
1196 daddiu(a0, a0, kSystemPointerSize);
1197 BranchShort(&loop, ne, a0, Operand(a1));
1198
1199 Pop(a1, a0);
1200 }
1201}
1202
1203void LiftoffAssembler::LoadSpillAddress(Register dst, int offset,
1204 ValueKind /* kind */) {
1205 Dsubu(dst, fp, Operand(offset));
1206}
1207
1208void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) {
1209 MacroAssembler::Dclz(dst.gp(), src.gp());
1210}
1211
1212void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
1213 MacroAssembler::Dctz(dst.gp(), src.gp());
1214}
1215
1216bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
1217 LiftoffRegister src) {
1218 MacroAssembler::Dpopcnt(dst.gp(), src.gp());
1219 return true;
1220}
1221
1222void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) {
1223 UseScratchRegisterScope temps(this);
1224 Register scratch = temps.Acquire();
1225 SmiUntag(scratch, MemOperand(dst.gp(), offset));
1226 Daddu(scratch, scratch, Operand(1));
1227 SmiTag(scratch);
1228 Sd(scratch, MemOperand(dst.gp(), offset));
1229}
1230
1231void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
1232 MacroAssembler::Mul(dst, lhs, rhs);
1233}
1234
1235void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
1236 Label* trap_div_by_zero,
1237 Label* trap_div_unrepresentable) {
1238 MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
1239
1240 // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable.
1241 rotr(kScratchReg, lhs, 31);
1245 MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg2,
1246 Operand(zero_reg));
1247
1248 MacroAssembler::Div(dst, lhs, rhs);
1249}
1250
1251void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
1252 Label* trap_div_by_zero) {
1253 MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
1254 MacroAssembler::Divu(dst, lhs, rhs);
1255}
1256
1257void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
1258 Label* trap_div_by_zero) {
1259 MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
1260 MacroAssembler::Mod(dst, lhs, rhs);
1261}
1262
1263void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
1264 Label* trap_div_by_zero) {
1265 MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
1266 MacroAssembler::Modu(dst, lhs, rhs);
1267}
1268
1269#define I32_BINOP(name, instruction) \
1270 void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
1271 Register rhs) { \
1272 instruction(dst, lhs, rhs); \
1273 }
1274
1275// clang-format off
1276I32_BINOP(add, addu)
1277I32_BINOP(sub, subu)
1278I32_BINOP(and, and_)
1279I32_BINOP(or, or_)
1280I32_BINOP(xor, xor_)
1281// clang-format on
1282
1283#undef I32_BINOP
1284
1285#define I32_BINOP_I(name, instruction) \
1286 void LiftoffAssembler::emit_i32_##name##i(Register dst, Register lhs, \
1287 int32_t imm) { \
1288 instruction(dst, lhs, Operand(imm)); \
1289 }
1290
1291// clang-format off
1292I32_BINOP_I(add, Addu)
1293I32_BINOP_I(sub, Subu)
1294I32_BINOP_I(and, And)
1295I32_BINOP_I(or, Or)
1296I32_BINOP_I(xor, Xor)
1297// clang-format on
1298
1299#undef I32_BINOP_I
1300
1301void LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
1302 MacroAssembler::Clz(dst, src);
1303}
1304
1305void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
1306 MacroAssembler::Ctz(dst, src);
1307}
1308
1309bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
1310 MacroAssembler::Popcnt(dst, src);
1311 return true;
1312}
1313
1314#define I32_SHIFTOP(name, instruction) \
1315 void LiftoffAssembler::emit_i32_##name(Register dst, Register src, \
1316 Register amount) { \
1317 instruction(dst, src, amount); \
1318 }
1319#define I32_SHIFTOP_I(name, instruction) \
1320 I32_SHIFTOP(name, instruction##v) \
1321 void LiftoffAssembler::emit_i32_##name##i(Register dst, Register src, \
1322 int amount) { \
1323 instruction(dst, src, amount & 31); \
1324 }
1325
1326I32_SHIFTOP_I(shl, sll)
1327I32_SHIFTOP_I(sar, sra)
1328I32_SHIFTOP_I(shr, srl)
1329
1330#undef I32_SHIFTOP
1331#undef I32_SHIFTOP_I
1332
1333void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
1334 int64_t imm) {
1335 MacroAssembler::Daddu(dst.gp(), lhs.gp(), Operand(imm));
1336}
1337
1338void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
1339 LiftoffRegister rhs) {
1340 MacroAssembler::Dmul(dst.gp(), lhs.gp(), rhs.gp());
1341}
1342
1343void LiftoffAssembler::emit_i64_muli(LiftoffRegister dst, LiftoffRegister lhs,
1344 int32_t imm) {
1345 if (base::bits::IsPowerOfTwo(imm)) {
1347 return;
1348 }
1349 UseScratchRegisterScope temps(this);
1350 Register scratch = temps.Acquire();
1351 MacroAssembler::li(scratch, Operand(imm));
1352 MacroAssembler::Dmul(dst.gp(), lhs.gp(), scratch);
1353}
1354
1355bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
1356 LiftoffRegister rhs,
1357 Label* trap_div_by_zero,
1358 Label* trap_div_unrepresentable) {
1359 MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
1360
1361 // Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable.
1362 drotr32(kScratchReg, lhs.gp(), 31);
1365 daddu(kScratchReg2, kScratchReg2, rhs.gp());
1366 MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg2,
1367 Operand(zero_reg));
1368
1369 MacroAssembler::Ddiv(dst.gp(), lhs.gp(), rhs.gp());
1370 return true;
1371}
1372
1373bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
1374 LiftoffRegister rhs,
1375 Label* trap_div_by_zero) {
1376 MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
1377 MacroAssembler::Ddivu(dst.gp(), lhs.gp(), rhs.gp());
1378 return true;
1379}
1380
1381bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
1382 LiftoffRegister rhs,
1383 Label* trap_div_by_zero) {
1384 MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
1385 MacroAssembler::Dmod(dst.gp(), lhs.gp(), rhs.gp());
1386 return true;
1387}
1388
1389bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
1390 LiftoffRegister rhs,
1391 Label* trap_div_by_zero) {
1392 MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
1393 MacroAssembler::Dmodu(dst.gp(), lhs.gp(), rhs.gp());
1394 return true;
1395}
1396
1397#define I64_BINOP(name, instruction) \
1398 void LiftoffAssembler::emit_i64_##name( \
1399 LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
1400 instruction(dst.gp(), lhs.gp(), rhs.gp()); \
1401 }
1402
1403// clang-format off
1404I64_BINOP(add, daddu)
1405I64_BINOP(sub, dsubu)
1406I64_BINOP(and, and_)
1407I64_BINOP(or, or_)
1408I64_BINOP(xor, xor_)
1409// clang-format on
1410
1411#undef I64_BINOP
1412
1413#define I64_BINOP_I(name, instruction) \
1414 void LiftoffAssembler::emit_i64_##name##i( \
1415 LiftoffRegister dst, LiftoffRegister lhs, int32_t imm) { \
1416 instruction(dst.gp(), lhs.gp(), Operand(imm)); \
1417 }
1418
1419// clang-format off
1420I64_BINOP_I(and, And)
1421I64_BINOP_I(or, Or)
1422I64_BINOP_I(xor, Xor)
1423// clang-format on
1424
1425#undef I64_BINOP_I
1426
1427#define I64_SHIFTOP(name, instruction) \
1428 void LiftoffAssembler::emit_i64_##name( \
1429 LiftoffRegister dst, LiftoffRegister src, Register amount) { \
1430 instruction(dst.gp(), src.gp(), amount); \
1431 }
1432#define I64_SHIFTOP_I(name, instruction) \
1433 I64_SHIFTOP(name, instruction##v) \
1434 void LiftoffAssembler::emit_i64_##name##i(LiftoffRegister dst, \
1435 LiftoffRegister src, int amount) { \
1436 amount &= 63; \
1437 if (amount < 32) \
1438 instruction(dst.gp(), src.gp(), amount); \
1439 else \
1440 instruction##32(dst.gp(), src.gp(), amount - 32); \
1441 }
1442
1443I64_SHIFTOP_I(shl, dsll)
1444I64_SHIFTOP_I(sar, dsra)
1445I64_SHIFTOP_I(shr, dsrl)
1446
1447#undef I64_SHIFTOP
1448#undef I64_SHIFTOP_I
1449
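// Illustrative note (not part of the original V8 source): MIPS64 encodes
// immediate shift amounts 0-31 and 32-63 with different opcodes, which is
// what the amount < 32 split in I64_SHIFTOP_I above handles, e.g.
//   emit_i64_shli(dst, src, 5)   ->  dsll(dst.gp(), src.gp(), 5)
//   emit_i64_shli(dst, src, 40)  ->  dsll32(dst.gp(), src.gp(), 8)  // 40 - 32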
1450void LiftoffAssembler::emit_u32_to_uintptr(Register dst, Register src) {
1451 Dext(dst, src, 0, 32);
1452}
1453
1454void LiftoffAssembler::clear_i32_upper_half(Register dst) {
1455 // Don't need to clear the upper halves of i32 values for sandbox on MIPS64,
1456 // because we'll explicitly zero-extend their lower halves before using them
1457 // for memory accesses anyway.
1458}
1459
1460void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
 1461 MacroAssembler::Neg_s(dst, src);
1462}
1463
1464void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
 1465 MacroAssembler::Neg_d(dst, src);
1466}
1467
1468void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
 1469 DoubleRegister rhs) {
1470 Label ool, done;
1471 MacroAssembler::Float32Min(dst, lhs, rhs, &ool);
1472 Branch(&done);
1473
1474 bind(&ool);
1476 bind(&done);
1477}
1478
1479void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
 1480 DoubleRegister rhs) {
1481 Label ool, done;
1482 MacroAssembler::Float32Max(dst, lhs, rhs, &ool);
1483 Branch(&done);
1484
1485 bind(&ool);
1487 bind(&done);
1488}
1489
1490void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
 1491 DoubleRegister rhs) {
1492 if (CpuFeatures::IsSupported(MIPS_SIMD)) {
1493 DoubleRegister scratch = rhs;
1494 if (dst == rhs) {
1495 scratch = kScratchDoubleReg;
1496 Move_d(scratch, rhs);
1497 }
1498 if (dst != lhs) {
1499 Move_d(dst, lhs);
1500 }
1501 binsli_w(dst.toW(), scratch.toW(), 0);
1502 } else {
1503 UseScratchRegisterScope temps(this);
1504 Register scratch1 = temps.Acquire();
1505 Register scratch2 = temps.Acquire();
1506 mfc1(scratch1, lhs);
1507 mfc1(scratch2, rhs);
1508 srl(scratch2, scratch2, 31);
1509 Ins(scratch1, scratch2, 31, 1);
1510 mtc1(scratch1, dst);
1511 }
1512}
1513
1514void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
 1515 DoubleRegister rhs) {
1516 Label ool, done;
1517 MacroAssembler::Float64Min(dst, lhs, rhs, &ool);
1518 Branch(&done);
1519
1520 bind(&ool);
1522 bind(&done);
1523}
1524
1525void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
 1526 DoubleRegister rhs) {
1527 Label ool, done;
1528 MacroAssembler::Float64Max(dst, lhs, rhs, &ool);
1529 Branch(&done);
1530
1531 bind(&ool);
1533 bind(&done);
1534}
1535
1536void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
 1537 DoubleRegister rhs) {
1538 if (CpuFeatures::IsSupported(MIPS_SIMD)) {
1539 DoubleRegister scratch = rhs;
1540 if (dst == rhs) {
1541 scratch = kScratchDoubleReg;
1542 Move_d(scratch, rhs);
1543 }
1544 if (dst != lhs) {
1545 Move_d(dst, lhs);
1546 }
1547 binsli_d(dst.toW(), scratch.toW(), 0);
1548 } else {
1549 UseScratchRegisterScope temps(this);
1550 Register scratch1 = temps.Acquire();
1551 Register scratch2 = temps.Acquire();
1552 dmfc1(scratch1, lhs);
1553 dmfc1(scratch2, rhs);
1554 dsrl32(scratch2, scratch2, 31);
1555 Dins(scratch1, scratch2, 63, 1);
1556 dmtc1(scratch1, dst);
1557 }
1558}
1559
1560#define FP_BINOP(name, instruction) \
1561 void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
1562 DoubleRegister rhs) { \
1563 instruction(dst, lhs, rhs); \
1564 }
1565#define FP_UNOP(name, instruction) \
1566 void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
1567 instruction(dst, src); \
1568 }
1569#define FP_UNOP_RETURN_TRUE(name, instruction) \
1570 bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
1571 instruction(dst, src); \
1572 return true; \
1573 }
1574
1575FP_BINOP(f32_add, add_s)
1576FP_BINOP(f32_sub, sub_s)
1577FP_BINOP(f32_mul, mul_s)
1578FP_BINOP(f32_div, div_s)
1579FP_UNOP(f32_abs, abs_s)
1580FP_UNOP_RETURN_TRUE(f32_ceil, Ceil_s_s)
1581FP_UNOP_RETURN_TRUE(f32_floor, Floor_s_s)
1582FP_UNOP_RETURN_TRUE(f32_trunc, Trunc_s_s)
1583FP_UNOP_RETURN_TRUE(f32_nearest_int, Round_s_s)
1584FP_UNOP(f32_sqrt, sqrt_s)
1585FP_BINOP(f64_add, add_d)
1586FP_BINOP(f64_sub, sub_d)
1587FP_BINOP(f64_mul, mul_d)
1588FP_BINOP(f64_div, div_d)
1589FP_UNOP(f64_abs, abs_d)
1590FP_UNOP_RETURN_TRUE(f64_ceil, Ceil_d_d)
1591FP_UNOP_RETURN_TRUE(f64_floor, Floor_d_d)
1592FP_UNOP_RETURN_TRUE(f64_trunc, Trunc_d_d)
1593FP_UNOP_RETURN_TRUE(f64_nearest_int, Round_d_d)
1594FP_UNOP(f64_sqrt, sqrt_d)
1595
1596#undef FP_BINOP
1597#undef FP_UNOP
1598#undef FP_UNOP_RETURN_TRUE
1599
1600bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
 1601 LiftoffRegister dst,
1602 LiftoffRegister src, Label* trap) {
1603 switch (opcode) {
1604 case kExprI32ConvertI64:
1605 MacroAssembler::Ext(dst.gp(), src.gp(), 0, 32);
1606 return true;
1607 case kExprI32SConvertF32: {
1609
1610 // Real conversion.
1611 MacroAssembler::Trunc_s_s(rounded, src.fp());
1612 trunc_w_s(kScratchDoubleReg2, rounded);
1613 mfc1(dst.gp(), kScratchDoubleReg2);
1614 // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
1615 // because INT32_MIN allows easier out-of-bounds detection.
1616 MacroAssembler::Addu(kScratchReg, dst.gp(), 1);
1617 MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
1619
1620 // Checking if trap.
1621 mtc1(dst.gp(), kScratchDoubleReg2);
1625 return true;
1626 }
1627 case kExprI32UConvertF32: {
1629
1630 // Real conversion.
1631 MacroAssembler::Trunc_s_s(rounded, src.fp());
1633 // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
1634 // because 0 allows easier out-of-bounds detection.
1635 MacroAssembler::Addu(kScratchReg, dst.gp(), 1);
1636 MacroAssembler::Movz(dst.gp(), zero_reg, kScratchReg);
1637
1638 // Checking if trap.
1643 return true;
1644 }
1645 case kExprI32SConvertF64: {
1647
1648 // Real conversion.
1649 MacroAssembler::Trunc_d_d(rounded, src.fp());
1650 trunc_w_d(kScratchDoubleReg2, rounded);
1651 mfc1(dst.gp(), kScratchDoubleReg2);
1652
1653 // Checking if trap.
1657 return true;
1658 }
1659 case kExprI32UConvertF64: {
1661
1662 // Real conversion.
1663 MacroAssembler::Trunc_d_d(rounded, src.fp());
1665
1666 // Checking if trap.
1670 return true;
1671 }
1672 case kExprI32ReinterpretF32:
1673 MacroAssembler::FmoveLow(dst.gp(), src.fp());
1674 return true;
1675 case kExprI64SConvertI32:
1676 sll(dst.gp(), src.gp(), 0);
1677 return true;
1678 case kExprI64UConvertI32:
1679 MacroAssembler::Dext(dst.gp(), src.gp(), 0, 32);
1680 return true;
1681 case kExprI64SConvertF32: {
1683
1684 // Real conversion.
1685 MacroAssembler::Trunc_s_s(rounded, src.fp());
1686 trunc_l_s(kScratchDoubleReg2, rounded);
1687 dmfc1(dst.gp(), kScratchDoubleReg2);
1688 // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
1689 // because INT64_MIN allows easier out-of-bounds detection.
1690 MacroAssembler::Daddu(kScratchReg, dst.gp(), 1);
1691 MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
1693
1694 // Checking if trap.
1695 dmtc1(dst.gp(), kScratchDoubleReg2);
1699 return true;
1700 }
1701 case kExprI64UConvertF32: {
1702 // Real conversion.
1704 kScratchReg);
1705
1706 // Checking if trap.
1707 MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
1708 return true;
1709 }
1710 case kExprI64SConvertF64: {
1712
1713 // Real conversion.
1714 MacroAssembler::Trunc_d_d(rounded, src.fp());
1715 trunc_l_d(kScratchDoubleReg2, rounded);
1716 dmfc1(dst.gp(), kScratchDoubleReg2);
1717 // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
1718 // because INT64_MIN allows easier out-of-bounds detection.
1719 MacroAssembler::Daddu(kScratchReg, dst.gp(), 1);
1720 MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
1722
1723 // Checking if trap.
1724 dmtc1(dst.gp(), kScratchDoubleReg2);
1728 return true;
1729 }
1730 case kExprI64UConvertF64: {
1731 // Real conversion.
1733 kScratchReg);
1734
1735 // Checking if trap.
1736 MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
1737 return true;
1738 }
1739 case kExprI64ReinterpretF64:
1740 dmfc1(dst.gp(), src.fp());
1741 return true;
1742 case kExprF32SConvertI32: {
1743 LiftoffRegister scratch = GetUnusedRegister(kFpReg, LiftoffRegList{dst});
1744 mtc1(src.gp(), scratch.fp());
1745 cvt_s_w(dst.fp(), scratch.fp());
1746 return true;
1747 }
1748 case kExprF32UConvertI32:
1749 MacroAssembler::Cvt_s_uw(dst.fp(), src.gp());
1750 return true;
1751 case kExprF32ConvertF64:
1752 cvt_s_d(dst.fp(), src.fp());
1753 return true;
1754 case kExprF32ReinterpretI32:
1755 MacroAssembler::FmoveLow(dst.fp(), src.gp());
1756 return true;
1757 case kExprF64SConvertI32: {
1758 LiftoffRegister scratch = GetUnusedRegister(kFpReg, LiftoffRegList{dst});
1759 mtc1(src.gp(), scratch.fp());
1760 cvt_d_w(dst.fp(), scratch.fp());
1761 return true;
1762 }
1763 case kExprF64UConvertI32:
1764 MacroAssembler::Cvt_d_uw(dst.fp(), src.gp());
1765 return true;
1766 case kExprF64ConvertF32:
1767 cvt_d_s(dst.fp(), src.fp());
1768 return true;
1769 case kExprF64ReinterpretI64:
1770 dmtc1(src.gp(), dst.fp());
1771 return true;
1772 case kExprI32SConvertSatF32: {
1773 // Other arches use round to zero here, so we follow.
1774 if (CpuFeatures::IsSupported(MIPS_SIMD)) {
1775 trunc_w_s(kScratchDoubleReg, src.fp());
1776 mfc1(dst.gp(), kScratchDoubleReg);
1777 } else {
1778 Label done;
1779 mov(dst.gp(), zero_reg);
1780 CompareIsNanF32(src.fp(), src.fp());
1781 BranchTrueShortF(&done);
1782 li(dst.gp(), static_cast<int32_t>(std::numeric_limits<int32_t>::min()));
1785 static_cast<float>(std::numeric_limits<int32_t>::min()));
1786 CompareF32(OLT, src.fp(), kScratchDoubleReg);
1787 BranchTrueShortF(&done);
1788 trunc_w_s(kScratchDoubleReg, src.fp());
1789 mfc1(dst.gp(), kScratchDoubleReg);
1790 bind(&done);
1791 }
1792 return true;
1793 }
1794 case kExprI32UConvertSatF32: {
1795 Label isnan_or_lessthan_or_equal_zero;
1796 mov(dst.gp(), zero_reg);
1797 MacroAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0));
1798 CompareF32(ULE, src.fp(), kScratchDoubleReg);
1799 BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
1800 Trunc_uw_s(dst.gp(), src.fp(), kScratchDoubleReg);
1801 bind(&isnan_or_lessthan_or_equal_zero);
1802 return true;
1803 }
1804 case kExprI32SConvertSatF64: {
1805 if (CpuFeatures::IsSupported(MIPS_SIMD)) {
1806 trunc_w_d(kScratchDoubleReg, src.fp());
1807 mfc1(dst.gp(), kScratchDoubleReg);
1808 } else {
1809 Label done;
1810 mov(dst.gp(), zero_reg);
1811 CompareIsNanF64(src.fp(), src.fp());
1812 BranchTrueShortF(&done);
1813 li(dst.gp(), static_cast<int32_t>(std::numeric_limits<int32_t>::min()));
1816 static_cast<double>(std::numeric_limits<int32_t>::min()));
1817 CompareF64(OLT, src.fp(), kScratchDoubleReg);
1818 BranchTrueShortF(&done);
1819 trunc_w_d(kScratchDoubleReg, src.fp());
1820 mfc1(dst.gp(), kScratchDoubleReg);
1821 bind(&done);
1822 }
1823 return true;
1824 }
1825 case kExprI32UConvertSatF64: {
1826 Label isnan_or_lessthan_or_equal_zero;
1827 mov(dst.gp(), zero_reg);
1828 MacroAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0));
1829 CompareF64(ULE, src.fp(), kScratchDoubleReg);
1830 BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
1831 Trunc_uw_d(dst.gp(), src.fp(), kScratchDoubleReg);
1832 bind(&isnan_or_lessthan_or_equal_zero);
1833 return true;
1834 }
1835 case kExprI64SConvertSatF32: {
1836 if (CpuFeatures::IsSupported(MIPS_SIMD)) {
1837 trunc_l_s(kScratchDoubleReg, src.fp());
1838 dmfc1(dst.gp(), kScratchDoubleReg);
1839 } else {
1840 Label done;
1841 mov(dst.gp(), zero_reg);
1842 CompareIsNanF32(src.fp(), src.fp());
1843 BranchTrueShortF(&done);
1844 li(dst.gp(), static_cast<int64_t>(std::numeric_limits<int64_t>::min()));
1847 static_cast<float>(std::numeric_limits<int64_t>::min()));
1848 CompareF32(OLT, src.fp(), kScratchDoubleReg);
1849 BranchTrueShortF(&done);
1850 trunc_l_s(kScratchDoubleReg, src.fp());
1851 dmfc1(dst.gp(), kScratchDoubleReg);
1852 bind(&done);
1853 }
1854 return true;
1855 }
1856 case kExprI64UConvertSatF32: {
1857 Label isnan_or_lessthan_or_equal_zero;
1858 mov(dst.gp(), zero_reg);
1859 MacroAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0));
1860 CompareF32(ULE, src.fp(), kScratchDoubleReg);
1861 BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
1862 Trunc_ul_s(dst.gp(), src.fp(), kScratchDoubleReg, no_reg);
1863 bind(&isnan_or_lessthan_or_equal_zero);
1864 return true;
1865 }
1866 case kExprI64SConvertSatF64: {
1867 if (CpuFeatures::IsSupported(MIPS_SIMD)) {
1868 trunc_l_d(kScratchDoubleReg, src.fp());
1869 dmfc1(dst.gp(), kScratchDoubleReg);
1870 } else {
1871 Label done;
1872 mov(dst.gp(), zero_reg);
1873 CompareIsNanF64(src.fp(), src.fp());
1874 BranchTrueShortF(&done);
1875 li(dst.gp(), static_cast<int64_t>(std::numeric_limits<int64_t>::min()));
1878 static_cast<double>(std::numeric_limits<int64_t>::min()));
1879 CompareF64(OLT, src.fp(), kScratchDoubleReg);
1880 BranchTrueShortF(&done);
1881 trunc_l_d(kScratchDoubleReg, src.fp());
1882 dmfc1(dst.gp(), kScratchDoubleReg);
1883 bind(&done);
1884 }
1885 return true;
1886 }
1887 case kExprI64UConvertSatF64: {
1888 Label isnan_or_lessthan_or_equal_zero;
1889 mov(dst.gp(), zero_reg);
1890 MacroAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0));
1891 CompareF64(ULE, src.fp(), kScratchDoubleReg);
1892 BranchTrueShortF(&isnan_or_lessthan_or_equal_zero);
1893 Trunc_ul_d(dst.gp(), src.fp(), kScratchDoubleReg, no_reg);
1894 bind(&isnan_or_lessthan_or_equal_zero);
1895 return true;
1896 }
1897 default:
1898 return false;
1899 }
1900}
1901
1902void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
1903 seb(dst, src);
1904}
1905
1906void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
1907 seh(dst, src);
1908}
1909
1910void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
1911 LiftoffRegister src) {
1912 seb(dst.gp(), src.gp());
1913}
1914
1915void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
1916 LiftoffRegister src) {
1917 seh(dst.gp(), src.gp());
1918}
1919
1920void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
1921 LiftoffRegister src) {
1922 sll(dst.gp(), src.gp(), 0);
1923}
1924
1925void LiftoffAssembler::emit_jump(Label* label) {
 1926 MacroAssembler::Branch(label);
 1927}
1928
1929void LiftoffAssembler::emit_jump(Register target) {
1930 MacroAssembler::Jump(target);
1931}
1932
1933void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
 1934 ValueKind kind, Register lhs,
1935 Register rhs,
1936 const FreezeCacheState& frozen) {
1937 if (rhs == no_reg) {
1938 DCHECK(kind == kI32 || kind == kI64);
1939 MacroAssembler::Branch(label, cond, lhs, Operand(zero_reg));
1940 } else {
1941 DCHECK((kind == kI32 || kind == kI64) ||
1942 (is_reference(kind) && (cond == kEqual || cond == kNotEqual)));
1943 MacroAssembler::Branch(label, cond, lhs, Operand(rhs));
1944 }
1945}
1946
1948 Register lhs, int32_t imm,
1949 const FreezeCacheState& frozen) {
1950 MacroAssembler::Branch(label, cond, lhs, Operand(imm));
1951}
1952
1954 Register lhs, int32_t imm,
1955 const FreezeCacheState& frozen) {
1956 MacroAssembler::Branch(label, cond, lhs, Operand(imm));
1957}
1958
1959void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
1960 sltiu(dst, src, 1);
1961}
1962
1963void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
1964 Register lhs, Register rhs) {
1965 CompareWord(cond, dst, lhs, Operand(rhs));
1966}
1967
1968void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
1969 sltiu(dst, src.gp(), 1);
1970}
1971
1972void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
1973 LiftoffRegister lhs,
1974 LiftoffRegister rhs) {
1975 CompareWord(cond, dst, lhs.gp(), Operand(rhs.gp()));
1976}
1977
1978namespace liftoff {
1979
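// Maps a Liftoff condition to an ordered FPU compare condition plus a
// `predicate` flag that tells the caller whether to keep the result when the
// compare holds (true) or to invert it (false); e.g. kNotEqual reuses EQ with
// predicate == false. Unsupported conditions fall through to UNREACHABLE().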
1980inline FPUCondition ConditionToConditionCmpFPU(Condition condition,
1981 bool* predicate) {
1982 switch (condition) {
1983 case kEqual:
1984 *predicate = true;
1985 return EQ;
1986 case kNotEqual:
1987 *predicate = false;
1988 return EQ;
1989 case kUnsignedLessThan:
1990 *predicate = true;
1991 return OLT;
1992 case kUnsignedGreaterThanEqual:
1993 *predicate = false;
1994 return OLT;
1995 case kUnsignedLessThanEqual:
1996 *predicate = true;
1997 return OLE;
1998 case kUnsignedGreaterThan:
1999 *predicate = false;
2000 return OLE;
2001 default:
2002 *predicate = true;
2003 break;
2004 }
2005 UNREACHABLE();
2006}
2007
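// Note: EmitAnyTrue and EmitAllTrue below emit the MSA branch with a delay
// slot, so the first li() executes unconditionally (in the delay slot) and
// the second li() overwrites it only when the branch falls through.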
2008inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
2009 LiftoffRegister src) {
2010 Label all_false;
2011 assm->BranchMSA(&all_false, MSA_BRANCH_V, all_zero, src.fp().toW(),
2012 USE_DELAY_SLOT);
2013 assm->li(dst.gp(), 0l);
2014 assm->li(dst.gp(), 1);
2015 assm->bind(&all_false);
2016}
2017
2018inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
2019 LiftoffRegister src, MSABranchDF msa_branch_df) {
2020 Label all_true;
2021 assm->BranchMSA(&all_true, msa_branch_df, all_not_zero, src.fp().toW(),
2022 USE_DELAY_SLOT);
2023 assm->li(dst.gp(), 1);
2024 assm->li(dst.gp(), 0l);
2025 assm->bind(&all_true);
2026}
2027
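// Stores a VarState to memory: registers are stored directly, constants are
// materialized in a scratch register (zero_reg for 0), and stack slots are
// reloaded into the scratch first; the store width (Sw/Sd) follows the value
// kind's size.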
2028inline void StoreToMemory(LiftoffAssembler* assm, MemOperand dst,
2029 const LiftoffAssembler::VarState& src) {
2030 if (src.is_reg()) {
2031 Store(assm, dst, src.reg(), src.kind());
2032 return;
2033 }
2034
2035 UseScratchRegisterScope temps(assm);
2036 Register temp = temps.Acquire();
2037 if (src.is_const()) {
2038 if (src.i32_const() == 0) {
2039 temp = zero_reg;
2040 } else {
2041 assm->li(temp, src.i32_const());
2042 }
2043 } else {
2044 DCHECK(src.is_stack());
2045 if (value_kind_size(src.kind()) == 4) {
2046 assm->Lw(temp, liftoff::GetStackSlot(src.offset()));
2047 } else {
2048 assm->Ld(temp, liftoff::GetStackSlot(src.offset()));
2049 }
2050 }
2051
2052 if (value_kind_size(src.kind()) == 4) {
2053 assm->Sw(temp, dst);
2054 } else {
2055 DCHECK_EQ(8, value_kind_size(src.kind()));
2056 assm->Sd(temp, dst);
2057 }
2058}
2059
2060} // namespace liftoff
2061
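// The float set_cond emitters handle NaN up front: if either operand is NaN
// the result is 1 only for a "not equal" comparison. Otherwise dst is set to
// 1 and the ordered FPU compare from ConditionToConditionCmpFPU decides, via
// the predicate flag, whether dst keeps the 1 or is zeroed.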
2062void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
2063 DoubleRegister lhs,
2064 DoubleRegister rhs) {
2065 Label not_nan, cont;
2066 MacroAssembler::CompareIsNanF32(lhs, rhs);
2067 MacroAssembler::BranchFalseF(&not_nan);
2068 // If one of the operands is NaN, return 1 for f32.ne, else 0.
2069 if (cond == ne) {
2070 MacroAssembler::li(dst, 1);
2071 } else {
2072 MacroAssembler::Move(dst, zero_reg);
2073 }
2074 MacroAssembler::Branch(&cont);
2075
2076 bind(&not_nan);
2077
2078 MacroAssembler::li(dst, 1);
2079 bool predicate;
2080 FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
2081 MacroAssembler::CompareF32(fcond, lhs, rhs);
2082 if (predicate) {
2083 MacroAssembler::LoadZeroIfNotFPUCondition(dst);
2084 } else {
2085 MacroAssembler::LoadZeroIfFPUCondition(dst);
2086 }
2087
2088 bind(&cont);
2089}
2090
2091void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
2092 DoubleRegister lhs,
2093 DoubleRegister rhs) {
2094 Label not_nan, cont;
2095 MacroAssembler::CompareIsNanF64(lhs, rhs);
2096 MacroAssembler::BranchFalseF(&not_nan);
2097 // If one of the operands is NaN, return 1 for f64.ne, else 0.
2098 if (cond == ne) {
2099 MacroAssembler::li(dst, 1);
2100 } else {
2101 MacroAssembler::Move(dst, zero_reg);
2102 }
2103 MacroAssembler::Branch(&cont);
2104
2105 bind(&not_nan);
2106
2107 MacroAssembler::li(dst, 1);
2108 bool predicate;
2109 FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
2110 MacroAssembler::CompareF64(fcond, lhs, rhs);
2111 if (predicate) {
2112 MacroAssembler::LoadZeroIfNotFPUCondition(dst);
2113 } else {
2114 MacroAssembler::LoadZeroIfFPUCondition(dst);
2115 }
2116
2117 bind(&cont);
2118}
2119
2120bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
2121 LiftoffRegister true_value,
2122 LiftoffRegister false_value,
2123 ValueKind kind) {
2124 return false;
2125}
2126
2127void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
2128 SmiCheckMode mode,
2129 const FreezeCacheState& frozen) {
2130 UseScratchRegisterScope temps(this);
2131 Register scratch = temps.Acquire();
2132 And(scratch, obj, Operand(kSmiTagMask));
2133 Condition condition = mode == kJumpOnSmi ? eq : ne;
2134 Branch(target, condition, scratch, Operand(zero_reg));
2135}
2136
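// LoadTransform: kExtend loads 64 bits and widens the low lanes by
// interleaving with a sign mask (signed) or a zeroed vector (unsigned),
// kZeroExtend loads a scalar into lane 0 of a zeroed vector, and the
// remaining (splat) case loads a scalar and fills every lane with it.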
2137void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
2138 Register offset_reg, uintptr_t offset_imm,
2139 LoadType type,
2140 LoadTransformationKind transform,
2141 uint32_t* protected_load_pc,
2142 bool i64_offset) {
2143 UseScratchRegisterScope temps(this);
2144 Register scratch = temps.Acquire();
2145 MemOperand src_op =
2146 liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm, i64_offset);
2147 MSARegister dst_msa = dst.fp().toW();
2148 *protected_load_pc = pc_offset();
2149 MachineType memtype = type.mem_type();
2150
2151 if (transform == LoadTransformationKind::kExtend) {
2152 Ld(scratch, src_op);
2153 if (memtype == MachineType::Int8()) {
2154 fill_d(dst_msa, scratch);
2155 clti_s_b(kSimd128ScratchReg, dst_msa, 0);
2156 ilvr_b(dst_msa, kSimd128ScratchReg, dst_msa);
2157 } else if (memtype == MachineType::Uint8()) {
2158 xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2159 fill_d(dst_msa, scratch);
2160 ilvr_b(dst_msa, kSimd128RegZero, dst_msa);
2161 } else if (memtype == MachineType::Int16()) {
2162 fill_d(dst_msa, scratch);
2163 clti_s_h(kSimd128ScratchReg, dst_msa, 0);
2164 ilvr_h(dst_msa, kSimd128ScratchReg, dst_msa);
2165 } else if (memtype == MachineType::Uint16()) {
2166 xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2167 fill_d(dst_msa, scratch);
2168 ilvr_h(dst_msa, kSimd128RegZero, dst_msa);
2169 } else if (memtype == MachineType::Int32()) {
2170 fill_d(dst_msa, scratch);
2171 clti_s_w(kSimd128ScratchReg, dst_msa, 0);
2172 ilvr_w(dst_msa, kSimd128ScratchReg, dst_msa);
2173 } else if (memtype == MachineType::Uint32()) {
2174 xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2175 fill_d(dst_msa, scratch);
2176 ilvr_w(dst_msa, kSimd128RegZero, dst_msa);
2177 }
2178 } else if (transform == LoadTransformationKind::kZeroExtend) {
2179 xor_v(dst_msa, dst_msa, dst_msa);
2180 if (memtype == MachineType::Int32()) {
2181 Lwu(scratch, src_op);
2182 insert_w(dst_msa, 0, scratch);
2183 } else {
2184 DCHECK_EQ(MachineType::Int64(), memtype);
2185 Ld(scratch, src_op);
2186 insert_d(dst_msa, 0, scratch);
2187 }
2188 } else {
2189 DCHECK_EQ(LoadTransformationKind::kSplat, transform);
2190 if (memtype == MachineType::Int8()) {
2191 Lb(scratch, src_op);
2192 fill_b(dst_msa, scratch);
2193 } else if (memtype == MachineType::Int16()) {
2194 Lh(scratch, src_op);
2195 fill_h(dst_msa, scratch);
2196 } else if (memtype == MachineType::Int32()) {
2197 Lw(scratch, src_op);
2198 fill_w(dst_msa, scratch);
2199 } else if (memtype == MachineType::Int64()) {
2200 Ld(scratch, src_op);
2201 fill_d(dst_msa, scratch);
2202 }
2203 }
2204}
2205
2206void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
2207 Register addr, Register offset_reg,
2208 uintptr_t offset_imm, LoadType type,
2209 uint8_t laneidx, uint32_t* protected_load_pc,
2210 bool i64_offset) {
2211 MemOperand src_op =
2212 liftoff::GetMemOp(this, addr, offset_reg, offset_imm, i64_offset);
2213 *protected_load_pc = pc_offset();
2214 LoadStoreLaneParams load_params(type.mem_type().representation(), laneidx);
2215 MacroAssembler::LoadLane(load_params.sz, dst.fp().toW(), laneidx, src_op);
2216}
2217
2218void LiftoffAssembler::StoreLane(Register dst, Register offset,
2219 uintptr_t offset_imm, LiftoffRegister src,
2220 StoreType type, uint8_t lane,
2221 uint32_t* protected_store_pc,
2222 bool i64_offset) {
2223 MemOperand dst_op =
2224 liftoff::GetMemOp(this, dst, offset, offset_imm, i64_offset);
2225 if (protected_store_pc) *protected_store_pc = pc_offset();
2226 LoadStoreLaneParams store_params(type.mem_rep(), lane);
2227 MacroAssembler::StoreLane(store_params.sz, src.fp().toW(), lane, dst_op);
2228}
2229
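// i8x16.shuffle: the 16 lane indices are packed little-endian into two 64-bit
// control words and inserted into dst, then vshf_b picks each output byte
// from the combined lhs/rhs inputs according to that control; the scratch
// copy avoids clobbering an input that aliases dst.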
2230void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
2231 LiftoffRegister lhs,
2232 LiftoffRegister rhs,
2233 const uint8_t shuffle[16],
2234 bool is_swizzle) {
2235 MSARegister dst_msa = dst.fp().toW();
2236 MSARegister lhs_msa = lhs.fp().toW();
2237 MSARegister rhs_msa = rhs.fp().toW();
2238
2239 uint64_t control_hi = 0;
2240 uint64_t control_low = 0;
2241 for (int i = 7; i >= 0; i--) {
2242 control_hi <<= 8;
2243 control_hi |= shuffle[i + 8];
2244 control_low <<= 8;
2245 control_low |= shuffle[i];
2246 }
2247
2248 if (dst_msa == lhs_msa) {
2249 move_v(kSimd128ScratchReg, lhs_msa);
2250 lhs_msa = kSimd128ScratchReg;
2251 } else if (dst_msa == rhs_msa) {
2252 move_v(kSimd128ScratchReg, rhs_msa);
2253 rhs_msa = kSimd128ScratchReg;
2254 }
2255
2256 li(kScratchReg, control_low);
2257 insert_d(dst_msa, 0, kScratchReg);
2258 li(kScratchReg, control_hi);
2259 insert_d(dst_msa, 1, kScratchReg);
2260 vshf_b(dst_msa, rhs_msa, lhs_msa);
2261}
2262
2263void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
2264 LiftoffRegister lhs,
2265 LiftoffRegister rhs) {
2266 MSARegister dst_msa = dst.fp().toW();
2267 MSARegister lhs_msa = lhs.fp().toW();
2268 MSARegister rhs_msa = rhs.fp().toW();
2269
2270 if (dst == lhs) {
2271 move_v(kSimd128ScratchReg, lhs_msa);
2272 lhs_msa = kSimd128ScratchReg;
2273 }
2274 xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2275 move_v(dst_msa, rhs_msa);
2276 vshf_b(dst_msa, kSimd128RegZero, lhs_msa);
2277}
2278
2279void LiftoffAssembler::emit_i8x16_relaxed_swizzle(LiftoffRegister dst,
2280 LiftoffRegister lhs,
2281 LiftoffRegister rhs) {
2282 bailout(kRelaxedSimd, "emit_i8x16_relaxed_swizzle");
2283}
2284
2285void LiftoffAssembler::emit_i32x4_relaxed_trunc_f32x4_s(LiftoffRegister dst,
2286 LiftoffRegister src) {
2287 bailout(kRelaxedSimd, "emit_i32x4_relaxed_trunc_f32x4_s");
2288}
2289
2290void LiftoffAssembler::emit_i32x4_relaxed_trunc_f32x4_u(LiftoffRegister dst,
2291 LiftoffRegister src) {
2292 bailout(kRelaxedSimd, "emit_i32x4_relaxed_trunc_f32x4_u");
2293}
2294
2295void LiftoffAssembler::emit_i32x4_relaxed_trunc_f64x2_s_zero(
2296 LiftoffRegister dst, LiftoffRegister src) {
2297 bailout(kRelaxedSimd, "emit_i32x4_relaxed_trunc_f64x2_s_zero");
2298}
2299
2300void LiftoffAssembler::emit_i32x4_relaxed_trunc_f64x2_u_zero(
2301 LiftoffRegister dst, LiftoffRegister src) {
2302 bailout(kRelaxedSimd, "emit_i32x4_relaxed_trunc_f64x2_u_zero");
2303}
2304
2305void LiftoffAssembler::emit_s128_relaxed_laneselect(LiftoffRegister dst,
2306 LiftoffRegister src1,
2307 LiftoffRegister src2,
2308 LiftoffRegister mask,
2309 int lane_width) {
2310 bailout(kRelaxedSimd, "emit_s128_relaxed_laneselect");
2311}
2312
2313void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
2314 LiftoffRegister src) {
2315 fill_b(dst.fp().toW(), src.gp());
2316}
2317
2318void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
2319 LiftoffRegister src) {
2320 fill_h(dst.fp().toW(), src.gp());
2321}
2322
2323void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
2324 LiftoffRegister src) {
2325 fill_w(dst.fp().toW(), src.gp());
2326}
2327
2328void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
2329 LiftoffRegister src) {
2330 fill_d(dst.fp().toW(), src.gp());
2331}
2332
2333void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
2334 LiftoffRegister src) {
2335 MacroAssembler::FmoveLow(kScratchReg, src.fp());
2336 fill_w(dst.fp().toW(), kScratchReg);
2337}
2338
2339void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
2340 LiftoffRegister src) {
2341 MacroAssembler::Move(kScratchReg, src.fp());
2342 fill_d(dst.fp().toW(), kScratchReg);
2343}
2344
2345#define SIMD_BINOP(name1, name2, type) \
2346 void LiftoffAssembler::emit_##name1##_extmul_low_##name2( \
2347 LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
2348 MacroAssembler::ExtMulLow(type, dst.fp().toW(), src1.fp().toW(), \
2349 src2.fp().toW()); \
2350 } \
2351 void LiftoffAssembler::emit_##name1##_extmul_high_##name2( \
2352 LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
2353 MacroAssembler::ExtMulHigh(type, dst.fp().toW(), src1.fp().toW(), \
2354 src2.fp().toW()); \
2355 }
2356
2357SIMD_BINOP(i16x8, i8x16_s, MSAS8)
2358SIMD_BINOP(i16x8, i8x16_u, MSAU8)
2359
2360SIMD_BINOP(i32x4, i16x8_s, MSAS16)
2361SIMD_BINOP(i32x4, i16x8_u, MSAU16)
2362
2363SIMD_BINOP(i64x2, i32x4_s, MSAS32)
2364SIMD_BINOP(i64x2, i32x4_u, MSAU32)
2365
2366#undef SIMD_BINOP
2367
2368#define SIMD_BINOP(name1, name2, type) \
2369 void LiftoffAssembler::emit_##name1##_extadd_pairwise_##name2( \
2370 LiftoffRegister dst, LiftoffRegister src) { \
2371 MacroAssembler::ExtAddPairwise(type, dst.fp().toW(), src.fp().toW()); \
2372 }
2373
2374SIMD_BINOP(i16x8, i8x16_s, MSAS8)
2375SIMD_BINOP(i16x8, i8x16_u, MSAU8)
2376SIMD_BINOP(i32x4, i16x8_s, MSAS16)
2377SIMD_BINOP(i32x4, i16x8_u, MSAU16)
2378#undef SIMD_BINOP
2379
2380void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
2381 LiftoffRegister src1,
2382 LiftoffRegister src2) {
2383 mulr_q_h(dst.fp().toW(), src1.fp().toW(), src2.fp().toW());
2384}
2385
2386void LiftoffAssembler::emit_i16x8_relaxed_q15mulr_s(LiftoffRegister dst,
2387 LiftoffRegister src1,
2388 LiftoffRegister src2) {
2389 bailout(kRelaxedSimd, "emit_i16x8_relaxed_q15mulr_s");
2390}
2391
2392void LiftoffAssembler::emit_i16x8_dot_i8x16_i7x16_s(LiftoffRegister dst,
2393 LiftoffRegister lhs,
2394 LiftoffRegister rhs) {
2395 bailout(kSimd, "emit_i16x8_dot_i8x16_i7x16_s");
2396}
2397
2398void LiftoffAssembler::emit_i32x4_dot_i8x16_i7x16_add_s(LiftoffRegister dst,
2399 LiftoffRegister lhs,
2400 LiftoffRegister rhs,
2401 LiftoffRegister acc) {
2402 bailout(kSimd, "emit_i32x4_dot_i8x16_i7x16_add_s");
2403}
2404
2405void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
2406 LiftoffRegister rhs) {
2407 ceq_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2408}
2409
2410void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs,
2411 LiftoffRegister rhs) {
2412 ceq_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2413 nor_v(dst.fp().toW(), dst.fp().toW(), dst.fp().toW());
2414}
2415
2416void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
2417 LiftoffRegister rhs) {
2418 clt_s_b(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
2419}
2420
2421void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
2422 LiftoffRegister rhs) {
2423 clt_u_b(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
2424}
2425
2426void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
2427 LiftoffRegister rhs) {
2428 cle_s_b(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
2429}
2430
2431void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
2432 LiftoffRegister rhs) {
2433 cle_u_b(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
2434}
2435
2436void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
2437 LiftoffRegister rhs) {
2438 ceq_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2439}
2440
2441void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
2442 LiftoffRegister rhs) {
2443 ceq_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2444 nor_v(dst.fp().toW(), dst.fp().toW(), dst.fp().toW());
2445}
2446
2447void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
2448 LiftoffRegister rhs) {
2449 clt_s_h(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
2450}
2451
2452void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
2453 LiftoffRegister rhs) {
2454 clt_u_h(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
2455}
2456
2457void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
2458 LiftoffRegister rhs) {
2459 cle_s_h(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
2460}
2461
2462void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
2463 LiftoffRegister rhs) {
2464 cle_u_h(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
2465}
2466
2467void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
2468 LiftoffRegister rhs) {
2469 ceq_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2470}
2471
2472void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
2473 LiftoffRegister rhs) {
2474 ceq_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2475 nor_v(dst.fp().toW(), dst.fp().toW(), dst.fp().toW());
2476}
2477
2478void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
2479 LiftoffRegister rhs) {
2480 clt_s_w(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
2481}
2482
2483void LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
2484 LiftoffRegister rhs) {
2485 clt_u_w(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
2486}
2487
2488void LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
2489 LiftoffRegister rhs) {
2490 cle_s_w(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
2491}
2492
2493void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
2494 LiftoffRegister rhs) {
2495 cle_u_w(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
2496}
2497
2498void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
2499 LiftoffRegister rhs) {
2500 fceq_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2501}
2502
2503void LiftoffAssembler::emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
2504 LiftoffRegister rhs) {
2505 fcune_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2506}
2507
2508void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs,
2509 LiftoffRegister rhs) {
2510 fclt_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2511}
2512
2513void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
2514 LiftoffRegister rhs) {
2515 fcle_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2516}
2517
2518void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
2519 LiftoffRegister rhs) {
2520 ceq_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2521}
2522
2523void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
2524 LiftoffRegister rhs) {
2525 ceq_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2526 nor_v(dst.fp().toW(), dst.fp().toW(), dst.fp().toW());
2527}
2528
2529void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
2530 LiftoffRegister src) {
2531 xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2532 add_a_d(dst.fp().toW(), src.fp().toW(), kSimd128RegZero);
2533}
2534
2535void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
2536 LiftoffRegister rhs) {
2537 fceq_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2538}
2539
2540void LiftoffAssembler::emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
2541 LiftoffRegister rhs) {
2542 fcune_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2543}
2544
2545void LiftoffAssembler::emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs,
2546 LiftoffRegister rhs) {
2547 fclt_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2548}
2549
2550void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
2551 LiftoffRegister rhs) {
2552 fcle_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2553}
2554
2555void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
2556 const uint8_t imms[16]) {
2557 MSARegister dst_msa = dst.fp().toW();
2558 uint64_t vals[2];
2559 memcpy(vals, imms, sizeof(vals));
2560 li(kScratchReg, vals[0]);
2561 insert_d(dst_msa, 0, kScratchReg);
2562 li(kScratchReg, vals[1]);
2563 insert_d(dst_msa, 1, kScratchReg);
2564}
2565
2566void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
2567 nor_v(dst.fp().toW(), src.fp().toW(), src.fp().toW());
2568}
2569
2570void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
2571 LiftoffRegister rhs) {
2572 and_v(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2573}
2574
2575void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
2576 LiftoffRegister rhs) {
2577 or_v(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2578}
2579
2580void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
2581 LiftoffRegister rhs) {
2582 xor_v(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2583}
2584
2585void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
2586 LiftoffRegister lhs,
2587 LiftoffRegister rhs) {
2588 nor_v(kSimd128ScratchReg, rhs.fp().toW(), rhs.fp().toW());
2589 and_v(dst.fp().toW(), kSimd128ScratchReg, lhs.fp().toW());
2590}
2591
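// v128.select: when dst aliases the mask, bsel_v selects bits directly;
// otherwise the result is computed as ((src1 ^ src2) & mask) ^ src2, which
// needs no extra temporary register.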
2592void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
2593 LiftoffRegister src1,
2594 LiftoffRegister src2,
2595 LiftoffRegister mask) {
2596 if (dst == mask) {
2597 bsel_v(dst.fp().toW(), src2.fp().toW(), src1.fp().toW());
2598 } else {
2599 xor_v(kSimd128ScratchReg, src1.fp().toW(), src2.fp().toW());
2600 and_v(kSimd128ScratchReg, kSimd128ScratchReg, mask.fp().toW());
2601 xor_v(dst.fp().toW(), kSimd128ScratchReg, src2.fp().toW());
2602 }
2603}
2604
2605void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
2606 LiftoffRegister src) {
2607 xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2608 subv_b(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
2609}
2610
2611void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
2612 LiftoffRegister src) {
2613 liftoff::EmitAnyTrue(this, dst, src);
2614}
2615
2616void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
2617 LiftoffRegister src) {
2618 liftoff::EmitAllTrue(this, dst, src, MSA_BRANCH_B);
2619}
2620
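// i8x16.bitmask: shift each lane's sign bit down to bit 0, then repeatedly OR
// neighbouring lanes together at doubling widths until each 64-bit half holds
// its 8 mask bits, and finally gather both halves into the low 16 bits of dst.
// The narrower bitmask emitters below follow the same reduction pattern.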
2621void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
2622 LiftoffRegister src) {
2623 MSARegister scratch0 = kSimd128RegZero;
2624 MSARegister scratch1 = kSimd128ScratchReg;
2625 srli_b(scratch0, src.fp().toW(), 7);
2626 srli_h(scratch1, scratch0, 7);
2627 or_v(scratch0, scratch0, scratch1);
2628 srli_w(scratch1, scratch0, 14);
2629 or_v(scratch0, scratch0, scratch1);
2630 srli_d(scratch1, scratch0, 28);
2631 or_v(scratch0, scratch0, scratch1);
2632 shf_w(scratch1, scratch0, 0x0E);
2633 ilvev_b(scratch0, scratch1, scratch0);
2634 copy_u_h(dst.gp(), scratch0, 0);
2635}
2636
2637void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
2638 LiftoffRegister rhs) {
2639 fill_b(kSimd128ScratchReg, rhs.gp());
2640 sll_b(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
2641}
2642
2643void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
2644 int32_t rhs) {
2645 slli_b(dst.fp().toW(), lhs.fp().toW(), rhs & 7);
2646}
2647
2648void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
2649 LiftoffRegister lhs,
2650 LiftoffRegister rhs) {
2651 fill_b(kSimd128ScratchReg, rhs.gp());
2652 sra_b(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
2653}
2654
2655void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
2656 LiftoffRegister lhs, int32_t rhs) {
2657 srai_b(dst.fp().toW(), lhs.fp().toW(), rhs & 7);
2658}
2659
2660void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
2661 LiftoffRegister lhs,
2662 LiftoffRegister rhs) {
2663 fill_b(kSimd128ScratchReg, rhs.gp());
2664 srl_b(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
2665}
2666
2667void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
2668 LiftoffRegister lhs, int32_t rhs) {
2669 srli_b(dst.fp().toW(), lhs.fp().toW(), rhs & 7);
2670}
2671
2672void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
2673 LiftoffRegister rhs) {
2674 addv_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2675}
2676
2677void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
2678 LiftoffRegister lhs,
2679 LiftoffRegister rhs) {
2680 adds_s_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2681}
2682
2683void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
2684 LiftoffRegister lhs,
2685 LiftoffRegister rhs) {
2686 adds_u_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2687}
2688
2689void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
2690 LiftoffRegister rhs) {
2691 subv_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2692}
2693
2694void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
2695 LiftoffRegister lhs,
2696 LiftoffRegister rhs) {
2697 subs_s_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2698}
2699
2700void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
2701 LiftoffRegister lhs,
2702 LiftoffRegister rhs) {
2703 subs_u_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2704}
2705
2706void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
2707 LiftoffRegister lhs,
2708 LiftoffRegister rhs) {
2709 min_s_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2710}
2711
2712void LiftoffAssembler::emit_i8x16_min_u(LiftoffRegister dst,
2713 LiftoffRegister lhs,
2714 LiftoffRegister rhs) {
2715 min_u_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2716}
2717
2718void LiftoffAssembler::emit_i8x16_max_s(LiftoffRegister dst,
2719 LiftoffRegister lhs,
2720 LiftoffRegister rhs) {
2721 max_s_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2722}
2723
2724void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst,
2725 LiftoffRegister lhs,
2726 LiftoffRegister rhs) {
2727 max_u_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2728}
2729
2730void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
2731 LiftoffRegister src) {
2732 pcnt_b(dst.fp().toW(), src.fp().toW());
2733}
2734
2735void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
2736 LiftoffRegister src) {
2737 xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2738 subv_h(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
2739}
2740
2741void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
2742 LiftoffRegister src) {
2743 liftoff::EmitAllTrue(this, dst, src, MSA_BRANCH_H);
2744}
2745
2746void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
2747 LiftoffRegister src) {
2748 MSARegister scratch0 = kSimd128RegZero;
2749 MSARegister scratch1 = kSimd128ScratchReg;
2750 srli_h(scratch0, src.fp().toW(), 15);
2751 srli_w(scratch1, scratch0, 15);
2752 or_v(scratch0, scratch0, scratch1);
2753 srli_d(scratch1, scratch0, 30);
2754 or_v(scratch0, scratch0, scratch1);
2755 shf_w(scratch1, scratch0, 0x0E);
2756 slli_d(scratch1, scratch1, 4);
2757 or_v(scratch0, scratch0, scratch1);
2758 copy_u_b(dst.gp(), scratch0, 0);
2759}
2760
2761void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
2762 LiftoffRegister rhs) {
2763 fill_h(kSimd128ScratchReg, rhs.gp());
2764 sll_h(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
2765}
2766
2767void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
2768 int32_t rhs) {
2769 slli_h(dst.fp().toW(), lhs.fp().toW(), rhs & 15);
2770}
2771
2772void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
2773 LiftoffRegister lhs,
2774 LiftoffRegister rhs) {
2775 fill_h(kSimd128ScratchReg, rhs.gp());
2776 sra_h(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
2777}
2778
2779void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
2780 LiftoffRegister lhs, int32_t rhs) {
2781 srai_h(dst.fp().toW(), lhs.fp().toW(), rhs & 15);
2782}
2783
2784void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
2785 LiftoffRegister lhs,
2786 LiftoffRegister rhs) {
2787 fill_h(kSimd128ScratchReg, rhs.gp());
2788 srl_h(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
2789}
2790
2791void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
2792 LiftoffRegister lhs, int32_t rhs) {
2793 srli_h(dst.fp().toW(), lhs.fp().toW(), rhs & 15);
2794}
2795
2796void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
2797 LiftoffRegister rhs) {
2798 addv_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2799}
2800
2801void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
2802 LiftoffRegister lhs,
2803 LiftoffRegister rhs) {
2804 adds_s_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2805}
2806
2807void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
2808 LiftoffRegister lhs,
2809 LiftoffRegister rhs) {
2810 adds_u_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2811}
2812
2813void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
2814 LiftoffRegister rhs) {
2815 subv_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2816}
2817
2818void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
2819 LiftoffRegister lhs,
2820 LiftoffRegister rhs) {
2821 subs_s_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2822}
2823
2824void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
2825 LiftoffRegister lhs,
2826 LiftoffRegister rhs) {
2827 subs_u_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2828}
2829
2830void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
2831 LiftoffRegister rhs) {
2832 mulv_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2833}
2834
2835void LiftoffAssembler::emit_i16x8_min_s(LiftoffRegister dst,
2836 LiftoffRegister lhs,
2837 LiftoffRegister rhs) {
2838 min_s_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2839}
2840
2841void LiftoffAssembler::emit_i16x8_min_u(LiftoffRegister dst,
2842 LiftoffRegister lhs,
2843 LiftoffRegister rhs) {
2844 min_u_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2845}
2846
2847void LiftoffAssembler::emit_i16x8_max_s(LiftoffRegister dst,
2848 LiftoffRegister lhs,
2849 LiftoffRegister rhs) {
2850 max_s_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2851}
2852
2853void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
2854 LiftoffRegister lhs,
2855 LiftoffRegister rhs) {
2856 max_u_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2857}
2858
2859void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
2860 LiftoffRegister src) {
2861 xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2862 subv_w(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
2863}
2864
2865void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
2866 LiftoffRegister src) {
2867 liftoff::EmitAllTrue(this, dst, src, MSA_BRANCH_W);
2868}
2869
2870void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
2871 LiftoffRegister src) {
2872 MSARegister scratch0 = kSimd128RegZero;
2873 MSARegister scratch1 = kSimd128ScratchReg;
2874 srli_w(scratch0, src.fp().toW(), 31);
2875 srli_d(scratch1, scratch0, 31);
2876 or_v(scratch0, scratch0, scratch1);
2877 shf_w(scratch1, scratch0, 0x0E);
2878 slli_d(scratch1, scratch1, 2);
2879 or_v(scratch0, scratch0, scratch1);
2880 copy_u_b(dst.gp(), scratch0, 0);
2881}
2882
2883void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
2884 LiftoffRegister rhs) {
2885 fill_w(kSimd128ScratchReg, rhs.gp());
2886 sll_w(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
2887}
2888
2889void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
2890 int32_t rhs) {
2891 slli_w(dst.fp().toW(), lhs.fp().toW(), rhs & 31);
2892}
2893
2894void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
2895 LiftoffRegister lhs,
2896 LiftoffRegister rhs) {
2897 fill_w(kSimd128ScratchReg, rhs.gp());
2898 sra_w(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
2899}
2900
2901void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
2902 LiftoffRegister lhs, int32_t rhs) {
2903 srai_w(dst.fp().toW(), lhs.fp().toW(), rhs & 31);
2904}
2905
2906void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
2907 LiftoffRegister lhs,
2908 LiftoffRegister rhs) {
2909 fill_w(kSimd128ScratchReg, rhs.gp());
2910 srl_w(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
2911}
2912
2913void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
2914 LiftoffRegister lhs, int32_t rhs) {
2915 srli_w(dst.fp().toW(), lhs.fp().toW(), rhs & 31);
2916}
2917
2918void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
2919 LiftoffRegister rhs) {
2920 addv_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2921}
2922
2923void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
2924 LiftoffRegister rhs) {
2925 subv_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2926}
2927
2928void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
2929 LiftoffRegister rhs) {
2930 mulv_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2931}
2932
2933void LiftoffAssembler::emit_i32x4_min_s(LiftoffRegister dst,
2934 LiftoffRegister lhs,
2935 LiftoffRegister rhs) {
2936 min_s_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2937}
2938
2939void LiftoffAssembler::emit_i32x4_min_u(LiftoffRegister dst,
2940 LiftoffRegister lhs,
2941 LiftoffRegister rhs) {
2942 min_u_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2943}
2944
2945void LiftoffAssembler::emit_i32x4_max_s(LiftoffRegister dst,
2946 LiftoffRegister lhs,
2947 LiftoffRegister rhs) {
2948 max_s_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2949}
2950
2951void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
2952 LiftoffRegister lhs,
2953 LiftoffRegister rhs) {
2954 max_u_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2955}
2956
2957void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
2958 LiftoffRegister lhs,
2959 LiftoffRegister rhs) {
2960 dotp_s_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
2961}
2962
2963void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
2964 LiftoffRegister src) {
2965 xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2966 subv_d(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
2967}
2968
2969void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
2970 LiftoffRegister src) {
2971 liftoff::EmitAllTrue(this, dst, src, MSA_BRANCH_D);
2972}
2973
2974void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
2975 LiftoffRegister src) {
2976 srli_d(kSimd128RegZero, src.fp().toW(), 63);
2977 shf_w(kSimd128ScratchReg, kSimd128RegZero, 0x02);
2978 slli_d(kSimd128ScratchReg, kSimd128ScratchReg, 1);
2979 or_v(kSimd128RegZero, kSimd128RegZero, kSimd128ScratchReg);
2980 copy_u_b(dst.gp(), kSimd128RegZero, 0);
2981}
2982
2983void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
2984 LiftoffRegister rhs) {
2985 fill_d(kSimd128ScratchReg, rhs.gp());
2986 sll_d(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
2987}
2988
2989void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
2990 int32_t rhs) {
2991 slli_d(dst.fp().toW(), lhs.fp().toW(), rhs & 63);
2992}
2993
2994void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
2995 LiftoffRegister lhs,
2996 LiftoffRegister rhs) {
2997 fill_d(kSimd128ScratchReg, rhs.gp());
2998 sra_d(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
2999}
3000
3001void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
3002 LiftoffRegister lhs, int32_t rhs) {
3003 srai_d(dst.fp().toW(), lhs.fp().toW(), rhs & 63);
3004}
3005
3006void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
3007 LiftoffRegister lhs,
3008 LiftoffRegister rhs) {
3009 fill_d(kSimd128ScratchReg, rhs.gp());
3010 srl_d(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
3011}
3012
3013void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
3014 LiftoffRegister lhs, int32_t rhs) {
3015 srli_d(dst.fp().toW(), lhs.fp().toW(), rhs & 63);
3016}
3017
3018void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
3019 LiftoffRegister rhs) {
3020 addv_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
3021}
3022
3023void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
3024 LiftoffRegister rhs) {
3025 subv_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
3026}
3027
3028void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
3029 LiftoffRegister rhs) {
3030 mulv_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
3031}
3032
3033void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
3034 LiftoffRegister rhs) {
3035 clt_s_d(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
3036}
3037
3038void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
3039 LiftoffRegister rhs) {
3040 cle_s_d(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
3041}
3042
3043void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
3044 LiftoffRegister src) {
3045 bclri_w(dst.fp().toW(), src.fp().toW(), 31);
3046}
3047
3048void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
3049 LiftoffRegister src) {
3050 bnegi_w(dst.fp().toW(), src.fp().toW(), 31);
3051}
3052
3053void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
3054 LiftoffRegister src) {
3055 fsqrt_w(dst.fp().toW(), src.fp().toW());
3056}
3057
3058bool LiftoffAssembler::emit_f32x4_ceil(LiftoffRegister dst,
3059 LiftoffRegister src) {
3060 MSARoundW(dst.fp().toW(), src.fp().toW(), kRoundToPlusInf);
3061 return true;
3062}
3063
3064bool LiftoffAssembler::emit_f32x4_floor(LiftoffRegister dst,
3065 LiftoffRegister src) {
3066 MSARoundW(dst.fp().toW(), src.fp().toW(), kRoundToMinusInf);
3067 return true;
3068}
3069
3070bool LiftoffAssembler::emit_f32x4_trunc(LiftoffRegister dst,
3071 LiftoffRegister src) {
3072 MSARoundW(dst.fp().toW(), src.fp().toW(), kRoundToZero);
3073 return true;
3074}
3075
3076bool LiftoffAssembler::emit_f32x4_nearest_int(LiftoffRegister dst,
3077 LiftoffRegister src) {
3078 MSARoundW(dst.fp().toW(), src.fp().toW(), kRoundToNearest);
3079 return true;
3080}
3081
3082void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
3083 LiftoffRegister rhs) {
3084 fadd_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
3085}
3086
3087void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
3088 LiftoffRegister rhs) {
3089 fsub_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
3090}
3091
3092void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
3093 LiftoffRegister rhs) {
3094 fmul_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
3095}
3096
3097void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
3098 LiftoffRegister rhs) {
3099 fdiv_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
3100}
3101
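// emit_f32x4_min/max (and the f64x2 variants below) implement wasm semantics:
// a NaN in either input produces NaN, and -0.0/+0.0 are ordered (-0.0 is the
// min, +0.0 is the max); the trailing fmin/fmax only canonicalizes the result.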
3102void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
3103 LiftoffRegister rhs) {
3104 MSARegister dst_msa = dst.fp().toW();
3105 MSARegister lhs_msa = lhs.fp().toW();
3106 MSARegister rhs_msa = rhs.fp().toW();
3107 MSARegister scratch0 = kSimd128RegZero;
3108 MSARegister scratch1 = kSimd128ScratchReg;
3109 // If inputs are -0.0 and +0.0, then write -0.0 to scratch1.
3110 // scratch1 = (lhs == rhs) ? (lhs | rhs) : (rhs | rhs).
3111 fseq_w(scratch0, lhs_msa, rhs_msa);
3112 bsel_v(scratch0, rhs_msa, lhs_msa);
3113 or_v(scratch1, scratch0, rhs_msa);
3114 // scratch0 = isNaN(scratch1) ? scratch1: lhs.
3115 fseq_w(scratch0, scratch1, scratch1);
3116 bsel_v(scratch0, scratch1, lhs_msa);
3117 // dst = (scratch1 <= scratch0) ? scratch1 : scratch0.
3118 fsle_w(dst_msa, scratch1, scratch0);
3119 bsel_v(dst_msa, scratch0, scratch1);
3120 // Canonicalize the result.
3121 fmin_w(dst_msa, dst_msa, dst_msa);
3122}
3123
3124void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
3125 LiftoffRegister rhs) {
3126 MSARegister dst_msa = dst.fp().toW();
3127 MSARegister lhs_msa = lhs.fp().toW();
3128 MSARegister rhs_msa = rhs.fp().toW();
3129 MSARegister scratch0 = kSimd128RegZero;
3130 MSARegister scratch1 = kSimd128ScratchReg;
3131 // If inputs are -0.0 and +0.0, then write +0.0 to scratch1.
3132 // scratch1 = (lhs == rhs) ? (lhs & rhs) : (rhs & rhs).
3133 fseq_w(scratch0, lhs_msa, rhs_msa);
3134 bsel_v(scratch0, rhs_msa, lhs_msa);
3135 and_v(scratch1, scratch0, rhs_msa);
3136 // scratch0 = isNaN(scratch1) ? scratch1: lhs.
3137 fseq_w(scratch0, scratch1, scratch1);
3138 bsel_v(scratch0, scratch1, lhs_msa);
3139 // dst = (scratch0 <= scratch1) ? scratch1 : scratch0.
3140 fsle_w(dst_msa, scratch0, scratch1);
3141 bsel_v(dst_msa, scratch0, scratch1);
3142 // Canonicalize the result.
3143 fmax_w(dst_msa, dst_msa, dst_msa);
3144}
3145
3146void LiftoffAssembler::emit_f32x4_relaxed_min(LiftoffRegister dst,
3147 LiftoffRegister lhs,
3148 LiftoffRegister rhs) {
3149 bailout(kRelaxedSimd, "emit_f32x4_relaxed_min");
3150}
3151
3152void LiftoffAssembler::emit_f32x4_relaxed_max(LiftoffRegister dst,
3153 LiftoffRegister lhs,
3154 LiftoffRegister rhs) {
3155 bailout(kRelaxedSimd, "emit_f32x4_relaxed_max");
3156}
3157
3158void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs,
3159 LiftoffRegister rhs) {
3160 MSARegister dst_msa = dst.fp().toW();
3161 MSARegister lhs_msa = lhs.fp().toW();
3162 MSARegister rhs_msa = rhs.fp().toW();
3163 // dst = rhs < lhs ? rhs : lhs
3164 fclt_w(dst_msa, rhs_msa, lhs_msa);
3165 bsel_v(dst_msa, lhs_msa, rhs_msa);
3166}
3167
3168void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs,
3169 LiftoffRegister rhs) {
3170 MSARegister dst_msa = dst.fp().toW();
3171 MSARegister lhs_msa = lhs.fp().toW();
3172 MSARegister rhs_msa = rhs.fp().toW();
3173 // dst = lhs < rhs ? rhs : lhs
3174 fclt_w(dst_msa, lhs_msa, rhs_msa);
3175 bsel_v(dst_msa, lhs_msa, rhs_msa);
3176}
3177
3178void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
3179 LiftoffRegister src) {
3180 bclri_d(dst.fp().toW(), src.fp().toW(), 63);
3181}
3182
3183void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
3184 LiftoffRegister src) {
3185 bnegi_d(dst.fp().toW(), src.fp().toW(), 63);
3186}
3187
3188void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
3189 LiftoffRegister src) {
3190 fsqrt_d(dst.fp().toW(), src.fp().toW());
3191}
3192
3193bool LiftoffAssembler::emit_f64x2_ceil(LiftoffRegister dst,
3194 LiftoffRegister src) {
3195 MSARoundD(dst.fp().toW(), src.fp().toW(), kRoundToPlusInf);
3196 return true;
3197}
3198
3199bool LiftoffAssembler::emit_f64x2_floor(LiftoffRegister dst,
3200 LiftoffRegister src) {
3201 MSARoundD(dst.fp().toW(), src.fp().toW(), kRoundToMinusInf);
3202 return true;
3203}
3204
3205bool LiftoffAssembler::emit_f64x2_trunc(LiftoffRegister dst,
3206 LiftoffRegister src) {
3207 MSARoundD(dst.fp().toW(), src.fp().toW(), kRoundToZero);
3208 return true;
3209}
3210
3211bool LiftoffAssembler::emit_f64x2_nearest_int(LiftoffRegister dst,
3212 LiftoffRegister src) {
3213 MSARoundD(dst.fp().toW(), src.fp().toW(), kRoundToNearest);
3214 return true;
3215}
3216
3217void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
3218 LiftoffRegister rhs) {
3219 fadd_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
3220}
3221
3222void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
3223 LiftoffRegister rhs) {
3224 fsub_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
3225}
3226
3227void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
3228 LiftoffRegister rhs) {
3229 fmul_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
3230}
3231
3232void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs,
3233 LiftoffRegister rhs) {
3234 fdiv_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
3235}
3236
3237void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
3238 LiftoffRegister rhs) {
3239 MSARegister dst_msa = dst.fp().toW();
3240 MSARegister lhs_msa = lhs.fp().toW();
3241 MSARegister rhs_msa = rhs.fp().toW();
3242 MSARegister scratch0 = kSimd128RegZero;
3243 MSARegister scratch1 = kSimd128ScratchReg;
3244 // If inputs are -0.0 and +0.0, then write -0.0 to scratch1.
3245 // scratch1 = (lhs == rhs) ? (lhs | rhs) : (rhs | rhs).
3246 fseq_d(scratch0, lhs_msa, rhs_msa);
3247 bsel_v(scratch0, rhs_msa, lhs_msa);
3248 or_v(scratch1, scratch0, rhs_msa);
3249 // scratch0 = isNaN(scratch1) ? scratch1: lhs.
3250 fseq_d(scratch0, scratch1, scratch1);
3251 bsel_v(scratch0, scratch1, lhs_msa);
3252 // dst = (scratch1 <= scratch0) ? scratch1 : scratch0.
3253 fsle_d(dst_msa, scratch1, scratch0);
3254 bsel_v(dst_msa, scratch0, scratch1);
3255 // Canonicalize the result.
3256 fmin_d(dst_msa, dst_msa, dst_msa);
3257}
3258
3259void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
3260 LiftoffRegister rhs) {
3261 MSARegister dst_msa = dst.fp().toW();
3262 MSARegister lhs_msa = lhs.fp().toW();
3263 MSARegister rhs_msa = rhs.fp().toW();
3264 MSARegister scratch0 = kSimd128RegZero;
3265 MSARegister scratch1 = kSimd128ScratchReg;
3266 // If inputs are -0.0 and +0.0, then write +0.0 to scratch1.
3267 // scratch1 = (lhs == rhs) ? (lhs & rhs) : (rhs & rhs).
3268 fseq_d(scratch0, lhs_msa, rhs_msa);
3269 bsel_v(scratch0, rhs_msa, lhs_msa);
3270 and_v(scratch1, scratch0, rhs_msa);
3271 // scratch0 = isNaN(scratch1) ? scratch1: lhs.
3272 fseq_d(scratch0, scratch1, scratch1);
3273 bsel_v(scratch0, scratch1, lhs_msa);
3274 // dst = (scratch0 <= scratch1) ? scratch1 : scratch0.
3275 fsle_d(dst_msa, scratch0, scratch1);
3276 bsel_v(dst_msa, scratch0, scratch1);
3277 // Canonicalize the result.
3278 fmax_d(dst_msa, dst_msa, dst_msa);
3279}
3280
3281void LiftoffAssembler::emit_f64x2_pmin(LiftoffRegister dst, LiftoffRegister lhs,
3282 LiftoffRegister rhs) {
3283 MSARegister dst_msa = dst.fp().toW();
3284 MSARegister lhs_msa = lhs.fp().toW();
3285 MSARegister rhs_msa = rhs.fp().toW();
3286 // dst = rhs < lhs ? rhs : lhs
3287 fclt_d(dst_msa, rhs_msa, lhs_msa);
3288 bsel_v(dst_msa, lhs_msa, rhs_msa);
3289}
3290
3291void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
3292 LiftoffRegister rhs) {
3293 MSARegister dst_msa = dst.fp().toW();
3294 MSARegister lhs_msa = lhs.fp().toW();
3295 MSARegister rhs_msa = rhs.fp().toW();
3296 // dst = lhs < rhs ? rhs : lhs
3297 fclt_d(dst_msa, lhs_msa, rhs_msa);
3298 bsel_v(dst_msa, lhs_msa, rhs_msa);
3299}
3300
3301void LiftoffAssembler::emit_f64x2_relaxed_min(LiftoffRegister dst,
3302 LiftoffRegister lhs,
3303 LiftoffRegister rhs) {
3304 bailout(kRelaxedSimd, "emit_f64x2_relaxed_min");
3305}
3306
3307void LiftoffAssembler::emit_f64x2_relaxed_max(LiftoffRegister dst,
3308 LiftoffRegister lhs,
3309 LiftoffRegister rhs) {
3310 bailout(kRelaxedSimd, "emit_f64x2_relaxed_max");
3311}
3312
3313void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
3314 LiftoffRegister src) {
3315 xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3316 ilvr_w(kSimd128RegZero, kSimd128RegZero, src.fp().toW());
3317 slli_d(kSimd128RegZero, kSimd128RegZero, 32);
3318 srai_d(kSimd128RegZero, kSimd128RegZero, 32);
3319 ffint_s_d(dst.fp().toW(), kSimd128RegZero);
3320}
3321
3322void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
3323 LiftoffRegister src) {
3324 xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3325 ilvr_w(kSimd128RegZero, kSimd128RegZero, src.fp().toW());
3326 ffint_u_d(dst.fp().toW(), kSimd128RegZero);
3327}
3328
3329void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
3330 LiftoffRegister src) {
3331 fexupr_d(dst.fp().toW(), src.fp().toW());
3332}
3333
3334void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
3335 LiftoffRegister src) {
3336 ftrunc_s_w(dst.fp().toW(), src.fp().toW());
3337}
3338
3339void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
3340 LiftoffRegister src) {
3341 ftrunc_u_w(dst.fp().toW(), src.fp().toW());
3342}
3343
3344void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
3345 LiftoffRegister src) {
3346 xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3347 ftrunc_s_d(kSimd128ScratchReg, src.fp().toW());
3348 sat_s_d(kSimd128ScratchReg, kSimd128ScratchReg, 31);
3349 pckev_w(dst.fp().toW(), kSimd128RegZero, kSimd128ScratchReg);
3350}
3351
3352void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
3353 LiftoffRegister src) {
3354 xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3355 ftrunc_u_d(kSimd128ScratchReg, src.fp().toW());
3356 sat_u_d(kSimd128ScratchReg, kSimd128ScratchReg, 31);
3357 pckev_w(dst.fp().toW(), kSimd128RegZero, kSimd128ScratchReg);
3358}
3359
3360void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
3361 LiftoffRegister src) {
3362 ffint_s_w(dst.fp().toW(), src.fp().toW());
3363}
3364
3365void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
3366 LiftoffRegister src) {
3367 ffint_u_w(dst.fp().toW(), src.fp().toW());
3368}
3369
3370void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
3371 LiftoffRegister src) {
3372 xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3373 fexdo_w(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
3374}
3375
3376void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
3377 LiftoffRegister lhs,
3378 LiftoffRegister rhs) {
3379 sat_s_h(kSimd128ScratchReg, lhs.fp().toW(), 7);
3380 sat_s_h(dst.fp().toW(), rhs.fp().toW(), 7);
3381 pckev_b(dst.fp().toW(), dst.fp().toW(), kSimd128ScratchReg);
3382}
3383
3384void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst,
3385 LiftoffRegister lhs,
3386 LiftoffRegister rhs) {
3387 xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3388 max_s_h(kSimd128ScratchReg, kSimd128RegZero, lhs.fp().toW());
3389 sat_u_h(kSimd128ScratchReg, kSimd128ScratchReg, 7);
3390 max_s_h(dst.fp().toW(), kSimd128RegZero, rhs.fp().toW());
3391 sat_u_h(dst.fp().toW(), dst.fp().toW(), 7);
3392 pckev_b(dst.fp().toW(), dst.fp().toW(), kSimd128ScratchReg);
3393}
3394
3395void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst,
3396 LiftoffRegister lhs,
3397 LiftoffRegister rhs) {
3398 sat_s_w(kSimd128ScratchReg, lhs.fp().toW(), 15);
3399 sat_s_w(dst.fp().toW(), rhs.fp().toW(), 15);
3400 pckev_h(dst.fp().toW(), dst.fp().toW(), kSimd128ScratchReg);
3401}
3402
3403void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
3404 LiftoffRegister lhs,
3405 LiftoffRegister rhs) {
3406 xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3407 max_s_w(kSimd128ScratchReg, kSimd128RegZero, lhs.fp().toW());
3408 sat_u_w(kSimd128ScratchReg, kSimd128ScratchReg, 15);
3409 max_s_w(dst.fp().toW(), kSimd128RegZero, rhs.fp().toW());
3410 sat_u_w(dst.fp().toW(), dst.fp().toW(), 15);
3411 pckev_h(dst.fp().toW(), dst.fp().toW(), kSimd128ScratchReg);
3412}
3413
3414void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst,
3415 LiftoffRegister src) {
3416 ilvr_b(kSimd128ScratchReg, src.fp().toW(), src.fp().toW());
3417 slli_h(dst.fp().toW(), kSimd128ScratchReg, 8);
3418 srai_h(dst.fp().toW(), dst.fp().toW(), 8);
3419}
3420
3421void LiftoffAssembler::emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst,
3422 LiftoffRegister src) {
3423 ilvl_b(kSimd128ScratchReg, src.fp().toW(), src.fp().toW());
3424 slli_h(dst.fp().toW(), kSimd128ScratchReg, 8);
3425 srai_h(dst.fp().toW(), dst.fp().toW(), 8);
3426}
3427
3428void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
3429 LiftoffRegister src) {
3430 xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3431 ilvr_b(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
3432}
3433
3434void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst,
3435 LiftoffRegister src) {
3436 xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3437 ilvl_b(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
3438}
3439
3440void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
3441 LiftoffRegister src) {
3442 ilvr_h(kSimd128ScratchReg, src.fp().toW(), src.fp().toW());
3443 slli_w(dst.fp().toW(), kSimd128ScratchReg, 16);
3444 srai_w(dst.fp().toW(), dst.fp().toW(), 16);
3445}
3446
3447void LiftoffAssembler::emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst,
3448 LiftoffRegister src) {
3449 ilvl_h(kSimd128ScratchReg, src.fp().toW(), src.fp().toW());
3450 slli_w(dst.fp().toW(), kSimd128ScratchReg, 16);
3451 srai_w(dst.fp().toW(), dst.fp().toW(), 16);
3452}
3453
3454void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
3455 LiftoffRegister src) {
3456 xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3457 ilvr_h(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
3458}
3459
3460void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
3461 LiftoffRegister src) {
3462 xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3463 ilvl_h(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
3464}
3465
3466void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
3467 LiftoffRegister src) {
3468 ilvr_w(kSimd128ScratchReg, src.fp().toW(), src.fp().toW());
3469 slli_d(dst.fp().toW(), kSimd128ScratchReg, 32);
3470 srai_d(dst.fp().toW(), dst.fp().toW(), 32);
3471}
3472
3473void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
3474 LiftoffRegister src) {
3475 ilvl_w(kSimd128ScratchReg, src.fp().toW(), src.fp().toW());
3476 slli_d(dst.fp().toW(), kSimd128ScratchReg, 32);
3477 srai_d(dst.fp().toW(), dst.fp().toW(), 32);
3478}
3479
3480void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
3481 LiftoffRegister src) {
3482 xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3483 ilvr_w(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
3484}
3485
3486void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
3487 LiftoffRegister src) {
3488 xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3489 ilvl_w(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
3490}
3491
3492void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
3493 LiftoffRegister lhs,
3494 LiftoffRegister rhs) {
3495 aver_u_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
3496}
3497
3498void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst,
3499 LiftoffRegister lhs,
3500 LiftoffRegister rhs) {
3501 aver_u_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
3502}
3503
3504void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst,
3505 LiftoffRegister src) {
3506 xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3507 asub_s_b(dst.fp().toW(), src.fp().toW(), kSimd128RegZero);
3508}
3509
3510void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
3511 LiftoffRegister src) {
3512 xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3513 asub_s_h(dst.fp().toW(), src.fp().toW(), kSimd128RegZero);
3514}
3515
3516void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
3517 LiftoffRegister src) {
3518 xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3519 asub_s_w(dst.fp().toW(), src.fp().toW(), kSimd128RegZero);
3520}
3521
3522void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
3523 LiftoffRegister lhs,
3524 uint8_t imm_lane_idx) {
3525 copy_s_b(dst.gp(), lhs.fp().toW(), imm_lane_idx);
3526}
3527
3528void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
3529 LiftoffRegister lhs,
3530 uint8_t imm_lane_idx) {
3531 copy_u_b(dst.gp(), lhs.fp().toW(), imm_lane_idx);
3532}
3533
3534void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
3535 LiftoffRegister lhs,
3536 uint8_t imm_lane_idx) {
3537 copy_s_h(dst.gp(), lhs.fp().toW(), imm_lane_idx);
3538}
3539
3540void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
3541 LiftoffRegister lhs,
3542 uint8_t imm_lane_idx) {
3543 copy_u_h(dst.gp(), lhs.fp().toW(), imm_lane_idx);
3544}
3545
3546void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
3547 LiftoffRegister lhs,
3548 uint8_t imm_lane_idx) {
3549 copy_s_w(dst.gp(), lhs.fp().toW(), imm_lane_idx);
3550}
3551
3552void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
3553 LiftoffRegister lhs,
3554 uint8_t imm_lane_idx) {
3555 copy_s_d(dst.gp(), lhs.fp().toW(), imm_lane_idx);
3556}
3557
3558void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
3559 LiftoffRegister lhs,
3560 uint8_t imm_lane_idx) {
3561 copy_u_w(kScratchReg, lhs.fp().toW(), imm_lane_idx);
3562 MacroAssembler::FmoveLow(dst.fp(), kScratchReg);
3563}
3564
3565void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
3566 LiftoffRegister lhs,
3567 uint8_t imm_lane_idx) {
3568 copy_s_d(kScratchReg, lhs.fp().toW(), imm_lane_idx);
3569 MacroAssembler::Move(dst.fp(), kScratchReg);
3570}
3571
3572void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
3573 LiftoffRegister src1,
3574 LiftoffRegister src2,
3575 uint8_t imm_lane_idx) {
3576 if (dst != src1) {
3577 move_v(dst.fp().toW(), src1.fp().toW());
3578 }
3579 insert_b(dst.fp().toW(), imm_lane_idx, src2.gp());
3580}
3581
3582void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
3583 LiftoffRegister src1,
3584 LiftoffRegister src2,
3585 uint8_t imm_lane_idx) {
3586 if (dst != src1) {
3587 move_v(dst.fp().toW(), src1.fp().toW());
3588 }
3589 insert_h(dst.fp().toW(), imm_lane_idx, src2.gp());
3590}
3591
3592void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
3593 LiftoffRegister src1,
3594 LiftoffRegister src2,
3595 uint8_t imm_lane_idx) {
3596 if (dst != src1) {
3597 move_v(dst.fp().toW(), src1.fp().toW());
3598 }
3599 insert_w(dst.fp().toW(), imm_lane_idx, src2.gp());
3600}
3601
3602void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
3603 LiftoffRegister src1,
3604 LiftoffRegister src2,
3605 uint8_t imm_lane_idx) {
3606 if (dst != src1) {
3607 move_v(dst.fp().toW(), src1.fp().toW());
3608 }
3609 insert_d(dst.fp().toW(), imm_lane_idx, src2.gp());
3610}
3611
3612void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
3613 LiftoffRegister src1,
3614 LiftoffRegister src2,
3615 uint8_t imm_lane_idx) {
3616 MacroAssembler::FmoveLow(kScratchReg, src2.fp());
3617 if (dst != src1) {
3618 move_v(dst.fp().toW(), src1.fp().toW());
3619 }
3620 insert_w(dst.fp().toW(), imm_lane_idx, kScratchReg);
3621}
3622
3623void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
3624 LiftoffRegister src1,
3625 LiftoffRegister src2,
3626 uint8_t imm_lane_idx) {
3627 MacroAssembler::Move(kScratchReg, src2.fp());
3628 if (dst != src1) {
3629 move_v(dst.fp().toW(), src1.fp().toW());
3630 }
3631 insert_d(dst.fp().toW(), imm_lane_idx, kScratchReg);
3632}
3633
3634void LiftoffAssembler::emit_f32x4_qfma(LiftoffRegister dst,
3635 LiftoffRegister src1,
3636 LiftoffRegister src2,
3637 LiftoffRegister src3) {
3638 bailout(kRelaxedSimd, "emit_f32x4_qfma");
3639}
3640
3641void LiftoffAssembler::emit_f32x4_qfms(LiftoffRegister dst,
3642 LiftoffRegister src1,
3643 LiftoffRegister src2,
3644 LiftoffRegister src3) {
3645 bailout(kRelaxedSimd, "emit_f32x4_qfms");
3646}
3647
3648void LiftoffAssembler::emit_f64x2_qfma(LiftoffRegister dst,
3649 LiftoffRegister src1,
3650 LiftoffRegister src2,
3651 LiftoffRegister src3) {
3652 bailout(kRelaxedSimd, "emit_f64x2_qfma");
3653}
3654
3655void LiftoffAssembler::emit_f64x2_qfms(LiftoffRegister dst,
3656 LiftoffRegister src1,
3657 LiftoffRegister src2,
3658 LiftoffRegister src3) {
3659 bailout(kRelaxedSimd, "emit_f64x2_qfms");
3660}
3661
3662bool LiftoffAssembler::emit_f16x8_splat(LiftoffRegister dst,
3663 LiftoffRegister src) {
3664 return false;
3665}
3666
3667bool LiftoffAssembler::emit_f16x8_extract_lane(LiftoffRegister dst,
3668 LiftoffRegister lhs,
3669 uint8_t imm_lane_idx) {
3670 return false;
3671}
3672
3673bool LiftoffAssembler::emit_f16x8_replace_lane(LiftoffRegister dst,
3674 LiftoffRegister src1,
3675 LiftoffRegister src2,
3676 uint8_t imm_lane_idx) {
3677 return false;
3678}
3679
3680bool LiftoffAssembler::emit_f16x8_abs(LiftoffRegister dst,
3681 LiftoffRegister src) {
3682 return false;
3683}
3684
3685bool LiftoffAssembler::emit_f16x8_neg(LiftoffRegister dst,
3686 LiftoffRegister src) {
3687 return false;
3688}
3689
3690bool LiftoffAssembler::emit_f16x8_sqrt(LiftoffRegister dst,
3691 LiftoffRegister src) {
3692 return false;
3693}
3694
3695bool LiftoffAssembler::emit_f16x8_ceil(LiftoffRegister dst,
3696 LiftoffRegister src) {
3697 return false;
3698}
3699
3700bool LiftoffAssembler::emit_f16x8_floor(LiftoffRegister dst,
3701 LiftoffRegister src) {
3702 return false;
3703}
3704
3705bool LiftoffAssembler::emit_f16x8_trunc(LiftoffRegister dst,
3706 LiftoffRegister src) {
3707 return false;
3708}
3709
3710bool LiftoffAssembler::emit_f16x8_nearest_int(LiftoffRegister dst,
3711 LiftoffRegister src) {
3712 return false;
3713}
3714
3715bool LiftoffAssembler::emit_f16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
3716 LiftoffRegister rhs) {
3717 return false;
3718}
3719
3720bool LiftoffAssembler::emit_f16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
3721 LiftoffRegister rhs) {
3722 return false;
3723}
3724
3725bool LiftoffAssembler::emit_f16x8_lt(LiftoffRegister dst, LiftoffRegister lhs,
3726 LiftoffRegister rhs) {
3727 return false;
3728}
3729
3730bool LiftoffAssembler::emit_f16x8_le(LiftoffRegister dst, LiftoffRegister lhs,
3731 LiftoffRegister rhs) {
3732 return false;
3733}
3734
3735bool LiftoffAssembler::emit_f16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
3736 LiftoffRegister rhs) {
3737 return false;
3738}
3739
3740bool LiftoffAssembler::emit_f16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
3741 LiftoffRegister rhs) {
3742 return false;
3743}
3744
3745bool LiftoffAssembler::emit_f16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
3746 LiftoffRegister rhs) {
3747 return false;
3748}
3749
3750bool LiftoffAssembler::emit_f16x8_div(LiftoffRegister dst, LiftoffRegister lhs,
3751 LiftoffRegister rhs) {
3752 return false;
3753}
3754
3755bool LiftoffAssembler::emit_f16x8_min(LiftoffRegister dst, LiftoffRegister lhs,
3756 LiftoffRegister rhs) {
3757 return false;
3758}
3759
3760bool LiftoffAssembler::emit_f16x8_max(LiftoffRegister dst, LiftoffRegister lhs,
3761 LiftoffRegister rhs) {
3762 return false;
3763}
3764
3765bool LiftoffAssembler::emit_f16x8_pmin(LiftoffRegister dst, LiftoffRegister lhs,
3766 LiftoffRegister rhs) {
3767 return false;
3768}
3769
3770bool LiftoffAssembler::emit_f16x8_pmax(LiftoffRegister dst, LiftoffRegister lhs,
3771 LiftoffRegister rhs) {
3772 return false;
3773}
3774
3775bool LiftoffAssembler::emit_i16x8_sconvert_f16x8(LiftoffRegister dst,
3776 LiftoffRegister src) {
3777 return false;
3778}
3779
3780bool LiftoffAssembler::emit_i16x8_uconvert_f16x8(LiftoffRegister dst,
3781 LiftoffRegister src) {
3782 return false;
3783}
3784
3785bool LiftoffAssembler::emit_f16x8_sconvert_i16x8(LiftoffRegister dst,
3786 LiftoffRegister src) {
3787 return false;
3788}
3789
3790bool LiftoffAssembler::emit_f16x8_uconvert_i16x8(LiftoffRegister dst,
3791 LiftoffRegister src) {
3792 return false;
3793}
3794
3795bool LiftoffAssembler::emit_f16x8_demote_f32x4_zero(LiftoffRegister dst,
3796 LiftoffRegister src) {
3797 return false;
3798}
3799
3800bool LiftoffAssembler::emit_f16x8_demote_f64x2_zero(LiftoffRegister dst,
3801 LiftoffRegister src) {
3802 return false;
3803}
3804
3805bool LiftoffAssembler::emit_f32x4_promote_low_f16x8(LiftoffRegister dst,
3806 LiftoffRegister src) {
3807 return false;
3808}
3809
3810bool LiftoffAssembler::emit_f16x8_qfma(LiftoffRegister dst,
3811 LiftoffRegister src1,
3812 LiftoffRegister src2,
3813 LiftoffRegister src3) {
3814 return false;
3815}
3816
3817bool LiftoffAssembler::emit_f16x8_qfms(LiftoffRegister dst,
3818 LiftoffRegister src1,
3819 LiftoffRegister src2,
3820 LiftoffRegister src3) {
3821 return false;
3822}
3823
3824bool LiftoffAssembler::supports_f16_mem_access() { return false; }
3825
3826void LiftoffAssembler::StackCheck(Label* ool_code) {
3827 Register limit_address = kScratchReg;
3828 LoadStackLimit(limit_address, StackLimitKind::kInterruptStackLimit);
3829 Branch(ool_code, ule, sp, Operand(limit_address));
3830}
3831
3832void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
3833 if (v8_flags.debug_code) Abort(reason);
3834}
3835
3836void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
3837 LiftoffRegList gp_regs = regs & kGpCacheRegList;
3838 unsigned num_gp_regs = gp_regs.GetNumRegsSet();
3839 if (num_gp_regs) {
3840 unsigned offset = num_gp_regs * kSystemPointerSize;
3841 daddiu(sp, sp, -offset);
3842 while (!gp_regs.is_empty()) {
3843 LiftoffRegister reg = gp_regs.GetFirstRegSet();
3844 offset -= kSystemPointerSize;
3845 sd(reg.gp(), MemOperand(sp, offset));
3846 gp_regs.clear(reg);
3847 }
3848 DCHECK_EQ(offset, 0);
3849 }
3850 LiftoffRegList fp_regs = regs & kFpCacheRegList;
3851 unsigned num_fp_regs = fp_regs.GetNumRegsSet();
3852 if (num_fp_regs) {
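// With MSA enabled, an FP cache register may hold a full 128-bit vector, so
// reserve 16 bytes per register; otherwise 8 bytes (one double) are enough.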
3853 unsigned slot_size = IsEnabled(MIPS_SIMD) ? 16 : 8;
3854 daddiu(sp, sp, -(num_fp_regs * slot_size));
3855 unsigned offset = 0;
3856 while (!fp_regs.is_empty()) {
3857 LiftoffRegister reg = fp_regs.GetFirstRegSet();
3858 if (IsEnabled(MIPS_SIMD)) {
3859 MacroAssembler::st_d(reg.fp().toW(), MemOperand(sp, offset));
3860 } else {
3861 MacroAssembler::Sdc1(reg.fp(), MemOperand(sp, offset));
3862 }
3863 fp_regs.clear(reg);
3864 offset += slot_size;
3865 }
3866 DCHECK_EQ(offset, num_fp_regs * slot_size);
3867 }
3868}
3869
3870void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
3871 LiftoffRegList fp_regs = regs & kFpCacheRegList;
3872 unsigned fp_offset = 0;
3873 while (!fp_regs.is_empty()) {
3874 LiftoffRegister reg = fp_regs.GetFirstRegSet();
3875 if (IsEnabled(MIPS_SIMD)) {
3876 MacroAssembler::ld_d(reg.fp().toW(), MemOperand(sp, fp_offset));
3877 } else {
3878 MacroAssembler::Ldc1(reg.fp(), MemOperand(sp, fp_offset));
3879 }
3880 fp_regs.clear(reg);
3881 fp_offset += (IsEnabled(MIPS_SIMD) ? 16 : 8);
3882 }
3883 if (fp_offset) daddiu(sp, sp, fp_offset);
3884 LiftoffRegList gp_regs = regs & kGpCacheRegList;
3885 unsigned gp_offset = 0;
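// PushRegisters stored the first (lowest-numbered) GP register at the highest
// offset, so restore in reverse: the last register set ends up at offset 0.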
3886 while (!gp_regs.is_empty()) {
3887 LiftoffRegister reg = gp_regs.GetLastRegSet();
3888 ld(reg.gp(), MemOperand(sp, gp_offset));
3889 gp_regs.clear(reg);
3890 gp_offset += kSystemPointerSize;
3891 }
3892 daddiu(sp, sp, gp_offset);
3893}
3894
3895void LiftoffAssembler::RecordSpillsInSafepoint(
3896 SafepointTableBuilder::Safepoint& safepoint, LiftoffRegList all_spills,
3897 LiftoffRegList ref_spills, int spill_offset) {
3898 LiftoffRegList fp_spills = all_spills & kFpCacheRegList;
3899 int spill_space_size = fp_spills.GetNumRegsSet() * kSimd128Size;
3900 LiftoffRegList gp_spills = all_spills & kGpCacheRegList;
3901 while (!gp_spills.is_empty()) {
3902 LiftoffRegister reg = gp_spills.GetFirstRegSet();
3903 if (ref_spills.has(reg)) {
3904 safepoint.DefineTaggedStackSlot(spill_offset);
3905 }
3906 gp_spills.clear(reg);
3907 ++spill_offset;
3908 spill_space_size += kSystemPointerSize;
3909 }
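// Every GP spill occupies one slot; only the slots holding tagged references
// were recorded above, so the GC visits just those at this safepoint.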
3910 // Record the number of additional spill slots.
3911 RecordOolSpillSpaceSize(spill_space_size);
3912}
3913
3914void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
3915 DCHECK_LT(num_stack_slots,
3916 (1 << 16) / kSystemPointerSize); // 16 bit immediate
3917 MacroAssembler::DropAndRet(static_cast<int>(num_stack_slots));
3918}
3919
3920void LiftoffAssembler::CallCWithStackBuffer(
3921 const std::initializer_list<VarState> args, const LiftoffRegister* rets,
3922 ValueKind return_kind, ValueKind out_argument_kind, int stack_bytes,
3923 ExternalReference ext_ref) {
3924 Daddu(sp, sp, -stack_bytes);
3925
3926 int arg_offset = 0;
3927 for (const VarState& arg : args) {
3928 liftoff::StoreToMemory(this, MemOperand{sp, arg_offset}, arg);
3929 arg_offset += value_kind_size(arg.kind());
3930 }
3931 DCHECK_LE(arg_offset, stack_bytes);
3932
3933 // Pass a pointer to the buffer with the arguments to the C function.
3934 // On mips, the first argument is passed in {a0}.
3935 constexpr Register kFirstArgReg = a0;
3936 mov(kFirstArgReg, sp);
3937
3938 // Now call the C function.
3939 constexpr int kNumCCallArgs = 1;
3940 PrepareCallCFunction(kNumCCallArgs, kScratchReg);
3941 CallCFunction(ext_ref, kNumCCallArgs);
3942
3943 // Move return value to the right register.
3944 const LiftoffRegister* next_result_reg = rets;
3945 if (return_kind != kVoid) {
3946 constexpr Register kReturnReg = v0;
3947#ifdef USE_SIMULATOR
3948 // When calling a host function in the simulator, if the function returns an
3949 // int32 value, the simulator does not sign-extend it to int64 because in
3950 // the simulator we do not know whether the function returns an int32 or
3951 // int64. So we need to sign extend it here.
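3951 // (A MIPS64 `sll` with shift amount 0 writes the sign-extended 32-bit
3951 // result into the full 64-bit register, which is what the call below
3951 // relies on.)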
3952 if (return_kind == kI32) {
3953 sll(next_result_reg->gp(), kReturnReg, 0);
3954 } else if (kReturnReg != next_result_reg->gp()) {
3955 Move(*next_result_reg, LiftoffRegister(kReturnReg), return_kind);
3956 }
3957#else
3958 if (kReturnReg != next_result_reg->gp()) {
3959 Move(*next_result_reg, LiftoffRegister(kReturnReg), return_kind);
3960 }
3961#endif
3962 ++next_result_reg;
3963 }
3964
3965 // Load potential output value from the buffer on the stack.
3966 if (out_argument_kind != kVoid) {
3967 liftoff::Load(this, *next_result_reg, MemOperand(sp, 0), out_argument_kind);
3968 }
3969
3970 Daddu(sp, sp, stack_bytes);
3971}
3972
3973void LiftoffAssembler::CallC(const std::initializer_list<VarState> args_list,
3974 ExternalReference ext_ref) {
3975 // First, prepare the stack for the C call.
3976 const int num_args = static_cast<int>(args_list.size());
3977 PrepareCallCFunction(num_args, kScratchReg);
3978
3979 // Note: If we ever need more than eight arguments we would need to load the
3980 // stack arguments to registers (via LoadToRegister), then push them to the
3981 // stack.
3982
3983 // Execute the parallel register move for register parameters.
3984 DCHECK_GE(arraysize(kCArgRegs), num_args);
3985 const VarState* const args = args_list.begin();
3986 ParallelMove parallel_move{this};
3987 for (int reg_arg = 0; reg_arg < num_args; ++reg_arg) {
3988 parallel_move.LoadIntoRegister(LiftoffRegister{kCArgRegs[reg_arg]},
3989 args[reg_arg]);
3990 }
3991 parallel_move.Execute();
3992
3993 // Now call the C function.
3994 CallCFunction(ext_ref, num_args);
3995}
3996
3997void LiftoffAssembler::CallNativeWasmCode(Address addr) {
3998 Call(addr, RelocInfo::WASM_CALL);
3999}
4000
4001void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
4002 Jump(addr, RelocInfo::WASM_CALL);
4003}
4004
4005void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
4006 compiler::CallDescriptor* call_descriptor,
4007 Register target) {
4008 // For mips64, we have more cache registers than wasm parameters. That means
4009 // that target will always be in a register.
4010 DCHECK(target.is_valid());
4011 CallWasmCodePointer(target);
4012}
4013
4014void LiftoffAssembler::TailCallIndirect(
4015 compiler::CallDescriptor* call_descriptor, Register target) {
4016 DCHECK(target.is_valid());
4017 CallWasmCodePointer(target, CallJumpMode::kTailCall);
4018}
4019
4020void LiftoffAssembler::CallBuiltin(Builtin builtin) {
4021 // A direct call to a builtin. Just encode the builtin index. This will be
4022 // patched at relocation.
4023 Call(static_cast<Address>(builtin), RelocInfo::WASM_STUB_CALL);
4024}
4025
4026void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
4027 Daddu(sp, sp, -size);
4028 MacroAssembler::Move(addr, sp);
4029}
4030
4031void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
4032 Daddu(sp, sp, size);
4033}
4034
4035void LiftoffAssembler::MaybeOSR() {}
4036
4037void LiftoffAssembler::emit_store_nonzero_if_nan(Register dst, FPURegister src,
4038 ValueKind kind) {
4039 UseScratchRegisterScope temps(this);
4040 Register scratch = temps.Acquire();
4041 Label not_nan;
4042 if (kind == kF32) {
4043 CompareIsNanF32(src, src);
4044 } else {
4045 DCHECK_EQ(kind, kF64);
4046 CompareIsNanF64(src, src);
4047 }
4048 BranchFalseShortF(&not_nan);
4049 li(scratch, 1);
4050 Sw(dst, MemOperand(dst));
4051 bind(&not_nan);
4052}
4053
4054void LiftoffAssembler::emit_s128_store_nonzero_if_nan(Register dst,
4055 LiftoffRegister src,
4056 Register tmp_gp,
4057 LiftoffRegister tmp_s128,
4058 ValueKind lane_kind) {
4059 Label not_nan;
4060 if (lane_kind == kF32) {
4061 fcun_w(tmp_s128.fp().toW(), src.fp().toW(), src.fp().toW());
4062 } else {
4063 DCHECK_EQ(lane_kind, kF64);
4064 fcun_d(tmp_s128.fp().toW(), src.fp().toW(), src.fp().toW());
4065 }
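// fcun_* sets all bits of a lane when the comparison is unordered, i.e. when
// that lane is NaN. If every lane is zero, no NaN was found and the store of
// the non-zero marker below is skipped.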
4066 BranchMSA(&not_nan, MSA_BRANCH_V, all_zero, tmp_s128.fp().toW(),
4067 USE_DELAY_SLOT);
4068 li(tmp_gp, 1);
4069 Sw(tmp_gp, MemOperand(dst));
4070 bind(&not_nan);
4071}
4072
4073void LiftoffAssembler::emit_store_nonzero(Register dst) {
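// The address in {dst} is itself a non-zero value, so storing it to the slot
// it points at is a cheap way to write "non-zero".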
4074 Sd(dst, MemOperand(dst));
4075}
4076
4077void LiftoffStackSlots::Construct(int param_slots) {
4078 DCHECK_LT(0, slots_.size());
4079 SortInPushOrder();
4080 int last_stack_slot = param_slots;
4081 for (auto& slot : slots_) {
4082 const int stack_slot = slot.dst_slot_;
4083 int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
4084 DCHECK_LT(0, stack_decrement);
4085 last_stack_slot = stack_slot;
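// {stack_decrement} covers this slot plus any skipped slots above it; the
// AllocateStackSpace calls below reserve that gap before the value is pushed.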
4086 const LiftoffAssembler::VarState& src = slot.src_;
4087 switch (src.loc()) {
4088 case LiftoffAssembler::VarState::kStack:
4089 if (src.kind() != kS128) {
4090 asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
4091 asm_->Ld(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
4092 asm_->push(kScratchReg);
4093 } else {
4094 asm_->AllocateStackSpace(stack_decrement - kSimd128Size);
4095 asm_->Ld(kScratchReg, liftoff::GetStackSlot(slot.src_offset_ - 8));
4096 asm_->push(kScratchReg);
4097 asm_->Ld(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
4098 asm_->push(kScratchReg);
4099 }
4100 break;
4101 case LiftoffAssembler::VarState::kRegister: {
4102 int pushed_bytes = SlotSizeInBytes(slot);
4103 asm_->AllocateStackSpace(stack_decrement - pushed_bytes);
4104 liftoff::push(asm_, src.reg(), src.kind());
4105 break;
4106 }
4107 case LiftoffAssembler::VarState::kIntConst: {
4108 asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
4109 asm_->li(kScratchReg, Operand(src.i32_const()));
4110 asm_->push(kScratchReg);
4111 break;
4112 }
4113 }
4114 }
4115}
4116
4117} // namespace v8::internal::wasm
4118
4119#endif // V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_INL_H_
Definition assembler.cc:161
#define shr(value, bits)
Definition sha-256.cc:31
#define DCHECK_LE(v1, v2)
Definition logging.h:490
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define arraysize(array)
Definition macros.h:67
#define V8_LIKELY(condition)
Definition v8config.h:661