v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
liftoff-assembler-riscv32-inl.h
Go to the documentation of this file.
1// Copyright 2022 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_WASM_BASELINE_RISCV_LIFTOFF_ASSEMBLER_RISCV32_INL_H_
6#define V8_WASM_BASELINE_RISCV_LIFTOFF_ASSEMBLER_RISCV32_INL_H_
7
12namespace v8::internal::wasm {
13
14namespace liftoff {
15
16// Liftoff Frames.
17//
18//  slot      Frame
19//       +--------------------+---------------------------
20//  n+4  | optional padding slot to keep the stack 16 byte aligned.
21//  n+3  |   parameter n      |
22//  ...  |       ...          |
23//   4   |   parameter 1      | or parameter 2
24//   3   |   parameter 0      | or parameter 1
25//   2   |  (result address)  | or parameter 0
26//  -----+--------------------+---------------------------
27//   1   | return addr (ra)   |
28//   0   | previous frame (fp)|
29//  -----+--------------------+  <-- frame ptr (fp)
30//  -1   | StackFrame::WASM   |
31//  -2   |     instance       |
32//  -3   |  feedback vector   |
33//  -----+--------------------+---------------------------
34//  -4   |     slot 0         |   ^
35//  -5   |     slot 1         |   |
36//       |                    | Frame slots
37//       |                    |   |
38//       |                    |   v
39//       | optional padding slot to keep the stack 16 byte aligned.
40//  -----+--------------------+  <-- stack ptr (sp)
41//
42
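// Worked example (little-endian, kStackSlotSize == 8): an i64 value spilled
// at Liftoff offset 8 lives in the frame slot at fp-8. GetHalfStackSlot(8,
// kLowWord) below yields MemOperand(fp, -8) for the low word and
// GetHalfStackSlot(8, kHighWord) yields MemOperand(fp, -4) for the high word,
// matching the kLowWordOffset/kHighWordOffset constants that follow.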
43#if defined(V8_TARGET_BIG_ENDIAN)
44constexpr int32_t kLowWordOffset = 4;
45constexpr int32_t kHighWordOffset = 0;
46#else
47constexpr int32_t kLowWordOffset = 0;
48constexpr int32_t kHighWordOffset = 4;
49#endif
50
51inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
52 int32_t half_offset =
53 half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
54 return MemOperand(offset > 0 ? fp : sp, -offset + half_offset);
55}
56
57inline MemOperand GetMemOp(LiftoffAssembler* assm, Register addr,
58 Register offset, uintptr_t offset_imm,
59 unsigned shift_amount = 0) {
62 if (is_uint31(offset_imm)) {
63 int32_t offset_imm32 = static_cast<int32_t>(offset_imm);
64 if (offset == no_reg) return MemOperand(addr, offset_imm32);
65 if (shift_amount != 0) {
66 assm->CalcScaledAddress(kScratchReg2, addr, offset, shift_amount);
67 } else {
68 assm->AddWord(kScratchReg2, offset, addr);
69 }
70 return MemOperand(kScratchReg2, offset_imm32);
71 }
72 // Offset immediate does not fit in 31 bits.
73 assm->li(kScratchReg2, offset_imm);
74 assm->AddWord(kScratchReg2, kScratchReg2, addr);
75 if (offset != no_reg) {
76 if (shift_amount != 0) {
77 assm->CalcScaledAddress(kScratchReg2, kScratchReg2, offset, shift_amount);
78 } else {
79 assm->AddWord(kScratchReg2, kScratchReg2, offset);
80 }
81 }
82 return MemOperand(kScratchReg2, 0);
83}
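// Usage note: GetMemOp may clobber kScratchReg2. An offset_imm that fits in
// 31 bits stays in the returned MemOperand; a larger one (e.g. 0x80000000)
// is first materialized with li into kScratchReg2 and added to the base (and
// to the index register, if any), and the MemOperand then uses offset 0.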
84
85inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
86 int32_t offset, ValueKind kind) {
87 MemOperand src(base, offset);
88
89 switch (kind) {
90 case kI32:
91 case kRef:
92 case kRefNull:
93 assm->Lw(dst.gp(), src);
94 break;
95 case kI64:
96 assm->Lw(dst.low_gp(),
97 MemOperand(base, offset + kLowWordOffset));
98 assm->Lw(dst.high_gp(),
99 MemOperand(base, offset + kHighWordOffset));
100 break;
101 case kF32:
102 assm->LoadFloat(dst.fp(), src);
103 break;
104 case kF64:
105 assm->LoadDouble(dst.fp(), src);
106 break;
107 case kS128:{
108 assm->VU.set(kScratchReg, E8, m1);
109 Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg;
110 if (src.offset() != 0) {
111 assm->AddWord(src_reg, src.rm(), src.offset());
112 }
113 assm->vl(dst.fp().toV(), src_reg, 0, E8);
114 break;
115 }
116 default:
117 UNREACHABLE();
118 }
119}
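// On riscv32 an i64 LiftoffRegister is a pair of GP registers, so Load/Store
// split it into two 32-bit accesses at kLowWordOffset/kHighWordOffset, while
// S128 values go through the vector unit after folding any non-zero offset
// into a scratch base register.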
120
121inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
122 LiftoffRegister src, ValueKind kind) {
123 MemOperand dst(base, offset);
124 switch (kind) {
125 case kI32:
126 case kRefNull:
127 case kRef:
128 assm->Sw(src.gp(), dst);
129 break;
130 case kI64:
131 assm->Sw(src.low_gp(),
132 MemOperand(base, offset + kLowWordOffset));
133 assm->Sw(src.high_gp(),
134 MemOperand(base, offset + kHighWordOffset));
135 break;
136 case kF32:
137 assm->StoreFloat(src.fp(), dst);
138 break;
139 case kF64:
140 assm->StoreDouble(src.fp(), dst);
141 break;
142 case kS128:{
143 assm->VU.set(kScratchReg, E8, m1);
144 Register dst_reg = dst.offset() == 0 ? dst.rm() : kScratchReg;
145 if (dst.offset() != 0) {
146 assm->AddWord(kScratchReg, dst.rm(), dst.offset());
147 }
148 assm->vs(src.fp().toV(), dst_reg, 0, VSew::E8);
149 break;
150 }
151 default:
152 UNREACHABLE();
153 }
154}
155
156inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind) {
157 switch (kind) {
158 case kI32:
159 case kRefNull:
160 case kRef:
161 assm->addi(sp, sp, -kSystemPointerSize);
162 assm->Sw(reg.gp(), MemOperand(sp, 0));
163 break;
164 case kI64:
165 assm->Push(reg.high_gp(), reg.low_gp());
166 break;
167 case kF32:
168 assm->addi(sp, sp, -kSystemPointerSize);
169 assm->StoreFloat(reg.fp(), MemOperand(sp, 0));
170 break;
171 case kF64:
172 assm->addi(sp, sp, -kDoubleSize);
173 assm->StoreDouble(reg.fp(), MemOperand(sp, 0));
174 break;
175 case kS128:{
176 assm->VU.set(kScratchReg, E8, m1);
177 assm->addi(sp, sp, -kSystemPointerSize * 4);
178 assm->vs(reg.fp().toV(), sp, 0, VSew::E8);
179 break;
180 }
181 default:
182 UNREACHABLE();
183 }
184}
185
186inline Register EnsureNoAlias(Assembler* assm, Register reg,
187 LiftoffRegister must_not_alias,
188 UseScratchRegisterScope* temps) {
189 if (reg != must_not_alias.low_gp() && reg != must_not_alias.high_gp())
190 return reg;
191 Register tmp = temps->Acquire();
192 DCHECK_NE(must_not_alias.low_gp(), tmp);
193 DCHECK_NE(must_not_alias.high_gp(), tmp);
194 assm->mv(tmp, reg);
195 return tmp;
196}
197} // namespace liftoff
198
199void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
200 switch (value.type().kind()) {
201 case kI32:
202 MacroAssembler::li(reg.gp(), Operand(value.to_i32()));
203 break;
204 case kI64: {
205 int32_t low_word = value.to_i64();
206 int32_t high_word = value.to_i64() >> 32;
207 MacroAssembler::li(reg.low_gp(), Operand(low_word));
208 MacroAssembler::li(reg.high_gp(), Operand(high_word));
209 break;
210 }
211 case kF32:
212 MacroAssembler::LoadFPRImmediate(reg.fp(),
213 value.to_f32_boxed().get_bits());
214 break;
215 case kF64:
216 MacroAssembler::LoadFPRImmediate(reg.fp(),
217 value.to_f64_boxed().get_bits());
218 break;
219 default:
220 UNREACHABLE();
221 }
222}
223
224void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
225 Register offset_reg,
226 int32_t offset_imm,
227 uint32_t* protected_load_pc,
228 bool needs_shift) {
229 static_assert(kTaggedSize == kSystemPointerSize);
230 Load(LiftoffRegister(dst), src_addr, offset_reg,
231 static_cast<uint32_t>(offset_imm), LoadType::kI32Load, protected_load_pc,
232 false, false, needs_shift);
233}
234
235void LiftoffAssembler::LoadProtectedPointer(Register dst, Register src_addr,
236 int32_t offset) {
237 static_assert(!V8_ENABLE_SANDBOX_BOOL);
238 LoadTaggedPointer(dst, src_addr, no_reg, offset);
239}
240
241void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
242 int32_t offset_imm) {
243 MemOperand src_op = MemOperand(src_addr, offset_imm);
244 LoadWord(dst, src_op);
245}
246
247void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
248 Register offset_reg,
249 int32_t offset_imm, Register src,
250 LiftoffRegList pinned,
251 uint32_t* protected_store_pc,
252 SkipWriteBarrier skip_write_barrier) {
253 static_assert(kTaggedSize == kInt32Size);
254 UseScratchRegisterScope temps{this};
255 Register actual_offset_reg = offset_reg;
256 if (offset_reg != no_reg && offset_imm != 0) {
257 if (cache_state()->is_used(LiftoffRegister(offset_reg))) {
258 // The code below only needs a scratch register if the {MemOperand} given
259 // to {str} has an offset outside the uint12 range. After doing the
260 // addition below we will not pass an immediate offset to {str} though, so
261 // we can use the scratch register here.
262 actual_offset_reg = temps.Acquire();
263 }
264 Add32(actual_offset_reg, offset_reg, Operand(offset_imm));
265 }
266 MemOperand dst_op = MemOperand(kScratchReg, 0);
267 if (actual_offset_reg == no_reg) {
268 dst_op = MemOperand(dst_addr, offset_imm);
269 } else {
270 AddWord(kScratchReg, dst_addr, actual_offset_reg);
271 dst_op = MemOperand(kScratchReg, 0);
272 }
273 auto trapper = [protected_store_pc](int offset) {
274 if (protected_store_pc) *protected_store_pc = static_cast<uint32_t>(offset);
275 };
276 StoreWord(src, dst_op, trapper);
277 if (protected_store_pc) {
278 DCHECK(InstructionAt(*protected_store_pc)->IsStore());
279 }
280
281 if (skip_write_barrier || v8_flags.disable_write_barriers) return;
282
283 // The write barrier.
284 Label exit;
285 CheckPageFlag(dst_addr, MemoryChunk::kPointersFromHereAreInterestingMask,
286 kZero, &exit);
287 JumpIfSmi(src, &exit);
289 CallRecordWriteStubSaveRegisters(
290 dst_addr,
291 actual_offset_reg == no_reg ? Operand(offset_imm)
292 : Operand(actual_offset_reg),
293 SaveFPRegsMode::kSave, StubCallMode::kCallWasmRuntimeStub);
294 bind(&exit);
295}
296
297void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
298 Register offset_reg, uintptr_t offset_imm,
299 LoadType type, uint32_t* protected_load_pc,
300 bool /* is_load_mem */, bool /* i64_offset */,
301 bool needs_shift) {
302 unsigned shift_amount = needs_shift ? type.size_log_2() : 0;
303 MemOperand src_op =
304 liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm, shift_amount);
305 auto trapper = [protected_load_pc](int offset) {
306 if (protected_load_pc) *protected_load_pc = static_cast<uint32_t>(offset);
307 };
308 switch (type.value()) {
309 case LoadType::kI32Load8U:
310 Lbu(dst.gp(), src_op, trapper);
311 break;
312 case LoadType::kI64Load8U:
313 Lbu(dst.low_gp(), src_op, trapper);
314 mv(dst.high_gp(), zero_reg);
315 break;
316 case LoadType::kI32Load8S:
317 Lb(dst.gp(), src_op, trapper);
318 break;
319 case LoadType::kI64Load8S:
320 Lb(dst.low_gp(), src_op, trapper);
321 srai(dst.high_gp(), dst.low_gp(), 31);
322 break;
323 case LoadType::kI32Load16U:
324 Lhu(dst.gp(), src_op, trapper);
325 break;
326 case LoadType::kI64Load16U:
327 Lhu(dst.low_gp(), src_op, trapper);
328 mv(dst.high_gp(), zero_reg);
329 break;
330 case LoadType::kI32Load16S:
331 Lh(dst.gp(), src_op, trapper);
332 break;
333 case LoadType::kI64Load16S:
334 Lh(dst.low_gp(), src_op, trapper);
335 srai(dst.high_gp(), dst.low_gp(), 31);
336 break;
337 case LoadType::kI64Load32U:
338 Lw(dst.low_gp(), src_op, trapper);
339 mv(dst.high_gp(), zero_reg);
340 break;
341 case LoadType::kI64Load32S:
342 Lw(dst.low_gp(), src_op, trapper);
343 srai(dst.high_gp(), dst.low_gp(), 31);
344 break;
345 case LoadType::kI32Load:
346 Lw(dst.gp(), src_op, trapper);
347 break;
348 case LoadType::kI64Load: {
349 Lw(dst.low_gp(), src_op, trapper);
350 src_op = liftoff::GetMemOp(this, src_addr, offset_reg,
351 offset_imm + kSystemPointerSize);
352 Lw(dst.high_gp(), src_op);
353 } break;
354 case LoadType::kF32Load:
355 LoadFloat(dst.fp(), src_op, trapper);
356 break;
357 case LoadType::kF64Load:
358 LoadDouble(dst.fp(), src_op, trapper);
359 break;
360 case LoadType::kS128Load: {
361 VU.set(kScratchReg, E8, m1);
362 Register src_reg = src_op.offset() == 0 ? src_op.rm() : kScratchReg;
363 if (src_op.offset() != 0) {
364 AddWord(src_reg, src_op.rm(), src_op.offset());
365 }
366 trapper(pc_offset());
367 vl(dst.fp().toV(), src_reg, 0, E8);
368 break;
369 }
370 case LoadType::kF32LoadF16:
372 break;
373 default:
374 UNREACHABLE();
375 }
376 if (protected_load_pc) {
377 DCHECK(InstructionAt(*protected_load_pc)->IsLoad());
378 }
379
380#if defined(V8_TARGET_BIG_ENDIAN)
381 if (is_load_mem) {
382 pinned.set(src_op.rm());
383 liftoff::ChangeEndiannessLoad(this, dst, type, pinned);
384 }
385#endif
386}
387
388void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
389 uintptr_t offset_imm, LiftoffRegister src,
390 StoreType type, LiftoffRegList pinned,
391 uint32_t* protected_store_pc, bool is_store_mem,
392 bool i64_offset) {
393 MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
394
395#if defined(V8_TARGET_BIG_ENDIAN)
396 if (is_store_mem) {
397 pinned.set(dst_op.rm());
398 LiftoffRegister tmp = GetUnusedRegister(src.reg_class(), pinned);
399 // Save original value.
400 Move(tmp, src, type.value_type());
401
402 src = tmp;
403 pinned.set(tmp);
404 liftoff::ChangeEndiannessStore(this, src, type, pinned);
405 }
406#endif
407 auto trapper = [protected_store_pc](int offset) {
408 if (protected_store_pc) *protected_store_pc = static_cast<uint32_t>(offset);
409 };
410 switch (type.value()) {
411 case StoreType::kI32Store8:
412 Sb(src.gp(), dst_op, trapper);
413 break;
414 case StoreType::kI64Store8:
415 Sb(src.low_gp(), dst_op, trapper);
416 break;
417 case StoreType::kI32Store16:
418 Sh(src.gp(), dst_op, trapper);
419 break;
420 case StoreType::kI64Store16:
421 Sh(src.low_gp(), dst_op, trapper);
422 break;
423 case StoreType::kI32Store:
424 Sw(src.gp(), dst_op, trapper);
425 break;
426 case StoreType::kI64Store32:
427 Sw(src.low_gp(), dst_op, trapper);
428 break;
429 case StoreType::kI64Store: {
430 Sw(src.low_gp(), dst_op, trapper);
431 dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg,
432 offset_imm + kSystemPointerSize);
433 Sw(src.high_gp(), dst_op, trapper);
434 break;
435 }
436 case StoreType::kF32Store:
437 StoreFloat(src.fp(), dst_op, trapper);
438 break;
439 case StoreType::kF64Store:
440 StoreDouble(src.fp(), dst_op, trapper);
441 break;
442 case StoreType::kS128Store: {
443 VU.set(kScratchReg, E8, m1);
444 Register dst_reg = dst_op.offset() == 0 ? dst_op.rm() : kScratchReg;
445 if (dst_op.offset() != 0) {
446 AddWord(kScratchReg, dst_op.rm(), dst_op.offset());
447 }
448 trapper(pc_offset());
449 vs(src.fp().toV(), dst_reg, 0, VSew::E8);
450 break;
451 }
452 default:
453 UNREACHABLE();
454 }
455 if (protected_store_pc) {
456 DCHECK(InstructionAt(*protected_store_pc)->IsStore());
457 }
458}
459
460namespace liftoff {
461#define __ lasm->
462
463inline Register CalculateActualAddress(LiftoffAssembler* lasm,
464 UseScratchRegisterScope& temps,
465 Register addr_reg, Register offset_reg,
466 uintptr_t offset_imm,
467 Register result_reg = no_reg) {
468 if (offset_reg == no_reg && offset_imm == 0) {
469 if (result_reg == addr_reg || result_reg == no_reg) return addr_reg;
470 lasm->mv(result_reg, addr_reg);
471 return result_reg;
472 }
473 if (result_reg == no_reg) result_reg = temps.Acquire();
474 if (offset_reg == no_reg) {
475 lasm->AddWord(result_reg, addr_reg, Operand(offset_imm));
476 } else {
477 lasm->AddWord(result_reg, addr_reg, Operand(offset_reg));
478 if (offset_imm != 0)
479 lasm->AddWord(result_reg, result_reg, Operand(offset_imm));
480 }
481 return result_reg;
482}
483
484enum class Binop { kAdd, kSub, kAnd, kOr, kXor, kExchange };
485inline void AtomicBinop64(LiftoffAssembler* lasm, Register dst_addr,
486 Register offset_reg, uintptr_t offset_imm,
487 LiftoffRegister value, LiftoffRegister result,
488 StoreType type, Binop op) {
489 ASM_CODE_COMMENT(lasm);
490 FrameScope scope(lasm, StackFrame::MANUAL);
491 RegList c_params = {kCArgRegs[0], kCArgRegs[1], kCArgRegs[2]};
492 RegList result_list = {result.low_gp(), result.high_gp()};
493 // Result registers does not need to be pushed.
494 __ MultiPush(c_params - result_list);
495 UseScratchRegisterScope temps(lasm);
496 liftoff::CalculateActualAddress(lasm, temps, dst_addr, offset_reg, offset_imm,
497 kScratchReg);
498 __ Mv(kCArgRegs[1], value.low_gp());
499 __ Mv(kCArgRegs[2], value.high_gp());
500 __ Mv(kCArgRegs[0], kScratchReg);
501 __ MultiPush(kJSCallerSaved - c_params - result_list);
502 __ PrepareCallCFunction(3, 0, kScratchReg);
503 ExternalReference extern_func_ref;
504 switch (op) {
505 case Binop::kAdd:
506 extern_func_ref = ExternalReference::atomic_pair_add_function();
507 break;
508 case Binop::kSub:
509 extern_func_ref = ExternalReference::atomic_pair_sub_function();
510 break;
511 case Binop::kAnd:
512 extern_func_ref = ExternalReference::atomic_pair_and_function();
513 break;
514 case Binop::kOr:
515 extern_func_ref = ExternalReference::atomic_pair_or_function();
516 break;
517 case Binop::kXor:
518 extern_func_ref = ExternalReference::atomic_pair_xor_function();
519 break;
520 case Binop::kExchange:
521 extern_func_ref = ExternalReference::atomic_pair_exchange_function();
522 break;
523 default:
524 UNREACHABLE();
525 }
526 __ CallCFunction(extern_func_ref, 3, 0);
527 __ MultiPop(kJSCallerSaved - c_params - result_list);
528 __ Mv(result.low_gp(), kReturnRegister0);
529 __ Mv(result.high_gp(), kReturnRegister1);
530 __ MultiPop(c_params - result_list);
531 return;
532}
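// RV32 has no 64-bit LR/SC, so 64-bit atomic read-modify-write operations
// are routed through the C helpers behind ExternalReference::atomic_pair_*;
// the result pair comes back in kReturnRegister0/kReturnRegister1 and all
// other caller-saved registers are preserved around the call.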
533
534inline void AtomicBinop(LiftoffAssembler* lasm, Register dst_addr,
535 Register offset_reg, uintptr_t offset_imm,
536 LiftoffRegister value, LiftoffRegister result,
537 StoreType type, Binop op) {
538 LiftoffRegList pinned{dst_addr, value, result};
539 if (offset_reg != no_reg) pinned.set(offset_reg);
540 Register store_result = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
541
542 // Make sure that {result} is unique.
543 Register result_reg = no_reg;
544 Register value_reg = no_reg;
545 bool change_result = false;
546 switch (type.value()) {
547 case StoreType::kI64Store8:
548 case StoreType::kI64Store16:
549 __ LoadConstant(result.high(), WasmValue(0));
550 result_reg = result.low_gp();
551 value_reg = value.low_gp();
552 break;
553 case StoreType::kI32Store8:
554 case StoreType::kI32Store16:
555 result_reg = result.gp();
556 value_reg = value.gp();
557 break;
558 default:
559 UNREACHABLE();
560 }
561 if (result_reg == value_reg || result_reg == dst_addr ||
562 result_reg == offset_reg) {
563 result_reg = __ GetUnusedRegister(kGpReg, pinned).gp();
564 change_result = true;
565 }
566
567 UseScratchRegisterScope temps(lasm);
568 Register actual_addr = liftoff::CalculateActualAddress(
569 lasm, temps, dst_addr, offset_reg, offset_imm);
570
571 // Allocate an additional {temp} register to hold the result that should be
572 // stored to memory. Note that {temp} and {store_result} are not allowed to be
573 // the same register.
574 Register temp = temps.Acquire();
575
576 Label retry;
577 __ bind(&retry);
578 switch (type.value()) {
579 case StoreType::kI64Store8:
580 case StoreType::kI32Store8:
581 __ lbu(result_reg, actual_addr, 0);
582 __ sync();
583 break;
584 case StoreType::kI64Store16:
585 case StoreType::kI32Store16:
586 __ lhu(result_reg, actual_addr, 0);
587 __ sync();
588 break;
589 case StoreType::kI64Store32:
590 case StoreType::kI32Store:
591 __ lr_w(true, false, result_reg, actual_addr);
592 break;
593 default:
594 UNREACHABLE();
595 }
596
597 switch (op) {
598 case Binop::kAdd:
599 __ add(temp, result_reg, value_reg);
600 break;
601 case Binop::kSub:
602 __ sub(temp, result_reg, value_reg);
603 break;
604 case Binop::kAnd:
605 __ and_(temp, result_reg, value_reg);
606 break;
607 case Binop::kOr:
608 __ or_(temp, result_reg, value_reg);
609 break;
610 case Binop::kXor:
611 __ xor_(temp, result_reg, value_reg);
612 break;
613 case Binop::kExchange:
614 __ mv(temp, value_reg);
615 break;
616 }
617 switch (type.value()) {
618 case StoreType::kI64Store8:
619 case StoreType::kI32Store8:
620 __ sync();
621 __ sb(temp, actual_addr, 0);
622 __ sync();
623 __ mv(store_result, zero_reg);
624 break;
625 case StoreType::kI64Store16:
626 case StoreType::kI32Store16:
627 __ sync();
628 __ sh(temp, actual_addr, 0);
629 __ sync();
630 __ mv(store_result, zero_reg);
631 break;
632 case StoreType::kI64Store32:
633 case StoreType::kI32Store:
634 __ sc_w(false, true, store_result, actual_addr, temp);
635 break;
636 default:
637 UNREACHABLE();
638 }
639
640 __ bnez(store_result, &retry);
641 if (change_result) {
642 switch (type.value()) {
643 case StoreType::kI64Store8:
644 case StoreType::kI64Store16:
645 case StoreType::kI64Store32:
646 __ mv(result.low_gp(), result_reg);
647 break;
648 case StoreType::kI32Store8:
649 case StoreType::kI32Store16:
650 case StoreType::kI32Store:
651 __ mv(result.gp(), result_reg);
652 break;
653 default:
654 UNREACHABLE();
655 }
656 }
657}
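// AtomicBinop emits an LR.W/SC.W retry loop for 32-bit operations: sc_w
// writes 0 to store_result on success, so bnez(store_result, &retry) loops
// until the store-conditional succeeds. The 8- and 16-bit cases instead use
// plain loads/stores bracketed by sync() and set store_result to zero, so
// they never retry.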
658
659#undef __
660} // namespace liftoff
661
662void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
663 Register offset_reg, uintptr_t offset_imm,
664 LoadType type, LiftoffRegList pinned,
665 bool i64_offset) {
666 UseScratchRegisterScope temps(this);
667 Register src_reg = liftoff::CalculateActualAddress(this, temps, src_addr,
668 offset_reg, offset_imm);
669 Register dst_reg = no_reg;
670 switch (type.value()) {
671 case LoadType::kI32Load8U:
672 case LoadType::kI32Load16U:
673 case LoadType::kI32Load:
674 dst_reg = dst.gp();
675 break;
676 case LoadType::kI64Load8U:
677 case LoadType::kI64Load16U:
678 case LoadType::kI64Load32U:
679 dst_reg = dst.low_gp();
680 LoadConstant(dst.high(), WasmValue(0));
681 break;
682 default:
683 break;
684 }
685 switch (type.value()) {
686 case LoadType::kI32Load8U:
687 case LoadType::kI64Load8U:
688 fence(PSR | PSW, PSR | PSW);
689 lbu(dst_reg, src_reg, 0);
690 fence(PSR, PSR | PSW);
691 return;
692 case LoadType::kI32Load16U:
693 case LoadType::kI64Load16U:
694 fence(PSR | PSW, PSR | PSW);
695 lhu(dst_reg, src_reg, 0);
696 fence(PSR, PSR | PSW);
697 return;
698 case LoadType::kI32Load:
699 case LoadType::kI64Load32U:
700 fence(PSR | PSW, PSR | PSW);
701 lw(dst_reg, src_reg, 0);
702 fence(PSR, PSR | PSW);
703 return;
704 case LoadType::kI64Load:
705 fence(PSR | PSW, PSR | PSW);
706 lw(dst.low_gp(), src_reg, liftoff::kLowWordOffset);
707 lw(dst.high_gp(), src_reg, liftoff::kHighWordOffset);
708 fence(PSR, PSR | PSW);
709 return;
710 default:
711 UNREACHABLE();
712 }
713}
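// The fences above implement the usual RISC-V mapping for a sequentially
// consistent load: fence rw,rw before the access and fence r,rw after it
// (PSR and PSW are the read/write ordering bits). Note that an i64 atomic
// load is performed as two fenced 32-bit loads of the low and high words.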
714
715void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
716 uintptr_t offset_imm, LiftoffRegister src,
717 StoreType type, LiftoffRegList pinned,
718 bool i64_offset) {
719 UseScratchRegisterScope temps(this);
720 Register dst_reg = liftoff::CalculateActualAddress(this, temps, dst_addr,
721 offset_reg, offset_imm);
722 Register src_reg = no_reg;
723 switch (type.value()) {
724 case StoreType::kI32Store8:
725 case StoreType::kI32Store16:
726 case StoreType::kI32Store:
727 src_reg = src.gp();
728 break;
729 case StoreType::kI64Store8:
730 case StoreType::kI64Store16:
731 case StoreType::kI64Store32:
732 src_reg = src.low_gp();
733 break;
734 default:
735 break;
736 }
737 switch (type.value()) {
738 case StoreType::kI64Store8:
739 case StoreType::kI32Store8:
740 fence(PSR | PSW, PSW);
741 sb(src_reg, dst_reg, 0);
742 return;
743 case StoreType::kI64Store16:
744 case StoreType::kI32Store16:
745 fence(PSR | PSW, PSW);
746 sh(src_reg, dst_reg, 0);
747 return;
748 case StoreType::kI64Store32:
749 case StoreType::kI32Store:
750 fence(PSR | PSW, PSW);
751 sw(src_reg, dst_reg, 0);
752 return;
753 case StoreType::kI64Store:
754 fence(PSR | PSW, PSW);
755 sw(src.low_gp(), dst_reg, liftoff::kLowWordOffset);
756 sw(src.high_gp(), dst_reg, liftoff::kHighWordOffset);
757 return;
758 default:
759 UNREACHABLE();
760 }
761}
762
763void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
764 uint32_t offset_imm, LiftoffRegister value,
765 LiftoffRegister result, StoreType type,
766 bool i64_offset) {
767 if (type.value() == StoreType::kI64Store) {
768 liftoff::AtomicBinop64(this, dst_addr, offset_reg, offset_imm, value,
769 result, type, liftoff::Binop::kAdd);
770 return;
771 }
772 if (type.value() == StoreType::kI32Store ||
773 type.value() == StoreType::kI64Store32) {
774 UseScratchRegisterScope temps(this);
775 Register actual_addr = liftoff::CalculateActualAddress(
776 this, temps, dst_addr, offset_reg, offset_imm);
777 if (type.value() == StoreType::kI64Store32) {
778 mv(result.high_gp(), zero_reg); // High word of result is always 0.
779 result = result.low();
780 value = value.low();
781 }
782 amoadd_w(true, true, result.gp(), actual_addr, value.gp());
783 return;
784 }
785
786 liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
787 type, liftoff::Binop::kAdd);
788}
789
790void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
791 uint32_t offset_imm, LiftoffRegister value,
792 LiftoffRegister result, StoreType type,
793 bool i64_offset) {
794 if (type.value() == StoreType::kI64Store) {
795 liftoff::AtomicBinop64(this, dst_addr, offset_reg, offset_imm, value,
796 result, type, liftoff::Binop::kSub);
797 return;
798 }
799 if (type.value() == StoreType::kI32Store ||
800 type.value() == StoreType::kI64Store32) {
801 UseScratchRegisterScope temps(this);
802 Register actual_addr = liftoff::CalculateActualAddress(
803 this, temps, dst_addr, offset_reg, offset_imm);
804 if (type.value() == StoreType::kI64Store32) {
805 mv(result.high_gp(), zero_reg);
806 result = result.low();
807 value = value.low();
808 }
809 sub(kScratchReg, zero_reg, value.gp());
810 amoadd_w(true, true, result.gp(), actual_addr, kScratchReg);
811 return;
812 }
813 liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
814 type, liftoff::Binop::kSub);
815}
816
817void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
818 uint32_t offset_imm, LiftoffRegister value,
819 LiftoffRegister result, StoreType type,
820 bool i64_offset) {
821 if (type.value() == StoreType::kI64Store) {
822 liftoff::AtomicBinop64(this, dst_addr, offset_reg, offset_imm, value,
823 result, type, liftoff::Binop::kAnd);
824 return;
825 }
826 if (type.value() == StoreType::kI32Store ||
827 type.value() == StoreType::kI64Store32) {
828 UseScratchRegisterScope temps(this);
829 Register actual_addr = liftoff::CalculateActualAddress(
830 this, temps, dst_addr, offset_reg, offset_imm);
831 if (type.value() == StoreType::kI64Store32) {
832 mv(result.high_gp(), zero_reg);
833 result = result.low();
834 value = value.low();
835 }
836 amoand_w(true, true, result.gp(), actual_addr, value.gp());
837 return;
838 }
839 liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
840 type, liftoff::Binop::kAnd);
841}
842
843void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
844 uint32_t offset_imm, LiftoffRegister value,
845 LiftoffRegister result, StoreType type,
846 bool i64_offset) {
847 if (type.value() == StoreType::kI64Store) {
848 liftoff::AtomicBinop64(this, dst_addr, offset_reg, offset_imm, value,
849 result, type, liftoff::Binop::kOr);
850 return;
851 }
852 if (type.value() == StoreType::kI32Store ||
853 type.value() == StoreType::kI64Store32) {
854 UseScratchRegisterScope temps(this);
855 Register actual_addr = liftoff::CalculateActualAddress(
856 this, temps, dst_addr, offset_reg, offset_imm);
857 if (type.value() == StoreType::kI64Store32) {
858 mv(result.high_gp(), zero_reg);
859 result = result.low();
860 value = value.low();
861 }
862 amoor_w(true, true, result.gp(), actual_addr, value.gp());
863 return;
864 }
865 liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
866 type, liftoff::Binop::kOr);
867}
868
869void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
870 uint32_t offset_imm, LiftoffRegister value,
871 LiftoffRegister result, StoreType type,
872 bool i64_offset) {
873 if (type.value() == StoreType::kI64Store) {
874 liftoff::AtomicBinop64(this, dst_addr, offset_reg, offset_imm, value,
875 result, type, liftoff::Binop::kXor);
876 return;
877 }
878 if (type.value() == StoreType::kI32Store ||
879 type.value() == StoreType::kI64Store32) {
880 UseScratchRegisterScope temps(this);
881 Register actual_addr = liftoff::CalculateActualAddress(
882 this, temps, dst_addr, offset_reg, offset_imm);
883 if (type.value() == StoreType::kI64Store32) {
884 mv(result.high_gp(), zero_reg);
885 result = result.low();
886 value = value.low();
887 }
888 amoxor_w(true, true, result.gp(), actual_addr, value.gp());
889 return;
890 }
891 liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
892 type, liftoff::Binop::kXor);
893}
894
895void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
896 uint32_t offset_imm,
897 LiftoffRegister value,
898 LiftoffRegister result, StoreType type,
899 bool i64_offset) {
900 if (type.value() == StoreType::kI64Store) {
901 liftoff::AtomicBinop64(this, dst_addr, offset_reg, offset_imm, value,
902 result, type, liftoff::Binop::kExchange);
903 return;
904 }
905 if (type.value() == StoreType::kI32Store ||
906 type.value() == StoreType::kI64Store32) {
907 UseScratchRegisterScope temps(this);
908 Register actual_addr = liftoff::CalculateActualAddress(
909 this, temps, dst_addr, offset_reg, offset_imm);
910 if (type.value() == StoreType::kI64Store32) {
911 mv(result.high_gp(), zero_reg);
912 result = result.low();
913 value = value.low();
914 }
915 amoswap_w(true, true, result.gp(), actual_addr, value.gp());
916 return;
917 }
918 liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
919 type, liftoff::Binop::kExchange);
920}
921
922void LiftoffAssembler::AtomicCompareExchange(
923 Register dst_addr, Register offset_reg, uintptr_t offset_imm,
924 LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
925 StoreType type, bool i64_offset) {
926 ASM_CODE_COMMENT(this);
927 LiftoffRegList pinned{dst_addr, expected, new_value, result};
928 if (offset_reg != no_reg) pinned.set(offset_reg);
929
930 if (type.value() == StoreType::kI64Store) {
931 UseScratchRegisterScope temps(this);
932 Register actual_addr = liftoff::CalculateActualAddress(
933 this, temps, dst_addr, offset_reg, offset_imm, kScratchReg);
934 FrameScope scope(this, StackFrame::MANUAL);
935 // NOTE:
936 // a0~a4 are caller-saved registers and also used
937 // to pass parameters for C functions.
938 RegList c_params = {kCArgRegs[0], kCArgRegs[1], kCArgRegs[2], kCArgRegs[3],
939 a4};
940 RegList result_list = {result.low_gp(), result.high_gp()};
941 MultiPush(c_params - result_list);
942
943 Mv(a1, expected.low_gp());
944 Mv(a2, expected.high_gp());
945 Mv(a3, new_value.low_gp());
946 Mv(a4, new_value.high_gp());
947 Mv(a0, actual_addr);
948
949 MultiPush(kJSCallerSaved - c_params - result_list);
950 PrepareCallCFunction(5, 0, kScratchReg);
951 CallCFunction(ExternalReference::atomic_pair_compare_exchange_function(), 5,
952 0);
953 MultiPop(kJSCallerSaved - c_params - result_list);
954 Mv(kScratchReg, kReturnRegister1);
955 Mv(result.low_gp(), kReturnRegister0);
956 Mv(result.high_gp(), kScratchReg);
957 MultiPop(c_params - result_list);
958 return;
959 }
960 // Make sure that {result} is unique.
961 switch (type.value()) {
962 case StoreType::kI64Store8:
963 case StoreType::kI64Store16:
964 case StoreType::kI64Store32:
965 LoadConstant(result.high(), WasmValue(0));
966 result = result.low();
967 new_value = new_value.low();
968 expected = expected.low();
969 break;
970 case StoreType::kI32Store8:
971 case StoreType::kI32Store16:
972 case StoreType::kI32Store:
973 break;
974 default:
975 UNREACHABLE();
976 }
977
978 UseScratchRegisterScope temps(this);
979 Register actual_addr = liftoff::CalculateActualAddress(
980 this, temps, dst_addr, offset_reg, offset_imm, kScratchReg);
981
982 Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
983 Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
984 Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
985
986 if (type.value() != StoreType::kI32Store &&
987 type.value() != StoreType::kI64Store32) {
988 And(temp1, actual_addr, 0x3);
989 SubWord(temp0, actual_addr, Operand(temp1));
990 SllWord(temp1, temp1, 3);
991 }
992 Label retry;
993 Label done;
994 bind(&retry);
995 switch (type.value()) {
996 case StoreType::kI64Store8:
997 case StoreType::kI32Store8:
998 lr_w(true, true, temp2, temp0);
999 ExtractBits(result.gp(), temp2, temp1, 8, false);
1000 ExtractBits(temp2, expected.gp(), zero_reg, 8, false);
1001 Branch(&done, ne, temp2, Operand(result.gp()));
1002 InsertBits(temp2, new_value.gp(), temp1, 8);
1003 sc_w(true, true, temp2, temp0, temp2);
1004 break;
1005 case StoreType::kI64Store16:
1006 case StoreType::kI32Store16:
1007 lr_w(true, true, temp2, temp0);
1008 ExtractBits(result.gp(), temp2, temp1, 16, false);
1009 ExtractBits(temp2, expected.gp(), zero_reg, 16, false);
1010 Branch(&done, ne, temp2, Operand(result.gp()));
1011 InsertBits(temp2, new_value.gp(), temp1, 16);
1012 sc_w(true, true, temp2, temp0, temp2);
1013 break;
1014 case StoreType::kI64Store32:
1015 case StoreType::kI32Store:
1016 lr_w(true, true, result.gp(), actual_addr);
1017 Branch(&done, ne, result.gp(), Operand(expected.gp()));
1018 sc_w(true, true, temp2, actual_addr, new_value.gp());
1019 break;
1020 default:
1021 UNREACHABLE();
1022 }
1023 bnez(temp2, &retry);
1024 bind(&done);
1025}
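// For the 8- and 16-bit cases above, the compare-exchange loop works on the
// containing aligned word: temp0 holds the word-aligned address, temp1 the
// bit position of the lane, and ExtractBits/InsertBits splice the old and
// new values out of and into the word loaded with lr_w before sc_w writes it
// back.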
1026
1028
1029void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
1030 uint32_t caller_slot_idx,
1031 ValueKind kind) {
1032 int32_t offset = kSystemPointerSize * (caller_slot_idx + 1);
1033 liftoff::Load(this, dst, fp, offset, kind);
1034}
1035
1036void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
1037 uint32_t caller_slot_idx,
1038 ValueKind kind,
1039 Register frame_pointer) {
1040 int32_t offset = kSystemPointerSize * (caller_slot_idx + 1);
1041 liftoff::Store(this, frame_pointer, offset, src, kind);
1042}
1043
1044void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
1045 ValueKind kind) {
1046 liftoff::Load(this, dst, sp, offset, kind);
1047}
1048
1049void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
1050 ValueKind kind) {
1051 DCHECK_NE(dst_offset, src_offset);
1052
1053 MemOperand src = liftoff::GetStackSlot(src_offset);
1054 MemOperand dst = liftoff::GetStackSlot(dst_offset);
1055 switch (kind) {
1056 case kI32:
1057 Lw(kScratchReg, src);
1058 Sw(kScratchReg, dst);
1059 break;
1060 case kI64:
1061 case kRef:
1062 case kRefNull:
1063 Lw(kScratchReg, src);
1064 Sw(kScratchReg, dst);
1065 src = liftoff::GetStackSlot(src_offset - 4);
1066 dst = liftoff::GetStackSlot(dst_offset - 4);
1067 Lw(kScratchReg, src);
1068 Sw(kScratchReg, dst);
1069 break;
1070 case kF32:
1071 LoadFloat(kScratchDoubleReg, src);
1072 StoreFloat(kScratchDoubleReg, dst);
1073 break;
1074 case kF64:
1075 MacroAssembler::LoadDouble(kScratchDoubleReg, src);
1076 MacroAssembler::StoreDouble(kScratchDoubleReg, dst);
1077 break;
1078 case kS128: {
1079 VU.set(kScratchReg, E8, m1);
1080 Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg;
1081 if (src.offset() != 0) {
1082 MacroAssembler::AddWord(src_reg, src.rm(), src.offset());
1083 }
1084 vl(kSimd128ScratchReg, src_reg, 0, E8);
1085 Register dst_reg = dst.offset() == 0 ? dst.rm() : kScratchReg;
1086 if (dst.offset() != 0) {
1087 AddWord(kScratchReg, dst.rm(), dst.offset());
1088 }
1089 vs(kSimd128ScratchReg, dst_reg, 0, VSew::E8);
1090 break;
1091 }
1092 default:
1093 UNREACHABLE();
1094 }
1095}
1096
1097void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
1098 DCHECK_NE(dst, src);
1099 // TODO(ksreten): Handle different sizes here.
1100 MacroAssembler::Move(dst, src);
1101}
1102
1103void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
1104 ValueKind kind) {
1105 DCHECK_NE(dst, src);
1106 if (kind != kS128) {
1107 MacroAssembler::Move(dst, src);
1108 } else {
1109 VU.set(kScratchReg, E8, m1);
1110 MacroAssembler::vmv_vv(dst.toV(), src.toV());
1111 }
1112}
1113
1114void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
1115 RecordUsedSpillOffset(offset);
1116 MemOperand dst = liftoff::GetStackSlot(offset);
1117 switch (kind) {
1118 case kI32:
1119 case kRef:
1120 case kRefNull:
1121 Sw(reg.gp(), dst);
1122 break;
1123 case kI64:
1124 Sw(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
1125 Sw(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
1126 break;
1127 case kF32:
1128 StoreFloat(reg.fp(), dst);
1129 break;
1130 case kF64:
1131 StoreDouble(reg.fp(), dst);
1132 break;
1133 case kS128: {
1134 VU.set(kScratchReg, E8, m1);
1135 Register dst_reg = dst.offset() == 0 ? dst.rm() : kScratchReg;
1136 if (dst.offset() != 0) {
1137 AddWord(kScratchReg, dst.rm(), dst.offset());
1138 }
1139 vs(reg.fp().toV(), dst_reg, 0, VSew::E8);
1140 break;
1141 }
1142 default:
1143 UNREACHABLE();
1144 }
1145}
1146
1147void LiftoffAssembler::Spill(int offset, WasmValue value) {
1148 RecordUsedSpillOffset(offset);
1149 MemOperand dst = liftoff::GetStackSlot(offset);
1150 UseScratchRegisterScope assembler_temps(this);
1151 Register tmp = assembler_temps.Acquire();
1152 switch (value.type().kind()) {
1153 case kI32:
1154 case kRef:
1155 case kRefNull: {
1156 MacroAssembler::li(tmp, Operand(value.to_i32()));
1157 Sw(tmp, dst);
1158 break;
1159 }
1160 case kI64: {
1161 int32_t low_word = value.to_i64();
1162 int32_t high_word = value.to_i64() >> 32;
1163 MacroAssembler::li(tmp, Operand(low_word));
1164 Sw(tmp, liftoff::GetHalfStackSlot(offset, kLowWord));
1165 MacroAssembler::li(tmp, Operand(high_word));
1166 Sw(tmp, liftoff::GetHalfStackSlot(offset, kHighWord));
1167 break;
1168 break;
1169 }
1170 default:
1171 // kWasmF32 and kWasmF64 are unreachable, since those
1172 // constants are not tracked.
1173 UNREACHABLE();
1174 }
1175}
1176
1177void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
1178 MemOperand src = liftoff::GetStackSlot(offset);
1179 switch (kind) {
1180 case kI32:
1181 case kRef:
1182 case kRefNull:
1183 Lw(reg.gp(), src);
1184 break;
1185 case kI64:
1186 Lw(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
1187 Lw(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
1188 break;
1189 case kF32:
1190 LoadFloat(reg.fp(), src);
1191 break;
1192 case kF64:
1193 LoadDouble(reg.fp(), src);
1194 break;
1195 case kS128: {
1196 VU.set(kScratchReg, E8, m1);
1197 Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg;
1198 if (src.offset() != 0) {
1199 MacroAssembler::AddWord(src_reg, src.rm(), src.offset());
1200 }
1201 vl(reg.fp().toV(), src_reg, 0, E8);
1202 break;
1203 }
1204 default:
1205 UNREACHABLE();
1206 }
1207}
1208
1209void LiftoffAssembler::FillI64Half(Register reg, int offset, RegPairHalf half) {
1210 Lw(reg, liftoff::GetHalfStackSlot(offset, half));
1211}
1212
1213void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
1214 DCHECK_LT(0, size);
1215 RecordUsedSpillOffset(start + size);
1216
1217 // TODO(riscv32): check
1218
1219 if (size <= 12 * kStackSlotSize) {
1220 // Special straight-line code for up to 12 slots. Generates one
1221 // instruction per slot (<= 12 instructions total).
1222 uint32_t remainder = size;
1223 for (; remainder >= kStackSlotSize; remainder -= kStackSlotSize) {
1224 Sw(zero_reg, liftoff::GetStackSlot(start + remainder));
1225 Sw(zero_reg, liftoff::GetStackSlot(start + remainder - 4));
1226 }
1227 DCHECK(remainder == 4 || remainder == 0);
1228 if (remainder) {
1229 Sw(zero_reg, liftoff::GetStackSlot(start + remainder));
1230 }
1231 } else {
1232 // General case for bigger counts (12 instructions).
1233 // Use a0 for start address (inclusive), a1 for end address (exclusive).
1234 Push(a1, a0);
1235 AddWord(a0, fp, Operand(-start - size));
1236 AddWord(a1, fp, Operand(-start));
1237
1238 Label loop;
1239 bind(&loop);
1240 Sw(zero_reg, MemOperand(a0));
1241 addi(a0, a0, kSystemPointerSize);
1242 BranchShort(&loop, ne, a0, Operand(a1));
1243
1244 Pop(a1, a0);
1245 }
1246}
1247
1248void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) {
1249 // return high == 0 ? 32 + CLZ32(low) : CLZ32(high);
1250 Label done;
1251 Label high_is_zero;
1252 Branch(&high_is_zero, eq, src.high_gp(), Operand(zero_reg));
1253
1254 Clz32(dst.low_gp(), src.high_gp());
1255 jmp(&done);
1256
1257 bind(&high_is_zero);
1258 Clz32(dst.low_gp(), src.low_gp());
1259 AddWord(dst.low_gp(), dst.low_gp(), Operand(32));
1260
1261 bind(&done);
1262 mv(dst.high_gp(), zero_reg); // High word of result is always 0.
1263}
1264
1265void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
1266 // return low == 0 ? 32 + CTZ32(high) : CTZ32(low);
1267 Label done;
1268 Label low_is_zero;
1269 Branch(&low_is_zero, eq, src.low_gp(), Operand(zero_reg));
1270
1271 Ctz32(dst.low_gp(), src.low_gp());
1272 jmp(&done);
1273
1274 bind(&low_is_zero);
1275 Ctz32(dst.low_gp(), src.high_gp());
1276 AddWord(dst.low_gp(), dst.low_gp(), Operand(32));
1277
1278 bind(&done);
1279 mv(dst.high_gp(), zero_reg); // High word of result is always 0.
1280}
1281
1282bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
1283 LiftoffRegister src) {
1284 // Produce partial popcnts in the two dst registers.
1285 Register src1 = src.high_gp() == dst.low_gp() ? src.high_gp() : src.low_gp();
1286 Register src2 = src.high_gp() == dst.low_gp() ? src.low_gp() : src.high_gp();
1287 MacroAssembler::Popcnt32(dst.low_gp(), src1, kScratchReg);
1288 MacroAssembler::Popcnt32(dst.high_gp(), src2, kScratchReg);
1289 // Now add the two into the lower dst reg and clear the higher dst reg.
1290 AddWord(dst.low_gp(), dst.low_gp(), dst.high_gp());
1291 mv(dst.high_gp(), zero_reg);
1292 return true;
1293}
1294
1295void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
1296 MacroAssembler::Mul(dst, lhs, rhs);
1297}
1298
1299void LiftoffAssembler::emit_i32_muli(Register dst, Register lhs, int32_t imm) {
1300 if (base::bits::IsPowerOfTwo(imm)) {
1301 emit_i32_shli(dst, lhs, base::bits::WhichPowerOfTwo(imm));
1302 return;
1303 }
1304 UseScratchRegisterScope temps{this};
1305 Register scratch = temps.Acquire();
1306 li(scratch, Operand{imm});
1307 MacroAssembler::Mul(dst, lhs, scratch);
1308}
1309
1310void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
1311 Label* trap_div_by_zero,
1312 Label* trap_div_unrepresentable) {
1313 MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
1314
1315 // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable.
1316 MacroAssembler::CompareI(kScratchReg, lhs, Operand(kMinInt), ne);
1317 MacroAssembler::CompareI(kScratchReg2, rhs, Operand(-1), ne);
1318 add(kScratchReg, kScratchReg, kScratchReg2);
1319 MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
1320 Operand(zero_reg));
1321
1322 MacroAssembler::Div(dst, lhs, rhs);
1323}
1324
1325void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
1326 Label* trap_div_by_zero) {
1327 MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
1328 MacroAssembler::Divu(dst, lhs, rhs);
1329}
1330
1331void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
1332 Label* trap_div_by_zero) {
1333 MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
1334 MacroAssembler::Mod(dst, lhs, rhs);
1335}
1336
1337void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
1338 Label* trap_div_by_zero) {
1339 MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
1340 MacroAssembler::Modu(dst, lhs, rhs);
1341}
1342
1343#define I32_BINOP(name, instruction) \
1344 void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
1345 Register rhs) { \
1346 instruction(dst, lhs, rhs); \
1347 }
1348
1349// clang-format off
1350I32_BINOP(add, add)
1351I32_BINOP(sub, sub)
1352I32_BINOP(and, and_)
1353I32_BINOP(or, or_)
1354I32_BINOP(xor, xor_)
1355// clang-format on
1356
1357#undef I32_BINOP
1358
1359#define I32_BINOP_I(name, instruction) \
1360 void LiftoffAssembler::emit_i32_##name##i(Register dst, Register lhs, \
1361 int32_t imm) { \
1362 instruction(dst, lhs, Operand(imm)); \
1363 }
1364
1365// clang-format off
1366I32_BINOP_I(add, AddWord)
1367I32_BINOP_I(sub, SubWord)
1368I32_BINOP_I(and, And)
1369I32_BINOP_I(or, Or)
1370I32_BINOP_I(xor, Xor)
1371// clang-format on
1372
1373#undef I32_BINOP_I
1374
1375void LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
1376 MacroAssembler::Clz32(dst, src);
1377}
1378
1379void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
1380 MacroAssembler::Ctz32(dst, src);
1381}
1382
1383bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
1384 MacroAssembler::Popcnt32(dst, src, kScratchReg);
1385 return true;
1386}
1387
1388#define I32_SHIFTOP(name, instruction) \
1389 void LiftoffAssembler::emit_i32_##name(Register dst, Register src, \
1390 Register amount) { \
1391 instruction(dst, src, amount); \
1392 }
1393#define I32_SHIFTOP_I(name, instruction) \
1394 void LiftoffAssembler::emit_i32_##name##i(Register dst, Register src, \
1395 int amount) { \
1396 instruction(dst, src, amount & 31); \
1397 }
1398
1399I32_SHIFTOP(shl, sll)
1400I32_SHIFTOP(sar, sra)
1401I32_SHIFTOP(shr, srl)
1402
1403I32_SHIFTOP_I(shl, slli)
1404I32_SHIFTOP_I(sar, srai)
1405I32_SHIFTOP_I(shr, srli)
1406
1407#undef I32_SHIFTOP
1408#undef I32_SHIFTOP_I
1409
1410void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
1411 LiftoffRegister rhs) {
1412 MacroAssembler::MulPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
1413 lhs.high_gp(), rhs.low_gp(), rhs.high_gp(),
1414 kScratchReg, kScratchReg2);
1415}
1416
1417// Implemented by the host function in external-reference.h(Call to host
1418// function wasm::xxx).
1419bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
1420 LiftoffRegister rhs,
1421 Label* trap_div_by_zero,
1422 Label* trap_div_unrepresentable) {
1423 return false;
1424}
1425
1426bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
1427 LiftoffRegister rhs,
1428 Label* trap_div_by_zero) {
1429 return false;
1430}
1431
1432bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
1433 LiftoffRegister rhs,
1434 Label* trap_div_by_zero) {
1435 return false;
1436}
1437
1438bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
1439 LiftoffRegister rhs,
1440 Label* trap_div_by_zero) {
1441 return false;
1442}
1443
1444namespace liftoff {
1445
1446inline bool IsRegInRegPair(LiftoffRegister pair, Register reg) {
1447 DCHECK(pair.is_gp_pair());
1448 return pair.low_gp() == reg || pair.high_gp() == reg;
1449}
1450
1451inline void Emit64BitShiftOperation(
1452 LiftoffAssembler* assm, LiftoffRegister dst, LiftoffRegister src,
1453 Register amount,
1454 void (MacroAssembler::*emit_shift)(Register, Register, Register, Register,
1455 Register, Register, Register)) {
1456 LiftoffRegList pinned{dst, src, amount};
1457
1458 // If some of destination registers are in use, get another, unused pair.
1459 // That way we prevent overwriting some input registers while shifting.
1460 // Do this before any branch so that the cache state will be correct for
1461 // all conditions.
1462 Register amount_capped =
1463 pinned.set(assm->GetUnusedRegister(kGpReg, pinned).gp());
1464 assm->And(amount_capped, amount, Operand(63));
1465 if (liftoff::IsRegInRegPair(dst, amount) || dst.overlaps(src)) {
1466 // Do the actual shift.
1467 LiftoffRegister tmp = assm->GetUnusedRegister(kGpRegPair, pinned);
1468 (assm->*emit_shift)(tmp.low_gp(), tmp.high_gp(), src.low_gp(),
1469 src.high_gp(), amount_capped, kScratchReg,
1470 kScratchReg2);
1471
1472 // Place result in destination register.
1473 assm->MacroAssembler::Move(dst.high_gp(), tmp.high_gp());
1474 assm->MacroAssembler::Move(dst.low_gp(), tmp.low_gp());
1475 } else {
1476 (assm->*emit_shift)(dst.low_gp(), dst.high_gp(), src.low_gp(),
1477 src.high_gp(), amount_capped, kScratchReg,
1478 kScratchReg2);
1479 }
1480}
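// Emit64BitShiftOperation caps the shift amount at 63 and forwards to the
// given MacroAssembler pair-shift helper; when the destination pair overlaps
// the inputs it shifts into a spare register pair first and then moves the
// result over, so no source word is clobbered before it is consumed.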
1481} // namespace liftoff
1482
1483void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
1484 LiftoffRegister rhs) {
1485 MacroAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
1486 lhs.high_gp(), rhs.low_gp(), rhs.high_gp(),
1487 kScratchReg, kScratchReg2);
1488}
1489
1490void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
1491 int64_t imm) {
1492 LiftoffRegister imm_reg =
1493 GetUnusedRegister(kGpRegPair, LiftoffRegList{dst, lhs});
1494 int32_t imm_low_word = static_cast<int32_t>(imm);
1495 int32_t imm_high_word = static_cast<int32_t>(imm >> 32);
1496
1497 // TODO(riscv32): are there some optimization we can make without
1498 // materializing?
1499 MacroAssembler::li(imm_reg.low_gp(), imm_low_word);
1500 MacroAssembler::li(imm_reg.high_gp(), imm_high_word);
1501 MacroAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
1502 lhs.high_gp(), imm_reg.low_gp(), imm_reg.high_gp(),
1503 kScratchReg, kScratchReg2);
1504}
1505
1506void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
1507 LiftoffRegister rhs) {
1508 MacroAssembler::SubPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
1509 lhs.high_gp(), rhs.low_gp(), rhs.high_gp(),
1510 kScratchReg, kScratchReg2);
1511}
1512
1513void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
1514 Register amount) {
1515 ASM_CODE_COMMENT(this);
1516 liftoff::Emit64BitShiftOperation(this, dst, src, amount,
1517 &MacroAssembler::ShlPair);
1518}
1519
1520void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src,
1521 int amount) {
1522 ASM_CODE_COMMENT(this);
1523 UseScratchRegisterScope temps(this);
1524 LiftoffRegister temp = GetUnusedRegister(kGpReg, LiftoffRegList{dst, src});
1525 temps.Include(temp.gp());
1526 // {src.low_gp()} will still be needed after writing {dst.high_gp()} and
1527 // {dst.low_gp()}.
1528 Register src_low = liftoff::EnsureNoAlias(this, src.low_gp(), dst, &temps);
1529 Register src_high = liftoff::EnsureNoAlias(this, src.high_gp(), dst, &temps);
1530 // {src.high_gp()} will still be needed after writing {dst.high_gp()}.
1531 DCHECK_NE(dst.low_gp(), kScratchReg);
1532 DCHECK_NE(dst.high_gp(), kScratchReg);
1533
1534 MacroAssembler::ShlPair(dst.low_gp(), dst.high_gp(), src_low, src_high,
1535 amount & 63, kScratchReg, kScratchReg2);
1536}
1537
1538void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
1539 Register amount) {
1540 liftoff::Emit64BitShiftOperation(this, dst, src, amount,
1541 &MacroAssembler::SarPair);
1542}
1543
1544void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src,
1545 int amount) {
1546 ASM_CODE_COMMENT(this);
1547 UseScratchRegisterScope temps(this);
1548 LiftoffRegister temp = GetUnusedRegister(kGpReg, LiftoffRegList{dst, src});
1549 temps.Include(temp.gp());
1550 // {src.low_gp()} will still be needed after writing {dst.high_gp()} and
1551 // {dst.low_gp()}.
1552 Register src_low = liftoff::EnsureNoAlias(this, src.low_gp(), dst, &temps);
1553 Register src_high = liftoff::EnsureNoAlias(this, src.high_gp(), dst, &temps);
1554 DCHECK_NE(dst.low_gp(), kScratchReg);
1555 DCHECK_NE(dst.high_gp(), kScratchReg);
1556
1557 MacroAssembler::SarPair(dst.low_gp(), dst.high_gp(), src_low, src_high,
1558 amount & 63, kScratchReg, kScratchReg2);
1559}
1560
1561void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
1562 Register amount) {
1563 liftoff::Emit64BitShiftOperation(this, dst, src, amount,
1564 &MacroAssembler::ShrPair);
1565}
1566
1567void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src,
1568 int amount) {
1569 ASM_CODE_COMMENT(this);
1570 UseScratchRegisterScope temps(this);
1571 LiftoffRegister temp = GetUnusedRegister(kGpReg, LiftoffRegList{dst, src});
1572 temps.Include(temp.gp());
1573 // {src.low_gp()} will still be needed after writing {dst.high_gp()} and
1574 // {dst.low_gp()}.
1575 Register src_low = liftoff::EnsureNoAlias(this, src.low_gp(), dst, &temps);
1576 Register src_high = liftoff::EnsureNoAlias(this, src.high_gp(), dst, &temps);
1577 DCHECK_NE(dst.low_gp(), kScratchReg);
1578 DCHECK_NE(dst.high_gp(), kScratchReg);
1579
1580 MacroAssembler::ShrPair(dst.low_gp(), dst.high_gp(), src_low, src_high,
1581 amount & 63, kScratchReg, kScratchReg2);
1582}
1583
1584#define FP_UNOP_RETURN_FALSE(name) \
1585 bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
1586 return false; \
1587 }
1588
1589FP_UNOP_RETURN_FALSE(f64_ceil)
1590FP_UNOP_RETURN_FALSE(f64_floor)
1591FP_UNOP_RETURN_FALSE(f64_trunc)
1592FP_UNOP_RETURN_FALSE(f64_nearest_int)
1593
1594#undef FP_UNOP_RETURN_FALSE
1595
1596bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
1597 LiftoffRegister dst,
1598 LiftoffRegister src, Label* trap) {
1599 switch (opcode) {
1600 case kExprI32ConvertI64:
1601 MacroAssembler::Move(dst.gp(), src.low_gp());
1602 return true;
1603 case kExprI32SConvertF32:
1604 case kExprI32UConvertF32:
1605 case kExprI32SConvertF64:
1606 case kExprI32UConvertF64:
1607 case kExprI64SConvertF32:
1608 case kExprI64UConvertF32:
1609 case kExprI64SConvertF64:
1610 case kExprI64UConvertF64:
1611 case kExprF32ConvertF64: {
1612 // real conversion, if src is out-of-bound of target integer types,
1613 // kScratchReg is set to 0
1614 switch (opcode) {
1615 case kExprI32SConvertF32:
1616 Trunc_w_s(dst.gp(), src.fp(), kScratchReg);
1617 break;
1618 case kExprI32UConvertF32:
1619 Trunc_uw_s(dst.gp(), src.fp(), kScratchReg);
1620 break;
1621 case kExprI32SConvertF64:
1622 Trunc_w_d(dst.gp(), src.fp(), kScratchReg);
1623 break;
1624 case kExprI32UConvertF64:
1625 Trunc_uw_d(dst.gp(), src.fp(), kScratchReg);
1626 break;
1627 case kExprF32ConvertF64:
1628 fcvt_s_d(dst.fp(), src.fp());
1629 break;
1630 case kExprI64SConvertF32:
1631 case kExprI64UConvertF32:
1632 case kExprI64SConvertF64:
1633 case kExprI64UConvertF64:
1634 return false;
1635 default:
1636 UNREACHABLE();
1637 }
1638
1639 // Checking if trap.
1640 if (trap != nullptr) {
1641 MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
1642 }
1643
1644 return true;
1645 }
1646 case kExprI32ReinterpretF32:
1647 MacroAssembler::ExtractLowWordFromF64(dst.gp(), src.fp());
1648 return true;
1649 case kExprI64SConvertI32:
1650 MacroAssembler::Move(dst.low_gp(), src.gp());
1651 MacroAssembler::Move(dst.high_gp(), src.gp());
1652 srai(dst.high_gp(), dst.high_gp(), 31);
1653 return true;
1654 case kExprI64UConvertI32:
1655 MacroAssembler::Move(dst.low_gp(), src.gp());
1656 MacroAssembler::Move(dst.high_gp(), zero_reg);
1657 return true;
1658 case kExprI64ReinterpretF64:
1659 SubWord(sp, sp, kDoubleSize);
1660 StoreDouble(src.fp(), MemOperand(sp, 0));
1661 Lw(dst.low_gp(), MemOperand(sp, 0));
1662 Lw(dst.high_gp(), MemOperand(sp, 4));
1663 AddWord(sp, sp, kDoubleSize);
1664 return true;
1665 case kExprF32SConvertI32: {
1666 MacroAssembler::Cvt_s_w(dst.fp(), src.gp());
1667 return true;
1668 }
1669 case kExprF32UConvertI32:
1670 MacroAssembler::Cvt_s_uw(dst.fp(), src.gp());
1671 return true;
1672 case kExprF32ReinterpretI32:
1673 fmv_w_x(dst.fp(), src.gp());
1674 return true;
1675 case kExprF64SConvertI32: {
1676 MacroAssembler::Cvt_d_w(dst.fp(), src.gp());
1677 return true;
1678 }
1679 case kExprF64UConvertI32:
1680 MacroAssembler::Cvt_d_uw(dst.fp(), src.gp());
1681 return true;
1682 case kExprF64ConvertF32:
1683 fcvt_d_s(dst.fp(), src.fp());
1684 return true;
1685 case kExprF64ReinterpretI64:
1686 SubWord(sp, sp, kDoubleSize);
1687 Sw(src.low_gp(), MemOperand(sp, 0));
1688 Sw(src.high_gp(), MemOperand(sp, 4));
1689 LoadDouble(dst.fp(), MemOperand(sp, 0));
1690 AddWord(sp, sp, kDoubleSize);
1691 return true;
1692 case kExprI32SConvertSatF32: {
1693 fcvt_w_s(dst.gp(), src.fp(), RTZ);
1694 Clear_if_nan_s(dst.gp(), src.fp());
1695 return true;
1696 }
1697 case kExprI32UConvertSatF32: {
1698 fcvt_wu_s(dst.gp(), src.fp(), RTZ);
1699 Clear_if_nan_s(dst.gp(), src.fp());
1700 return true;
1701 }
1702 case kExprI32SConvertSatF64: {
1703 fcvt_w_d(dst.gp(), src.fp(), RTZ);
1704 Clear_if_nan_d(dst.gp(), src.fp());
1705 return true;
1706 }
1707 case kExprI32UConvertSatF64: {
1708 fcvt_wu_d(dst.gp(), src.fp(), RTZ);
1709 Clear_if_nan_d(dst.gp(), src.fp());
1710 return true;
1711 }
1712 case kExprI64SConvertSatF32:
1713 case kExprI64UConvertSatF32:
1714 case kExprI64SConvertSatF64:
1715 case kExprI64UConvertSatF64:
1716 return false;
1717 default:
1718 return false;
1719 }
1720}
1721
1722void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
1723 LiftoffRegister lhs,
1724 uint8_t imm_lane_idx) {
1725 VU.set(kScratchReg, E32, m1);
1726 vslidedown_vi(kSimd128ScratchReg, lhs.fp().toV(), (imm_lane_idx << 0x1) + 1);
1727 vmv_xs(dst.high_gp(), kSimd128ScratchReg);
1728 vslidedown_vi(kSimd128ScratchReg, lhs.fp().toV(), imm_lane_idx << 0x1);
1729 vmv_xs(dst.low_gp(), kSimd128ScratchReg);
1730}
1731
1732void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
1733 slli(dst, src, 32 - 8);
1734 srai(dst, dst, 32 - 8);
1735}
1736
1737void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
1738 slli(dst, src, 32 - 16);
1739 srai(dst, dst, 32 - 16);
1740}
1741
1742void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
1743 LiftoffRegister src) {
1744 emit_i32_signextend_i8(dst.low_gp(), src.low_gp());
1745 srai(dst.high_gp(), dst.low_gp(), 31);
1746}
1747
1748void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
1749 LiftoffRegister src) {
1750 emit_i32_signextend_i16(dst.low_gp(), src.low_gp());
1751 srai(dst.high_gp(), dst.low_gp(), 31);
1752}
1753
1754void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
1755 LiftoffRegister src) {
1756 mv(dst.low_gp(), src.low_gp());
1757 srai(dst.high_gp(), src.low_gp(), 31);
1758}
1759
1760void LiftoffAssembler::emit_jump(Label* label) {
1761 MacroAssembler::Branch(label);
1762}
1763
1764void LiftoffAssembler::emit_jump(Register target) {
1765 MacroAssembler::Jump(target);
1766}
1767
1768void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
1769 ValueKind kind, Register lhs,
1770 Register rhs,
1771 const FreezeCacheState& frozen) {
1772 if (rhs == no_reg) {
1773 DCHECK(kind == kI32);
1774 MacroAssembler::Branch(label, cond, lhs, Operand(zero_reg));
1775 } else {
1776 DCHECK((kind == kI32) ||
1777 (is_reference(kind) && (cond == kEqual || cond == kNotEqual)));
1778 MacroAssembler::Branch(label, cond, lhs, Operand(rhs));
1779 }
1780}
1781
1782void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label,
1783 Register lhs, int32_t imm,
1784 const FreezeCacheState& frozen) {
1785 MacroAssembler::Branch(label, cond, lhs, Operand(imm));
1786}
1787
1788void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
1789 MacroAssembler::Sltu(dst, src, 1);
1790}
1791
1792void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
1793 Register lhs, Register rhs) {
1794 MacroAssembler::CompareI(dst, lhs, Operand(rhs), cond);
1795}
1796
1797void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
1798 Register tmp = GetUnusedRegister(kGpReg, LiftoffRegList{src, dst}).gp();
1799 Sltu(tmp, src.low_gp(), 1);
1800 Sltu(dst, src.high_gp(), 1);
1801 and_(dst, dst, tmp);
1802}
1803
1804namespace liftoff {
1805inline Condition cond_make_unsigned(Condition cond) {
1806 switch (cond) {
1807 case kLessThan:
1808 return kUnsignedLessThan;
1809 case kLessThanEqual:
1810 return kUnsignedLessThanEqual;
1811 case kGreaterThan:
1812 return kUnsignedGreaterThan;
1813 case kGreaterThanEqual:
1814 return kUnsignedGreaterThanEqual;
1815 default:
1816 return cond;
1817 }
1818}
1819} // namespace liftoff
1820
1821void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
1822 LiftoffRegister lhs,
1823 LiftoffRegister rhs) {
1824 ASM_CODE_COMMENT(this);
1825 Label low, cont;
1826
1827 // For signed i64 comparisons, we still need to use unsigned comparison for
1828 // the low word (the only bit carrying signedness information is the MSB in
1829 // the high word).
1830 Condition unsigned_cond = liftoff::cond_make_unsigned(cond);
1831
1832 Register tmp = dst;
1833 if (liftoff::IsRegInRegPair(lhs, dst) || liftoff::IsRegInRegPair(rhs, dst)) {
1834 tmp = GetUnusedRegister(kGpReg, LiftoffRegList{dst, lhs, rhs}).gp();
1835 }
1836
1837 // Write 1 initially in tmp register.
1838 MacroAssembler::li(tmp, 1);
1839
1840 // If high words are equal, then compare low words, else compare high.
1841 Branch(&low, eq, lhs.high_gp(), Operand(rhs.high_gp()));
1842
1843 Branch(&cont, cond, lhs.high_gp(), Operand(rhs.high_gp()));
1844 mv(tmp, zero_reg);
1845 Branch(&cont);
1846
1847 bind(&low);
1848 if (unsigned_cond == cond) {
1849 Branch(&cont, cond, lhs.low_gp(), Operand(rhs.low_gp()));
1850 mv(tmp, zero_reg);
1851 } else {
1852 Label lt_zero;
1853 Branch(&lt_zero, lt, lhs.high_gp(), Operand(zero_reg));
1854 Branch(&cont, unsigned_cond, lhs.low_gp(), Operand(rhs.low_gp()));
1855 mv(tmp, zero_reg);
1856 Branch(&cont);
1857 bind(&lt_zero);
1858 Branch(&cont, unsigned_cond, rhs.low_gp(), Operand(lhs.low_gp()));
1859 mv(tmp, zero_reg);
1860 Branch(&cont);
1861 }
1862 bind(&cont);
1863 // Move result to dst register if needed.
1864 MacroAssembler::Move(dst, tmp);
1865}
1866
1867void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) {
1868 UseScratchRegisterScope temps(this);
1869 Register scratch = temps.Acquire();
1870 SmiUntag(scratch, MemOperand(dst.gp(), offset));
1871 AddWord(scratch, scratch, Operand(1));
1872 SmiTag(scratch);
1873 Sw(scratch, MemOperand(dst.gp(), offset));
1874}
1875
1876void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
1877 Register offset_reg, uintptr_t offset_imm,
1878 LoadType type,
1879 LoadTransformationKind transform,
1880 uint32_t* protected_load_pc,
1881 bool i64_offset) {
1882 UseScratchRegisterScope temps(this);
1883 Register scratch = temps.Acquire();
1884 MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
1885 VRegister dst_v = dst.fp().toV();
1886 auto trapper = [protected_load_pc](int offset) {
1887 if (protected_load_pc) *protected_load_pc = static_cast<uint32_t>(offset);
1888 };
1889 MachineType memtype = type.mem_type();
1890 if (transform == LoadTransformationKind::kExtend) {
1891 // TODO(RISCV): need to confirm the performance impact of using floating
1892 // point registers.
1893 LoadDouble(kScratchDoubleReg, src_op, trapper);
1894 if (memtype == MachineType::Int8()) {
1895 VU.set(kScratchReg, E64, m1);
1896 vfmv_vf(kSimd128ScratchReg, kScratchDoubleReg);
1897 VU.set(kScratchReg, E16, m1);
1898 vsext_vf2(dst_v, kSimd128ScratchReg);
1899 } else if (memtype == MachineType::Uint8()) {
1900 VU.set(kScratchReg, E64, m1);
1901 vfmv_vf(kSimd128ScratchReg, kScratchDoubleReg);
1902 VU.set(kScratchReg, E16, m1);
1903 vzext_vf2(dst_v, kSimd128ScratchReg);
1904 } else if (memtype == MachineType::Int16()) {
1905 VU.set(kScratchReg, E64, m1);
1906 vfmv_vf(kSimd128ScratchReg, kScratchDoubleReg);
1907 VU.set(kScratchReg, E32, m1);
1908 vsext_vf2(dst_v, kSimd128ScratchReg);
1909 } else if (memtype == MachineType::Uint16()) {
1910 VU.set(kScratchReg, E64, m1);
1911 vfmv_vf(kSimd128ScratchReg, kScratchDoubleReg);
1912 VU.set(kScratchReg, E32, m1);
1913 vzext_vf2(dst_v, kSimd128ScratchReg);
1914 } else if (memtype == MachineType::Int32()) {
1915 VU.set(kScratchReg, E64, m1);
1916 vfmv_vf(kSimd128ScratchReg, kScratchDoubleReg);
1917 vsext_vf2(dst_v, kSimd128ScratchReg);
1918 } else if (memtype == MachineType::Uint32()) {
1919 VU.set(kScratchReg, E64, m1);
1920 vfmv_vf(kSimd128ScratchReg, kScratchDoubleReg);
1921 vzext_vf2(dst_v, kSimd128ScratchReg);
1922 }
1923 } else if (transform == LoadTransformationKind::kZeroExtend) {
1924 vxor_vv(dst_v, dst_v, dst_v);
1925 if (memtype == MachineType::Int32()) {
1926 VU.set(kScratchReg, E32, m1);
1927 Lw(scratch, src_op, trapper);
1928 vmv_sx(dst_v, scratch);
1929 } else {
1930 DCHECK_EQ(MachineType::Int64(), memtype);
1931 VU.set(kScratchReg, E64, m1);
1932 LoadDouble(kScratchDoubleReg, src_op, trapper);
1933 vfmv_sf(dst_v, kScratchDoubleReg);
1934 }
1935 } else {
1936 DCHECK_EQ(LoadTransformationKind::kSplat, transform);
1937 if (memtype == MachineType::Int8()) {
1938 VU.set(kScratchReg, E8, m1);
1939 Lb(scratch, src_op, trapper);
1940 vmv_vx(dst_v, scratch);
1941 } else if (memtype == MachineType::Int16()) {
1942 VU.set(kScratchReg, E16, m1);
1943 Lh(scratch, src_op, trapper);
1944 vmv_vx(dst_v, scratch);
1945 } else if (memtype == MachineType::Int32()) {
1946 VU.set(kScratchReg, E32, m1);
1947 Lw(scratch, src_op, trapper);
1948 vmv_vx(dst_v, scratch);
1949 } else if (memtype == MachineType::Int64()) {
1950 VU.set(kScratchReg, E64, m1);
1951 LoadDouble(kScratchDoubleReg, src_op, trapper);
1952 vfmv_vf(dst_v, kScratchDoubleReg);
1953 }
1954 }
1955 if (protected_load_pc) {
1956 DCHECK(InstructionAt(*protected_load_pc)->IsLoad());
1957 }
1958}
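// Annotation (illustrative, not part of the upstream source): summary of the
// three transform kinds handled above, using s128.load8x8_s as an example of
// kExtend: eight i8 lanes are loaded as one 64-bit chunk and sign-extended to
// eight i16 lanes (vsext_vf2 doubles the element width). kZeroExtend loads a
// single 32- or 64-bit value into lane 0 and clears the remaining lanes, and
// kSplat broadcasts the loaded scalar to every lane via vmv_vx / vfmv_vf.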
1959
1960void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
1961 Register addr, Register offset_reg,
1962 uintptr_t offset_imm, LoadType type,
1963 uint8_t laneidx, uint32_t* protected_load_pc,
1964 bool /* i64_offset */) {
1965 UseScratchRegisterScope temps(this);
1966 Register scratch = temps.Acquire();
1967 MemOperand src_op = liftoff::GetMemOp(this, addr, offset_reg, offset_imm);
1968 MachineType mem_type = type.mem_type();
1969 auto trapper = [protected_load_pc](int offset) {
1970 if (protected_load_pc) *protected_load_pc = static_cast<uint32_t>(offset);
1971 };
1972 if (mem_type == MachineType::Int8()) {
1973 Lbu(scratch, src_op, trapper);
1974 VU.set(kScratchReg, E32, m1);
1975 li(kScratchReg, 0x1 << laneidx);
1976 vmv_sx(v0, kScratchReg);
1977 VU.set(kScratchReg, E8, m1);
1978 vmerge_vx(dst.fp().toV(), scratch, dst.fp().toV());
1979 } else if (mem_type == MachineType::Int16()) {
1980 Lhu(scratch, src_op, trapper);
1981 VU.set(kScratchReg, E16, m1);
1982 li(kScratchReg, 0x1 << laneidx);
1983 vmv_sx(v0, kScratchReg);
1984 vmerge_vx(dst.fp().toV(), scratch, dst.fp().toV());
1985 } else if (mem_type == MachineType::Int32()) {
1986 Lw(scratch, src_op, trapper);
1987 VU.set(kScratchReg, E32, m1);
1988 li(kScratchReg, 0x1 << laneidx);
1989 vmv_sx(v0, kScratchReg);
1990 vmerge_vx(dst.fp().toV(), scratch, dst.fp().toV());
1991 } else if (mem_type == MachineType::Int64()) {
1992 LoadDouble(kScratchDoubleReg, src_op, trapper);
1993 VU.set(kScratchReg, E64, m1);
1994 li(kScratchReg, 0x1 << laneidx);
1995 vmv_sx(v0, kScratchReg);
1996 vfmerge_vf(dst.fp().toV(), kScratchDoubleReg, dst.fp().toV());
1997 } else {
1998 UNREACHABLE();
1999 }
2000 if (protected_load_pc) {
2001 DCHECK(InstructionAt(*protected_load_pc)->IsLoad());
2002 }
2003}
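// Annotation (illustrative, not part of the upstream source): the lane insert
// above relies on the RVV mask register. li(kScratchReg, 1 << laneidx) builds a
// one-hot mask that is moved into v0, so vmerge_vx / vfmerge_vf overwrite only
// the selected lane with the freshly loaded scalar while every other lane of
// dst keeps its previous value.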
2004
2005void LiftoffAssembler::StoreLane(Register dst, Register offset,
2006 uintptr_t offset_imm, LiftoffRegister src,
2007 StoreType type, uint8_t lane,
2008 uint32_t* protected_store_pc,
2009 bool /* i64_offset */) {
2010 MemOperand dst_op = liftoff::GetMemOp(this, dst, offset, offset_imm);
2011 auto trapper = [protected_store_pc](int offset) {
2012 if (protected_store_pc) *protected_store_pc = static_cast<uint32_t>(offset);
2013 };
2014 MachineRepresentation rep = type.mem_rep();
2015 if (rep == MachineRepresentation::kWord8) {
2016 VU.set(kScratchReg, E8, m1);
2017 vslidedown_vi(kSimd128ScratchReg, src.fp().toV(), lane);
2018 vmv_xs(kScratchReg, kSimd128ScratchReg);
2019 Sb(kScratchReg, dst_op, trapper);
2020 } else if (rep == MachineRepresentation::kWord16) {
2021 VU.set(kScratchReg, E16, m1);
2022 vslidedown_vi(kSimd128ScratchReg, src.fp().toV(), lane);
2023 vmv_xs(kScratchReg, kSimd128ScratchReg);
2024 Sh(kScratchReg, dst_op, trapper);
2025 } else if (rep == MachineRepresentation::kWord32) {
2026 VU.set(kScratchReg, E32, m1);
2027 vslidedown_vi(kSimd128ScratchReg, src.fp().toV(), lane);
2028 vmv_xs(kScratchReg, kSimd128ScratchReg);
2029 Sw(kScratchReg, dst_op, trapper);
2030 } else {
2031 DCHECK_EQ(MachineRepresentation::kWord64, rep);
2032 VU.set(kScratchReg, E64, m1);
2033 vslidedown_vi(kSimd128ScratchReg, src.fp().toV(), lane);
2034 vfmv_fs(kScratchDoubleReg, kSimd128ScratchReg);
2035 StoreDouble(kScratchDoubleReg, dst_op, trapper);
2036 }
2037 if (protected_store_pc) {
2038 DCHECK(InstructionAt(*protected_store_pc)->IsStore());
2039 }
2040}
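// Annotation (illustrative, not part of the upstream source): StoreLane is the
// mirror image of LoadLane. vslidedown_vi moves the requested lane down to
// element 0, the element is then copied to a scalar (vmv_xs) or FP register
// (vfmv_fs), and a plain Sb/Sh/Sw/StoreDouble writes it out while the trapper
// records the store PC for the protected-memory trap handler.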
2041
2042void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
2043 LiftoffRegister src) {
2044 VU.set(kScratchReg, E32, m1);
2045 vmv_vi(v0, 0b0101);
2046 vmv_vx(kSimd128ScratchReg, src.high_gp());
2047 vmerge_vx(dst.fp().toV(), src.low_gp(), kSimd128ScratchReg);
2048}
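// Annotation (illustrative, not part of the upstream source): with SEW=32 and
// the 0b0101 mask, the merge above writes src.low_gp() into the even 32-bit
// elements and keeps the splatted src.high_gp() in the odd ones, which on this
// little-endian target is exactly the 64-bit pair {low, high} replicated into
// both i64x2 lanes.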
2049
2050void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
2051 LiftoffRegister src1,
2052 LiftoffRegister src2,
2053 uint8_t imm_lane_idx) {
2054 VU.set(kScratchReg, E32, m1);
2055 vmv_vx(kSimd128ScratchReg, src2.high_gp());
2056 vmv_sx(kSimd128ScratchReg, src2.low_gp());
2057 VU.set(kScratchReg, E64, m1);
2058 li(kScratchReg, 0x1 << imm_lane_idx);
2059 vmv_sx(v0, kScratchReg);
2060 vfmv_fs(kScratchDoubleReg, kSimd128ScratchReg);
2061 vfmerge_vf(dst.fp().toV(), kScratchDoubleReg, src1.fp().toV());
2062}
2063
2064void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
2065 LiftoffRegister rhs) {
2066 VU.set(kScratchReg, E64, m1);
2067 const int32_t kNaN = 0x7ff80000L, kNaNShift = 32;
2068 vmfeq_vv(v0, lhs.fp().toV(), lhs.fp().toV());
2069 vmfeq_vv(kSimd128ScratchReg, rhs.fp().toV(), rhs.fp().toV());
2070 vand_vv(v0, v0, kSimd128ScratchReg);
2071 li(kScratchReg, kNaN);
2072 li(kScratchReg2, kNaNShift);
2073 vmv_vx(kSimd128ScratchReg, kScratchReg);
2074 vsll_vx(kSimd128ScratchReg, kSimd128ScratchReg, kScratchReg2);
2075 vfmin_vv(kSimd128ScratchReg, rhs.fp().toV(), lhs.fp().toV(), Mask);
2076 vmv_vv(dst.fp().toV(), kSimd128ScratchReg);
2077}
2078
2079void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
2080 LiftoffRegister rhs) {
2081 VU.set(kScratchReg, E64, m1);
2082 const int32_t kNaN = 0x7ff80000L, kNaNShift = 32;
2083 vmfeq_vv(v0, lhs.fp().toV(), lhs.fp().toV());
2084 vmfeq_vv(kSimd128ScratchReg, rhs.fp().toV(), rhs.fp().toV());
2085 vand_vv(v0, v0, kSimd128ScratchReg);
2086 li(kScratchReg, kNaN);
2087 li(kScratchReg2, kNaNShift);
2088 vmv_vx(kSimd128ScratchReg, kScratchReg);
2089 vsll_vx(kSimd128ScratchReg, kSimd128ScratchReg, kScratchReg2);
2090 vfmax_vv(kSimd128ScratchReg, rhs.fp().toV(), lhs.fp().toV(), Mask);
2091 vmv_vv(dst.fp().toV(), kSimd128ScratchReg);
2092}
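// Annotation (illustrative, not part of the upstream source): in both
// reductions above, v0 ends up set only for lanes where lhs and rhs are both
// ordered (vmfeq of a value with itself fails exactly for NaN). The masked
// vfmin/vfmax then computes just those lanes, while the remaining lanes keep
// the canonical quiet-NaN pattern 0x7ff8000000000000 that was materialized by
// shifting 0x7ff80000 left by 32 bits.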
2093
2094 void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
2095 LiftoffRegister src) {
2096 VU.set(kScratchReg, E32, m1);
2097 // li(kScratchReg, 0x0006000400020000);
2098 li(kScratchReg, 0x00060004);
2099 vmv_vx(kSimd128ScratchReg, kScratchReg);
2100 li(kScratchReg, 0x00020000);
2101 vmv_sx(kSimd128ScratchReg, kScratchReg);
2102 // li(kScratchReg, 0x0007000500030001);
2103 li(kScratchReg, 0x00070005);
2104 vmv_vx(kSimd128ScratchReg3, kScratchReg);
2105 li(kScratchReg, 0x00030001);
2106 vmv_sx(kSimd128ScratchReg3, kScratchReg);
2107 VU.set(kScratchReg, E16, m1);
2108 vrgather_vv(kSimd128ScratchReg2, src.fp().toV(), kSimd128ScratchReg);
2109 vrgather_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg3);
2110 VU.set(kScratchReg, E16, mf2);
2111 vwadd_vv(dst.fp().toV(), kSimd128ScratchReg, kSimd128ScratchReg2);
2112}
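// Annotation (illustrative, not part of the upstream source): this and the
// three variants below use the same trick. Two gather indices ({0,2,4,...} and
// {1,3,5,...}) split the even and odd lanes of src into separate vectors, and a
// widening add (vwadd_vv / vwaddu_vv at fractional LMUL) then produces each
// pairwise sum at twice the element width, which is exactly the
// extadd_pairwise semantics.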
2113
2114 void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
2115 LiftoffRegister src) {
2116 VU.set(kScratchReg, E32, m1);
2117 // li(kScratchReg, 0x0006000400020000);
2118 li(kScratchReg, 0x00060004);
2119 vmv_vx(kSimd128ScratchReg, kScratchReg);
2120 li(kScratchReg, 0x00020000);
2121 vmv_sx(kSimd128ScratchReg, kScratchReg);
2122 // li(kScratchReg, 0x0007000500030001);
2123 li(kScratchReg, 0x00070005);
2124 vmv_vx(kSimd128ScratchReg3, kScratchReg);
2125 li(kScratchReg, 0x00030001);
2126 vmv_sx(kSimd128ScratchReg3, kScratchReg);
2127 VU.set(kScratchReg, E16, m1);
2128 vrgather_vv(kSimd128ScratchReg2, src.fp().toV(), kSimd128ScratchReg);
2129 vrgather_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg3);
2130 VU.set(kScratchReg, E16, mf2);
2131 vwaddu_vv(dst.fp().toV(), kSimd128ScratchReg, kSimd128ScratchReg2);
2132}
2133
2134 void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
2135 LiftoffRegister src) {
2136 VU.set(kScratchReg, E32, m1);
2137 // li(kScratchReg, 0x0E0C0A0806040200);
2138 li(kScratchReg, 0x0E0C0A08);
2139 vmv_vx(kSimd128ScratchReg, kScratchReg);
2140 li(kScratchReg, 0x06040200);
2141 vmv_sx(kSimd128ScratchReg, kScratchReg);
2142 // li(kScratchReg, 0x0F0D0B0907050301);
2143 li(kScratchReg, 0x0F0D0B09);
2144 vmv_vx(kSimd128ScratchReg3, kScratchReg);
2145 li(kScratchReg, 0x07050301);
2146 vmv_sx(kSimd128ScratchReg3, kScratchReg);
2147 VU.set(kScratchReg, E8, m1);
2148 vrgather_vv(kSimd128ScratchReg2, src.fp().toV(), kSimd128ScratchReg);
2149 vrgather_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg3);
2150 VU.set(kScratchReg, E8, mf2);
2151 vwadd_vv(dst.fp().toV(), kSimd128ScratchReg, kSimd128ScratchReg2);
2152}
2153
2154 void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
2155 LiftoffRegister src) {
2156 VU.set(kScratchReg, E32, m1);
2157 // li(kScratchReg, 0x0E0C0A0806040200);
2158 li(kScratchReg, 0x0E0C0A08);
2159 vmv_vx(kSimd128ScratchReg, kScratchReg);
2160 li(kScratchReg, 0x06040200);
2161 vmv_sx(kSimd128ScratchReg, kScratchReg);
2162 // li(kScratchReg, 0x0F0D0B0907050301);
2163 li(kScratchReg, 0x0F0D0B09);
2164 vmv_vx(kSimd128ScratchReg3, kScratchReg);
2165 li(kScratchReg, 0x07050301);
2166 vmv_sx(kSimd128ScratchReg3, kScratchReg);
2167 VU.set(kScratchReg, E8, m1);
2168 vrgather_vv(kSimd128ScratchReg2, src.fp().toV(), kSimd128ScratchReg);
2169 vrgather_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg3);
2170 VU.set(kScratchReg, E8, mf2);
2171 vwaddu_vv(dst.fp().toV(), kSimd128ScratchReg, kSimd128ScratchReg2);
2172}
2173
2174 void LiftoffAssembler::CallCWithStackBuffer(
2175 const std::initializer_list<VarState> args, const LiftoffRegister* rets,
2176 ValueKind return_kind, ValueKind out_argument_kind, int stack_bytes,
2177 ExternalReference ext_ref) {
2178 AddWord(sp, sp, Operand(-stack_bytes));
2179
2180 int arg_offset = 0;
2181 for (const VarState& arg : args) {
2182 UseScratchRegisterScope temps(this);
2183 Register src = no_reg;
2184 MemOperand dst{sp, arg_offset};
2185 if (arg.is_reg()) {
2186 liftoff::Store(this, sp, arg_offset, arg.reg(), arg.kind());
2187 } else if (arg.is_const()) {
2188 DCHECK_EQ(kI32, arg.kind());
2189 if (arg.i32_const() == 0) {
2190 src = zero_reg;
2191 } else {
2192 src = temps.Acquire();
2193 li(src, arg.i32_const());
2194 }
2195 StoreWord(src, dst);
2196 } else {
2197 DCHECK_EQ(value_kind_size(arg.kind()), 4);
2198 MemOperand src = liftoff::GetStackSlot(arg.offset());
2199 auto scratch = temps.Acquire();
2200 Lw(scratch, src);
2201 Sw(scratch, dst);
2202 }
2203 arg_offset += value_kind_size(arg.kind());
2204 }
2205 DCHECK_LE(arg_offset, stack_bytes);
2206
2207 // Pass a pointer to the buffer with the arguments to the C function.
2208 // On RISC-V, the first argument is passed in {a0}.
2209 constexpr Register kFirstArgReg = a0;
2210 mv(kFirstArgReg, sp);
2211
2212 // Now call the C function.
2213 constexpr int kNumCCallArgs = 1;
2214 PrepareCallCFunction(kNumCCallArgs, kScratchReg);
2215 CallCFunction(ext_ref, kNumCCallArgs);
2216
2217 // Move return value to the right register.
2218 const LiftoffRegister* next_result_reg = rets;
2219 if (return_kind != kVoid) {
2220 constexpr Register kReturnReg = a0;
2221 if (kReturnReg != next_result_reg->gp()) {
2222 Move(*next_result_reg, LiftoffRegister(kReturnReg), return_kind);
2223 }
2224 ++next_result_reg;
2225 }
2226
2227 // Load potential output value from the buffer on the stack.
2228 if (out_argument_kind != kVoid) {
2229 liftoff::Load(this, *next_result_reg, sp, 0, out_argument_kind);
2230 }
2231
2232 AddWord(sp, sp, Operand(stack_bytes));
2233}
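// Annotation (illustrative, not part of the upstream source): sketch of the
// resulting calling convention, assuming a hypothetical callee signature. The
// arguments are packed into a stack buffer whose address is passed as the
// single parameter in a0; the callee may both return a value in a0 and write
// an "out argument" back into slot 0 of the same buffer, which is reloaded
// after the call, e.g.
//   extern "C" int32_t ext_fn(void* packed_args);  // hypothetical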
2234
2235void LiftoffAssembler::CallC(const std::initializer_list<VarState> args,
2236 ExternalReference ext_ref) {
2237 // First, prepare the stack for the C call.
2238 int num_args = static_cast<int>(args.size());
2240 // Then execute the parallel register move and also move values to parameter
2241 // stack slots.
2242 int reg_args = 0;
2243 int stack_args = 0;
2244 ParallelMove parallel_move{this};
2245 for (const VarState& arg : args) {
2246 if (needs_gp_reg_pair(arg.kind())) {
2247 // All i64 arguments (currently) fully fit in the register parameters.
2248 DCHECK_LE(reg_args + 2, arraysize(kCArgRegs));
2249 parallel_move.LoadIntoRegister(
2250 LiftoffRegister::ForPair(kCArgRegs[reg_args],
2251 kCArgRegs[reg_args + 1]),
2252 arg);
2253 reg_args += 2;
2254 continue;
2255 }
2256 if (reg_args < int{arraysize(kCArgRegs)}) {
2257 parallel_move.LoadIntoRegister(LiftoffRegister{kCArgRegs[reg_args]}, arg);
2258 ++reg_args;
2259 continue;
2260 }
2261 MemOperand dst{sp, stack_args * kSystemPointerSize};
2262 ++stack_args;
2263 if (arg.is_reg()) {
2264 liftoff::Store(this, dst.rm(), dst.offset(), arg.reg(), arg.kind());
2265 continue;
2266 }
2267 UseScratchRegisterScope temps(this);
2268 Register scratch = temps.Acquire();
2269 if (arg.is_const()) {
2270 DCHECK_EQ(kI32, arg.kind());
2271 li(scratch, Operand(arg.i32_const()));
2272 Sw(scratch, dst);
2273 } else {
2274 // Stack to stack move.
2275 MemOperand src = liftoff::GetStackSlot(arg.offset());
2276 Lw(scratch, src);
2277 Sw(scratch, dst);
2278 }
2279 }
2280 parallel_move.Execute();
2281 // Now call the C function.
2283 CallCFunction(ext_ref, num_args);
2284}
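// Annotation (illustrative, not part of the upstream source): in contrast to
// CallCWithStackBuffer, CallC follows the regular C ABI. Arguments are assigned
// to kCArgRegs in order, an i64 argument consumes two consecutive argument
// registers on this 32-bit target, and anything that no longer fits spills to
// outgoing stack slots of kSystemPointerSize each before the call is made.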
2285
2286void LiftoffStackSlots::Construct(int param_slots) {
2288 DCHECK_LT(0, slots_.size());
2290 int last_stack_slot = param_slots;
2291 for (auto& slot : slots_) {
2292 const int stack_slot = slot.dst_slot_;
2293 int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
2294 DCHECK_LT(0, stack_decrement);
2295 last_stack_slot = stack_slot;
2296 const LiftoffAssembler::VarState& src = slot.src_;
2297 switch (src.loc()) {
2298 case LiftoffAssembler::VarState::kStack: {
2299 switch (src.kind()) {
2300 // i32 and i64 can be treated as similar cases, i64 being previously
2301 // split into two i32 registers
2302 case kI32:
2303 case kI64:
2304 case kF32:
2305 case kRef:
2306 case kRefNull: {
2307 asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
2308 UseScratchRegisterScope temps(asm_);
2309 Register scratch = temps.Acquire();
2310 asm_->Lw(scratch,
2311 liftoff::GetHalfStackSlot(slot.src_offset_, slot.half_));
2312 asm_->Push(scratch);
2313 } break;
2314 case kF64: {
2315 asm_->AllocateStackSpace(stack_decrement - kDoubleSize);
2316 DCHECK_EQ(kLowWord, slot.half_);
2317 asm_->Lw(kScratchReg,
2318 liftoff::GetHalfStackSlot(slot.src_offset_, kHighWord));
2319 asm_->Push(kScratchReg);
2320 asm_->Lw(kScratchReg,
2321 liftoff::GetHalfStackSlot(slot.src_offset_, kLowWord));
2322 asm_->Push(kScratchReg);
2323 } break;
2324 case kS128: {
2325 asm_->AllocateStackSpace(stack_decrement - kSimd128Size);
2326 asm_->Lw(kScratchReg, liftoff::GetStackSlot(slot.src_offset_ - 8));
2327 asm_->Push(kScratchReg);
2328 asm_->Lw(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
2329 asm_->Push(kScratchReg);
2330 } break;
2331 default:
2332 UNREACHABLE();
2333 }
2334 break;
2335 }
2336 case LiftoffAssembler::VarState::kRegister: {
2337 int pushed_bytes = SlotSizeInBytes(slot);
2338 asm_->AllocateStackSpace(stack_decrement - pushed_bytes);
2339 if (src.kind() == kI64) {
2340 liftoff::push(
2341 asm_, slot.half_ == kLowWord ? src.reg().low() : src.reg().high(),
2342 kI32);
2343 } else {
2344 liftoff::push(asm_, src.reg(), src.kind());
2345 }
2346 break;
2347 }
2348 case LiftoffAssembler::VarState::kIntConst: {
2349 asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
2350 asm_->li(kScratchReg, Operand(src.i32_const()));
2351 asm_->Push(kScratchReg);
2352 break;
2353 }
2354 }
2355 }
2356}
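// Annotation (illustrative, not part of the upstream source): the
// stack_decrement bookkeeping above exists because parameter slots sit at fixed
// kSystemPointerSize intervals. Before each value is pushed, AllocateStackSpace
// claims the gap between the previous parameter slot and this one, minus the
// bytes the push itself will write, so padding holes between differently sized
// values are still accounted for in the final stack layout.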
2357
2358bool LiftoffAssembler::supports_f16_mem_access() { return false; }
2359
2360} // namespace v8::internal::wasm
2361
2362#endif // V8_WASM_BASELINE_RISCV_LIFTOFF_ASSEMBLER_RISCV32_INL_H_