v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
liftoff-assembler-riscv64-inl.h
Go to the documentation of this file.
1// Copyright 2021 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_WASM_BASELINE_RISCV_LIFTOFF_ASSEMBLER_RISCV64_INL_H_
6#define V8_WASM_BASELINE_RISCV_LIFTOFF_ASSEMBLER_RISCV64_INL_H_
7
13
14namespace v8::internal::wasm {
15
16namespace liftoff {
17
18// Liftoff Frames.
19//
20//  slot      Frame
21//       +--------------------+---------------------------
22//  n+4  | optional padding slot to keep the stack 16 byte aligned.
23//  n+3  |   parameter n      |
24//  ...  |       ...          |
25//   4   |   parameter 1      | or parameter 2
26//   3   |   parameter 0      | or parameter 1
27//   2   |  (result address)  | or parameter 0
28//  -----+--------------------+---------------------------
29//   1   | return addr (ra)   |
30//   0   | previous frame (fp)|
31//  -----+--------------------+  <-- frame ptr (fp)
32//  -1   | StackFrame::WASM   |
33//  -2   |    instance        |
34//  -3   |    feedback vector |
35//  -----+--------------------+---------------------------
36//  -4   |    slot 0          |   ^
37//  -5   |    slot 1          |   |
38//       |                    | Frame slots
39//       |                    |   |
40//       |                    |   v
41//       | optional padding slot to keep the stack 16 byte aligned.
42//  -----+--------------------+  <-- stack ptr (sp)
43//
44
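// GetMemOp folds a base register, an optional index register and an immediate
// offset into a single MemOperand. A 32-bit index is zero-extended unless
// i64_offset is set, an optional shift is applied via CalcScaledAddress, and
// an immediate that does not fit into int31 is materialized with li/Add64.
// kScratchReg and kScratchReg2 may be clobbered in the process.
// Illustrative example (not from the source): GetMemOp(assm, a0, a1, 16,
// /*i64_offset=*/true, /*shift_amount=*/2) yields roughly
// MemOperand(a0 + (a1 << 2), 16), with the sum computed in kScratchReg2.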
45inline MemOperand GetMemOp(LiftoffAssembler* assm, Register addr,
46 Register offset, uintptr_t offset_imm,
47 bool i64_offset = false, unsigned shift_amount = 0) {
48 if (offset != no_reg) {
49 if (!i64_offset) {
50 // extract bit[0:31] without sign extend
51 assm->ExtractBits(kScratchReg2, offset, 0, 32, false);
52 offset = kScratchReg2;
53 }
54 if (shift_amount != 0) {
55 assm->CalcScaledAddress(kScratchReg2, addr, offset, shift_amount);
56 } else {
57 assm->Add64(kScratchReg2, offset, addr);
58 }
59 addr = kScratchReg2;
60 }
61 if (is_int31(offset_imm)) {
62 int32_t offset_imm32 = static_cast<int32_t>(offset_imm);
63 return MemOperand(addr, offset_imm32);
64 } else {
65 assm->li(kScratchReg, Operand(offset_imm));
66 assm->Add64(kScratchReg2, addr, kScratchReg);
67 return MemOperand(kScratchReg2, 0);
68 }
69}
70
71inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
72 ValueKind kind) {
73 switch (kind) {
74 case kI32:
75 assm->Lw(dst.gp(), src);
76 break;
77 case kI64:
78 case kRef:
79 case kRefNull:
80 assm->Ld(dst.gp(), src);
81 break;
82 case kF32:
83 assm->LoadFloat(dst.fp(), src);
84 break;
85 case kF64:
86 assm->LoadDouble(dst.fp(), src);
87 break;
88 case kS128:{
89 assm->VU.set(kScratchReg, E8, m1);
90 Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg;
91 if (src.offset() != 0) {
92 assm->AddWord(src_reg, src.rm(), src.offset());
93 }
94 assm->vl(dst.fp().toV(), src_reg, 0, E8);
95 break;
96 }
97 default:
98 UNREACHABLE();
99 }
100}
101
102inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
103 LiftoffRegister src, ValueKind kind) {
104 MemOperand dst(base, offset);
105 switch (kind) {
106 case kI32:
107 assm->Sw(src.gp(), dst);
108 break;
109 case kI64:
110 case kRefNull:
111 case kRef:
112 assm->Sd(src.gp(), dst);
113 break;
114 case kF32:
115 assm->StoreFloat(src.fp(), dst);
116 break;
117 case kF64:
118 assm->StoreDouble(src.fp(), dst);
119 break;
120 case kS128:{
121 assm->VU.set(kScratchReg, E8, m1);
122 Register dst_reg = dst.offset() == 0 ? dst.rm() : kScratchReg;
123 if (dst.offset() != 0) {
124 assm->Add64(kScratchReg, dst.rm(), dst.offset());
125 }
126 assm->vs(src.fp().toV(), dst_reg, 0, VSew::E8);
127 break;
128 }
129 default:
130 UNREACHABLE();
131 }
132}
133
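// push() grows the stack by one system pointer (two for kS128) and stores the
// given register there; it is used by LiftoffStackSlots::Construct below when
// passing parameters on the stack.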
134inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind) {
135 switch (kind) {
136 case kI32:
137 assm->addi(sp, sp, -kSystemPointerSize);
138 assm->Sw(reg.gp(), MemOperand(sp, 0));
139 break;
140 case kI64:
141 case kRefNull:
142 case kRef:
143 assm->push(reg.gp());
144 break;
145 case kF32:
146 assm->addi(sp, sp, -kSystemPointerSize);
147 assm->StoreFloat(reg.fp(), MemOperand(sp, 0));
148 break;
149 case kF64:
150 assm->addi(sp, sp, -kSystemPointerSize);
151 assm->StoreDouble(reg.fp(), MemOperand(sp, 0));
152 break;
153 case kS128:{
154 assm->VU.set(kScratchReg, E8, m1);
155 assm->addi(sp, sp, -kSystemPointerSize * 2);
156 assm->vs(reg.fp().toV(), sp, 0, VSew::E8);
157 break;
158 }
159 default:
160 UNREACHABLE();
161 }
162}
163
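// StoreToMemory copies an arbitrary Liftoff VarState (constant, register or
// stack slot) to the given destination: constants are materialized into a
// scratch register (zero_reg for 0), stack slots are staged through a scratch
// GPR/FPR, and S128 values go through the vector view of kScratchDoubleReg.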
164inline void StoreToMemory(LiftoffAssembler* assm, MemOperand dst,
165 const LiftoffAssembler::VarState& src) {
166 UseScratchRegisterScope temps(assm);
167 if (src.is_const()) {
168 Register src_reg = no_reg;
169 if (src.i32_const() == 0) {
170 src_reg = zero_reg;
171 } else {
172 src_reg = temps.Acquire();
173 assm->li(src_reg, src.i32_const());
174 }
175 assm->StoreWord(src_reg, dst);
176 } else if (src.is_reg()) {
177 switch (src.kind()) {
178 case kI32:
179 return assm->Sw(src.reg().gp(), dst);
180 case kI64:
181 case kRef:
182 case kRefNull:
183 return assm->Sd(src.reg().gp(), dst);
184 case kF32:
185 return assm->StoreFloat(src.reg().fp(), dst);
186 case kF64:
187 return assm->StoreDouble(src.reg().fp(), dst);
188 case kS128: {
189 assm->VU.set(kScratchReg, E8, m1);
190 Register dst_reg = temps.Acquire();
191 assm->Add64(dst_reg, dst.rm(), dst.offset());
192 assm->vs(src.reg().fp().toV(), dst_reg, 0, VSew::E8);
193 return;
194 }
195 default:
196 UNREACHABLE();
197 }
198 } else {
199 DCHECK(src.is_stack());
200 Register temp = temps.Acquire();
201 switch (src.kind()) {
202 case kI32:
203 assm->Lw(temp, GetStackSlot(src.offset()));
204 assm->Sw(temp, dst);
205 return;
206 case kI64:
207 case kRef:
208 case kRefNull:
209 assm->Ld(temp, GetStackSlot(src.offset()));
210 assm->Sd(temp, dst);
211 return;
212 case kF32:
213 assm->LoadFloat(kScratchDoubleReg, GetStackSlot(src.offset()));
214 assm->StoreFloat(kScratchDoubleReg, dst);
215 return;
216 case kF64:
217 assm->LoadDouble(kScratchDoubleReg, GetStackSlot(src.offset()));
218 assm->StoreDouble(kScratchDoubleReg, dst);
219 return;
220 case kS128: {
221 assm->VU.set(kScratchReg, E8, m1);
222 Register src_reg = temp;
223 assm->Add64(src_reg, sp, src.offset());
224 assm->vl(kScratchDoubleReg.toV(), src_reg, 0, VSew::E8);
225 Register dst_reg = temp;
226 assm->Add64(dst_reg, dst.rm(), dst.offset());
227 assm->vs(kScratchDoubleReg.toV(), dst_reg, 0, VSew::E8);
228 return;
229 }
230 default:
231 UNREACHABLE();
232 }
233 }
234}
235
236} // namespace liftoff
237
238void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
239 switch (value.type().kind()) {
240 case kI32:
241 MacroAssembler::li(reg.gp(), Operand(value.to_i32()));
242 break;
243 case kI64:
244 MacroAssembler::li(reg.gp(), Operand(value.to_i64()));
245 break;
246 case kF32:
247 MacroAssembler::LoadFPRImmediate(reg.fp(),
248 value.to_f32_boxed().get_bits());
249 break;
250 case kF64:
251 MacroAssembler::LoadFPRImmediate(reg.fp(),
252 value.to_f64_boxed().get_bits());
253 break;
254 default:
255 UNREACHABLE();
256 }
257}
258
259void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
260 Register offset_reg,
261 int32_t offset_imm,
262 uint32_t* protected_load_pc,
263 bool needs_shift) {
264 unsigned shift_amount = !needs_shift ? 0 : COMPRESS_POINTERS_BOOL ? 2 : 3;
265 MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm,
266 false, shift_amount);
267 Assembler::BlockPoolsScope blocked_pools_scope_(this, 4 * kInstrSize);
268 LoadTaggedField(dst, src_op, [protected_load_pc](int offset) {
269 if (protected_load_pc) *protected_load_pc = offset;
270 });
271 if (protected_load_pc) {
272 DCHECK(InstructionAt(*protected_load_pc)->IsLoad());
273 }
274}
275
276void LiftoffAssembler::LoadProtectedPointer(Register dst, Register src_addr,
277 int32_t offset_imm) {
278 LoadProtectedPointerField(dst, MemOperand{src_addr, offset_imm});
279}
280
281void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
282 int32_t offset_imm) {
283 MemOperand src_op = liftoff::GetMemOp(this, src_addr, no_reg, offset_imm);
284 LoadWord(dst, src_op);
285}
286
287#ifdef V8_ENABLE_SANDBOX
288void LiftoffAssembler::LoadCodeEntrypointViaCodePointer(Register dst,
289 Register src_addr,
290 int32_t offset_imm) {
291 MemOperand src_op = liftoff::GetMemOp(this, src_addr, no_reg, offset_imm);
292 MacroAssembler::LoadCodeEntrypointViaCodePointer(dst, src_op,
293 kWasmEntrypointTag);
294}
295#endif
296
297void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
298 Register offset_reg,
299 int32_t offset_imm, Register src,
300 LiftoffRegList pinned,
301 uint32_t* protected_store_pc,
302 SkipWriteBarrier skip_write_barrier) {
303 UseScratchRegisterScope temps(this);
304 Operand offset_op =
305 offset_reg.is_valid() ? Operand(offset_reg) : Operand(offset_imm);
306 // For the write barrier (below), we cannot have both an offset register and
307 // an immediate offset. Add them to a 32-bit offset initially, but in a 64-bit
308 // register, because that's needed in the MemOperand below.
309 if (offset_reg.is_valid() && offset_imm) {
310 Register effective_offset = temps.Acquire();
311 AddWord(effective_offset, offset_reg, Operand(offset_imm));
312 offset_op = Operand(effective_offset);
313 }
314 auto trapper = [protected_store_pc](int offset) {
315 if (protected_store_pc) *protected_store_pc = static_cast<uint32_t>(offset);
316 };
317 if (offset_op.is_reg()) {
318 AddWord(kScratchReg, dst_addr, offset_op.rm());
319 StoreTaggedField(src, MemOperand(kScratchReg, 0), trapper);
320 } else {
321 StoreTaggedField(src, MemOperand(dst_addr, offset_imm), trapper);
322 }
323 if (protected_store_pc) {
324 DCHECK(InstructionAt(*protected_store_pc)->IsStore());
325 }
326
327 if (skip_write_barrier || v8_flags.disable_write_barriers) return;
328
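  // Out-of-line write barrier, mirroring the other Liftoff backends: it is
  // skipped if the destination page has no interesting pointers-from-here, if
  // the stored value is a Smi, or if the value's page has no interesting
  // pointers-to-here; otherwise the record-write stub is called.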
329 Label exit;
330 CheckPageFlag(dst_addr, MemoryChunk::kPointersFromHereAreInterestingMask,
331 kZero, &exit);
332 JumpIfSmi(src, &exit);
333 CheckPageFlag(src, MemoryChunk::kPointersToHereAreInterestingMask, eq, &exit);
334 CallRecordWriteStubSaveRegisters(dst_addr, offset_op, SaveFPRegsMode::kSave,
335 StubCallMode::kCallWasmRuntimeStub);
336 bind(&exit);
337}
338
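// The {trapper} callbacks below record the pc of the actual memory
// instruction in {protected_load_pc}/{protected_store_pc}; the wasm trap
// handler uses that pc to turn a hardware fault into an out-of-bounds trap.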
339void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
340 Register offset_reg, uintptr_t offset_imm,
341 LoadType type, uint32_t* protected_load_pc,
342 bool is_load_mem, bool i64_offset,
343 bool needs_shift) {
344 unsigned shift_amount = needs_shift ? type.size_log_2() : 0;
345 MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm,
346 i64_offset, shift_amount);
347 Assembler::BlockPoolsScope blocked_pools_scope_(this, 4 * kInstrSize);
348 auto trapper = [protected_load_pc](int offset) {
349 if (protected_load_pc) *protected_load_pc = static_cast<uint32_t>(offset);
350 };
351 switch (type.value()) {
352 case LoadType::kI32Load8U:
353 case LoadType::kI64Load8U:
354 Lbu(dst.gp(), src_op, trapper);
355 break;
356 case LoadType::kI32Load8S:
357 case LoadType::kI64Load8S:
358 Lb(dst.gp(), src_op, trapper);
359 break;
360 case LoadType::kI32Load16U:
361 case LoadType::kI64Load16U:
362 Lhu(dst.gp(), src_op, trapper);
363 break;
364 case LoadType::kI32Load16S:
365 case LoadType::kI64Load16S:
366 Lh(dst.gp(), src_op, trapper);
367 break;
368 case LoadType::kI64Load32U:
369 Lwu(dst.gp(), src_op, trapper);
370 break;
371 case LoadType::kI32Load:
372 case LoadType::kI64Load32S:
373 Lw(dst.gp(), src_op, trapper);
374 break;
375 case LoadType::kI64Load:
376 Ld(dst.gp(), src_op, trapper);
377 break;
378 case LoadType::kF32Load:
379 LoadFloat(dst.fp(), src_op, trapper);
380 break;
381 case LoadType::kF64Load:
382 LoadDouble(dst.fp(), src_op, trapper);
383 break;
384 case LoadType::kS128Load: {
385 VU.set(kScratchReg, E8, m1);
386 Register src_reg = src_op.offset() == 0 ? src_op.rm() : kScratchReg;
387 if (src_op.offset() != 0) {
388 MacroAssembler::AddWord(src_reg, src_op.rm(), src_op.offset());
389 }
390 trapper(pc_offset());
391 vl(dst.fp().toV(), src_reg, 0, E8);
392 break;
393 }
394 case LoadType::kF32LoadF16:
395 UNIMPLEMENTED();
396 break;
397 default:
398 UNREACHABLE();
399 }
400 if (protected_load_pc) {
401 DCHECK(InstructionAt(*protected_load_pc)->IsLoad());
402 }
403
404#if defined(V8_TARGET_BIG_ENDIAN)
405 if (is_load_mem) {
406 pinned.set(src_op.rm());
407 liftoff::ChangeEndiannessLoad(this, dst, type, pinned);
408 }
409#endif
410}
411
412void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
413 uintptr_t offset_imm, LiftoffRegister src,
414 StoreType type, LiftoffRegList pinned,
415 uint32_t* protected_store_pc, bool is_store_mem,
416 bool i64_offset) {
417 MemOperand dst_op =
418 liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, i64_offset);
419
420#if defined(V8_TARGET_BIG_ENDIAN)
421 if (is_store_mem) {
422 pinned.set(dst_op.rm());
423 LiftoffRegister tmp = GetUnusedRegister(src.reg_class(), pinned);
424 // Save original value.
425 Move(tmp, src, type.value_type());
426
427 src = tmp;
428 pinned.set(tmp);
429 liftoff::ChangeEndiannessStore(this, src, type, pinned);
430 }
431#endif
432
433 Assembler::BlockPoolsScope blocked_pools_scope_(this, 4 * kInstrSize);
434 auto trapper = [protected_store_pc](int offset) {
435 if (protected_store_pc) *protected_store_pc = static_cast<uint32_t>(offset);
436 };
437 switch (type.value()) {
438 case StoreType::kI32Store8:
439 case StoreType::kI64Store8:
440 Sb(src.gp(), dst_op, trapper);
441 break;
442 case StoreType::kI32Store16:
443 case StoreType::kI64Store16:
444 Sh(src.gp(), dst_op, trapper);
445 break;
446 case StoreType::kI32Store:
447 case StoreType::kI64Store32:
448 Sw(src.gp(), dst_op, trapper);
449 break;
450 case StoreType::kI64Store:
451 Sd(src.gp(), dst_op, trapper);
452 break;
453 case StoreType::kF32Store:
454 StoreFloat(src.fp(), dst_op, trapper);
455 break;
456 case StoreType::kF64Store:
457 StoreDouble(src.fp(), dst_op, trapper);
458 break;
459 case StoreType::kS128Store: {
460 VU.set(kScratchReg, E8, m1);
461 Register dst_reg = dst_op.offset() == 0 ? dst_op.rm() : kScratchReg;
462 if (dst_op.offset() != 0) {
463 Add64(kScratchReg, dst_op.rm(), dst_op.offset());
464 }
465 trapper(pc_offset());
466 vs(src.fp().toV(), dst_reg, 0, VSew::E8);
467 break;
468 }
469 default:
470 UNREACHABLE();
471 }
472 if (protected_store_pc) {
473 DCHECK(InstructionAt(*protected_store_pc)->IsStore());
474 }
475}
476
477namespace liftoff {
478#define __ lasm->
479
480inline Register CalculateActualAddress(LiftoffAssembler* lasm,
481 UseScratchRegisterScope& temps,
482 Register addr_reg, Register offset_reg,
483 uintptr_t offset_imm) {
484 DCHECK_NE(addr_reg, no_reg);
485 if (offset_reg == no_reg && offset_imm == 0) return addr_reg;
486 Register result = temps.Acquire();
487 if (offset_reg == no_reg) {
488 __ AddWord(result, addr_reg, Operand(offset_imm));
489 } else {
490 __ AddWord(result, addr_reg, Operand(offset_reg));
491 if (offset_imm != 0) __ AddWord(result, result, Operand(offset_imm));
492 }
493 return result;
494}
495
496enum class Binop { kAdd, kSub, kAnd, kOr, kXor, kExchange };
497
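// AtomicBinop implements the read-modify-write atomics. 8/16-bit accesses use
// plain loads/stores bracketed by fences (sync) and always succeed, while
// 32/64-bit accesses use an lr/sc retry loop: the store-conditional result in
// {store_result} drives the bnez back to {retry}.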
498inline void AtomicBinop(LiftoffAssembler* lasm, Register dst_addr,
499 Register offset_reg, uintptr_t offset_imm,
500 LiftoffRegister value, LiftoffRegister result,
501 StoreType type, Binop op) {
502 LiftoffRegList pinned{dst_addr, value, result};
503 if (offset_reg != no_reg) pinned.set(offset_reg);
504 Register store_result = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
505
506 // Make sure that {result} is unique.
507 Register result_reg = result.gp();
508 if (result_reg == value.gp() || result_reg == dst_addr ||
509 result_reg == offset_reg) {
510 result_reg = __ GetUnusedRegister(kGpReg, pinned).gp();
511 }
512
513 UseScratchRegisterScope temps(lasm);
514 Register actual_addr = liftoff::CalculateActualAddress(
515 lasm, temps, dst_addr, offset_reg, offset_imm);
516
517 // Allocate an additional {temp} register to hold the result that should be
518 // stored to memory. Note that {temp} and {store_result} are not allowed to be
519 // the same register.
520 Register temp = temps.Acquire();
521
522 Label retry;
523 __ bind(&retry);
524 switch (type.value()) {
525 case StoreType::kI64Store8:
526 case StoreType::kI32Store8:
527 __ lbu(result_reg, actual_addr, 0);
528 __ sync();
529 break;
530 case StoreType::kI64Store16:
531 case StoreType::kI32Store16:
532 __ lhu(result_reg, actual_addr, 0);
533 __ sync();
534 break;
535 case StoreType::kI64Store32:
536 __ lr_w(true, false, result_reg, actual_addr);
537 __ ZeroExtendWord(result_reg, result_reg);
538 break;
539 case StoreType::kI32Store:
540 __ lr_w(true, false, result_reg, actual_addr);
541 break;
542 case StoreType::kI64Store:
543 __ lr_d(true, false, result_reg, actual_addr);
544 break;
545 default:
546 UNREACHABLE();
547 }
548
549 switch (op) {
550 case Binop::kAdd:
551 __ add(temp, result_reg, value.gp());
552 break;
553 case Binop::kSub:
554 __ sub(temp, result_reg, value.gp());
555 break;
556 case Binop::kAnd:
557 __ and_(temp, result_reg, value.gp());
558 break;
559 case Binop::kOr:
560 __ or_(temp, result_reg, value.gp());
561 break;
562 case Binop::kXor:
563 __ xor_(temp, result_reg, value.gp());
564 break;
565 case Binop::kExchange:
566 __ mv(temp, value.gp());
567 break;
568 }
569 switch (type.value()) {
570 case StoreType::kI64Store8:
571 case StoreType::kI32Store8:
572 __ sync();
573 __ sb(temp, actual_addr, 0);
574 __ sync();
575 __ mv(store_result, zero_reg);
576 break;
577 case StoreType::kI64Store16:
578 case StoreType::kI32Store16:
579 __ sync();
580 __ sh(temp, actual_addr, 0);
581 __ sync();
582 __ mv(store_result, zero_reg);
583 break;
584 case StoreType::kI64Store32:
585 case StoreType::kI32Store:
586 __ sc_w(false, true, store_result, actual_addr, temp);
587 break;
588 case StoreType::kI64Store:
589 __ sc_d(false, true, store_result, actual_addr, temp);
590 break;
591 default:
592 UNREACHABLE();
593 }
594
595 __ bnez(store_result, &retry);
596 if (result_reg != result.gp()) {
597 __ mv(result.gp(), result_reg);
598 }
599}
600
601#undef __
602} // namespace liftoff
603
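// Fence-based mapping for wasm atomic loads and stores: atomic loads are
// followed by a full fence (sync), atomic stores are preceded by one.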
604void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
605 Register offset_reg, uintptr_t offset_imm,
606 LoadType type, LiftoffRegList pinned,
607 bool i64_offset) {
608 UseScratchRegisterScope temps(this);
609 Register src_reg = liftoff::CalculateActualAddress(this, temps, src_addr,
610 offset_reg, offset_imm);
611 switch (type.value()) {
612 case LoadType::kI32Load8U:
613 case LoadType::kI64Load8U:
614 lbu(dst.gp(), src_reg, 0);
615 sync();
616 return;
617 case LoadType::kI32Load16U:
618 case LoadType::kI64Load16U:
619 lhu(dst.gp(), src_reg, 0);
620 sync();
621 return;
622 case LoadType::kI32Load:
623 lw(dst.gp(), src_reg, 0);
624 sync();
625 return;
626 case LoadType::kI64Load32U:
627 lwu(dst.gp(), src_reg, 0);
628 sync();
629 return;
630 case LoadType::kI64Load:
631 ld(dst.gp(), src_reg, 0);
632 sync();
633 return;
634 default:
635 UNREACHABLE();
636 }
637}
638
639void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
640 uintptr_t offset_imm, LiftoffRegister src,
641 StoreType type, LiftoffRegList pinned,
642 bool i64_offset) {
643 UseScratchRegisterScope temps(this);
644 Register dst_reg = liftoff::CalculateActualAddress(this, temps, dst_addr,
645 offset_reg, offset_imm);
646 switch (type.value()) {
647 case StoreType::kI64Store8:
648 case StoreType::kI32Store8:
649 sync();
650 sb(src.gp(), dst_reg, 0);
651 return;
652 case StoreType::kI64Store16:
653 case StoreType::kI32Store16:
654 sync();
655 sh(src.gp(), dst_reg, 0);
656 return;
657 case StoreType::kI64Store32:
658 case StoreType::kI32Store:
659 sync();
660 sw(src.gp(), dst_reg, 0);
661 return;
662 case StoreType::kI64Store:
663 sync();
664 sd(src.gp(), dst_reg, 0);
665 return;
666 default:
667 UNREACHABLE();
668 }
669}
670
671void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
672 uintptr_t offset_imm, LiftoffRegister value,
673 LiftoffRegister result, StoreType type,
674 bool i64_offset) {
675 liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
676 type, liftoff::Binop::kAdd);
677}
678
679void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
680 uintptr_t offset_imm, LiftoffRegister value,
681 LiftoffRegister result, StoreType type,
682 bool i64_offset) {
683 liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
684 type, liftoff::Binop::kSub);
685}
686
687void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
688 uintptr_t offset_imm, LiftoffRegister value,
689 LiftoffRegister result, StoreType type,
690 bool i64_offset) {
691 liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
692 type, liftoff::Binop::kAnd);
693}
694
695void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
696 uintptr_t offset_imm, LiftoffRegister value,
697 LiftoffRegister result, StoreType type,
698 bool i64_offset) {
699 liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
700 type, liftoff::Binop::kOr);
701}
702
703void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
704 uintptr_t offset_imm, LiftoffRegister value,
705 LiftoffRegister result, StoreType type,
706 bool i64_offset) {
707 liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
708 type, liftoff::Binop::kXor);
709}
710
711void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
712 uintptr_t offset_imm,
713 LiftoffRegister value,
714 LiftoffRegister result, StoreType type,
715 bool i64_offset) {
716 liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
717 type, liftoff::Binop::kExchange);
718}
719
720#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked, \
721 store_conditional) \
722 do { \
723 Label compareExchange; \
724 Label exit; \
725 sync(); \
726 bind(&compareExchange); \
727 load_linked(result.gp(), MemOperand(temp0, 0)); \
728 BranchShort(&exit, ne, expected.gp(), Operand(result.gp())); \
729 mv(temp2, new_value.gp()); \
730 store_conditional(temp2, MemOperand(temp0, 0)); \
731 BranchShort(&compareExchange, ne, temp2, Operand(zero_reg)); \
732 bind(&exit); \
733 sync(); \
734 } while (0)
735
736#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT( \
737 load_linked, store_conditional, size, aligned) \
738 do { \
739 Label compareExchange; \
740 Label exit; \
741 andi(temp1, temp0, aligned); \
742 Sub64(temp0, temp0, Operand(temp1)); \
743 Sll32(temp1, temp1, 3); \
744 sync(); \
745 bind(&compareExchange); \
746 load_linked(temp2, MemOperand(temp0, 0)); \
747 ExtractBits(result.gp(), temp2, temp1, size, false); \
748 ExtractBits(temp2, expected.gp(), zero_reg, size, false); \
749 BranchShort(&exit, ne, temp2, Operand(result.gp())); \
750 InsertBits(temp2, new_value.gp(), temp1, size); \
751 store_conditional(temp2, MemOperand(temp0, 0)); \
752 BranchShort(&compareExchange, ne, temp2, Operand(zero_reg)); \
753 bind(&exit); \
754 sync(); \
755 } while (0)
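// The _EXT variant emulates a sub-word compare-exchange on top of a full-word
// ll/sc sequence: it aligns the address down (using {aligned} as the mask),
// converts the byte offset into a bit position, and uses ExtractBits and
// InsertBits to compare and splice only the addressed 8/16/32 bits.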
756
757void LiftoffAssembler::AtomicCompareExchange(
758 Register dst_addr, Register offset_reg, uintptr_t offset_imm,
759 LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
760 StoreType type, bool i64_offset) {
761 LiftoffRegList pinned{dst_addr, expected, new_value, result};
762 if (offset_reg != no_reg) pinned.set(offset_reg);
763
764 Register temp0 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
765 Register temp1 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
766 Register temp2 = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
767 MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
768 Add64(temp0, dst_op.rm(), dst_op.offset());
769 switch (type.value()) {
770 case StoreType::kI64Store8:
772 break;
773 case StoreType::kI32Store8:
775 break;
776 case StoreType::kI64Store16:
778 break;
779 case StoreType::kI32Store16:
781 break;
782 case StoreType::kI64Store32:
784 break;
785 case StoreType::kI32Store:
787 break;
788 case StoreType::kI64Store:
790 break;
791 default:
792 UNREACHABLE();
793 }
794}
795#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
796#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT
797
799
800void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
801 uint32_t caller_slot_idx,
802 ValueKind kind) {
803 MemOperand src(fp, kSystemPointerSize * (caller_slot_idx + 1));
804 liftoff::Load(this, dst, src, kind);
805}
806
807void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
808 uint32_t caller_slot_idx,
809 ValueKind kind,
810 Register frame_pointer) {
811 int32_t offset = kSystemPointerSize * (caller_slot_idx + 1);
812 liftoff::Store(this, frame_pointer, offset, src, kind);
813}
814
815void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
816 ValueKind kind) {
817 liftoff::Load(this, dst, MemOperand(sp, offset), kind);
818}
819
820void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
821 ValueKind kind) {
822 DCHECK_NE(dst_offset, src_offset);
823
824 MemOperand src = liftoff::GetStackSlot(src_offset);
825 MemOperand dst = liftoff::GetStackSlot(dst_offset);
826 switch (kind) {
827 case kI32:
828 Lw(kScratchReg, src);
829 Sw(kScratchReg, dst);
830 break;
831 case kI64:
832 case kRef:
833 case kRefNull:
834 Ld(kScratchReg, src);
835 Sd(kScratchReg, dst);
836 break;
837 case kF32:
838 LoadFloat(kScratchDoubleReg, src);
839 StoreFloat(kScratchDoubleReg, dst);
840 break;
841 case kF64:
842 LoadDouble(kScratchDoubleReg, src);
843 StoreDouble(kScratchDoubleReg, dst);
844 break;
845 case kS128: {
846 VU.set(kScratchReg, E8, m1);
847 Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg;
848 if (src.offset() != 0) {
849 MacroAssembler::Add64(src_reg, src.rm(), src.offset());
850 }
851 vl(kSimd128ScratchReg, src_reg, 0, E8);
852 Register dst_reg = dst.offset() == 0 ? dst.rm() : kScratchReg;
853 if (dst.offset() != 0) {
854 Add64(kScratchReg, dst.rm(), dst.offset());
855 }
856 vs(kSimd128ScratchReg, dst_reg, 0, VSew::E8);
857 break;
858 }
859 case kVoid:
860 case kI8:
861 case kI16:
862 case kTop:
863 case kBottom:
864 case kF16:
865 UNREACHABLE();
866 }
867}
868
869void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
870 DCHECK_NE(dst, src);
871 // TODO(ksreten): Handle different sizes here.
872 MacroAssembler::Move(dst, src);
873}
874
875void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
876 ValueKind kind) {
877 DCHECK_NE(dst, src);
878 if (kind != kS128) {
879 MacroAssembler::Move(dst, src);
880 } else {
881 VU.set(kScratchReg, E8, m1);
882 MacroAssembler::vmv_vv(dst.toV(), src.toV());
883 }
884}
885
886void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
887 RecordUsedSpillOffset(offset);
888 MemOperand dst = liftoff::GetStackSlot(offset);
889 switch (kind) {
890 case kI32:
891 Sw(reg.gp(), dst);
892 break;
893 case kI64:
894 case kRef:
895 case kRefNull:
896 Sd(reg.gp(), dst);
897 break;
898 case kF32:
899 StoreFloat(reg.fp(), dst);
900 break;
901 case kF64:
902 StoreDouble(reg.fp(), dst);
903 break;
904 case kS128: {
905 VU.set(kScratchReg, E8, m1);
906 Register dst_reg = dst.offset() == 0 ? dst.rm() : kScratchReg;
907 if (dst.offset() != 0) {
908 Add64(kScratchReg, dst.rm(), dst.offset());
909 }
910 vs(reg.fp().toV(), dst_reg, 0, VSew::E8);
911 break;
912 }
913 default:
914 UNREACHABLE();
915 }
916}
917
918void LiftoffAssembler::Spill(int offset, WasmValue value) {
919 RecordUsedSpillOffset(offset);
920 MemOperand dst = liftoff::GetStackSlot(offset);
921 switch (value.type().kind()) {
922 case kI32: {
923 UseScratchRegisterScope temps(this);
924 Register tmp = temps.Acquire();
925 MacroAssembler::li(tmp, Operand(value.to_i32()));
926 Sw(tmp, dst);
927 break;
928 }
929 case kI64:
930 case kRef:
931 case kRefNull: {
932 UseScratchRegisterScope temps(this);
933 Register tmp = temps.Acquire();
934 MacroAssembler::li(tmp, value.to_i64());
935 Sd(tmp, dst);
936 break;
937 }
938 default:
939 // kWasmF32 and kWasmF64 are unreachable, since those
940 // constants are not tracked.
941 UNREACHABLE();
942 }
943}
944
945void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
946 MemOperand src = liftoff::GetStackSlot(offset);
947 switch (kind) {
948 case kI32:
949 Lw(reg.gp(), src);
950 break;
951 case kI64:
952 case kRef:
953 case kRefNull:
954 Ld(reg.gp(), src);
955 break;
956 case kF32:
957 LoadFloat(reg.fp(), src);
958 break;
959 case kF64:
960 LoadDouble(reg.fp(), src);
961 break;
962 case kS128: {
963 VU.set(kScratchReg, E8, m1);
964 Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg;
965 if (src.offset() != 0) {
966 MacroAssembler::Add64(src_reg, src.rm(), src.offset());
967 }
968 vl(reg.fp().toV(), src_reg, 0, E8);
969 break;
970 }
971 default:
972 UNREACHABLE();
973 }
974}
975
976void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
977 UNREACHABLE();
978}
979
980void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
981 DCHECK_LT(0, size);
982 RecordUsedSpillOffset(start + size);
983
984 if (size <= 12 * kStackSlotSize) {
985 // Special straight-line code for up to 12 slots. Generates one
986 // instruction per slot (<= 12 instructions total).
987 uint32_t remainder = size;
988 for (; remainder >= kStackSlotSize; remainder -= kStackSlotSize) {
989 Sd(zero_reg, liftoff::GetStackSlot(start + remainder));
990 }
991 DCHECK(remainder == 4 || remainder == 0);
992 if (remainder) {
993 Sw(zero_reg, liftoff::GetStackSlot(start + remainder));
994 }
995 } else {
996 // General case for bigger counts (12 instructions).
997 // Use a0 for start address (inclusive), a1 for end address (exclusive).
998 Push(a1, a0);
999 Add64(a0, fp, Operand(-start - size));
1000 Add64(a1, fp, Operand(-start));
1001
1002 Label loop;
1003 bind(&loop);
1004 Sd(zero_reg, MemOperand(a0));
1005 addi(a0, a0, kSystemPointerSize);
1006 BranchShort(&loop, ne, a0, Operand(a1));
1007
1008 Pop(a1, a0);
1009 }
1010}
1011
1012void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) {
1013 MacroAssembler::Clz64(dst.gp(), src.gp());
1014}
1015
1016void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
1017 MacroAssembler::Ctz64(dst.gp(), src.gp());
1018}
1019
1020bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
1021 LiftoffRegister src) {
1022 MacroAssembler::Popcnt64(dst.gp(), src.gp(), kScratchReg);
1023 return true;
1024}
1025
1026void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
1027 MacroAssembler::Mul32(dst, lhs, rhs);
1028}
1029
1030void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
1031 Label* trap_div_by_zero,
1032 Label* trap_div_unrepresentable) {
1033 MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
1034
1035 // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable.
1036 MacroAssembler::CompareI(kScratchReg, lhs, Operand(kMinInt), ne);
1037 MacroAssembler::CompareI(kScratchReg2, rhs, Operand(-1), ne);
1038 add(kScratchReg, kScratchReg, kScratchReg2);
1039 MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
1040 Operand(zero_reg));
1041
1042 MacroAssembler::Div32(dst, lhs, rhs);
1043}
1044
1045void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
1046 Label* trap_div_by_zero) {
1047 MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
1048 MacroAssembler::Divu32(dst, lhs, rhs);
1049}
1050
1051void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
1052 Label* trap_div_by_zero) {
1053 MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
1054 MacroAssembler::Mod32(dst, lhs, rhs);
1055}
1056
1057void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
1058 Label* trap_div_by_zero) {
1059 MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
1060 MacroAssembler::Modu32(dst, lhs, rhs);
1061}
1062
1063#define I32_BINOP(name, instruction) \
1064 void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
1065 Register rhs) { \
1066 instruction(dst, lhs, rhs); \
1067 }
1068
1069// clang-format off
1070I32_BINOP(add, addw)
1071I32_BINOP(sub, subw)
1072I32_BINOP(and, and_)
1073I32_BINOP(or, or_)
1074I32_BINOP(xor, xor_)
1075// clang-format on
1076
1077#undef I32_BINOP
1078
1079#define I32_BINOP_I(name, instruction) \
1080 void LiftoffAssembler::emit_i32_##name##i(Register dst, Register lhs, \
1081 int32_t imm) { \
1082 instruction(dst, lhs, Operand(imm)); \
1083 }
1084
1085// clang-format off
1086I32_BINOP_I(add, Add32)
1087I32_BINOP_I(sub, Sub32)
1088I32_BINOP_I(and, And)
1089I32_BINOP_I(or, Or)
1090I32_BINOP_I(xor, Xor)
1091// clang-format on
1092
1093#undef I32_BINOP_I
1094
1095void LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
1096 MacroAssembler::Clz32(dst, src);
1097}
1098
1099void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
1100 MacroAssembler::Ctz32(dst, src);
1101}
1102
1103bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
1104 MacroAssembler::Popcnt32(dst, src, kScratchReg);
1105 return true;
1106}
1107
1108#define I32_SHIFTOP(name, instruction) \
1109 void LiftoffAssembler::emit_i32_##name(Register dst, Register src, \
1110 Register amount) { \
1111 instruction(dst, src, amount); \
1112 }
1113#define I32_SHIFTOP_I(name, instruction) \
1114 void LiftoffAssembler::emit_i32_##name##i(Register dst, Register src, \
1115 int amount) { \
1116 instruction(dst, src, amount & 31); \
1117 }
1118
1119I32_SHIFTOP(shl, sllw)
1120I32_SHIFTOP(sar, sraw)
1121I32_SHIFTOP(shr, srlw)
1122
1123I32_SHIFTOP_I(shl, slliw)
1124I32_SHIFTOP_I(sar, sraiw)
1125I32_SHIFTOP_I(shr, srliw)
1126
1127#undef I32_SHIFTOP
1128#undef I32_SHIFTOP_I
1129
1130void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
1131 LiftoffRegister rhs) {
1132 MacroAssembler::Mul64(dst.gp(), lhs.gp(), rhs.gp());
1133}
1134
1135void LiftoffAssembler::emit_i64_muli(LiftoffRegister dst, LiftoffRegister lhs,
1136 int32_t imm) {
1137 if (base::bits::IsPowerOfTwo(imm)) {
1138 emit_i64_shli(dst, lhs, base::bits::WhichPowerOfTwo(imm));
1139 return;
1140 }
1141 UseScratchRegisterScope temps(this);
1142 Register scratch = temps.Acquire();
1143 li(scratch, imm);
1144 Mul64(dst.gp(), lhs.gp(), scratch);
1145}
1146
1147bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
1148 LiftoffRegister rhs,
1149 Label* trap_div_by_zero,
1150 Label* trap_div_unrepresentable) {
1151 MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
1152
1153 // Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable.
1154 MacroAssembler::CompareI(kScratchReg, lhs.gp(),
1155 Operand(std::numeric_limits<int64_t>::min()), ne);
1156 MacroAssembler::CompareI(kScratchReg2, rhs.gp(), Operand(-1), ne);
1157 add(kScratchReg, kScratchReg, kScratchReg2);
1158 MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
1159 Operand(zero_reg));
1160
1161 MacroAssembler::Div64(dst.gp(), lhs.gp(), rhs.gp());
1162 return true;
1163}
1164
1165bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
1166 LiftoffRegister rhs,
1167 Label* trap_div_by_zero) {
1168 MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
1169 MacroAssembler::Divu64(dst.gp(), lhs.gp(), rhs.gp());
1170 return true;
1171}
1172
1173bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
1174 LiftoffRegister rhs,
1175 Label* trap_div_by_zero) {
1176 MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
1177 MacroAssembler::Mod64(dst.gp(), lhs.gp(), rhs.gp());
1178 return true;
1179}
1180
1181bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
1182 LiftoffRegister rhs,
1183 Label* trap_div_by_zero) {
1184 MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
1185 MacroAssembler::Modu64(dst.gp(), lhs.gp(), rhs.gp());
1186 return true;
1187}
1188
1189#define I64_BINOP(name, instruction) \
1190 void LiftoffAssembler::emit_i64_##name( \
1191 LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
1192 instruction(dst.gp(), lhs.gp(), rhs.gp()); \
1193 }
1194
1195// clang-format off
1196I64_BINOP(add, add)
1197I64_BINOP(sub, sub)
1198I64_BINOP(and, and_)
1199I64_BINOP(or, or_)
1200I64_BINOP(xor, xor_)
1201// clang-format on
1202
1203#undef I64_BINOP
1204
1205#define I64_BINOP_I(name, instruction) \
1206 void LiftoffAssembler::emit_i64_##name##i( \
1207 LiftoffRegister dst, LiftoffRegister lhs, int32_t imm) { \
1208 instruction(dst.gp(), lhs.gp(), Operand(imm)); \
1209 }
1210
1211// clang-format off
1212I64_BINOP_I(and, And)
1213I64_BINOP_I(or, Or)
1214I64_BINOP_I(xor, Xor)
1215// clang-format on
1216
1217#undef I64_BINOP_I
1218
1219#define I64_SHIFTOP(name, instruction) \
1220 void LiftoffAssembler::emit_i64_##name( \
1221 LiftoffRegister dst, LiftoffRegister src, Register amount) { \
1222 instruction(dst.gp(), src.gp(), amount); \
1223 }
1224
1225I64_SHIFTOP(shl, sll)
1226I64_SHIFTOP(sar, sra)
1227I64_SHIFTOP(shr, srl)
1228#undef I64_SHIFTOP
1229
1230void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src,
1231 int amount) {
1232 if (is_uint6(amount)) {
1233 slli(dst.gp(), src.gp(), amount);
1234 } else {
1235 li(kScratchReg, amount);
1236 sll(dst.gp(), src.gp(), kScratchReg);
1237 }
1238}
1239
1240void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src,
1241 int amount) {
1242 if (is_uint6(amount)) {
1243 srai(dst.gp(), src.gp(), amount);
1244 } else {
1245 li(kScratchReg, amount);
1246 sra(dst.gp(), src.gp(), kScratchReg);
1247 }
1248}
1249
1250void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src,
1251 int amount) {
1252 if (is_uint6(amount)) {
1253 srli(dst.gp(), src.gp(), amount);
1254 } else {
1255 li(kScratchReg, amount);
1256 srl(dst.gp(), src.gp(), kScratchReg);
1257 }
1258}
1259
1260void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
1261 int64_t imm) {
1262 MacroAssembler::Add64(dst.gp(), lhs.gp(), Operand(imm));
1263}
1264void LiftoffAssembler::emit_u32_to_uintptr(Register dst, Register src) {
1265 ZeroExtendWord(dst, src);
1266}
1267
1268void LiftoffAssembler::clear_i32_upper_half(Register dst) {
1269 // Don't need to clear the upper halves of i32 values for sandbox on riscv64,
1270 // because we'll explicitly zero-extend their lower halves before using them
1271 // for memory accesses anyway.
1272}
1273
1274#define FP_UNOP_RETURN_TRUE(name, instruction) \
1275 bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
1276 instruction(dst, src, kScratchDoubleReg); \
1277 return true; \
1278 }
1279
1280FP_UNOP_RETURN_TRUE(f64_ceil, Ceil_d_d)
1281FP_UNOP_RETURN_TRUE(f64_floor, Floor_d_d)
1282FP_UNOP_RETURN_TRUE(f64_trunc, Trunc_d_d)
1283FP_UNOP_RETURN_TRUE(f64_nearest_int, Round_d_d)
1284
1285#undef FP_UNOP_RETURN_TRUE
1286
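// Trapping float-to-int conversions below use the Trunc_* macro-assembler
// helpers, which set kScratchReg to 0 when the input is out of range so the
// branch can jump to {trap}. The saturating variants instead use fcvt with
// RTZ rounding and Clear_if_nan_* to force a 0 result for NaN inputs.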
1287bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
1288 LiftoffRegister dst,
1289 LiftoffRegister src, Label* trap) {
1290 switch (opcode) {
1291 case kExprI32ConvertI64:
1292 // According to the WebAssembly spec, if the i64 value does not fit the
1293 // i32 range, the result is undefined. Therefore, we use sign extension
1294 // to implement the i64-to-i32 truncation.
1295 MacroAssembler::SignExtendWord(dst.gp(), src.gp());
1296 return true;
1297 case kExprI32SConvertF32:
1298 case kExprI32UConvertF32:
1299 case kExprI32SConvertF64:
1300 case kExprI32UConvertF64:
1301 case kExprI64SConvertF32:
1302 case kExprI64UConvertF32:
1303 case kExprI64SConvertF64:
1304 case kExprI64UConvertF64:
1305 case kExprF32ConvertF64: {
1306 // Real conversion; if src is out of bounds of the target integer type,
1307 // kScratchReg is set to 0.
1308 switch (opcode) {
1309 case kExprI32SConvertF32:
1310 Trunc_w_s(dst.gp(), src.fp(), kScratchReg);
1311 break;
1312 case kExprI32UConvertF32:
1313 Trunc_uw_s(dst.gp(), src.fp(), kScratchReg);
1314 break;
1315 case kExprI32SConvertF64:
1316 Trunc_w_d(dst.gp(), src.fp(), kScratchReg);
1317 break;
1318 case kExprI32UConvertF64:
1319 Trunc_uw_d(dst.gp(), src.fp(), kScratchReg);
1320 break;
1321 case kExprI64SConvertF32:
1322 Trunc_l_s(dst.gp(), src.fp(), kScratchReg);
1323 break;
1324 case kExprI64UConvertF32:
1325 Trunc_ul_s(dst.gp(), src.fp(), kScratchReg);
1326 break;
1327 case kExprI64SConvertF64:
1328 Trunc_l_d(dst.gp(), src.fp(), kScratchReg);
1329 break;
1330 case kExprI64UConvertF64:
1331 Trunc_ul_d(dst.gp(), src.fp(), kScratchReg);
1332 break;
1333 case kExprF32ConvertF64:
1334 fcvt_s_d(dst.fp(), src.fp());
1335 break;
1336 default:
1337 UNREACHABLE();
1338 }
1339
1340 // Branch to {trap} if the conversion was out of range.
1341 if (trap != nullptr) {
1342 MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
1343 }
1344
1345 return true;
1346 }
1347 case kExprI32ReinterpretF32:
1348 MacroAssembler::ExtractLowWordFromF64(dst.gp(), src.fp());
1349 return true;
1350 case kExprI64SConvertI32:
1351 MacroAssembler::SignExtendWord(dst.gp(), src.gp());
1352 return true;
1353 case kExprI64UConvertI32:
1354 MacroAssembler::ZeroExtendWord(dst.gp(), src.gp());
1355 return true;
1356 case kExprI64ReinterpretF64:
1357 fmv_x_d(dst.gp(), src.fp());
1358 return true;
1359 case kExprF32SConvertI32: {
1360 MacroAssembler::Cvt_s_w(dst.fp(), src.gp());
1361 return true;
1362 }
1363 case kExprF32UConvertI32:
1364 MacroAssembler::Cvt_s_uw(dst.fp(), src.gp());
1365 return true;
1366 case kExprF32ReinterpretI32:
1367 fmv_w_x(dst.fp(), src.gp());
1368 return true;
1369 case kExprF64SConvertI32: {
1370 MacroAssembler::Cvt_d_w(dst.fp(), src.gp());
1371 return true;
1372 }
1373 case kExprF64UConvertI32:
1374 MacroAssembler::Cvt_d_uw(dst.fp(), src.gp());
1375 return true;
1376 case kExprF64ConvertF32:
1377 fcvt_d_s(dst.fp(), src.fp());
1378 return true;
1379 case kExprF64ReinterpretI64:
1380 fmv_d_x(dst.fp(), src.gp());
1381 return true;
1382 case kExprI32SConvertSatF32: {
1383 fcvt_w_s(dst.gp(), src.fp(), RTZ);
1384 Clear_if_nan_s(dst.gp(), src.fp());
1385 return true;
1386 }
1387 case kExprI32UConvertSatF32: {
1388 fcvt_wu_s(dst.gp(), src.fp(), RTZ);
1389 Clear_if_nan_s(dst.gp(), src.fp());
1390 return true;
1391 }
1392 case kExprI32SConvertSatF64: {
1393 fcvt_w_d(dst.gp(), src.fp(), RTZ);
1394 Clear_if_nan_d(dst.gp(), src.fp());
1395 return true;
1396 }
1397 case kExprI32UConvertSatF64: {
1398 fcvt_wu_d(dst.gp(), src.fp(), RTZ);
1399 Clear_if_nan_d(dst.gp(), src.fp());
1400 return true;
1401 }
1402 case kExprI64SConvertSatF32: {
1403 fcvt_l_s(dst.gp(), src.fp(), RTZ);
1404 Clear_if_nan_s(dst.gp(), src.fp());
1405 return true;
1406 }
1407 case kExprI64UConvertSatF32: {
1408 fcvt_lu_s(dst.gp(), src.fp(), RTZ);
1409 Clear_if_nan_s(dst.gp(), src.fp());
1410 return true;
1411 }
1412 case kExprI64SConvertSatF64: {
1413 fcvt_l_d(dst.gp(), src.fp(), RTZ);
1414 Clear_if_nan_d(dst.gp(), src.fp());
1415 return true;
1416 }
1417 case kExprI64UConvertSatF64: {
1418 fcvt_lu_d(dst.gp(), src.fp(), RTZ);
1419 Clear_if_nan_d(dst.gp(), src.fp());
1420 return true;
1421 }
1422 default:
1423 return false;
1424 }
1425}
1426
1427void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
1428 LiftoffRegister lhs,
1429 uint8_t imm_lane_idx) {
1430 VU.set(kScratchReg, E64, m1);
1431 vslidedown_vi(kSimd128ScratchReg, lhs.fp().toV(), imm_lane_idx);
1432 vmv_xs(dst.gp(), kSimd128ScratchReg);
1433}
1434
1435void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
1436 slliw(dst, src, 32 - 8);
1437 sraiw(dst, dst, 32 - 8);
1438}
1439
1440void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
1441 slliw(dst, src, 32 - 16);
1442 sraiw(dst, dst, 32 - 16);
1443}
1444
1445void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
1446 LiftoffRegister src) {
1447 slli(dst.gp(), src.gp(), 64 - 8);
1448 srai(dst.gp(), dst.gp(), 64 - 8);
1449}
1450
1451void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
1452 LiftoffRegister src) {
1453 slli(dst.gp(), src.gp(), 64 - 16);
1454 srai(dst.gp(), dst.gp(), 64 - 16);
1455}
1456
1457void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
1458 LiftoffRegister src) {
1459 slli(dst.gp(), src.gp(), 64 - 32);
1460 srai(dst.gp(), dst.gp(), 64 - 32);
1461}
1462
1463void LiftoffAssembler::emit_jump(Label* target) {
1464 MacroAssembler::Branch(target);
1465}
1466
1467void LiftoffAssembler::emit_jump(Register target) {
1468 MacroAssembler::Jump(target);
1469}
1470
1471void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
1472 ValueKind kind, Register lhs,
1473 Register rhs,
1474 const FreezeCacheState& frozen) {
1475 if (rhs == no_reg) {
1476 if (kind == kI32) {
1477 UseScratchRegisterScope temps(this);
1478 Register scratch0 = temps.Acquire();
1479 slliw(scratch0, lhs, 0);
1480 MacroAssembler::Branch(label, cond, scratch0, Operand(zero_reg));
1481 } else {
1482 DCHECK(kind == kI64);
1483 MacroAssembler::Branch(label, cond, lhs, Operand(zero_reg));
1484 }
1485 } else {
1486 if (kind == kI64) {
1487 MacroAssembler::Branch(label, cond, lhs, Operand(rhs));
1488 } else {
1489 DCHECK((kind == kI32) || (kind == kRef) || (kind == kRefNull));
1490 MacroAssembler::CompareTaggedAndBranch(label, cond, lhs, Operand(rhs));
1491 }
1492 }
1493}
1494
1496 Register lhs, int32_t imm,
1497 const FreezeCacheState& frozen) {
1498 MacroAssembler::CompareTaggedAndBranch(label, cond, lhs, Operand(imm));
1499}
1500
1502 Register lhs, int32_t imm,
1503 const FreezeCacheState& frozen) {
1504 MacroAssembler::Branch(label, cond, lhs, Operand(imm));
1505}
1506
1507void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
1508 MacroAssembler::slliw(dst, src, 0);
1509 MacroAssembler::Sltu(dst, src, 1);
1510}
1511
1512void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
1513 Register lhs, Register rhs) {
1514 UseScratchRegisterScope temps(this);
1515 Register scratch0 = temps.Acquire();
1516 Register scratch1 = kScratchReg;
1517 MacroAssembler::slliw(scratch0, lhs, 0);
1518 MacroAssembler::slliw(scratch1, rhs, 0);
1519 MacroAssembler::CompareI(dst, scratch0, Operand(scratch1), cond);
1520}
1521
1522void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
1523 MacroAssembler::Sltu(dst, src.gp(), 1);
1524}
1525
1526void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
1527 LiftoffRegister lhs,
1528 LiftoffRegister rhs) {
1529 MacroAssembler::CompareI(dst, lhs.gp(), Operand(rhs.gp()), cond);
1530}
1531
1532void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) {
1533 UseScratchRegisterScope temps(this);
1534 if (COMPRESS_POINTERS_BOOL) {
1535 DCHECK(SmiValuesAre31Bits());
1536 Register scratch = temps.Acquire();
1537 Lw(scratch, MemOperand(dst.gp(), offset));
1538 Add32(scratch, scratch, Operand(Smi::FromInt(1)));
1539 Sw(scratch, MemOperand(dst.gp(), offset));
1540 } else {
1541 Register scratch = temps.Acquire();
1542 SmiUntag(scratch, MemOperand(dst.gp(), offset));
1543 Add64(scratch, scratch, Operand(1));
1544 SmiTag(scratch);
1545 Sd(scratch, MemOperand(dst.gp(), offset));
1546 }
1547}
1548
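// LoadTransform handles the three load-transform kinds: kExtend loads 64 bits
// and widens each lane with vsext/vzext, kZeroExtend loads a 32/64-bit scalar
// into lane 0 of a zeroed vector, and the remaining (splat) case broadcasts
// the loaded scalar with vmv_vx.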
1549void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
1550 Register offset_reg, uintptr_t offset_imm,
1551 LoadType type,
1552 LoadTransformationKind transform,
1553 uint32_t* protected_load_pc,
1554 bool i64_offset) {
1555 UseScratchRegisterScope temps(this);
1556 Register scratch = temps.Acquire();
1557 MemOperand src_op =
1558 liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm, i64_offset);
1559 VRegister dst_v = dst.fp().toV();
1560 auto trapper = [protected_load_pc](int offset) {
1561 if (protected_load_pc) *protected_load_pc = static_cast<uint32_t>(offset);
1562 };
1563 MachineType memtype = type.mem_type();
1564 if (transform == LoadTransformationKind::kExtend) {
1565 Ld(scratch, src_op, trapper);
1566 if (memtype == MachineType::Int8()) {
1567 VU.set(kScratchReg, E64, m1);
1568 vmv_vx(kSimd128ScratchReg, scratch);
1569 VU.set(kScratchReg, E16, m1);
1570 vsext_vf2(dst_v, kSimd128ScratchReg);
1571 } else if (memtype == MachineType::Uint8()) {
1572 VU.set(kScratchReg, E64, m1);
1573 vmv_vx(kSimd128ScratchReg, scratch);
1574 VU.set(kScratchReg, E16, m1);
1575 vzext_vf2(dst_v, kSimd128ScratchReg);
1576 } else if (memtype == MachineType::Int16()) {
1577 VU.set(kScratchReg, E64, m1);
1578 vmv_vx(kSimd128ScratchReg, scratch);
1579 VU.set(kScratchReg, E32, m1);
1580 vsext_vf2(dst_v, kSimd128ScratchReg);
1581 } else if (memtype == MachineType::Uint16()) {
1582 VU.set(kScratchReg, E64, m1);
1583 vmv_vx(kSimd128ScratchReg, scratch);
1584 VU.set(kScratchReg, E32, m1);
1585 vzext_vf2(dst_v, kSimd128ScratchReg);
1586 } else if (memtype == MachineType::Int32()) {
1587 VU.set(kScratchReg, E64, m1);
1588 vmv_vx(kSimd128ScratchReg, scratch);
1589 vsext_vf2(dst_v, kSimd128ScratchReg);
1590 } else if (memtype == MachineType::Uint32()) {
1591 VU.set(kScratchReg, E64, m1);
1592 vmv_vx(kSimd128ScratchReg, scratch);
1593 vzext_vf2(dst_v, kSimd128ScratchReg);
1594 }
1595 } else if (transform == LoadTransformationKind::kZeroExtend) {
1596 vxor_vv(dst_v, dst_v, dst_v);
1597 if (memtype == MachineType::Int32()) {
1598 VU.set(kScratchReg, E32, m1);
1599 Lwu(scratch, src_op, trapper);
1600 vmv_sx(dst_v, scratch);
1601 } else {
1602 DCHECK_EQ(MachineType::Int64(), memtype);
1603 VU.set(kScratchReg, E64, m1);
1604 Ld(scratch, src_op, trapper);
1605 vmv_sx(dst_v, scratch);
1606 }
1607 } else {
1608 DCHECK_EQ(LoadTransformationKind::kSplat, transform);
1609 if (memtype == MachineType::Int8()) {
1610 VU.set(kScratchReg, E8, m1);
1611 Lb(scratch, src_op, trapper);
1612 vmv_vx(dst_v, scratch);
1613 } else if (memtype == MachineType::Int16()) {
1614 VU.set(kScratchReg, E16, m1);
1615 Lh(scratch, src_op, trapper);
1616 vmv_vx(dst_v, scratch);
1617 } else if (memtype == MachineType::Int32()) {
1618 VU.set(kScratchReg, E32, m1);
1619 Lw(scratch, src_op, trapper);
1620 vmv_vx(dst_v, scratch);
1621 } else if (memtype == MachineType::Int64()) {
1622 VU.set(kScratchReg, E64, m1);
1623 Ld(scratch, src_op, trapper);
1624 vmv_vx(dst_v, scratch);
1625 }
1626 }
1627 if (protected_load_pc) {
1628 DCHECK(InstructionAt(*protected_load_pc)->IsLoad());
1629 }
1630}
1631
1632void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
1633 Register addr, Register offset_reg,
1634 uintptr_t offset_imm, LoadType type,
1635 uint8_t laneidx, uint32_t* protected_load_pc,
1636 bool i64_offset) {
1637 MemOperand src_op =
1638 liftoff::GetMemOp(this, addr, offset_reg, offset_imm, i64_offset);
1639 MachineType mem_type = type.mem_type();
1640 UseScratchRegisterScope temps(this);
1641 Register scratch = temps.Acquire();
1642 auto trapper = [protected_load_pc](int offset) {
1643 if (protected_load_pc) *protected_load_pc = static_cast<uint32_t>(offset);
1644 };
1645 if (mem_type == MachineType::Int8()) {
1646 Lbu(scratch, src_op, trapper);
1647 VU.set(kScratchReg, E64, m1);
1648 li(kScratchReg, 0x1 << laneidx);
1649 vmv_sx(v0, kScratchReg);
1650 VU.set(kScratchReg, E8, m1);
1651 vmerge_vx(dst.fp().toV(), scratch, dst.fp().toV());
1652 } else if (mem_type == MachineType::Int16()) {
1653 Lhu(scratch, src_op, trapper);
1654 VU.set(kScratchReg, E16, m1);
1655 li(kScratchReg, 0x1 << laneidx);
1656 vmv_sx(v0, kScratchReg);
1657 vmerge_vx(dst.fp().toV(), scratch, dst.fp().toV());
1658 } else if (mem_type == MachineType::Int32()) {
1659 Lwu(scratch, src_op, trapper);
1660 VU.set(kScratchReg, E32, m1);
1661 li(kScratchReg, 0x1 << laneidx);
1662 vmv_sx(v0, kScratchReg);
1663 vmerge_vx(dst.fp().toV(), scratch, dst.fp().toV());
1664 } else if (mem_type == MachineType::Int64()) {
1665 Ld(scratch, src_op, trapper);
1666 VU.set(kScratchReg, E64, m1);
1667 li(kScratchReg, 0x1 << laneidx);
1668 vmv_sx(v0, kScratchReg);
1669 vmerge_vx(dst.fp().toV(), scratch, dst.fp().toV());
1670 } else {
1671 UNREACHABLE();
1672 }
1673 if (protected_load_pc) {
1674 DCHECK(InstructionAt(*protected_load_pc)->IsLoad());
1675 }
1676}
1677
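// StoreLane extracts the requested lane by sliding it down to element 0
// (vslidedown_vi), moving it to a scalar register, and then performing an
// ordinary scalar store of the appropriate width.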
1678void LiftoffAssembler::StoreLane(Register dst, Register offset,
1679 uintptr_t offset_imm, LiftoffRegister src,
1680 StoreType type, uint8_t lane,
1681 uint32_t* protected_store_pc,
1682 bool i64_offset) {
1683 MemOperand dst_op =
1684 liftoff::GetMemOp(this, dst, offset, offset_imm, i64_offset);
1685 MachineRepresentation rep = type.mem_rep();
1686 auto trapper = [protected_store_pc](int offset) {
1687 if (protected_store_pc) *protected_store_pc = static_cast<uint32_t>(offset);
1688 };
1689 if (rep == MachineRepresentation::kWord8) {
1690 VU.set(kScratchReg, E8, m1);
1691 vslidedown_vi(kSimd128ScratchReg, src.fp().toV(), lane);
1692 vmv_xs(kScratchReg, kSimd128ScratchReg);
1693 Sb(kScratchReg, dst_op, trapper);
1694 } else if (rep == MachineRepresentation::kWord16) {
1695 VU.set(kScratchReg, E16, m1);
1696 vslidedown_vi(kSimd128ScratchReg, src.fp().toV(), lane);
1697 vmv_xs(kScratchReg, kSimd128ScratchReg);
1698 Sh(kScratchReg, dst_op, trapper);
1699 } else if (rep == MachineRepresentation::kWord32) {
1700 VU.set(kScratchReg, E32, m1);
1701 vslidedown_vi(kSimd128ScratchReg, src.fp().toV(), lane);
1702 vmv_xs(kScratchReg, kSimd128ScratchReg);
1703 Sw(kScratchReg, dst_op, trapper);
1704 } else {
1705 DCHECK_EQ(MachineRepresentation::kWord64, rep);
1706 VU.set(kScratchReg, E64, m1);
1707 vslidedown_vi(kSimd128ScratchReg, src.fp().toV(), lane);
1708 vmv_xs(kScratchReg, kSimd128ScratchReg);
1709 Sd(kScratchReg, dst_op, trapper);
1710 }
1711 if (protected_store_pc) {
1712 DCHECK(InstructionAt(*protected_store_pc)->IsStore());
1713 }
1714}
1715
1716void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
1717 LiftoffRegister src) {
1718 VU.set(kScratchReg, E64, m1);
1719 vmv_vx(dst.fp().toV(), src.gp());
1720}
1721
1722void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
1723 LiftoffRegister src1,
1724 LiftoffRegister src2,
1725 uint8_t imm_lane_idx) {
1726 VU.set(kScratchReg, E64, m1);
1727 li(kScratchReg, 0x1 << imm_lane_idx);
1728 vmv_sx(v0, kScratchReg);
1729 vmerge_vx(dst.fp().toV(), src2.gp(), src1.fp().toV());
1730}
1731
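// For f64x2.min/max, v0 is first set to the lanes where both inputs are
// non-NaN (vmfeq of each operand against itself, combined with vand); the
// masked vfmin/vfmax then only updates those lanes, leaving the canonical
// quiet NaN pattern (kNaN) in the others.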
1732void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
1733 LiftoffRegister rhs) {
1734 VU.set(kScratchReg, E64, m1);
1735 const int64_t kNaN = 0x7ff8000000000000L;
1736 vmfeq_vv(v0, lhs.fp().toV(), lhs.fp().toV());
1737 vmfeq_vv(kSimd128ScratchReg, rhs.fp().toV(), rhs.fp().toV());
1738 vand_vv(v0, v0, kSimd128ScratchReg);
1739 li(kScratchReg, kNaN);
1740 vmv_vx(kSimd128ScratchReg, kScratchReg);
1741 vfmin_vv(kSimd128ScratchReg, rhs.fp().toV(), lhs.fp().toV(), Mask);
1742 vmv_vv(dst.fp().toV(), kSimd128ScratchReg);
1743}
1744
1745void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
1746 LiftoffRegister rhs) {
1747 VU.set(kScratchReg, E64, m1);
1748 const int64_t kNaN = 0x7ff8000000000000L;
1749 vmfeq_vv(v0, lhs.fp().toV(), lhs.fp().toV());
1750 vmfeq_vv(kSimd128ScratchReg, rhs.fp().toV(), rhs.fp().toV());
1751 vand_vv(v0, v0, kSimd128ScratchReg);
1752 li(kScratchReg, kNaN);
1753 vmv_vx(kSimd128ScratchReg, kScratchReg);
1754 vfmax_vv(kSimd128ScratchReg, rhs.fp().toV(), lhs.fp().toV(), Mask);
1755 vmv_vv(dst.fp().toV(), kSimd128ScratchReg);
1756}
1757
1758void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
1759 LiftoffRegister src) {
1760 VU.set(kScratchReg, E64, m1);
1763 li(kScratchReg, 0x0006000400020000);
1765 li(kScratchReg, 0x0007000500030001);
1767 VU.set(kScratchReg, E16, m1);
1768 vrgather_vv(kSimd128ScratchReg2, src.fp().toV(), kSimd128ScratchReg);
1769 vrgather_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg3);
1770 VU.set(kScratchReg, E16, mf2);
1771 vwadd_vv(dst.fp().toV(), kSimd128ScratchReg, kSimd128ScratchReg2);
1772}
1773
1774void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
1775 LiftoffRegister src) {
1776 VU.set(kScratchReg, E64, m1);
1779 li(kScratchReg, 0x0006000400020000);
1781 li(kScratchReg, 0x0007000500030001);
1783 VU.set(kScratchReg, E16, m1);
1784 vrgather_vv(kSimd128ScratchReg2, src.fp().toV(), kSimd128ScratchReg);
1785 vrgather_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg3);
1786 VU.set(kScratchReg, E16, mf2);
1787 vwaddu_vv(dst.fp().toV(), kSimd128ScratchReg, kSimd128ScratchReg2);
1788}
1789
1790void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
1791 LiftoffRegister src) {
1792 VU.set(kScratchReg, E64, m1);
1795 li(kScratchReg, 0x0E0C0A0806040200);
1797 li(kScratchReg, 0x0F0D0B0907050301);
1799 VU.set(kScratchReg, E8, m1);
1800 vrgather_vv(kSimd128ScratchReg2, src.fp().toV(), kSimd128ScratchReg);
1801 vrgather_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg3);
1802 VU.set(kScratchReg, E8, mf2);
1803 vwadd_vv(dst.fp().toV(), kSimd128ScratchReg, kSimd128ScratchReg2);
1804}
1805
1806void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
1807 LiftoffRegister src) {
1808 VU.set(kScratchReg, E64, m1);
1811 li(kScratchReg, 0x0E0C0A0806040200);
1813 li(kScratchReg, 0x0F0D0B0907050301);
1815 VU.set(kScratchReg, E8, m1);
1816 vrgather_vv(kSimd128ScratchReg2, src.fp().toV(), kSimd128ScratchReg);
1817 vrgather_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg3);
1818 VU.set(kScratchReg, E8, mf2);
1819 vwaddu_vv(dst.fp().toV(), kSimd128ScratchReg, kSimd128ScratchReg2);
1820}
1821
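// Calls a C function whose arguments are passed in a stack buffer: every
// VarState argument is spilled to sp-relative memory, a0 receives the buffer
// address, and after the call the return value (and any out-argument written
// back into the buffer) is moved into the requested Liftoff registers.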
1822void LiftoffAssembler::CallCWithStackBuffer(
1823 const std::initializer_list<VarState> args, const LiftoffRegister* rets,
1824 ValueKind return_kind, ValueKind out_argument_kind, int stack_bytes,
1825 ExternalReference ext_ref) {
1826 AddWord(sp, sp, Operand(-stack_bytes));
1827
1828 int arg_offset = 0;
1829 for (const VarState& arg : args) {
1830 liftoff::StoreToMemory(this, MemOperand{sp, arg_offset}, arg);
1831 arg_offset += value_kind_size(arg.kind());
1832 }
1833 DCHECK_LE(arg_offset, stack_bytes);
1834
1835 // Pass a pointer to the buffer with the arguments to the C function.
1836 // On RISC-V, the first argument is passed in {a0}.
1837 constexpr Register kFirstArgReg = a0;
1838 mv(kFirstArgReg, sp);
1839
1840 // Now call the C function.
1841 constexpr int kNumCCallArgs = 1;
1842 PrepareCallCFunction(kNumCCallArgs, kScratchReg);
1843 CallCFunction(ext_ref, kNumCCallArgs);
1844
1845 // Move return value to the right register.
1846 const LiftoffRegister* next_result_reg = rets;
1847 if (return_kind != kVoid) {
1848 constexpr Register kReturnReg = a0;
1849 if (kReturnReg != next_result_reg->gp()) {
1850 Move(*next_result_reg, LiftoffRegister(kReturnReg), return_kind);
1851 }
1852 ++next_result_reg;
1853 }
1854
1855 // Load potential output value from the buffer on the stack.
1856 if (out_argument_kind != kVoid) {
1857 liftoff::Load(this, *next_result_reg, MemOperand(sp, 0), out_argument_kind);
1858 }
1859
1860 AddWord(sp, sp, Operand(stack_bytes));
1861}
1862
1863void LiftoffAssembler::CallC(const std::initializer_list<VarState> args_list,
1864 ExternalReference ext_ref) {
1865 const int num_args = static_cast<int>(args_list.size());
1866 const VarState* const args = args_list.begin();
1867 // Note: If we ever need more than eight arguments we would need to load the
1868 // stack arguments to registers (via LoadToRegister) in pairs of two, then use
1869 // Stp with MemOperand{sp, -2 * kSystemPointerSize, PreIndex} to push them to
1870 // the stack.
1871 // Execute the parallel register move for register parameters.
1872 DCHECK_GE(arraysize(kCArgRegs), num_args);
1873 ParallelMove parallel_move{this};
1874 for (int reg_arg = 0; reg_arg < num_args; ++reg_arg) {
1875 parallel_move.LoadIntoRegister(LiftoffRegister{kCArgRegs[reg_arg]},
1876 args[reg_arg]);
1877 }
1878 parallel_move.Execute();
1879 // Now call the C function.
1880 PrepareCallCFunction(num_args, kScratchReg);
1881 CallCFunction(ext_ref, num_args);
1882}
1883
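// Construct materializes the stack parameters of a call: it walks the
// recorded slots from the highest parameter slot downwards, allocates any
// padding with AllocateStackSpace, and pushes each value from its stack
// slot, register or constant.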
1884void LiftoffStackSlots::Construct(int param_slots) {
1885 DCHECK_LT(0, slots_.size());
1886 SortInPushOrder();
1887 int last_stack_slot = param_slots;
1888 for (auto& slot : slots_) {
1889 const int stack_slot = slot.dst_slot_;
1890 int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
1891 DCHECK_LT(0, stack_decrement);
1892 last_stack_slot = stack_slot;
1893 const LiftoffAssembler::VarState& src = slot.src_;
1894 switch (src.loc()) {
1896 if (src.kind() != kS128) {
1897 asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
1898 asm_->Ld(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
1900 } else {
1901 asm_->AllocateStackSpace(stack_decrement - kSimd128Size);
1902 asm_->Ld(kScratchReg, liftoff::GetStackSlot(slot.src_offset_ - 8));
1904 asm_->Ld(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
1906 }
1907 break;
1909 int pushed_bytes = SlotSizeInBytes(slot);
1910 asm_->AllocateStackSpace(stack_decrement - pushed_bytes);
1911 liftoff::push(asm_, src.reg(), src.kind());
1912 break;
1913 }
1915 asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
1916 asm_->li(kScratchReg, Operand(src.i32_const()));
1918 break;
1919 }
1920 }
1921 }
1922}
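// Illustrative note: slots are pushed from the highest parameter slot down to
// slot 0, and stack_decrement is the distance (in bytes) from the previously
// written slot to the current one. AllocateStackSpace() claims the padding in
// between minus the bytes about to be pushed, so after each iteration sp
// points exactly at the slot just written. For example, a two-slot gap before
// an i64 register slot becomes AllocateStackSpace(2 * kSystemPointerSize - 8)
// followed by a single 8-byte push.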
1923
1924bool LiftoffAssembler::supports_f16_mem_access() { return false; }
1925
1926} // namespace v8::internal::wasm
1927
1928#endif // V8_WASM_BASELINE_RISCV_LIFTOFF_ASSEMBLER_RISCV64_INL_H_