v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
liftoff-assembler-s390-inl.h
1// Copyright 2017 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_WASM_BASELINE_S390_LIFTOFF_ASSEMBLER_S390_INL_H_
6#define V8_WASM_BASELINE_S390_LIFTOFF_ASSEMBLER_S390_INL_H_
7
17
18namespace v8::internal::wasm {
19
20namespace liftoff {
21
22// half
23// slot Frame
24// -----+--------------------+---------------------------
25// n+3 | parameter n |
26// ... | ... |
27// 4 | parameter 1 | or parameter 2
28// 3 | parameter 0 | or parameter 1
29// 2 | (result address) | or parameter 0
30// -----+--------------------+---------------------------
31// 1 | return addr (lr) |
32// 0 | previous frame (fp)|
33// -----+--------------------+ <-- frame ptr (fp)
34// -1 | StackFrame::WASM |
35// -2 | instance |
36// -3 | feedback vector |
37// -4 | tiering budget |
38// -----+--------------------+---------------------------
39// -5 | slot 0 (high) | ^
40// -6 | slot 0 (low) | |
41// -7 | slot 1 (high) | Frame slots
42// -8 | slot 1 (low) | |
43// | | v
44// -----+--------------------+ <-- stack ptr (sp)
45//
46inline MemOperand GetStackSlot(uint32_t offset) {
47 return MemOperand(fp, -offset);
48}
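// Liftoff spill offsets are positive distances below the frame pointer (see
// the frame diagram above), so GetStackSlot only needs to negate the offset
// to form an fp-relative MemOperand.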
49
52}
53
56 Register scratch) {
57 if (src.is_reg()) {
58 switch (src.kind()) {
59 case kI16:
60 assm->StoreU16(src.reg().gp(), dst);
61 break;
62 case kI32:
63 assm->StoreU32(src.reg().gp(), dst);
64 break;
65 case kI64:
66 assm->StoreU64(src.reg().gp(), dst);
67 break;
68 case kF32:
69 assm->StoreF32(src.reg().fp(), dst);
70 break;
71 case kF64:
72 assm->StoreF64(src.reg().fp(), dst);
73 break;
74 case kS128:
75 assm->StoreV128(src.reg().fp(), dst, scratch);
76 break;
77 default:
 78        UNREACHABLE();
 79    }
80 } else if (src.is_const()) {
81 if (src.kind() == kI32) {
82 assm->mov(scratch, Operand(src.i32_const()));
83 assm->StoreU32(scratch, dst);
84 } else {
85 assm->mov(scratch, Operand(static_cast<int64_t>(src.i32_const())));
86 assm->StoreU64(scratch, dst);
87 }
88 } else if (value_kind_size(src.kind()) == 4) {
89 assm->LoadU32(scratch, liftoff::GetStackSlot(src.offset()), scratch);
90 assm->StoreU32(scratch, dst);
91 } else {
92 DCHECK_EQ(8, value_kind_size(src.kind()));
93 assm->LoadU64(scratch, liftoff::GetStackSlot(src.offset()), scratch);
94 assm->StoreU64(scratch, dst);
95 }
96}
97
98} // namespace liftoff
99
100int LiftoffAssembler::PrepareStackFrame() {
101  int offset = pc_offset();
102 lay(sp, MemOperand(sp));
103 return offset;
104}
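// Note: the preceding lay(sp, MemOperand(sp)) is a no-op that merely reserves
// a patchable instruction slot at the returned pc offset. PatchPrepareStackFrame
// (below) later overwrites it with the real frame allocation, or with a branch
// to out-of-line code when the frame exceeds 4KB.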
105
106void LiftoffAssembler::CallFrameSetupStub(int declared_function_index) {
107// The standard library used by gcc tryjobs does not consider `std::find` to be
108// `constexpr`, so wrap it in a `#ifdef __clang__` block.
109#ifdef __clang__
110 static_assert(std::find(std::begin(wasm::kGpParamRegisters),
111 std::end(wasm::kGpParamRegisters),
112 kLiftoffFrameSetupFunctionReg) ==
113 std::end(wasm::kGpParamRegisters));
114#endif
115
116 // On ARM, we must push at least {lr} before calling the stub, otherwise
117 // it would get clobbered with no possibility to recover it.
118 Register scratch = ip;
119 mov(scratch, Operand(StackFrame::TypeToMarker(StackFrame::WASM)));
120 PushCommonFrame(scratch);
121 LoadConstant(LiftoffRegister(kLiftoffFrameSetupFunctionReg),
122 WasmValue(declared_function_index));
123 CallBuiltin(Builtin::kWasmLiftoffFrameSetup);
124}
125
126void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
127 int stack_param_delta) {
128 Register scratch = r1;
129 // Push the return address and frame pointer to complete the stack frame.
130 lay(sp, MemOperand(sp, -2 * kSystemPointerSize));
133 LoadU64(scratch, MemOperand(fp));
134 StoreU64(scratch, MemOperand(sp));
135
136 // Shift the whole frame upwards.
137 int slot_count = num_callee_stack_params + 2;
138 for (int i = slot_count - 1; i >= 0; --i) {
140 StoreU64(scratch,
141 MemOperand(fp, (i - stack_param_delta) * kSystemPointerSize));
142 }
143
144 // Set the new stack and frame pointer.
145 lay(sp, MemOperand(fp, -stack_param_delta * kSystemPointerSize));
146 Pop(r14, fp);
147}
148
150
151void LiftoffAssembler::PatchPrepareStackFrame(
152    int offset, SafepointTableBuilder* safepoint_table_builder,
153 bool feedback_vector_slot, size_t stack_param_slots) {
154 int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
155 // The frame setup builtin also pushes the feedback vector.
156 if (feedback_vector_slot) {
157 frame_size -= kSystemPointerSize;
158 }
159
160 constexpr int LayInstrSize = 6;
161
162 Assembler patching_assembler(
163 AssemblerOptions{},
165 if (V8_LIKELY(frame_size < 4 * KB)) {
166 patching_assembler.lay(sp, MemOperand(sp, -frame_size));
167 return;
168 }
169
170 // The frame size is bigger than 4KB, so we might overflow the available stack
171 // space if we first allocate the frame and then do the stack check (we will
172 // need some remaining stack space for throwing the exception). That's why we
173 // check the available stack space before we allocate the frame. To do this we
174 // replace the {__ sub(sp, sp, framesize)} with a jump to OOL code that does
175 // this "extended stack check".
176 //
177 // The OOL code can simply be generated here with the normal assembler,
178 // because all other code generation, including OOL code, has already finished
179 // when {PatchPrepareStackFrame} is called. The function prologue then jumps
180 // to the current {pc_offset()} to execute the OOL code for allocating the
181 // large frame.
182
183 // Emit the unconditional branch in the function prologue (from {offset} to
184 // {pc_offset()}).
185
186 int jump_offset = pc_offset() - offset;
187 patching_assembler.branchOnCond(al, jump_offset, true, true);
188
189 // If the frame is bigger than the stack, we throw the stack overflow
190 // exception unconditionally. Thereby we can avoid the integer overflow
191 // check in the condition code.
192 RecordComment("OOL: stack check for large frame");
193 Label continuation;
194 if (frame_size < v8_flags.stack_size * 1024) {
195 Register stack_limit = ip;
197 AddU64(stack_limit, Operand(frame_size));
198 CmpU64(sp, stack_limit);
200 }
201
202 if (v8_flags.experimental_wasm_growable_stacks) {
203 LiftoffRegList regs_to_save;
205 regs_to_save.set(WasmHandleStackOverflowDescriptor::FrameBaseRegister());
206 for (auto reg : kGpParamRegisters) regs_to_save.set(reg);
207 for (auto reg : kFpParamRegisters) regs_to_save.set(reg);
210 AddS64(WasmHandleStackOverflowDescriptor::FrameBaseRegister(), fp,
211 Operand(stack_param_slots * kStackSlotSize +
213 CallBuiltin(Builtin::kWasmHandleStackOverflow);
214 safepoint_table_builder->DefineSafepoint(this);
216 } else {
217 Call(static_cast<Address>(Builtin::kWasmStackOverflow),
219 // The call will not return; just define an empty safepoint.
220 safepoint_table_builder->DefineSafepoint(this);
221 if (v8_flags.debug_code) stop();
222 }
223
225
226 // Now allocate the stack space. Note that this might do more than just
227 // decrementing the SP; consult {MacroAssembler::AllocateStackSpace}.
228 lay(sp, MemOperand(sp, -frame_size));
229
230 // Jump back to the start of the function, from {pc_offset()} to
231 // right after the reserved space for the {__ sub(sp, sp, framesize)} (which
232 // is a branch now).
233 jump_offset = offset - pc_offset() + 6;
235}
236
238
240
241// static
244}
245
247 switch (kind) {
248 case kS128:
249 return value_kind_size(kind);
250 default:
251 return kStackSlotSize;
252 }
253}
254
256 return (kind == kS128 || is_reference(kind));
257}
258
259void LiftoffAssembler::CheckTierUp(int declared_func_index, int budget_used,
260 Label* ool_label,
261 const FreezeCacheState& frozen) {
262 Register budget_array = ip;
264
265 if (instance_data == no_reg) {
266 instance_data = budget_array; // Reuse the temp register.
268 }
269
270 constexpr int kArrayOffset = wasm::ObjectAccess::ToTagged(
271 WasmTrustedInstanceData::kTieringBudgetArrayOffset);
272 LoadU64(budget_array, MemOperand(instance_data, kArrayOffset));
273
274 int budget_arr_offset = kInt32Size * declared_func_index;
275 Register budget = r1;
276 MemOperand budget_addr(budget_array, budget_arr_offset);
277 LoadS32(budget, budget_addr);
278 SubS32(budget, Operand(budget_used));
279 StoreU32(budget, budget_addr);
280 blt(ool_label);
281}
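// Note: CheckTierUp decrements this function's entry in the tiering budget
// array hanging off the trusted instance data; once the 32-bit budget drops
// below zero, the blt above takes the out-of-line path that requests tier-up.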
282
284 if (!v8_flags.experimental_wasm_growable_stacks) {
285 return fp;
286 }
287 LiftoffRegister old_fp = GetUnusedRegister(RegClass::kGpReg, {});
288 Label done, call_runtime;
290 CmpU64(old_fp.gp(),
291 Operand(StackFrame::TypeToMarker(StackFrame::WASM_SEGMENT_START)));
292 beq(&call_runtime);
293 mov(old_fp.gp(), fp);
294 jmp(&done);
295
296 bind(&call_runtime);
297 LiftoffRegList regs_to_save = cache_state()->used_registers;
301 CallCFunction(ExternalReference::wasm_load_old_fp(), 1);
302 if (old_fp.gp() != kReturnRegister0) {
303 mov(old_fp.gp(), kReturnRegister0);
304 }
306
307 bind(&done);
308 return old_fp.gp();
309}
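// Note: with growable stacks, a frame whose marker is WASM_SEGMENT_START sits
// at a stack-segment boundary, so the caller's frame pointer cannot be read
// from fp directly; it is instead fetched via the wasm_load_old_fp external
// reference, with the registers currently in use preserved around the C call.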
310
312 {
313 UseScratchRegisterScope temps{this};
314 Register scratch = temps.Acquire();
316 CmpU64(scratch,
317 Operand(StackFrame::TypeToMarker(StackFrame::WASM_SEGMENT_START)));
318 }
319 Label done;
320 bne(&done);
321 LiftoffRegList regs_to_save;
322 for (auto reg : kGpReturnRegisters) regs_to_save.set(reg);
323 for (auto reg : kFpReturnRegisters) regs_to_save.set(reg);
327 CallCFunction(ExternalReference::wasm_shrink_stack(), 1);
328 // Restore old FP. We don't need to restore old SP explicitly, because
329 // it will be restored from FP in LeaveFrame before return.
332 bind(&done);
333}
334
335void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
336 switch (value.type().kind()) {
337 case kI32:
338 mov(reg.gp(), Operand(value.to_i32()));
339 break;
340 case kI64:
341 mov(reg.gp(), Operand(value.to_i64()));
342 break;
343 case kF32: {
344 UseScratchRegisterScope temps(this);
345 Register scratch = temps.Acquire();
346 LoadF32(reg.fp(), value.to_f32(), scratch);
347 break;
348 }
349 case kF64: {
350 UseScratchRegisterScope temps(this);
351 Register scratch = temps.Acquire();
352 LoadF64(reg.fp(), value.to_f64(), scratch);
353 break;
354 }
355 default:
356 UNREACHABLE();
357 }
358}
359
362}
363
364void LiftoffAssembler::LoadTrustedPointer(Register dst, Register src_addr,
365 int offset, IndirectPointerTag tag) {
366 LoadTaggedField(dst, MemOperand{src_addr, offset});
367}
368
369void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
370 int offset, int size) {
371 DCHECK_LE(0, offset);
372 switch (size) {
373 case 1:
374 LoadU8(dst, MemOperand(instance, offset));
375 break;
376 case 4:
377 LoadU32(dst, MemOperand(instance, offset));
378 break;
379 case 8:
380 LoadU64(dst, MemOperand(instance, offset));
381 break;
382 default:
384 }
385}
386
388 Register instance,
389 int offset) {
390 DCHECK_LE(0, offset);
391 LoadTaggedField(dst, MemOperand(instance, offset));
392}
393
394void LiftoffAssembler::SpillInstanceData(Register instance) {
396}
397
399
400void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
401 Register offset_reg,
402 int32_t offset_imm,
403 uint32_t* protected_load_pc,
404 bool needs_shift) {
405 CHECK(is_int20(offset_imm));
406 unsigned shift_amount = !needs_shift ? 0 : COMPRESS_POINTERS_BOOL ? 2 : 3;
407 if (offset_reg != no_reg && shift_amount != 0) {
408 ShiftLeftU64(ip, offset_reg, Operand(shift_amount));
409 offset_reg = ip;
410 }
411 if (protected_load_pc) *protected_load_pc = pc_offset();
413 dst,
414 MemOperand(src_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
415}
416
417void LiftoffAssembler::LoadProtectedPointer(Register dst, Register src_addr,
418 int32_t offset) {
419 static_assert(!V8_ENABLE_SANDBOX_BOOL);
420 LoadTaggedPointer(dst, src_addr, no_reg, offset);
421}
422
423void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
424 int32_t offset_imm) {
425 UseScratchRegisterScope temps(this);
426 LoadU64(dst, MemOperand(src_addr, offset_imm), r1);
427}
428
429void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
430 Register offset_reg,
431 int32_t offset_imm, Register src,
432 LiftoffRegList /* pinned */,
433 uint32_t* protected_store_pc,
434 SkipWriteBarrier skip_write_barrier) {
435 MemOperand dst_op =
436 MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm);
437 if (protected_store_pc) *protected_store_pc = pc_offset();
438 StoreTaggedField(src, dst_op);
439
440 if (skip_write_barrier || v8_flags.disable_write_barriers) return;
441
442 Label exit;
444 to_condition(kZero), &exit);
445 JumpIfSmi(src, &exit);
447 &exit);
448 lay(r1, dst_op);
450 StubCallMode::kCallWasmRuntimeStub);
451 bind(&exit);
452}
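// Note: the (partially elided) instructions above form the usual write-barrier
// fast path: the barrier is skipped entirely when requested or disabled, when
// the stored value is a Smi, or when the page-flag checks show no remembered-set
// update is needed; otherwise the record-write stub is called with the slot
// address materialized into r1.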
453
454void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
455 Register offset_reg, uintptr_t offset_imm,
456 LoadType type, uint32_t* protected_load_pc,
457 bool is_load_mem, bool i64_offset,
458 bool needs_shift) {
459 UseScratchRegisterScope temps(this);
460 if (offset_reg != no_reg && !i64_offset) {
461 // Clear the upper 32 bits of the 64 bit offset register.
462 llgfr(ip, offset_reg);
463 offset_reg = ip;
464 }
465 unsigned shift_amount = needs_shift ? type.size_log_2() : 0;
466 if (offset_reg != no_reg && shift_amount != 0) {
467 ShiftLeftU64(ip, offset_reg, Operand(shift_amount));
468 offset_reg = ip;
469 }
470 if (!is_int20(offset_imm)) {
471 if (offset_reg != no_reg) {
472 mov(r0, Operand(offset_imm));
473 AddS64(r0, offset_reg);
474 mov(ip, r0);
475 } else {
476 mov(ip, Operand(offset_imm));
477 }
478 offset_reg = ip;
479 offset_imm = 0;
480 }
481 MemOperand src_op =
482 MemOperand(src_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm);
483 if (protected_load_pc) *protected_load_pc = pc_offset();
484 switch (type.value()) {
485 case LoadType::kI32Load8U:
486 case LoadType::kI64Load8U:
487 LoadU8(dst.gp(), src_op);
488 break;
489 case LoadType::kI32Load8S:
490 case LoadType::kI64Load8S:
491 LoadS8(dst.gp(), src_op);
492 break;
493 case LoadType::kI32Load16U:
494 case LoadType::kI64Load16U:
495 if (is_load_mem) {
496 LoadU16LE(dst.gp(), src_op);
497 } else {
498 LoadU16(dst.gp(), src_op);
499 }
500 break;
501 case LoadType::kI32Load16S:
502 case LoadType::kI64Load16S:
503 if (is_load_mem) {
504 LoadS16LE(dst.gp(), src_op);
505 } else {
506 LoadS16(dst.gp(), src_op);
507 }
508 break;
509 case LoadType::kI64Load32U:
510 if (is_load_mem) {
511 LoadU32LE(dst.gp(), src_op);
512 } else {
513 LoadU32(dst.gp(), src_op);
514 }
515 break;
516 case LoadType::kI32Load:
517 case LoadType::kI64Load32S:
518 if (is_load_mem) {
519 LoadS32LE(dst.gp(), src_op);
520 } else {
521 LoadS32(dst.gp(), src_op);
522 }
523 break;
524 case LoadType::kI64Load:
525 if (is_load_mem) {
526 LoadU64LE(dst.gp(), src_op);
527 } else {
528 LoadU64(dst.gp(), src_op);
529 }
530 break;
531 case LoadType::kF32Load:
532 if (is_load_mem) {
533 LoadF32LE(dst.fp(), src_op, r0);
534 } else {
535 LoadF32(dst.fp(), src_op);
536 }
537 break;
538 case LoadType::kF64Load:
539 if (is_load_mem) {
540 LoadF64LE(dst.fp(), src_op, r0);
541 } else {
542 LoadF64(dst.fp(), src_op);
543 }
544 break;
545 case LoadType::kS128Load:
546 if (is_load_mem) {
547 LoadV128LE(dst.fp(), src_op, r1, r0);
548 } else {
549 LoadV128(dst.fp(), src_op, r1);
550 }
551 break;
552 default:
553 UNREACHABLE();
554 }
555}
556
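// PREP_MEM_OPERAND mirrors the prologue of Load() above: it zero-extends a
// 32-bit offset register (when i64_offset is false) and, if the immediate
// offset does not fit into a signed 20-bit displacement, folds it into the
// scratch register so that the final MemOperand always uses an in-range
// displacement.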
557#define PREP_MEM_OPERAND(offset_reg, offset_imm, scratch) \
558 if (offset_reg != no_reg && !i64_offset) { \
559 /* Clear the upper 32 bits of the 64 bit offset register.*/ \
560 llgfr(scratch, offset_reg); \
561 offset_reg = scratch; \
562 } \
563 if (!is_int20(offset_imm)) { \
564 if (offset_reg != no_reg) { \
565 mov(r0, Operand(offset_imm)); \
566 AddS64(r0, offset_reg); \
567 mov(scratch, r0); \
568 } else { \
569 mov(scratch, Operand(offset_imm)); \
570 } \
571 offset_reg = scratch; \
572 offset_imm = 0; \
573 }
574void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
575 uintptr_t offset_imm, LiftoffRegister src,
576 StoreType type, LiftoffRegList /* pinned */,
577 uint32_t* protected_store_pc, bool is_store_mem,
578 bool i64_offset) {
579 PREP_MEM_OPERAND(offset_reg, offset_imm, ip)
580 MemOperand dst_op =
581 MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm);
582 if (protected_store_pc) *protected_store_pc = pc_offset();
583 switch (type.value()) {
584 case StoreType::kI32Store8:
585 case StoreType::kI64Store8:
586 StoreU8(src.gp(), dst_op);
587 break;
588 case StoreType::kI32Store16:
589 case StoreType::kI64Store16:
590 if (is_store_mem) {
591 StoreU16LE(src.gp(), dst_op, r1);
592 } else {
593 StoreU16(src.gp(), dst_op, r1);
594 }
595 break;
596 case StoreType::kI32Store:
597 case StoreType::kI64Store32:
598 if (is_store_mem) {
599 StoreU32LE(src.gp(), dst_op, r1);
600 } else {
601 StoreU32(src.gp(), dst_op, r1);
602 }
603 break;
604 case StoreType::kI64Store:
605 if (is_store_mem) {
606 StoreU64LE(src.gp(), dst_op, r1);
607 } else {
608 StoreU64(src.gp(), dst_op, r1);
609 }
610 break;
611 case StoreType::kF32Store:
612 if (is_store_mem) {
613 StoreF32LE(src.fp(), dst_op, r1);
614 } else {
615 StoreF32(src.fp(), dst_op);
616 }
617 break;
618 case StoreType::kF64Store:
619 if (is_store_mem) {
620 StoreF64LE(src.fp(), dst_op, r1);
621 } else {
622 StoreF64(src.fp(), dst_op);
623 }
624 break;
625 case StoreType::kS128Store: {
626 if (is_store_mem) {
627 StoreV128LE(src.fp(), dst_op, r1, r0);
628 } else {
629 StoreV128(src.fp(), dst_op, r1);
630 }
631 break;
632 }
633 default:
634 UNREACHABLE();
635 }
636}
637
638void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
639 Register offset_reg, uintptr_t offset_imm,
640 LoadType type, LiftoffRegList /* pinned */,
641 bool i64_offset) {
642 Load(dst, src_addr, offset_reg, offset_imm, type, nullptr, true, i64_offset);
643}
644
645void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
646 uintptr_t offset_imm, LiftoffRegister src,
647 StoreType type, LiftoffRegList /* pinned */,
648 bool i64_offset) {
649 PREP_MEM_OPERAND(offset_reg, offset_imm, ip)
650 lay(ip,
651 MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
652
653 switch (type.value()) {
654 case StoreType::kI32Store8:
655 case StoreType::kI64Store8: {
656 AtomicExchangeU8(ip, src.gp(), r1, r0);
657 break;
658 }
659 case StoreType::kI32Store16:
660 case StoreType::kI64Store16: {
661#ifdef V8_TARGET_BIG_ENDIAN
662 lrvr(r1, src.gp());
663 ShiftRightU32(r1, r1, Operand(16));
664#else
665 LoadU16(r1, src.gp());
666#endif
667 Push(r2);
668 AtomicExchangeU16(ip, r1, r2, r0);
669 Pop(r2);
670 break;
671 }
672 case StoreType::kI32Store:
673 case StoreType::kI64Store32: {
674#ifdef V8_TARGET_BIG_ENDIAN
675 lrvr(r1, src.gp());
676#else
677 LoadU32(r1, src.gp());
678#endif
679 Label do_cs;
680 bind(&do_cs);
681 cs(r0, r1, MemOperand(ip));
682 bne(&do_cs, Label::kNear);
683 break;
684 }
685 case StoreType::kI64Store: {
686#ifdef V8_TARGET_BIG_ENDIAN
687 lrvgr(r1, src.gp());
688#else
689 mov(r1, src.gp());
690#endif
691 Label do_cs;
692 bind(&do_cs);
693 csg(r0, r1, MemOperand(ip));
694 bne(&do_cs, Label::kNear);
695 break;
696 }
697 default:
698 UNREACHABLE();
699 }
700}
701
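// The read-modify-write atomics below share one pattern: load the current
// value, apply the operation, then retry via compare-and-swap (CS/CSG) until
// it succeeds -- b(Condition(4), ...) loops back while the swap failed. Since
// wasm memory is little-endian, on V8_TARGET_BIG_ENDIAN the loaded value is
// byte-reversed with lrvr/lrvgr before the arithmetic and reversed back before
// the swap; for 16-bit values the reversed halfword lands in the upper 16 bits,
// hence the extra ShiftRightU32(..., Operand(16)) steps.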
702void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
703 uintptr_t offset_imm, LiftoffRegister value,
704 LiftoffRegister result, StoreType type,
705 bool i64_offset) {
706 LiftoffRegList pinned = LiftoffRegList{dst_addr, value, result};
707 if (offset_reg != no_reg) pinned.set(offset_reg);
708 Register tmp1 = GetUnusedRegister(kGpReg, pinned).gp();
709 pinned.set(tmp1);
710 Register tmp2 = GetUnusedRegister(kGpReg, pinned).gp();
711
712 PREP_MEM_OPERAND(offset_reg, offset_imm, ip)
713 lay(ip,
714 MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
715
716 switch (type.value()) {
717 case StoreType::kI32Store8:
718 case StoreType::kI64Store8: {
719 Label doadd;
720 bind(&doadd);
721 LoadU8(tmp1, MemOperand(ip));
722 AddS32(tmp2, tmp1, value.gp());
723 AtomicCmpExchangeU8(ip, result.gp(), tmp1, tmp2, r0, r1);
724 b(Condition(4), &doadd);
725 LoadU8(result.gp(), result.gp());
726 break;
727 }
728 case StoreType::kI32Store16:
729 case StoreType::kI64Store16: {
730 Label doadd;
731 bind(&doadd);
732 LoadU16(tmp1, MemOperand(ip));
733#ifdef V8_TARGET_BIG_ENDIAN
734 lrvr(tmp2, tmp1);
735 ShiftRightU32(tmp2, tmp2, Operand(16));
736 AddS32(tmp2, tmp2, value.gp());
737 lrvr(tmp2, tmp2);
738 ShiftRightU32(tmp2, tmp2, Operand(16));
739#else
740 AddS32(tmp2, tmp1, value.gp());
741#endif
742 AtomicCmpExchangeU16(ip, result.gp(), tmp1, tmp2, r0, r1);
743 b(Condition(4), &doadd);
744 LoadU16(result.gp(), result.gp());
745#ifdef V8_TARGET_BIG_ENDIAN
746 lrvr(result.gp(), result.gp());
747 ShiftRightU32(result.gp(), result.gp(), Operand(16));
748#endif
749 break;
750 }
751 case StoreType::kI32Store:
752 case StoreType::kI64Store32: {
753 Label doadd;
754 bind(&doadd);
755 LoadU32(tmp1, MemOperand(ip));
756#ifdef V8_TARGET_BIG_ENDIAN
757 lrvr(tmp2, tmp1);
758 AddS32(tmp2, tmp2, value.gp());
759 lrvr(tmp2, tmp2);
760#else
761 AddS32(tmp2, tmp1, value.gp());
762#endif
763 CmpAndSwap(tmp1, tmp2, MemOperand(ip));
764 b(Condition(4), &doadd);
765 LoadU32(result.gp(), tmp1);
766#ifdef V8_TARGET_BIG_ENDIAN
767 lrvr(result.gp(), result.gp());
768#endif
769 break;
770 }
771 case StoreType::kI64Store: {
772 Label doadd;
773 bind(&doadd);
774 LoadU64(tmp1, MemOperand(ip));
775#ifdef V8_TARGET_BIG_ENDIAN
776 lrvgr(tmp2, tmp1);
777 AddS64(tmp2, tmp2, value.gp());
778 lrvgr(tmp2, tmp2);
779#else
780 AddS64(tmp2, tmp1, value.gp());
781#endif
782 CmpAndSwap64(tmp1, tmp2, MemOperand(ip));
783 b(Condition(4), &doadd);
784 mov(result.gp(), tmp1);
785#ifdef V8_TARGET_BIG_ENDIAN
786 lrvgr(result.gp(), result.gp());
787#endif
788 break;
789 }
790 default:
791 UNREACHABLE();
792 }
793}
794
795void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
796 uintptr_t offset_imm, LiftoffRegister value,
797 LiftoffRegister result, StoreType type,
798 bool i64_offset) {
799 LiftoffRegList pinned = LiftoffRegList{dst_addr, value, result};
800 if (offset_reg != no_reg) pinned.set(offset_reg);
801 Register tmp1 = GetUnusedRegister(kGpReg, pinned).gp();
802 pinned.set(tmp1);
803 Register tmp2 = GetUnusedRegister(kGpReg, pinned).gp();
804
805 PREP_MEM_OPERAND(offset_reg, offset_imm, ip)
806 lay(ip,
807 MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
808
809 switch (type.value()) {
810 case StoreType::kI32Store8:
811 case StoreType::kI64Store8: {
812 Label do_again;
813 bind(&do_again);
814 LoadU8(tmp1, MemOperand(ip));
815 SubS32(tmp2, tmp1, value.gp());
816 AtomicCmpExchangeU8(ip, result.gp(), tmp1, tmp2, r0, r1);
817 b(Condition(4), &do_again);
818 LoadU8(result.gp(), result.gp());
819 break;
820 }
821 case StoreType::kI32Store16:
822 case StoreType::kI64Store16: {
823 Label do_again;
824 bind(&do_again);
825 LoadU16(tmp1, MemOperand(ip));
826#ifdef V8_TARGET_BIG_ENDIAN
827 lrvr(tmp2, tmp1);
828 ShiftRightU32(tmp2, tmp2, Operand(16));
829 SubS32(tmp2, tmp2, value.gp());
830 lrvr(tmp2, tmp2);
831 ShiftRightU32(tmp2, tmp2, Operand(16));
832#else
833 SubS32(tmp2, tmp1, value.gp());
834#endif
835 AtomicCmpExchangeU16(ip, result.gp(), tmp1, tmp2, r0, r1);
836 b(Condition(4), &do_again);
837 LoadU16(result.gp(), result.gp());
838#ifdef V8_TARGET_BIG_ENDIAN
839 lrvr(result.gp(), result.gp());
840 ShiftRightU32(result.gp(), result.gp(), Operand(16));
841#endif
842 break;
843 }
844 case StoreType::kI32Store:
845 case StoreType::kI64Store32: {
846 Label do_again;
847 bind(&do_again);
848 LoadU32(tmp1, MemOperand(ip));
849#ifdef V8_TARGET_BIG_ENDIAN
850 lrvr(tmp2, tmp1);
851 SubS32(tmp2, tmp2, value.gp());
852 lrvr(tmp2, tmp2);
853#else
854 SubS32(tmp2, tmp1, value.gp());
855#endif
856 CmpAndSwap(tmp1, tmp2, MemOperand(ip));
857 b(Condition(4), &do_again);
858 LoadU32(result.gp(), tmp1);
859#ifdef V8_TARGET_BIG_ENDIAN
860 lrvr(result.gp(), result.gp());
861#endif
862 break;
863 }
864 case StoreType::kI64Store: {
865 Label do_again;
866 bind(&do_again);
867 LoadU64(tmp1, MemOperand(ip));
868#ifdef V8_TARGET_BIG_ENDIAN
869 lrvgr(tmp2, tmp1);
870 SubS64(tmp2, tmp2, value.gp());
871 lrvgr(tmp2, tmp2);
872#else
873 SubS64(tmp2, tmp1, value.gp());
874#endif
875 CmpAndSwap64(tmp1, tmp2, MemOperand(ip));
876 b(Condition(4), &do_again);
877 mov(result.gp(), tmp1);
878#ifdef V8_TARGET_BIG_ENDIAN
879 lrvgr(result.gp(), result.gp());
880#endif
881 break;
882 }
883 default:
884 UNREACHABLE();
885 }
886}
887
888void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
889 uintptr_t offset_imm, LiftoffRegister value,
890 LiftoffRegister result, StoreType type,
891 bool i64_offset) {
892 LiftoffRegList pinned = LiftoffRegList{dst_addr, value, result};
893 if (offset_reg != no_reg) pinned.set(offset_reg);
894 Register tmp1 = GetUnusedRegister(kGpReg, pinned).gp();
895 pinned.set(tmp1);
896 Register tmp2 = GetUnusedRegister(kGpReg, pinned).gp();
897
898 PREP_MEM_OPERAND(offset_reg, offset_imm, ip)
899 lay(ip,
900 MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
901
902 switch (type.value()) {
903 case StoreType::kI32Store8:
904 case StoreType::kI64Store8: {
905 Label do_again;
906 bind(&do_again);
907 LoadU8(tmp1, MemOperand(ip));
908 AndP(tmp2, tmp1, value.gp());
909 AtomicCmpExchangeU8(ip, result.gp(), tmp1, tmp2, r0, r1);
910 b(Condition(4), &do_again);
911 LoadU8(result.gp(), result.gp());
912 break;
913 }
914 case StoreType::kI32Store16:
915 case StoreType::kI64Store16: {
916 Label do_again;
917 bind(&do_again);
918 LoadU16(tmp1, MemOperand(ip));
919#ifdef V8_TARGET_BIG_ENDIAN
920 lrvr(tmp2, tmp1);
921 ShiftRightU32(tmp2, tmp2, Operand(16));
922 AndP(tmp2, tmp2, value.gp());
923 lrvr(tmp2, tmp2);
924 ShiftRightU32(tmp2, tmp2, Operand(16));
925#else
926 AndP(tmp2, tmp1, value.gp());
927#endif
928 AtomicCmpExchangeU16(ip, result.gp(), tmp1, tmp2, r0, r1);
929 b(Condition(4), &do_again);
930 LoadU16(result.gp(), result.gp());
931#ifdef V8_TARGET_BIG_ENDIAN
932 lrvr(result.gp(), result.gp());
933 ShiftRightU32(result.gp(), result.gp(), Operand(16));
934#endif
935 break;
936 }
937 case StoreType::kI32Store:
938 case StoreType::kI64Store32: {
939 Label do_again;
940 bind(&do_again);
941 LoadU32(tmp1, MemOperand(ip));
942#ifdef V8_TARGET_BIG_ENDIAN
943 lrvr(tmp2, tmp1);
944 AndP(tmp2, tmp2, value.gp());
945 lrvr(tmp2, tmp2);
946#else
947 AndP(tmp2, tmp1, value.gp());
948#endif
949 CmpAndSwap(tmp1, tmp2, MemOperand(ip));
950 b(Condition(4), &do_again);
951 LoadU32(result.gp(), tmp1);
952#ifdef V8_TARGET_BIG_ENDIAN
953 lrvr(result.gp(), result.gp());
954#endif
955 break;
956 }
957 case StoreType::kI64Store: {
958 Label do_again;
959 bind(&do_again);
960 LoadU64(tmp1, MemOperand(ip));
961#ifdef V8_TARGET_BIG_ENDIAN
962 lrvgr(tmp2, tmp1);
963 AndP(tmp2, tmp2, value.gp());
964 lrvgr(tmp2, tmp2);
965#else
966 AndP(tmp2, tmp1, value.gp());
967#endif
968 CmpAndSwap64(tmp1, tmp2, MemOperand(ip));
969 b(Condition(4), &do_again);
970 mov(result.gp(), tmp1);
971#ifdef V8_TARGET_BIG_ENDIAN
972 lrvgr(result.gp(), result.gp());
973#endif
974 break;
975 }
976 default:
977 UNREACHABLE();
978 }
979}
980
981void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
982 uintptr_t offset_imm, LiftoffRegister value,
983 LiftoffRegister result, StoreType type,
984 bool i64_offset) {
985 LiftoffRegList pinned = LiftoffRegList{dst_addr, value, result};
986 if (offset_reg != no_reg) pinned.set(offset_reg);
987 Register tmp1 = GetUnusedRegister(kGpReg, pinned).gp();
988 pinned.set(tmp1);
989 Register tmp2 = GetUnusedRegister(kGpReg, pinned).gp();
990
991 PREP_MEM_OPERAND(offset_reg, offset_imm, ip)
992 lay(ip,
993 MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
994
995 switch (type.value()) {
996 case StoreType::kI32Store8:
997 case StoreType::kI64Store8: {
998 Label do_again;
999 bind(&do_again);
1000 LoadU8(tmp1, MemOperand(ip));
1001 OrP(tmp2, tmp1, value.gp());
1002 AtomicCmpExchangeU8(ip, result.gp(), tmp1, tmp2, r0, r1);
1003 b(Condition(4), &do_again);
1004 LoadU8(result.gp(), result.gp());
1005 break;
1006 }
1007 case StoreType::kI32Store16:
1008 case StoreType::kI64Store16: {
1009 Label do_again;
1010 bind(&do_again);
1011 LoadU16(tmp1, MemOperand(ip));
1012#ifdef V8_TARGET_BIG_ENDIAN
1013 lrvr(tmp2, tmp1);
1014 ShiftRightU32(tmp2, tmp2, Operand(16));
1015 OrP(tmp2, tmp2, value.gp());
1016 lrvr(tmp2, tmp2);
1017 ShiftRightU32(tmp2, tmp2, Operand(16));
1018#else
1019 OrP(tmp2, tmp1, value.gp());
1020#endif
1021 AtomicCmpExchangeU16(ip, result.gp(), tmp1, tmp2, r0, r1);
1022 b(Condition(4), &do_again);
1023 LoadU16(result.gp(), result.gp());
1024#ifdef V8_TARGET_BIG_ENDIAN
1025 lrvr(result.gp(), result.gp());
1026 ShiftRightU32(result.gp(), result.gp(), Operand(16));
1027#endif
1028 break;
1029 }
1030 case StoreType::kI32Store:
1031 case StoreType::kI64Store32: {
1032 Label do_again;
1033 bind(&do_again);
1034 LoadU32(tmp1, MemOperand(ip));
1035#ifdef V8_TARGET_BIG_ENDIAN
1036 lrvr(tmp2, tmp1);
1037 OrP(tmp2, tmp2, value.gp());
1038 lrvr(tmp2, tmp2);
1039#else
1040 OrP(tmp2, tmp1, value.gp());
1041#endif
1042 CmpAndSwap(tmp1, tmp2, MemOperand(ip));
1043 b(Condition(4), &do_again);
1044 LoadU32(result.gp(), tmp1);
1045#ifdef V8_TARGET_BIG_ENDIAN
1046 lrvr(result.gp(), result.gp());
1047#endif
1048 break;
1049 }
1050 case StoreType::kI64Store: {
1051 Label do_again;
1052 bind(&do_again);
1053 LoadU64(tmp1, MemOperand(ip));
1054#ifdef V8_TARGET_BIG_ENDIAN
1055 lrvgr(tmp2, tmp1);
1056 OrP(tmp2, tmp2, value.gp());
1057 lrvgr(tmp2, tmp2);
1058#else
1059 OrP(tmp2, tmp1, value.gp());
1060#endif
1061 CmpAndSwap64(tmp1, tmp2, MemOperand(ip));
1062 b(Condition(4), &do_again);
1063 mov(result.gp(), tmp1);
1064#ifdef V8_TARGET_BIG_ENDIAN
1065 lrvgr(result.gp(), result.gp());
1066#endif
1067 break;
1068 }
1069 default:
1070 UNREACHABLE();
1071 }
1072}
1073
1074void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
1075 uintptr_t offset_imm, LiftoffRegister value,
1076 LiftoffRegister result, StoreType type,
1077 bool i64_offset) {
1078 LiftoffRegList pinned = LiftoffRegList{dst_addr, value, result};
1079 if (offset_reg != no_reg) pinned.set(offset_reg);
1080 Register tmp1 = GetUnusedRegister(kGpReg, pinned).gp();
1081 pinned.set(tmp1);
1082 Register tmp2 = GetUnusedRegister(kGpReg, pinned).gp();
1083
1084 PREP_MEM_OPERAND(offset_reg, offset_imm, ip)
1085 lay(ip,
1086 MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
1087
1088 switch (type.value()) {
1089 case StoreType::kI32Store8:
1090 case StoreType::kI64Store8: {
1091 Label do_again;
1092 bind(&do_again);
1093 LoadU8(tmp1, MemOperand(ip));
1094 XorP(tmp2, tmp1, value.gp());
1095 AtomicCmpExchangeU8(ip, result.gp(), tmp1, tmp2, r0, r1);
1096 b(Condition(4), &do_again);
1097 LoadU8(result.gp(), result.gp());
1098 break;
1099 }
1100 case StoreType::kI32Store16:
1101 case StoreType::kI64Store16: {
1102 Label do_again;
1103 bind(&do_again);
1104 LoadU16(tmp1, MemOperand(ip));
1105#ifdef V8_TARGET_BIG_ENDIAN
1106 lrvr(tmp2, tmp1);
1107 ShiftRightU32(tmp2, tmp2, Operand(16));
1108 XorP(tmp2, tmp2, value.gp());
1109 lrvr(tmp2, tmp2);
1110 ShiftRightU32(tmp2, tmp2, Operand(16));
1111#else
1112 XorP(tmp2, tmp1, value.gp());
1113#endif
1114 AtomicCmpExchangeU16(ip, result.gp(), tmp1, tmp2, r0, r1);
1115 b(Condition(4), &do_again);
1116 LoadU16(result.gp(), result.gp());
1117#ifdef V8_TARGET_BIG_ENDIAN
1118 lrvr(result.gp(), result.gp());
1119 ShiftRightU32(result.gp(), result.gp(), Operand(16));
1120#endif
1121 break;
1122 }
1123 case StoreType::kI32Store:
1124 case StoreType::kI64Store32: {
1125 Label do_again;
1126 bind(&do_again);
1127 LoadU32(tmp1, MemOperand(ip));
1128#ifdef V8_TARGET_BIG_ENDIAN
1129 lrvr(tmp2, tmp1);
1130 XorP(tmp2, tmp2, value.gp());
1131 lrvr(tmp2, tmp2);
1132#else
1133 XorP(tmp2, tmp1, value.gp());
1134#endif
1135 CmpAndSwap(tmp1, tmp2, MemOperand(ip));
1136 b(Condition(4), &do_again);
1137 LoadU32(result.gp(), tmp1);
1138#ifdef V8_TARGET_BIG_ENDIAN
1139 lrvr(result.gp(), result.gp());
1140#endif
1141 break;
1142 }
1143 case StoreType::kI64Store: {
1144 Label do_again;
1145 bind(&do_again);
1146 LoadU64(tmp1, MemOperand(ip));
1147#ifdef V8_TARGET_BIG_ENDIAN
1148 lrvgr(tmp2, tmp1);
1149 XorP(tmp2, tmp2, value.gp());
1150 lrvgr(tmp2, tmp2);
1151#else
1152 XorP(tmp2, tmp1, value.gp());
1153#endif
1154 CmpAndSwap64(tmp1, tmp2, MemOperand(ip));
1155 b(Condition(4), &do_again);
1156 mov(result.gp(), tmp1);
1157#ifdef V8_TARGET_BIG_ENDIAN
1158 lrvgr(result.gp(), result.gp());
1159#endif
1160 break;
1161 }
1162 default:
1163 UNREACHABLE();
1164 }
1165}
1166
1167void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
1168 uintptr_t offset_imm,
1169 LiftoffRegister value,
1170 LiftoffRegister result, StoreType type,
1171 bool i64_offset) {
1172 PREP_MEM_OPERAND(offset_reg, offset_imm, ip)
1173 lay(ip,
1174 MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
1175
1176 switch (type.value()) {
1177 case StoreType::kI32Store8:
1178 case StoreType::kI64Store8: {
1179 AtomicExchangeU8(ip, value.gp(), result.gp(), r0);
1180 LoadU8(result.gp(), result.gp());
1181 break;
1182 }
1183 case StoreType::kI32Store16:
1184 case StoreType::kI64Store16: {
1185#ifdef V8_TARGET_BIG_ENDIAN
1186 lrvr(r1, value.gp());
1187 ShiftRightU32(r1, r1, Operand(16));
1188#else
1189 LoadU16(r1, value.gp());
1190#endif
1191 AtomicExchangeU16(ip, r1, result.gp(), r0);
1192#ifdef V8_TARGET_BIG_ENDIAN
1193 lrvr(result.gp(), result.gp());
1194 ShiftRightU32(result.gp(), result.gp(), Operand(16));
1195#else
1196 LoadU16(result.gp(), result.gp());
1197#endif
1198 break;
1199 }
1200 case StoreType::kI32Store:
1201 case StoreType::kI64Store32: {
1202#ifdef V8_TARGET_BIG_ENDIAN
1203 lrvr(r1, value.gp());
1204#else
1205 LoadU32(r1, value.gp());
1206#endif
1207 Label do_cs;
1208 bind(&do_cs);
1209 cs(result.gp(), r1, MemOperand(ip));
1210 bne(&do_cs, Label::kNear);
1211#ifdef V8_TARGET_BIG_ENDIAN
1212 lrvr(result.gp(), result.gp());
1213#endif
1214 LoadU32(result.gp(), result.gp());
1215 break;
1216 }
1217 case StoreType::kI64Store: {
1218#ifdef V8_TARGET_BIG_ENDIAN
1219 lrvgr(r1, value.gp());
1220#else
1221 mov(r1, value.gp());
1222#endif
1223 Label do_cs;
1224 bind(&do_cs);
1225 csg(result.gp(), r1, MemOperand(ip));
1226 bne(&do_cs, Label::kNear);
1227#ifdef V8_TARGET_BIG_ENDIAN
1228 lrvgr(result.gp(), result.gp());
1229#endif
1230 break;
1231 }
1232 default:
1233 UNREACHABLE();
1234 }
1235}
1236
1238 Register dst_addr, Register offset_reg, uintptr_t offset_imm,
1239 LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
1240 StoreType type, bool i64_offset) {
1241
1242 LiftoffRegList pinned = LiftoffRegList{dst_addr, expected, new_value, result};
1243 if (offset_reg != no_reg) pinned.set(offset_reg);
1244 Register tmp1 = GetUnusedRegister(kGpReg, pinned).gp();
1245 pinned.set(tmp1);
1246 Register tmp2 = GetUnusedRegister(kGpReg, pinned).gp();
1247
1248 PREP_MEM_OPERAND(offset_reg, offset_imm, ip)
1249 lay(ip,
1250 MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
1251
1252 switch (type.value()) {
1253 case StoreType::kI32Store8:
1254 case StoreType::kI64Store8: {
1255 AtomicCmpExchangeU8(ip, result.gp(), expected.gp(), new_value.gp(), r0,
1256 r1);
1257 LoadU8(result.gp(), result.gp());
1258 break;
1259 }
1260 case StoreType::kI32Store16:
1261 case StoreType::kI64Store16: {
1262#ifdef V8_TARGET_BIG_ENDIAN
1263 lrvr(tmp1, expected.gp());
1264 lrvr(tmp2, new_value.gp());
1265 ShiftRightU32(tmp1, tmp1, Operand(16));
1266 ShiftRightU32(tmp2, tmp2, Operand(16));
1267#else
1268 LoadU16(tmp1, expected.gp());
1269 LoadU16(tmp2, new_value.gp());
1270#endif
1271 AtomicCmpExchangeU16(ip, result.gp(), tmp1, tmp2, r0, r1);
1272 LoadU16(result.gp(), result.gp());
1273#ifdef V8_TARGET_BIG_ENDIAN
1274 lrvr(result.gp(), result.gp());
1275 ShiftRightU32(result.gp(), result.gp(), Operand(16));
1276#endif
1277 break;
1278 }
1279 case StoreType::kI32Store:
1280 case StoreType::kI64Store32: {
1281#ifdef V8_TARGET_BIG_ENDIAN
1282 lrvr(tmp1, expected.gp());
1283 lrvr(tmp2, new_value.gp());
1284#else
1285 LoadU32(tmp1, expected.gp());
1286 LoadU32(tmp2, new_value.gp());
1287#endif
1288 CmpAndSwap(tmp1, tmp2, MemOperand(ip));
1289 LoadU32(result.gp(), tmp1);
1290#ifdef V8_TARGET_BIG_ENDIAN
1291 lrvr(result.gp(), result.gp());
1292#endif
1293 break;
1294 }
1295 case StoreType::kI64Store: {
1296#ifdef V8_TARGET_BIG_ENDIAN
1297 lrvgr(tmp1, expected.gp());
1298 lrvgr(tmp2, new_value.gp());
1299#else
1300 mov(tmp1, expected.gp());
1301 mov(tmp2, new_value.gp());
1302#endif
1303 CmpAndSwap64(tmp1, tmp2, MemOperand(ip));
1304 mov(result.gp(), tmp1);
1305#ifdef V8_TARGET_BIG_ENDIAN
1306 lrvgr(result.gp(), result.gp());
1307#endif
1308 break;
1309 }
1310 default:
1311 UNREACHABLE();
1312 }
1313}
1314
1315void LiftoffAssembler::AtomicFence() { bailout(kAtomics, "AtomicFence"); }
1316
1317void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
1318 uint32_t caller_slot_idx,
1319 ValueKind kind) {
1320 int32_t offset = (caller_slot_idx + 1) * 8;
1321 switch (kind) {
1322 case kI32: {
1323#if defined(V8_TARGET_BIG_ENDIAN)
1324 LoadS32(dst.gp(), MemOperand(fp, offset + 4));
1325 break;
1326#else
1327 LoadS32(dst.gp(), MemOperand(fp, offset));
1328 break;
1329#endif
1330 }
1331 case kRef:
1332 case kRefNull:
1333 case kI64: {
1334 LoadU64(dst.gp(), MemOperand(fp, offset));
1335 break;
1336 }
1337 case kF32: {
1338 LoadF32(dst.fp(), MemOperand(fp, offset));
1339 break;
1340 }
1341 case kF64: {
1342 LoadF64(dst.fp(), MemOperand(fp, offset));
1343 break;
1344 }
1345 case kS128: {
1346 UseScratchRegisterScope temps(this);
1347 Register scratch = temps.Acquire();
1348 LoadV128(dst.fp(), MemOperand(fp, offset), scratch);
1349 break;
1350 }
1351 default:
1352 UNREACHABLE();
1353 }
1354}
1355
1356void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
1357 uint32_t caller_slot_idx,
1359 Register frame_pointer) {
1360 int32_t offset = (caller_slot_idx + 1) * 8;
1361 switch (kind) {
1362 case kI32: {
1363#if defined(V8_TARGET_BIG_ENDIAN)
1364 StoreU32(src.gp(), MemOperand(frame_pointer, offset + 4));
1365 break;
1366#else
1367 StoreU32(src.gp(), MemOperand(frame_pointer, offset));
1368 break;
1369#endif
1370 }
1371 case kRef:
1372 case kRefNull:
1373 case kI64: {
1374 StoreU64(src.gp(), MemOperand(frame_pointer, offset));
1375 break;
1376 }
1377 case kF32: {
1378 StoreF32(src.fp(), MemOperand(frame_pointer, offset));
1379 break;
1380 }
1381 case kF64: {
1382 StoreF64(src.fp(), MemOperand(frame_pointer, offset));
1383 break;
1384 }
1385 case kS128: {
1386 UseScratchRegisterScope temps(this);
1387 Register scratch = temps.Acquire();
1388 StoreV128(src.fp(), MemOperand(frame_pointer, offset), scratch);
1389 break;
1390 }
1391 default:
1392 UNREACHABLE();
1393 }
1394}
1395
1396void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
1397 ValueKind kind) {
1398 switch (kind) {
1399 case kI32: {
1400#if defined(V8_TARGET_BIG_ENDIAN)
1401 LoadS32(dst.gp(), MemOperand(sp, offset + 4));
1402 break;
1403#else
1404 LoadS32(dst.gp(), MemOperand(sp, offset));
1405 break;
1406#endif
1407 }
1408 case kRef:
1409 case kRefNull:
1410 case kI64: {
1411 LoadU64(dst.gp(), MemOperand(sp, offset));
1412 break;
1413 }
1414 case kF32: {
1415 LoadF32(dst.fp(), MemOperand(sp, offset));
1416 break;
1417 }
1418 case kF64: {
1419 LoadF64(dst.fp(), MemOperand(sp, offset));
1420 break;
1421 }
1422 case kS128: {
1423 UseScratchRegisterScope temps(this);
1424 Register scratch = temps.Acquire();
1425 LoadV128(dst.fp(), MemOperand(sp, offset), scratch);
1426 break;
1427 }
1428 default:
1429 UNREACHABLE();
1430 }
1431}
1432
1433#ifdef V8_TARGET_BIG_ENDIAN
1434constexpr int stack_bias = -4;
1435#else
1436constexpr int stack_bias = 0;
1437#endif
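// On big-endian targets a 32-bit value occupies the least-significant half of
// its 8-byte stack slot, which is the higher-addressed half; stack_bias (and
// the explicit "offset + 4" adjustments above) steer 32-bit accesses to that
// half so that 32-bit and 64-bit views of the same slot agree.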
1438
1439void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
1440 ValueKind kind) {
1441 DCHECK_NE(dst_offset, src_offset);
1442 int length = 0;
1443 switch (kind) {
1444 case kI32:
1445 case kF32:
1446 length = 4;
1447 break;
1448 case kI64:
1449 case kRefNull:
1450 case kRef:
1451 case kF64:
1452 length = 8;
1453 break;
1454 case kS128:
1455 length = 16;
1456 break;
1457 default:
1458 UNREACHABLE();
1459 }
1460
1461 dst_offset += (length == 4 ? stack_bias : 0);
1462 src_offset += (length == 4 ? stack_bias : 0);
1463
1464 if (is_int20(dst_offset)) {
1465 lay(ip, liftoff::GetStackSlot(dst_offset));
1466 } else {
1467 mov(ip, Operand(-dst_offset));
1468 lay(ip, MemOperand(fp, ip));
1469 }
1470
1471 if (is_int20(src_offset)) {
1472 lay(r1, liftoff::GetStackSlot(src_offset));
1473 } else {
1474 mov(r1, Operand(-src_offset));
1475 lay(r1, MemOperand(fp, r1));
1476 }
1477
1478 MoveChar(MemOperand(ip), MemOperand(r1), Operand(length));
1479}
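// Note: MoveChar emits a memory-to-memory copy (MVC-style), so stack-to-stack
// moves never route the value through a general-purpose register; ip and r1
// only hold the destination and source slot addresses.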
1480
1481void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
1482 mov(dst, src);
1483}
1484
1486 ValueKind kind) {
1487 DCHECK_NE(dst, src);
1488 if (kind == kF32) {
1489 ler(dst, src);
1490 } else if (kind == kF64) {
1491 ldr(dst, src);
1492 } else {
1494 vlr(dst, src, Condition(0), Condition(0), Condition(0));
1495 }
1496}
1497
1498void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
1499 DCHECK_LT(0, offset);
1501
1502 switch (kind) {
1503 case kI32:
1505 break;
1506 case kI64:
1507 case kRefNull:
1508 case kRef:
1510 break;
1511 case kF32:
1513 break;
1514 case kF64:
1516 break;
1517 case kS128: {
1518 UseScratchRegisterScope temps(this);
1519 Register scratch = temps.Acquire();
1520 StoreV128(reg.fp(), liftoff::GetStackSlot(offset), scratch);
1521 break;
1522 }
1523 default:
1524 UNREACHABLE();
1525 }
1526}
1527
1528void LiftoffAssembler::Spill(int offset, WasmValue value) {
1530 UseScratchRegisterScope temps(this);
1531 Register src = no_reg;
1532 src = ip;
1533 switch (value.type().kind()) {
1534 case kI32: {
1535 mov(src, Operand(value.to_i32()));
1537 break;
1538 }
1539 case kI64: {
1540 mov(src, Operand(value.to_i64()));
1542 break;
1543 }
1544 default:
1545 // We do not track f32 and f64 constants, hence they are unreachable.
1546 UNREACHABLE();
1547 }
1548}
1549
1550void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
1551 switch (kind) {
1552 case kI32:
1554 break;
1555 case kI64:
1556 case kRef:
1557 case kRefNull:
1559 break;
1560 case kF32:
1562 break;
1563 case kF64:
1565 break;
1566 case kS128: {
1567 UseScratchRegisterScope temps(this);
1568 Register scratch = temps.Acquire();
1569 LoadV128(reg.fp(), liftoff::GetStackSlot(offset), scratch);
1570 break;
1571 }
1572 default:
1573 UNREACHABLE();
1574 }
1575}
1576
1578 UNREACHABLE();
1579}
1580
1582 DCHECK_LT(0, size);
1583 DCHECK_EQ(0, size % 4);
1585
1586  // We need a zero reg. Always use r0 for that, and push it beforehand so we
1587  // can restore its value afterwards.
1588 push(r0);
1589 mov(r0, Operand(0));
1590
1591 if (size <= 5 * kStackSlotSize) {
1592 // Special straight-line code for up to five slots. Generates two
1593 // instructions per slot.
1594 uint32_t remainder = size;
1597 }
1598 DCHECK(remainder == 4 || remainder == 0);
1599 if (remainder) {
1601 }
1602 } else {
1603 // General case for bigger counts (9 instructions).
1604 // Use r3 for start address (inclusive), r4 for end address (exclusive).
1605 push(r3);
1606 push(r4);
1607
1608 lay(r3, MemOperand(fp, -start - size));
1609 lay(r4, MemOperand(fp, -start));
1610
1611 Label loop;
1612 bind(&loop);
1613 StoreU64(r0, MemOperand(r3));
1614 lay(r3, MemOperand(r3, kSystemPointerSize));
1615 CmpU64(r3, r4);
1616 bne(&loop);
1617 pop(r4);
1618 pop(r3);
1619 }
1620
1621 pop(r0);
1622}
1623
1624void LiftoffAssembler::LoadSpillAddress(Register dst, int offset,
1625 ValueKind kind) {
1626 if (kind == kI32) offset = offset + stack_bias;
1627 SubS64(dst, fp, Operand(offset));
1628}
1629
1630#define SIGN_EXT(r) lgfr(r, r)
1631#define INT32_AND_WITH_1F(x) Operand(x & 0x1f)
1632#define INT32_AND_WITH_3F(x) Operand(x & 0x3f)
1633#define REGISTER_AND_WITH_1F \
1634 ([&](Register rhs) { \
1635 AndP(r1, rhs, Operand(31)); \
1636 return r1; \
1637 })
1638
1639#define LFR_TO_REG(reg) reg.gp()
1640
1641// V(name, instr, dtype, stype, dcast, scast, rcast, return_val, return_type)
1642#define UNOP_LIST(V) \
1643 V(i32_popcnt, Popcnt32, Register, Register, , , USE, true, bool) \
1644 V(i64_popcnt, Popcnt64, LiftoffRegister, LiftoffRegister, LFR_TO_REG, \
1645 LFR_TO_REG, USE, true, bool) \
1646 V(u32_to_uintptr, LoadU32, Register, Register, , , USE, , void) \
1647 V(i32_signextend_i8, lbr, Register, Register, , , USE, , void) \
1648 V(i32_signextend_i16, lhr, Register, Register, , , USE, , void) \
1649 V(i64_signextend_i8, lgbr, LiftoffRegister, LiftoffRegister, LFR_TO_REG, \
1650 LFR_TO_REG, USE, , void) \
1651 V(i64_signextend_i16, lghr, LiftoffRegister, LiftoffRegister, LFR_TO_REG, \
1652 LFR_TO_REG, USE, , void) \
1653 V(i64_signextend_i32, LoadS32, LiftoffRegister, LiftoffRegister, LFR_TO_REG, \
1654 LFR_TO_REG, USE, , void) \
1655 V(i32_clz, CountLeadingZerosU32, Register, Register, , , USE, , void) \
1656 V(i32_ctz, CountTrailingZerosU32, Register, Register, , , USE, , void) \
1657 V(i64_clz, CountLeadingZerosU64, LiftoffRegister, LiftoffRegister, \
1658 LFR_TO_REG, LFR_TO_REG, USE, , void) \
1659 V(i64_ctz, CountTrailingZerosU64, LiftoffRegister, LiftoffRegister, \
1660 LFR_TO_REG, LFR_TO_REG, USE, , void) \
1661 V(f32_ceil, CeilF32, DoubleRegister, DoubleRegister, , , USE, true, bool) \
1662 V(f32_floor, FloorF32, DoubleRegister, DoubleRegister, , , USE, true, bool) \
1663 V(f32_trunc, TruncF32, DoubleRegister, DoubleRegister, , , USE, true, bool) \
1664 V(f32_nearest_int, NearestIntF32, DoubleRegister, DoubleRegister, , , USE, \
1665 true, bool) \
1666 V(f32_abs, lpebr, DoubleRegister, DoubleRegister, , , USE, , void) \
1667 V(f32_neg, lcebr, DoubleRegister, DoubleRegister, , , USE, , void) \
1668 V(f32_sqrt, sqebr, DoubleRegister, DoubleRegister, , , USE, , void) \
1669 V(f64_ceil, CeilF64, DoubleRegister, DoubleRegister, , , USE, true, bool) \
1670 V(f64_floor, FloorF64, DoubleRegister, DoubleRegister, , , USE, true, bool) \
1671 V(f64_trunc, TruncF64, DoubleRegister, DoubleRegister, , , USE, true, bool) \
1672 V(f64_nearest_int, NearestIntF64, DoubleRegister, DoubleRegister, , , USE, \
1673 true, bool) \
1674 V(f64_abs, lpdbr, DoubleRegister, DoubleRegister, , , USE, , void) \
1675 V(f64_neg, lcdbr, DoubleRegister, DoubleRegister, , , USE, , void) \
1676 V(f64_sqrt, sqdbr, DoubleRegister, DoubleRegister, , , USE, , void)
1677
1678#define EMIT_UNOP_FUNCTION(name, instr, dtype, stype, dcast, scast, rcast, \
1679 ret, return_type) \
1680 return_type LiftoffAssembler::emit_##name(dtype dst, stype src) { \
1681 auto _dst = dcast(dst); \
1682 auto _src = scast(src); \
1683 instr(_dst, _src); \
1684 rcast(_dst); \
1685 return ret; \
1686 }
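// For illustration, the first UNOP_LIST entry expands roughly to:
//   bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
//     Popcnt32(dst, src);
//     return true;
//   }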
1687UNOP_LIST(EMIT_UNOP_FUNCTION)
1688#undef EMIT_UNOP_FUNCTION
1689#undef UNOP_LIST
1690
1691// V(name, instr, dtype, stype1, stype2, dcast, scast1, scast2, rcast,
1692// return_val, return_type)
1693#define BINOP_LIST(V) \
1694 V(f32_min, FloatMin, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
1695 USE, , void) \
1696 V(f32_max, FloatMax, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
1697 USE, , void) \
1698 V(f64_min, DoubleMin, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
1699 USE, , void) \
1700 V(f64_max, DoubleMax, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
1701 USE, , void) \
1702 V(f64_add, AddF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
1703 USE, , void) \
1704 V(f64_sub, SubF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
1705 USE, , void) \
1706 V(f64_mul, MulF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
1707 USE, , void) \
1708 V(f64_div, DivF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
1709 USE, , void) \
1710 V(f32_add, AddF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
1711 USE, , void) \
1712 V(f32_sub, SubF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
1713 USE, , void) \
1714 V(f32_mul, MulF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
1715 USE, , void) \
1716 V(f32_div, DivF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
1717 USE, , void) \
1718 V(i32_shli, ShiftLeftU32, Register, Register, int32_t, , , \
1719 INT32_AND_WITH_1F, SIGN_EXT, , void) \
1720 V(i32_sari, ShiftRightS32, Register, Register, int32_t, , , \
1721 INT32_AND_WITH_1F, SIGN_EXT, , void) \
1722 V(i32_shri, ShiftRightU32, Register, Register, int32_t, , , \
1723 INT32_AND_WITH_1F, SIGN_EXT, , void) \
1724 V(i32_shl, ShiftLeftU32, Register, Register, Register, , , \
1725 REGISTER_AND_WITH_1F, SIGN_EXT, , void) \
1726 V(i32_sar, ShiftRightS32, Register, Register, Register, , , \
1727 REGISTER_AND_WITH_1F, SIGN_EXT, , void) \
1728 V(i32_shr, ShiftRightU32, Register, Register, Register, , , \
1729 REGISTER_AND_WITH_1F, SIGN_EXT, , void) \
1730 V(i32_addi, AddS32, Register, Register, int32_t, , , Operand, SIGN_EXT, , \
1731 void) \
1732 V(i32_subi, SubS32, Register, Register, int32_t, , , Operand, SIGN_EXT, , \
1733 void) \
1734 V(i32_andi, And, Register, Register, int32_t, , , Operand, SIGN_EXT, , void) \
1735 V(i32_ori, Or, Register, Register, int32_t, , , Operand, SIGN_EXT, , void) \
1736 V(i32_xori, Xor, Register, Register, int32_t, , , Operand, SIGN_EXT, , void) \
1737 V(i32_add, AddS32, Register, Register, Register, , , , SIGN_EXT, , void) \
1738 V(i32_sub, SubS32, Register, Register, Register, , , , SIGN_EXT, , void) \
1739 V(i32_and, And, Register, Register, Register, , , , SIGN_EXT, , void) \
1740 V(i32_or, Or, Register, Register, Register, , , , SIGN_EXT, , void) \
1741 V(i32_xor, Xor, Register, Register, Register, , , , SIGN_EXT, , void) \
1742 V(i32_mul, MulS32, Register, Register, Register, , , , SIGN_EXT, , void) \
1743 V(i64_add, AddS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
1744 LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
1745 V(i64_sub, SubS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
1746 LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
1747 V(i64_mul, MulS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
1748 LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
1749 V(i64_and, AndP, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
1750 LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
1751 V(i64_or, OrP, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
1752 LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
1753 V(i64_xor, XorP, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
1754 LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
1755 V(i64_shl, ShiftLeftU64, LiftoffRegister, LiftoffRegister, Register, \
1756 LFR_TO_REG, LFR_TO_REG, , USE, , void) \
1757 V(i64_sar, ShiftRightS64, LiftoffRegister, LiftoffRegister, Register, \
1758 LFR_TO_REG, LFR_TO_REG, , USE, , void) \
1759 V(i64_shr, ShiftRightU64, LiftoffRegister, LiftoffRegister, Register, \
1760 LFR_TO_REG, LFR_TO_REG, , USE, , void) \
1761 V(i64_addi, AddS64, LiftoffRegister, LiftoffRegister, int64_t, LFR_TO_REG, \
1762 LFR_TO_REG, Operand, USE, , void) \
1763 V(i64_andi, AndP, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
1764 LFR_TO_REG, Operand, USE, , void) \
1765 V(i64_ori, OrP, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
1766 LFR_TO_REG, Operand, USE, , void) \
1767 V(i64_xori, XorP, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
1768 LFR_TO_REG, Operand, USE, , void) \
1769 V(i64_shli, ShiftLeftU64, LiftoffRegister, LiftoffRegister, int32_t, \
1770 LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void) \
1771 V(i64_sari, ShiftRightS64, LiftoffRegister, LiftoffRegister, int32_t, \
1772 LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void) \
1773 V(i64_shri, ShiftRightU64, LiftoffRegister, LiftoffRegister, int32_t, \
1774 LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void)
1775
1776#define EMIT_BINOP_FUNCTION(name, instr, dtype, stype1, stype2, dcast, scast1, \
1777 scast2, rcast, ret, return_type) \
1778 return_type LiftoffAssembler::emit_##name(dtype dst, stype1 lhs, \
1779 stype2 rhs) { \
1780 auto _dst = dcast(dst); \
1781 auto _lhs = scast1(lhs); \
1782 auto _rhs = scast2(rhs); \
1783 instr(_dst, _lhs, _rhs); \
1784 rcast(_dst); \
1785 return ret; \
1786 }
1787
1788BINOP_LIST(EMIT_BINOP_FUNCTION)
1789#undef BINOP_LIST
1790#undef EMIT_BINOP_FUNCTION
1791#undef SIGN_EXT
1792#undef INT32_AND_WITH_1F
1793#undef REGISTER_AND_WITH_1F
1794#undef LFR_TO_REG
1795
1796void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) {
1797 UseScratchRegisterScope temps(this);
1800 Register scratch = temps.Acquire();
1801 LoadS32(scratch, MemOperand(dst.gp(), offset));
1802 AddU32(scratch, Operand(Smi::FromInt(1)));
1803 StoreU32(scratch, MemOperand(dst.gp(), offset));
1804 } else {
1805 Register scratch = temps.Acquire();
1806 SmiUntag(scratch, MemOperand(dst.gp(), offset));
1807 AddU64(scratch, Operand(1));
1808 SmiTag(scratch);
1809 StoreU64(scratch, MemOperand(dst.gp(), offset));
1810 }
1811}
1812
1813void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
1814 Label* trap_div_by_zero,
1815 Label* trap_div_unrepresentable) {
1816 Label cont;
1817
1818 // Check for division by zero.
1819 ltr(r0, rhs);
1820 b(eq, trap_div_by_zero);
1821
1822 // Check for kMinInt / -1. This is unrepresentable.
1823 CmpS32(rhs, Operand(-1));
1824 bne(&cont);
1825 CmpS32(lhs, Operand(kMinInt));
1826 b(eq, trap_div_unrepresentable);
1827
1828 bind(&cont);
1829 DivS32(dst, lhs, rhs);
1830}
1831
1832void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
1833 Label* trap_div_by_zero) {
1834 // Check for division by zero.
1835 ltr(r0, rhs);
1836 beq(trap_div_by_zero);
1837 DivU32(dst, lhs, rhs);
1838}
1839
1840void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
1841 Label* trap_div_by_zero) {
1842 Label cont;
1843 Label done;
1844 Label trap_div_unrepresentable;
1845 // Check for division by zero.
1846 ltr(r0, rhs);
1847 beq(trap_div_by_zero);
1848
1849 // Check kMinInt/-1 case.
1850 CmpS32(rhs, Operand(-1));
1851 bne(&cont);
1852 CmpS32(lhs, Operand(kMinInt));
1853 beq(&trap_div_unrepresentable);
1854
1855  // Continue normal calculation.
1856 bind(&cont);
1857 ModS32(dst, lhs, rhs);
1858 bne(&done);
1859
1860  // Trap for the kMinInt / -1 case.
1861 bind(&trap_div_unrepresentable);
1862 mov(dst, Operand(0));
1863 bind(&done);
1864}
1865
1866void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
1867 Label* trap_div_by_zero) {
1868 // Check for division by zero.
1869 ltr(r0, rhs);
1870 beq(trap_div_by_zero);
1871 ModU32(dst, lhs, rhs);
1872}
1873
1874bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
1875 LiftoffRegister rhs,
1876 Label* trap_div_by_zero,
1877 Label* trap_div_unrepresentable) {
1878 // Use r0 to check for kMinInt / -1.
1879 constexpr int64_t kMinInt64 = static_cast<int64_t>(1) << 63;
1880 Label cont;
1881 // Check for division by zero.
1882 ltgr(r0, rhs.gp());
1883 beq(trap_div_by_zero);
1884
1885 // Check for kMinInt / -1. This is unrepresentable.
1886 CmpS64(rhs.gp(), Operand(-1));
1887 bne(&cont);
1888 mov(r0, Operand(kMinInt64));
1889 CmpS64(lhs.gp(), r0);
1890 b(eq, trap_div_unrepresentable);
1891
1892 bind(&cont);
1893 DivS64(dst.gp(), lhs.gp(), rhs.gp());
1894 return true;
1895}
1896
1897bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
1898 LiftoffRegister rhs,
1899 Label* trap_div_by_zero) {
1900 ltgr(r0, rhs.gp());
1901 b(eq, trap_div_by_zero);
1902 // Do div.
1903 DivU64(dst.gp(), lhs.gp(), rhs.gp());
1904 return true;
1905}
1906
1907bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
1908 LiftoffRegister rhs,
1909 Label* trap_div_by_zero) {
1910 constexpr int64_t kMinInt64 = static_cast<int64_t>(1) << 63;
1911
1912 Label trap_div_unrepresentable;
1913 Label done;
1914 Label cont;
1915
1916 // Check for division by zero.
1917 ltgr(r0, rhs.gp());
1918 beq(trap_div_by_zero);
1919
1920 // Check for kMinInt / -1. This is unrepresentable.
1921 CmpS64(rhs.gp(), Operand(-1));
1922 bne(&cont);
1923 mov(r0, Operand(kMinInt64));
1924 CmpS64(lhs.gp(), r0);
1925 beq(&trap_div_unrepresentable);
1926
1927 bind(&cont);
1928 ModS64(dst.gp(), lhs.gp(), rhs.gp());
1929 bne(&done);
1930
1931 bind(&trap_div_unrepresentable);
1932 mov(dst.gp(), Operand(0));
1933 bind(&done);
1934 return true;
1935}
1936
1937bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
1938 LiftoffRegister rhs,
1939 Label* trap_div_by_zero) {
1940 // Check for division by zero.
1941 ltgr(r0, rhs.gp());
1942 beq(trap_div_by_zero);
1943 ModU64(dst.gp(), lhs.gp(), rhs.gp());
1944 return true;
1945}
1946
1948 DoubleRegister rhs) {
1949 constexpr uint64_t kF64SignBit = uint64_t{1} << 63;
1950 UseScratchRegisterScope temps(this);
1951 Register scratch2 = temps.Acquire();
1952 MovDoubleToInt64(r0, lhs);
1953 // Clear sign bit in {r0}.
1954 AndP(r0, Operand(~kF64SignBit));
1955
1956 MovDoubleToInt64(scratch2, rhs);
1957 // Isolate sign bit in {scratch2}.
1958 AndP(scratch2, Operand(kF64SignBit));
1959 // Combine {scratch2} into {r0}.
1960 OrP(r0, r0, scratch2);
1961 MovInt64ToDouble(dst, r0);
1962}
1963
1964void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
1965                                         DoubleRegister rhs) {
1966 constexpr uint64_t kF64SignBit = uint64_t{1} << 63;
1967 UseScratchRegisterScope temps(this);
1968 Register scratch2 = temps.Acquire();
1969 MovDoubleToInt64(r0, lhs);
1970 // Clear sign bit in {r0}.
1971 AndP(r0, Operand(~kF64SignBit));
1972
1973 MovDoubleToInt64(scratch2, rhs);
1974 // Isolate sign bit in {scratch2}.
1975 AndP(scratch2, Operand(kF64SignBit));
1976 // Combine {scratch2} into {r0}.
1977 OrP(r0, r0, scratch2);
1978 MovInt64ToDouble(dst, r0);
1979}
1980
1981bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
1982                                            LiftoffRegister dst,
1983 LiftoffRegister src, Label* trap) {
1984 switch (opcode) {
1985 case kExprI32ConvertI64:
1986 lgfr(dst.gp(), src.gp());
1987 return true;
1988 case kExprI32SConvertF32: {
1989 ConvertFloat32ToInt32(dst.gp(), src.fp(),
1990 kRoundToZero); // f32 -> i32 round to zero.
1991 b(Condition(1), trap);
1992 return true;
1993 }
1994 case kExprI32UConvertF32: {
1995 ConvertFloat32ToUnsignedInt32(dst.gp(), src.fp(), kRoundToZero);
1996 b(Condition(1), trap);
1997 return true;
1998 }
1999 case kExprI32SConvertF64: {
2000 ConvertDoubleToInt32(dst.gp(), src.fp());
2001 b(Condition(1), trap);
2002 return true;
2003 }
2004 case kExprI32UConvertF64: {
2005 ConvertDoubleToUnsignedInt32(dst.gp(), src.fp(), kRoundToZero);
2006 b(Condition(1), trap);
2007 return true;
2008 }
2009 case kExprI32SConvertSatF32: {
2010 Label done, src_is_nan;
2011      lzer(kScratchDoubleReg);
2012      cebr(src.fp(), kScratchDoubleReg);
2013 b(Condition(1), &src_is_nan);
2014
2015 // source is a finite number
2016 ConvertFloat32ToInt32(dst.gp(), src.fp(),
2017 kRoundToZero); // f32 -> i32 round to zero.
2018 b(&done);
2019
2020 bind(&src_is_nan);
2021 lghi(dst.gp(), Operand::Zero());
2022
2023 bind(&done);
2024 return true;
2025 }
2026 case kExprI32UConvertSatF32: {
2027 Label done, src_is_nan;
2028      lzer(kScratchDoubleReg);
2029      cebr(src.fp(), kScratchDoubleReg);
2030 b(Condition(1), &src_is_nan);
2031
2032 // source is a finite number
2033 ConvertFloat32ToUnsignedInt32(dst.gp(), src.fp(), kRoundToZero);
2034 b(&done);
2035
2036 bind(&src_is_nan);
2037 lghi(dst.gp(), Operand::Zero());
2038
2039 bind(&done);
2040 return true;
2041 }
2042 case kExprI32SConvertSatF64: {
2043 Label done, src_is_nan;
2044      lzdr(kScratchDoubleReg);
2045      cdbr(src.fp(), kScratchDoubleReg);
2046 b(Condition(1), &src_is_nan);
2047
2048 ConvertDoubleToInt32(dst.gp(), src.fp());
2049 b(&done);
2050
2051 bind(&src_is_nan);
2052 lghi(dst.gp(), Operand::Zero());
2053
2054 bind(&done);
2055 return true;
2056 }
2057 case kExprI32UConvertSatF64: {
2058 Label done, src_is_nan;
2059      lzdr(kScratchDoubleReg);
2060      cdbr(src.fp(), kScratchDoubleReg);
2061 b(Condition(1), &src_is_nan);
2062
2063 ConvertDoubleToUnsignedInt32(dst.gp(), src.fp());
2064 b(&done);
2065
2066 bind(&src_is_nan);
2067 lghi(dst.gp(), Operand::Zero());
2068
2069 bind(&done);
2070 return true;
2071 }
2072 case kExprI32ReinterpretF32:
2073 lgdr(dst.gp(), src.fp());
2074 srlg(dst.gp(), dst.gp(), Operand(32));
2075 return true;
2076 case kExprI64SConvertI32:
2077 LoadS32(dst.gp(), src.gp());
2078 return true;
2079 case kExprI64UConvertI32:
2080 llgfr(dst.gp(), src.gp());
2081 return true;
2082 case kExprI64ReinterpretF64:
2083 lgdr(dst.gp(), src.fp());
2084 return true;
2085 case kExprF32SConvertI32: {
2086 ConvertIntToFloat(dst.fp(), src.gp());
2087 return true;
2088 }
2089 case kExprF32UConvertI32: {
2090 ConvertUnsignedIntToFloat(dst.fp(), src.gp());
2091 return true;
2092 }
2093 case kExprF32ConvertF64:
2094 ledbr(dst.fp(), src.fp());
2095 return true;
2096 case kExprF32ReinterpretI32: {
2097 sllg(r0, src.gp(), Operand(32));
2098 ldgr(dst.fp(), r0);
2099 return true;
2100 }
2101 case kExprF64SConvertI32: {
2102 ConvertIntToDouble(dst.fp(), src.gp());
2103 return true;
2104 }
2105 case kExprF64UConvertI32: {
2106 ConvertUnsignedIntToDouble(dst.fp(), src.gp());
2107 return true;
2108 }
2109 case kExprF64ConvertF32:
2110 ldebr(dst.fp(), src.fp());
2111 return true;
2112 case kExprF64ReinterpretI64:
2113 ldgr(dst.fp(), src.gp());
2114 return true;
2115 case kExprF64SConvertI64:
2116 ConvertInt64ToDouble(dst.fp(), src.gp());
2117 return true;
2118 case kExprF64UConvertI64:
2119 ConvertUnsignedInt64ToDouble(dst.fp(), src.gp());
2120 return true;
2121 case kExprI64SConvertF32: {
2122 ConvertFloat32ToInt64(dst.gp(), src.fp()); // f32 -> i64 round to zero.
2123 b(Condition(1), trap);
2124 return true;
2125 }
2126 case kExprI64UConvertF32: {
2127      ConvertFloat32ToUnsignedInt64(dst.gp(),
2128                                    src.fp());  // f32 -> i64 round to zero.
2129 b(Condition(1), trap);
2130 return true;
2131 }
2132 case kExprF32SConvertI64:
2133 ConvertInt64ToFloat(dst.fp(), src.gp());
2134 return true;
2135 case kExprF32UConvertI64:
2136 ConvertUnsignedInt64ToFloat(dst.fp(), src.gp());
2137 return true;
2138 case kExprI64SConvertF64: {
2139 ConvertDoubleToInt64(dst.gp(), src.fp()); // f64 -> i64 round to zero.
2140 b(Condition(1), trap);
2141 return true;
2142 }
2143 case kExprI64UConvertF64: {
2144      ConvertDoubleToUnsignedInt64(dst.gp(),
2145                                   src.fp());  // f64 -> i64 round to zero.
2146 b(Condition(1), trap);
2147 return true;
2148 }
2149 case kExprI64SConvertSatF32: {
2150 Label done, src_is_nan;
2151      lzer(kScratchDoubleReg);
2152      cebr(src.fp(), kScratchDoubleReg);
2153 b(Condition(1), &src_is_nan);
2154
2155 // source is a finite number
2156 ConvertFloat32ToInt64(dst.gp(), src.fp()); // f32 -> i64 round to zero.
2157 b(&done);
2158
2159 bind(&src_is_nan);
2160 lghi(dst.gp(), Operand::Zero());
2161
2162 bind(&done);
2163 return true;
2164 }
2165 case kExprI64UConvertSatF32: {
2166 Label done, src_is_nan;
2167      lzer(kScratchDoubleReg);
2168      cebr(src.fp(), kScratchDoubleReg);
2169 b(Condition(1), &src_is_nan);
2170
2171 // source is a finite number
2172      ConvertFloat32ToUnsignedInt64(dst.gp(),
2173                                    src.fp());  // f32 -> i64 round to zero.
2174 b(&done);
2175
2176 bind(&src_is_nan);
2177 lghi(dst.gp(), Operand::Zero());
2178
2179 bind(&done);
2180 return true;
2181 }
2182 case kExprI64SConvertSatF64: {
2183 Label done, src_is_nan;
2184      lzdr(kScratchDoubleReg);
2185      cdbr(src.fp(), kScratchDoubleReg);
2186 b(Condition(1), &src_is_nan);
2187
2188 ConvertDoubleToInt64(dst.gp(), src.fp()); // f64 -> i64 round to zero.
2189 b(&done);
2190
2191 bind(&src_is_nan);
2192 lghi(dst.gp(), Operand::Zero());
2193
2194 bind(&done);
2195 return true;
2196 }
2197 case kExprI64UConvertSatF64: {
2198 Label done, src_is_nan;
2199      lzdr(kScratchDoubleReg);
2200      cdbr(src.fp(), kScratchDoubleReg);
2201 b(Condition(1), &src_is_nan);
2202
2203      ConvertDoubleToUnsignedInt64(dst.gp(),
2204                                   src.fp());  // f64 -> i64 round to zero.
2205 b(&done);
2206
2207 bind(&src_is_nan);
2208 lghi(dst.gp(), Operand::Zero());
2209
2210 bind(&done);
2211 return true;
2212 }
2213 default:
2214 UNREACHABLE();
2215 }
2216}
2217
2218void LiftoffAssembler::emit_jump(Label* label) { b(al, label); }
2219
2220void LiftoffAssembler::emit_jump(Register target) { Jump(target); }
2221
2222void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
2223                                      ValueKind kind, Register lhs,
2224 Register rhs,
2225 const FreezeCacheState& frozen) {
2226 bool use_signed = is_signed(cond);
2227
2228 if (rhs != no_reg) {
2229 switch (kind) {
2230 case kI32:
2231 if (use_signed) {
2232 CmpS32(lhs, rhs);
2233 } else {
2234 CmpU32(lhs, rhs);
2235 }
2236 break;
2237 case kRef:
2238 case kRefNull:
2239 DCHECK(cond == kEqual || cond == kNotEqual);
2240#if defined(V8_COMPRESS_POINTERS)
2241 if (use_signed) {
2242 CmpS32(lhs, rhs);
2243 } else {
2244 CmpU32(lhs, rhs);
2245 }
2246#else
2247 if (use_signed) {
2248 CmpS64(lhs, rhs);
2249 } else {
2250 CmpU64(lhs, rhs);
2251 }
2252#endif
2253 break;
2254 case kI64:
2255 if (use_signed) {
2256 CmpS64(lhs, rhs);
2257 } else {
2258 CmpU64(lhs, rhs);
2259 }
2260 break;
2261 default:
2262 UNREACHABLE();
2263 }
2264 } else {
2266 CHECK(use_signed);
2267 CmpS32(lhs, Operand::Zero());
2268 }
2269
2270 b(to_condition(cond), label);
2271}
2272
2273void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label,
2274                                           Register lhs, int32_t imm,
2275 const FreezeCacheState& frozen) {
2276 bool use_signed = is_signed(cond);
2277 if (use_signed) {
2278 CmpS32(lhs, Operand(imm));
2279 } else {
2280 CmpU32(lhs, Operand(imm));
2281 }
2282 b(to_condition(cond), label);
2283}
2284
2285void LiftoffAssembler::emit_ptrsize_cond_jumpi(Condition cond, Label* label,
2286                                           Register lhs, int32_t imm,
2287 const FreezeCacheState& frozen) {
2288 bool use_signed = is_signed(cond);
2289 if (use_signed) {
2290 CmpS64(lhs, Operand(imm));
2291 } else {
2292 CmpU64(lhs, Operand(imm));
2293 }
2294 b(to_condition(cond), label);
2295}
2296
2297#define EMIT_EQZ(test, src) \
2298 { \
2299 Label done; \
2300 test(r0, src); \
2301 mov(dst, Operand(1)); \
2302 beq(&done); \
2303 mov(dst, Operand(0)); \
2304 bind(&done); \
2305 }
2306
2307void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
2308 EMIT_EQZ(ltr, src);
2309}
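// For illustration: EMIT_EQZ(ltr, src) above expands to roughly the following
// sequence:
//   Label done;
//   ltr(r0, src);           // load-and-test sets the condition code from src
//   mov(dst, Operand(1));   // speculatively set the result to 1
//   beq(&done);             // keep 1 if the test said "zero"
//   mov(dst, Operand(0));   // otherwise the result is 0
//   bind(&done);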
2310
2311#define EMIT_SET_CONDITION(dst, cond) \
2312 { \
2313 Label done; \
2314 lghi(dst, Operand(1)); \
2315 b(cond, &done); \
2316 lghi(dst, Operand(0)); \
2317 bind(&done); \
2318 }
2319
2320void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
2321 Register lhs, Register rhs) {
2322 bool use_signed = is_signed(cond);
2323 if (use_signed) {
2324 CmpS32(lhs, rhs);
2325 } else {
2326 CmpU32(lhs, rhs);
2327 }
2328
2329 EMIT_SET_CONDITION(dst, to_condition(cond));
2330}
2331
2332void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
2333 EMIT_EQZ(ltgr, src.gp());
2334}
2335
2336void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
2337 LiftoffRegister lhs,
2338 LiftoffRegister rhs) {
2339 bool use_signed = is_signed(cond);
2340 if (use_signed) {
2341 CmpS64(lhs.gp(), rhs.gp());
2342 } else {
2343 CmpU64(lhs.gp(), rhs.gp());
2344 }
2345
2346 EMIT_SET_CONDITION(dst, to_condition(cond));
2347}
2348
2349void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
2350 DoubleRegister lhs,
2351 DoubleRegister rhs) {
2352 cebr(lhs, rhs);
2353 EMIT_SET_CONDITION(dst, to_condition(cond));
2354}
2355
2356void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
2357 DoubleRegister lhs,
2358 DoubleRegister rhs) {
2359 cdbr(lhs, rhs);
2360 EMIT_SET_CONDITION(dst, to_condition(cond));
2361}
2362
2363void LiftoffAssembler::emit_i64_muli(LiftoffRegister dst, LiftoffRegister lhs,
2364 int32_t imm) {
2365 if (base::bits::IsPowerOfTwo(imm)) {
2366    emit_i64_shli(dst, lhs, base::bits::WhichPowerOfTwo(imm));
2367    return;
2368 }
2369 mov(r0, Operand(imm));
2370 MulS64(dst.gp(), lhs.gp(), r0);
2371}
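// For illustration: when the immediate is a power of two the multiplication
// reduces to a shift (e.g. imm = 8 gives dst = lhs << 3); otherwise the
// immediate is materialized in r0 and MulS64 performs a full multiply.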
2372
2373bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
2374 LiftoffRegister true_value,
2375 LiftoffRegister false_value,
2376 ValueKind kind) {
2377 return false;
2378}
2379
2380void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
2381 SmiCheckMode mode,
2382 const FreezeCacheState& frozen) {
2383 TestIfSmi(obj);
2384 Condition condition = mode == kJumpOnSmi ? eq : ne;
2385 b(condition, target); // branch if SMI
2386}
2387
2388void LiftoffAssembler::clear_i32_upper_half(Register dst) { LoadU32(dst, dst); }
2389
2390#define SIMD_BINOP_RR_LIST(V) \
2391 V(f64x2_add, F64x2Add) \
2392 V(f64x2_sub, F64x2Sub) \
2393 V(f64x2_mul, F64x2Mul) \
2394 V(f64x2_div, F64x2Div) \
2395 V(f64x2_min, F64x2Min) \
2396 V(f64x2_max, F64x2Max) \
2397 V(f64x2_eq, F64x2Eq) \
2398 V(f64x2_ne, F64x2Ne) \
2399 V(f64x2_lt, F64x2Lt) \
2400 V(f64x2_le, F64x2Le) \
2401 V(f64x2_pmin, F64x2Pmin) \
2402 V(f64x2_pmax, F64x2Pmax) \
2403 V(f32x4_add, F32x4Add) \
2404 V(f32x4_sub, F32x4Sub) \
2405 V(f32x4_mul, F32x4Mul) \
2406 V(f32x4_div, F32x4Div) \
2407 V(f32x4_min, F32x4Min) \
2408 V(f32x4_max, F32x4Max) \
2409 V(f32x4_eq, F32x4Eq) \
2410 V(f32x4_ne, F32x4Ne) \
2411 V(f32x4_lt, F32x4Lt) \
2412 V(f32x4_le, F32x4Le) \
2413 V(f32x4_pmin, F32x4Pmin) \
2414 V(f32x4_pmax, F32x4Pmax) \
2415 V(i64x2_add, I64x2Add) \
2416 V(i64x2_sub, I64x2Sub) \
2417 V(i64x2_eq, I64x2Eq) \
2418 V(i64x2_ne, I64x2Ne) \
2419 V(i64x2_gt_s, I64x2GtS) \
2420 V(i64x2_ge_s, I64x2GeS) \
2421 V(i32x4_add, I32x4Add) \
2422 V(i32x4_sub, I32x4Sub) \
2423 V(i32x4_mul, I32x4Mul) \
2424 V(i32x4_eq, I32x4Eq) \
2425 V(i32x4_ne, I32x4Ne) \
2426 V(i32x4_gt_s, I32x4GtS) \
2427 V(i32x4_ge_s, I32x4GeS) \
2428 V(i32x4_gt_u, I32x4GtU) \
2429 V(i32x4_min_s, I32x4MinS) \
2430 V(i32x4_min_u, I32x4MinU) \
2431 V(i32x4_max_s, I32x4MaxS) \
2432 V(i32x4_max_u, I32x4MaxU) \
2433 V(i16x8_add, I16x8Add) \
2434 V(i16x8_sub, I16x8Sub) \
2435 V(i16x8_mul, I16x8Mul) \
2436 V(i16x8_eq, I16x8Eq) \
2437 V(i16x8_ne, I16x8Ne) \
2438 V(i16x8_gt_s, I16x8GtS) \
2439 V(i16x8_ge_s, I16x8GeS) \
2440 V(i16x8_gt_u, I16x8GtU) \
2441 V(i16x8_min_s, I16x8MinS) \
2442 V(i16x8_min_u, I16x8MinU) \
2443 V(i16x8_max_s, I16x8MaxS) \
2444 V(i16x8_max_u, I16x8MaxU) \
2445 V(i16x8_rounding_average_u, I16x8RoundingAverageU) \
2446 V(i8x16_add, I8x16Add) \
2447 V(i8x16_sub, I8x16Sub) \
2448 V(i8x16_eq, I8x16Eq) \
2449 V(i8x16_ne, I8x16Ne) \
2450 V(i8x16_gt_s, I8x16GtS) \
2451 V(i8x16_ge_s, I8x16GeS) \
2452 V(i8x16_gt_u, I8x16GtU) \
2453 V(i8x16_min_s, I8x16MinS) \
2454 V(i8x16_min_u, I8x16MinU) \
2455 V(i8x16_max_s, I8x16MaxS) \
2456 V(i8x16_max_u, I8x16MaxU) \
2457 V(i8x16_rounding_average_u, I8x16RoundingAverageU) \
2458 V(s128_and, S128And) \
2459 V(s128_or, S128Or) \
2460 V(s128_xor, S128Xor) \
2461 V(s128_and_not, S128AndNot)
2462
2463#define EMIT_SIMD_BINOP_RR(name, op) \
2464 void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
2465 LiftoffRegister rhs) { \
2466 op(dst.fp(), lhs.fp(), rhs.fp()); \
2467 }
2468SIMD_BINOP_RR_LIST(EMIT_SIMD_BINOP_RR)
2469#undef EMIT_SIMD_BINOP_RR
2470#undef SIMD_BINOP_RR_LIST
2471
2472#define SIMD_SHIFT_RR_LIST(V) \
2473 V(i64x2_shl, I64x2Shl) \
2474 V(i64x2_shr_s, I64x2ShrS) \
2475 V(i64x2_shr_u, I64x2ShrU) \
2476 V(i32x4_shl, I32x4Shl) \
2477 V(i32x4_shr_s, I32x4ShrS) \
2478 V(i32x4_shr_u, I32x4ShrU) \
2479 V(i16x8_shl, I16x8Shl) \
2480 V(i16x8_shr_s, I16x8ShrS) \
2481 V(i16x8_shr_u, I16x8ShrU) \
2482 V(i8x16_shl, I8x16Shl) \
2483 V(i8x16_shr_s, I8x16ShrS) \
2484 V(i8x16_shr_u, I8x16ShrU)
2485
2486#define EMIT_SIMD_SHIFT_RR(name, op) \
2487 void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
2488 LiftoffRegister rhs) { \
2489 op(dst.fp(), lhs.fp(), rhs.gp(), kScratchDoubleReg); \
2490 }
2491SIMD_SHIFT_RR_LIST(EMIT_SIMD_SHIFT_RR)
2492#undef EMIT_SIMD_SHIFT_RR
2493#undef SIMD_SHIFT_RR_LIST
2494
2495#define SIMD_SHIFT_RI_LIST(V) \
2496 V(i64x2_shli, I64x2Shl, 63) \
2497 V(i64x2_shri_s, I64x2ShrS, 63) \
2498 V(i64x2_shri_u, I64x2ShrU, 63) \
2499 V(i32x4_shli, I32x4Shl, 31) \
2500 V(i32x4_shri_s, I32x4ShrS, 31) \
2501 V(i32x4_shri_u, I32x4ShrU, 31) \
2502 V(i16x8_shli, I16x8Shl, 15) \
2503 V(i16x8_shri_s, I16x8ShrS, 15) \
2504 V(i16x8_shri_u, I16x8ShrU, 15) \
2505 V(i8x16_shli, I8x16Shl, 7) \
2506 V(i8x16_shri_s, I8x16ShrS, 7) \
2507 V(i8x16_shri_u, I8x16ShrU, 7)
2508
2509#define EMIT_SIMD_SHIFT_RI(name, op, mask) \
2510 void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
2511 int32_t rhs) { \
2512 op(dst.fp(), lhs.fp(), Operand(rhs & mask), r0, kScratchDoubleReg); \
2513 }
2514SIMD_SHIFT_RI_LIST(EMIT_SIMD_SHIFT_RI)
2515#undef EMIT_SIMD_SHIFT_RI
2516#undef SIMD_SHIFT_RI_LIST
2517
2518#define SIMD_UNOP_LIST(V) \
2519 V(f64x2_splat, F64x2Splat, fp, fp, , void) \
2520 V(f64x2_abs, F64x2Abs, fp, fp, , void) \
2521 V(f64x2_neg, F64x2Neg, fp, fp, , void) \
2522 V(f64x2_sqrt, F64x2Sqrt, fp, fp, , void) \
2523 V(f64x2_ceil, F64x2Ceil, fp, fp, true, bool) \
2524 V(f64x2_floor, F64x2Floor, fp, fp, true, bool) \
2525 V(f64x2_trunc, F64x2Trunc, fp, fp, true, bool) \
2526 V(f64x2_nearest_int, F64x2NearestInt, fp, fp, true, bool) \
2527 V(f64x2_convert_low_i32x4_s, F64x2ConvertLowI32x4S, fp, fp, , void) \
2528 V(f64x2_convert_low_i32x4_u, F64x2ConvertLowI32x4U, fp, fp, , void) \
2529 V(f32x4_abs, F32x4Abs, fp, fp, , void) \
2530 V(f32x4_splat, F32x4Splat, fp, fp, , void) \
2531 V(f32x4_neg, F32x4Neg, fp, fp, , void) \
2532 V(f32x4_sqrt, F32x4Sqrt, fp, fp, , void) \
2533 V(f32x4_ceil, F32x4Ceil, fp, fp, true, bool) \
2534 V(f32x4_floor, F32x4Floor, fp, fp, true, bool) \
2535 V(f32x4_trunc, F32x4Trunc, fp, fp, true, bool) \
2536 V(f32x4_nearest_int, F32x4NearestInt, fp, fp, true, bool) \
2537 V(i64x2_abs, I64x2Abs, fp, fp, , void) \
2538 V(i64x2_splat, I64x2Splat, fp, gp, , void) \
2539 V(i64x2_neg, I64x2Neg, fp, fp, , void) \
2540 V(i64x2_sconvert_i32x4_low, I64x2SConvertI32x4Low, fp, fp, , void) \
2541 V(i64x2_sconvert_i32x4_high, I64x2SConvertI32x4High, fp, fp, , void) \
2542 V(i64x2_uconvert_i32x4_low, I64x2UConvertI32x4Low, fp, fp, , void) \
2543 V(i64x2_uconvert_i32x4_high, I64x2UConvertI32x4High, fp, fp, , void) \
2544 V(i32x4_abs, I32x4Abs, fp, fp, , void) \
2545 V(i32x4_neg, I32x4Neg, fp, fp, , void) \
2546 V(i32x4_splat, I32x4Splat, fp, gp, , void) \
2547 V(i32x4_sconvert_i16x8_low, I32x4SConvertI16x8Low, fp, fp, , void) \
2548 V(i32x4_sconvert_i16x8_high, I32x4SConvertI16x8High, fp, fp, , void) \
2549 V(i32x4_uconvert_i16x8_low, I32x4UConvertI16x8Low, fp, fp, , void) \
2550 V(i32x4_uconvert_i16x8_high, I32x4UConvertI16x8High, fp, fp, , void) \
2551 V(i16x8_abs, I16x8Abs, fp, fp, , void) \
2552 V(i16x8_neg, I16x8Neg, fp, fp, , void) \
2553 V(i16x8_splat, I16x8Splat, fp, gp, , void) \
2554 V(i16x8_sconvert_i8x16_low, I16x8SConvertI8x16Low, fp, fp, , void) \
2555 V(i16x8_sconvert_i8x16_high, I16x8SConvertI8x16High, fp, fp, , void) \
2556 V(i16x8_uconvert_i8x16_low, I16x8UConvertI8x16Low, fp, fp, , void) \
2557 V(i16x8_uconvert_i8x16_high, I16x8UConvertI8x16High, fp, fp, , void) \
2558 V(i8x16_abs, I8x16Abs, fp, fp, , void) \
2559 V(i8x16_neg, I8x16Neg, fp, fp, , void) \
2560 V(i8x16_splat, I8x16Splat, fp, gp, , void) \
2561 V(i8x16_popcnt, I8x16Popcnt, fp, fp, , void) \
2562 V(s128_not, S128Not, fp, fp, , void)
2563
2564#define EMIT_SIMD_UNOP(name, op, dtype, stype, return_val, return_type) \
2565 return_type LiftoffAssembler::emit_##name(LiftoffRegister dst, \
2566 LiftoffRegister src) { \
2567 op(dst.dtype(), src.stype()); \
2568 return return_val; \
2569 }
2570SIMD_UNOP_LIST(EMIT_SIMD_UNOP)
2571#undef EMIT_SIMD_UNOP
2572#undef SIMD_UNOP_LIST
2573
2574#define SIMD_EXTRACT_LANE_LIST(V) \
2575 V(f64x2_extract_lane, F64x2ExtractLane, fp) \
2576 V(f32x4_extract_lane, F32x4ExtractLane, fp) \
2577 V(i64x2_extract_lane, I64x2ExtractLane, gp) \
2578 V(i32x4_extract_lane, I32x4ExtractLane, gp) \
2579 V(i16x8_extract_lane_u, I16x8ExtractLaneU, gp) \
2580 V(i16x8_extract_lane_s, I16x8ExtractLaneS, gp) \
2581 V(i8x16_extract_lane_u, I8x16ExtractLaneU, gp) \
2582 V(i8x16_extract_lane_s, I8x16ExtractLaneS, gp)
2583
2584#define EMIT_SIMD_EXTRACT_LANE(name, op, dtype) \
2585 void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
2586 uint8_t imm_lane_idx) { \
2587 op(dst.dtype(), src.fp(), imm_lane_idx, r0); \
2588 }
2589SIMD_EXTRACT_LANE_LIST(EMIT_SIMD_EXTRACT_LANE)
2590#undef EMIT_SIMD_EXTRACT_LANE
2591#undef SIMD_EXTRACT_LANE_LIST
2592
2593#define SIMD_REPLACE_LANE_LIST(V) \
2594 V(f64x2_replace_lane, F64x2ReplaceLane, fp) \
2595 V(f32x4_replace_lane, F32x4ReplaceLane, fp) \
2596 V(i64x2_replace_lane, I64x2ReplaceLane, gp) \
2597 V(i32x4_replace_lane, I32x4ReplaceLane, gp) \
2598 V(i16x8_replace_lane, I16x8ReplaceLane, gp) \
2599 V(i8x16_replace_lane, I8x16ReplaceLane, gp)
2600
2601#define EMIT_SIMD_REPLACE_LANE(name, op, stype) \
2602 void LiftoffAssembler::emit_##name( \
2603 LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, \
2604 uint8_t imm_lane_idx) { \
2605 op(dst.fp(), src1.fp(), src2.stype(), imm_lane_idx, r0); \
2606 }
2607SIMD_REPLACE_LANE_LIST(EMIT_SIMD_REPLACE_LANE)
2608#undef EMIT_SIMD_REPLACE_LANE
2609#undef SIMD_REPLACE_LANE_LIST
2610
2611#define SIMD_EXT_MUL_LIST(V) \
2612 V(i64x2_extmul_low_i32x4_s, I64x2ExtMulLowI32x4S) \
2613 V(i64x2_extmul_low_i32x4_u, I64x2ExtMulLowI32x4U) \
2614 V(i64x2_extmul_high_i32x4_s, I64x2ExtMulHighI32x4S) \
2615 V(i64x2_extmul_high_i32x4_u, I64x2ExtMulHighI32x4U) \
2616 V(i32x4_extmul_low_i16x8_s, I32x4ExtMulLowI16x8S) \
2617 V(i32x4_extmul_low_i16x8_u, I32x4ExtMulLowI16x8U) \
2618 V(i32x4_extmul_high_i16x8_s, I32x4ExtMulHighI16x8S) \
2619 V(i32x4_extmul_high_i16x8_u, I32x4ExtMulHighI16x8U) \
2620 V(i16x8_extmul_low_i8x16_s, I16x8ExtMulLowI8x16S) \
2621 V(i16x8_extmul_low_i8x16_u, I16x8ExtMulLowI8x16U) \
2622 V(i16x8_extmul_high_i8x16_s, I16x8ExtMulHighI8x16S) \
2623 V(i16x8_extmul_high_i8x16_u, I16x8ExtMulHighI8x16U)
2624
2625#define EMIT_SIMD_EXT_MUL(name, op) \
2626 void LiftoffAssembler::emit_##name( \
2627 LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
2628 op(dst.fp(), src1.fp(), src2.fp(), kScratchDoubleReg); \
2629 }
2630SIMD_EXT_MUL_LIST(EMIT_SIMD_EXT_MUL)
2631#undef EMIT_SIMD_EXT_MUL
2632#undef SIMD_EXT_MUL_LIST
2633
2634#define SIMD_ALL_TRUE_LIST(V) \
2635 V(i64x2_alltrue, I64x2AllTrue) \
2636 V(i32x4_alltrue, I32x4AllTrue) \
2637 V(i16x8_alltrue, I16x8AllTrue) \
2638 V(i8x16_alltrue, I8x16AllTrue)
2639
2640#define EMIT_SIMD_ALL_TRUE(name, op) \
2641 void LiftoffAssembler::emit_##name(LiftoffRegister dst, \
2642 LiftoffRegister src) { \
2643 op(dst.gp(), src.fp(), r0, kScratchDoubleReg); \
2644 }
2645SIMD_ALL_TRUE_LIST(EMIT_SIMD_ALL_TRUE)
2646#undef EMIT_SIMD_ALL_TRUE
2647#undef SIMD_ALL_TRUE_LIST
2648
2649#define SIMD_ADD_SUB_SAT_LIST(V) \
2650 V(i16x8_add_sat_s, I16x8AddSatS) \
2651 V(i16x8_sub_sat_s, I16x8SubSatS) \
2652 V(i16x8_add_sat_u, I16x8AddSatU) \
2653 V(i16x8_sub_sat_u, I16x8SubSatU) \
2654 V(i8x16_add_sat_s, I8x16AddSatS) \
2655 V(i8x16_sub_sat_s, I8x16SubSatS) \
2656 V(i8x16_add_sat_u, I8x16AddSatU) \
2657 V(i8x16_sub_sat_u, I8x16SubSatU)
2658
2659#define EMIT_SIMD_ADD_SUB_SAT(name, op) \
2660 void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
2661 LiftoffRegister rhs) { \
2662 Simd128Register src1 = lhs.fp(); \
2663 Simd128Register src2 = rhs.fp(); \
2664 Simd128Register dest = dst.fp(); \
2665 /* lhs and rhs are unique based on their selection under liftoff-compiler \
2666 * `EmitBinOp`. */ \
2667 /* Make sure dst and temp are also unique. */ \
2668 if (dest == src1 || dest == src2) { \
2669 dest = GetUnusedRegister(kFpReg, LiftoffRegList{src1, src2}).fp(); \
2670 } \
2671 Simd128Register temp = \
2672 GetUnusedRegister(kFpReg, LiftoffRegList{dest, src1, src2}).fp(); \
2673 op(dest, src1, src2, kScratchDoubleReg, temp); \
2674 /* Original dst register needs to be populated. */ \
2675 if (dest != dst.fp()) { \
2676 vlr(dst.fp(), dest, Condition(0), Condition(0), Condition(0)); \
2677 } \
2678 }
2679SIMD_ADD_SUB_SAT_LIST(EMIT_SIMD_ADD_SUB_SAT)
2680#undef EMIT_SIMD_ADD_SUB_SAT
2681#undef SIMD_ADD_SUB_SAT_LIST
2683#define SIMD_EXT_ADD_PAIRWISE_LIST(V) \
2684 V(i32x4_extadd_pairwise_i16x8_s, I32x4ExtAddPairwiseI16x8S) \
2685 V(i32x4_extadd_pairwise_i16x8_u, I32x4ExtAddPairwiseI16x8U) \
2686 V(i16x8_extadd_pairwise_i8x16_s, I16x8ExtAddPairwiseI8x16S) \
2687 V(i16x8_extadd_pairwise_i8x16_u, I16x8ExtAddPairwiseI8x16U)
2689#define EMIT_SIMD_EXT_ADD_PAIRWISE(name, op) \
2690 void LiftoffAssembler::emit_##name(LiftoffRegister dst, \
2691 LiftoffRegister src) { \
2692 Simd128Register src1 = src.fp(); \
2693 Simd128Register dest = dst.fp(); \
2694 /* Make sure dst and temp are unique. */ \
2695 if (dest == src1) { \
2696 dest = GetUnusedRegister(kFpReg, LiftoffRegList{src1}).fp(); \
2697 } \
2698 Simd128Register temp = \
2699 GetUnusedRegister(kFpReg, LiftoffRegList{dest, src1}).fp(); \
2700 op(dest, src1, kScratchDoubleReg, temp); \
2701 if (dest != dst.fp()) { \
2702 vlr(dst.fp(), dest, Condition(0), Condition(0), Condition(0)); \
2703 } \
2704 }
2705SIMD_EXT_ADD_PAIRWISE_LIST(EMIT_SIMD_EXT_ADD_PAIRWISE)
2706#undef EMIT_SIMD_EXT_ADD_PAIRWISE
2707#undef SIMD_EXT_ADD_PAIRWISE_LIST
2709#define SIMD_QFM_LIST(V) \
2710 V(f64x2_qfma, F64x2Qfma) \
2711 V(f64x2_qfms, F64x2Qfms) \
2712 V(f32x4_qfma, F32x4Qfma) \
2713 V(f32x4_qfms, F32x4Qfms)
2715#define EMIT_SIMD_QFM(name, op) \
2716 void LiftoffAssembler::emit_##name( \
2717 LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, \
2718 LiftoffRegister src3) { \
2719 op(dst.fp(), src1.fp(), src2.fp(), src3.fp()); \
2720 }
2721SIMD_QFM_LIST(EMIT_SIMD_QFM)
2722#undef EMIT_SIMD_QFM
2723#undef SIMD_QFM_LIST
2725#define SIMD_RELAXED_BINOP_LIST(V) \
2726 V(i8x16_relaxed_swizzle, i8x16_swizzle) \
2727 V(f64x2_relaxed_min, f64x2_pmin) \
2728 V(f64x2_relaxed_max, f64x2_pmax) \
2729 V(f32x4_relaxed_min, f32x4_pmin) \
2730 V(f32x4_relaxed_max, f32x4_pmax) \
2731 V(i16x8_relaxed_q15mulr_s, i16x8_q15mulr_sat_s)
2733#define SIMD_VISIT_RELAXED_BINOP(name, op) \
2734 void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
2735 LiftoffRegister rhs) { \
2736 emit_##op(dst, lhs, rhs); \
2737 }
2738SIMD_RELAXED_BINOP_LIST(SIMD_VISIT_RELAXED_BINOP)
2739#undef SIMD_VISIT_RELAXED_BINOP
2740#undef SIMD_RELAXED_BINOP_LIST
2742#define SIMD_RELAXED_UNOP_LIST(V) \
2743 V(i32x4_relaxed_trunc_f32x4_s, i32x4_sconvert_f32x4) \
2744 V(i32x4_relaxed_trunc_f32x4_u, i32x4_uconvert_f32x4) \
2745 V(i32x4_relaxed_trunc_f64x2_s_zero, i32x4_trunc_sat_f64x2_s_zero) \
2746 V(i32x4_relaxed_trunc_f64x2_u_zero, i32x4_trunc_sat_f64x2_u_zero)
2748#define SIMD_VISIT_RELAXED_UNOP(name, op) \
2749 void LiftoffAssembler::emit_##name(LiftoffRegister dst, \
2750 LiftoffRegister src) { \
2751 emit_##op(dst, src); \
2752 }
2753SIMD_RELAXED_UNOP_LIST(SIMD_VISIT_RELAXED_UNOP)
2754#undef SIMD_VISIT_RELAXED_UNOP
2755#undef SIMD_RELAXED_UNOP_LIST
2757#define F16_UNOP_LIST(V) \
2758 V(f16x8_splat) \
2759 V(f16x8_abs) \
2760 V(f16x8_neg) \
2761 V(f16x8_sqrt) \
2762 V(f16x8_ceil) \
2763 V(f16x8_floor) \
2764 V(f16x8_trunc) \
2765 V(f16x8_nearest_int) \
2766 V(i16x8_sconvert_f16x8) \
2767 V(i16x8_uconvert_f16x8) \
2768 V(f16x8_sconvert_i16x8) \
2769 V(f16x8_uconvert_i16x8) \
2770 V(f16x8_demote_f32x4_zero) \
2771 V(f32x4_promote_low_f16x8) \
2772 V(f16x8_demote_f64x2_zero)
2774#define VISIT_F16_UNOP(name) \
2775 bool LiftoffAssembler::emit_##name(LiftoffRegister dst, \
2776 LiftoffRegister src) { \
2777 return false; \
2778 }
2779F16_UNOP_LIST(VISIT_F16_UNOP)
2780#undef VISIT_F16_UNOP
2781#undef F16_UNOP_LIST
2783#define F16_BINOP_LIST(V) \
2784 V(f16x8_eq) \
2785 V(f16x8_ne) \
2786 V(f16x8_lt) \
2787 V(f16x8_le) \
2788 V(f16x8_add) \
2789 V(f16x8_sub) \
2790 V(f16x8_mul) \
2791 V(f16x8_div) \
2792 V(f16x8_min) \
2793 V(f16x8_max) \
2794 V(f16x8_pmin) \
2795 V(f16x8_pmax)
2797#define VISIT_F16_BINOP(name) \
2798 bool LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
2799 LiftoffRegister rhs) { \
2800 return false; \
2801 }
2802F16_BINOP_LIST(VISIT_F16_BINOP)
2803#undef VISIT_F16_BINOP
2804#undef F16_BINOP_LIST
2805
2806bool LiftoffAssembler::supports_f16_mem_access() { return false; }
2807
2808bool LiftoffAssembler::emit_f16x8_extract_lane(LiftoffRegister dst,
2809 LiftoffRegister lhs,
2810 uint8_t imm_lane_idx) {
2811 return false;
2812}
2813
2814bool LiftoffAssembler::emit_f16x8_replace_lane(LiftoffRegister dst,
2815 LiftoffRegister src1,
2816 LiftoffRegister src2,
2817 uint8_t imm_lane_idx) {
2818 return false;
2819}
2820
2821bool LiftoffAssembler::emit_f16x8_qfma(LiftoffRegister dst,
2822 LiftoffRegister src1,
2823 LiftoffRegister src2,
2824 LiftoffRegister src3) {
2825 return false;
2826}
2827
2828bool LiftoffAssembler::emit_f16x8_qfms(LiftoffRegister dst,
2829 LiftoffRegister src1,
2830 LiftoffRegister src2,
2831 LiftoffRegister src3) {
2832 return false;
2833}
2834
2835void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
2836 Register offset_reg, uintptr_t offset_imm,
2837 LoadType type,
2838 LoadTransformationKind transform,
2839 uint32_t* protected_load_pc,
2840 bool i64_offset) {
2841 if (!is_int20(offset_imm)) {
2842 mov(ip, Operand(offset_imm));
2843 if (offset_reg != no_reg) {
2844 AddS64(ip, offset_reg);
2845 }
2846 offset_reg = ip;
2847 offset_imm = 0;
2848 }
2849 MemOperand src_op =
2850 MemOperand(src_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm);
2851 *protected_load_pc = pc_offset();
2852 MachineType memtype = type.mem_type();
2853 if (transform == LoadTransformationKind::kExtend) {
2854 if (memtype == MachineType::Int8()) {
2855 LoadAndExtend8x8SLE(dst.fp(), src_op, r1);
2856 } else if (memtype == MachineType::Uint8()) {
2857 LoadAndExtend8x8ULE(dst.fp(), src_op, r1);
2858 } else if (memtype == MachineType::Int16()) {
2859 LoadAndExtend16x4SLE(dst.fp(), src_op, r1);
2860 } else if (memtype == MachineType::Uint16()) {
2861 LoadAndExtend16x4ULE(dst.fp(), src_op, r1);
2862 } else if (memtype == MachineType::Int32()) {
2863 LoadAndExtend32x2SLE(dst.fp(), src_op, r1);
2864 } else if (memtype == MachineType::Uint32()) {
2865 LoadAndExtend32x2ULE(dst.fp(), src_op, r1);
2866 }
2867 } else if (transform == LoadTransformationKind::kZeroExtend) {
2868 if (memtype == MachineType::Int32()) {
2869 LoadV32ZeroLE(dst.fp(), src_op, r1);
2870 } else {
2871 DCHECK_EQ(MachineType::Int64(), memtype);
2872 LoadV64ZeroLE(dst.fp(), src_op, r1);
2873 }
2874 } else {
2876 if (memtype == MachineType::Int8()) {
2877 LoadAndSplat8x16LE(dst.fp(), src_op, r1);
2878 } else if (memtype == MachineType::Int16()) {
2879 LoadAndSplat16x8LE(dst.fp(), src_op, r1);
2880 } else if (memtype == MachineType::Int32()) {
2881 LoadAndSplat32x4LE(dst.fp(), src_op, r1);
2882 } else if (memtype == MachineType::Int64()) {
2883 LoadAndSplat64x2LE(dst.fp(), src_op, r1);
2884 }
2885 }
2886}
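// For illustration: the is_int20 guard above exists because s390
// long-displacement memory operands encode the displacement as a signed
// 20-bit field (roughly -524288 .. +524287). An offset_imm of, say, 0x100000
// does not fit, so it is moved into {ip} and folded into the index register
// with a zero displacement.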
2887
2888void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
2889 Register addr, Register offset_reg,
2890 uintptr_t offset_imm, LoadType type,
2891 uint8_t laneidx, uint32_t* protected_load_pc,
2892 bool i64_offset) {
2893 PREP_MEM_OPERAND(offset_reg, offset_imm, ip)
2894 MemOperand src_op =
2895 MemOperand(addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm);
2896
2897 MachineType mem_type = type.mem_type();
2898 if (dst != src) {
2899 vlr(dst.fp(), src.fp(), Condition(0), Condition(0), Condition(0));
2900 }
2901
2902 if (protected_load_pc) *protected_load_pc = pc_offset();
2903 if (mem_type == MachineType::Int8()) {
2904 LoadLane8LE(dst.fp(), src_op, 15 - laneidx, r1);
2905 } else if (mem_type == MachineType::Int16()) {
2906 LoadLane16LE(dst.fp(), src_op, 7 - laneidx, r1);
2907 } else if (mem_type == MachineType::Int32()) {
2908 LoadLane32LE(dst.fp(), src_op, 3 - laneidx, r1);
2909 } else {
2910 DCHECK_EQ(MachineType::Int64(), mem_type);
2911 LoadLane64LE(dst.fp(), src_op, 1 - laneidx, r1);
2912 }
2913}
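// For illustration: the mirrored lane index (15 - laneidx for 8-bit lanes,
// 7 - laneidx for 16-bit lanes, and so on) maps the little-endian wasm lane
// numbering onto the element numbering used by the big-endian vector
// registers, e.g. wasm i8x16 lane 0 corresponds to element 15 here.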
2914
2915void LiftoffAssembler::StoreLane(Register dst, Register offset,
2916 uintptr_t offset_imm, LiftoffRegister src,
2917 StoreType type, uint8_t lane,
2918 uint32_t* protected_store_pc,
2919 bool i64_offset) {
2920 PREP_MEM_OPERAND(offset, offset_imm, ip)
2921 MemOperand dst_op =
2922 MemOperand(dst, offset == no_reg ? r0 : offset, offset_imm);
2923
2924 if (protected_store_pc) *protected_store_pc = pc_offset();
2925
2926 MachineRepresentation rep = type.mem_rep();
2927 if (rep == MachineRepresentation::kWord8) {
2928 StoreLane8LE(src.fp(), dst_op, 15 - lane, r1);
2929 } else if (rep == MachineRepresentation::kWord16) {
2930 StoreLane16LE(src.fp(), dst_op, 7 - lane, r1);
2931 } else if (rep == MachineRepresentation::kWord32) {
2932 StoreLane32LE(src.fp(), dst_op, 3 - lane, r1);
2933 } else {
2934    DCHECK_EQ(MachineRepresentation::kWord64, rep);
2935    StoreLane64LE(src.fp(), dst_op, 1 - lane, r1);
2936 }
2937}
2938
2939void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
2940 LiftoffRegister rhs) {
2941 I64x2Mul(dst.fp(), lhs.fp(), rhs.fp(), r0, r1, ip);
2942}
2943
2944void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
2945 LiftoffRegister rhs) {
2946 I32x4GeU(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
2947}
2948
2949void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
2950 LiftoffRegister rhs) {
2951 I16x8GeU(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
2952}
2953
2954void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
2955 LiftoffRegister rhs) {
2956 I8x16GeU(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
2957}
2958
2959void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
2960 LiftoffRegister lhs,
2961 LiftoffRegister rhs) {
2962 Simd128Register src1 = lhs.fp();
2963 Simd128Register src2 = rhs.fp();
2964 Simd128Register dest = dst.fp();
2965 I8x16Swizzle(dest, src1, src2, r0, r1, kScratchDoubleReg);
2966}
2967
2968void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
2969 LiftoffRegister src) {
2970 F64x2PromoteLowF32x4(dst.fp(), src.fp(), kScratchDoubleReg, r0, r1, ip);
2971}
2972
2973void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
2974 LiftoffRegister src) {
2975 I64x2BitMask(dst.gp(), src.fp(), r0, kScratchDoubleReg);
2976}
2977
2978void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
2979 LiftoffRegister src) {
2980 I32x4BitMask(dst.gp(), src.fp(), r0, kScratchDoubleReg);
2981}
2982
2983void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
2984 LiftoffRegister lhs,
2985 LiftoffRegister rhs) {
2986 I32x4DotI16x8S(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
2987}
2988
2989void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
2990 LiftoffRegister src) {
2991 I16x8BitMask(dst.gp(), src.fp(), r0, kScratchDoubleReg);
2992}
2993
2994void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
2995 LiftoffRegister src1,
2996 LiftoffRegister src2) {
2997 Simd128Register s1 = src1.fp();
2998 Simd128Register s2 = src2.fp();
2999 Simd128Register dest = dst.fp();
3000 // Make sure temp registers are unique.
3001 Simd128Register temp1 =
3002 GetUnusedRegister(kFpReg, LiftoffRegList{dest, s1, s2}).fp();
3003 Simd128Register temp2 =
3004 GetUnusedRegister(kFpReg, LiftoffRegList{dest, s1, s2, temp1}).fp();
3005 I16x8Q15MulRSatS(dest, s1, s2, kScratchDoubleReg, temp1, temp2);
3006}
3007
3008void LiftoffAssembler::emit_i16x8_dot_i8x16_i7x16_s(LiftoffRegister dst,
3009 LiftoffRegister lhs,
3010 LiftoffRegister rhs) {
3011 I16x8DotI8x16S(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
3012}
3013
3014void LiftoffAssembler::emit_i32x4_dot_i8x16_i7x16_add_s(LiftoffRegister dst,
3015                                                    LiftoffRegister lhs,
3016 LiftoffRegister rhs,
3017 LiftoffRegister acc) {
3018 // Make sure temp register is unique.
3019 Simd128Register temp =
3020 GetUnusedRegister(kFpReg, LiftoffRegList{dst, lhs, rhs, acc}).fp();
3021 I32x4DotI8x16AddS(dst.fp(), lhs.fp(), rhs.fp(), acc.fp(), kScratchDoubleReg,
3022 temp);
3023}
3024
3025void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
3026 LiftoffRegister lhs,
3027 LiftoffRegister rhs,
3028 const uint8_t shuffle[16],
3029 bool is_swizzle) {
3030 // Remap the shuffle indices to match IBM lane numbering.
3031  // TODO(miladfarca): Put this in a function and share it with the instruction
3032 // selector.
3033 int max_index = 15;
3034 int total_lane_count = 2 * kSimd128Size;
3035 uint8_t shuffle_remapped[kSimd128Size];
3036 for (int i = 0; i < kSimd128Size; i++) {
3037 uint8_t current_index = shuffle[i];
3038 shuffle_remapped[i] = (current_index <= max_index
3039 ? max_index - current_index
3040 : total_lane_count - current_index + max_index);
3041 }
3042 uint64_t vals[2];
3043 memcpy(vals, shuffle_remapped, sizeof(shuffle_remapped));
3044#ifdef V8_TARGET_BIG_ENDIAN
3045 vals[0] = ByteReverse(vals[0]);
3046 vals[1] = ByteReverse(vals[1]);
3047#endif
3048 I8x16Shuffle(dst.fp(), lhs.fp(), rhs.fp(), vals[1], vals[0], r0, ip,
3049               kScratchDoubleReg);
3050}
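// Worked example of the remapping above: with max_index = 15 and
// total_lane_count = 32,
//   shuffle[i] = 3  (a lhs lane)  ->  15 - 3       = 12
//   shuffle[i] = 16 (a rhs lane)  ->  32 - 16 + 15 = 31
//   shuffle[i] = 31 (a rhs lane)  ->  32 - 31 + 15 = 16
// i.e. indices into each input vector are mirrored before being handed to
// I8x16Shuffle.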
3051
3052void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
3053 LiftoffRegister src) {
3054 V128AnyTrue(dst.gp(), src.fp(), r0);
3055}
3056
3057void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
3058 LiftoffRegister src) {
3059 I8x16BitMask(dst.gp(), src.fp(), r0, ip, kScratchDoubleReg);
3060}
3061
3062void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
3063 const uint8_t imms[16]) {
3064 uint64_t vals[2];
3065 memcpy(vals, imms, sizeof(vals));
3066#ifdef V8_TARGET_BIG_ENDIAN
3067 vals[0] = ByteReverse(vals[0]);
3068 vals[1] = ByteReverse(vals[1]);
3069#endif
3070 S128Const(dst.fp(), vals[1], vals[0], r0, ip);
3071}
3072
3073void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
3074 LiftoffRegister src1,
3075 LiftoffRegister src2,
3076 LiftoffRegister mask) {
3077 S128Select(dst.fp(), src1.fp(), src2.fp(), mask.fp());
3078}
3079
3080void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
3081 LiftoffRegister src) {
3082 I32x4SConvertF32x4(dst.fp(), src.fp(), kScratchDoubleReg, r0);
3083}
3084
3085void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
3086 LiftoffRegister src) {
3087 I32x4UConvertF32x4(dst.fp(), src.fp(), kScratchDoubleReg, r0);
3088}
3089
3090void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
3091 LiftoffRegister src) {
3092 F32x4SConvertI32x4(dst.fp(), src.fp(), kScratchDoubleReg, r0);
3093}
3094
3095void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
3096 LiftoffRegister src) {
3097 F32x4UConvertI32x4(dst.fp(), src.fp(), kScratchDoubleReg, r0);
3098}
3099
3100void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
3101 LiftoffRegister src) {
3102 F32x4DemoteF64x2Zero(dst.fp(), src.fp(), kScratchDoubleReg, r0, r1, ip);
3103}
3104
3105void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
3106 LiftoffRegister lhs,
3107 LiftoffRegister rhs) {
3108 I8x16SConvertI16x8(dst.fp(), lhs.fp(), rhs.fp());
3109}
3110
3111void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst,
3112 LiftoffRegister lhs,
3113 LiftoffRegister rhs) {
3114 I8x16UConvertI16x8(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
3115}
3116
3117void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst,
3118 LiftoffRegister lhs,
3119 LiftoffRegister rhs) {
3120 I16x8SConvertI32x4(dst.fp(), lhs.fp(), rhs.fp());
3121}
3122
3123void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
3124 LiftoffRegister lhs,
3125 LiftoffRegister rhs) {
3126 I16x8UConvertI32x4(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
3127}
3128
3129void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
3130                                                     LiftoffRegister src) {
3131 I32x4TruncSatF64x2SZero(dst.fp(), src.fp(), kScratchDoubleReg);
3132}
3133
3134void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
3135                                                     LiftoffRegister src) {
3136 I32x4TruncSatF64x2UZero(dst.fp(), src.fp(), kScratchDoubleReg);
3137}
3138
3139void LiftoffAssembler::emit_s128_relaxed_laneselect(LiftoffRegister dst,
3140 LiftoffRegister src1,
3141 LiftoffRegister src2,
3142 LiftoffRegister mask,
3143 int lane_width) {
3144 // S390 uses bytewise selection for all lane widths.
3145 emit_s128_select(dst, src1, src2, mask);
3146}
3147
3148void LiftoffAssembler::StackCheck(Label* ool_code) {
3149 Register limit_address = ip;
3150  LoadStackLimit(limit_address, StackLimitKind::kRealStackLimit);
3151  CmpU64(sp, limit_address);
3152 b(le, ool_code);
3153}
3154
3155void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
3156  // Asserts unreachable within the wasm code.
3157  MacroAssembler::AssertUnreachable(reason);
3158}
3159
3160void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
3161 MultiPush(regs.GetGpList());
3162 MultiPushF64OrV128(regs.GetFpList(), ip);
3163}
3164
3165void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
3166 MultiPopF64OrV128(regs.GetFpList(), ip);
3167 MultiPop(regs.GetGpList());
3168}
3169
3170void LiftoffAssembler::RecordSpillsInSafepoint(
3171    SafepointTableBuilder::Safepoint& safepoint, LiftoffRegList all_spills,
3172 LiftoffRegList ref_spills, int spill_offset) {
3173 LiftoffRegList fp_spills = all_spills & kFpCacheRegList;
3174 int spill_space_size = fp_spills.GetNumRegsSet() * kSimd128Size;
3175 LiftoffRegList gp_spills = all_spills & kGpCacheRegList;
3176 while (!gp_spills.is_empty()) {
3177 LiftoffRegister reg = gp_spills.GetLastRegSet();
3178 if (ref_spills.has(reg)) {
3179 safepoint.DefineTaggedStackSlot(spill_offset);
3180 }
3181 gp_spills.clear(reg);
3182 ++spill_offset;
3183 spill_space_size += kSystemPointerSize;
3184 }
3185 // Record the number of additional spill slots.
3186 RecordOolSpillSpaceSize(spill_space_size);
3187}
3188
3189void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
3190 Drop(num_stack_slots);
3191 Ret();
3192}
3193
3194void LiftoffAssembler::CallCWithStackBuffer(
3195    const std::initializer_list<VarState> args, const LiftoffRegister* rets,
3196 ValueKind return_kind, ValueKind out_argument_kind, int stack_bytes,
3197 ExternalReference ext_ref) {
3198 int size = RoundUp(stack_bytes, 8);
3199
3200 lay(sp, MemOperand(sp, -size));
3201
3202 int arg_offset = 0;
3203 for (const VarState& arg : args) {
3204 MemOperand dst{sp, arg_offset};
3205 liftoff::StoreToMemory(this, dst, arg, ip);
3206 arg_offset += value_kind_size(arg.kind());
3207 }
3208 DCHECK_LE(arg_offset, stack_bytes);
3209
3210 // Pass a pointer to the buffer with the arguments to the C function.
3211 mov(r2, sp);
3212
3213 // Now call the C function.
3214 constexpr int kNumCCallArgs = 1;
3215 PrepareCallCFunction(kNumCCallArgs, no_reg);
3216 CallCFunction(ext_ref, kNumCCallArgs);
3217
3218 // Move return value to the right register.
3219 const LiftoffRegister* result_reg = rets;
3220 if (return_kind != kVoid) {
3221 constexpr Register kReturnReg = r2;
3222 if (kReturnReg != rets->gp()) {
3223 Move(*rets, LiftoffRegister(kReturnReg), return_kind);
3224 }
3225 result_reg++;
3226 }
3227
3228 // Load potential output value from the buffer on the stack.
3229 if (out_argument_kind != kVoid) {
3230 switch (out_argument_kind) {
3231 case kI16:
3232 LoadS16(result_reg->gp(), MemOperand(sp));
3233 break;
3234 case kI32:
3235 LoadS32(result_reg->gp(), MemOperand(sp));
3236 break;
3237 case kI64:
3238 case kRefNull:
3239 case kRef:
3240 LoadU64(result_reg->gp(), MemOperand(sp));
3241 break;
3242 case kF32:
3243 LoadF32(result_reg->fp(), MemOperand(sp));
3244 break;
3245 case kF64:
3246 LoadF64(result_reg->fp(), MemOperand(sp));
3247 break;
3248 case kS128:
3249 LoadV128(result_reg->fp(), MemOperand(sp), ip);
3250 break;
3251 default:
3252 UNREACHABLE();
3253 }
3254 }
3255 lay(sp, MemOperand(sp, size));
3256}
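// Sketch of the convention used above: the arguments are spilled into a small
// stack buffer, r2 (the first C argument register on s390) receives a pointer
// to that buffer, and any out-argument is read back from the same buffer
// before the stack space is released.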
3257
3258void LiftoffAssembler::CallC(const std::initializer_list<VarState> args,
3259 ExternalReference ext_ref) {
3260 // First, prepare the stack for the C call.
3261 int num_args = static_cast<int>(args.size());
3262 PrepareCallCFunction(num_args, r0);
3263
3264 // Then execute the parallel register move and also move values to parameter
3265 // stack slots.
3266 int reg_args = 0;
3267 int stack_args = 0;
3268 ParallelMove parallel_move{this};
3269 for (const VarState& arg : args) {
3270 if (reg_args < int{arraysize(kCArgRegs)}) {
3271 parallel_move.LoadIntoRegister(LiftoffRegister{kCArgRegs[reg_args]}, arg);
3272 ++reg_args;
3273 } else {
3274 int bias = 0;
3275      // On BE machines, values with fewer than 8 bytes are right-justified.
3276      // The bias here is relative to the stack pointer.
3277 if (arg.kind() == kI32 || arg.kind() == kF32) bias = -stack_bias;
3278 int offset =
3280 MemOperand dst{sp, offset + bias};
3281 liftoff::StoreToMemory(this, dst, arg, ip);
3282 ++stack_args;
3283 }
3284 }
3285 parallel_move.Execute();
3286
3287 // Now call the C function.
3288 CallCFunction(ext_ref, num_args);
3289}
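// Note on the bias above: stack parameter slots are one system pointer
// (8 bytes) wide, while kI32/kF32 arguments occupy only 4 bytes. On this
// big-endian target such values sit right-justified within their slot, so the
// store offset is adjusted by stack_bias to hit the half of the slot that the
// callee reads.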
3290
3291void LiftoffAssembler::CallNativeWasmCode(Address addr) {
3292  Call(addr, RelocInfo::WASM_CALL);
3293}
3294
3295void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
3296  Jump(addr, RelocInfo::WASM_CALL);
3297}
3298
3299void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
3300                                    compiler::CallDescriptor* call_descriptor,
3301 Register target) {
3302 DCHECK(target != no_reg);
3303 CallWasmCodePointer(target);
3304}
3305
3306void LiftoffAssembler::TailCallIndirect(
3307    compiler::CallDescriptor* call_descriptor, Register target) {
3308 DCHECK(target != no_reg);
3309 CallWasmCodePointer(target, CallJumpMode::kTailCall);
3310}
3311
3312void LiftoffAssembler::CallBuiltin(Builtin builtin) {
3313  // A direct call to a builtin. Just encode the builtin index. This will be
3314 // patched at relocation.
3315 Call(static_cast<Address>(builtin), RelocInfo::WASM_STUB_CALL);
3316}
3317
3318void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
3319 lay(sp, MemOperand(sp, -size));
3320 MacroAssembler::Move(addr, sp);
3321}
3322
3323void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
3324 lay(sp, MemOperand(sp, size));
3325}
3326
3327void LiftoffAssembler::MaybeOSR() {}
3328
3329void LiftoffAssembler::emit_set_if_nan(Register dst,
3330                                       DoubleRegister src,
3331 ValueKind kind) {
3332 Label return_nan, done;
3333 if (kind == kF32) {
3334 cebr(src, src);
3335 bunordered(&return_nan);
3336 } else {
3338 cdbr(src, src);
3339 bunordered(&return_nan);
3340 }
3341 b(&done);
3342 bind(&return_nan);
3343 StoreF32(src, MemOperand(dst));
3344 bind(&done);
3345}
3346
3347void LiftoffAssembler::emit_s128_set_if_nan(Register dst,
3348                                             LiftoffRegister src,
3349 Register tmp_gp,
3350 LiftoffRegister tmp_s128,
3351 ValueKind lane_kind) {
3352 Label return_nan, done;
3353 if (lane_kind == kF32) {
3354 vfce(tmp_s128.fp(), src.fp(), src.fp(), Condition(1), Condition(0),
3355 Condition(2));
3356 b(Condition(0x5), &return_nan); // If any or all are NaN.
3357 } else {
3358 DCHECK_EQ(lane_kind, kF64);
3359 vfce(tmp_s128.fp(), src.fp(), src.fp(), Condition(1), Condition(0),
3360 Condition(3));
3361 b(Condition(0x5), &return_nan);
3362 }
3363 b(&done);
3364 bind(&return_nan);
3365 mov(r0, Operand(1));
3366 StoreU32(r0, MemOperand(dst));
3367 bind(&done);
3368}
3369
3370void LiftoffAssembler::emit_store_nonzero(Register dst) {
3371 StoreU32(dst, MemOperand(dst));
3372}
3373
3374void LiftoffStackSlots::Construct(int param_slots) {
3375 DCHECK_LT(0, slots_.size());
3377 int last_stack_slot = param_slots;
3378 for (auto& slot : slots_) {
3379 const int stack_slot = slot.dst_slot_;
3380 int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
3381 DCHECK_LT(0, stack_decrement);
3382 last_stack_slot = stack_slot;
3383 const LiftoffAssembler::VarState& src = slot.src_;
3384 switch (src.loc()) {
3385      case LiftoffAssembler::VarState::kStack: {
3386        switch (src.kind()) {
3387 case kI32:
3388 case kRef:
3389 case kRefNull:
3390 case kI64: {
3391 asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
3392 UseScratchRegisterScope temps(asm_);
3393 Register scratch = temps.Acquire();
3394 asm_->LoadU64(scratch, liftoff::GetStackSlot(slot.src_offset_));
3395 asm_->Push(scratch);
3396 break;
3397 }
3398 case kF32: {
3399 asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
3401 liftoff::GetStackSlot(slot.src_offset_ + stack_bias));
3404 break;
3405 }
3406 case kF64: {
3407 asm_->AllocateStackSpace(stack_decrement - kDoubleSize);
3409 liftoff::GetStackSlot(slot.src_offset_));
3411 break;
3412 }
3413 case kS128: {
3414 asm_->AllocateStackSpace(stack_decrement - kSimd128Size);
3415 UseScratchRegisterScope temps(asm_);
3416 Register scratch = temps.Acquire();
3418 liftoff::GetStackSlot(slot.src_offset_), scratch);
3419 asm_->lay(sp, MemOperand(sp, -kSimd128Size));
3421 break;
3422 }
3423 default:
3424 UNREACHABLE();
3425 }
3426 break;
3427 }
3428      case LiftoffAssembler::VarState::kRegister: {
3429        int pushed_bytes = SlotSizeInBytes(slot);
3430 asm_->AllocateStackSpace(stack_decrement - pushed_bytes);
3431 switch (src.kind()) {
3432 case kI64:
3433 case kI32:
3434 case kRef:
3435 case kRefNull:
3436 asm_->push(src.reg().gp());
3437 break;
3438 case kF32:
3440 asm_->StoreF32(src.reg().fp(), MemOperand(sp));
3441 break;
3442 case kF64:
3443 asm_->push(src.reg().fp());
3444 break;
3445 case kS128: {
3446 UseScratchRegisterScope temps(asm_);
3447 Register scratch = temps.Acquire();
3448 asm_->lay(sp, MemOperand(sp, -kSimd128Size));
3449 asm_->StoreV128(src.reg().fp(), MemOperand(sp), scratch);
3450 break;
3451 }
3452 default:
3453 UNREACHABLE();
3454 }
3455 break;
3456 }
3457      case LiftoffAssembler::VarState::kIntConst: {
3458        asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
3459 DCHECK(src.kind() == kI32 || src.kind() == kI64);
3460 UseScratchRegisterScope temps(asm_);
3461 Register scratch = temps.Acquire();
3462
3463 switch (src.kind()) {
3464 case kI32:
3465 asm_->mov(scratch, Operand(src.i32_const()));
3466 break;
3467 case kI64:
3468 asm_->mov(scratch, Operand(int64_t{slot.src_.i32_const()}));
3469 break;
3470 default:
3471 UNREACHABLE();
3472 }
3473 asm_->push(scratch);
3474 break;
3475 }
3476 }
3477 }
3478}
3479
3480} // namespace v8::internal::wasm
3481
3482#undef BAILOUT
3483
3484#endif // V8_WASM_BASELINE_S390_LIFTOFF_ASSEMBLER_S390_INL_H_
void ConvertDoubleToInt64(const DoubleRegister double_input, const Register dst, const DoubleRegister double_dst, FPRoundingMode rounding_mode=kRoundToZero)
void LoadV128(Simd128Register dst, const MemOperand &mem, Register scratch)
void StoreLane64LE(Simd128Register src, const MemOperand &mem, int lane, Register scratch1, Simd128Register scratch2)
void I8x16Swizzle(Simd128Register dst, Simd128Register src1, Simd128Register src2, Register scratch1, Register scratch2, Simd128Register scratch3)
void AndP(Register dst, Register src)
void LoadLane16LE(Simd128Register dst, const MemOperand &mem, int lane, Register scratch1, Simd128Register scratch2)
static constexpr MainThreadFlags kPointersToHereAreInterestingMask
static constexpr MainThreadFlags kPointersFromHereAreInterestingMask
static V8_INLINE Operand Zero()
static constexpr Tagged< Smi > FromInt(int value)
Definition smi.h:38
static constexpr int32_t TypeToMarker(Type type)
Definition frames.h:196
static constexpr int kFrameTypeOffset
void emit_i8x16_swizzle(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_store_nonzero_if_nan(Register dst, DoubleRegister src, ValueKind kind)
void AtomicXor(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, bool i64_offset)
void emit_i64_shli(LiftoffRegister dst, LiftoffRegister src, int32_t amount)
void emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
bool emit_f16x8_qfms(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, LiftoffRegister src3)
bool emit_f16x8_extract_lane(LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx)
void emit_i32_rems(Register dst, Register lhs, Register rhs, Label *trap_rem_by_zero)
void emit_i32_eqz(Register dst, Register src)
void emit_i32x4_uconvert_f32x4(LiftoffRegister dst, LiftoffRegister src)
void emit_i8x16_uconvert_i16x8(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void FillI64Half(Register, int offset, RegPairHalf)
void emit_f32x4_demote_f64x2_zero(LiftoffRegister dst, LiftoffRegister src)
bool emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label *trap_rem_by_zero)
void CallCWithStackBuffer(const std::initializer_list< VarState > args, const LiftoffRegister *rets, ValueKind return_kind, ValueKind out_argument_kind, int stack_bytes, ExternalReference ext_ref)
void emit_i32x4_dot_i16x8_s(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i64_muli(LiftoffRegister dst, LiftoffRegister lhs, int32_t imm)
void emit_i16x8_dot_i8x16_i7x16_s(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2)
void AtomicAdd(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, bool i64_offset)
void AtomicSub(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, bool i64_offset)
void LoadTransform(LiftoffRegister dst, Register src_addr, Register offset_reg, uintptr_t offset_imm, LoadType type, LoadTransformationKind transform, uint32_t *protected_load_pc, bool i64_offset)
void emit_s128_store_nonzero_if_nan(Register dst, LiftoffRegister src, Register tmp_gp, LiftoffRegister tmp_s128, ValueKind lane_kind)
void AtomicAnd(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, bool i64_offset)
void emit_i16x8_sconvert_i32x4(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void AtomicCompareExchange(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister value, StoreType type, bool i64_offset)
void emit_i32x4_dot_i8x16_i7x16_add_s(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, LiftoffRegister acc)
void emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs)
void emit_i8x16_sconvert_i16x8(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_f64_set_cond(Condition condition, Register dst, DoubleRegister lhs, DoubleRegister rhs)
void Fill(LiftoffRegister, int offset, ValueKind)
void LoadFullPointer(Register dst, Register src_addr, int32_t offset_imm)
bool emit_f16x8_replace_lane(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, uint8_t imm_lane_idx)
void Store(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister src, StoreType type, LiftoffRegList pinned, uint32_t *protected_store_pc=nullptr, bool is_store_mem=false, bool i64_offset=false)
void Load(LiftoffRegister dst, Register src_addr, Register offset_reg, uintptr_t offset_imm, LoadType type, uint32_t *protected_load_pc=nullptr, bool is_load_mem=false, bool i64_offset=false, bool needs_shift=false)
void emit_f32x4_uconvert_i32x4(LiftoffRegister dst, LiftoffRegister src)
void emit_i32_divs(Register dst, Register lhs, Register rhs, Label *trap_div_by_zero, Label *trap_div_unrepresentable)
void emit_f64x2_promote_low_f32x4(LiftoffRegister dst, LiftoffRegister src)
void LoadLane(LiftoffRegister dst, LiftoffRegister src, Register addr, Register offset_reg, uintptr_t offset_imm, LoadType type, uint8_t lane, uint32_t *protected_load_pc, bool i64_offset)
void emit_i32x4_bitmask(LiftoffRegister dst, LiftoffRegister src)
void emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs)
void CallFrameSetupStub(int declared_function_index)
void emit_i32_remu(Register dst, Register lhs, Register rhs, Label *trap_rem_by_zero)
void LoadSpillAddress(Register dst, int offset, ValueKind kind)
void StoreCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx, ValueKind, Register frame_pointer)
void AtomicExchange(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, bool i64_offset)
void emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_s128_select(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, LiftoffRegister mask)
bool emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label *trap_rem_by_zero)
void CallC(const std::initializer_list< VarState > args, ExternalReference ext_ref)
void emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst, LiftoffRegister src)
void emit_i64x2_bitmask(LiftoffRegister dst, LiftoffRegister src)
bool emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label *trap_div_by_zero)
void DropStackSlotsAndRet(uint32_t num_stack_slots)
void emit_s128_relaxed_laneselect(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, LiftoffRegister mask, int lane_width)
void emit_i32x4_sconvert_f32x4(LiftoffRegister dst, LiftoffRegister src)
void LoadConstant(LiftoffRegister, WasmValue)
void PrepareTailCall(int num_callee_stack_params, int stack_param_delta)
void emit_i32_divu(Register dst, Register lhs, Register rhs, Label *trap_div_by_zero)
void emit_cond_jump(Condition, Label *, ValueKind value, Register lhs, Register rhs, const FreezeCacheState &frozen)
void LoadFromInstance(Register dst, Register instance, int offset, int size)
void emit_smi_check(Register obj, Label *target, SmiCheckMode mode, const FreezeCacheState &frozen)
void LoadProtectedPointer(Register dst, Register src_addr, int32_t offset)
void emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst, LiftoffRegister src)
void emit_f32_set_cond(Condition condition, Register dst, DoubleRegister lhs, DoubleRegister rhs)
void emit_i64_set_cond(Condition condition, Register dst, LiftoffRegister lhs, LiftoffRegister rhs)
void MoveStackValue(uint32_t dst_offset, uint32_t src_offset, ValueKind)
void Move(LiftoffRegister dst, LiftoffRegister src, ValueKind)
void emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void emit_i16x8_uconvert_i32x4(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void AtomicStore(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister src, StoreType type, LiftoffRegList pinned, bool i64_offset)
void PatchPrepareStackFrame(int offset, SafepointTableBuilder *, bool feedback_vector_slot, size_t stack_param_slots)
void emit_f32x4_sconvert_i32x4(LiftoffRegister dst, LiftoffRegister src)
void emit_ptrsize_cond_jumpi(Condition, Label *, Register lhs, int32_t imm, const FreezeCacheState &frozen)
void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx, ValueKind)
void emit_i32_set_cond(Condition, Register dst, Register lhs, Register rhs)
bool emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label *trap_div_by_zero, Label *trap_div_unrepresentable)
void TailCallIndirect(compiler::CallDescriptor *call_descriptor, Register target)
void emit_i16x8_q15mulr_sat_s(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2)
void emit_i16x8_bitmask(LiftoffRegister dst, LiftoffRegister src)
void AllocateStackSlot(Register addr, uint32_t size)
void LoadTaggedPointer(Register dst, Register src_addr, Register offset_reg, int32_t offset_imm, uint32_t *protected_load_pc=nullptr, bool offset_reg_needs_shift=false)
void emit_i32_cond_jumpi(Condition, Label *, Register lhs, int imm, const FreezeCacheState &frozen)
void emit_i64_eqz(Register dst, LiftoffRegister src)
void StoreTaggedPointer(Register dst_addr, Register offset_reg, int32_t offset_imm, Register src, LiftoffRegList pinned, uint32_t *protected_store_pc=nullptr, SkipWriteBarrier=kNoSkipWriteBarrier)
void bailout(LiftoffBailoutReason reason, const char *detail)
void IncrementSmi(LiftoffRegister dst, int offset)
LiftoffRegister GetUnusedRegister(RegClass rc, std::initializer_list< LiftoffRegister > try_first, LiftoffRegList pinned)
void emit_i8x16_shuffle(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, const uint8_t shuffle[16], bool is_swizzle)
void AtomicOr(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, bool i64_offset)
bool emit_select(LiftoffRegister dst, Register condition, LiftoffRegister true_value, LiftoffRegister false_value, ValueKind kind)
bool emit_type_conversion(WasmOpcode opcode, LiftoffRegister dst, LiftoffRegister src, Label *trap=nullptr)
void AtomicLoad(LiftoffRegister dst, Register src_addr, Register offset_reg, uintptr_t offset_imm, LoadType type, LiftoffRegList pinned, bool i64_offset)
void LoadReturnStackSlot(LiftoffRegister, int offset, ValueKind)
void emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs)
void LoadTaggedPointerFromInstance(Register dst, Register instance, int offset)
void emit_v128_anytrue(LiftoffRegister dst, LiftoffRegister src)
void emit_s128_const(LiftoffRegister dst, const uint8_t imms[16])
void StoreLane(Register dst, Register offset, uintptr_t offset_imm, LiftoffRegister src, StoreType type, uint8_t lane, uint32_t *protected_store_pc, bool i64_offset)
void CheckTierUp(int declared_func_index, int budget_used, Label *ool_label, const FreezeCacheState &frozen)
void CallIndirect(const ValueKindSig *sig, compiler::CallDescriptor *call_descriptor, Register target)
void RecordSpillsInSafepoint(SafepointTableBuilder::Safepoint &safepoint, LiftoffRegList all_spills, LiftoffRegList ref_spills, int spill_offset)
bool emit_f16x8_qfma(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, LiftoffRegister src3)
void LoadTrustedPointer(Register dst, Register src_addr, int offset, IndirectPointerTag tag)
void emit_i8x16_bitmask(LiftoffRegister dst, LiftoffRegister src)
constexpr unsigned GetNumRegsSet() const
base::SmallVector< Slot, 8 > slots_
static int SlotSizeInBytes(const Slot &slot)
static constexpr int ToTagged(int offset)
#define EMIT_SIMD_UNOP(name)
#define EMIT_SIMD_QFM(name)
#define EMIT_SIMD_ALL_TRUE(name)
#define EMIT_SIMD_EXT_ADD_PAIRWISE(name)
#define SIMD_EXTRACT_LANE_LIST(V)
#define EMIT_SIMD_REPLACE_LANE(name, stype)
#define EMIT_SIMD_EXT_MUL(name)
#define EMIT_SIMD_ADD_SUB_SAT(name)
#define SIMD_REPLACE_LANE_LIST(V)
#define EMIT_SIMD_EXTRACT_LANE(name, dtype)
#define COMPRESS_POINTERS_BOOL
Definition globals.h:99
#define V8_ENABLE_SANDBOX_BOOL
Definition globals.h:160
int start
base::Vector< const DirectHandle< Object > > args
Definition execution.cc:74
Label label
int32_t offset
double remainder
ZoneVector< RpoNumber > & result
#define EMIT_UNOP_FUNCTION(name, instr, dtype, stype, dcast, scast, rcast, ret, return_type)
#define SIMD_VISIT_RELAXED_BINOP(name, op)
#define SIMD_RELAXED_BINOP_LIST(V)
#define EMIT_BINOP_FUNCTION(name, instr, dtype, stype1, stype2, dcast, scast1, scast2, rcast, ret, return_type)
#define SIMD_RELAXED_UNOP_LIST(V)
#define EMIT_SIMD_SHIFT_RR(name, op)
#define SIMD_SHIFT_RI_LIST(V)
#define UNOP_LIST(V)
#define F16_UNOP_LIST(V)
#define SIMD_VISIT_RELAXED_UNOP(name, op)
#define VISIT_F16_BINOP(name)
#define F16_BINOP_LIST(V)
#define VISIT_F16_UNOP(name)
#define EMIT_SIMD_SHIFT_RI(name, op, mask)
#define BINOP_LIST(V)
#define SIMD_SHIFT_RR_LIST(V)
#define EMIT_SIMD_BINOP_RR(name, op)
#define EMIT_SET_CONDITION(dst, cond)
#define PREP_MEM_OPERAND(offset_reg, offset_imm, scratch)
#define SIMD_BINOP_RR_LIST(V)
#define EMIT_EQZ(test, src)
LiftoffRegister reg
MovableLabel continuation
LiftoffRegList regs_to_save
std::optional< OolTrapLabel > trap
uint32_t const mask
#define SIMD_UNOP_LIST(V)
#define SIMD_ALL_TRUE_LIST(V)
#define SIMD_QFM_LIST(V)
#define SIMD_EXT_ADD_PAIRWISE_LIST(V)
#define SIMD_EXT_MUL_LIST(V)
#define SIMD_ADD_SUB_SAT_LIST(V)
int int32_t
Definition unicode.cc:40
constexpr bool IsPowerOfTwo(T value)
Definition bits.h:187
constexpr int WhichPowerOfTwo(T value)
Definition bits.h:195
void StoreToMemory(LiftoffAssembler *assm, MemOperand dst, const LiftoffAssembler::VarState &src)
constexpr DoubleRegister kFpReturnRegisters[]
constexpr Register kGpParamRegisters[]
constexpr DoubleRegister kFpParamRegisters[]
constexpr Register kGpReturnRegisters[]
int declared_function_index(const WasmModule *module, int func_index)
constexpr int value_kind_size(ValueKind kind)
static constexpr LiftoffRegList kGpCacheRegList
static constexpr LiftoffRegList kFpCacheRegList
constexpr bool is_reference(ValueKind kind)
LiftoffAssembler::ValueKindSig ValueKindSig
constexpr Register no_reg
constexpr int kMinInt
Definition globals.h:375
constexpr int kSimd128Size
Definition globals.h:706
DwVfpRegister DoubleRegister
constexpr DoubleRegister kScratchDoubleReg
const int kStackFrameExtraParamSlot
QwNeonRegister Simd128Register
constexpr int kSystemPointerSize
Definition globals.h:410
constexpr Register kReturnRegister0
constexpr bool SmiValuesAre31Bits()
constexpr int kInt32Size
Definition globals.h:401
V8_EXPORT_PRIVATE FlagValues v8_flags
constexpr VFPRoundingMode kRoundToZero
constexpr Register kCArgRegs[]
std::unique_ptr< AssemblerBuffer > ExternalAssemblerBuffer(void *start, int size)
Definition assembler.cc:161
constexpr int kDoubleSize
Definition globals.h:407
Condition to_condition(Condition cond)
bool is_signed(Condition cond)
static V ByteReverse(V value)
Definition utils.h:796
#define DCHECK_LE(v1, v2)
Definition logging.h:490
#define CHECK(condition)
Definition logging.h:124
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
constexpr T RoundUp(T x, intptr_t m)
Definition macros.h:387
#define arraysize(array)
Definition macros.h:67
#define V8_LIKELY(condition)
Definition v8config.h:661
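
Note on the power-of-two helpers listed above: the needs_shift / offset_reg_needs_shift parameters on the Load and LoadTaggedPointer entries scale an index register by a power-of-two element size, which is the idiom IsPowerOfTwo() and WhichPowerOfTwo() support. The following standalone sketch is an illustration only (the helper bodies are simplified re-implementations written for this note, not the V8 versions), showing how a multiply by a power-of-two size folds into a left shift:

// Standalone illustration (assumption: simplified re-implementations, not the
// V8 versions) of the power-of-two scaling idiom behind IsPowerOfTwo() and
// WhichPowerOfTwo().
#include <cassert>
#include <cstdint>

constexpr bool IsPowerOfTwo(uint32_t value) {
  return value > 0 && (value & (value - 1)) == 0;
}

constexpr int WhichPowerOfTwo(uint32_t value) {
  // Precondition: IsPowerOfTwo(value). Returns log2(value).
  int shift = 0;
  while ((1u << shift) != value) ++shift;
  return shift;
}

int main() {
  // Scaling an element index by an 8-byte element size becomes a shift by 3,
  // which is what a "needs_shift" style load folds into the address.
  constexpr uint32_t kElementSize = 8;
  static_assert(IsPowerOfTwo(kElementSize), "element size must be a power of two");
  uint32_t index = 5;
  uint32_t byte_offset = index << WhichPowerOfTwo(kElementSize);
  assert(byte_offset == 40);
  return 0;
}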