v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
liftoff-assembler-ppc-inl.h
1// Copyright 2017 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_WASM_BASELINE_PPC_LIFTOFF_ASSEMBLER_PPC_INL_H_
6#define V8_WASM_BASELINE_PPC_LIFTOFF_ASSEMBLER_PPC_INL_H_
7
17
18namespace v8::internal::wasm {
19
20namespace liftoff {
21
22//  half
23//  slot        Frame
24//  -----+--------------------+---------------------------
25//  n+3  |   parameter n      |
26//  ...  |       ...          |
27//   4   |   parameter 1      | or parameter 2
28//   3   |   parameter 0      | or parameter 1
29//   2   |  (result address)  | or parameter 0
30//  -----+--------------------+---------------------------
31//   2   | return addr (lr)   |
32//   1   | previous frame (fp)|
33//   0   | const pool (r28)   | if const pool is enabled
34//  -----+--------------------+  <-- frame ptr (fp) or cp
35//  -1   | StackFrame::WASM   |
36//  -2   |      instance      |
37//  -3   |  feedback vector   |
38//  -4   |  tiering budget    |
39//  -----+--------------------+---------------------------
40//  -5   |    slot 0 (high)   |   ^
41//  -6   |    slot 0 (low)    |   |
42//  -7   |    slot 1 (high)   | Frame slots
43//  -8   |    slot 1 (low)    |   |
44//       |                    |   v
45//  -----+--------------------+  <-- stack ptr (sp)
46//
47//
48
49
50// TODO(tpearson): Much of this logic is already implemented in
51// the MacroAssembler GenerateMemoryOperationWithAlignPrefixed()
52// macro. Deduplicate this code using that macro where possible.
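// Builds a MemOperand addressing addr + (optionally zero-extended and shifted)
// offset + offset_imm, materializing immediates that do not fit in 31 bits in
// the scratch register.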
53inline MemOperand GetMemOp(LiftoffAssembler* assm, Register addr,
54 Register offset, uintptr_t offset_imm,
55 Register scratch, bool i64_offset = false,
56 unsigned shift_amount = 0) {
57 Register kScratchReg2 = scratch;
60 if (offset != no_reg) {
61 if (!i64_offset) {
62 // Extract the least significant 32 bits without sign extension.
63 assm->ExtractBitRange(kScratchReg2, offset, 31, 0, LeaveRC, false);
65 }
66 if (shift_amount != 0) {
67 assm->ShiftLeftU64(kScratchReg2, offset, Operand(shift_amount));
68 }
69 assm->AddS64(kScratchReg2, offset, addr);
70 addr = kScratchReg2;
71 }
72 if (is_int31(offset_imm)) {
73 int32_t offset_imm32 = static_cast<int32_t>(offset_imm);
74 return MemOperand(addr, offset_imm32);
75 } else {
76 // Offset immediate does not fit in 31 bits.
77 assm->mov(kScratchReg2, Operand(offset_imm));
78 assm->AddS64(kScratchReg2, addr, kScratchReg2);
79 return MemOperand(kScratchReg2, 0);
80 }
81}
82
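// Liftoff spill-slot offsets are positive and grow downwards from fp (see the
// frame layout above), hence the negation.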
83inline MemOperand GetStackSlot(uint32_t offset) {
84 return MemOperand(fp, -static_cast<int32_t>(offset));
85}
86
89}
90
93 Register scratch1, Register scratch2) {
94 if (src.is_reg()) {
95 switch (src.kind()) {
96 case kI16:
97 assm->StoreU16(src.reg().gp(), dst, scratch1);
98 break;
99 case kI32:
100 assm->StoreU32(src.reg().gp(), dst, scratch1);
101 break;
102 case kI64:
103 assm->StoreU64(src.reg().gp(), dst, scratch1);
104 break;
105 case kF32:
106 assm->StoreF32(src.reg().fp(), dst, scratch1);
107 break;
108 case kF64:
109 assm->StoreF64(src.reg().fp(), dst, scratch1);
110 break;
111 case kS128:
112 assm->StoreSimd128(src.reg().fp().toSimd(), dst, scratch1);
113 break;
114 default:
115 UNREACHABLE();
116 }
117 } else if (src.is_const()) {
118 if (src.kind() == kI32) {
119 assm->mov(scratch2, Operand(src.i32_const()));
120 assm->StoreU32(scratch2, dst, scratch1);
121 } else {
122 assm->mov(scratch2, Operand(static_cast<int64_t>(src.i32_const())));
123 assm->StoreU64(scratch2, dst, scratch1);
124 }
125 } else if (value_kind_size(src.kind()) == 4) {
126 assm->LoadU32(scratch2, liftoff::GetStackSlot(src.offset()), scratch1);
127 assm->StoreU32(scratch2, dst, scratch1);
128 } else {
129 DCHECK_EQ(8, value_kind_size(src.kind()));
130 assm->LoadU64(scratch2, liftoff::GetStackSlot(src.offset()), scratch1);
131 assm->StoreU64(scratch2, dst, scratch1);
132 }
133}
134
135} // namespace liftoff
136
137int LiftoffAssembler::PrepareStackFrame() {
138 int offset = pc_offset();
139 addi(sp, sp, Operand::Zero());
140 return offset;
141}
142
143void LiftoffAssembler::CallFrameSetupStub(int declared_function_index) {
144// The standard library used by gcc tryjobs does not consider `std::find` to be
145// `constexpr`, so wrap it in a `#ifdef __clang__` block.
146#ifdef __clang__
147 static_assert(std::find(std::begin(wasm::kGpParamRegisters),
148 std::end(wasm::kGpParamRegisters),
149 kLiftoffFrameSetupFunctionReg) ==
150 std::end(wasm::kGpParamRegisters));
151#endif
152
153 Register scratch = ip;
154 mov(scratch, Operand(StackFrame::TypeToMarker(StackFrame::WASM)));
155 PushCommonFrame(scratch);
156 LoadConstant(LiftoffRegister(kLiftoffFrameSetupFunctionReg),
157 WasmValue(declared_function_index));
158 CallBuiltin(Builtin::kWasmLiftoffFrameSetup);
159}
160
161void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
162 int stack_param_delta) {
163 Register scratch = ip;
164 // Push the return address and frame pointer to complete the stack frame.
165 AddS64(sp, sp, Operand(-2 * kSystemPointerSize), r0);
166 LoadU64(scratch, MemOperand(fp, kSystemPointerSize), r0);
168 LoadU64(scratch, MemOperand(fp), r0);
169 StoreU64(scratch, MemOperand(sp), r0);
170
171 // Shift the whole frame upwards.
172 int slot_count = num_callee_stack_params + 2;
173 for (int i = slot_count - 1; i >= 0; --i) {
175 StoreU64(scratch,
176 MemOperand(fp, (i - stack_param_delta) * kSystemPointerSize), r0);
177 }
178
179 // Set the new stack and frame pointer.
180 AddS64(sp, fp, Operand(-stack_param_delta * kSystemPointerSize), r0);
181 Pop(r0, fp);
182 mtlr(r0);
183}
184
186
187void LiftoffAssembler::PatchPrepareStackFrame(
188 int offset, SafepointTableBuilder* safepoint_table_builder,
189 bool feedback_vector_slot, size_t stack_param_slots) {
190 int frame_size =
193 // The frame setup builtin also pushes the feedback vector.
194 if (feedback_vector_slot) {
195 frame_size -= kSystemPointerSize;
196 }
197
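// The patching assembler writes over the placeholder reserved by
// PrepareStackFrame at {offset}: either the real stack decrement (small
// frames) or a branch to the out-of-line check emitted below.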
198 Assembler patching_assembler(
199 AssemblerOptions{},
201
202 if (V8_LIKELY(frame_size < 4 * KB)) {
203 patching_assembler.addi(sp, sp, Operand(-frame_size));
204 return;
205 }
206
207 // The frame size is bigger than 4KB, so we might overflow the available stack
208 // space if we first allocate the frame and then do the stack check (we will
209 // need some remaining stack space for throwing the exception). That's why we
210 // check the available stack space before we allocate the frame. To do this we
211 // replace the {__ sub(sp, sp, framesize)} with a jump to OOL code that does
212 // this "extended stack check".
213 //
214 // The OOL code can simply be generated here with the normal assembler,
215 // because all other code generation, including OOL code, has already finished
216 // when {PatchPrepareStackFrame} is called. The function prologue then jumps
217 // to the current {pc_offset()} to execute the OOL code for allocating the
218 // large frame.
219
220 // Emit the unconditional branch in the function prologue (from {offset} to
221 // {pc_offset()}).
222
223 int jump_offset = pc_offset() - offset;
224 if (!is_int26(jump_offset)) {
225 bailout(kUnsupportedArchitecture, "branch offset overflow");
226 return;
227 }
228 patching_assembler.b(jump_offset, LeaveLK);
229
230 // If the frame is bigger than the stack, we throw the stack overflow
231 // exception unconditionally. Thereby we can avoid the integer overflow
232 // check in the condition code.
233 RecordComment("OOL: stack check for large frame");
234 Label continuation;
235 if (frame_size < v8_flags.stack_size * 1024) {
236 Register stack_limit = ip;
238 AddS64(stack_limit, stack_limit, Operand(frame_size), r0);
239 CmpU64(sp, stack_limit);
241 }
242
243 if (v8_flags.experimental_wasm_growable_stacks) {
244 LiftoffRegList regs_to_save;
246 regs_to_save.set(WasmHandleStackOverflowDescriptor::FrameBaseRegister());
247 for (auto reg : kGpParamRegisters) regs_to_save.set(reg);
248 for (auto reg : kFpParamRegisters) regs_to_save.set(reg);
251 AddS64(WasmHandleStackOverflowDescriptor::FrameBaseRegister(), fp,
252 Operand(stack_param_slots * kStackSlotSize +
254 CallBuiltin(Builtin::kWasmHandleStackOverflow);
255 safepoint_table_builder->DefineSafepoint(this);
257 } else {
258 Call(static_cast<Address>(Builtin::kWasmStackOverflow),
260 // The call will not return; just define an empty safepoint.
261 safepoint_table_builder->DefineSafepoint(this);
262 if (v8_flags.debug_code) stop();
263 }
264
266
267 // Now allocate the stack space. Note that this might do more than just
268 // decrementing the SP; consult {MacroAssembler::AllocateStackSpace}.
269 SubS64(sp, sp, Operand(frame_size), r0);
270
271 // Jump back to the start of the function, from {pc_offset()} to
272 // right after the reserved space for the {__ sub(sp, sp, framesize)} (which
273 // is a branch now).
275 if (!is_int26(jump_offset)) {
276 bailout(kUnsupportedArchitecture, "branch offset overflow");
277 return;
278 }
280}
281
283
285
286// static
289}
290
291int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
292 switch (kind) {
293 case kS128:
294 return value_kind_size(kind);
295 default:
296 return kStackSlotSize;
297 }
298}
299
300bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
301 return (kind == kS128 || is_reference(kind));
302}
303
304void LiftoffAssembler::CheckTierUp(int declared_func_index, int budget_used,
305 Label* ool_label,
306 const FreezeCacheState& frozen) {
307 Register budget_array = ip;
309
310 if (instance_data == no_reg) {
311 instance_data = budget_array; // Reuse the temp register.
313 }
314
315 constexpr int kArrayOffset = wasm::ObjectAccess::ToTagged(
316 WasmTrustedInstanceData::kTieringBudgetArrayOffset);
317 LoadU64(budget_array, MemOperand(instance_data, kArrayOffset), r0);
318
319 int budget_arr_offset = kInt32Size * declared_func_index;
320 // Pick a random register from kLiftoffAssemblerGpCacheRegs.
321 // TODO(miladfarca): Use ScratchRegisterScope when available.
322 Register budget = r15;
323 push(budget);
324 MemOperand budget_addr(budget_array, budget_arr_offset);
325 LoadS32(budget, budget_addr, r0);
326 mov(r0, Operand(budget_used));
327 sub(budget, budget, r0, LeaveOE, SetRC);
328 StoreU32(budget, budget_addr, r0);
329 pop(budget);
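// The SetRC form of the subtraction above updated cr0; branch to the
// out-of-line tier-up path if the budget went negative.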
330 blt(ool_label, cr0);
331}
332
334 if (!v8_flags.experimental_wasm_growable_stacks) {
335 return fp;
336 }
337 LiftoffRegister old_fp = GetUnusedRegister(RegClass::kGpReg, {});
338 Label done, call_runtime;
340 CmpU64(old_fp.gp(),
341 Operand(StackFrame::TypeToMarker(StackFrame::WASM_SEGMENT_START)), r0);
342 beq(&call_runtime);
343 mr(old_fp.gp(), fp);
344 jmp(&done);
345
346 bind(&call_runtime);
347 LiftoffRegList regs_to_save = cache_state()->used_registers;
351 CallCFunction(ExternalReference::wasm_load_old_fp(), 1);
352 if (old_fp.gp() != kReturnRegister0) {
353 mr(old_fp.gp(), kReturnRegister0);
354 }
356
357 bind(&done);
358 return old_fp.gp();
359}
360
362 {
363 UseScratchRegisterScope temps{this};
364 Register scratch = temps.Acquire();
366 CmpU64(scratch,
367 Operand(StackFrame::TypeToMarker(StackFrame::WASM_SEGMENT_START)),
368 r0);
369 }
370 Label done;
371 bne(&done);
372 LiftoffRegList regs_to_save;
373 for (auto reg : kGpReturnRegisters) regs_to_save.set(reg);
374 for (auto reg : kFpReturnRegisters) regs_to_save.set(reg);
378 CallCFunction(ExternalReference::wasm_shrink_stack(), 1);
379 // Restore old FP. We don't need to restore old SP explicitly, because
380 // it will be restored from FP in LeaveFrame before return.
381 mr(fp, kReturnRegister0);
383 bind(&done);
384}
385
386void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
387 switch (value.type().kind()) {
388 case kI32:
389 mov(reg.gp(), Operand(value.to_i32()));
390 break;
391 case kI64:
392 mov(reg.gp(), Operand(value.to_i64()));
393 break;
394 case kF32: {
395 UseScratchRegisterScope temps(this);
396 Register scratch = temps.Acquire();
397 mov(scratch, Operand(value.to_f32_boxed().get_bits()));
398 MovIntToFloat(reg.fp(), scratch, ip);
399 break;
400 }
401 case kF64: {
402 UseScratchRegisterScope temps(this);
403 Register scratch = temps.Acquire();
404 mov(scratch, Operand(value.to_f64_boxed().get_bits()));
405 MovInt64ToDouble(reg.fp(), scratch);
406 break;
407 }
408 default:
409 UNREACHABLE();
410 }
411}
412
415}
416
417void LiftoffAssembler::LoadTrustedPointer(Register dst, Register src_addr,
418 int offset, IndirectPointerTag tag) {
419 MemOperand src{src_addr, offset};
420 LoadTrustedPointerField(dst, src, tag, r0);
421}
422
423void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
424 int offset, int size) {
425 DCHECK_LE(0, offset);
426 switch (size) {
427 case 1:
428 LoadU8(dst, MemOperand(instance, offset), r0);
429 break;
430 case 4:
431 LoadU32(dst, MemOperand(instance, offset), r0);
432 break;
433 case 8:
434 LoadU64(dst, MemOperand(instance, offset), r0);
435 break;
436 default:
438 }
439}
440
442 Register instance,
443 int offset) {
444 LoadTaggedField(dst, MemOperand(instance, offset), r0);
445}
446
447void LiftoffAssembler::SpillInstanceData(Register instance) {
449}
450
452
453void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
454 Register offset_reg,
455 int32_t offset_imm,
456 uint32_t* protected_load_pc,
457 bool needs_shift) {
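// When scaling is requested, tagged fields are 4 bytes wide with pointer
// compression and 8 bytes otherwise, so shift the index by 2 or 3.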
458 unsigned shift_amount = !needs_shift ? 0 : COMPRESS_POINTERS_BOOL ? 2 : 3;
459 if (offset_reg != no_reg && shift_amount != 0) {
460 ShiftLeftU64(ip, offset_reg, Operand(shift_amount));
461 offset_reg = ip;
462 }
463 if (protected_load_pc) *protected_load_pc = pc_offset();
464 LoadTaggedField(dst, MemOperand(src_addr, offset_reg, offset_imm), r0);
465}
466
467void LiftoffAssembler::LoadProtectedPointer(Register dst, Register src_addr,
468 int32_t offset) {
469 static_assert(!V8_ENABLE_SANDBOX_BOOL);
470 LoadTaggedPointer(dst, src_addr, no_reg, offset);
471}
472
473void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
474 int32_t offset_imm) {
475 LoadU64(dst, MemOperand(src_addr, offset_imm), r0);
476}
477
478#ifdef V8_ENABLE_SANDBOX
479void LiftoffAssembler::LoadCodeEntrypointViaCodePointer(Register dst,
480 Register src_addr,
481 int32_t offset_imm) {
482 UseScratchRegisterScope temps(this);
483 Register scratch = temps.Acquire();
484 MemOperand src_op =
485 liftoff::GetMemOp(this, src_addr, no_reg, offset_imm, scratch);
486 MacroAssembler::LoadCodeEntrypointViaCodePointer(dst, src_op, scratch);
487}
488#endif
489
490void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
491 Register offset_reg,
492 int32_t offset_imm, Register src,
493 LiftoffRegList /* pinned */,
494 uint32_t* protected_store_pc,
495 SkipWriteBarrier skip_write_barrier) {
496 MemOperand dst_op = MemOperand(dst_addr, offset_reg, offset_imm);
497 if (protected_store_pc) *protected_store_pc = pc_offset();
498 StoreTaggedField(src, dst_op, r0);
499
500 if (skip_write_barrier || v8_flags.disable_write_barriers) return;
501
502 Label exit;
503 // NOTE: to_condition(kZero) is the equality condition (eq)
504 // This line verifies the masked address is equal to dst_addr,
505 // not that it is zero!
507 to_condition(kZero), &exit);
508 JumpIfSmi(src, &exit);
510 &exit);
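// Recompute the absolute destination address (dst_addr + offset_imm
// [+ offset_reg]) in ip for the write-barrier stub call below.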
511 mov(ip, Operand(offset_imm));
512 add(ip, ip, dst_addr);
513 if (offset_reg != no_reg) {
514 add(ip, ip, offset_reg);
515 }
517 StubCallMode::kCallWasmRuntimeStub);
518 bind(&exit);
519}
520
521void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
522 Register offset_reg, uintptr_t offset_imm,
523 LoadType type, uint32_t* protected_load_pc,
524 bool is_load_mem, bool i64_offset,
525 bool needs_shift) {
526 if (!i64_offset && offset_reg != no_reg) {
527 ZeroExtWord32(ip, offset_reg);
528 offset_reg = ip;
529 }
530 unsigned shift_amount = needs_shift ? type.size_log_2() : 0;
531 if (offset_reg != no_reg && shift_amount != 0) {
532 ShiftLeftU64(ip, offset_reg, Operand(shift_amount));
533 offset_reg = ip;
534 }
535 MemOperand src_op = MemOperand(src_addr, offset_reg, offset_imm);
536 if (protected_load_pc) *protected_load_pc = pc_offset();
537 switch (type.value()) {
538 case LoadType::kI32Load8U:
539 case LoadType::kI64Load8U:
540 LoadU8(dst.gp(), src_op, r0);
541 break;
542 case LoadType::kI32Load8S:
543 case LoadType::kI64Load8S:
544 LoadS8(dst.gp(), src_op, r0);
545 break;
546 case LoadType::kI32Load16U:
547 case LoadType::kI64Load16U:
548 if (is_load_mem) {
549 LoadU16LE(dst.gp(), src_op, r0);
550 } else {
551 LoadU16(dst.gp(), src_op, r0);
552 }
553 break;
554 case LoadType::kI32Load16S:
555 case LoadType::kI64Load16S:
556 if (is_load_mem) {
557 LoadS16LE(dst.gp(), src_op, r0);
558 } else {
559 LoadS16(dst.gp(), src_op, r0);
560 }
561 break;
562 case LoadType::kI64Load32U:
563 if (is_load_mem) {
564 LoadU32LE(dst.gp(), src_op, r0);
565 } else {
566 LoadU32(dst.gp(), src_op, r0);
567 }
568 break;
569 case LoadType::kI32Load:
570 case LoadType::kI64Load32S:
571 if (is_load_mem) {
572 LoadS32LE(dst.gp(), src_op, r0);
573 } else {
574 LoadS32(dst.gp(), src_op, r0);
575 }
576 break;
577 case LoadType::kI64Load:
578 if (is_load_mem) {
579 LoadU64LE(dst.gp(), src_op, r0);
580 } else {
581 LoadU64(dst.gp(), src_op, r0);
582 }
583 break;
584 case LoadType::kF32Load:
585 if (is_load_mem) {
586 // `ip` could be used as offset_reg.
587 Register scratch = ip;
588 if (offset_reg == ip) {
589 scratch = GetRegisterThatIsNotOneOf(src_addr);
590 push(scratch);
591 }
592 LoadF32LE(dst.fp(), src_op, r0, scratch);
593 if (offset_reg == ip) {
594 pop(scratch);
595 }
596 } else {
597 LoadF32(dst.fp(), src_op, r0);
598 }
599 break;
600 case LoadType::kF64Load:
601 if (is_load_mem) {
602 // `ip` could be used as offset_reg.
603 Register scratch = ip;
604 if (offset_reg == ip) {
605 scratch = GetRegisterThatIsNotOneOf(src_addr);
606 push(scratch);
607 }
608 LoadF64LE(dst.fp(), src_op, r0, scratch);
609 if (offset_reg == ip) {
610 pop(scratch);
611 }
612 } else {
613 LoadF64(dst.fp(), src_op, r0);
614 }
615 break;
616 case LoadType::kS128Load:
617 if (is_load_mem) {
618 LoadSimd128LE(dst.fp().toSimd(), src_op, r0);
619 } else {
620 LoadSimd128(dst.fp().toSimd(), src_op, r0);
621 }
622 break;
623 default:
624 UNREACHABLE();
625 }
626}
627
628void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
629 uintptr_t offset_imm, LiftoffRegister src,
630 StoreType type, LiftoffRegList pinned,
631 uint32_t* protected_store_pc, bool is_store_mem,
632 bool i64_offset) {
633 if (!i64_offset && offset_reg != no_reg) {
634 ZeroExtWord32(ip, offset_reg);
635 offset_reg = ip;
636 }
637 MemOperand dst_op = MemOperand(dst_addr, offset_reg, offset_imm);
638 if (protected_store_pc) *protected_store_pc = pc_offset();
639 switch (type.value()) {
640 case StoreType::kI32Store8:
641 case StoreType::kI64Store8:
642 StoreU8(src.gp(), dst_op, r0);
643 break;
644 case StoreType::kI32Store16:
645 case StoreType::kI64Store16:
646 if (is_store_mem) {
647 StoreU16LE(src.gp(), dst_op, r0);
648 } else {
649 StoreU16(src.gp(), dst_op, r0);
650 }
651 break;
652 case StoreType::kI32Store:
653 case StoreType::kI64Store32:
654 if (is_store_mem) {
655 StoreU32LE(src.gp(), dst_op, r0);
656 } else {
657 StoreU32(src.gp(), dst_op, r0);
658 }
659 break;
660 case StoreType::kI64Store:
661 if (is_store_mem) {
662 StoreU64LE(src.gp(), dst_op, r0);
663 } else {
664 StoreU64(src.gp(), dst_op, r0);
665 }
666 break;
667 case StoreType::kF32Store:
668 if (is_store_mem) {
669 Register scratch2 = GetUnusedRegister(kGpReg, pinned).gp();
670 StoreF32LE(src.fp(), dst_op, r0, scratch2);
671 } else {
672 StoreF32(src.fp(), dst_op, r0);
673 }
674 break;
675 case StoreType::kF64Store:
676 if (is_store_mem) {
677 Register scratch2 = GetUnusedRegister(kGpReg, pinned).gp();
678 StoreF64LE(src.fp(), dst_op, r0, scratch2);
679 } else {
680 StoreF64(src.fp(), dst_op, r0);
681 }
682 break;
683 case StoreType::kS128Store: {
684 if (is_store_mem) {
685 StoreSimd128LE(src.fp().toSimd(), dst_op, r0, kScratchSimd128Reg);
686 } else {
687 StoreSimd128(src.fp().toSimd(), dst_op, r0);
688 }
689 break;
690 }
691 default:
692 UNREACHABLE();
693 }
694}
695
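// Acquire-style ordering: the lwsync after the plain load orders it before
// all subsequent memory accesses.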
696void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
697 Register offset_reg, uintptr_t offset_imm,
698 LoadType type, LiftoffRegList /* pinned */,
699 bool i64_offset) {
700 Load(dst, src_addr, offset_reg, offset_imm, type, nullptr, true, i64_offset);
701 lwsync();
702}
703
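// Release/seq-cst-style ordering: the lwsync orders the store after all prior
// accesses, and the trailing sync acts as a full barrier.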
704void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
705 uintptr_t offset_imm, LiftoffRegister src,
706 StoreType type, LiftoffRegList pinned,
707 bool i64_offset) {
708 lwsync();
709 Store(dst_addr, offset_reg, offset_imm, src, type, pinned, nullptr, true,
710 i64_offset);
711 sync();
712}
713
714#ifdef V8_TARGET_BIG_ENDIAN
715constexpr bool is_be = true;
716#else
717constexpr bool is_be = false;
718#endif
719
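// Generic atomic read-modify-write. On big-endian targets the in-memory value
// is byte-reversed (Wasm memory is little-endian), so it is reversed before
// and after applying the operation, and the returned old value is reversed
// as well.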
720#define ATOMIC_OP(instr) \
721 { \
722 if (!i64_offset && offset_reg != no_reg) { \
723 ZeroExtWord32(ip, offset_reg); \
724 offset_reg = ip; \
725 } \
726 \
727 Register offset = r0; \
728 if (offset_imm != 0) { \
729 mov(offset, Operand(offset_imm)); \
730 if (offset_reg != no_reg) add(offset, offset, offset_reg); \
731 mr(ip, offset); \
732 offset = ip; \
733 } else if (offset_reg != no_reg) { \
734 offset = offset_reg; \
735 } \
736 \
737 MemOperand dst = MemOperand(offset, dst_addr); \
738 \
739 switch (type.value()) { \
740 case StoreType::kI32Store8: \
741 case StoreType::kI64Store8: { \
742 auto op_func = [&](Register dst, Register lhs, Register rhs) { \
743 instr(dst, lhs, rhs); \
744 }; \
745 AtomicOps<uint8_t>(dst, value.gp(), result.gp(), r0, op_func); \
746 break; \
747 } \
748 case StoreType::kI32Store16: \
749 case StoreType::kI64Store16: { \
750 auto op_func = [&](Register dst, Register lhs, Register rhs) { \
751 if (is_be) { \
752 Register scratch = GetRegisterThatIsNotOneOf(lhs, rhs, dst); \
753 push(scratch); \
754 ByteReverseU16(dst, lhs, scratch); \
755 instr(dst, dst, rhs); \
756 ByteReverseU16(dst, dst, scratch); \
757 pop(scratch); \
758 } else { \
759 instr(dst, lhs, rhs); \
760 } \
761 }; \
762 AtomicOps<uint16_t>(dst, value.gp(), result.gp(), r0, op_func); \
763 if (is_be) { \
764 ByteReverseU16(result.gp(), result.gp(), ip); \
765 } \
766 break; \
767 } \
768 case StoreType::kI32Store: \
769 case StoreType::kI64Store32: { \
770 auto op_func = [&](Register dst, Register lhs, Register rhs) { \
771 if (is_be) { \
772 Register scratch = GetRegisterThatIsNotOneOf(lhs, rhs, dst); \
773 push(scratch); \
774 ByteReverseU32(dst, lhs, scratch); \
775 instr(dst, dst, rhs); \
776 ByteReverseU32(dst, dst, scratch); \
777 pop(scratch); \
778 } else { \
779 instr(dst, lhs, rhs); \
780 } \
781 }; \
782 AtomicOps<uint32_t>(dst, value.gp(), result.gp(), r0, op_func); \
783 if (is_be) { \
784 ByteReverseU32(result.gp(), result.gp(), ip); \
785 } \
786 break; \
787 } \
788 case StoreType::kI64Store: { \
789 auto op_func = [&](Register dst, Register lhs, Register rhs) { \
790 if (is_be) { \
791 ByteReverseU64(dst, lhs); \
792 instr(dst, dst, rhs); \
793 ByteReverseU64(dst, dst); \
794 } else { \
795 instr(dst, lhs, rhs); \
796 } \
797 }; \
798 AtomicOps<uint64_t>(dst, value.gp(), result.gp(), r0, op_func); \
799 if (is_be) { \
800 ByteReverseU64(result.gp(), result.gp()); \
801 } \
802 break; \
803 } \
804 default: \
805 UNREACHABLE(); \
806 } \
807 }
808
809void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
810 uintptr_t offset_imm, LiftoffRegister value,
811 LiftoffRegister result, StoreType type,
812 bool i64_offset) {
813 ATOMIC_OP(add);
814}
815
816void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
817 uintptr_t offset_imm, LiftoffRegister value,
818 LiftoffRegister result, StoreType type,
819 bool i64_offset) {
820 ATOMIC_OP(sub);
821}
822
823void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
824 uintptr_t offset_imm, LiftoffRegister value,
825 LiftoffRegister result, StoreType type,
826 bool i64_offset) {
828}
829
830void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
831 uintptr_t offset_imm, LiftoffRegister value,
832 LiftoffRegister result, StoreType type,
833 bool i64_offset) {
834 ATOMIC_OP(orx);
835}
836
837void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
838 uintptr_t offset_imm, LiftoffRegister value,
839 LiftoffRegister result, StoreType type,
840 bool i64_offset) {
842}
843
844void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
845 uintptr_t offset_imm,
846 LiftoffRegister value,
847 LiftoffRegister result, StoreType type,
848 bool i64_offset) {
849 if (!i64_offset && offset_reg != no_reg) {
850 ZeroExtWord32(ip, offset_reg);
851 offset_reg = ip;
852 }
853
855 if (offset_imm != 0) {
856 mov(offset, Operand(offset_imm));
857 if (offset_reg != no_reg) add(offset, offset, offset_reg);
858 mr(ip, offset);
859 offset = ip;
860 } else if (offset_reg != no_reg) {
861 offset = offset_reg;
862 }
863 MemOperand dst = MemOperand(offset, dst_addr);
864 switch (type.value()) {
865 case StoreType::kI32Store8:
866 case StoreType::kI64Store8: {
867 MacroAssembler::AtomicExchange<uint8_t>(dst, value.gp(), result.gp());
868 break;
869 }
870 case StoreType::kI32Store16:
871 case StoreType::kI64Store16: {
872 if (is_be) {
873 Register scratch = GetRegisterThatIsNotOneOf(value.gp(), result.gp());
874 push(scratch);
875 ByteReverseU16(r0, value.gp(), scratch);
876 pop(scratch);
878 ByteReverseU16(result.gp(), result.gp(), ip);
879 } else {
881 }
882 break;
883 }
884 case StoreType::kI32Store:
885 case StoreType::kI64Store32: {
886 if (is_be) {
887 Register scratch = GetRegisterThatIsNotOneOf(value.gp(), result.gp());
888 push(scratch);
889 ByteReverseU32(r0, value.gp(), scratch);
890 pop(scratch);
892 ByteReverseU32(result.gp(), result.gp(), ip);
893 } else {
895 }
896 break;
897 }
898 case StoreType::kI64Store: {
899 if (is_be) {
900 ByteReverseU64(r0, value.gp());
902 ByteReverseU64(result.gp(), result.gp());
903 } else {
905 }
906 break;
907 }
908 default:
909 UNREACHABLE();
910 }
911}
912
913void LiftoffAssembler::AtomicCompareExchange(
914 Register dst_addr, Register offset_reg, uintptr_t offset_imm,
915 LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
916 StoreType type, bool i64_offset) {
917 if (!i64_offset && offset_reg != no_reg) {
918 ZeroExtWord32(ip, offset_reg);
919 offset_reg = ip;
920 }
921
923 if (offset_imm != 0) {
924 mov(offset, Operand(offset_imm));
925 if (offset_reg != no_reg) add(offset, offset, offset_reg);
926 mr(ip, offset);
927 offset = ip;
928 } else if (offset_reg != no_reg) {
929 offset = offset_reg;
930 }
931 MemOperand dst = MemOperand(offset, dst_addr);
932 switch (type.value()) {
933 case StoreType::kI32Store8:
934 case StoreType::kI64Store8: {
936 dst, expected.gp(), new_value.gp(), result.gp(), r0);
937 break;
938 }
939 case StoreType::kI32Store16:
940 case StoreType::kI64Store16: {
941 if (is_be) {
942 Push(new_value.gp(), expected.gp());
944 new_value.gp(), expected.gp(), result.gp());
945 push(scratch);
946 ByteReverseU16(new_value.gp(), new_value.gp(), scratch);
947 ByteReverseU16(expected.gp(), expected.gp(), scratch);
948 pop(scratch);
950 dst, expected.gp(), new_value.gp(), result.gp(), r0);
951 ByteReverseU16(result.gp(), result.gp(), r0);
952 Pop(new_value.gp(), expected.gp());
953 } else {
955 dst, expected.gp(), new_value.gp(), result.gp(), r0);
956 }
957 break;
958 }
959 case StoreType::kI32Store:
960 case StoreType::kI64Store32: {
961 if (is_be) {
962 Push(new_value.gp(), expected.gp());
964 new_value.gp(), expected.gp(), result.gp());
965 push(scratch);
966 ByteReverseU32(new_value.gp(), new_value.gp(), scratch);
967 ByteReverseU32(expected.gp(), expected.gp(), scratch);
968 pop(scratch);
970 dst, expected.gp(), new_value.gp(), result.gp(), r0);
971 ByteReverseU32(result.gp(), result.gp(), r0);
972 Pop(new_value.gp(), expected.gp());
973 } else {
975 dst, expected.gp(), new_value.gp(), result.gp(), r0);
976 }
977 break;
978 }
979 case StoreType::kI64Store: {
980 if (is_be) {
981 Push(new_value.gp(), expected.gp());
982 ByteReverseU64(new_value.gp(), new_value.gp());
983 ByteReverseU64(expected.gp(), expected.gp());
985 dst, expected.gp(), new_value.gp(), result.gp(), r0);
986 ByteReverseU64(result.gp(), result.gp());
987 Pop(new_value.gp(), expected.gp());
988 } else {
990 dst, expected.gp(), new_value.gp(), result.gp(), r0);
991 }
992 break;
993 }
994 default:
995 UNREACHABLE();
996 }
997}
998
1000
1001void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
1002 uint32_t caller_slot_idx,
1003 ValueKind kind) {
1004 int32_t offset = (caller_slot_idx + 1) * kSystemPointerSize;
1005 switch (kind) {
1006 case kI32: {
1007#if defined(V8_TARGET_BIG_ENDIAN)
1008 LoadS32(dst.gp(), MemOperand(fp, offset + 4), r0);
1009 break;
1010#else
1011 LoadS32(dst.gp(), MemOperand(fp, offset), r0);
1012 break;
1013#endif
1014 }
1015 case kRef:
1016 case kRefNull:
1017 case kI64: {
1018 LoadU64(dst.gp(), MemOperand(fp, offset), r0);
1019 break;
1020 }
1021 case kF32: {
1022 LoadF32(dst.fp(), MemOperand(fp, offset), r0);
1023 break;
1024 }
1025 case kF64: {
1026 LoadF64(dst.fp(), MemOperand(fp, offset), r0);
1027 break;
1028 }
1029 case kS128: {
1030 LoadSimd128(dst.fp().toSimd(), MemOperand(fp, offset), r0);
1031 break;
1032 }
1033 default:
1034 UNREACHABLE();
1035 }
1036}
1037
1038void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
1039 uint32_t caller_slot_idx,
1041 Register frame_pointer) {
1042 int32_t offset = (caller_slot_idx + 1) * kSystemPointerSize;
1043 switch (kind) {
1044 case kI32: {
1045#if defined(V8_TARGET_BIG_ENDIAN)
1046 StoreU32(src.gp(), MemOperand(frame_pointer, offset + 4), r0);
1047 break;
1048#else
1049 StoreU32(src.gp(), MemOperand(frame_pointer, offset), r0);
1050 break;
1051#endif
1052 }
1053 case kRef:
1054 case kRefNull:
1055 case kI64: {
1056 StoreU64(src.gp(), MemOperand(frame_pointer, offset), r0);
1057 break;
1058 }
1059 case kF32: {
1060 StoreF32(src.fp(), MemOperand(frame_pointer, offset), r0);
1061 break;
1062 }
1063 case kF64: {
1064 StoreF64(src.fp(), MemOperand(frame_pointer, offset), r0);
1065 break;
1066 }
1067 case kS128: {
1068 StoreSimd128(src.fp().toSimd(), MemOperand(frame_pointer, offset), r0);
1069 break;
1070 }
1071 default:
1072 UNREACHABLE();
1073 }
1074}
1075
1076void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
1077 ValueKind kind) {
1078 switch (kind) {
1079 case kI32: {
1080#if defined(V8_TARGET_BIG_ENDIAN)
1081 LoadS32(dst.gp(), MemOperand(sp, offset + 4), r0);
1082 break;
1083#else
1084 LoadS32(dst.gp(), MemOperand(sp, offset), r0);
1085 break;
1086#endif
1087 }
1088 case kRef:
1089 case kRefNull:
1090 case kI64: {
1091 LoadU64(dst.gp(), MemOperand(sp, offset), r0);
1092 break;
1093 }
1094 case kF32: {
1095 LoadF32(dst.fp(), MemOperand(sp, offset), r0);
1096 break;
1097 }
1098 case kF64: {
1099 LoadF64(dst.fp(), MemOperand(sp, offset), r0);
1100 break;
1101 }
1102 case kS128: {
1103 LoadSimd128(dst.fp().toSimd(), MemOperand(sp, offset), r0);
1104 break;
1105 }
1106 default:
1107 UNREACHABLE();
1108 }
1109}
1110
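// On big-endian targets the least-significant 32 bits of an 8-byte stack slot
// sit 4 bytes above the slot's base address, so 32-bit slot accesses apply
// this bias.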
1111#ifdef V8_TARGET_BIG_ENDIAN
1112constexpr int stack_bias = -4;
1113#else
1114constexpr int stack_bias = 0;
1115#endif
1116
1117void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
1118 ValueKind kind) {
1119 DCHECK_NE(dst_offset, src_offset);
1120
1121 switch (kind) {
1122 case kI32:
1123 case kF32:
1124 LoadU32(ip, liftoff::GetStackSlot(src_offset + stack_bias), r0);
1125 StoreU32(ip, liftoff::GetStackSlot(dst_offset + stack_bias), r0);
1126 break;
1127 case kI64:
1128 case kRefNull:
1129 case kRef:
1130 case kF64:
1131 LoadU64(ip, liftoff::GetStackSlot(src_offset), r0);
1132 StoreU64(ip, liftoff::GetStackSlot(dst_offset), r0);
1133 break;
1134 case kS128:
1137 break;
1138 default:
1139 UNREACHABLE();
1140 }
1141}
1142
1144 mr(dst, src);
1145}
1146
1148 ValueKind kind) {
1149 if (kind == kF32 || kind == kF64) {
1150 fmr(dst, src);
1151 } else {
1153 vor(dst.toSimd(), src.toSimd(), src.toSimd());
1154 }
1155}
1156
1157void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
1158 DCHECK_LT(0, offset);
1160
1161 switch (kind) {
1162 case kI32:
1164 break;
1165 case kI64:
1166 case kRefNull:
1167 case kRef:
1169 break;
1170 case kF32:
1172 break;
1173 case kF64:
1175 break;
1176 case kS128: {
1178 break;
1179 }
1180 default:
1181 UNREACHABLE();
1182 }
1183}
1184
1185void LiftoffAssembler::Spill(int offset, WasmValue value) {
1187 UseScratchRegisterScope temps(this);
1188 Register src = no_reg;
1189 src = ip;
1190 switch (value.type().kind()) {
1191 case kI32: {
1192 mov(src, Operand(value.to_i32()));
1194 break;
1195 }
1196 case kI64: {
1197 mov(src, Operand(value.to_i64()));
1199 break;
1200 }
1201 default:
1202 // We do not track f32 and f64 constants, hence they are unreachable.
1203 UNREACHABLE();
1204 }
1205}
1206
1207void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
1208 switch (kind) {
1209 case kI32:
1211 break;
1212 case kI64:
1213 case kRef:
1214 case kRefNull:
1216 break;
1217 case kF32:
1219 break;
1220 case kF64:
1222 break;
1223 case kS128: {
1224 LoadSimd128(reg.fp().toSimd(), liftoff::GetStackSlot(offset), r0);
1225 break;
1226 }
1227 default:
1228 UNREACHABLE();
1229 }
1230}
1231
1233 bailout(kUnsupportedArchitecture, "FillI64Half");
1234}
1235
1237 DCHECK_LT(0, size);
1238 DCHECK_EQ(0, size % 8);
1240
1241 // We need a zero register. The short path below uses ip; the loop path uses
1242 // r0 and temporarily saves r4 for the loop counter.
1243
1244 if (size <= 36) {
1245 // Special straight-line code for up to nine words. Generates one
1246 // instruction per word.
1247 mov(ip, Operand::Zero());
1248 uint32_t remainder = size;
1251 }
1252 DCHECK(remainder == 4 || remainder == 0);
1253 if (remainder) {
1255 }
1256 } else {
1257 Label loop;
1258 push(r4);
1259
1260 mov(r4, Operand(size / kSystemPointerSize));
1261 mtctr(r4);
1262
1263 SubS64(r4, fp, Operand(start + size + kSystemPointerSize), r0);
1264 mov(r0, Operand::Zero());
1265
1266 bind(&loop);
1268 bdnz(&loop);
1269
1270 pop(r4);
1271 }
1272}
1273
1274void LiftoffAssembler::LoadSpillAddress(Register dst, int offset,
1275 ValueKind kind) {
1276 if (kind == kI32) offset = offset + stack_bias;
1277 SubS64(dst, fp, Operand(offset));
1278}
1279
1280#define SIGN_EXT(r) extsw(r, r)
1281#define ROUND_F64_TO_F32(fpr) frsp(fpr, fpr)
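// Wasm defines shift counts modulo the operand width; the helpers below mask
// the shift amount to 5 bits (32-bit ops) or 6 bits (64-bit ops).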
1282#define INT32_AND_WITH_1F(x) Operand(x & 0x1f)
1283#define INT32_AND_WITH_3F(x) Operand(x & 0x3f)
1284#define REGISTER_AND_WITH_1F \
1285 ([&](Register rhs) { \
1286 andi(r0, rhs, Operand(31)); \
1287 return r0; \
1288 })
1289
1290#define REGISTER_AND_WITH_3F \
1291 ([&](Register rhs) { \
1292 andi(r0, rhs, Operand(63)); \
1293 return r0; \
1294 })
1295
1296#define LFR_TO_REG(reg) reg.gp()
1297
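// X-macro table: each row expands via EMIT_UNOP_FUNCTION below into a
// LiftoffAssembler::emit_<name> wrapper around the listed (Macro)Assembler
// instruction, with optional operand casts and a result cast.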
1298// V(name, instr, dtype, stype, dcast, scast, rcast, return_val, return_type)
1299#define UNOP_LIST(V) \
1300 V(f32_abs, fabs, DoubleRegister, DoubleRegister, , , USE, , void) \
1301 V(f32_neg, fneg, DoubleRegister, DoubleRegister, , , USE, , void) \
1302 V(f32_sqrt, fsqrt, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32, , \
1303 void) \
1304 V(f32_floor, frim, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32, \
1305 true, bool) \
1306 V(f32_ceil, frip, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32, \
1307 true, bool) \
1308 V(f32_trunc, friz, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32, \
1309 true, bool) \
1310 V(f64_abs, fabs, DoubleRegister, DoubleRegister, , , USE, , void) \
1311 V(f64_neg, fneg, DoubleRegister, DoubleRegister, , , USE, , void) \
1312 V(f64_sqrt, fsqrt, DoubleRegister, DoubleRegister, , , USE, , void) \
1313 V(f64_floor, frim, DoubleRegister, DoubleRegister, , , USE, true, bool) \
1314 V(f64_ceil, frip, DoubleRegister, DoubleRegister, , , USE, true, bool) \
1315 V(f64_trunc, friz, DoubleRegister, DoubleRegister, , , USE, true, bool) \
1316 V(i32_clz, CountLeadingZerosU32, Register, Register, , , USE, , void) \
1317 V(i32_ctz, CountTrailingZerosU32, Register, Register, , , USE, , void) \
1318 V(i64_clz, CountLeadingZerosU64, LiftoffRegister, LiftoffRegister, \
1319 LFR_TO_REG, LFR_TO_REG, USE, , void) \
1320 V(i64_ctz, CountTrailingZerosU64, LiftoffRegister, LiftoffRegister, \
1321 LFR_TO_REG, LFR_TO_REG, USE, , void) \
1322 V(u32_to_uintptr, ZeroExtWord32, Register, Register, , , USE, , void) \
1323 V(i32_signextend_i8, extsb, Register, Register, , , USE, , void) \
1324 V(i32_signextend_i16, extsh, Register, Register, , , USE, , void) \
1325 V(i64_signextend_i8, extsb, LiftoffRegister, LiftoffRegister, LFR_TO_REG, \
1326 LFR_TO_REG, USE, , void) \
1327 V(i64_signextend_i16, extsh, LiftoffRegister, LiftoffRegister, LFR_TO_REG, \
1328 LFR_TO_REG, USE, , void) \
1329 V(i64_signextend_i32, extsw, LiftoffRegister, LiftoffRegister, LFR_TO_REG, \
1330 LFR_TO_REG, USE, , void) \
1331 V(i32_popcnt, Popcnt32, Register, Register, , , USE, true, bool) \
1332 V(i64_popcnt, Popcnt64, LiftoffRegister, LiftoffRegister, LFR_TO_REG, \
1333 LFR_TO_REG, USE, true, bool)
1334
1335#define EMIT_UNOP_FUNCTION(name, instr, dtype, stype, dcast, scast, rcast, \
1336 ret, return_type) \
1337 return_type LiftoffAssembler::emit_##name(dtype dst, stype src) { \
1338 auto _dst = dcast(dst); \
1339 auto _src = scast(src); \
1340 instr(_dst, _src); \
1341 rcast(_dst); \
1342 return ret; \
1343 }
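// For illustration, the f32_abs row expands to roughly:
//   void LiftoffAssembler::emit_f32_abs(DoubleRegister dst, DoubleRegister src) {
//     fabs(dst, src);
//   }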
1344UNOP_LIST(EMIT_UNOP_FUNCTION)
1345#undef EMIT_UNOP_FUNCTION
1346#undef UNOP_LIST
1347
1348// V(name, instr, dtype, stype1, stype2, dcast, scast1, scast2, rcast,
1349// return_val, return_type)
1350#define BINOP_LIST(V) \
1351 V(f32_copysign, CopySignF64, DoubleRegister, DoubleRegister, DoubleRegister, \
1352 , , , USE, , void) \
1353 V(f64_copysign, CopySignF64, DoubleRegister, DoubleRegister, DoubleRegister, \
1354 , , , USE, , void) \
1355 V(f32_min, MinF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
1356 USE, , void) \
1357 V(f32_max, MaxF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
1358 USE, , void) \
1359 V(f64_min, MinF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
1360 USE, , void) \
1361 V(f64_max, MaxF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
1362 USE, , void) \
1363 V(i64_sub, SubS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
1364 LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
1365 V(i64_add, AddS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
1366 LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
1367 V(i64_addi, AddS64, LiftoffRegister, LiftoffRegister, int64_t, LFR_TO_REG, \
1368 LFR_TO_REG, Operand, USE, , void) \
1369 V(i32_sub, SubS32, Register, Register, Register, , , , USE, , void) \
1370 V(i32_add, AddS32, Register, Register, Register, , , , USE, , void) \
1371 V(i32_addi, AddS32, Register, Register, int32_t, , , Operand, USE, , void) \
1372 V(i32_subi, SubS32, Register, Register, int32_t, , , Operand, USE, , void) \
1373 V(i32_mul, MulS32, Register, Register, Register, , , , USE, , void) \
1374 V(i64_mul, MulS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
1375 LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
1376 V(i32_andi, AndU32, Register, Register, int32_t, , , Operand, USE, , void) \
1377 V(i32_ori, OrU32, Register, Register, int32_t, , , Operand, USE, , void) \
1378 V(i32_xori, XorU32, Register, Register, int32_t, , , Operand, USE, , void) \
1379 V(i32_and, AndU32, Register, Register, Register, , , , USE, , void) \
1380 V(i32_or, OrU32, Register, Register, Register, , , , USE, , void) \
1381 V(i32_xor, XorU32, Register, Register, Register, , , , USE, , void) \
1382 V(i64_and, AndU64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
1383 LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
1384 V(i64_or, OrU64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
1385 LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
1386 V(i64_xor, XorU64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
1387 LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
1388 V(i64_andi, AndU64, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
1389 LFR_TO_REG, Operand, USE, , void) \
1390 V(i64_ori, OrU64, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
1391 LFR_TO_REG, Operand, USE, , void) \
1392 V(i64_xori, XorU64, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
1393 LFR_TO_REG, Operand, USE, , void) \
1394 V(i32_shli, ShiftLeftU32, Register, Register, int32_t, , , \
1395 INT32_AND_WITH_1F, USE, , void) \
1396 V(i32_sari, ShiftRightS32, Register, Register, int32_t, , , \
1397 INT32_AND_WITH_1F, USE, , void) \
1398 V(i32_shri, ShiftRightU32, Register, Register, int32_t, , , \
1399 INT32_AND_WITH_1F, USE, , void) \
1400 V(i32_shl, ShiftLeftU32, Register, Register, Register, , , \
1401 REGISTER_AND_WITH_1F, USE, , void) \
1402 V(i32_sar, ShiftRightS32, Register, Register, Register, , , \
1403 REGISTER_AND_WITH_1F, USE, , void) \
1404 V(i32_shr, ShiftRightU32, Register, Register, Register, , , \
1405 REGISTER_AND_WITH_1F, USE, , void) \
1406 V(i64_shl, ShiftLeftU64, LiftoffRegister, LiftoffRegister, Register, \
1407 LFR_TO_REG, LFR_TO_REG, REGISTER_AND_WITH_3F, USE, , void) \
1408 V(i64_sar, ShiftRightS64, LiftoffRegister, LiftoffRegister, Register, \
1409 LFR_TO_REG, LFR_TO_REG, REGISTER_AND_WITH_3F, USE, , void) \
1410 V(i64_shr, ShiftRightU64, LiftoffRegister, LiftoffRegister, Register, \
1411 LFR_TO_REG, LFR_TO_REG, REGISTER_AND_WITH_3F, USE, , void) \
1412 V(i64_shli, ShiftLeftU64, LiftoffRegister, LiftoffRegister, int32_t, \
1413 LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void) \
1414 V(i64_sari, ShiftRightS64, LiftoffRegister, LiftoffRegister, int32_t, \
1415 LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void) \
1416 V(i64_shri, ShiftRightU64, LiftoffRegister, LiftoffRegister, int32_t, \
1417 LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void) \
1418 V(f64_add, AddF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
1419 USE, , void) \
1420 V(f64_sub, SubF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
1421 USE, , void) \
1422 V(f64_mul, MulF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
1423 USE, , void) \
1424 V(f64_div, DivF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
1425 USE, , void) \
1426 V(f32_add, AddF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
1427 USE, , void) \
1428 V(f32_sub, SubF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
1429 USE, , void) \
1430 V(f32_mul, MulF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
1431 USE, , void) \
1432 V(f32_div, DivF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
1433 USE, , void)
1434
1435#define EMIT_BINOP_FUNCTION(name, instr, dtype, stype1, stype2, dcast, scast1, \
1436 scast2, rcast, ret, return_type) \
1437 return_type LiftoffAssembler::emit_##name(dtype dst, stype1 lhs, \
1438 stype2 rhs) { \
1439 auto _dst = dcast(dst); \
1440 auto _lhs = scast1(lhs); \
1441 auto _rhs = scast2(rhs); \
1442 instr(_dst, _lhs, _rhs); \
1443 rcast(_dst); \
1444 return ret; \
1445 }
1446
1447BINOP_LIST(EMIT_BINOP_FUNCTION)
1448#undef BINOP_LIST
1449#undef EMIT_BINOP_FUNCTION
1450#undef SIGN_EXT
1451#undef INT32_AND_WITH_1F
1452#undef REGISTER_AND_WITH_1F
1453#undef LFR_TO_REG
1454
1456 DoubleRegister src) {
1457 return false;
1458}
1459
1461 DoubleRegister src) {
1462 return false;
1463}
1464
1465void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) {
1466 UseScratchRegisterScope temps(this);
1469 Register scratch = temps.Acquire();
1470 LoadS32(scratch, MemOperand(dst.gp(), offset), r0);
1471 AddS64(scratch, scratch, Operand(Smi::FromInt(1)));
1472 StoreU32(scratch, MemOperand(dst.gp(), offset), r0);
1473 } else {
1474 Register scratch = temps.Acquire();
1475 SmiUntag(scratch, MemOperand(dst.gp(), offset), LeaveRC, r0);
1476 AddS64(scratch, scratch, Operand(1));
1477 SmiTag(scratch);
1478 StoreU64(scratch, MemOperand(dst.gp(), offset), r0);
1479 }
1480}
1481
1482void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
1483 Label* trap_div_by_zero,
1484 Label* trap_div_unrepresentable) {
1485 Label cont;
1486
1487 // Check for division by zero.
1488 CmpS32(rhs, Operand::Zero(), r0);
1489 b(eq, trap_div_by_zero);
1490
1491 // Check for kMinInt / -1. This is unrepresentable.
1492 CmpS32(rhs, Operand(-1), r0);
1493 bne(&cont);
1494 CmpS32(lhs, Operand(kMinInt), r0);
1495 b(eq, trap_div_unrepresentable);
1496
1497 bind(&cont);
1498 DivS32(dst, lhs, rhs);
1499}
1500
1501void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
1502 Label* trap_div_by_zero) {
1503 CmpS32(rhs, Operand::Zero(), r0);
1504 beq(trap_div_by_zero);
1505 DivU32(dst, lhs, rhs);
1506}
1507
1508void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
1509 Label* trap_div_by_zero) {
1510 Label cont, done, trap_div_unrepresentable;
1511 // Check for division by zero.
1512 CmpS32(rhs, Operand::Zero(), r0);
1513 beq(trap_div_by_zero);
1514
1515 // Check kMinInt/-1 case.
1516 CmpS32(rhs, Operand(-1), r0);
1517 bne(&cont);
1518 CmpS32(lhs, Operand(kMinInt), r0);
1519 beq(&trap_div_unrepresentable);
1520
1521 // Continue normal calculation.
1522 bind(&cont);
1523 ModS32(dst, lhs, rhs);
1524 bne(&done);
1525
1526 // Handle the kMinInt / -1 case: the result is 0.
1527 bind(&trap_div_unrepresentable);
1528 mov(dst, Operand(0));
1529 bind(&done);
1530}
1531
1532void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
1533 Label* trap_div_by_zero) {
1534 CmpS32(rhs, Operand::Zero(), r0);
1535 beq(trap_div_by_zero);
1536 ModU32(dst, lhs, rhs);
1537}
1538
1539bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
1540 LiftoffRegister rhs,
1541 Label* trap_div_by_zero,
1542 Label* trap_div_unrepresentable) {
1543 constexpr int64_t kMinInt64 = static_cast<int64_t>(1) << 63;
1544 Label cont;
1545 // Check for division by zero.
1546 CmpS64(rhs.gp(), Operand::Zero(), r0);
1547 beq(trap_div_by_zero);
1548
1549 // Check for kMinInt / -1. This is unrepresentable.
1550 CmpS64(rhs.gp(), Operand(-1), r0);
1551 bne(&cont);
1552 CmpS64(lhs.gp(), Operand(kMinInt64), r0);
1553 beq(trap_div_unrepresentable);
1554
1555 bind(&cont);
1556 DivS64(dst.gp(), lhs.gp(), rhs.gp());
1557 return true;
1558}
1559
1560bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
1561 LiftoffRegister rhs,
1562 Label* trap_div_by_zero) {
1563 CmpS64(rhs.gp(), Operand::Zero(), r0);
1564 beq(trap_div_by_zero);
1565 // Do div.
1566 DivU64(dst.gp(), lhs.gp(), rhs.gp());
1567 return true;
1568}
1569
1570bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
1571 LiftoffRegister rhs,
1572 Label* trap_div_by_zero) {
1573 constexpr int64_t kMinInt64 = static_cast<int64_t>(1) << 63;
1574
1575 Label trap_div_unrepresentable;
1576 Label done;
1577 Label cont;
1578
1579 // Check for division by zero.
1580 CmpS64(rhs.gp(), Operand::Zero(), r0);
1581 beq(trap_div_by_zero);
1582
1583 // Check for kMinInt / -1. This is unrepresentable.
1584 CmpS64(rhs.gp(), Operand(-1), r0);
1585 bne(&cont);
1586 CmpS64(lhs.gp(), Operand(kMinInt64), r0);
1587 beq(&trap_div_unrepresentable);
1588
1589 bind(&cont);
1590 ModS64(dst.gp(), lhs.gp(), rhs.gp());
1591 bne(&done);
1592
1593 bind(&trap_div_unrepresentable);
1594 mov(dst.gp(), Operand(0));
1595 bind(&done);
1596 return true;
1597}
1598
1599bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
1600 LiftoffRegister rhs,
1601 Label* trap_div_by_zero) {
1602 CmpS64(rhs.gp(), Operand::Zero(), r0);
1603 beq(trap_div_by_zero);
1604 ModU64(dst.gp(), lhs.gp(), rhs.gp());
1605 return true;
1606}
1607
1609 LiftoffRegister dst,
1610 LiftoffRegister src, Label* trap) {
1611 switch (opcode) {
1612 case kExprI32ConvertI64:
1613 extsw(dst.gp(), src.gp());
1614 return true;
1615 case kExprI64SConvertI32:
1616 extsw(dst.gp(), src.gp());
1617 return true;
1618 case kExprI64UConvertI32:
1619 ZeroExtWord32(dst.gp(), src.gp());
1620 return true;
1621 case kExprF32ConvertF64:
1622 frsp(dst.fp(), src.fp());
1623 return true;
1624 case kExprF64ConvertF32:
1625 fmr(dst.fp(), src.fp());
1626 return true;
1627 case kExprF32SConvertI32: {
1628 ConvertIntToFloat(src.gp(), dst.fp());
1629 return true;
1630 }
1631 case kExprF32UConvertI32: {
1632 ConvertUnsignedIntToFloat(src.gp(), dst.fp());
1633 return true;
1634 }
1635 case kExprF64SConvertI32: {
1636 ConvertIntToDouble(src.gp(), dst.fp());
1637 return true;
1638 }
1639 case kExprF64UConvertI32: {
1640 ConvertUnsignedIntToDouble(src.gp(), dst.fp());
1641 return true;
1642 }
1643 case kExprF64SConvertI64: {
1644 ConvertInt64ToDouble(src.gp(), dst.fp());
1645 return true;
1646 }
1647 case kExprF64UConvertI64: {
1648 ConvertUnsignedInt64ToDouble(src.gp(), dst.fp());
1649 return true;
1650 }
1651 case kExprF32SConvertI64: {
1652 ConvertInt64ToFloat(src.gp(), dst.fp());
1653 return true;
1654 }
1655 case kExprF32UConvertI64: {
1656 ConvertUnsignedInt64ToFloat(src.gp(), dst.fp());
1657 return true;
1658 }
1659 case kExprI32SConvertF64:
1660 case kExprI32SConvertF32: {
1661 LoadDoubleLiteral(kScratchDoubleReg, base::Double(0.0), r0);
1662 fcmpu(src.fp(), kScratchDoubleReg);
1664
1665 mtfsb0(VXCVI); // clear FPSCR:VXCVI bit
1666 fctiwz(kScratchDoubleReg, src.fp());
1668 mcrfs(cr0, VXCVI);
1669 boverflow(trap, cr0);
1670 return true;
1671 }
1672 case kExprI32UConvertF64:
1673 case kExprI32UConvertF32: {
1674 mtfsb0(VXCVI); // clear FPSCR:VXCVI bit
1676 kRoundToZero);
1677 mcrfs(cr0, VXCVI); // extract FPSCR field containing VXCVI into cr0
1678 boverflow(trap, cr0);
1679 ZeroExtWord32(dst.gp(), r0);
1680 CmpU64(dst.gp(), r0);
1681 bne(trap);
1682 return true;
1683 }
1684 case kExprI64SConvertF64:
1685 case kExprI64SConvertF32: {
1686 LoadDoubleLiteral(kScratchDoubleReg, base::Double(0.0), r0);
1687 fcmpu(src.fp(), kScratchDoubleReg);
1689
1690 mtfsb0(VXCVI); // clear FPSCR:VXCVI bit
1691 fctidz(kScratchDoubleReg, src.fp());
1693 mcrfs(cr0, VXCVI);
1694 boverflow(trap, cr0);
1695 return true;
1696 }
1697 case kExprI64UConvertF64:
1698 case kExprI64UConvertF32: {
1699 LoadDoubleLiteral(kScratchDoubleReg, base::Double(0.0), r0);
1700 fcmpu(src.fp(), kScratchDoubleReg);
1702
1703 mtfsb0(VXCVI); // clear FPSCR:VXCVI bit
1704 fctiduz(kScratchDoubleReg, src.fp());
1706 mcrfs(cr0, VXCVI);
1707 boverflow(trap, cr0);
1708 return true;
1709 }
1710 case kExprI32SConvertSatF64:
1711 case kExprI32SConvertSatF32: {
1712 Label done, src_is_nan;
1713 LoadDoubleLiteral(kScratchDoubleReg, base::Double(0.0), r0);
1714 fcmpu(src.fp(), kScratchDoubleReg);
1715 bunordered(&src_is_nan);
1716
1717 mtfsb0(VXCVI); // clear FPSCR:VXCVI bit
1718 fctiwz(kScratchDoubleReg, src.fp());
1720 b(&done);
1721
1722 bind(&src_is_nan);
1723 mov(dst.gp(), Operand::Zero());
1724
1725 bind(&done);
1726 return true;
1727 }
1728 case kExprI32UConvertSatF64:
1729 case kExprI32UConvertSatF32: {
1730 Label done, src_is_nan;
1731 LoadDoubleLiteral(kScratchDoubleReg, base::Double(0.0), r0);
1732 fcmpu(src.fp(), kScratchDoubleReg);
1733 bunordered(&src_is_nan);
1734
1735 mtfsb0(VXCVI); // clear FPSCR:VXCVI bit
1736 fctiwuz(kScratchDoubleReg, src.fp());
1738 b(&done);
1739
1740 bind(&src_is_nan);
1741 mov(dst.gp(), Operand::Zero());
1742
1743 bind(&done);
1744 return true;
1745 }
1746 case kExprI64SConvertSatF64:
1747 case kExprI64SConvertSatF32: {
1748 Label done, src_is_nan;
1749 LoadDoubleLiteral(kScratchDoubleReg, base::Double(0.0), r0);
1750 fcmpu(src.fp(), kScratchDoubleReg);
1751 bunordered(&src_is_nan);
1752
1753 mtfsb0(VXCVI); // clear FPSCR:VXCVI bit
1754 fctidz(kScratchDoubleReg, src.fp());
1756 b(&done);
1757
1758 bind(&src_is_nan);
1759 mov(dst.gp(), Operand::Zero());
1760
1761 bind(&done);
1762 return true;
1763 }
1764 case kExprI64UConvertSatF64:
1765 case kExprI64UConvertSatF32: {
1766 Label done, src_is_nan;
1767 LoadDoubleLiteral(kScratchDoubleReg, base::Double(0.0), r0);
1768 fcmpu(src.fp(), kScratchDoubleReg);
1769 bunordered(&src_is_nan);
1770
1771 mtfsb0(VXCVI); // clear FPSCR:VXCVI bit
1772 fctiduz(kScratchDoubleReg, src.fp());
1774 b(&done);
1775
1776 bind(&src_is_nan);
1777 mov(dst.gp(), Operand::Zero());
1778
1779 bind(&done);
1780 return true;
1781 }
1782 case kExprI32ReinterpretF32: {
1783 MovFloatToInt(dst.gp(), src.fp(), kScratchDoubleReg);
1784 return true;
1785 }
1786 case kExprI64ReinterpretF64: {
1787 MovDoubleToInt64(dst.gp(), src.fp());
1788 return true;
1789 }
1790 case kExprF32ReinterpretI32: {
1791 MovIntToFloat(dst.fp(), src.gp(), r0);
1792 return true;
1793 }
1794 case kExprF64ReinterpretI64: {
1795 MovInt64ToDouble(dst.fp(), src.gp());
1796 return true;
1797 }
1798 default:
1799 UNREACHABLE();
1800 }
1801}
1802
1803void LiftoffAssembler::emit_jump(Label* label) { b(al, label); }
1804
1805void LiftoffAssembler::emit_jump(Register target) { Jump(target); }
1806
1808 ValueKind kind, Register lhs,
1809 Register rhs,
1810 const FreezeCacheState& frozen) {
1811 bool use_signed = is_signed(cond);
1812
1813 if (rhs != no_reg) {
1814 switch (kind) {
1815 case kI32:
1816 if (use_signed) {
1817 CmpS32(lhs, rhs);
1818 } else {
1819 CmpU32(lhs, rhs);
1820 }
1821 break;
1822 case kRef:
1823 case kRefNull:
1824 DCHECK(cond == kEqual || cond == kNotEqual);
1825#if defined(V8_COMPRESS_POINTERS)
1826 if (use_signed) {
1827 CmpS32(lhs, rhs);
1828 } else {
1829 CmpU32(lhs, rhs);
1830 }
1831#else
1832 if (use_signed) {
1833 CmpS64(lhs, rhs);
1834 } else {
1835 CmpU64(lhs, rhs);
1836 }
1837#endif
1838 break;
1839 case kI64:
1840 if (use_signed) {
1841 CmpS64(lhs, rhs);
1842 } else {
1843 CmpU64(lhs, rhs);
1844 }
1845 break;
1846 default:
1847 UNREACHABLE();
1848 }
1849 } else {
1851 CHECK(use_signed);
1852 CmpS32(lhs, Operand::Zero(), r0);
1853 }
1854
1855 b(to_condition(cond), label);
1856}
1857
1859 Register lhs, int32_t imm,
1860 const FreezeCacheState& frozen) {
1861 bool use_signed = is_signed(cond);
1862 if (use_signed) {
1863 CmpS32(lhs, Operand(imm), r0);
1864 } else {
1865 CmpU32(lhs, Operand(imm), r0);
1866 }
1867 b(to_condition(cond), label);
1868}
1869
1871 Register lhs, int32_t imm,
1872 const FreezeCacheState& frozen) {
1873 bool use_signed = is_signed(cond);
1874 if (use_signed) {
1875 CmpS64(lhs, Operand(imm), r0);
1876 } else {
1877 CmpU64(lhs, Operand(imm), r0);
1878 }
1879 b(to_condition(cond), label);
1880}
1881
1882void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
1883 Label done;
1884 CmpS32(src, Operand(0), r0);
1885 mov(dst, Operand(1));
1886 beq(&done);
1887 mov(dst, Operand::Zero());
1888 bind(&done);
1889}
1890
1891void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
1892 Register lhs, Register rhs) {
1893 bool use_signed = is_signed(cond);
1894 if (use_signed) {
1895 CmpS32(lhs, rhs);
1896 } else {
1897 CmpU32(lhs, rhs);
1898 }
1899 Label done;
1900 mov(dst, Operand(1));
1901 b(to_condition(to_condition(cond)), &done);
1902 mov(dst, Operand::Zero());
1903 bind(&done);
1904}
1905
1906void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
1907 Label done;
1908 cmpi(src.gp(), Operand(0));
1909 mov(dst, Operand(1));
1910 beq(&done);
1911 mov(dst, Operand::Zero());
1912 bind(&done);
1913}
1914
1915void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
1916 LiftoffRegister lhs,
1917 LiftoffRegister rhs) {
1918 bool use_signed = is_signed(cond);
1919 if (use_signed) {
1920 CmpS64(lhs.gp(), rhs.gp());
1921 } else {
1922 CmpU64(lhs.gp(), rhs.gp());
1923 }
1924 Label done;
1925 mov(dst, Operand(1));
1926 b(to_condition(to_condition(cond)), &done);
1927 mov(dst, Operand::Zero());
1928 bind(&done);
1929}
1930
1931void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
1932 DoubleRegister lhs,
1933 DoubleRegister rhs) {
1934 fcmpu(lhs, rhs, cr0);
1935 Label nan, done;
1936 bunordered(&nan, cr0);
1937 mov(dst, Operand::Zero());
1938 b(NegateCondition(to_condition(to_condition(cond))), &done, cr0);
1939 mov(dst, Operand(1));
1940 b(&done);
1941 bind(&nan);
1942 if (cond == kNotEqual) {
1943 mov(dst, Operand(1));
1944 } else {
1945 mov(dst, Operand::Zero());
1946 }
1947 bind(&done);
1948}
1949
1950void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
1951 DoubleRegister lhs,
1952 DoubleRegister rhs) {
1953 emit_f32_set_cond(to_condition(cond), dst, lhs, rhs);
1954}
1955
1956void LiftoffAssembler::emit_i64_muli(LiftoffRegister dst, LiftoffRegister lhs,
1957 int32_t imm) {
1958 if (base::bits::IsPowerOfTwo(imm)) {
1960 return;
1961 }
1962 // TODO(miladfarca): Try to use mulli once simulator supports it.
1963 mov(r0, Operand(imm));
1964 MulS64(dst.gp(), lhs.gp(), r0);
1965}
1966
1967bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
1968 LiftoffRegister true_value,
1969 LiftoffRegister false_value,
1970 ValueKind kind) {
1971 return false;
1972}
1973
1974void LiftoffAssembler::clear_i32_upper_half(Register dst) {
1975 ZeroExtWord32(dst, dst);
1976}
1977
1978#define SIMD_BINOP_LIST(V) \
1979 V(f64x2_add, F64x2Add) \
1980 V(f64x2_sub, F64x2Sub) \
1981 V(f64x2_mul, F64x2Mul) \
1982 V(f64x2_div, F64x2Div) \
1983 V(f64x2_eq, F64x2Eq) \
1984 V(f64x2_lt, F64x2Lt) \
1985 V(f64x2_le, F64x2Le) \
1986 V(f32x4_add, F32x4Add) \
1987 V(f32x4_sub, F32x4Sub) \
1988 V(f32x4_mul, F32x4Mul) \
1989 V(f32x4_div, F32x4Div) \
1990 V(f32x4_min, F32x4Min) \
1991 V(f32x4_max, F32x4Max) \
1992 V(f32x4_eq, F32x4Eq) \
1993 V(f32x4_lt, F32x4Lt) \
1994 V(f32x4_le, F32x4Le) \
1995 V(i64x2_add, I64x2Add) \
1996 V(i64x2_sub, I64x2Sub) \
1997 V(i64x2_eq, I64x2Eq) \
1998 V(i64x2_gt_s, I64x2GtS) \
1999 V(i32x4_add, I32x4Add) \
2000 V(i32x4_sub, I32x4Sub) \
2001 V(i32x4_mul, I32x4Mul) \
2002 V(i32x4_min_s, I32x4MinS) \
2003 V(i32x4_min_u, I32x4MinU) \
2004 V(i32x4_max_s, I32x4MaxS) \
2005 V(i32x4_max_u, I32x4MaxU) \
2006 V(i32x4_eq, I32x4Eq) \
2007 V(i32x4_gt_s, I32x4GtS) \
2008 V(i32x4_gt_u, I32x4GtU) \
2009 V(i32x4_dot_i16x8_s, I32x4DotI16x8S) \
2010 V(i16x8_add, I16x8Add) \
2011 V(i16x8_sub, I16x8Sub) \
2012 V(i16x8_mul, I16x8Mul) \
2013 V(i16x8_min_s, I16x8MinS) \
2014 V(i16x8_min_u, I16x8MinU) \
2015 V(i16x8_max_s, I16x8MaxS) \
2016 V(i16x8_max_u, I16x8MaxU) \
2017 V(i16x8_eq, I16x8Eq) \
2018 V(i16x8_gt_s, I16x8GtS) \
2019 V(i16x8_gt_u, I16x8GtU) \
2020 V(i16x8_add_sat_s, I16x8AddSatS) \
2021 V(i16x8_sub_sat_s, I16x8SubSatS) \
2022 V(i16x8_add_sat_u, I16x8AddSatU) \
2023 V(i16x8_sub_sat_u, I16x8SubSatU) \
2024 V(i16x8_sconvert_i32x4, I16x8SConvertI32x4) \
2025 V(i16x8_uconvert_i32x4, I16x8UConvertI32x4) \
2026 V(i16x8_rounding_average_u, I16x8RoundingAverageU) \
2027 V(i16x8_q15mulr_sat_s, I16x8Q15MulRSatS) \
2028 V(i8x16_add, I8x16Add) \
2029 V(i8x16_sub, I8x16Sub) \
2030 V(i8x16_min_s, I8x16MinS) \
2031 V(i8x16_min_u, I8x16MinU) \
2032 V(i8x16_max_s, I8x16MaxS) \
2033 V(i8x16_max_u, I8x16MaxU) \
2034 V(i8x16_eq, I8x16Eq) \
2035 V(i8x16_gt_s, I8x16GtS) \
2036 V(i8x16_gt_u, I8x16GtU) \
2037 V(i8x16_add_sat_s, I8x16AddSatS) \
2038 V(i8x16_sub_sat_s, I8x16SubSatS) \
2039 V(i8x16_add_sat_u, I8x16AddSatU) \
2040 V(i8x16_sub_sat_u, I8x16SubSatU) \
2041 V(i8x16_sconvert_i16x8, I8x16SConvertI16x8) \
2042 V(i8x16_uconvert_i16x8, I8x16UConvertI16x8) \
2043 V(i8x16_rounding_average_u, I8x16RoundingAverageU) \
2044 V(s128_and, S128And) \
2045 V(s128_or, S128Or) \
2046 V(s128_xor, S128Xor) \
2047 V(s128_and_not, S128AndNot)
2048
2049#define EMIT_SIMD_BINOP(name, op) \
2050 void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
2051 LiftoffRegister rhs) { \
2052 op(dst.fp().toSimd(), lhs.fp().toSimd(), rhs.fp().toSimd()); \
2053 }
2054SIMD_BINOP_LIST(EMIT_SIMD_BINOP)
2055#undef EMIT_SIMD_BINOP
2056#undef SIMD_BINOP_LIST
2057
2058#define SIMD_BINOP_WITH_SCRATCH_LIST(V) \
2059 V(f64x2_ne, F64x2Ne) \
2060 V(f64x2_pmin, F64x2Pmin) \
2061 V(f64x2_pmax, F64x2Pmax) \
2062 V(f32x4_ne, F32x4Ne) \
2063 V(f32x4_pmin, F32x4Pmin) \
2064 V(f32x4_pmax, F32x4Pmax) \
2065 V(i64x2_ne, I64x2Ne) \
2066 V(i64x2_ge_s, I64x2GeS) \
2067 V(i64x2_extmul_low_i32x4_s, I64x2ExtMulLowI32x4S) \
2068 V(i64x2_extmul_low_i32x4_u, I64x2ExtMulLowI32x4U) \
2069 V(i64x2_extmul_high_i32x4_s, I64x2ExtMulHighI32x4S) \
2070 V(i64x2_extmul_high_i32x4_u, I64x2ExtMulHighI32x4U) \
2071 V(i32x4_ne, I32x4Ne) \
2072 V(i32x4_ge_s, I32x4GeS) \
2073 V(i32x4_ge_u, I32x4GeU) \
2074 V(i32x4_extmul_low_i16x8_s, I32x4ExtMulLowI16x8S) \
2075 V(i32x4_extmul_low_i16x8_u, I32x4ExtMulLowI16x8U) \
2076 V(i32x4_extmul_high_i16x8_s, I32x4ExtMulHighI16x8S) \
2077 V(i32x4_extmul_high_i16x8_u, I32x4ExtMulHighI16x8U) \
2078 V(i16x8_ne, I16x8Ne) \
2079 V(i16x8_ge_s, I16x8GeS) \
2080 V(i16x8_ge_u, I16x8GeU) \
2081 V(i16x8_extmul_low_i8x16_s, I16x8ExtMulLowI8x16S) \
2082 V(i16x8_extmul_low_i8x16_u, I16x8ExtMulLowI8x16U) \
2083 V(i16x8_extmul_high_i8x16_s, I16x8ExtMulHighI8x16S) \
2084 V(i16x8_extmul_high_i8x16_u, I16x8ExtMulHighI8x16U) \
2085 V(i16x8_dot_i8x16_i7x16_s, I16x8DotI8x16S) \
2086 V(i8x16_ne, I8x16Ne) \
2087 V(i8x16_ge_s, I8x16GeS) \
2088 V(i8x16_ge_u, I8x16GeU) \
2089 V(i8x16_swizzle, I8x16Swizzle)
2090
2091#define EMIT_SIMD_BINOP_WITH_SCRATCH(name, op) \
2092 void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
2093 LiftoffRegister rhs) { \
2094 op(dst.fp().toSimd(), lhs.fp().toSimd(), rhs.fp().toSimd(), \
2095 kScratchSimd128Reg); \
2096 }
2097SIMD_BINOP_WITH_SCRATCH_LIST(EMIT_SIMD_BINOP_WITH_SCRATCH)
2098#undef EMIT_SIMD_BINOP_WITH_SCRATCH
2099#undef SIMD_BINOP_WITH_SCRATCH_LIST
2100
2101#define SIMD_SHIFT_RR_LIST(V) \
2102 V(i64x2_shl, I64x2Shl) \
2103 V(i64x2_shr_s, I64x2ShrS) \
2104 V(i64x2_shr_u, I64x2ShrU) \
2105 V(i32x4_shl, I32x4Shl) \
2106 V(i32x4_shr_s, I32x4ShrS) \
2107 V(i32x4_shr_u, I32x4ShrU) \
2108 V(i16x8_shl, I16x8Shl) \
2109 V(i16x8_shr_s, I16x8ShrS) \
2110 V(i16x8_shr_u, I16x8ShrU) \
2111 V(i8x16_shl, I8x16Shl) \
2112 V(i8x16_shr_s, I8x16ShrS) \
2113 V(i8x16_shr_u, I8x16ShrU)
2114
2115#define EMIT_SIMD_SHIFT_RR(name, op) \
2116 void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
2117 LiftoffRegister rhs) { \
2118 op(dst.fp().toSimd(), lhs.fp().toSimd(), rhs.gp(), kScratchSimd128Reg); \
2119 }
2120SIMD_SHIFT_RR_LIST(EMIT_SIMD_SHIFT_RR)
2121#undef EMIT_SIMD_SHIFT_RR
2122#undef SIMD_SHIFT_RR_LIST
2123
2124#define SIMD_SHIFT_RI_LIST(V) \
2125 V(i64x2_shli, I64x2Shl, 63) \
2126 V(i64x2_shri_s, I64x2ShrS, 63) \
2127 V(i64x2_shri_u, I64x2ShrU, 63) \
2128 V(i32x4_shli, I32x4Shl, 31) \
2129 V(i32x4_shri_s, I32x4ShrS, 31) \
2130 V(i32x4_shri_u, I32x4ShrU, 31) \
2131 V(i16x8_shli, I16x8Shl, 15) \
2132 V(i16x8_shri_s, I16x8ShrS, 15) \
2133 V(i16x8_shri_u, I16x8ShrU, 15) \
2134 V(i8x16_shli, I8x16Shl, 7) \
2135 V(i8x16_shri_s, I8x16ShrS, 7) \
2136 V(i8x16_shri_u, I8x16ShrU, 7)
2137
2138#define EMIT_SIMD_SHIFT_RI(name, op, mask) \
2139 void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
2140 int32_t rhs) { \
2141 op(dst.fp().toSimd(), lhs.fp().toSimd(), Operand(rhs & mask), r0, \
2142 kScratchSimd128Reg); \
2143 }
2144SIMD_SHIFT_RI_LIST(EMIT_SIMD_SHIFT_RI)
2145#undef EMIT_SIMD_SHIFT_RI
2146#undef SIMD_SHIFT_RI_LIST
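// Illustrative sketch, not part of the original header: the immediate masks in
// the list above (63, 31, 15, 7) implement the wasm rule that SIMD shift
// counts are taken modulo the lane width in bits:
static_assert((35 & 31) == 3, "an i32x4 shift by 35 shifts each lane by 3");
static_assert((64 & 63) == 0, "an i64x2 shift by 64 leaves each lane unchanged");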
2147
2148#define SIMD_UNOP_LIST(V) \
2149 V(f64x2_abs, F64x2Abs, , void) \
2150 V(f64x2_neg, F64x2Neg, , void) \
2151 V(f64x2_sqrt, F64x2Sqrt, , void) \
2152 V(f64x2_ceil, F64x2Ceil, true, bool) \
2153 V(f64x2_floor, F64x2Floor, true, bool) \
2154 V(f64x2_trunc, F64x2Trunc, true, bool) \
2155 V(f64x2_promote_low_f32x4, F64x2PromoteLowF32x4, , void) \
2156 V(f32x4_abs, F32x4Abs, , void) \
2157 V(f32x4_neg, F32x4Neg, , void) \
2158 V(f32x4_sqrt, F32x4Sqrt, , void) \
2159 V(f32x4_ceil, F32x4Ceil, true, bool) \
2160 V(f32x4_floor, F32x4Floor, true, bool) \
2161 V(f32x4_trunc, F32x4Trunc, true, bool) \
2162 V(f32x4_sconvert_i32x4, F32x4SConvertI32x4, , void) \
2163 V(f32x4_uconvert_i32x4, F32x4UConvertI32x4, , void) \
2164 V(i64x2_neg, I64x2Neg, , void) \
2165 V(f64x2_convert_low_i32x4_s, F64x2ConvertLowI32x4S, , void) \
2166 V(i64x2_sconvert_i32x4_low, I64x2SConvertI32x4Low, , void) \
2167 V(i64x2_sconvert_i32x4_high, I64x2SConvertI32x4High, , void) \
2168 V(i32x4_neg, I32x4Neg, , void) \
2169 V(i32x4_sconvert_i16x8_low, I32x4SConvertI16x8Low, , void) \
2170 V(i32x4_sconvert_i16x8_high, I32x4SConvertI16x8High, , void) \
2171 V(i32x4_uconvert_f32x4, I32x4UConvertF32x4, , void) \
2172 V(i16x8_sconvert_i8x16_low, I16x8SConvertI8x16Low, , void) \
2173 V(i16x8_sconvert_i8x16_high, I16x8SConvertI8x16High, , void) \
2174 V(i8x16_popcnt, I8x16Popcnt, , void) \
2175 V(s128_not, S128Not, , void)
2176
2177#define EMIT_SIMD_UNOP(name, op, return_val, return_type) \
2178 return_type LiftoffAssembler::emit_##name(LiftoffRegister dst, \
2179 LiftoffRegister src) { \
2180 op(dst.fp().toSimd(), src.fp().toSimd()); \
2181 return return_val; \
2182 }
2183SIMD_UNOP_LIST(EMIT_SIMD_UNOP)
2184#undef EMIT_SIMD_UNOP
2185#undef SIMD_UNOP_LIST
2186
2187#define SIMD_UNOP_WITH_SCRATCH_LIST(V) \
2188 V(f32x4_demote_f64x2_zero, F32x4DemoteF64x2Zero, , void) \
2189 V(i64x2_abs, I64x2Abs, , void) \
2190 V(i32x4_abs, I32x4Abs, , void) \
2191 V(i32x4_sconvert_f32x4, I32x4SConvertF32x4, , void) \
2192 V(i32x4_trunc_sat_f64x2_s_zero, I32x4TruncSatF64x2SZero, , void) \
2193 V(i32x4_trunc_sat_f64x2_u_zero, I32x4TruncSatF64x2UZero, , void) \
2194 V(i16x8_abs, I16x8Abs, , void) \
2195 V(i16x8_neg, I16x8Neg, , void) \
2196 V(i8x16_abs, I8x16Abs, , void) \
2197 V(i8x16_neg, I8x16Neg, , void)
2198
2199#define EMIT_SIMD_UNOP_WITH_SCRATCH(name, op, return_val, return_type) \
2200 return_type LiftoffAssembler::emit_##name(LiftoffRegister dst, \
2201 LiftoffRegister src) { \
2202 op(dst.fp().toSimd(), src.fp().toSimd(), kScratchSimd128Reg); \
2203 return return_val; \
2204 }
2205SIMD_UNOP_WITH_SCRATCH_LIST(EMIT_SIMD_UNOP_WITH_SCRATCH)
2206#undef EMIT_SIMD_UNOP_WITH_SCRATCH
2207#undef SIMD_UNOP_WITH_SCRATCH_LIST
2208
2209#define SIMD_ALL_TRUE_LIST(V) \
2210 V(i64x2_alltrue, I64x2AllTrue) \
2211 V(i32x4_alltrue, I32x4AllTrue) \
2212 V(i16x8_alltrue, I16x8AllTrue) \
2213 V(i8x16_alltrue, I8x16AllTrue)
2214#define EMIT_SIMD_ALL_TRUE(name, op) \
2215 void LiftoffAssembler::emit_##name(LiftoffRegister dst, \
2216 LiftoffRegister src) { \
2217 op(dst.gp(), src.fp().toSimd(), r0, ip, kScratchSimd128Reg); \
2218 }
2219SIMD_ALL_TRUE_LIST(EMIT_SIMD_ALL_TRUE)
2220#undef EMIT_SIMD_ALL_TRUE
2221#undef SIMD_ALL_TRUE_LIST
2222
2223#define SIMD_QFM_LIST(V) \
2224 V(f64x2_qfma, F64x2Qfma) \
2225 V(f64x2_qfms, F64x2Qfms) \
2226 V(f32x4_qfma, F32x4Qfma) \
2227 V(f32x4_qfms, F32x4Qfms)
2228
2229#define EMIT_SIMD_QFM(name, op) \
2230 void LiftoffAssembler::emit_##name( \
2231 LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, \
2232 LiftoffRegister src3) { \
2233 op(dst.fp().toSimd(), src1.fp().toSimd(), src2.fp().toSimd(), \
2234 src3.fp().toSimd(), kScratchSimd128Reg); \
2235 }
2236SIMD_QFM_LIST(EMIT_SIMD_QFM)
2237#undef EMIT_SIMD_QFM
2238#undef SIMD_QFM_LIST
2239
2240#define SIMD_EXT_ADD_PAIRWISE_LIST(V) \
2241 V(i32x4_extadd_pairwise_i16x8_s, I32x4ExtAddPairwiseI16x8S) \
2242 V(i32x4_extadd_pairwise_i16x8_u, I32x4ExtAddPairwiseI16x8U) \
2243 V(i16x8_extadd_pairwise_i8x16_s, I16x8ExtAddPairwiseI8x16S) \
2244 V(i16x8_extadd_pairwise_i8x16_u, I16x8ExtAddPairwiseI8x16U)
2245#define EMIT_SIMD_EXT_ADD_PAIRWISE(name, op) \
2246 void LiftoffAssembler::emit_##name(LiftoffRegister dst, \
2247 LiftoffRegister src) { \
2248 op(dst.fp().toSimd(), src.fp().toSimd(), kScratchSimd128Reg, \
2249 kScratchSimd128Reg2); \
2250 }
2251SIMD_EXT_ADD_PAIRWISE_LIST(EMIT_SIMD_EXT_ADD_PAIRWISE)
2252#undef EMIT_SIMD_EXT_ADD_PAIRWISE
2253#undef SIMD_EXT_ADD_PAIRWISE_LIST
2254
2255#define SIMD_RELAXED_BINOP_LIST(V) \
2256 V(i8x16_relaxed_swizzle, i8x16_swizzle) \
2257 V(f64x2_relaxed_min, f64x2_pmin) \
2258 V(f64x2_relaxed_max, f64x2_pmax) \
2259 V(f32x4_relaxed_min, f32x4_pmin) \
2260 V(f32x4_relaxed_max, f32x4_pmax) \
2261 V(i16x8_relaxed_q15mulr_s, i16x8_q15mulr_sat_s)
2262
2263#define SIMD_VISIT_RELAXED_BINOP(name, op) \
2264 void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
2265 LiftoffRegister rhs) { \
2266 emit_##op(dst, lhs, rhs); \
2267 }
2268SIMD_RELAXED_BINOP_LIST(SIMD_VISIT_RELAXED_BINOP)
2269#undef SIMD_VISIT_RELAXED_BINOP
2270#undef SIMD_RELAXED_BINOP_LIST
2271
2272#define SIMD_RELAXED_UNOP_LIST(V) \
2273 V(i32x4_relaxed_trunc_f32x4_s, i32x4_sconvert_f32x4) \
2274 V(i32x4_relaxed_trunc_f32x4_u, i32x4_uconvert_f32x4) \
2275 V(i32x4_relaxed_trunc_f64x2_s_zero, i32x4_trunc_sat_f64x2_s_zero) \
2276 V(i32x4_relaxed_trunc_f64x2_u_zero, i32x4_trunc_sat_f64x2_u_zero)
2277
2278#define SIMD_VISIT_RELAXED_UNOP(name, op) \
2279 void LiftoffAssembler::emit_##name(LiftoffRegister dst, \
2280 LiftoffRegister src) { \
2281 emit_##op(dst, src); \
2282 }
2283SIMD_RELAXED_UNOP_LIST(SIMD_VISIT_RELAXED_UNOP)
2284#undef SIMD_VISIT_RELAXED_UNOP
2285#undef SIMD_RELAXED_UNOP_LIST
2286
2287#define F16_UNOP_LIST(V) \
2288 V(f16x8_splat) \
2289 V(f16x8_abs) \
2290 V(f16x8_neg) \
2291 V(f16x8_sqrt) \
2292 V(f16x8_ceil) \
2293 V(f16x8_floor) \
2294 V(f16x8_trunc) \
2295 V(f16x8_nearest_int) \
2296 V(i16x8_sconvert_f16x8) \
2297 V(i16x8_uconvert_f16x8) \
2298 V(f16x8_sconvert_i16x8) \
2299 V(f16x8_uconvert_i16x8) \
2300 V(f16x8_demote_f32x4_zero) \
2301 V(f32x4_promote_low_f16x8) \
2302 V(f16x8_demote_f64x2_zero)
2303
2304#define VISIT_F16_UNOP(name) \
2305 bool LiftoffAssembler::emit_##name(LiftoffRegister dst, \
2306 LiftoffRegister src) { \
2307 return false; \
2308 }
2309F16_UNOP_LIST(VISIT_F16_UNOP)
2310#undef VISIT_F16_UNOP
2311#undef F16_UNOP_LIST
2312
2313#define F16_BINOP_LIST(V) \
2314 V(f16x8_eq) \
2315 V(f16x8_ne) \
2316 V(f16x8_lt) \
2317 V(f16x8_le) \
2318 V(f16x8_add) \
2319 V(f16x8_sub) \
2320 V(f16x8_mul) \
2321 V(f16x8_div) \
2322 V(f16x8_min) \
2323 V(f16x8_max) \
2324 V(f16x8_pmin) \
2325 V(f16x8_pmax)
2326
2327#define VISIT_F16_BINOP(name) \
2328 bool LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
2329 LiftoffRegister rhs) { \
2330 return false; \
2331 }
2332F16_BINOP_LIST(VISIT_F16_BINOP)
2333#undef VISIT_F16_BINOP
2334#undef F16_BINOP_LIST
2335
2336bool LiftoffAssembler::emit_f16x8_extract_lane(LiftoffRegister dst,
2337 LiftoffRegister lhs,
2338 uint8_t imm_lane_idx) {
2339 return false;
2340}
2341
2342bool LiftoffAssembler::emit_f16x8_replace_lane(LiftoffRegister dst,
2343 LiftoffRegister src1,
2344 LiftoffRegister src2,
2345 uint8_t imm_lane_idx) {
2346 return false;
2347}
2348
2349bool LiftoffAssembler::emit_f16x8_qfma(LiftoffRegister dst,
2350 LiftoffRegister src1,
2351 LiftoffRegister src2,
2352 LiftoffRegister src3) {
2353 return false;
2354}
2355
2356bool LiftoffAssembler::emit_f16x8_qfms(LiftoffRegister dst,
2357 LiftoffRegister src1,
2358 LiftoffRegister src2,
2359 LiftoffRegister src3) {
2360 return false;
2361}
2362
2363bool LiftoffAssembler::supports_f16_mem_access() { return false; }
2364
2365void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
2366 LiftoffRegister src) {
2367 F64x2Splat(dst.fp().toSimd(), src.fp(), r0);
2368}
2369
2370void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
2371 LiftoffRegister src) {
2372 F32x4Splat(dst.fp().toSimd(), src.fp(), kScratchDoubleReg, r0);
2373}
2374
2375void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
2376 LiftoffRegister src) {
2377 I64x2Splat(dst.fp().toSimd(), src.gp());
2378}
2379
2380void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
2381 LiftoffRegister src) {
2382 I32x4Splat(dst.fp().toSimd(), src.gp());
2383}
2384
2385void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
2386 LiftoffRegister src) {
2387 I16x8Splat(dst.fp().toSimd(), src.gp());
2388}
2389
2390void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
2391 LiftoffRegister src) {
2392 I8x16Splat(dst.fp().toSimd(), src.gp());
2393}
2394
2395void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
2396 LiftoffRegister lhs,
2397 uint8_t imm_lane_idx) {
2398 F64x2ExtractLane(dst.fp(), lhs.fp().toSimd(), imm_lane_idx,
2399 kScratchSimd128Reg, r0);
2400}
2401
2402void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
2403 LiftoffRegister lhs,
2404 uint8_t imm_lane_idx) {
2405 F32x4ExtractLane(dst.fp(), lhs.fp().toSimd(), imm_lane_idx,
2406 kScratchSimd128Reg, r0, ip);
2407}
2408
2409void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
2410 LiftoffRegister lhs,
2411 uint8_t imm_lane_idx) {
2412 I64x2ExtractLane(dst.gp(), lhs.fp().toSimd(), imm_lane_idx,
2413 kScratchSimd128Reg);
2414}
2415
2416void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
2417 LiftoffRegister lhs,
2418 uint8_t imm_lane_idx) {
2419 I32x4ExtractLane(dst.gp(), lhs.fp().toSimd(), imm_lane_idx,
2420 kScratchSimd128Reg);
2421}
2422
2423void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
2424 LiftoffRegister lhs,
2425 uint8_t imm_lane_idx) {
2426 I16x8ExtractLaneU(dst.gp(), lhs.fp().toSimd(), imm_lane_idx,
2427 kScratchSimd128Reg);
2428}
2429
2430void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
2431 LiftoffRegister lhs,
2432 uint8_t imm_lane_idx) {
2433 I16x8ExtractLaneS(dst.gp(), lhs.fp().toSimd(), imm_lane_idx,
2434 kScratchSimd128Reg);
2435}
2436
2437void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
2438 LiftoffRegister lhs,
2439 uint8_t imm_lane_idx) {
2440 I8x16ExtractLaneU(dst.gp(), lhs.fp().toSimd(), imm_lane_idx,
2441 kScratchSimd128Reg);
2442}
2443
2444void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
2445 LiftoffRegister lhs,
2446 uint8_t imm_lane_idx) {
2447 I8x16ExtractLaneS(dst.gp(), lhs.fp().toSimd(), imm_lane_idx,
2448 kScratchSimd128Reg);
2449}
2450
2451void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
2452 LiftoffRegister src1,
2453 LiftoffRegister src2,
2454 uint8_t imm_lane_idx) {
2455 F64x2ReplaceLane(dst.fp().toSimd(), src1.fp().toSimd(), src2.fp(),
2456 imm_lane_idx, r0, kScratchSimd128Reg);
2457}
2458
2459void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
2460 LiftoffRegister src1,
2461 LiftoffRegister src2,
2462 uint8_t imm_lane_idx) {
2463 F32x4ReplaceLane(dst.fp().toSimd(), src1.fp().toSimd(), src2.fp(),
2464 imm_lane_idx, r0, kScratchDoubleReg, kScratchSimd128Reg);
2465}
2466
2467void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
2468 LiftoffRegister src1,
2469 LiftoffRegister src2,
2470 uint8_t imm_lane_idx) {
2471 I64x2ReplaceLane(dst.fp().toSimd(), src1.fp().toSimd(), src2.gp(),
2472 imm_lane_idx, kScratchSimd128Reg);
2473}
2474
2475void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
2476 LiftoffRegister src1,
2477 LiftoffRegister src2,
2478 uint8_t imm_lane_idx) {
2479 I32x4ReplaceLane(dst.fp().toSimd(), src1.fp().toSimd(), src2.gp(),
2480 imm_lane_idx, kScratchSimd128Reg);
2481}
2482
2483void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
2484 LiftoffRegister src1,
2485 LiftoffRegister src2,
2486 uint8_t imm_lane_idx) {
2487 I16x8ReplaceLane(dst.fp().toSimd(), src1.fp().toSimd(), src2.gp(),
2488 imm_lane_idx, kScratchSimd128Reg);
2489}
2490
2491void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
2492 LiftoffRegister src1,
2493 LiftoffRegister src2,
2494 uint8_t imm_lane_idx) {
2495 I8x16ReplaceLane(dst.fp().toSimd(), src1.fp().toSimd(), src2.gp(),
2496 imm_lane_idx, kScratchSimd128Reg);
2497}
2498
2499void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
2500 LiftoffRegister rhs) {
2501 // TODO(miladfarca): Make use of UseScratchRegisterScope.
2502 Register scratch = GetRegisterThatIsNotOneOf(ip, r0);
2503 push(scratch);
2504 I64x2Mul(dst.fp().toSimd(), lhs.fp().toSimd(), rhs.fp().toSimd(), ip, r0,
2505 scratch, kScratchSimd128Reg);
2506 pop(scratch);
2507}
2508
2509void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
2510 LiftoffRegister rhs) {
2511 F64x2Min(dst.fp().toSimd(), lhs.fp().toSimd(), rhs.fp().toSimd(),
2512 kScratchSimd128Reg, kScratchSimd128Reg2);
2513}
2514
2515void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
2516 LiftoffRegister rhs) {
2517 F64x2Max(dst.fp().toSimd(), lhs.fp().toSimd(), rhs.fp().toSimd(),
2518 kScratchSimd128Reg, kScratchSimd128Reg2);
2519}
2520
2521bool LiftoffAssembler::emit_f64x2_nearest_int(LiftoffRegister dst,
2522 LiftoffRegister src) {
2523 return false;
2524}
2525
2526bool LiftoffAssembler::emit_f32x4_nearest_int(LiftoffRegister dst,
2527 LiftoffRegister src) {
2528 return false;
2529}
2530
2531void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
2532 Register offset_reg, uintptr_t offset_imm,
2533 LoadType type,
2534 LoadTransformationKind transform,
2535 uint32_t* protected_load_pc,
2536 bool i64_offset) {
2537 MemOperand src_op = MemOperand(src_addr, offset_reg, offset_imm);
2538 *protected_load_pc = pc_offset();
2539 MachineType memtype = type.mem_type();
2540 if (transform == LoadTransformationKind::kExtend) {
2541 if (memtype == MachineType::Int8()) {
2542 LoadAndExtend8x8SLE(dst.fp().toSimd(), src_op, r0);
2543 } else if (memtype == MachineType::Uint8()) {
2544 LoadAndExtend8x8ULE(dst.fp().toSimd(), src_op, r0, kScratchSimd128Reg);
2545 } else if (memtype == MachineType::Int16()) {
2546 LoadAndExtend16x4SLE(dst.fp().toSimd(), src_op, r0);
2547 } else if (memtype == MachineType::Uint16()) {
2548 LoadAndExtend16x4ULE(dst.fp().toSimd(), src_op, r0, kScratchSimd128Reg);
2549 } else if (memtype == MachineType::Int32()) {
2550 LoadAndExtend32x2SLE(dst.fp().toSimd(), src_op, r0);
2551 } else if (memtype == MachineType::Uint32()) {
2552 LoadAndExtend32x2ULE(dst.fp().toSimd(), src_op, r0, kScratchSimd128Reg);
2553 }
2554 } else if (transform == LoadTransformationKind::kZeroExtend) {
2555 if (memtype == MachineType::Int32()) {
2556 LoadV32ZeroLE(dst.fp().toSimd(), src_op, r0, kScratchSimd128Reg);
2557 } else {
2558 DCHECK_EQ(MachineType::Int64(), memtype);
2559 LoadV64ZeroLE(dst.fp().toSimd(), src_op, r0, kScratchSimd128Reg);
2560 }
2561 } else {
2562 DCHECK_EQ(LoadTransformationKind::kSplat, transform);
2563 if (memtype == MachineType::Int8()) {
2564 LoadAndSplat8x16LE(dst.fp().toSimd(), src_op, r0);
2565 } else if (memtype == MachineType::Int16()) {
2566 LoadAndSplat16x8LE(dst.fp().toSimd(), src_op, r0);
2567 } else if (memtype == MachineType::Int32()) {
2568 LoadAndSplat32x4LE(dst.fp().toSimd(), src_op, r0);
2569 } else if (memtype == MachineType::Int64()) {
2570 LoadAndSplat64x2LE(dst.fp().toSimd(), src_op, r0);
2571 }
2572 }
2573}
2574
2575void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
2576 SmiCheckMode mode,
2577 const FreezeCacheState& frozen) {
2578 TestIfSmi(obj, r0);
2579 Condition condition = mode == kJumpOnSmi ? eq : ne;
2580 b(condition, target, cr0); // branch if SMI
2581}
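// Illustrative sketch, not part of the original header: V8 tags small integers
// (Smis) with a clear low bit and heap object pointers with the low bit set,
// so TestIfSmi reduces to testing that single bit:
static_assert((0x40 & 1) == 0, "Smi-tagged words have a clear low bit");
static_assert((0x41 & 1) == 1, "heap object pointers carry a set low bit");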
2582
2583void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
2584 Register addr, Register offset_reg,
2585 uintptr_t offset_imm, LoadType type,
2586 uint8_t laneidx, uint32_t* protected_load_pc,
2587 bool i64_offset) {
2588 if (!i64_offset && offset_reg != no_reg) {
2589 ZeroExtWord32(ip, offset_reg);
2590 offset_reg = ip;
2591 }
2592 MemOperand src_op = MemOperand(addr, offset_reg, offset_imm);
2593
2594 MachineType mem_type = type.mem_type();
2595 if (dst != src) {
2596 vor(dst.fp().toSimd(), src.fp().toSimd(), src.fp().toSimd());
2597 }
2598
2599 if (protected_load_pc) *protected_load_pc = pc_offset();
2600 if (mem_type == MachineType::Int8()) {
2601 LoadLane8LE(dst.fp().toSimd(), src_op, laneidx, r0, kScratchSimd128Reg);
2602 } else if (mem_type == MachineType::Int16()) {
2603 LoadLane16LE(dst.fp().toSimd(), src_op, laneidx, r0, kScratchSimd128Reg);
2604 } else if (mem_type == MachineType::Int32()) {
2605 LoadLane32LE(dst.fp().toSimd(), src_op, laneidx, r0, kScratchSimd128Reg);
2606 } else {
2607 DCHECK_EQ(MachineType::Int64(), mem_type);
2608 LoadLane64LE(dst.fp().toSimd(), src_op, laneidx, r0, kScratchSimd128Reg);
2609 }
2610}
2611
2612void LiftoffAssembler::StoreLane(Register dst, Register offset,
2613 uintptr_t offset_imm, LiftoffRegister src,
2614 StoreType type, uint8_t lane,
2615 uint32_t* protected_store_pc,
2616 bool i64_offset) {
2617 if (!i64_offset && offset != no_reg) {
2618 ZeroExtWord32(ip, offset);
2619 offset = ip;
2620 }
2621 MemOperand dst_op = MemOperand(dst, offset, offset_imm);
2622
2623 if (protected_store_pc) *protected_store_pc = pc_offset();
2624
2625 MachineRepresentation rep = type.mem_rep();
2626 if (rep == MachineRepresentation::kWord8) {
2627 StoreLane8LE(src.fp().toSimd(), dst_op, lane, r0, kScratchSimd128Reg);
2628 } else if (rep == MachineRepresentation::kWord16) {
2629 StoreLane16LE(src.fp().toSimd(), dst_op, lane, r0, kScratchSimd128Reg);
2630 } else if (rep == MachineRepresentation::kWord32) {
2631 StoreLane32LE(src.fp().toSimd(), dst_op, lane, r0, kScratchSimd128Reg);
2632 } else {
2633 DCHECK_EQ(MachineRepresentation::kWord64, rep);
2634 StoreLane64LE(src.fp().toSimd(), dst_op, lane, r0, kScratchSimd128Reg);
2635 }
2636}
2637
2638void LiftoffAssembler::emit_s128_relaxed_laneselect(LiftoffRegister dst,
2639 LiftoffRegister src1,
2640 LiftoffRegister src2,
2641 LiftoffRegister mask,
2642 int lane_width) {
2643 // PPC uses bytewise selection for all lane widths.
2644 emit_s128_select(dst, src1, src2, mask);
2645}
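// Illustrative note, not part of the original header: relaxed laneselect only
// guarantees a result when every mask lane is all-ones or all-zeros, and for
// such canonical masks the bytewise/bitwise select performed by
// emit_s128_select picks exactly the same bytes as a per-lane select would,
// so no width-specific instruction is needed here.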
2646
2647void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
2648 LiftoffRegister src) {
2649 F64x2ConvertLowI32x4U(dst.fp().toSimd(), src.fp().toSimd(), r0,
2650 kScratchSimd128Reg);
2651}
2652
2653void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
2654 LiftoffRegister src) {
2655 I64x2BitMask(dst.gp(), src.fp().toSimd(), r0, kScratchSimd128Reg);
2656}
2657
2658void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
2659 LiftoffRegister src) {
2660 I64x2UConvertI32x4Low(dst.fp().toSimd(), src.fp().toSimd(), r0,
2661 kScratchSimd128Reg);
2662}
2663
2664void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
2665 LiftoffRegister src) {
2666 I64x2UConvertI32x4High(dst.fp().toSimd(), src.fp().toSimd(), r0,
2667 kScratchSimd128Reg);
2668}
2669
2670void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
2671 LiftoffRegister src) {
2672 I32x4BitMask(dst.gp(), src.fp().toSimd(), r0, kScratchSimd128Reg);
2673}
2674
2675void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
2676 LiftoffRegister src) {
2677 I16x8BitMask(dst.gp(), src.fp().toSimd(), r0, kScratchSimd128Reg);
2678}
2679
2680void LiftoffAssembler::emit_i32x4_dot_i8x16_i7x16_add_s(LiftoffRegister dst,
2681 LiftoffRegister lhs,
2682 LiftoffRegister rhs,
2683 LiftoffRegister acc) {
2684 I32x4DotI8x16AddS(dst.fp().toSimd(), lhs.fp().toSimd(), rhs.fp().toSimd(),
2685 acc.fp().toSimd());
2686}
2687
2688void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
2689 LiftoffRegister lhs,
2690 LiftoffRegister rhs,
2691 const uint8_t shuffle[16],
2692 bool is_swizzle) {
2693 // Remap the shuffle indices to match IBM lane numbering.
2694 // TODO(miladfarca): Put this in a function and share it with the instruction
2695 // selector.
2696 int max_index = 15;
2697 int total_lane_count = 2 * kSimd128Size;
2698 uint8_t shuffle_remapped[kSimd128Size];
2699 for (int i = 0; i < kSimd128Size; i++) {
2700 uint8_t current_index = shuffle[i];
2701 shuffle_remapped[i] = (current_index <= max_index
2702 ? max_index - current_index
2703 : total_lane_count - current_index + max_index);
2704 }
2705 uint64_t vals[2];
2706 memcpy(vals, shuffle_remapped, sizeof(shuffle_remapped));
2707#ifdef V8_TARGET_BIG_ENDIAN
2708 vals[0] = ByteReverse(vals[0]);
2709 vals[1] = ByteReverse(vals[1]);
2710#endif
2711 I8x16Shuffle(dst.fp().toSimd(), lhs.fp().toSimd(), rhs.fp().toSimd(), vals[1],
2712 vals[0], r0, ip, kScratchSimd128Reg);
2713}
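// Illustrative sketch, not part of the original header: the remapping above
// mirrors byte indices within each 16-byte input to match IBM lane numbering,
// so indices 0..15 become 15..0 and 16..31 become 31..16 (the helper name is
// hypothetical):
#include <cstdint>
constexpr uint8_t DemoRemapShuffleIndex(uint8_t i) {
  return i <= 15 ? 15 - i : 2 * 16 - i + 15;
}
static_assert(DemoRemapShuffleIndex(0) == 15, "first byte of lhs mirrors to 15");
static_assert(DemoRemapShuffleIndex(15) == 0, "last byte of lhs mirrors to 0");
static_assert(DemoRemapShuffleIndex(16) == 31, "first byte of rhs mirrors to 31");
static_assert(DemoRemapShuffleIndex(31) == 16, "last byte of rhs mirrors to 16");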
2714
2715void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
2716 LiftoffRegister src) {
2717 V128AnyTrue(dst.gp(), src.fp().toSimd(), r0, ip, kScratchSimd128Reg);
2718}
2719
2720void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
2721 LiftoffRegister src) {
2722 I8x16BitMask(dst.gp(), src.fp().toSimd(), r0, ip, kScratchSimd128Reg);
2723}
2724
2725void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
2726 const uint8_t imms[16]) {
2727 uint64_t vals[2];
2728 memcpy(vals, imms, sizeof(vals));
2729#ifdef V8_TARGET_BIG_ENDIAN
2730 vals[0] = ByteReverse(vals[0]);
2731 vals[1] = ByteReverse(vals[1]);
2732#endif
2733 S128Const(dst.fp().toSimd(), vals[1], vals[0], r0, ip);
2734}
2735
2736void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
2737 LiftoffRegister src1,
2738 LiftoffRegister src2,
2739 LiftoffRegister mask) {
2740 S128Select(dst.fp().toSimd(), src1.fp().toSimd(), src2.fp().toSimd(),
2741 mask.fp().toSimd());
2742}
2743
2744void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
2745 LiftoffRegister src) {
2746 I16x8UConvertI8x16Low(dst.fp().toSimd(), src.fp().toSimd(), r0,
2747 kScratchSimd128Reg);
2748}
2749
2750void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst,
2751 LiftoffRegister src) {
2752 I16x8UConvertI8x16High(dst.fp().toSimd(), src.fp().toSimd(), r0,
2753 kScratchSimd128Reg);
2754}
2755
2756void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
2757 LiftoffRegister src) {
2758 I32x4UConvertI16x8Low(dst.fp().toSimd(), src.fp().toSimd(), r0,
2759 kScratchSimd128Reg);
2760}
2761
2762void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
2763 LiftoffRegister src) {
2764 I32x4UConvertI16x8High(dst.fp().toSimd(), src.fp().toSimd(), r0,
2765 kScratchSimd128Reg);
2766}
2767
2768void LiftoffAssembler::StackCheck(Label* ool_code) {
2769 Register limit_address = ip;
2770 LoadStackLimit(limit_address, StackLimitKind::kRealStackLimit);
2771 CmpU64(sp, limit_address);
2772 ble(ool_code);
2773}
2774
2775void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
2776 if (v8_flags.debug_code) Abort(reason);
2777}
2778
2779void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
2780 MultiPush(regs.GetGpList());
2781 DoubleRegList fp_regs = regs.GetFpList();
2782 MultiPushF64AndV128(fp_regs, Simd128RegList::FromBits(fp_regs.bits()), ip,
2783 r0);
2784}
2785
2786void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
2787 DoubleRegList fp_regs = regs.GetFpList();
2788 MultiPopF64AndV128(fp_regs, Simd128RegList::FromBits(fp_regs.bits()), ip, r0);
2789 MultiPop(regs.GetGpList());
2790}
2791
2792void LiftoffAssembler::RecordSpillsInSafepoint(
2793 SafepointTableBuilder::Safepoint& safepoint, LiftoffRegList all_spills,
2794 LiftoffRegList ref_spills, int spill_offset) {
2795 LiftoffRegList fp_spills = all_spills & kFpCacheRegList;
2796 int spill_space_size = fp_spills.GetNumRegsSet() * kSimd128Size;
2797 LiftoffRegList gp_spills = all_spills & kGpCacheRegList;
2798 while (!gp_spills.is_empty()) {
2799 LiftoffRegister reg = gp_spills.GetLastRegSet();
2800 if (ref_spills.has(reg)) {
2801 safepoint.DefineTaggedStackSlot(spill_offset);
2802 }
2803 gp_spills.clear(reg);
2804 ++spill_offset;
2805 spill_space_size += kSystemPointerSize;
2806 }
2807 // Record the number of additional spill slots.
2808 RecordOolSpillSpaceSize(spill_space_size);
2809}
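// Illustrative sketch, not part of the original header: the recorded
// out-of-line spill area uses one 16-byte slot per spilled FP/SIMD register
// plus one pointer-sized slot per spilled GP register, so 2 FP spills and
// 3 GP spills on a 64-bit target account for:
static_assert(2 * 16 + 3 * 8 == 56, "2 simd slots + 3 pointer slots = 56 bytes");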
2810
2811void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
2812 Drop(num_stack_slots);
2813 Ret();
2814}
2815
2816void LiftoffAssembler::CallCWithStackBuffer(
2817 const std::initializer_list<VarState> args, const LiftoffRegister* rets,
2818 ValueKind return_kind, ValueKind out_argument_kind, int stack_bytes,
2819 ExternalReference ext_ref) {
2820 int size = RoundUp(stack_bytes, kSystemPointerSize);
2821
2822 SubS64(sp, sp, Operand(size), r0);
2823
2824 int arg_offset = 0;
2825 for (const VarState& arg : args) {
2826 MemOperand dst{sp, arg_offset};
2827 liftoff::StoreToMemory(this, dst, arg, r0, ip);
2828 arg_offset += value_kind_size(arg.kind());
2829 }
2830 DCHECK_LE(arg_offset, stack_bytes);
2831
2832 // Pass a pointer to the buffer with the arguments to the C function.
2833 mr(r3, sp);
2834
2835 // Now call the C function.
2836 constexpr int kNumCCallArgs = 1;
2837 PrepareCallCFunction(kNumCCallArgs, r0);
2838 CallCFunction(ext_ref, kNumCCallArgs);
2839
2840 // Move return value to the right register.
2841 const LiftoffRegister* result_reg = rets;
2842 if (return_kind != kVoid) {
2843 constexpr Register kReturnReg = r3;
2844 if (kReturnReg != rets->gp()) {
2845 Move(*rets, LiftoffRegister(kReturnReg), return_kind);
2846 }
2847 result_reg++;
2848 }
2849
2850 // Load potential output value from the buffer on the stack.
2851 if (out_argument_kind != kVoid) {
2852 switch (out_argument_kind) {
2853 case kI16:
2854 LoadS16(result_reg->gp(), MemOperand(sp));
2855 break;
2856 case kI32:
2857 LoadS32(result_reg->gp(), MemOperand(sp));
2858 break;
2859 case kI64:
2860 case kRefNull:
2861 case kRef:
2862 LoadU64(result_reg->gp(), MemOperand(sp));
2863 break;
2864 case kF32:
2865 LoadF32(result_reg->fp(), MemOperand(sp));
2866 break;
2867 case kF64:
2868 LoadF64(result_reg->fp(), MemOperand(sp));
2869 break;
2870 case kS128:
2871 LoadSimd128(result_reg->fp().toSimd(), MemOperand(sp), r0);
2872 break;
2873 default:
2874 UNREACHABLE();
2875 }
2876 }
2877 AddS64(sp, sp, Operand(size), r0);
2878}
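// Illustrative sketch, not part of the original header: arguments are packed
// back-to-back into the scratch stack buffer at offsets that grow by each
// value's size (no per-slot padding), and the buffer address is then passed
// to the C helper in r3. For an (i32, i64, f64) argument list the offsets are:
static_assert(0 + 4 == 4 && 4 + 8 == 12, "i32 at 0, i64 at 4, f64 at 12");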
2879
2880void LiftoffAssembler::CallC(const std::initializer_list<VarState> args,
2881 ExternalReference ext_ref) {
2882 // First, prepare the stack for the C call.
2883 int num_args = static_cast<int>(args.size());
2884 PrepareCallCFunction(num_args, r0);
2885
2886 // Then execute the parallel register move and also move values to parameter
2887 // stack slots.
2888 int reg_args = 0;
2889 int stack_args = 0;
2890 ParallelMove parallel_move{this};
2891 for (const VarState& arg : args) {
2892 if (reg_args < int{arraysize(kCArgRegs)}) {
2893 parallel_move.LoadIntoRegister(LiftoffRegister{kCArgRegs[reg_args]}, arg);
2894 ++reg_args;
2895 } else {
2896 int bias = 0;
2897 // On BE machines values with less than 8 bytes are right justified.
2898 // bias here is relative to the stack pointer.
2899 if (arg.kind() == kI32 || arg.kind() == kF32) bias = -stack_bias;
2900 int offset =
2902 MemOperand dst{sp, offset + bias};
2903 liftoff::StoreToMemory(this, dst, arg, r0, ip);
2904 ++stack_args;
2905 }
2906 }
2907 parallel_move.Execute();
2908
2909 // Now call the C function.
2910 CallCFunction(ext_ref, num_args);
2911}
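// Illustrative note, not part of the original header: big-endian PPC C ABIs
// expect integer arguments narrower than 8 bytes to be right-justified inside
// their 8-byte stack slot, i.e. the meaningful 4 bytes of a kI32 or kF32 value
// sit in the higher-addressed half of the slot. The `bias` adjustment above
// moves the 4-byte store to that half; on little-endian targets no adjustment
// is needed because the low 4 bytes of the slot are already the ones read.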
2912
2913void LiftoffAssembler::CallNativeWasmCode(Address addr) {
2914 Call(addr, RelocInfo::WASM_CALL);
2915}
2916
2917void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
2918 Jump(addr, RelocInfo::WASM_CALL);
2919}
2920
2921void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
2922 compiler::CallDescriptor* call_descriptor,
2923 Register target) {
2924 DCHECK(target != no_reg);
2925 CallWasmCodePointer(target);
2926}
2927
2928void LiftoffAssembler::TailCallIndirect(
2929 compiler::CallDescriptor* call_descriptor, Register target) {
2930 DCHECK(target != no_reg);
2931 CallWasmCodePointer(target, CallJumpMode::kTailCall);
2932}
2933
2934void LiftoffAssembler::CallBuiltin(Builtin builtin) {
2935 // A direct call to a builtin. Just encode the builtin index. This will be
2936 // patched at relocation.
2937 Call(static_cast<Address>(builtin), RelocInfo::WASM_STUB_CALL);
2938}
2939
2940void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
2941 SubS64(sp, sp, Operand(size), r0);
2942 mr(addr, sp);
2943}
2944
2945void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
2946 AddS64(sp, sp, Operand(size));
2947}
2948
2950
2951void LiftoffAssembler::emit_store_nonzero_if_nan(Register dst,
2952 DoubleRegister src,
2953 ValueKind kind) {
2954 Label return_nan, done;
2955 fcmpu(src, src);
2956 bunordered(&return_nan);
2957 b(&done);
2958 bind(&return_nan);
2959 StoreF32(src, MemOperand(dst), r0);
2960 bind(&done);
2961}
2962
2963void LiftoffAssembler::emit_s128_store_nonzero_if_nan(Register dst,
2964 LiftoffRegister src,
2965 Register tmp_gp,
2966 LiftoffRegister tmp_s128,
2967 ValueKind lane_kind) {
2968 Label done;
2969 if (lane_kind == kF32) {
2970 xvcmpeqsp(tmp_s128.fp().toSimd(), src.fp().toSimd(), src.fp().toSimd(),
2971 SetRC);
2972 } else {
2973 DCHECK_EQ(lane_kind, kF64);
2974 xvcmpeqdp(tmp_s128.fp().toSimd(), src.fp().toSimd(), src.fp().toSimd(),
2975 SetRC);
2976 }
2977 // CR_LT which is targeting cr6 bit 0, indicating if all lanes true (no lanes
2978 // are NaN).
2979 Condition all_lanes_true = lt;
2980 b(all_lanes_true, &done, cr6);
2981 // Do not use the src register as a Fp register to store a value.
2982 // We use two different sets for Fp and Simd registers on PPC.
2983 li(tmp_gp, Operand(1));
2984 StoreU32(tmp_gp, MemOperand(dst), r0);
2985 bind(&done);
2986}
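// Illustrative note, not part of the original header: xvcmpeqsp/xvcmpeqdp with
// the record bit compare each lane of src against itself; a NaN lane compares
// unequal, so the CR6 "all lanes true" bit being set means no lane is NaN and
// the early branch above skips the store. Only when at least one lane is NaN
// does the code fall through and write the nonzero flag to memory.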
2987
2988void LiftoffAssembler::emit_store_nonzero(Register dst) {
2989 StoreU32(dst, MemOperand(dst), r0);
2990}
2991
2992void LiftoffStackSlots::Construct(int param_slots) {
2993 DCHECK_LT(0, slots_.size());
2995 int last_stack_slot = param_slots;
2996 for (auto& slot : slots_) {
2997 const int stack_slot = slot.dst_slot_;
2998 int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
2999 DCHECK_LT(0, stack_decrement);
3000 last_stack_slot = stack_slot;
3001 const LiftoffAssembler::VarState& src = slot.src_;
3002 switch (src.loc()) {
3003 case LiftoffAssembler::VarState::kStack: {
3004 switch (src.kind()) {
3005 case kI32:
3006 case kRef:
3007 case kRefNull:
3008 case kI64: {
3009 asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
3010 UseScratchRegisterScope temps(asm_);
3011 Register scratch = temps.Acquire();
3012 asm_->LoadU64(scratch, liftoff::GetStackSlot(slot.src_offset_), r0);
3013 asm_->Push(scratch);
3014 break;
3015 }
3016 case kF32: {
3017 asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
3018 asm_->LoadF32(kScratchDoubleReg,
3019 liftoff::GetStackSlot(slot.src_offset_ + stack_bias),
3020 r0);
3021 asm_->AddS64(sp, sp, Operand(-kSystemPointerSize));
3022 asm_->StoreF32(kScratchDoubleReg, MemOperand(sp), r0);
3023 break;
3024 }
3025 case kF64: {
3026 asm_->AllocateStackSpace(stack_decrement - kDoubleSize);
3027 asm_->LoadF64(kScratchDoubleReg,
3028 liftoff::GetStackSlot(slot.src_offset_), r0);
3029 asm_->AddS64(sp, sp, Operand(-kSystemPointerSize), r0);
3030 asm_->StoreF64(kScratchDoubleReg, MemOperand(sp), r0);
3031 break;
3032 }
3033 case kS128: {
3034 asm_->AllocateStackSpace(stack_decrement - kSimd128Size);
3035 asm_->LoadSimd128(kScratchSimd128Reg,
3036 liftoff::GetStackSlot(slot.src_offset_), r0);
3037 asm_->AddS64(sp, sp, Operand(-kSimd128Size));
3038 asm_->StoreSimd128(kScratchSimd128Reg, MemOperand(sp), r0);
3039 break;
3040 }
3041 default:
3042 UNREACHABLE();
3043 }
3044 break;
3045 }
3046 case LiftoffAssembler::VarState::kRegister: {
3047 int pushed_bytes = SlotSizeInBytes(slot);
3048 asm_->AllocateStackSpace(stack_decrement - pushed_bytes);
3049 switch (src.kind()) {
3050 case kI64:
3051 case kI32:
3052 case kRef:
3053 case kRefNull:
3054 asm_->push(src.reg().gp());
3055 break;
3056 case kF32:
3057 asm_->AddS64(sp, sp, Operand(-kSystemPointerSize), r0);
3058 asm_->StoreF32(src.reg().fp(), MemOperand(sp), r0);
3059 break;
3060 case kF64:
3061 asm_->AddS64(sp, sp, Operand(-kSystemPointerSize), r0);
3062 asm_->StoreF64(src.reg().fp(), MemOperand(sp), r0);
3063 break;
3064 case kS128: {
3065 asm_->AddS64(sp, sp, Operand(-kSimd128Size), r0);
3066 asm_->StoreSimd128(src.reg().fp().toSimd(), MemOperand(sp), r0);
3067 break;
3068 }
3069 default:
3070 UNREACHABLE();
3071 }
3072 break;
3073 }
3074 case LiftoffAssembler::VarState::kIntConst: {
3075 asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
3076 DCHECK(src.kind() == kI32 || src.kind() == kI64);
3077 UseScratchRegisterScope temps(asm_);
3078 Register scratch = temps.Acquire();
3079
3080 switch (src.kind()) {
3081 case kI32:
3082 asm_->mov(scratch, Operand(src.i32_const()));
3083 break;
3084 case kI64:
3085 asm_->mov(scratch, Operand(int64_t{slot.src_.i32_const()}));
3086 break;
3087 default:
3088 UNREACHABLE();
3089 }
3090 asm_->push(scratch);
3091 break;
3092 }
3093 }
3094 }
3095}
3096
3097} // namespace v8::internal::wasm
3098
3099#undef BAILOUT
3100
3101#endif // V8_WASM_BASELINE_PPC_LIFTOFF_ASSEMBLER_PPC_INL_H_
void TailCallIndirect(compiler::CallDescriptor *call_descriptor, Register target)
void emit_i16x8_bitmask(LiftoffRegister dst, LiftoffRegister src)
void emit_i64x2_splat(LiftoffRegister dst, LiftoffRegister src)
void AllocateStackSlot(Register addr, uint32_t size)
void emit_i32x4_extract_lane(LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx)
void LoadTaggedPointer(Register dst, Register src_addr, Register offset_reg, int32_t offset_imm, uint32_t *protected_load_pc=nullptr, bool offset_reg_needs_shift=false)
void emit_f32x4_splat(LiftoffRegister dst, LiftoffRegister src)
void emit_i32_cond_jumpi(Condition, Label *, Register lhs, int imm, const FreezeCacheState &frozen)
void emit_i64_eqz(Register dst, LiftoffRegister src)
void StoreTaggedPointer(Register dst_addr, Register offset_reg, int32_t offset_imm, Register src, LiftoffRegList pinned, uint32_t *protected_store_pc=nullptr, SkipWriteBarrier=kNoSkipWriteBarrier)
void bailout(LiftoffBailoutReason reason, const char *detail)
void IncrementSmi(LiftoffRegister dst, int offset)
LiftoffRegister GetUnusedRegister(RegClass rc, std::initializer_list< LiftoffRegister > try_first, LiftoffRegList pinned)
void emit_i8x16_splat(LiftoffRegister dst, LiftoffRegister src)
void emit_i8x16_shuffle(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, const uint8_t shuffle[16], bool is_swizzle)
void AtomicOr(Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, bool i64_offset)
bool emit_select(LiftoffRegister dst, Register condition, LiftoffRegister true_value, LiftoffRegister false_value, ValueKind kind)
bool emit_type_conversion(WasmOpcode opcode, LiftoffRegister dst, LiftoffRegister src, Label *trap=nullptr)
void AtomicLoad(LiftoffRegister dst, Register src_addr, Register offset_reg, uintptr_t offset_imm, LoadType type, LiftoffRegList pinned, bool i64_offset)
void LoadReturnStackSlot(LiftoffRegister, int offset, ValueKind)
void LoadTaggedPointerFromInstance(Register dst, Register instance, int offset)
void emit_v128_anytrue(LiftoffRegister dst, LiftoffRegister src)
void emit_s128_const(LiftoffRegister dst, const uint8_t imms[16])
void StoreLane(Register dst, Register offset, uintptr_t offset_imm, LiftoffRegister src, StoreType type, uint8_t lane, uint32_t *protected_store_pc, bool i64_offset)
void CheckTierUp(int declared_func_index, int budget_used, Label *ool_label, const FreezeCacheState &frozen)
void emit_i16x8_extract_lane_u(LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx)
void CallIndirect(const ValueKindSig *sig, compiler::CallDescriptor *call_descriptor, Register target)
void RecordSpillsInSafepoint(SafepointTableBuilder::Safepoint &safepoint, LiftoffRegList all_spills, LiftoffRegList ref_spills, int spill_offset)
bool emit_f16x8_qfma(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, LiftoffRegister src3)
void LoadTrustedPointer(Register dst, Register src_addr, int offset, IndirectPointerTag tag)
void emit_i8x16_bitmask(LiftoffRegister dst, LiftoffRegister src)
constexpr unsigned GetNumRegsSet() const
base::SmallVector< Slot, 8 > slots_
static int SlotSizeInBytes(const Slot &slot)
static constexpr int ToTagged(int offset)
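The EMIT_SIMD_* and SIMD_*_LIST defines that follow implement the usual X-macro pattern used across the Liftoff backends: a list macro enumerates the operations, and an emitter macro is applied to every list entry to stamp out one emit_* member per operation. The sketch below is a minimal, self-contained illustration of that pattern only; the FakeAssembler type, the FAKE_* macro names, and the op/instruction pairs are hypothetical and are not the actual PPC tables.

// Minimal X-macro sketch (hypothetical names, not the real Liftoff tables).
#include <cstdio>

struct FakeAssembler {
  // The list macro enumerates (operation, backend instruction) pairs.
#define FAKE_SIMD_BINOP_LIST(V) \
  V(f64x2_add, xvadddp)         \
  V(f64x2_sub, xvsubdp)

  // The emitter macro is applied to each list entry and defines one
  // emit_<name> member per operation.
#define FAKE_EMIT_SIMD_BINOP(name, instr) \
  void emit_##name() { std::printf("emit %s via %s\n", #name, #instr); }

  FAKE_SIMD_BINOP_LIST(FAKE_EMIT_SIMD_BINOP)

#undef FAKE_EMIT_SIMD_BINOP
#undef FAKE_SIMD_BINOP_LIST
};

int main() {
  FakeAssembler assm;
  assm.emit_f64x2_add();  // prints "emit f64x2_add via xvadddp"
  assm.emit_f64x2_sub();  // prints "emit f64x2_sub via xvsubdp"
  return 0;
}

Expanding the same list macro with a different emitter (for example one that only declares the members) reuses one operation table without duplicating it, which is why the list and emitter macros are kept separate.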
#define EMIT_SIMD_BINOP_WITH_SCRATCH(name)
#define EMIT_SIMD_UNOP(name)
#define EMIT_SIMD_QFM(name)
#define EMIT_SIMD_ALL_TRUE(name)
#define EMIT_SIMD_BINOP(name)
#define EMIT_SIMD_EXT_ADD_PAIRWISE(name)
#define EMIT_SIMD_UNOP_WITH_SCRATCH(name)
#define COMPRESS_POINTERS_BOOL (Definition globals.h:99)
#define V8_EMBEDDED_CONSTANT_POOL_BOOL (Definition globals.h:81)
#define V8_ENABLE_SANDBOX_BOOL (Definition globals.h:160)
int start
base::Vector< const DirectHandle< Object > > args (Definition execution.cc:74)
Label label
int32_t offset
double remainder
ZoneVector< RpoNumber > & result
#define EMIT_UNOP_FUNCTION(name, instr, dtype, stype, dcast, scast, rcast, ret, return_type)
#define SIMD_VISIT_RELAXED_BINOP(name, op)
#define SIMD_RELAXED_BINOP_LIST(V)
#define EMIT_BINOP_FUNCTION(name, instr, dtype, stype1, stype2, dcast, scast1, scast2, rcast, ret, return_type)
#define SIMD_RELAXED_UNOP_LIST(V)
#define EMIT_SIMD_SHIFT_RR(name, op)
#define SIMD_SHIFT_RI_LIST(V)
#define UNOP_LIST(V)
#define F16_UNOP_LIST(V)
#define SIMD_VISIT_RELAXED_UNOP(name, op)
#define VISIT_F16_BINOP(name)
#define F16_BINOP_LIST(V)
#define VISIT_F16_UNOP(name)
#define EMIT_SIMD_SHIFT_RI(name, op, mask)
#define BINOP_LIST(V)
#define SIMD_SHIFT_RR_LIST(V)
LiftoffRegister reg
MovableLabel continuation
LiftoffRegList regs_to_save
std::optional< OolTrapLabel > trap
uint32_t const mask
#define ATOMIC_OP(op, type, kind)
#define SIMD_UNOP_LIST(V)
#define SIMD_ALL_TRUE_LIST(V)
#define SIMD_BINOP_WITH_SCRATCH_LIST(V)
#define SIMD_BINOP_LIST(V)
#define SIMD_UNOP_WITH_SCRATCH_LIST(V)
#define SIMD_QFM_LIST(V)
#define SIMD_EXT_ADD_PAIRWISE_LIST(V)
int int32_t (Definition unicode.cc:40)
constexpr bool IsPowerOfTwo(T value) (Definition bits.h:187)
constexpr int WhichPowerOfTwo(T value) (Definition bits.h:195)
void StoreToMemory(LiftoffAssembler *assm, MemOperand dst, const LiftoffAssembler::VarState &src)
MemOperand GetMemOp(LiftoffAssembler *assm, UseScratchRegisterScope *temps, Register addr, Register offset, int32_t offset_imm, unsigned shift_amount=0)
constexpr DoubleRegister kFpReturnRegisters[]
constexpr Register kGpParamRegisters[]
constexpr DoubleRegister kFpParamRegisters[]
constexpr Register r0
constexpr Register kGpReturnRegisters[]
int declared_function_index(const WasmModule *module, int func_index)
constexpr int value_kind_size(ValueKind kind)
static constexpr LiftoffRegList kGpCacheRegList
static constexpr LiftoffRegList kFpCacheRegList
constexpr bool is_reference(ValueKind kind)
LiftoffAssembler::ValueKindSig ValueKindSig
constexpr Register no_reg
constexpr int kMinInt (Definition globals.h:375)
RegListBase< DoubleRegister > DoubleRegList (Definition reglist-arm.h:15)
constexpr int kSimd128Size (Definition globals.h:706)
DwVfpRegister DoubleRegister
constexpr DoubleRegister kScratchDoubleReg
constexpr Simd128Register kScratchSimd128Reg
const int kStackFrameExtraParamSlot
constexpr Register kScratchReg2
constexpr int kSystemPointerSize (Definition globals.h:410)
constexpr Register kReturnRegister0
constexpr bool SmiValuesAre31Bits()
Condition NegateCondition(Condition cond)
constexpr int kInt32Size (Definition globals.h:401)
V8_EXPORT_PRIVATE FlagValues v8_flags
constexpr VFPRoundingMode kRoundToZero
constexpr uint8_t kInstrSize
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2=no_reg, Register reg3=no_reg, Register reg4=no_reg, Register reg5=no_reg, Register reg6=no_reg)
constexpr Register kCArgRegs[]
std::unique_ptr< AssemblerBuffer > ExternalAssemblerBuffer(void *start, int size) (Definition assembler.cc:161)
constexpr int kDoubleSize (Definition globals.h:407)
constexpr Simd128Register kScratchSimd128Reg2
Condition to_condition(Condition cond)
bool is_signed(Condition cond)
static V ByteReverse(V value) (Definition utils.h:796)
#define DCHECK_LE(v1, v2) (Definition logging.h:490)
#define CHECK(condition) (Definition logging.h:124)
#define DCHECK_NE(v1, v2) (Definition logging.h:486)
#define DCHECK(condition) (Definition logging.h:482)
#define DCHECK_LT(v1, v2) (Definition logging.h:489)
#define DCHECK_EQ(v1, v2) (Definition logging.h:485)
constexpr T RoundUp(T x, intptr_t m) (Definition macros.h:387)
#define arraysize(array) (Definition macros.h:67)
#define V8_LIKELY(condition) (Definition v8config.h:661)