#if V8_ENABLE_WEBASSEMBLY
    switch (constant.type()) {
        return Operand(constant.ToInt32());
        return Operand(constant.ToInt64());
        return Operand(constant.ToExternalReference());
    const size_t index = *first_index;
                            size_t first_index = 0) {
#if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
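// Out-of-line path for the record-write barrier: the inline code stores the
// tagged value and only branches here when the page flags say the barrier is
// actually required, keeping the common case short.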
class OutOfLineRecordWrite final : public OutOfLineCode {
                       UnwindingInfoWriter* unwinding_info_writer)
      : OutOfLineCode(gen),
#if V8_ENABLE_WEBASSEMBLY
        stub_mode_(stub_mode),
  void Generate() final {
      __ DecompressTagged(value_, value_);
    SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
      __ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode);
#if V8_ENABLE_WEBASSEMBLY
    } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
      __ CallRecordWriteStubSaveRegisters(object_, scratch1_, save_fp_mode,
                                          StubCallMode::kCallWasmRuntimeStub);
      __ CallRecordWriteStubSaveRegisters(object_, scratch1_, save_fp_mode);
#if V8_ENABLE_WEBASSEMBLY
      if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
      if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
      if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
      if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
    case kS390_Mul64WithOverflow:
    case kS390_Mul64WithOverflow:
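// The helpers below fetch an instruction operand either from a memory operand
// (when an addressing mode is encoded in the opcode) or from a stack slot, so
// the per-opcode cases further down can stay addressing-mode agnostic.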
#define GET_MEMOPERAND32(ret, fi) \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    MemOperand mem(r0); \
    if (mode != kMode_None) { \
      size_t first_index = (fi); \
      mem = i.MemoryOperand(&mode, &first_index); \
      mem = i.InputStackSlot32(fi); \
#define GET_MEMOPERAND(ret, fi) \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    MemOperand mem(r0); \
    if (mode != kMode_None) { \
      size_t first_index = (fi); \
      mem = i.MemoryOperand(&mode, &first_index); \
      mem = i.InputStackSlot(fi); \
#define RRInstr(instr) \
    DCHECK(i.OutputRegister() == i.InputRegister(0)); \
    __ instr(i.OutputRegister(), i.InputRegister(1)); \
#define RIInstr(instr) \
    DCHECK(i.OutputRegister() == i.InputRegister(0)); \
    __ instr(i.OutputRegister(), i.InputImmediate(1)); \
#define RMInstr(instr, GETMEM) \
    DCHECK(i.OutputRegister() == i.InputRegister(0)); \
    __ instr(i.OutputRegister(), GETMEM(ret, 1)); \
#define RM32Instr(instr) RMInstr(instr, GET_MEMOPERAND32)
#define RM64Instr(instr) RMInstr(instr, GET_MEMOPERAND)
#define RRRInstr(instr) \
    __ instr(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); \
#define RRIInstr(instr) \
    __ instr(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1)); \
#define RRMInstr(instr, GETMEM) \
    __ instr(i.OutputRegister(), i.InputRegister(0), GETMEM(ret, 1)); \
#define RRM32Instr(instr) RRMInstr(instr, GET_MEMOPERAND32)
#define RRM64Instr(instr) RRMInstr(instr, GET_MEMOPERAND)
#define DDInstr(instr) \
    DCHECK(i.OutputDoubleRegister() == i.InputDoubleRegister(0)); \
    __ instr(i.OutputDoubleRegister(), i.InputDoubleRegister(1)); \
#define DMInstr(instr) \
    DCHECK(i.OutputDoubleRegister() == i.InputDoubleRegister(0)); \
    __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 1)); \
#define DMTInstr(instr) \
    DCHECK(i.OutputDoubleRegister() == i.InputDoubleRegister(0)); \
    __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 1), \
             kScratchDoubleReg); \
#define R_MInstr(instr) \
    __ instr(i.OutputRegister(), GET_MEMOPERAND(ret, 0)); \
#define R_DInstr(instr) \
    __ instr(i.OutputRegister(), i.InputDoubleRegister(0)); \
#define D_DInstr(instr) \
    __ instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
#define D_MInstr(instr) \
    __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 0)); \
#define D_MTInstr(instr) \
    __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 0), \
             kScratchDoubleReg); \
template <int numOfOperand, class RType, class MType, class IType>
static inline int AssembleOp(Instruction* instr, RType r, MType m, IType i) {
template <class _RR, class _RM, class _RI>
static inline int AssembleBinOp(Instruction* instr, _RR _rr, _RM _rm, _RI _ri) {
  return AssembleOp<2>(instr, _rr, _rm, _ri);
template <class _R, class _M, class _I>
static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
  return AssembleOp<1>(instr, _r, _m, _i);
#define ASSEMBLE_BIN_OP(_rr, _rm, _ri) AssembleBinOp(instr, _rr, _rm, _ri)
#define ASSEMBLE_UNARY_OP(_r, _m, _i) AssembleUnaryOp(instr, _r, _m, _i)
#define CHECK_AND_ZERO_EXT_OUTPUT(num) \
    DCHECK(HasImmediateInput(instr, (index))); \
    int doZeroExt = i.InputInt32(index); \
    if (doZeroExt) __ LoadU32(i.OutputRegister(), i.OutputRegister()); \
#define ASSEMBLE_BIN32_OP(_rr, _rm, _ri) \
  { CHECK_AND_ZERO_EXT_OUTPUT(AssembleBinOp(instr, _rr, _rm, _ri)); }
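// 32-bit binary ops carry an extra immediate input that says whether the
// upper half of the 64-bit result register must be cleared afterwards;
// ASSEMBLE_BIN32_OP feeds the operand index returned by AssembleBinOp into
// CHECK_AND_ZERO_EXT_OUTPUT, which zero-extends via LoadU32 when requested.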
#define ASSEMBLE_FLOAT_UNOP(asm_instr) \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
#define ASSEMBLE_FLOAT_BINOP(asm_instr) \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                 i.InputDoubleRegister(1)); \
#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr) \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    if (mode != kMode_None) { \
      size_t first_index = 1; \
      MemOperand operand = i.MemoryOperand(&mode, &first_index); \
      if (i.CompareLogical()) { \
        __ cmpl_instr(i.InputRegister(0), operand); \
        __ cmp_instr(i.InputRegister(0), operand); \
    } else if (HasRegisterInput(instr, 1)) { \
      if (i.CompareLogical()) { \
        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1)); \
        __ cmp_instr(i.InputRegister(0), i.InputRegister(1)); \
    } else if (HasImmediateInput(instr, 1)) { \
      if (i.CompareLogical()) { \
        __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1)); \
        __ cmp_instr(i.InputRegister(0), i.InputImmediate(1)); \
      DCHECK(HasStackSlotInput(instr, 1)); \
      if (i.CompareLogical()) { \
        __ cmpl_instr(i.InputRegister(0), i.InputStackSlot(1)); \
        __ cmp_instr(i.InputRegister(0), i.InputStackSlot(1)); \
#define ASSEMBLE_COMPARE32(cmp_instr, cmpl_instr) \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    if (mode != kMode_None) { \
      size_t first_index = 1; \
      MemOperand operand = i.MemoryOperand(&mode, &first_index); \
      if (i.CompareLogical()) { \
        __ cmpl_instr(i.InputRegister(0), operand); \
        __ cmp_instr(i.InputRegister(0), operand); \
    } else if (HasRegisterInput(instr, 1)) { \
      if (i.CompareLogical()) { \
        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1)); \
        __ cmp_instr(i.InputRegister(0), i.InputRegister(1)); \
    } else if (HasImmediateInput(instr, 1)) { \
      if (i.CompareLogical()) { \
        __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1)); \
        __ cmp_instr(i.InputRegister(0), i.InputImmediate(1)); \
      DCHECK(HasStackSlotInput(instr, 1)); \
      if (i.CompareLogical()) { \
        __ cmpl_instr(i.InputRegister(0), i.InputStackSlot32(1)); \
        __ cmp_instr(i.InputRegister(0), i.InputStackSlot32(1)); \
#define ASSEMBLE_FLOAT_COMPARE(cmp_rr_instr, cmp_rm_instr, load_instr) \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    if (mode != kMode_None) { \
      size_t first_index = 1; \
      MemOperand operand = i.MemoryOperand(&mode, &first_index); \
      __ cmp_rm_instr(i.InputDoubleRegister(0), operand); \
    } else if (HasFPRegisterInput(instr, 1)) { \
      __ cmp_rr_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
      USE(HasFPStackSlotInput); \
      DCHECK(HasFPStackSlotInput(instr, 1)); \
      MemOperand operand = i.InputStackSlot(1); \
      if (operand.offset() >= 0) { \
        __ cmp_rm_instr(i.InputDoubleRegister(0), operand); \
        __ load_instr(kScratchDoubleReg, operand); \
        __ cmp_rr_instr(i.InputDoubleRegister(0), kScratchDoubleReg); \
#define ASSEMBLE_MODULO(div_instr, shift_instr) \
    __ mov(r0, i.InputRegister(0)); \
    __ shift_instr(r0, Operand(32)); \
    __ div_instr(r0, i.InputRegister(1)); \
    __ LoadU32(i.OutputRegister(), r0); \
#define ASSEMBLE_FLOAT_MODULO() \
    FrameScope scope(masm(), StackFrame::MANUAL); \
    __ Push(r2, r3, r4, r5); \
    __ PrepareCallCFunction(0, 2, kScratchReg); \
    __ MovToFloatParameters(i.InputDoubleRegister(0), \
                            i.InputDoubleRegister(1)); \
    __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2); \
    __ MovFromFloatResult(i.OutputDoubleRegister()); \
    __ Pop(r2, r3, r4, r5); \
#define ASSEMBLE_IEEE754_UNOP(name) \
    FrameScope scope(masm(), StackFrame::MANUAL); \
    __ Push(r2, r3, r4, r5); \
    __ PrepareCallCFunction(0, 1, kScratchReg); \
    __ MovToFloatParameter(i.InputDoubleRegister(0)); \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
    __ MovFromFloatResult(i.OutputDoubleRegister()); \
    __ Pop(r2, r3, r4, r5); \
#define ASSEMBLE_IEEE754_BINOP(name) \
    FrameScope scope(masm(), StackFrame::MANUAL); \
    __ Push(r2, r3, r4, r5); \
    __ PrepareCallCFunction(0, 2, kScratchReg); \
    __ MovToFloatParameters(i.InputDoubleRegister(0), \
                            i.InputDoubleRegister(1)); \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
    __ MovFromFloatResult(i.OutputDoubleRegister()); \
    __ Pop(r2, r3, r4, r5); \
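// Float64 modulo and the ieee754 unary/binary ops have no single machine
// instruction here, so they go through C library calls: clobbered argument
// registers are saved, the parameters are moved into FP argument registers,
// and the result is read back from the FP return register.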
#define ASSEMBLE_LOAD_FLOAT(asm_instr) \
    DoubleRegister result = i.OutputDoubleRegister(); \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode); \
    __ asm_instr(result, operand); \
#define ASSEMBLE_LOAD_INTEGER(asm_instr) \
    Register result = i.OutputRegister(); \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode); \
    __ asm_instr(result, operand); \
#define ASSEMBLE_LOADANDTEST64(asm_instr_rr, asm_instr_rm) \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    Register dst = HasRegisterOutput(instr) ? i.OutputRegister() : r0; \
    if (mode != kMode_None) { \
      size_t first_index = 0; \
      MemOperand operand = i.MemoryOperand(&mode, &first_index); \
      __ asm_instr_rm(dst, operand); \
    } else if (HasRegisterInput(instr, 0)) { \
      __ asm_instr_rr(dst, i.InputRegister(0)); \
      DCHECK(HasStackSlotInput(instr, 0)); \
      __ asm_instr_rm(dst, i.InputStackSlot(0)); \
#define ASSEMBLE_LOADANDTEST32(asm_instr_rr, asm_instr_rm) \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    Register dst = HasRegisterOutput(instr) ? i.OutputRegister() : r0; \
    if (mode != kMode_None) { \
      size_t first_index = 0; \
      MemOperand operand = i.MemoryOperand(&mode, &first_index); \
      __ asm_instr_rm(dst, operand); \
    } else if (HasRegisterInput(instr, 0)) { \
      __ asm_instr_rr(dst, i.InputRegister(0)); \
      DCHECK(HasStackSlotInput(instr, 0)); \
      __ asm_instr_rm(dst, i.InputStackSlot32(0)); \
#define ASSEMBLE_STORE_FLOAT32() \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    __ StoreF32(value, operand); \
#define ASSEMBLE_STORE_DOUBLE() \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    __ StoreF64(value, operand); \
#define ASSEMBLE_STORE_INTEGER(asm_instr) \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    Register value = i.InputRegister(index); \
    __ asm_instr(value, operand); \
#if defined(V8_ENABLE_WEBASSEMBLY) && defined(V8_TARGET_BIG_ENDIAN)
  return info->IsWasm();
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(load_and_ext) \
    Register old_value = i.InputRegister(0); \
    Register new_value = i.InputRegister(1); \
    Register output = i.OutputRegister(); \
    Register addr = kScratchReg; \
    Register temp0 = r0; \
    Register temp1 = r1; \
    AddressingMode mode = kMode_None; \
    MemOperand op = i.MemoryOperand(&mode, &index); \
    __ AtomicCmpExchangeU8(addr, output, old_value, new_value, temp0, temp1); \
    __ load_and_ext(output, output); \
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(load_and_ext) \
    Register old_value = i.InputRegister(0); \
    Register new_value = i.InputRegister(1); \
    Register output = i.OutputRegister(); \
    Register addr = kScratchReg; \
    Register temp0 = r0; \
    Register temp1 = r1; \
    AddressingMode mode = kMode_None; \
    MemOperand op = i.MemoryOperand(&mode, &index); \
    if (is_wasm_on_be(info())) { \
          GetRegisterThatIsNotOneOf(output, old_value, new_value); \
          GetRegisterThatIsNotOneOf(output, old_value, new_value, temp2); \
      __ Push(temp2, temp3); \
      __ lrvr(temp2, old_value); \
      __ lrvr(temp3, new_value); \
      __ ShiftRightU32(temp2, temp2, Operand(16)); \
      __ ShiftRightU32(temp3, temp3, Operand(16)); \
      __ AtomicCmpExchangeU16(addr, output, temp2, temp3, temp0, temp1); \
      __ lrvr(output, output); \
      __ ShiftRightU32(output, output, Operand(16)); \
      __ Pop(temp2, temp3); \
      __ AtomicCmpExchangeU16(addr, output, old_value, new_value, temp0, \
    __ load_and_ext(output, output); \
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_WORD() \
    Register new_val = i.InputRegister(1); \
    Register output = i.OutputRegister(); \
    Register addr = kScratchReg; \
    AddressingMode mode = kMode_None; \
    MemOperand op = i.MemoryOperand(&mode, &index); \
    if (is_wasm_on_be(info())) { \
      __ lrvr(r0, output); \
      __ lrvr(r1, new_val); \
      __ CmpAndSwap(r0, r1, MemOperand(addr)); \
      __ lrvr(output, r0); \
      __ CmpAndSwap(output, new_val, MemOperand(addr)); \
    __ LoadU32(output, output); \
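// WebAssembly memory is little-endian, so on big-endian hosts
// (is_wasm_on_be()) the atomic helpers byte-reverse operands with lrvr/lrvgr
// before and after the compare-and-swap so the value kept in memory stays in
// wasm byte order.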
#define ASSEMBLE_ATOMIC_BINOP_WORD(load_and_op, op) \
    Register value = i.InputRegister(2); \
    Register result = i.OutputRegister(0); \
    Register addr = r1; \
    AddressingMode mode = kMode_None; \
    MemOperand op = i.MemoryOperand(&mode); \
    if (is_wasm_on_be(info())) { \
      __ LoadU32(r0, MemOperand(addr)); \
      __ op(ip, ip, value); \
      __ CmpAndSwap(r0, ip, MemOperand(addr)); \
      __ bne(&do_cs, Label::kNear); \
      __ lrvr(result, r0); \
      __ load_and_op(result, value, MemOperand(addr)); \
    __ LoadU32(result, result); \
#define ASSEMBLE_ATOMIC_BINOP_WORD64(load_and_op, op) \
    Register value = i.InputRegister(2); \
    Register result = i.OutputRegister(0); \
    Register addr = r1; \
    AddressingMode mode = kMode_None; \
    MemOperand op = i.MemoryOperand(&mode); \
    if (is_wasm_on_be(info())) { \
      __ LoadU64(r0, MemOperand(addr)); \
      __ op(ip, ip, value); \
      __ CmpAndSwap64(r0, ip, MemOperand(addr)); \
      __ bne(&do_cs, Label::kNear); \
      __ lrvgr(result, r0); \
      __ load_and_op(result, value, MemOperand(addr)); \
#define ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end, \
                      maybe_reverse_bytes) \
    bool reverse_bytes = maybe_reverse_bytes && is_wasm_on_be(info()); \
    USE(reverse_bytes); \
    __ LoadU32(prev, MemOperand(addr, offset)); \
    if (reverse_bytes) { \
      Register temp2 = GetRegisterThatIsNotOneOf(value, result, prev); \
      __ lrvr(temp2, prev); \
      __ RotateInsertSelectBits(temp2, temp2, Operand(start), Operand(end), \
                                Operand(static_cast<intptr_t>(shift_amount)), \
      __ RotateInsertSelectBits(temp, value, Operand(start), Operand(end), \
                                Operand(static_cast<intptr_t>(shift_amount)), \
      __ bin_inst(new_val, temp2, temp); \
      __ lrvr(temp2, new_val); \
      __ RotateInsertSelectBits(temp, temp2, Operand(start), Operand(end), \
                                Operand(static_cast<intptr_t>(shift_amount)), \
      __ RotateInsertSelectBits(temp, value, Operand(start), Operand(end), \
                                Operand(static_cast<intptr_t>(shift_amount)), \
      __ bin_inst(new_val, prev, temp); \
      __ RotateInsertSelectBits(temp, new_val, Operand(start), Operand(end), \
                                Operand::Zero(), false); \
    __ CmpAndSwap(prev, temp, MemOperand(addr, offset)); \
    __ bne(&do_cs, Label::kNear); \
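// ATOMIC_BIN_OP emits a compare-and-swap retry loop for sub-word atomics:
// load the containing aligned word, splice the updated byte/halfword in with
// RotateInsertSelectBits, then CmpAndSwap and branch back to do_cs until no
// other writer has intervened.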
#ifdef V8_TARGET_BIG_ENDIAN
#define ATOMIC_BIN_OP_HALFWORD(bin_inst, index, extract_result) \
    constexpr int offset = -(2 * index); \
    constexpr int shift_amount = 16 - (index * 16); \
    constexpr int start = 48 - shift_amount; \
    constexpr int end = start + 15; \
    ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end, true); \
#define ATOMIC_BIN_OP_BYTE(bin_inst, index, extract_result) \
    constexpr int offset = -(index); \
    constexpr int shift_amount = 24 - (index * 8); \
    constexpr int start = 56 - shift_amount; \
    constexpr int end = start + 7; \
    ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end, false); \
#define ATOMIC_BIN_OP_HALFWORD(bin_inst, index, extract_result) \
    constexpr int offset = -(2 * index); \
    constexpr int shift_amount = index * 16; \
    constexpr int start = 48 - shift_amount; \
    constexpr int end = start + 15; \
    ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end, false); \
#define ATOMIC_BIN_OP_BYTE(bin_inst, index, extract_result) \
    constexpr int offset = -(index); \
    constexpr int shift_amount = index * 8; \
    constexpr int start = 56 - shift_amount; \
    constexpr int end = start + 7; \
    ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end, false); \
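// The big- and little-endian variants above differ only in where the selected
// byte or halfword sits inside the aligned word, i.e. in the offset and shift
// passed to ATOMIC_BIN_OP; the extract_result callback then pulls the old
// value back out of the loaded word.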
#define ASSEMBLE_ATOMIC_BINOP_HALFWORD(bin_inst, extract_result) \
    Register value = i.InputRegister(2); \
    Register result = i.OutputRegister(0); \
    Register prev = i.TempRegister(0); \
    Register new_val = r0; \
    Register addr = r1; \
    Register temp = kScratchReg; \
    AddressingMode mode = kMode_None; \
    MemOperand op = i.MemoryOperand(&mode); \
    __ tmll(addr, Operand(3)); \
    __ b(Condition(2), &two); \
    ATOMIC_BIN_OP_HALFWORD(bin_inst, 0, extract_result); \
    ATOMIC_BIN_OP_HALFWORD(bin_inst, 1, extract_result); \
#define ASSEMBLE_ATOMIC_BINOP_BYTE(bin_inst, extract_result) \
    Register value = i.InputRegister(2); \
    Register result = i.OutputRegister(0); \
    Register addr = i.TempRegister(0); \
    Register prev = r0; \
    Register new_val = r1; \
    Register temp = kScratchReg; \
    AddressingMode mode = kMode_None; \
    MemOperand op = i.MemoryOperand(&mode); \
    Label done, one, two, three; \
    __ tmll(addr, Operand(3)); \
    __ b(Condition(1), &three); \
    __ b(Condition(2), &two); \
    __ b(Condition(4), &one); \
    ATOMIC_BIN_OP_BYTE(bin_inst, 0, extract_result); \
    ATOMIC_BIN_OP_BYTE(bin_inst, 1, extract_result); \
    ATOMIC_BIN_OP_BYTE(bin_inst, 2, extract_result); \
    ATOMIC_BIN_OP_BYTE(bin_inst, 3, extract_result); \
#define ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD64() \
    Register new_val = i.InputRegister(1); \
    Register output = i.OutputRegister(); \
    Register addr = kScratchReg; \
    AddressingMode mode = kMode_None; \
    MemOperand op = i.MemoryOperand(&mode, &index); \
    if (is_wasm_on_be(info())) { \
      __ lrvgr(r0, output); \
      __ lrvgr(r1, new_val); \
      __ CmpAndSwap64(r0, r1, MemOperand(addr)); \
      __ lrvgr(output, r0); \
      __ CmpAndSwap64(output, new_val, MemOperand(addr)); \
  __ RestoreFrameStateForTailCall();
void FlushPendingPushRegisters(MacroAssembler* masm,
                               FrameAccessState* frame_access_state,
                               ZoneVector<Register>* pending_pushes) {
  switch (pending_pushes->size()) {
      masm->Push((*pending_pushes)[0]);
      masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
      masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
                 (*pending_pushes)[2]);
  frame_access_state->IncreaseSPDelta(pending_pushes->size());
  pending_pushes->clear();
void AdjustStackPointerForTailCall(
    MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
    ZoneVector<Register>* pending_pushes = nullptr,
    bool allow_shrinkage = true) {
  int current_sp_offset = state->GetSPToFPSlotCount() +
  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
  if (stack_slot_delta > 0) {
    if (pending_pushes != nullptr) {
      FlushPendingPushRegisters(masm, state, pending_pushes);
    state->IncreaseSPDelta(stack_slot_delta);
  } else if (allow_shrinkage && stack_slot_delta < 0) {
    if (pending_pushes != nullptr) {
      FlushPendingPushRegisters(masm, state, pending_pushes);
    state->IncreaseSPDelta(stack_slot_delta);
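// AdjustStackPointerForTailCall moves sp so that the slot just above it
// matches new_slot_above_sp, flushing any registers queued for pushing first;
// growth is always honoured, shrinking only when allow_shrinkage is set.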
                                              int first_unused_slot_offset) {
  ZoneVector<MoveOperands*> pushes(zone());
  if (!pushes.empty() &&
                             first_unused_slot_offset)) {
    S390OperandConverter g(this, instr);
    ZoneVector<Register> pending_pushes(zone());
    for (auto move : pushes) {
      LocationOperand destination_location(
      InstructionOperand source(move->source());
      AdjustStackPointerForTailCall(
          destination_location.index() - pending_pushes.size(),
      DCHECK(source.IsRegister());
      pending_pushes.push_back(source_location.GetRegister());
      if (pending_pushes.size() == 3) {
                                first_unused_slot_offset, nullptr, false);
                                             int first_unused_slot_offset) {
                                first_unused_slot_offset);
  __ ComputeCodeStartAddress(scratch);
  __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
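// The code-start check recomputes the expected start address of the current
// code object into a scratch register and asserts it matches the incoming
// code-start register, guarding against calls entering at a stale address.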
#ifdef V8_ENABLE_LEAPTIERING
void CodeGenerator::AssembleDispatchHandleRegisterCheck() {
    Instruction* instr) {
  S390OperandConverter i(this, instr);
      __ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)),
    case kArchCallCodeObject: {
        __ CallCodeObject(reg);
    case kArchCallBuiltinPointer: {
      Register builtin_index = i.InputRegister(0);
      __ CallBuiltinByIndex(builtin_index, target);
#if V8_ENABLE_WEBASSEMBLY
    case kArchCallWasmFunction:
    case kArchCallWasmFunctionIndirect: {
      if (instr->InputAt(0)->IsImmediate()) {
        DCHECK_EQ(opcode, kArchCallWasmFunction);
        Constant constant = i.ToConstant(instr->InputAt(0));
        __ Call(wasm_code, constant.rmode());
      } else if (opcode == kArchCallWasmFunctionIndirect) {
        __ CallWasmCodePointer(i.InputRegister(0));
        __ Call(i.InputRegister(0));
    case kArchTailCallWasm:
    case kArchTailCallWasmIndirect: {
      if (instr->InputAt(0)->IsImmediate()) {
        Constant constant = i.ToConstant(instr->InputAt(0));
        __ Jump(wasm_code, constant.rmode());
      } else if (opcode == kArchTailCallWasmIndirect) {
        __ Jump(i.InputRegister(0));
    case kArchTailCallCodeObject: {
        __ JumpCodeObject(reg);
      ConstantPoolUnavailableScope constant_pool_unavailable(masm());
    case kArchTailCallAddress: {
    case kArchCallJSFunction: {
        __ Assert(eq, AbortReason::kWrongFunctionContext);
      uint32_t num_arguments =
          i.InputUint32(instr->JSCallArgumentCountInputIndex());
      __ CallJSFunction(func, num_arguments);
    case kArchPrepareCallCFunction: {
      __ PrepareCallCFunction(num_gp_parameters + num_fp_parameters,
    case kArchSaveCallerRegisters: {
    case kArchRestoreCallerRegisters: {
    case kArchPrepareTailCall:
    case kArchCallCFunctionWithFrameState:
    case kArchCallCFunction: {
      int num_fp_parameters = fp_param_field;
      Label return_location;
      bool has_function_descriptor = false;
#if ABI_USES_FUNCTION_DESCRIPTORS
      num_fp_parameters = kNumFPParametersMask & fp_param_field;
      has_function_descriptor =
#if V8_ENABLE_WEBASSEMBLY
      if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
        __ larl(r0, &return_location);
                    MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
      if (instr->InputAt(0)->IsImmediate()) {
        ExternalReference ref = i.InputExternalReference(0);
        pc_offset = __ CallCFunction(ref, num_gp_parameters, num_fp_parameters,
                                     set_isolate_data_slots,
                                     has_function_descriptor, &return_location);
        pc_offset = __ CallCFunction(func, num_gp_parameters, num_fp_parameters,
                                     set_isolate_data_slots,
                                     has_function_descriptor, &return_location);
      bool const needs_frame_state =
          (opcode == kArchCallCFunctionWithFrameState);
      if (needs_frame_state) {
    case kArchBinarySearchSwitch:
    case kArchTableSwitch:
    case kArchAbortCSADcheck:
      DCHECK(i.InputRegister(0) == r3);
        __ CallBuiltin(Builtin::kAbortCSADcheck);
    case kArchDebugBreak:
    case kArchThrowTerminator:
    case kArchDeoptimize: {
      DeoptimizationExit* exit =
      __ b(exit->label());
    case kArchFramePointer:
      __ mov(i.OutputRegister(), fp);
    case kArchParentFramePointer:
        __ mov(i.OutputRegister(), fp);
#if V8_ENABLE_WEBASSEMBLY
    case kArchStackPointer:
      __ mov(i.OutputRegister(), sp);
    case kArchSetStackPointer: {
      __ mov(sp, i.InputRegister(0));
    case kArchStackPointerGreaterThan: {
        lhs_register = i.TempRegister(0);
        __ SubS64(lhs_register, sp, Operand(offset));
      constexpr size_t kValueIndex = 0;
      DCHECK(instr->InputAt(kValueIndex)->IsRegister());
      __ CmpU64(lhs_register, i.InputRegister(kValueIndex));
    case kArchStackCheckOffset:
      __ LoadSmiLiteral(i.OutputRegister(),
    case kArchTruncateDoubleToI:
    case kArchStoreWithWriteBarrier: {
      MemOperand operand = i.MemoryOperand(&addressing_mode, &index);
      Register value = i.InputRegister(index);
        __ Check(ne, AbortReason::kOperandIsCleared);
      OutOfLineRecordWrite* ool = zone()->New<OutOfLineRecordWrite>(
          this, object, operand, value, scratch0, scratch1, mode,
      __ StoreTaggedField(value, operand);
      __ JumpIfSmi(value, ool->exit());
      __ CheckPageFlag(object, scratch0,
      __ bind(ool->exit());
    case kArchStoreIndirectWithWriteBarrier:
    case kArchStackSlot: {
      __ AddS64(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
                Operand(offset.offset()));
      int reverse_slot = i.InputInt32(0);
      if (instr->OutputAt(0)->IsFPRegister()) {
      __ lpr(i.OutputRegister(0), i.InputRegister(0));
      __ lpgr(i.OutputRegister(0), i.InputRegister(0));
    case kS390_ShiftLeft32:
    case kS390_ShiftLeft64:
    case kS390_ShiftRight32:
    case kS390_ShiftRight64:
    case kS390_ShiftRightArith32:
    case kS390_ShiftRightArith64:
    case kS390_RotRight32: {
        __ rll(i.OutputRegister(), i.InputRegister(0),
               Operand(32 - i.InputInt32(1)));
    case kS390_RotRight64:
        __ rllg(i.OutputRegister(), i.InputRegister(0),
                Operand(64 - i.InputInt32(1)));
    case kS390_RotLeftAndClear64:
        int shiftAmount = i.InputInt32(1);
        int endBit = 63 - shiftAmount;
        int startBit = 63 - i.InputInt32(2);
        __ RotateInsertSelectBits(i.OutputRegister(), i.InputRegister(0),
                                  Operand(startBit), Operand(endBit),
                                  Operand(shiftAmount), true);
        int shiftAmount = i.InputInt32(1);
        int clearBit = 63 - i.InputInt32(2);
        __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
        __ srlg(i.OutputRegister(), i.OutputRegister(),
                Operand(clearBit + shiftAmount));
        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(shiftAmount));
    case kS390_RotLeftAndClearLeft64:
        int shiftAmount = i.InputInt32(1);
        int startBit = 63 - i.InputInt32(2);
        __ RotateInsertSelectBits(i.OutputRegister(), i.InputRegister(0),
                                  Operand(startBit), Operand(endBit),
                                  Operand(shiftAmount), true);
        int shiftAmount = i.InputInt32(1);
        int clearBit = 63 - i.InputInt32(2);
        __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
        __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
    case kS390_RotLeftAndClearRight64:
        int shiftAmount = i.InputInt32(1);
        int endBit = 63 - i.InputInt32(2);
        __ RotateInsertSelectBits(i.OutputRegister(), i.InputRegister(0),
                                  Operand(startBit), Operand(endBit),
                                  Operand(shiftAmount), true);
        int shiftAmount = i.InputInt32(1);
        int clearBit = i.InputInt32(2);
        __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
        __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
    case kS390_AddFloat:
    case kS390_AddDouble:
    case kS390_SubFloat:
    case kS390_SubDouble:
    case kS390_Mul32WithOverflow:
                      RRIInstr(Mul32WithOverflowIfCCUnequal));
    case kS390_Mul64WithOverflow: {
      Register dst = i.OutputRegister(), src1 = i.InputRegister(0),
               src2 = i.InputRegister(1);
        __ msgrkc(dst, src1, src2);
        __ MulHighS64(r1, src1, src2);
        __ MulS64(dst, src2);
        __ ShiftRightS64(r0, dst, Operand(63));
    case kS390_MulHigh32:
    case kS390_MulHighU32:
    case kS390_MulHighU64:
    case kS390_MulHighS64:
    case kS390_MulFloat:
    case kS390_MulDouble:
    case kS390_DivU32: {
    case kS390_DivFloat:
    case kS390_DivDouble:
    case kS390_AbsFloat:
      __ lpebr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
    case kS390_SqrtFloat:
    case kS390_SqrtDouble:
    case kS390_FloorFloat:
      __ FloorF32(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
    case kS390_CeilFloat:
      __ CeilF32(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
    case kS390_TruncateFloat:
      __ TruncF32(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
    case kS390_ModDouble:
    case kIeee754Float64Acos:
    case kIeee754Float64Acosh:
    case kIeee754Float64Asin:
    case kIeee754Float64Asinh:
    case kIeee754Float64Atanh:
    case kIeee754Float64Atan:
    case kIeee754Float64Atan2:
    case kIeee754Float64Tan:
    case kIeee754Float64Tanh:
    case kIeee754Float64Cbrt:
    case kIeee754Float64Sin:
    case kIeee754Float64Sinh:
    case kIeee754Float64Cos:
    case kIeee754Float64Cosh:
    case kIeee754Float64Exp:
    case kIeee754Float64Expm1:
    case kIeee754Float64Log:
    case kIeee754Float64Log1p:
    case kIeee754Float64Log2:
    case kIeee754Float64Log10:
    case kIeee754Float64Pow:
      __ lcr(i.OutputRegister(), i.InputRegister(0));
      __ lcgr(i.OutputRegister(), i.InputRegister(0));
    case kS390_MaxFloat:
      __ FloatMax(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                  i.InputDoubleRegister(1));
    case kS390_MaxDouble:
      __ DoubleMax(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                   i.InputDoubleRegister(1));
    case kS390_MinFloat:
      __ FloatMin(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                  i.InputDoubleRegister(1));
    case kS390_FloatNearestInt:
      __ NearestIntF32(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
    case kS390_MinDouble:
      __ DoubleMin(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                   i.InputDoubleRegister(1));
    case kS390_AbsDouble:
      __ lpdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
    case kS390_FloorDouble:
      __ FloorF64(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
    case kS390_CeilDouble:
      __ CeilF64(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
    case kS390_TruncateDouble:
      __ TruncF64(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
    case kS390_RoundDouble:
                   i.InputDoubleRegister(0));
    case kS390_DoubleNearestInt:
      __ NearestIntF64(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
    case kS390_NegFloat:
    case kS390_NegDouble:
    case kS390_Cntlz32: {
      __ CountLeadingZerosU32(i.OutputRegister(), i.InputRegister(0), r0);
    case kS390_Cntlz64: {
      __ CountLeadingZerosU64(i.OutputRegister(), i.InputRegister(0), r0);
    case kS390_Popcnt32:
      __ Popcnt32(i.OutputRegister(), i.InputRegister(0));
    case kS390_Popcnt64:
      __ Popcnt64(i.OutputRegister(), i.InputRegister(0));
    case kS390_CmpFloat:
    case kS390_CmpDouble:
        __ And(r0, i.InputRegister(0), i.InputRegister(1));
        Operand opnd = i.InputImmediate(1);
        if (is_uint16(opnd.immediate())) {
          __ tmll(i.InputRegister(0), opnd);
          __ lr(r0, i.InputRegister(0));
        __ AndP(r0, i.InputRegister(0), i.InputRegister(1));
        Operand opnd = i.InputImmediate(1);
        if (is_uint16(opnd.immediate())) {
          __ tmll(i.InputRegister(0), opnd);
          __ AndP(r0, i.InputRegister(0), opnd);
    case kS390_Float64SilenceNaN: {
      int stack_decrement = i.InputInt32(0);
    case kS390_PushFrame: {
      int num_slots = i.InputInt32(1);
      if (instr->InputAt(0)->IsFPRegister()) {
    case kS390_StoreToStackSlot: {
      int slot = i.InputInt32(1);
      if (instr->InputAt(0)->IsFPRegister()) {
          __ StoreF64(i.InputDoubleRegister(0),
          __ StoreF32(i.InputDoubleRegister(0),
          __ StoreV128(i.InputDoubleRegister(0),
        __ StoreU64(i.InputRegister(0),
2027 case kS390_SignExtendWord8ToInt32:
2028 __ lbr(
i.OutputRegister(),
i.InputRegister(0));
2031 case kS390_SignExtendWord16ToInt32:
2032 __ lhr(
i.OutputRegister(),
i.InputRegister(0));
2035 case kS390_SignExtendWord8ToInt64:
2036 __ lgbr(
i.OutputRegister(),
i.InputRegister(0));
2038 case kS390_SignExtendWord16ToInt64:
2039 __ lghr(
i.OutputRegister(),
i.InputRegister(0));
2041 case kS390_SignExtendWord32ToInt64:
2042 __ lgfr(
i.OutputRegister(),
i.InputRegister(0));
2044 case kS390_Uint32ToUint64:
2046 __ llgfr(
i.OutputRegister(),
i.InputRegister(0));
2048 case kS390_Int64ToInt32:
2050 __ lgfr(
i.OutputRegister(),
i.InputRegister(0));
2053 case kS390_Int64ToFloat32:
2054 __ ConvertInt64ToFloat(
i.OutputDoubleRegister(),
i.InputRegister(0));
2056 case kS390_Int64ToDouble:
2057 __ ConvertInt64ToDouble(
i.OutputDoubleRegister(),
i.InputRegister(0));
2059 case kS390_Uint64ToFloat32:
2060 __ ConvertUnsignedInt64ToFloat(
i.OutputDoubleRegister(),
2061 i.InputRegister(0));
2063 case kS390_Uint64ToDouble:
2064 __ ConvertUnsignedInt64ToDouble(
i.OutputDoubleRegister(),
2065 i.InputRegister(0));
2067 case kS390_Int32ToFloat32:
2068 __ ConvertIntToFloat(
i.OutputDoubleRegister(),
i.InputRegister(0));
2070 case kS390_Int32ToDouble:
2071 __ ConvertIntToDouble(
i.OutputDoubleRegister(),
i.InputRegister(0));
2073 case kS390_Uint32ToFloat32:
2074 __ ConvertUnsignedIntToFloat(
i.OutputDoubleRegister(),
2075 i.InputRegister(0));
2077 case kS390_Uint32ToDouble:
2078 __ ConvertUnsignedIntToDouble(
i.OutputDoubleRegister(),
2079 i.InputRegister(0));
2081 case kS390_DoubleToInt32: {
2083 if (
i.OutputCount() > 1) {
2084 __ mov(
i.OutputRegister(1), Operand(1));
2086 __ ConvertDoubleToInt32(
i.OutputRegister(0),
i.InputDoubleRegister(0),
2089 if (
i.OutputCount() > 1) {
2097 case kS390_DoubleToUint32: {
2099 if (
i.OutputCount() > 1) {
2100 __ mov(
i.OutputRegister(1), Operand(1));
2102 __ ConvertDoubleToUnsignedInt32(
i.OutputRegister(0),
2103 i.InputDoubleRegister(0));
2105 if (
i.OutputCount() > 1) {
2113 case kS390_DoubleToInt64: {
2115 if (
i.OutputCount() > 1) {
2116 __ mov(
i.OutputRegister(1), Operand(1));
2118 __ ConvertDoubleToInt64(
i.OutputRegister(0),
i.InputDoubleRegister(0));
2120 if (
i.OutputCount() > 1) {
2128 case kS390_DoubleToUint64: {
2130 if (
i.OutputCount() > 1) {
2131 __ mov(
i.OutputRegister(1), Operand(1));
2133 __ ConvertDoubleToUnsignedInt64(
i.OutputRegister(0),
2134 i.InputDoubleRegister(0));
2136 if (
i.OutputCount() > 1) {
2144 case kS390_Float32ToInt32: {
2146 __ ConvertFloat32ToInt32(
i.OutputRegister(0),
i.InputDoubleRegister(0),
2149 if (set_overflow_to_min_i32) {
2153 __ llilh(
i.OutputRegister(0), Operand(0x8000));
2158 case kS390_Float32ToUint32: {
2160 __ ConvertFloat32ToUnsignedInt32(
i.OutputRegister(0),
2161 i.InputDoubleRegister(0));
2163 if (set_overflow_to_min_u32) {
2172 case kS390_Float32ToUint64: {
2174 if (
i.OutputCount() > 1) {
2175 __ mov(
i.OutputRegister(1), Operand(1));
2177 __ ConvertFloat32ToUnsignedInt64(
i.OutputRegister(0),
2178 i.InputDoubleRegister(0));
2180 if (
i.OutputCount() > 1) {
2188 case kS390_Float32ToInt64: {
2190 if (
i.OutputCount() > 1) {
2191 __ mov(
i.OutputRegister(1), Operand(1));
2193 __ ConvertFloat32ToInt64(
i.OutputRegister(0),
i.InputDoubleRegister(0));
2195 if (
i.OutputCount() > 1) {
2203 case kS390_DoubleToFloat32:
2206 case kS390_Float32ToDouble:
2209 case kS390_DoubleExtractLowWord32:
2210 __ lgdr(
i.OutputRegister(),
i.InputDoubleRegister(0));
2211 __ llgfr(
i.OutputRegister(),
i.OutputRegister());
2213 case kS390_DoubleExtractHighWord32:
2214 __ lgdr(
i.OutputRegister(),
i.InputDoubleRegister(0));
2215 __ srlg(
i.OutputRegister(),
i.OutputRegister(), Operand(32));
2217 case kS390_DoubleFromWord32Pair:
2219 __ ShiftLeftU64(
i.TempRegister(0),
i.InputRegister(0), Operand(32));
2221 __ MovInt64ToDouble(
i.OutputDoubleRegister(),
i.TempRegister(0));
2223 case kS390_DoubleInsertLowWord32:
2228 case kS390_DoubleInsertHighWord32:
2230 __ lgdr(r0,
i.InputDoubleRegister(0));
2234 case kS390_DoubleConstruct:
2241 case kS390_LoadWordS8:
2244 case kS390_BitcastFloat32ToInt32:
2247 case kS390_BitcastInt32ToFloat32:
2248 __ MovIntToFloat(
i.OutputDoubleRegister(),
i.InputRegister(0));
2250 case kS390_BitcastDoubleToInt64:
2251 __ MovDoubleToInt64(
i.OutputRegister(),
i.InputDoubleRegister(0));
2253 case kS390_BitcastInt64ToDouble:
2254 __ MovInt64ToDouble(
i.OutputDoubleRegister(),
i.InputRegister(0));
2256 case kS390_LoadWordU8:
2259 case kS390_LoadWordU16:
2262 case kS390_LoadWordS16:
2265 case kS390_LoadWordU32:
2268 case kS390_LoadWordS32:
2271 case kS390_LoadReverse16:
2274 case kS390_LoadReverse32:
2277 case kS390_LoadReverse64:
2280 case kS390_LoadReverse16RR:
2281 __ lrvr(
i.OutputRegister(),
i.InputRegister(0));
2282 __ rll(
i.OutputRegister(),
i.OutputRegister(), Operand(16));
2284 case kS390_LoadReverse32RR:
2285 __ lrvr(
i.OutputRegister(),
i.InputRegister(0));
2287 case kS390_LoadReverse64RR:
2288 __ lrvgr(
i.OutputRegister(),
i.InputRegister(0));
2290 case kS390_LoadReverseSimd128RR:
2298 case kS390_LoadReverseSimd128: {
2303 is_uint12(operand.offset())) {
2306 __ lrvg(r0, operand);
2309 __ vlvgp(dst, r1, r0);
2313 case kS390_LoadWord64:
2316 case kS390_LoadAndTestWord32: {
2320 case kS390_LoadAndTestWord64: {
2324 case kS390_LoadFloat32:
2327 case kS390_LoadDouble:
2330 case kS390_LoadSimd128: {
2333 __ vl(
i.OutputSimd128Register(), operand,
Condition(0));
2336 case kS390_StoreWord8:
2339 case kS390_StoreWord16:
2342 case kS390_StoreWord32:
2345 case kS390_StoreWord64:
2348 case kS390_StoreReverse16:
2351 case kS390_StoreReverse32:
2354 case kS390_StoreReverse64:
2357 case kS390_StoreReverseSimd128: {
2360 MemOperand operand =
i.MemoryOperand(&mode, &index);
2362 is_uint12(operand.offset())) {
2363 __ vstbr(
i.InputSimd128Register(index), operand,
Condition(4));
2365 __ vlgv(r0,
i.InputSimd128Register(index),
MemOperand(r0, 1),
2367 __ vlgv(r1,
i.InputSimd128Register(index),
MemOperand(r0, 0),
2369 __ strvg(r0, operand);
2375 case kS390_StoreFloat32:
2378 case kS390_StoreDouble:
2381 case kS390_StoreSimd128: {
2384 MemOperand operand =
i.MemoryOperand(&mode, &index);
2385 __ vst(
i.InputSimd128Register(index), operand,
Condition(0));
2390 if (!is_int20(mem.offset())) {
2393 DCHECK(is_int32(mem.offset()));
2394 __ AddS64(ip, mem.rb(), Operand(mem.offset()));
2397 __ lay(
i.OutputRegister(), mem);
2400 case kAtomicExchangeInt8:
2401 case kAtomicExchangeUint8: {
2407 __ AtomicExchangeU8(r1, value, output, r0);
2408 if (opcode == kAtomicExchangeInt8) {
2409 __ LoadS8(output, output);
2411 __ LoadU8(output, output);
2415 case kAtomicExchangeInt16:
2416 case kAtomicExchangeUint16: {
2424 if (reverse_bytes) {
2429 __ AtomicExchangeU16(r1,
value_, output, r0);
2430 if (reverse_bytes) {
2431 __ lrvr(output, output);
2432 __ ShiftRightU32(output, output, Operand(16));
2434 if (opcode == kAtomicExchangeInt16) {
2435 __ lghr(output, output);
2437 __ llghr(output, output);
2441 case kAtomicExchangeWord32: {
2450 if (reverse_bytes) {
2458 if (reverse_bytes) {
2459 __ lrvr(output, output);
2460 __ LoadU32(output, output);
2464 case kAtomicCompareExchangeInt8:
2467 case kAtomicCompareExchangeUint8:
2470 case kAtomicCompareExchangeInt16:
2473 case kAtomicCompareExchangeUint16:
2476 case kAtomicCompareExchangeWord32:
2479#define ATOMIC_BINOP_CASE(op, inst) \
2480 case kAtomic##op##Int8: \
2481 ASSEMBLE_ATOMIC_BINOP_BYTE(inst, [&]() { \
2482 intptr_t shift_right = static_cast<intptr_t>(shift_amount); \
2483 __ srlk(result, prev, Operand(shift_right)); \
2484 __ LoadS8(result, result); \
2487 case kAtomic##op##Uint8: \
2488 ASSEMBLE_ATOMIC_BINOP_BYTE(inst, [&]() { \
2489 int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
2490 __ RotateInsertSelectBits(result, prev, Operand(56), Operand(63), \
2491 Operand(static_cast<intptr_t>(rotate_left)), \
2495 case kAtomic##op##Int16: \
2496 ASSEMBLE_ATOMIC_BINOP_HALFWORD(inst, [&]() { \
2497 intptr_t shift_right = static_cast<intptr_t>(shift_amount); \
2498 __ srlk(result, prev, Operand(shift_right)); \
2499 if (is_wasm_on_be(info())) { \
2500 __ lrvr(result, result); \
2501 __ ShiftRightS32(result, result, Operand(16)); \
2503 __ LoadS16(result, result); \
2506 case kAtomic##op##Uint16: \
2507 ASSEMBLE_ATOMIC_BINOP_HALFWORD(inst, [&]() { \
2508 int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
2509 __ RotateInsertSelectBits(result, prev, Operand(48), Operand(63), \
2510 Operand(static_cast<intptr_t>(rotate_left)), \
2512 if (is_wasm_on_be(info())) { \
2513 __ lrvr(result, result); \
2514 __ ShiftRightU32(result, result, Operand(16)); \
2523#undef ATOMIC_BINOP_CASE
2524 case kAtomicAddWord32:
2527 case kAtomicSubWord32:
2530 case kAtomicAndWord32:
2533 case kAtomicOrWord32:
2536 case kAtomicXorWord32:
2539 case kS390_Word64AtomicAddUint64:
2542 case kS390_Word64AtomicSubUint64:
2545 case kS390_Word64AtomicAndUint64:
2548 case kS390_Word64AtomicOrUint64:
2551 case kS390_Word64AtomicXorUint64:
2554 case kS390_Word64AtomicExchangeUint64: {
2563 if (reverse_bytes) {
2571 if (reverse_bytes) {
2572 __ lrvgr(output, output);
2576 case kS390_Word64AtomicCompareExchangeUint64:
2580#define SIMD_SHIFT_LIST(V) \
2594#define EMIT_SIMD_SHIFT(name) \
2595 case kS390_##name: { \
2596 __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
2597 i.InputRegister(1), kScratchDoubleReg); \
2601#undef EMIT_SIMD_SHIFT
2602#undef SIMD_SHIFT_LIST
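// The SIMD opcodes are emitted through X-macro lists: each list enumerates the
// opcodes that share one operand shape, and a small EMIT_* macro expands a
// switch case per entry, keeping the dispatch table compact.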
2604#define SIMD_BINOP_LIST(V) \
2659 V(I16x8RoundingAverageU) \
2671 V(I8x16RoundingAverageU) \
2677#define EMIT_SIMD_BINOP(name) \
2678 case kS390_##name: { \
2679 __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
2680 i.InputSimd128Register(1)); \
2684#undef EMIT_SIMD_BINOP
2685#undef SIMD_BINOP_LIST
2687#define SIMD_UNOP_LIST(V) \
2688 V(F64x2Splat, Simd128Register, DoubleRegister) \
2689 V(F64x2Abs, Simd128Register, Simd128Register) \
2690 V(F64x2Neg, Simd128Register, Simd128Register) \
2691 V(F64x2Sqrt, Simd128Register, Simd128Register) \
2692 V(F64x2Ceil, Simd128Register, Simd128Register) \
2693 V(F64x2Floor, Simd128Register, Simd128Register) \
2694 V(F64x2Trunc, Simd128Register, Simd128Register) \
2695 V(F64x2NearestInt, Simd128Register, Simd128Register) \
2696 V(F32x4Splat, Simd128Register, DoubleRegister) \
2697 V(F32x4Abs, Simd128Register, Simd128Register) \
2698 V(F32x4Neg, Simd128Register, Simd128Register) \
2699 V(F32x4Sqrt, Simd128Register, Simd128Register) \
2700 V(F32x4Ceil, Simd128Register, Simd128Register) \
2701 V(F32x4Floor, Simd128Register, Simd128Register) \
2702 V(F32x4Trunc, Simd128Register, Simd128Register) \
2703 V(F32x4NearestInt, Simd128Register, Simd128Register) \
2704 V(I64x2Splat, Simd128Register, Register) \
2705 V(I64x2Abs, Simd128Register, Simd128Register) \
2706 V(I64x2Neg, Simd128Register, Simd128Register) \
2707 V(I64x2SConvertI32x4Low, Simd128Register, Simd128Register) \
2708 V(I64x2SConvertI32x4High, Simd128Register, Simd128Register) \
2709 V(I64x2UConvertI32x4Low, Simd128Register, Simd128Register) \
2710 V(I64x2UConvertI32x4High, Simd128Register, Simd128Register) \
2711 V(I32x4Splat, Simd128Register, Register) \
2712 V(I32x4Abs, Simd128Register, Simd128Register) \
2713 V(I32x4Neg, Simd128Register, Simd128Register) \
2714 V(I32x4SConvertI16x8Low, Simd128Register, Simd128Register) \
2715 V(I32x4SConvertI16x8High, Simd128Register, Simd128Register) \
2716 V(I32x4UConvertI16x8Low, Simd128Register, Simd128Register) \
2717 V(I32x4UConvertI16x8High, Simd128Register, Simd128Register) \
2718 V(I16x8Splat, Simd128Register, Register) \
2719 V(I16x8Abs, Simd128Register, Simd128Register) \
2720 V(I16x8Neg, Simd128Register, Simd128Register) \
2721 V(I16x8SConvertI8x16Low, Simd128Register, Simd128Register) \
2722 V(I16x8SConvertI8x16High, Simd128Register, Simd128Register) \
2723 V(I16x8UConvertI8x16Low, Simd128Register, Simd128Register) \
2724 V(I16x8UConvertI8x16High, Simd128Register, Simd128Register) \
2725 V(I8x16Splat, Simd128Register, Register) \
2726 V(I8x16Abs, Simd128Register, Simd128Register) \
2727 V(I8x16Neg, Simd128Register, Simd128Register) \
2728 V(S128Not, Simd128Register, Simd128Register)
2730#define EMIT_SIMD_UNOP(name, dtype, stype) \
2731 case kS390_##name: { \
2732 __ name(i.Output##dtype(), i.Input##stype(0)); \
2736#undef EMIT_SIMD_UNOP
2737#undef SIMD_UNOP_LIST
2739#define SIMD_EXTRACT_LANE_LIST(V) \
2740 V(F64x2ExtractLane, DoubleRegister) \
2741 V(F32x4ExtractLane, DoubleRegister) \
2742 V(I64x2ExtractLane, Register) \
2743 V(I32x4ExtractLane, Register) \
2744 V(I16x8ExtractLaneU, Register) \
2745 V(I16x8ExtractLaneS, Register) \
2746 V(I8x16ExtractLaneU, Register) \
2747 V(I8x16ExtractLaneS, Register)
2749#define EMIT_SIMD_EXTRACT_LANE(name, dtype) \
2750 case kS390_##name: { \
2751 __ name(i.Output##dtype(), i.InputSimd128Register(0), i.InputInt8(1), \
2756#undef EMIT_SIMD_EXTRACT_LANE
2757#undef SIMD_EXTRACT_LANE_LIST
2759#define SIMD_REPLACE_LANE_LIST(V) \
2760 V(F64x2ReplaceLane, DoubleRegister) \
2761 V(F32x4ReplaceLane, DoubleRegister) \
2762 V(I64x2ReplaceLane, Register) \
2763 V(I32x4ReplaceLane, Register) \
2764 V(I16x8ReplaceLane, Register) \
2765 V(I8x16ReplaceLane, Register)
2767#define EMIT_SIMD_REPLACE_LANE(name, stype) \
2768 case kS390_##name: { \
2769 __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
2770 i.Input##stype(2), i.InputInt8(1), kScratchReg); \
2774#undef EMIT_SIMD_REPLACE_LANE
2775#undef SIMD_REPLACE_LANE_LIST
2777#define SIMD_EXT_MUL_LIST(V) \
2778 V(I64x2ExtMulLowI32x4S) \
2779 V(I64x2ExtMulHighI32x4S) \
2780 V(I64x2ExtMulLowI32x4U) \
2781 V(I64x2ExtMulHighI32x4U) \
2782 V(I32x4ExtMulLowI16x8S) \
2783 V(I32x4ExtMulHighI16x8S) \
2784 V(I32x4ExtMulLowI16x8U) \
2785 V(I32x4ExtMulHighI16x8U) \
2786 V(I16x8ExtMulLowI8x16S) \
2787 V(I16x8ExtMulHighI8x16S) \
2788 V(I16x8ExtMulLowI8x16U) \
2789 V(I16x8ExtMulHighI8x16U)
2791#define EMIT_SIMD_EXT_MUL(name) \
2792 case kS390_##name: { \
2793 __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
2794 i.InputSimd128Register(1), kScratchDoubleReg); \
2798#undef EMIT_SIMD_EXT_MUL
2799#undef SIMD_EXT_MUL_LIST
2801#define SIMD_ALL_TRUE_LIST(V) \
2807#define EMIT_SIMD_ALL_TRUE(name) \
2808 case kS390_##name: { \
2809 __ name(i.OutputRegister(), i.InputSimd128Register(0), kScratchReg, \
2810 kScratchDoubleReg); \
2814#undef EMIT_SIMD_ALL_TRUE
2815#undef SIMD_ALL_TRUE_LIST
2817#define SIMD_QFM_LIST(V) \
2823#define EMIT_SIMD_QFM(name) \
2824 case kS390_##name: { \
2825 __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
2826 i.InputSimd128Register(1), i.InputSimd128Register(2)); \
2833#define SIMD_ADD_SUB_SAT_LIST(V) \
2843#define EMIT_SIMD_ADD_SUB_SAT(name) \
2844 case kS390_##name: { \
2845 __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
2846 i.InputSimd128Register(1), kScratchDoubleReg, \
2847 i.ToSimd128Register(instr->TempAt(0))); \
2851#undef EMIT_SIMD_ADD_SUB_SAT
2852#undef SIMD_ADD_SUB_SAT_LIST
2854#define SIMD_EXT_ADD_PAIRWISE_LIST(V) \
2855 V(I32x4ExtAddPairwiseI16x8S) \
2856 V(I32x4ExtAddPairwiseI16x8U) \
2857 V(I16x8ExtAddPairwiseI8x16S) \
2858 V(I16x8ExtAddPairwiseI8x16U)
2860#define EMIT_SIMD_EXT_ADD_PAIRWISE(name) \
2861 case kS390_##name: { \
2862 __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
2863 kScratchDoubleReg, i.ToSimd128Register(instr->TempAt(0))); \
2867#undef EMIT_SIMD_EXT_ADD_PAIRWISE
2868#undef SIMD_EXT_ADD_PAIRWISE_LIST
2870 case kS390_I64x2Mul: {
2871 __ I64x2Mul(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2872 i.InputSimd128Register(1), r0, r1, ip);
2875 case kS390_I32x4GeU: {
2876 __ I32x4GeU(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2880 case kS390_I16x8GeU: {
2881 __ I16x8GeU(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2885 case kS390_I8x16GeU: {
2886 __ I8x16GeU(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2891 case kS390_V128AnyTrue: {
2892 __ V128AnyTrue(
i.OutputRegister(),
i.InputSimd128Register(0),
2897 case kS390_S128Const: {
2898 uint64_t low =
make_uint64(
i.InputUint32(1),
i.InputUint32(0));
2899 uint64_t high =
make_uint64(
i.InputUint32(3),
i.InputUint32(2));
2900 __ S128Const(
i.OutputSimd128Register(), high, low, r0, ip);
2903 case kS390_S128Zero: {
2905 __ S128Zero(dst, dst);
2908 case kS390_S128AllOnes: {
2910 __ S128AllOnes(dst, dst);
2913 case kS390_S128Select: {
2918 __ S128Select(dst, src1, src2,
mask);
2922 case kS390_I32x4SConvertF32x4: {
2923 __ I32x4SConvertF32x4(
i.OutputSimd128Register(),
2928 case kS390_I32x4UConvertF32x4: {
2929 __ I32x4UConvertF32x4(
i.OutputSimd128Register(),
2934 case kS390_F32x4SConvertI32x4: {
2935 __ F32x4SConvertI32x4(
i.OutputSimd128Register(),
2940 case kS390_F32x4UConvertI32x4: {
2941 __ F32x4UConvertI32x4(
i.OutputSimd128Register(),
2946 case kS390_I16x8SConvertI32x4: {
2947 __ I16x8SConvertI32x4(
i.OutputSimd128Register(),
2948 i.InputSimd128Register(0),
2949 i.InputSimd128Register(1));
2952 case kS390_I8x16SConvertI16x8: {
2953 __ I8x16SConvertI16x8(
i.OutputSimd128Register(),
2954 i.InputSimd128Register(0),
2955 i.InputSimd128Register(1));
2958 case kS390_I16x8UConvertI32x4: {
2959 __ I16x8UConvertI32x4(
i.OutputSimd128Register(),
2960 i.InputSimd128Register(0),
2964 case kS390_I8x16UConvertI16x8: {
2965 __ I8x16UConvertI16x8(
i.OutputSimd128Register(),
2966 i.InputSimd128Register(0),
2970 case kS390_I8x16Shuffle: {
2971 uint64_t low =
make_uint64(
i.InputUint32(3),
i.InputUint32(2));
2972 uint64_t high =
make_uint64(
i.InputUint32(5),
i.InputUint32(4));
2973 __ I8x16Shuffle(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2974 i.InputSimd128Register(1), high, low, r0, ip,
2978 case kS390_I8x16Swizzle: {
2979 __ I8x16Swizzle(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2983 case kS390_I64x2BitMask: {
2984 __ I64x2BitMask(
i.OutputRegister(),
i.InputSimd128Register(0),
2988 case kS390_I32x4BitMask: {
2989 __ I32x4BitMask(
i.OutputRegister(),
i.InputSimd128Register(0),
2993 case kS390_I16x8BitMask: {
2994 __ I16x8BitMask(
i.OutputRegister(),
i.InputSimd128Register(0),
2998 case kS390_I8x16BitMask: {
2999 __ I8x16BitMask(
i.OutputRegister(),
i.InputSimd128Register(0), r0, ip,
3003 case kS390_I32x4DotI16x8S: {
3004 __ I32x4DotI16x8S(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
3009 case kS390_I16x8DotI8x16S: {
3010 __ I16x8DotI8x16S(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
3014 case kS390_I32x4DotI8x16AddS: {
3015 __ I32x4DotI8x16AddS(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
3016 i.InputSimd128Register(1),
i.InputSimd128Register(2),
3020 case kS390_I16x8Q15MulRSatS: {
3021 __ I16x8Q15MulRSatS(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
3023 i.ToSimd128Register(
instr->TempAt(0)),
3024 i.ToSimd128Register(
instr->TempAt(1)));
3027 case kS390_I8x16Popcnt: {
3028 __ I8x16Popcnt(
i.OutputSimd128Register(),
i.InputSimd128Register(0));
3031 case kS390_F64x2ConvertLowI32x4S: {
3032 __ F64x2ConvertLowI32x4S(
i.OutputSimd128Register(),
3033 i.InputSimd128Register(0));
3036 case kS390_F64x2ConvertLowI32x4U: {
3037 __ F64x2ConvertLowI32x4U(
i.OutputSimd128Register(),
3038 i.InputSimd128Register(0));
3041 case kS390_F64x2PromoteLowF32x4: {
3042 __ F64x2PromoteLowF32x4(
i.OutputSimd128Register(),
3047 case kS390_F32x4DemoteF64x2Zero: {
3048 __ F32x4DemoteF64x2Zero(
i.OutputSimd128Register(),
3053 case kS390_I32x4TruncSatF64x2SZero: {
3054 __ I32x4TruncSatF64x2SZero(
i.OutputSimd128Register(),
3058 case kS390_I32x4TruncSatF64x2UZero: {
3059 __ I32x4TruncSatF64x2UZero(
i.OutputSimd128Register(),
3063#define LOAD_SPLAT(type) \
3064 AddressingMode mode = kMode_None; \
3065 MemOperand operand = i.MemoryOperand(&mode); \
3066 Simd128Register dst = i.OutputSimd128Register(); \
3067 __ LoadAndSplat##type##LE(dst, operand, kScratchReg);
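// The LoadAndSplat/LoadAndExtend/LoadV*Zero helpers load a scalar in wasm
// (little-endian) byte order and broadcast, widen, or zero-extend it into the
// 128-bit destination; the LE suffix appears to mark these endian-aware
// variants.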
3068 case kS390_S128Load64Splat: {
3072 case kS390_S128Load32Splat: {
3076 case kS390_S128Load16Splat: {
3080 case kS390_S128Load8Splat: {
3085#define LOAD_EXTEND(type) \
3086 AddressingMode mode = kMode_None; \
3087 MemOperand operand = i.MemoryOperand(&mode); \
3088 Simd128Register dst = i.OutputSimd128Register(); \
3089 __ LoadAndExtend##type##LE(dst, operand, kScratchReg);
3090 case kS390_S128Load32x2U: {
3094 case kS390_S128Load32x2S: {
3098 case kS390_S128Load16x4U: {
3102 case kS390_S128Load16x4S: {
3106 case kS390_S128Load8x8U: {
3110 case kS390_S128Load8x8S: {
3115#define LOAD_AND_ZERO(type) \
3116 AddressingMode mode = kMode_None; \
3117 MemOperand operand = i.MemoryOperand(&mode); \
3118 Simd128Register dst = i.OutputSimd128Register(); \
3119 __ LoadV##type##ZeroLE(dst, operand, kScratchReg);
3120 case kS390_S128Load32Zero: {
3124 case kS390_S128Load64Zero: {
3130#define LOAD_LANE(type, lane) \
3131 AddressingMode mode = kMode_None; \
3133 MemOperand operand = i.MemoryOperand(&mode, &index); \
3134 Simd128Register dst = i.OutputSimd128Register(); \
3135 DCHECK_EQ(dst, i.InputSimd128Register(0)); \
3136 __ LoadLane##type##LE(dst, operand, lane, kScratchReg);
3137 case kS390_S128Load8Lane: {
3141 case kS390_S128Load16Lane: {
3145 case kS390_S128Load32Lane: {
3149 case kS390_S128Load64Lane: {
3154#define STORE_LANE(type, lane) \
3155 AddressingMode mode = kMode_None; \
3157 MemOperand operand = i.MemoryOperand(&mode, &index); \
3158 Simd128Register src = i.InputSimd128Register(0); \
3159 __ StoreLane##type##LE(src, operand, lane, kScratchReg);
3160 case kS390_S128Store8Lane: {
3164 case kS390_S128Store16Lane: {
3168 case kS390_S128Store32Lane: {
3172 case kS390_S128Store64Lane: {
3177 case kS390_StoreCompressTagged: {
3181 MemOperand operand =
i.MemoryOperand(&mode, &index);
3182 Register value =
i.InputRegister(index);
3183 __ StoreTaggedField(value, operand, r1);
3186 case kS390_LoadDecompressTaggedSigned: {
3188 __ DecompressTaggedSigned(
i.OutputRegister(),
i.MemoryOperand());
3191 case kS390_LoadDecompressTagged: {
3193 __ DecompressTagged(
i.OutputRegister(),
i.MemoryOperand());
  S390OperandConverter i(this, instr);
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  if (op == kS390_CmpFloat || op == kS390_CmpDouble) {
    if (cond == le || cond == eq || cond == lt) {
      __ bunordered(flabel);
    } else if (cond == gt || cond == ne || cond == ge) {
      __ bunordered(tlabel);
  if (!branch->fallthru) __ b(flabel);
                                            BranchInfo* branch) {
#if V8_ENABLE_WEBASSEMBLY
void CodeGenerator::AssembleArchTrap(Instruction* instr,
    OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
    void Generate() final {
      S390OperandConverter i(gen_, instr_);
          static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
      GenerateCallToTrap(trap_id);
    void GenerateCallToTrap(TrapId trap_id) {
      gen_->AssembleSourcePosition(instr_);
      __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
      ReferenceMap* reference_map =
          gen_->zone()->New<ReferenceMap>(gen_->zone());
      gen_->RecordSafepoint(reference_map);
    Instruction* instr_;
    CodeGenerator* gen_;
  Label* tlabel = ool->entry();
  if (op == kS390_CmpFloat || op == kS390_CmpDouble) {
    if (cond == le || cond == eq || cond == lt) {
      __ bunordered(&end);
    } else if (cond == gt || cond == ne || cond == ge) {
      __ bunordered(tlabel);
  S390OperandConverter i(this, instr);
  bool check_unordered = (op == kS390_CmpDouble || op == kS390_CmpFloat);
         (op == kS390_Add32 || op == kS390_Add64 || op == kS390_Sub32 ||
          op == kS390_Sub64 || op == kS390_Mul32 ||
          op == kS390_Mul64WithOverflow));
  if (check_unordered) {
    __ bunordered(&done);
                                            BranchInfo* branch) {
  S390OperandConverter i(this, instr);
  std::vector<std::pair<int32_t, Label*>> cases;
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
                                      cases.data() + cases.size());
  S390OperandConverter i(this, instr);
  for (int32_t index = 0; index < case_count; ++index) {
  __ CmpU64(input, Operand(case_count));
3363 const DoubleRegList double_saves = call_descriptor->CalleeSavedFPRegisters();
3366 if (!double_saves.is_empty()) {
3367 frame->AlignSavedCalleeRegisterSlots();
3373 const RegList saves = call_descriptor->CalleeSavedRegisters();
3374 if (!saves.is_empty()) {
3377 frame->AllocateSavedCalleeRegisterSlots(num_saves);
3385 if (call_descriptor->IsCFunctionCall()) {
3386#if V8_ENABLE_WEBASSEMBLY
3387 if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
3388 __ StubPrologue(StackFrame::C_WASM_ENTRY);
3399 } else if (call_descriptor->IsJSFunctionCall()) {
3405 __ StubPrologue(type);
3406#if V8_ENABLE_WEBASSEMBLY
3407 if (call_descriptor->IsAnyWasmFunctionCall() ||
3408 call_descriptor->IsWasmImportWrapper() ||
3409 call_descriptor->IsWasmCapiFunction()) {
3416 if (call_descriptor->IsWasmCapiFunction()) {
3425 int required_slots =
3426 frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
3427 if (info()->is_osr()) {
3429 __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
3435 __ RecordComment("-- OSR entrypoint --");
3440 const DoubleRegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
3441 const RegList saves = call_descriptor->CalleeSavedRegisters();
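// For Wasm frames that need many slots, the prologue compares sp against the stack limit plus the
// frame size; on overflow it either grows the stack (experimental_wasm_growable_stacks) or calls
// the Wasm stack-overflow builtin and records a safepoint.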
3443 if (required_slots > 0) {
3444#if V8_ENABLE_WEBASSEMBLY
3458 __ AddS64(stack_limit, stack_limit,
3460 __ CmpU64(sp, stack_limit);
3464 if (v8_flags.experimental_wasm_growable_stacks) {
3468 WasmHandleStackOverflowDescriptor::FrameBaseRegister());
3473 __ MultiPushF64OrV128(fp_regs_to_save, r1);
3477 WasmHandleStackOverflowDescriptor::FrameBaseRegister(), fp,
3480 __ CallBuiltin(Builtin::kWasmHandleStackOverflow);
3481 __ MultiPopF64OrV128(fp_regs_to_save, r1);
3484 __ Call(static_cast<intptr_t>(Builtin::kWasmStackOverflow),
3488 ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
3498 required_slots -= saves.Count();
3499 required_slots -= frame()->GetReturnSlotCount();
3505 if (!saves_fp.is_empty()) {
3506 __ MultiPushDoubles(saves_fp);
3511 if (!saves.is_empty()) {
3512 __ MultiPush(saves);
3516 const int returns = frame()->GetReturnSlotCount();
3533 const int returns = frame()->GetReturnSlotCount();
3540 const RegList saves = call_descriptor->CalleeSavedRegisters();
3541 if (!saves.is_empty()) {
3546 const DoubleRegList double_saves = call_descriptor->CalleeSavedFPRegisters();
3547 if (!double_saves.is_empty()) {
3548 __ MultiPopDoubles(double_saves);
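// AssembleReturn restores callee-saved registers and then drops arguments: JS calls pop the larger
// of the declared parameter slots and the actual argument count, while other calls drop the static
// parameter slots plus any additional pop count.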
3553 S390OperandConverter g(this, nullptr);
3554 const int parameter_slots =
3555 static_cast<int>(call_descriptor->ParameterSlotCount());
3559 if (parameter_slots != 0) {
3560 if (additional_pop_count->IsImmediate()) {
3561 DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
3563 __ CmpS64(g.ToRegister(additional_pop_count), Operand(0));
3564 __ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue);
3568#if V8_ENABLE_WEBASSEMBLY
3569 if (call_descriptor->IsAnyWasmFunctionCall() &&
3570 v8_flags.experimental_wasm_growable_stacks) {
3572 UseScratchRegisterScope temps{masm()};
3573 Register scratch = temps.Acquire();
3587 __ MultiPushF64OrV128(fp_regs_to_save, r1);
3589 __ PrepareCallCFunction(1, r0);
3590 __ CallCFunction(ExternalReference::wasm_shrink_stack(), 1);
3594 __ MultiPopF64OrV128(fp_regs_to_save, r1);
3605 const bool drop_jsargs = parameter_slots != 0 &&
3607 call_descriptor->IsJSFunctionCall();
3609 if (call_descriptor->IsCFunctionCall()) {
3614 if (additional_pop_count->IsImmediate() &&
3615 g.ToConstant(additional_pop_count).ToInt32() == 0) {
3625 DCHECK(!call_descriptor->CalleeSavedRegisters().has(argc_reg));
3636 DCHECK(!call_descriptor->CalleeSavedRegisters().has(argc_reg));
3637 if (parameter_slots > 1) {
3639 __ CmpS64(argc_reg, Operand(parameter_slots));
3641 __ mov(argc_reg, Operand(parameter_slots));
3644 __ DropArguments(argc_reg);
3645 } else if (additional_pop_count->IsImmediate()) {
3646 int additional_count = g.ToConstant(additional_pop_count).ToInt32();
3647 __ Drop(parameter_slots + additional_count);
3648 } else if (parameter_slots == 0) {
3649 __ Drop(g.ToRegister(additional_pop_count));
3653 __ Drop(parameter_slots);
3661 ZoneDeque<DeoptimizationExit*>* exits) {}
3666 S390OperandConverter g(this, nullptr);
3667 int last_frame_slot_id =
3670 int slot_id = last_frame_slot_id + sp_delta + new_slots;
3672 if (source->IsFloatStackSlot() || source->IsDoubleStackSlot()) {
3673 __ LoadU64(r1, g.ToMemOperand(source));
3688 S390OperandConverter g(this, nullptr);
3689 if (dest->IsFloatStackSlot() || dest->IsDoubleStackSlot()) {
3692 __ StoreU64(r1, g.ToMemOperand(dest));
3694 int last_frame_slot_id =
3697 int slot_id = last_frame_slot_id + sp_delta;
3721 int scratch_reg_code =
3737 int scratch_reg_code =
3749 if ((move->source().IsConstant() || move->source().IsFPStackSlot()) &&
3750 !move->destination().IsFPRegister()) {
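// AssembleMove dispatches on the source/destination kinds: GP registers, stack slots, constants
// (including roots and compressed heap objects), and FP/SIMD registers and slots.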
3757 S390OperandConverter g(this, nullptr);
3762 if (source->IsRegister()) {
3764 Register src = g.ToRegister(source);
3770 } else if (source->IsStackSlot()) {
3777 __ LoadU64(temp, src, r0);
3780 } else if (source->IsConstant()) {
3781 Constant src = g.ToConstant(source);
3784 switch (src.type()) {
3786 __ mov(dst, Operand(src.ToInt32(), src.rmode()));
3789 __ mov(dst, Operand(src.ToInt64(), src.rmode()));
3798 __ Move(dst, src.ToExternalReference());
3801 Handle<HeapObject> src_object = src.ToHeapObject();
3804 __ LoadRoot(dst, index);
3806 __ Move(dst, src_object);
3811 Handle<HeapObject> src_object = src.ToHeapObject();
3814 __ LoadTaggedRoot(dst, index);
3832 : src.ToFloat64().value();
3834 __ LoadF32<float>(dst, src.ToFloat32(), r1);
3836 __ LoadF64<double>(dst, value, r1);
3845 } else if (source->IsFPRegister()) {
3849 __ vlr(g.ToSimd128Register(destination), g.ToSimd128Register(source),
3853 __ StoreV128(g.ToSimd128Register(source), g.ToMemOperand(destination),
3871 } else if (source->IsFPStackSlot()) {
3882 __ LoadV128(g.ToSimd128Register(destination), g.ToMemOperand(source),
3889 __ LoadF64(temp, src);
3892 __ LoadF32(temp, src);
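// AssembleSwap likewise dispatches on operand kind, exchanging values through scratch registers
// and the SwapFloat32/SwapDouble/SwapSimd128 macro-assembler helpers.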
3915 S390OperandConverter g(this, nullptr);
3916 if (source->IsRegister()) {
3917 Register src = g.ToRegister(source);
3924 } else if (source->IsStackSlot()) {
3928 } else if (source->IsFloatRegister()) {
3936 } else if (source->IsDoubleRegister()) {
3944 } else if (source->IsFloatStackSlot()) {
3946 __ SwapFloat32(g.ToMemOperand(source), g.ToMemOperand(destination),
3948 } else if (source->IsDoubleStackSlot()) {
3950 __ SwapDouble(g.ToMemOperand(source), g.ToMemOperand(destination),
3952 } else if (source->IsSimd128Register()) {
3960 } else if (source->IsSimd128StackSlot()) {
3962 __ SwapSimd128(g.ToMemOperand(source), g.ToMemOperand(destination),
3970 for (auto target : targets) {
3971 __ emit_label_addr(target);