#if V8_ENABLE_WEBASSEMBLY
#define kScratchReg r11
    switch (constant.type()) {
        return Operand(constant.ToInt32());
        return Operand(constant.ToInt64());
        return Operand(constant.ToExternalReference());
        return Operand(constant.ToHeapObject());
    const size_t index = *first_index;
    if (mode) *mode = addr_mode;
                       size_t first_index = 0) {
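// Out-of-line code that calls the record-write stub (or the ephemeron /
// indirect-pointer barrier) when a store needs a write barrier.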
class OutOfLineRecordWrite final : public OutOfLineCode {
  OutOfLineRecordWrite(
      StubCallMode stub_mode, UnwindingInfoWriter* unwinding_info_writer,
      : OutOfLineCode(gen),
#if V8_ENABLE_WEBASSEMBLY
        stub_mode_(stub_mode),
  OutOfLineRecordWrite(
      CodeGenerator* gen, Register object, int32_t offset, Register value,
      StubCallMode stub_mode, UnwindingInfoWriter* unwinding_info_writer,
      : OutOfLineCode(gen),
#if V8_ENABLE_WEBASSEMBLY
        stub_mode_(stub_mode),
  void Generate() final {
    ConstantPoolUnavailableScope constant_pool_unavailable(masm());
      __ DecompressTagged(value_, value_);
    SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
      __ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode);
      __ CallIndirectPointerBarrier(object_, scratch1_, save_fp_mode,
#if V8_ENABLE_WEBASSEMBLY
    } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
      __ CallRecordWriteStubSaveRegisters(object_, scratch1_, save_fp_mode,
                                          StubCallMode::kCallWasmRuntimeStub);
      __ CallRecordWriteStubSaveRegisters(object_, scratch1_, save_fp_mode);
#if V8_ENABLE_WEBASSEMBLY
    case kPPC_AddWithOverflow32:
    case kPPC_SubWithOverflow32:
    case kPPC_AddWithOverflow32:
    case kPPC_SubWithOverflow32:
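// The ASSEMBLE_* helper macros below expand to the per-opcode emission
// sequences used by CodeGenerator::AssembleArchInstruction further down.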
#define ASSEMBLE_FLOAT_UNOP_RC(asm_instr, round) \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
      __ frsp(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \

#define ASSEMBLE_FLOAT_BINOP_RC(asm_instr, round) \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                 i.InputDoubleRegister(1), i.OutputRCBit()); \
      __ frsp(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \

#define ASSEMBLE_BINOP(asm_instr_reg, asm_instr_imm) \
    if (HasRegisterInput(instr, 1)) { \
      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
                       i.InputRegister(1)); \
      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
                       i.InputImmediate(1)); \

#define ASSEMBLE_BINOP_RC(asm_instr_reg, asm_instr_imm) \
    if (HasRegisterInput(instr, 1)) { \
      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
                       i.InputRegister(1), i.OutputRCBit()); \
      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
                       i.InputImmediate(1), i.OutputRCBit()); \

#define ASSEMBLE_BINOP_INT_RC(asm_instr_reg, asm_instr_imm) \
    if (HasRegisterInput(instr, 1)) { \
      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
                       i.InputRegister(1), i.OutputRCBit()); \
      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
                       i.InputImmediate(1), i.OutputRCBit()); \

#define ASSEMBLE_ADD_WITH_OVERFLOW() \
    if (HasRegisterInput(instr, 1)) { \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputRegister(1), kScratchReg, r0); \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputInt32(1), kScratchReg, r0); \

#define ASSEMBLE_SUB_WITH_OVERFLOW() \
    if (HasRegisterInput(instr, 1)) { \
      __ SubAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputRegister(1), kScratchReg, r0); \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                -i.InputInt32(1), kScratchReg, r0); \

#define ASSEMBLE_ADD_WITH_OVERFLOW32() \
    ASSEMBLE_ADD_WITH_OVERFLOW(); \
    __ extsw(kScratchReg, kScratchReg, SetRC); \

#define ASSEMBLE_SUB_WITH_OVERFLOW32() \
    ASSEMBLE_SUB_WITH_OVERFLOW(); \
    __ extsw(kScratchReg, kScratchReg, SetRC); \
#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr) \
    const CRegister cr = cr0; \
    if (HasRegisterInput(instr, 1)) { \
      if (i.CompareLogical()) { \
        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1), cr); \
        __ cmp_instr(i.InputRegister(0), i.InputRegister(1), cr); \
      if (i.CompareLogical()) { \
        __ cmpl_instr##i(i.InputRegister(0), i.InputImmediate(1), cr); \
        __ cmp_instr##i(i.InputRegister(0), i.InputImmediate(1), cr); \
    DCHECK_EQ(SetRC, i.OutputRCBit()); \

#define ASSEMBLE_FLOAT_COMPARE(cmp_instr) \
    const CRegister cr = cr0; \
    __ cmp_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1), cr); \
    DCHECK_EQ(SetRC, i.OutputRCBit()); \

#define ASSEMBLE_MODULO(div_instr, mul_instr) \
    const Register scratch = kScratchReg; \
    __ div_instr(scratch, i.InputRegister(0), i.InputRegister(1)); \
    __ mul_instr(scratch, scratch, i.InputRegister(1)); \
    __ sub(i.OutputRegister(), i.InputRegister(0), scratch, LeaveOE, \

#define ASSEMBLE_FLOAT_MODULO() \
    FrameScope scope(masm(), StackFrame::MANUAL); \
    __ PrepareCallCFunction(0, 2, kScratchReg); \
    __ MovToFloatParameters(i.InputDoubleRegister(0), \
                            i.InputDoubleRegister(1)); \
    __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2); \
    __ MovFromFloatResult(i.OutputDoubleRegister()); \
    DCHECK_EQ(LeaveRC, i.OutputRCBit()); \

#define ASSEMBLE_IEEE754_UNOP(name) \
    FrameScope scope(masm(), StackFrame::MANUAL); \
    __ PrepareCallCFunction(0, 1, kScratchReg); \
    __ MovToFloatParameter(i.InputDoubleRegister(0)); \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
    __ MovFromFloatResult(i.OutputDoubleRegister()); \
    DCHECK_EQ(LeaveRC, i.OutputRCBit()); \

#define ASSEMBLE_IEEE754_BINOP(name) \
    FrameScope scope(masm(), StackFrame::MANUAL); \
    __ PrepareCallCFunction(0, 2, kScratchReg); \
    __ MovToFloatParameters(i.InputDoubleRegister(0), \
                            i.InputDoubleRegister(1)); \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
    __ MovFromFloatResult(i.OutputDoubleRegister()); \
    DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
#define ASSEMBLE_LOAD_FLOAT(asm_instr, asm_instrp, asm_instrx) \
    DoubleRegister result = i.OutputDoubleRegister(); \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    bool is_atomic = i.InputInt32(index); \
    if (mode == kMode_MRI) { \
      intptr_t offset = operand.offset(); \
      if (is_int16(offset)) { \
        __ asm_instr(result, operand); \
        CHECK(CpuFeatures::IsSupported(PPC_10_PLUS)); \
        __ asm_instrp(result, operand); \
      __ asm_instrx(result, operand); \
    if (is_atomic) __ lwsync(); \
    DCHECK_EQ(LeaveRC, i.OutputRCBit()); \

#define ASSEMBLE_LOAD_INTEGER(asm_instr, asm_instrp, asm_instrx, \
    Register result = i.OutputRegister(); \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    bool is_atomic = i.InputInt32(index); \
    if (mode == kMode_MRI) { \
      intptr_t offset = operand.offset(); \
      bool misaligned = offset & 3; \
      if (is_int16(offset) && (!must_be_aligned || !misaligned)) { \
        __ asm_instr(result, operand); \
        CHECK(CpuFeatures::IsSupported(PPC_10_PLUS)); \
        __ asm_instrp(result, operand); \
      __ asm_instrx(result, operand); \
    if (is_atomic) __ lwsync(); \
    DCHECK_EQ(LeaveRC, i.OutputRCBit()); \

#define ASSEMBLE_LOAD_INTEGER_RR(asm_instr) \
    Register result = i.OutputRegister(); \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DCHECK_EQ(mode, kMode_MRR); \
    bool is_atomic = i.InputInt32(index); \
    __ asm_instr(result, operand); \
    if (is_atomic) __ lwsync(); \
    DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
#define ASSEMBLE_STORE_FLOAT(asm_instr, asm_instrp, asm_instrx) \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    bool is_atomic = i.InputInt32(3); \
    if (is_atomic) __ lwsync(); \
    if (mode == kMode_MRI) { \
      intptr_t offset = operand.offset(); \
      if (is_int16(offset)) { \
        __ asm_instr(value, operand); \
        CHECK(CpuFeatures::IsSupported(PPC_10_PLUS)); \
        __ asm_instrp(value, operand); \
      __ asm_instrx(value, operand); \
    if (is_atomic) __ sync(); \
    DCHECK_EQ(LeaveRC, i.OutputRCBit()); \

#define ASSEMBLE_STORE_INTEGER(asm_instr, asm_instrp, asm_instrx, \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    Register value = i.InputRegister(index); \
    bool is_atomic = i.InputInt32(index + 1); \
    if (is_atomic) __ lwsync(); \
    if (mode == kMode_MRI) { \
      intptr_t offset = operand.offset(); \
      bool misaligned = offset & 3; \
      if (is_int16(offset) && (!must_be_aligned || !misaligned)) { \
        __ asm_instr(value, operand); \
        CHECK(CpuFeatures::IsSupported(PPC_10_PLUS)); \
        __ asm_instrp(value, operand); \
      __ asm_instrx(value, operand); \
    if (is_atomic) __ sync(); \
    DCHECK_EQ(LeaveRC, i.OutputRCBit()); \

#define ASSEMBLE_STORE_INTEGER_RR(asm_instr) \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DCHECK_EQ(mode, kMode_MRR); \
    Register value = i.InputRegister(index); \
    bool is_atomic = i.InputInt32(index + 1); \
    if (is_atomic) __ lwsync(); \
    __ asm_instr(value, operand); \
    if (is_atomic) __ sync(); \
    DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
#define CleanUInt32(x) __ ClearLeftImm(x, x, Operand(32))

#if V8_ENABLE_WEBASSEMBLY
#if V8_TARGET_BIG_ENDIAN
#if V8_ENABLE_WEBASSEMBLY
#define MAYBE_REVERSE_IF_WASM(dst, src, op, scratch, reset) \
  if (is_wasm_on_be(info()->IsWasm())) { \
    __ op(dst, src, scratch); \
    if (reset) src = dst; \
#define MAYBE_REVERSE_IF_WASM(dst, src, op, scratch, reset)
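// Wasm memory is little-endian, so on big-endian hosts the values handled by
// the atomic sequences below are byte-reversed around the operation; on
// little-endian builds MAYBE_REVERSE_IF_WASM expands to nothing.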
#define ASSEMBLE_ATOMIC_EXCHANGE(_type, reverse_op) \
    Register val = i.InputRegister(2); \
    Register dst = i.OutputRegister(); \
    MAYBE_REVERSE_IF_WASM(ip, val, reverse_op, kScratchReg, true); \
    __ AtomicExchange<_type>( \
        MemOperand(i.InputRegister(0), i.InputRegister(1)), val, dst); \
    MAYBE_REVERSE_IF_WASM(dst, dst, reverse_op, kScratchReg, false); \

#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(_type, reverse_op) \
    Register expected_val = i.InputRegister(2); \
    Register new_val = i.InputRegister(3); \
    Register dst = i.OutputRegister(); \
    MAYBE_REVERSE_IF_WASM(ip, expected_val, reverse_op, kScratchReg, true); \
    MAYBE_REVERSE_IF_WASM(r0, new_val, reverse_op, kScratchReg, true); \
    __ AtomicCompareExchange<_type>( \
        MemOperand(i.InputRegister(0), i.InputRegister(1)), expected_val, \
        new_val, dst, kScratchReg); \
    MAYBE_REVERSE_IF_WASM(dst, dst, reverse_op, kScratchReg, false); \

#define ASSEMBLE_ATOMIC_BINOP_BYTE(bin_inst, _type) \
    auto bin_op = [&](Register dst, Register lhs, Register rhs) { \
      if (std::is_signed<_type>::value) { \
        __ extsb(dst, lhs); \
        __ bin_inst(dst, dst, rhs); \
        __ bin_inst(dst, lhs, rhs); \
    MemOperand dst_operand = \
        MemOperand(i.InputRegister(0), i.InputRegister(1)); \
    __ AtomicOps<_type>(dst_operand, i.InputRegister(2), i.OutputRegister(), \
                        kScratchReg, bin_op); \

#define ASSEMBLE_ATOMIC_BINOP(bin_inst, _type, reverse_op, scratch) \
    auto bin_op = [&](Register dst, Register lhs, Register rhs) { \
      Register _lhs = lhs; \
      MAYBE_REVERSE_IF_WASM(dst, _lhs, reverse_op, scratch, true); \
      if (std::is_signed<_type>::value) { \
        switch (sizeof(_type)) { \
            __ extsh(dst, _lhs); \
            __ extsw(dst, _lhs); \
      __ bin_inst(dst, _lhs, rhs); \
      MAYBE_REVERSE_IF_WASM(dst, dst, reverse_op, scratch, false); \
    MemOperand dst_operand = \
        MemOperand(i.InputRegister(0), i.InputRegister(1)); \
    __ AtomicOps<_type>(dst_operand, i.InputRegister(2), i.OutputRegister(), \
                        kScratchReg, bin_op); \
    MAYBE_REVERSE_IF_WASM(i.OutputRegister(), i.OutputRegister(), reverse_op, \
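// In the atomic binop macros the bin_op lambda sign-extends the loaded value
// (extsb/extsh/extsw) before applying the operation for signed narrow types;
// kScratchReg, r0 and ip serve as temporaries for the byte reversal and the
// load-reserve/store-conditional loop inside AtomicOps.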
  __ RestoreFrameStateForTailCall();

void FlushPendingPushRegisters(MacroAssembler* masm,
                               FrameAccessState* frame_access_state,
                               ZoneVector<Register>* pending_pushes) {
  switch (pending_pushes->size()) {
      masm->Push((*pending_pushes)[0]);
      masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
      masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
                 (*pending_pushes)[2]);
  frame_access_state->IncreaseSPDelta(pending_pushes->size());
  pending_pushes->clear();

void AdjustStackPointerForTailCall(
    MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
    ZoneVector<Register>* pending_pushes = nullptr,
    bool allow_shrinkage = true) {
  int current_sp_offset = state->GetSPToFPSlotCount() +
  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
  if (stack_slot_delta > 0) {
    if (pending_pushes != nullptr) {
      FlushPendingPushRegisters(masm, state, pending_pushes);
    state->IncreaseSPDelta(stack_slot_delta);
  } else if (allow_shrinkage && stack_slot_delta < 0) {
    if (pending_pushes != nullptr) {
      FlushPendingPushRegisters(masm, state, pending_pushes);
    state->IncreaseSPDelta(stack_slot_delta);
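// AdjustStackPointerForTailCall keeps the stack pointer in sync with the slot
// count the tail-called code expects; any registers queued for pushing are
// flushed before the stack pointer is moved in either direction.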
                                              int first_unused_slot_offset) {
  ZoneVector<MoveOperands*> pushes(zone());
  if (!pushes.empty() &&
                                 first_unused_slot_offset)) {
    PPCOperandConverter g(this, instr);
    ZoneVector<Register> pending_pushes(zone());
    for (auto move : pushes) {
      LocationOperand destination_location(
      InstructionOperand source(move->source());
      AdjustStackPointerForTailCall(
          destination_location.index() - pending_pushes.size(),
      DCHECK(source.IsRegister());
      pending_pushes.push_back(source_location.GetRegister());
      if (pending_pushes.size() == 3) {
                                first_unused_slot_offset, nullptr, false);
                                             int first_unused_slot_offset) {
                                first_unused_slot_offset);
  __ ComputeCodeStartAddress(scratch);
  __ Assert(eq, AbortReason::kWrongFunctionCodeStart);

#ifdef V8_ENABLE_LEAPTIERING
void CodeGenerator::AssembleDispatchHandleRegisterCheck() {

    Instruction* instr) {
  PPCOperandConverter i(this, instr);
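  // The switch below dispatches on the instruction's opcode and emits the
  // matching PPC sequence via the MacroAssembler.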
    case kArchCallCodeObject: {
        __ CallCodeObject(reg);
    case kArchCallBuiltinPointer: {
      Register builtin_index = i.InputRegister(0);
      __ CallBuiltinByIndex(builtin_index, target);
#if V8_ENABLE_WEBASSEMBLY
    case kArchCallWasmFunction:
    case kArchCallWasmFunctionIndirect: {
      if (instr->InputAt(0)->IsImmediate()) {
        DCHECK_EQ(opcode, kArchCallWasmFunction);
        Constant constant = i.ToConstant(instr->InputAt(0));
        __ Call(wasm_code, constant.rmode());
      } else if (opcode == kArchCallWasmFunctionIndirect) {
        __ CallWasmCodePointer(i.InputRegister(0));
        __ Call(i.InputRegister(0));
    case kArchTailCallWasm:
    case kArchTailCallWasmIndirect: {
      if (instr->InputAt(0)->IsImmediate()) {
        Constant constant = i.ToConstant(instr->InputAt(0));
        __ Jump(wasm_code, constant.rmode());
      } else if (opcode == kArchTailCallWasmIndirect) {
        __ Jump(i.InputRegister(0));
    case kArchTailCallCodeObject: {
        __ JumpCodeObject(reg);
      ConstantPoolUnavailableScope constant_pool_unavailable(masm());
    case kArchTailCallAddress: {
    case kArchCallJSFunction: {
      __ Assert(eq, AbortReason::kWrongFunctionContext);
      uint32_t num_arguments =
          i.InputUint32(instr->JSCallArgumentCountInputIndex());
    case kArchPrepareCallCFunction: {
      __ PrepareCallCFunction(num_gp_parameters + num_fp_parameters,
    case kArchSaveCallerRegisters: {
    case kArchRestoreCallerRegisters: {
    case kArchPrepareTailCall:
      __ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)),
    case kArchCallCFunctionWithFrameState:
    case kArchCallCFunction: {
      int num_fp_parameters = fp_param_field;
      bool has_function_descriptor = false;
#if ABI_USES_FUNCTION_DESCRIPTORS
      num_fp_parameters = kNumFPParametersMask & fp_param_field;
      has_function_descriptor =
#if V8_ENABLE_WEBASSEMBLY
      int start_pc_offset = 0;
      bool isWasmCapiFunction =
      if (isWasmCapiFunction) {
        __ bind(&start_call);
                    MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
      if (instr->InputAt(0)->IsImmediate()) {
        ExternalReference ref = i.InputExternalReference(0);
        __ CallCFunction(ref, num_gp_parameters, num_fp_parameters,
                         set_isolate_data_slots, has_function_descriptor);
        __ CallCFunction(func, num_gp_parameters, num_fp_parameters,
                         set_isolate_data_slots, has_function_descriptor);
#if V8_ENABLE_WEBASSEMBLY
      if (isWasmCapiFunction) {
        int offset_since_start_call = pc_offset - start_pc_offset;
                          offset_since_start_call);
      bool const needs_frame_state =
          (opcode == kArchCallCFunctionWithFrameState);
      if (needs_frame_state) {
    case kArchBinarySearchSwitch:
    case kArchTableSwitch:
    case kArchAbortCSADcheck:
      DCHECK(i.InputRegister(0) == r4);
      __ CallBuiltin(Builtin::kAbortCSADcheck);
    case kArchDebugBreak:
    case kArchThrowTerminator:
    case kArchDeoptimize: {
      DeoptimizationExit* exit =
      __ b(exit->label());
    case kArchFramePointer:
      __ mr(i.OutputRegister(), fp);
    case kArchParentFramePointer:
        __ mr(i.OutputRegister(), fp);
#if V8_ENABLE_WEBASSEMBLY
    case kArchStackPointer:
      __ mr(i.OutputRegister(), sp);
    case kArchSetStackPointer: {
      __ mr(sp, i.InputRegister(0));
    case kArchStackPointerGreaterThan: {
        lhs_register = i.TempRegister(0);
      constexpr size_t kValueIndex = 0;
      DCHECK(instr->InputAt(kValueIndex)->IsRegister());
      __ CmpU64(lhs_register, i.InputRegister(kValueIndex), cr0);
    case kArchStackCheckOffset:
      __ LoadSmiLiteral(i.OutputRegister(),
    case kArchTruncateDoubleToI:
    case kArchStoreWithWriteBarrier: {
      OutOfLineRecordWrite* ool;
        __ Check(ne, AbortReason::kOperandIsCleared);
      if (addressing_mode == kMode_MRI) {
        ool = zone()->New<OutOfLineRecordWrite>(
        ool = zone()->New<OutOfLineRecordWrite>(
      __ JumpIfSmi(value, ool->exit());
      __ CheckPageFlag(object, scratch0,
      __ bind(ool->exit());
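      // Standard write-barrier fast path: Smi stores skip the barrier
      // entirely, CheckPageFlag filters out stores whose page flags show no
      // barrier is needed, and only the remaining cases fall through into the
      // OutOfLineRecordWrite stub call.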
    case kArchStoreIndirectWithWriteBarrier: {
      OutOfLineRecordWrite* ool;
      if (addressing_mode == kMode_MRI) {
        uint64_t offset = i.InputInt64(1);
        ool = zone()->New<OutOfLineRecordWrite>(
        ool = zone()->New<OutOfLineRecordWrite>(
      __ CheckPageFlag(object, scratch0,
      __ bind(ool->exit());
    case kArchStackSlot: {
      __ AddS64(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
                Operand(offset.offset()), r0);
      int reverse_slot = i.InputInt32(0);
      if (instr->OutputAt(0)->IsFPRegister()) {
      __ and_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
      __ andi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
    case kPPC_AndComplement:
      __ andc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
      __ orx(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
      __ ori(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
    case kPPC_OrComplement:
      __ orc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
      __ xor_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
      __ xori(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
    case kPPC_ShiftLeft32:
    case kPPC_ShiftLeft64:
    case kPPC_ShiftRight32:
    case kPPC_ShiftRight64:
    case kPPC_ShiftRightAlg32:
    case kPPC_ShiftRightAlg64:
    case kPPC_RotRight32:
        int sh = i.InputInt32(1);
        __ rotrwi(i.OutputRegister(), i.InputRegister(0), sh, i.OutputRCBit());
    case kPPC_RotRight64:
        int sh = i.InputInt32(1);
        __ rotrdi(i.OutputRegister(), i.InputRegister(0), sh, i.OutputRCBit());
      __ notx(i.OutputRegister(), i.InputRegister(0), i.OutputRCBit());
    case kPPC_RotLeftAndMask32:
      __ rlwinm(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
                31 - i.InputInt32(2), 31 - i.InputInt32(3), i.OutputRCBit());
    case kPPC_RotLeftAndClear64:
      __ rldic(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
               63 - i.InputInt32(2), i.OutputRCBit());
    case kPPC_RotLeftAndClearLeft64:
      __ rldicl(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
                63 - i.InputInt32(2), i.OutputRCBit());
    case kPPC_RotLeftAndClearRight64:
      __ rldicr(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
                63 - i.InputInt32(2), i.OutputRCBit());
        __ add(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
        __ AddS64(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1),
      __ extsw(i.OutputRegister(), i.OutputRegister());
        __ add(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
        __ AddS64(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1),
    case kPPC_AddWithOverflow32:
    case kPPC_AddDouble:
        __ sub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
        __ SubS64(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1),
    case kPPC_SubWithOverflow32:
    case kPPC_SubDouble:
      __ mullw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
      __ mulld(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
    case kPPC_Mul32WithHigh32:
      if (i.OutputRegister(0) == i.InputRegister(0) ||
          i.OutputRegister(0) == i.InputRegister(1) ||
          i.OutputRegister(1) == i.InputRegister(0) ||
          i.OutputRegister(1) == i.InputRegister(1)) {
        __ mulhw(i.OutputRegister(1), i.InputRegister(0), i.InputRegister(1));
        __ mullw(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(1));
        __ mulhw(i.OutputRegister(1), i.InputRegister(0), i.InputRegister(1));
    case kPPC_MulHighS64:
      __ mulhd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
    case kPPC_MulHighU64:
      __ mulhdu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
    case kPPC_MulHigh32:
      __ mulhw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
    case kPPC_MulHighU32:
      __ mulhwu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
    case kPPC_MulDouble:
      __ divw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      __ divd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      __ divwu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      __ divdu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
    case kPPC_DivDouble:
        __ modsw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
        __ modsd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
        __ moduw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
        __ modud(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
    case kPPC_ModDouble:
    case kIeee754Float64Acos:
    case kIeee754Float64Acosh:
    case kIeee754Float64Asin:
    case kIeee754Float64Asinh:
    case kIeee754Float64Atan:
    case kIeee754Float64Atan2:
    case kIeee754Float64Atanh:
    case kIeee754Float64Tan:
    case kIeee754Float64Tanh:
    case kIeee754Float64Cbrt:
    case kIeee754Float64Sin:
    case kIeee754Float64Sinh:
    case kIeee754Float64Cos:
    case kIeee754Float64Cosh:
    case kIeee754Float64Exp:
    case kIeee754Float64Expm1:
    case kIeee754Float64Log:
    case kIeee754Float64Log1p:
    case kIeee754Float64Log2:
    case kIeee754Float64Log10:
    case kIeee754Float64Pow:
      __ neg(i.OutputRegister(), i.InputRegister(0), LeaveOE, i.OutputRCBit());
    case kPPC_MaxDouble:
      __ MaxF64(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
    case kPPC_MinDouble:
      __ MinF64(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
    case kPPC_AbsDouble:
    case kPPC_SqrtDouble:
    case kPPC_FloorDouble:
    case kPPC_CeilDouble:
    case kPPC_TruncateDouble:
    case kPPC_RoundDouble:
    case kPPC_NegDouble:
      __ cntlzw(i.OutputRegister(), i.InputRegister(0));
      __ cntlzd(i.OutputRegister(), i.InputRegister(0));
      __ Popcnt32(i.OutputRegister(), i.InputRegister(0));
      __ Popcnt64(i.OutputRegister(), i.InputRegister(0));
    case kPPC_CmpDouble:
        __ and_(r0, i.InputRegister(0), i.InputRegister(1), i.OutputRCBit());
        __ andi(r0, i.InputRegister(0), i.InputImmediate(1));
        __ extsw(r0, r0, i.OutputRCBit());
        __ and_(r0, i.InputRegister(0), i.InputRegister(1), i.OutputRCBit());
        __ andi(r0, i.InputRegister(0), i.InputImmediate(1));
    case kPPC_Float64SilenceNaN: {
      int stack_decrement = i.InputInt32(0);
          __ StoreF32WithUpdate(i.InputDoubleRegister(1),
          __ StoreF64WithUpdate(i.InputDoubleRegister(1),
          __ StoreSimd128(i.InputSimd128Register(1), MemOperand(r0, sp),
        __ StoreU64WithUpdate(i.InputRegister(1),
    case kPPC_PushFrame: {
      int num_slots = i.InputInt32(1);
      if (instr->InputAt(0)->IsFPRegister()) {
          __ StoreF64WithUpdate(i.InputDoubleRegister(0),
          __ StoreF32WithUpdate(i.InputDoubleRegister(0),
        __ StoreU64WithUpdate(i.InputRegister(0),
    case kPPC_StoreToStackSlot: {
      int slot = i.InputInt32(1);
      if (instr->InputAt(0)->IsFPRegister()) {
          __ StoreF64(i.InputDoubleRegister(0),
          __ StoreF32(i.InputDoubleRegister(0),
          __ StoreSimd128(i.InputSimd128Register(0),
        __ StoreU64(i.InputRegister(0),
    case kPPC_ExtendSignWord8:
      __ extsb(i.OutputRegister(), i.InputRegister(0));
    case kPPC_ExtendSignWord16:
      __ extsh(i.OutputRegister(), i.InputRegister(0));
    case kPPC_ExtendSignWord32:
      __ extsw(i.OutputRegister(), i.InputRegister(0));
    case kPPC_Uint32ToUint64:
      __ clrldi(i.OutputRegister(), i.InputRegister(0), Operand(32));
    case kPPC_Int64ToInt32:
      __ extsw(i.OutputRegister(), i.InputRegister(0));
    case kPPC_Int64ToFloat32:
      __ ConvertInt64ToFloat(i.InputRegister(0), i.OutputDoubleRegister());
    case kPPC_Int64ToDouble:
      __ ConvertInt64ToDouble(i.InputRegister(0), i.OutputDoubleRegister());
    case kPPC_Uint64ToFloat32:
      __ ConvertUnsignedInt64ToFloat(i.InputRegister(0),
                                     i.OutputDoubleRegister());
    case kPPC_Uint64ToDouble:
      __ ConvertUnsignedInt64ToDouble(i.InputRegister(0),
                                      i.OutputDoubleRegister());
    case kPPC_Int32ToFloat32:
      __ ConvertIntToFloat(i.InputRegister(0), i.OutputDoubleRegister());
    case kPPC_Int32ToDouble:
      __ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
    case kPPC_Uint32ToFloat32:
      __ ConvertUnsignedIntToFloat(i.InputRegister(0),
                                   i.OutputDoubleRegister());
    case kPPC_Uint32ToDouble:
      __ ConvertUnsignedIntToDouble(i.InputRegister(0),
                                    i.OutputDoubleRegister());
    case kPPC_Float32ToInt32: {
      if (set_overflow_to_min_i32) {
      if (set_overflow_to_min_i32) {
        __ isel(i.OutputRegister(0), kScratchReg, i.OutputRegister(0), crbit);
    case kPPC_Float32ToUint32: {
      if (set_overflow_to_min_u32) {
      if (set_overflow_to_min_u32) {
        __ isel(i.OutputRegister(0), kScratchReg, i.OutputRegister(0), crbit);
#define DOUBLE_TO_INT32(op) \
    bool check_conversion = i.OutputCount() > 1; \
    CRegister cr = cr0; \
    FPSCRBit fps_bit = VXCVI; \
    int cr_bit = v8::internal::Assembler::encode_crbit( \
        cr, static_cast<CRBit>(fps_bit % CRWIDTH)); \
    __ mtfsb0(fps_bit); \
    __ op(kScratchDoubleReg, i.InputDoubleRegister(0)); \
    __ MovDoubleLowToInt(i.OutputRegister(0), kScratchDoubleReg); \
    __ mcrfs(cr, VXCVI); \
    if (check_conversion) { \
      __ li(i.OutputRegister(1), Operand(1)); \
      __ isel(i.OutputRegister(1), r0, i.OutputRegister(1), cr_bit); \
      __ isel(i.OutputRegister(0), r0, i.OutputRegister(0), cr_bit); \
    case kPPC_DoubleToInt32: {
    case kPPC_DoubleToUint32: {
#undef DOUBLE_TO_INT32
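    // The double-to-integer conversions detect out-of-range inputs via the
    // FPSCR: mtfsb0 clears the VXCVI (invalid-convert) bit before the
    // conversion, mcrfs copies that field into a condition register, and isel
    // then substitutes the overflow result based on the resulting CR bit.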
    case kPPC_DoubleToInt64: {
      bool check_conversion = i.OutputCount() > 1;
      __ ConvertDoubleToInt64(i.InputDoubleRegister(0),
      if (check_conversion) {
        __ li(i.OutputRegister(1), Operand(1));
        __ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
        __ isel(i.OutputRegister(0), r0, i.OutputRegister(0), crbit);
    case kPPC_DoubleToUint64: {
      bool check_conversion = (i.OutputCount() > 1);
      if (check_conversion) {
      __ ConvertDoubleToUnsignedInt64(i.InputDoubleRegister(0),
      if (check_conversion) {
        __ li(i.OutputRegister(1), Operand(1));
        __ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
    case kPPC_DoubleToFloat32:
    case kPPC_Float32ToDouble:
      __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
    case kPPC_DoubleExtractLowWord32:
      __ MovDoubleLowToInt(i.OutputRegister(), i.InputDoubleRegister(0));
    case kPPC_DoubleExtractHighWord32:
      __ MovDoubleHighToInt(i.OutputRegister(), i.InputDoubleRegister(0));
    case kPPC_DoubleFromWord32Pair:
      __ ShiftLeftU64(i.TempRegister(0), i.InputRegister(0), Operand(32));
      __ MovInt64ToDouble(i.OutputDoubleRegister(), i.TempRegister(0));
    case kPPC_DoubleInsertLowWord32:
      __ InsertDoubleLow(i.OutputDoubleRegister(), i.InputRegister(1), r0);
    case kPPC_DoubleInsertHighWord32:
      __ InsertDoubleHigh(i.OutputDoubleRegister(), i.InputRegister(1), r0);
    case kPPC_DoubleConstruct:
      __ MovInt64ComponentsToDouble(i.OutputDoubleRegister(),
                                    i.InputRegister(0), i.InputRegister(1), r0);
    case kPPC_BitcastFloat32ToInt32:
      __ MovFloatToInt(i.OutputRegister(), i.InputDoubleRegister(0),
    case kPPC_BitcastInt32ToFloat32:
      __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0), ip);
    case kPPC_BitcastDoubleToInt64:
      __ MovDoubleToInt64(i.OutputRegister(), i.InputDoubleRegister(0));
    case kPPC_BitcastInt64ToDouble:
      __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
    case kPPC_LoadWordU8:
    case kPPC_LoadWordS8:
      __ extsb(i.OutputRegister(), i.OutputRegister());
    case kPPC_LoadWordU16:
    case kPPC_LoadWordS16:
    case kPPC_LoadWordU32:
    case kPPC_LoadWordS32:
    case kPPC_LoadWord64:
    case kPPC_LoadFloat32:
    case kPPC_LoadDouble:
    case kPPC_LoadSimd128: {
      bool is_atomic = i.InputInt32(2);
      if (is_atomic) __ lwsync();
    case kPPC_LoadReverseSimd128RR: {
      __ xxbrq(i.OutputSimd128Register(), i.InputSimd128Register(0));
    case kPPC_StoreWord8:
    case kPPC_StoreWord16:
    case kPPC_StoreWord32:
    case kPPC_StoreWord64:
    case kPPC_StoreFloat32:
    case kPPC_StoreDouble:
    case kPPC_StoreSimd128: {
      MemOperand operand = i.MemoryOperand(&mode, &index);
      bool is_atomic = i.InputInt32(3);
      if (is_atomic) __ lwsync();
      if (is_atomic) __ sync();
    case kAtomicLoadInt8:
    case kAtomicLoadInt16:
    case kAtomicExchangeInt8:
      __ AtomicExchange<int8_t>(
          i.InputRegister(2), i.OutputRegister());
    case kPPC_AtomicExchangeUint8:
      __ AtomicExchange<uint8_t>(
          i.InputRegister(2), i.OutputRegister());
    case kAtomicExchangeInt16: {
      __ extsh(i.OutputRegister(), i.OutputRegister());
    case kPPC_AtomicExchangeUint16: {
    case kPPC_AtomicExchangeWord32: {
    case kPPC_AtomicExchangeWord64: {
    case kAtomicCompareExchangeInt8:
      __ AtomicCompareExchange<int8_t>(
          i.InputRegister(2), i.InputRegister(3), i.OutputRegister(),
    case kPPC_AtomicCompareExchangeUint8:
      __ AtomicCompareExchange<uint8_t>(
          i.InputRegister(2), i.InputRegister(3), i.OutputRegister(),
    case kAtomicCompareExchangeInt16: {
      __ extsh(i.OutputRegister(), i.OutputRegister());
    case kPPC_AtomicCompareExchangeUint16: {
    case kPPC_AtomicCompareExchangeWord32: {
    case kPPC_AtomicCompareExchangeWord64: {
#define ATOMIC_BINOP_CASE(op, inst) \
  case kPPC_Atomic##op##Int8: \
    ASSEMBLE_ATOMIC_BINOP_BYTE(inst, int8_t); \
    __ extsb(i.OutputRegister(), i.OutputRegister()); \
  case kPPC_Atomic##op##Uint8: \
    ASSEMBLE_ATOMIC_BINOP_BYTE(inst, uint8_t); \
  case kPPC_Atomic##op##Int16: \
    ASSEMBLE_ATOMIC_BINOP(inst, int16_t, ByteReverseU16, r0); \
    __ extsh(i.OutputRegister(), i.OutputRegister()); \
  case kPPC_Atomic##op##Uint16: \
    ASSEMBLE_ATOMIC_BINOP(inst, uint16_t, ByteReverseU16, r0); \
  case kPPC_Atomic##op##Int32: \
    ASSEMBLE_ATOMIC_BINOP(inst, int32_t, ByteReverseU32, r0); \
    __ extsw(i.OutputRegister(), i.OutputRegister()); \
  case kPPC_Atomic##op##Uint32: \
    ASSEMBLE_ATOMIC_BINOP(inst, uint32_t, ByteReverseU32, r0); \
  case kPPC_Atomic##op##Int64: \
  case kPPC_Atomic##op##Uint64: \
    ASSEMBLE_ATOMIC_BINOP(inst, uint64_t, ByteReverseU64, r0); \
#undef ATOMIC_BINOP_CASE
    case kPPC_ByteRev32: {
        __ brw(output, input);
        __ extsw(output, output);
      __ rotlwi(temp1, input, 8);
      __ rlwimi(temp1, input, 24, 0, 7);
      __ rlwimi(temp1, input, 24, 16, 23);
      __ extsw(output, temp1);
    case kPPC_LoadByteRev32: {
    case kPPC_StoreByteRev32: {
    case kPPC_ByteRev64: {
        __ brd(output, input);
      __ rldicl(temp1, input, 32, 32);
      __ rotlwi(temp2, input, 8);
      __ rlwimi(temp2, input, 24, 0, 7);
      __ rotlwi(temp3, temp1, 8);
      __ rlwimi(temp2, input, 24, 16, 23);
      __ rlwimi(temp3, temp1, 24, 0, 7);
      __ rlwimi(temp3, temp1, 24, 16, 23);
      __ rldicr(temp2, temp2, 32, 31);
      __ orx(output, temp2, temp3);
    case kPPC_LoadByteRev64: {
    case kPPC_StoreByteRev64: {
2142#define SIMD_BINOP_LIST(V) \
2188 V(I16x8SConvertI32x4) \
2189 V(I16x8UConvertI32x4) \
2190 V(I16x8RoundingAverageU) \
2191 V(I16x8Q15MulRSatS) \
2205 V(I8x16SConvertI16x8) \
2206 V(I8x16UConvertI16x8) \
2207 V(I8x16RoundingAverageU) \
2213#define EMIT_SIMD_BINOP(name) \
2214 case kPPC_##name: { \
2215 __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
2216 i.InputSimd128Register(1)); \
2220#undef EMIT_SIMD_BINOP
2221#undef SIMD_BINOP_LIST
2223#define SIMD_BINOP_WITH_SCRATCH_LIST(V) \
2232 V(I64x2ExtMulLowI32x4S) \
2233 V(I64x2ExtMulHighI32x4S) \
2234 V(I64x2ExtMulLowI32x4U) \
2235 V(I64x2ExtMulHighI32x4U) \
2239 V(I32x4ExtMulLowI16x8S) \
2240 V(I32x4ExtMulHighI16x8S) \
2241 V(I32x4ExtMulLowI16x8U) \
2242 V(I32x4ExtMulHighI16x8U) \
2246 V(I16x8ExtMulLowI8x16S) \
2247 V(I16x8ExtMulHighI8x16S) \
2248 V(I16x8ExtMulLowI8x16U) \
2249 V(I16x8ExtMulHighI8x16U) \
2256#define EMIT_SIMD_BINOP_WITH_SCRATCH(name) \
2257 case kPPC_##name: { \
2258 __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
2259 i.InputSimd128Register(1), kScratchSimd128Reg); \
2263#undef EMIT_SIMD_BINOP_WITH_SCRATCH
2264#undef SIMD_BINOP_WITH_SCRATCH_LIST
2266#define SIMD_SHIFT_LIST(V) \
2280#define EMIT_SIMD_SHIFT(name) \
2281 case kPPC_##name: { \
2282 __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
2283 i.InputRegister(1), kScratchSimd128Reg); \
2287#undef EMIT_SIMD_SHIFT
2288#undef SIMD_SHIFT_LIST
2290#define SIMD_UNOP_LIST(V) \
2297 V(F64x2PromoteLowF32x4) \
2300 V(F32x4SConvertI32x4) \
2301 V(F32x4UConvertI32x4) \
2308 V(F64x2ConvertLowI32x4S) \
2309 V(I64x2SConvertI32x4Low) \
2310 V(I64x2SConvertI32x4High) \
2311 V(I32x4SConvertI16x8Low) \
2312 V(I32x4SConvertI16x8High) \
2313 V(I32x4UConvertF32x4) \
2314 V(I16x8SConvertI8x16Low) \
2315 V(I16x8SConvertI8x16High) \
2319#define EMIT_SIMD_UNOP(name) \
2320 case kPPC_##name: { \
2321 __ name(i.OutputSimd128Register(), i.InputSimd128Register(0)); \
2325#undef EMIT_SIMD_UNOP
2326#undef SIMD_UNOP_LIST
2328#define SIMD_UNOP_WITH_SCRATCH_LIST(V) \
2329 V(F32x4DemoteF64x2Zero) \
2332 V(I32x4SConvertF32x4) \
2333 V(I32x4TruncSatF64x2SZero) \
2334 V(I32x4TruncSatF64x2UZero) \
2340#define EMIT_SIMD_UNOP_WITH_SCRATCH(name) \
2341 case kPPC_##name: { \
2342 __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
2343 kScratchSimd128Reg); \
2347#undef EMIT_SIMD_UNOP_WITH_SCRATCH
2348#undef SIMD_UNOP_WITH_SCRATCH_LIST
2350#define SIMD_ALL_TRUE_LIST(V) \
2355#define EMIT_SIMD_ALL_TRUE(name) \
2356 case kPPC_##name: { \
2357 __ name(i.OutputRegister(), i.InputSimd128Register(0), r0, ip, \
2358 kScratchSimd128Reg); \
2362#undef EMIT_SIMD_ALL_TRUE
2363#undef SIMD_ALL_TRUE_LIST
2365#define SIMD_QFM_LIST(V) \
2370#define EMIT_SIMD_QFM(name) \
2371 case kPPC_##name: { \
2372 __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
2373 i.InputSimd128Register(1), i.InputSimd128Register(2), \
2374 kScratchSimd128Reg); \
2381#define SIMD_EXT_ADD_PAIRWISE_LIST(V) \
2382 V(I32x4ExtAddPairwiseI16x8S) \
2383 V(I32x4ExtAddPairwiseI16x8U) \
2384 V(I16x8ExtAddPairwiseI8x16S) \
2385 V(I16x8ExtAddPairwiseI8x16U)
2386#define EMIT_SIMD_EXT_ADD_PAIRWISE(name) \
2387 case kPPC_##name: { \
2388 __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
2389 kScratchSimd128Reg, kScratchSimd128Reg2); \
2393#undef EMIT_SIMD_EXT_ADD_PAIRWISE
2394#undef SIMD_EXT_ADD_PAIRWISE_LIST
2396#define SIMD_LOAD_LANE_LIST(V) \
2397 V(S128Load64Lane, LoadLane64LE) \
2398 V(S128Load32Lane, LoadLane32LE) \
2399 V(S128Load16Lane, LoadLane16LE) \
2400 V(S128Load8Lane, LoadLane8LE)
2402#define EMIT_SIMD_LOAD_LANE(name, op) \
2403 case kPPC_##name: { \
2404 Simd128Register dst = i.OutputSimd128Register(); \
2405 DCHECK_EQ(dst, i.InputSimd128Register(0)); \
2406 AddressingMode mode = kMode_None; \
2408 MemOperand operand = i.MemoryOperand(&mode, &index); \
2409 DCHECK_EQ(mode, kMode_MRR); \
2410 __ op(dst, operand, i.InputUint8(3), kScratchReg, kScratchSimd128Reg); \
2414#undef EMIT_SIMD_LOAD_LANE
2415#undef SIMD_LOAD_LANE_LIST
2417#define SIMD_STORE_LANE_LIST(V) \
2418 V(S128Store64Lane, StoreLane64LE) \
2419 V(S128Store32Lane, StoreLane32LE) \
2420 V(S128Store16Lane, StoreLane16LE) \
2421 V(S128Store8Lane, StoreLane8LE)
2423#define EMIT_SIMD_STORE_LANE(name, op) \
2424 case kPPC_##name: { \
2425 AddressingMode mode = kMode_None; \
2427 MemOperand operand = i.MemoryOperand(&mode, &index); \
2428 DCHECK_EQ(mode, kMode_MRR); \
2429 __ op(i.InputSimd128Register(0), operand, i.InputUint8(3), kScratchReg, \
2430 kScratchSimd128Reg); \
2434#undef EMIT_SIMD_STORE_LANE
2435#undef SIMD_STORE_LANE_LIST
2437#define SIMD_LOAD_SPLAT(V) \
2438 V(S128Load64Splat, LoadAndSplat64x2LE) \
2439 V(S128Load32Splat, LoadAndSplat32x4LE) \
2440 V(S128Load16Splat, LoadAndSplat16x8LE) \
2441 V(S128Load8Splat, LoadAndSplat8x16LE)
2443#define EMIT_SIMD_LOAD_SPLAT(name, op) \
2444 case kPPC_##name: { \
2445 AddressingMode mode = kMode_None; \
2446 MemOperand operand = i.MemoryOperand(&mode); \
2447 DCHECK_EQ(mode, kMode_MRR); \
2448 __ op(i.OutputSimd128Register(), operand, kScratchReg); \
2452#undef EMIT_SIMD_LOAD_SPLAT
2453#undef SIMD_LOAD_SPLAT
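// Each SIMD_*_LIST / EMIT_SIMD_* pair above expands every listed opcode into a
// case that forwards directly to the MacroAssembler helper of the same name,
// passing kScratchReg / kScratchSimd128Reg as temporaries where needed.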
      switch (lane_size) {
          __ F32x4Splat(i.OutputSimd128Register(), i.InputDoubleRegister(0),
          __ F64x2Splat(i.OutputSimd128Register(), i.InputDoubleRegister(0),
      switch (lane_size) {
          __ I8x16Splat(i.OutputSimd128Register(), i.InputRegister(0));
          __ I16x8Splat(i.OutputSimd128Register(), i.InputRegister(0));
          __ I32x4Splat(i.OutputSimd128Register(), i.InputRegister(0));
          __ I64x2Splat(i.OutputSimd128Register(), i.InputRegister(0));
    case kPPC_FExtractLane: {
      switch (lane_size) {
          __ F32x4ExtractLane(i.OutputDoubleRegister(),
                              i.InputSimd128Register(0), i.InputInt8(1),
          __ F64x2ExtractLane(i.OutputDoubleRegister(),
                              i.InputSimd128Register(0), i.InputInt8(1),
    case kPPC_IExtractLane: {
      switch (lane_size) {
          __ I32x4ExtractLane(i.OutputRegister(), i.InputSimd128Register(0),
          __ I64x2ExtractLane(i.OutputRegister(), i.InputSimd128Register(0),
    case kPPC_IExtractLaneU: {
      switch (lane_size) {
          __ I8x16ExtractLaneU(i.OutputRegister(), i.InputSimd128Register(0),
          __ I16x8ExtractLaneU(i.OutputRegister(), i.InputSimd128Register(0),
    case kPPC_IExtractLaneS: {
      switch (lane_size) {
          __ I8x16ExtractLaneS(i.OutputRegister(), i.InputSimd128Register(0),
          __ I16x8ExtractLaneS(i.OutputRegister(), i.InputSimd128Register(0),
    case kPPC_FReplaceLane: {
      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
      switch (lane_size) {
          __ F32x4ReplaceLane(
              i.OutputSimd128Register(), i.InputSimd128Register(0),
          __ F64x2ReplaceLane(i.OutputSimd128Register(),
                              i.InputSimd128Register(0),
                              i.InputDoubleRegister(2), i.InputInt8(1),
    case kPPC_IReplaceLane: {
      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
      switch (lane_size) {
          __ I8x16ReplaceLane(i.OutputSimd128Register(),
                              i.InputSimd128Register(0), i.InputRegister(2),
          __ I16x8ReplaceLane(i.OutputSimd128Register(),
                              i.InputSimd128Register(0), i.InputRegister(2),
          __ I32x4ReplaceLane(i.OutputSimd128Register(),
                              i.InputSimd128Register(0), i.InputRegister(2),
          __ I64x2ReplaceLane(i.OutputSimd128Register(),
                              i.InputSimd128Register(0), i.InputRegister(2),
    case kPPC_I64x2Mul: {
      __ I64x2Mul(i.OutputSimd128Register(), i.InputSimd128Register(0),
                  i.InputSimd128Register(1), ip, r0,
    case kPPC_F64x2Min: {
      __ F64x2Min(i.OutputSimd128Register(), i.InputSimd128Register(0),
    case kPPC_F64x2Max: {
      __ F64x2Max(i.OutputSimd128Register(), i.InputSimd128Register(0),
    case kPPC_S128Const: {
      uint64_t low = make_uint64(i.InputUint32(1), i.InputUint32(0));
      uint64_t high = make_uint64(i.InputUint32(3), i.InputUint32(2));
      __ S128Const(i.OutputSimd128Register(), high, low, r0, ip);
    case kPPC_S128Zero: {
      __ vxor(dst, dst, dst);
    case kPPC_S128AllOnes: {
      __ vcmpequb(dst, dst, dst);
    case kPPC_S128Select: {
      __ S128Select(dst, src1, src2, mask);
    case kPPC_V128AnyTrue: {
      __ V128AnyTrue(i.OutputRegister(), i.InputSimd128Register(0), r0, ip,
    case kPPC_F64x2ConvertLowI32x4U: {
      __ F64x2ConvertLowI32x4U(i.OutputSimd128Register(),
    case kPPC_I64x2UConvertI32x4Low: {
      __ I64x2UConvertI32x4Low(i.OutputSimd128Register(),
    case kPPC_I64x2UConvertI32x4High: {
      __ I64x2UConvertI32x4High(i.OutputSimd128Register(),
    case kPPC_I32x4UConvertI16x8Low: {
      __ I32x4UConvertI16x8Low(i.OutputSimd128Register(),
    case kPPC_I32x4UConvertI16x8High: {
      __ I32x4UConvertI16x8High(i.OutputSimd128Register(),
    case kPPC_I16x8UConvertI8x16Low: {
      __ I16x8UConvertI8x16Low(i.OutputSimd128Register(),
    case kPPC_I16x8UConvertI8x16High: {
      __ I16x8UConvertI8x16High(i.OutputSimd128Register(),
    case kPPC_I8x16Shuffle: {
      uint64_t low = make_uint64(i.InputUint32(3), i.InputUint32(2));
      uint64_t high = make_uint64(i.InputUint32(5), i.InputUint32(4));
      __ I8x16Shuffle(i.OutputSimd128Register(), i.InputSimd128Register(0),
                      i.InputSimd128Register(1), high, low, r0, ip,
    case kPPC_I64x2BitMask: {
      __ I64x2BitMask(i.OutputRegister(), i.InputSimd128Register(0),
    case kPPC_I32x4BitMask: {
      __ I32x4BitMask(i.OutputRegister(), i.InputSimd128Register(0),
    case kPPC_I16x8BitMask: {
      __ I16x8BitMask(i.OutputRegister(), i.InputSimd128Register(0),
    case kPPC_I8x16BitMask: {
      __ I8x16BitMask(i.OutputRegister(), i.InputSimd128Register(0), r0, ip,
    case kPPC_I32x4DotI8x16AddS: {
      __ I32x4DotI8x16AddS(i.OutputSimd128Register(), i.InputSimd128Register(0),
                           i.InputSimd128Register(1),
                           i.InputSimd128Register(2));
#define PREP_LOAD_EXTEND() \
  AddressingMode mode = kMode_None; \
  MemOperand operand = i.MemoryOperand(&mode); \
  DCHECK_EQ(mode, kMode_MRR);
    case kPPC_S128Load8x8S: {
      __ LoadAndExtend8x8SLE(i.OutputSimd128Register(), operand, kScratchReg);
    case kPPC_S128Load8x8U: {
      __ LoadAndExtend8x8ULE(i.OutputSimd128Register(), operand, kScratchReg,
    case kPPC_S128Load16x4S: {
      __ LoadAndExtend16x4SLE(i.OutputSimd128Register(), operand, kScratchReg);
    case kPPC_S128Load16x4U: {
      __ LoadAndExtend16x4ULE(i.OutputSimd128Register(), operand, kScratchReg,
    case kPPC_S128Load32x2S: {
      __ LoadAndExtend32x2SLE(i.OutputSimd128Register(), operand, kScratchReg);
    case kPPC_S128Load32x2U: {
      __ LoadAndExtend32x2ULE(i.OutputSimd128Register(), operand, kScratchReg,
    case kPPC_S128Load32Zero: {
      __ LoadV32ZeroLE(i.OutputSimd128Register(), operand, kScratchReg,
    case kPPC_S128Load64Zero: {
      __ LoadV64ZeroLE(i.OutputSimd128Register(), operand, kScratchReg,
#undef PREP_LOAD_EXTEND
    case kPPC_StoreCompressTagged: {
      MemOperand operand = i.MemoryOperand(&mode, &index);
      Register value = i.InputRegister(index);
      bool is_atomic = i.InputInt32(index + 1);
      if (is_atomic) __ lwsync();
      __ StoreTaggedField(value, operand, r0);
      if (is_atomic) __ sync();
    case kPPC_StoreIndirectPointer: {
      Register value = i.InputRegister(index);
      bool is_atomic = i.InputInt32(index + 1);
      if (is_atomic) __ lwsync();
      if (is_atomic) __ sync();
    case kPPC_LoadDecodeSandboxedPointer: {
      bool is_atomic = i.InputInt32(index);
      __ LoadSandboxedPointerField(i.OutputRegister(), mem, kScratchReg);
      if (is_atomic) __ lwsync();
    case kPPC_StoreEncodeSandboxedPointer: {
      Register value = i.InputRegister(index);
      bool is_atomic = i.InputInt32(index + 1);
      if (is_atomic) __ lwsync();
      if (is_atomic) __ sync();
    case kPPC_LoadDecompressTaggedSigned: {
    case kPPC_LoadDecompressTagged: {
  PPCOperandConverter i(this, instr);
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  if (op == kPPC_CmpDouble) {
      __ bunordered(flabel, cr);
    } else if (cond == gt) {
      __ bunordered(tlabel, cr);
  __ b(cond, tlabel, cr);
  if (!branch->fallthru) __ b(flabel);

                                            BranchInfo* branch) {
#if V8_ENABLE_WEBASSEMBLY
void CodeGenerator::AssembleArchTrap(Instruction* instr,
    OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
    void Generate() final {
      PPCOperandConverter i(gen_, instr_);
          static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
      GenerateCallToTrap(trap_id);
    void GenerateCallToTrap(TrapId trap_id) {
      gen_->AssembleSourcePosition(instr_);
      __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
      ReferenceMap* reference_map =
          gen_->zone()->New<ReferenceMap>(gen_->zone());
      gen_->RecordSafepoint(reference_map);
    Instruction* instr_;
    CodeGenerator* gen_;
  Label* tlabel = ool->entry();
  if (op == kPPC_CmpDouble) {
      __ bunordered(&end, cr);
    } else if (cond == gt) {
      __ bunordered(tlabel, cr);
  __ b(cond, tlabel, cr);
  PPCOperandConverter i(this, instr);
  if (op == kPPC_CmpDouble) {
      __ bunordered(&done, cr);
    } else if (cond == gt) {
      __ li(reg, Operand(1));
      __ bunordered(&done, cr);
    if (reg_value != 1) __ li(reg, Operand(1));
    if (reg_value != 1) __ li(reg, Operand(1));

                                                 BranchInfo* branch) {
  PPCOperandConverter i(this, instr);
  std::vector<std::pair<int32_t, Label*>> cases;
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    cases.push_back({i.InputInt32(index + 0),
                     GetLabel(i.InputRpo(index + 1))});
                                          cases.data() + cases.size());

  PPCOperandConverter i(this, instr);
  for (int32_t index = 0; index < case_count; ++index) {
  __ CmpU64(input, Operand(case_count), r0);

  const DoubleRegList double_saves = call_descriptor->CalleeSavedFPRegisters();
  if (!double_saves.is_empty()) {
    frame->AlignSavedCalleeRegisterSlots();
          : call_descriptor->CalleeSavedRegisters();
  if (!saves.is_empty()) {
    const int num_saves =
    frame->AllocateSavedCalleeRegisterSlots(num_saves);

  if (call_descriptor->IsCFunctionCall()) {
#if V8_ENABLE_WEBASSEMBLY
    if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
      __ StubPrologue(StackFrame::C_WASM_ENTRY);
  } else if (call_descriptor->IsJSFunctionCall()) {
      __ StubPrologue(type);
#if V8_ENABLE_WEBASSEMBLY
      if (call_descriptor->IsAnyWasmFunctionCall() ||
          call_descriptor->IsWasmImportWrapper() ||
          call_descriptor->IsWasmCapiFunction()) {
      if (call_descriptor->IsWasmCapiFunction()) {
  int required_slots =
      frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
  if (info()->is_osr()) {
    __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
    __ RecordComment("-- OSR entrypoint --");
  const DoubleRegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
          : call_descriptor->CalleeSavedRegisters();
  if (required_slots > 0) {
#if V8_ENABLE_WEBASSEMBLY
      __ AddS64(stack_limit, stack_limit,
      __ CmpU64(sp, stack_limit);
      if (v8_flags.experimental_wasm_growable_stacks) {
            WasmHandleStackOverflowDescriptor::FrameBaseRegister());
        __ MultiPushF64AndV128(fp_regs_to_save,
            WasmHandleStackOverflowDescriptor::FrameBaseRegister(), fp,
        __ CallBuiltin(Builtin::kWasmHandleStackOverflow);
        __ MultiPopF64AndV128(fp_regs_to_save,
        __ Call(static_cast<intptr_t>(Builtin::kWasmStackOverflow),
        ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
    required_slots -= saves.Count();
    required_slots -= frame()->GetReturnSlotCount();
  if (!saves_fp.is_empty()) {
    __ MultiPushDoubles(saves_fp);
  if (!saves.is_empty()) {
    __ MultiPush(saves);
  const int returns = frame()->GetReturnSlotCount();

  const int returns = frame()->GetReturnSlotCount();
          : call_descriptor->CalleeSavedRegisters();
  if (!saves.is_empty()) {
  const DoubleRegList double_saves = call_descriptor->CalleeSavedFPRegisters();
  if (!double_saves.is_empty()) {
    __ MultiPopDoubles(double_saves);

  PPCOperandConverter g(this, nullptr);
  const int parameter_slots =
      static_cast<int>(call_descriptor->ParameterSlotCount());
  if (parameter_slots != 0) {
    if (additional_pop_count->IsImmediate()) {
      DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
      __ cmpi(g.ToRegister(additional_pop_count), Operand(0));
      __ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue);
#if V8_ENABLE_WEBASSEMBLY
  if (call_descriptor->IsAnyWasmFunctionCall() &&
      v8_flags.experimental_wasm_growable_stacks) {
      UseScratchRegisterScope temps{masm()};
      Register scratch = temps.Acquire();
    __ MultiPushF64AndV128(fp_regs_to_save,
    __ PrepareCallCFunction(1, r0);
    __ CallCFunction(ExternalReference::wasm_shrink_stack(), 1);
    __ MultiPopF64AndV128(fp_regs_to_save,
  const bool drop_jsargs = parameter_slots != 0 &&
                           call_descriptor->IsJSFunctionCall();
  if (call_descriptor->IsCFunctionCall()) {
    if (additional_pop_count->IsImmediate() &&
        g.ToConstant(additional_pop_count).ToInt32() == 0) {
      DCHECK(!call_descriptor->CalleeSavedRegisters().has(argc_reg));
      ConstantPoolUnavailableScope constant_pool_unavailable(masm());
    DCHECK(!call_descriptor->CalleeSavedRegisters().has(argc_reg));
    if (parameter_slots > 1) {
      __ CmpS64(argc_reg, Operand(parameter_slots), r0);
      __ mov(argc_reg, Operand(parameter_slots));
    __ DropArguments(argc_reg);
  } else if (additional_pop_count->IsImmediate()) {
    int additional_count = g.ToConstant(additional_pop_count).ToInt32();
    __ Drop(parameter_slots + additional_count);
  } else if (parameter_slots == 0) {
    __ Drop(g.ToRegister(additional_pop_count));
    __ Drop(parameter_slots);

                                                ZoneDeque<DeoptimizationExit*>* exits) {
  __ CheckTrampolinePoolQuick(total_size);
  PPCOperandConverter g(this, nullptr);
  int last_frame_slot_id =
  int slot_id = last_frame_slot_id + sp_delta + new_slots;
  if (source->IsFloatStackSlot() || source->IsDoubleStackSlot()) {
    __ LoadU64(r0, g.ToMemOperand(source), r0);

  PPCOperandConverter g(this, nullptr);
  if (dest->IsFloatStackSlot() || dest->IsDoubleStackSlot()) {
    UseScratchRegisterScope temps(masm());
    Register scratch = temps.Acquire();
    __ StoreU64(scratch, g.ToMemOperand(dest), r0);
  int last_frame_slot_id =
  int slot_id = last_frame_slot_id + sp_delta;
    int scratch_reg_code;
    int scratch_reg_code =
  if ((move->source().IsConstant() || move->source().IsFPStackSlot()) &&
      !move->destination().IsFPRegister()) {

  PPCOperandConverter g(this, nullptr);
  if (source->IsRegister()) {
    Register src = g.ToRegister(source);
  } else if (source->IsStackSlot()) {
      __ LoadU64(temp, src, r0);
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
      switch (src.type()) {
          __ mov(dst, Operand(src.ToInt32(), src.rmode()));
          __ mov(dst, Operand(src.ToInt64(), src.rmode()));
          __ Move(dst, src.ToExternalReference());
          Handle<HeapObject> src_object = src.ToHeapObject();
            __ LoadRoot(dst, index);
            __ Move(dst, src_object);
          Handle<HeapObject> src_object = src.ToHeapObject();
            __ LoadTaggedRoot(dst, index);
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
        uint32_t val = src.ToFloat32AsInt();
        if ((val & 0x7F800000) == 0x7F800000) {
          uint64_t dval = static_cast<uint64_t>(val);
          dval = ((dval & 0xC0000000) << 32) | ((dval & 0x40000000) << 31) |
                 ((dval & 0x40000000) << 30) | ((dval & 0x7FFFFFFF) << 29);
          value = base::Double(dval);
          value = base::Double(static_cast<double>(src.ToFloat32()));
        value = base::Double(src.ToFloat64());
                  ? base::Double(static_cast<double>(src.ToFloat32()))
                  : base::Double(src.ToFloat64());
      __ LoadDoubleLiteral(dst, value, r0);
  } else if (source->IsFPRegister()) {
        __ vor(g.ToSimd128Register(destination), g.ToSimd128Register(source),
               g.ToSimd128Register(source));
        __ StoreSimd128(g.ToSimd128Register(source), dst, r0);
  } else if (source->IsFPStackSlot()) {
        __ LoadF64(temp, src, r0);
        __ LoadF32(temp, src, r0);

  PPCOperandConverter g(this, nullptr);
  if (source->IsRegister()) {
    Register src = g.ToRegister(source);
  } else if (source->IsStackSlot()) {
  } else if (source->IsFloatRegister()) {
  } else if (source->IsDoubleRegister()) {
  } else if (source->IsFloatStackSlot()) {
    __ SwapFloat32(g.ToMemOperand(source), g.ToMemOperand(destination),
  } else if (source->IsDoubleStackSlot()) {
    __ SwapDouble(g.ToMemOperand(source), g.ToMemOperand(destination),
  } else if (source->IsSimd128Register()) {
  } else if (source->IsSimd128StackSlot()) {
    __ SwapSimd128(g.ToMemOperand(source), g.ToMemOperand(destination),

  for (auto target : targets) {
    __ emit_label_addr(target);