void Int32NegateWithOverflow::SetValueLocationConstraints() {
  UseRegister(value_input());
  DefineAsRegister(this);
}

void Int32NegateWithOverflow::GenerateCode(MaglevAssembler* masm,
                                           const ProcessingState& state) {
  Register value = ToRegister(value_input());
  Register out = ToRegister(result());

  static_assert(Int32NegateWithOverflow::kProperties.can_eager_deopt());
  Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kOverflow);
  __ RecordComment("-- Jump to eager deopt");
  // ...

  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register scratch = temps.AcquireScratch();
  __ neg(scratch, value);
  // ...
  __ RecordComment("-- Jump to eager deopt");
  // ...

  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
}
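// Note on the overflow check above: the negation is performed as a full 64-bit
// neg into `scratch` and presumably also as a 32-bit negw into the result; the
// two differ only for kMinInt, whose negation overflows int32, in which case
// the elided branch jumps to the kOverflow deopt. The earlier deopt check
// presumably catches an input of zero, whose negation would be -0 and
// therefore not representable as an int32.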
void Int32AbsWithOverflow::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  Register out = ToRegister(result());
  // ...
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register scratch = temps.AcquireScratch();
  // ...
  Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kOverflow);
  __ RecordComment("-- Jump to eager deopt");
  // ...

  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
}
void Int32IncrementWithOverflow::SetValueLocationConstraints() {
  UseRegister(value_input());
  DefineAsRegister(this);
}

void Int32IncrementWithOverflow::GenerateCode(MaglevAssembler* masm,
                                              const ProcessingState& state) {
  Register value = ToRegister(value_input());
  Register out = ToRegister(result());

  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register scratch = temps.AcquireScratch();
  __ Add32(scratch, value, Operand(1));
  // ...
  static_assert(Int32IncrementWithOverflow::kProperties.can_eager_deopt());
  Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kOverflow);
  __ RecordComment("-- Jump to eager deopt");
  // ...

  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
}
void Int32DecrementWithOverflow::SetValueLocationConstraints() {
  UseRegister(value_input());
  DefineAsRegister(this);
}

void Int32DecrementWithOverflow::GenerateCode(MaglevAssembler* masm,
                                              const ProcessingState& state) {
  Register value = ToRegister(value_input());
  Register out = ToRegister(result());

  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register scratch = temps.AcquireScratch();
  __ Sub32(scratch, value, Operand(1));
  // ...
  static_assert(Int32DecrementWithOverflow::kProperties.can_eager_deopt());
  Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kOverflow);
  __ RecordComment("-- Jump to eager deopt");
  // ...

  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
}
void BuiltinStringFromCharCode::SetValueLocationConstraints() {
  // ...
  set_temporaries_needed(2);
}
void BuiltinStringFromCharCode::GenerateCode(MaglevAssembler* masm,
                                             const ProcessingState& state) {
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  // ...
  int32_t char_code = constant->value() & 0xFFFF;
  // ...
  __ LoadSingleCharacterString(result_string, char_code);
  // ...
  __ AllocateTwoByteString(register_snapshot(), result_string, 1);
  __ Move(scratch, char_code);
  // ...
  __ StringFromCharCode(register_snapshot(), nullptr, result_string,
                        /* ... */);
}
void InlinedAllocation::GenerateCode(MaglevAssembler* masm,
                                     const ProcessingState& state) {
  // ...
  __ AddWord(out, value, Operand(offset()));
}

void ArgumentsLength::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  // ...
  __ Sub64(out, out, Operand(1));  // Remove the receiver from the count.
}

void RestLength::GenerateCode(MaglevAssembler* masm,
                              const ProcessingState& state) {
  // ...
  __ Mv(length, zero_reg);
  // ...
  __ UncheckedSmiTagInt32(length);
}
void CheckedIntPtrToInt32::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  // ... (range check, presumably deopting when the value lies outside
  //      [std::numeric_limits<int32_t>::min(),
  //       std::numeric_limits<int32_t>::max()])
}
void Int32AddWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}

void Int32AddWithOverflow::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  Register left = ToRegister(left_input());
  Register right = ToRegister(right_input());
  Register out = ToRegister(result());

  static_assert(Int32AddWithOverflow::kProperties.can_eager_deopt());
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register scratch = temps.AcquireScratch();
  Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kOverflow);
  __ Add64(scratch, left, right);
  __ Add32(out, left, right);
  __ RecordComment("-- Jump to eager deopt");
  // ...

  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
}
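// Note on the overflow check above: RV64 has no overflow flag, so the sum is
// computed twice. Add64 yields the exact 64-bit sum of the sign-extended
// operands in `scratch`, while Add32 (addw) yields the wrapped, sign-extended
// 32-bit sum in `out`. The two differ exactly when the int32 addition
// overflowed, and the elided branch then jumps to the kOverflow eager-deopt
// label. Int32SubtractWithOverflow below uses the same Sub64/Sub32 trick.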
void Int32SubtractWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}

void Int32SubtractWithOverflow::GenerateCode(MaglevAssembler* masm,
                                             const ProcessingState& state) {
  Register left = ToRegister(left_input());
  Register right = ToRegister(right_input());
  Register out = ToRegister(result());

  static_assert(Int32SubtractWithOverflow::kProperties.can_eager_deopt());
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register scratch = temps.AcquireScratch();
  Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kOverflow);
  __ Sub64(scratch, left, right);
  __ Sub32(out, left, right);
  __ RecordComment("-- Jump to eager deopt");
  // ...

  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
}
void Int32MultiplyWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
  set_temporaries_needed(2);
}

void Int32MultiplyWithOverflow::GenerateCode(MaglevAssembler* masm,
                                             const ProcessingState& state) {
  Register left = ToRegister(left_input());
  Register right = ToRegister(right_input());
  Register out = ToRegister(result());

  MaglevAssembler::TemporaryRegisterScope temps(masm);
  // ...
  bool out_alias_input = out == left || out == right;
  Register res = out;
  if (out_alias_input) {
    res = temps.Acquire();
  }
  // ...
  __ MulOverflow32(res, left, Operand(right), scratch, false);

  static_assert(Int32MultiplyWithOverflow::kProperties.can_eager_deopt());
  // ...
  Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kOverflow);
  __ RecordComment("-- Jump to eager deopt");
  // ...

  __ Or(maybeNegative, left, Operand(right));
  __ And(maybeNegative, maybeNegative, Operand(0x80000000));
  // ...
  __ RecordComment("-- Jump to eager deopt if the result is negative zero");
  Label* deopt_label = __ GetDeoptLabel(this, DeoptimizeReason::kOverflow);
  // ...

  if (out_alias_input) {
    // ...
  }
  // ...
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
}
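// Note on the sequence above: MulOverflow32 computes the 32-bit product into
// `res` and leaves an overflow indication in `scratch`, which the elided
// branch tests before jumping to the kOverflow deopt. The Or/And pair then
// handles JavaScript's -0: when the product is zero but either operand has its
// sign bit set (mask 0x80000000), the exact result was negative zero, which is
// not representable as an int32, so the node deopts as well.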
void Int32DivideWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
  set_temporaries_needed(2);
}

void Int32DivideWithOverflow::GenerateCode(MaglevAssembler* masm,
                                           const ProcessingState& state) {
  Register left = ToRegister(left_input());
  Register right = ToRegister(right_input());
  Register out = ToRegister(result());

  static_assert(Int32DivideWithOverflow::kProperties.can_eager_deopt());
  ZoneLabelRef done(masm);
  Label* deferred_overflow_checks = __ MakeDeferredCode(
      [](MaglevAssembler* masm, ZoneLabelRef done, Register left,
         Register right, Int32DivideWithOverflow* node) {
        // ...
        Label* deopt = __ GetDeoptLabel(node, DeoptimizeReason::kNotInt32);
        // ...
        __ RecordComment("-- Jump to eager deopt if right is zero");
        // ...
        __ RecordComment("-- Jump to eager deopt if left is zero");
        // ...
        __ JumpToDeopt(deopt);
        // ...
      },
      done, left, right, this);
  // ...

  MaglevAssembler::TemporaryRegisterScope temps(masm);
  bool out_alias_input = out == left || out == right;
  Register res = out;
  if (out_alias_input) {
    res = temps.Acquire();
  }
  __ Div32(res, left, right);
  // ...
  __ remw(temp, left, right);
  Label* deopt = __ GetDeoptLabel(this, DeoptimizeReason::kNotInt32);
  __ RecordComment("-- Jump to eager deopt if the remainder is not zero");
  // ...

  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
}
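// Note on Int32DivideWithOverflow above: the deferred block handles the
// uncommon operands out of line, deopting with kNotInt32 when the divisor is
// zero, when the dividend is zero with a negative divisor (the JS result would
// be -0), and presumably for the kMinInt / -1 case, whose quotient overflows
// int32. On the fast path, Div32 produces the quotient and remw recomputes the
// remainder; a non-zero remainder means the true result is not an integer, so
// the code deopts with kNotInt32 instead of returning a truncated value.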
void Int32ModulusWithOverflow::SetValueLocationConstraints() {
  // ...
  set_temporaries_needed(1);
}

void Int32ModulusWithOverflow::GenerateCode(MaglevAssembler* masm,
                                            const ProcessingState& state) {
  // ...

  static_assert(Int32ModulusWithOverflow::kProperties.can_eager_deopt());
  static constexpr DeoptimizeReason deopt_reason =
      DeoptimizeReason::kDivisionByZero;
  // ...
  Label* deopt = __ GetDeoptLabel(this, DeoptimizeReason::kDivisionByZero);
  __ RecordComment("-- Jump to eager deopt");
  // ...
  __ Move(out, zero_reg);
  // ...

  ZoneLabelRef done(masm);
  ZoneLabelRef rhs_checked(masm);
  // ...
  Label* deferred_rhs_check = __ MakeDeferredCode(
      [](MaglevAssembler* masm, ZoneLabelRef rhs_checked, Register rhs,
         Int32ModulusWithOverflow* node) {
        // ...
        __ EmitEagerDeopt(node, deopt_reason);
        // ...
      },
      rhs_checked, rhs, this);
  // ...
  __ bind(*rhs_checked);

  Label* deferred_lhs_check = __ MakeDeferredCode(
      [](MaglevAssembler* masm, ZoneLabelRef done, Register lhs, Register rhs,
         Register out, Int32ModulusWithOverflow* node) {
        MaglevAssembler::TemporaryRegisterScope temps(masm);
        Register lhs_abs = temps.AcquireScratch();
        __ negw(lhs_abs, lhs);
        // ...
        __ remw(res, lhs_abs, rhs);
        // ...
        __ EmitEagerDeopt(node, deopt_reason);
        // ...
      },
      done, lhs, rhs, out, this);
  // ...

  Label rhs_not_power_of_2;
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register scratch = temps.AcquireScratch();
  Register msk = temps.AcquireScratch();
  __ Sub32(msk, rhs, Operand(1));
  __ And(scratch, rhs, msk);
  // ...
  __ And(out, lhs, msk);
  // ...

  __ bind(&rhs_not_power_of_2);
  __ remw(out, lhs, rhs);
  // ...
}
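// Note on the fast path above: msk = rhs - 1 combined with (rhs & msk) == 0
// tests whether rhs is a power of two, in which case lhs % rhs reduces to the
// single AND lhs & msk (valid here because this path only sees a non-negative
// lhs). Otherwise the code falls through to remw. The deferred blocks handle
// the signed cases: a non-positive rhs is negated (JS ignores the divisor's
// sign) and deopts with kDivisionByZero if it is zero, while a negative lhs
// takes the remainder of its absolute value and deopts when that remainder is
// zero, since the JS result would then be -0.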
#define DEF_BITWISE_BINOP(Instruction, opcode)                   \
  void Instruction::SetValueLocationConstraints() {              \
    UseRegister(left_input());                                   \
    UseRegister(right_input());                                  \
    DefineAsRegister(this);                                      \
  }                                                              \
                                                                 \
  void Instruction::GenerateCode(MaglevAssembler* masm,          \
                                 const ProcessingState& state) { \
    Register lhs = ToRegister(left_input());                     \
    Register rhs = ToRegister(right_input());                    \
    Register out = ToRegister(result());                         \
    __ opcode(out, lhs, Operand(rhs));                           \
    __ ZeroExtendWord(out, out);                                 \
  }
DEF_BITWISE_BINOP(Int32BitwiseAnd, And)
DEF_BITWISE_BINOP(Int32BitwiseOr, Or)
DEF_BITWISE_BINOP(Int32BitwiseXor, Xor)
#undef DEF_BITWISE_BINOP
#define DEF_SHIFT_BINOP(Instruction, opcode)                      \
  void Instruction::SetValueLocationConstraints() {               \
    UseRegister(left_input());                                    \
    if (right_input().node()->Is<Int32Constant>()) {              \
      UseAny(right_input());                                      \
    } else {                                                      \
      UseRegister(right_input());                                 \
    }                                                             \
    DefineAsRegister(this);                                       \
  }                                                               \
                                                                  \
  void Instruction::GenerateCode(MaglevAssembler* masm,           \
                                 const ProcessingState& state) {  \
    Register out = ToRegister(result());                          \
    Register lhs = ToRegister(left_input());                      \
    if (Int32Constant* constant =                                 \
            right_input().node()->TryCast<Int32Constant>()) {     \
      uint32_t shift = constant->value() & 31;                    \
      /* ... */                                                   \
      __ ZeroExtendWord(out, lhs);                                \
      /* ... */                                                   \
      __ opcode(out, lhs, Operand(shift));                        \
    } else {                                                      \
      Register rhs = ToRegister(right_input());                   \
      __ opcode(out, lhs, Operand(rhs));                          \
    }                                                             \
  }
// ...
#undef DEF_SHIFT_BINOP
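// Note on DEF_SHIFT_BINOP: the `& 31` mask mirrors JavaScript's shift
// semantics, where the shift count is taken modulo 32 (only its low five bits
// are used). When the count is an Int32Constant it is encoded directly as an
// immediate instead of being materialized in a register; a count of zero
// appears to be special-cased around ZeroExtendWord so that no shift
// instruction is emitted at all. The macro is presumably instantiated for the
// int32 shift-left, arithmetic shift-right and logical shift-right nodes.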
void Int32BitwiseNot::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  // ...
  __ ZeroExtendWord(out, out);
}
void Float64Add::SetValueLocationConstraints() {
  // ...
}
void Float64Add::GenerateCode(MaglevAssembler* masm,
                              const ProcessingState& state) {
  // ...
  __ fadd_d(out, left, right);
}

void Float64Subtract::SetValueLocationConstraints() {
  // ...
}
void Float64Subtract::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  // ...
  __ fsub_d(out, left, right);
}

void Float64Multiply::SetValueLocationConstraints() {
  // ...
}
void Float64Multiply::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  // ...
  __ fmul_d(out, left, right);
}

void Float64Divide::SetValueLocationConstraints() {
  // ...
}
void Float64Divide::GenerateCode(MaglevAssembler* masm,
                                 const ProcessingState& state) {
  // ...
  __ fdiv_d(out, left, right);
}
int Float64Modulus::MaxCallStackArgs() const { return 0; }
void Float64Modulus::SetValueLocationConstraints() {
  // ...
}
void Float64Modulus::GenerateCode(MaglevAssembler* masm,
                                  const ProcessingState& state) {
  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(0, 2);
  __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
  // ...
}
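// Note on the call above: PrepareCallCFunction(0, 2) sets up a C call taking
// no general-purpose arguments and two double arguments (the two operands),
// and CallCFunction(..., 0, 2) invokes the runtime's
// mod_two_doubles_operation, which implements the Float64 modulus (fmod
// semantics) out of line, since RISC-V has no floating-point remainder
// instruction.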
void Float64Negate::GenerateCode(MaglevAssembler* masm,
                                 const ProcessingState& state) {
  // ...
  __ fneg_d(out, value);
}

void Float64Abs::GenerateCode(MaglevAssembler* masm,
                              const ProcessingState& state) {
  // ...
}

void Float64Round::GenerateCode(MaglevAssembler* masm,
                                const ProcessingState& state) {
  // ...
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  // ...
  __ LoadFPRImmediate(half_one, 0.5);
  // ...
  __ fadd_d(tmp, in, half_one);
  __ Floor_d_d(out, tmp, fscratch1);
  __ fsgnj_d(out, out, in);
  // ...
  __ Ceil_d_d(out, in, fscratch1);
  // ...
  __ Floor_d_d(out, in, fscratch1);
  // ...
}
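// Note on Float64Round above: for round-to-nearest the code adds 0.5 and takes
// the floor, and fsgnj_d then copies the input's sign onto the result so that
// small negative inputs which round to zero produce -0 rather than +0,
// matching Math.round. The Ceil_d_d and Floor_d_d calls serve the ceiling and
// floor rounding kinds, with fscratch1 acting as the FP scratch register those
// macros require.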
int Float64Exponentiate::MaxCallStackArgs() const { return 0; }
void Float64Exponentiate::SetValueLocationConstraints() {
  // ...
}
void Float64Exponentiate::GenerateCode(MaglevAssembler* masm,
                                       const ProcessingState& state) {
  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(0, 2);
  __ CallCFunction(ExternalReference::ieee754_pow_function(), 2);
  // ...
}

// ...
void Float64Ieee754Unary::GenerateCode(MaglevAssembler* masm,
                                       const ProcessingState& state) {
  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(0, 1);
  // ...
}
void LoadTypedArrayLength::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  // ...
  __ AssertObjectType(object, JS_TYPED_ARRAY_TYPE,
                      AbortReason::kUnexpectedValue);
  // ...
  __ LoadBoundedSizeFromObject(result_register, object,
                               JSTypedArray::kRawByteLengthOffset);
  // ...
  if (shift_size > 0) {
    DCHECK(shift_size == 1 || shift_size == 2 || shift_size == 3);
    __ SrlWord(result_register, result_register, Operand(shift_size));
  }
}
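// Note on the length computation above: the typed array's length is stored as
// a raw byte length, so the element count is obtained by shifting right by the
// element-size shift (presumably ElementsKindToShiftSize of the array's
// elements kind: 1, 2 or 3 for 2-, 4- and 8-byte elements). Byte-sized
// elements need no adjustment, which is why the shift is guarded by
// shift_size > 0.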
void CheckJSDataViewBounds::SetValueLocationConstraints() {
  // ...
  set_temporaries_needed(1);
}
void CheckJSDataViewBounds::GenerateCode(MaglevAssembler* masm,
                                         const ProcessingState& state) {
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  // ...
  __ AssertObjectType(object, JS_DATA_VIEW_TYPE,
                      AbortReason::kUnexpectedValue);
  // ...
  Register byte_length = temps.Acquire();
  __ LoadBoundedSizeFromObject(byte_length, object,
                               JSDataView::kRawByteLengthOffset);
  // ...
  if (element_size > 1) {
    __ SubWord(byte_length, byte_length, Operand(element_size - 1));
    // ...
    __ EmitEagerDeopt(this, DeoptimizeReason::kOutOfBounds);
  }
  // ...
  __ EmitEagerDeopt(this, DeoptimizeReason::kOutOfBounds);
  // ...
}
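// Note on the bounds check above: subtracting element_size - 1 from the
// DataView's byte length turns "index + element_size <= byte_length" into a
// single comparison against the adjusted length in the elided branches, so a
// multi-byte load or store cannot run past the end of the buffer. Either an
// underflowed adjusted length or an out-of-range index takes the kOutOfBounds
// eager deopt.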
void HoleyFloat64ToMaybeNanFloat64::GenerateCode(MaglevAssembler* masm,
                                                 const ProcessingState& state) {
  // ...
}
namespace {

enum class ReduceInterruptBudgetType { kLoop, kReturn };

void HandleInterruptsAndTiering(MaglevAssembler* masm, ZoneLabelRef done,
                                Node* node, ReduceInterruptBudgetType type,
                                Register scratch0) {
  // ...
  if (type == ReduceInterruptBudgetType::kLoop) {
    // ...
    {
      SaveRegisterStateForCall save_register_state(masm,
                                                   node->register_snapshot());
      // ...
      __ LoadWord(function, /* ... */);
      // ...
      __ CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheck_Maglev, 1);
      save_register_state.DefineSafepointWithLazyDeopt(node->lazy_deopt_info());
    }
    // ...
  }

  {
    SaveRegisterStateForCall save_register_state(masm,
                                                 node->register_snapshot());
    // ...
    __ LoadWord(function, /* ... */);
    // ...
    __ CallRuntime(Runtime::kBytecodeBudgetInterrupt_Maglev, 1);
    save_register_state.DefineSafepoint();
  }
  // ...
}
void GenerateReduceInterruptBudget(MaglevAssembler* masm, Node* node,
                                   Register feedback_cell,
                                   ReduceInterruptBudgetType type, int amount) {
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  // ...
  __ Sub32(budget, budget, Operand(amount));
  // ...
  ZoneLabelRef done(masm);
  Label* deferred_code = __ MakeDeferredCode(
      [](MaglevAssembler* masm, ZoneLabelRef done, Node* node,
         ReduceInterruptBudgetType type, Register scratch) {
        HandleInterruptsAndTiering(masm, done, node, type, scratch);
        // ...
      },
      /* ... */);
  // ...
}

}  // namespace
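// Note on the interrupt budget handling above: the budget stored in the
// feedback cell is loaded, decremented by `amount`, and presumably written
// back; when it drops below zero the elided branch takes the deferred slow
// path, which calls one of the BytecodeBudgetInterrupt runtime functions shown
// in HandleInterruptsAndTiering so that pending interrupts are serviced and
// the TieringManager can consider re-optimization. Only the loop variant uses
// the WithStackCheck entry, which also checks the interrupt stack limit.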
void ReduceInterruptBudgetForLoop::SetValueLocationConstraints() {
  // ...
  set_temporaries_needed(1);
}
void ReduceInterruptBudgetForLoop::GenerateCode(MaglevAssembler* masm,
                                                const ProcessingState& state) {
  GenerateReduceInterruptBudget(masm, this, /* ... */,
                                ReduceInterruptBudgetType::kLoop, amount());
}

void ReduceInterruptBudgetForReturn::SetValueLocationConstraints() {
  // ...
  set_temporaries_needed(1);
}
void ReduceInterruptBudgetForReturn::GenerateCode(
    MaglevAssembler* masm, const ProcessingState& state) {
  GenerateReduceInterruptBudget(masm, this, /* ... */,
                                ReduceInterruptBudgetType::kReturn, amount());
}
void Return::GenerateCode(MaglevAssembler* masm,
                          const ProcessingState& state) {
  // ...
  int formal_params_size =
      masm->compilation_info()->toplevel_compilation_unit()->parameter_count();
  // ...
  __ LoadWord(actual_params_size,
              /* ... */);
  // ...
  __ LeaveFrame(StackFrame::MAGLEV);

  // Use the larger of the actual and formal parameter counts when dropping
  // arguments.
  Label corrected_args_count;
  // ... branch on actual_params_size vs Operand(formal_params_size) ...
  __ Move(actual_params_size, formal_params_size);
  __ bind(&corrected_args_count);
  // ...
  __ DropArguments(actual_params_size);
  __ Ret();
}
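// Note on the epilogue above: a caller may pass more arguments than the
// function's formal parameter count, so after leaving the MAGLEV frame the
// code compares the actual argument count (loaded from the frame) with the
// formal count and keeps the larger of the two before DropArguments, ensuring
// all stack arguments are popped in either case.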