std::optional<int32_t> TryGetAddImmediateInt32ConstantInput(Node* node,
                                                            int index) {
  if (auto res = node->TryGetInt32ConstantInput(index)) {
    if (vixl::aarch64::Assembler::IsImmAddSub(*res)) {
      return res;
    }
  }
  return {};
}
std::optional<int32_t> TryGetLogicalImmediateInt32ConstantInput(Node* node,
                                                                int index) {
  if (auto res = node->TryGetInt32ConstantInput(index)) {
    if (*res <= 0) return {};
    unsigned n, imm_s, imm_r;
    if (vixl::aarch64::Assembler::IsImmLogical(*res, kWRegSizeInBits, &n,
                                               &imm_s, &imm_r)) {
      return res;
    }
  }
  return {};
}
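// Note: the split into an "add immediate" and a "logical immediate" helper
// reflects ARM64 encoding rules rather than anything Maglev-specific.
// Add/sub instructions take a 12-bit (optionally shifted) immediate, while
// And/Orr/Eor take bitmask immediates (rotated patterns of contiguous set
// bits), so a constant that Adds can encode may not be encodable by And and
// vice versa. IsImmAddSub/IsImmLogical are the VIXL encoder's checks for
// those two immediate forms.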
void Int32NegateWithOverflow::SetValueLocationConstraints() {
  UseRegister(value_input());
  DefineAsRegister(this);
}

void Int32NegateWithOverflow::GenerateCode(MaglevAssembler* masm,
                                           const ProcessingState& state) {
  Register value = ToRegister(value_input()).W();
  Register out = ToRegister(result()).W();

  // Deopt when the input is zero, since the result would be -0.
  static_assert(Int32NegateWithOverflow::kProperties.can_eager_deopt());
  Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kOverflow);
  __ RecordComment("-- Jump to eager deopt");
  __ Cbz(value, fail);

  __ Negs(out, value);
  // The output register shouldn't be a register input into the eager deopt
  // info.
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
}
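// Negs sets the overflow (V) flag exactly when the input is kMinInt, because
// -kMinInt (0x80000000) is not representable as a 32-bit signed integer; the
// `vs` condition above deopts on that case. Zero is handled separately with
// Cbz because negating it produces -0, which is a valid double but not a
// valid int32.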
void Int32AbsWithOverflow::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  Register out = ToRegister(result()).W();
  Label done;
  DCHECK(ToRegister(input()).W().Aliases(out));
  __ Cmp(out, Immediate(0));
  __ JumpIf(ge, &done);
  __ Negs(out, out);
  // The output register shouldn't be a register input into the eager deopt
  // info.
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
  __ Bind(&done);
}
void Int32IncrementWithOverflow::SetValueLocationConstraints() {
  UseRegister(value_input());
  DefineAsRegister(this);
}

void Int32IncrementWithOverflow::GenerateCode(MaglevAssembler* masm,
                                              const ProcessingState& state) {
  Register value = ToRegister(value_input()).W();
  Register out = ToRegister(result()).W();
  __ Adds(out, value, Immediate(1));
  // The output register shouldn't be a register input into the eager deopt
  // info.
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
}
void Int32DecrementWithOverflow::SetValueLocationConstraints() {
  UseRegister(value_input());
  DefineAsRegister(this);
}

void Int32DecrementWithOverflow::GenerateCode(MaglevAssembler* masm,
                                              const ProcessingState& state) {
  Register value = ToRegister(value_input()).W();
  Register out = ToRegister(result()).W();
  __ Subs(out, value, Immediate(1));
  // The output register shouldn't be a register input into the eager deopt
  // info.
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
}
void BuiltinStringFromCharCode::SetValueLocationConstraints() {
  if (code_input().node()->Is<Int32Constant>()) {
    UseAny(code_input());
  } else {
    UseAndClobberRegister(code_input());
  }
  set_temporaries_needed(1);
  DefineAsRegister(this);
}
void BuiltinStringFromCharCode::GenerateCode(MaglevAssembler* masm,
                                             const ProcessingState& state) {
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register scratch = temps.Acquire();
  Register result_string = ToRegister(result());
  if (Int32Constant* constant = code_input().node()->TryCast<Int32Constant>()) {
    int32_t char_code = constant->value() & 0xFFFF;
    if (0 <= char_code && char_code < String::kMaxOneByteCharCode) {
      __ LoadSingleCharacterString(result_string, char_code);
    } else {
      // The char code is outside the one-byte range: allocate a two-byte
      // string of length one and store the code into its only slot.
      __ AllocateTwoByteString(register_snapshot(), result_string, 1);
      __ Move(scratch, char_code);
      __ Strh(scratch.W(),
              FieldMemOperand(result_string,
                              OFFSET_OF_DATA_START(SeqTwoByteString)));
    }
  } else {
    __ StringFromCharCode(register_snapshot(), nullptr, result_string,
                          ToRegister(code_input()), scratch,
                          MaglevAssembler::CharCodeMaskMode::kMustApplyMask);
  }
}
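// For constant char codes the compiler picks the fast path statically: codes
// below String::kMaxOneByteCharCode are fetched from the pre-allocated
// single-character string table, while anything larger allocates a fresh
// one-element two-byte string and stores the code directly into its payload.
// Non-constant codes defer that decision to the StringFromCharCode macro at
// runtime.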
void InlinedAllocation::SetValueLocationConstraints() {
  UseRegister(allocation_block_input());
  DefineAsRegister(this);
}

void InlinedAllocation::GenerateCode(MaglevAssembler* masm,
                                     const ProcessingState& state) {
  Register out = ToRegister(result());
  Register value = ToRegister(allocation_block_input());
  if (offset() == 0) {
    __ Move(out, value);
  } else {
    __ Add(out, value, offset());
  }
}
void ArgumentsLength::SetValueLocationConstraints() { DefineAsRegister(this); }

void ArgumentsLength::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  Register argc = ToRegister(result());
  __ Ldr(argc, MemOperand(fp, StandardFrameConstants::kArgCOffset));
  __ Sub(argc, argc, 1);  // Remove receiver.
}
void RestLength::SetValueLocationConstraints() { DefineAsRegister(this); }

void RestLength::GenerateCode(MaglevAssembler* masm,
                              const ProcessingState& state) {
  Register length = ToRegister(result());
  Label done;
  __ Ldr(length, MemOperand(fp, StandardFrameConstants::kArgCOffset));
  __ Subs(length, length, Immediate(formal_parameter_count() + 1));
  __ B(&done, ge);
  __ Move(length, 0);
  __ Bind(&done);
  __ UncheckedSmiTagInt32(length);
}
void CheckedIntPtrToInt32::SetValueLocationConstraints() {
  UseRegister(input());
  DefineSameAsFirst(this);
}

void CheckedIntPtrToInt32::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  Register input_reg = ToRegister(input());

  __ CompareAndBranch(input_reg.X(),
                      Immediate(std::numeric_limits<int32_t>::max()), gt,
                      __ GetDeoptLabel(this, DeoptimizeReason::kNotInt32));
  __ CompareAndBranch(input_reg.X(),
                      Immediate(std::numeric_limits<int32_t>::min()), lt,
                      __ GetDeoptLabel(this, DeoptimizeReason::kNotInt32));
}
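// The input lives in a 64-bit (X) register; the two compares above deopt
// unless the value fits in [INT32_MIN, INT32_MAX], after which the low W
// half can be used directly as the int32 result (DefineSameAsFirst keeps it
// in place, so no move is emitted).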
void CheckFloat64SameValue::SetValueLocationConstraints() {
  UseRegister(target_input());
  set_temporaries_needed((value().get_scalar() == 0) ? 1 : 0);
  set_double_temporaries_needed(
      value().is_nan() || (value().get_scalar() == 0) ? 0 : 1);
}
void CheckFloat64SameValue::GenerateCode(MaglevAssembler* masm,
                                         const ProcessingState& state) {
  Label* fail = __ GetDeoptLabel(this, deoptimize_reason());
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  DoubleRegister target = ToDoubleRegister(target_input());
  if (value().is_nan()) {
    __ JumpIfNotNan(target, fail);
  } else if (value().get_scalar() == 0) {
    // +0.0 and -0.0 compare equal under Fcmp, so inspect the sign bit through
    // an integer view of the double to tell them apart.
    Register scratch = temps.AcquireScratch();
    __ Fcmp(target, value().get_scalar());
    __ JumpIf(kNotEqual, fail);
    __ Fmov(scratch, target);
    if (value().get_bits() == 0) {
      __ Tbnz(scratch, 63, fail);  // Expected +0.0: fail if sign bit is set.
    } else {
      __ Tbz(scratch, 63, fail);  // Expected -0.0: fail if sign bit is clear.
    }
  } else {
    DoubleRegister double_scratch = temps.AcquireScratchDouble();
    __ Move(double_scratch, value());
    __ CompareFloat64AndJumpIf(double_scratch, target, kNotEqual, fail, fail);
  }
}
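// SameValue semantics differ from == in exactly the two cases special-cased
// above: NaN is SameValue to NaN (checked with JumpIfNotNan), and +0.0 is
// not SameValue to -0.0 (checked via the sign bit), while every other double
// can be compared with an ordinary float compare.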
void Int32AddWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  if (TryGetAddImmediateInt32ConstantInput(this, kRightIndex)) {
    UseAny(right_input());
  } else {
    UseRegister(right_input());
  }
  DefineAsRegister(this);
}

void Int32AddWithOverflow::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  Register left = ToRegister(left_input()).W();
  Register out = ToRegister(result()).W();
  if (!right_input().operand().IsRegister()) {
    auto right_const = TryGetInt32ConstantInput(kRightIndex);
    DCHECK(right_const);
    __ Adds(out, left, *right_const);
  } else {
    Register right = ToRegister(right_input()).W();
    __ Adds(out, left, right);
  }
  // The output register shouldn't be a register input into the eager deopt
  // info.
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
}
void Int32SubtractWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  if (TryGetAddImmediateInt32ConstantInput(this, kRightIndex)) {
    UseAny(right_input());
  } else {
    UseRegister(right_input());
  }
  DefineAsRegister(this);
}

void Int32SubtractWithOverflow::GenerateCode(MaglevAssembler* masm,
                                             const ProcessingState& state) {
  Register left = ToRegister(left_input()).W();
  Register out = ToRegister(result()).W();
  if (!right_input().operand().IsRegister()) {
    auto right_const = TryGetInt32ConstantInput(kRightIndex);
    DCHECK(right_const);
    __ Subs(out, left, *right_const);
  } else {
    Register right = ToRegister(right_input()).W();
    __ Subs(out, left, right);
  }
  // The output register shouldn't be a register input into the eager deopt
  // info.
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
}
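// The constraint/codegen pairing above is a recurring pattern in this file:
// SetValueLocationConstraints marks the right input UseAny only when the
// constant fits an ARM64 add/sub immediate, and GenerateCode then re-derives
// the constant via TryGetInt32ConstantInput instead of spending a register
// on it. If the constant does not fit, the input is forced into a register
// and the register form of Adds/Subs is emitted instead.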
void Int32MultiplyWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}
void Int32MultiplyWithOverflow::GenerateCode(MaglevAssembler* masm,
                                             const ProcessingState& state) {
  Register left = ToRegister(left_input()).W();
  Register right = ToRegister(right_input()).W();
  Register out = ToRegister(result()).W();

  MaglevAssembler::TemporaryRegisterScope temps(masm);
  bool out_alias_input = out == left || out == right;
  Register res = out.X();
  if (out_alias_input) {
    res = temps.AcquireScratch();
  }

  __ Smull(res, left, right);

  // if res != (res[0:31] sign extended to 64 bits), then the multiplication
  // result is too large for 32 bits.
  __ Cmp(res, Operand(res.W(), SXTW));
  __ EmitEagerDeoptIf(ne, DeoptimizeReason::kOverflow, this);

  // If the result is zero, check if either lhs or rhs is negative.
  Label end;
  __ CompareAndBranch(res, Immediate(0), ne, &end);
  {
    MaglevAssembler::TemporaryRegisterScope temps(masm);
    Register temp = temps.AcquireScratch().W();
    __ Orr(temp, left, right);
    // If one of them is negative, we must have a -0 result, which is not an
    // int32, so deopt.
    __ RecordComment("-- Jump to eager deopt if the result is negative zero");
    __ Tbnz(temp, temp.SizeInBits() - 1,
            __ GetDeoptLabel(this, DeoptimizeReason::kOverflow));
  }
  __ Bind(&end);
  if (out_alias_input) {
    __ Move(out, res.W());
  }
}
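// Overflow detection here works on the full 64-bit product: Smull writes
// left * right into a 64-bit register, and Cmp(res, Operand(res.W(), SXTW))
// checks whether sign-extending the low 32 bits reproduces the full product.
// E.g. 0x10000 * 0x10000 = 0x1'0000'0000: its low word is 0, which
// sign-extends to 0 != 0x1'0000'0000, so the node deopts. The
// Orr(temp, left, right) in the zero-result path is a sign test: a zero
// product needs a deopt only if an operand was negative (e.g. -2 * 0 is -0
// in JS), and OR-ing the operands makes the result's sign bit the
// "either operand was negative" bit.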
void Int32DivideWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}
void Int32DivideWithOverflow::GenerateCode(MaglevAssembler* masm,
                                           const ProcessingState& state) {
  Register left = ToRegister(left_input()).W();
  Register right = ToRegister(right_input()).W();
  Register out = ToRegister(result()).W();

  // Pre-check the operand combinations that a JS division must reject, since
  // Sdiv itself neither traps nor sets flags for them.

  // Check if {right} is positive (and not zero).
  __ Cmp(right, Immediate(0));
  ZoneLabelRef done(masm);
  __ JumpToDeferredIf(
      le,
      [](MaglevAssembler* masm, ZoneLabelRef done, Register left,
         Register right, Int32DivideWithOverflow* node) {
        // {right} is negative or zero.
        Label* deopt = __ GetDeoptLabel(node, DeoptimizeReason::kNotInt32);

        // Check if {right} is zero. The compare flags are still live here.
        __ JumpIf(eq, deopt);

        // Check if {left} is zero, as that would produce minus zero.
        __ CompareAndBranch(left, Immediate(0), eq, deopt);

        // Check if {left} is kMinInt and {right} is -1, in which case the
        // result would be -kMinInt, which is not representable as int32.
        __ Cmp(left, Immediate(kMinInt));
        __ JumpIf(ne, *done);
        __ Cmp(right, Immediate(-1));
        __ JumpIf(ne, *done);
        __ JumpToDeopt(deopt);
      },
      done, left, right, this);
  __ Bind(*done);

  // Perform the actual integer division.
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  bool out_alias_input = out == left || out == right;
  Register res = out;
  if (out_alias_input) {
    res = temps.AcquireScratch().W();
  }
  __ Sdiv(res, left, right);

  // Check that the remainder is zero, i.e. that the quotient was exact.
  Register temp = temps.AcquireScratch().W();
  __ Msub(temp, res, right, left);
  __ CompareAndBranch(temp, Immediate(0), ne,
                      __ GetDeoptLabel(this, DeoptimizeReason::kNotInt32));

  __ Move(out, res);
}
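// ARM64's Sdiv truncates toward zero and never traps, so the deferred slow
// path above has to reproduce the cases where a JS division result is not an
// int32: division by zero, -0 (0 divided by a negative number), and
// kMinInt / -1. The trailing Msub computes left - res * right, i.e. the
// remainder; a non-zero remainder means the true quotient was fractional,
// which this node must also reject.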
void Int32ModulusWithOverflow::SetValueLocationConstraints() {
  UseAndClobberRegister(left_input());
  UseAndClobberRegister(right_input());
  DefineAsRegister(this);
}
void Int32ModulusWithOverflow::GenerateCode(MaglevAssembler* masm,
                                            const ProcessingState& state) {
  // Using the same algorithm as in EffectControlLinearizer:
  //   if rhs <= 0 then
  //     rhs = -rhs
  //     deopt if rhs == 0
  //   if lhs < 0 then
  //     let lhs_abs = -lhs in
  //     let res = lhs_abs % rhs in
  //     deopt if res == 0
  //     -res
  //   else
  //     let msk = rhs - 1 in
  //     if rhs & msk == 0 then
  //       lhs & msk
  //     else
  //       lhs % rhs

  Register lhs = ToRegister(left_input()).W();
  Register rhs = ToRegister(right_input()).W();
  Register out = ToRegister(result()).W();

  static constexpr DeoptimizeReason deopt_reason =
      DeoptimizeReason::kDivisionByZero;

  if (lhs == rhs) {
    // For the algorithm above, lhs and rhs must not alias each other. With
    // equal operands the result is zero, unless the operands are negative,
    // in which case the result would be -0 and we deopt.
    __ Tst(lhs, lhs);
    __ EmitEagerDeoptIf(mi, deopt_reason, this);
    __ Move(out, 0);
    return;
  }

  DCHECK(!AreAliased(lhs, rhs));

  ZoneLabelRef done(masm);
  ZoneLabelRef rhs_checked(masm);
  __ Cmp(rhs, Immediate(0));
  __ JumpToDeferredIf(
      le,
      [](MaglevAssembler* masm, ZoneLabelRef rhs_checked, Register rhs,
         Int32ModulusWithOverflow* node) {
        __ Negs(rhs, rhs);
        __ B(*rhs_checked, ne);
        __ EmitEagerDeopt(node, deopt_reason);
      },
      rhs_checked, rhs, this);
  __ Bind(*rhs_checked);

  __ Cmp(lhs, Immediate(0));
  __ JumpToDeferredIf(
      lt,
      [](MaglevAssembler* masm, ZoneLabelRef done, Register lhs, Register rhs,
         Register out, Int32ModulusWithOverflow* node) {
        MaglevAssembler::TemporaryRegisterScope temps(masm);
        Register res = temps.AcquireScratch().W();
        __ Neg(lhs, lhs);
        __ Udiv(res, lhs, rhs);
        __ Msub(out, res, rhs, lhs);
        __ Negs(out, out);
        __ B(*done, ne);
        // A zero result here means the true result is -0, which is not an
        // int32, so deopt.
        __ EmitEagerDeopt(node, deopt_reason);
      },
      done, lhs, rhs, out, this);

  Label rhs_not_power_of_2;
  {
    MaglevAssembler::TemporaryRegisterScope temps(masm);
    Register mask = temps.AcquireScratch().W();
    __ Add(mask, rhs, Immediate(-1));
    __ Tst(mask, rhs);
    __ JumpIf(ne, &rhs_not_power_of_2);

    // {rhs} is a power of 2: take the fast path.
    __ And(out, mask, lhs);
    __ B(*done);
  }
  __ Bind(&rhs_not_power_of_2);

  // Store the Udiv result in a scratch register in case {out} aliases {lhs}
  // or {rhs}: both are still needed to compute the remainder.
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register res = temps.AcquireScratch().W();
  __ Udiv(res, lhs, rhs);
  __ Msub(out, res, rhs, lhs);

  __ Bind(*done);
}
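// The power-of-two fast path relies on the identity
//   lhs % (1 << k) == lhs & ((1 << k) - 1)   for lhs >= 0,
// and on the check Tst(rhs - 1, rhs): a power of two ANDed with its
// predecessor is zero. By this point rhs has been negated if it was
// negative and lhs has been proven non-negative, so the identity applies.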
#define DEF_BITWISE_BINOP(Instruction, opcode)                          \
  void Instruction::SetValueLocationConstraints() {                     \
    UseRegister(left_input());                                          \
    if (TryGetLogicalImmediateInt32ConstantInput(this, kRightIndex)) {  \
      UseAny(right_input());                                            \
    } else {                                                            \
      UseRegister(right_input());                                       \
    }                                                                   \
    DefineAsRegister(this);                                             \
  }                                                                     \
                                                                        \
  void Instruction::GenerateCode(MaglevAssembler* masm,                 \
                                 const ProcessingState& state) {        \
    Register left = ToRegister(left_input()).W();                       \
    Register out = ToRegister(result()).W();                            \
    if (!right_input().operand().IsRegister()) {                        \
      auto right_const = TryGetInt32ConstantInput(kRightIndex);         \
      DCHECK(right_const);                                              \
      __ opcode(out, left, *right_const);                               \
    } else {                                                            \
      Register right = ToRegister(right_input()).W();                   \
      __ opcode(out, left, right);                                      \
    }                                                                   \
  }
DEF_BITWISE_BINOP(Int32BitwiseAnd, And)
DEF_BITWISE_BINOP(Int32BitwiseOr, Orr)
DEF_BITWISE_BINOP(Int32BitwiseXor, Eor)
#undef DEF_BITWISE_BINOP
#define DEF_SHIFT_BINOP(Instruction, opcode)                        \
  void Instruction::SetValueLocationConstraints() {                 \
    UseRegister(left_input());                                      \
    if (TryGetInt32ConstantInput(kRightIndex)) {                    \
      UseAny(right_input());                                        \
    } else {                                                        \
      UseRegister(right_input());                                   \
    }                                                               \
    DefineAsRegister(this);                                         \
  }                                                                 \
                                                                    \
  void Instruction::GenerateCode(MaglevAssembler* masm,             \
                                 const ProcessingState& state) {    \
    Register out = ToRegister(result()).W();                        \
    Register left = ToRegister(left_input()).W();                   \
    if (auto right_const = TryGetInt32ConstantInput(kRightIndex)) { \
      int right = *right_const & 31;                                \
      if (right == 0) {                                             \
        __ Move(out, left);                                         \
      } else {                                                      \
        __ opcode(out, left, right);                                \
      }                                                             \
    } else {                                                        \
      Register right = ToRegister(right_input()).W();               \
      __ opcode##v(out, left, right);                               \
    }                                                               \
  }
DEF_SHIFT_BINOP(Int32ShiftLeft, Lsl)
DEF_SHIFT_BINOP(Int32ShiftRight, Asr)
DEF_SHIFT_BINOP(Int32ShiftRightLogical, Lsr)
#undef DEF_SHIFT_BINOP
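// The `& 31` mirrors JS shift semantics (the shift count is taken mod 32),
// which ARM64's register-form shifts already implement: `opcode##v` expands
// to Lslv/Asrv/Lsrv, the variable-shift instructions that mask the count in
// hardware, so no explicit And is needed for the register case. Constant
// shifts of zero reduce to a plain register-to-register Move.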
void Int32BitwiseNot::SetValueLocationConstraints() {
  UseRegister(value_input());
  DefineAsRegister(this);
}

void Int32BitwiseNot::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  Register value = ToRegister(value_input()).W();
  Register out = ToRegister(result()).W();
  __ Mvn(out, value);
}
void Float64Add::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}

void Float64Add::GenerateCode(MaglevAssembler* masm,
                              const ProcessingState& state) {
  DoubleRegister left = ToDoubleRegister(left_input());
  DoubleRegister right = ToDoubleRegister(right_input());
  DoubleRegister out = ToDoubleRegister(result());
  __ Fadd(out, left, right);
}

void Float64Subtract::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}

void Float64Subtract::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  DoubleRegister left = ToDoubleRegister(left_input());
  DoubleRegister right = ToDoubleRegister(right_input());
  DoubleRegister out = ToDoubleRegister(result());
  __ Fsub(out, left, right);
}

void Float64Multiply::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}

void Float64Multiply::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  DoubleRegister left = ToDoubleRegister(left_input());
  DoubleRegister right = ToDoubleRegister(right_input());
  DoubleRegister out = ToDoubleRegister(result());
  __ Fmul(out, left, right);
}

void Float64Divide::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}

void Float64Divide::GenerateCode(MaglevAssembler* masm,
                                 const ProcessingState& state) {
  DoubleRegister left = ToDoubleRegister(left_input());
  DoubleRegister right = ToDoubleRegister(right_input());
  DoubleRegister out = ToDoubleRegister(result());
  __ Fdiv(out, left, right);
}
int Float64Modulus::MaxCallStackArgs() const { return 0; }
void Float64Modulus::SetValueLocationConstraints() {
  UseFixed(left_input(), v0);
  UseFixed(right_input(), v1);
  DefineSameAsFirst(this);
}
void Float64Modulus::GenerateCode(MaglevAssembler* masm,
                                  const ProcessingState& state) {
  AllowExternalCallThatCantCauseGC scope(masm);
  __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
}
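// CallCFunction(ref, 0, 2) here means zero general-purpose and two double
// arguments. With the inputs pinned to v0/v1 by the constraints above, the
// operands already sit in the AAPCS64 floating-point argument registers, and
// the C result comes back in d0, which DefineSameAsFirst maps onto the
// node's output without an extra move.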
void Float64Negate::GenerateCode(MaglevAssembler* masm,
                                 const ProcessingState& state) {
  DoubleRegister value = ToDoubleRegister(input());
  DoubleRegister out = ToDoubleRegister(result());
  __ Fneg(out, value);
}
void Float64Abs::GenerateCode(MaglevAssembler* masm,
                              const ProcessingState& state) {
  DoubleRegister in = ToDoubleRegister(input());
  DoubleRegister out = ToDoubleRegister(result());
  __ Fabs(out, in);
}
void Float64Round::GenerateCode(MaglevAssembler* masm,
                                const ProcessingState& state) {
  DoubleRegister in = ToDoubleRegister(input());
  DoubleRegister out = ToDoubleRegister(result());
  if (kind_ == Kind::kNearest) {
    MaglevAssembler::TemporaryRegisterScope temps(masm);
    DoubleRegister temp = temps.AcquireScratchDouble();
    DoubleRegister half_one = temps.AcquireScratchDouble();
    __ Move(temp, in);
    // Frintn rounds to even on tie, while JS expects it to round towards
    // +Infinity. Fix the difference by checking if we rounded down by exactly
    // 0.5, and if so, rounding to the other side.
    __ Frintn(out, in);
    __ Fsub(temp, temp, out);
    __ Move(half_one, 0.5);
    __ Fcmp(temp, half_one);
    Label done;
    __ JumpIf(ne, &done);
    // Fix the wrong tie-to-even rounding by adding 0.5 twice.
    __ Fadd(out, out, half_one);
    __ Fadd(out, out, half_one);
    __ Bind(&done);
  } else if (kind_ == Kind::kCeil) {
    __ Frintp(out, in);
  } else {
    DCHECK_EQ(kind_, Kind::kFloor);
    __ Frintm(out, in);
  }
}
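// Worked example of the kNearest fix-up: for in = 2.5, Frintn (ties-to-even)
// yields 2.0, but JS Math.round(2.5) is 3.0. temp = in - out = 0.5 flags the
// mis-rounded tie, and adding half_one twice lands on 3.0 while reusing the
// register that already holds 0.5; the additions stay exact because a tie
// can only occur for inputs small enough to have a fractional part.
// Non-tie inputs leave temp != 0.5 and skip the fix-up.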
int Float64Exponentiate::MaxCallStackArgs() const { return 0; }
void Float64Exponentiate::SetValueLocationConstraints() {
  UseFixed(left_input(), v0);
  UseFixed(right_input(), v1);
  DefineSameAsFirst(this);
}
void Float64Exponentiate::GenerateCode(MaglevAssembler* masm,
                                       const ProcessingState& state) {
  AllowExternalCallThatCantCauseGC scope(masm);
  __ CallCFunction(ExternalReference::ieee754_pow_function(), 2);
}
void Float64Ieee754Unary::GenerateCode(MaglevAssembler* masm,
                                       const ProcessingState& state) {
  AllowExternalCallThatCantCauseGC scope(masm);
  __ CallCFunction(ieee_function_ref(), 1);
}
void LoadTypedArrayLength::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  Register object = ToRegister(receiver_input());
  Register result_register = ToRegister(result());
  if (v8_flags.debug_code) {
    __ AssertObjectType(object, JS_TYPED_ARRAY_TYPE,
                        AbortReason::kUnexpectedValue);
  }
  __ LoadBoundedSizeFromObject(result_register, object,
                               JSTypedArray::kRawByteLengthOffset);
  int shift_size = ElementsKindToShiftSize(elements_kind_);
  if (shift_size > 0) {
    // Convert the byte length to an element count.
    DCHECK(shift_size == 1 || shift_size == 2 || shift_size == 3);
    __ Lsr(result_register, result_register, shift_size);
  }
}
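// LoadBoundedSizeFromObject reads (and, where applicable, decodes) the raw
// byte length, and the logical element count is byte_length >> shift, where
// the shift comes from ElementsKindToShiftSize: 1 for 16-bit, 2 for 32-bit,
// and 3 for 64-bit element kinds. 8-bit kinds shift by 0 and skip the Lsr
// entirely.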
void CheckJSDataViewBounds::SetValueLocationConstraints() {
  UseRegister(receiver_input());
  UseRegister(index_input());
  set_temporaries_needed(1);
}
void CheckJSDataViewBounds::GenerateCode(MaglevAssembler* masm,
                                         const ProcessingState& state) {
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register object = ToRegister(receiver_input());
  Register index = ToRegister(index_input());
  if (v8_flags.debug_code) {
    __ AssertObjectType(object, JS_DATA_VIEW_TYPE,
                        AbortReason::kUnexpectedValue);
  }

  Register byte_length = temps.Acquire();
  __ LoadBoundedSizeFromObject(byte_length, object,
                               JSDataView::kRawByteLengthOffset);

  int element_size = compiler::ExternalArrayElementSize(element_type_);
  if (element_size > 1) {
    __ Subs(byte_length, byte_length, Immediate(element_size - 1));
    __ EmitEagerDeoptIf(mi, DeoptimizeReason::kOutOfBounds, this);
  }
  __ Cmp(index, byte_length);
  __ EmitEagerDeoptIf(hs, DeoptimizeReason::kOutOfBounds, this);
}
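// For element sizes above one byte, an access at `index` touches bytes
// [index, index + element_size - 1], so the byte length is shrunk by
// element_size - 1 up front (deopting via `mi` if that underflows) and a
// single unsigned compare with `hs` then covers both a negative index
// (huge when viewed unsigned) and an index at or past the adjusted end.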
void HoleyFloat64ToMaybeNanFloat64::GenerateCode(MaglevAssembler* masm,
                                                 const ProcessingState& state) {
  // The hole value is a signalling NaN, so just silence it to get the float64
  // value.
  __ CanonicalizeNaN(ToDoubleRegister(result()), ToDoubleRegister(input()));
}
namespace {

enum class ReduceInterruptBudgetType { kLoop, kReturn };

void HandleInterruptsAndTiering(MaglevAssembler* masm, ZoneLabelRef done,
                                Node* node, ReduceInterruptBudgetType type,
                                Register scratch0) {
  // For loops, first check for interrupts. Don't do this for returns, as we
  // can't lazy deopt to the end of a return.
  if (type == ReduceInterruptBudgetType::kLoop) {
    Label next;
    // Here, we only care about interrupts since we've already guarded against
    // real stack overflows on function entry.
    {
      Register stack_limit = scratch0;
      __ LoadStackLimit(stack_limit, StackLimitKind::kInterruptStackLimit);
      __ Cmp(sp, stack_limit);
      __ B(&next, hi);
    }

    // An interrupt has been requested and we must call into runtime to handle
    // it; since we already pay the call cost, combine it with the tiering
    // budget update.
    {
      SaveRegisterStateForCall save_register_state(masm,
                                                   node->register_snapshot());
      Register function = scratch0;
      __ Ldr(function, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
      __ Push(function);
      // Move into kContextRegister after the load into scratch0, just in case
      // scratch0 happens to be kContextRegister.
      __ Move(kContextRegister, masm->native_context().object());
      __ CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheck_Maglev, 1);
      save_register_state.DefineSafepointWithLazyDeopt(node->lazy_deopt_info());
    }
    __ B(*done);
    __ Bind(&next);
  }

  // No pending interrupts: call into the runtime to update the tiering
  // budget only.
  {
    SaveRegisterStateForCall save_register_state(masm,
                                                 node->register_snapshot());
    Register function = scratch0;
    __ Ldr(function, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
    __ Push(function);
    __ Move(kContextRegister, masm->native_context().object());
    // Note: must not cause a lazy deopt!
    __ CallRuntime(Runtime::kBytecodeBudgetInterrupt_Maglev, 1);
    save_register_state.DefineSafepoint();
  }
  __ B(*done);
}
void GenerateReduceInterruptBudget(MaglevAssembler* masm, Node* node,
                                   Register feedback_cell,
                                   ReduceInterruptBudgetType type, int amount) {
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register budget = temps.Acquire().W();
  __ Ldr(budget, FieldMemOperand(feedback_cell,
                                 FeedbackCell::kInterruptBudgetOffset));
  __ Subs(budget, budget, Immediate(amount));
  __ Str(budget, FieldMemOperand(feedback_cell,
                                 FeedbackCell::kInterruptBudgetOffset));
  ZoneLabelRef done(masm);
  __ JumpToDeferredIf(lt, HandleInterruptsAndTiering, done, node, type,
                      budget.X());
  __ Bind(*done);
}

}  // namespace
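// The interrupt budget is a signed counter stored in the FeedbackCell. Subs
// both decrements it and sets the flags, so a single `lt` branch (budget
// went negative) routes to the deferred HandleInterruptsAndTiering path,
// keeping the common case at a load, a subtract, a store, and an untaken
// branch; the now-dead budget register doubles as the deferred code's
// scratch register.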
int ReduceInterruptBudgetForLoop::MaxCallStackArgs() const { return 1; }
void ReduceInterruptBudgetForLoop::SetValueLocationConstraints() {
  UseRegister(feedback_cell());
  set_temporaries_needed(1);
}
void ReduceInterruptBudgetForLoop::GenerateCode(MaglevAssembler* masm,
                                                const ProcessingState& state) {
  GenerateReduceInterruptBudget(masm, this, ToRegister(feedback_cell()),
                                ReduceInterruptBudgetType::kLoop, amount());
}
int ReduceInterruptBudgetForReturn::MaxCallStackArgs() const { return 1; }
void ReduceInterruptBudgetForReturn::SetValueLocationConstraints() {
  UseRegister(feedback_cell());
  set_temporaries_needed(1);
}
void ReduceInterruptBudgetForReturn::GenerateCode(
    MaglevAssembler* masm, const ProcessingState& state) {
  GenerateReduceInterruptBudget(masm, this, ToRegister(feedback_cell()),
                                ReduceInterruptBudgetType::kReturn, amount());
}
void Return::SetValueLocationConstraints() {
  UseFixed(value_input(), kReturnRegister0);
}

void Return::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
  DCHECK_EQ(ToRegister(value_input()), kReturnRegister0);

  // Read the formal number of parameters from the top level compilation unit
  // (i.e. the outermost, non inlined function).
  int formal_params_size =
      masm->compilation_info()->toplevel_compilation_unit()->parameter_count();

  // We're not going to continue execution, so we can use an arbitrary register
  // here instead of relying on temporaries from the register allocator. We
  // cannot use scratch registers, since they're used in LeaveFrame and
  // DropArguments.
  Register actual_params_size = x9;
  Register params_size = x10;

  // Compute the size of the actual parameters + receiver.
  __ Ldr(actual_params_size,
         MemOperand(fp, StandardFrameConstants::kArgCOffset));
  __ Mov(params_size, Immediate(formal_params_size));

  // If the actual number of arguments is bigger than the formal parameter
  // count, use it to free up the stack arguments.
  Label corrected_args_count;
  __ CompareAndBranch(params_size, actual_params_size, ge,
                      &corrected_args_count);
  __ Mov(params_size, actual_params_size);
  __ Bind(&corrected_args_count);

  // Leave the frame.
  __ LeaveFrame(StackFrame::MAGLEV);

  // Drop receiver + arguments according to dynamic arguments size.
  __ DropArguments(params_size);
  __ Ret();
}
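// The callee pops its own arguments, but under V8's adaptor-frame-free
// calling convention a caller may have pushed more arguments than the
// function's formal parameter count. Dropping
// max(formal_params_size, actual_params_size) slots therefore frees exactly
// what was pushed, whichever side over-supplied.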