// ---------------------------------------------------------------------------
// Int32NegateWithOverflow — non-contiguous fragments. The number fused onto
// the start of each line is the ORIGINAL file's line number; code is kept
// byte-identical, only comments are added.
// ---------------------------------------------------------------------------
// Register-constraint declaration for the negate node (body not visible here).
21void Int32NegateWithOverflow::SetValueLocationConstraints() {
26void Int32NegateWithOverflow::GenerateCode(MaglevAssembler* masm,
27 const ProcessingState& state) {
// Deopt eagerly if the input is 0: negating 0 yields -0, which is not
// representable as an int32.
32 __ cmp(value, Operand(0));
33 __ EmitEagerDeoptIf(
eq, DeoptimizeReason::kOverflow,
this);
// rsb = ARM reverse-subtract: out = 0 - value. SetCC updates the flags so
// the V (overflow) flag is set when value == kMinInt.
35 __ rsb(out, value, Operand(0),
SetCC);
38 GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
// vs (overflow set) => kMinInt cannot be negated in 32 bits; deopt.
39 __ EmitEagerDeoptIf(
vs, DeoptimizeReason::kOverflow,
this);
// NOTE(review): the fragment below carries original line numbers 51-52 and
// most likely belongs to the NEXT node in the full file (another
// overflow-checked op) — confirm against the complete source.
51 GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
52 __ EmitEagerDeoptIf(
vs, DeoptimizeReason::kOverflow,
this);
// Int32IncrementWithOverflow — fragments; the add instruction itself
// (original lines 63-67) is not visible in this extract.
56void Int32IncrementWithOverflow::SetValueLocationConstraints() {
61void Int32IncrementWithOverflow::GenerateCode(MaglevAssembler* masm,
62 const ProcessingState& state) {
68 GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
// Deopt on signed overflow (vs) of the increment.
69 __ EmitEagerDeoptIf(
vs, DeoptimizeReason::kOverflow,
this);
// Int32DecrementWithOverflow — fragments.
72void Int32DecrementWithOverflow::SetValueLocationConstraints() {
77void Int32DecrementWithOverflow::GenerateCode(MaglevAssembler* masm,
78 const ProcessingState& state) {
// out = value - 1; SetCC so the V flag reflects signed overflow
// (value == kMinInt).
81 __ sub(out, value, Operand(1),
SetCC);
84 GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
// Deopt on signed overflow of the decrement.
85 __ EmitEagerDeoptIf(
vs, DeoptimizeReason::kOverflow,
this);
// BuiltinStringFromCharCode — fragments from the constant and non-constant
// code paths (original lines 97-117).
97 set_temporaries_needed(1);
// Constant path: mask to a 16-bit code unit (JS char codes are mod 2^16).
106 int32_t char_code = constant->value() & 0xFFFF;
// One-byte path: fetch from the single-character string cache.
108 __ LoadSingleCharacterString(result_string, char_code);
// Two-byte path: allocate a fresh 1-element two-byte string and store the
// code unit into it (store itself not visible in this extract).
111 __ AllocateTwoByteString(register_snapshot(), result_string, 1);
112 __ Move(scratch, char_code);
// Non-constant path: generic helper handles cache vs. allocation at runtime.
117 __ StringFromCharCode(register_snapshot(),
nullptr, result_string,
// NOTE(review): original line 160 — belongs to a different node further down
// the file (tags an int32 length as a Smi); confirm against the full source.
160 __ UncheckedSmiTagInt32(length);
// Float64 value-check node — fragments (original lines 177-193). The shape
// (deopt label, NaN branch, CompareFloat64AndJumpIf, VmovHigh sign-bit read)
// suggests a check that a double equals an expected constant with SameValue
// semantics (NaN and -0 handled specially) — confirm node identity against
// the full source.
// A GP scratch is only needed for the -0 sign-bit check; a double scratch is
// only needed when the expected value is not NaN.
177 set_temporaries_needed((
value().get_scalar() == 0) ? 1 : 0);
178 set_double_temporaries_needed(
value().is_nan() ? 0 : 1);
// Failure jumps straight to this node's deopt label.
182 Label* fail =
__ GetDeoptLabel(
this, deoptimize_reason());
// Expected NaN: any non-NaN input fails (NaN != NaN under ==, so a normal
// compare cannot be used).
186 if (
value().is_nan()) {
187 __ JumpIfNotNan(target, fail);
189 __ Move(double_scratch,
value());
// Non-NaN: numeric inequality fails (both jump targets are `fail`).
190 __ CompareFloat64AndJumpIf(double_scratch, target,
kNotEqual, fail, fail);
// Expected 0: 0.0 == -0.0 numerically, so additionally inspect the sign bit
// via the high word of the double.
191 if (
value().get_scalar() == 0) {
193 __ VmovHigh(scratch, target);
// Int32AddWithOverflow — fragments.
200void Int32AddWithOverflow::SetValueLocationConstraints() {
206void Int32AddWithOverflow::GenerateCode(MaglevAssembler* masm,
207 const ProcessingState& state) {
// out = left + right; SetCC so V reflects signed 32-bit overflow.
211 __ add(out, left, right,
SetCC);
215 GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
// Deopt on signed overflow.
216 __ EmitEagerDeoptIf(
vs, DeoptimizeReason::kOverflow,
this);
// Int32SubtractWithOverflow — fragments; mirrors the add node above.
219void Int32SubtractWithOverflow::SetValueLocationConstraints() {
224void Int32SubtractWithOverflow::GenerateCode(MaglevAssembler* masm,
225 const ProcessingState& state) {
// out = left - right; SetCC so V reflects signed 32-bit overflow.
229 __ sub(out, left, right,
SetCC);
233 GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
234 __ EmitEagerDeoptIf(
vs, DeoptimizeReason::kOverflow,
this);
// Int32MultiplyWithOverflow — fragments. Uses a 32x32->64 multiply and
// checks that the high word is pure sign extension of the low word.
237void Int32MultiplyWithOverflow::SetValueLocationConstraints() {
242void Int32MultiplyWithOverflow::GenerateCode(MaglevAssembler* masm,
243 const ProcessingState& state) {
250 MaglevAssembler::TemporaryRegisterScope temps(masm);
// If out aliases an input we must not clobber it before the checks; compute
// into a scratch and move at the end.
251 bool out_alias_input = out == left || out == right;
253 if (out_alias_input) {
254 res_low = temps.AcquireScratch();
256 Register res_high = temps.AcquireScratch();
// smull: signed 64-bit product into {res_high, res_low}.
257 __ smull(res_low, res_high, left, right);
// Result fits int32 iff res_high == res_low >> 31 (sign extension).
261 __ cmp(res_high, Operand(res_low,
ASR, 31));
262 __ EmitEagerDeoptIf(
ne, DeoptimizeReason::kOverflow,
this);
// Zero result needs a -0 check: if either operand is negative the JS result
// is -0, which is not an int32.
266 __ tst(res_low, res_low);
// orr sets N (mi) when left | right has the sign bit set.
269 __ orr(temp, left, right,
SetCC);
272 __ EmitEagerDeoptIf(
mi, DeoptimizeReason::kOverflow,
this);
275 if (out_alias_input) {
276 __ Move(out, res_low);
// Int32DivideWithOverflow — fragments. Division deopts (kNotInt32) when the
// JS result cannot be an int32: divide-by-zero, -0 cases, kMinInt / -1, or a
// non-integer quotient.
280void Int32DivideWithOverflow::SetValueLocationConstraints() {
285void Int32DivideWithOverflow::GenerateCode(MaglevAssembler* masm,
286 const ProcessingState& state) {
// Slow checks are deferred; the common case (right > 0) falls through.
298 __ cmp(right, Operand(0));
299 ZoneLabelRef done(masm);
302 [](MaglevAssembler* masm, ZoneLabelRef done, Register left,
303 Register right, Int32DivideWithOverflow* node) {
310 Label* deopt =
__ GetDeoptLabel(node, DeoptimizeReason::kNotInt32);
// (deferred) right == 0 => deopt.
314 __ JumpIf(
eq, deopt);
// (deferred) second eq-check — per the surrounding pattern this guards the
// left == 0 / -0 case; the preceding cmp is not visible in this extract.
318 __ JumpIf(
eq, deopt);
// (deferred) kMinInt / -1 overflows; only that exact pair deopts.
323 __ JumpIf(
ne, *done);
324 __ cmp(right, Operand(-1));
325 __ JumpIf(
ne, *done);
326 __ JumpToDeopt(deopt);
328 done, left, right,
this);
332 MaglevAssembler::TemporaryRegisterScope temps(masm);
333 bool out_alias_input = out == left || out == right;
335 if (out_alias_input) {
336 res = temps.AcquireScratch();
// Hardware integer divide when SUDIV is available...
339 CpuFeatureScope scope(masm, SUDIV);
340 __ sdiv(res, left, right);
// ...otherwise fall back to VFP double division and truncate back.
342 UseScratchRegisterScope temps(masm);
343 LowDwVfpRegister double_right = temps.AcquireLowD();
344 SwVfpRegister tmp = double_right.low();
345 DwVfpRegister double_left = temps.AcquireD();
346 DwVfpRegister double_res = double_left;
348 __ vcvt_f64_s32(double_left, tmp);
350 __ vcvt_f64_s32(double_right, tmp);
351 __ vdiv(double_res, double_left, double_right);
352 __ vcvt_s32_f64(tmp, double_res);
// Verify the division was exact: res * right must reproduce left (the cmp
// against left is not visible in this extract); otherwise deopt.
357 Register temp = temps.AcquireScratch();
358 __ mul(temp, res, right);
360 __ EmitEagerDeoptIf(
ne, DeoptimizeReason::kNotInt32,
this);
// Uint32Mod helper — fragments. Computes out = left % right for unsigned
// 32-bit operands via out = left - (left / right) * right.
366void Uint32Mod(MaglevAssembler* masm, Register out, Register left,
368 MaglevAssembler::TemporaryRegisterScope temps(masm);
369 Register res = temps.AcquireScratch();
// Hardware unsigned divide when SUDIV is available...
371 CpuFeatureScope scope(masm, SUDIV);
372 __ udiv(res, left, right);
// ...otherwise VFP double division fallback (same pattern as the divide
// node above).
374 UseScratchRegisterScope temps(masm);
375 LowDwVfpRegister double_right = temps.AcquireLowD();
376 SwVfpRegister tmp = double_right.low();
377 DwVfpRegister double_left = temps.AcquireD();
378 DwVfpRegister double_res = double_left;
380 __ vcvt_f64_s32(double_left, tmp);
382 __ vcvt_f64_s32(double_right, tmp);
383 __ vdiv(double_res, double_left, double_right);
384 __ vcvt_s32_f64(tmp, double_res);
// mls (multiply-and-subtract, single instruction) when available —
// presumably guarded by a CpuFeatures check not visible here; the mul+sub
// pair below is the equivalent fallback.
388 __ mls(out, res, right, left);
390 __ mul(res, res, right);
391 __ sub(out, left, res);
// Int32ModulusWithOverflow — fragments. Reduces signed modulus to the
// unsigned Uint32Mod helper, with deferred handling for negative operands
// and a fast path for power-of-two divisors.
396void Int32ModulusWithOverflow::SetValueLocationConstraints() {
401void Int32ModulusWithOverflow::GenerateCode(MaglevAssembler* masm,
402 const ProcessingState& state) {
428 DeoptimizeReason::kDivisionByZero;
// lhs < 0 with a zero result would be -0 => deopt (mi = negative).
436 __ EmitEagerDeoptIf(
mi, deopt_reason,
this);
443 ZoneLabelRef done(masm);
444 ZoneLabelRef rhs_checked(masm);
// rhs <= 0 handled in deferred code: negate rhs (modulus sign follows lhs
// in JS, so |rhs| suffices); rhs == 0 deopts.
445 __ cmp(rhs, Operand(0));
448 [](MaglevAssembler* masm, ZoneLabelRef rhs_checked, Register rhs,
449 Int32ModulusWithOverflow* node) {
// rhs = 0 - rhs; SetCC leaves eq when rhs was 0.
450 __ rsb(rhs, rhs, Operand(0),
SetCC);
451 __ b(
ne, *rhs_checked);
452 __ EmitEagerDeopt(node, deopt_reason);
454 rhs_checked, rhs,
this);
455 __ bind(*rhs_checked);
// lhs < 0 handled in deferred code: out = -((-lhs) mod rhs); a zero result
// means -0 => deopt.
457 __ cmp(lhs, Operand(0));
460 [](MaglevAssembler* masm, ZoneLabelRef done, Register lhs, Register rhs,
461 Register out, Int32ModulusWithOverflow* node) {
462 __ rsb(lhs, lhs, Operand(0));
463 Uint32Mod(masm, out, lhs, rhs);
// SetCC: eq after the negate means out == 0, i.e. the -0 case.
464 __ rsb(out, out, Operand(0),
SetCC);
468 __ EmitEagerDeopt(node, deopt_reason);
470 done, lhs, rhs, out,
this);
// Fast path: power-of-two rhs => modulus is a bit-mask (mask = rhs - 1).
472 Label rhs_not_power_of_2;
474 MaglevAssembler::TemporaryRegisterScope temps(masm);
476 __ add(
mask, rhs, Operand(-1));
// (rhs & mask) != 0 => not a power of two (the tst is not visible here).
478 __ JumpIf(
ne, &rhs_not_power_of_2);
486 __ bind(&rhs_not_power_of_2);
// General case: unsigned modulus of the (now non-negative) operands.
487 Uint32Mod(masm, out, lhs, rhs);
// Macro stamping out constraint + codegen definitions for simple bitwise
// binary nodes: both inputs in registers, result in a register, one ARM
// instruction (`opcode`) for the operation.
491#define DEF_BITWISE_BINOP(Instruction, opcode) \
492 void Instruction::SetValueLocationConstraints() { \
493 UseRegister(left_input()); \
494 UseRegister(right_input()); \
495 DefineAsRegister(this); \
498 void Instruction::GenerateCode(MaglevAssembler* masm, \
499 const ProcessingState& state) { \
500 Register left = ToRegister(left_input()); \
501 Register right = ToRegister(right_input()); \
502 Register out = ToRegister(result()); \
503 __ opcode(out, left, right); \
508#undef DEF_BITWISE_BINOP
// Macro stamping out shift nodes. Constant shift amounts are folded at
// compile time; variable amounts are masked with 31 (JS shift semantics:
// amount is taken mod 32).
510#define DEF_SHIFT_BINOP(Instruction, opcode) \
511 void Instruction::SetValueLocationConstraints() { \
512 UseRegister(left_input()); \
513 if (right_input().node()->Is<Int32Constant>()) { \
514 UseAny(right_input()); \
516 UseRegister(right_input()); \
518 DefineAsRegister(this); \
520 void Instruction::GenerateCode(MaglevAssembler* masm, \
521 const ProcessingState& state) { \
522 Register left = ToRegister(left_input()); \
523 Register out = ToRegister(result()); \
524 if (Int32Constant* constant = \
525 right_input().node()->TryCast<Int32Constant>()) { \
526 uint32_t shift = constant->value() & 31; \
/* shift == 0: no shift instruction needed, just move the input. */ \
531 __ Move(out, left); \
533 __ opcode(out, left, Operand(shift)); \
536 MaglevAssembler::TemporaryRegisterScope temps(masm); \
537 Register scratch = temps.AcquireScratch(); \
538 Register right = ToRegister(right_input()); \
/* Mask the runtime amount to 0..31 before shifting. */ \
539 __ and_(scratch, right, Operand(31)); \
540 __ opcode(out, left, Operand(scratch)); \
546#undef DEF_SHIFT_BINOP
// Float64Add — double addition via a single VFP vadd.
560void Float64Add::SetValueLocationConstraints() {
566void Float64Add::GenerateCode(MaglevAssembler* masm,
567 const ProcessingState& state) {
571 __ vadd(out, left, right);
// Float64Subtract — double subtraction via vsub.
574void Float64Subtract::SetValueLocationConstraints() {
580void Float64Subtract::GenerateCode(MaglevAssembler* masm,
581 const ProcessingState& state) {
585 __ vsub(out, left, right);
// Float64Multiply — double multiplication via vmul.
588void Float64Multiply::SetValueLocationConstraints() {
594void Float64Multiply::GenerateCode(MaglevAssembler* masm,
595 const ProcessingState& state) {
599 __ vmul(out, left, right);
// Float64Divide — double division via vdiv.
602void Float64Divide::SetValueLocationConstraints() {
608void Float64Divide::GenerateCode(MaglevAssembler* masm,
609 const ProcessingState& state) {
613 __ vdiv(out, left, right);
// Float64Modulus — fmod has no VFP instruction, so this calls out to the
// C runtime. No stack arguments are needed (both doubles go in registers).
616int Float64Modulus::MaxCallStackArgs()
const {
return 0; }
617void Float64Modulus::SetValueLocationConstraints() {
622void Float64Modulus::GenerateCode(MaglevAssembler* masm,
623 const ProcessingState& state) {
// 0 GP args, 2 double args.
625 __ PrepareCallCFunction(0, 2);
628 __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
// Fragments (original lines 664-672) from a Float64 rounding node: computes
// the fractional part (vsub), compares it against 0.5, and adjusts the
// result by adding 0.5 twice on some path — NOTE(review): presumably the
// tie-breaking correction of round-to-nearest; confirm which rounding node
// this belongs to against the full source.
664 __ vsub(temp, temp, out);
666 __ Move(half_one, 0.5);
667 __ VFPCompareAndSetFlags(temp, half_one);
671 __ vadd(out, out, half_one);
672 __ vadd(out, out, half_one);
// Float64Exponentiate — pow via the ieee754 C runtime function; both double
// operands passed in float registers, result read back from the float
// return register.
681int Float64Exponentiate::MaxCallStackArgs()
const {
return 0; }
682void Float64Exponentiate::SetValueLocationConstraints() {
687void Float64Exponentiate::GenerateCode(MaglevAssembler* masm,
688 const ProcessingState& state) {
693 __ PrepareCallCFunction(0, 2);
694 __ MovToFloatParameters(left, right);
695 __ CallCFunction(ExternalReference::ieee754_pow_function(), 0, 2);
696 __ MovFromFloatResult(out);
// Fragments (original lines 709-712) of a one-double-argument C call —
// NOTE(review): the call target is not visible; per the surrounding file
// this is presumably the Float64Ieee754Unary node (sin/cos/log/etc. via
// ieee_function_ref()); confirm against the full source.
709 __ PrepareCallCFunction(0, 1);
710 __ MovToFloatParameter(value);
712 __ MovFromFloatResult(out);
// Fragments of a typed-array length load: debug-assert the receiver is a
// JSTypedArray, load its (bounds-checked) raw byte length, then shift right
// by the element-size shift to convert bytes -> element count.
724 __ AssertObjectType(
object, JS_TYPED_ARRAY_TYPE,
725 AbortReason::kUnexpectedValue);
727 __ LoadBoundedSizeFromObject(result_register,
object,
728 JSTypedArray::kRawByteLengthOffset);
730 if (shift_size > 0) {
// Element sizes are 2/4/8 bytes => shift of 1/2/3.
732 DCHECK(shift_size == 1 || shift_size == 2 || shift_size == 3);
733 __ lsr(result_register, result_register,
Operand(shift_size));
// Fragments of a DataView bounds check: verify index + element_size fits in
// the view's byte length, deopting with kOutOfBounds otherwise.
741 set_temporaries_needed(1);
749 __ AssertObjectType(
object, JS_DATA_VIEW_TYPE,
750 AbortReason::kUnexpectedValue);
754 Register byte_length = temps.Acquire();
755 __ LoadBoundedSizeFromObject(byte_length,
object,
756 JSDataView::kRawByteLengthOffset);
// Shrink the valid range so index + (element_size - 1) is still in bounds;
// SetCC + mi catches byte_length < element_size - 1 (underflow).
759 if (element_size > 1) {
760 __ sub(byte_length, byte_length,
Operand(element_size - 1),
SetCC);
761 __ EmitEagerDeoptIf(
mi, DeoptimizeReason::kOutOfBounds,
this);
// Unsigned compare (hs = unsigned >=) also rejects negative indices.
763 __ cmp(index, byte_length);
764 __ EmitEagerDeoptIf(
hs, DeoptimizeReason::kOutOfBounds,
this);
// Deferred slow path for interrupt-budget exhaustion. For loops it first
// checks the stack limit and calls the with-stack-check runtime entry (which
// can lazily deopt); for returns it calls the plain budget-interrupt entry.
780enum class ReduceInterruptBudgetType { kLoop, kReturn };
782void HandleInterruptsAndTiering(MaglevAssembler* masm, ZoneLabelRef done,
783 Node* node, ReduceInterruptBudgetType type,
787 if (type == ReduceInterruptBudgetType::kLoop) {
792 Register stack_limit = scratch0;
// Compare sp against the interrupt stack limit.
794 __ cmp(sp, stack_limit);
// Runtime call clobbers registers: snapshot + safepoint required.
802 SaveRegisterStateForCall save_register_state(masm,
803 node->register_snapshot());
804 Register function = scratch0;
810 __ CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheck_Maglev, 1);
// This entry can trigger lazy deopt, hence the lazy-deopt safepoint.
811 save_register_state.DefineSafepointWithLazyDeopt(node->lazy_deopt_info());
819 SaveRegisterStateForCall save_register_state(masm,
820 node->register_snapshot());
828 __ CallRuntime(Runtime::kBytecodeBudgetInterrupt_Maglev, 1);
829 save_register_state.DefineSafepoint();
// Subtracts `amount` from the feedback cell's interrupt budget; when the
// budget goes negative (lt after SetCC), jumps to the deferred
// HandleInterruptsAndTiering slow path.
834void GenerateReduceInterruptBudget(MaglevAssembler* masm, Node* node,
835 Register feedback_cell,
836 ReduceInterruptBudgetType type,
int amount) {
837 MaglevAssembler::TemporaryRegisterScope temps(masm);
841 __ sub(budget, budget, Operand(amount),
SetCC);
844 ZoneLabelRef done(masm);
845 __ JumpToDeferredIf(
lt, HandleInterruptsAndTiering, done, node, type, budget);
// Thin node wrappers over GenerateReduceInterruptBudget — one for loop
// back-edges, one for function returns; each needs one temporary register.
854 set_temporaries_needed(1);
859 ReduceInterruptBudgetType::kLoop,
amount());
865 set_temporaries_needed(1);
870 ReduceInterruptBudgetType::kReturn,
amount());
// Fragments of the function-return epilogue: drop max(formal, actual)
// arguments (a caller may pass more arguments than the formal count, and
// all of them must be popped).
884 int formal_params_size =
// Actual argument count is read from the frame.
895 __ ldr(actual_params_size,
899 __ LeaveFrame(StackFrame::MAGLEV);
903 Label corrected_args_count;
904 __ Move(params_size, formal_params_size);
// If actual > formal, use the actual count instead (the conditional branch
// around the Move is not visible in this extract).
905 __ cmp(params_size, actual_params_size);
907 __ Move(params_size, actual_params_size);
908 __ bind(&corrected_args_count);
911 __ DropArguments(params_size);
int GetStackParameterCount() const
static bool IsSupported(CpuFeature f)
static constexpr int kArgCOffset
static constexpr int kFunctionOffset
static const int32_t kMaxOneByteCharCode
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void SetValueLocationConstraints()
void GenerateCode(MaglevAssembler *, const ProcessingState &)
int MaxCallStackArgs() const
void SetValueLocationConstraints()
void SetValueLocationConstraints()
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
int MaxCallStackArgs() const
ExternalArrayType element_type_
void SetValueLocationConstraints()
void SetValueLocationConstraints()
void GenerateCode(MaglevAssembler *, const ProcessingState &)
int MaxCallStackArgs() const
void GenerateCode(MaglevAssembler *, const ProcessingState &)
ExternalReference ieee_function_ref() const
void SetValueLocationConstraints()
void GenerateCode(MaglevAssembler *, const ProcessingState &)
int MaxCallStackArgs() const
void SetValueLocationConstraints()
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void SetValueLocationConstraints()
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void SetValueLocationConstraints()
Input & allocation_block_input()
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void SetValueLocationConstraints()
void SetValueLocationConstraints()
void GenerateCode(MaglevAssembler *, const ProcessingState &)
ElementsKind elements_kind_
DoubleRegister AcquireDouble()
Register AcquireScratch()
DoubleRegister AcquireScratchDouble()
MaglevCompilationInfo * compilation_info() const
MaglevCompilationUnit * toplevel_compilation_unit() const
uint16_t parameter_count() const
void GenerateCode(MaglevAssembler *, const ProcessingState &)
int MaxCallStackArgs() const
void SetValueLocationConstraints()
int MaxCallStackArgs() const
void SetValueLocationConstraints()
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void SetValueLocationConstraints()
int formal_parameter_count() const
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void SetValueLocationConstraints()
void GenerateCode(MaglevAssembler *, const ProcessingState &)
ZoneVector< RpoNumber > & result
#define DEF_SHIFT_BINOP(Instruction, opcode)
#define DEF_BITWISE_BINOP(Instruction, opcode)
#define DCHECK_REGLIST_EMPTY(...)
int ExternalArrayElementSize(const ExternalArrayType element_type)
void DefineAsRegister(Node *node)
DoubleRegister ToDoubleRegister(const compiler::InstructionOperand &operand)
void DefineSameAsFirst(Node *node)
Register ToRegister(const compiler::InstructionOperand &operand)
void UseAndClobberRegister(Input &input)
void UseAny(Input &input)
void UseRegister(Input &input)
void UseFixed(Input &input, Register reg)
bool TryCast(Tagged< From > value, Tagged< To > *out)
bool Is(IndirectHandle< U > value)
DwVfpRegister DoubleRegister
RegListBase< Register > RegList
MemOperand FieldMemOperand(Register object, int offset)
constexpr int ElementsKindToShiftSize(ElementsKind elements_kind)
constexpr Register kReturnRegister0
constexpr Register kContextRegister
V8_EXPORT_PRIVATE FlagValues v8_flags
#define DCHECK_NE(v1, v2)
#define DCHECK(condition)
#define DCHECK_EQ(v1, v2)
#define OFFSET_OF_DATA_START(Type)