void Int32NegateWithOverflow::SetValueLocationConstraints() {
  UseRegister(value_input());
  DefineAsRegister(this);
}

void Int32NegateWithOverflow::GenerateCode(MaglevAssembler* masm,
                                           const ProcessingState& state) {
  Register value = ToRegister(value_input());
  Register out = ToRegister(result());
  // Deopt when the result would be -0.
  __ CmpS32(value, Operand(0));
  __ EmitEagerDeoptIf(eq, DeoptimizeReason::kOverflow, this);
  __ lcr(out, value);
  __ LoadS32(out, out);
  // The output register must not alias a register input of the eager deopt
  // info; the deopt still needs the original input value.
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
}
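// Note on the pattern used by Int32NegateWithOverflow above and the other
// *WithOverflow nodes below: on s390x the 32-bit arithmetic instructions set
// the condition code (including overflow) but leave the upper 32 bits of the
// 64-bit GPR unspecified, so LoadS32 re-sign-extends the result before it is
// used as a 64-bit value. The DCHECK_REGLIST_EMPTY guards that the output
// register does not alias any register input of the eager deopt info, since a
// taken deopt reads those inputs.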
void Int32AbsWithOverflow::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  Register out = ToRegister(result());
  __ lpr(out, out);  // Load positive (absolute value); overflows for kMinInt.
  __ LoadS32(out, out);
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
}
void Int32IncrementWithOverflow::SetValueLocationConstraints() {
  UseRegister(value_input());
  DefineAsRegister(this);
}

void Int32IncrementWithOverflow::GenerateCode(MaglevAssembler* masm,
                                              const ProcessingState& state) {
  Register value = ToRegister(value_input());
  Register out = ToRegister(result());
  __ AddS32(out, value, Operand(1));
  __ LoadS32(out, out);
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
}
void Int32DecrementWithOverflow::SetValueLocationConstraints() {
  UseRegister(value_input());
  DefineAsRegister(this);
}

void Int32DecrementWithOverflow::GenerateCode(MaglevAssembler* masm,
                                              const ProcessingState& state) {
  Register value = ToRegister(value_input());
  Register out = ToRegister(result());
  __ AddS32(out, value, Operand(-1));
  __ LoadS32(out, out);
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
}
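// BuiltinStringFromCharCode materializes a one-character string from a char
// code. Codes below String::kMaxOneByteCharCode can be fetched from the
// isolate's single-character string table; anything larger needs a freshly
// allocated two-byte string. Unknown (non-constant) codes go through the
// generic StringFromCharCode path, which applies the 0xFFFF mask itself.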
void BuiltinStringFromCharCode::SetValueLocationConstraints() {
  if (code_input().node()->Is<Int32Constant>()) {
    UseAny(code_input());
  } else {
    UseAndClobberRegister(code_input());
  }
  set_temporaries_needed(1);
  DefineAsRegister(this);
}

void BuiltinStringFromCharCode::GenerateCode(MaglevAssembler* masm,
                                             const ProcessingState& state) {
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register scratch = temps.AcquireScratch();
  Register result_string = ToRegister(result());
  if (Int32Constant* constant = code_input().node()->TryCast<Int32Constant>()) {
    int32_t char_code = constant->value() & 0xFFFF;
    if (0 <= char_code && char_code < String::kMaxOneByteCharCode) {
      __ LoadSingleCharacterString(result_string, char_code);
    } else {
      // Only one scratch register is available, so reallocate the result if
      // it aliases the scratch.
      bool reallocate_result = (scratch == result_string);
      if (reallocate_result) result_string = temps.AcquireScratch();
      DCHECK(scratch != result_string);
      __ AllocateTwoByteString(register_snapshot(), result_string, 1);
      __ Move(scratch, char_code);
      __ StoreU16(scratch,
                  FieldMemOperand(result_string,
                                  OFFSET_OF_DATA_START(SeqTwoByteString)));
      if (reallocate_result) __ Move(ToRegister(result()), result_string);
    }
  } else {
    __ StringFromCharCode(register_snapshot(), nullptr, result_string,
                          ToRegister(code_input()), scratch,
                          MaglevAssembler::CharCodeMaskMode::kMustApplyMask);
  }
}
void InlinedAllocation::GenerateCode(MaglevAssembler* masm,
                                     const ProcessingState& state) {
  // The inlined allocation's address is the enclosing allocation block's
  // address plus this allocation's offset within it.
  __ AddS64(ToRegister(result()), ToRegister(allocation_block_input()),
            Operand(offset()));
}
void ArgumentsLength::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  Register argc = ToRegister(result());
  __ LoadU64(argc, MemOperand(fp, StandardFrameConstants::kArgCOffset));
  __ SubS64(argc, Operand(1));  // Remove the receiver.
}
void RestLength::GenerateCode(MaglevAssembler* masm,
                              const ProcessingState& state) {
  Register length = ToRegister(result());
  Label done;
  __ LoadU64(length, MemOperand(fp, StandardFrameConstants::kArgCOffset));
  __ SubS32(length, Operand(formal_parameter_count() + 1));
  __ bge(&done);
  __ Move(length, 0);
  __ bind(&done);
  __ UncheckedSmiTagInt32(length);
}
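// The next node checks that a 64-bit (intptr-sized) value fits in an int32 by
// comparing it against both int32 bounds, deopting with kNotInt32 otherwise.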
void CheckedIntPtrToInt32::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  Register input_reg = ToRegister(input());
  Label* deopt = __ GetDeoptLabel(this, DeoptimizeReason::kNotInt32);
  __ CmpS64(input_reg, Operand(std::numeric_limits<int32_t>::max()));
  __ JumpIf(gt, deopt);
  __ CmpS64(input_reg, Operand(std::numeric_limits<int32_t>::min()));
  __ JumpIf(lt, deopt);
}
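// SameValue semantics differ from ordinary float64 equality in exactly two
// places: NaN is the same value as NaN, and +0.0 is not the same value as
// -0.0. That is why the node below special-cases an expected NaN (a single
// NaN check suffices) and an expected zero (the bit patterns must match, not
// just the numeric values).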
void CheckFloat64SameValue::SetValueLocationConstraints() {
  UseRegister(target_input());
  set_temporaries_needed((value().get_scalar() == 0) ? 1 : 0);
  set_double_temporaries_needed(value().is_nan() ? 0 : 1);
}

void CheckFloat64SameValue::GenerateCode(MaglevAssembler* masm,
                                         const ProcessingState& state) {
  Label* fail = __ GetDeoptLabel(this, deoptimize_reason());
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  DoubleRegister target = ToDoubleRegister(target_input());
  if (value().is_nan()) {
    // All NaNs are the same value under SameValue, so any NaN passes.
    __ JumpIfNotNan(target, fail);
  } else {
    DoubleRegister double_scratch = temps.AcquireScratchDouble();
    __ Move(double_scratch, value());
    __ CompareFloat64AndJumpIf(double_scratch, target, kNotEqual, fail, fail);
    if (value().get_scalar() == 0) {
      // +0.0 and -0.0 compare equal numerically but are different SameValues;
      // compare the bit patterns to tell them apart.
      Register scratch = temps.AcquireScratch();
      __ MovDoubleToInt64(scratch, target);
      __ CmpU64(scratch, Operand(0));
      // Expected +0.0 must see bit pattern 0; expected -0.0 must not.
      __ JumpIf(value().get_bits() == 0 ? ne : eq, fail);
    }
  }
}
void Int32AddWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}

void Int32AddWithOverflow::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  Register left = ToRegister(left_input());
  Register right = ToRegister(right_input());
  Register out = ToRegister(result());
  __ AddS32(out, left, right);
  __ LoadS32(out, out);
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
}
void Int32SubtractWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}

void Int32SubtractWithOverflow::GenerateCode(MaglevAssembler* masm,
                                             const ProcessingState& state) {
  Register left = ToRegister(left_input());
  Register right = ToRegister(right_input());
  Register out = ToRegister(result());
  __ SubS32(out, left, right);
  __ LoadS32(out, out);
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
}
void Int32MultiplyWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
  set_temporaries_needed(1);
}

void Int32MultiplyWithOverflow::GenerateCode(MaglevAssembler* masm,
                                             const ProcessingState& state) {
  Register left = ToRegister(left_input());
  Register right = ToRegister(right_input());
  Register out = ToRegister(result());

  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register temp = temps.AcquireScratch();
  // Assumed: the elided CPU-feature-dependent path may pick a different
  // condition for the overflow check below.
  Condition cond = overflow;
  // ... (elided: on CPUs without 32-bit multiply-with-overflow support, the
  // product is computed in 64 bits from sign-extended operands instead)
  __ lgfr(temp, right);
  // ...
  // Save the combined sign bits of the inputs for the -0 check below; the
  // multiply may clobber them.
  __ Or(temp, left, right);
  __ MulS32(out, left, right);
  __ LoadS32(out, out);
  // ...
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(cond, DeoptimizeReason::kOverflow, this);

  // If the result is zero and either input was negative, the mathematical
  // result is -0, which is not representable as an int32.
  // ... (elided: zero check on {out} and sign check on {temp})
  __ EmitEagerDeoptIf(lt, DeoptimizeReason::kOverflow, this);
}
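// Division cannot rely on an overflow flag: the divide instruction traps or
// produces an unusable result for the problematic inputs. The node below
// therefore pre-checks the inputs on a deferred (out-of-line) path whenever
// the divisor is not known to be a positive non-zero value, covering division
// by zero, 0 / negative (which yields -0), and kMinInt / -1 (which overflows).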
void Int32DivideWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}

void Int32DivideWithOverflow::GenerateCode(MaglevAssembler* masm,
                                           const ProcessingState& state) {
  Register left = ToRegister(left_input());
  Register right = ToRegister(right_input());
  Register out = ToRegister(result());

  // Check whether {right} is positive and not zero; otherwise take the
  // deferred slow path.
  __ CmpS32(right, Operand(0));
  ZoneLabelRef done(masm);
  __ JumpToDeferredIf(
      le,
      [](MaglevAssembler* masm, ZoneLabelRef done, Register left,
         Register right, Int32DivideWithOverflow* node) {
        // {right} is negative or zero.
        Label* deopt = __ GetDeoptLabel(node, DeoptimizeReason::kNotInt32);
        // Deopt if {right} is zero (division by zero); the flags from the
        // compare above are still live here.
        __ JumpIf(eq, deopt);
        // Deopt if {left} is zero, since the result would be -0.
        __ CmpS32(left, Operand(0));
        __ JumpIf(eq, deopt);
        // Deopt for kMinInt / -1, whose result overflows int32.
        __ CmpS32(left, Operand(kMinInt));
        __ JumpIf(ne, *done);
        __ CmpS32(right, Operand(-1));
        __ JumpIf(ne, *done);
        __ JumpToDeopt(deopt);
      },
      done, left, right, this);
  __ bind(*done);

  // Perform the actual integer division.
  __ DivS32(out, left, right);
  __ LoadS32(out, out);

  // Check that the remainder is zero; otherwise the JS result is not an int32.
  // ... (elided: compare of the remainder against zero)
  __ EmitEagerDeoptIf(ne, DeoptimizeReason::kNotInt32, this);
}
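// The modulus node uses (roughly) the algorithm documented in V8's
// effect-control-linearizer:
//   if rhs <= 0: rhs = -rhs, deopting if rhs was 0
//   if lhs < 0:  compute -(-lhs % rhs), deopting if it is 0 (that would be -0)
//   otherwise:   if rhs is a power of two, lhs & (rhs - 1), else lhs % rhs
// This works because the sign of a JS % result follows the dividend, so both
// operands can be made non-negative first.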
void Int32ModulusWithOverflow::SetValueLocationConstraints() {
  UseAndClobberRegister(left_input());
  UseAndClobberRegister(right_input());
  DefineAsRegister(this);
}

void Int32ModulusWithOverflow::GenerateCode(MaglevAssembler* masm,
                                            const ProcessingState& state) {
  Register lhs = ToRegister(left_input());
  Register rhs = ToRegister(right_input());
  Register out = ToRegister(result());

  static constexpr DeoptimizeReason deopt_reason =
      DeoptimizeReason::kDivisionByZero;

  if (lhs == rhs) {
    // For the algorithm above, lhs and rhs must not alias each other.
    // lhs % lhs is 0, except for a negative lhs, where it is -0 and deopts.
    __ Move(out, 0);
    __ CmpS32(lhs, Operand(0));
    __ EmitEagerDeoptIf(lt, deopt_reason, this);
    return;
  }

  DCHECK(!AreAliased(lhs, rhs));

  ZoneLabelRef done(masm);
  ZoneLabelRef rhs_checked(masm);

  // If rhs <= 0, negate it; deopt if it was zero.
  __ CmpS32(rhs, Operand(0));
  __ JumpToDeferredIf(
      le,
      [](MaglevAssembler* masm, ZoneLabelRef rhs_checked, Register rhs,
         Int32ModulusWithOverflow* node) {
        __ lcr(rhs, rhs);  // Negate rhs; sets the condition code.
        __ bne(*rhs_checked);
        __ EmitEagerDeopt(node, deopt_reason);
      },
      rhs_checked, rhs, this);
  __ bind(*rhs_checked);

  // If lhs < 0, compute -(-lhs % rhs) out of line; a zero result there means
  // -0 and deopts.
  __ CmpS32(lhs, Operand(0));
  __ JumpToDeferredIf(
      lt,
      [](MaglevAssembler* masm, ZoneLabelRef done, Register lhs, Register rhs,
         Register out, Int32ModulusWithOverflow* node) {
        __ lcr(lhs, lhs);  // lhs = -lhs.
        __ ModU32(out, lhs, rhs);
        __ lcr(out, out);  // out = -out; sets the condition code.
        __ bne(*done);
        __ EmitEagerDeopt(node, deopt_reason);
      },
      done, lhs, rhs, out, this);

  Label rhs_not_power_of_2;
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register mask = temps.AcquireScratch();
  __ AddS32(mask, rhs, Operand(-1));
  // ... (elided: test rhs & mask, setting the condition code)
  __ JumpIf(ne, &rhs_not_power_of_2);

  // {rhs} is a power of 2: lhs % rhs == lhs & (rhs - 1).
  __ And(out, mask, lhs);
  __ b(*done);
  // {mask} can be reused from here on.
  temps.IncludeScratch(mask);

  __ bind(&rhs_not_power_of_2);
  __ ModU32(out, lhs, rhs);
  __ bind(*done);
  __ LoadS32(out, out);
}
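// The plain bitwise and shift operations below cannot overflow and so never
// deopt; they only need the usual LoadS32 re-sign-extension. Shift counts are
// masked with 31 first, matching ECMAScript's "ToUint32(count) modulo 32"
// rule for shift operators.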
#define DEF_BITWISE_BINOP(Instruction, opcode)                   \
  void Instruction::SetValueLocationConstraints() {              \
    UseRegister(left_input());                                   \
    UseRegister(right_input());                                  \
    DefineAsRegister(this);                                      \
  }                                                              \
                                                                 \
  void Instruction::GenerateCode(MaglevAssembler* masm,          \
                                 const ProcessingState& state) { \
    Register left = ToRegister(left_input());                    \
    Register right = ToRegister(right_input());                  \
    Register out = ToRegister(result());                         \
    __ opcode(out, left, right);                                 \
    __ LoadS32(out, out);                                        \
  }
DEF_BITWISE_BINOP(Int32BitwiseAnd, And)
DEF_BITWISE_BINOP(Int32BitwiseOr, Or)
DEF_BITWISE_BINOP(Int32BitwiseXor, Xor)
#undef DEF_BITWISE_BINOP
#define DEF_SHIFT_BINOP(Instruction, opcode)                      \
  void Instruction::SetValueLocationConstraints() {               \
    UseRegister(left_input());                                    \
    if (right_input().node()->Is<Int32Constant>()) {              \
      UseAny(right_input());                                      \
    } else {                                                      \
      UseRegister(right_input());                                 \
    }                                                             \
    DefineAsRegister(this);                                       \
  }                                                               \
                                                                  \
  void Instruction::GenerateCode(MaglevAssembler* masm,           \
                                 const ProcessingState& state) {  \
    Register left = ToRegister(left_input());                     \
    Register out = ToRegister(result());                          \
    if (Int32Constant* constant =                                 \
            right_input().node()->TryCast<Int32Constant>()) {     \
      uint32_t shift = constant->value() & 31;                    \
      if (shift == 0) {                                           \
        /* Shifting by zero is a move; the input is already a */  \
        /* sign-extended int32. */                                \
        __ Move(out, left);                                       \
        return;                                                   \
      }                                                           \
      __ opcode(out, left, Operand(shift));                       \
      __ LoadS32(out, out);                                       \
    } else {                                                      \
      MaglevAssembler::TemporaryRegisterScope temps(masm);        \
      Register scratch = temps.AcquireScratch();                  \
      Register right = ToRegister(right_input());                 \
      __ And(scratch, right, Operand(31));                        \
      __ opcode(out, left, scratch);                              \
      __ LoadS32(out, out);                                       \
    }                                                             \
  }
DEF_SHIFT_BINOP(Int32ShiftLeft, ShiftLeftU32)
DEF_SHIFT_BINOP(Int32ShiftRight, ShiftRightS32)
DEF_SHIFT_BINOP(Int32ShiftRightLogical, ShiftRightU32)
#undef DEF_SHIFT_BINOP
void Int32BitwiseNot::SetValueLocationConstraints() {
  UseRegister(value_input());
  DefineAsRegister(this);
}

void Int32BitwiseNot::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  Register value = ToRegister(value_input());
  Register out = ToRegister(result());
  __ Not32(out, value);
  __ LoadS32(out, out);
}
void Float64Add::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}

void Float64Add::GenerateCode(MaglevAssembler* masm,
                              const ProcessingState& state) {
  DoubleRegister left = ToDoubleRegister(left_input());
  DoubleRegister right = ToDoubleRegister(right_input());
  DoubleRegister out = ToDoubleRegister(result());
  __ AddF64(out, left, right);
}
void Float64Subtract::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}

void Float64Subtract::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  DoubleRegister left = ToDoubleRegister(left_input());
  DoubleRegister right = ToDoubleRegister(right_input());
  DoubleRegister out = ToDoubleRegister(result());
  __ SubF64(out, left, right);
}
void Float64Multiply::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}

void Float64Multiply::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  DoubleRegister left = ToDoubleRegister(left_input());
  DoubleRegister right = ToDoubleRegister(right_input());
  DoubleRegister out = ToDoubleRegister(result());
  __ MulF64(out, left, right);
}
void Float64Divide::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}

void Float64Divide::GenerateCode(MaglevAssembler* masm,
                                 const ProcessingState& state) {
  DoubleRegister left = ToDoubleRegister(left_input());
  DoubleRegister right = ToDoubleRegister(right_input());
  DoubleRegister out = ToDoubleRegister(result());
  __ DivF64(out, left, right);
}
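// Float64 modulus and exponentiation have no single s390 instruction, so the
// nodes below call out to C. The caller-saved argument registers r2-r5 are
// preserved manually around PrepareCallCFunction/CallCFunction, and the
// inputs and result live in the C ABI's double argument/return registers.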
void Float64Modulus::SetValueLocationConstraints() {
  // Inputs are pinned to the C ABI's first two double argument registers
  // (assumed d0 and d2 here).
  UseFixed(left_input(), d0);
  UseFixed(right_input(), d2);
  DefineSameAsFirst(this);
}

void Float64Modulus::GenerateCode(MaglevAssembler* masm,
                                  const ProcessingState& state) {
  FrameScope scope(masm, StackFrame::MANUAL);
  __ Push(r2, r3, r4, r5);
  __ PrepareCallCFunction(0, 2);
  __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
  __ Pop(r2, r3, r4, r5);
}
void Float64Negate::GenerateCode(MaglevAssembler* masm,
                                 const ProcessingState& state) {
  DoubleRegister value = ToDoubleRegister(input());
  DoubleRegister out = ToDoubleRegister(result());
  __ lcdbr(out, value);  // Load complement: flips the sign bit.
}
void Float64Abs::GenerateCode(MaglevAssembler* masm,
                              const ProcessingState& state) {
  DoubleRegister in = ToDoubleRegister(input());
  DoubleRegister out = ToDoubleRegister(result());
  __ lpdbr(out, in);  // Load positive: clears the sign bit.
}
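// Math.round ties are rounded toward +infinity, while NearestIntF64 rounds
// ties to even. The kNearest path below compensates: when the difference
// in - round(in) is exactly 0.5, the rounded result was pulled down, so 1.0
// (two additions of the 0.5 already held in a scratch register) is added back.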
void Float64Round::GenerateCode(MaglevAssembler* masm,
                                const ProcessingState& state) {
  DoubleRegister in = ToDoubleRegister(input());
  DoubleRegister out = ToDoubleRegister(result());
  if (kind_ == Kind::kNearest) {
    MaglevAssembler::TemporaryRegisterScope temps(masm);
    DoubleRegister temp = temps.AcquireScratchDouble();
    DoubleRegister temp2 = temps.AcquireScratchDouble();
    __ Move(temp, in);
    __ NearestIntF64(out, in);
    __ SubF64(temp, temp, out);
    __ Move(temp2, 0.5);
    __ CmpF64(temp, temp2);
    Label done;
    __ JumpIf(ne, &done);
    // The tie was rounded down; add 0.5 twice to land on the next higher
    // integer, as Math.round requires.
    __ AddF64(out, out, temp2);
    __ AddF64(out, out, temp2);
    __ bind(&done);
  } else if (kind_ == Kind::kCeil) {
    __ CeilF64(out, in);
  } else if (kind_ == Kind::kFloor) {
    __ FloorF64(out, in);
  }
}
int Float64Exponentiate::MaxCallStackArgs() const { return 0; }
void Float64Exponentiate::SetValueLocationConstraints() {
  // Inputs are pinned to the C ABI's first two double argument registers
  // (assumed d0 and d2 here).
  UseFixed(left_input(), d0);
  UseFixed(right_input(), d2);
  DefineSameAsFirst(this);
}

void Float64Exponentiate::GenerateCode(MaglevAssembler* masm,
                                       const ProcessingState& state) {
  FrameScope scope(masm, StackFrame::MANUAL);
  __ Push(r2, r3, r4, r5);
  __ PrepareCallCFunction(0, 2);
  __ CallCFunction(ExternalReference::ieee754_pow_function(), 0, 2);
  __ Pop(r2, r3, r4, r5);
}
void Float64Ieee754Unary::GenerateCode(MaglevAssembler* masm,
                                       const ProcessingState& state) {
  FrameScope scope(masm, StackFrame::MANUAL);
  __ Push(r2, r3, r4, r5);
  __ PrepareCallCFunction(0, 1);
  __ CallCFunction(ieee_function_ref(), 0, 1);
  __ Pop(r2, r3, r4, r5);
}
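// A typed array's length is derived from its byte length: the bounded size is
// loaded from the object and shifted right by the element-size shift
// (1, 2, or 3 for 2-, 4-, and 8-byte elements; 0 for byte-sized ones).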
void LoadTypedArrayLength::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  Register object = ToRegister(receiver_input());
  Register result_register = ToRegister(result());
  if (v8_flags.debug_code) {
    __ AssertObjectType(object, JS_TYPED_ARRAY_TYPE,
                        AbortReason::kUnexpectedValue);
  }
  __ LoadBoundedSizeFromObject(result_register, object,
                               JSTypedArray::kRawByteLengthOffset);
  int shift_size = ElementsKindToShiftSize(elements_kind_);
  if (shift_size > 0) {
    // Convert the byte length to an element count.
    DCHECK(shift_size == 1 || shift_size == 2 || shift_size == 3);
    __ ShiftRightU64(result_register, result_register, Operand(shift_size));
  }
}
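// A DataView access of element_size bytes at index is in bounds iff
// index <= byte_length - element_size. The node below computes
// byte_length - (element_size - 1) and deopts when the index is greater than
// or equal to that value (or when the subtraction already went negative).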
void CheckJSDataViewBounds::SetValueLocationConstraints() {
  UseRegister(receiver_input());
  UseRegister(index_input());
  set_temporaries_needed(1);
}

void CheckJSDataViewBounds::GenerateCode(MaglevAssembler* masm,
                                         const ProcessingState& state) {
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register object = ToRegister(receiver_input());
  Register index = ToRegister(index_input());
  if (v8_flags.debug_code) {
    __ AssertObjectType(object, JS_DATA_VIEW_TYPE,
                        AbortReason::kUnexpectedValue);
  }

  // Load the DataView's byte length.
  Register byte_length = temps.AcquireScratch();
  __ LoadBoundedSizeFromObject(byte_length, object,
                               JSDataView::kRawByteLengthOffset);

  int element_size = ExternalArrayElementSize(element_type_);
  if (element_size > 1) {
    __ SubS64(byte_length, Operand(element_size - 1));
    __ EmitEagerDeoptIf(lt, DeoptimizeReason::kOutOfBounds, this);
  }
  __ CmpS32(index, byte_length);
  __ EmitEagerDeoptIf(ge, DeoptimizeReason::kOutOfBounds, this);
}
void HoleyFloat64ToMaybeNanFloat64::GenerateCode(MaglevAssembler* masm,
                                                 const ProcessingState& state) {
  DoubleRegister value = ToDoubleRegister(input());
  // The hole value is a signalling NaN, so an arithmetic no-op (subtracting
  // zero) silences it into a quiet NaN without changing any other value.
  __ lzdr(kDoubleRegZero);
  __ SubF64(value, value, kDoubleRegZero);
}
enum class ReduceInterruptBudgetType { kLoop, kReturn };
void HandleInterruptsAndTiering(MaglevAssembler* masm, ZoneLabelRef done,
                                Node* node, ReduceInterruptBudgetType type,
                                Register scratch0) {
  // For loops, first check for interrupts. Stack overflow was already guarded
  // against on function entry, so only the interrupt stack limit matters here.
  if (type == ReduceInterruptBudgetType::kLoop) {
    Label next;
    {
      Register stack_limit = scratch0;
      // ... (elided: load the interrupt stack limit into {stack_limit})
      __ CmpU64(sp, stack_limit);
      __ bgt(&next);
    }

    // An interrupt has been requested; since we must call into the runtime
    // anyway, combine interrupt handling with the tiering-budget call.
    {
      SaveRegisterStateForCall save_register_state(masm,
                                                   node->register_snapshot());
      Register function = scratch0;
      __ LoadU64(function,
                 MemOperand(fp, StandardFrameConstants::kFunctionOffset));
      __ Push(function);
      // Load the context after the function, in case {scratch0} happens to be
      // kContextRegister.
      __ Move(kContextRegister, masm->native_context().object());
      __ CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheck_Maglev, 1);
      save_register_state.DefineSafepointWithLazyDeopt(node->lazy_deopt_info());
    }
    __ b(*done);
    __ bind(&next);
  }

  // No pending interrupts: only the tiering budget ran out.
  {
    SaveRegisterStateForCall save_register_state(masm,
                                                 node->register_snapshot());
    Register function = scratch0;
    __ LoadU64(function,
               MemOperand(fp, StandardFrameConstants::kFunctionOffset));
    __ Push(function);
    __ Move(kContextRegister, masm->native_context().object());
    __ CallRuntime(Runtime::kBytecodeBudgetInterrupt_Maglev, 1);
    save_register_state.DefineSafepoint();
  }
  __ b(*done);
}
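// The interrupt budget is a counter stored in the FeedbackCell. Loop
// back-edges and returns subtract their amount() from it; once it goes
// negative (lt after the subtraction), the deferred handler above services
// any pending interrupt and gives the tiering manager a chance to run.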
void GenerateReduceInterruptBudget(MaglevAssembler* masm, Node* node,
                                   Register feedback_cell,
                                   ReduceInterruptBudgetType type, int amount) {
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register budget = temps.AcquireScratch();
  __ LoadU32(budget, FieldMemOperand(feedback_cell,
                                     FeedbackCell::kInterruptBudgetOffset));
  __ SubS32(budget, Operand(amount));
  __ StoreU32(budget, FieldMemOperand(feedback_cell,
                                      FeedbackCell::kInterruptBudgetOffset));
  ZoneLabelRef done(masm);
  __ JumpToDeferredIf(lt, HandleInterruptsAndTiering, done, node, type, budget);
  __ bind(*done);
}
void ReduceInterruptBudgetForLoop::GenerateCode(MaglevAssembler* masm,
                                                const ProcessingState& state) {
  GenerateReduceInterruptBudget(masm, this, ToRegister(feedback_cell()),
                                ReduceInterruptBudgetType::kLoop, amount());
}
void ReduceInterruptBudgetForReturn::SetValueLocationConstraints() {
  UseRegister(feedback_cell());
  set_temporaries_needed(1);
}

void ReduceInterruptBudgetForReturn::GenerateCode(
    MaglevAssembler* masm, const ProcessingState& state) {
  GenerateReduceInterruptBudget(masm, this, ToRegister(feedback_cell()),
                                ReduceInterruptBudgetType::kReturn, amount());
}
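// Return has to drop the receiver and arguments before returning. Since a
// function can be called with more arguments than its formal parameter count,
// the larger of the two sizes is dropped, as computed below.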
void Return::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
  DCHECK_EQ(ToRegister(value_input()), kReturnRegister0);

  // Read the formal number of parameters from the top-level compilation unit
  // (i.e. the outermost, non-inlined function).
  int formal_params_size =
      masm->compilation_info()->toplevel_compilation_unit()->parameter_count();

  // We're not going to continue execution, so we can use an arbitrary register
  // here (r6 is assumed free once the value is in kReturnRegister0) instead of
  // relying on temporaries from the register allocator.
  Register actual_params_size = r6;

  // Compute the size of the actual parameters plus receiver.
  __ LoadU64(actual_params_size,
             MemOperand(fp, StandardFrameConstants::kArgCOffset));

  // Leave the frame.
  __ LeaveFrame(StackFrame::MAGLEV);

  // If the actual argument count is bigger than the formal parameter count,
  // drop the actual count instead.
  Label drop_dynamic_arg_size;
  __ CmpS32(actual_params_size, Operand(formal_params_size));
  __ bgt(&drop_dynamic_arg_size);
  __ mov(actual_params_size, Operand(formal_params_size));
  __ bind(&drop_dynamic_arg_size);

  // Drop the receiver and arguments according to the dynamic size.
  __ DropArguments(actual_params_size);
  __ Ret();
}