39 const ProcessingState& state) {
49 const ProcessingState& state) {
58 const ProcessingState& state) {
66 __ UncheckedSmiTagInt32(length);
74 const ProcessingState& state) {
78 __ AssertNotSmi(
object);
82 __ LoadBoundedSizeFromObject(result_register,
object,
83 JSTypedArray::kRawByteLengthOffset);
87 DCHECK(shift_size == 1 || shift_size == 2 || shift_size == 3);
88 __ shrq(result_register, Immediate(shift_size));
97 const ProcessingState& state) {
102 __ AssertNotSmi(
object);
108 __ LoadBoundedSizeFromObject(byte_length,
object,
109 JSDataView::kRawByteLengthOffset);
112 if (element_size > 1) {
113 __ subq(byte_length, Immediate(element_size - 1));
114 __ EmitEagerDeoptIf(
negative, DeoptimizeReason::kOutOfBounds,
this);
116 __ cmpl(index, byte_length);
117 __ EmitEagerDeoptIf(
above_equal, DeoptimizeReason::kOutOfBounds,
this);
130 const ProcessingState& state) {
136 __ EmitEagerDeoptIf(
not_equal, DeoptimizeReason::kNotInt32,
this);
143 const ProcessingState& state) {
144 Label* fail =
__ GetDeoptLabel(
this, deoptimize_reason());
145 MaglevAssembler::TemporaryRegisterScope temps(masm);
148 if (
value().is_nan()) {
149 __ JumpIfNotNan(target, fail);
151 __ Move(double_scratch,
value());
152 __ CompareFloat64AndJumpIf(double_scratch, target,
kNotEqual, fail, fail);
153 if (
value().get_scalar() == 0) {
154 Register scratch = temps.AcquireScratch();
155 __ movq(scratch, target);
156 __ testq(scratch, scratch);
170 set_temporaries_needed(1);
175 const ProcessingState& state) {
178 int32_t char_code = constant->value() & 0xFFFF;
180 __ LoadSingleCharacterString(result_string, char_code);
182 __ AllocateTwoByteString(register_snapshot(), result_string, 1);
185 Immediate(char_code));
188 MaglevAssembler::TemporaryRegisterScope temps(masm);
191 __ StringFromCharCode(register_snapshot(),
nullptr, result_string,
197void Int32AddWithOverflow::SetValueLocationConstraints() {
199 if (TryGetInt32ConstantInput(kRightIndex)) {
207void Int32AddWithOverflow::GenerateCode(MaglevAssembler* masm,
208 const ProcessingState& state) {
210 if (!right_input().operand().IsRegister()) {
211 auto right_const = TryGetInt32ConstantInput(kRightIndex);
213 __ addl(left, Immediate(*right_const));
216 __ addl(left, right);
221 GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
222 __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow,
this);
225void Int32SubtractWithOverflow::SetValueLocationConstraints() {
227 if (TryGetInt32ConstantInput(kRightIndex)) {
235void Int32SubtractWithOverflow::GenerateCode(MaglevAssembler* masm,
236 const ProcessingState& state) {
238 if (!right_input().operand().IsRegister()) {
239 auto right_const = TryGetInt32ConstantInput(kRightIndex);
241 __ subl(left, Immediate(*right_const));
244 __ subl(left, right);
249 GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
250 __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow,
this);
253void Int32MultiplyWithOverflow::SetValueLocationConstraints() {
257 set_temporaries_needed(1);
260void Int32MultiplyWithOverflow::GenerateCode(MaglevAssembler* masm,
261 const ProcessingState& state) {
266 MaglevAssembler::TemporaryRegisterScope temps(masm);
267 Register saved_left = temps.Acquire();
274 GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
275 __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow,
this);
282 __ orl(saved_left, right);
283 __ cmpl(saved_left, Immediate(0));
288 __ EmitEagerDeoptIf(
less, DeoptimizeReason::kOverflow,
this);
293void Int32ModulusWithOverflow::SetValueLocationConstraints() {
298 RequireSpecificTemporary(rax);
299 RequireSpecificTemporary(rdx);
302void Int32ModulusWithOverflow::GenerateCode(MaglevAssembler* masm,
303 const ProcessingState& state) {
328 DeoptimizeReason::kDivisionByZero;
336 __ EmitEagerDeoptIf(
negative, deopt_reason,
this);
343 ZoneLabelRef done(masm);
344 ZoneLabelRef rhs_checked(masm);
346 __ cmpl(rhs, Immediate(0));
349 [](MaglevAssembler* masm, ZoneLabelRef rhs_checked, Register rhs,
350 Int32ModulusWithOverflow* node) {
353 __ EmitEagerDeopt(node, deopt_reason);
355 rhs_checked, rhs,
this);
356 __ bind(*rhs_checked);
358 __ cmpl(lhs, Immediate(0));
361 [](MaglevAssembler* masm, ZoneLabelRef done, Register lhs, Register rhs,
362 Int32ModulusWithOverflow* node) {
373 __ EmitEagerDeopt(node, deopt_reason);
375 done, lhs, rhs,
this);
377 Label rhs_not_power_of_2;
379 __ leal(
mask, Operand(rhs, -1));
388 __ bind(&rhs_not_power_of_2);
400void Int32DivideWithOverflow::SetValueLocationConstraints() {
405 RequireSpecificTemporary(rax);
406 RequireSpecificTemporary(rdx);
409void Int32DivideWithOverflow::GenerateCode(MaglevAssembler* masm,
410 const ProcessingState& state) {
425 __ cmpl(right, Immediate(0));
426 ZoneLabelRef done(masm);
429 [](MaglevAssembler* masm, ZoneLabelRef done, Register right,
430 Int32DivideWithOverflow* node) {
439 __ EmitEagerDeoptIf(
equal, DeoptimizeReason::kNotInt32, node);
443 __ cmpl(rax, Immediate(0));
445 __ EmitEagerDeoptIf(
equal, DeoptimizeReason::kNotInt32, node);
451 __ cmpl(right, Immediate(-1));
455 __ EmitEagerDeopt(node, DeoptimizeReason::kNotInt32);
464 __ cmpl(rdx, Immediate(0));
468 GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
469 __ EmitEagerDeoptIf(
not_equal, DeoptimizeReason::kNotInt32,
this);
473#define DEF_BITWISE_BINOP(Instruction, opcode) \
474 void Instruction::SetValueLocationConstraints() { \
475 UseRegister(left_input()); \
476 if (TryGetInt32ConstantInput(kRightIndex)) { \
477 UseAny(right_input()); \
479 UseRegister(right_input()); \
481 DefineSameAsFirst(this); \
484 void Instruction::GenerateCode(MaglevAssembler* masm, \
485 const ProcessingState& state) { \
486 Register left = ToRegister(left_input()); \
487 if (!right_input().operand().IsRegister()) { \
488 auto right_const = TryGetInt32ConstantInput(kRightIndex); \
489 DCHECK(right_const); \
490 __ opcode(left, Immediate(*right_const)); \
492 Register right = ToRegister(right_input()); \
493 __ opcode(left, right); \
499#undef DEF_BITWISE_BINOP
501#define DEF_SHIFT_BINOP(Instruction, opcode) \
502 void Instruction::SetValueLocationConstraints() { \
503 UseRegister(left_input()); \
504 if (TryGetInt32ConstantInput(kRightIndex)) { \
505 UseAny(right_input()); \
507 UseFixed(right_input(), rcx); \
509 DefineSameAsFirst(this); \
512 void Instruction::GenerateCode(MaglevAssembler* masm, \
513 const ProcessingState& state) { \
514 Register left = ToRegister(left_input()); \
515 if (auto right_const = TryGetInt32ConstantInput(kRightIndex)) { \
516 DCHECK(right_const); \
517 int right = *right_const & 31; \
519 __ opcode(left, Immediate(right)); \
522 DCHECK_EQ(rcx, ToRegister(right_input())); \
523 __ opcode##_cl(left); \
529#undef DEF_SHIFT_BINOP
531void Int32IncrementWithOverflow::SetValueLocationConstraints() {
536void Int32IncrementWithOverflow::GenerateCode(MaglevAssembler* masm,
537 const ProcessingState& state) {
540 __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow,
this);
543void Int32DecrementWithOverflow::SetValueLocationConstraints() {
548void Int32DecrementWithOverflow::GenerateCode(MaglevAssembler* masm,
549 const ProcessingState& state) {
552 __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow,
this);
555void Int32NegateWithOverflow::SetValueLocationConstraints() {
560void Int32NegateWithOverflow::GenerateCode(MaglevAssembler* masm,
561 const ProcessingState& state) {
564 __ testl(value, value);
565 __ EmitEagerDeoptIf(zero, DeoptimizeReason::kOverflow,
this);
568 __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow,
this);
572 const ProcessingState& state) {
575 __ cmpl(value, Immediate(0));
578 __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow,
this);
588 const ProcessingState& state) {
593void Float64Add::SetValueLocationConstraints() {
599void Float64Add::GenerateCode(MaglevAssembler* masm,
600 const ProcessingState& state) {
603 __ Addsd(left, right);
606void Float64Subtract::SetValueLocationConstraints() {
612void Float64Subtract::GenerateCode(MaglevAssembler* masm,
613 const ProcessingState& state) {
616 __ Subsd(left, right);
619void Float64Multiply::SetValueLocationConstraints() {
625void Float64Multiply::GenerateCode(MaglevAssembler* masm,
626 const ProcessingState& state) {
629 __ Mulsd(left, right);
632void Float64Divide::SetValueLocationConstraints() {
638void Float64Divide::GenerateCode(MaglevAssembler* masm,
639 const ProcessingState& state) {
642 __ Divsd(left, right);
645void Float64Modulus::SetValueLocationConstraints() {
648 RequireSpecificTemporary(rax);
652void Float64Modulus::GenerateCode(MaglevAssembler* masm,
653 const ProcessingState& state) {
657 Operand scratch_stack_space = Operand(rsp, 0);
659 __ fld_d(scratch_stack_space);
661 __ fld_d(scratch_stack_space);
671 CpuFeatureScope sahf_scope(masm, SAHF);
674 __ shrl(rax, Immediate(8));
675 __ andl(rax, Immediate(0xFF));
682 __ fstp_d(scratch_stack_space);
693 const ProcessingState& state) {
699 const ProcessingState& state) {
705 const ProcessingState& state) {
710 MaglevAssembler::TemporaryRegisterScope temps(masm);
733int Float64Exponentiate::MaxCallStackArgs()
const {
736void Float64Exponentiate::SetValueLocationConstraints() {
741void Float64Exponentiate::GenerateCode(MaglevAssembler* masm,
742 const ProcessingState& state) {
743 AllowExternalCallThatCantCauseGC scope(masm);
744 __ PrepareCallCFunction(2);
745 __ CallCFunction(ExternalReference::ieee754_pow_function(), 2);
756 const ProcessingState& state) {
757 AllowExternalCallThatCantCauseGC scope(masm);
758 __ PrepareCallCFunction(1);
767 const ProcessingState& state) {
// Distinguishes the two sites that consume interrupt budget: back-edges of
// loops (kLoop) versus function returns (kReturn). The deferred
// interrupt/tiering handler uses this to decide which runtime path to take.
// NOTE(review): the extracted line had the original file's line number fused
// into the token ("777enum"), which is not valid C++; repaired here.
enum class ReduceInterruptBudgetType { kLoop, kReturn };
779void HandleInterruptsAndTiering(MaglevAssembler* masm, ZoneLabelRef done,
780 Node* node, ReduceInterruptBudgetType type) {
783 if (type == ReduceInterruptBudgetType::kLoop) {
795 SaveRegisterStateForCall save_register_state(masm,
796 node->register_snapshot());
799 __ CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheck_Maglev, 1);
800 save_register_state.DefineSafepointWithLazyDeopt(node->lazy_deopt_info());
809 SaveRegisterStateForCall save_register_state(masm,
810 node->register_snapshot());
814 __ CallRuntime(Runtime::kBytecodeBudgetInterrupt_Maglev, 1);
815 save_register_state.DefineSafepoint();
820void GenerateReduceInterruptBudget(MaglevAssembler* masm, Node* node,
821 Register feedback_cell,
822 ReduceInterruptBudgetType type,
int amount) {
823 MaglevAssembler::TemporaryRegisterScope temps(masm);
824 __ subl(
FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
826 ZoneLabelRef done(masm);
827 __ JumpToDeferredIf(
less, HandleInterruptsAndTiering, done, node, type);
838 const ProcessingState& state) {
840 ReduceInterruptBudgetType::kLoop,
amount());
846 set_temporaries_needed(1);
849 MaglevAssembler* masm,
const ProcessingState& state) {
851 ReduceInterruptBudgetType::kReturn,
amount());
865 int formal_params_size =
866 masm->compilation_info()->toplevel_compilation_unit()->parameter_count();
875 __ movq(actual_params_size,
879 __ LeaveFrame(StackFrame::MAGLEV);
883 Label drop_dynamic_arg_size;
884 __ cmpq(actual_params_size, Immediate(formal_params_size));
890 __ bind(&drop_dynamic_arg_size);
892 __ DropArguments(actual_params_size, r9);
#define Assert(condition)
int GetStackParameterCount() const
static bool IsSupported(CpuFeature f)
static int ArgumentStackSlotsForCFunctionCall(int num_arguments)
static constexpr int kArgCOffset
static constexpr int kFunctionOffset
static const int32_t kMaxOneByteCharCode
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void SetValueLocationConstraints()
void GenerateCode(MaglevAssembler *, const ProcessingState &)
int MaxCallStackArgs() const
void SetValueLocationConstraints()
void SetValueLocationConstraints()
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
ExternalArrayType element_type_
void SetValueLocationConstraints()
void SetValueLocationConstraints()
void GenerateCode(MaglevAssembler *, const ProcessingState &)
int MaxCallStackArgs() const
void GenerateCode(MaglevAssembler *, const ProcessingState &)
ExternalReference ieee_function_ref() const
void SetValueLocationConstraints()
void GenerateCode(MaglevAssembler *, const ProcessingState &)
int MaxCallStackArgs() const
void SetValueLocationConstraints()
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void SetValueLocationConstraints()
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void SetValueLocationConstraints()
Input & allocation_block_input()
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void SetValueLocationConstraints()
void SetValueLocationConstraints()
void GenerateCode(MaglevAssembler *, const ProcessingState &)
ElementsKind elements_kind_
void GenerateCode(MaglevAssembler *, const ProcessingState &)
int MaxCallStackArgs() const
void SetValueLocationConstraints()
int MaxCallStackArgs() const
void SetValueLocationConstraints()
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void SetValueLocationConstraints()
int formal_parameter_count() const
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void SetValueLocationConstraints()
void GenerateCode(MaglevAssembler *, const ProcessingState &)
ZoneVector< RpoNumber > & result
#define DEF_SHIFT_BINOP(Instruction, opcode)
#define DEF_BITWISE_BINOP(Instruction, opcode)
#define DCHECK_REGLIST_EMPTY(...)
int ExternalArrayElementSize(const ExternalArrayType element_type)
void DefineAsRegister(Node *node)
DoubleRegister ToDoubleRegister(const compiler::InstructionOperand &operand)
void DefineSameAsFirst(Node *node)
Register ToRegister(const compiler::InstructionOperand &operand)
void DefineAsFixed(Node *node, Register reg)
void UseAndClobberRegister(Input &input)
void UseAny(Input &input)
void UseRegister(Input &input)
void UseFixed(Input &input, Register reg)
constexpr VFPRoundingMode kRoundToNearest
bool TryCast(Tagged< From > value, Tagged< To > *out)
bool Is(IndirectHandle< U > value)
DwVfpRegister DoubleRegister
constexpr DoubleRegister kScratchDoubleReg
RegListBase< Register > RegList
Operand FieldOperand(Register object, int offset)
constexpr int kSystemPointerSize
constexpr int ElementsKindToShiftSize(ElementsKind elements_kind)
constexpr Register kReturnRegister0
constexpr Register kScratchRegister
constexpr Register kContextRegister
V8_EXPORT_PRIVATE bool AreAliased(const CPURegister ®1, const CPURegister ®2, const CPURegister ®3=NoReg, const CPURegister ®4=NoReg, const CPURegister ®5=NoReg, const CPURegister ®6=NoReg, const CPURegister ®7=NoReg, const CPURegister ®8=NoReg)
V8_EXPORT_PRIVATE FlagValues v8_flags
constexpr int kDoubleSize
#define DCHECK(condition)
#define DCHECK_EQ(v1, v2)
#define OFFSET_OF_DATA_START(Type)