v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
maglev-ir-arm64.cc
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/logging.h"

namespace v8 {
namespace internal {
namespace maglev {

#define __ masm->

namespace {

std::optional<int32_t> TryGetAddImmediateInt32ConstantInput(Node* node,
                                                            int index) {
  if (auto res = node->TryGetInt32ConstantInput(index)) {
    if (MacroAssemblerBase::IsImmAddSub(*res)) {
      return res;
    }
  }
  return {};
}

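// Helper used by the bitwise ops below: the right-hand constant is only
// returned if it is positive and encodable as a 32-bit ARM64 logical
// immediate (see IsImmLogical); otherwise the operand stays in a register.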
std::optional<int32_t> TryGetLogicalImmediateInt32ConstantInput(Node* node,
                                                                int index) {
  if (auto res = node->TryGetInt32ConstantInput(index)) {
    if (*res <= 0) {
      return {};
    }
    unsigned u1, u2, u3;
    if (MacroAssemblerBase::IsImmLogical(*res, 32, &u1, &u2, &u3)) {
      return res;
    }
  }
  return {};
}

}  // namespace

void Int32NegateWithOverflow::SetValueLocationConstraints() {
  UseRegister(value_input());
  DefineAsRegister(this);
}

void Int32NegateWithOverflow::GenerateCode(MaglevAssembler* masm,
                                           const ProcessingState& state) {
  Register value = ToRegister(value_input()).W();
  Register out = ToRegister(result()).W();

  // Deopt when result would be -0.
  static_assert(Int32NegateWithOverflow::kProperties.can_eager_deopt());
  Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kOverflow);
  __ RecordComment("-- Jump to eager deopt");
  __ Cbz(value, fail);

  __ Negs(out, value);
  // Output register must not be a register input into the eager deopt info.
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
}

void Int32AbsWithOverflow::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  Register out = ToRegister(result()).W();
  Label done;
  DCHECK(ToRegister(input()).W().Aliases(out));
  __ Cmp(out, Immediate(0));
  __ JumpIf(ge, &done);
  __ Negs(out, out);
  // Output register must not be a register input into the eager deopt info.
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
  __ bind(&done);
}

void Int32IncrementWithOverflow::SetValueLocationConstraints() {
  UseRegister(value_input());
  DefineAsRegister(this);
}

void Int32IncrementWithOverflow::GenerateCode(MaglevAssembler* masm,
                                              const ProcessingState& state) {
  Register value = ToRegister(value_input()).W();
  Register out = ToRegister(result()).W();
  __ Adds(out, value, Immediate(1));
  // Output register must not be a register input into the eager deopt info.
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
}

void Int32DecrementWithOverflow::SetValueLocationConstraints() {
  UseRegister(value_input());
  DefineAsRegister(this);
}

void Int32DecrementWithOverflow::GenerateCode(MaglevAssembler* masm,
                                              const ProcessingState& state) {
  Register value = ToRegister(value_input()).W();
  Register out = ToRegister(result()).W();
  __ Subs(out, value, Immediate(1));
  // Output register must not be a register input into the eager deopt info.
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
}

int BuiltinStringFromCharCode::MaxCallStackArgs() const {
  return AllocateDescriptor::GetStackParameterCount();
}
void BuiltinStringFromCharCode::SetValueLocationConstraints() {
  if (code_input().node()->Is<Int32Constant>()) {
    UseAny(code_input());
  } else {
    UseAndClobberRegister(code_input());
  }
  set_temporaries_needed(1);
  DefineAsRegister(this);
}
void BuiltinStringFromCharCode::GenerateCode(MaglevAssembler* masm,
                                             const ProcessingState& state) {
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register scratch = temps.Acquire();
  Register result_string = ToRegister(result());
  if (Int32Constant* constant = code_input().node()->TryCast<Int32Constant>()) {
    int32_t char_code = constant->value() & 0xFFFF;
    if (0 <= char_code && char_code < String::kMaxOneByteCharCode) {
      __ LoadSingleCharacterString(result_string, char_code);
    } else {
      __ AllocateTwoByteString(register_snapshot(), result_string, 1);
      __ Move(scratch, char_code);
      __ Strh(scratch.W(),
              FieldMemOperand(result_string,
                              OFFSET_OF_DATA_START(SeqTwoByteString)));
    }
  } else {
    __ StringFromCharCode(register_snapshot(), nullptr, result_string,
                          ToRegister(code_input()), scratch,
                          MaglevAssembler::CharCodeMaskMode::kMustApplyMask);
  }
}

void InlinedAllocation::SetValueLocationConstraints() {
  UseRegister(allocation_block_input());
  if (offset() == 0) {
    DefineSameAsFirst(this);
  } else {
    DefineAsRegister(this);
  }
}

void InlinedAllocation::GenerateCode(MaglevAssembler* masm,
                                     const ProcessingState& state) {
  if (offset() != 0) {
    __ Add(ToRegister(result()), ToRegister(allocation_block_input()),
           offset());
  }
}

void ArgumentsLength::SetValueLocationConstraints() { DefineAsRegister(this); }

void ArgumentsLength::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  Register argc = ToRegister(result());
  __ Ldr(argc, MemOperand(fp, StandardFrameConstants::kArgCOffset));
  __ Sub(argc, argc, 1);  // Remove receiver.
}

void RestLength::SetValueLocationConstraints() { DefineAsRegister(this); }

void RestLength::GenerateCode(MaglevAssembler* masm,
                              const ProcessingState& state) {
  Register length = ToRegister(result());
  Label done;
  __ Ldr(length, MemOperand(fp, StandardFrameConstants::kArgCOffset));
  __ Subs(length, length, formal_parameter_count() + 1);
  __ B(kGreaterThanEqual, &done);
  __ Move(length, 0);
  __ Bind(&done);
  __ UncheckedSmiTagInt32(length);
}

int CheckedObjectToIndex::MaxCallStackArgs() const { return 0; }

void CheckedIntPtrToInt32::SetValueLocationConstraints() {
  UseRegister(input());
  DefineSameAsFirst(this);
}

void CheckedIntPtrToInt32::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  Register input_reg = ToRegister(input());
  __ CompareAndBranch(input_reg.X(),
                      Immediate(std::numeric_limits<int32_t>::max()), gt,
                      __ GetDeoptLabel(this, DeoptimizeReason::kNotInt32));
  __ CompareAndBranch(input_reg.X(),
                      Immediate(std::numeric_limits<int32_t>::min()), lt,
                      __ GetDeoptLabel(this, DeoptimizeReason::kNotInt32));
}

void CheckFloat64SameValue::SetValueLocationConstraints() {
  UseRegister(target_input());
  set_temporaries_needed((value().get_scalar() == 0) ? 1 : 0);
  set_double_temporaries_needed(
      value().is_nan() || (value().get_scalar() == 0) ? 0 : 1);
}
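// Note on the ±0.0 path below: Fcmp treats +0.0 and -0.0 as equal, so the
// code additionally moves the raw float64 bits into a general-purpose
// register and tests the sign bit (bit 63) with Tbnz/Tbz to tell the two
// zeros apart.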
void CheckFloat64SameValue::GenerateCode(MaglevAssembler* masm,
                                         const ProcessingState& state) {
  Label* fail = __ GetDeoptLabel(this, deoptimize_reason());
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  DoubleRegister target = ToDoubleRegister(target_input());
  if (value().is_nan()) {
    __ JumpIfNotNan(target, fail);
  } else if (value().get_scalar() == 0) {  // If value is +0.0 or -0.0.
    Register scratch = temps.AcquireScratch();
    __ Fcmp(target, value().get_scalar());
    __ JumpIf(kNotEqual, fail);
    __ Fmov(scratch, target);
    if (value().get_bits() == 0) {
      __ Tbnz(scratch, 63, fail);
    } else {
      __ Tbz(scratch, 63, fail);
    }
  } else {
    DoubleRegister double_scratch = temps.AcquireScratchDouble();
    __ Move(double_scratch, value());
    __ CompareFloat64AndJumpIf(double_scratch, target, kNotEqual, fail, fail);
  }
}

void Int32AddWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  if (TryGetAddImmediateInt32ConstantInput(this, kRightIndex)) {
    UseAny(right_input());
  } else {
    UseRegister(right_input());
  }
  DefineAsRegister(this);
}

void Int32AddWithOverflow::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  Register left = ToRegister(left_input()).W();
  Register out = ToRegister(result()).W();
  if (!right_input().operand().IsRegister()) {
    auto right_const = TryGetInt32ConstantInput(kRightIndex);
    DCHECK(right_const);
    __ Adds(out, left, *right_const);
  } else {
    Register right = ToRegister(right_input()).W();
    __ Adds(out, left, right);
  }
  // The output register shouldn't be a register input into the eager deopt
  // info.
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
}

void Int32SubtractWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  if (TryGetAddImmediateInt32ConstantInput(this, kRightIndex)) {
    UseAny(right_input());
  } else {
    UseRegister(right_input());
  }
  DefineAsRegister(this);
}
void Int32SubtractWithOverflow::GenerateCode(MaglevAssembler* masm,
                                             const ProcessingState& state) {
  Register left = ToRegister(left_input()).W();
  Register out = ToRegister(result()).W();
  if (!right_input().operand().IsRegister()) {
    auto right_const = TryGetInt32ConstantInput(kRightIndex);
    DCHECK(right_const);
    __ Subs(out, left, *right_const);
  } else {
    Register right = ToRegister(right_input()).W();
    __ Subs(out, left, right);
  }
  // The output register shouldn't be a register input into the eager deopt
  // info.
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
}

void Int32MultiplyWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}
void Int32MultiplyWithOverflow::GenerateCode(MaglevAssembler* masm,
                                             const ProcessingState& state) {
  Register left = ToRegister(left_input()).W();
  Register right = ToRegister(right_input()).W();
  Register out = ToRegister(result()).W();

  // TODO(leszeks): peephole optimise multiplication by a constant.

  MaglevAssembler::TemporaryRegisterScope temps(masm);
  bool out_alias_input = out == left || out == right;
  Register res = out.X();
  if (out_alias_input) {
    res = temps.AcquireScratch();
  }

  __ Smull(res, left, right);

  // if res != (res[0:31] sign extended to 64 bits), then the multiplication
  // result is too large for 32 bits.
  __ Cmp(res, Operand(res.W(), SXTW));
  __ EmitEagerDeoptIf(ne, DeoptimizeReason::kOverflow, this);

  // If the result is zero, check if either lhs or rhs is negative.
  Label end;
  __ CompareAndBranch(res, Immediate(0), ne, &end);
  {
    MaglevAssembler::TemporaryRegisterScope temps(masm);
    Register temp = temps.AcquireScratch().W();
    __ Orr(temp, left, right);
    // If one of them is negative, we must have a -0 result, which is non-int32,
    // so deopt.
    // TODO(leszeks): Consider splitting these deopts to have distinct deopt
    // reasons. Otherwise, the reason has to match the above.
    __ RecordComment("-- Jump to eager deopt if the result is negative zero");
    __ Tbnz(temp, temp.SizeInBits() - 1,
            __ GetDeoptLabel(this, DeoptimizeReason::kOverflow));
  }
  __ Bind(&end);
  if (out_alias_input) {
    __ Move(out, res.W());
  }
}

void Int32DivideWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}
void Int32DivideWithOverflow::GenerateCode(MaglevAssembler* masm,
                                           const ProcessingState& state) {
  Register left = ToRegister(left_input()).W();
  Register right = ToRegister(right_input()).W();
  Register out = ToRegister(result()).W();

  // TODO(leszeks): peephole optimise division by a constant.

  // Pre-check for overflow, since idiv throws a division exception on overflow
  // rather than setting the overflow flag. Logic copied from
  // effect-control-linearizer.cc

  // Check if {right} is positive (and not zero).
  __ Cmp(right, Immediate(0));
  ZoneLabelRef done(masm);
  __ JumpToDeferredIf(
      le,
      [](MaglevAssembler* masm, ZoneLabelRef done, Register left,
         Register right, Int32DivideWithOverflow* node) {
        // {right} is negative or zero.

        // TODO(leszeks): Using kNotInt32 here, but in some places
        // kDivisionByZero/kMinusZero/kOverflow would be better. Right
        // now all eager deopts in a node have to be the same -- we should allow
        // a node to emit multiple eager deopts with different reasons.
        Label* deopt = __ GetDeoptLabel(node, DeoptimizeReason::kNotInt32);

        // Check if {right} is zero.
        // We've already done the compare and flags won't be cleared yet.
        __ JumpIf(eq, deopt);

        // Check if {left} is zero, as that would produce minus zero.
        __ CompareAndBranch(left, Immediate(0), eq, deopt);

        // Check if {left} is kMinInt and {right} is -1, in which case we'd have
        // to return -kMinInt, which is not representable as Int32.
        __ Cmp(left, Immediate(kMinInt));
        __ JumpIf(ne, *done);
        __ Cmp(right, Immediate(-1));
        __ JumpIf(ne, *done);
        __ JumpToDeopt(deopt);
      },
      done, left, right, this);
  __ Bind(*done);

  // Perform the actual integer division.
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  bool out_alias_input = out == left || out == right;
  Register res = out;
  if (out_alias_input) {
    res = temps.AcquireScratch().W();
  }
  __ Sdiv(res, left, right);

  // Check that the remainder is zero.
  Register temp = temps.AcquireScratch().W();
  __ Msub(temp, res, right, left);
  __ CompareAndBranch(temp, Immediate(0), ne,
                      __ GetDeoptLabel(this, DeoptimizeReason::kNotInt32));

  __ Move(out, res);
}

void Int32ModulusWithOverflow::SetValueLocationConstraints() {
  UseAndClobberRegister(left_input());
  UseAndClobberRegister(right_input());
  DefineAsRegister(this);
}
void Int32ModulusWithOverflow::GenerateCode(MaglevAssembler* masm,
                                            const ProcessingState& state) {
  // If AreAliased(lhs, rhs):
  //   deopt if lhs < 0  // Minus zero.
  //   0
  //
  // Using same algorithm as in EffectControlLinearizer:
  //   if rhs <= 0 then
  //     rhs = -rhs
  //     deopt if rhs == 0
  //   if lhs < 0 then
  //     let lhs_abs = -lhs in
  //     let res = lhs_abs % rhs in
  //     deopt if res == 0
  //     -res
  //   else
  //     let msk = rhs - 1 in
  //     if rhs & msk == 0 then
  //       lhs & msk
  //     else
  //       lhs % rhs

  Register lhs = ToRegister(left_input()).W();
  Register rhs = ToRegister(right_input()).W();
  Register out = ToRegister(result()).W();

  static constexpr DeoptimizeReason deopt_reason =
      DeoptimizeReason::kDivisionByZero;

  if (lhs == rhs) {
    // For the modulus algorithm described above, lhs and rhs must not alias
    // each other.
    __ Tst(lhs, lhs);
    // TODO(victorgomes): This ideally should be kMinusZero, but Maglev only
    // allows one deopt reason per IR.
    __ EmitEagerDeoptIf(mi, deopt_reason, this);
    __ Move(ToRegister(result()), 0);
    return;
  }

  DCHECK(!AreAliased(lhs, rhs));

  ZoneLabelRef done(masm);
  ZoneLabelRef rhs_checked(masm);
  __ Cmp(rhs, Immediate(0));
  __ JumpToDeferredIf(
      le,
      [](MaglevAssembler* masm, ZoneLabelRef rhs_checked, Register rhs,
         Int32ModulusWithOverflow* node) {
        __ Negs(rhs, rhs);
        __ B(*rhs_checked, ne);
        __ EmitEagerDeopt(node, deopt_reason);
      },
      rhs_checked, rhs, this);
  __ Bind(*rhs_checked);

  __ Cmp(lhs, Immediate(0));
  __ JumpToDeferredIf(
      lt,
      [](MaglevAssembler* masm, ZoneLabelRef done, Register lhs, Register rhs,
         Register out, Int32ModulusWithOverflow* node) {
        MaglevAssembler::TemporaryRegisterScope temps(masm);
        Register res = temps.AcquireScratch().W();
        __ Neg(lhs, lhs);
        __ Udiv(res, lhs, rhs);
        __ Msub(out, res, rhs, lhs);
        __ Negs(out, out);
        __ B(*done, ne);
        // TODO(victorgomes): This ideally should be kMinusZero, but Maglev
        // only allows one deopt reason per IR.
        __ EmitEagerDeopt(node, deopt_reason);
      },
      done, lhs, rhs, out, this);

  Label rhs_not_power_of_2;
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register mask = temps.AcquireScratch().W();
  __ Add(mask, rhs, Immediate(-1));
  __ Tst(mask, rhs);
  __ JumpIf(ne, &rhs_not_power_of_2);

  // {rhs} is power of 2.
  __ And(out, mask, lhs);
  __ Jump(*done);

  __ Bind(&rhs_not_power_of_2);

  // We store the result of the Udiv in a temporary register in case {out} is
  // the same as {lhs} or {rhs}: we'll still need those 2 registers intact to
  // get the remainder.
  Register res = mask;
  __ Udiv(res, lhs, rhs);
  __ Msub(out, res, rhs, lhs);

  __ Bind(*done);
}

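// The macro below stamps out SetValueLocationConstraints/GenerateCode for the
// Int32 bitwise ops: when the right-hand side is a constant that encodes as an
// ARM64 logical immediate it is used directly, otherwise both operands go
// through registers.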
#define DEF_BITWISE_BINOP(Instruction, opcode) \
  void Instruction::SetValueLocationConstraints() { \
    UseRegister(left_input()); \
    if (TryGetLogicalImmediateInt32ConstantInput(this, kRightIndex)) { \
      UseAny(right_input()); \
    } else { \
      UseRegister(right_input()); \
    } \
    DefineAsRegister(this); \
  } \
  \
  void Instruction::GenerateCode(MaglevAssembler* masm, \
                                 const ProcessingState& state) { \
    Register left = ToRegister(left_input()).W(); \
    Register out = ToRegister(result()).W(); \
    if (!right_input().operand().IsRegister()) { \
      auto right_const = TryGetInt32ConstantInput(kRightIndex); \
      DCHECK(right_const); \
      __ opcode(out, left, *right_const); \
    } else { \
      Register right = ToRegister(right_input()).W(); \
      __ opcode(out, left, right); \
    } \
  }
DEF_BITWISE_BINOP(Int32BitwiseAnd, and_)
DEF_BITWISE_BINOP(Int32BitwiseOr, orr)
DEF_BITWISE_BINOP(Int32BitwiseXor, eor)
#undef DEF_BITWISE_BINOP

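// Shift ops: constant shift amounts are masked to 5 bits (JS shift semantics)
// and emitted as immediate-form lsl/asr/lsr (a zero shift is just a move),
// while variable shift amounts use the register-form lslv/asrv/lsrv selected
// via opcode##v.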
#define DEF_SHIFT_BINOP(Instruction, opcode) \
  void Instruction::SetValueLocationConstraints() { \
    UseRegister(left_input()); \
    if (TryGetInt32ConstantInput(kRightIndex)) { \
      UseAny(right_input()); \
    } else { \
      UseRegister(right_input()); \
    } \
    DefineAsRegister(this); \
  } \
  \
  void Instruction::GenerateCode(MaglevAssembler* masm, \
                                 const ProcessingState& state) { \
    Register out = ToRegister(result()).W(); \
    Register left = ToRegister(left_input()).W(); \
    if (auto right_const = TryGetInt32ConstantInput(kRightIndex)) { \
      int right = *right_const & 31; \
      if (right == 0) { \
        __ Move(out, left); \
      } else { \
        __ opcode(out, left, right); \
      } \
    } else { \
      Register right = ToRegister(right_input()).W(); \
      __ opcode##v(out, left, right); \
    } \
  }
DEF_SHIFT_BINOP(Int32ShiftLeft, lsl)
DEF_SHIFT_BINOP(Int32ShiftRight, asr)
DEF_SHIFT_BINOP(Int32ShiftRightLogical, lsr)
#undef DEF_SHIFT_BINOP

void Int32BitwiseNot::SetValueLocationConstraints() {
  UseRegister(value_input());
  DefineAsRegister(this);
}

void Int32BitwiseNot::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  Register value = ToRegister(value_input()).W();
  Register out = ToRegister(result()).W();
  __ Mvn(out, value);
}

void Float64Add::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}

void Float64Add::GenerateCode(MaglevAssembler* masm,
                              const ProcessingState& state) {
  DoubleRegister left = ToDoubleRegister(left_input());
  DoubleRegister right = ToDoubleRegister(right_input());
  DoubleRegister out = ToDoubleRegister(result());
  __ Fadd(out, left, right);
}

void Float64Subtract::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}

void Float64Subtract::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  DoubleRegister left = ToDoubleRegister(left_input());
  DoubleRegister right = ToDoubleRegister(right_input());
  DoubleRegister out = ToDoubleRegister(result());
  __ Fsub(out, left, right);
}

void Float64Multiply::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}

void Float64Multiply::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  DoubleRegister left = ToDoubleRegister(left_input());
  DoubleRegister right = ToDoubleRegister(right_input());
  DoubleRegister out = ToDoubleRegister(result());
  __ Fmul(out, left, right);
}

void Float64Divide::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}

void Float64Divide::GenerateCode(MaglevAssembler* masm,
                                 const ProcessingState& state) {
  DoubleRegister left = ToDoubleRegister(left_input());
  DoubleRegister right = ToDoubleRegister(right_input());
  DoubleRegister out = ToDoubleRegister(result());
  __ Fdiv(out, left, right);
}

int Float64Modulus::MaxCallStackArgs() const { return 0; }
void Float64Modulus::SetValueLocationConstraints() {
  UseFixed(left_input(), v0);
  UseFixed(right_input(), v1);
  DefineSameAsFirst(this);
}
void Float64Modulus::GenerateCode(MaglevAssembler* masm,
                                  const ProcessingState& state) {
  AllowExternalCallThatCantCauseGC scope(masm);
  __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
}

void Float64Negate::SetValueLocationConstraints() {
  UseRegister(input());
  DefineAsRegister(this);
}
void Float64Negate::GenerateCode(MaglevAssembler* masm,
                                 const ProcessingState& state) {
  DoubleRegister value = ToDoubleRegister(input());
  DoubleRegister out = ToDoubleRegister(result());
  __ Fneg(out, value);
}

void Float64Abs::GenerateCode(MaglevAssembler* masm,
                              const ProcessingState& state) {
  DoubleRegister in = ToDoubleRegister(input());
  DoubleRegister out = ToDoubleRegister(result());
  __ Fabs(out, in);
}

void Float64Round::GenerateCode(MaglevAssembler* masm,
                                const ProcessingState& state) {
  DoubleRegister in = ToDoubleRegister(input());
  DoubleRegister out = ToDoubleRegister(result());
  if (kind_ == Kind::kNearest) {
    MaglevAssembler::TemporaryRegisterScope temps(masm);
    DoubleRegister temp = temps.AcquireScratchDouble();
    DoubleRegister half_one = temps.AcquireScratchDouble();
    __ Move(temp, in);
    // Frintn rounds to even on tie, while JS expects it to round towards
    // +Infinity. Fix the difference by checking if we rounded down by exactly
    // 0.5, and if so, round to the other side.
    __ Frintn(out, in);
    __ Fsub(temp, temp, out);
    __ Move(half_one, 0.5);
    __ Fcmp(temp, half_one);
    Label done;
    __ JumpIf(ne, &done, Label::kNear);
    // Fix wrong tie-to-even by adding 0.5 twice.
    __ Fadd(out, out, half_one);
    __ Fadd(out, out, half_one);
    __ bind(&done);
  } else if (kind_ == Kind::kCeil) {
    __ Frintp(out, in);
  } else if (kind_ == Kind::kFloor) {
    __ Frintm(out, in);
  }
}

int Float64Exponentiate::MaxCallStackArgs() const { return 0; }
void Float64Exponentiate::SetValueLocationConstraints() {
  UseFixed(left_input(), v0);
  UseFixed(right_input(), v1);
  DefineSameAsFirst(this);
}
void Float64Exponentiate::GenerateCode(MaglevAssembler* masm,
                                       const ProcessingState& state) {
  AllowExternalCallThatCantCauseGC scope(masm);
  __ CallCFunction(ExternalReference::ieee754_pow_function(), 2);
}

int Float64Ieee754Unary::MaxCallStackArgs() const { return 0; }
void Float64Ieee754Unary::SetValueLocationConstraints() {
  UseFixed(input(), v0);
  DefineSameAsFirst(this);
}
void Float64Ieee754Unary::GenerateCode(MaglevAssembler* masm,
                                       const ProcessingState& state) {
  AllowExternalCallThatCantCauseGC scope(masm);
  __ CallCFunction(ieee_function_ref(), 1);
}

void LoadTypedArrayLength::SetValueLocationConstraints() {
  UseRegister(receiver_input());
  DefineAsRegister(this);
}
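// The bounded byte length is read from the JSTypedArray and converted into an
// element count by shifting out log2(element size) for the elements kind.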
void LoadTypedArrayLength::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  Register object = ToRegister(receiver_input());
  Register result_register = ToRegister(result());
  if (v8_flags.debug_code) {
    __ AssertObjectType(object, JS_TYPED_ARRAY_TYPE,
                        AbortReason::kUnexpectedValue);
  }
  __ LoadBoundedSizeFromObject(result_register, object,
                               JSTypedArray::kRawByteLengthOffset);
  int shift_size = ElementsKindToShiftSize(elements_kind_);
  if (shift_size > 0) {
    // TODO(leszeks): Merge this shift with the one in LoadBoundedSize.
    DCHECK(shift_size == 1 || shift_size == 2 || shift_size == 3);
    __ Lsr(result_register, result_register, shift_size);
  }
}

int CheckJSDataViewBounds::MaxCallStackArgs() const { return 1; }
void CheckJSDataViewBounds::SetValueLocationConstraints() {
  UseRegister(receiver_input());
  UseRegister(index_input());
  set_temporaries_needed(1);
}
void CheckJSDataViewBounds::GenerateCode(MaglevAssembler* masm,
                                         const ProcessingState& state) {
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register object = ToRegister(receiver_input());
  Register index = ToRegister(index_input());
  if (v8_flags.debug_code) {
    __ AssertObjectType(object, JS_DATA_VIEW_TYPE,
                        AbortReason::kUnexpectedValue);
  }

  // Normal DataView (backed by AB / SAB) or non-length tracking backed by GSAB.
  Register byte_length = temps.Acquire();
  __ LoadBoundedSizeFromObject(byte_length, object,
                               JSDataView::kRawByteLengthOffset);

  int element_size = ExternalArrayElementSize(element_type_);
  if (element_size > 1) {
    __ Subs(byte_length, byte_length, Immediate(element_size - 1));
    __ EmitEagerDeoptIf(mi, DeoptimizeReason::kOutOfBounds, this);
  }
  __ Cmp(index, byte_length);
  __ EmitEagerDeoptIf(hs, DeoptimizeReason::kOutOfBounds, this);
}

void HoleyFloat64ToMaybeNanFloat64::SetValueLocationConstraints() {
  UseRegister(input());
  DefineAsRegister(this);
}
void HoleyFloat64ToMaybeNanFloat64::GenerateCode(MaglevAssembler* masm,
                                                 const ProcessingState& state) {
  // The hole value is a signalling NaN, so just silence it to get the float64
  // value.
  __ CanonicalizeNaN(ToDoubleRegister(result()), ToDoubleRegister(input()));
}

namespace {

enum class ReduceInterruptBudgetType { kLoop, kReturn };

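// Deferred slow path taken when the interrupt budget drops below zero. For
// loops it first services a pending stack-guard interrupt (combined with the
// tiering call, since we already pay for a runtime call); returns only call
// the TieringManager, because we cannot lazy-deopt at the end of a return.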
void HandleInterruptsAndTiering(MaglevAssembler* masm, ZoneLabelRef done,
                                Node* node, ReduceInterruptBudgetType type,
                                Register scratch0) {
  // For loops, first check for interrupts. Don't do this for returns, as we
  // can't lazy deopt to the end of a return.
  if (type == ReduceInterruptBudgetType::kLoop) {
    Label next;
    // Here, we only care about interrupts since we've already guarded against
    // real stack overflows on function entry.
    {
      Register stack_limit = scratch0;
      __ LoadStackLimit(stack_limit, StackLimitKind::kInterruptStackLimit);
      __ Cmp(sp, stack_limit);
      __ B(&next, hi);
    }

    // An interrupt has been requested and we must call into runtime to handle
    // it; since we already pay the call cost, combine with the TieringManager
    // call.
    {
      SaveRegisterStateForCall save_register_state(masm,
                                                   node->register_snapshot());
      Register function = scratch0;
      __ Ldr(function, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
      __ Push(function);
      // Move into kContextRegister after the load into scratch0, just in case
      // scratch0 happens to be kContextRegister.
      __ Move(kContextRegister, masm->native_context().object());
      __ CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheck_Maglev, 1);
      save_register_state.DefineSafepointWithLazyDeopt(node->lazy_deopt_info());
    }
    __ B(*done);  // All done, continue.
    __ Bind(&next);
  }

  // No pending interrupts. Call into the TieringManager if needed.
  {
    SaveRegisterStateForCall save_register_state(masm,
                                                 node->register_snapshot());
    Register function = scratch0;
    __ Ldr(function, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
    __ Push(function);
    // Move into kContextRegister after the load into scratch0, just in case
    // scratch0 happens to be kContextRegister.
    __ Move(kContextRegister, masm->native_context().object());
    // Note: must not cause a lazy deopt!
    __ CallRuntime(Runtime::kBytecodeBudgetInterrupt_Maglev, 1);
    save_register_state.DefineSafepoint();
  }
  __ B(*done);
}

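// Subtracts `amount` from the feedback cell's interrupt budget and defers to
// HandleInterruptsAndTiering when the budget goes negative.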
void GenerateReduceInterruptBudget(MaglevAssembler* masm, Node* node,
                                   Register feedback_cell,
                                   ReduceInterruptBudgetType type, int amount) {
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register scratch = temps.Acquire();
  Register budget = scratch.W();
  __ Ldr(budget,
         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  __ Subs(budget, budget, Immediate(amount));
  __ Str(budget,
         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  ZoneLabelRef done(masm);
  __ JumpToDeferredIf(lt, HandleInterruptsAndTiering, done, node, type,
                      scratch);
  __ Bind(*done);
}

}  // namespace

int ReduceInterruptBudgetForLoop::MaxCallStackArgs() const { return 1; }
void ReduceInterruptBudgetForLoop::SetValueLocationConstraints() {
  UseRegister(feedback_cell());
  set_temporaries_needed(1);
}
void ReduceInterruptBudgetForLoop::GenerateCode(MaglevAssembler* masm,
                                                const ProcessingState& state) {
  GenerateReduceInterruptBudget(masm, this, ToRegister(feedback_cell()),
                                ReduceInterruptBudgetType::kLoop, amount());
}

int ReduceInterruptBudgetForReturn::MaxCallStackArgs() const { return 1; }
void ReduceInterruptBudgetForReturn::SetValueLocationConstraints() {
  UseRegister(feedback_cell());
  set_temporaries_needed(1);
}
void ReduceInterruptBudgetForReturn::GenerateCode(
    MaglevAssembler* masm, const ProcessingState& state) {
  GenerateReduceInterruptBudget(masm, this, ToRegister(feedback_cell()),
                                ReduceInterruptBudgetType::kReturn, amount());
}

// ---
// Control nodes
// ---
void Return::SetValueLocationConstraints() {
  UseFixed(value_input(), kReturnRegister0);
}
void Return::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
  DCHECK_EQ(ToRegister(value_input()), kReturnRegister0);
  // Read the formal number of parameters from the top level compilation unit
  // (i.e. the outermost, non inlined function).
  int formal_params_size =
      masm->compilation_info()->toplevel_compilation_unit()->parameter_count();

  // We're not going to continue execution, so we can use an arbitrary register
  // here instead of relying on temporaries from the register allocator.
  // We cannot use scratch registers, since they're used in LeaveFrame and
  // DropArguments.
  Register actual_params_size = x9;
  Register params_size = x10;

  // Compute the size of the actual parameters + receiver (in bytes).
  // TODO(leszeks): Consider making this an input into Return to reuse the
  // incoming argc's register (if it's still valid).
  __ Ldr(actual_params_size,
         MemOperand(fp, StandardFrameConstants::kArgCOffset));
  __ Mov(params_size, Immediate(formal_params_size));

  // If actual is bigger than formal, then we should use it to free up the stack
  // arguments.
  Label corrected_args_count;
  __ CompareAndBranch(params_size, actual_params_size, ge,
                      &corrected_args_count);
  __ Mov(params_size, actual_params_size);
  __ Bind(&corrected_args_count);

  // Leave the frame.
  __ LeaveFrame(StackFrame::MAGLEV);

  // Drop receiver + arguments according to dynamic arguments size.
  __ DropArguments(params_size);
  __ Ret();
}

}  // namespace maglev
}  // namespace internal
}  // namespace v8