// maglev-ir-arm.cc
// Copyright 2023 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/logging.h"

namespace v8 {
namespace internal {
namespace maglev {

#define __ masm->

void Int32NegateWithOverflow::SetValueLocationConstraints() {
  UseRegister(value_input());
  DefineAsRegister(this);
}

void Int32NegateWithOverflow::GenerateCode(MaglevAssembler* masm,
                                           const ProcessingState& state) {
  Register value = ToRegister(value_input());
  Register out = ToRegister(result());

  // Deopt when result would be -0.
  __ cmp(value, Operand(0));
  __ EmitEagerDeoptIf(eq, DeoptimizeReason::kOverflow, this);

  __ rsb(out, value, Operand(0), SetCC);
  // Output register must not be a register input into the eager deopt info.
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
}
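// Example: negation overflows only for kMinInt, since -(-2147483648) is
// 2147483648, which does not fit in an int32; rsb with SetCC sets the V flag
// in exactly that case, and the deopt above triggers.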

void Int32AbsWithOverflow::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  Register out = ToRegister(result());
  Label done;
  __ cmp(out, Operand(0));
  __ JumpIf(ge, &done);
  __ rsb(out, out, Operand(0), SetCC);
  // Output register must not be a register input into the eager deopt info.
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
  __ bind(&done);
}

void Int32IncrementWithOverflow::SetValueLocationConstraints() {
  UseRegister(value_input());
  DefineAsRegister(this);
}

void Int32IncrementWithOverflow::GenerateCode(MaglevAssembler* masm,
                                              const ProcessingState& state) {
  Register value = ToRegister(value_input());
  Register out = ToRegister(result());
  __ add(out, value, Operand(1), SetCC);
  // Output register must not be a register input into the eager deopt info.
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
}

void Int32DecrementWithOverflow::SetValueLocationConstraints() {
  UseRegister(value_input());
  DefineAsRegister(this);
}

void Int32DecrementWithOverflow::GenerateCode(MaglevAssembler* masm,
                                              const ProcessingState& state) {
  Register value = ToRegister(value_input());
  Register out = ToRegister(result());
  __ sub(out, value, Operand(1), SetCC);
  // Output register must not be a register input into the eager deopt info.
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
}

int BuiltinStringFromCharCode::MaxCallStackArgs() const {
  return AllocateDescriptor::GetStackParameterCount();
}
void BuiltinStringFromCharCode::SetValueLocationConstraints() {
  if (code_input().node()->Is<Int32Constant>()) {
    UseAny(code_input());
  } else {
    UseAndClobberRegister(code_input());
  }
  set_temporaries_needed(1);
  DefineAsRegister(this);
}
void BuiltinStringFromCharCode::GenerateCode(MaglevAssembler* masm,
                                             const ProcessingState& state) {
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register scratch = temps.Acquire();
  Register result_string = ToRegister(result());
  if (Int32Constant* constant = code_input().node()->TryCast<Int32Constant>()) {
    int32_t char_code = constant->value() & 0xFFFF;
    if (0 <= char_code && char_code < String::kMaxOneByteCharCode) {
      __ LoadSingleCharacterString(result_string, char_code);
    } else {
      DCHECK_NE(scratch, result_string);
      __ AllocateTwoByteString(register_snapshot(), result_string, 1);
      __ Move(scratch, char_code);
      __ strh(scratch, FieldMemOperand(result_string,
                                       OFFSET_OF_DATA_START(SeqTwoByteString)));
    }
  } else {
    __ StringFromCharCode(register_snapshot(), nullptr, result_string,
                          ToRegister(code_input()), scratch,
                          MaglevAssembler::CharCodeMaskMode::kMustApplyMask);
  }
}

void InlinedAllocation::SetValueLocationConstraints() {
  UseRegister(allocation_block_input());
  if (offset() == 0) {
    DefineSameAsFirst(this);
  } else {
    DefineAsRegister(this);
  }
}

void InlinedAllocation::GenerateCode(MaglevAssembler* masm,
                                     const ProcessingState& state) {
  if (offset() != 0) {
    __ add(ToRegister(result()), ToRegister(allocation_block_input()),
           Operand(offset()));
  }
}

void ArgumentsLength::SetValueLocationConstraints() { DefineAsRegister(this); }

void ArgumentsLength::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  Register argc = ToRegister(result());
  __ ldr(argc, MemOperand(fp, StandardFrameConstants::kArgCOffset));
  __ sub(argc, argc, Operand(1));  // Remove receiver.
}

void RestLength::SetValueLocationConstraints() { DefineAsRegister(this); }

void RestLength::GenerateCode(MaglevAssembler* masm,
                              const ProcessingState& state) {
  Register length = ToRegister(result());
  Label done;
  __ ldr(length, MemOperand(fp, StandardFrameConstants::kArgCOffset));
  __ sub(length, length, Operand(formal_parameter_count() + 1), SetCC);
  __ b(kGreaterThanEqual, &done);
  __ Move(length, 0);
  __ bind(&done);
  __ UncheckedSmiTagInt32(length);
}

int CheckedObjectToIndex::MaxCallStackArgs() const { return 0; }

void CheckedIntPtrToInt32::SetValueLocationConstraints() {
  UseRegister(input());
  DefineSameAsFirst(this);
}

void CheckedIntPtrToInt32::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  // On 32-bit platforms, IntPtr is the same as Int32.
}

void CheckFloat64SameValue::SetValueLocationConstraints() {
  UseRegister(target_input());
  set_temporaries_needed((value().get_scalar() == 0) ? 1 : 0);
  set_double_temporaries_needed(value().is_nan() ? 0 : 1);
}
void CheckFloat64SameValue::GenerateCode(MaglevAssembler* masm,
                                         const ProcessingState& state) {
  Label* fail = __ GetDeoptLabel(this, deoptimize_reason());
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  DoubleRegister double_scratch = temps.AcquireScratchDouble();
  DoubleRegister target = ToDoubleRegister(target_input());
  if (value().is_nan()) {
    __ JumpIfNotNan(target, fail);
  } else {
    __ Move(double_scratch, value());
    __ CompareFloat64AndJumpIf(double_scratch, target, kNotEqual, fail, fail);
    if (value().get_scalar() == 0) {  // If value is +0.0 or -0.0.
      Register scratch = temps.AcquireScratch();
      __ VmovHigh(scratch, target);
      __ cmp(scratch, Operand(0));
      __ JumpIf(value().get_bits() == 0 ? kNotEqual : kEqual, fail);
    }
  }
}
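// Note on the zero case above: under a VFP compare, +0.0 and -0.0 are equal,
// so the sign of a zero has to be read from the bit pattern instead. The high
// word of the double (moved to a core register by VmovHigh) is 0x80000000 for
// -0.0 and 0 for +0.0, which the final compare distinguishes.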

void Int32AddWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}

void Int32AddWithOverflow::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  Register left = ToRegister(left_input());
  Register right = ToRegister(right_input());
  Register out = ToRegister(result());
  __ add(out, left, right, SetCC);
  // The output register shouldn't be a register input into the eager deopt
  // info.
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
}

void Int32SubtractWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}
void Int32SubtractWithOverflow::GenerateCode(MaglevAssembler* masm,
                                             const ProcessingState& state) {
  Register left = ToRegister(left_input());
  Register right = ToRegister(right_input());
  Register out = ToRegister(result());
  __ sub(out, left, right, SetCC);
  // The output register shouldn't be a register input into the eager deopt
  // info.
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
}

void Int32MultiplyWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}
void Int32MultiplyWithOverflow::GenerateCode(MaglevAssembler* masm,
                                             const ProcessingState& state) {
  Register left = ToRegister(left_input());
  Register right = ToRegister(right_input());
  Register out = ToRegister(result());

  // TODO(leszeks): peephole optimise multiplication by a constant.

  MaglevAssembler::TemporaryRegisterScope temps(masm);
  bool out_alias_input = out == left || out == right;
  Register res_low = out;
  if (out_alias_input) {
    res_low = temps.AcquireScratch();
  }
  Register res_high = temps.AcquireScratch();
  __ smull(res_low, res_high, left, right);

  // ARM doesn't set the overflow flag for multiplication, so we need to
  // test on kNotEqual.
  __ cmp(res_high, Operand(res_low, ASR, 31));
  __ EmitEagerDeoptIf(ne, DeoptimizeReason::kOverflow, this);

  // If the result is zero, check if either lhs or rhs is negative.
  Label end;
  __ tst(res_low, res_low);
  __ b(ne, &end);
  Register temp = res_high;
  __ orr(temp, left, right, SetCC);
  // If one of them is negative, we must have a -0 result, which is non-int32,
  // so deopt.
  __ EmitEagerDeoptIf(mi, DeoptimizeReason::kOverflow, this);

  __ bind(&end);
  if (out_alias_input) {
    __ Move(out, res_low);
  }
}
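// Example of the high-word check above: 0x10000 * 0x10000 = 2^32 leaves
// res_low == 0 and res_high == 1, while the sign extension of res_low
// (res_low ASR 31) is 0, so the compare fails and the node deopts. A product
// fits in 32 bits exactly when res_high equals that sign extension.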

void Int32DivideWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}
void Int32DivideWithOverflow::GenerateCode(MaglevAssembler* masm,
                                           const ProcessingState& state) {
  Register left = ToRegister(left_input());
  Register right = ToRegister(right_input());
  Register out = ToRegister(result());

  // TODO(leszeks): peephole optimise division by a constant.

  // Pre-check for overflow, since idiv throws a division exception on overflow
  // rather than setting the overflow flag. Logic copied from
  // effect-control-linearizer.cc

  // Check if {right} is positive (and not zero).
  __ cmp(right, Operand(0));
  ZoneLabelRef done(masm);
  __ JumpToDeferredIf(
      le,
      [](MaglevAssembler* masm, ZoneLabelRef done, Register left,
         Register right, Int32DivideWithOverflow* node) {
        // {right} is negative or zero.

        // TODO(leszeks): Using kNotInt32 here, but in some places
        // kDivisionByZero/kMinusZero/kOverflow would be better. Right now all
        // eager deopts in a node have to be the same -- we should allow a
        // node to emit multiple eager deopts with different reasons.
        Label* deopt = __ GetDeoptLabel(node, DeoptimizeReason::kNotInt32);

        // Check if {right} is zero.
        // We've already done the compare and flags won't be cleared yet.
        __ JumpIf(eq, deopt);

        // Check if {left} is zero, as that would produce minus zero.
        __ tst(left, left);
        __ JumpIf(eq, deopt);

        // Check if {left} is kMinInt and {right} is -1, in which case we'd
        // have to return -kMinInt, which is not representable as Int32.
        __ cmp(left, Operand(kMinInt));
        __ JumpIf(ne, *done);
        __ cmp(right, Operand(-1));
        __ JumpIf(ne, *done);
        __ JumpToDeopt(deopt);
      },
      done, left, right, this);
  __ bind(*done);

  // Perform the actual integer division.
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  bool out_alias_input = out == left || out == right;
  Register res = out;
  if (out_alias_input) {
    res = temps.AcquireScratch();
  }
  if (CpuFeatures::IsSupported(SUDIV)) {
    CpuFeatureScope scope(masm, SUDIV);
    __ sdiv(res, left, right);
  } else {
    UseScratchRegisterScope temps(masm);
    LowDwVfpRegister double_right = temps.AcquireLowD();
    SwVfpRegister tmp = double_right.low();
    DwVfpRegister double_left = temps.AcquireD();
    DwVfpRegister double_res = double_left;
    __ vmov(tmp, left);
    __ vcvt_f64_s32(double_left, tmp);
    __ vmov(tmp, right);
    __ vcvt_f64_s32(double_right, tmp);
    __ vdiv(double_res, double_left, double_right);
    __ vcvt_s32_f64(tmp, double_res);
    __ vmov(res, tmp);
  }

  // Check that the remainder is zero.
  Register temp = temps.AcquireScratch();
  __ mul(temp, res, right);
  __ cmp(temp, left);
  __ EmitEagerDeoptIf(ne, DeoptimizeReason::kNotInt32, this);

  __ Move(out, res);
}
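// Example of the remainder check above: for 7 / 2, sdiv truncates to 3, and
// 3 * 2 != 7, so the node deopts with kNotInt32 -- the exact quotient is not
// representable as an int32 and the division is redone in float64.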

namespace {
void Uint32Mod(MaglevAssembler* masm, Register out, Register left,
               Register right) {
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register res = temps.AcquireScratch();
  if (CpuFeatures::IsSupported(SUDIV)) {
    CpuFeatureScope scope(masm, SUDIV);
    __ udiv(res, left, right);
  } else {
    UseScratchRegisterScope temps(masm);
    LowDwVfpRegister double_right = temps.AcquireLowD();
    SwVfpRegister tmp = double_right.low();
    DwVfpRegister double_left = temps.AcquireD();
    DwVfpRegister double_res = double_left;
    __ vmov(tmp, left);
    __ vcvt_f64_s32(double_left, tmp);
    __ vmov(tmp, right);
    __ vcvt_f64_s32(double_right, tmp);
    __ vdiv(double_res, double_left, double_right);
    __ vcvt_s32_f64(tmp, double_res);
    __ vmov(res, tmp);
  }
  if (CpuFeatures::IsSupported(ARMv7)) {
    __ mls(out, res, right, left);
  } else {
    __ mul(res, res, right);
    __ sub(out, left, res);
  }
}
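// On ARMv7, mls computes out = left - res * right in a single instruction,
// i.e. the remainder given the quotient in res; older cores fall back to the
// explicit mul + sub pair above.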
}  // namespace

void Int32ModulusWithOverflow::SetValueLocationConstraints() {
  UseAndClobberRegister(left_input());
  UseAndClobberRegister(right_input());
  DefineAsRegister(this);
}
void Int32ModulusWithOverflow::GenerateCode(MaglevAssembler* masm,
                                            const ProcessingState& state) {
  // If AreAliased(lhs, rhs):
  //   deopt if lhs < 0  // Minus zero.
  //   0
  //
  // Using same algorithm as in EffectControlLinearizer:
  //   if rhs <= 0 then
  //     rhs = -rhs
  //     deopt if rhs == 0
  //   if lhs < 0 then
  //     let lhs_abs = -lhs in
  //     let res = lhs_abs % rhs in
  //     deopt if res == 0
  //     -res
  //   else
  //     let msk = rhs - 1 in
  //     if rhs & msk == 0 then
  //       lhs & msk
  //     else
  //       lhs % rhs

  Register lhs = ToRegister(left_input());
  Register rhs = ToRegister(right_input());
  Register out = ToRegister(result());

  static constexpr DeoptimizeReason deopt_reason =
      DeoptimizeReason::kDivisionByZero;

  if (lhs == rhs) {
    // For the modulus algorithm described above, lhs and rhs must not alias
    // each other.
    __ tst(lhs, lhs);
    // TODO(victorgomes): This ideally should be kMinusZero, but Maglev only
    // allows one deopt reason per IR.
    __ EmitEagerDeoptIf(mi, deopt_reason, this);
    __ Move(ToRegister(result()), 0);
    return;
  }

  DCHECK_NE(lhs, rhs);

  ZoneLabelRef done(masm);
  ZoneLabelRef rhs_checked(masm);
  __ cmp(rhs, Operand(0));
  __ JumpToDeferredIf(
      le,
      [](MaglevAssembler* masm, ZoneLabelRef rhs_checked, Register rhs,
         Int32ModulusWithOverflow* node) {
        __ rsb(rhs, rhs, Operand(0), SetCC);
        __ b(ne, *rhs_checked);
        __ EmitEagerDeopt(node, deopt_reason);
      },
      rhs_checked, rhs, this);
  __ bind(*rhs_checked);

  __ cmp(lhs, Operand(0));
  __ JumpToDeferredIf(
      lt,
      [](MaglevAssembler* masm, ZoneLabelRef done, Register lhs, Register rhs,
         Register out, Int32ModulusWithOverflow* node) {
        __ rsb(lhs, lhs, Operand(0));
        Uint32Mod(masm, out, lhs, rhs);
        __ rsb(out, out, Operand(0), SetCC);
        // TODO(victorgomes): This ideally should be kMinusZero, but Maglev
        // only allows one deopt reason per IR.
        __ b(ne, *done);
        __ EmitEagerDeopt(node, deopt_reason);
      },
      done, lhs, rhs, out, this);

  Label rhs_not_power_of_2;
  {
    MaglevAssembler::TemporaryRegisterScope temps(masm);
    Register mask = temps.AcquireScratch();
    __ add(mask, rhs, Operand(-1));
    __ tst(mask, rhs);
    __ JumpIf(ne, &rhs_not_power_of_2);

    // {rhs} is power of 2.
    __ and_(out, mask, lhs);
    __ Jump(*done);
    // {mask} can be reused from now on.
  }

  __ bind(&rhs_not_power_of_2);
  Uint32Mod(masm, out, lhs, rhs);
  __ bind(*done);
}
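// Example of the power-of-two fast path above: for rhs == 8, mask == 7 and
// rhs & mask == 0, so lhs % 8 reduces to the single instruction lhs & 7
// (correct here because this path is only reached with a non-negative lhs).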

#define DEF_BITWISE_BINOP(Instruction, opcode)                   \
  void Instruction::SetValueLocationConstraints() {              \
    UseRegister(left_input());                                   \
    UseRegister(right_input());                                  \
    DefineAsRegister(this);                                      \
  }                                                              \
                                                                 \
  void Instruction::GenerateCode(MaglevAssembler* masm,          \
                                 const ProcessingState& state) { \
    Register left = ToRegister(left_input());                    \
    Register right = ToRegister(right_input());                  \
    Register out = ToRegister(result());                         \
    __ opcode(out, left, right);                                 \
  }
DEF_BITWISE_BINOP(Int32BitwiseAnd, and_)
DEF_BITWISE_BINOP(Int32BitwiseOr, orr)
DEF_BITWISE_BINOP(Int32BitwiseXor, eor)
#undef DEF_BITWISE_BINOP

#define DEF_SHIFT_BINOP(Instruction, opcode)                                   \
  void Instruction::SetValueLocationConstraints() {                            \
    UseRegister(left_input());                                                 \
    if (right_input().node()->Is<Int32Constant>()) {                           \
      UseAny(right_input());                                                   \
    } else {                                                                   \
      UseRegister(right_input());                                              \
    }                                                                          \
    DefineAsRegister(this);                                                    \
  }                                                                            \
  void Instruction::GenerateCode(MaglevAssembler* masm,                        \
                                 const ProcessingState& state) {               \
    Register left = ToRegister(left_input());                                  \
    Register out = ToRegister(result());                                       \
    if (Int32Constant* constant =                                              \
            right_input().node()->TryCast<Int32Constant>()) {                  \
      uint32_t shift = constant->value() & 31;                                 \
      if (shift == 0) {                                                        \
        /* TODO(victorgomes): Arm will do a shift of 32 if right == 0. Ideally \
         * we should not even emit the shift in the first place. We do a move  \
         * here for the moment. */                                             \
        __ Move(out, left);                                                    \
      } else {                                                                 \
        __ opcode(out, left, Operand(shift));                                  \
      }                                                                        \
    } else {                                                                   \
      MaglevAssembler::TemporaryRegisterScope temps(masm);                     \
      Register scratch = temps.AcquireScratch();                               \
      Register right = ToRegister(right_input());                              \
      __ and_(scratch, right, Operand(31));                                    \
      __ opcode(out, left, Operand(scratch));                                  \
    }                                                                          \
  }
DEF_SHIFT_BINOP(Int32ShiftLeft, lsl)
DEF_SHIFT_BINOP(Int32ShiftRight, asr)
DEF_SHIFT_BINOP(Int32ShiftRightLogical, lsr)
#undef DEF_SHIFT_BINOP
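// The & 31 masking mirrors JavaScript shift semantics, where the count is
// taken mod 32 (e.g. 1 << 33 === 2). An ARM register-specified shift instead
// consumes the bottom byte of the register, so a count of 32 would shift the
// value out entirely (or fill it with sign bits) rather than leave it
// unchanged; masking the count avoids that mismatch.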

void Int32BitwiseNot::SetValueLocationConstraints() {
  UseRegister(value_input());
  DefineAsRegister(this);
}

void Int32BitwiseNot::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  Register value = ToRegister(value_input());
  Register out = ToRegister(result());
  __ mvn(out, Operand(value));
}

void Float64Add::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}

void Float64Add::GenerateCode(MaglevAssembler* masm,
                              const ProcessingState& state) {
  DoubleRegister left = ToDoubleRegister(left_input());
  DoubleRegister right = ToDoubleRegister(right_input());
  DoubleRegister out = ToDoubleRegister(result());
  __ vadd(out, left, right);
}

void Float64Subtract::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}

void Float64Subtract::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  DoubleRegister left = ToDoubleRegister(left_input());
  DoubleRegister right = ToDoubleRegister(right_input());
  DoubleRegister out = ToDoubleRegister(result());
  __ vsub(out, left, right);
}

void Float64Multiply::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}

void Float64Multiply::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  DoubleRegister left = ToDoubleRegister(left_input());
  DoubleRegister right = ToDoubleRegister(right_input());
  DoubleRegister out = ToDoubleRegister(result());
  __ vmul(out, left, right);
}

void Float64Divide::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}

void Float64Divide::GenerateCode(MaglevAssembler* masm,
                                 const ProcessingState& state) {
  DoubleRegister left = ToDoubleRegister(left_input());
  DoubleRegister right = ToDoubleRegister(right_input());
  DoubleRegister out = ToDoubleRegister(result());
  __ vdiv(out, left, right);
}

int Float64Modulus::MaxCallStackArgs() const { return 0; }
void Float64Modulus::SetValueLocationConstraints() {
  UseFixed(left_input(), d0);
  UseFixed(right_input(), d1);
  DefineAsRegister(this);
}
void Float64Modulus::GenerateCode(MaglevAssembler* masm,
                                  const ProcessingState& state) {
  FrameScope scope(masm, StackFrame::MANUAL);
  __ PrepareCallCFunction(0, 2);
  __ MovToFloatParameters(ToDoubleRegister(left_input()),
                          ToDoubleRegister(right_input()));
  __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
  // Move the result into the double result register.
  __ MovFromFloatResult(ToDoubleRegister(result()));
}

void Float64Negate::SetValueLocationConstraints() {
  UseRegister(input());
  DefineAsRegister(this);
}
void Float64Negate::GenerateCode(MaglevAssembler* masm,
                                 const ProcessingState& state) {
  DoubleRegister value = ToDoubleRegister(input());
  DoubleRegister out = ToDoubleRegister(result());
  __ vneg(out, value);
}

void Float64Abs::GenerateCode(MaglevAssembler* masm,
                              const ProcessingState& state) {
  DoubleRegister in = ToDoubleRegister(input());
  DoubleRegister out = ToDoubleRegister(result());
  __ vabs(out, in);
}

void Float64Round::GenerateCode(MaglevAssembler* masm,
                                const ProcessingState& state) {
  DoubleRegister in = ToDoubleRegister(input());
  DoubleRegister out = ToDoubleRegister(result());
  CpuFeatureScope scope(masm, ARMv8);
  if (kind_ == Kind::kNearest) {
    MaglevAssembler::TemporaryRegisterScope temps(masm);
    DoubleRegister temp = temps.AcquireDouble();
    __ Move(temp, in);
    // vrintn rounds to even on tie, while JS expects it to round towards
    // +Infinity. Fix the difference by checking if we rounded down by exactly
    // 0.5, and if so, round to the other side.
    __ vrintn(out, in);
    __ vsub(temp, temp, out);
    DoubleRegister half_one = temps.AcquireScratchDouble();
    __ Move(half_one, 0.5);
    __ VFPCompareAndSetFlags(temp, half_one);
    Label done;
    __ JumpIf(ne, &done, Label::kNear);
    // Fix wrong tie-to-even by adding 0.5 twice.
    __ vadd(out, out, half_one);
    __ vadd(out, out, half_one);
    __ bind(&done);
  } else if (kind_ == Kind::kCeil) {
    __ vrintp(out, in);
  } else if (kind_ == Kind::kFloor) {
    __ vrintm(out, in);
  }
}
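// Worked example of the kNearest tie fix above: for input 2.5, vrintn rounds
// to the even neighbour 2.0, while JS Math.round(2.5) is 3. Then
// temp = 2.5 - 2.0 = 0.5, the compare takes the fix-up path, and adding 0.5
// twice produces 3.0. For 3.5, vrintn already yields 4.0 and temp is -0.5, so
// no fix-up happens; likewise for -2.5, where vrintn's -2.0 is the JS result.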

int Float64Exponentiate::MaxCallStackArgs() const { return 0; }
void Float64Exponentiate::SetValueLocationConstraints() {
  UseFixed(left_input(), d0);
  UseFixed(right_input(), d1);
  DefineAsRegister(this);
}
void Float64Exponentiate::GenerateCode(MaglevAssembler* masm,
                                       const ProcessingState& state) {
  DoubleRegister left = ToDoubleRegister(left_input());
  DoubleRegister right = ToDoubleRegister(right_input());
  DoubleRegister out = ToDoubleRegister(result());
  FrameScope scope(masm, StackFrame::MANUAL);
  __ PrepareCallCFunction(0, 2);
  __ MovToFloatParameters(left, right);
  __ CallCFunction(ExternalReference::ieee754_pow_function(), 0, 2);
  __ MovFromFloatResult(out);
}

int Float64Ieee754Unary::MaxCallStackArgs() const { return 0; }
void Float64Ieee754Unary::SetValueLocationConstraints() {
  UseFixed(input(), d0);
  DefineAsRegister(this);
}
void Float64Ieee754Unary::GenerateCode(MaglevAssembler* masm,
                                       const ProcessingState& state) {
  DoubleRegister value = ToDoubleRegister(input());
  DoubleRegister out = ToDoubleRegister(result());
  FrameScope scope(masm, StackFrame::MANUAL);
  __ PrepareCallCFunction(0, 1);
  __ MovToFloatParameter(value);
  __ CallCFunction(ieee_function_ref(), 0, 1);
  __ MovFromFloatResult(out);
}

void LoadTypedArrayLength::SetValueLocationConstraints() {
  UseRegister(receiver_input());
  DefineAsRegister(this);
}

void LoadTypedArrayLength::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  Register object = ToRegister(receiver_input());
  Register result_register = ToRegister(result());
  if (v8_flags.debug_code) {
    __ AssertObjectType(object, JS_TYPED_ARRAY_TYPE,
                        AbortReason::kUnexpectedValue);
  }
  __ LoadBoundedSizeFromObject(result_register, object,
                               JSTypedArray::kRawByteLengthOffset);
  int shift_size = ElementsKindToShiftSize(elements_kind_);
  if (shift_size > 0) {
    // TODO(leszeks): Merge this shift with the one in LoadBoundedSize.
    DCHECK(shift_size == 1 || shift_size == 2 || shift_size == 3);
    __ lsr(result_register, result_register, Operand(shift_size));
  }
}

int CheckJSDataViewBounds::MaxCallStackArgs() const { return 1; }
void CheckJSDataViewBounds::SetValueLocationConstraints() {
  UseRegister(receiver_input());
  UseRegister(index_input());
  set_temporaries_needed(1);
}
void CheckJSDataViewBounds::GenerateCode(MaglevAssembler* masm,
                                         const ProcessingState& state) {
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register object = ToRegister(receiver_input());
  Register index = ToRegister(index_input());
  if (v8_flags.debug_code) {
    __ AssertObjectType(object, JS_DATA_VIEW_TYPE,
                        AbortReason::kUnexpectedValue);
  }

  // Normal DataView (backed by AB / SAB) or non-length tracking backed by GSAB.
  Register byte_length = temps.Acquire();
  __ LoadBoundedSizeFromObject(byte_length, object,
                               JSDataView::kRawByteLengthOffset);

  int element_size = compiler::ExternalArrayElementSize(element_type_);
  if (element_size > 1) {
    __ sub(byte_length, byte_length, Operand(element_size - 1), SetCC);
    __ EmitEagerDeoptIf(mi, DeoptimizeReason::kOutOfBounds, this);
  }
  __ cmp(index, byte_length);
  __ EmitEagerDeoptIf(hs, DeoptimizeReason::kOutOfBounds, this);
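  // The subtraction above folds the element width into the bound: once
  // byte_length -= element_size - 1, the unsigned compare index < byte_length
  // implies index + element_size <= the original byte length, so a whole
  // element fits; hs (unsigned >=) catches both negative and too-large
  // indices.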
}

void HoleyFloat64ToMaybeNanFloat64::SetValueLocationConstraints() {
  UseRegister(input());
  DefineAsRegister(this);
}
void HoleyFloat64ToMaybeNanFloat64::GenerateCode(MaglevAssembler* masm,
                                                 const ProcessingState& state) {
  // The hole value is a signalling NaN, so just silence it to get the float64
  // value.
  __ VFPCanonicalizeNaN(ToDoubleRegister(result()), ToDoubleRegister(input()));
}
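// The silencing works because VFP arithmetic quiets signalling NaNs:
// VFPCanonicalizeNaN runs the value through an arithmetic op (a vsub with
// zero in the ARM macro assembler, if memory serves), turning the hole's
// signalling NaN into a quiet NaN while leaving all other values unchanged.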

namespace {

enum class ReduceInterruptBudgetType { kLoop, kReturn };

void HandleInterruptsAndTiering(MaglevAssembler* masm, ZoneLabelRef done,
                                Node* node, ReduceInterruptBudgetType type,
                                Register scratch0) {
  // For loops, first check for interrupts. Don't do this for returns, as we
  // can't lazy deopt to the end of a return.
  if (type == ReduceInterruptBudgetType::kLoop) {
    Label next;
    // Here, we only care about interrupts since we've already guarded against
    // real stack overflows on function entry.
    {
      Register stack_limit = scratch0;
      __ LoadStackLimit(stack_limit, StackLimitKind::kInterruptStackLimit);
      __ cmp(sp, stack_limit);
      __ b(hi, &next);
    }

    // An interrupt has been requested and we must call into runtime to handle
    // it; since we already pay the call cost, combine with the TieringManager
    // call.
    {
      SaveRegisterStateForCall save_register_state(masm,
                                                   node->register_snapshot());
      Register function = scratch0;
      __ ldr(function, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
      __ Push(function);
      // Move into kContextRegister after the load into scratch0, just in case
      // scratch0 happens to be kContextRegister.
      __ Move(kContextRegister, masm->native_context().object());
      __ CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheck_Maglev, 1);
      save_register_state.DefineSafepointWithLazyDeopt(node->lazy_deopt_info());
    }
    __ b(*done);  // All done, continue.
    __ bind(&next);
  }

  // No pending interrupts. Call into the TieringManager if needed.
  {
    SaveRegisterStateForCall save_register_state(masm,
                                                 node->register_snapshot());
    Register function = scratch0;
    __ ldr(function, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
    __ Push(function);
    // Move into kContextRegister after the load into scratch0, just in case
    // scratch0 happens to be kContextRegister.
    __ Move(kContextRegister, masm->native_context().object());
    // Note: must not cause a lazy deopt!
    __ CallRuntime(Runtime::kBytecodeBudgetInterrupt_Maglev, 1);
    save_register_state.DefineSafepoint();
  }
  __ b(*done);
}

void GenerateReduceInterruptBudget(MaglevAssembler* masm, Node* node,
                                   Register feedback_cell,
                                   ReduceInterruptBudgetType type, int amount) {
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register budget = temps.Acquire();
  __ ldr(budget,
         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  __ sub(budget, budget, Operand(amount), SetCC);
  __ str(budget,
         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  ZoneLabelRef done(masm);
  __ JumpToDeferredIf(lt, HandleInterruptsAndTiering, done, node, type, budget);
  __ bind(*done);
}
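// The deferred path above is taken on lt, i.e. when the subtraction drives
// the budget negative: the runtime then services any pending interrupt, gives
// the TieringManager a chance to upgrade the function, and resets the budget.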

}  // namespace

int ReduceInterruptBudgetForLoop::MaxCallStackArgs() const { return 1; }
void ReduceInterruptBudgetForLoop::SetValueLocationConstraints() {
  UseRegister(feedback_cell());
  set_temporaries_needed(1);
}
void ReduceInterruptBudgetForLoop::GenerateCode(MaglevAssembler* masm,
                                                const ProcessingState& state) {
  GenerateReduceInterruptBudget(masm, this, ToRegister(feedback_cell()),
                                ReduceInterruptBudgetType::kLoop, amount());
}

int ReduceInterruptBudgetForReturn::MaxCallStackArgs() const { return 1; }
void ReduceInterruptBudgetForReturn::SetValueLocationConstraints() {
  UseRegister(feedback_cell());
  set_temporaries_needed(1);
}
void ReduceInterruptBudgetForReturn::GenerateCode(
    MaglevAssembler* masm, const ProcessingState& state) {
  GenerateReduceInterruptBudget(masm, this, ToRegister(feedback_cell()),
                                ReduceInterruptBudgetType::kReturn, amount());
}

// ---
// Control nodes
// ---
void Return::SetValueLocationConstraints() {
  UseFixed(value_input(), kReturnRegister0);
}
void Return::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
  DCHECK_EQ(ToRegister(value_input()), kReturnRegister0);

  // Read the formal number of parameters from the top level compilation unit
  // (i.e. the outermost, non-inlined function).
  int formal_params_size =
      masm->compilation_info()->toplevel_compilation_unit()->parameter_count();

  // We're not going to continue execution, so we can use an arbitrary register
  // here instead of relying on temporaries from the register allocator.
  Register actual_params_size = r4;
  Register params_size = r8;

  // Compute the size of the actual parameters + receiver (in bytes).
  // TODO(leszeks): Consider making this an input into Return to reuse the
  // incoming argc's register (if it's still valid).
  __ ldr(actual_params_size,
         MemOperand(fp, StandardFrameConstants::kArgCOffset));

  // Leave the frame.
  __ LeaveFrame(StackFrame::MAGLEV);

  // If actual is bigger than formal, then we should use it to free up the
  // stack arguments.
  Label corrected_args_count;
  __ Move(params_size, formal_params_size);
  __ cmp(params_size, actual_params_size);
  __ b(kGreaterThanEqual, &corrected_args_count);
  __ Move(params_size, actual_params_size);
  __ bind(&corrected_args_count);

  // Drop receiver + arguments according to dynamic arguments size.
  __ DropArguments(params_size);
  __ Ret();
}
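// Example of the argument-count correction above: a function declared with
// two formal parameters but called with five arguments still has all five
// (plus the receiver) on the stack, so the larger of the formal and actual
// sizes is the number of slots that must be dropped before returning.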

}  // namespace maglev
}  // namespace internal
}  // namespace v8