v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
maglev-ir-riscv.cc
Go to the documentation of this file.
1// Copyright 2024 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/base/logging.h"
17
18namespace v8 {
19namespace internal {
20namespace maglev {
21
22#define __ masm->
23
// Int32NegateWithOverflow: negate an int32 value, eagerly deopting when the
// result is not a representable int32 (input == 0 would give -0; input ==
// kMinInt would overflow).
void Int32NegateWithOverflow::SetValueLocationConstraints() {
  // Input must live in a general-purpose register; result gets its own.
  UseRegister(value_input());
  DefineAsRegister(this);
}

// NOTE(review): this extraction dropped at least one source line in this
// function (presumably the DCHECK_REGLIST_EMPTY(RegList{out} & ...) before
// the trailing GetGeneralRegistersUsedAsInputs call) — verify upstream.
void Int32NegateWithOverflow::GenerateCode(MaglevAssembler* masm,
                                           const ProcessingState& state) {
  Register value = ToRegister(value_input());
  Register out = ToRegister(result());

  static_assert(Int32NegateWithOverflow::kProperties.can_eager_deopt());
  // Deopt when result would be -0.
  Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kOverflow);
  __ RecordComment("-- Jump to eager deopt");
  __ MacroAssembler::Branch(fail, equal, value, Operand(zero_reg));

  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register scratch = temps.AcquireScratch();
  // 64-bit NEG vs. 32-bit NEGW disagree exactly when the 32-bit negation
  // overflowed (value == kMinInt).
  __ neg(scratch, value);
  __ negw(out, value);

  // Are the results of NEG and NEGW on the operand different?
  __ RecordComment("-- Jump to eager deopt");
  __ MacroAssembler::Branch(fail, not_equal, scratch, Operand(out));

  // Output register must not be a register input into the eager deopt info.
  GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
}
53
// Int32AbsWithOverflow: absolute value computed in place (result aliases the
// input), eagerly deopting when |kMinInt| does not fit in int32.
// NOTE(review): this extraction dropped several source lines in this function
// (a static_assert, a branch-distance argument, and the DCHECK_REGLIST_EMPTY
// line) — verify against the original maglev-ir-riscv.cc.
void Int32AbsWithOverflow::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  Register out = ToRegister(result());

  Label done;
  DCHECK(ToRegister(input()) == out);
  // fast-path
  __ MacroAssembler::Branch(&done, greater_equal, out, Operand(zero_reg),

  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register scratch = temps.AcquireScratch();
  // 64-bit NEG vs. 32-bit NEGW disagree exactly when negation overflows.
  __ neg(scratch, out);
  __ negw(out, out);

  // Are the results of NEG and NEGW on the operand different?
  Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kOverflow);
  __ RecordComment("-- Jump to eager deopt");
  __ MacroAssembler::Branch(fail, not_equal, scratch, Operand(out));

  __ bind(&done);

  // Output register must not be a register input into the eager deopt info.
  GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
}
81
void Int32IncrementWithOverflow::SetValueLocationConstraints() {
  // Input in a register; result gets its own register.
  UseRegister(value_input());
  DefineAsRegister(this);
}

// NOTE(review): this extraction dropped the DCHECK_REGLIST_EMPTY line before
// the trailing GetGeneralRegistersUsedAsInputs call — verify upstream.
void Int32IncrementWithOverflow::GenerateCode(MaglevAssembler* masm,
                                              const ProcessingState& state) {
  Register value = ToRegister(value_input());
  Register out = ToRegister(result());

  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register scratch = temps.AcquireScratch();
  __ Add32(scratch, value, Operand(1));

  static_assert(Int32IncrementWithOverflow::kProperties.can_eager_deopt());
  Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kOverflow);
  __ RecordComment("-- Jump to eager deopt");
  // Signed wrap-around: value + 1 < value only when the int32 add overflowed.
  __ MacroAssembler::Branch(fail, less, scratch, Operand(value));
  __ Mv(out, scratch);

  // Output register must not be a register input into the eager deopt info.
  GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
}
106
void Int32DecrementWithOverflow::SetValueLocationConstraints() {
  // Input in a register; result gets its own register.
  UseRegister(value_input());
  DefineAsRegister(this);
}

// NOTE(review): this extraction dropped the DCHECK_REGLIST_EMPTY line before
// the trailing GetGeneralRegistersUsedAsInputs call — verify upstream.
void Int32DecrementWithOverflow::GenerateCode(MaglevAssembler* masm,
                                              const ProcessingState& state) {
  Register value = ToRegister(value_input());
  Register out = ToRegister(result());

  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register scratch = temps.AcquireScratch();
  __ Sub32(scratch, value, Operand(1));

  static_assert(Int32DecrementWithOverflow::kProperties.can_eager_deopt());
  Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kOverflow);
  __ RecordComment("-- Jump to eager deopt");
  // Signed wrap-around: value - 1 > value only when the int32 sub overflowed.
  __ MacroAssembler::Branch(fail, greater, scratch, Operand(value));
  __ Mv(out, scratch);

  // Output register must not be a register input into the eager deopt info.
  GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
}
131
134}
136 if (code_input().node()->Is<Int32Constant>()) {
138 } else {
140 }
141 set_temporaries_needed(2);
142 DefineAsRegister(this);
143}
// BuiltinStringFromCharCode: materialize a one-character string from a char
// code. Constant codes in the one-byte range use the single-character-string
// table; other constants allocate a two-byte string; non-constants go through
// the generic StringFromCharCode helper.
// NOTE(review): this extraction dropped the trailing argument(s) of the
// StringFromCharCode call (embedded line 162) — verify upstream.
void BuiltinStringFromCharCode::GenerateCode(MaglevAssembler* masm,
                                             const ProcessingState& state) {
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register scratch = temps.Acquire();
  Register result_string = ToRegister(result());
  if (Int32Constant* constant = code_input().node()->TryCast<Int32Constant>()) {
    // Char codes are taken modulo 2^16 (String.fromCharCode semantics).
    int32_t char_code = constant->value() & 0xFFFF;
    if (0 <= char_code && char_code < String::kMaxOneByteCharCode) {
      __ LoadSingleCharacterString(result_string, char_code);
    } else {
      __ AllocateTwoByteString(register_snapshot(), result_string, 1);
      __ Move(scratch, char_code);
      // Store the 16-bit char code as the string's single element.
      __ Sh(scratch, FieldMemOperand(result_string,
                                     OFFSET_OF_DATA_START(SeqTwoByteString)));
    }
  } else {
    __ StringFromCharCode(register_snapshot(), nullptr, result_string,
                          ToRegister(code_input()), scratch,
  }
}
165
168 if (offset() == 0) {
169 DefineSameAsFirst(this);
170 } else {
171 DefineAsRegister(this);
172 }
173}
174
// InlinedAllocation: the result points {offset()} bytes into the enclosing
// allocation; with offset 0 the result aliases the input (see the
// DefineSameAsFirst constraint above).
// NOTE(review): this extraction dropped the load of {value} (embedded line
// 178) — verify upstream.
void InlinedAllocation::GenerateCode(MaglevAssembler* masm,
                                     const ProcessingState& state) {
  Register out = ToRegister(result());
  if (offset() != 0) {
    __ AddWord(out, value, Operand(offset()));
  }
}
183
185
// ArgumentsLength: number of actual arguments, excluding the receiver.
// NOTE(review): this extraction dropped the load of the argument count into
// {out} (embedded line 190) — verify upstream.
void ArgumentsLength::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  Register out = ToRegister(result());

  __ Sub64(out, out, Operand(1));  // Remove receiver.
}
193
195
// RestLength: length of the rest parameter = max(0, argc - formal_count - 1),
// returned as a Smi.
// NOTE(review): this extraction dropped the load of the argument count and a
// branch-distance argument (embedded lines 200, 203) — verify upstream.
void RestLength::GenerateCode(MaglevAssembler* masm,
                              const ProcessingState& state) {
  Register length = ToRegister(result());
  Label done;
  __ Sub64(length, length, Operand(formal_parameter_count() + 1));
  __ MacroAssembler::Branch(&done, greater_equal, length, Operand(zero_reg),
  // Clamp negative lengths to zero.
  __ Mv(length, zero_reg);
  __ bind(&done);
  __ UncheckedSmiTagInt32(length);
}
208
209int CheckedObjectToIndex::MaxCallStackArgs() const { return 0; }
210
213 DefineSameAsFirst(this);
214}
215
216void CheckedIntPtrToInt32::GenerateCode(MaglevAssembler* masm,
217 const ProcessingState& state) {
218 Register input_reg = ToRegister(input());
219 __ MacroAssembler::Branch(__ GetDeoptLabel(this, DeoptimizeReason::kNotInt32),
220 gt, input_reg,
221 Operand(std::numeric_limits<int32_t>::max()));
222 __ MacroAssembler::Branch(__ GetDeoptLabel(this, DeoptimizeReason::kNotInt32),
223 lt, input_reg,
224 Operand(std::numeric_limits<int32_t>::min()));
225}
226
void Int32AddWithOverflow::SetValueLocationConstraints() {
  // Both operands in registers; result gets its own register.
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}

// NOTE(review): this extraction dropped the DCHECK_REGLIST_EMPTY line before
// the trailing GetGeneralRegistersUsedAsInputs call — verify upstream.
void Int32AddWithOverflow::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  Register left = ToRegister(left_input());
  Register right = ToRegister(right_input());
  Register out = ToRegister(result());

  static_assert(Int32AddWithOverflow::kProperties.can_eager_deopt());
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register scratch = temps.AcquireScratch();
  Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kOverflow);
  // The 64-bit sum differs from the sign-extended 32-bit sum exactly when the
  // int32 addition overflowed.
  __ Add64(scratch, left, right);
  __ Add32(out, left, right);
  __ RecordComment("-- Jump to eager deopt");
  __ MacroAssembler::Branch(fail, not_equal, scratch, Operand(out));

  // The output register shouldn't be a register input into the eager deopt
  // info.
  GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
}
253
void Int32SubtractWithOverflow::SetValueLocationConstraints() {
  // Both operands in registers; result gets its own register.
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}
// NOTE(review): this extraction dropped the DCHECK_REGLIST_EMPTY line before
// the trailing GetGeneralRegistersUsedAsInputs call — verify upstream.
void Int32SubtractWithOverflow::GenerateCode(MaglevAssembler* masm,
                                             const ProcessingState& state) {
  Register left = ToRegister(left_input());
  Register right = ToRegister(right_input());
  Register out = ToRegister(result());

  static_assert(Int32SubtractWithOverflow::kProperties.can_eager_deopt());
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register scratch = temps.AcquireScratch();
  Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kOverflow);
  // The 64-bit difference differs from the sign-extended 32-bit difference
  // exactly when the int32 subtraction overflowed.
  __ Sub64(scratch, left, right);
  __ Sub32(out, left, right);
  __ RecordComment("-- Jump to eager deopt");
  __ MacroAssembler::Branch(fail, ne, scratch, Operand(out));

  // The output register shouldn't be a register input into the eager deopt
  // info.
  GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
}
279
void Int32MultiplyWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
  // Two temporaries: a non-aliasing result register and an overflow scratch.
  set_temporaries_needed(2);
}
// NOTE(review): this extraction dropped a branch-distance argument and the
// DCHECK_REGLIST_EMPTY line (embedded lines 314, 338) — verify upstream.
void Int32MultiplyWithOverflow::GenerateCode(MaglevAssembler* masm,
                                             const ProcessingState& state) {
  Register left = ToRegister(left_input());
  Register right = ToRegister(right_input());
  Register out = ToRegister(result());

  // TODO(leszeks): peephole optimise multiplication by a constant.

  MaglevAssembler::TemporaryRegisterScope temps(masm);
  // Keep the inputs readable for the -0 check below: compute into a fresh
  // register if the output aliases an input.
  bool out_alias_input = out == left || out == right;
  Register res = out;
  if (out_alias_input) {
    res = temps.Acquire();
  }

  Register scratch = temps.Acquire();
  __ MulOverflow32(res, left, Operand(right), scratch, false);

  static_assert(Int32MultiplyWithOverflow::kProperties.can_eager_deopt());
  // if res != (res[0:31] sign extended to 64 bits), then the multiplication
  // result is too large for 32 bits.
  Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kOverflow);
  __ RecordComment("-- Jump to eager deopt");
  __ MacroAssembler::Branch(fail, ne, scratch, Operand(zero_reg));

  // If the result is zero, check if either lhs or rhs is negative.
  Label end;
  __ MacroAssembler::Branch(&end, ne, res, Operand(zero_reg),
  {
    Register maybeNegative = scratch;
    __ Or(maybeNegative, left, Operand(right));
    // TODO(Vladimir Kempik): consider usage of bexti instruction if Zbs
    // extension is available
    __ And(maybeNegative, maybeNegative, Operand(0x80000000));  // 1 << 31
    // If one of them is negative, we must have a -0 result, which is non-int32,
    // so deopt.
    // TODO(leszeks): Consider splitting these deopts to have distinct deopt
    // reasons. Otherwise, the reason has to match the above.
    __ RecordComment("-- Jump to eager deopt if the result is negative zero");
    Label* deopt_label = __ GetDeoptLabel(this, DeoptimizeReason::kOverflow);
    __ MacroAssembler::Branch(deopt_label, ne, maybeNegative,
                              Operand(zero_reg));
  }

  __ bind(&end);
  if (out_alias_input) {
    __ Move(out, res);
  }

  // The output register shouldn't be a register input into the eager deopt
  // info.
  GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
}
341
void Int32DivideWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
  // Two temporaries: a non-aliasing quotient register and a remainder scratch.
  set_temporaries_needed(2);
}
// NOTE(review): this extraction dropped the DCHECK_REGLIST_EMPTY line
// (embedded line 408) — verify upstream.
void Int32DivideWithOverflow::GenerateCode(MaglevAssembler* masm,
                                           const ProcessingState& state) {
  Register left = ToRegister(left_input());
  Register right = ToRegister(right_input());
  Register out = ToRegister(result());

  // TODO(leszeks): peephole optimise division by a constant.

  static_assert(Int32DivideWithOverflow::kProperties.can_eager_deopt());
  ZoneLabelRef done(masm);
  Label* deferred_overflow_checks = __ MakeDeferredCode(
      [](MaglevAssembler* masm, ZoneLabelRef done, Register left,
         Register right, Int32DivideWithOverflow* node) {
        // {right} is negative or zero.

        // TODO(leszeks): Using kNotInt32 here, but in same places
        // kDivisionByZerokMinusZero/kMinusZero/kOverflow would be better. Right
        // now all eager deopts in a node have to be the same -- we should allow
        // a node to emit multiple eager deopts with different reasons.
        Label* deopt = __ GetDeoptLabel(node, DeoptimizeReason::kNotInt32);

        // Check if {right} is zero.
        __ RecordComment("-- Jump to eager deopt if right is zero");
        __ MacroAssembler::Branch(deopt, eq, right, Operand(zero_reg));

        // Check if {left} is zero, as that would produce minus zero.
        __ RecordComment("-- Jump to eager deopt if left is zero");
        __ MacroAssembler::Branch(deopt, eq, left, Operand(zero_reg));

        // Check if {left} is kMinInt and {right} is -1, in which case we'd have
        // to return -kMinInt, which is not representable as Int32.
        __ MacroAssembler::Branch(*done, ne, left, Operand(kMinInt));
        __ MacroAssembler::Branch(*done, ne, right, Operand(-1));
        __ JumpToDeopt(deopt);
      },
      done, left, right, this);

  // Check if {right} is positive and not zero.
  __ MacroAssembler::Branch(deferred_overflow_checks, less_equal, right,
                            Operand(zero_reg));
  __ bind(*done);

  // Perform the actual integer division.
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  bool out_alias_input = out == left || out == right;
  Register res = out;
  if (out_alias_input) {
    res = temps.Acquire();
  }
  __ Div32(res, left, right);

  // Check that the remainder is zero.
  Register temp = temps.Acquire();
  __ remw(temp, left, right);
  Label* deopt = __ GetDeoptLabel(this, DeoptimizeReason::kNotInt32);
  // NOTE(review): the RecordComment string below is misleading — the branch
  // deopts when the remainder is *non*-zero (the quotient is not an int32).
  __ RecordComment("-- Jump to eager deopt if remainder is zero");
  __ MacroAssembler::Branch(deopt, ne, temp, Operand(zero_reg));

  // The output register shouldn't be a register input into the eager deopt
  // info.
  GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ Move(out, res);
}
412
void Int32ModulusWithOverflow::SetValueLocationConstraints() {
  // Both inputs may be clobbered by the deferred paths below.
  UseAndClobberRegister(left_input());
  UseAndClobberRegister(right_input());
  DefineAsRegister(this);
  set_temporaries_needed(1);
}
// NOTE(review): this extraction dropped the jump over the generic path after
// the power-of-two case (embedded line 505, presumably a Branch(*done)) —
// verify upstream.
void Int32ModulusWithOverflow::GenerateCode(MaglevAssembler* masm,
                                            const ProcessingState& state) {
  // If AreAliased(lhs, rhs):
  //   deopt if lhs < 0 // Minus zero.
  //   0
  //
  // Using same algorithm as in EffectControlLinearizer:
  //   if rhs <= 0 then
  //     rhs = -rhs
  //     deopt if rhs == 0
  //   if lhs < 0 then
  //     let lhs_abs = -lhs in
  //     let res = lhs_abs % rhs in
  //     deopt if res == 0
  //     -res
  //   else
  //     let msk = rhs - 1 in
  //     if rhs & msk == 0 then
  //       lhs & msk
  //     else
  //       lhs % rhs

  Register lhs = ToRegister(left_input());
  Register rhs = ToRegister(right_input());
  Register out = ToRegister(result());

  static_assert(Int32ModulusWithOverflow::kProperties.can_eager_deopt());
  static constexpr DeoptimizeReason deopt_reason =
      DeoptimizeReason::kDivisionByZero;

  // For the modulus algorithm described above, lhs and rhs must not alias
  // each other.
  if (lhs == rhs) {
    // TODO(victorgomes): This ideally should be kMinusZero, but Maglev only
    // allows one deopt reason per IR.
    Label* deopt = __ GetDeoptLabel(this, DeoptimizeReason::kDivisionByZero);
    __ RecordComment("-- Jump to eager deopt");
    __ MacroAssembler::Branch(deopt, less, lhs, Operand(zero_reg));
    // x % x == 0 for any non-negative x.
    __ Move(out, zero_reg);
    return;
  }

  DCHECK(!AreAliased(lhs, rhs));

  ZoneLabelRef done(masm);
  ZoneLabelRef rhs_checked(masm);

  // Deferred: rhs <= 0 — take |rhs|, deopting when it was zero.
  Label* deferred_rhs_check = __ MakeDeferredCode(
      [](MaglevAssembler* masm, ZoneLabelRef rhs_checked, Register rhs,
         Int32ModulusWithOverflow* node) {
        __ negw(rhs, rhs);
        __ MacroAssembler::Branch(*rhs_checked, ne, rhs, Operand(zero_reg));
        __ EmitEagerDeopt(node, deopt_reason);
      },
      rhs_checked, rhs, this);
  __ MacroAssembler::Branch(deferred_rhs_check, less_equal, rhs,
                            Operand(zero_reg));
  __ bind(*rhs_checked);

  // Deferred: lhs < 0 — compute -(|lhs| % rhs), deopting on a -0 result.
  Label* deferred_lhs_check = __ MakeDeferredCode(
      [](MaglevAssembler* masm, ZoneLabelRef done, Register lhs, Register rhs,
         Register out, Int32ModulusWithOverflow* node) {
        MaglevAssembler::TemporaryRegisterScope temps(masm);
        Register lhs_abs = temps.AcquireScratch();
        __ negw(lhs_abs, lhs);
        Register res = lhs_abs;
        __ remw(res, lhs_abs, rhs);
        __ negw(out, res);
        __ MacroAssembler::Branch(*done, ne, res, Operand(zero_reg));
        // TODO(victorgomes): This ideally should be kMinusZero, but Maglev
        // only allows one deopt reason per IR.
        __ EmitEagerDeopt(node, deopt_reason);
      },
      done, lhs, rhs, out, this);
  __ MacroAssembler::Branch(deferred_lhs_check, less, lhs, Operand(zero_reg));

  // Fast path: lhs >= 0, rhs > 0. Power-of-two divisors reduce to a mask.
  Label rhs_not_power_of_2;
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register scratch = temps.AcquireScratch();
  Register msk = temps.AcquireScratch();
  __ Sub32(msk, rhs, Operand(1));
  __ And(scratch, rhs, msk);
  __ MacroAssembler::Branch(&rhs_not_power_of_2, not_equal, scratch,
                            Operand(zero_reg), Label::kNear);
  // {rhs} is power of 2.
  __ And(out, lhs, msk);

  __ bind(&rhs_not_power_of_2);
  __ remw(out, lhs, rhs);

  __ bind(*done);
}
512
// Defines SetValueLocationConstraints/GenerateCode for a 32-bit bitwise
// binop node: both operands in registers, the result computed with {opcode}
// and then zero-extended to the full machine word.
#define DEF_BITWISE_BINOP(Instruction, opcode) \
  void Instruction::SetValueLocationConstraints() { \
    UseRegister(left_input()); \
    UseRegister(right_input()); \
    DefineAsRegister(this); \
  } \
 \
  void Instruction::GenerateCode(MaglevAssembler* masm, \
                                 const ProcessingState& state) { \
    Register lhs = ToRegister(left_input()); \
    Register rhs = ToRegister(right_input()); \
    Register out = ToRegister(result()); \
    __ opcode(out, lhs, Operand(rhs)); \
    /* TODO: is zero extension really needed here? */ \
    __ ZeroExtendWord(out, out); \
  }
DEF_BITWISE_BINOP(Int32BitwiseAnd, And)
DEF_BITWISE_BINOP(Int32BitwiseOr, Or)
DEF_BITWISE_BINOP(Int32BitwiseXor, Xor)
#undef DEF_BITWISE_BINOP
533
// Defines SetValueLocationConstraints/GenerateCode for a 32-bit shift node.
// A constant shift amount is folded at compile time (masked to 5 bits, per JS
// shift semantics); a zero shift still zero-extends the left operand so the
// register state matches the non-constant path.
#define DEF_SHIFT_BINOP(Instruction, opcode) \
  void Instruction::SetValueLocationConstraints() { \
    UseRegister(left_input()); \
    if (right_input().node()->Is<Int32Constant>()) { \
      UseAny(right_input()); \
    } else { \
      UseRegister(right_input()); \
    } \
    DefineAsRegister(this); \
  } \
 \
  void Instruction::GenerateCode(MaglevAssembler* masm, \
                                 const ProcessingState& state) { \
    Register out = ToRegister(result()); \
    Register lhs = ToRegister(left_input()); \
    if (Int32Constant* constant = \
            right_input().node()->TryCast<Int32Constant>()) { \
      uint32_t shift = constant->value() & 31; \
      if (shift == 0) { \
        __ ZeroExtendWord(out, lhs); \
        return; \
      } \
      __ opcode(out, lhs, Operand(shift)); \
    } else { \
      Register rhs = ToRegister(right_input()); \
      __ opcode(out, lhs, Operand(rhs)); \
    } \
  }
DEF_SHIFT_BINOP(Int32ShiftLeft, Sll32)
DEF_SHIFT_BINOP(Int32ShiftRight, Sra32)
DEF_SHIFT_BINOP(Int32ShiftRightLogical, Srl32)
#undef DEF_SHIFT_BINOP
566
569 DefineAsRegister(this);
570}
571
// Int32BitwiseNot: bitwise complement, zero-extended to the machine word.
// NOTE(review): this extraction dropped the load of {value} (embedded line
// 574) — verify upstream.
void Int32BitwiseNot::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  Register out = ToRegister(result());
  __ not_(out, value);
  __ ZeroExtendWord(out, out);  // TODO(Yuri Gaevsky): is it really needed?
}
579
void Float64Add::SetValueLocationConstraints() {
  // Both operands in double registers; result gets its own double register.
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}

// NOTE(review): this extraction dropped the load of {out} from result()
// (embedded line 590) — verify upstream.
void Float64Add::GenerateCode(MaglevAssembler* masm,
                              const ProcessingState& state) {
  DoubleRegister left = ToDoubleRegister(left_input());
  DoubleRegister right = ToDoubleRegister(right_input());
  __ fadd_d(out, left, right);
}
593
void Float64Subtract::SetValueLocationConstraints() {
  // Both operands in double registers; result gets its own double register.
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}

// NOTE(review): this extraction dropped the load of {out} from result()
// (embedded line 604) — verify upstream.
void Float64Subtract::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  DoubleRegister left = ToDoubleRegister(left_input());
  DoubleRegister right = ToDoubleRegister(right_input());
  __ fsub_d(out, left, right);
}
607
void Float64Multiply::SetValueLocationConstraints() {
  // Both operands in double registers; result gets its own double register.
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}

// NOTE(review): this extraction dropped the load of {out} from result()
// (embedded line 618) — verify upstream.
void Float64Multiply::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  DoubleRegister left = ToDoubleRegister(left_input());
  DoubleRegister right = ToDoubleRegister(right_input());
  __ fmul_d(out, left, right);
}
621
void Float64Divide::SetValueLocationConstraints() {
  // Both operands in double registers; result gets its own double register.
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}

// NOTE(review): this extraction dropped the load of {out} from result()
// (embedded line 632) — verify upstream.
void Float64Divide::GenerateCode(MaglevAssembler* masm,
                                 const ProcessingState& state) {
  DoubleRegister left = ToDoubleRegister(left_input());
  DoubleRegister right = ToDoubleRegister(right_input());
  __ fdiv_d(out, left, right);
}
635
// Float64Modulus is implemented via an external C call; no JS stack
// arguments are pushed.
int Float64Modulus::MaxCallStackArgs() const { return 0; }
void Float64Modulus::SetValueLocationConstraints() {
  // Pin the inputs to the C calling convention's first two FP argument
  // registers; the result is defined in the same register as the first input.
  UseFixed(left_input(), fa0);
  UseFixed(right_input(), fa1);
  DefineSameAsFirst(this);
}
void Float64Modulus::GenerateCode(MaglevAssembler* masm,
                                  const ProcessingState& state) {
  AllowExternalCallThatCantCauseGC scope(masm);
  // 0 general-purpose and 2 double arguments.
  __ PrepareCallCFunction(0, 2);
  __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
}
648
651 DefineAsRegister(this);
652}
// Float64Negate: sign flip of a double.
// NOTE(review): this extraction dropped the loads of {value} and {out}
// (embedded lines 655-656) — verify upstream.
void Float64Negate::GenerateCode(MaglevAssembler* masm,
                                 const ProcessingState& state) {
  __ fneg_d(out, value);
}
659
// Float64Abs: absolute value of a double.
// NOTE(review): this extraction dropped the loads of {in} and {out}
// (embedded lines 662-663) — verify upstream.
void Float64Abs::GenerateCode(MaglevAssembler* masm,
                              const ProcessingState& state) {
  __ fabs_d(out, in);
}
666
// Float64Round: rounds a double per the node's Kind (kNearest / kCeil /
// kFloor).
// NOTE(review): this extraction dropped the loads of {in} and {out}
// (embedded lines 669-670) — verify upstream.
void Float64Round::GenerateCode(MaglevAssembler* masm,
                                const ProcessingState& state) {
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  DoubleRegister fscratch1 = temps.AcquireScratchDouble();

  if (kind_ == Kind::kNearest) {
    // RISC-V Rounding Mode RNE means "Round to Nearest, ties to Even", while JS
    // expects it to round towards +Infinity (see ECMA-262, 20.2.2.28).
    // The best seems to be to add 0.5 then round with RDN mode.

    DoubleRegister half_one = temps.AcquireDouble();  // available in this mode
    __ LoadFPRImmediate(half_one, 0.5);
    DoubleRegister tmp = half_one;
    __ fadd_d(tmp, in, half_one);
    __ Floor_d_d(out, tmp, fscratch1);
    // Copy the input's sign back onto the result (e.g. so small negative
    // inputs round to -0, not +0).
    __ fsgnj_d(out, out, in);
  } else if (kind_ == Kind::kCeil) {
    __ Ceil_d_d(out, in, fscratch1);
  } else if (kind_ == Kind::kFloor) {
    __ Floor_d_d(out, in, fscratch1);
  } else {
    UNREACHABLE();
  }
}
693
// Float64Exponentiate calls out to the ieee754 pow C function; no JS stack
// arguments are pushed.
int Float64Exponentiate::MaxCallStackArgs() const { return 0; }
void Float64Exponentiate::SetValueLocationConstraints() {
  // Base and exponent pinned to the first two FP argument registers; the
  // result is defined in the same register as the first input.
  UseFixed(left_input(), fa0);
  UseFixed(right_input(), fa1);
  DefineSameAsFirst(this);
}
void Float64Exponentiate::GenerateCode(MaglevAssembler* masm,
                                       const ProcessingState& state) {
  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(0, 2);
  // NOTE(review): unlike Float64Modulus above, this passes a single argument
  // count (2) to CallCFunction — confirm the intended overload is used.
  __ CallCFunction(ExternalReference::ieee754_pow_function(), 2);
}
706
// Float64Ieee754Unary calls out to a libm-style C function selected by
// ieee_function_ref(); no JS stack arguments are pushed.
int Float64Ieee754Unary::MaxCallStackArgs() const { return 0; }
// NOTE(review): the signature line of SetValueLocationConstraints was lost
// in this extraction (embedded line 708) — verify upstream.
  UseFixed(input(), fa0);
  DefineSameAsFirst(this);
}
void Float64Ieee754Unary::GenerateCode(MaglevAssembler* masm,
                                       const ProcessingState& state) {
  AllowExternalCallThatCantCauseGC scope(masm);
  // 0 general-purpose and 1 double argument.
  __ PrepareCallCFunction(0, 1);
  __ CallCFunction(ieee_function_ref(), 1);
}
718
721 DefineAsRegister(this);
722}
// LoadTypedArrayLength: load the bounded byte length of a JSTypedArray and
// convert it to an element count by shifting by the element-size log2.
// NOTE(review): this extraction dropped the load of {object} (embedded line
// 725) — verify upstream.
void LoadTypedArrayLength::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  Register result_register = ToRegister(result());
  if (v8_flags.debug_code) {
    __ AssertObjectType(object, JS_TYPED_ARRAY_TYPE,
                        AbortReason::kUnexpectedValue);
  }
  __ LoadBoundedSizeFromObject(result_register, object,
                               JSTypedArray::kRawByteLengthOffset);
  int shift_size = ElementsKindToShiftSize(elements_kind_);
  if (shift_size > 0) {
    // TODO(leszeks): Merge this shift with the one in LoadBoundedSize.
    DCHECK(shift_size == 1 || shift_size == 2 || shift_size == 3);
    __ SrlWord(result_register, result_register, Operand(shift_size));
  }
}
740
int CheckJSDataViewBounds::MaxCallStackArgs() const { return 1; }
// NOTE(review): the first lines of SetValueLocationConstraints were lost in
// this extraction (embedded lines 742-744) — verify upstream.
  set_temporaries_needed(1);
}
// Deopts with kOutOfBounds unless an {element_size}-byte access at {index}
// fits inside the DataView's byte length.
// NOTE(review): this extraction also dropped the loads of {object}/{index},
// the {element_size} computation, and two branch-distance arguments
// (embedded lines 750-751, 762, 767, 771) — verify upstream.
void CheckJSDataViewBounds::GenerateCode(MaglevAssembler* masm,
                                         const ProcessingState& state) {
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  if (v8_flags.debug_code) {
    __ AssertObjectType(object, JS_DATA_VIEW_TYPE,
                        AbortReason::kUnexpectedValue);
  }

  // Normal DataView (backed by AB / SAB) or non-length tracking backed by GSAB.
  Register byte_length = temps.Acquire();
  __ LoadBoundedSizeFromObject(byte_length, object,
                               JSDataView::kRawByteLengthOffset);

  Label ok;
  if (element_size > 1) {
    // Shrink the usable length so that index + element_size stays in bounds.
    __ SubWord(byte_length, byte_length, Operand(element_size - 1));
    __ MacroAssembler::Branch(&ok, ge, byte_length, Operand(zero_reg),
    __ EmitEagerDeopt(this, DeoptimizeReason::kOutOfBounds);
  }
  __ MacroAssembler::Branch(&ok, ult, index, Operand(byte_length),
  __ EmitEagerDeopt(this, DeoptimizeReason::kOutOfBounds);

  __ bind(&ok);
}
776
779 DefineAsRegister(this);
780}
781void HoleyFloat64ToMaybeNanFloat64::GenerateCode(MaglevAssembler* masm,
782 const ProcessingState& state) {
783 // The hole value is a signalling NaN, so just silence it to get the float64
784 // value.
785 __ FPUCanonicalizeNaN(ToDoubleRegister(result()), ToDoubleRegister(input()));
786}
787
788namespace {
789
790enum class ReduceInterruptBudgetType { kLoop, kReturn };
791
// Deferred slow path shared by the ReduceInterruptBudget* nodes. For loop
// back-edges it first checks the interrupt stack limit and, if an interrupt
// is pending, calls the runtime entry that combines the stack check with the
// tiering-manager notification; otherwise (and always for returns) it calls
// the plain budget-interrupt runtime entry. {scratch0} is clobbered.
// NOTE(review): this extraction dropped a few lines (a branch-distance
// argument and the MemOperands of the two LoadWord calls; embedded lines
// 805, 816, 834, 843) — verify upstream.
void HandleInterruptsAndTiering(MaglevAssembler* masm, ZoneLabelRef done,
                                Node* node, ReduceInterruptBudgetType type,
                                Register scratch0) {
  // For loops, first check for interrupts. Don't do this for returns, as we
  // can't lazy deopt to the end of a return.
  if (type == ReduceInterruptBudgetType::kLoop) {
    Label next;
    // Here, we only care about interrupts since we've already guarded against
    // real stack overflows on function entry.
    {
      Register stack_limit = scratch0;
      __ LoadStackLimit(stack_limit, StackLimitKind::kInterruptStackLimit);
      __ MacroAssembler::Branch(&next, ugt, sp, Operand(stack_limit),
    }

    // An interrupt has been requested and we must call into runtime to handle
    // it; since we already pay the call cost, combine with the TieringManager
    // call.
    {
      SaveRegisterStateForCall save_register_state(masm,
                                                   node->register_snapshot());
      Register function = scratch0;
      __ LoadWord(function,
      __ Push(function);
      // Move into kContextRegister after the load into scratch0, just in case
      // scratch0 happens to be kContextRegister.
      __ Move(kContextRegister, masm->native_context().object());
      __ CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheck_Maglev, 1);
      save_register_state.DefineSafepointWithLazyDeopt(node->lazy_deopt_info());
    }
    __ MacroAssembler::Branch(*done);  // All done, continue.
    __ bind(&next);
  }

  // No pending interrupts. Call into the TieringManager if needed.
  {
    SaveRegisterStateForCall save_register_state(masm,
                                                 node->register_snapshot());
    Register function = scratch0;
    __ LoadWord(function,
    __ Push(function);
    // Move into kContextRegister after the load into scratch0, just in case
    // scratch0 happens to be kContextRegister.
    __ Move(kContextRegister, masm->native_context().object());
    // Note: must not cause a lazy deopt!
    __ CallRuntime(Runtime::kBytecodeBudgetInterrupt_Maglev, 1);
    save_register_state.DefineSafepoint();
  }
}
845
846void GenerateReduceInterruptBudget(MaglevAssembler* masm, Node* node,
847 Register feedback_cell,
848 ReduceInterruptBudgetType type, int amount) {
849 MaglevAssembler::TemporaryRegisterScope temps(masm);
850 Register scratch = temps.Acquire();
851 Register budget = scratch;
852
853 __ Lw(budget,
854 FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
855 __ Sub32(budget, budget, Operand(amount));
856 __ Sw(budget,
857 FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
858
859 ZoneLabelRef done(masm);
860 Label* deferred_code = __ MakeDeferredCode(
861 [](MaglevAssembler* masm, ZoneLabelRef done, Node* node,
862 ReduceInterruptBudgetType type, Register scratch) {
863 HandleInterruptsAndTiering(masm, done, node, type, scratch);
864 },
865 done, node, type, scratch);
866 __ MacroAssembler::Branch(deferred_code, lt, budget, Operand(zero_reg));
867
868 __ bind(*done);
869}
870
871} // namespace
872
876 set_temporaries_needed(1);
877}
void ReduceInterruptBudgetForLoop::GenerateCode(MaglevAssembler* masm,
                                                const ProcessingState& state) {
  // Loop back-edges use the kLoop flavor, which also checks for pending
  // interrupts (see HandleInterruptsAndTiering).
  GenerateReduceInterruptBudget(masm, this, ToRegister(feedback_cell()),
                                ReduceInterruptBudgetType::kLoop, amount());
}
883
887 set_temporaries_needed(1);
888}
890 MaglevAssembler* masm, const ProcessingState& state) {
891 GenerateReduceInterruptBudget(masm, this, ToRegister(feedback_cell()),
892 ReduceInterruptBudgetType::kReturn, amount());
893}
894
895// ---
896// Control nodes
897// ---
900}
901
// Return: leave the Maglev frame and drop receiver + arguments, using the
// larger of the formal and actual parameter counts.
// NOTE(review): this extraction dropped a few lines (embedded lines 903, 919,
// 929 — likely the LoadWord operand and a branch-distance argument) — verify
// upstream.
void Return::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
  // Read the formal number of parameters from the top level compilation unit
  // (i.e. the outermost, non inlined function).
  int formal_params_size =
      masm->compilation_info()->toplevel_compilation_unit()->parameter_count();

  // We're not going to continue execution, so we can use an arbitrary register
  // here instead of relying on temporaries from the register allocator.
  // We cannot use scratch registers, since they're used in LeaveFrame and
  // DropArguments.
  Register actual_params_size = a5;

  // Compute the size of the actual parameters + receiver (in bytes).
  // TODO(leszeks): Consider making this an input into Return to reuse the
  // incoming argc's register (if it's still valid).
  __ LoadWord(actual_params_size,

  // Leave the frame.
  __ LeaveFrame(StackFrame::MAGLEV);

  // If actual is bigger than formal, then we should use it to free up the stack
  // arguments.
  Label corrected_args_count;
  __ MacroAssembler::Branch(&corrected_args_count, gt, actual_params_size,
                            Operand(formal_params_size),
  __ Move(actual_params_size, formal_params_size);

  __ bind(&corrected_args_count);
  // Drop receiver + arguments according to dynamic arguments size.
  __ DropArguments(actual_params_size);
  __ Ret();
}
937
938} // namespace maglev
939} // namespace internal
940} // namespace v8
void Branch(Label *label, bool need_link=false)
static const int32_t kMaxOneByteCharCode
Definition string.h:500
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
ExternalReference ieee_function_ref() const
Definition maglev-ir.cc:502
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
static constexpr OpProperties kProperties
Definition maglev-ir.h:4203
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
constexpr bool can_eager_deopt() const
Definition maglev-ir.h:1018
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
void GenerateCode(MaglevAssembler *, const ProcessingState &)
int end
Node * node
ZoneVector< RpoNumber > & result
#define DEF_SHIFT_BINOP(Instruction, opcode)
#define DEF_BITWISE_BINOP(Instruction, opcode)
#define DCHECK_REGLIST_EMPTY(...)
int int32_t
Definition unicode.cc:40
int ExternalArrayElementSize(const ExternalArrayType element_type)
Definition globals.h:156
void DefineAsRegister(Node *node)
DoubleRegister ToDoubleRegister(const compiler::InstructionOperand &operand)
void DefineSameAsFirst(Node *node)
Register ToRegister(const compiler::InstructionOperand &operand)
void UseAndClobberRegister(Input &input)
void UseAny(Input &input)
void UseRegister(Input &input)
void UseFixed(Input &input, Register reg)
void Or(LiftoffAssembler *lasm, Register dst, Register lhs, Register rhs)
void And(LiftoffAssembler *lasm, Register dst, Register lhs, Register rhs)
constexpr int kMinInt
Definition globals.h:375
bool TryCast(Tagged< From > value, Tagged< To > *out)
Definition casting.h:77
bool Is(IndirectHandle< U > value)
Definition handles-inl.h:51
DwVfpRegister DoubleRegister
RegListBase< Register > RegList
Definition reglist-arm.h:14
MemOperand FieldMemOperand(Register object, int offset)
constexpr int ElementsKindToShiftSize(ElementsKind elements_kind)
constexpr Register kReturnRegister0
constexpr Register kContextRegister
V8_EXPORT_PRIVATE bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
V8_EXPORT_PRIVATE FlagValues v8_flags
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define OFFSET_OF_DATA_START(Type)
wasm::ValueType type