v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
maglev-ir-s390.cc
1// Copyright 2023 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/base/logging.h"
14
15namespace v8 {
16namespace internal {
17namespace maglev {
18
19#define __ masm->
20
21void Int32NegateWithOverflow::SetValueLocationConstraints() {
22 UseRegister(value_input());
23 DefineAsRegister(this);
24}
25
26void Int32NegateWithOverflow::GenerateCode(MaglevAssembler* masm,
27 const ProcessingState& state) {
28 Register value = ToRegister(value_input());
29 Register out = ToRegister(result());
30
31 // Deopt when result would be -0.
32 __ CmpS32(value, Operand(0));
33 __ EmitEagerDeoptIf(eq, DeoptimizeReason::kOverflow, this);
34
35 __ lcr(out, value);
36 __ LoadS32(out, out);
37
38 // Output register must not be a register input into the eager deopt info.
39 DCHECK_REGLIST_EMPTY(RegList{out} &
40 GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
41 __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
42}
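As a reading aid (not part of the V8 source), the int32 semantics this node guards for can be sketched in plain C++; the helper name below is invented for illustration.

#include <cstdint>
#include <optional>

// Returns nullopt exactly where the Maglev node would deopt.
std::optional<int32_t> Int32NegateSketch(int32_t value) {
  if (value == 0) return std::nullopt;          // -0 is not an int32.
  if (value == INT32_MIN) return std::nullopt;  // Negating kMinInt overflows.
  return -value;
}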
43
44void Int32AbsWithOverflow::GenerateCode(MaglevAssembler* masm,
45 const ProcessingState& state) {
46 Register out = ToRegister(result());
47 __ lpr(out, out);
48 // Output register must not be a register input into the eager deopt info.
49 DCHECK_REGLIST_EMPTY(RegList{out} &
50 GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
51 __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
52 __ lgfr(out, out);
53}
54
55void Int32IncrementWithOverflow::SetValueLocationConstraints() {
56 UseRegister(value_input());
57 DefineAsRegister(this);
58}
59
60void Int32IncrementWithOverflow::GenerateCode(MaglevAssembler* masm,
61 const ProcessingState& state) {
62 Register value = ToRegister(value_input());
63 Register out = ToRegister(result());
64 __ AddS32(out, value, Operand(1));
65 __ LoadS32(out, out);
66
67 // Output register must not be a register input into the eager deopt info.
68 DCHECK_REGLIST_EMPTY(RegList{out} &
69 GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
70 __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
71}
72
73void Int32DecrementWithOverflow::SetValueLocationConstraints() {
74 UseRegister(value_input());
75 DefineAsRegister(this);
76}
77
78void Int32DecrementWithOverflow::GenerateCode(MaglevAssembler* masm,
79 const ProcessingState& state) {
80 Register value = ToRegister(value_input());
81 Register out = ToRegister(result());
82 __ AddS32(out, value, Operand(-1));
83 __ LoadS32(out, out);
84
85 // Output register must not be a register input into the eager deopt info.
86 DCHECK_REGLIST_EMPTY(RegList{out} &
87 GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
88 __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
89}
90
93}
97 } else {
99 }
100 set_temporaries_needed(1);
101 DefineAsRegister(this);
102}
103void BuiltinStringFromCharCode::GenerateCode(MaglevAssembler* masm,
104 const ProcessingState& state) {
105 MaglevAssembler::TemporaryRegisterScope temps(masm);
106 Register scratch = temps.AcquireScratch();
107 Register result_string = ToRegister(result());
108 if (Int32Constant* constant = code_input().node()->TryCast<Int32Constant>()) {
109 int32_t char_code = constant->value() & 0xFFFF;
110 if (0 <= char_code && char_code < String::kMaxOneByteCharCode) {
111 __ LoadSingleCharacterString(result_string, char_code);
112 } else {
113 // Ensure that {result_string} never aliases {scratch}, otherwise the
114 // store will fail.
115 bool reallocate_result = (scratch == result_string);
116 if (reallocate_result) {
117 result_string = temps.AcquireScratch();
118 }
119 DCHECK(scratch != result_string);
120 __ AllocateTwoByteString(register_snapshot(), result_string, 1);
121 __ Move(scratch, char_code);
122 __ StoreU16(scratch,
123 FieldMemOperand(result_string,
124 OFFSET_OF_DATA_START(SeqTwoByteString)));
125 if (reallocate_result) {
126 __ Move(ToRegister(result()), result_string);
127 }
128 }
129 } else {
130 __ StringFromCharCode(register_snapshot(), nullptr, result_string,
131 ToRegister(code_input()), scratch,
132 MaglevAssembler::CharCodeMaskMode::kMustApplyMask);
133 }
134}
135
138 if (offset() == 0) {
139 DefineSameAsFirst(this);
140 } else {
141 DefineAsRegister(this);
142 }
143}
144
145void InlinedAllocation::GenerateCode(MaglevAssembler* masm,
146 const ProcessingState& state) {
147 if (offset() != 0) {
148 __ lay(ToRegister(result()),
150 }
151}
152
153void ArgumentsLength::SetValueLocationConstraints() { DefineAsRegister(this); }
154
155void ArgumentsLength::GenerateCode(MaglevAssembler* masm,
156 const ProcessingState& state) {
157 Register argc = ToRegister(result());
158 __ LoadU64(argc, MemOperand(fp, StandardFrameConstants::kArgCOffset));
159 __ SubS64(argc, Operand(1)); // Remove receiver.
160}
161
162void RestLength::SetValueLocationConstraints() { DefineAsRegister(this); }
163
164void RestLength::GenerateCode(MaglevAssembler* masm,
165 const ProcessingState& state) {
166 Register length = ToRegister(result());
167 Label done;
168 __ LoadU64(length, MemOperand(fp, StandardFrameConstants::kArgCOffset));
169 __ SubS32(length, Operand(formal_parameter_count() + 1));
170 __ bge(&done);
171 __ Move(length, 0);
172 __ bind(&done);
173 __ UncheckedSmiTagInt32(length);
174}
175
176int CheckedObjectToIndex::MaxCallStackArgs() const { return 0; }
177
178void CheckedIntPtrToInt32::SetValueLocationConstraints() {
179 UseRegister(input());
180 DefineSameAsFirst(this);
181}
182
183void CheckedIntPtrToInt32::GenerateCode(MaglevAssembler* masm,
184 const ProcessingState& state) {
185 Register input_reg = ToRegister(input());
186 Label* deopt = __ GetDeoptLabel(this, DeoptimizeReason::kNotInt32);
187
188 __ CmpS64(input_reg, Operand(std::numeric_limits<int32_t>::max()));
189 __ bgt(deopt);
190 __ CmpS64(input_reg, Operand(std::numeric_limits<int32_t>::min()));
191 __ blt(deopt);
192}
193
194void CheckFloat64SameValue::SetValueLocationConstraints() {
195 UseRegister(target_input());
196 set_temporaries_needed((value().get_scalar() == 0) ? 1 : 0);
197 set_double_temporaries_needed(value().is_nan() ? 0 : 1);
198}
199void CheckFloat64SameValue::GenerateCode(MaglevAssembler* masm,
200 const ProcessingState& state) {
201 Label* fail = __ GetDeoptLabel(this, deoptimize_reason());
202 MaglevAssembler::TemporaryRegisterScope temps(masm);
203 DoubleRegister double_scratch = temps.AcquireScratchDouble();
204 DoubleRegister target = ToDoubleRegister(target_input());
205 if (value().is_nan()) {
206 __ JumpIfNotNan(target, fail);
207 } else {
208 __ Move(double_scratch, value());
209 __ CompareFloat64AndJumpIf(double_scratch, target, kNotEqual, fail, fail);
210 if (value().get_scalar() == 0) { // If value is +0.0 or -0.0.
211 Register scratch = temps.AcquireScratch();
212 __ MovDoubleToInt64(scratch, target);
213 __ CmpU64(scratch, Operand(0));
214 __ JumpIf(value().get_bits() == 0 ? kNotEqual : kEqual, fail);
215 }
216 }
217}
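A brief sketch (not part of this file) of the SameValue-style comparison the node performs; the helper name is invented, and the bit comparison is what distinguishes +0.0 from -0.0.

#include <cmath>
#include <cstdint>
#include <cstring>

bool Float64SameValueSketch(double expected, double actual) {
  if (std::isnan(expected)) return std::isnan(actual);
  if (expected != actual) return false;
  if (expected == 0.0) {  // Both are zero; compare bits to tell +0.0 from -0.0.
    uint64_t expected_bits, actual_bits;
    std::memcpy(&expected_bits, &expected, sizeof(expected_bits));
    std::memcpy(&actual_bits, &actual, sizeof(actual_bits));
    return expected_bits == actual_bits;
  }
  return true;
}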
218
219void Int32AddWithOverflow::SetValueLocationConstraints() {
220 UseRegister(left_input());
221 UseRegister(right_input());
222 DefineAsRegister(this);
223}
224
225void Int32AddWithOverflow::GenerateCode(MaglevAssembler* masm,
226 const ProcessingState& state) {
227 Register left = ToRegister(left_input());
228 Register right = ToRegister(right_input());
229 Register out = ToRegister(result());
230 __ AddS32(out, left, right);
231 __ LoadS32(out, out);
232 // The output register shouldn't be a register input into the eager deopt
233 // info.
234 DCHECK_REGLIST_EMPTY(RegList{out} &
235 GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
236 __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
237}
238
239void Int32SubtractWithOverflow::SetValueLocationConstraints() {
240 UseRegister(left_input());
241 UseRegister(right_input());
242 DefineAsRegister(this);
243}
244void Int32SubtractWithOverflow::GenerateCode(MaglevAssembler* masm,
245 const ProcessingState& state) {
246 Register left = ToRegister(left_input());
247 Register right = ToRegister(right_input());
248 Register out = ToRegister(result());
249 __ SubS32(out, left, right);
250 __ LoadS32(out, out);
251 // The output register shouldn't be a register input into the eager deopt
252 // info.
253 DCHECK_REGLIST_EMPTY(RegList{out} &
254 GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
255 __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
256}
257
258void Int32MultiplyWithOverflow::SetValueLocationConstraints() {
259 UseRegister(left_input());
260 UseRegister(right_input());
261 DefineAsRegister(this);
262 set_temporaries_needed(1);
263}
264void Int32MultiplyWithOverflow::GenerateCode(MaglevAssembler* masm,
265 const ProcessingState& state) {
266 Register left = ToRegister(left_input());
267 Register right = ToRegister(right_input());
268 Register out = ToRegister(result());
269
270 // TODO(leszeks): peephole optimise multiplication by a constant.
271
272 MaglevAssembler::TemporaryRegisterScope temps(masm);
273 Register temp = temps.AcquireScratch();
274 Condition cond = overflow;
275 if (!CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
276 DCHECK(!AreAliased(r0, temp));
277 __ lgfr(r0, left);
278 __ lgfr(temp, right);
279 __ MulS64(r0, temp);
280 }
281 __ Or(temp, left, right);
282 __ MulS32(out, left, right);
283 __ LoadS32(out, out);
284 if (!CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
285 // Test whether {high} is a sign-extension of {result}.
286 __ CmpU64(r0, out);
287 cond = ne;
288 }
289 DCHECK_REGLIST_EMPTY(RegList{temp, out} &
290 GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
291 __ EmitEagerDeoptIf(cond, DeoptimizeReason::kOverflow, this);
292
293 // If the result is zero, check if either lhs or rhs is negative.
294 Label end;
295 __ CmpS32(out, Operand::Zero());
296 __ bne(&end);
297 __ CmpS32(temp, Operand::Zero());
298 // If one of them is negative, we must have a -0 result, which is non-int32,
299 // so deopt.
300 __ EmitEagerDeoptIf(lt, DeoptimizeReason::kOverflow, this);
301
302 __ bind(&end);
303}
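For orientation (not part of the file), the overflow and minus-zero rules enforced above in scalar form; the helper name is invented.

#include <cstdint>
#include <optional>

std::optional<int32_t> Int32MultiplySketch(int32_t left, int32_t right) {
  int64_t wide = static_cast<int64_t>(left) * static_cast<int64_t>(right);
  if (wide != static_cast<int32_t>(wide)) return std::nullopt;  // Overflow.
  if (wide == 0 && (left | right) < 0) return std::nullopt;     // Result is -0.
  return static_cast<int32_t>(wide);
}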
304
305void Int32DivideWithOverflow::SetValueLocationConstraints() {
306 UseRegister(left_input());
307 UseRegister(right_input());
308 DefineAsRegister(this);
309}
310void Int32DivideWithOverflow::GenerateCode(MaglevAssembler* masm,
311 const ProcessingState& state) {
312 Register left = ToRegister(left_input());
313 Register right = ToRegister(right_input());
314 Register out = ToRegister(result());
315
316 // TODO(leszeks): peephole optimise division by a constant.
317
318 // Pre-check for overflow, since idiv throws a division exception on overflow
319 // rather than setting the overflow flag. Logic copied from
320 // effect-control-linearizer.cc
321
322 // Check if {right} is positive (and not zero).
323 __ CmpS32(right, Operand(0));
324 ZoneLabelRef done(masm);
325 __ JumpToDeferredIf(
326 le,
327 [](MaglevAssembler* masm, ZoneLabelRef done, Register left,
328 Register right, Int32DivideWithOverflow* node) {
329 // {right} is negative or zero.
330
331 // TODO(leszeks): Using kNotInt32 here, but in some places
332 // kDivisionByZero/kMinusZero/kOverflow would be better. Right
333 // now all eager deopts in a node have to be the same -- we should allow
334 // a node to emit multiple eager deopts with different reasons.
335 Label* deopt = __ GetDeoptLabel(node, DeoptimizeReason::kNotInt32);
336
337 // Check if {right} is zero.
338 // We've already done the compare and flags won't be cleared yet.
339 __ JumpIf(eq, deopt);
340
341 // Check if {left} is zero, as that would produce minus zero.
342 __ CmpS32(left, Operand::Zero());
343 __ JumpIf(eq, deopt);
344
345 // Check if {left} is kMinInt and {right} is -1, in which case we'd have
346 // to return -kMinInt, which is not representable as Int32.
347 __ CmpS32(left, Operand(kMinInt));
348 __ JumpIf(ne, *done);
349 __ CmpS32(right, Operand(-1));
350 __ JumpIf(ne, *done);
351 __ JumpToDeopt(deopt);
352 },
353 done, left, right, this);
354 __ bind(*done);
355
356 // Perform the actual integer division.
357 __ DivS32(out, left, right);
358 __ LoadS32(out, out);
359
360 // Check that the remainder is zero.
361 __ CmpS64(r0, Operand::Zero());
362 __ EmitEagerDeoptIf(ne, DeoptimizeReason::kNotInt32, this);
363}
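The deferred pre-checks plus the remainder check amount to the following scalar rules (an editorial sketch; the helper name is invented).

#include <cstdint>
#include <optional>

std::optional<int32_t> Int32DivideSketch(int32_t left, int32_t right) {
  if (right == 0) return std::nullopt;                        // Division by zero.
  if (left == 0 && right < 0) return std::nullopt;            // Result would be -0.
  if (left == INT32_MIN && right == -1) return std::nullopt;  // -kMinInt overflows.
  if (left % right != 0) return std::nullopt;                 // Not an int32 result.
  return left / right;
}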
364
365void Int32ModulusWithOverflow::SetValueLocationConstraints() {
366 UseAndClobberRegister(left_input());
367 UseAndClobberRegister(right_input());
368 DefineAsRegister(this);
369}
370void Int32ModulusWithOverflow::GenerateCode(MaglevAssembler* masm,
371 const ProcessingState& state) {
372 // If AreAliased(lhs, rhs):
373 // deopt if lhs < 0 // Minus zero.
374 // 0
375
376 // Using same algorithm as in EffectControlLinearizer:
377 // if rhs <= 0 then
378 // rhs = -rhs
379 // deopt if rhs == 0
380 // if lhs < 0 then
381 // let lhs_abs = -lhs in
382 // let res = lhs_abs % rhs in
383 // deopt if res == 0
384 // -res
385 // else
386 // let msk = rhs - 1 in
387 // if rhs & msk == 0 then
388 // lhs & msk
389 // else
390 // lhs % rhs
391
392 Register lhs = ToRegister(left_input());
393 Register rhs = ToRegister(right_input());
394 Register out = ToRegister(result());
395
396 static constexpr DeoptimizeReason deopt_reason =
397 DeoptimizeReason::kDivisionByZero;
398
399 if (lhs == rhs) {
400 // For the modulus algorithm described above, lhs and rhs must not alias
401 // each other.
402 __ CmpS32(lhs, Operand::Zero());
403 // TODO(victorgomes): This ideally should be kMinusZero, but Maglev only
404 // allows one deopt reason per IR.
405 __ EmitEagerDeoptIf(lt, deopt_reason, this);
406 __ Move(out, 0);
407 return;
408 }
409
410 DCHECK_NE(lhs, rhs);
411
412 ZoneLabelRef done(masm);
413 ZoneLabelRef rhs_checked(masm);
414 __ CmpS32(rhs, Operand(0));
415 __ JumpToDeferredIf(
416 le,
417 [](MaglevAssembler* masm, ZoneLabelRef rhs_checked, Register rhs,
418 Int32ModulusWithOverflow* node) {
419 __ lcr(rhs, rhs);
420 __ bne(*rhs_checked);
421 __ EmitEagerDeopt(node, deopt_reason);
422 },
423 rhs_checked, rhs, this);
424 __ bind(*rhs_checked);
425
426 __ CmpS32(lhs, Operand(0));
427 __ JumpToDeferredIf(
428 lt,
429 [](MaglevAssembler* masm, ZoneLabelRef done, Register lhs, Register rhs,
430 Register out, Int32ModulusWithOverflow* node) {
431 __ lcr(lhs, lhs);
432 __ ModU32(out, lhs, rhs);
433 __ lcr(out, out);
434 // TODO(victorgomes): This ideally should be kMinusZero, but Maglev
435 // only allows one deopt reason per IR.
436 __ bne(*done);
437 __ EmitEagerDeopt(node, deopt_reason);
438 },
439 done, lhs, rhs, out, this);
440
441 Label rhs_not_power_of_2;
442 MaglevAssembler::TemporaryRegisterScope temps(masm);
443 Register mask = temps.AcquireScratch();
444 __ AddS32(mask, rhs, Operand(-1));
445 __ And(r0, mask, rhs);
446 __ JumpIf(ne, &rhs_not_power_of_2);
447
448 // {rhs} is power of 2.
449 __ And(out, mask, lhs);
450 __ Jump(*done);
451 // {mask} can be reused from now on.
452 temps.IncludeScratch(mask);
453
454 __ bind(&rhs_not_power_of_2);
455 __ ModU32(out, lhs, rhs);
456 __ bind(*done);
457 __ LoadS32(out, out);
458}
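The algorithm from the comment above, restated as plain C++ (an editorial sketch; the helper name is invented and the INT32_MIN corner is left out for brevity).

#include <cstdint>
#include <optional>

std::optional<int32_t> Int32ModulusSketch(int32_t lhs, int32_t rhs) {
  if (rhs <= 0) {
    rhs = -rhs;
    if (rhs == 0) return std::nullopt;  // Division by zero.
  }
  if (lhs < 0) {
    int32_t res = (-lhs) % rhs;
    if (res == 0) return std::nullopt;  // Result would be -0.
    return -res;
  }
  if ((rhs & (rhs - 1)) == 0) {
    return lhs & (rhs - 1);  // rhs is a power of two: mask instead of dividing.
  }
  return lhs % rhs;
}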
459
460#define DEF_BITWISE_BINOP(Instruction, opcode) \
461 void Instruction::SetValueLocationConstraints() { \
462 UseRegister(left_input()); \
463 UseRegister(right_input()); \
464 DefineAsRegister(this); \
465 } \
466 \
467 void Instruction::GenerateCode(MaglevAssembler* masm, \
468 const ProcessingState& state) { \
469 Register left = ToRegister(left_input()); \
470 Register right = ToRegister(right_input()); \
471 Register out = ToRegister(result()); \
472 __ opcode(out, left, right); \
473 __ LoadS32(out, out); \
474 }
475DEF_BITWISE_BINOP(Int32BitwiseAnd, And)
476DEF_BITWISE_BINOP(Int32BitwiseOr, Or)
477DEF_BITWISE_BINOP(Int32BitwiseXor, Xor)
478#undef DEF_BITWISE_BINOP
479
480#define DEF_SHIFT_BINOP(Instruction, opcode) \
481 void Instruction::SetValueLocationConstraints() { \
482 UseRegister(left_input()); \
483 if (right_input().node()->Is<Int32Constant>()) { \
484 UseAny(right_input()); \
485 } else { \
486 UseRegister(right_input()); \
487 } \
488 DefineAsRegister(this); \
489 } \
490 void Instruction::GenerateCode(MaglevAssembler* masm, \
491 const ProcessingState& state) { \
492 Register left = ToRegister(left_input()); \
493 Register out = ToRegister(result()); \
494 if (Int32Constant* constant = \
495 right_input().node()->TryCast<Int32Constant>()) { \
496 uint32_t shift = constant->value() & 31; \
497 if (shift == 0) { \
498 __ Move(out, left); \
499 return; \
500 } \
501 __ opcode(out, left, Operand(shift)); \
502 __ LoadS32(out, out); \
503 } else { \
504 MaglevAssembler::TemporaryRegisterScope temps(masm); \
505 Register scratch = temps.AcquireScratch(); \
506 Register right = ToRegister(right_input()); \
507 __ And(scratch, right, Operand(31)); \
508 __ opcode(out, left, scratch); \
509 __ LoadS32(out, out); \
510 } \
511 }
512DEF_SHIFT_BINOP(Int32ShiftLeft, ShiftLeftU32)
513DEF_SHIFT_BINOP(Int32ShiftRight, ShiftRightS32)
514DEF_SHIFT_BINOP(Int32ShiftRightLogical, ShiftRightU32)
515#undef DEF_SHIFT_BINOP
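The mask with 31 in the macro mirrors the JS rule that shift counts are taken modulo 32; a scalar sketch of the left-shift case with an invented name.

#include <cstdint>

int32_t Int32ShiftLeftSketch(int32_t left, int32_t right) {
  uint32_t shift = static_cast<uint32_t>(right) & 31;
  return static_cast<int32_t>(static_cast<uint32_t>(left) << shift);
}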
516
517void Int32BitwiseNot::SetValueLocationConstraints() {
518 UseRegister(value_input());
519 DefineAsRegister(this);
520}
521
522void Int32BitwiseNot::GenerateCode(MaglevAssembler* masm,
523 const ProcessingState& state) {
524 Register value = ToRegister(value_input());
525 Register out = ToRegister(result());
526 __ Not32(out, value);
527 __ LoadS32(out, out);
528}
529
530void Float64Add::SetValueLocationConstraints() {
531 UseRegister(left_input());
532 UseRegister(right_input());
533 DefineAsRegister(this);
534}
535
536void Float64Add::GenerateCode(MaglevAssembler* masm,
537 const ProcessingState& state) {
538 DoubleRegister left = ToDoubleRegister(left_input());
539 DoubleRegister right = ToDoubleRegister(right_input());
540 DoubleRegister out = ToDoubleRegister(result());
541 __ AddF64(out, left, right);
542}
543
544void Float64Subtract::SetValueLocationConstraints() {
545 UseRegister(left_input());
546 UseRegister(right_input());
547 DefineAsRegister(this);
548}
549
550void Float64Subtract::GenerateCode(MaglevAssembler* masm,
551 const ProcessingState& state) {
552 DoubleRegister left = ToDoubleRegister(left_input());
553 DoubleRegister right = ToDoubleRegister(right_input());
554 DoubleRegister out = ToDoubleRegister(result());
555 __ SubF64(out, left, right);
556}
557
558void Float64Multiply::SetValueLocationConstraints() {
559 UseRegister(left_input());
560 UseRegister(right_input());
561 DefineAsRegister(this);
562}
563
564void Float64Multiply::GenerateCode(MaglevAssembler* masm,
565 const ProcessingState& state) {
566 DoubleRegister left = ToDoubleRegister(left_input());
567 DoubleRegister right = ToDoubleRegister(right_input());
568 DoubleRegister out = ToDoubleRegister(result());
569 __ MulF64(out, left, right);
570}
571
572void Float64Divide::SetValueLocationConstraints() {
573 UseRegister(left_input());
574 UseRegister(right_input());
575 DefineAsRegister(this);
576}
577
578void Float64Divide::GenerateCode(MaglevAssembler* masm,
579 const ProcessingState& state) {
580 DoubleRegister left = ToDoubleRegister(left_input());
581 DoubleRegister right = ToDoubleRegister(right_input());
582 DoubleRegister out = ToDoubleRegister(result());
583 __ DivF64(out, left, right);
584}
585
586void Float64Modulus::SetValueLocationConstraints() {
587 UseFixed(left_input(), d0);
588 UseFixed(right_input(), d2);
589 DefineSameAsFirst(this);
590}
591void Float64Modulus::GenerateCode(MaglevAssembler* masm,
592 const ProcessingState& state) {
593 FrameScope scope(masm, StackFrame::MANUAL);
594 __ Push(r2, r3, r4, r5);
595 __ PrepareCallCFunction(0, 2);
596 __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
597 __ Pop(r2, r3, r4, r5);
598}
599
600void Float64Negate::SetValueLocationConstraints() {
601 UseRegister(input());
602 DefineAsRegister(this);
603}
604void Float64Negate::GenerateCode(MaglevAssembler* masm,
605 const ProcessingState& state) {
606 DoubleRegister value = ToDoubleRegister(input());
607 DoubleRegister out = ToDoubleRegister(result());
608 __ lcdbr(out, value);
609}
610
611void Float64Abs::GenerateCode(MaglevAssembler* masm,
612 const ProcessingState& state) {
613 DoubleRegister in = ToDoubleRegister(input());
614 DoubleRegister out = ToDoubleRegister(result());
615 __ lpdbr(out, in);
616}
617
618void Float64Round::GenerateCode(MaglevAssembler* masm,
619 const ProcessingState& state) {
620 DoubleRegister in = ToDoubleRegister(input());
621 DoubleRegister out = ToDoubleRegister(result());
622 if (kind_ == Kind::kNearest) {
623 MaglevAssembler::TemporaryRegisterScope temps(masm);
624 DoubleRegister temp = temps.AcquireScratchDouble();
625 DoubleRegister temp2 = temps.AcquireScratchDouble();
626 __ Move(temp, in);
627 __ NearestIntF64(out, in);
628 __ SubF64(temp, temp, out);
629 __ Move(temp2, 0.5);
630 __ CmpF64(temp, temp2);
631 Label done;
632 __ JumpIf(ne, &done, Label::kNear);
633 __ AddF64(out, out, temp2);
634 __ AddF64(out, out, temp2);
635 __ bind(&done);
636 } else if (kind_ == Kind::kCeil) {
637 __ CeilF64(out, in);
638 } else if (kind_ == Kind::kFloor) {
639 __ FloorF64(out, in);
640 }
641}
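The kNearest branch adjusts the machine's round-to-nearest-even result so that ties round toward positive infinity, matching JS Math.round; a sketch assuming the default rounding mode, with an invented name.

#include <cmath>

double Float64RoundNearestSketch(double in) {
  double out = std::nearbyint(in);   // Ties to even, like NearestIntF64.
  if (in - out == 0.5) out += 1.0;   // e.g. 2.5 rounds to 3 rather than 2.
  return out;
}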
642
643int Float64Exponentiate::MaxCallStackArgs() const { return 0; }
644void Float64Exponentiate::SetValueLocationConstraints() {
645 UseFixed(left_input(), d0);
646 UseFixed(right_input(), d2);
647 DefineSameAsFirst(this);
648}
649void Float64Exponentiate::GenerateCode(MaglevAssembler* masm,
650 const ProcessingState& state) {
651 FrameScope scope(masm, StackFrame::MANUAL);
652 __ Push(r2, r3, r4, r5);
653 __ PrepareCallCFunction(0, 2);
654 __ CallCFunction(ExternalReference::ieee754_pow_function(), 0, 2);
655 __ Pop(r2, r3, r4, r5);
656}
657
658int Float64Ieee754Unary::MaxCallStackArgs() const { return 0; }
659void Float64Ieee754Unary::SetValueLocationConstraints() {
660 UseFixed(input(), d0);
661 DefineSameAsFirst(this);
662}
663void Float64Ieee754Unary::GenerateCode(MaglevAssembler* masm,
664 const ProcessingState& state) {
665 FrameScope scope(masm, StackFrame::MANUAL);
666 __ Push(r2, r3, r4, r5);
667 __ PrepareCallCFunction(0, 1);
668 __ CallCFunction(ieee_function_ref(), 0, 1);
669 __ Pop(r2, r3, r4, r5);
670}
671
672void LoadTypedArrayLength::SetValueLocationConstraints() {
673 UseRegister(receiver_input());
674 DefineAsRegister(this);
675}
676
677void LoadTypedArrayLength::GenerateCode(MaglevAssembler* masm,
678 const ProcessingState& state) {
679 Register object = ToRegister(receiver_input());
680 Register result_register = ToRegister(result());
681 if (v8_flags.debug_code) {
682 __ AssertObjectType(object, JS_TYPED_ARRAY_TYPE,
683 AbortReason::kUnexpectedValue);
684 }
685
686 __ LoadBoundedSizeFromObject(result_register, object,
687 JSTypedArray::kRawByteLengthOffset);
688 int shift_size = ElementsKindToShiftSize(elements_kind_);
689 if (shift_size > 0) {
690 // TODO(leszeks): Merge this shift with the one in LoadBoundedSize.
691 DCHECK(shift_size == 1 || shift_size == 2 || shift_size == 3);
692 __ ShiftRightU64(result_register, result_register, Operand(shift_size));
693 }
694}
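The shift converts the bounded byte length into an element count; a one-line sketch with an invented name (shift_size is log2 of the element width).

#include <cstdint>

uint64_t TypedArrayLengthSketch(uint64_t byte_length, int shift_size) {
  return byte_length >> shift_size;  // e.g. >> 3 for 8-byte element kinds.
}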
695
696int CheckJSDataViewBounds::MaxCallStackArgs() const { return 1; }
697void CheckJSDataViewBounds::SetValueLocationConstraints() {
698 UseRegister(receiver_input());
699 UseRegister(index_input());
700 set_temporaries_needed(1);
701}
702void CheckJSDataViewBounds::GenerateCode(MaglevAssembler* masm,
703 const ProcessingState& state) {
705 MaglevAssembler::TemporaryRegisterScope temps(masm);
706 Register object = ToRegister(receiver_input());
707 Register index = ToRegister(index_input());
708 if (v8_flags.debug_code) {
709 __ AssertObjectType(object, JS_DATA_VIEW_TYPE,
710 AbortReason::kUnexpectedValue);
711 }
712
713 // Normal DataView (backed by AB / SAB) or non-length tracking backed by GSAB.
714 Register byte_length = temps.AcquireScratch();
715 __ LoadBoundedSizeFromObject(byte_length, object,
716 JSDataView::kRawByteLengthOffset);
717
718 int element_size = ExternalArrayElementSize(element_type_);
719 if (element_size > 1) {
720 __ SubS64(byte_length, Operand(element_size - 1));
721 __ EmitEagerDeoptIf(lt, DeoptimizeReason::kOutOfBounds, this);
722 }
723 __ CmpS32(index, byte_length);
724 __ EmitEagerDeoptIf(ge, DeoptimizeReason::kOutOfBounds, this);
725}
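The two deopt checks above implement an index + element_size <= byte_length bounds test; an editorial sketch with invented names.

#include <cstdint>

bool DataViewInBoundsSketch(int64_t index, int64_t byte_length,
                            int element_size) {
  int64_t last_start = byte_length - (element_size - 1);
  if (last_start < 0) return false;  // View too small for even one element.
  return index < last_start;
}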
726
727void HoleyFloat64ToMaybeNanFloat64::SetValueLocationConstraints() {
728 UseRegister(input());
729 DefineSameAsFirst(this);
730}
731void HoleyFloat64ToMaybeNanFloat64::GenerateCode(MaglevAssembler* masm,
732 const ProcessingState& state) {
733 DoubleRegister value = ToDoubleRegister(input());
734 // The hole value is a signalling NaN, so just silence it to get the float64
735 // value.
736 __ lzdr(kDoubleRegZero);
737 __ SubF64(value, value, kDoubleRegZero);
738}
739
740namespace {
741
742enum class ReduceInterruptBudgetType { kLoop, kReturn };
743
744void HandleInterruptsAndTiering(MaglevAssembler* masm, ZoneLabelRef done,
745 Node* node, ReduceInterruptBudgetType type,
746 Register scratch0) {
747 // For loops, first check for interrupts. Don't do this for returns, as we
748 // can't lazy deopt to the end of a return.
749 if (type == ReduceInterruptBudgetType::kLoop) {
750 Label next;
751 // Here, we only care about interrupts since we've already guarded against
752 // real stack overflows on function entry.
753 {
754 Register stack_limit = scratch0;
755 __ LoadStackLimit(stack_limit, StackLimitKind::kInterruptStackLimit);
756 __ CmpU64(sp, stack_limit);
757 __ bgt(&next);
758 }
759
760 // An interrupt has been requested and we must call into runtime to handle
761 // it; since we already pay the call cost, combine with the TieringManager
762 // call.
763 {
764 SaveRegisterStateForCall save_register_state(masm,
765 node->register_snapshot());
766 Register function = scratch0;
767 __ LoadU64(function,
768 MemOperand(fp, StandardFrameConstants::kFunctionOffset));
769 __ Push(function);
770 // Move into kContextRegister after the load into scratch0, just in case
771 // scratch0 happens to be kContextRegister.
772 __ Move(kContextRegister, masm->native_context().object());
773 __ CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheck_Maglev, 1);
774 save_register_state.DefineSafepointWithLazyDeopt(node->lazy_deopt_info());
775 }
776 __ b(*done); // All done, continue.
777 __ bind(&next);
778 }
779
780 // No pending interrupts. Call into the TieringManager if needed.
781 {
782 SaveRegisterStateForCall save_register_state(masm,
783 node->register_snapshot());
784 Register function = scratch0;
785 __ LoadU64(function,
786 MemOperand(fp, StandardFrameConstants::kFunctionOffset));
787 __ Push(function);
788 // Move into kContextRegister after the load into scratch0, just in case
789 // scratch0 happens to be kContextRegister.
790 __ Move(kContextRegister, masm->native_context().object());
791 // Note: must not cause a lazy deopt!
792 __ CallRuntime(Runtime::kBytecodeBudgetInterrupt_Maglev, 1);
793 save_register_state.DefineSafepoint();
794 }
795 __ b(*done);
796}
797
798void GenerateReduceInterruptBudget(MaglevAssembler* masm, Node* node,
799 Register feedback_cell,
800 ReduceInterruptBudgetType type, int amount) {
801 MaglevAssembler::TemporaryRegisterScope temps(masm);
802 Register budget = temps.AcquireScratch();
803 __ LoadU32(budget, FieldMemOperand(feedback_cell,
804 FeedbackCell::kInterruptBudgetOffset));
805 __ SubS32(budget, Operand(amount));
806 __ StoreU32(budget, FieldMemOperand(feedback_cell,
807 FeedbackCell::kInterruptBudgetOffset));
808 ZoneLabelRef done(masm);
809 __ JumpToDeferredIf(lt, HandleInterruptsAndTiering, done, node, type, budget);
810 __ bind(*done);
811}
812
813} // namespace
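GenerateReduceInterruptBudget decrements the feedback cell's interrupt budget and takes the deferred interrupt/tiering path once it underflows; a scalar sketch with invented names.

#include <cstdint>

void ReduceInterruptBudgetSketch(int32_t* budget, int amount,
                                 void (*deferred_handler)()) {
  *budget -= amount;
  if (*budget < 0) deferred_handler();  // Mirrors JumpToDeferredIf(lt, ...).
}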
814
818}
819void ReduceInterruptBudgetForLoop::GenerateCode(MaglevAssembler* masm,
820 const ProcessingState& state) {
821 GenerateReduceInterruptBudget(masm, this, ToRegister(feedback_cell()),
822 ReduceInterruptBudgetType::kLoop, amount());
823}
824
825int ReduceInterruptBudgetForReturn::MaxCallStackArgs() const { return 1; }
826void ReduceInterruptBudgetForReturn::SetValueLocationConstraints() {
827 UseRegister(feedback_cell());
828 set_temporaries_needed(1);
829}
830void ReduceInterruptBudgetForReturn::GenerateCode(
831 MaglevAssembler* masm, const ProcessingState& state) {
832 GenerateReduceInterruptBudget(masm, this, ToRegister(feedback_cell()),
833 ReduceInterruptBudgetType::kReturn, amount());
834}
835
836// ---
837// Control nodes
838// ---
839void Return::SetValueLocationConstraints() {
840 UseFixed(value_input(), kReturnRegister0);
841}
842void Return::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
843 DCHECK_EQ(ToRegister(value_input()), kReturnRegister0);
844
845 // Read the formal number of parameters from the top level compilation unit
846 // (i.e. the outermost, non inlined function).
847 int formal_params_size =
848 masm->compilation_info()->toplevel_compilation_unit()->parameter_count();
849
850 // We're not going to continue execution, so we can use an arbitrary register
851 // here instead of relying on temporaries from the register allocator.
852 Register actual_params_size = r6;
853
854 // Compute the size of the actual parameters + receiver (in bytes).
855 // TODO(leszeks): Consider making this an input into Return to reuse the
856 // incoming argc's register (if it's still valid).
857 __ LoadU64(actual_params_size,
858 MemOperand(fp, StandardFrameConstants::kArgCOffset));
859
860 // Leave the frame.
861 __ LeaveFrame(StackFrame::MAGLEV);
862
863 // If actual is bigger than formal, then we should use it to free up the stack
864 // arguments.
865 Label drop_dynamic_arg_size;
866 __ CmpS32(actual_params_size, Operand(formal_params_size));
867 __ bgt(&drop_dynamic_arg_size);
868 __ mov(actual_params_size, Operand(formal_params_size));
869 __ bind(&drop_dynamic_arg_size);
870
871 // Drop receiver + arguments according to dynamic arguments size.
872 __ DropArguments(actual_params_size);
873 __ Ret();
874}
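The return sequence drops whichever of the actual or formal parameter counts (each including the receiver) is larger; a sketch with an invented name.

int ArgumentsToDropSketch(int actual_params_size, int formal_params_size) {
  return actual_params_size > formal_params_size ? actual_params_size
                                                 : formal_params_size;
}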
875
876} // namespace maglev
877} // namespace internal
878} // namespace v8