maglev-ir-x64.cc

// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/logging.h"
// (further #include directives elided in this listing)

namespace v8 {
namespace internal {
namespace maglev {

#define __ masm->

// ---
// Nodes
// ---
void InlinedAllocation::SetValueLocationConstraints() {
  UseRegister(allocation_block_input());
  if (offset() == 0) {
    DefineSameAsFirst(this);
  } else {
    DefineAsRegister(this);
  }
}

void InlinedAllocation::GenerateCode(MaglevAssembler* masm,
                                     const ProcessingState& state) {
  if (offset() != 0) {
    __ leaq(ToRegister(result()),
            Operand(ToRegister(allocation_block_input()), offset()));
  }
}

void ArgumentsLength::SetValueLocationConstraints() { DefineAsRegister(this); }

void ArgumentsLength::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  __ movq(ToRegister(result()),
          Operand(rbp, StandardFrameConstants::kArgCOffset));
  __ decl(ToRegister(result()));  // Remove receiver.
}

void RestLength::SetValueLocationConstraints() { DefineAsRegister(this); }

void RestLength::GenerateCode(MaglevAssembler* masm,
                              const ProcessingState& state) {
  Register length = ToRegister(result());
  Label done;
  __ movq(length, Operand(rbp, StandardFrameConstants::kArgCOffset));
  __ subl(length, Immediate(formal_parameter_count() + 1));
  __ j(greater_equal, &done, Label::kNear);
  __ Move(length, 0);
  __ bind(&done);
  __ UncheckedSmiTagInt32(length);
}

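// Editor's sketch (illustration only, not part of the original file): argc as
// stored in the frame includes the receiver, so the rest length computed
// above is, in plain C++:
//
//   int RestLength(int argc_including_receiver, int formal_parameter_count) {
//     int rest = argc_including_receiver - (formal_parameter_count + 1);
//     return rest < 0 ? 0 : rest;  // clamped at zero, then Smi-tagged
//   }
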
void LoadTypedArrayLength::SetValueLocationConstraints() {
  UseRegister(receiver_input());
  DefineAsRegister(this);
}
void LoadTypedArrayLength::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  Register object = ToRegister(receiver_input());
  Register result_register = ToRegister(result());
  if (v8_flags.debug_code) {
    __ AssertNotSmi(object);
    __ CmpObjectType(object, JS_TYPED_ARRAY_TYPE, kScratchRegister);
    __ Assert(equal, AbortReason::kUnexpectedValue);
  }
  __ LoadBoundedSizeFromObject(result_register, object,
                               JSTypedArray::kRawByteLengthOffset);
  int shift_size = ElementsKindToShiftSize(elements_kind_);
  if (shift_size > 0) {
    // TODO(leszeks): Merge this shift with the one in LoadBoundedSize.
    DCHECK(shift_size == 1 || shift_size == 2 || shift_size == 3);
    __ shrq(result_register, Immediate(shift_size));
  }
}

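// Editor's note (illustration only): LoadBoundedSizeFromObject yields the raw
// byte length; shifting right by log2(element size) turns it into an element
// count. E.g. for a Float64Array (8-byte elements, shift_size == 3), a
// 64-byte buffer gives 64 >> 3 == 8 elements.
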
void CheckJSDataViewBounds::SetValueLocationConstraints() {
  UseRegister(receiver_input());
  UseRegister(index_input());
}
void CheckJSDataViewBounds::GenerateCode(MaglevAssembler* masm,
                                         const ProcessingState& state) {
  Register object = ToRegister(receiver_input());
  Register index = ToRegister(index_input());
  Register byte_length = kScratchRegister;
  if (v8_flags.debug_code) {
    __ AssertNotSmi(object);
    __ CmpObjectType(object, JS_DATA_VIEW_TYPE, kScratchRegister);
    __ Assert(equal, AbortReason::kUnexpectedValue);
  }

  // Normal DataView (backed by AB / SAB) or non-length tracking backed by
  // GSAB.
  __ LoadBoundedSizeFromObject(byte_length, object,
                               JSDataView::kRawByteLengthOffset);

  int element_size = ExternalArrayElementSize(element_type_);
  if (element_size > 1) {
    __ subq(byte_length, Immediate(element_size - 1));
    __ EmitEagerDeoptIf(negative, DeoptimizeReason::kOutOfBounds, this);
  }
  __ cmpl(index, byte_length);
  __ EmitEagerDeoptIf(above_equal, DeoptimizeReason::kOutOfBounds, this);
}

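// Editor's sketch (plain C++, illustration only): an element_size-byte access
// at `index` is in bounds iff index + element_size <= byte_length. The code
// above rewrites that as a single unsigned compare:
//
//   bool DataViewInBounds(uint32_t index, int64_t byte_length,
//                         int element_size) {
//     byte_length -= element_size - 1;  // deopt if this goes negative
//     return index < static_cast<uint64_t>(byte_length);
//   }
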
void CheckedIntPtrToInt32::SetValueLocationConstraints() {
  UseRegister(input());
  DefineSameAsFirst(this);
}

void CheckedIntPtrToInt32::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  Register input_reg = ToRegister(input());

  // Copy the input (32 bit) to scratch. Is the input equal (64 bit) to
  // scratch?
  __ movl(kScratchRegister, input_reg);
  __ cmpq(kScratchRegister, input_reg);
  __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kNotInt32, this);
}

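// Editor's note (illustration only): movl zero-extends into the scratch
// register, so the cmpq/deopt pair only lets through values that round-trip
// through their low 32 bits, roughly:
//
//   bool FitsInt32Input(intptr_t v) {  // hypothetical helper
//     return static_cast<intptr_t>(static_cast<uint32_t>(v)) == v;
//   }
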
void CheckFloat64SameValue::SetValueLocationConstraints() {
  UseRegister(target_input());
}
void CheckFloat64SameValue::GenerateCode(MaglevAssembler* masm,
                                         const ProcessingState& state) {
  Label* fail = __ GetDeoptLabel(this, deoptimize_reason());
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  DoubleRegister double_scratch = temps.AcquireScratchDouble();
  DoubleRegister target = ToDoubleRegister(target_input());
  if (value().is_nan()) {
    __ JumpIfNotNan(target, fail);
  } else {
    __ Move(double_scratch, value());
    __ CompareFloat64AndJumpIf(double_scratch, target, kNotEqual, fail, fail);
    if (value().get_scalar() == 0) {  // If value is +0.0 or -0.0.
      Register scratch = temps.AcquireScratch();
      __ movq(scratch, target);
      __ testq(scratch, scratch);
      __ JumpIf(value().get_bits() == 0 ? kNotEqual : kEqual, fail);
    }
  }
}

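// Editor's sketch (plain C++, illustration only): the node checks SameValue
// equality against a known constant, which differs from `==` for NaN and
// signed zero:
//
//   bool SameValue(double a, double b) {
//     if (std::isnan(a)) return std::isnan(b);    // every NaN matches
//     if (a != b) return false;
//     return std::signbit(a) == std::signbit(b);  // +0.0 and -0.0 differ
//   }
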
int BuiltinStringFromCharCode::MaxCallStackArgs() const {
  return AllocateDescriptor::GetStackParameterCount();
}
void BuiltinStringFromCharCode::SetValueLocationConstraints() {
  if (code_input().node()->Is<Int32Constant>()) {
    UseAny(code_input());
  } else {
    UseAndClobberRegister(code_input());
    set_temporaries_needed(1);
  }
  DefineAsRegister(this);
}
void BuiltinStringFromCharCode::GenerateCode(MaglevAssembler* masm,
                                             const ProcessingState& state) {
  Register result_string = ToRegister(result());
  if (Int32Constant* constant = code_input().node()->TryCast<Int32Constant>()) {
    int32_t char_code = constant->value() & 0xFFFF;
    if (0 <= char_code && char_code < String::kMaxOneByteCharCode) {
      __ LoadSingleCharacterString(result_string, char_code);
    } else {
      __ AllocateTwoByteString(register_snapshot(), result_string, 1);
      __ movw(
          FieldOperand(result_string, OFFSET_OF_DATA_START(SeqTwoByteString)),
          Immediate(char_code));
    }
  } else {
    MaglevAssembler::TemporaryRegisterScope temps(masm);
    Register scratch = temps.Acquire();
    Register char_code = ToRegister(code_input());
    __ StringFromCharCode(register_snapshot(), nullptr, result_string,
                          char_code, scratch,
                          MaglevAssembler::CharCodeMaskMode::kMustApplyMask);
  }
}

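// Editor's note (illustration only): String.fromCharCode takes its argument
// modulo 2^16 (the & 0xFFFF above). Small codes are served from the
// single-character string table; larger ones need a fresh two-byte string,
// e.g. fromCharCode(0x41) is the one-byte "A" while fromCharCode(0x3A9) is
// the two-byte "Ω".
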
void Int32AddWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  if (TryGetInt32ConstantInput(kRightIndex)) {
    UseAny(right_input());
  } else {
    UseRegister(right_input());
  }
  DefineSameAsFirst(this);
}

void Int32AddWithOverflow::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  Register left = ToRegister(left_input());
  if (!right_input().operand().IsRegister()) {
    auto right_const = TryGetInt32ConstantInput(kRightIndex);
    DCHECK(right_const);
    __ addl(left, Immediate(*right_const));
  } else {
    Register right = ToRegister(right_input());
    __ addl(left, right);
  }
  // None of the mutated input registers should be a register input into the
  // eager deopt info.
  DCHECK_REGLIST_EMPTY(RegList{left} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
}

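// Editor's sketch (plain C++, illustration only): the *WithOverflow nodes
// deopt instead of wrapping, so Maglev only keeps values that are exact
// int32s. Using a GCC/Clang builtin:
//
//   bool CheckedAdd(int32_t a, int32_t b, int32_t* out) {
//     return !__builtin_add_overflow(a, b, out);  // false means deopt
//   }
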
void Int32SubtractWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  if (TryGetInt32ConstantInput(kRightIndex)) {
    UseAny(right_input());
  } else {
    UseRegister(right_input());
  }
  DefineSameAsFirst(this);
}

void Int32SubtractWithOverflow::GenerateCode(MaglevAssembler* masm,
                                             const ProcessingState& state) {
  Register left = ToRegister(left_input());
  if (!right_input().operand().IsRegister()) {
    auto right_const = TryGetInt32ConstantInput(kRightIndex);
    DCHECK(right_const);
    __ subl(left, Immediate(*right_const));
  } else {
    Register right = ToRegister(right_input());
    __ subl(left, right);
  }
  // None of the mutated input registers should be a register input into the
  // eager deopt info.
  DCHECK_REGLIST_EMPTY(RegList{left} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
}

void Int32MultiplyWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineSameAsFirst(this);
  set_temporaries_needed(1);
}

void Int32MultiplyWithOverflow::GenerateCode(MaglevAssembler* masm,
                                             const ProcessingState& state) {
  Register result = ToRegister(this->result());
  Register right = ToRegister(right_input());
  DCHECK_EQ(result, ToRegister(left_input()));

  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register saved_left = temps.Acquire();
  __ movl(saved_left, result);
  // TODO(leszeks): peephole optimise multiplication by a constant.
  __ imull(result, right);
  // None of the mutated input registers should be a register input into the
  // eager deopt info.
  DCHECK_REGLIST_EMPTY(RegList{saved_left, result} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);

  // If the result is zero, check if either lhs or rhs is negative.
  Label end;
  __ cmpl(result, Immediate(0));
  __ j(not_zero, &end);
  {
    __ orl(saved_left, right);
    __ cmpl(saved_left, Immediate(0));
    // If one of them is negative, we must have a -0 result, which is
    // non-int32, so deopt.
    // TODO(leszeks): Consider splitting these deopts to have distinct deopt
    // reasons. Otherwise, the reason has to match the above.
    __ EmitEagerDeoptIf(less, DeoptimizeReason::kOverflow, this);
  }
  __ bind(&end);
}

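// Editor's sketch (plain C++, illustration only): a zero product needs a -0
// check. If the result is 0 and either factor was negative (the other must
// then be 0), the exact result is -0, e.g. -5 * 0, which no int32 can
// represent:
//
//   bool ProductIsMinusZero(int32_t lhs, int32_t rhs, int32_t result) {
//     return result == 0 && (lhs | rhs) < 0;  // or-ing keeps both sign bits
//   }
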
void Int32ModulusWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseAndClobberRegister(right_input());
  DefineAsFixed(this, rdx);
  // rax,rdx are clobbered by div.
  RequireSpecificTemporary(rax);
  RequireSpecificTemporary(rdx);
}

void Int32ModulusWithOverflow::GenerateCode(MaglevAssembler* masm,
                                            const ProcessingState& state) {
  // If AreAliased(lhs, rhs):
  //   deopt if lhs < 0  // Minus zero.
  //   0
  //
  // Otherwise, use the same algorithm as in EffectControlLinearizer:
  //   if rhs <= 0 then
  //     rhs = -rhs
  //     deopt if rhs == 0
  //   if lhs < 0 then
  //     let lhs_abs = -lhs in
  //     let res = lhs_abs % rhs in
  //     deopt if res == 0
  //     -res
  //   else
  //     let msk = rhs - 1 in
  //     if rhs & msk == 0 then
  //       lhs & msk
  //     else
  //       lhs % rhs

  Register lhs = ToRegister(left_input());
  Register rhs = ToRegister(right_input());

  static constexpr DeoptimizeReason deopt_reason =
      DeoptimizeReason::kDivisionByZero;

  if (lhs == rhs) {
    // For the modulus algorithm described above, lhs and rhs must not alias
    // each other.
    __ testl(lhs, lhs);
    // TODO(victorgomes): This ideally should be kMinusZero, but Maglev only
    // allows one deopt reason per IR.
    __ EmitEagerDeoptIf(negative, deopt_reason, this);
    __ Move(ToRegister(result()), 0);
    return;
  }

  DCHECK(!AreAliased(lhs, rhs, rax, rdx));

  ZoneLabelRef done(masm);
  ZoneLabelRef rhs_checked(masm);

  __ cmpl(rhs, Immediate(0));
  __ JumpToDeferredIf(
      less_equal,
      [](MaglevAssembler* masm, ZoneLabelRef rhs_checked, Register rhs,
         Int32ModulusWithOverflow* node) {
        __ negl(rhs);
        __ j(not_zero, *rhs_checked);
        __ EmitEagerDeopt(node, deopt_reason);
      },
      rhs_checked, rhs, this);
  __ bind(*rhs_checked);

  __ cmpl(lhs, Immediate(0));
  __ JumpToDeferredIf(
      less,
      [](MaglevAssembler* masm, ZoneLabelRef done, Register lhs, Register rhs,
         Int32ModulusWithOverflow* node) {
        // `divl(divisor)` divides rdx:rax by the divisor and stores the
        // quotient in rax, the remainder in rdx.
        __ movl(rax, lhs);
        __ negl(rax);
        __ xorl(rdx, rdx);
        __ divl(rhs);
        __ negl(rdx);
        __ j(not_zero, *done);
        // TODO(victorgomes): This ideally should be kMinusZero, but Maglev
        // only allows one deopt reason per IR.
        __ EmitEagerDeopt(node, deopt_reason);
      },
      done, lhs, rhs, this);

  Label rhs_not_power_of_2;
  Register mask = rax;
  __ leal(mask, Operand(rhs, -1));
  __ testl(rhs, mask);
  __ j(not_zero, &rhs_not_power_of_2, Label::kNear);

  // {rhs} is power of 2.
  __ andl(mask, lhs);
  __ movl(ToRegister(result()), mask);
  __ jmp(*done, Label::kNear);

  __ bind(&rhs_not_power_of_2);
  // `divl(divisor)` divides rdx:rax by the divisor and stores the
  // quotient in rax, the remainder in rdx.
  __ movl(rax, lhs);
  __ xorl(rdx, rdx);
  __ divl(rhs);
  // Result is implicitly written to rdx.
  DCHECK_EQ(ToRegister(result()), rdx);

  __ bind(*done);
}

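// Editor's sketch (plain C++, illustration only): the algorithm above
// implements JS % on int32s (remainder truncated toward zero, taking the
// dividend's sign) and bails out where no int32 result exists:
//
//   bool JsInt32Mod(int32_t lhs, int32_t rhs, int32_t* out) {
//     if (rhs == 0) return false;  // kDivisionByZero deopt
//     uint32_t abs_rhs = rhs < 0 ? -static_cast<uint32_t>(rhs) : rhs;
//     if (lhs < 0) {
//       uint32_t res = -static_cast<uint32_t>(lhs) % abs_rhs;
//       if (res == 0) return false;  // exact result would be -0
//       *out = -static_cast<int32_t>(res);
//     } else {
//       *out = lhs % static_cast<int32_t>(abs_rhs);
//     }
//     return true;  // power-of-2 divisors reduce to lhs & (abs_rhs - 1)
//   }
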
void Int32DivideWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsFixed(this, rax);
  // rax,rdx are clobbered by idiv.
  RequireSpecificTemporary(rax);
  RequireSpecificTemporary(rdx);
}

void Int32DivideWithOverflow::GenerateCode(MaglevAssembler* masm,
                                           const ProcessingState& state) {
  Register left = ToRegister(left_input());
  Register right = ToRegister(right_input());
  __ movl(rax, left);

  // TODO(leszeks): peephole optimise division by a constant.

  // Sign extend eax into edx.
  __ cdq();

  // Pre-check for overflow, since idiv throws a division exception on overflow
  // rather than setting the overflow flag. Logic copied from
  // effect-control-linearizer.cc

  // Check if {right} is positive (and not zero).
  __ cmpl(right, Immediate(0));
  ZoneLabelRef done(masm);
  __ JumpToDeferredIf(
      less_equal,
      [](MaglevAssembler* masm, ZoneLabelRef done, Register right,
         Int32DivideWithOverflow* node) {
        // {right} is negative or zero.

        // Check if {right} is zero.
        // We've already done the compare and flags won't be cleared yet.
        // TODO(leszeks): Using kNotInt32 here, but kDivisionByZero would be
        // better. Right now all eager deopts in a node have to be the same --
        // we should allow a node to emit multiple eager deopts with different
        // reasons.
        __ EmitEagerDeoptIf(equal, DeoptimizeReason::kNotInt32, node);

        // Check if {left} is zero, as that would produce minus zero. Left is
        // in rax already.
        __ cmpl(rax, Immediate(0));
        // TODO(leszeks): Better DeoptimizeReason = kMinusZero.
        __ EmitEagerDeoptIf(equal, DeoptimizeReason::kNotInt32, node);

        // Check if {left} is kMinInt and {right} is -1, in which case we'd
        // have to return -kMinInt, which is not representable as Int32.
        __ cmpl(rax, Immediate(kMinInt));
        __ j(not_equal, *done);
        __ cmpl(right, Immediate(-1));
        __ j(not_equal, *done);
        // TODO(leszeks): Better DeoptimizeReason = kOverflow, but
        // eager_deopt_info is already configured as kNotInt32.
        __ EmitEagerDeopt(node, DeoptimizeReason::kNotInt32);
      },
      done, right, this);
  __ bind(*done);

  // Perform the actual integer division.
  __ idivl(right);

  // Check that the remainder is zero.
  __ cmpl(rdx, Immediate(0));
  // None of the mutated input registers should be a register input into the
  // eager deopt info.
  DCHECK_REGLIST_EMPTY(RegList{rax, rdx} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kNotInt32, this);
  DCHECK_EQ(ToRegister(result()), rax);
}

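// Editor's sketch (plain C++, illustration only): the deferred code above
// covers every input where truncating 32-bit division cannot produce a plain
// int32, and the remainder check afterwards rejects inexact quotients:
//
//   bool JsInt32Div(int32_t lhs, int32_t rhs, int32_t* out) {
//     if (rhs == 0) return false;                       // division by zero
//     if (lhs == 0 && rhs < 0) return false;            // exact result is -0
//     if (lhs == INT32_MIN && rhs == -1) return false;  // 2^31 overflows
//     if (lhs % rhs != 0) return false;                 // not an integer
//     *out = lhs / rhs;
//     return true;
//   }
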
#define DEF_BITWISE_BINOP(Instruction, opcode)                   \
  void Instruction::SetValueLocationConstraints() {              \
    UseRegister(left_input());                                   \
    if (TryGetInt32ConstantInput(kRightIndex)) {                 \
      UseAny(right_input());                                     \
    } else {                                                     \
      UseRegister(right_input());                                \
    }                                                            \
    DefineSameAsFirst(this);                                     \
  }                                                              \
                                                                 \
  void Instruction::GenerateCode(MaglevAssembler* masm,          \
                                 const ProcessingState& state) { \
    Register left = ToRegister(left_input());                    \
    if (!right_input().operand().IsRegister()) {                 \
      auto right_const = TryGetInt32ConstantInput(kRightIndex);  \
      DCHECK(right_const);                                       \
      __ opcode(left, Immediate(*right_const));                  \
    } else {                                                     \
      Register right = ToRegister(right_input());                \
      __ opcode(left, right);                                    \
    }                                                            \
  }
DEF_BITWISE_BINOP(Int32BitwiseAnd, andl)
DEF_BITWISE_BINOP(Int32BitwiseOr, orl)
DEF_BITWISE_BINOP(Int32BitwiseXor, xorl)
#undef DEF_BITWISE_BINOP

#define DEF_SHIFT_BINOP(Instruction, opcode)                        \
  void Instruction::SetValueLocationConstraints() {                 \
    UseRegister(left_input());                                      \
    if (TryGetInt32ConstantInput(kRightIndex)) {                    \
      UseAny(right_input());                                        \
    } else {                                                        \
      UseFixed(right_input(), rcx);                                 \
    }                                                               \
    DefineSameAsFirst(this);                                        \
  }                                                                 \
                                                                    \
  void Instruction::GenerateCode(MaglevAssembler* masm,             \
                                 const ProcessingState& state) {    \
    Register left = ToRegister(left_input());                       \
    if (auto right_const = TryGetInt32ConstantInput(kRightIndex)) { \
      DCHECK(right_const);                                          \
      int right = *right_const & 31;                                \
      if (right != 0) {                                             \
        __ opcode(left, Immediate(right));                          \
      }                                                             \
    } else {                                                        \
      DCHECK_EQ(rcx, ToRegister(right_input()));                    \
      __ opcode##_cl(left);                                         \
    }                                                               \
  }
DEF_SHIFT_BINOP(Int32ShiftLeft, shll)
DEF_SHIFT_BINOP(Int32ShiftRight, sarl)
DEF_SHIFT_BINOP(Int32ShiftRightLogical, shrl)
#undef DEF_SHIFT_BINOP

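// Editor's note (illustration only): masking the shift count with `& 31`
// matches both the hardware behaviour of shl/sar/shr with a count in cl and
// the ECMAScript spec, which reduces the count modulo 32; e.g. in JS,
// (1 << 33) === 2.
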
void Int32IncrementWithOverflow::SetValueLocationConstraints() {
  UseRegister(value_input());
  DefineSameAsFirst(this);
}

void Int32IncrementWithOverflow::GenerateCode(MaglevAssembler* masm,
                                              const ProcessingState& state) {
  Register value = ToRegister(value_input());
  __ incl(value);
  __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
}

void Int32DecrementWithOverflow::SetValueLocationConstraints() {
  UseRegister(value_input());
  DefineSameAsFirst(this);
}

void Int32DecrementWithOverflow::GenerateCode(MaglevAssembler* masm,
                                              const ProcessingState& state) {
  Register value = ToRegister(value_input());
  __ decl(value);
  __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
}

void Int32NegateWithOverflow::SetValueLocationConstraints() {
  UseRegister(value_input());
  DefineSameAsFirst(this);
}

void Int32NegateWithOverflow::GenerateCode(MaglevAssembler* masm,
                                           const ProcessingState& state) {
  Register value = ToRegister(value_input());
  // Deopt when the result would be -0.
  __ testl(value, value);
  __ EmitEagerDeoptIf(zero, DeoptimizeReason::kOverflow, this);

  __ negl(value);
  __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
}

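// Editor's note (illustration only): int32 negation fails for exactly two
// inputs: 0, whose exact negation -0 is not an int32 (caught by the testl
// above), and kMinInt, since 2^31 is out of range (caught by the overflow
// flag after negl).
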
void Int32AbsWithOverflow::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  Register value = ToRegister(result());
  Label done;
  __ cmpl(value, Immediate(0));
  __ j(greater_equal, &done);
  __ negl(value);
  __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
  __ bind(&done);
}

void Int32BitwiseNot::SetValueLocationConstraints() {
  UseRegister(value_input());
  DefineSameAsFirst(this);
}

void Int32BitwiseNot::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  Register value = ToRegister(value_input());
  __ notl(value);
}

void Float64Add::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineSameAsFirst(this);
}

void Float64Add::GenerateCode(MaglevAssembler* masm,
                              const ProcessingState& state) {
  DoubleRegister left = ToDoubleRegister(left_input());
  DoubleRegister right = ToDoubleRegister(right_input());
  __ Addsd(left, right);
}

void Float64Subtract::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineSameAsFirst(this);
}

void Float64Subtract::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  DoubleRegister left = ToDoubleRegister(left_input());
  DoubleRegister right = ToDoubleRegister(right_input());
  __ Subsd(left, right);
}

void Float64Multiply::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineSameAsFirst(this);
}

void Float64Multiply::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  DoubleRegister left = ToDoubleRegister(left_input());
  DoubleRegister right = ToDoubleRegister(right_input());
  __ Mulsd(left, right);
}

void Float64Divide::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineSameAsFirst(this);
}

void Float64Divide::GenerateCode(MaglevAssembler* masm,
                                 const ProcessingState& state) {
  DoubleRegister left = ToDoubleRegister(left_input());
  DoubleRegister right = ToDoubleRegister(right_input());
  __ Divsd(left, right);
}

void Float64Modulus::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  RequireSpecificTemporary(rax);
  DefineAsRegister(this);
}

void Float64Modulus::GenerateCode(MaglevAssembler* masm,
                                  const ProcessingState& state) {
  // Approach copied from code-generator-x64.cc
  // Allocate space to use fld to move the value to the FPU stack.
  __ AllocateStackSpace(kDoubleSize);
  Operand scratch_stack_space = Operand(rsp, 0);
  __ Movsd(scratch_stack_space, ToDoubleRegister(right_input()));
  __ fld_d(scratch_stack_space);
  __ Movsd(scratch_stack_space, ToDoubleRegister(left_input()));
  __ fld_d(scratch_stack_space);
  // Loop while fprem isn't done.
  Label mod_loop;
  __ bind(&mod_loop);
  // This instruction traps on all kinds of inputs, but we are assuming the
  // floating point control word is set to ignore them all.
  __ fprem();
  // The following two instructions implicitly use rax.
  __ fnstsw_ax();
  if (CpuFeatures::IsSupported(SAHF)) {
    CpuFeatureScope sahf_scope(masm, SAHF);
    __ sahf();
  } else {
    __ shrl(rax, Immediate(8));
    __ andl(rax, Immediate(0xFF));
    __ pushq(rax);
    __ popfq();
  }
  __ j(parity_even, &mod_loop);
  // Move output to stack and clean up.
  __ fstp(1);
  __ fstp_d(scratch_stack_space);
  __ Movsd(ToDoubleRegister(result()), scratch_stack_space);
  __ addq(rsp, Immediate(kDoubleSize));
}

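// Editor's note (illustration only): this computes the same value as C's
// fmod, i.e. a remainder truncated toward zero with the dividend's sign,
// which is what JS % requires for doubles: fmod(5.5, 2.0) == 1.5 and
// fmod(-5.5, 2.0) == -1.5. fprem only reduces the exponent difference by a
// bounded amount per step, hence the loop until the FPU's C2 flag reports
// completion.
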
void Float64Negate::SetValueLocationConstraints() {
  UseRegister(input());
  DefineSameAsFirst(this);
}

void Float64Negate::GenerateCode(MaglevAssembler* masm,
                                 const ProcessingState& state) {
  DoubleRegister value = ToDoubleRegister(input());
  __ Negpd(value, value, kScratchRegister);
}

void Float64Abs::GenerateCode(MaglevAssembler* masm,
                              const ProcessingState& state) {
  DoubleRegister out = ToDoubleRegister(result());
  __ Abspd(out, out, kScratchRegister);
}

void Float64Round::GenerateCode(MaglevAssembler* masm,
                                const ProcessingState& state) {
  DoubleRegister in = ToDoubleRegister(input());
  DoubleRegister out = ToDoubleRegister(result());

  if (kind_ == Kind::kNearest) {
    MaglevAssembler::TemporaryRegisterScope temps(masm);
    DoubleRegister temp = temps.AcquireDouble();
    __ Move(temp, in);
    __ Roundsd(out, in, kRoundToNearest);
    // RoundToNearest rounds to even on tie, while JS expects it to round
    // towards +Infinity. Fix the difference by checking if we rounded down by
    // exactly 0.5, and if so, round to the other side.
    __ Subsd(temp, out);
    __ Move(kScratchDoubleReg, 0.5);
    Label done;
    __ Ucomisd(temp, kScratchDoubleReg);
    __ JumpIf(not_equal, &done, Label::kNear);
    // Fix wrong tie-to-even by adding 0.5 twice.
    __ Addsd(out, kScratchDoubleReg);
    __ Addsd(out, kScratchDoubleReg);
    __ bind(&done);
  } else if (kind_ == Kind::kFloor) {
    __ Roundsd(out, in, kRoundDown);
  } else if (kind_ == Kind::kCeil) {
    __ Roundsd(out, in, kRoundUp);
  }
}

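// Editor's sketch (plain C++, illustration only): kRoundToNearest resolves
// ties to even, but JS Math.round resolves them towards +Infinity; the
// fix-up above detects a tie that was rounded down and bumps the result:
//
//   double JsMathRound(double x) {
//     double r = std::nearbyint(x);  // ties to even: 2.5 -> 2
//     if (x - r == 0.5) r += 1.0;    // tie rounded down: 2.5 -> 3
//     return r;
//   }
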
int Float64Exponentiate::MaxCallStackArgs() const {
  return MaglevAssembler::ArgumentStackSlotsForCFunctionCall(2);
}
void Float64Exponentiate::SetValueLocationConstraints() {
  UseFixed(left_input(), xmm0);
  UseFixed(right_input(), xmm1);
  DefineSameAsFirst(this);
}
void Float64Exponentiate::GenerateCode(MaglevAssembler* masm,
                                       const ProcessingState& state) {
  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(2);
  __ CallCFunction(ExternalReference::ieee754_pow_function(), 2);
}

int Float64Ieee754Unary::MaxCallStackArgs() const {
  return MaglevAssembler::ArgumentStackSlotsForCFunctionCall(1);
}
void Float64Ieee754Unary::SetValueLocationConstraints() {
  UseFixed(input(), xmm0);
  DefineSameAsFirst(this);
}
void Float64Ieee754Unary::GenerateCode(MaglevAssembler* masm,
                                       const ProcessingState& state) {
  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(1);
  __ CallCFunction(ieee_function_ref(), 1);
}

void HoleyFloat64ToMaybeNanFloat64::SetValueLocationConstraints() {
  UseRegister(input());
  DefineSameAsFirst(this);
}
void HoleyFloat64ToMaybeNanFloat64::GenerateCode(MaglevAssembler* masm,
                                                 const ProcessingState& state) {
  DoubleRegister value = ToDoubleRegister(input());
  // The hole value is a signalling NaN, so just silence it to get the float64
  // value.
  __ Xorpd(kScratchDoubleReg, kScratchDoubleReg);
  __ Subsd(value, kScratchDoubleReg);
}

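// Editor's note (illustration only): any arithmetic operation quietens a
// signalling NaN, and x - 0.0 leaves every other double unchanged (including
// -0.0, as -0.0 - (+0.0) == -0.0), so subtracting zero is a cheap way to turn
// the hole sentinel into a regular quiet NaN.
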
namespace {

enum class ReduceInterruptBudgetType { kLoop, kReturn };

void HandleInterruptsAndTiering(MaglevAssembler* masm, ZoneLabelRef done,
                                Node* node, ReduceInterruptBudgetType type) {
  // For loops, first check for interrupts. Don't do this for returns, as we
  // can't lazy deopt to the end of a return.
  if (type == ReduceInterruptBudgetType::kLoop) {
    Label next;

    // Here, we only care about interrupts since we've already guarded against
    // real stack overflows on function entry.
    __ cmpq(rsp, __ StackLimitAsOperand(StackLimitKind::kInterruptStackLimit));
    __ j(above, &next);

    // An interrupt has been requested and we must call into runtime to handle
    // it; since we already pay the call cost, combine with the TieringManager
    // call.
    {
      SaveRegisterStateForCall save_register_state(masm,
                                                   node->register_snapshot());
      __ Move(kContextRegister, masm->native_context().object());
      __ Push(Operand(rbp, StandardFrameConstants::kFunctionOffset));
      __ CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheck_Maglev, 1);
      save_register_state.DefineSafepointWithLazyDeopt(node->lazy_deopt_info());
    }
    __ jmp(*done);  // All done, continue.

    __ bind(&next);
  }

  // No pending interrupts. Call into the TieringManager if needed.
  {
    SaveRegisterStateForCall save_register_state(masm,
                                                 node->register_snapshot());
    __ Move(kContextRegister, masm->native_context().object());
    __ Push(Operand(rbp, StandardFrameConstants::kFunctionOffset));
    // Note: must not cause a lazy deopt!
    __ CallRuntime(Runtime::kBytecodeBudgetInterrupt_Maglev, 1);
    save_register_state.DefineSafepoint();
  }
  __ jmp(*done);
}

void GenerateReduceInterruptBudget(MaglevAssembler* masm, Node* node,
                                   Register feedback_cell,
                                   ReduceInterruptBudgetType type, int amount) {
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  __ subl(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
          Immediate(amount));
  ZoneLabelRef done(masm);
  __ JumpToDeferredIf(less, HandleInterruptsAndTiering, done, node, type);
  __ bind(*done);
}

}  // namespace

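// Editor's note (illustration only): the feedback cell's interrupt budget is
// a counter that code like the above decrements by an amount reflecting the
// work done. Once the subl takes it below zero, the deferred path calls into
// the runtime, which handles any pending interrupt and gives the
// TieringManager a chance to upgrade the function; e.g. with budget B and a
// fixed per-back-edge amount A, a hot loop reaches the runtime after roughly
// B / A iterations.
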
int ReduceInterruptBudgetForLoop::MaxCallStackArgs() const { return 1; }
void ReduceInterruptBudgetForLoop::SetValueLocationConstraints() {
  UseRegister(feedback_cell());
}
void ReduceInterruptBudgetForLoop::GenerateCode(MaglevAssembler* masm,
                                                const ProcessingState& state) {
  GenerateReduceInterruptBudget(masm, this, ToRegister(feedback_cell()),
                                ReduceInterruptBudgetType::kLoop, amount());
}

int ReduceInterruptBudgetForReturn::MaxCallStackArgs() const { return 1; }
void ReduceInterruptBudgetForReturn::SetValueLocationConstraints() {
  UseRegister(feedback_cell());
  set_temporaries_needed(1);
}
void ReduceInterruptBudgetForReturn::GenerateCode(
    MaglevAssembler* masm, const ProcessingState& state) {
  GenerateReduceInterruptBudget(masm, this, ToRegister(feedback_cell()),
                                ReduceInterruptBudgetType::kReturn, amount());
}

// ---
// Control nodes
// ---
void Return::SetValueLocationConstraints() {
  UseFixed(value_input(), kReturnRegister0);
}
void Return::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
  DCHECK_EQ(ToRegister(value_input()), kReturnRegister0);

  // Read the formal number of parameters from the top level compilation unit
  // (i.e. the outermost, non inlined function).
  int formal_params_size =
      masm->compilation_info()->toplevel_compilation_unit()->parameter_count();

  // We're not going to continue execution, so we can use an arbitrary register
  // here instead of relying on temporaries from the register allocator.
  Register actual_params_size = r8;

  // Compute the size of the actual parameters + receiver (in bytes).
  // TODO(leszeks): Consider making this an input into Return to reuse the
  // incoming argc's register (if it's still valid).
  __ movq(actual_params_size,
          Operand(rbp, StandardFrameConstants::kArgCOffset));

  // Leave the frame.
  __ LeaveFrame(StackFrame::MAGLEV);

  // If actual is bigger than formal, then we should use it to free up the
  // stack arguments.
  Label drop_dynamic_arg_size;
  __ cmpq(actual_params_size, Immediate(formal_params_size));
  __ j(greater, &drop_dynamic_arg_size);

  // Drop receiver + arguments according to static formal arguments size.
  __ Ret(formal_params_size * kSystemPointerSize, kScratchRegister);

  __ bind(&drop_dynamic_arg_size);
  // Drop receiver + arguments according to dynamic arguments size.
  __ DropArguments(actual_params_size, r9);
  __ Ret();
}

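// Editor's note (illustration only): a JS caller may pass more arguments than
// the callee declares, so the epilogue drops whichever of the formal and
// actual argument counts is larger (each including the receiver), keeping the
// stack balanced for both under- and over-application.
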
895
896} // namespace maglev
897} // namespace internal
898} // namespace v8