v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
maglev-assembler-arm64.cc
1// Copyright 2022 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
9
10namespace v8 {
11namespace internal {
12namespace maglev {
13
14#define __ masm->
15
16namespace {
17
18void SubSizeAndTagObject(MaglevAssembler* masm, Register object,
19 Register size_in_bytes) {
20 __ Sub(object, object, size_in_bytes);
21 __ Add(object, object, kHeapObjectTag);
22}
23
24void SubSizeAndTagObject(MaglevAssembler* masm, Register object,
25 int size_in_bytes) {
26 __ Add(object, object, kHeapObjectTag - size_in_bytes);
27}
28
29template <typename T>
30void AllocateRaw(MaglevAssembler* masm, Isolate* isolate,
31 RegisterSnapshot register_snapshot, Register object,
32 T size_in_bytes, AllocationType alloc_type,
33 AllocationAlignment alignment) {
34 // TODO(victorgomes): Call the runtime for large object allocation.
35 // TODO(victorgomes): Support double alignment.
36 DCHECK(masm->allow_allocate());
37 DCHECK_EQ(alignment, kTaggedAligned);
38 if (v8_flags.single_generation) {
39 alloc_type = AllocationType::kOld;
40 }
41 ExternalReference top = SpaceAllocationTopAddress(isolate, alloc_type);
42 ExternalReference limit = SpaceAllocationLimitAddress(isolate, alloc_type);
43 ZoneLabelRef done(masm);
44 MaglevAssembler::TemporaryRegisterScope temps(masm);
45 Register scratch = temps.AcquireScratch();
46 // We are a bit short on registers, so we use the same register for {object}
47 // and {new_top}. Once we have defined {new_top}, we don't use {object} until
48 // {new_top} is used for the last time. And there (at the end of this
49 // function), we recover the original {object} from {new_top} by subtracting
50 // {size_in_bytes}.
51 Register new_top = object;
52 // Check if there is enough space.
53 __ Ldr(object, __ ExternalReferenceAsOperand(top, scratch));
54 __ Add(new_top, object, size_in_bytes);
55 __ Ldr(scratch, __ ExternalReferenceAsOperand(limit, scratch));
56 __ Cmp(new_top, scratch);
57 // Otherwise call runtime.
58 __ JumpToDeferredIf(kUnsignedGreaterThanEqual, AllocateSlow<T>,
59 register_snapshot, object, AllocateBuiltin(alloc_type),
60 size_in_bytes, done);
61 // Store new top and tag object.
62 __ Move(__ ExternalReferenceAsOperand(top, scratch), new_top);
63 SubSizeAndTagObject(masm, object, size_in_bytes);
64 __ bind(*done);
65}
66} // namespace
67
68void MaglevAssembler::Allocate(RegisterSnapshot register_snapshot,
69 Register object, int size_in_bytes,
70 AllocationType alloc_type,
71 AllocationAlignment alignment) {
72 AllocateRaw(this, isolate_, register_snapshot, object, size_in_bytes,
73 alloc_type, alignment);
74}
75
76void MaglevAssembler::Allocate(RegisterSnapshot register_snapshot,
77 Register object, Register size_in_bytes,
78 AllocationType alloc_type,
79 AllocationAlignment alignment) {
80 AllocateRaw(this, isolate_, register_snapshot, object, size_in_bytes,
81 alloc_type, alignment);
82}
83
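The fast path emitted by AllocateRaw above is a classic bump-pointer allocation: load the space's allocation top, bump it by the requested size, compare against the limit and, if there is not enough room, defer to the AllocateSlow path (which calls the allocation builtin); otherwise store the new top and tag the result. A minimal C++ sketch of that run-time logic, assuming the names and the return-0 failure convention are purely illustrative and not V8 API:

#include <cstddef>
#include <cstdint>

constexpr uintptr_t kHeapObjectTag = 1;  // matches V8's heap-object tagging

// Returns 0 when the slow (runtime) path would be taken instead.
uintptr_t BumpPointerAllocate(uintptr_t* top, uintptr_t limit,
                              size_t size_in_bytes) {
  uintptr_t object = *top;
  uintptr_t new_top = object + size_in_bytes;
  if (new_top >= limit) return 0;   // not enough space: defer to the runtime
  *top = new_top;                   // publish the new allocation top
  return object + kHeapObjectTag;   // tag the raw address as a HeapObject
}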
84void MaglevAssembler::OSRPrologue(Graph* graph) {
85 DCHECK(graph->is_osr());
86 CHECK(!graph->has_recursive_calls());
87
88 uint32_t source_frame_size =
89 graph->min_maglev_stackslots_for_unoptimized_frame_size();
90
91 static_assert(StandardFrameConstants::kFixedSlotCount % 2 == 1);
92 if (source_frame_size % 2 == 0) source_frame_size++;
93
94 if (V8_ENABLE_SANDBOX_BOOL || v8_flags.debug_code) {
95 TemporaryRegisterScope temps(this);
96 Register scratch = temps.AcquireScratch();
97 Add(scratch, sp,
98 source_frame_size * kSystemPointerSize +
99 StandardFrameConstants::kFixedFrameSizeFromFp);
100 Cmp(scratch, fp);
101 SbxCheck(eq, AbortReason::kOsrUnexpectedStackSize);
102 }
103
104 uint32_t target_frame_size =
105 graph->tagged_stack_slots() + graph->untagged_stack_slots();
106 CHECK_EQ(target_frame_size % 2, 1);
107 CHECK_LE(source_frame_size, target_frame_size);
108 if (source_frame_size < target_frame_size) {
109 ASM_CODE_COMMENT_STRING(this, "Growing frame for OSR");
110 uint32_t additional_tagged =
111 source_frame_size < graph->tagged_stack_slots()
112 ? graph->tagged_stack_slots() - source_frame_size
113 : 0;
114 uint32_t additional_tagged_double =
115 additional_tagged / 2 + additional_tagged % 2;
116 for (size_t i = 0; i < additional_tagged_double; ++i) {
117 Push(xzr, xzr);
118 }
119 uint32_t size_so_far = source_frame_size + additional_tagged_double * 2;
120 CHECK_LE(size_so_far, target_frame_size);
121 if (size_so_far < target_frame_size) {
122 Sub(sp, sp,
123 Immediate((target_frame_size - size_so_far) * kSystemPointerSize));
124 }
125 }
126}
127
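OSRPrologue only ever grows the frame: the incoming frame size is first padded to an odd slot count (the fixed frame slots are an odd count, so the total stays 16-byte aligned), the extra tagged slots are zeroed in pairs with Push(xzr, xzr), and any untagged remainder is claimed by lowering sp. A sketch of that arithmetic in plain C++, with a hypothetical struct and function name:

#include <cstdint>

struct OsrGrowth {
  uint32_t zero_pair_pushes;  // number of Push(xzr, xzr) emitted
  uint32_t extra_slots;       // uninitialized slots taken via Sub(sp, ...)
};

OsrGrowth ComputeOsrGrowth(uint32_t source_frame_size,
                           uint32_t tagged_stack_slots,
                           uint32_t target_frame_size) {
  if (source_frame_size % 2 == 0) source_frame_size++;  // keep slot count odd
  uint32_t additional_tagged = source_frame_size < tagged_stack_slots
                                   ? tagged_stack_slots - source_frame_size
                                   : 0;
  uint32_t pairs = additional_tagged / 2 + additional_tagged % 2;
  uint32_t size_so_far = source_frame_size + pairs * 2;
  return {pairs, target_frame_size - size_so_far};
}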
128void MaglevAssembler::Prologue(Graph* graph) {
129 TemporaryRegisterScope temps(this);
130 // We add two extra registers to the scope. Ideally we could add all the
131 // allocatable general registers, except Context, JSFunction, NewTarget and
132 // ArgCount. Unfortunately, OptimizeCodeOrTailCallOptimizedCodeSlot and
133 // LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing pick random registers and
134 // we could alias those.
135 // TODO(victorgomes): Fix these builtins to either use the scope or pass the
136 // used registers manually.
137 temps.Include({x14, x15});
138
139 DCHECK(!graph->is_osr());
140
141 CallTarget();
143
144 if (graph->has_recursive_calls()) {
145 BindCallTarget(code_gen_state()->entry_label());
146 }
147
148#ifndef V8_ENABLE_LEAPTIERING
149 // Tiering support.
150 if (v8_flags.turbofan) {
151 using D = MaglevOptimizeCodeOrTailCallOptimizedCodeSlotDescriptor;
152 Register flags = D::GetRegisterParameter(D::kFlags);
153 Register feedback_vector = D::GetRegisterParameter(D::kFeedbackVector);
154 DCHECK(!AreAliased(flags, feedback_vector, kJavaScriptCallArgCountRegister,
155 kJSFunctionRegister, kContextRegister,
156 kJavaScriptCallNewTargetRegister,
157 kJavaScriptCallDispatchHandleRegister));
158 DCHECK(!temps.Available().has(flags));
159 DCHECK(!temps.Available().has(feedback_vector));
160 Move(feedback_vector,
161 compilation_info()->toplevel_compilation_unit()->feedback().object());
162 Condition needs_processing =
163 LoadFeedbackVectorFlagsAndCheckIfNeedsProcessing(flags, feedback_vector,
164 CodeKind::MAGLEV);
165 TailCallBuiltin(Builtin::kMaglevOptimizeCodeOrTailCallOptimizedCodeSlot,
166 needs_processing);
167 }
168#endif // !V8_ENABLE_LEAPTIERING
169
170 EnterFrame(StackFrame::MAGLEV);
171
172 // Save arguments in frame.
173 // TODO(leszeks): Consider eliding this frame if we don't make any calls
174 // that could clobber these registers.
175 // Push the context and the JSFunction.
176 Push(kContextRegister, kJSFunctionRegister);
177 // Push the actual argument count and a _possible_ stack slot.
178 Push(kJavaScriptCallArgCountRegister, xzr);
179 int remaining_stack_slots = code_gen_state()->stack_slots() - 1;
180 DCHECK_GE(remaining_stack_slots, 0);
181
182 // Initialize stack slots.
183 if (graph->tagged_stack_slots() > 0) {
184 ASM_CODE_COMMENT_STRING(this, "Initializing stack slots");
185
186 // If tagged_stack_slots is divisible by 2, we overshoot and allocate one
187 // extra stack slot, otherwise we allocate exactly the right amount, since
188 // one stack slot has already been allocated.
189 int tagged_two_slots_count = graph->tagged_stack_slots() / 2;
190 remaining_stack_slots -= 2 * tagged_two_slots_count;
191
192 // Magic value. Experimentally, an unroll size of 8 doesn't seem any
193 // worse than fully unrolled pushes.
194 const int kLoopUnrollSize = 8;
195 if (tagged_two_slots_count < kLoopUnrollSize) {
196 for (int i = 0; i < tagged_two_slots_count; i++) {
197 Push(xzr, xzr);
198 }
199 } else {
200 TemporaryRegisterScope temps(this);
201 Register count = temps.AcquireScratch();
202 // Extract the first few slots to round to the unroll size.
203 int first_slots = tagged_two_slots_count % kLoopUnrollSize;
204 for (int i = 0; i < first_slots; ++i) {
205 Push(xzr, xzr);
206 }
207 Move(count, tagged_two_slots_count / kLoopUnrollSize);
208 // We enter the loop unconditionally, so make sure we need to loop at
209 // least once.
210 DCHECK_GT(tagged_two_slots_count / kLoopUnrollSize, 0);
211 Label loop;
212 bind(&loop);
213 for (int i = 0; i < kLoopUnrollSize; ++i) {
214 Push(xzr, xzr);
215 }
216 Subs(count, count, Immediate(1));
217 B(&loop, gt);
218 }
219 }
220 if (remaining_stack_slots > 0) {
221 // Round up.
222 remaining_stack_slots += (remaining_stack_slots % 2);
223 // Extend sp by the size of the remaining untagged part of the frame,
224 // no need to initialise these.
225 Sub(sp, sp, Immediate(remaining_stack_slots * kSystemPointerSize));
226 }
227}
228
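The slot-initialization strategy in Prologue boils down to: one slot is already accounted for by the prologue pushes (hence stack_slots() - 1), tagged slots are zeroed in pairs (straight-line for fewer than kLoopUnrollSize pairs, otherwise a peeled remainder followed by an 8-pair loop), and untagged slots are claimed by lowering sp after rounding up to an even count. A sketch of that breakdown, with a hypothetical FramePlan struct:

#include <cstdint>

struct FramePlan {
  int straight_line_pair_pushes;  // Push(xzr, xzr) emitted outside the loop
  int loop_iterations;            // each iteration pushes kLoopUnrollSize pairs
  int untagged_sp_slots;          // uninitialized slots claimed via Sub(sp, ...)
};

FramePlan PlanFrame(int tagged_stack_slots, int total_stack_slots) {
  constexpr int kLoopUnrollSize = 8;
  int remaining = total_stack_slots - 1;  // one slot already pushed above
  int pairs = tagged_stack_slots / 2;
  remaining -= 2 * pairs;
  FramePlan plan{0, 0, 0};
  if (pairs < kLoopUnrollSize) {
    plan.straight_line_pair_pushes = pairs;
  } else {
    plan.straight_line_pair_pushes = pairs % kLoopUnrollSize;
    plan.loop_iterations = pairs / kLoopUnrollSize;
  }
  plan.untagged_sp_slots = remaining > 0 ? remaining + (remaining % 2) : 0;
  return plan;
}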
229void MaglevAssembler::MaybeEmitDeoptBuiltinsCall(size_t eager_deopt_count,
230 Label* eager_deopt_entry,
231 size_t lazy_deopt_count,
232 Label* lazy_deopt_entry) {
234
235 DCHECK_GE(Deoptimizer::kLazyDeoptExitSize, Deoptimizer::kEagerDeoptExitSize);
236 size_t deopt_count = eager_deopt_count + lazy_deopt_count;
237 CheckVeneerPool(
238 false, false,
239 static_cast<int>(deopt_count) * Deoptimizer::kLazyDeoptExitSize);
240
241 TemporaryRegisterScope scope(this);
242 Register scratch = scope.AcquireScratch();
243 if (eager_deopt_count > 0) {
244 Bind(eager_deopt_entry);
245 LoadEntryFromBuiltin(Builtin::kDeoptimizationEntry_Eager, scratch);
246 MacroAssembler::Jump(scratch);
247 }
248 if (lazy_deopt_count > 0) {
249 Bind(lazy_deopt_entry);
250 LoadEntryFromBuiltin(Builtin::kDeoptimizationEntry_Lazy, scratch);
251 MacroAssembler::Jump(scratch);
252 }
253}
254
255 void MaglevAssembler::LoadSingleCharacterString(Register result,
256 Register char_code,
257 Register scratch) {
258 DCHECK_NE(char_code, scratch);
259 if (v8_flags.debug_code) {
260 Cmp(char_code, Immediate(String::kMaxOneByteCharCode));
261 Assert(ls, AbortReason::kUnexpectedValue);
262 }
263 Register table = scratch;
264 LoadRoot(table, RootIndex::kSingleCharacterStringTable);
265 LoadTaggedFieldByIndex(result, table, char_code, kTaggedSize,
266 OFFSET_OF_DATA_START(FixedArray));
267}
268
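LoadSingleCharacterString serves one-byte char codes from a pre-built FixedArray of single-character strings, so the lookup is just a tagged-element load at table[char_code]. A hedged sketch of the equivalent field-offset computation; the concrete constants are assumptions about a pointer-compressed build:

#include <cstdint>

constexpr int kTaggedSize = 4;            // assumed: pointer compression enabled
constexpr int kFixedArrayDataOffset = 8;  // assumed OFFSET_OF_DATA_START(FixedArray)
constexpr int kHeapObjectTag = 1;

// Byte offset from the tagged table pointer to element `char_code`, mirroring
// LoadTaggedFieldByIndex(result, table, char_code, kTaggedSize,
// OFFSET_OF_DATA_START(FixedArray)).
int64_t SingleCharElementOffset(int char_code) {
  return kFixedArrayDataOffset - kHeapObjectTag + char_code * kTaggedSize;
}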
269void MaglevAssembler::StringFromCharCode(RegisterSnapshot register_snapshot,
270 Label* char_code_fits_one_byte,
271 Register result, Register char_code,
272 Register scratch,
273 CharCodeMaskMode mask_mode) {
274 AssertZeroExtended(char_code);
275 DCHECK_NE(char_code, scratch);
276 ZoneLabelRef done(this);
277 if (mask_mode == CharCodeMaskMode::kMustApplyMask) {
278 And(char_code, char_code, Immediate(0xFFFF));
279 }
280 Cmp(char_code, Immediate(String::kMaxOneByteCharCode));
281 JumpToDeferredIf(
282 hi,
283 [](MaglevAssembler* masm, RegisterSnapshot register_snapshot,
284 ZoneLabelRef done, Register result, Register char_code,
285 Register scratch) {
286 // Be sure to save {char_code}. If it aliases with {result}, use
287 // the scratch register.
288 // TODO(victorgomes): This is probably not needed any more, because
289 // we now ensure that results registers don't alias with inputs/temps.
290 // Confirm, and drop this check.
291 if (char_code.Aliases(result)) {
292 __ Move(scratch, char_code);
293 char_code = scratch;
294 }
295 DCHECK(!char_code.Aliases(result));
296 DCHECK(!register_snapshot.live_tagged_registers.has(char_code));
297 register_snapshot.live_registers.set(char_code);
298 __ AllocateTwoByteString(register_snapshot, result, 1);
299 __ Strh(
300 char_code.W(),
301 FieldMemOperand(result, OFFSET_OF_DATA_START(SeqTwoByteString)));
302 __ B(*done);
303 },
304 register_snapshot, done, result, char_code, scratch);
305 if (char_code_fits_one_byte != nullptr) {
306 bind(char_code_fits_one_byte);
307 }
308 LoadSingleCharacterString(result, char_code, scratch);
309 bind(*done);
310}
311
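StringFromCharCode above therefore has two outcomes: char codes up to String::kMaxOneByteCharCode come from the cached single-character table, anything larger gets a freshly allocated length-1 two-byte string with the code unit stored via Strh. A small sketch of that decision; the enum and function are illustrative only:

#include <cstdint>

enum class CharCodePath { kSingleCharacterTable, kAllocateTwoByteString };

CharCodePath PathFor(uint32_t char_code, bool must_apply_mask) {
  if (must_apply_mask) char_code &= 0xFFFF;       // CharCodeMaskMode::kMustApplyMask
  constexpr uint32_t kMaxOneByteCharCode = 0xFF;  // String::kMaxOneByteCharCode
  return char_code <= kMaxOneByteCharCode
             ? CharCodePath::kSingleCharacterTable
             : CharCodePath::kAllocateTwoByteString;
}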
312 void MaglevAssembler::StringCharCodeOrCodePointAt(
313 BuiltinStringPrototypeCharCodeOrCodePointAt::Mode mode,
314 RegisterSnapshot& register_snapshot, Register result, Register string,
315 Register index, Register scratch1, Register scratch2,
316 Label* result_fits_one_byte) {
317 ZoneLabelRef done(this);
318 Label seq_string;
319 Label cons_string;
320 Label sliced_string;
321
322 Label* deferred_runtime_call = MakeDeferredCode(
323 [](MaglevAssembler* masm,
324 BuiltinStringPrototypeCharCodeOrCodePointAt::Mode mode,
325 RegisterSnapshot register_snapshot, ZoneLabelRef done, Register result,
326 Register string, Register index) {
327 DCHECK(!register_snapshot.live_registers.has(result));
328 DCHECK(!register_snapshot.live_registers.has(string));
329 DCHECK(!register_snapshot.live_registers.has(index));
330 {
331 SaveRegisterStateForCall save_register_state(masm, register_snapshot);
332 __ SmiTag(index);
333 __ Push(string, index);
334 __ Move(kContextRegister, masm->native_context().object());
335 // This call does not throw nor can it deopt.
336 if (mode ==
337 BuiltinStringPrototypeCharCodeOrCodePointAt::kCodePointAt) {
338 __ CallRuntime(Runtime::kStringCodePointAt);
339 } else {
340 DCHECK_EQ(mode,
341 BuiltinStringPrototypeCharCodeOrCodePointAt::kCharCodeAt);
342 __ CallRuntime(Runtime::kStringCharCodeAt);
343 }
344 save_register_state.DefineSafepoint();
345 __ SmiUntag(kReturnRegister0);
346 __ Move(result, kReturnRegister0);
347 }
348 __ jmp(*done);
349 },
350 mode, register_snapshot, done, result, string, index);
351
352 // We might need to try more than one time for ConsString, SlicedString and
353 // ThinString.
354 Label loop;
355 bind(&loop);
356
357 if (v8_flags.debug_code) {
358 // Check if {string} is a string.
359 AssertObjectTypeInRange(string, FIRST_STRING_TYPE, LAST_STRING_TYPE,
360 AbortReason::kUnexpectedValue);
361
362 Ldr(scratch1.W(), FieldMemOperand(string, offsetof(String, length_)));
363 Cmp(index.W(), scratch1.W());
364 Check(lo, AbortReason::kUnexpectedValue);
365 }
366
367#if V8_STATIC_ROOTS_BOOL
368 Register map = scratch1.W();
369 LoadMapForCompare(map, string);
370#else
371 Register instance_type = scratch1;
372 // Get instance type.
373 LoadInstanceType(instance_type, string);
374#endif
375
376 {
377#if V8_STATIC_ROOTS_BOOL
378 using StringTypeRange = InstanceTypeChecker::kUniqueMapRangeOfStringType;
379 // Check the string map ranges in dense increasing order, to avoid needing
380 // to subtract away the lower bound.
381 static_assert(StringTypeRange::kSeqString.first == 0);
382 CompareInt32AndJumpIf(map, StringTypeRange::kSeqString.second,
383 kUnsignedLessThanEqual, &seq_string, Label::kNear);
384
385 static_assert(StringTypeRange::kSeqString.second + Map::kSize ==
386 StringTypeRange::kExternalString.first);
387 CompareInt32AndJumpIf(map, StringTypeRange::kExternalString.second,
388 kUnsignedLessThanEqual, deferred_runtime_call);
389 // TODO(victorgomes): Add fast path for external strings.
390
391 static_assert(StringTypeRange::kExternalString.second + Map::kSize ==
392 StringTypeRange::kConsString.first);
393 CompareInt32AndJumpIf(map, StringTypeRange::kConsString.second,
394 kUnsignedLessThanEqual, &cons_string, Label::kNear);
395
396 static_assert(StringTypeRange::kConsString.second + Map::kSize ==
397 StringTypeRange::kSlicedString.first);
398 CompareInt32AndJumpIf(map, StringTypeRange::kSlicedString.second,
399 kUnsignedLessThanEqual, &sliced_string, Label::kNear);
400
401 static_assert(StringTypeRange::kSlicedString.second + Map::kSize ==
402 StringTypeRange::kThinString.first);
403 // No need to check for thin strings, they're the last string map.
404 static_assert(StringTypeRange::kThinString.second ==
405 InstanceTypeChecker::kStringMapUpperBound);
406 // Fallthrough to thin string.
407#else
408 TemporaryRegisterScope temps(this);
409 Register representation = temps.AcquireScratch().W();
410
411 // TODO(victorgomes): Add fast path for external strings.
412 And(representation, instance_type.W(),
413 Immediate(kStringRepresentationMask));
414 CompareAndBranch(representation, Immediate(kSeqStringTag), kEqual,
415 &seq_string);
416 CompareAndBranch(representation, Immediate(kConsStringTag), kEqual,
417 &cons_string);
418 CompareAndBranch(representation, Immediate(kSlicedStringTag), kEqual,
419 &sliced_string);
420 CompareAndBranch(representation, Immediate(kThinStringTag), kNotEqual,
421 deferred_runtime_call);
422 // Fallthrough to thin string.
423#endif
424 }
425
426 // Is a thin string.
427 {
428 LoadTaggedField(string, string, offsetof(ThinString, actual_));
429 B(&loop);
430 }
431
432 bind(&sliced_string);
433 {
434 TemporaryRegisterScope temps(this);
435 Register offset = temps.AcquireScratch();
436
437 LoadAndUntagTaggedSignedField(offset, string,
438 offsetof(SlicedString, offset_));
439 LoadTaggedField(string, string, offsetof(SlicedString, parent_));
440 Add(index, index, offset);
441 B(&loop);
442 }
443
444 bind(&cons_string);
445 {
446 // Reuse {instance_type} register here, since CompareRoot requires a scratch
447 // register as well.
448 Register second_string = scratch1;
449 LoadTaggedFieldWithoutDecompressing(second_string, string,
450 offsetof(ConsString, second_));
451 CompareRoot(second_string, RootIndex::kempty_string);
452 B(deferred_runtime_call, ne);
453 LoadTaggedField(string, string, offsetof(ConsString, first_));
454 B(&loop); // Try again with first string.
455 }
456
457 bind(&seq_string);
458 {
459 Label two_byte_string;
460#if V8_STATIC_ROOTS_BOOL
461 if (InstanceTypeChecker::kTwoByteStringMapBit == 0) {
462 TestInt32AndJumpIfAllClear(map,
463 InstanceTypeChecker::kStringMapEncodingMask,
464 &two_byte_string, Label::kNear);
465 } else {
466 TestInt32AndJumpIfAnySet(map, InstanceTypeChecker::kStringMapEncodingMask,
467 &two_byte_string, Label::kNear);
468 }
469#else
470 TestAndBranchIfAllClear(instance_type, kOneByteStringTag, &two_byte_string);
471#endif
472 // The result for a one-byte string will be the same for both modes
473 // (CharCodeAt/CodePointAt), since it cannot be the first half of a
474 // surrogate pair.
475 Add(index, index, OFFSET_OF_DATA_START(SeqOneByteString) - kHeapObjectTag);
476 Ldrb(result, MemOperand(string, index));
477 B(result_fits_one_byte);
478
479 bind(&two_byte_string);
481 // {instance_type} is unused from this point, so we can use it as scratch.
481 Register scratch = scratch1;
482 Lsl(scratch, index, 1);
483 Add(scratch, scratch,
484 OFFSET_OF_DATA_START(SeqTwoByteString) - kHeapObjectTag);
485
486 if (mode == BuiltinStringPrototypeCharCodeOrCodePointAt::kCharCodeAt) {
487 Ldrh(result, MemOperand(string, scratch));
488 } else {
489 DCHECK_EQ(mode,
490 BuiltinStringPrototypeCharCodeOrCodePointAt::kCodePointAt);
491 Register string_backup = string;
492 if (result == string) {
493 string_backup = scratch2;
494 Mov(string_backup, string);
495 }
496 Ldrh(result, MemOperand(string, scratch));
497
498 Register first_code_point = scratch;
499 And(first_code_point.W(), result.W(), Immediate(0xfc00));
500 CompareAndBranch(first_code_point, Immediate(0xd800), kNotEqual, *done);
501
502 Register length = scratch;
503 Ldr(length.W(),
504 FieldMemOperand(string_backup, offsetof(String, length_)));
505 Add(index.W(), index.W(), Immediate(1));
506 CompareAndBranch(index, length, kGreaterThanEqual, *done);
507
508 Register second_code_point = scratch;
509 Lsl(index, index, 1);
510 Add(index, index,
511 OFFSET_OF_DATA_START(SeqTwoByteString) - kHeapObjectTag);
512 Ldrh(second_code_point, MemOperand(string_backup, index));
513
514 // {index} is not needed at this point.
515 Register scratch2 = index;
516 And(scratch2.W(), second_code_point.W(), Immediate(0xfc00));
517 CompareAndBranch(scratch2, Immediate(0xdc00), kNotEqual, *done);
518
519 int surrogate_offset = 0x10000 - (0xd800 << 10) - 0xdc00;
520 Add(second_code_point, second_code_point, Immediate(surrogate_offset));
521 Lsl(result, result, 10);
522 Add(result, result, second_code_point);
523 }
524
525 // Fallthrough.
526 }
527
528 bind(*done);
529
530 if (v8_flags.debug_code) {
531 // We make sure that the user of this macro is not relying on {string} and
532 // {index} not being clobbered.
533 if (result != string) {
534 Mov(string, Immediate(0xdeadbeef));
535 }
536 if (result != index) {
537 Mov(index, Immediate(0xdeadbeef));
538 }
539 }
540}
541
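The CodePointAt path above folds the usual surrogate-pair decoding, 0x10000 + ((lead - 0xD800) << 10) + (trail - 0xDC00), into the single precomputed surrogate_offset constant, so the emitted code only needs one shift and two adds. A standalone sketch of the same computation:

#include <cstdint>

uint32_t CombineSurrogatePair(uint32_t lead, uint32_t trail) {
  constexpr int32_t kSurrogateOffset = 0x10000 - (0xd800 << 10) - 0xdc00;
  // Equivalent to 0x10000 + ((lead - 0xd800) << 10) + (trail - 0xdc00).
  return (lead << 10) + trail + kSurrogateOffset;
}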
542 void MaglevAssembler::TruncateDoubleToInt32(Register dst, DoubleRegister src) {
543 if (CpuFeatures::IsSupported(JSCVT)) {
544 Fjcvtzs(dst.W(), src);
545 return;
546 }
547
548 ZoneLabelRef done(this);
549 // Try to convert with an FPU convert instruction. It's trivial to compute
550 // the modulo operation on an integer register so we convert to a 64-bit
551 // integer.
552 //
553 // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7FF...FF)
554 // when the double is out of range. NaNs and infinities will be converted to 0
555 // (as ECMA-262 requires).
556 Fcvtzs(dst.X(), src);
557
558 // The values INT64_MIN (0x800...00) or INT64_MAX (0x7FF...FF) are not
559 // representable using a double, so if the result is one of those then we know
560 // that saturation occurred, and we need to manually handle the conversion.
561 //
562 // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting
563 // 1 will cause signed overflow.
564 Cmp(dst.X(), 1);
565 Ccmp(dst.X(), -1, VFlag, vc);
566
567 JumpToDeferredIf(
568 vs,
569 [](MaglevAssembler* masm, DoubleRegister src, Register dst,
570 ZoneLabelRef done) {
571 __ MacroAssembler::Push(xzr, src);
572 __ CallBuiltin(Builtin::kDoubleToI);
573 __ Ldr(dst.W(), MemOperand(sp, 0));
574 DCHECK_EQ(xzr.SizeInBytes(), src.SizeInBytes());
575 __ Drop(2);
576 __ B(*done);
577 },
578 src, dst, done);
579
580 Bind(*done);
581 // Zero extend the converted value to complete the truncation.
582 Mov(dst, Operand(dst.W(), UXTW));
583}
584
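TruncateDoubleToInt32 above relies on Fcvtzs saturating out-of-range inputs to INT64_MIN or INT64_MAX; the Cmp/Ccmp pair flags exactly those two results (adding or subtracting 1 overflows only for them) and routes them to the DoubleToI builtin. Expressed in plain C++, the deferred-path condition is simply the following check; the function name is illustrative:

#include <cstdint>
#include <limits>

// True when the Fcvtzs result may have saturated and the DoubleToI builtin
// needs to redo the conversion.
bool MayHaveSaturated(int64_t converted) {
  return converted == std::numeric_limits<int64_t>::min() ||
         converted == std::numeric_limits<int64_t>::max();
}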
585 void MaglevAssembler::TryTruncateDoubleToInt32(Register dst, DoubleRegister src,
586 Label* fail) {
587 TemporaryRegisterScope temps(this);
588 DoubleRegister converted_back = temps.AcquireScratchDouble();
589
590 // Convert the input float64 value to int32.
591 Fcvtzs(dst.W(), src);
592 // Convert that int32 value back to float64.
593 Scvtf(converted_back, dst.W());
594 // Check that the result of the float64->int32->float64 is equal to the input
595 // (i.e. that the conversion didn't truncate).
596 Fcmp(src, converted_back);
597 JumpIf(ne, fail);
598
599 // Check if {input} is -0.
600 Label check_done;
601 Cbnz(dst, &check_done);
602
603 // In case of 0, we need to check for the IEEE 0 pattern (which is all zeros).
604 Register input_bits = temps.AcquireScratch();
605 Fmov(input_bits, src);
606 Cbnz(input_bits, fail);
607
608 Bind(&check_done);
609}
610
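TryTruncateDoubleToInt32 above checks exactness by converting back and comparing, and additionally rejects -0.0 by inspecting the raw bit pattern when the integer result is 0 (Fcmp alone cannot distinguish 0.0 from -0.0). A hedged C++ sketch of the same policy, where an explicit range guard stands in for the saturating Fcvtzs semantics:

#include <cstdint>
#include <cstring>
#include <optional>

std::optional<int32_t> TryTruncateToInt32(double value) {
  // Reject NaN and out-of-range values up front (Fcvtzs would saturate here).
  if (!(value >= INT32_MIN && value <= INT32_MAX)) return std::nullopt;
  int32_t truncated = static_cast<int32_t>(value);
  // Exact only if the int32 -> float64 round trip reproduces the input.
  if (static_cast<double>(truncated) != value) return std::nullopt;
  if (truncated == 0) {
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof bits);  // like Fmov into a GP register
    if (bits != 0) return std::nullopt;       // -0.0: sign bit set
  }
  return truncated;
}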
611 void MaglevAssembler::TryTruncateDoubleToUint32(Register dst,
612 DoubleRegister src,
613 Label* fail) {
614 TemporaryRegisterScope temps(this);
615 DoubleRegister converted_back = temps.AcquireScratchDouble();
616
617 // Convert the input float64 value to uint32.
618 Fcvtzu(dst.W(), src);
619 // Convert that uint32 value back to float64.
620 Ucvtf(converted_back, dst);
621 // Check that the result of the float64->uint32->float64 is equal to the input
622 // (i.e. that the conversion didn't truncate).
623 Fcmp(src, converted_back);
624 JumpIf(ne, fail);
625
626 // Check if {input} is -0.
627 Label check_done;
628 Cbnz(dst, &check_done);
629
630 // In case of 0, we need to check for the IEEE 0 pattern (which is all zeros).
631 Register input_bits = temps.AcquireScratch();
632 Fmov(input_bits, src);
633 Cbnz(input_bits, fail);
634
635 Bind(&check_done);
636}
637
638 void MaglevAssembler::TryChangeFloat64ToIndex(Register result,
639 DoubleRegister value,
640 Label* success, Label* fail) {
641 TemporaryRegisterScope temps(this);
642 DoubleRegister converted_back = temps.AcquireScratchDouble();
643 // Convert the input float64 value to int32.
644 Fcvtzs(result.W(), value);
645 // Convert that int32 value back to float64.
646 Scvtf(converted_back, result.W());
647 // Check that the result of the float64->int32->float64 is equal to
648 // the input (i.e. that the conversion didn't truncate).
649 Fcmp(value, converted_back);
650 JumpIf(kNotEqual, fail);
651 Jump(success);
652}
653
654} // namespace maglev
655} // namespace internal
656} // namespace v8