V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.

machine-lowering-reducer-inl.h
// Copyright 2023 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_TURBOSHAFT_MACHINE_LOWERING_REDUCER_INL_H_
#define V8_COMPILER_TURBOSHAFT_MACHINE_LOWERING_REDUCER_INL_H_

#include <optional>

#include "src/base/logging.h"
#include "src/common/globals.h"
#include "src/objects/bigint.h"
#include "src/objects/oddball.h"
#include "src/runtime/runtime.h"
#include "src/utils/utils.h"

namespace v8::internal::compiler::turboshaft {

#include "src/compiler/turboshaft/define-assembler-macros.inc"

// MachineLoweringReducer, formerly known as EffectControlLinearizer, lowers
// simplified operations to machine operations.
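// Like other Turboshaft reducers, it is instantiated as one layer of a
// reducer stack; a phase might compose it roughly like this (illustrative
// sketch only, the exact phase and reducer composition varies across V8
// versions):
//
//   CopyingPhase<MachineLoweringReducer, ValueNumberingReducer>::Run(
//       data, temp_zone);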
template <typename Next>
class MachineLoweringReducer : public Next {
 public:
  TURBOSHAFT_REDUCER_BOILERPLATE(MachineLowering)

  bool NeedsHeapObjectCheck(ObjectIsOp::InputAssumptions input_assumptions) {
    // TODO(nicohartmann@): Consider type information once we have that.
    switch (input_assumptions) {
      case ObjectIsOp::InputAssumptions::kNone:
        return true;
      case ObjectIsOp::InputAssumptions::kHeapObject:
      case ObjectIsOp::InputAssumptions::kBigInt:
        return false;
    }
  }

  V<Word32> REDUCE(Word32SignHint)(V<Word32> input, Word32SignHintOp::Sign) {
    // As far as Machine operations are concerned, Int32 and Uint32 are both
    // Word32.
    return input;
  }

  V<Untagged> REDUCE(ChangeOrDeopt)(V<Untagged> input,
                                    V<FrameState> frame_state,
                                    ChangeOrDeoptOp::Kind kind,
                                    CheckForMinusZeroMode minus_zero_mode,
                                    const FeedbackSource& feedback) {
    switch (kind) {
      case ChangeOrDeoptOp::Kind::kUint32ToInt32: {
        __ DeoptimizeIf(__ Int32LessThan(V<Word32>::Cast(input), 0),
                        frame_state, DeoptimizeReason::kLostPrecision,
                        feedback);
        return input;
      }
      case ChangeOrDeoptOp::Kind::kInt64ToInt32: {
        V<Word64> i64_input = V<Word64>::Cast(input);
        V<Word32> i32 = __ TruncateWord64ToWord32(i64_input);
        __ DeoptimizeIfNot(
            __ Word64Equal(__ ChangeInt32ToInt64(i32), i64_input), frame_state,
            DeoptimizeReason::kLostPrecision, feedback);
        return i32;
      }
      case ChangeOrDeoptOp::Kind::kUint64ToInt32: {
        V<Word64> i64_input = V<Word64>::Cast(input);
        __ DeoptimizeIfNot(
            __ Uint64LessThanOrEqual(i64_input, static_cast<uint64_t>(kMaxInt)),
            frame_state, DeoptimizeReason::kLostPrecision, feedback);
        return __ TruncateWord64ToWord32(i64_input);
      }
      case ChangeOrDeoptOp::Kind::kUint64ToInt64: {
        __ DeoptimizeIfNot(
            __ Uint64LessThanOrEqual(V<Word64>::Cast(input),
                                     std::numeric_limits<int64_t>::max()),
            frame_state, DeoptimizeReason::kLostPrecision, feedback);
        return input;
      }
      case ChangeOrDeoptOp::Kind::kFloat64ToInt32: {
        V<Float64> f64_input = V<Float64>::Cast(input);
        V<Word32> i32 = __ TruncateFloat64ToInt32OverflowUndefined(f64_input);
        __ DeoptimizeIfNot(
            __ Float64Equal(__ ChangeInt32ToFloat64(i32), f64_input),
            frame_state, DeoptimizeReason::kLostPrecisionOrNaN, feedback);

        if (minus_zero_mode == CheckForMinusZeroMode::kCheckForMinusZero) {
          // Check if {value} is -0.
          IF (UNLIKELY(__ Word32Equal(i32, 0))) {
            // In case of 0, we need to check the high bits for the IEEE -0
            // pattern.
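            // (For reference: -0.0 is the Float64 with only the sign bit set,
            // so its high word is 0x80000000, which is negative as a signed
            // 32-bit integer, while the high word of +0.0 is 0.)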
            V<Word32> check_negative =
                __ Int32LessThan(__ Float64ExtractHighWord32(f64_input), 0);
            __ DeoptimizeIf(check_negative, frame_state,
                            DeoptimizeReason::kMinusZero, feedback);
          }
        }

        return i32;
      }
      case ChangeOrDeoptOp::Kind::kFloat64ToUint32: {
        V<Float64> f64_input = V<Float64>::Cast(input);
        V<Word32> ui32 = __ TruncateFloat64ToUint32OverflowUndefined(f64_input);
        __ DeoptimizeIfNot(
            __ Float64Equal(__ ChangeUint32ToFloat64(ui32), f64_input),
            frame_state, DeoptimizeReason::kLostPrecisionOrNaN, feedback);

        if (minus_zero_mode == CheckForMinusZeroMode::kCheckForMinusZero) {
          // Check if {value} is -0.
          IF (UNLIKELY(__ Word32Equal(ui32, 0))) {
            // In case of 0, we need to check the high bits for the IEEE -0
            // pattern.
            V<Word32> check_negative =
                __ Int32LessThan(__ Float64ExtractHighWord32(f64_input), 0);
            __ DeoptimizeIf(check_negative, frame_state,
                            DeoptimizeReason::kMinusZero, feedback);
          }
        }

        return ui32;
      }
      case ChangeOrDeoptOp::Kind::kFloat64ToAdditiveSafeInteger: {
        V<Float64> f64_input = V<Float64>::Cast(input);
        V<Word64> i64 = __ TruncateFloat64ToInt64OverflowToMin(f64_input);
        __ DeoptimizeIfNot(
            __ Float64Equal(__ ChangeInt64ToFloat64(i64), f64_input),
            frame_state, DeoptimizeReason::kLostPrecisionOrNaN, feedback);

        if (minus_zero_mode == CheckForMinusZeroMode::kCheckForMinusZero) {
          // Check if {value} is -0.
          IF (UNLIKELY(__ Word64Equal(i64, 0))) {
            // In case of 0, we need to check the high bits for the IEEE -0
            // pattern.
            V<Word32> check_negative =
                __ Int32LessThan(__ Float64ExtractHighWord32(f64_input), 0);
            __ DeoptimizeIf(check_negative, frame_state,
                            DeoptimizeReason::kMinusZero, feedback);
          }
        }

        // Check the value actually fits in AdditiveSafeInteger.
        // (value - kMinAdditiveSafeInteger) >> 52 == 0.
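        // After subtracting the range minimum, every in-range value fits in
        // the low bits covered by the shift, so the arithmetic right shift
        // leaves 0; any out-of-range value keeps non-zero high bits and
        // fails the check.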
        V<Word32> check_is_zero =
            __ Word64Equal(__ Word64ShiftRightArithmetic(
                               __ Word64Sub(i64, kMinAdditiveSafeInteger),
                               kAdditiveSafeIntegerBitLength),
                           0);
        __ DeoptimizeIfNot(check_is_zero, frame_state,
                           DeoptimizeReason::kNotAdditiveSafeInteger, feedback);

        return i64;
      }
      case ChangeOrDeoptOp::Kind::kFloat64ToInt64: {
        V<Float64> f64_input = V<Float64>::Cast(input);
        V<Word64> i64 = __ TruncateFloat64ToInt64OverflowToMin(f64_input);
        __ DeoptimizeIfNot(
            __ Float64Equal(__ ChangeInt64ToFloat64(i64), f64_input),
            frame_state, DeoptimizeReason::kLostPrecisionOrNaN, feedback);

        if (minus_zero_mode == CheckForMinusZeroMode::kCheckForMinusZero) {
          // Check if {value} is -0.
          IF (UNLIKELY(__ Word64Equal(i64, 0))) {
            // In case of 0, we need to check the high bits for the IEEE -0
            // pattern.
            V<Word32> check_negative =
                __ Int32LessThan(__ Float64ExtractHighWord32(f64_input), 0);
            __ DeoptimizeIf(check_negative, frame_state,
                            DeoptimizeReason::kMinusZero, feedback);
          }
        }

        return i64;
      }
      case ChangeOrDeoptOp::Kind::kFloat64NotHole: {
        V<Float64> f64_input = V<Float64>::Cast(input);
        // First check whether {value} is a NaN at all...
        IF_NOT (LIKELY(__ Float64Equal(f64_input, f64_input))) {
          // ...and only if {value} is a NaN, perform the expensive bit
          // check. See http://crbug.com/v8/8264 for details.
          __ DeoptimizeIf(__ Word32Equal(__ Float64ExtractHighWord32(f64_input),
                                         kHoleNanUpper32),
                          frame_state, DeoptimizeReason::kHole, feedback);
        }

        return input;
      }
    }
    UNREACHABLE();
  }

  V<None> REDUCE(DeoptimizeIf)(V<Word32> condition, V<FrameState> frame_state,
                               bool negated,
                               const DeoptimizeParameters* parameters) {
    LABEL_BLOCK(no_change) {
      return Next::ReduceDeoptimizeIf(condition, frame_state, negated,
                                      parameters);
    }
    if (ShouldSkipOptimizationStep()) goto no_change;
    // Block cloning only works for branches, but not for `DeoptimizeIf`. On
    // the other hand, explicit control flow makes the overall pipeline and
    // especially the register allocator slower. So we only switch a
    // `DeoptimizeIf` to a branch if it has a phi input, which indicates that
    // block cloning could be helpful.
    if (__ Get(condition).template Is<PhiOp>()) {
      if (negated) {
        IF_NOT (LIKELY(condition)) {
          __ Deoptimize(frame_state, parameters);
        }

      } else {
        IF (UNLIKELY(condition)) {
          __ Deoptimize(frame_state, parameters);
        }
      }
      return V<None>::Invalid();
    }
    goto no_change;
  }

  V<Word32> REDUCE(ObjectIs)(V<Object> input, ObjectIsOp::Kind kind,
                             ObjectIsOp::InputAssumptions input_assumptions) {
    switch (kind) {
      case ObjectIsOp::Kind::kBigInt:
      case ObjectIsOp::Kind::kBigInt64: {
        DCHECK_IMPLIES(kind == ObjectIsOp::Kind::kBigInt64, Is64());

        Label<Word32> done(this);

        if (input_assumptions != ObjectIsOp::InputAssumptions::kBigInt) {
          if (NeedsHeapObjectCheck(input_assumptions)) {
            // Check for Smi.
            GOTO_IF(__ IsSmi(input), done, 0);
          }

          // Check for BigInt.
          V<Map> map = __ LoadMapField(input);
          V<Word32> is_bigint_map =
              __ TaggedEqual(map, __ HeapConstant(factory_->bigint_map()));
          GOTO_IF_NOT(is_bigint_map, done, 0);
        }

        if (kind == ObjectIsOp::Kind::kBigInt) {
          GOTO(done, 1);
        } else {
          DCHECK_EQ(kind, ObjectIsOp::Kind::kBigInt64);
          // We have to perform check for BigInt64 range.
          V<Word32> bitfield = __ template LoadField<Word32>(
              input, AccessBuilder::ForBigIntBitfield());
          GOTO_IF(__ Word32Equal(bitfield, 0), done, 1);

          // Length must be 1.
          V<Word32> length_field =
              __ Word32BitwiseAnd(bitfield, BigInt::LengthBits::kMask);
          GOTO_IF_NOT(__ Word32Equal(length_field,
                                     uint32_t{1} << BigInt::LengthBits::kShift),
                      done, 0);

          // Check if it fits in 64 bit signed int.
          V<Word64> lsd = __ template LoadField<Word64>(
              input, AccessBuilder::ForBigIntLeastSignificantDigit64());
          V<Word32> magnitude_check = __ Uint64LessThanOrEqual(
              lsd, std::numeric_limits<int64_t>::max());
          GOTO_IF(magnitude_check, done, 1);

          // The BigInt probably doesn't fit into signed int64. The only
          // exception is int64_t::min. We check for this.
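          // (A negative BigInt whose single 64-bit digit is 2^63 represents
          // exactly int64_t::min, which is representable even though its
          // magnitude exceeds int64_t::max.)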
          V<Word32> sign =
              __ Word32BitwiseAnd(bitfield, BigInt::SignBits::kMask);
          V<Word32> sign_check = __ Word32Equal(sign, BigInt::SignBits::kMask);
          GOTO_IF_NOT(sign_check, done, 0);

          V<Word32> min_check =
              __ Word64Equal(lsd, std::numeric_limits<int64_t>::min());
          GOTO_IF(min_check, done, 1);

          GOTO(done, 0);
        }

        BIND(done, result);
        return result;
      }
      case ObjectIsOp::Kind::kUndetectable:
        if (DependOnNoUndetectableObjectsProtector()) {
          V<Word32> is_undefined = __ TaggedEqual(
              input, __ HeapConstant(factory_->undefined_value()));
          V<Word32> is_null =
              __ TaggedEqual(input, __ HeapConstant(factory_->null_value()));
          return __ Word32BitwiseOr(is_undefined, is_null);
        }
        [[fallthrough]];
      case ObjectIsOp::Kind::kCallable:
      case ObjectIsOp::Kind::kConstructor:
      case ObjectIsOp::Kind::kDetectableCallable:
      case ObjectIsOp::Kind::kNonCallable:
      case ObjectIsOp::Kind::kReceiver:
      case ObjectIsOp::Kind::kReceiverOrNullOrUndefined: {
        Label<Word32> done(this);

        // Check for Smi if necessary.
        if (NeedsHeapObjectCheck(input_assumptions)) {
          GOTO_IF(UNLIKELY(__ IsSmi(input)), done, 0);
        }

#if V8_STATIC_ROOTS_BOOL
        // Fast check for NullOrUndefined before loading the map, if helpful.
        V<Word32> is_null_or_undefined;
        if (kind == ObjectIsOp::Kind::kReceiverOrNullOrUndefined) {
          static_assert(StaticReadOnlyRoot::kFirstAllocatedRoot ==
                        StaticReadOnlyRoot::kUndefinedValue);
          static_assert(StaticReadOnlyRoot::kUndefinedValue +
                            sizeof(Undefined) ==
                        StaticReadOnlyRoot::kNullValue);
          is_null_or_undefined = __ Uint32LessThanOrEqual(
              __ TruncateWordPtrToWord32(
                  __ BitcastHeapObjectToWordPtr(V<HeapObject>::Cast(input))),
              __ Word32Constant(StaticReadOnlyRoot::kNullValue));
        }
#endif  // V8_STATIC_ROOTS_BOOL

        // Load bitfield from map.
        V<Map> map = __ LoadMapField(input);
        V<Word32> bitfield =
            __ template LoadField<Word32>(map, AccessBuilder::ForMapBitField());

        V<Word32> check;
        switch (kind) {
          case ObjectIsOp::Kind::kCallable:
            check =
                __ Word32Equal(Map::Bits1::IsCallableBit::kMask,
                               __ Word32BitwiseAnd(
                                   bitfield, Map::Bits1::IsCallableBit::kMask));
            break;
          case ObjectIsOp::Kind::kConstructor:
            check = __ Word32Equal(
                Map::Bits1::IsConstructorBit::kMask,
                __ Word32BitwiseAnd(bitfield,
                                    Map::Bits1::IsConstructorBit::kMask));
            break;
          case ObjectIsOp::Kind::kDetectableCallable:
            check = __ Word32Equal(
                Map::Bits1::IsCallableBit::kMask,
                __ Word32BitwiseAnd(
                    bitfield, (Map::Bits1::IsCallableBit::kMask) |
                                  (Map::Bits1::IsUndetectableBit::kMask)));
            break;
          case ObjectIsOp::Kind::kNonCallable:
            check = __ Word32Equal(
                0, __ Word32BitwiseAnd(bitfield,
                                       Map::Bits1::IsCallableBit::kMask));
            GOTO_IF_NOT(check, done, 0);
            // Fallthrough into receiver check.
            [[fallthrough]];
          case ObjectIsOp::Kind::kReceiver:
            check = JSAnyIsNotPrimitiveHeapObject(input, map);
            break;
          case ObjectIsOp::Kind::kReceiverOrNullOrUndefined: {
#if V8_STATIC_ROOTS_BOOL
            V<Word32> is_non_primitive =
                JSAnyIsNotPrimitiveHeapObject(input, map);
            check = __ Word32BitwiseOr(is_null_or_undefined, is_non_primitive);
#else
            static_assert(LAST_PRIMITIVE_HEAP_OBJECT_TYPE == ODDBALL_TYPE);
            static_assert(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
            // Rule out all primitives except oddballs (true, false, undefined,
            // null).
            V<Word32> instance_type = __ LoadInstanceTypeField(map);
            GOTO_IF_NOT(__ Uint32LessThanOrEqual(ODDBALL_TYPE, instance_type),
                        done, 0);

            // Rule out booleans.
            check = __ Word32Equal(
                0,
                __ TaggedEqual(map, __ HeapConstant(factory_->boolean_map())));
#endif  // V8_STATIC_ROOTS_BOOL
            break;
          }
          case ObjectIsOp::Kind::kUndetectable:
            check = __ Word32Equal(
                Map::Bits1::IsUndetectableBit::kMask,
                __ Word32BitwiseAnd(bitfield,
                                    Map::Bits1::IsUndetectableBit::kMask));
            break;
          default:
            UNREACHABLE();
        }
        GOTO(done, check);

        BIND(done, result);
        return result;
      }
      case ObjectIsOp::Kind::kSmi: {
        // If we statically know that this is a heap object, it cannot be a Smi.
        if (!NeedsHeapObjectCheck(input_assumptions)) {
          return __ Word32Constant(0);
        }
        return __ IsSmi(input);
      }
      case ObjectIsOp::Kind::kNumber: {
        Label<Word32> done(this);

        // Check for Smi if necessary.
        if (NeedsHeapObjectCheck(input_assumptions)) {
          GOTO_IF(__ IsSmi(input), done, 1);
        }

        V<Map> map = __ LoadMapField(input);
        GOTO(done,
             __ TaggedEqual(map, __ HeapConstant(factory_->heap_number_map())));

        BIND(done, result);
        return result;
      }
      case ObjectIsOp::Kind::kNumberFitsInt32: {
        Label<Word32> done(this);

        // Check for Smi if necessary.
        if (NeedsHeapObjectCheck(input_assumptions)) {
          GOTO_IF(__ IsSmi(input), done, 1);
        }

        V<Map> map = __ LoadMapField(input);
        GOTO_IF_NOT(
            __ TaggedEqual(map, __ HeapConstant(factory_->heap_number_map())),
            done, 0);

        GOTO(done,
             __ Float64Is(__ LoadHeapNumberValue(V<HeapNumber>::Cast(input)),
                          NumericKind::kInt32));

        BIND(done, result);
        return result;
      }
      case ObjectIsOp::Kind::kNumberOrBigInt: {
        Label<Word32> done(this);

        // Check for Smi if necessary.
        if (NeedsHeapObjectCheck(input_assumptions)) {
          GOTO_IF(__ IsSmi(input), done, 1);
        }

        V<Map> map = __ LoadMapField(input);
        GOTO_IF(
            __ TaggedEqual(map, __ HeapConstant(factory_->heap_number_map())),
            done, 1);
        GOTO(done,
             __ TaggedEqual(map, __ HeapConstant(factory_->bigint_map())));

        BIND(done, result);
        return result;
      }

#if V8_STATIC_ROOTS_BOOL
      case ObjectIsOp::Kind::kString: {
        Label<Word32> done(this);

        // Check for Smi if necessary.
        if (NeedsHeapObjectCheck(input_assumptions)) {
          GOTO_IF(__ IsSmi(input), done, 0);
        }

        V<Map> map = __ LoadMapField(input);
        GOTO(done,
             __ Uint32LessThanOrEqual(
                 __ TruncateWordPtrToWord32(__ BitcastHeapObjectToWordPtr(map)),
                 __ Word32Constant(InstanceTypeChecker::kStringMapUpperBound)));

        BIND(done, result);
        return result;
      }
      case ObjectIsOp::Kind::kSymbol: {
        Label<Word32> done(this);

        // Check for Smi if necessary.
        if (NeedsHeapObjectCheck(input_assumptions)) {
          GOTO_IF(__ IsSmi(input), done, 0);
        }

        V<Map> map = __ LoadMapField(input);
        GOTO(done,
             __ Word32Equal(
                 __ TruncateWordPtrToWord32(__ BitcastHeapObjectToWordPtr(map)),
                 __ Word32Constant(StaticReadOnlyRoot::kSymbolMap)));

        BIND(done, result);
        return result;
      }
#else
      case ObjectIsOp::Kind::kString:
      case ObjectIsOp::Kind::kSymbol:
#endif
      case ObjectIsOp::Kind::kArrayBufferView: {
        Label<Word32> done(this);

        // Check for Smi if necessary.
        if (NeedsHeapObjectCheck(input_assumptions)) {
          GOTO_IF(__ IsSmi(input), done, 0);
        }

        // Load instance type from map.
        V<Map> map = __ LoadMapField(input);
        V<Word32> instance_type = __ LoadInstanceTypeField(map);

        V<Word32> check;
        switch (kind) {
#if !V8_STATIC_ROOTS_BOOL
          case ObjectIsOp::Kind::kSymbol:
            check = __ Word32Equal(instance_type, SYMBOL_TYPE);
            break;
          case ObjectIsOp::Kind::kString:
            check = __ Uint32LessThan(instance_type, FIRST_NONSTRING_TYPE);
            break;
#endif
          case ObjectIsOp::Kind::kArrayBufferView:
            check = __ Uint32LessThan(
                __ Word32Sub(instance_type, FIRST_JS_ARRAY_BUFFER_VIEW_TYPE),
                LAST_JS_ARRAY_BUFFER_VIEW_TYPE -
                    FIRST_JS_ARRAY_BUFFER_VIEW_TYPE + 1);
            break;
          default:
            UNREACHABLE();
        }
        GOTO(done, check);

        BIND(done, result);
        return result;
      }
      case ObjectIsOp::Kind::kInternalizedString: {
        DCHECK_EQ(input_assumptions, ObjectIsOp::InputAssumptions::kHeapObject);
        // Load instance type from map.
        V<Map> map = __ LoadMapField(input);
        V<Word32> instance_type = __ LoadInstanceTypeField(map);

        return __ Word32Equal(
            __ Word32BitwiseAnd(instance_type,
                                (kIsNotStringMask | kIsNotInternalizedMask)),
            (kStringTag | kInternalizedTag));
      }
      case ObjectIsOp::Kind::kStringOrStringWrapper: {
        Label<Word32> done(this);

        // Check for Smi if necessary.
        if (NeedsHeapObjectCheck(input_assumptions)) {
          GOTO_IF(__ IsSmi(input), done, 0);
        }

        // Load instance type from map.
        V<Map> map = __ LoadMapField(input);
        V<Word32> instance_type = __ LoadInstanceTypeField(map);

        GOTO_IF(__ Uint32LessThan(instance_type, FIRST_NONSTRING_TYPE), done,
                1);
        GOTO_IF_NOT(__ Word32Equal(instance_type, JS_PRIMITIVE_WRAPPER_TYPE),
                    done, 0);

        V<Word32> bitfield2 = __ template LoadField<Word32>(
            map, AccessBuilder::ForMapBitField2());

        V<Word32> elements_kind =
            __ Word32BitwiseAnd(bitfield2, Map::Bits2::ElementsKindBits::kMask);

        GOTO_IF(__ Word32Equal(FAST_STRING_WRAPPER_ELEMENTS
                                   << Map::Bits2::ElementsKindBits::kShift,
                               elements_kind),
                done, 1);

        V<Word32> check =
            __ Word32Equal(SLOW_STRING_WRAPPER_ELEMENTS
                               << Map::Bits2::ElementsKindBits::kShift,
                           elements_kind);
        GOTO(done, check);

        BIND(done, result);
        return result;
      }
    }
    UNREACHABLE();
  }

  V<Word32> REDUCE(Float64Is)(V<Float64> value, NumericKind kind) {
    switch (kind) {
      case NumericKind::kFloat64Hole: {
        Label<Word32> done(this);
        // First check whether {value} is a NaN at all...
        GOTO_IF(LIKELY(__ Float64Equal(value, value)), done, 0);
        // ...and only if {value} is a NaN, perform the expensive bit
        // check. See http://crbug.com/v8/8264 for details.
        GOTO(done, __ Word32Equal(__ Float64ExtractHighWord32(value),
                                  kHoleNanUpper32));
        BIND(done, result);
        return result;
      }
      case NumericKind::kFinite: {
        V<Float64> diff = __ Float64Sub(value, value);
        return __ Float64Equal(diff, diff);
      }
      case NumericKind::kInteger: {
        V<Float64> trunc = __ Float64RoundToZero(value);
        V<Float64> diff = __ Float64Sub(value, trunc);
        return __ Float64Equal(diff, 0.0);
      }
      case NumericKind::kSafeInteger: {
        Label<Word32> done(this);
        V<Float64> trunc = __ Float64RoundToZero(value);
        V<Float64> diff = __ Float64Sub(value, trunc);
        GOTO_IF_NOT(__ Float64Equal(diff, 0), done, 0);
        V<Word32> in_range =
            __ Float64LessThanOrEqual(__ Float64Abs(trunc), kMaxSafeInteger);
        GOTO(done, in_range);

        BIND(done, result);
        return result;
      }
      case NumericKind::kInt32: {
        Label<Word32> done(this);
        V<Word32> v32 = __ TruncateFloat64ToInt32OverflowUndefined(value);
        GOTO_IF_NOT(__ Float64Equal(value, __ ChangeInt32ToFloat64(v32)), done,
                    0);
        IF (__ Word32Equal(v32, 0)) {
          // Checking -0.
          GOTO_IF(__ Int32LessThan(__ Float64ExtractHighWord32(value), 0), done,
                  0);
        }
        GOTO(done, 1);

        BIND(done, result);
        return result;
      }
      case NumericKind::kSmi: {
        Label<Word32> done(this);
        V<Word32> v32 = __ TruncateFloat64ToInt32OverflowUndefined(value);
        GOTO_IF_NOT(__ Float64Equal(value, __ ChangeInt32ToFloat64(v32)), done,
                    0);
        IF (__ Word32Equal(v32, 0)) {
          // Checking -0.
          GOTO_IF(__ Int32LessThan(__ Float64ExtractHighWord32(value), 0), done,
                  0);
        }

        if constexpr (SmiValuesAre32Bits()) {
          GOTO(done, 1);
        } else {
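          // With 31-bit Smis, v fits in a Smi exactly when v + v (which
          // equals v << 1, the Smi tag shift) does not overflow int32, so the
          // checked add's overflow bit is precisely the Smi range check.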
          V<Tuple<Word32, Word32>> add = __ Int32AddCheckOverflow(v32, v32);
          V<Word32> overflow = __ template Projection<1>(add);
          GOTO_IF(overflow, done, 0);
          GOTO(done, 1);
        }

        BIND(done, result);
        return result;
      }
      case NumericKind::kMinusZero: {
        if (Is64()) {
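          // -0.0 is the unique Float64 whose bit pattern has only the sign
          // bit set (kMinusZeroBits); comparing raw bits distinguishes it
          // from +0.0, which would compare equal to it under Float64Equal.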
          V<Word64> value64 = __ BitcastFloat64ToWord64(value);
          return __ Word64Equal(value64, kMinusZeroBits);
        } else {
          Label<Word32> done(this);
          V<Word32> value_lo = __ Float64ExtractLowWord32(value);
          GOTO_IF_NOT(__ Word32Equal(value_lo, kMinusZeroLoBits), done, 0);
          V<Word32> value_hi = __ Float64ExtractHighWord32(value);
          GOTO(done, __ Word32Equal(value_hi, kMinusZeroHiBits));

          BIND(done, result);
          return result;
        }
      }
      case NumericKind::kNaN: {
        V<Word32> diff = __ Float64Equal(value, value);
        return __ Word32Equal(diff, 0);
      }
    }

    UNREACHABLE();
  }

  V<Word32> REDUCE(ObjectIsNumericValue)(V<Object> input, NumericKind kind,
                                         FloatRepresentation input_rep) {
    DCHECK_EQ(input_rep, FloatRepresentation::Float64());
    Label<Word32> done(this);

    switch (kind) {
      case NumericKind::kFinite:
      case NumericKind::kInteger:
      case NumericKind::kSafeInteger:
      case NumericKind::kInt32:
      case NumericKind::kSmi:
        GOTO_IF(__ IsSmi(input), done, 1);
        break;
      case NumericKind::kMinusZero:
      case NumericKind::kNaN:
        GOTO_IF(__ IsSmi(input), done, 0);
        break;
      case NumericKind::kFloat64Hole:
        // ObjectIsFloat64Hole is not used, but can be implemented when needed.
        UNREACHABLE();
    }

    V<Map> map = __ LoadMapField(input);
    GOTO_IF_NOT(
        __ TaggedEqual(map, __ HeapConstant(factory_->heap_number_map())), done,
        0);

    V<Float64> value = __ LoadHeapNumberValue(V<HeapNumber>::Cast(input));
    GOTO(done, __ Float64Is(value, kind));

    BIND(done, result);
    return result;
  }

  V<Object> REDUCE(Convert)(V<Object> input, ConvertOp::Kind from,
                            ConvertOp::Kind to) {
    switch (to) {
      case ConvertOp::Kind::kNumber: {
        if (from == ConvertOp::Kind::kPlainPrimitive) {
          return __ CallBuiltin_PlainPrimitiveToNumber(
              isolate_, V<PlainPrimitive>::Cast(input));
        } else {
          DCHECK_EQ(from, ConvertOp::Kind::kString);
          return __ CallBuiltin_StringToNumber(isolate_,
                                               V<String>::Cast(input));
        }
      }
      case ConvertOp::Kind::kBoolean: {
        DCHECK_EQ(from, ConvertOp::Kind::kObject);
        return __ CallBuiltin_ToBoolean(isolate_, input);
      }
      case ConvertOp::Kind::kString: {
        DCHECK_EQ(from, ConvertOp::Kind::kNumber);
        return __ CallBuiltin_NumberToString(isolate_, V<Number>::Cast(input));
      }
      case ConvertOp::Kind::kSmi: {
        DCHECK_EQ(from, ConvertOp::Kind::kNumberOrOddball);
        Label<Smi> done(this);
        GOTO_IF(LIKELY(__ ObjectIsSmi(input)), done, V<Smi>::Cast(input));

        V<Float64> value = __ template LoadField<Float64>(
            input, AccessBuilder::ForHeapNumberOrOddballOrHoleValue());
        GOTO(done, __ TagSmi(__ ReversibleFloat64ToInt32(value)));

        BIND(done, result);
        return result;
      }
      default:
        UNREACHABLE();
    }
  }

  V<JSPrimitive> REDUCE(ConvertUntaggedToJSPrimitive)(
      V<Untagged> input, ConvertUntaggedToJSPrimitiveOp::JSPrimitiveKind kind,
      RegisterRepresentation input_rep,
      ConvertUntaggedToJSPrimitiveOp::InputInterpretation input_interpretation,
      CheckForMinusZeroMode minus_zero_mode) {
    switch (kind) {
      case ConvertUntaggedToJSPrimitiveOp::JSPrimitiveKind::kBigInt: {
        DCHECK(Is64());
        DCHECK_EQ(input_rep, RegisterRepresentation::Word64());
        V<Word64> input_w64 = V<Word64>::Cast(input);
        Label<BigInt> done(this);

        // BigInts with value 0 must be of size 0 (canonical form).
        GOTO_IF(__ Word64Equal(input_w64, int64_t{0}), done,
                AllocateBigInt(V<Word32>::Invalid(), V<Word64>::Invalid()));

        // The GOTO_IF above could have been changed to an unconditional GOTO,
        // in which case we are now in unreachable code, so we can skip the
        // following step and return.
        if (input_interpretation ==
            ConvertUntaggedToJSPrimitiveOp::InputInterpretation::kSigned) {
          // Shift sign bit into BigInt's sign bit position.
          V<Word32> bitfield = __ Word32BitwiseOr(
              BigInt::LengthBits::encode(1),
              __ TruncateWord64ToWord32(__ Word64ShiftRightLogical(
                  input_w64,
                  static_cast<int32_t>(63 - BigInt::SignBits::kShift))));

          // We use (value XOR (value >> 63)) - (value >> 63) to compute the
          // absolute value, in a branchless fashion.
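          // E.g. for value = -5: value >> 63 is all ones (-1), and
          // (-5 ^ -1) - (-1) == 4 + 1 == 5; for value >= 0 the mask is 0 and
          // the expression is the identity.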
          V<Word64> sign_mask =
              __ Word64ShiftRightArithmetic(input_w64, int32_t{63});
          V<Word64> absolute_value = __ Word64Sub(
              __ Word64BitwiseXor(input_w64, sign_mask), sign_mask);
          GOTO(done, AllocateBigInt(bitfield, absolute_value));
        } else {
          DCHECK_EQ(
              input_interpretation,
              ConvertUntaggedToJSPrimitiveOp::InputInterpretation::kUnsigned);
          const auto bitfield = BigInt::LengthBits::encode(1);
          GOTO(done, AllocateBigInt(__ Word32Constant(bitfield), input_w64));
        }

        BIND(done, result);
        return result;
      }
      case ConvertUntaggedToJSPrimitiveOp::JSPrimitiveKind::kNumber: {
        if (input_rep == RegisterRepresentation::Word32()) {
          V<Word32> input_w32 = V<Word32>::Cast(input);
          switch (input_interpretation) {
            case ConvertUntaggedToJSPrimitiveOp::InputInterpretation::kSigned: {
              if (SmiValuesAre32Bits()) {
                return __ TagSmi(input_w32);
              }
              DCHECK(SmiValuesAre31Bits());

              Label<Number> done(this);
              Label<> overflow(this);

              TagSmiOrOverflow(input_w32, &overflow, &done);

              if (BIND(overflow)) {
                GOTO(done,
                     AllocateHeapNumber(__ ChangeInt32ToFloat64(input_w32)));
              }

              BIND(done, result);
              return result;
            }
            case ConvertUntaggedToJSPrimitiveOp::InputInterpretation::
                kUnsigned: {
              Label<Number> done(this);

              GOTO_IF(__ Uint32LessThanOrEqual(input_w32, Smi::kMaxValue), done,
                      __ TagSmi(input_w32));
              GOTO(done,
                   AllocateHeapNumber(__ ChangeUint32ToFloat64(input_w32)));

              BIND(done, result);
              return result;
            }
            case ConvertUntaggedToJSPrimitiveOp::InputInterpretation::
                kCharCode:
            case ConvertUntaggedToJSPrimitiveOp::InputInterpretation::
                kCodePoint:
              UNREACHABLE();
          }
        } else if (input_rep == RegisterRepresentation::Word64()) {
          V<Word64> input_w64 = V<Word64>::Cast(input);
          switch (input_interpretation) {
            case ConvertUntaggedToJSPrimitiveOp::InputInterpretation::kSigned: {
              Label<Number> done(this);
              Label<> outside_smi_range(this);

              V<Word32> v32 = __ TruncateWord64ToWord32(input_w64);
              V<Word64> v64 = __ ChangeInt32ToInt64(v32);
              GOTO_IF_NOT(__ Word64Equal(v64, input_w64), outside_smi_range);

              if constexpr (SmiValuesAre32Bits()) {
                GOTO(done, __ TagSmi(v32));
              } else {
                TagSmiOrOverflow(v32, &outside_smi_range, &done);
              }

              if (BIND(outside_smi_range)) {
                GOTO(done,
                     AllocateHeapNumber(__ ChangeInt64ToFloat64(input_w64)));
              }

              BIND(done, result);
              return result;
            }
            case ConvertUntaggedToJSPrimitiveOp::InputInterpretation::
                kUnsigned: {
              Label<Number> done(this);

              GOTO_IF(__ Uint64LessThanOrEqual(input_w64, Smi::kMaxValue), done,
                      __ TagSmi(__ TruncateWord64ToWord32(input_w64)));
              GOTO(done,
                   AllocateHeapNumber(__ ChangeInt64ToFloat64(input_w64)));

              BIND(done, result);
              return result;
            }
            case ConvertUntaggedToJSPrimitiveOp::InputInterpretation::
                kCharCode:
            case ConvertUntaggedToJSPrimitiveOp::InputInterpretation::
                kCodePoint:
              UNREACHABLE();
          }
        } else {
          DCHECK_EQ(input_rep, RegisterRepresentation::Float64());
          V<Float64> input_f64 = V<Float64>::Cast(input);
          Label<Number> done(this);
          Label<> outside_smi_range(this);

          V<Word32> v32 = __ TruncateFloat64ToInt32OverflowUndefined(input_f64);
          GOTO_IF_NOT(__ Float64Equal(input_f64, __ ChangeInt32ToFloat64(v32)),
                      outside_smi_range);

          if (minus_zero_mode == CheckForMinusZeroMode::kCheckForMinusZero) {
            // In case of 0, we need to check the high bits for the IEEE -0
            // pattern.
            IF (__ Word32Equal(v32, 0)) {
              GOTO_IF(
                  __ Int32LessThan(__ Float64ExtractHighWord32(input_f64), 0),
                  outside_smi_range);
            }
          }

          if constexpr (SmiValuesAre32Bits()) {
            GOTO(done, __ TagSmi(v32));
          } else {
            TagSmiOrOverflow(v32, &outside_smi_range, &done);
          }

          if (BIND(outside_smi_range)) {
            GOTO(done, AllocateHeapNumber(input_f64));
          }

          BIND(done, result);
          return result;
        }
        UNREACHABLE();
        break;
      }
      case ConvertUntaggedToJSPrimitiveOp::JSPrimitiveKind::kHeapNumber: {
        DCHECK_EQ(input_rep, RegisterRepresentation::Float64());
        DCHECK_EQ(input_interpretation,
                  ConvertUntaggedToJSPrimitiveOp::InputInterpretation::kDouble);
        return AllocateHeapNumber(V<Float64>::Cast(input));
      }
      case ConvertUntaggedToJSPrimitiveOp::JSPrimitiveKind::
          kHeapNumberOrUndefined: {
        DCHECK_EQ(input_rep, RegisterRepresentation::Float64());
        V<Float64> input_f64 = V<Float64>::Cast(input);
        DCHECK_EQ(input_interpretation,
                  ConvertUntaggedToJSPrimitiveOp::InputInterpretation::kDouble);
        Label<Union<HeapNumber, Undefined>> done(this);
        Label<> allocate_heap_number(this);

        // First check whether {input} is a NaN at all...
        IF (UNLIKELY(__ Float64IsNaN(input_f64))) {
          // ...and only if {input} is a NaN, perform the expensive signaling
          // NaN bit check. See http://crbug.com/v8/8264 for details.
          GOTO_IF_NOT(__ Word32Equal(__ Float64ExtractHighWord32(input_f64),
                                     kHoleNanUpper32),
                      allocate_heap_number);
          GOTO(done, __ HeapConstant(factory_->undefined_value()));
        } ELSE {
          GOTO(allocate_heap_number);
        }

        if (BIND(allocate_heap_number)) {
          GOTO(done, AllocateHeapNumber(input_f64));
        }

        BIND(done, result);
        return result;
      }
      case ConvertUntaggedToJSPrimitiveOp::JSPrimitiveKind::kSmi: {
        DCHECK_EQ(input_rep, RegisterRepresentation::Word32());
        DCHECK_EQ(input_interpretation,
                  ConvertUntaggedToJSPrimitiveOp::InputInterpretation::kSigned);
        return __ TagSmi(V<Word32>::Cast(input));
      }
      case ConvertUntaggedToJSPrimitiveOp::JSPrimitiveKind::kBoolean: {
        DCHECK_EQ(input_rep, RegisterRepresentation::Word32());
        DCHECK_EQ(input_interpretation,
                  ConvertUntaggedToJSPrimitiveOp::InputInterpretation::kSigned);
        Label<Boolean> done(this);

        IF (V<Word32>::Cast(input)) {
          GOTO(done, __ HeapConstant(factory_->true_value()));
        } ELSE {
          GOTO(done, __ HeapConstant(factory_->false_value()));
        }

        BIND(done, result);
        return result;
      }
      case ConvertUntaggedToJSPrimitiveOp::JSPrimitiveKind::kString: {
        DCHECK_EQ(input_rep, RegisterRepresentation::Word32());
        V<Word32> input_w32 = V<Word32>::Cast(input);

        switch (input_interpretation) {
          case ConvertUntaggedToJSPrimitiveOp::InputInterpretation::kCharCode:
            return StringFromSingleCharCode(
                __ Word32BitwiseAnd(input_w32, 0xFFFF));
          case ConvertUntaggedToJSPrimitiveOp::InputInterpretation::
              kCodePoint: {
            return StringFromSingleCodePoint(input_w32, UnicodeEncoding::UTF32);
          }
          default:
            UNREACHABLE();
        }
      }
    }

    UNREACHABLE();
  }

  V<JSPrimitive> REDUCE(ConvertUntaggedToJSPrimitiveOrDeopt)(
      V<Untagged> input, V<FrameState> frame_state,
      ConvertUntaggedToJSPrimitiveOrDeoptOp::JSPrimitiveKind kind,
      RegisterRepresentation input_rep,
      ConvertUntaggedToJSPrimitiveOrDeoptOp::InputInterpretation
          input_interpretation,
      const FeedbackSource& feedback) {
    DCHECK_EQ(kind,
              ConvertUntaggedToJSPrimitiveOrDeoptOp::JSPrimitiveKind::kSmi);
    if (input_rep == RegisterRepresentation::Word32()) {
      V<Word32> input_w32 = V<Word32>::Cast(input);
      if (input_interpretation ==
          ConvertUntaggedToJSPrimitiveOrDeoptOp::InputInterpretation::kSigned) {
        if constexpr (SmiValuesAre32Bits()) {
          return __ TagSmi(input_w32);
        } else {
          V<Tuple<Word32, Word32>> test =
              __ Int32AddCheckOverflow(input_w32, input_w32);
          __ DeoptimizeIf(__ template Projection<1>(test), frame_state,
                          DeoptimizeReason::kLostPrecision, feedback);
          return __ BitcastWord32ToSmi(__ template Projection<0>(test));
        }
      } else {
        DCHECK_EQ(input_interpretation, ConvertUntaggedToJSPrimitiveOrDeoptOp::
                                            InputInterpretation::kUnsigned);
        V<Word32> check = __ Uint32LessThanOrEqual(input_w32, Smi::kMaxValue);
        __ DeoptimizeIfNot(check, frame_state, DeoptimizeReason::kLostPrecision,
                           feedback);
        return __ TagSmi(input_w32);
      }
    } else {
      DCHECK_EQ(input_rep, RegisterRepresentation::Word64());
      V<Word64> input_w64 = V<Word64>::Cast(input);
      if (input_interpretation ==
          ConvertUntaggedToJSPrimitiveOrDeoptOp::InputInterpretation::kSigned) {
        V<Word32> i32 = __ TruncateWord64ToWord32(input_w64);
        V<Word32> check = __ Word64Equal(__ ChangeInt32ToInt64(i32), input_w64);
        __ DeoptimizeIfNot(check, frame_state, DeoptimizeReason::kLostPrecision,
                           feedback);
        if constexpr (SmiValuesAre32Bits()) {
          return __ TagSmi(i32);
        } else {
          V<Tuple<Word32, Word32>> test = __ Int32AddCheckOverflow(i32, i32);
          __ DeoptimizeIf(__ template Projection<1>(test), frame_state,
                          DeoptimizeReason::kLostPrecision, feedback);
          return __ BitcastWord32ToSmi(__ template Projection<0>(test));
        }
      } else {
        DCHECK_EQ(input_interpretation, ConvertUntaggedToJSPrimitiveOrDeoptOp::
                                            InputInterpretation::kUnsigned);
        V<Word32> check = __ Uint64LessThanOrEqual(
            input_w64, static_cast<uint64_t>(Smi::kMaxValue));
        __ DeoptimizeIfNot(check, frame_state, DeoptimizeReason::kLostPrecision,
                           feedback);
        return __ TagSmi(__ TruncateWord64ToWord32(input_w64));
      }
    }

    UNREACHABLE();
  }

  V<Untagged> REDUCE(ConvertJSPrimitiveToUntagged)(
      V<JSPrimitive> object, ConvertJSPrimitiveToUntaggedOp::UntaggedKind kind,
      ConvertJSPrimitiveToUntaggedOp::InputAssumptions input_assumptions) {
    switch (kind) {
      case ConvertJSPrimitiveToUntaggedOp::UntaggedKind::kInt32:
        if (input_assumptions ==
            ConvertJSPrimitiveToUntaggedOp::InputAssumptions::kSmi) {
          return __ UntagSmi(V<Smi>::Cast(object));
        } else if (input_assumptions ==
                   ConvertJSPrimitiveToUntaggedOp::InputAssumptions::
                       kNumberOrOddball) {
          Label<Word32> done(this);

          IF (LIKELY(__ ObjectIsSmi(object))) {
            GOTO(done, __ UntagSmi(V<Smi>::Cast(object)));
          } ELSE {
            V<Float64> value = __ template LoadField<Float64>(
                object, AccessBuilder::ForHeapNumberOrOddballOrHoleValue());
            GOTO(done, __ ReversibleFloat64ToInt32(value));
          }

          BIND(done, result);
          return result;
        } else {
          DCHECK_EQ(input_assumptions, ConvertJSPrimitiveToUntaggedOp::
                                           InputAssumptions::kPlainPrimitive);
          Label<Word32> done(this);
          GOTO_IF(LIKELY(__ ObjectIsSmi(object)), done,
                  __ UntagSmi(V<Smi>::Cast(object)));
          V<Number> number =
              __ ConvertPlainPrimitiveToNumber(V<PlainPrimitive>::Cast(object));
          GOTO_IF(__ ObjectIsSmi(number), done,
                  __ UntagSmi(V<Smi>::Cast(number)));
          V<Float64> f64 = __ LoadHeapNumberValue(V<HeapNumber>::Cast(number));
          GOTO(done, __ JSTruncateFloat64ToWord32(f64));
          BIND(done, result);
          return result;
        }
        UNREACHABLE();
      case ConvertJSPrimitiveToUntaggedOp::UntaggedKind::kInt64:
        if (input_assumptions ==
            ConvertJSPrimitiveToUntaggedOp::InputAssumptions::kSmi) {
          return __ ChangeInt32ToInt64(__ UntagSmi(V<Smi>::Cast(object)));
        } else {
          DCHECK_EQ(input_assumptions, ConvertJSPrimitiveToUntaggedOp::
                                           InputAssumptions::kNumberOrOddball);
          Label<Word64> done(this);

          IF (LIKELY(__ ObjectIsSmi(object))) {
            GOTO(done,
                 __ ChangeInt32ToInt64(__ UntagSmi(V<Smi>::Cast(object))));
          } ELSE {
            V<Float64> value = __ template LoadField<Float64>(
                object, AccessBuilder::ForHeapNumberOrOddballOrHoleValue());
            GOTO(done, __ ReversibleFloat64ToInt64(value));
          }

          BIND(done, result);
          return result;
        }
        UNREACHABLE();
      case ConvertJSPrimitiveToUntaggedOp::UntaggedKind::kUint32: {
        DCHECK_EQ(
            input_assumptions,
            ConvertJSPrimitiveToUntaggedOp::InputAssumptions::kNumberOrOddball);
        Label<Word32> done(this);

        IF (LIKELY(__ ObjectIsSmi(object))) {
          GOTO(done, __ UntagSmi(V<Smi>::Cast(object)));
        } ELSE {
          V<Float64> value = __ template LoadField<Float64>(
              object, AccessBuilder::ForHeapNumberOrOddballOrHoleValue());
          GOTO(done, __ ReversibleFloat64ToUint32(value));
        }

        BIND(done, result);
        return result;
      }
      case ConvertJSPrimitiveToUntaggedOp::UntaggedKind::kBit:
        DCHECK_EQ(input_assumptions,
                  ConvertJSPrimitiveToUntaggedOp::InputAssumptions::kBoolean);
        return __ TaggedEqual(object, __ HeapConstant(factory_->true_value()));
      case ConvertJSPrimitiveToUntaggedOp::UntaggedKind::kFloat64: {
        if (input_assumptions == ConvertJSPrimitiveToUntaggedOp::
                                     InputAssumptions::kNumberOrOddball) {
          Label<Float64> done(this);

          IF (LIKELY(__ ObjectIsSmi(object))) {
            GOTO(done,
                 __ ChangeInt32ToFloat64(__ UntagSmi(V<Smi>::Cast(object))));
          } ELSE {
            V<Float64> value = __ template LoadField<Float64>(
                object, AccessBuilder::ForHeapNumberOrOddballOrHoleValue());
            GOTO(done, value);
          }

          BIND(done, result);
          return result;
        } else {
          DCHECK_EQ(input_assumptions, ConvertJSPrimitiveToUntaggedOp::
                                           InputAssumptions::kPlainPrimitive);
          Label<Float64> done(this);
          GOTO_IF(LIKELY(__ ObjectIsSmi(object)), done,
                  __ ChangeInt32ToFloat64(__ UntagSmi(V<Smi>::Cast(object))));
          V<Number> number =
              __ ConvertPlainPrimitiveToNumber(V<PlainPrimitive>::Cast(object));
          GOTO_IF(__ ObjectIsSmi(number), done,
                  __ ChangeInt32ToFloat64(__ UntagSmi(V<Smi>::Cast(number))));
          V<Float64> f64 = __ LoadHeapNumberValue(V<HeapNumber>::Cast(number));
          GOTO(done, f64);
          BIND(done, result);
          return result;
        }
      }
    }
    UNREACHABLE();
  }

  V<Untagged> REDUCE(ConvertJSPrimitiveToUntaggedOrDeopt)(
      V<Object> object, V<FrameState> frame_state,
      ConvertJSPrimitiveToUntaggedOrDeoptOp::JSPrimitiveKind from_kind,
      ConvertJSPrimitiveToUntaggedOrDeoptOp::UntaggedKind to_kind,
      CheckForMinusZeroMode minus_zero_mode, const FeedbackSource& feedback) {
    switch (to_kind) {
      case ConvertJSPrimitiveToUntaggedOrDeoptOp::UntaggedKind::kInt32: {
        if (from_kind ==
            ConvertJSPrimitiveToUntaggedOrDeoptOp::JSPrimitiveKind::kSmi) {
          __ DeoptimizeIfNot(__ ObjectIsSmi(object), frame_state,
                             DeoptimizeReason::kNotASmi, feedback);
          return __ UntagSmi(V<Smi>::Cast(object));
        } else {
          DCHECK_EQ(
              from_kind,
              ConvertJSPrimitiveToUntaggedOrDeoptOp::JSPrimitiveKind::kNumber);
          Label<Word32> done(this);

          IF (LIKELY(__ ObjectIsSmi(object))) {
            GOTO(done, __ UntagSmi(V<Smi>::Cast(object)));
          } ELSE {
            V<Map> map = __ LoadMapField(object);
            __ DeoptimizeIfNot(
                __ TaggedEqual(map,
                               __ HeapConstant(factory_->heap_number_map())),
                frame_state, DeoptimizeReason::kNotAHeapNumber, feedback);
            V<Float64> heap_number_value =
                __ LoadHeapNumberValue(V<HeapNumber>::Cast(object));

            GOTO(done,
                 __ ChangeFloat64ToInt32OrDeopt(heap_number_value, frame_state,
                                                minus_zero_mode, feedback));
          }

          BIND(done, result);
          return result;
        }
      }
      case ConvertJSPrimitiveToUntaggedOrDeoptOp::UntaggedKind::
          kAdditiveSafeInteger: {
        DCHECK_EQ(
            from_kind,
            ConvertJSPrimitiveToUntaggedOrDeoptOp::JSPrimitiveKind::kNumber);
        Label<Word64> done(this);

        IF (LIKELY(__ ObjectIsSmi(object))) {
          GOTO(done, __ ChangeInt32ToInt64(__ UntagSmi(V<Smi>::Cast(object))));
        } ELSE {
          V<Map> map = __ LoadMapField(object);
          __ DeoptimizeIfNot(
              __ TaggedEqual(map, __ HeapConstant(factory_->heap_number_map())),
              frame_state, DeoptimizeReason::kNotAHeapNumber, feedback);
          V<Float64> heap_number_value =
              __ LoadHeapNumberValue(V<HeapNumber>::Cast(object));
          GOTO(done,
               __ ChangeFloat64ToAdditiveSafeIntegerOrDeopt(
                   heap_number_value, frame_state, minus_zero_mode, feedback));
        }

        BIND(done, result);
        return result;
      }
      case ConvertJSPrimitiveToUntaggedOrDeoptOp::UntaggedKind::kInt64: {
        DCHECK_EQ(
            from_kind,
            ConvertJSPrimitiveToUntaggedOrDeoptOp::JSPrimitiveKind::kNumber);
        Label<Word64> done(this);

        IF (LIKELY(__ ObjectIsSmi(object))) {
          GOTO(done, __ ChangeInt32ToInt64(__ UntagSmi(V<Smi>::Cast(object))));
        } ELSE {
          V<Map> map = __ LoadMapField(object);
          __ DeoptimizeIfNot(
              __ TaggedEqual(map, __ HeapConstant(factory_->heap_number_map())),
              frame_state, DeoptimizeReason::kNotAHeapNumber, feedback);
          V<Float64> heap_number_value =
              __ LoadHeapNumberValue(V<HeapNumber>::Cast(object));
          GOTO(done,
               __ ChangeFloat64ToInt64OrDeopt(heap_number_value, frame_state,
                                              minus_zero_mode, feedback));
        }

        BIND(done, result);
        return result;
      }
      case ConvertJSPrimitiveToUntaggedOrDeoptOp::UntaggedKind::kFloat64: {
        Label<Float64> done(this);

        // In the Smi case, just convert to int32 and then float64.
        // Otherwise, check heap numberness and load the number.
        IF (__ ObjectIsSmi(object)) {
          GOTO(done,
               __ ChangeInt32ToFloat64(__ UntagSmi(V<Smi>::Cast(object))));
        } ELSE {
          GOTO(done, ConvertHeapObjectToFloat64OrDeopt(object, frame_state,
                                                       from_kind, feedback));
        }

        BIND(done, result);
        return result;
      }
      case ConvertJSPrimitiveToUntaggedOrDeoptOp::UntaggedKind::kArrayIndex: {
        DCHECK_EQ(from_kind, ConvertJSPrimitiveToUntaggedOrDeoptOp::
                                 JSPrimitiveKind::kNumberOrString);
        Label<WordPtr> done(this);

        IF (LIKELY(__ ObjectIsSmi(object))) {
          // In the Smi case, just convert to intptr_t.
          GOTO(done, __ ChangeInt32ToIntPtr(__ UntagSmi(V<Smi>::Cast(object))));
        } ELSE {
          V<Map> map = __ LoadMapField(object);
          IF (LIKELY(__ TaggedEqual(
                  map, __ HeapConstant(factory_->heap_number_map())))) {
            V<Float64> heap_number_value =
                __ LoadHeapNumberValue(V<HeapNumber>::Cast(object));
            // Perform Turbofan's "CheckedFloat64ToIndex"
            {
              if constexpr (Is64()) {
                V<Word64> i64 = __ TruncateFloat64ToInt64OverflowUndefined(
                    heap_number_value);
                // The TruncateKind above means there will be a precision loss
                // in case INT64_MAX input is passed, but that precision loss
                // would not be detected and would not lead to a deoptimization
                // from the first check. But in this case, we'll deopt anyway
                // because of the following checks.
                __ DeoptimizeIfNot(__ Float64Equal(__ ChangeInt64ToFloat64(i64),
                                                   heap_number_value),
                                   frame_state,
                                   DeoptimizeReason::kLostPrecisionOrNaN,
                                   feedback);
                __ DeoptimizeIfNot(
                    __ IntPtrLessThan(i64, kMaxSafeIntegerUint64), frame_state,
                    DeoptimizeReason::kNotAnArrayIndex, feedback);
                __ DeoptimizeIfNot(
                    __ IntPtrLessThan(-kMaxSafeIntegerUint64, i64), frame_state,
                    DeoptimizeReason::kNotAnArrayIndex, feedback);
                GOTO(done, i64);
              } else {
                V<Word32> i32 = __ TruncateFloat64ToInt32OverflowUndefined(
                    heap_number_value);
                __ DeoptimizeIfNot(__ Float64Equal(__ ChangeInt32ToFloat64(i32),
                                                   heap_number_value),
                                   frame_state,
                                   DeoptimizeReason::kLostPrecisionOrNaN,
                                   feedback);
                GOTO(done, i32);
              }
            }
          } ELSE {
#if V8_STATIC_ROOTS_BOOL
            V<Word32> is_string_map = __ Uint32LessThanOrEqual(
                __ TruncateWordPtrToWord32(__ BitcastHeapObjectToWordPtr(map)),
                __ Word32Constant(InstanceTypeChecker::kStringMapUpperBound));
#else
            V<Word32> instance_type = __ LoadInstanceTypeField(map);
            V<Word32> is_string_map =
                __ Uint32LessThan(instance_type, FIRST_NONSTRING_TYPE);
#endif
            __ DeoptimizeIfNot(is_string_map, frame_state,
                               DeoptimizeReason::kNotAString, feedback);

            // TODO(nicohartmann@): We might introduce a Turboshaft way for
            // constructing call descriptors.
            MachineSignature::Builder builder(__ graph_zone(), 1, 1);
            builder.AddReturn(MachineType::Int32());
            builder.AddParam(MachineType::TaggedPointer());
            auto desc = Linkage::GetSimplifiedCDescriptor(__ graph_zone(),
                                                          builder.Get());
            auto ts_desc = TSCallDescriptor::Create(
                desc, CanThrow::kNo, LazyDeoptOnThrow::kNo, __ graph_zone());
            OpIndex callee = __ ExternalConstant(
                ExternalReference::string_to_array_index_function());
            // NOTE: String::ToArrayIndex() currently returns int32_t.
            V<WordPtr> index = __ ChangeInt32ToIntPtr(
                V<Word32>::Cast(__ Call(callee, {object}, ts_desc)));
            __ DeoptimizeIf(__ WordPtrEqual(index, -1), frame_state,
                            DeoptimizeReason::kNotAnArrayIndex, feedback);
            GOTO(done, index);
          }
        }

        BIND(done, result);
        return result;
      }
    }
    UNREACHABLE();
  }

  V<Word> REDUCE(TruncateJSPrimitiveToUntagged)(
      V<JSPrimitive> object, TruncateJSPrimitiveToUntaggedOp::UntaggedKind kind,
      TruncateJSPrimitiveToUntaggedOp::InputAssumptions input_assumptions) {
    switch (kind) {
      case TruncateJSPrimitiveToUntaggedOp::UntaggedKind::kInt32: {
        DCHECK_EQ(input_assumptions, TruncateJSPrimitiveToUntaggedOp::
                                         InputAssumptions::kNumberOrOddball);
        Label<Word32> done(this);

        IF (LIKELY(__ ObjectIsSmi(object))) {
          GOTO(done, __ UntagSmi(V<Smi>::Cast(object)));
        } ELSE {
          V<Float64> number_value = __ template LoadField<Float64>(
              object, AccessBuilder::ForHeapNumberOrOddballOrHoleValue());
          GOTO(done, __ JSTruncateFloat64ToWord32(number_value));
        }

        BIND(done, result);
        return result;
      }
      case TruncateJSPrimitiveToUntaggedOp::UntaggedKind::kInt64: {
        DCHECK_EQ(input_assumptions,
                  TruncateJSPrimitiveToUntaggedOp::InputAssumptions::kBigInt);
        DCHECK(Is64());
        Label<Word64> done(this);

        V<Word32> bitfield = __ template LoadField<Word32>(
            object, AccessBuilder::ForBigIntBitfield());
        IF (__ Word32Equal(bitfield, 0)) {
          GOTO(done, 0);
        } ELSE {
          V<Word64> lsd = __ template LoadField<Word64>(
              object, AccessBuilder::ForBigIntLeastSignificantDigit64());
          V<Word32> sign =
              __ Word32BitwiseAnd(bitfield, BigInt::SignBits::kMask);
          IF (__ Word32Equal(sign, 1)) {
            GOTO(done, __ Word64Sub(0, lsd));
          }

          GOTO(done, lsd);
        }

        BIND(done, result);
        return result;
      }
      case TruncateJSPrimitiveToUntaggedOp::UntaggedKind::kBit: {
        Label<Word32> done(this);

        if (input_assumptions ==
            TruncateJSPrimitiveToUntaggedOp::InputAssumptions::kObject) {
          // Perform Smi check.
          IF (UNLIKELY(__ ObjectIsSmi(object))) {
            GOTO(done, __ Word32Equal(__ TaggedEqual(object, __ TagSmi(0)), 0));
          }

          // Otherwise fall through into HeapObject case.
        } else {
          DCHECK_EQ(
              input_assumptions,
              TruncateJSPrimitiveToUntaggedOp::InputAssumptions::kHeapObject);
        }

#if V8_STATIC_ROOTS_BOOL
        // Check if {object} is a falsey root or the true value.
        // Undefined is the first root, so it's the smallest possible pointer
        // value, which means we don't have to subtract it for the range check.
        ReadOnlyRoots roots(isolate_);
        static_assert(StaticReadOnlyRoot::kFirstAllocatedRoot ==
                      StaticReadOnlyRoot::kUndefinedValue);
        static_assert(StaticReadOnlyRoot::kUndefinedValue + sizeof(Undefined) ==
                      StaticReadOnlyRoot::kNullValue);
        static_assert(StaticReadOnlyRoot::kNullValue + sizeof(Null) ==
                      StaticReadOnlyRoot::kempty_string);
        static_assert(StaticReadOnlyRoot::kempty_string +
                          SeqOneByteString::SizeFor(0) ==
                      StaticReadOnlyRoot::kFalseValue);
        static_assert(StaticReadOnlyRoot::kFalseValue + sizeof(False) ==
                      StaticReadOnlyRoot::kTrueValue);
        V<Word32> object_as_word32 = __ TruncateWordPtrToWord32(
            __ BitcastHeapObjectToWordPtr(V<HeapObject>::Cast(object)));
        V<Word32> true_as_word32 =
            __ Word32Constant(StaticReadOnlyRoot::kTrueValue);
        GOTO_IF(__ Uint32LessThan(object_as_word32, true_as_word32), done, 0);
        GOTO_IF(__ Word32Equal(object_as_word32, true_as_word32), done, 1);
#else
        // Check if {object} is false.
        GOTO_IF(
            __ TaggedEqual(object, __ HeapConstant(factory_->false_value())),
            done, 0);

        // Check if {object} is true.
        GOTO_IF(__ TaggedEqual(object, __ HeapConstant(factory_->true_value())),
                done, 1);

        // Check if {object} is the empty string.
        GOTO_IF(
            __ TaggedEqual(object, __ HeapConstant(factory_->empty_string())),
            done, 0);

        // Only check null and undefined if we're not going to check the
        // undetectable bit.
        if (DependOnNoUndetectableObjectsProtector()) {
          // Check if {object} is the null value.
          GOTO_IF(
              __ TaggedEqual(object, __ HeapConstant(factory_->null_value())),
              done, 0);

          // Check if {object} is the undefined value.
          GOTO_IF(__ TaggedEqual(object,
                                 __ HeapConstant(factory_->undefined_value())),
                  done, 0);
        }
#endif

        // Load the map of {object}.
        V<Map> map = __ LoadMapField(object);

        if (!DependOnNoUndetectableObjectsProtector()) {
          // Check if the {object} is undetectable and immediately return false.
          V<Word32> bitfield = __ template LoadField<Word32>(
              map, AccessBuilder::ForMapBitField());
          GOTO_IF(__ Word32BitwiseAnd(bitfield,
                                      Map::Bits1::IsUndetectableBit::kMask),
                  done, 0);
        }

        // Check if {object} is a HeapNumber.
        IF (UNLIKELY(__ TaggedEqual(
                map, __ HeapConstant(factory_->heap_number_map())))) {
          // For HeapNumber {object}, just check that its value is not 0.0, -0.0
          // or NaN.
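          // |value| > 0.0 is false for +0, -0 and NaN (comparisons involving
          // NaN are always false), so this single compare rules out exactly
          // the falsey numbers.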
          V<Float64> number_value =
              __ LoadHeapNumberValue(V<HeapNumber>::Cast(object));
          GOTO(done, __ Float64LessThan(0.0, __ Float64Abs(number_value)));
        }

        // Check if {object} is a BigInt.
        IF (UNLIKELY(
                __ TaggedEqual(map, __ HeapConstant(factory_->bigint_map())))) {
          V<Word32> bitfield = __ template LoadField<Word32>(
              object, AccessBuilder::ForBigIntBitfield());
          GOTO(done, IsNonZero(__ Word32BitwiseAnd(bitfield,
                                                   BigInt::LengthBits::kMask)));
        }

        // All other values that reach here are true.
        GOTO(done, 1);

        BIND(done, result);
        return result;
      }
    }
    UNREACHABLE();
  }

  V<Word32> REDUCE(TruncateJSPrimitiveToUntaggedOrDeopt)(
      V<JSPrimitive> input, V<FrameState> frame_state,
      TruncateJSPrimitiveToUntaggedOrDeoptOp::UntaggedKind kind,
      TruncateJSPrimitiveToUntaggedOrDeoptOp::InputRequirement
          input_requirement,
      const FeedbackSource& feedback) {
    DCHECK_EQ(kind,
              TruncateJSPrimitiveToUntaggedOrDeoptOp::UntaggedKind::kInt32);
    Label<Word32> done(this);
    // In the Smi case, just convert to int32.
    GOTO_IF(LIKELY(__ ObjectIsSmi(input)), done,
            __ UntagSmi(V<Smi>::Cast(input)));

    // Otherwise, check that it's a heap number or oddball and truncate the
    // value to int32.
    V<Float64> number_value = ConvertHeapObjectToFloat64OrDeopt(
        input, frame_state, input_requirement, feedback);
    GOTO(done, __ JSTruncateFloat64ToWord32(number_value));

    BIND(done, result);
    return result;
  }

  V<Word32> JSAnyIsNotPrimitiveHeapObject(
      V<Object> value, V<Map> value_map = V<Map>::Invalid()) {
    if (!value_map.valid()) {
      value_map = __ LoadMapField(value);
    }
#if V8_STATIC_ROOTS_BOOL
    // Assumes only primitive objects and JS_RECEIVER's are passed here. All
    // primitive object's maps are in RO space and are allocated before all
    // JS_RECEIVER maps. Thus primitive object maps have smaller (compressed)
    // addresses.
    return __ Uint32LessThan(
        InstanceTypeChecker::kNonJsReceiverMapLimit,
        __ TruncateWordPtrToWord32(__ BitcastHeapObjectToWordPtr(value_map)));
#else
    static_assert(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
    V<Word32> value_instance_type = __ LoadInstanceTypeField(value_map);
    return __ Uint32LessThanOrEqual(FIRST_JS_RECEIVER_TYPE,
                                    value_instance_type);
#endif
  }

  V<Object> REDUCE(ConvertJSPrimitiveToObject)(V<JSPrimitive> value,
                                               V<Context> native_context,
                                               V<JSGlobalProxy> global_proxy,
                                               ConvertReceiverMode mode) {
    switch (mode) {
      case ConvertReceiverMode::kNullOrUndefined:
        return global_proxy;
      case ConvertReceiverMode::kNotNullOrUndefined:
      case ConvertReceiverMode::kAny: {
        Label<Object> done(this);

        // Check if {value} is already a JSReceiver (or null/undefined).
        Label<> convert_to_object(this);
        GOTO_IF(UNLIKELY(__ ObjectIsSmi(value)), convert_to_object);
        GOTO_IF_NOT(JSAnyIsNotPrimitiveHeapObject(value),
                    convert_to_object);
        GOTO(done, value);

        // Wrap the primitive {value} into a JSPrimitiveWrapper.
        if (BIND(convert_to_object)) {
          if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
            // Replace the {value} with the {global_proxy}.
            GOTO_IF(UNLIKELY(__ TaggedEqual(
                        value, __ HeapConstant(factory_->undefined_value()))),
                    done, global_proxy);
            GOTO_IF(UNLIKELY(__ TaggedEqual(
                        value, __ HeapConstant(factory_->null_value()))),
                    done, global_proxy);
          }
          GOTO(done, __ CallBuiltin_ToObject(isolate_, native_context, value));
        }

        BIND(done, result);
        return result;
      }
    }
    UNREACHABLE();
  }

  enum class StringEncoding { kOneByte, kTwoByte, kUnknown };
  StringEncoding GetStringEncoding(V<String> string) {
    const ConstantOp* constant =
        __ Get(string).template TryCast<Opmask::kHeapConstant>();
    if (!constant) return StringEncoding::kUnknown;

    HeapObjectRef ref = MakeRef(broker_, constant->handle());
    if (!V8_LIKELY(ref.IsString())) {
      // This can only happen in unreachable code.
      return StringEncoding::kUnknown;
    }

    return ref.AsString().IsOneByteRepresentation() ? StringEncoding::kOneByte
                                                    : StringEncoding::kTwoByte;
  }

  V<ConsString> REDUCE(NewConsString)(V<Word32> length, V<String> first,
                                      V<String> second) {
    ScopedVar<Map> map(this);

    // Determine the instance types of {first} and {second}.
    StringEncoding first_encoding = GetStringEncoding(first);
    StringEncoding second_encoding = GetStringEncoding(second);

    // Determine the proper map for the resulting ConsString.
    // If either of {first} or {second} is a 2-byte string, then we create a
    // 2-byte string. If both {first} and {second} are one-byte strings, we
    // create a new 1-byte string.
    if (first_encoding == StringEncoding::kTwoByte ||
        second_encoding == StringEncoding::kTwoByte) {
      map = __ HeapConstant(factory_->cons_two_byte_string_map());
    } else if (first_encoding == StringEncoding::kOneByte &&
               second_encoding == StringEncoding::kOneByte) {
      map = __ HeapConstant(factory_->cons_one_byte_string_map());
    } else {
      V<Word32> first_type, second_type;
      constexpr int kAllOnesMask = -1;
      if (first_encoding == StringEncoding::kUnknown) {
        V<Map> first_map = __ LoadMapField(first);
        first_type = __ LoadInstanceTypeField(first_map);
      } else {
        DCHECK_EQ(first_encoding, StringEncoding::kOneByte);
        first_type = __ Word32Constant(kAllOnesMask);
      }
      if (second_encoding == StringEncoding::kUnknown) {
        V<Map> second_map = __ LoadMapField(second);
        second_type = __ LoadInstanceTypeField(second_map);
      } else {
        DCHECK_EQ(second_encoding, StringEncoding::kOneByte);
        second_type = __ Word32Constant(kAllOnesMask);
      }

      static_assert(kOneByteStringTag != 0);
      static_assert(kTwoByteStringTag == 0);
      V<Word32> instance_type = __ Word32BitwiseAnd(first_type, second_type);
      V<Word32> encoding =
          __ Word32BitwiseAnd(instance_type, kStringEncodingMask);
      IF (__ Word32Equal(encoding, kTwoByteStringTag)) {
        map = __ HeapConstant(factory_->cons_two_byte_string_map());
      } ELSE {
        map = __ HeapConstant(factory_->cons_one_byte_string_map());
      }
    }

    // Allocate the resulting ConsString.
    auto string = __ template Allocate<ConsString>(
        __ IntPtrConstant(sizeof(ConsString)), AllocationType::kYoung);
    __ InitializeField(string, AccessBuilder::ForMap(), map);
    __ InitializeField(string, AccessBuilder::ForNameRawHashField(),
                       __ Word32Constant(Name::kEmptyHashField));
    __ InitializeField(string, AccessBuilder::ForStringLength(), length);
    __ InitializeField(string, AccessBuilder::ForConsStringFirst(), first);
    __ InitializeField(string, AccessBuilder::ForConsStringSecond(), second);
    return __ FinishInitialization(std::move(string));
  }

  V<AnyFixedArray> REDUCE(NewArray)(V<WordPtr> length, NewArrayOp::Kind kind,
                                    AllocationType allocation_type) {
    Label<AnyFixedArray> done(this);

    GOTO_IF(__ WordPtrEqual(length, 0), done,
            __ HeapConstant(factory_->empty_fixed_array()));

    // Compute the effective size of the backing store.
    intptr_t size_log2;
    Handle<Map> array_map;
    // TODO(nicohartmann@): Replace ElementAccess by a Turboshaft replacement.
    ElementAccess access;
    V<Any> the_hole_value;
    switch (kind) {
      case NewArrayOp::Kind::kDouble: {
        size_log2 = kDoubleSizeLog2;
        array_map = factory_->fixed_double_array_map();
        access = {kTaggedBase, OFFSET_OF_DATA_START(FixedDoubleArray),
                  compiler::Type::NumberOrHole(), MachineType::Float64(),
                  kNoWriteBarrier};
        the_hole_value = __ template LoadField<Float64>(
            __ HeapConstant(factory_->the_hole_value()),
            AccessBuilder::ForHeapNumberOrOddballOrHoleValue());
        break;
      }
      case NewArrayOp::Kind::kObject: {
        size_log2 = kTaggedSizeLog2;
        array_map = factory_->fixed_array_map();
        access = {kTaggedBase, OFFSET_OF_DATA_START(FixedArray),
                  compiler::Type::Any(), MachineType::AnyTagged(),
                  kNoWriteBarrier};
        the_hole_value = __ HeapConstant(factory_->the_hole_value());
        break;
      }
    }
    V<WordPtr> size =
        __ WordPtrAdd(__ WordPtrShiftLeft(length, static_cast<int>(size_log2)),
                      access.header_size);

    // Allocate the result and initialize the header.
    auto uninitialized_array =
        __ template Allocate<AnyFixedArray>(size, allocation_type);
    __ InitializeField(uninitialized_array, AccessBuilder::ForMap(),
                       __ HeapConstant(array_map));
    __ InitializeField(uninitialized_array,
                       AccessBuilder::ForFixedArrayLength(),
                       __ TagSmi(__ TruncateWordPtrToWord32(length)));
    // TODO(nicohartmann@): Should finish initialization only after all elements
    // have been initialized.
    auto array = __ FinishInitialization(std::move(uninitialized_array));

    ScopedVar<WordPtr> index(this, 0);

    WHILE(__ UintPtrLessThan(index, length)) {
      __ StoreNonArrayBufferElement(array, access, index, the_hole_value);
      // Advance the {index}.
      index = __ WordPtrAdd(index, 1);
    }

    GOTO(done, array);

    BIND(done, result);
    return result;
  }

  V<Number> REDUCE(DoubleArrayMinMax)(V<JSArray> array,
                                      DoubleArrayMinMaxOp::Kind kind) {
    DCHECK(kind == DoubleArrayMinMaxOp::Kind::kMin ||
           kind == DoubleArrayMinMaxOp::Kind::kMax);
    const bool is_max = kind == DoubleArrayMinMaxOp::Kind::kMax;

    // Iterate the elements and find the result.
    V<WordPtr> array_length =
        __ ChangeInt32ToIntPtr(__ UntagSmi(__ template LoadField<Smi>(
            array, AccessBuilder::ForJSArrayLength(
                       ElementsKind::PACKED_DOUBLE_ELEMENTS))));
    V<Object> elements = __ template LoadField<Object>(
        array, AccessBuilder::ForJSObjectElements());

    ScopedVar<Float64> result(this, is_max ? -V8_INFINITY : V8_INFINITY);
    ScopedVar<WordPtr> index(this, 0);

    WHILE(__ UintPtrLessThan(index, array_length)) {
      V<Float64> element = __ template LoadNonArrayBufferElement<Float64>(
          elements, AccessBuilder::ForFixedDoubleArrayElement(), index);

      result = is_max ? __ Float64Max(result, element)
                      : __ Float64Min(result, element);
      index = __ WordPtrAdd(index, 1);
    }

    return __ ConvertFloat64ToNumber(result,
                                     CheckForMinusZeroMode::kCheckForMinusZero);
  }

  V<Object> REDUCE(LoadFieldByIndex)(V<Object> object, V<Word32> field_index) {
    // Index encoding (see `src/objects/field-index-inl.h`):
    // For efficiency, the LoadByFieldIndex instruction takes an index that is
    // optimized for quick access. If the property is inline, the index is
    // positive. If it's out-of-line, the encoded index is -raw_index - 1 to
    // disambiguate the zero out-of-line index from the zero inobject case.
    // The index itself is shifted up by one bit, the lower-most bit
    // signifying if the field is a mutable double box (1) or not (0).
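    // Decoding below therefore mirrors this encoding: bit 0 selects the
    // tagged vs. double-box path, an arithmetic shift by one recovers the
    // index, and its sign selects in-object fields vs. the out-of-line
    // properties backing store.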
    V<WordPtr> index = __ ChangeInt32ToIntPtr(field_index);

    Label<> double_field(this);
    Label<Object> done(this);

    // Check if field is a mutable double field.
    GOTO_IF(
        UNLIKELY(__ Word32Equal(
            __ Word32BitwiseAnd(__ TruncateWordPtrToWord32(index), 0x1), 0x1)),
        double_field);

    {
      // The field is a proper Tagged field on {object}. The {index} is
      // shifted to the left by one in the code below.

      // Check if field is in-object or out-of-object.
      IF (__ IntPtrLessThan(index, 0)) {
        // The field is located in the properties backing store of {object}.
        // The {index} is equal to the negated out of property index plus 1.
        V<Object> properties = __ template LoadField<Object>(
            object, AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer());

        V<WordPtr> out_of_object_index = __ WordPtrSub(0, index);
        V<Object> result =
            __ Load(properties, out_of_object_index,
                    LoadOp::Kind::TaggedBase(),
                    MemoryRepresentation::AnyTagged(),
                    OFFSET_OF_DATA_START(FixedArray) - kTaggedSize,
                    kTaggedSizeLog2 - 1);
        GOTO(done, result);
      } ELSE {
        // This field is located in the {object} itself.
        V<Object> result = __ Load(
            object, index, LoadOp::Kind::TaggedBase(),
            MemoryRepresentation::AnyTagged(), JSObject::kHeaderSize,
            kTaggedSizeLog2 - 1);
        GOTO(done, result);
      }
    }

    if (BIND(double_field)) {
      // If field is a Double field, either unboxed in the object on 64 bit
      // architectures, or a mutable HeapNumber.
      V<WordPtr> double_index = __ WordPtrShiftRightArithmetic(index, 1);
      Label<Object> loaded_field(this);

      // Check if field is in-object or out-of-object.
      IF (__ IntPtrLessThan(double_index, 0)) {
        V<Object> properties = __ template LoadField<Object>(
            object, AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer());

        V<WordPtr> out_of_object_index = __ WordPtrSub(0, double_index);
        V<Object> result = __ Load(
            properties, out_of_object_index,
            LoadOp::Kind::TaggedBase(), MemoryRepresentation::AnyTagged(),
            OFFSET_OF_DATA_START(FixedArray) - kTaggedSize,
            kTaggedSizeLog2 - 1);
        GOTO(loaded_field, result);
      } ELSE {
        // The field is located in the {object} itself.
        V<Object> result =
            __ Load(object, double_index, LoadOp::Kind::TaggedBase(),
                    MemoryRepresentation::AnyTagged(), JSObject::kHeaderSize,
                    kTaggedSizeLog2 - 1);
        GOTO(loaded_field, result);
      }

      if (BIND(loaded_field, field)) {
        // We may have transitioned in-place away from double, so check that
        // this is a HeapNumber -- otherwise the load is fine and we don't need
        // to copy anything anyway.
        GOTO_IF(__ ObjectIsSmi(field), done, field);
        V<Map> map = __ LoadMapField(field);
        GOTO_IF_NOT(
            __ TaggedEqual(map, __ HeapConstant(factory_->heap_number_map())),
            done, field);

        V<Float64> value = __ LoadHeapNumberValue(V<HeapNumber>::Cast(field));
        GOTO(done, AllocateHeapNumber(value));
      }
    }

    BIND(done, result);
    return result;
  }

  V<Word> REDUCE(WordBinopDeoptOnOverflow)(
      V<Word> left, V<Word> right, V<FrameState> frame_state,
      WordBinopDeoptOnOverflowOp::Kind kind, WordRepresentation rep,
      FeedbackSource feedback, CheckForMinusZeroMode mode) {
    switch (kind) {
      case WordBinopDeoptOnOverflowOp::Kind::kSignedAdd: {
        DCHECK_EQ(mode, CheckForMinusZeroMode::kDontCheckForMinusZero);
        V<Tuple<Word, Word32>> result =
            __ IntAddCheckOverflow(left, right, rep);

        V<Word32> overflow = __ template Projection<1>(result);
        __ DeoptimizeIf(overflow, frame_state, DeoptimizeReason::kOverflow,
                        feedback);
        return __ template Projection<0>(result, rep);
      }
      case WordBinopDeoptOnOverflowOp::Kind::kSignedSub: {
        DCHECK_EQ(mode, CheckForMinusZeroMode::kDontCheckForMinusZero);
        V<Tuple<Word, Word32>> result =
            __ IntSubCheckOverflow(left, right, rep);

        V<Word32> overflow = __ template Projection<1>(result);
        __ DeoptimizeIf(overflow, frame_state, DeoptimizeReason::kOverflow,
                        feedback);
        return __ template Projection<0>(result, rep);
      }
      case WordBinopDeoptOnOverflowOp::Kind::kSignedMul:
        if (rep == WordRepresentation::Word32()) {
          V<Word32> left_w32 = V<Word32>::Cast(left);
          V<Word32> right_w32 = V<Word32>::Cast(right);
          V<Tuple<Word32, Word32>> result =
              __ Int32MulCheckOverflow(left_w32, right_w32);
          V<Word32> overflow = __ template Projection<1>(result);
          __ DeoptimizeIf(overflow, frame_state, DeoptimizeReason::kOverflow,
                          feedback);
          V<Word32> value = __ template Projection<0>(result);

          if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
            IF (__ Word32Equal(value, 0)) {
              __ DeoptimizeIf(
                  __ Int32LessThan(__ Word32BitwiseOr(left_w32, right_w32), 0),
                  frame_state, DeoptimizeReason::kMinusZero, feedback);
            }
          }

          return value;
        } else {
          DCHECK_EQ(rep, WordRepresentation::Word64());
          DCHECK(Is64());
          V<Tuple<Word64, Word32>> result = __ Int64MulCheckOverflow(
              V<Word64>::Cast(left), V<Word64>::Cast(right));

          V<Word32> overflow = __ template Projection<1>(result);
          __ DeoptimizeIf(overflow, frame_state, DeoptimizeReason::kOverflow,
                          feedback);
          return __ template Projection<0>(result);
        }
      case WordBinopDeoptOnOverflowOp::Kind::kSignedDiv:
        if (rep == WordRepresentation::Word32()) {
          V<Word32> left_w32 = V<Word32>::Cast(left);
          V<Word32> right_w32 = V<Word32>::Cast(right);
          // Check if the {rhs} is a known power of two.
          int32_t divisor;
          if (__ matcher().MatchPowerOfTwoWord32Constant(right_w32, &divisor)) {
            // Since we know that {rhs} is a power of two, we can perform a fast
            // check to see if the relevant least significant bits of the {lhs}
            // are all zero, and if so we know that we can perform a division
            // safely (and fast by doing an arithmetic - aka sign preserving -
            // right shift on {lhs}).
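            // E.g. for lhs = 12, rhs = 4: 12 & 3 == 0, so 12 >> 2 == 3 is
            // exact; lhs = 13 would deoptimize instead, since 13 & 3 != 0
            // means the division cannot be lossless.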
1952 V<Word32> check =
1953 __ Word32Equal(__ Word32BitwiseAnd(left_w32, divisor - 1), 0);
1954 __ DeoptimizeIfNot(check, frame_state,
1955 DeoptimizeReason::kLostPrecision, feedback);
1956 return __ Word32ShiftRightArithmeticShiftOutZeros(
1957 left_w32, base::bits::WhichPowerOfTwo(divisor));
1958 } else {
1959 Label<Word32> done(this);
1960
1961 // Check if {rhs} is positive (and not zero).
1962 IF (__ Int32LessThan(0, right_w32)) {
1963 GOTO(done, __ Int32Div(left_w32, right_w32));
1964 } ELSE {
1965 // Check if {rhs} is zero.
1966 __ DeoptimizeIf(__ Word32Equal(right_w32, 0), frame_state,
1967 DeoptimizeReason::kDivisionByZero, feedback);
1968
1969 // Check if {lhs} is zero, as that would produce minus zero.
1970 __ DeoptimizeIf(__ Word32Equal(left_w32, 0), frame_state,
1971 DeoptimizeReason::kMinusZero, feedback);
1972
1973 // Check if {lhs} is kMinInt and {rhs} is -1, in which case we'd
1974 // have to return -kMinInt, which is not representable as Word32.
1975 IF (UNLIKELY(__ Word32Equal(left_w32, kMinInt))) {
1976 __ DeoptimizeIf(__ Word32Equal(right_w32, -1), frame_state,
1977 DeoptimizeReason::kOverflow, feedback);
1978 }
1979
1980 GOTO(done, __ Int32Div(left_w32, right_w32));
1981 }
1982
1983 BIND(done, value);
1984 V<Word32> lossless =
1985 __ Word32Equal(left_w32, __ Word32Mul(value, right_w32));
1986 __ DeoptimizeIfNot(lossless, frame_state,
1987 DeoptimizeReason::kLostPrecision, feedback);
1988 return value;
1989 }
1990 } else {
1992 DCHECK(Is64());
1993 V<Word64> left_w64 = V<Word64>::Cast(left);
1994 V<Word64> right_w64 = V<Word64>::Cast(right);
1995
1996 __ DeoptimizeIf(__ Word64Equal(right_w64, 0), frame_state,
1997 DeoptimizeReason::kDivisionByZero, feedback);
1998 // Check if {lhs} is kMinInt64 and {rhs} is -1, in which case we'd
1999 // have to return -kMinInt64, which is not representable as Word64.
2000 IF (UNLIKELY(__ Word64Equal(left_w64,
2001 std::numeric_limits<int64_t>::min()))) {
2002 __ DeoptimizeIf(__ Word64Equal(right_w64, int64_t{-1}), frame_state,
2003 DeoptimizeReason::kOverflow, feedback);
2004 }
2005
2006 return __ Int64Div(left_w64, right_w64);
2007 }
2009 if (rep == WordRepresentation::Word32()) {
2010 V<Word32> left_w32 = V<Word32>::Cast(left);
2011 V<Word32> right_w32 = V<Word32>::Cast(right);
2012 // General case for signed integer modulus, with optimization for
2013 // (unknown) power of 2 right hand side.
2014 //
2015 // if rhs <= 0 then
2016 // rhs = -rhs
2017 // deopt if rhs == 0
2018 // if lhs < 0 then
2019 // let lhs_abs = -lhs in
2020 // let res = lhs_abs % rhs in
2021 // deopt if res == 0
2022 // -res
2023 // else
2024 // let msk = rhs - 1 in
2025 // if rhs & msk == 0 then
2026 // lhs & msk
2027 // else
2028 // lhs % rhs
2029 //
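// Worked example: -7 % 3 takes the negative-lhs path with lhs_abs == 7,
// res == 1, and returns -1. For -6 % 3 the remainder is 0, which would
// be -0 in JS semantics, so we deoptimize instead.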
2030 Label<Word32> rhs_checked(this);
2031 Label<Word32> done(this);
2032
2033 // Check if {rhs} is not strictly positive.
2034 IF (__ Int32LessThanOrEqual(right_w32, 0)) {
2035 // Negate {rhs}; this might still produce a negative result in the case
2036 // of -2^31, but that is handled safely below.
2037 V<Word32> temp = __ Word32Sub(0, right_w32);
2038
2039 // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
2040 __ DeoptimizeIfNot(temp, frame_state,
2041 DeoptimizeReason::kDivisionByZero, feedback);
2042 GOTO(rhs_checked, temp);
2043 } ELSE {
2044 GOTO(rhs_checked, right_w32);
2045 }
2046
2047 BIND(rhs_checked, rhs_value);
2048
2049 IF (__ Int32LessThan(left_w32, 0)) {
2050 // The {lhs} is a negative integer. This is very unlikely and
2051 // we intentionally don't use BuildUint32Mod() here, which
2052 // would try to figure out whether {rhs} is a power of two,
2053 // since this is intended to be a slow path.
2054 V<Word32> temp = __ Uint32Mod(__ Word32Sub(0, left_w32), rhs_value);
2055
2056 // Check if we would have to return -0.
2057 __ DeoptimizeIf(__ Word32Equal(temp, 0), frame_state,
2058 DeoptimizeReason::kMinusZero, feedback);
2059 GOTO(done, __ Word32Sub(0, temp));
2060 } ELSE {
2061 // The {lhs} is a non-negative integer.
2062 GOTO(done, BuildUint32Mod(left_w32, rhs_value));
2063 }
2064
2065 BIND(done, result);
2066 return result;
2067 } else {
2069 DCHECK(Is64());
2070 V<Word64> left_w64 = V<Word64>::Cast(left);
2071 V<Word64> right_w64 = V<Word64>::Cast(right);
2072
2073 __ DeoptimizeIf(__ Word64Equal(right_w64, 0), frame_state,
2074 DeoptimizeReason::kDivisionByZero, feedback);
2075
2076 // While the mod-result cannot overflow, the underlying instruction is
2077 // `idiv` and will trap when the accompanying div-result overflows.
2078 IF (UNLIKELY(__ Word64Equal(left_w64,
2079 std::numeric_limits<int64_t>::min()))) {
2080 __ DeoptimizeIf(__ Word64Equal(right_w64, int64_t{-1}), frame_state,
2081 DeoptimizeReason::kOverflow, feedback);
2082 }
2083
2084 return __ Int64Mod(left_w64, right_w64);
2085 }
2087 case WordBinopDeoptOnOverflowOp::Kind::kUnsignedDiv: {
2088 V<Word32> left_w32 = V<Word32>::Cast(left);
2089 V<Word32> right_w32 = V<Word32>::Cast(right);
2090
2091 // Check if the {rhs} is a known power of two.
2092 int32_t divisor;
2093 if (__ matcher().MatchPowerOfTwoWord32Constant(right_w32, &divisor)) {
2094 // Since we know that {rhs} is a power of two, we can perform a fast
2095 // check to see if the relevant least significant bits of the {lhs}
2096 // are all zero, and if so we know that we can perform a division
2097 // safely (and fast by doing a logical - aka zero extending - right
2098 // shift on {lhs}).
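// For example, {lhs} == 40 and {rhs} == 8: 40 & 7 == 0, so
// 40 >> 3 == 5 is exact; any non-zero low bits would deoptimize.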
2099 V<Word32> check =
2100 __ Word32Equal(__ Word32BitwiseAnd(left_w32, divisor - 1), 0);
2101 __ DeoptimizeIfNot(check, frame_state,
2102 DeoptimizeReason::kLostPrecision, feedback);
2103 return __ Word32ShiftRightLogical(
2104 left_w32, base::bits::WhichPowerOfTwo(divisor));
2105 } else {
2106 // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
2107 __ DeoptimizeIf(__ Word32Equal(right_w32, 0), frame_state,
2108 DeoptimizeReason::kDivisionByZero, feedback);
2109
2110 // Perform the actual unsigned integer division.
2111 V<Word32> value = __ Uint32Div(left_w32, right_w32);
2112
2113 // Deoptimize if the remainder is non-zero, i.e. the division was lossy.
2114 V<Word32> lossless =
2115 __ Word32Equal(left_w32, __ Word32Mul(right_w32, value));
2116 __ DeoptimizeIfNot(lossless, frame_state,
2117 DeoptimizeReason::kLostPrecision, feedback);
2118 return value;
2119 }
2120 }
2122 case WordBinopDeoptOnOverflowOp::Kind::kUnsignedMod: {
2123 V<Word32> left_w32 = V<Word32>::Cast(left);
2124 V<Word32> right_w32 = V<Word32>::Cast(right);
2125
2126 // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
2127 __ DeoptimizeIf(__ Word32Equal(right_w32, 0), frame_state,
2128 DeoptimizeReason::kDivisionByZero, feedback);
2129
2130 return BuildUint32Mod(left_w32, right_w32);
2131 }
2132 }
2133 }
2134
2136 V<FrameState> frame_state,
2138 const Builtin builtin = GetBuiltinForBigIntBinop(kind);
2139 switch (kind) {
2146 V<Numeric> result = CallBuiltinForBigIntOp(builtin, {left, right});
2147
2148 // Check for exception sentinel: Smi 0 is returned to signal
2149 // BigIntTooBig.
2150 __ DeoptimizeIf(__ ObjectIsSmi(result), frame_state,
2151 DeoptimizeReason::kBigIntTooBig, FeedbackSource{});
2152 return V<BigInt>::Cast(result);
2153 }
2157 V<Numeric> result = CallBuiltinForBigIntOp(builtin, {left, right});
2158
2159 // Check for exception sentinel: Smi 1 is returned to signal
2160 // TerminationRequested.
2161 IF (UNLIKELY(__ TaggedEqual(result, __ TagSmi(1)))) {
2162 __ CallRuntime_TerminateExecution(isolate_, frame_state,
2163 __ NoContextConstant());
2164 }
2165
2166 // Check for exception sentinel: Smi 0 is returned to signal
2167 // BigIntTooBig or DivisionByZero.
2168 __ DeoptimizeIf(__ ObjectIsSmi(result), frame_state,
2170 ? DeoptimizeReason::kBigIntTooBig
2171 : DeoptimizeReason::kDivisionByZero,
2172 FeedbackSource{});
2173 return V<BigInt>::Cast(result);
2174 }
2176 return CallBuiltinForBigIntOp(builtin, {left, right});
2177 }
2178 default:
2179 UNIMPLEMENTED();
2180 }
2181 UNREACHABLE();
2182 }
2183
2186 switch (kind) {
2188 return CallBuiltinForBigIntOp(Builtin::kBigIntEqual, {left, right});
2190 return CallBuiltinForBigIntOp(Builtin::kBigIntLessThan, {left, right});
2192 return CallBuiltinForBigIntOp(Builtin::kBigIntLessThanOrEqual,
2193 {left, right});
2194 }
2195 }
2196
2199 return CallBuiltinForBigIntOp(Builtin::kBigIntUnaryMinus, {input});
2200 }
2201
2205 Label<Word32> done(this);
2206
2207 if (const ConstantOp* cst =
2208 __ matcher().template TryCast<ConstantOp>(string);
2209 cst && cst->kind == ConstantOp::Kind::kHeapObject) {
2210 // For a constant SeqString, we have a fast path that doesn't run through
2211 // the loop. It requires fewer loads (we only load the map once, but not
2212 // the instance type), uses a statically known 1- or 2-byte encoding, and
2213 // needs only a single comparison to check that the string indeed has the
2214 // correct SeqString map.
2216 HeapObjectRef ref = MakeRef(broker_, cst->handle());
2217 if (ref.IsString()) {
2218 StringRef str = ref.AsString();
2219 if (str.IsSeqString()) {
2220 V<Map> dynamic_map = __ LoadMapField(string);
2221 Handle<Map> expected_map = str.map(broker_).object();
2222 IF (__ TaggedEqual(dynamic_map, __ HeapConstant(expected_map))) {
2223 bool one_byte = str.IsOneByteRepresentation();
2224 GOTO(done,
2225 LoadFromSeqString(string, pos, __ Word32Constant(one_byte)));
2226 }
2227 }
2228 }
2229 }
2230
2231 Label<> seq_string(this), external_string(this), cons_string(this),
2232 sliced_string(this), thin_string(this);
2233 // TODO(dmercadier): the runtime label should be deferred, and because
2234 // Labels/Blocks don't have a deferred annotation, we achieve this by
2235 // marking all branches to this Label as UNLIKELY, but 1) it's easy to
2236 // forget one, and 2) it makes the code less clear: `if (x) {} else
2237 // if (likely(y)) {} else {}` looks like `y` is more likely than `x`, but
2238 // it just means that `y` is more likely than `!y`.
2239 Label<> runtime(this);
2240 // We need a loop here to properly deal with indirect strings
2241 // (SlicedString, ConsString and ThinString).
2242 LoopLabel<> loop(this);
2243 ScopedVar<String> receiver(this, string);
2245 GOTO(loop);
2246
2247 BIND_LOOP(loop) {
2248 V<Map> map = __ LoadMapField(receiver);
2249#if V8_STATIC_ROOTS_BOOL
2250 V<Word32> map_bits =
2251 __ TruncateWordPtrToWord32(__ BitcastTaggedToWordPtr(map));
2252
2253 using StringTypeRange =
2254 InstanceTypeChecker::kUniqueMapRangeOfStringType;
2255 // Check the string map ranges in dense increasing order, to avoid
2256 // needing to subtract away the lower bound.
2257 static_assert(StringTypeRange::kSeqString.first == 0);
2258 GOTO_IF(__ Uint32LessThanOrEqual(map_bits,
2259 StringTypeRange::kSeqString.second),
2260 seq_string);
2261
2262 static_assert(StringTypeRange::kSeqString.second + Map::kSize ==
2263 StringTypeRange::kExternalString.first);
2264 GOTO_IF(__ Uint32LessThanOrEqual(
2265 map_bits, StringTypeRange::kExternalString.second),
2266 external_string);
2267
2268 static_assert(StringTypeRange::kExternalString.second + Map::kSize ==
2269 StringTypeRange::kConsString.first);
2270 GOTO_IF(__ Uint32LessThanOrEqual(map_bits,
2271 StringTypeRange::kConsString.second),
2272 cons_string);
2273
2274 static_assert(StringTypeRange::kConsString.second + Map::kSize ==
2275 StringTypeRange::kSlicedString.first);
2276 GOTO_IF(__ Uint32LessThanOrEqual(map_bits,
2277 StringTypeRange::kSlicedString.second),
2278 sliced_string);
2279
2280 static_assert(StringTypeRange::kSlicedString.second + Map::kSize ==
2281 StringTypeRange::kThinString.first);
2282 GOTO_IF(__ Uint32LessThanOrEqual(map_bits,
2283 StringTypeRange::kThinString.second),
2284 thin_string);
2285#else
2286 V<Word32> instance_type = __ LoadInstanceTypeField(map);
2287 V<Word32> representation =
2288 __ Word32BitwiseAnd(instance_type, kStringRepresentationMask);
2289
2290 GOTO_IF(__ Word32Equal(representation, kSeqStringTag), seq_string);
2291 GOTO_IF(__ Word32Equal(representation, kExternalStringTag),
2292 external_string);
2293 GOTO_IF(__ Word32Equal(representation, kConsStringTag), cons_string);
2294 GOTO_IF(__ Word32Equal(representation, kSlicedStringTag),
2295 sliced_string);
2296 GOTO_IF(__ Word32Equal(representation, kThinStringTag), thin_string);
2297#endif
2298
2299 __ Unreachable();
2300
2301 if (BIND(seq_string)) {
2302#if V8_STATIC_ROOTS_BOOL
2303 V<Word32> is_one_byte = __ Word32Equal(
2304 __ Word32BitwiseAnd(map_bits,
2305 InstanceTypeChecker::kStringMapEncodingMask),
2306 InstanceTypeChecker::kOneByteStringMapBit);
2307#else
2308 V<Word32> is_one_byte = __ Word32Equal(
2309 __ Word32BitwiseAnd(instance_type, kStringEncodingMask),
2310 kOneByteStringTag);
2311#endif
2312 GOTO(done, LoadFromSeqString(receiver, position, is_one_byte));
2313 }
2314
2315 if (BIND(external_string)) {
2316 // We need to bail out to the runtime for uncached external
2317 // strings.
2318#if V8_STATIC_ROOTS_BOOL
2319 V<Word32> is_uncached_external_string = __ Uint32LessThanOrEqual(
2320 __ Word32Sub(map_bits,
2321 StringTypeRange::kUncachedExternalString.first),
2322 StringTypeRange::kUncachedExternalString.second -
2323 StringTypeRange::kUncachedExternalString.first);
2324#else
2325 V<Word32> is_uncached_external_string = __ Word32Equal(
2326 __ Word32BitwiseAnd(instance_type, kUncachedExternalStringMask),
2327 kUncachedExternalStringTag);
2328#endif
2329 GOTO_IF(UNLIKELY(is_uncached_external_string), runtime);
2330
2331 OpIndex data = __ LoadField(
2332 receiver, AccessBuilder::ForExternalStringResourceData());
2333#if V8_STATIC_ROOTS_BOOL
2334 V<Word32> is_two_byte = __ Word32Equal(
2335 __ Word32BitwiseAnd(map_bits,
2336 InstanceTypeChecker::kStringMapEncodingMask),
2337 InstanceTypeChecker::kTwoByteStringMapBit);
2338#else
2339 V<Word32> is_two_byte = __ Word32Equal(
2340 __ Word32BitwiseAnd(instance_type, kStringEncodingMask),
2341 kTwoByteStringTag);
2342#endif
2343 IF (is_two_byte) {
2344 constexpr uint8_t twobyte_size_log2 = 1;
2345 V<Word32> value =
2346 __ Load(data, position,
2348 MemoryRepresentation::Uint16(), 0, twobyte_size_log2);
2349 GOTO(done, value);
2350 } ELSE {
2351 constexpr uint8_t onebyte_size_log2 = 0;
2352 V<Word32> value =
2353 __ Load(data, position,
2355 MemoryRepresentation::Uint8(), 0, onebyte_size_log2);
2356 GOTO(done, value);
2357 }
2358 }
2359
2360 if (BIND(cons_string)) {
2361 V<String> second = __ template LoadField<String>(
2362 receiver, AccessBuilder::ForConsStringSecond());
2363 GOTO_IF_NOT(LIKELY(__ TaggedEqual(
2364 second, __ HeapConstant(factory_->empty_string()))),
2365 runtime);
2366 receiver = __ template LoadField<String>(
2367 receiver, AccessBuilder::ForConsStringFirst());
2368 GOTO(loop);
2369 }
2370
2371 if (BIND(sliced_string)) {
2372 V<Smi> offset = __ template LoadField<Smi>(
2373 receiver, AccessBuilder::ForSlicedStringOffset());
2374 receiver = __ template LoadField<String>(
2375 receiver, AccessBuilder::ForSlicedStringParent());
2376 position = __ WordPtrAdd(position,
2377 __ ChangeInt32ToIntPtr(__ UntagSmi(offset)));
2378 GOTO(loop);
2379 }
2380
2381 if (BIND(thin_string)) {
2382 receiver = __ template LoadField<String>(
2383 receiver, AccessBuilder::ForThinStringActual());
2384 GOTO(loop);
2385 }
2386
2387 if (BIND(runtime)) {
2388 V<Word32> value =
2389 __ UntagSmi(V<Smi>::Cast(__ CallRuntime_StringCharCodeAt(
2390 isolate_, __ NoContextConstant(), receiver,
2391 __ TagSmi(__ TruncateWordPtrToWord32(position)))));
2392 GOTO(done, value);
2393 }
2394 }
2395
2396 BIND(done, result);
2397 return result;
2398 } else {
2400 return LoadSurrogatePairAt(string, {}, pos, UnicodeEncoding::UTF32);
2401 }
2402
2403 UNREACHABLE();
2404 }
2405
2407 // TODO(dmercadier): Somewhere (maybe not here but instead in a new
2408 // SimplifiedOptimizationReducer?), constant fold StringLength(Constant).
2409 return __ template LoadField<Word32>(string,
2410 AccessBuilder::ForStringLength());
2411 }
2412
2414 ElementsKind elements_kind) {
2415 // TODO(dmercadier): Somewhere (maybe not here but instead in a new
2416 // SimplifiedOptimizationReducer?), constant fold
2417 // TypedArrayLength(Constant).
2418 V<WordPtr> byte_length = __ template LoadField<WordPtr>(
2420 CHECK(!IsRabGsabTypedArrayElementsKind(elements_kind));
2421 return __ WordPtrShiftRightLogical(byte_length,
2422 ElementsKindToShiftSize(elements_kind));
2423 }
2424
2426 V<Smi> position) {
2427 return __ CallBuiltin_StringIndexOf(isolate_, string, search, position);
2428 }
2429
2431 return __ CallBuiltin_StringFromCodePointAt(isolate_, string, index);
2432 }
2433
2434#ifdef V8_INTL_SUPPORT
2435 V<String> REDUCE(StringToCaseIntl)(V<String> string,
2436 StringToCaseIntlOp::Kind kind) {
2437 if (kind == StringToCaseIntlOp::Kind::kLower) {
2438 return __ CallBuiltin_StringToLowerCaseIntl(
2439 isolate_, __ NoContextConstant(), string);
2440 } else {
2441 DCHECK_EQ(kind, StringToCaseIntlOp::Kind::kUpper);
2442 return __ CallRuntime_StringToUpperCaseIntl(
2443 isolate_, __ NoContextConstant(), string);
2444 }
2445 }
2446#endif // V8_INTL_SUPPORT
2447
2449 V<Word32> end) {
2450 V<WordPtr> s = __ ChangeInt32ToIntPtr(start);
2451 V<WordPtr> e = __ ChangeInt32ToIntPtr(end);
2452 return __ CallBuiltin_StringSubstring(isolate_, string, s, e);
2453 }
2454
2456 V<String> right) {
2457 // TODO(nicohartmann@): Port StringBuilder once it is stable.
2458 return __ CallBuiltin_StringAdd_CheckNone(isolate_, __ NoContextConstant(),
2459 left, right);
2460 }
2461
2464 switch (kind) {
2466 Label<Boolean> done(this);
2467
2468 GOTO_IF(__ TaggedEqual(left, right), done,
2469 __ HeapConstant(factory_->true_value()));
2470
2471 V<Word32> left_length = __ template LoadField<Word32>(
2472 left, AccessBuilder::ForStringLength());
2473 V<Word32> right_length = __ template LoadField<Word32>(
2474 right, AccessBuilder::ForStringLength());
2475 IF (__ Word32Equal(left_length, right_length)) {
2476 GOTO(done,
2477 __ CallBuiltin_StringEqual(isolate_, left, right,
2478 __ ChangeInt32ToIntPtr(left_length)));
2479 } ELSE {
2480 GOTO(done, __ HeapConstant(factory_->false_value()));
2481 }
2482
2483 BIND(done, result);
2484 return result;
2485 }
2487 return __ CallBuiltin_StringLessThan(isolate_, left, right);
2489 return __ CallBuiltin_StringLessThanOrEqual(isolate_, left, right);
2490 }
2491 }
2492
2494 int formal_parameter_count) {
2495 V<WordPtr> count =
2496 __ LoadOffHeap(__ FramePointer(), StandardFrameConstants::kArgCOffset,
2497 MemoryRepresentation::UintPtr());
2498 V<WordPtr> arguments_length = __ WordPtrSub(count, kJSArgcReceiverSlots);
2499
2501 return __ TagSmi(__ TruncateWordPtrToWord32(arguments_length));
2502 } else {
2504 V<WordPtr> rest_length =
2505 __ WordPtrSub(arguments_length, formal_parameter_count);
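// For example, with `function f(a, b, ...rest)` (formal_parameter_count
// == 2) called with 3 arguments, rest_length is 1; called with only 1
// argument, rest_length is negative and is clamped to 0 below.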
2506 Label<WordPtr> done(this);
2507 IF (__ IntPtrLessThan(rest_length, 0)) {
2508 GOTO(done, 0);
2509 } ELSE {
2510 GOTO(done, rest_length);
2511 }
2512
2513 BIND(done, value);
2514 return __ TagSmi(__ TruncateWordPtrToWord32(value));
2515 }
2516 }
2517
2520 int formal_parameter_count) {
2521 V<WordPtr> frame = __ FramePointer();
2522 V<WordPtr> p_count = __ IntPtrConstant(formal_parameter_count);
2523 switch (type) {
2525 return __ CallBuiltin_NewSloppyArgumentsElements(
2526 isolate_, frame, p_count, arguments_count);
2528 return __ CallBuiltin_NewStrictArgumentsElements(
2529 isolate_, frame, p_count, arguments_count);
2531 return __ CallBuiltin_NewRestArgumentsElements(isolate_, frame, p_count,
2532 arguments_count);
2533 }
2534 }
2535
2537 V<WordPtr> external, V<WordPtr> index,
2538 ExternalArrayType array_type) {
2539 V<WordPtr> data_ptr = BuildTypedArrayDataPointer(base, external);
2540
2541 // Perform the actual typed element access.
2542 V<Any> result = __ LoadArrayBufferElement(
2543 data_ptr, AccessBuilder::ForTypedArrayElement(array_type, true), index);
2544
2545 // We need to keep the {buffer} alive so that the GC will not release the
2546 // ArrayBuffer (if there's any) as long as we are still operating on it.
2547 __ Retain(buffer);
2548 return result;
2549 }
2550
2552 // Note that this is a load of a Tagged value
2553 // (MemoryRepresentation::TaggedPointer()), but since it's on the stack
2554 // where stack slots are all kSystemPointerSize, we use kSystemPointerSize
2555 // for element_size_log2. On 64-bit platforms with pointer compression,
2556 // this means that we're kinda loading a 32-bit value from an array of
2557 // 64-bit values.
2558#if V8_COMPRESS_POINTERS && V8_TARGET_BIG_ENDIAN
2559 constexpr int offset =
2561#else
2562 constexpr int offset =
2564#endif
2565 return __ Load(base, index, LoadOp::Kind::RawAligned(),
2568 }
2569
2571 V<WordPtr> external, V<WordPtr> index,
2572 V<Any> value,
2573 ExternalArrayType array_type) {
2574 V<WordPtr> data_ptr = BuildTypedArrayDataPointer(base, external);
2575
2576 // Perform the actual typed element access.
2577 __ StoreArrayBufferElement(
2578 data_ptr, AccessBuilder::ForTypedArrayElement(array_type, true), index,
2579 value);
2580
2581 // We need to keep the {buffer} alive so that the GC will not release the
2582 // ArrayBuffer (if there's any) as long as we are still operating on it.
2583 __ Retain(buffer);
2584 return {};
2585 }
2586
2590 MaybeHandle<Map> double_map) {
2591 V<Map> map = __ LoadMapField(array);
2592 V<Word32> bitfield2 =
2593 __ template LoadField<Word32>(map, AccessBuilder::ForMapBitField2());
2594 V<Word32> elements_kind = __ Word32ShiftRightLogical(
2595 __ Word32BitwiseAnd(bitfield2, Map::Bits2::ElementsKindBits::kMask),
2596 Map::Bits2::ElementsKindBits::kShift);
2597
2598 switch (kind) {
2600 // Possibly transition array based on input and store.
2601 //
2602 // -- TRANSITION PHASE -----------------
2603 // kind = ElementsKind(array)
2604 // if value is not smi {
2605 // if kind == HOLEY_SMI_ELEMENTS {
2606 // if value is heap number {
2607 // Transition array to HOLEY_DOUBLE_ELEMENTS
2608 // kind = HOLEY_DOUBLE_ELEMENTS
2609 // } else {
2610 // Transition array to HOLEY_ELEMENTS
2611 // kind = HOLEY_ELEMENTS
2612 // }
2613 // } else if kind == HOLEY_DOUBLE_ELEMENTS {
2614 // if value is not heap number {
2615 // Transition array to HOLEY_ELEMENTS
2616 // kind = HOLEY_ELEMENTS
2617 // }
2618 // }
2619 // }
2620 //
2621 // -- STORE PHASE ----------------------
2622 // [make sure {kind} is up-to-date]
2623 // if kind == HOLEY_DOUBLE_ELEMENTS {
2624 // if value is smi {
2625 // float_value = convert smi to float
2626 // Store array[index] = float_value
2627 // } else {
2628 // float_value = value
2629 // Store array[index] = float_value
2630 // }
2631 // } else {
2632 // // kind is HOLEY_SMI_ELEMENTS or HOLEY_ELEMENTS
2633 // Store array[index] = value
2634 // }
2635 //
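// In JS terms (illustrative): storing 1.5 into a HOLEY_SMI_ELEMENTS
// backing store transitions the array to HOLEY_DOUBLE_ELEMENTS, while
// storing a non-number HeapObject transitions it to HOLEY_ELEMENTS.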
2636 Label<Word32> do_store(this);
2637 // We can store a smi anywhere.
2638 GOTO_IF(__ ObjectIsSmi(value), do_store, elements_kind);
2639
2640 // {value} is a HeapObject.
2641 IF_NOT (LIKELY(__ Int32LessThan(HOLEY_SMI_ELEMENTS, elements_kind))) {
2642 // Transition {array} from HOLEY_SMI_ELEMENTS to HOLEY_DOUBLE_ELEMENTS
2643 // or to HOLEY_ELEMENTS.
2644 V<Map> value_map = __ LoadMapField(value);
2645 IF (__ TaggedEqual(value_map,
2646 __ HeapConstant(factory_->heap_number_map()))) {
2647 // {value} is a HeapNumber.
2650 double_map.ToHandleChecked());
2651 GOTO(do_store, HOLEY_DOUBLE_ELEMENTS);
2652 } ELSE {
2654 fast_map.ToHandleChecked());
2655 GOTO(do_store, HOLEY_ELEMENTS);
2656 }
2657 }
2658
2659 GOTO_IF_NOT(LIKELY(__ Int32LessThan(HOLEY_ELEMENTS, elements_kind)),
2660 do_store, elements_kind);
2661
2662 // We have double elements kind. Only a HeapNumber can be stored
2663 // without effecting a transition.
2664 V<Map> value_map = __ LoadMapField(value);
2665 IF_NOT (UNLIKELY(__ TaggedEqual(
2666 value_map, __ HeapConstant(factory_->heap_number_map())))) {
2668 fast_map.ToHandleChecked());
2669 GOTO(do_store, HOLEY_ELEMENTS);
2670 }
2671
2672 GOTO(do_store, elements_kind);
2673
2674 BIND(do_store, store_kind);
2675 V<Object> elements = __ template LoadField<Object>(
2676 array, AccessBuilder::ForJSObjectElements());
2677 IF (__ Int32LessThan(HOLEY_ELEMENTS, store_kind)) {
2678 // Our ElementsKind is HOLEY_DOUBLE_ELEMENTS.
2679 IF (__ ObjectIsSmi(value)) {
2680 V<Float64> float_value =
2681 __ ChangeInt32ToFloat64(__ UntagSmi(value));
2682 __ StoreNonArrayBufferElement(
2684 float_value);
2685 } ELSE {
2686 V<Float64> float_value =
2687 __ LoadHeapNumberValue(V<HeapNumber>::Cast(value));
2688 __ StoreNonArrayBufferElement(
2690 __ Float64SilenceNaN(float_value));
2691 }
2692 } ELSE {
2693 // Our ElementsKind is HOLEY_SMI_ELEMENTS or HOLEY_ELEMENTS.
2694 __ StoreNonArrayBufferElement(
2696 index, value);
2697 }
2698
2699 break;
2700 }
2702 Label<> done(this);
2703 // Possibly transition array based on input and store.
2704 //
2705 // -- TRANSITION PHASE -----------------
2706 // kind = ElementsKind(array)
2707 // if kind == HOLEY_SMI_ELEMENTS {
2708 // Transition array to HOLEY_DOUBLE_ELEMENTS
2709 // } else if kind != HOLEY_DOUBLE_ELEMENTS {
2710 // if kind == HOLEY_ELEMENTS {
2711 // Store value as a HeapNumber in array[index].
2712 // } else {
2713 // This is UNREACHABLE, execute a debug break.
2714 // }
2715 // }
2716 //
2717 // -- STORE PHASE ----------------------
2718 // Store array[index] = value (it's a float)
2719 //
2720 // {value} is a float64.
2721 IF_NOT (LIKELY(__ Int32LessThan(HOLEY_SMI_ELEMENTS, elements_kind))) {
2722 // Transition {array} from HOLEY_SMI_ELEMENTS to
2723 // HOLEY_DOUBLE_ELEMENTS.
2725 double_map.ToHandleChecked());
2726 } ELSE {
2727 // We expect that our input array starts at HOLEY_SMI_ELEMENTS and
2728 // climbs the lattice up to HOLEY_DOUBLE_ELEMENTS. However, loop
2729 // peeling can break this assumption, because in the peeled iteration,
2730 // the array might have transitioned to HOLEY_ELEMENTS kind, so we
2731 // handle this as well.
2732 IF_NOT (LIKELY(
2733 __ Word32Equal(elements_kind, HOLEY_DOUBLE_ELEMENTS))) {
2734 IF (__ Word32Equal(elements_kind, HOLEY_ELEMENTS)) {
2735 V<Object> elements = __ template LoadField<Object>(
2736 array, AccessBuilder::ForJSObjectElements());
2737 // Our ElementsKind is HOLEY_ELEMENTS.
2738 __ StoreNonArrayBufferElement(
2740 index, AllocateHeapNumber(value));
2741 GOTO(done);
2742 }
2743
2744 __ Unreachable();
2745 }
2746 }
2747
2748 V<Object> elements = __ template LoadField<Object>(
2749 array, AccessBuilder::ForJSObjectElements());
2750 __ StoreNonArrayBufferElement(
2752 __ Float64SilenceNaN(value));
2753 GOTO(done);
2754
2755 BIND(done);
2756 break;
2757 }
2760 // Possibly transition array based on input and store.
2761 //
2762 // -- TRANSITION PHASE -----------------
2763 // kind = ElementsKind(array)
2764 // if kind == HOLEY_SMI_ELEMENTS {
2765 // Transition array to HOLEY_ELEMENTS
2766 // } else if kind == HOLEY_DOUBLE_ELEMENTS {
2767 // Transition array to HOLEY_ELEMENTS
2768 // }
2769 //
2770 // -- STORE PHASE ----------------------
2771 // // kind is HOLEY_ELEMENTS
2772 // Store array[index] = value
2773 //
2774 IF_NOT (LIKELY(__ Int32LessThan(HOLEY_SMI_ELEMENTS, elements_kind))) {
2775 // Transition {array} from HOLEY_SMI_ELEMENTS to HOLEY_ELEMENTS.
2777 fast_map.ToHandleChecked());
2778 } ELSE IF (UNLIKELY(__ Int32LessThan(HOLEY_ELEMENTS, elements_kind))) {
2780 fast_map.ToHandleChecked());
2781 }
2782
2783 V<Object> elements = __ template LoadField<Object>(
2784 array, AccessBuilder::ForJSObjectElements());
2785 ElementAccess access =
2788 access.type = compiler::Type::BooleanOrNullOrUndefined();
2789 access.write_barrier_kind = kNoWriteBarrier;
2790 }
2791 __ StoreNonArrayBufferElement(elements, access, index, value);
2792 break;
2793 }
2795 // Store a signed small in an output array.
2796 //
2797 // kind = ElementsKind(array)
2798 //
2799 // -- STORE PHASE ----------------------
2800 // if kind == HOLEY_DOUBLE_ELEMENTS {
2801 // float_value = convert int32 to float
2802 // Store array[index] = float_value
2803 // } else {
2804 // // kind is HOLEY_SMI_ELEMENTS or HOLEY_ELEMENTS
2805 // smi_value = convert int32 to smi
2806 // Store array[index] = smi_value
2807 // }
2808 //
2809 V<Object> elements = __ template LoadField<Object>(
2810 array, AccessBuilder::ForJSObjectElements());
2811 IF (__ Int32LessThan(HOLEY_ELEMENTS, elements_kind)) {
2812 // Our ElementsKind is HOLEY_DOUBLE_ELEMENTS.
2813 V<Float64> f64 = __ ChangeInt32ToFloat64(value);
2814 __ StoreNonArrayBufferElement(
2816 f64);
2817 } ELSE {
2818 // Our ElementsKind is HOLEY_SMI_ELEMENTS or HOLEY_ELEMENTS.
2819 // In this case, we know our value is a signed small, and we can
2820 // optimize the ElementAccess information.
2821 ElementAccess access = AccessBuilder::ForFixedArrayElement();
2822 access.type = compiler::Type::SignedSmall();
2823 access.machine_type = MachineType::TaggedSigned();
2824 access.write_barrier_kind = kNoWriteBarrier;
2825 __ StoreNonArrayBufferElement(elements, access, index,
2826 __ TagSmi(value));
2827 }
2828
2829 break;
2830 }
2831 }
2832
2833 return V<None>::Invalid();
2834 }
2835
2837 const ZoneRefSet<Map>& maps) {
2838 if (!map.has_value()) {
2839 map = __ LoadMapField(heap_object);
2840 }
2841 return CompareMapAgainstMultipleMaps(map.value(), maps);
2842 }
2843
2845 V<FrameState> frame_state, OptionalV<Map> map,
2847 const FeedbackSource& feedback) {
2848 if (maps.is_empty()) {
2849 __ Deoptimize(frame_state, DeoptimizeReason::kWrongMap, feedback);
2850 return {};
2851 }
2852
2853 V<Map> heap_object_map;
2854 if (map.has_value()) {
2855 heap_object_map = map.value();
2856 } else {
2857 heap_object_map = __ LoadMapField(heap_object);
2858 }
2859
2861 IF_NOT (LIKELY(CompareMapAgainstMultipleMaps(heap_object_map, maps))) {
2862 // Reloading the map slightly reduces register pressure, and we are on a
2863 // slow path here anyway.
2864 MigrateInstanceOrDeopt(heap_object, heap_object_map, frame_state,
2865 feedback);
2866 heap_object_map = __ LoadMapField(heap_object);
2867 __ DeoptimizeIfNot(__ CompareMaps(heap_object, heap_object_map, maps),
2868 frame_state, DeoptimizeReason::kWrongMap, feedback);
2869 }
2870 } else if (flags & CheckMapsFlag::kTryMigrateInstanceAndDeopt) {
2871 IF_NOT (LIKELY(CompareMapAgainstMultipleMaps(heap_object_map, maps))) {
2873 heap_object, heap_object_map, frame_state, feedback);
2874 __ Deoptimize(frame_state, DeoptimizeReason::kWrongMap, feedback);
2875 }
2876 } else {
2877 __ DeoptimizeIfNot(__ CompareMaps(heap_object, heap_object_map, maps),
2878 frame_state, DeoptimizeReason::kWrongMap, feedback);
2879 }
2880 // Inserting an AssumeMap so that subsequent optimizations know the map of
2881 // this object.
2882 __ AssumeMap(heap_object, maps);
2883 return {};
2884 }
2885
2887 FloatRepresentation rep) {
2888 LABEL_BLOCK(no_change) { return Next::ReduceFloatUnary(input, kind, rep); }
2889 switch (kind) {
2890 case FloatUnaryOp::Kind::kRoundUp:
2891 case FloatUnaryOp::Kind::kRoundDown:
2892 case FloatUnaryOp::Kind::kRoundToZero:
2893 case FloatUnaryOp::Kind::kRoundTiesEven: {
2894 // TODO(14108): Implement for Float32.
2895 if (rep == FloatRepresentation::Float32()) {
2896 goto no_change;
2897 }
2899 V<Float64> input_f64 = V<Float64>::Cast(input);
2900 if (FloatUnaryOp::IsSupported(kind, rep)) {
2901 // If we have a fast machine operation for this, we can just keep it.
2902 goto no_change;
2903 }
2904 // Otherwise we have to lower it.
2905 V<Float64> two_52 = __ Float64Constant(4503599627370496.0E0);
2906 V<Float64> minus_two_52 = __ Float64Constant(-4503599627370496.0E0);
2907
2908 if (kind == FloatUnaryOp::Kind::kRoundUp) {
2909 // General case for ceil.
2910 //
2911 // if 0.0 < input then
2912 // if 2^52 <= input then
2913 // input
2914 // else
2915 // let temp1 = (2^52 + input) - 2^52 in
2916 // if temp1 < input then
2917 // temp1 + 1
2918 // else
2919 // temp1
2920 // else
2921 // if input == 0 then
2922 // input
2923 // else
2924 // if input <= -2^52 then
2925 // input
2926 // else
2927 // let temp1 = -0 - input in
2928 // let temp2 = (2^52 + temp1) - 2^52 in
2929 // if temp1 < temp2 then -0 - (temp2 - 1) else -0 - temp2
2930
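// Worked example: ceil(0.5). temp1 == (2^52 + 0.5) - 2^52 == 0.0
// (round-to-nearest-even), and since temp1 < input we return
// temp1 + 1 == 1.0.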
2931 Label<Float64> done(this);
2932
2933 IF (LIKELY(__ Float64LessThan(0.0, input_f64))) {
2934 GOTO_IF(UNLIKELY(__ Float64LessThanOrEqual(two_52, input_f64)),
2935 done, input_f64);
2936 V<Float64> temp1 =
2937 __ Float64Sub(__ Float64Add(two_52, input_f64), two_52);
2938 GOTO_IF_NOT(__ Float64LessThan(temp1, input_f64), done, temp1);
2939 GOTO(done, __ Float64Add(temp1, 1.0));
2940 } ELSE IF (UNLIKELY(__ Float64Equal(input_f64, 0.0))) {
2941 GOTO(done, input_f64);
2942 } ELSE IF (UNLIKELY(
2943 __ Float64LessThanOrEqual(input_f64, minus_two_52))) {
2944 GOTO(done, input_f64);
2945 } ELSE {
2946 V<Float64> temp1 = __ Float64Sub(-0.0, input_f64);
2947 V<Float64> temp2 =
2948 __ Float64Sub(__ Float64Add(two_52, temp1), two_52);
2949 GOTO_IF_NOT(__ Float64LessThan(temp1, temp2), done,
2950 __ Float64Sub(-0.0, temp2));
2951 GOTO(done, __ Float64Sub(-0.0, __ Float64Sub(temp2, 1.0)));
2952 }
2953
2954 BIND(done, result);
2955 return result;
2956 } else if (kind == FloatUnaryOp::Kind::kRoundDown) {
2957 // General case for floor.
2958 //
2959 // if 0.0 < input then
2960 // if 2^52 <= input then
2961 // input
2962 // else
2963 // let temp1 = (2^52 + input) - 2^52 in
2964 // if input < temp1 then
2965 // temp1 - 1
2966 // else
2967 // temp1
2968 // else
2969 // if input == 0 then
2970 // input
2971 // else
2972 // if input <= -2^52 then
2973 // input
2974 // else
2975 // let temp1 = -0 - input in
2976 // let temp2 = (2^52 + temp1) - 2^52 in
2977 // if temp2 < temp1 then
2978 // -1 - temp2
2979 // else
2980 // -0 - temp2
2981
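// Worked example: floor(-0.5). temp1 == -0 - (-0.5) == 0.5, temp2 ==
// (2^52 + 0.5) - 2^52 == 0.0, and since temp2 < temp1 we return
// -1 - temp2 == -1.0.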
2982 Label<Float64> done(this);
2983
2984 IF (LIKELY(__ Float64LessThan(0.0, input_f64))) {
2985 GOTO_IF(UNLIKELY(__ Float64LessThanOrEqual(two_52, input_f64)),
2986 done, input_f64);
2987 V<Float64> temp1 =
2988 __ Float64Sub(__ Float64Add(two_52, input_f64), two_52);
2989 GOTO_IF_NOT(__ Float64LessThan(input_f64, temp1), done, temp1);
2990 GOTO(done, __ Float64Sub(temp1, 1.0));
2991 } ELSE IF (UNLIKELY(__ Float64Equal(input_f64, 0.0))) {
2992 GOTO(done, input_f64);
2993 } ELSE IF (UNLIKELY(
2994 __ Float64LessThanOrEqual(input_f64, minus_two_52))) {
2995 GOTO(done, input_f64);
2996 } ELSE {
2997 V<Float64> temp1 = __ Float64Sub(-0.0, input_f64);
2998 V<Float64> temp2 =
2999 __ Float64Sub(__ Float64Add(two_52, temp1), two_52);
3000 GOTO_IF_NOT(__ Float64LessThan(temp2, temp1), done,
3001 __ Float64Sub(-0.0, temp2));
3002 GOTO(done, __ Float64Sub(-1.0, temp2));
3003 }
3004
3005 BIND(done, result);
3006 return result;
3007 } else if (kind == FloatUnaryOp::Kind::kRoundTiesEven) {
3008 // General case for round ties to even:
3009 //
3010 // let value = floor(input) in
3011 // let temp1 = input - value in
3012 // if temp1 < 0.5 then
3013 // value
3014 // else if 0.5 < temp1 then
3015 // value + 1.0
3016 // else
3017 // let temp2 = value % 2.0 in
3018 // if temp2 == 0.0 then
3019 // value
3020 // else
3021 // value + 1.0
3022
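// Worked example: 2.5 rounds to 2.0 (temp1 == 0.5 and 2.0 mod 2.0 ==
// 0.0), while 3.5 rounds up to 4.0 (3.0 mod 2.0 != 0.0).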
3023 Label<Float64> done(this);
3024
3025 V<Float64> value = __ Float64RoundDown(input_f64);
3026 V<Float64> temp1 = __ Float64Sub(input_f64, value);
3027 GOTO_IF(__ Float64LessThan(temp1, 0.5), done, value);
3028 GOTO_IF(__ Float64LessThan(0.5, temp1), done,
3029 __ Float64Add(value, 1.0));
3030
3031 V<Float64> temp2 = __ Float64Mod(value, 2.0);
3032 GOTO_IF(__ Float64Equal(temp2, 0.0), done, value);
3033 GOTO(done, __ Float64Add(value, 1.0));
3034
3035 BIND(done, result);
3036 return result;
3037 } else if (kind == FloatUnaryOp::Kind::kRoundToZero) {
3038 // General case for trunc.
3039 //
3040 // if 0.0 < input then
3041 // if 2^52 <= input then
3042 // input
3043 // else
3044 // let temp1 = (2^52 + input) - 2^52 in
3045 // if input < temp1 then
3046 // temp1 - 1
3047 // else
3048 // temp1
3049 // else
3050 // if input == 0 then
3051 // input
3052 // if input <= -2^52 then
3053 // input
3054 // else
3055 // let temp1 = -0 - input in
3056 // let temp2 = (2^52 + temp1) - 2^52 in
3057 // if temp1 < temp2 then
3058 // -0 - (temp2 - 1)
3059 // else
3060 // -0 - temp2
3061
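// Worked example: trunc(-1.5). temp1 == 1.5, temp2 == (2^52 + 1.5) -
// 2^52 == 2.0 (ties to even), so temp1 < temp2 and we return
// -0 - (temp2 - 1) == -1.0.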
3062 Label<Float64> done(this);
3063
3064 IF (__ Float64LessThan(0.0, input_f64)) {
3065 GOTO_IF(UNLIKELY(__ Float64LessThanOrEqual(two_52, input_f64)),
3066 done, input_f64);
3067
3068 V<Float64> temp1 =
3069 __ Float64Sub(__ Float64Add(two_52, input_f64), two_52);
3070 GOTO_IF(__ Float64LessThan(input_f64, temp1), done,
3071 __ Float64Sub(temp1, 1.0));
3072 GOTO(done, temp1);
3073 } ELSE {
3074 GOTO_IF(UNLIKELY(__ Float64Equal(input_f64, 0.0)), done, input_f64);
3075 GOTO_IF(
3076 UNLIKELY(__ Float64LessThanOrEqual(input_f64, minus_two_52)),
3077 done, input_f64);
3078
3079 V<Float64> temp1 = __ Float64Sub(-0.0, input_f64);
3080 V<Float64> temp2 =
3081 __ Float64Sub(__ Float64Add(two_52, temp1), two_52);
3082
3083 IF (__ Float64LessThan(temp1, temp2)) {
3084 GOTO(done, __ Float64Sub(-0.0, __ Float64Sub(temp2, 1.0)));
3085 } ELSE {
3086 GOTO(done, __ Float64Sub(-0.0, temp2));
3087 }
3088 }
3089
3090 BIND(done, result);
3091 return result;
3092 }
3093 UNREACHABLE();
3094 }
3095 default:
3097 goto no_change;
3098 }
3099 UNREACHABLE();
3100 }
3101
3103 Handle<FeedbackCell> feedback_cell) {
3104 // Check that {input} is actually a JSFunction.
3105 V<Map> map = __ LoadMapField(input);
3106 V<Word32> instance_type = __ LoadInstanceTypeField(map);
3107 V<Word32> is_function_type = __ Uint32LessThanOrEqual(
3108 __ Word32Sub(instance_type, FIRST_JS_FUNCTION_TYPE),
3109 (LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
3110 __ DeoptimizeIfNot(is_function_type, frame_state,
3111 DeoptimizeReason::kWrongCallTarget, FeedbackSource{});
3112
3113 // Check that the {input}s feedback vector cell matches the one
3114 // we recorded before.
3115 V<HeapObject> cell = __ template LoadField<HeapObject>(
3116 input, AccessBuilder::ForJSFunctionFeedbackCell());
3117 __ DeoptimizeIfNot(__ TaggedEqual(cell, __ HeapConstant(feedback_cell)),
3118 frame_state, DeoptimizeReason::kWrongFeedbackCell,
3119 FeedbackSource{});
3120 return input;
3121 }
3122
3125 V<FrameState> frame_state) {
3126 Label<> done(this);
3127 // Check if {expected} and {value} are the same, which is the likely case.
3128 GOTO_IF(LIKELY(__ TaggedEqual(expected, value)), done);
3129
3130 // Now {value} could still be a non-internalized String that matches
3131 // {expected}.
3132 __ DeoptimizeIf(__ ObjectIsSmi(value), frame_state,
3133 DeoptimizeReason::kWrongName, FeedbackSource{});
3134 V<Map> value_map = __ LoadMapField(value);
3135 V<Word32> value_instance_type = __ LoadInstanceTypeField(value_map);
3136 V<Word32> value_representation =
3137 __ Word32BitwiseAnd(value_instance_type, kStringRepresentationMask);
3138 // ThinString
3139 IF (__ Word32Equal(value_representation, kThinStringTag)) {
3140 // The {value} is a ThinString, let's check the actual value.
3141 V<String> value_actual = __ template LoadField<String>(
3142 value, AccessBuilder::ForThinStringActual());
3143 __ DeoptimizeIfNot(__ TaggedEqual(expected, value_actual), frame_state,
3144 DeoptimizeReason::kWrongName, FeedbackSource{});
3145 } ELSE {
3146 // Check that the {value} is a non-internalized String, if it's anything
3147 // else it cannot match the recorded feedback {expected} anyways.
3148 __ DeoptimizeIfNot(
3149 __ Word32Equal(
3150 __ Word32BitwiseAnd(value_instance_type,
3153 frame_state, DeoptimizeReason::kWrongName, FeedbackSource{});
3154
3155 // Try to find the {value} in the string table.
3156 MachineSignature::Builder builder(__ graph_zone(), 1, 2);
3157 builder.AddReturn(MachineType::AnyTagged());
3158 builder.AddParam(MachineType::Pointer());
3159 builder.AddParam(MachineType::AnyTagged());
3160 OpIndex try_string_to_index_or_lookup_existing = __ ExternalConstant(
3161 ExternalReference::try_string_to_index_or_lookup_existing());
3162 OpIndex isolate_ptr =
3163 __ ExternalConstant(ExternalReference::isolate_address());
3164 V<String> value_internalized = V<String>::Cast(__ Call(
3165 try_string_to_index_or_lookup_existing, {isolate_ptr, value},
3169
3170 // Now see if the results match.
3171 __ DeoptimizeIfNot(__ TaggedEqual(expected, value_internalized),
3172 frame_state, DeoptimizeReason::kWrongName,
3173 FeedbackSource{});
3174 }
3175
3176 GOTO(done);
3177
3178 BIND(done);
3179 return V<None>::Invalid();
3180 }
3181
3183 return __ BitcastWordPtrToTagged(__ template LoadField<WordPtr>(
3185 }
3186
3189 __ BitcastTaggedToWordPtr(object));
3190 return V<None>::Invalid();
3191 }
3192
3195 switch (mode) {
3197 return __ CallBuiltin_SameValue(isolate_, left, right);
3199 return __ CallBuiltin_SameValueNumbersOnly(isolate_, left, right);
3200 }
3201 }
3202
3204 Label<Word32> done(this);
3205
3206 // TODO(dmercadier): Optimize if one of the sides is a constant.
3207 IF (__ Float64Equal(left, right)) {
3208 // Even if the values are float64-equal, we still need to distinguish
3209 // zero and minus zero.
3210 V<Word32> left_hi = __ Float64ExtractHighWord32(left);
3211 V<Word32> right_hi = __ Float64ExtractHighWord32(right);
3212 GOTO(done, __ Word32Equal(left_hi, right_hi));
3213 } ELSE {
3214 // Return true iff both {lhs} and {rhs} are NaN.
3215 GOTO_IF(__ Float64Equal(left, left), done, 0);
3216 GOTO_IF(__ Float64Equal(right, right), done, 0);
3217 GOTO(done, 1);
3218 }
3219
3220 BIND(done, result);
3221 return result;
3222 }
3223
3225 __ CallRuntime_Abort(isolate_, __ NoContextConstant(),
3226 __ TagSmi(static_cast<int>(reason)));
3227 return V<None>::Invalid();
3228 }
3229
3231 V<Object> elements) {
3232 Label<Object> done(this);
3233 // Load the current map of {elements}.
3234 V<Map> map = __ LoadMapField(elements);
3235
3236 // Check if {elements} is not a copy-on-write FixedArray.
3237 // Nothing to do if the {elements} are not copy-on-write.
3238 GOTO_IF(LIKELY(__ TaggedEqual(
3239 map, __ HeapConstant(factory_->fixed_array_map()))),
3240 done, elements);
3241
3242 // We need to take a copy of the {elements} and set them up for {object}.
3243 V<Object> copy =
3244 __ CallBuiltin_CopyFastSmiOrObjectElements(isolate_, object);
3245 GOTO(done, copy);
3246
3247 BIND(done, result);
3248 return result;
3249 }
3250
3253 V<Word32> elements_length,
3254 V<FrameState> frame_state,
3256 const FeedbackSource& feedback) {
3257 Label<Object> done(this);
3258 // Check if we need to grow the {elements} backing store.
3259 GOTO_IF(LIKELY(__ Uint32LessThan(index, elements_length)), done, elements);
3260 // We need to grow the {elements} for {object}.
3261 V<Object> new_elements;
3262 switch (mode) {
3264 new_elements = __ CallBuiltin_GrowFastDoubleElements(isolate_, object,
3265 __ TagSmi(index));
3266 break;
3268 new_elements = __ CallBuiltin_GrowFastSmiOrObjectElements(
3269 isolate_, object, __ TagSmi(index));
3270 break;
3271 }
3272
3273 // Ensure that we were able to grow the {elements}.
3274 __ DeoptimizeIf(__ ObjectIsSmi(new_elements), frame_state,
3275 DeoptimizeReason::kCouldNotGrowElements, feedback);
3276 GOTO(done, new_elements);
3277
3278 BIND(done, result);
3279 return result;
3280 }
3281
3283 const ElementsTransition& transition) {
3284 V<Map> source_map = __ HeapConstant(transition.source().object());
3285 V<Map> target_map = __ HeapConstant(transition.target().object());
3286
3287 // Load the current map of {object}.
3288 V<Map> map = __ LoadMapField(object);
3289
3290 // Check if {map} is the same as {source_map}.
3291 IF (UNLIKELY(__ TaggedEqual(map, source_map))) {
3292 switch (transition.mode()) {
3294 // In-place migration of {object}, just store the {target_map}.
3295 __ StoreField(object, AccessBuilder::ForMap(), target_map);
3296 break;
3298 // Instance migration, call out to the runtime for {object}.
3299 __ CallRuntime_TransitionElementsKind(
3300 isolate_, __ NoContextConstant(), object, target_map);
3301 break;
3302 }
3303 }
3304
3305 return V<None>::Invalid();
3306 }
3307
3309 V<HeapObject> object, V<Map> map, V<FrameState> frame_state,
3310 const ElementsTransitionWithMultipleSources& transition) {
3311 Label<> done(this);
3312
3313 const MapRef target = transition.target();
3314 V<Map> target_map = __ HeapConstant(target.object());
3315
3316 IF (LIKELY(__ TaggedEqual(map, target_map))) {
3317 GOTO(done);
3318 }
3319
3320 const ZoneRefSet<Map>& transition_sources = transition.sources();
3321 for (const MapRef transition_source : transition_sources) {
3322 bool is_simple = IsSimpleMapChangeTransition(
3323 transition_source.elements_kind(), target.elements_kind());
3324
3325 IF (__ TaggedEqual(map, __ HeapConstant(transition_source.object()))) {
3326 if (is_simple) {
3327 // In-place migration of {object}, just store the {target_map}.
3328 __ StoreField(object, AccessBuilder::ForMap(), target_map);
3329 } else {
3330 // Instance migration, call out to the runtime for {object}.
3331 __ CallRuntime_TransitionElementsKind(
3332 isolate_, __ NoContextConstant(), object, target_map);
3333 }
3334 GOTO(done);
3335 }
3336 }
3337 // Successful transitions jumped to `done`. If we didn't jump, we know the
3338 // map is not the target map.
3339 __ Deoptimize(frame_state, DeoptimizeReason::kWrongMap,
3340 transition.feedback());
3341
3342 BIND(done);
3343
3344 // Inserting an AssumeMap so that subsequent optimizations know the map of
3345 // this object.
3346 ZoneRefSet<Map> maps({target}, __ graph_zone());
3347 __ AssumeMap(object, maps);
3348
3349 return V<None>::Invalid();
3350 }
3351
3354 switch (kind) {
3356 return __ CallBuiltin_FindOrderedHashMapEntry(
3357 isolate_, __ NoContextConstant(), data_structure, key);
3359 // Compute the integer hash code.
3360 V<WordPtr> hash = __ ChangeUint32ToUintPtr(ComputeUnseededHash(key));
3361
3362 V<WordPtr> number_of_buckets =
3363 __ ChangeInt32ToIntPtr(__ UntagSmi(__ template LoadField<Smi>(
3364 data_structure,
3366 hash = __ WordPtrBitwiseAnd(hash, __ WordPtrSub(number_of_buckets, 1));
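// {number_of_buckets} is a power of two, so masking with
// (number_of_buckets - 1) selects the bucket like a modulo would,
// e.g. hash 13 with 4 buckets lands in bucket 1.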
3367 V<WordPtr> first_entry = __ ChangeInt32ToIntPtr(__ UntagSmi(__ Load(
3368 data_structure,
3369 __ WordPtrAdd(__ WordPtrShiftLeft(hash, kTaggedSizeLog2),
3372
3373 Label<WordPtr> done(this);
3374 LoopLabel<WordPtr> loop(this);
3375 GOTO(loop, first_entry);
3376
3377 BIND_LOOP(loop, entry) {
3378 GOTO_IF(__ WordPtrEqual(entry, OrderedHashMap::kNotFound), done,
3379 entry);
3380 V<WordPtr> candidate =
3381 __ WordPtrAdd(__ WordPtrMul(entry, OrderedHashMap::kEntrySize),
3382 number_of_buckets);
3383 V<Object> candidate_key = __ Load(
3384 data_structure,
3385 __ WordPtrAdd(__ WordPtrShiftLeft(candidate, kTaggedSizeLog2),
3388
3389 IF (LIKELY(__ ObjectIsSmi(candidate_key))) {
3390 GOTO_IF(
3391 __ Word32Equal(__ UntagSmi(V<Smi>::Cast(candidate_key)), key),
3392 done, candidate);
3393 } ELSE IF (__ TaggedEqual(
3394 __ LoadMapField(candidate_key),
3395 __ HeapConstant(factory_->heap_number_map()))) {
3396 GOTO_IF(__ Float64Equal(__ LoadHeapNumberValue(
3397 V<HeapNumber>::Cast(candidate_key)),
3398 __ ChangeInt32ToFloat64(key)),
3399 done, candidate);
3400 }
3401
3402 V<WordPtr> next_entry = __ ChangeInt32ToIntPtr(__ UntagSmi(__ Load(
3403 data_structure,
3404 __ WordPtrAdd(__ WordPtrShiftLeft(candidate, kTaggedSizeLog2),
3409 GOTO(loop, next_entry);
3410 }
3411
3412 BIND(done, result);
3413 return result;
3414 }
3416 return __ CallBuiltin_FindOrderedHashSetEntry(
3417 isolate_, __ NoContextConstant(), data_structure, key);
3418 }
3419 }
3420
3421 // Loads a surrogate pair from {string} starting at {index} and returns the
3422 // result encoded in {encoding}. Note that UTF32 encoding is identical to the
3423 // code point. If the string's {length} is already available, it can be
3424 // passed, otherwise it will be loaded when required.
3426 V<WordPtr> index, UnicodeEncoding encoding) {
3427 Label<Word32> done(this);
3428
3429 V<Word32> first_code_unit = __ StringCharCodeAt(string, index);
3430 GOTO_IF_NOT(UNLIKELY(__ Word32Equal(
3431 __ Word32BitwiseAnd(first_code_unit, 0xFC00), 0xD800)),
3432 done, first_code_unit);
3433 if (!length.has_value()) {
3434 length = __ ChangeUint32ToUintPtr(__ template LoadField<Word32>(
3436 }
3437 V<WordPtr> next_index = __ WordPtrAdd(index, 1);
3438 GOTO_IF_NOT(__ IntPtrLessThan(next_index, length.value()), done,
3439 first_code_unit);
3440
3441 V<Word32> second_code_unit = __ StringCharCodeAt(string, next_index);
3443 __ Word32Equal(__ Word32BitwiseAnd(second_code_unit, 0xFC00), 0xDC00),
3444 done, first_code_unit);
3445
3446 switch (encoding) {
3448// Need to swap the order for big-endian platforms
3449#if V8_TARGET_BIG_ENDIAN
3450 V<Word32> value = __ Word32BitwiseOr(
3451 __ Word32ShiftLeft(first_code_unit, 16), second_code_unit);
3452#else
3453 V<Word32> value = __ Word32BitwiseOr(
3454 __ Word32ShiftLeft(second_code_unit, 16), first_code_unit);
3455#endif
3456 GOTO(done, value);
3457 break;
3458 }
3460 const int32_t surrogate_offset = 0x10000 - (0xD800 << 10) - 0xDC00;
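// The code point is ((lead - 0xD800) << 10) + (trail - 0xDC00) + 0x10000;
// {surrogate_offset} folds the three constants so a single add suffices.
// E.g. lead 0xD83D and trail 0xDE00 decode to U+1F600.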
3461 V<Word32> value =
3462 __ Word32Add(__ Word32ShiftLeft(first_code_unit, 10),
3463 __ Word32Add(second_code_unit, surrogate_offset));
3464 GOTO(done, value);
3465 break;
3466 }
3467 }
3468
3469 BIND(done, result);
3470 return result;
3471 }
3472
3474 Label<String> done(this);
3475
3476 // Check if the {code} is a one byte character.
3477 IF (LIKELY(__ Uint32LessThanOrEqual(code, String::kMaxOneByteCharCode))) {
3478 // Load the isolate wide single character string table.
3479 V<FixedArray> table = __ SingleCharacterStringTableConstant();
3480
3481 // Compute the {table} index for {code}.
3482 V<WordPtr> index = __ ChangeUint32ToUintPtr(code);
3483
3484 // Load the string for the {code} from the single character string
3485 // table.
3486 V<String> entry = __ LoadElement(
3488
3489 // Use the {entry} from the {table}.
3490 GOTO(done, entry);
3491 } ELSE {
3492 auto string =
3493 AllocateSeqTwoByteString(1, AllocationType::kYoung);
3494 __ InitializeElement(
3495 string, AccessBuilderTS::ForSeqTwoByteStringCharacter(), 0, code);
3496 GOTO(done, __ FinishInitialization(std::move(string)));
3497 }
3498
3499 BIND(done, result);
3500 return result;
3501 }
3502
3504 UnicodeEncoding encoding) {
3505 Label<String> done(this);
3506 // Check if the input is a single code unit.
3507 GOTO_IF(LIKELY(__ Uint32LessThan(codepoint, 0x10000)), done,
3508 StringFromSingleCharCode(codepoint));
3509
3511 switch (encoding) {
3513 code = codepoint;
3514 break;
3516 // Convert UTF32 to UTF16 code units and store as a 32 bit word.
3517 V<Word32> lead_offset = __ Word32Constant(0xD800 - (0x10000 >> 10));
3518
3519 // lead = (codepoint >> 10) + LEAD_OFFSET
3520 V<Word32> lead = __ Word32Add(__ Word32ShiftRightLogical(codepoint, 10),
3521 lead_offset);
3522
3523 // trail = (codepoint & 0x3FF) + 0xDC00
3524 V<Word32> trail =
3525 __ Word32Add(__ Word32BitwiseAnd(codepoint, 0x3FF), 0xDC00);
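// Worked example: codepoint U+1F600 yields lead == 0xD83D and
// trail == 0xDE00, its UTF-16 surrogate pair.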
3526
3527 // codepoint = (trail << 16) | lead
3528#if V8_TARGET_BIG_ENDIAN
3529 code = __ Word32BitwiseOr(__ Word32ShiftLeft(lead, 16), trail);
3530#else
3531 code = __ Word32BitwiseOr(__ Word32ShiftLeft(trail, 16), lead);
3532#endif
3533 break;
3534 }
3535 }
3536
3537 auto string =
3538 AllocateSeqTwoByteString(2, AllocationType::kYoung);
3539 // Write the code as a single 32-bit value by adapting the elements
3540 // access to SeqTwoByteString characters.
3541 auto access = AccessBuilderTS::ForSeqTwoByteStringCharacter();
3542 access.machine_type = MachineType::Uint32();
3543 __ InitializeElement(string, access, 0, code);
3544 GOTO(done, __ FinishInitialization(std::move(string)));
3545
3546 BIND(done, result);
3547 return result;
3548 }
3549
3551 uint32_t length, AllocationType type) {
3552 __ CodeComment("AllocateSeqTwoByteString");
3553 DCHECK_GT(length, 0);
3554 // Allocate a new string object.
3555 Uninitialized<SeqTwoByteString> string =
3556 __ template Allocate<SeqTwoByteString>(
3557 SeqTwoByteString::SizeFor(length), type);
3558 // Set padding to 0.
3559 __ Initialize(string, __ IntPtrConstant(0),
3563 // Initialize remaining fields.
3564 __ InitializeField(string, AccessBuilderTS::ForMap(),
3565 __ SeqTwoByteStringMapConstant());
3566 __ InitializeField(string, AccessBuilderTS::ForStringLength(), length);
3567 __ InitializeField(string, AccessBuilderTS::ForNameRawHashField(),
3568 Name::kEmptyHashField);
3569 // Do not finish allocation here, because the caller has to initialize
3570 // characters.
3571 return string;
3572 }
3573
3574#ifdef V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA
3575 V<Object> REDUCE(GetContinuationPreservedEmbedderData)() {
3576 return __ LoadOffHeap(
3577 __ IsolateField(IsolateFieldId::kContinuationPreservedEmbedderData),
3579 }
3580
3581 V<None> REDUCE(SetContinuationPreservedEmbedderData)(V<Object> data) {
3582 __ StoreOffHeap(
3583 __ IsolateField(IsolateFieldId::kContinuationPreservedEmbedderData),
3585 return {};
3586 }
3587#endif // V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA
3588
3589 private:
3590 V<Word32> BuildUint32Mod(V<Word32> left, V<Word32> right) {
3591 Label<Word32> done(this);
3592
3593 // Compute the mask for the {rhs}.
3594 V<Word32> msk = __ Word32Sub(right, 1);
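// For a power-of-two {rhs}, left % right == left & (right - 1):
// e.g. 13 % 8 == 13 & 7 == 5.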
3595
3596 // Check if the {rhs} is a power of two.
3597 IF (__ Word32Equal(__ Word32BitwiseAnd(right, msk), 0)) {
3598 // The {rhs} is a power of two, just do a fast bit masking.
3599 GOTO(done, __ Word32BitwiseAnd(left, msk));
3600 } ELSE {
3601 // The {rhs} is not a power of two, do a generic Uint32Mod.
3602 GOTO(done, __ Uint32Mod(left, right));
3603 }
3604
3605 BIND(done, result);
3606 return result;
3607 }
3608
3609 // Pass {bitfield} = {digit} = OpIndex::Invalid() to construct the canonical
3610 // 0n BigInt.
3612 if (Asm().generating_unreachable_operations()) return V<BigInt>::Invalid();
3613
3614 DCHECK(Is64());
3615 DCHECK_EQ(bitfield.valid(), digit.valid());
3616 static constexpr auto zero_bitfield =
3618
3619 V<Map> map = __ HeapConstant(factory_->bigint_map());
3620 auto bigint = __ template Allocate<FreshlyAllocatedBigInt>(
3621 __ IntPtrConstant(BigInt::SizeFor(digit.valid() ? 1 : 0)),
3623 __ InitializeField(bigint, AccessBuilder::ForMap(), map);
3624 __ InitializeField(
3626 bitfield.valid() ? bitfield : __ Word32Constant(zero_bitfield));
3627
3628 // BigInts have no padding on 64-bit architectures with pointer compression.
3629#ifdef BIGINT_NEEDS_PADDING
3630 __ InitializeField(bigint, AccessBuilder::ForBigIntOptionalPadding(),
3631 __ Word32Constant(0));
3632#endif
3633 if (digit.valid()) {
3634 __ InitializeField(
3636 }
3637 return V<BigInt>::Cast(__ FinishInitialization(std::move(bigint)));
3638 }
3639
3640 void TagSmiOrOverflow(V<Word32> input, Label<>* overflow,
3641 Label<Number>* done) {
3643
3644 // Check for overflow at the same time that we are smi tagging.
3645 // Since smi tagging shifts left by one, it's the same as adding value
3646 // twice.
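// E.g. input == 5: 5 + 5 == 10 == 5 << 1, the Smi encoding of 5; if the
// addition overflows, the value cannot be represented as a Smi.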
3647 V<Tuple<Word32, Word32>> add = __ Int32AddCheckOverflow(input, input);
3648 V<Word32> check = __ template Projection<1>(add);
3649 GOTO_IF(UNLIKELY(check), *overflow);
3650 GOTO(*done, __ BitcastWord32ToSmi(__ template Projection<0>(add)));
3651 }
3652
3653 // `IsNonZero` converts any non-0 value into 1.
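// E.g. value == 7: inner (7 == 0) yields 0, outer (0 == 0) yields 1;
// for value == 0 the result is 0.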
3654 V<Word32> IsNonZero(V<Word32> value) {
3655 return __ Word32Equal(__ Word32Equal(value, 0), 0);
3656 }
3657
3658 V<HeapNumber> AllocateHeapNumber(V<Float64> value) {
3659 return __ AllocateHeapNumberWithValue(value, factory_);
3660 }
3661
3663 V<Object> heap_object, V<FrameState> frame_state,
3665 const FeedbackSource& feedback) {
3666 V<Map> map = __ LoadMapField(heap_object);
3667 switch (input_kind) {
3671 UNREACHABLE();
3674 V<Word32> is_number =
3675 __ TaggedEqual(map, __ HeapConstant(factory_->heap_number_map()));
3676 __ DeoptimizeIfNot(is_number, frame_state,
3677 DeoptimizeReason::kNotAHeapNumber, feedback);
3678 __ ChangeFloat64ToAdditiveSafeIntegerOrDeopt(
3679 __ LoadHeapNumberValue(V<HeapNumber>::Cast(heap_object)),
3680 frame_state, CheckForMinusZeroMode::kCheckForMinusZero, feedback);
3681 break;
3682 }
3684 V<Word32> is_number =
3685 __ TaggedEqual(map, __ HeapConstant(factory_->heap_number_map()));
3686 __ DeoptimizeIfNot(is_number, frame_state,
3687 DeoptimizeReason::kNotAHeapNumber, feedback);
3688 break;
3689 }
3692#if V8_STATIC_ROOTS_BOOL
3693 // TODO(leszeks): Consider checking the boolean oddballs by value,
3694 // before loading the map.
3695 static_assert(StaticReadOnlyRoot::kBooleanMap + Map::kSize ==
3696 StaticReadOnlyRoot::kHeapNumberMap);
3697 V<Word32> map_int32 =
3698 __ TruncateWordPtrToWord32(__ BitcastHeapObjectToWordPtr(map));
3699 V<Word32> is_in_range = __ Uint32LessThanOrEqual(
3700 __ Word32Sub(map_int32,
3701 __ Word32Constant(StaticReadOnlyRoot::kBooleanMap)),
3702 __ Word32Constant(StaticReadOnlyRoot::kHeapNumberMap -
3703 StaticReadOnlyRoot::kBooleanMap));
3704 __ DeoptimizeIfNot(is_in_range, frame_state,
3705 DeoptimizeReason::kNotANumberOrBoolean, feedback);
3706#else
3707 IF_NOT (__ TaggedEqual(map,
3708 __ HeapConstant(factory_->heap_number_map()))) {
3709 __ DeoptimizeIfNot(
3710 __ TaggedEqual(map, __ HeapConstant(factory_->boolean_map())),
3711 frame_state, DeoptimizeReason::kNotANumberOrBoolean, feedback);
3712 }
3713#endif
3714
3715 break;
3716 }
3719#if V8_STATIC_ROOTS_BOOL
3720 constexpr auto kNumberOrOddballRange =
3721 InstanceTypeChecker::UniqueMapRangeOfInstanceTypeRange(
3722 HEAP_NUMBER_TYPE, ODDBALL_TYPE)
3723 .value();
3724 V<Word32> map_int32 =
3725 __ TruncateWordPtrToWord32(__ BitcastHeapObjectToWordPtr(map));
3726 V<Word32> is_in_range = __ Uint32LessThanOrEqual(
3727 __ Word32Sub(map_int32,
3728 __ Word32Constant(kNumberOrOddballRange.first)),
3729 __ Word32Constant(kNumberOrOddballRange.second -
3730 kNumberOrOddballRange.first));
3731 __ DeoptimizeIfNot(is_in_range, frame_state,
3732 DeoptimizeReason::kNotANumberOrOddball, feedback);
3733#else
3734 IF_NOT (__ TaggedEqual(map,
3735 __ HeapConstant(factory_->heap_number_map()))) {
3736 // Oddballs also contain the numeric value, so let us just check that
3737 // we have an oddball here.
3738 V<Word32> instance_type = __ LoadInstanceTypeField(map);
3739 __ DeoptimizeIfNot(__ Word32Equal(instance_type, ODDBALL_TYPE),
3740 frame_state,
3741 DeoptimizeReason::kNotANumberOrOddball, feedback);
3742 }
3743#endif
3744
3745 break;
3746 }
3747 }
3748 return __ template LoadField<Float64>(
3750 }
3751
3752 V<Word32> LoadFromSeqString(V<Object> receiver, V<WordPtr> position,
3753 V<Word32> onebyte) {
3754 Label<Word32> done(this);
3755
3756 IF (onebyte) {
3757 GOTO(done, __ template LoadNonArrayBufferElement<Word32>(
3759 position));
3760 } ELSE {
3761 GOTO(done, __ template LoadNonArrayBufferElement<Word32>(
3763 position));
3764 }
3765
3766 BIND(done, result);
3767 return result;
3768 }
3769
3770 void MigrateInstanceOrDeopt(V<HeapObject> heap_object, V<Map> heap_object_map,
3771 V<FrameState> frame_state,
3772 const FeedbackSource& feedback) {
3773 // If {heap_object_map} is not deprecated, the migration attempt does not
3774 // make sense.
3775 V<Word32> bitfield3 = __ template LoadField<Word32>(
3776 heap_object_map, AccessBuilder::ForMapBitField3());
3777 V<Word32> deprecated =
3778 __ Word32BitwiseAnd(bitfield3, Map::Bits3::IsDeprecatedBit::kMask);
3779 __ DeoptimizeIfNot(deprecated, frame_state, DeoptimizeReason::kWrongMap,
3780 feedback);
3781 V<Object> result = __ CallRuntime_TryMigrateInstance(
3782 isolate_, __ NoContextConstant(), heap_object);
3783 // TryMigrateInstance returns a Smi value to signal failure.
3784 __ DeoptimizeIf(__ ObjectIsSmi(result), frame_state,
3785 DeoptimizeReason::kInstanceMigrationFailed, feedback);
3786 }
3787
3789 V<HeapObject> heap_object, V<Map> heap_object_map,
3790 V<FrameState> frame_state, const FeedbackSource& feedback) {
3791 // If {heap_object_map} is not deprecated, the migration attempt does not
3792 // make sense.
3793 V<Word32> bitfield3 = __ template LoadField<Word32>(
3794 heap_object_map, AccessBuilder::ForMapBitField3());
3795 V<Word32> deprecated =
3796 __ Word32BitwiseAnd(bitfield3, Map::Bits3::IsDeprecatedBit::kMask);
3797 __ DeoptimizeIfNot(deprecated, frame_state, DeoptimizeReason::kWrongMap,
3798 feedback);
3799 __ CallRuntime_TryMigrateInstanceAndMarkMapAsMigrationTarget(
3800 isolate_, __ NoContextConstant(), heap_object);
3801 }
3802
3803 // TODO(nicohartmann@): Might use the CallBuiltinDescriptors here.
3805 std::initializer_list<OpIndex> arguments) {
3806 DCHECK_IMPLIES(builtin == Builtin::kBigIntUnaryMinus,
3807 arguments.size() == 1);
3808 DCHECK_IMPLIES(builtin != Builtin::kBigIntUnaryMinus,
3809 arguments.size() == 2);
3811 args.push_back(__ NoContextConstant());
3812
3813 Callable callable = Builtins::CallableFor(isolate_, builtin);
3814 auto descriptor = Linkage::GetStubCallDescriptor(
3815 __ graph_zone(), callable.descriptor(),
3818 auto ts_descriptor = TSCallDescriptor::Create(
3820 return __ Call(__ HeapConstant(callable.code()), V<FrameState>::Invalid(),
3821 base::VectorOf(args), ts_descriptor);
3822 }
3823
3825 switch (kind) {
3827 return Builtin::kBigIntAddNoThrow;
3829 return Builtin::kBigIntSubtractNoThrow;
3831 return Builtin::kBigIntMultiplyNoThrow;
3833 return Builtin::kBigIntDivideNoThrow;
3835 return Builtin::kBigIntModulusNoThrow;
3837 return Builtin::kBigIntBitwiseAndNoThrow;
3839 return Builtin::kBigIntBitwiseOrNoThrow;
3841 return Builtin::kBigIntBitwiseXorNoThrow;
3843 return Builtin::kBigIntShiftLeftNoThrow;
3845 return Builtin::kBigIntShiftRightNoThrow;
3846 }
3847 }
3848
3850 if (__ matcher().MatchZero(base)) return external;
3851 V<WordPtr> untagged_base = __ BitcastTaggedToWordPtr(base);
3853 // Zero-extend Tagged_t to UintPtr according to the current compression
3854 // scheme so that the addition with |external_pointer| (which already
3855 // contains the compensated offset value) will decompress the tagged value.
3856 // See JSTypedArray::ExternalPointerCompensationForOnHeapArray() for
3857 // details.
3858 untagged_base =
3859 __ ChangeUint32ToUintPtr(__ TruncateWordPtrToWord32(untagged_base));
3860 }
3861 return __ WordPtrAdd(untagged_base, external);
3862 }
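// In effect (a sketch of the computation above, not additional V8 API):
// with pointer compression enabled the on-heap case computes
//   data_ptr = uintptr_t(uint32_t(base_tagged)) + external
// where {external} already contains the compression-compensated offset,
// so the single addition also decompresses the tagged base.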
3863
3864 V<Word32> ComputeUnseededHash(V<Word32> value) {
3865 // See v8::internal::ComputeUnseededHash()
3866 value = __ Word32Add(__ Word32BitwiseXor(value, 0xFFFFFFFF),
3867 __ Word32ShiftLeft(value, 15));
3868 value = __ Word32BitwiseXor(value, __ Word32ShiftRightLogical(value, 12));
3869 value = __ Word32Add(value, __ Word32ShiftLeft(value, 2));
3870 value = __ Word32BitwiseXor(value, __ Word32ShiftRightLogical(value, 4));
3871 value = __ Word32Mul(value, 2057);
3872 value = __ Word32BitwiseXor(value, __ Word32ShiftRightLogical(value, 16));
3873 value = __ Word32BitwiseAnd(value, 0x3FFFFFFF);
3874 return value;
3875 }
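// The same mixing steps on a plain uint32_t, as a reference sketch of the
// graph built above (wrap-around arithmetic is intentional):
//   uint32_t hash = value;
//   hash = (hash ^ 0xFFFFFFFFu) + (hash << 15);
//   hash ^= hash >> 12;
//   hash += hash << 2;
//   hash ^= hash >> 4;
//   hash *= 2057u;
//   hash ^= hash >> 16;
//   hash &= 0x3FFFFFFFu;  // Keep 30 bits so the result fits in a Smi.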
3876
3877 void TransitionElementsTo(V<JSArray> array, ElementsKind from,
3878 ElementsKind to, Handle<Map> target_map) {
3879 DCHECK(IsMoreGeneralElementsKindTransition(from, to));
3880 DCHECK(to == HOLEY_ELEMENTS || to == HOLEY_DOUBLE_ELEMENTS);
3881
3882 if (IsSimpleMapChangeTransition(from, to)) {
3883 __ StoreField(array, AccessBuilder::ForMap(),
3884 __ HeapConstant(target_map));
3885 } else {
3886 // Instance migration, call out to the runtime for {array}.
3887 __ CallRuntime_TransitionElementsKind(isolate_, __ NoContextConstant(),
3888 array, __ HeapConstant(target_map));
3889 }
3890 }
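// A "simple" transition only generalizes the map (for example
// HOLEY_SMI_ELEMENTS -> HOLEY_ELEMENTS) and is a single map-word store;
// transitions that change the elements representation (Smi/object vs.
// double) must also convert the backing store, so they go through the
// runtime.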
3891
3892 V<Word32> CompareMapAgainstMultipleMaps(V<Map> heap_object_map,
3893 const ZoneRefSet<Map>& maps) {
3894 if (maps.is_empty()) {
3895 return __ Word32Constant(0);
3896 }
3897 V<Word32> result;
3898 for (size_t i = 0; i < maps.size(); ++i) {
3899 V<Map> map = __ HeapConstant(maps[i].object());
3900 if (i == 0) {
3901 result = __ TaggedEqual(heap_object_map, map);
3902 } else {
3903 result =
3904 __ Word32BitwiseOr(result, __ TaggedEqual(heap_object_map, map));
3905 }
3906 }
3907 return result;
3908 }
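// The result is a Word32 boolean: constant 0 for an empty set, otherwise
// the bitwise OR of one TaggedEqual comparison per candidate map. Callers
// (e.g. the CompareMaps lowering) branch or deoptimize on it directly.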
3909
3918
3919 Isolate* isolate_ = __ data()->isolate();
3920 JSHeapBroker* broker_ = __ data()->broker();
3921 const Factory* factory_ = isolate_ ? isolate_->factory() : nullptr;
3922 std::optional<bool> undetectable_objects_protector_ = {};
3923};
3924
3925#include "src/compiler/turboshaft/undef-assembler-macros.inc"
3926
3927} // namespace v8::internal::compiler::turboshaft
3928
3929#endif // V8_COMPILER_TURBOSHAFT_MACHINE_LOWERING_REDUCER_INL_H_