V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
machine-optimization-reducer.h
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_TURBOSHAFT_MACHINE_OPTIMIZATION_REDUCER_H_
#define V8_COMPILER_TURBOSHAFT_MACHINE_OPTIMIZATION_REDUCER_H_

#include <algorithm>
#include <cmath>
#include <cstring>
#include <limits>
#include <optional>
#include <type_traits>

#include "include/v8-internal.h"
#include "src/base/bits.h"
#include "src/base/hashing.h"
#include "src/base/ieee754.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/vector.h"
#include "src/handles/handles.h"
#include "src/numbers/ieee754.h"

#if V8_ENABLE_WEBASSEMBLY
#endif

namespace v8::internal::compiler::turboshaft {
// ******************************** OVERVIEW ********************************
//
// The MachineOptimizationReducer performs basic optimizations on low-level
// operations that can be performed on-the-fly, without requiring type analysis
// or analyzing uses. It largely corresponds to MachineOperatorReducer in
// sea-of-nodes Turbofan.
//
// These peephole optimizations are typically very local: based on the
// immediate inputs of an operation, we try to constant-fold or
// strength-reduce the operation.
//
// Typical examples include:
//
// * Reducing `a == a` to `1`
//
// * Reducing `a + 0` to `a`
//
// * Reducing `a * 2^k` to `a << k`
//

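// As an illustrative sketch (not code from this file), the third rule turns
//
//   WordMul(x, 8)   ==>   ShiftLeft(x, 3)     // 8 == 2^3
//
// replacing a multi-cycle multiply with a single shift while producing
// bit-identical results for every input x.
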
template <typename>
class VariableReducer;
template <typename>
class GraphVisitor;

namespace detail {

// Represents an operation of the form `(source & mask) == masked_value`,
// where each bit set in masked_value also has to be set in mask.
struct BitfieldCheck {
  V<Word> source;
  uint32_t const mask;
  uint32_t const masked_value;
  bool const truncate_from_64_bit;

  BitfieldCheck(V<Word> source, uint32_t mask, uint32_t masked_value,
                bool truncate_from_64_bit)
      : source(source),
        mask(mask),
        masked_value(masked_value),
        truncate_from_64_bit(truncate_from_64_bit) {
    DCHECK_EQ(masked_value & ~mask, 0);
  }

  static std::optional<BitfieldCheck> Detect(const OperationMatcher& matcher,
                                             const Graph& graph,
                                             V<Word> index) {
    // There are two patterns to check for here:
    // 1. Single-bit checks: `(val >> shift) & 1`, where:
    //    - the shift may be omitted, and/or
    //    - the result may be truncated from 64 to 32
    // 2. Equality checks: `(val & mask) == expected`, where:
    //    - val may be truncated from 64 to 32 before masking (see
    //      ReduceWordEqualForConstantRhs)
    const Operation& op = graph.Get(index);
    if (const ComparisonOp* equal = op.TryCast<Opmask::kWord32Equal>()) {
      if (const WordBinopOp* left_and =
              graph.Get(equal->left<Word32>())
                  .TryCast<Opmask::kWord32BitwiseAnd>()) {
        uint32_t mask;
        uint32_t masked_value;
        if (matcher.MatchIntegralWord32Constant(left_and->right(), &mask) &&
            matcher.MatchIntegralWord32Constant(equal->right<Word32>(),
                                                &masked_value)) {
          if ((masked_value & ~mask) != 0) return std::nullopt;
          if (const ChangeOp* truncate =
                  graph.Get(left_and->left())
                      .TryCast<Opmask::kTruncateWord64ToWord32>()) {
            return BitfieldCheck{truncate->input<Word64>(), mask, masked_value,
                                 true};
          } else {
            return BitfieldCheck{left_and->left(), mask, masked_value, false};
          }
        }
      }
    } else if (const ChangeOp* truncate =
                   op.TryCast<Opmask::kTruncateWord64ToWord32>()) {
      return TryDetectShiftAndMaskOneBit<Word64>(matcher,
                                                 truncate->input<Word64>());
    } else {
      return TryDetectShiftAndMaskOneBit<Word32>(matcher, index);
    }
    return std::nullopt;
  }

  std::optional<BitfieldCheck> TryCombine(const BitfieldCheck& other) {
    if (source != other.source ||
        truncate_from_64_bit != other.truncate_from_64_bit) {
      return std::nullopt;
    }
    uint32_t overlapping_bits = mask & other.mask;
    // It would be kind of strange to have any overlapping bits, but they can
    // be allowed as long as they don't require opposite values in the same
    // positions.
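    // Illustrative example (values invented for exposition): combining
    //   this:  mask == 0b0011, masked_value == 0b0001
    //   other: mask == 0b0110, masked_value == 0b0100
    // the overlapping bit 0b0010 is required to be 0 by both checks, so the
    // combined check uses mask == 0b0111 and masked_value == 0b0101.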
    if ((masked_value & overlapping_bits) !=
        (other.masked_value & overlapping_bits)) {
      return std::nullopt;
    }
    return BitfieldCheck{source, mask | other.mask,
                         masked_value | other.masked_value,
                         truncate_from_64_bit};
  }

 private:
  template <typename WordType>
  static std::optional<BitfieldCheck> TryDetectShiftAndMaskOneBit(
      const OperationMatcher& matcher, V<Word> index) {
    constexpr WordRepresentation Rep = V<WordType>::rep;
    // Look for the pattern `(val >> shift) & 1`. The shift may be omitted.
    V<WordType> value;
    uint64_t constant;
    if (matcher.MatchBitwiseAndWithConstant(index, &value, &constant, Rep) &&
        constant == 1) {
      V<WordType> input;
      if (int shift_amount;
          matcher.MatchConstantRightShift(value, &input, Rep, &shift_amount) &&
          shift_amount >= 0 && shift_amount < 32) {
        uint32_t mask = 1 << shift_amount;
        return BitfieldCheck{input, mask, mask,
                             Rep == WordRepresentation::Word64()};
      }
      return BitfieldCheck{value, 1, 1, Rep == WordRepresentation::Word64()};
    }
    return std::nullopt;
  }
};

}  // namespace detail

template <class Next>
class MachineOptimizationReducer : public Next {
 public:
  TURBOSHAFT_REDUCER_BOILERPLATE(MachineOptimization)
#if defined(__clang__)
  // TODO(dmercadier): this static_assert ensures that the stack contains a
  // VariableReducer. It is currently not very clean, because when GraphVisitor
  // is on the stack, it implicitly adds a VariableReducer that isn't detected
  // by reducer_list_contains. It would be cleaner to have a single "reducer
  // list contains VariableReducer" check that sees the VariableReducer
  // introduced by GraphVisitor.
#endif

  // TODO(mslekova): Implement ReduceSelect and ReducePhi,
  // by reducing `(f > 0) ? f : -f` to `fabs(f)`.

  V<Untagged> REDUCE(Change)(V<Untagged> input, ChangeOp::Kind kind,
                             ChangeOp::Assumption assumption,
                             RegisterRepresentation from,
                             RegisterRepresentation to) {
    if (ShouldSkipOptimizationStep()) {
      return Next::ReduceChange(input, kind, assumption, from, to);
    }
    using Kind = ChangeOp::Kind;
    if (from == WordRepresentation::Word32()) {
      input = TryRemoveWord32ToWord64Conversion(input);
    }
    if (uint64_t value;
        from.IsWord() && matcher_.MatchIntegralWordConstant(
                             input, WordRepresentation(from), &value)) {
      using Rep = RegisterRepresentation;
      switch (multi(kind, from, to)) {
        case multi(Kind::kSignExtend, Rep::Word32(), Rep::Word64()):
          return __ Word64Constant(int64_t{static_cast<int32_t>(value)});
        case multi(Kind::kZeroExtend, Rep::Word32(), Rep::Word64()):
        case multi(Kind::kBitcast, Rep::Word32(), Rep::Word64()):
          return __ Word64Constant(uint64_t{static_cast<uint32_t>(value)});
        case multi(Kind::kBitcast, Rep::Word32(), Rep::Float32()):
          return __ Float32Constant(
              i::Float32::FromBits(static_cast<uint32_t>(value)));
        case multi(Kind::kBitcast, Rep::Word64(), Rep::Float64()):
          return __ Float64Constant(i::Float64::FromBits(value));
        case multi(Kind::kSignedToFloat, Rep::Word32(), Rep::Float64()):
          return __ Float64Constant(
              static_cast<double>(static_cast<int32_t>(value)));
        case multi(Kind::kSignedToFloat, Rep::Word64(), Rep::Float64()):
          return __ Float64Constant(
              static_cast<double>(static_cast<int64_t>(value)));
        case multi(Kind::kUnsignedToFloat, Rep::Word32(), Rep::Float64()):
          return __ Float64Constant(
              static_cast<double>(static_cast<uint32_t>(value)));
        case multi(Kind::kTruncate, Rep::Word64(), Rep::Word32()):
          return __ Word32Constant(static_cast<uint32_t>(value));
        default:
          break;
      }
    }
    if (i::Float32 value; from == RegisterRepresentation::Float32() &&
                          matcher_.MatchFloat32Constant(input, &value)) {
      if (kind == Kind::kFloatConversion &&
          to == RegisterRepresentation::Float64()) {
        return __ Float64Constant(value.get_scalar());
      }
      if (kind == Kind::kBitcast && to == WordRepresentation::Word32()) {
        return __ Word32Constant(value.get_bits());
      }
    }
    if (i::Float64 value; from == RegisterRepresentation::Float64() &&
                          matcher_.MatchFloat64Constant(input, &value)) {
      if (kind == Kind::kFloatConversion &&
          to == RegisterRepresentation::Float32()) {
        return __ Float32Constant(DoubleToFloat32_NoInline(value.get_scalar()));
      }
      if (kind == Kind::kBitcast && to == WordRepresentation::Word64()) {
        return __ Word64Constant(base::bit_cast<uint64_t>(value));
      }
      if (kind == Kind::kSignedFloatTruncateOverflowToMin) {
        double truncated = std::trunc(value.get_scalar());
        if (to == WordRepresentation::Word64()) {
          int64_t result = std::numeric_limits<int64_t>::min();
          if (truncated >= std::numeric_limits<int64_t>::min() &&
              truncated <= kMaxDoubleRepresentableInt64) {
            result = static_cast<int64_t>(truncated);
          }
          return __ Word64Constant(result);
        }
        if (to == WordRepresentation::Word32()) {
          int32_t result = std::numeric_limits<int32_t>::min();
          if (truncated >= std::numeric_limits<int32_t>::min() &&
              truncated <= std::numeric_limits<int32_t>::max()) {
            result = static_cast<int32_t>(truncated);
          }
          return __ Word32Constant(result);
        }
      }
      if (kind == Kind::kJSFloatTruncate &&
          to == WordRepresentation::Word32()) {
        return __ Word32Constant(DoubleToInt32_NoInline(value.get_scalar()));
      }
      if (kind == Kind::kExtractHighHalf) {
        DCHECK_EQ(to, RegisterRepresentation::Word32());
        return __ Word32Constant(static_cast<uint32_t>(value.get_bits() >> 32));
      }
      if (kind == Kind::kExtractLowHalf) {
        DCHECK_EQ(to, RegisterRepresentation::Word32());
        return __ Word32Constant(static_cast<uint32_t>(value.get_bits()));
      }
    }
    if (float value; from == RegisterRepresentation::Float32() &&
                     matcher_.MatchFloat32Constant(input, &value)) {
      if (kind == Kind::kFloatConversion &&
          to == RegisterRepresentation::Float64()) {
        return __ Float64Constant(value);
      }
    }

    const Operation& input_op = matcher_.Get(input);
    if (const ChangeOp* change_op = input_op.TryCast<ChangeOp>()) {
      if (change_op->from == to && change_op->to == from &&
          change_op->IsReversibleBy(kind, signalling_nan_possible)) {
        return change_op->input();
      }
    }
    return Next::ReduceChange(input, kind, assumption, from, to);
  }

  V<Float64> REDUCE(BitcastWord32PairToFloat64)(V<Word32> hi_word32,
                                                V<Word32> lo_word32) {
    if (ShouldSkipOptimizationStep()) {
      return Next::ReduceBitcastWord32PairToFloat64(hi_word32, lo_word32);
    }
    uint32_t lo, hi;
    if (matcher_.MatchIntegralWord32Constant(hi_word32, &hi) &&
        matcher_.MatchIntegralWord32Constant(lo_word32, &lo)) {
      return __ Float64Constant(
          base::bit_cast<double>(uint64_t{hi} << 32 | uint64_t{lo}));
    }
    return Next::ReduceBitcastWord32PairToFloat64(hi_word32, lo_word32);
  }

  V<Any> REDUCE(TaggedBitcast)(V<Any> input, RegisterRepresentation from,
                               RegisterRepresentation to,
                               TaggedBitcastOp::Kind kind) {
    if (ShouldSkipOptimizationStep()) {
      return Next::ReduceTaggedBitcast(input, from, to, kind);
    }
    // A Tagged -> Untagged -> Tagged sequence can be short-cut.
    // An Untagged -> Tagged -> Untagged sequence however cannot be removed,
    // because the GC might have modified the pointer.
    if (auto* input_bitcast = matcher_.TryCast<TaggedBitcastOp>(input)) {
      if (all_of(input_bitcast->to, from) ==
              RegisterRepresentation::WordPtr() &&
          all_of(input_bitcast->from, to) == RegisterRepresentation::Tagged()) {
        return input_bitcast->input();
      }
    }
    // An Untagged -> Smi -> Untagged sequence can be short-cut.
    if (auto* input_bitcast = matcher_.TryCast<TaggedBitcastOp>(input);
        input_bitcast && to.IsWord() &&
        (kind == TaggedBitcastOp::Kind::kSmi ||
         input_bitcast->kind == TaggedBitcastOp::Kind::kSmi)) {
      if (input_bitcast->from == to) return input_bitcast->input();
      if (input_bitcast->from == RegisterRepresentation::Word32()) {
        DCHECK_EQ(to, RegisterRepresentation::Word64());
        return __ BitcastWord32ToWord64(input_bitcast->input());
      }
      DCHECK(input_bitcast->from == RegisterRepresentation::Word64() &&
             to == RegisterRepresentation::Word32());
      return __ TruncateWord64ToWord32(input_bitcast->input());
    }
    // Try to constant-fold TaggedBitcast from Word Constant to Word.
    if (to.IsWord()) {
      if (const ConstantOp* cst = matcher_.TryCast<ConstantOp>(input)) {
        if (cst->kind == ConstantOp::Kind::kWord32 ||
            cst->kind == ConstantOp::Kind::kWord64) {
          if (to == RegisterRepresentation::Word64()) {
            return __ Word64Constant(cst->integral());
          } else {
            DCHECK_EQ(to, RegisterRepresentation::Word32());
            return __ Word32Constant(static_cast<uint32_t>(cst->integral()));
          }
        }
      }
    }
    if (const ConstantOp* cst = matcher_.TryCast<ConstantOp>(input)) {
      // Try to constant-fold Word constant -> Tagged (Smi).
      if (cst->IsIntegral() && to == RegisterRepresentation::Tagged()) {
        if (Smi::IsValid(cst->integral())) {
          return __ SmiConstant(
              i::Tagged<Smi>(static_cast<intptr_t>(cst->integral())));
        }
      }
      // Try to constant-fold Smi -> Untagged.
      if (cst->kind == ConstantOp::Kind::kSmi) {
        if (to == RegisterRepresentation::Word32()) {
          return __ Word32Constant(static_cast<uint32_t>(cst->smi().ptr()));
        } else if (to == RegisterRepresentation::Word64()) {
          return __ Word64Constant(static_cast<uint64_t>(cst->smi().ptr()));
        }
      }
    }
    return Next::ReduceTaggedBitcast(input, from, to, kind);
  }

  V<Float> REDUCE(FloatUnary)(V<Float> input, FloatUnaryOp::Kind kind,
                              FloatRepresentation rep) {
    if (ShouldSkipOptimizationStep()) {
      return Next::ReduceFloatUnary(input, kind, rep);
    }
    if (float f32_k; rep == FloatRepresentation::Float32() &&
                     matcher_.MatchFloat32Constant(input, &f32_k)) {
      if (std::isnan(f32_k) && !signalling_nan_possible) {
        return __ Float32Constant(std::numeric_limits<float>::quiet_NaN());
      }
      switch (kind) {
        case FloatUnaryOp::Kind::kAbs:
          return __ Float32Constant(std::abs(f32_k));
        case FloatUnaryOp::Kind::kNegate:
          return __ Float32Constant(-f32_k);
        case FloatUnaryOp::Kind::kSilenceNaN:
          DCHECK(!std::isnan(f32_k));
          return __ Float32Constant(f32_k);
        case FloatUnaryOp::Kind::kRoundDown:
          return __ Float32Constant(std::floor(f32_k));
        case FloatUnaryOp::Kind::kRoundUp:
          return __ Float32Constant(std::ceil(f32_k));
        case FloatUnaryOp::Kind::kRoundToZero:
          return __ Float32Constant(std::trunc(f32_k));
        case FloatUnaryOp::Kind::kRoundTiesEven:
          DCHECK_EQ(std::nearbyint(1.5), 2);
          DCHECK_EQ(std::nearbyint(2.5), 2);
          return __ Float32Constant(std::nearbyint(f32_k));
        case FloatUnaryOp::Kind::kLog:
          return __ Float32Constant(base::ieee754::log(f32_k));
        case FloatUnaryOp::Kind::kSqrt:
          return __ Float32Constant(std::sqrt(f32_k));
        case FloatUnaryOp::Kind::kExp:
          return __ Float32Constant(base::ieee754::exp(f32_k));
        case FloatUnaryOp::Kind::kExpm1:
          return __ Float32Constant(base::ieee754::expm1(f32_k));
        case FloatUnaryOp::Kind::kSin:
          return __ Float32Constant(SIN_IMPL(f32_k));
        case FloatUnaryOp::Kind::kCos:
          return __ Float32Constant(COS_IMPL(f32_k));
        case FloatUnaryOp::Kind::kSinh:
          return __ Float32Constant(base::ieee754::sinh(f32_k));
        case FloatUnaryOp::Kind::kCosh:
          return __ Float32Constant(base::ieee754::cosh(f32_k));
        case FloatUnaryOp::Kind::kAcos:
          return __ Float32Constant(base::ieee754::acos(f32_k));
        case FloatUnaryOp::Kind::kAsin:
          return __ Float32Constant(base::ieee754::asin(f32_k));
        case FloatUnaryOp::Kind::kAsinh:
          return __ Float32Constant(base::ieee754::asinh(f32_k));
        case FloatUnaryOp::Kind::kAcosh:
          return __ Float32Constant(base::ieee754::acosh(f32_k));
        case FloatUnaryOp::Kind::kTan:
          return __ Float32Constant(base::ieee754::tan(f32_k));
        case FloatUnaryOp::Kind::kTanh:
          return __ Float32Constant(base::ieee754::tanh(f32_k));
        case FloatUnaryOp::Kind::kLog2:
          return __ Float32Constant(base::ieee754::log2(f32_k));
        case FloatUnaryOp::Kind::kLog10:
          return __ Float32Constant(base::ieee754::log10(f32_k));
        case FloatUnaryOp::Kind::kLog1p:
          return __ Float32Constant(base::ieee754::log1p(f32_k));
        case FloatUnaryOp::Kind::kCbrt:
          return __ Float32Constant(base::ieee754::cbrt(f32_k));
        case FloatUnaryOp::Kind::kAtan:
          return __ Float32Constant(base::ieee754::atan(f32_k));
        case FloatUnaryOp::Kind::kAtanh:
          return __ Float32Constant(base::ieee754::atanh(f32_k));
      }
    } else if (double f64_k; rep == FloatRepresentation::Float64() &&
                             matcher_.MatchFloat64Constant(input, &f64_k)) {
      if (std::isnan(f64_k) && !signalling_nan_possible) {
        return __ Float64Constant(std::numeric_limits<double>::quiet_NaN());
      }
      switch (kind) {
        case FloatUnaryOp::Kind::kAbs:
          return __ Float64Constant(std::abs(f64_k));
        case FloatUnaryOp::Kind::kNegate:
          return __ Float64Constant(-f64_k);
        case FloatUnaryOp::Kind::kSilenceNaN:
          DCHECK(!std::isnan(f64_k));
          return __ Float64Constant(f64_k);
        case FloatUnaryOp::Kind::kRoundDown:
          return __ Float64Constant(std::floor(f64_k));
        case FloatUnaryOp::Kind::kRoundUp:
          return __ Float64Constant(std::ceil(f64_k));
        case FloatUnaryOp::Kind::kRoundToZero:
          return __ Float64Constant(std::trunc(f64_k));
        case FloatUnaryOp::Kind::kRoundTiesEven:
          DCHECK_EQ(std::nearbyint(1.5), 2);
          DCHECK_EQ(std::nearbyint(2.5), 2);
          return __ Float64Constant(std::nearbyint(f64_k));
        case FloatUnaryOp::Kind::kLog:
          return __ Float64Constant(base::ieee754::log(f64_k));
        case FloatUnaryOp::Kind::kSqrt:
          return __ Float64Constant(std::sqrt(f64_k));
        case FloatUnaryOp::Kind::kExp:
          return __ Float64Constant(base::ieee754::exp(f64_k));
        case FloatUnaryOp::Kind::kExpm1:
          return __ Float64Constant(base::ieee754::expm1(f64_k));
        case FloatUnaryOp::Kind::kSin:
          return __ Float64Constant(SIN_IMPL(f64_k));
        case FloatUnaryOp::Kind::kCos:
          return __ Float64Constant(COS_IMPL(f64_k));
        case FloatUnaryOp::Kind::kSinh:
          return __ Float64Constant(base::ieee754::sinh(f64_k));
        case FloatUnaryOp::Kind::kCosh:
          return __ Float64Constant(base::ieee754::cosh(f64_k));
        case FloatUnaryOp::Kind::kAcos:
          return __ Float64Constant(base::ieee754::acos(f64_k));
        case FloatUnaryOp::Kind::kAsin:
          return __ Float64Constant(base::ieee754::asin(f64_k));
        case FloatUnaryOp::Kind::kAsinh:
          return __ Float64Constant(base::ieee754::asinh(f64_k));
        case FloatUnaryOp::Kind::kAcosh:
          return __ Float64Constant(base::ieee754::acosh(f64_k));
        case FloatUnaryOp::Kind::kTan:
          return __ Float64Constant(base::ieee754::tan(f64_k));
        case FloatUnaryOp::Kind::kTanh:
          return __ Float64Constant(base::ieee754::tanh(f64_k));
        case FloatUnaryOp::Kind::kLog2:
          return __ Float64Constant(base::ieee754::log2(f64_k));
        case FloatUnaryOp::Kind::kLog10:
          return __ Float64Constant(base::ieee754::log10(f64_k));
        case FloatUnaryOp::Kind::kLog1p:
          return __ Float64Constant(base::ieee754::log1p(f64_k));
        case FloatUnaryOp::Kind::kCbrt:
          return __ Float64Constant(base::ieee754::cbrt(f64_k));
        case FloatUnaryOp::Kind::kAtan:
          return __ Float64Constant(base::ieee754::atan(f64_k));
        case FloatUnaryOp::Kind::kAtanh:
          return __ Float64Constant(base::ieee754::atanh(f64_k));
      }
    }
    return Next::ReduceFloatUnary(input, kind, rep);
  }

  V<Word> REDUCE(WordUnary)(V<Word> input, WordUnaryOp::Kind kind,
                            WordRepresentation rep) {
    if (ShouldSkipOptimizationStep()) {
      return Next::ReduceWordUnary(input, kind, rep);
    }
    if (rep == WordRepresentation::Word32()) {
      input = TryRemoveWord32ToWord64Conversion(input);
    }
    if (uint32_t w32_k; rep == WordRepresentation::Word32() &&
                        matcher_.MatchIntegralWord32Constant(input, &w32_k)) {
      switch (kind) {
        case WordUnaryOp::Kind::kReverseBytes:
          return __ Word32Constant(base::bits::ReverseBytes(w32_k));
        case WordUnaryOp::Kind::kCountLeadingZeros:
          return __ Word32Constant(base::bits::CountLeadingZeros(w32_k));
        case WordUnaryOp::Kind::kCountTrailingZeros:
          return __ Word32Constant(base::bits::CountTrailingZeros(w32_k));
        case WordUnaryOp::Kind::kPopCount:
          return __ Word32Constant(base::bits::CountPopulation(w32_k));
        case WordUnaryOp::Kind::kSignExtend8:
          return __ Word32Constant(int32_t{static_cast<int8_t>(w32_k)});
        case WordUnaryOp::Kind::kSignExtend16:
          return __ Word32Constant(int32_t{static_cast<int16_t>(w32_k)});
      }
    } else if (uint64_t w64_k;
               rep == WordRepresentation::Word64() &&
               matcher_.MatchIntegralWord64Constant(input, &w64_k)) {
      switch (kind) {
        case WordUnaryOp::Kind::kReverseBytes:
          return __ Word64Constant(base::bits::ReverseBytes(w64_k));
        case WordUnaryOp::Kind::kCountLeadingZeros:
          return __ Word64Constant(
              uint64_t{base::bits::CountLeadingZeros(w64_k)});
        case WordUnaryOp::Kind::kCountTrailingZeros:
          return __ Word64Constant(
              uint64_t{base::bits::CountTrailingZeros(w64_k)});
        case WordUnaryOp::Kind::kPopCount:
          return __ Word64Constant(
              uint64_t{base::bits::CountPopulation(w64_k)});
        case WordUnaryOp::Kind::kSignExtend8:
          return __ Word64Constant(int64_t{static_cast<int8_t>(w64_k)});
        case WordUnaryOp::Kind::kSignExtend16:
          return __ Word64Constant(int64_t{static_cast<int16_t>(w64_k)});
      }
    }
    return Next::ReduceWordUnary(input, kind, rep);
  }

  V<Float> REDUCE(FloatBinop)(V<Float> lhs, V<Float> rhs,
                              FloatBinopOp::Kind kind,
                              FloatRepresentation rep) {
    if (ShouldSkipOptimizationStep()) {
      return Next::ReduceFloatBinop(lhs, rhs, kind, rep);
    }

    using Kind = FloatBinopOp::Kind;

    // Place constant on the right for commutative operators.
    if (FloatBinopOp::IsCommutative(kind) && matcher_.Is<ConstantOp>(lhs) &&
        !matcher_.Is<ConstantOp>(rhs)) {
      return ReduceFloatBinop(rhs, lhs, kind, rep);
    }

    // constant folding
    if (float k1, k2; rep == FloatRepresentation::Float32() &&
                      matcher_.MatchFloat32Constant(lhs, &k1) &&
                      matcher_.MatchFloat32Constant(rhs, &k2)) {
      switch (kind) {
        case Kind::kAdd:
          return __ Float32Constant(k1 + k2);
        case Kind::kMul:
          return __ Float32Constant(k1 * k2);
        case Kind::kSub:
          return __ Float32Constant(k1 - k2);
        case Kind::kMin:
          return __ Float32Constant(JSMin(k1, k2));
        case Kind::kMax:
          return __ Float32Constant(JSMax(k1, k2));
        case Kind::kDiv:
          return __ Float32Constant(k1 / k2);
        case Kind::kPower:
          return __ Float32Constant(internal::math::pow(k1, k2));
        case Kind::kAtan2:
          return __ Float32Constant(base::ieee754::atan2(k1, k2));
        case Kind::kMod:
          UNREACHABLE();
      }
    }
    if (double k1, k2; rep == FloatRepresentation::Float64() &&
                       matcher_.MatchFloat64Constant(lhs, &k1) &&
                       matcher_.MatchFloat64Constant(rhs, &k2)) {
      switch (kind) {
        case Kind::kAdd:
          return __ Float64Constant(k1 + k2);
        case Kind::kMul:
          return __ Float64Constant(k1 * k2);
        case Kind::kSub:
          return __ Float64Constant(k1 - k2);
        case Kind::kMin:
          return __ Float64Constant(JSMin(k1, k2));
        case Kind::kMax:
          return __ Float64Constant(JSMax(k1, k2));
        case Kind::kDiv:
          return __ Float64Constant(k1 / k2);
        case Kind::kMod:
          return __ Float64Constant(Modulo(k1, k2));
        case Kind::kPower:
          return __ Float64Constant(math::pow(k1, k2));
        case Kind::kAtan2:
          return __ Float64Constant(base::ieee754::atan2(k1, k2));
      }
    }

    // lhs <op> NaN => NaN
    if (matcher_.MatchNaN(rhs) ||
        (matcher_.MatchNaN(lhs) && kind != Kind::kPower)) {
      // Return a quiet NaN since Wasm operations could have signalling NaN as
      // input but not as output.
      return __ FloatConstant(std::numeric_limits<double>::quiet_NaN(), rep);
    }

    if (matcher_.Is<ConstantOp>(rhs)) {
      if (kind == Kind::kMul) {
        // lhs * 1 => lhs
        if (!signalling_nan_possible && matcher_.MatchFloat(rhs, 1.0)) {
          return lhs;
        }
        // lhs * 2 => lhs + lhs
        if (matcher_.MatchFloat(rhs, 2.0)) {
          return __ FloatAdd(lhs, lhs, rep);
        }
        // lhs * -1 => -lhs
        if (!signalling_nan_possible && matcher_.MatchFloat(rhs, -1.0)) {
          return __ FloatNegate(lhs, rep);
        }
      }

      if (kind == Kind::kDiv) {
        // lhs / 1 => lhs
        if (!signalling_nan_possible && matcher_.MatchFloat(rhs, 1.0)) {
          return lhs;
        }
        // lhs / -1 => -lhs
        if (!signalling_nan_possible && matcher_.MatchFloat(rhs, -1.0)) {
          return __ FloatNegate(lhs, rep);
        }
        // All reciprocals of non-denormal powers of two can be represented
        // exactly, so division by a power of two can be reduced to
        // multiplication by the reciprocal, with the same result.
        // x / k => x * (1 / k)
        if (rep == FloatRepresentation::Float32()) {
          if (float k;
              matcher_.MatchFloat32Constant(rhs, &k) && std::isnormal(k) &&
              k != 0 && std::isfinite(k) &&
              base::bits::IsPowerOfTwo(base::Double(k).Significand())) {
            return __ FloatMul(lhs, __ FloatConstant(1.0 / k, rep), rep);
          }
        } else {
          DCHECK_EQ(rep, FloatRepresentation::Float64());
          if (double k;
              matcher_.MatchFloat64Constant(rhs, &k) && std::isnormal(k) &&
              k != 0 && std::isfinite(k) &&
              base::bits::IsPowerOfTwo(base::Double(k).Significand())) {
            return __ FloatMul(lhs, __ FloatConstant(1.0 / k, rep), rep);
          }
        }
      }

      if (kind == Kind::kMod) {
        // x % 0 => NaN
        if (matcher_.MatchFloat(rhs, 0.0)) {
          return __ FloatConstant(std::numeric_limits<double>::quiet_NaN(),
                                  rep);
        }
      }

      if (kind == Kind::kSub) {
        // lhs - +0.0 => lhs
        if (!signalling_nan_possible && matcher_.MatchFloat(rhs, +0.0)) {
          return lhs;
        }
      }

      if (kind == Kind::kPower) {
        if (matcher_.MatchFloat(rhs, 0.0) || matcher_.MatchFloat(rhs, -0.0)) {
          // lhs ** 0 ==> 1
          return __ FloatConstant(1.0, rep);
        }
        if (matcher_.MatchFloat(rhs, 2.0)) {
          // lhs ** 2 ==> lhs * lhs
          return __ FloatMul(lhs, lhs, rep);
        }
        if (matcher_.MatchFloat(rhs, 0.5)) {
          // lhs ** 0.5 ==> sqrt(lhs)
          // (unless lhs is -infinity)
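          // Rationale (sketch): JS/IEEE pow(-Infinity, 0.5) is +Infinity,
          // whereas a plain sqrt(-Infinity) would yield NaN, so that input is
          // special-cased below; the `+ 0.0` additionally turns -0.0 into
          // +0.0 before the sqrt.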
          Variable result = __ NewLoopInvariantVariable(rep);
          IF (UNLIKELY(__ FloatLessThanOrEqual(
                  lhs, __ FloatConstant(-V8_INFINITY, rep), rep))) {
            __ SetVariable(result, __ FloatConstant(V8_INFINITY, rep));
          } ELSE {
            __ SetVariable(
                result,
                __ FloatSqrt(__ FloatAdd(lhs, __ FloatConstant(0, rep), rep),
                             rep));
          }

          return __ GetVariable(result);
        }
      }
    }

    if (!signalling_nan_possible && kind == Kind::kSub &&
        matcher_.MatchFloat(lhs, -0.0)) {
      // -0.0 - round_down(-0.0 - y) => round_up(y)
      if (V<Float> a, b, c;
          FloatUnaryOp::IsSupported(FloatUnaryOp::Kind::kRoundUp, rep) &&
          matcher_.MatchFloatRoundDown(rhs, &a, rep) &&
          matcher_.MatchFloatSub(a, &b, &c, rep) &&
          matcher_.MatchFloat(b, -0.0)) {
        return __ FloatRoundUp(c, rep);
      }
      // -0.0 - rhs => -rhs
      return __ FloatNegate(rhs, rep);
    }

    return Next::ReduceFloatBinop(lhs, rhs, kind, rep);
  }

  V<Word> REDUCE(WordBinop)(V<Word> left, V<Word> right, WordBinopOp::Kind kind,
                            WordRepresentation rep) {
    if (ShouldSkipOptimizationStep()) {
      return Next::ReduceWordBinop(left, right, kind, rep);
    }

    using Kind = WordBinopOp::Kind;

    DCHECK(rep == WordRepresentation::Word32() ||
           rep == WordRepresentation::Word64());
    bool is_64 = rep == WordRepresentation::Word64();

    if (!is_64) {
      left = TryRemoveWord32ToWord64Conversion(left);
      right = TryRemoveWord32ToWord64Conversion(right);
    }

    // Place constant on the right for commutative operators.
    if (WordBinopOp::IsCommutative(kind) && matcher_.Is<ConstantOp>(left) &&
        !matcher_.Is<ConstantOp>(right)) {
      return ReduceWordBinop(right, left, kind, rep);
    }
    // constant folding
    if (uint64_t k1, k2; matcher_.MatchIntegralWordConstant(left, rep, &k1) &&
                         matcher_.MatchIntegralWordConstant(right, rep, &k2)) {
      switch (kind) {
        case Kind::kAdd:
          return __ WordConstant(k1 + k2, rep);
        case Kind::kMul:
          return __ WordConstant(k1 * k2, rep);
        case Kind::kBitwiseAnd:
          return __ WordConstant(k1 & k2, rep);
        case Kind::kBitwiseOr:
          return __ WordConstant(k1 | k2, rep);
        case Kind::kBitwiseXor:
          return __ WordConstant(k1 ^ k2, rep);
        case Kind::kSub:
          return __ WordConstant(k1 - k2, rep);
        case Kind::kSignedMulOverflownBits:
          return __ WordConstant(
              is_64 ? base::bits::SignedMulHigh64(static_cast<int64_t>(k1),
                                                  static_cast<int64_t>(k2))
                    : base::bits::SignedMulHigh32(static_cast<int32_t>(k1),
                                                  static_cast<int32_t>(k2)),
              rep);
        case Kind::kUnsignedMulOverflownBits:
          return __ WordConstant(
              is_64 ? base::bits::UnsignedMulHigh64(k1, k2)
                    : base::bits::UnsignedMulHigh32(static_cast<uint32_t>(k1),
                                                    static_cast<uint32_t>(k2)),
              rep);
        case Kind::kSignedDiv:
          return __ WordConstant(
              is_64 ? base::bits::SignedDiv64(k1, k2)
                    : base::bits::SignedDiv32(static_cast<int32_t>(k1),
                                              static_cast<int32_t>(k2)),
              rep);
        case Kind::kUnsignedDiv:
          return __ WordConstant(
              is_64 ? base::bits::UnsignedDiv64(k1, k2)
                    : base::bits::UnsignedDiv32(static_cast<uint32_t>(k1),
                                                static_cast<uint32_t>(k2)),
              rep);
        case Kind::kSignedMod:
          return __ WordConstant(
              is_64 ? base::bits::SignedMod64(k1, k2)
                    : base::bits::SignedMod32(static_cast<int32_t>(k1),
                                              static_cast<int32_t>(k2)),
              rep);
        case Kind::kUnsignedMod:
          return __ WordConstant(
              is_64 ? base::bits::UnsignedMod64(k1, k2)
                    : base::bits::UnsignedMod32(static_cast<uint32_t>(k1),
                                                static_cast<uint32_t>(k2)),
              rep);
      }
    }

    if (kind == Kind::kBitwiseAnd && rep == WordRepresentation::Word32()) {
      if (auto right_bitfield = detail::BitfieldCheck::Detect(
              matcher_, __ output_graph(), right)) {
        if (auto left_bitfield = detail::BitfieldCheck::Detect(
                matcher_, __ output_graph(), left)) {
          if (auto combined_bitfield =
                  left_bitfield->TryCombine(*right_bitfield)) {
            V<Word> source = combined_bitfield->source;
            if (combined_bitfield->truncate_from_64_bit) {
              source = __ TruncateWord64ToWord32(V<Word64>::Cast(source));
            }
            return __ Word32Equal(__ Word32BitwiseAnd(V<Word32>::Cast(source),
                                                      combined_bitfield->mask),
                                  combined_bitfield->masked_value);
          }
        }
      }
    }

    if (uint64_t right_value;
        matcher_.MatchIntegralWordConstant(right, rep, &right_value)) {
      // TODO(jkummerow): computing {right_value_signed} could probably be
      // handled by the 4th argument to {MatchIntegralWordConstant}.
      int64_t right_value_signed =
          is_64 ? static_cast<int64_t>(right_value)
                : int64_t{static_cast<int32_t>(right_value)};
      // (a <op> k1) <op> k2 => a <op> (k1 <op> k2)
      if (V<Word> a, k1;
          WordBinopOp::IsAssociative(kind) &&
          matcher_.MatchWordBinop<Word>(left, &a, &k1, kind, rep) &&
          matcher_.Is<ConstantOp>(k1)) {
        V<Word> k2 = right;
        // This optimization allows constant folding of `k1` and `k2`.
        // However, if (a <op> k1) has to be calculated anyway, then constant
        // folding does not save any calculations during runtime, and it may
        // increase register pressure because it extends the lifetime of `a`.
        // Therefore we perform the optimization only when `left = (a <op> k1)`
        // has no other uses.
        if (matcher_.Get(left).saturated_use_count.IsZero()) {
          return ReduceWordBinop(a, ReduceWordBinop(k1, k2, kind, rep), kind,
                                 rep);
        }
      }
      switch (kind) {
        case Kind::kSub:
          // left - k => left + -k
          return ReduceWordBinop(left, __ WordConstant(-right_value, rep),
                                 Kind::kAdd, rep);
        case Kind::kAdd:
          // left + 0 => left
          if (right_value == 0) {
            return left;
          }
          break;
        case Kind::kBitwiseXor:
          // left ^ 0 => left
          if (right_value == 0) {
            return left;
          }
          // left ^ 1 => left == 0 if left is 0 or 1
          if (right_value == 1 && IsBit(left)) {
            return __ Word32Equal(V<Word32>::Cast(left), 0);
          }
          // (x ^ -1) ^ -1 => x
          {
            V<Word> x, y;
            int64_t k;
            if (right_value_signed == -1 &&
                matcher_.MatchBitwiseAnd(left, &x, &y, rep) &&
                matcher_.MatchIntegralWordConstant(y, rep, &k) && k == -1) {
              return x;
            }
          }
          break;
        case Kind::kBitwiseOr:
          // left | 0 => left
          if (right_value == 0) {
            return left;
          }
          // left | -1 => -1
          if (right_value_signed == -1) {
            return right;
          }
          // (x & K1) | K2 => x | K2 if K2 has ones for every zero bit in K1.
          // This case can be constructed by UpdateWord and UpdateWord32 in CSA.
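          // Illustrative example: with K1 == 0xff00 and K2 == 0x00ff,
          // (x & 0xff00) | 0x00ff == x | 0x00ff, since K2 supplies a one for
          // every bit that the mask K1 clears.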
          {
            V<Word> x, y;
            uint64_t k1;
            uint64_t k2 = right_value;
            if (matcher_.MatchBitwiseAnd(left, &x, &y, rep) &&
                matcher_.MatchIntegralWordConstant(y, rep, &k1) &&
                (k1 | k2) == rep.MaxUnsignedValue()) {
              return __ WordBitwiseOr(x, right, rep);
            }
          }
          break;
        case Kind::kMul:
          // left * 0 => 0
          if (right_value == 0) {
            return __ WordConstant(0, rep);
          }
          // left * 1 => left
          if (right_value == 1) {
            return left;
          }
          // left * -1 => 0 - left
          if (right_value_signed == -1) {
            return __ WordSub(__ WordConstant(0, rep), left, rep);
          }
          // left * 2^k => left << k
          if (base::bits::IsPowerOfTwo(right_value)) {
            return __ ShiftLeft(left, base::bits::WhichPowerOfTwo(right_value),
                                rep);
          }
          break;
        case Kind::kBitwiseAnd:
          // left & -1 => left
          if (right_value_signed == -1) {
            return left;
          }
          // x & 0 => 0
          if (right_value == 0) {
            return right;
          }

          if (right_value == 1) {
            // (x + x) & 1 => 0
            V<Word> left_ignore_extensions =
                IsWord32ConvertedToWord64(left)
                    ? UndoWord32ToWord64Conversion(left)
                    : left;
            if (V<Word> a, b;
                matcher_.MatchWordAdd(left_ignore_extensions, &a, &b,
                                      WordRepresentation::Word32()) &&
                a == b) {
              return __ WordConstant(0, rep);
            }

            // CMP & 1 => CMP
            if (IsBit(left_ignore_extensions)) {
              return left;
            }

            static_assert(kSmiTagMask == 1);
            // HeapObject & 1 => 1 ("& 1" is a Smi-check)
            if (TryMatchHeapObject(left)) {
              return __ WordConstant(1, rep);
            } else if (__ Get(left).template Is<Opmask::kSmiConstant>()) {
              return __ WordConstant(0, rep);
            }
          }

          // asm.js often benefits from these transformations, to optimize out
          // unnecessary memory access alignment masks. Conventions used in
          // the comments below:
          // x, y: arbitrary values
          // K, L, M: arbitrary constants
          // (-1 << K) == mask: the right-hand side of the bitwise AND.
          if (IsNegativePowerOfTwo(right_value_signed)) {
            uint64_t mask = right_value;
            int K = base::bits::CountTrailingZeros64(mask);
            V<Word> x, y;
            {
              int L;
              // (x << L) & (-1 << K)
              // => x << L iff L >= K
              if (matcher_.MatchConstantLeftShift(left, &x, rep, &L) &&
                  L >= K) {
                return left;
              }
            }

            if (matcher_.MatchWordAdd(left, &x, &y, rep)) {
              uint64_t L;  // L == (M << K) iff (L & mask) == L.

              // (x + (M << K)) & (-1 << K)
              // => (x & (-1 << K)) + (M << K)
              if (matcher_.MatchIntegralWordConstant(y, rep, &L) &&
                  (L & mask) == L) {
                return __ WordAdd(__ WordBitwiseAnd(x, right, rep),
                                  __ WordConstant(L, rep), rep);
              }

              // (x1 * (M << K) + y) & (-1 << K)
              // => x1 * (M << K) + (y & (-1 << K))
              V<Word> x1, x2, y1, y2;
              if (matcher_.MatchWordMul(x, &x1, &x2, rep) &&
                  matcher_.MatchIntegralWordConstant(x2, rep, &L) &&
                  (L & mask) == L) {
                return __ WordAdd(x, __ WordBitwiseAnd(y, right, rep), rep);
              }
              // Same as above with swapped order:
              // (x + y1 * (M << K)) & (-1 << K)
              // => (x & (-1 << K)) + y1 * (M << K)
              if (matcher_.MatchWordMul(y, &y1, &y2, rep) &&
                  matcher_.MatchIntegralWordConstant(y2, rep, &L) &&
                  (L & mask) == L) {
                return __ WordAdd(__ WordBitwiseAnd(x, right, rep), y, rep);
              }

              // ((x1 << K) + y) & (-1 << K)
              // => (x1 << K) + (y & (-1 << K))
              int K2;
              if (matcher_.MatchConstantLeftShift(x, &x1, rep, &K2) &&
                  K2 == K) {
                return __ WordAdd(x, __ WordBitwiseAnd(y, right, rep), rep);
              }
              // Same as above with swapped order:
              // (x + (y1 << K)) & (-1 << K)
              // => (x & (-1 << K)) + (y1 << K)
              if (matcher_.MatchConstantLeftShift(y, &y1, rep, &K2) &&
                  K2 == K) {
                return __ WordAdd(__ WordBitwiseAnd(x, right, rep), y, rep);
              }
            } else if (matcher_.MatchWordMul(left, &x, &y, rep)) {
              // (x * (M << K)) & (-1 << K) => x * (M << K)
              uint64_t L;  // L == (M << K) iff (L & mask) == L.
              if (matcher_.MatchIntegralWordConstant(y, rep, &L) &&
                  (L & mask) == L) {
                return left;
              }
            }
          }
          break;
        case Kind::kSignedDiv:
          return ReduceSignedDiv(left, right_value_signed, rep);
        case Kind::kUnsignedDiv:
          return ReduceUnsignedDiv(left, right_value, rep);
        case Kind::kSignedMod:
          // left % 0 => 0
          // left % 1 => 0
          // left % -1 => 0
          if (right_value_signed == any_of(0, 1, -1)) {
            return __ WordConstant(0, rep);
          }
          if (right_value_signed != rep.MinSignedValue()) {
            right_value_signed = Abs(right_value_signed);
          }
          // left % 2^n => ((left + m) & (2^n - 1)) - m
          // where m = (left >> bits-1) >>> bits-n
          // This is a branch-free version of the following:
          // left >= 0 ? left & (2^n - 1)
          //           : ((left + (2^n - 1)) & (2^n - 1)) - (2^n - 1)
          // Adding and subtracting (2^n - 1) before and after the bitwise-and
          // keeps the result congruent modulo 2^n, but shifts the resulting
          // value range to become -(2^n - 1) ... 0.
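          // Worked example (illustrative, Word32, n == 2, i.e. left % 4):
          // left == -5 gives m == 3, and ((-5 + 3) & 3) - 3 == 2 - 3 == -1,
          // matching the truncated -5 % 4 == -1.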
          if (base::bits::IsPowerOfTwo(right_value_signed)) {
            uint32_t bits = rep.bit_width();
            uint32_t n = base::bits::WhichPowerOfTwo(right_value_signed);
            V<Word> m = __ ShiftRightLogical(
                __ ShiftRightArithmetic(left, bits - 1, rep), bits - n, rep);
            return __ WordSub(
                __ WordBitwiseAnd(__ WordAdd(left, m, rep),
                                  __ WordConstant(right_value_signed - 1, rep),
                                  rep),
                m, rep);
          }
          // The `IntDiv` with a constant right-hand side will be turned into a
          // multiplication, avoiding the expensive integer division.
          return __ WordSub(
              left, __ WordMul(__ IntDiv(left, right, rep), right, rep), rep);
        case Kind::kUnsignedMod:
          // left % 0 => 0
          // left % 1 => 0
          if (right_value == 0 || right_value == 1) {
            return __ WordConstant(0, rep);
          }
          // x % 2^n => x & (2^n - 1)
          if (base::bits::IsPowerOfTwo(right_value)) {
            return __ WordBitwiseAnd(
                left, __ WordConstant(right_value - 1, rep), rep);
          }
          // The `UintDiv` with a constant right-hand side will be turned into a
          // multiplication, avoiding the expensive integer division.
          return __ WordSub(
              left, __ WordMul(right, __ UintDiv(left, right, rep), rep), rep);
        case Kind::kSignedMulOverflownBits:
        case Kind::kUnsignedMulOverflownBits:
          break;
      }
    }

    if (kind == Kind::kAdd) {
      V<Word> x, y, zero;
      // (0 - x) + y => y - x
      if (matcher_.MatchWordSub(left, &zero, &x, rep) &&
          matcher_.MatchZero(zero)) {
        y = right;
        return __ WordSub(y, x, rep);
      }
      // x + (0 - y) => x - y
      if (matcher_.MatchWordSub(right, &zero, &y, rep) &&
          matcher_.MatchZero(zero)) {
        x = left;
        return __ WordSub(x, y, rep);
      }
    }

    // 0 / right => 0
    // 0 % right => 0
    if (matcher_.MatchZero(left) &&
        kind == any_of(Kind::kSignedDiv, Kind::kUnsignedDiv, Kind::kUnsignedMod,
                       Kind::kSignedMod)) {
      return __ WordConstant(0, rep);
    }

    if (left == right) {
      V<Word> x = left;
      switch (kind) {
        // x & x => x
        // x | x => x
        case Kind::kBitwiseAnd:
        case Kind::kBitwiseOr:
          return x;
        // x ^ x => 0
        // x - x => 0
        // x % x => 0
        case Kind::kBitwiseXor:
        case Kind::kSub:
        case Kind::kSignedMod:
        case Kind::kUnsignedMod:
          return __ WordConstant(0, rep);
        // x / x => x != 0
        case Kind::kSignedDiv:
        case Kind::kUnsignedDiv: {
          V<Word> zero = __ WordConstant(0, rep);
          V<Word32> result = __ Word32Equal(__ Equal(left, zero, rep), 0);
          return __ ZeroExtendWord32ToRep(result, rep);
        }
        case Kind::kAdd:
        case Kind::kMul:
        case Kind::kSignedMulOverflownBits:
        case Kind::kUnsignedMulOverflownBits:
          break;
      }
    }

    if (std::optional<V<Word>> ror = TryReduceToRor(left, right, kind, rep)) {
      return *ror;
    }

    return Next::ReduceWordBinop(left, right, kind, rep);
  }

  bool TryMatchHeapObject(V<Any> idx, int depth = 0) {
    constexpr int kMaxDepth = 2;
    if (depth == kMaxDepth) return false;

    if (matcher_.MatchHeapConstant(idx)) return true;
    if (matcher_.Is<AllocateOp>(idx)) return true;
    if (matcher_.Is<Opmask::kTaggedBitcastHeapObject>(idx)) return true;

    // A Phi whose inputs are all HeapObject is itself a HeapObject.
    if (const PhiOp* phi = matcher_.TryCast<Opmask::kTaggedPhi>(idx)) {
      return base::all_of(phi->inputs(), [depth, this](V<Any> input) {
        return TryMatchHeapObject(input, depth + 1);
      });
    }

    // For anything else, assume that it's not a heap object.
    return false;
  }

1193 std::optional<V<Word>> TryReduceToRor(V<Word> left, V<Word> right,
1195 WordRepresentation rep) {
1196 // Recognize rotation, we are matcher_.Matching and transforming as follows
1197 // (assuming kWord32, kWord64 is handled correspondingly):
1198 // x << y | x >>> (32 - y) => x ror (32 - y)
1199 // x << (32 - y) | x >>> y => x ror y
1200 // x << y ^ x >>> (32 - y) => x ror (32 - y) if 1 <= y < 32
1201 // x << (32 - y) ^ x >>> y => x ror y if 1 <= y < 32
1202 // (As well as the commuted forms.)
1203 // Note the side condition for XOR: the optimization doesn't hold for
1204 // an effective rotation amount of 0.
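    // Illustrative example (Word32): (x << 1) | (x >>> 31) is recognized as
    // a rotation and becomes `x ror 31` (a rotate-left by 1 expressed as a
    // rotate-right by 31).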

    if (kind != any_of(WordBinopOp::Kind::kBitwiseOr,
                       WordBinopOp::Kind::kBitwiseXor)) {
      return {};
    }

    const ShiftOp* high = matcher_.TryCast<ShiftOp>(left);
    if (!high) return {};
    const ShiftOp* low = matcher_.TryCast<ShiftOp>(right);
    if (!low) return {};

    if (low->kind == ShiftOp::Kind::kShiftLeft) {
      std::swap(low, high);
    }
    if (high->kind != ShiftOp::Kind::kShiftLeft ||
        low->kind != ShiftOp::Kind::kShiftRightLogical) {
      return {};
    }
    V<Word> x = high->left();
    if (low->left() != x) return {};

    if (uint64_t k1, k2;
        matcher_.MatchIntegralWordConstant(high->right(), rep, &k1) &&
        matcher_.MatchIntegralWordConstant(low->right(), rep, &k2) &&
        k1 + k2 == rep.bit_width() && k1 >= 0 && k2 >= 0) {
      if (k1 == 0 || k2 == 0) {
        if (kind == WordBinopOp::Kind::kBitwiseXor) {
          return __ WordConstant(0, rep);
        } else {
          DCHECK_EQ(kind, WordBinopOp::Kind::kBitwiseOr);
          return x;
        }
      }
      return __ RotateRight(x, low->right(), rep);
    }

    if (kind == WordBinopOp::Kind::kBitwiseXor) {
      // Can't guarantee that rotation amount is not 0.
      return {};
    }
    DCHECK_EQ(kind, WordBinopOp::Kind::kBitwiseOr);

    V<Word> a, b;
    uint64_t k;
    if (matcher_.MatchWordSub(high->right(), &a, &b, rep) &&
        matcher_.MatchIntegralWordConstant(a, rep, &k) && b == low->right() &&
        k == rep.bit_width()) {
      return __ RotateRight(x, b, rep);
    } else if (matcher_.MatchWordSub(low->right(), &a, &b, rep) &&
               matcher_.MatchIntegralWordConstant(a, rep, &k) &&
               b == high->right() && k == rep.bit_width()) {
      return __ RotateRight(x, low->right(), rep);
    }

    return {};
  }

  V<Tuple<Word, Word32>> REDUCE(OverflowCheckedBinop)(
      V<Word> left, V<Word> right, OverflowCheckedBinopOp::Kind kind,
      WordRepresentation rep) {
    if (ShouldSkipOptimizationStep()) {
      return Next::ReduceOverflowCheckedBinop(left, right, kind, rep);
    }
    using Kind = OverflowCheckedBinopOp::Kind;
    if (OverflowCheckedBinopOp::IsCommutative(kind) &&
        matcher_.Is<ConstantOp>(left) && !matcher_.Is<ConstantOp>(right)) {
      return ReduceOverflowCheckedBinop(right, left, kind, rep);
    }
    if (rep == WordRepresentation::Word32()) {
      left = TryRemoveWord32ToWord64Conversion(left);
      right = TryRemoveWord32ToWord64Conversion(right);
    }
    // constant folding
    if (rep == WordRepresentation::Word32()) {
      if (int32_t k1, k2; matcher_.MatchIntegralWord32Constant(left, &k1) &&
                          matcher_.MatchIntegralWord32Constant(right, &k2)) {
        bool overflow;
        int32_t res;
        switch (kind) {
          case Kind::kSignedAdd:
            overflow = base::bits::SignedAddOverflow32(k1, k2, &res);
            break;
          case Kind::kSignedMul:
            overflow = base::bits::SignedMulOverflow32(k1, k2, &res);
            break;
          case Kind::kSignedSub:
            overflow = base::bits::SignedSubOverflow32(k1, k2, &res);
            break;
        }
        return __ Tuple(__ Word32Constant(res), __ Word32Constant(overflow));
      }
    } else {
      DCHECK_EQ(rep, WordRepresentation::Word64());
      if (int64_t k1, k2; matcher_.MatchIntegralWord64Constant(left, &k1) &&
                          matcher_.MatchIntegralWord64Constant(right, &k2)) {
        bool overflow;
        int64_t res;
        switch (kind) {
          case Kind::kSignedAdd:
            overflow = base::bits::SignedAddOverflow64(k1, k2, &res);
            break;
          case Kind::kSignedMul:
            overflow = base::bits::SignedMulOverflow64(k1, k2, &res);
            break;
          case Kind::kSignedSub:
            overflow = base::bits::SignedSubOverflow64(k1, k2, &res);
            break;
        }
        return __ Tuple(__ Word64Constant(res), __ Word32Constant(overflow));
      }
    }

    // left + 0 => (left, false)
    // left - 0 => (left, false)
    if (kind == any_of(Kind::kSignedAdd, Kind::kSignedSub) &&
        matcher_.MatchZero(right)) {
      return __ Tuple(left, __ Word32Constant(0));
    }

    if (kind == Kind::kSignedMul) {
      if (int64_t k; matcher_.MatchIntegralWordConstant(right, rep, &k)) {
        // left * 0 => (0, false)
        if (k == 0) {
          return __ Tuple(__ WordConstant(0, rep), __ Word32Constant(false));
        }
        // left * 1 => (left, false)
        if (k == 1) {
          return __ Tuple(left, __ Word32Constant(false));
        }
        // left * -1 => 0 - left
        if (k == -1) {
          return __ IntSubCheckOverflow(__ WordConstant(0, rep), left, rep);
        }
        // left * 2 => left + left
        if (k == 2) {
          return __ IntAddCheckOverflow(left, left, rep);
        }
      }
    }

    // UntagSmi(x) + UntagSmi(x) => (x, false)
    // (where UntagSmi(x) = x >> 1 with a ShiftOutZeros shift)
    if (kind == Kind::kSignedAdd && left == right) {
      uint16_t amount;
      if (V<Word32> x; matcher_.MatchConstantShiftRightArithmeticShiftOutZeros(
                           left, &x, WordRepresentation::Word32(), &amount) &&
                       amount == 1) {
        return __ Tuple(x, __ Word32Constant(0));
      }
    }

    return Next::ReduceOverflowCheckedBinop(left, right, kind, rep);
  }

  V<Word32> REDUCE(Comparison)(V<Any> left, V<Any> right,
                               ComparisonOp::Kind kind,
                               RegisterRepresentation rep) {
    if (ShouldSkipOptimizationStep()) {
      return Next::ReduceComparison(left, right, kind, rep);
    }
    if (kind == ComparisonOp::Kind::kEqual) {
      return ReduceCompareEqual(left, right, rep);
    }
    if (rep == WordRepresentation::Word32()) {
      left = TryRemoveWord32ToWord64Conversion(V<Word>::Cast(left));
      right = TryRemoveWord32ToWord64Conversion(V<Word>::Cast(right));
    }
    using Kind = ComparisonOp::Kind;
    if (left == right &&
        !(rep == any_of(RegisterRepresentation::Float32(),
                        RegisterRepresentation::Float64())) &&
        kind == any_of(Kind::kSignedLessThanOrEqual,
                       Kind::kUnsignedLessThanOrEqual)) {
      switch (kind) {
        case Kind::kEqual:
          UNREACHABLE();
        case Kind::kUnsignedLessThanOrEqual:
        case Kind::kSignedLessThanOrEqual:
          return __ Word32Constant(1);
        case Kind::kUnsignedLessThan:
        case Kind::kSignedLessThan:
          return __ Word32Constant(0);
      }
    }
    // constant folding
    if (matcher_.Is<ConstantOp>(right) && matcher_.Is<ConstantOp>(left)) {
      switch (rep.value()) {
        case RegisterRepresentation::Word32():
        case RegisterRepresentation::Word64(): {
          if (kind ==
              any_of(Kind::kSignedLessThan, Kind::kSignedLessThanOrEqual)) {
            if (int64_t k1, k2; matcher_.MatchIntegralWordConstant(
                                    left, WordRepresentation(rep), &k1) &&
                                matcher_.MatchIntegralWordConstant(
                                    right, WordRepresentation(rep), &k2)) {
              switch (kind) {
                case Kind::kSignedLessThan:
                  return __ Word32Constant(k1 < k2);
                case Kind::kSignedLessThanOrEqual:
                  return __ Word32Constant(k1 <= k2);
                case Kind::kEqual:
                case Kind::kUnsignedLessThan:
                case Kind::kUnsignedLessThanOrEqual:
                  UNREACHABLE();
              }
            }
          } else {
            if (uint64_t k1, k2; matcher_.MatchIntegralWordConstant(
                                     left, WordRepresentation(rep), &k1) &&
                                 matcher_.MatchIntegralWordConstant(
                                     right, WordRepresentation(rep), &k2)) {
              switch (kind) {
                case Kind::kUnsignedLessThan:
                  return __ Word32Constant(k1 < k2);
                case Kind::kUnsignedLessThanOrEqual:
                  return __ Word32Constant(k1 <= k2);
                case Kind::kEqual:
                case Kind::kSignedLessThan:
                case Kind::kSignedLessThanOrEqual:
                  UNREACHABLE();
              }
            }
          }
          break;
        }
        case RegisterRepresentation::Float32(): {
          if (float k1, k2; matcher_.MatchFloat32Constant(left, &k1) &&
                            matcher_.MatchFloat32Constant(right, &k2)) {
            switch (kind) {
              case Kind::kSignedLessThan:
                return __ Word32Constant(k1 < k2);
              case Kind::kSignedLessThanOrEqual:
                return __ Word32Constant(k1 <= k2);
              case Kind::kEqual:
              case Kind::kUnsignedLessThan:
              case Kind::kUnsignedLessThanOrEqual:
                UNREACHABLE();
            }
          }
          break;
        }
        case RegisterRepresentation::Float64(): {
          if (double k1, k2; matcher_.MatchFloat64Constant(left, &k1) &&
                             matcher_.MatchFloat64Constant(right, &k2)) {
            switch (kind) {
              case Kind::kSignedLessThan:
                return __ Word32Constant(k1 < k2);
              case Kind::kSignedLessThanOrEqual:
                return __ Word32Constant(k1 <= k2);
              case Kind::kEqual:
              case Kind::kUnsignedLessThan:
              case Kind::kUnsignedLessThanOrEqual:
                UNREACHABLE();
            }
          }
          break;
        }
        default:
          UNREACHABLE();
      }
    }
1466 if (rep == RegisterRepresentation::Float64() &&
1469 return __ Comparison(
1473 }
    if (rep.IsWord()) {
      WordRepresentation rep_w{rep};
      if (kind == Kind::kUnsignedLessThanOrEqual) {
        // 0 <= x => true
        if (uint64_t k;
            matcher_.MatchIntegralWordConstant(left, rep_w, &k) && k == 0) {
          return __ Word32Constant(1);
        }
        // x <= MaxUint => true
        if (uint64_t k; matcher_.MatchIntegralWordConstant(right, rep_w, &k) &&
                        k == rep.MaxUnsignedValue()) {
          return __ Word32Constant(1);
        }
        // x <= 0 => x == 0
        if (uint64_t k;
            matcher_.MatchIntegralWordConstant(right, rep_w, &k) && k == 0) {
          return __ Equal(left, __ WordConstant(0, rep_w), rep_w);
        }
      }
      if (kind == Kind::kUnsignedLessThan) {
        // x < 0 => false
        if (uint64_t k;
            matcher_.MatchIntegralWordConstant(right, rep_w, &k) && k == 0) {
          return __ Word32Constant(0);
        }
        // MaxUint < x => false
        if (uint64_t k; matcher_.MatchIntegralWordConstant(left, rep_w, &k) &&
                        k == rep.MaxUnsignedValue()) {
          return __ Word32Constant(0);
        }
      }
      {
        // (x >> k) </<= (y >> k) => x </<= y if shifts reversible
        V<Word> x, y;
        uint16_t k1, k2;
        if (matcher_.MatchConstantShiftRightArithmeticShiftOutZeros(
                left, &x, rep_w, &k1) &&
            matcher_.MatchConstantShiftRightArithmeticShiftOutZeros(
                right, &y, rep_w, &k2) &&
            k1 == k2) {
          return __ Comparison(x, y, kind, rep_w);
        }
      }
      {
        // (x >> k1) </<= k2 => x </<= (k2 << k1) if shifts reversible
        // Only perform the transformation if the shift is not used yet, to
        // avoid keeping both the shift and x alive.
        V<Word> x;
        uint16_t k1;
        int64_t k2;
        if (matcher_.MatchConstantShiftRightArithmeticShiftOutZeros(
                left, &x, rep_w, &k1) &&
            matcher_.MatchIntegralWordConstant(right, rep_w, &k2) &&
            CountLeadingSignBits(k2, rep_w) > k1) {
          if (matcher_.Get(left).saturated_use_count.IsZero()) {
            return __ Comparison(
                x, __ WordConstant(base::bits::Unsigned(k2) << k1, rep_w), kind,
                rep_w);
          } else if constexpr (reducer_list_contains<
                                   ReducerList, ValueNumberingReducer>::value) {
            // If the shift has uses, we only apply the transformation if the
            // result would be GVNed away.
            V<Word> rhs =
                __ WordConstant(base::bits::Unsigned(k2) << k1, rep_w);
            static_assert(ComparisonOp::input_count == 2);
            static_assert(sizeof(ComparisonOp) == 8);
            base::SmallVector<OperationStorageSlot, 32> storage;
            ComparisonOp* cmp =
                CreateOperation<ComparisonOp>(storage, x, rhs, kind, rep_w);
            if (__ WillGVNOp(*cmp)) {
              return __ Comparison(x, rhs, kind, rep_w);
            }
          }
        }
        // k2 </<= (x >> k1) => (k2 << k1) </<= x if shifts reversible
        // Only perform the transformation if the shift is not used yet, to
        // avoid keeping both the shift and x alive.
        if (matcher_.MatchConstantShiftRightArithmeticShiftOutZeros(
                right, &x, rep_w, &k1) &&
            matcher_.MatchIntegralWordConstant(left, rep_w, &k2) &&
            CountLeadingSignBits(k2, rep_w) > k1) {
          if (matcher_.Get(right).saturated_use_count.IsZero()) {
            return __ Comparison(
                __ WordConstant(base::bits::Unsigned(k2) << k1, rep_w), x, kind,
                rep_w);
          } else if constexpr (reducer_list_contains<
                                   ReducerList, ValueNumberingReducer>::value) {
            // If the shift has uses, we only apply the transformation if the
            // result would be GVNed away.
            V<Word> lhs =
                __ WordConstant(base::bits::Unsigned(k2) << k1, rep_w);
            static_assert(ComparisonOp::input_count == 2);
            static_assert(sizeof(ComparisonOp) == 8);
            base::SmallVector<OperationStorageSlot, 32> storage;
            ComparisonOp* cmp =
                CreateOperation<ComparisonOp>(storage, lhs, x, kind, rep_w);
            if (__ WillGVNOp(*cmp)) {
              return __ Comparison(lhs, x, kind, rep_w);
            }
          }
        }
      }
      // Map 64-bit to 32-bit comparisons.
      if (rep_w == WordRepresentation::Word64()) {
        std::optional<bool> left_sign_extended;
        std::optional<bool> right_sign_extended;
        if (IsWord32ConvertedToWord64(left, &left_sign_extended) &&
            IsWord32ConvertedToWord64(right, &right_sign_extended)) {
          if (left_sign_extended != true && right_sign_extended != true) {
            // Both sides were zero-extended, so the resulting comparison always
            // behaves unsigned even if it was a signed 64-bit comparison.
            auto SetSigned = [](Kind kind, bool is_signed) {
              switch (kind) {
                case Kind::kSignedLessThan:
                case Kind::kUnsignedLessThan:
                  return is_signed ? Kind::kSignedLessThan
                                   : Kind::kUnsignedLessThan;
                case Kind::kSignedLessThanOrEqual:
                case Kind::kUnsignedLessThanOrEqual:
                  return is_signed ? Kind::kSignedLessThanOrEqual
                                   : Kind::kUnsignedLessThanOrEqual;
                case Kind::kEqual:
                  UNREACHABLE();
              }
            };
            return __ Comparison(
                UndoWord32ToWord64Conversion(left),
                UndoWord32ToWord64Conversion(right),
                SetSigned(kind, false), WordRepresentation::Word32());
          } else if (left_sign_extended != false &&
                     right_sign_extended != false) {
            // Both sides were sign-extended, this preserves both signed and
            // unsigned comparisons.
            return __ Comparison(
                UndoWord32ToWord64Conversion(left),
                UndoWord32ToWord64Conversion(right), kind,
                WordRepresentation::Word32());
          }
        }
      }
    }
    return Next::ReduceComparison(left, right, kind, rep);
  }

  V<Word> REDUCE(Shift)(V<Word> left, V<Word32> right, ShiftOp::Kind kind,
                        WordRepresentation rep) {
    if (ShouldSkipOptimizationStep()) {
      return Next::ReduceShift(left, right, kind, rep);
    }

    if (rep == WordRepresentation::Word32()) {
      left = TryRemoveWord32ToWord64Conversion(left);
    }

    using Kind = ShiftOp::Kind;
    uint64_t c_unsigned;
    int64_t c_signed;
    if (matcher_.MatchIntegralWordConstant(left, rep, &c_unsigned, &c_signed)) {
      if (uint32_t amount;
          matcher_.MatchIntegralWord32Constant(right, &amount)) {
        amount = amount & (rep.bit_width() - 1);
        switch (kind) {
          case Kind::kShiftRightArithmeticShiftOutZeros:
            if (base::bits::CountTrailingZeros(c_signed) < amount) {
              // This assumes that we never hoist operations to before their
              // original place in the control flow.
              __ Unreachable();
              return V<Word>::Invalid();
            }
            [[fallthrough]];
          case Kind::kShiftRightArithmetic:
            switch (rep.value()) {
              case WordRepresentation::Word32():
                return __ Word32Constant(static_cast<int32_t>(c_signed) >>
                                         amount);
              case WordRepresentation::Word64():
                return __ Word64Constant(c_signed >> amount);
            }
          case Kind::kShiftRightLogical:
            switch (rep.value()) {
              case WordRepresentation::Word32():
                return __ Word32Constant(static_cast<uint32_t>(c_unsigned) >>
                                         amount);
              case WordRepresentation::Word64():
                return __ Word64Constant(c_unsigned >> amount);
            }
          case Kind::kShiftLeft:
            return __ WordConstant(c_unsigned << amount, rep);
          case Kind::kRotateRight:
            switch (rep.value()) {
              case WordRepresentation::Word32():
                return __ Word32Constant(base::bits::RotateRight32(
                    static_cast<uint32_t>(c_unsigned), amount));
              case WordRepresentation::Word64():
                return __ Word64Constant(
                    base::bits::RotateRight64(c_unsigned, amount));
            }
          case Kind::kRotateLeft:
            switch (rep.value()) {
              case WordRepresentation::Word32():
                return __ Word32Constant(base::bits::RotateLeft32(
                    static_cast<uint32_t>(c_unsigned), amount));
              case WordRepresentation::Word64():
                return __ Word64Constant(
                    base::bits::RotateLeft64(c_unsigned, amount));
            }
        }
      }
    }
    if (int32_t amount; matcher_.MatchIntegralWord32Constant(right, &amount) &&
                        0 <= amount && amount < rep.bit_width()) {
      if (amount == 0) {
        return left;
      }
      if (kind == Kind::kShiftLeft) {
        // If x >> K only shifted out zeros:
        //   (x >> K) << L => x          if K == L
        //   (x >> K) << L => x >> (K-L) if K > L
        //   (x >> K) << L => x << (L-K) if K < L
        // Since this is used for Smi untagging, we currently only need it for
        // signed shifts.
        int k;
        V<Word> x;
        if (matcher_.MatchConstantShift(
                left, &x, Kind::kShiftRightArithmeticShiftOutZeros, rep, &k)) {
          int32_t l = amount;
          if (k == l) {
            return x;
          } else if (k > l) {
            return __ ShiftRightArithmeticShiftOutZeros(
                x, __ Word32Constant(k - l), rep);
          } else if (k < l) {
            return __ ShiftLeft(x, __ Word32Constant(l - k), rep);
          }
        }
        // (x >>> K) << K => x & ~(2^K - 1)
        // (x >> K) << K => x & ~(2^K - 1)
        if (matcher_.MatchConstantRightShift(left, &x, rep, &k) &&
            k == amount) {
          return __ WordBitwiseAnd(
              x, __ WordConstant(rep.MaxUnsignedValue() << k, rep), rep);
        }
      }
      if (kind == any_of(Kind::kShiftRightArithmetic,
                         Kind::kShiftRightArithmeticShiftOutZeros)) {
        V<Word> x;
        int left_shift_amount;
        // (x << k) >> k
        if (matcher_.MatchConstantShift(left, &x, ShiftOp::Kind::kShiftLeft,
                                        rep, &left_shift_amount) &&
            amount == left_shift_amount) {
          // x << (bit_width - 1) >> (bit_width - 1) => 0 - x if x is 0 or 1
          if (amount == rep.bit_width() - 1 && IsBit(x)) {
            return __ WordSub(__ WordConstant(0, rep), x, rep);
          }
          // x << (bit_width - 8) >> (bit_width - 8) => x if x is within Int8
          if (amount <= rep.bit_width() - 8 && IsInt8(x)) {
            return x;
          }
          // x << (bit_width - 16) >> (bit_width - 16) => x if x is within Int16
          if (amount <= rep.bit_width() - 16 && IsInt16(x)) {
            return x;
          }
        }
      }
      if (rep == WordRepresentation::Word32() &&
          SupportedOperations::word32_shift_is_safe()) {
        // Remove the explicit 'and' with 0x1F if the shift provided by the
        // machine instruction matches that required by JavaScript.
        if (V<Word32> a, b; matcher_.MatchBitwiseAnd(
                right, &a, &b, WordRepresentation::Word32())) {
#if defined(__clang__)
          static_assert(0x1f == WordRepresentation::Word32().bit_width() - 1);
#endif
          if (uint32_t b_value;
              matcher_.MatchIntegralWord32Constant(b, &b_value) &&
              b_value == 0x1f) {
            return __ Shift(left, a, kind, rep);
          }
        }
      }
    }
    return Next::ReduceShift(left, right, kind, rep);
  }

  V<None> REDUCE(Branch)(OpIndex condition, Block* if_true, Block* if_false,
                         BranchHint hint) {
    LABEL_BLOCK(no_change) {
      return Next::ReduceBranch(condition, if_true, if_false, hint);
    }
    if (ShouldSkipOptimizationStep()) goto no_change;

    // Try to replace the Branch by a Goto.
    if (std::optional<bool> decision = MatchBoolConstant(condition)) {
      __ Goto(*decision ? if_true : if_false);
      return V<None>::Invalid();
    }

    // Try to simplify the Branch's condition (eg, `if (x == 0) A else B` can
    // become `if (x) B else A`).
    bool negated = false;
    if (std::optional<OpIndex> new_condition =
            ReduceBranchCondition(condition, &negated)) {
      if (negated) {
        std::swap(if_true, if_false);
        hint = NegateBranchHint(hint);
      }

      return __ ReduceBranch(new_condition.value(), if_true, if_false, hint);
    }

    goto no_change;
  }

  V<None> REDUCE(DeoptimizeIf)(V<Word32> condition, V<FrameState> frame_state,
                               bool negated,
                               const DeoptimizeParameters* parameters) {
    if (ShouldSkipOptimizationStep()) {
      return Next::ReduceDeoptimizeIf(condition, frame_state, negated,
                                      parameters);
    }
    if (std::optional<bool> decision = MatchBoolConstant(condition)) {
      if (*decision != negated) {
        __ Deoptimize(frame_state, parameters);
      }
      // `DeoptimizeIf` doesn't produce a value.
      return V<None>::Invalid();
    }
    if (std::optional<V<Word32>> new_condition =
            ReduceBranchCondition(condition, &negated)) {
      return __ ReduceDeoptimizeIf(new_condition.value(), frame_state, negated,
                                   parameters);
    } else {
      return Next::ReduceDeoptimizeIf(condition, frame_state, negated,
                                      parameters);
    }
  }

#if V8_ENABLE_WEBASSEMBLY
  V<None> REDUCE(TrapIf)(V<Word32> condition, OptionalV<FrameState> frame_state,
                         bool negated, TrapId trap_id) {
    LABEL_BLOCK(no_change) {
      return Next::ReduceTrapIf(condition, frame_state, negated, trap_id);
    }
    if (ShouldSkipOptimizationStep()) goto no_change;
    if (std::optional<bool> decision = MatchBoolConstant(condition)) {
      if (*decision != negated) {
        Next::ReduceTrapIf(condition, frame_state, negated, trap_id);
        __ Unreachable();
      }
      // `TrapIf` doesn't produce a value.
      return V<None>::Invalid();
    }
    if (std::optional<V<Word32>> new_condition =
            ReduceBranchCondition(condition, &negated)) {
      return __ ReduceTrapIf(new_condition.value(), frame_state, negated,
                             trap_id);
    } else {
      goto no_change;
    }
  }
#endif  // V8_ENABLE_WEBASSEMBLY

  V<Any> REDUCE(Select)(V<Word32> cond, V<Any> vtrue, V<Any> vfalse,
                        RegisterRepresentation rep, BranchHint hint,
                        SelectOp::Implementation implem) {
    LABEL_BLOCK(no_change) {
      return Next::ReduceSelect(cond, vtrue, vfalse, rep, hint, implem);
    }
    if (ShouldSkipOptimizationStep()) goto no_change;

    // Try to remove the Select.
    if (std::optional<bool> decision = MatchBoolConstant(cond)) {
      return *decision ? vtrue : vfalse;
    }

    goto no_change;
  }

  V<None> REDUCE(StaticAssert)(V<Word32> condition, const char* source) {
    LABEL_BLOCK(no_change) {
      return Next::ReduceStaticAssert(condition, source);
    }
    if (std::optional<bool> decision = MatchBoolConstant(condition)) {
      if (*decision) {
        // Drop the assert, the condition holds true.
        return V<None>::Invalid();
      } else {
        // Leave the assert, as the condition is not true.
        goto no_change;
      }
    }
    goto no_change;
  }

  V<None> REDUCE(Switch)(V<Word32> input, base::Vector<SwitchOp::Case> cases,
                         Block* default_case, BranchHint default_hint) {
    LABEL_BLOCK(no_change) {
      return Next::ReduceSwitch(input, cases, default_case, default_hint);
    }
    if (ShouldSkipOptimizationStep()) goto no_change;
    if (int32_t value; matcher_.MatchIntegralWord32Constant(input, &value)) {
      for (const SwitchOp::Case& if_value : cases) {
        if (if_value.value == value) {
          __ Goto(if_value.destination);
          return {};
        }
      }
      __ Goto(default_case);
      return {};
    }
    goto no_change;
  }
1887
1890 WriteBarrierKind write_barrier, int32_t offset,
1891 uint8_t element_scale,
1892 bool maybe_initializing_or_transitioning,
1893 IndirectPointerTag maybe_indirect_pointer_tag) {
1894 LABEL_BLOCK(no_change) {
1895 return Next::ReduceStore(base_idx, index, value, kind, stored_rep,
1896 write_barrier, offset, element_scale,
1897 maybe_initializing_or_transitioning,
1898 maybe_indirect_pointer_tag);
1899 }
1900 if (ShouldSkipOptimizationStep()) goto no_change;
1901#if V8_TARGET_ARCH_32_BIT
1902 if (kind.is_atomic && stored_rep.SizeInBytes() == 8) {
1903 // AtomicWord32PairOp (as used by Int64Lowering) cannot handle
1904 // element_scale != 0 currently.
1905 // TODO(jkummerow): Add support for element_scale in AtomicWord32PairOp.
1906 goto no_change;
1907 }
1908#endif
1909 if (stored_rep.SizeInBytes() <= 4) {
1910 value = TryRemoveWord32ToWord64Conversion(value);
1911 }
1912 index = ReduceMemoryIndex(index.value_or_invalid(), &offset, &element_scale,
1913 kind.tagged_base);
1914 switch (stored_rep) {
1915 case MemoryRepresentation::Uint8():
1916 case MemoryRepresentation::Int8():
1917 value = ReduceWithTruncation(value, std::numeric_limits<uint8_t>::max(),
1918 WordRepresentation::Word32());
1919 break;
1920 case MemoryRepresentation::Uint16():
1921 case MemoryRepresentation::Int16():
1922 value =
1923 ReduceWithTruncation(value, std::numeric_limits<uint16_t>::max(),
1924 WordRepresentation::Word32());
1925 break;
1926 case MemoryRepresentation::Uint32():
1927 case MemoryRepresentation::Int32():
1928 value =
1929 ReduceWithTruncation(value, std::numeric_limits<uint32_t>::max(),
1930 WordRepresentation::Word32());
1931 break;
1932 default:
1933 break;
1934 }
1935
1936 // If index is invalid and base is `left+right`, we use `left` as base and
1937 // `right` as index.
1938 if (!index.valid() && matcher_.Is<Opmask::kWord64Add>(base_idx)) {
1939 DCHECK_EQ(element_scale, 0);
1940 const WordBinopOp& base = matcher_.Cast<WordBinopOp>(base_idx);
1941 base_idx = base.left();
1942 index = base.right();
1943 // We go through the Store stack again, which might merge {index} into
1944 // {offset}, or just do other optimizations on this Store.
1945 __ Store(base_idx, index, value, kind, stored_rep, write_barrier, offset,
1946 element_scale, maybe_initializing_or_transitioning,
1947 maybe_indirect_pointer_tag);
1948 return V<None>::Invalid();
1949 }
1950
1951 return Next::ReduceStore(base_idx, index, value, kind, stored_rep,
1952 write_barrier, offset, element_scale,
1953 maybe_initializing_or_transitioning,
1954 maybe_indirect_pointer_tag);
1955 }
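 // For illustration (informal sketch): with an 8-bit stored_rep, the masking
 // above lets a store of Word32BitwiseAnd(x, 0xff) store `x` directly, since
 // the dropped high bits are never observed. Likewise, when the index is
 // invalid and the base is `p + q`, the store is re-emitted as
 // Store(base: p, index: q, ...) so that later steps can fold `q` into the
 // offset when it is constant.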
1956
1957 OpIndex REDUCE(Load)(OpIndex base_idx, OptionalOpIndex index,
1958 LoadOp::Kind kind, MemoryRepresentation loaded_rep,
1959 RegisterRepresentation result_rep, int32_t offset,
1960 uint8_t element_scale) {
1961 LABEL_BLOCK(no_change) {
1962 return Next::ReduceLoad(base_idx, index, kind, loaded_rep, result_rep,
1963 offset, element_scale);
1964 }
1965 if (ShouldSkipOptimizationStep()) goto no_change;
1966#if V8_TARGET_ARCH_32_BIT
1967 if (kind.is_atomic && loaded_rep.SizeInBytes() == 8) {
1968 // AtomicWord32PairOp (as used by Int64Lowering) cannot handle
1969 // element_scale != 0 currently.
1970 // TODO(jkummerow): Add support for element_scale in AtomicWord32PairOp.
1971 goto no_change;
1972 }
1973#endif
1974
1975 while (true) {
1976 index = ReduceMemoryIndex(index.value_or_invalid(), &offset,
1977 &element_scale, kind.tagged_base);
1978 if (!kind.tagged_base && !index.valid()) {
1979 if (V<WordPtr> left, right;
1980 matcher_.MatchWordAdd(base_idx, &left, &right,
1981 WordRepresentation::WordPtr()) &&
1982 TryAdjustOffset(&offset, matcher_.Get(right), element_scale,
1983 kind.tagged_base)) {
1984 base_idx = left;
1985 continue;
1986 }
1987 }
1988 break;
1989 }
1990
1991 if (!index.valid() && matcher_.Is<ConstantOp>(base_idx)) {
1992 const ConstantOp& base = matcher_.Cast<ConstantOp>(base_idx);
1993 if (base.kind == any_of(ConstantOp::Kind::kHeapObject,
1994 ConstantOp::Kind::kCompressedHeapObject)) {
1995 if (offset == HeapObject::kMapOffset) {
1996 // Only a few loads should be loading the map from a ConstantOp
1997 // HeapObject, so unparking the JSHeapBroker here rather than before
1998 // the optimization pass itself is probably more efficient.
1999
2000 DCHECK_IMPLIES(
2001 __ data() -> pipeline_kind() != TurboshaftPipelineKind::kCSA,
2002 broker != nullptr);
2003 if (broker != nullptr) {
2004 UnparkedScopeIfNeeded scope(broker);
2005 AllowHandleDereference allow_handle_dereference;
2006 OptionalMapRef map = TryMakeRef(broker, base.handle()->map());
2007 if (MapLoadCanBeConstantFolded(map)) {
2008 return __ HeapConstant(map->object());
2009 }
2010 }
2011 }
2012 // TODO(dmercadier): consider constant-folding other accesses, in
2013 // particular for constant objects (i.e., if
2014 // base.handle()->InReadOnlySpace() is true). We have to be a bit
2015 // careful though, because the load could be invalid (since we could
2016 // be in unreachable code). (All objects have a map, so loading the
2017 // map should always be safe, regardless of whether we are generating
2018 // unreachable code or not.)
2019 }
2020 }
2021
2022 // If index is invalid and base is `left+right`, we use `left` as base and
2023 // `right` as index.
2024 if (!index.valid() && matcher_.Is<Opmask::kWord64Add>(base_idx)) {
2025 DCHECK_EQ(element_scale, 0);
2026 const WordBinopOp& base = matcher_.Cast<WordBinopOp>(base_idx);
2027 base_idx = base.left();
2028 index = base.right();
2029 // We go through the Load stack again, which might merge {index} into
2030 // {offset}, or just do other optimizations on this Load.
2031 return __ Load(base_idx, index, kind, loaded_rep, result_rep, offset,
2032 element_scale);
2033 }
2034
2035 return Next::ReduceLoad(base_idx, index, kind, loaded_rep, result_rep,
2036 offset, element_scale);
2037 }
2038
2039#if V8_ENABLE_WEBASSEMBLY
2040#ifdef V8_TARGET_ARCH_ARM64
2041 V<Any> REDUCE(Simd128ExtractLane)(V<Simd128> input,
2042 Simd128ExtractLaneOp::Kind kind,
2043 uint8_t lane) {
2044 LABEL_BLOCK(no_change) {
2045 return Next::ReduceSimd128ExtractLane(input, kind, lane);
2046 }
2047 if (ShouldSkipOptimizationStep()) goto no_change;
2048
2049 if (lane != 0) {
2050 goto no_change;
2051 }
2052
2053 const Simd128BinopOp* binop = matcher_.TryCast<Simd128BinopOp>(input);
2054 if (!binop) {
2055 goto no_change;
2056 }
2057
2058 // Support pairwise addition: int and fp.
2059 switch (binop->kind) {
2060 default:
2061 goto no_change;
2062 case Simd128BinopOp::Kind::kI8x16Add:
2063 case Simd128BinopOp::Kind::kI16x8Add:
2064 case Simd128BinopOp::Kind::kI32x4Add:
2065 case Simd128BinopOp::Kind::kF32x4Add:
2066 case Simd128BinopOp::Kind::kI64x2Add:
2067 case Simd128BinopOp::Kind::kF64x2Add:
2068 break;
2069 }
2070
2071 auto MatchShuffle =
2072 [this](V<Simd128> maybe_shuffle) -> const Simd128ShuffleOp* {
2073 if (const Simd128ShuffleOp* shuffle =
2074 matcher_.TryCast<Simd128ShuffleOp>(maybe_shuffle)) {
2075 if (shuffle->kind == Simd128ShuffleOp::Kind::kI8x16) {
2076 return shuffle;
2077 }
2078 }
2079 return nullptr;
2080 };
2081
2082 auto MatchBinop =
2083 [this](
2084 V<Simd128> maybe_binop,
2085 Simd128BinopOp::Kind required_binop_kind) -> const Simd128BinopOp* {
2086 if (const Simd128BinopOp* binop =
2087 matcher_.TryCast<Simd128BinopOp>(maybe_binop)) {
2088 if (required_binop_kind == binop->kind) {
2089 return binop;
2090 }
2091 }
2092 return nullptr;
2093 };
2094
2095 // We're going to look for vector reductions performed with
2096 // shuffles and binops. The TS operations are defined as pairwise
2097 // to map well onto hardware, although the ordering is only
2098 // important for FP operations. For an example of the Word32
2099 // UpperToLower case:
2100 //
2101 // input = (V<Simd128>)
2102 // shuffle1 = (Simd128ShuffleOp input, input, [ 2, 3, X, X])
2103 // add1 = (Simd128BinopOp input, shuffle1)
2104 // shuffle2 = (Simd128ShuffleOp add1, add1, [1, X, X, X])
2105 // add2 = (Simd128BinopOp add1, shuffle2)
2106 // result = (ExtractLaneOp add2, 0)
2107
2108 // Walk up from binop to discover the tree of binops and shuffles:
2109 // (extract (binop (binop (reduce_input), shuffle), shuffle), 0)
2110 base::SmallVector<const Simd128ShuffleOp*, 4> shuffles;
2111 base::SmallVector<const Simd128BinopOp*, 4> binops;
2112 binops.push_back(binop);
2113 while (!binops.empty()) {
2114 const Simd128BinopOp* binop = binops.back();
2115 binops.pop_back();
2116 V<Simd128> operands[2] = {binop->left(), binop->right()};
2117 for (unsigned i = 0; i < 2; ++i) {
2118 V<Simd128> operand = operands[i];
2119 if (const Simd128ShuffleOp* shuffle = MatchShuffle(operand)) {
2120 // Ensure that the input to the shuffle is also the other input to
2121 // the current binop. We take the shuffle's left input because all of
2122 // the shuffle pattern matching is specified for, and tests, that input.
2123 V<Simd128> shuffle_in = shuffle->left();
2124 V<Simd128> other_operand = operands[i ^ 1];
2125 if (shuffle_in != other_operand) {
2126 break;
2127 }
2128 shuffles.push_back(shuffle);
2129 if (const Simd128BinopOp* other_binop =
2130 MatchBinop(shuffle_in, binop->kind)) {
2131 binops.push_back(other_binop);
2132 break;
2133 }
2134 }
2135 }
2136 }
2137 if (shuffles.empty()) {
2138 goto no_change;
2139 }
2140
2141 // Reverse so that they're in execution order, just for readability.
2142 std::reverse(shuffles.begin(), shuffles.end());
2143 V<Simd128> reduce_input = shuffles.front()->left();
2144 MachineRepresentation rep = Simd128ExtractLaneOp::element_rep(kind);
2145 switch (rep) {
2146 default:
2147 goto no_change;
2148 case MachineRepresentation::kWord8: {
2149 if (shuffles.size() == 4) {
2150 const uint8_t* shuffle1 = shuffles[0]->shuffle;
2151 const uint8_t* shuffle2 = shuffles[1]->shuffle;
2152 const uint8_t* shuffle3 = shuffles[2]->shuffle;
2153 const uint8_t* shuffle4 = shuffles[3]->shuffle;
2154 if (wasm::SimdShuffle::TryMatch8x16UpperToLowerReduce(
2155 shuffle1, shuffle2, shuffle3, shuffle4)) {
2156 V<Simd128> reduce = __ Simd128Reduce(
2157 reduce_input, Simd128ReduceOp::Kind::kI8x16AddReduce);
2158 return __ Simd128ExtractLane(reduce, kind, 0);
2159 }
2160 }
2161 break;
2162 }
2163 case MachineRepresentation::kWord16: {
2164 if (shuffles.size() == 3) {
2165 const uint8_t* shuffle1 = shuffles[0]->shuffle;
2166 const uint8_t* shuffle2 = shuffles[1]->shuffle;
2167 const uint8_t* shuffle3 = shuffles[2]->shuffle;
2168 if (wasm::SimdShuffle::TryMatch16x8UpperToLowerReduce(
2169 shuffle1, shuffle2, shuffle3)) {
2170 V<Simd128> reduce = __ Simd128Reduce(
2171 reduce_input, Simd128ReduceOp::Kind::kI16x8AddReduce);
2172 return __ Simd128ExtractLane(reduce, kind, 0);
2173 }
2174 }
2175 break;
2176 }
2177 case MachineRepresentation::kWord32: {
2178 if (shuffles.size() == 2) {
2179 const uint8_t* shuffle1 = shuffles[0]->shuffle;
2180 const uint8_t* shuffle2 = shuffles[1]->shuffle;
2181 if (wasm::SimdShuffle::TryMatch32x4UpperToLowerReduce(shuffle1,
2182 shuffle2)) {
2183 V<Simd128> reduce = __ Simd128Reduce(
2184 reduce_input, Simd128ReduceOp::Kind::kI32x4AddReduce);
2185 return __ Simd128ExtractLane(reduce, kind, 0);
2186 }
2187 }
2188 break;
2189 }
2190 case MachineRepresentation::kFloat32: {
2191 if (shuffles.size() == 2) {
2192 const uint8_t* shuffle1 = shuffles[0]->shuffle;
2193 const uint8_t* shuffle2 = shuffles[1]->shuffle;
2194 if (wasm::SimdShuffle::TryMatch32x4PairwiseReduce(shuffle1,
2195 shuffle2)) {
2196 V<Simd128> reduce = __ Simd128Reduce(
2197 reduce_input, Simd128ReduceOp::Kind::kF32x4AddReduce);
2198 return __ Simd128ExtractLane(reduce, kind, 0);
2199 }
2200 }
2201 break;
2202 }
2203 case MachineRepresentation::kWord64:
2204 case MachineRepresentation::kFloat64: {
2205 if (shuffles.size() == 1) {
2206 uint8_t shuffle64x2[2];
2207 if (wasm::SimdShuffle::TryMatch64x2Shuffle(shuffles[0]->shuffle,
2208 shuffle64x2) &&
2209 wasm::SimdShuffle::TryMatch64x2Reduce(shuffle64x2)) {
2210 V<Simd128> reduce =
2211 rep == MachineRepresentation::kWord64
2212 ? __ Simd128Reduce(reduce_input,
2213 Simd128ReduceOp::Kind::kI64x2AddReduce)
2214 : __ Simd128Reduce(reduce_input,
2215 Simd128ReduceOp::Kind::kF64x2AddReduce);
2216 return __ Simd128ExtractLane(reduce, kind, 0);
2217 }
2218 }
2219 break;
2220 }
2221 }
2222 goto no_change;
2223 }
2224#endif // V8_TARGET_ARCH_ARM64
2225#endif // V8_ENABLE_WEBASSEMBLY
2226
2227 private:
2228 V<Word32> ReduceCompareEqual(V<Any> left, V<Any> right,
2229 RegisterRepresentation rep) {
2230 if (left == right && !rep.IsFloat()) {
2231 return __ Word32Constant(1);
2232 }
2233 if (rep == WordRepresentation::Word32()) {
2234 left = TryRemoveWord32ToWord64Conversion(V<Word>::Cast(left));
2235 right = TryRemoveWord32ToWord64Conversion(V<Word>::Cast(right));
2236 }
2237 if (matcher_.Is<ConstantOp>(left) && !matcher_.Is<ConstantOp>(right)) {
2238 return ReduceCompareEqual(right, left, rep);
2239 }
2240 if (matcher_.Is<ConstantOp>(right)) {
2241 if (matcher_.Is<ConstantOp>(left)) {
2242 // k1 == k2 => k
2243 switch (rep.value()) {
2244 case RegisterRepresentation::Word32():
2245 case RegisterRepresentation::Word64(): {
2246 if (uint64_t k1, k2; matcher_.MatchIntegralWordConstant(
2247 left, WordRepresentation(rep), &k1) &&
2248 matcher_.MatchIntegralWordConstant(
2249 right, WordRepresentation(rep), &k2)) {
2250 return __ Word32Constant(k1 == k2);
2251 }
2252 break;
2253 }
2254 case RegisterRepresentation::Float32(): {
2255 if (float k1, k2; matcher_.MatchFloat32Constant(left, &k1) &&
2256 matcher_.MatchFloat32Constant(right, &k2)) {
2257 return __ Word32Constant(k1 == k2);
2258 }
2259 break;
2260 }
2261 case RegisterRepresentation::Float64(): {
2262 if (double k1, k2; matcher_.MatchFloat64Constant(left, &k1) &&
2263 matcher_.MatchFloat64Constant(right, &k2)) {
2264 return __ Word32Constant(k1 == k2);
2265 }
2266 break;
2267 }
2268 case RegisterRepresentation::Tagged(): {
2269 if (Handle<HeapObject> o1, o2;
2270 matcher_.MatchHeapConstant(left, &o1) &&
2271 matcher_.MatchHeapConstant(right, &o2)) {
2272 UnparkedScopeIfNeeded unparked(broker);
2273 if (IsString(*o1) && IsString(*o2)) {
2274 // If handles refer to the same object, we can eliminate the
2275 // check.
2276 if (o1.address() == o2.address()) return __ Word32Constant(1);
2277 // But if they are different, we cannot eliminate the
2278 // comparison, because the objects might be different now, but
2279 // if they contain the same content, they might be internalized
2280 // to the same object eventually.
2281 break;
2282 }
2283 return __ Word32Constant(o1.address() == o2.address());
2284 }
2285 break;
2286 }
2287 default:
2288 UNREACHABLE();
2289 }
2290 }
2291 if (rep.IsWord()) {
2292 WordRepresentation rep_w{rep};
2293 // x - y == 0 => x == y
2294 if (V<Word> x, y; matcher_.MatchWordSub(left, &x, &y, rep_w) &&
2295 matcher_.MatchZero(right)) {
2296 return ReduceCompareEqual(x, y, rep);
2297 }
2298 {
2299 // ((x >> shift_amount) & mask) == k
2300 // => (x & (mask << shift_amount)) == (k << shift_amount)
2301 V<Word> shift, x, mask_op;
2302 int shift_amount;
2303 uint64_t mask, k;
2304 if (matcher_.MatchBitwiseAnd(left, &shift, &mask_op, rep_w) &&
2305 matcher_.MatchConstantRightShift(shift, &x, rep_w,
2306 &shift_amount) &&
2307 matcher_.MatchIntegralWordConstant(mask_op, rep_w, &mask) &&
2308 matcher_.MatchIntegralWordConstant(right, rep_w, &k) &&
2309 mask <= rep.MaxUnsignedValue() >> shift_amount &&
2310 k <= rep.MaxUnsignedValue() >> shift_amount) {
2311 return ReduceCompareEqual(
2312 __ WordBitwiseAnd(
2313 x, __ WordConstant(mask << shift_amount, rep_w), rep_w),
2314 __ WordConstant(k << shift_amount, rep_w), rep_w);
2315 }
2316 }
2317 {
2318 // (x >> k1) == k2 => x == (k2 << k1) if shifts reversible
2319 // Only perform the transformation if the shift is not used yet, to
2320 // avoid keeping both the shift and x alive.
2321 V<Word> x;
2322 uint16_t k1;
2323 int64_t k2;
2324 if (matcher_.MatchConstantShiftRightArithmeticShiftOutZeros(
2325 left, &x, rep_w, &k1) &&
2326 matcher_.MatchIntegralWordConstant(right, rep_w, &k2) &&
2327 CountLeadingSignBits(k2, rep_w) > k1 &&
2328 matcher_.Get(left).saturated_use_count.IsZero()) {
2329 return __ Equal(
2330 x, __ WordConstant(base::bits::Unsigned(k2) << k1, rep_w),
2331 rep_w);
2332 }
2333 }
2334 // Map 64bit to 32bit equals.
2335 if (rep_w == WordRepresentation::Word64()) {
2336 std::optional<bool> left_sign_extended;
2337 std::optional<bool> right_sign_extended;
2338 if (IsWord32ConvertedToWord64(left, &left_sign_extended) &&
2339 IsWord32ConvertedToWord64(right, &right_sign_extended)) {
2340 if (left_sign_extended == right_sign_extended) {
2341 return __ Equal(
2342 UndoWord32ToWord64Conversion(V<Word>::Cast(left)),
2343 UndoWord32ToWord64Conversion(V<Word>::Cast(right)),
2344 WordRepresentation::Word32());
2345 }
2346 }
2347 }
2348 }
2349 }
2350 return Next::ReduceComparison(left, right, ComparisonOp::Kind::kEqual, rep);
2351 }
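 // Worked example (sketch) for the shift-into-mask rewrite above: the
 // comparison `((x >> 3) & 0xf) == 2` becomes `(x & 0x78) == 0x10`, since
 // both the mask (0xf) and the constant (2) still fit after shifting left by
 // 3; this saves the shift instruction.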
2352
2353 // Try to match a constant and add it to `offset`. Return `true` if
2354 // successful.
2355 bool TryAdjustOffset(int32_t* offset, const Operation& maybe_constant,
2356 uint8_t element_scale, bool tagged_base) {
2357 if (!maybe_constant.Is<ConstantOp>()) return false;
2358 const ConstantOp& constant = maybe_constant.Cast<ConstantOp>();
2359 if (constant.rep != WordRepresentation::WordPtr() ||
2360 !constant.IsIntegral()) {
2361 // This can only happen in unreachable code. Ideally, we identify this
2362 // situation and use `__ Unreachable()`. However, this is difficult to
2363 // do from within this helper, so we just don't perform the reduction.
2364 return false;
2365 }
2366 int64_t diff = constant.signed_integral();
2367 int32_t new_offset;
2368 if (element_scale > 31) return false;
2369 if (diff <= (std::numeric_limits<int32_t>::max() >> element_scale) &&
2370 diff >= (std::numeric_limits<int32_t>::min() >> element_scale) &&
2371 !base::bits::SignedAddOverflow32(
2372 *offset,
2373 static_cast<int32_t>(base::bits::Unsigned(diff) << element_scale),
2374 &new_offset) &&
2375 LoadOp::OffsetIsValid(new_offset, tagged_base)) {
2376 *offset = new_offset;
2377 return true;
2378 }
2379 return false;
2380 }
2381
2382 bool TryAdjustIndex(int32_t offset, OpIndex* index,
2383 const Operation& maybe_constant, uint8_t element_scale) {
2384 if (!maybe_constant.Is<ConstantOp>()) return false;
2385 const ConstantOp& constant = maybe_constant.Cast<ConstantOp>();
2386 if (constant.rep != WordRepresentation::WordPtr() ||
2387 !constant.IsIntegral()) {
2388 // This can only happen in unreachable code. Ideally, we identify this
2389 // situation and use `__ Unreachable()`. However, this is difficult to
2390 // do from within this helper, so we just don't perform the reduction.
2391 return false;
2392 }
2393 int64_t diff = constant.signed_integral();
2394 int64_t new_index;
2395 if (!base::bits::SignedAddOverflow64(offset, diff << element_scale,
2396 &new_index)) {
2397 *index = __ IntPtrConstant(new_index);
2398 return true;
2399 }
2400 return false;
2401 }
2402
2403 bool TryAdjustElementScale(uint8_t* element_scale, OpIndex maybe_constant) {
2404 uint64_t diff;
2405 if (!matcher_.MatchIntegralWordConstant(
2406 maybe_constant, WordRepresentation::WordPtr(), &diff)) {
2407 return false;
2408 }
2409 DCHECK_LT(*element_scale, WordRepresentation::WordPtr().bit_width());
2410 if (diff < (WordRepresentation::WordPtr().bit_width() -
2411 uint64_t{*element_scale})) {
2412 *element_scale += diff;
2413 return true;
2414 }
2415 return false;
2416 }
2417
2418 // Fold away operations in the computation of `index` while preserving the
2419 // value of `(index << element_scale) + offset` by updating `offset`,
2420 // `element_scale` and returning the updated `index`.
2421 // Return `OpIndex::Invalid()` if the resulting index is zero.
2422 OpIndex ReduceMemoryIndex(OpIndex index, int32_t* offset,
2423 uint8_t* element_scale, bool tagged_base) {
2424 while (index.valid()) {
2425 const Operation& index_op = matcher_.Get(index);
2426 if (TryAdjustOffset(offset, index_op, *element_scale, tagged_base)) {
2427 index = OpIndex::Invalid();
2428 *element_scale = 0;
2429 } else if (TryAdjustIndex(*offset, &index, index_op, *element_scale)) {
2430 *element_scale = 0;
2431 *offset = 0;
2432 // This function cannot optimize the index further since at this point
2433 // it's just a WordPtrConstant.
2434 return index;
2435 } else if (const ShiftOp* shift_op = index_op.TryCast<ShiftOp>()) {
2436 if (shift_op->kind == ShiftOp::Kind::kShiftLeft &&
2437 TryAdjustElementScale(element_scale, shift_op->right())) {
2438 index = shift_op->left();
2439 continue;
2440 }
2441 } else if (const WordBinopOp* binary_op =
2442 index_op.TryCast<WordBinopOp>()) {
2443 // TODO(jkummerow): This doesn't trigger for wasm32 memory operations
2444 // on 64-bit platforms, because `index_op` is a `Change` (from uint32
2445 // to uint64) in that case, and that Change's input is the addition
2446 // we're looking for. When we fix that, we must also teach the x64
2447 // instruction selector to support xchg with index *and* offset.
2448 if (binary_op->kind == WordBinopOp::Kind::kAdd &&
2449 TryAdjustOffset(offset, matcher_.Get(binary_op->right()),
2450 *element_scale, tagged_base)) {
2451 index = binary_op->left();
2452 continue;
2453 }
2454 }
2455 break;
2456 }
2457 return index;
2458 }
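 // Worked example (sketch): starting from index == `i << 1` with
 // element_scale == 1 and offset == 0, the shift is folded into the scale
 // (index == i, element_scale == 2); if `i` turns out to be the constant 4,
 // TryAdjustOffset then folds it away entirely (index invalid, offset == 16).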
2459
2460 bool IsFloat32ConvertedToFloat64(V<Any> value) {
2461 if (V<Float32> input; matcher_.MatchChange<Float32>(
2462 value, &input, ChangeOp::Kind::kFloatConversion,
2463 FloatRepresentation::Float32(),
2464 FloatRepresentation::Float64())) {
2465 return true;
2466 }
2467 if (double c;
2468 matcher_.MatchFloat64Constant(value, &c) && DoubleToFloat32(c) == c) {
2469 return true;
2470 }
2471 return false;
2472 }
2473
2474 V<Float32> UndoFloat32ConvertedToFloat64(V<Float64> value) {
2475 if (V<Float32> input; matcher_.MatchChange<Float32>(
2476 value, &input, ChangeOp::Kind::kFloatConversion,
2477 FloatRepresentation::Float32(),
2478 FloatRepresentation::Float64())) {
2479 return input;
2480 }
2481 if (double c;
2482 matcher_.MatchFloat64Constant(value, &c) && DoubleToFloat32(c) == c) {
2483 return __ Float32Constant(DoubleToFloat32(c));
2484 }
2485 UNREACHABLE();
2486 }
2487
2488 bool IsBit(V<Word> value) { return matcher_.Is<ComparisonOp>(value); }
2489
2490 bool IsInt8(V<Word> value) {
2491 if (auto* op = matcher_.TryCast<LoadOp>(value)) {
2492 return op->loaded_rep == MemoryRepresentation::Int8();
2493 }
2494 return false;
2495 }
2496
2497 bool IsInt16(V<Word> value) {
2498 if (auto* op = matcher_.TryCast<LoadOp>(value)) {
2499 return op->loaded_rep == any_of(MemoryRepresentation::Int16(),
2500 MemoryRepresentation::Int8());
2501 }
2502 return false;
2503 }
2504
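 // For illustration (sketch of the constant cases below): a Word64 constant
 // 5 is explained by either extension, so `sign_extended` is left unset; -5
 // is reported as sign-extended; values in 0x80000000..0xffffffff are
 // reported as zero-extended.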
2505 bool IsWord32ConvertedToWord64(V<Any> value,
2506 std::optional<bool>* sign_extended = nullptr) {
2507 if (const ChangeOp* change_op = matcher_.TryCast<ChangeOp>(value)) {
2508 if (change_op->from == WordRepresentation::Word32() &&
2509 change_op->to == WordRepresentation::Word64()) {
2510 if (change_op->kind == ChangeOp::Kind::kSignExtend) {
2511 if (sign_extended) *sign_extended = true;
2512 return true;
2513 } else if (change_op->kind == ChangeOp::Kind::kZeroExtend) {
2514 if (sign_extended) *sign_extended = false;
2515 return true;
2516 }
2517 }
2518 }
2519 if (int64_t c; matcher_.MatchIntegralWord64Constant(value, &c) &&
2520 c >= std::numeric_limits<int32_t>::min()) {
2521 if (c < 0) {
2522 if (sign_extended) *sign_extended = true;
2523 return true;
2524 } else if (c <= std::numeric_limits<int32_t>::max()) {
2525 // Sign- and zero-extension produce the same result.
2526 if (sign_extended) *sign_extended = {};
2527 return true;
2528 } else if (c <= std::numeric_limits<uint32_t>::max()) {
2529 if (sign_extended) *sign_extended = false;
2530 return true;
2531 }
2532 }
2533 return false;
2534 }
2534 }
2535
2536 V<Word32> UndoWord32ToWord64Conversion(V<Word> value) {
2537 DCHECK(IsWord32ConvertedToWord64(value));
2538 if (const ChangeOp* op = matcher_.TryCast<ChangeOp>(value)) {
2539 return V<Word32>::Cast(op->input());
2540 }
2541 return __ Word32Constant(matcher_.Cast<ConstantOp>(value).word32());
2542 }
2543
2544 V<Word> TryRemoveWord32ToWord64Conversion(V<Word> value) {
2545 if (const ChangeOp* op = matcher_.TryCast<ChangeOp>(value)) {
2546 if (op->from == WordRepresentation::Word32() &&
2547 op->to == WordRepresentation::Word64() &&
2548 op->kind == any_of(ChangeOp::Kind::kZeroExtend,
2549 ChangeOp::Kind::kSignExtend)) {
2550 return V<Word32>::Cast(op->input());
2551 }
2552 }
2553 return value;
2554 }
2555
2556 uint64_t TruncateWord(uint64_t value, WordRepresentation rep) {
2557 if (rep == WordRepresentation::Word32()) {
2558 return static_cast<uint32_t>(value);
2559 } else {
2560 DCHECK_EQ(rep, WordRepresentation::Word64());
2561 return value;
2562 }
2563 }
2564
2565 // Reduce the given value under the assumption that only the bits set in
2566 // `truncation_mask` will be observed.
2567 V<Word> ReduceWithTruncation(V<Word> value, uint64_t truncation_mask,
2568 WordRepresentation rep) {
2569 { // Remove bitwise-and with a mask whose zero-bits are not observed.
2570 V<Word> input, mask;
2571 uint64_t mask_value;
2572 if (matcher_.MatchBitwiseAnd(value, &input, &mask, rep) &&
2573 matcher_.MatchIntegralWordConstant(mask, rep, &mask_value)) {
2574 if ((mask_value & truncation_mask) == truncation_mask) {
2575 return ReduceWithTruncation(input, truncation_mask, rep);
2576 }
2577 }
2578 }
2579 {
2580 int left_shift_amount;
2581 int right_shift_amount;
2582 WordRepresentation value_rep;
2583 V<Word> left_shift;
2584 ShiftOp::Kind right_shift_kind;
2585 V<Word> left_shift_input;
2586 if (matcher_.MatchConstantShift(value, &left_shift, &right_shift_kind,
2587 &value_rep, &right_shift_amount) &&
2588 ShiftOp::IsRightShift(right_shift_kind) &&
2589 matcher_.MatchConstantShift(left_shift, &left_shift_input,
2590 ShiftOp::Kind::kShiftLeft, value_rep,
2591 &left_shift_amount) &&
2592 ((value_rep.MaxUnsignedValue() >> right_shift_amount) &
2593 truncation_mask) == truncation_mask) {
2594 if (left_shift_amount == right_shift_amount) {
2595 return left_shift_input;
2596 } else if (left_shift_amount < right_shift_amount) {
2597 V<Word32> shift_amount =
2598 __ Word32Constant(right_shift_amount - left_shift_amount);
2599 return __ Shift(left_shift_input, shift_amount, right_shift_kind,
2600 value_rep);
2601 } else if (left_shift_amount > right_shift_amount) {
2602 V<Word32> shift_amount =
2603 __ Word32Constant(left_shift_amount - right_shift_amount);
2604 return __ Shift(left_shift_input, shift_amount,
2605 ShiftOp::Kind::kShiftLeft, value_rep);
2606 }
2607 }
2608 }
2609 return value;
2610 }
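 // For illustration (sketch): with truncation_mask == 0xff (an 8-bit store),
 // `x & 0xff` reduces to `x`, and the sign-extension idiom `(x << 24) >> 24`
 // reduces to `x`, because the bits that could differ are outside the mask.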
2611
2612 V<Word> ReduceSignedDiv(V<Word> left, int64_t right, WordRepresentation rep) {
2613 // left / -1 => 0 - left
2614 if (right == -1) {
2615 return __ WordSub(__ WordConstant(0, rep), left, rep);
2616 }
2617 // left / 0 => 0
2618 if (right == 0) {
2619 return __ WordConstant(0, rep);
2620 }
2621 // left / 1 => left
2622 if (right == 1) {
2623 return left;
2624 }
2625 // left / MinSignedValue => left == MinSignedValue
2626 if (right == rep.MinSignedValue()) {
2627 V<Word32> equal_op = __ Equal(left, __ WordConstant(right, rep), rep);
2628 if (rep == WordRepresentation::Word64()) {
2629 return __ ChangeUint32ToUint64(equal_op);
2630 } else {
2631 return equal_op;
2632 }
2633 }
2634 // left / -right => -(left / right)
2635 if (right < 0) {
2636 DCHECK_NE(right, rep.MinSignedValue());
2637 return __ WordSub(__ WordConstant(0, rep),
2638 ReduceSignedDiv(left, Abs(right), rep), rep);
2639 }
2640
2641 V<Word> quotient = left;
2642 if (base::bits::IsPowerOfTwo(right)) {
2643 uint32_t shift = base::bits::WhichPowerOfTwo(right);
2644 DCHECK_GT(shift, 0);
2645 if (shift > 1) {
2646 quotient = __ ShiftRightArithmetic(quotient, rep.bit_width() - 1, rep);
2647 }
2648 quotient = __ ShiftRightLogical(quotient, rep.bit_width() - shift, rep);
2649 quotient = __ WordAdd(quotient, left, rep);
2650 quotient = __ ShiftRightArithmetic(quotient, shift, rep);
2651 return quotient;
2652 }
2653 DCHECK_GT(right, 0);
2654 // Compute the magic number for `right`, using a generic lambda to treat
2655 // 32- and 64-bit uniformly.
2656 auto LowerToMul = [this, left](auto right, WordRepresentation rep) {
2657 base::MagicNumbersForDivision<decltype(right)> magic =
2658 base::SignedDivisionByConstant(right);
2659 V<Word> quotient = __ IntMulOverflownBits(
2660 left, __ WordConstant(magic.multiplier, rep), rep);
2661 if (magic.multiplier < 0) {
2662 quotient = __ WordAdd(quotient, left, rep);
2663 }
2664 V<Word> sign_bit = __ ShiftRightLogical(left, rep.bit_width() - 1, rep);
2665 return __ WordAdd(__ ShiftRightArithmetic(quotient, magic.shift, rep),
2666 sign_bit, rep);
2667 };
2668 if (rep == WordRepresentation::Word32()) {
2669 return LowerToMul(static_cast<int32_t>(right),
2670 WordRepresentation::Word32());
2671 } else {
2672 DCHECK_EQ(rep, WordRepresentation::Word64());
2673 return LowerToMul(static_cast<int64_t>(right),
2674 WordRepresentation::Word64());
2675 }
2676 }
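 // Worked example (sketch): for Word32 `x / 3`, SignedDivisionByConstant
 // yields multiplier 0x55555556 and shift 0, so the emitted sequence is
 //   q = IntMulOverflownBits(x, 0x55555556)  // high half of the product
 //   q = q + (x >>> 31)                      // fixup for negative x
 // e.g. x == 7 gives q == 2, and x == -7 gives -3 + 1 == -2.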
2677
2678 V<Word> ReduceUnsignedDiv(V<Word> left, uint64_t right,
2679 WordRepresentation rep) {
2680 // left / 0 => 0
2681 if (right == 0) {
2682 return __ WordConstant(0, rep);
2683 }
2684 // left / 1 => left
2685 if (right == 1) {
2686 return left;
2687 }
2688 // left / 2^k => left >> k
2689 if (base::bits::IsPowerOfTwo(right)) {
2690 return __ ShiftRightLogical(left, base::bits::WhichPowerOfTwo(right),
2691 rep);
2692 }
2693 DCHECK_GT(right, 0);
2694 // If `right` is even, we can avoid using the expensive fixup by
2695 // shifting `left` upfront.
2696 unsigned const shift = base::bits::CountTrailingZeros(right);
2697 left = __ ShiftRightLogical(left, shift, rep);
2698 right >>= shift;
2699 // Compute the magic number for `right`, using a generic lambda to treat
2700 // 32- and 64-bit uniformly.
2701 auto LowerToMul = [this, left, shift](auto right, WordRepresentation rep) {
2702 base::MagicNumbersForDivision<decltype(right)> const mag =
2703 base::UnsignedDivisionByConstant(right, shift);
2704 V<Word> quotient = __ UintMulOverflownBits(
2705 left, __ WordConstant(mag.multiplier, rep), rep);
2706 if (mag.add) {
2707 DCHECK_GE(mag.shift, 1);
2708 // quotient = (((left - quotient) >> 1) + quotient) >> (mag.shift -
2709 // 1)
2710 quotient = __ ShiftRightLogical(
2711 __ WordAdd(
2712 __ ShiftRightLogical(__ WordSub(left, quotient, rep), 1, rep),
2713 quotient, rep),
2714 mag.shift - 1, rep);
2715 } else {
2716 quotient = __ ShiftRightLogical(quotient, mag.shift, rep);
2717 }
2718 return quotient;
2719 };
2720 if (rep == WordRepresentation::Word32()) {
2721 return LowerToMul(static_cast<uint32_t>(right),
2722 WordRepresentation::Word32());
2723 } else {
2724 DCHECK_EQ(rep, WordRepresentation::Word64());
2725 return LowerToMul(static_cast<uint64_t>(right),
2726 WordRepresentation::Word64());
2727 }
2728 }
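 // Worked example (sketch): for Word32 `x / 10` (unsigned), the trailing
 // zero is shifted out first (x >>> 1, divisor 5); the magic numbers for 5
 // are multiplier 0xcccccccd, shift 2, no fixup, so the result is
 //   UintMulOverflownBits(x >>> 1, 0xcccccccd) >>> 2
 // e.g. x == 100 gives hi(50 * 0xcccccccd) == 40, and 40 >>> 2 == 10.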
2729
2730 std::optional<V<Word32>> ReduceBranchCondition(V<Word32> condition,
2731 bool* negated) {
2732 // TODO(dmercadier): consider generalizing this function to both Word32
2733 // and Word64.
2734 bool reduced = false;
2735 while (true) {
2736 // x == 0 => x with flipped branches
2737 if (V<Word32> left, right;
2738 matcher_.MatchEqual(condition, &left, &right) &&
2739 matcher_.MatchZero(right)) {
2740 reduced = true;
2741 condition = left;
2742 *negated = !*negated;
2743 continue;
2744 }
2745 // x - y => x == y with flipped branches
2746 if (V<Word32> left, right; matcher_.MatchWordSub(
2747 condition, &left, &right, WordRepresentation::Word32())) {
2748 reduced = true;
2749 condition = __ Word32Equal(left, right);
2750 *negated = !*negated;
2751 continue;
2752 }
2753 // x & (1 << k) == (1 << k) => x & (1 << k)
2754 if (V<Word32> left, right;
2755 matcher_.MatchEqual(condition, &left, &right)) {
2756 V<Word32> x, mask;
2757 uint32_t k1, k2;
2758 if (matcher_.MatchBitwiseAnd(left, &x, &mask,
2759 WordRepresentation::Word32()) &&
2760 matcher_.MatchIntegralWord32Constant(mask, &k1) &&
2761 matcher_.MatchIntegralWord32Constant(right, &k2) && k1 == k2 &&
2762 base::bits::IsPowerOfTwo(k1)) {
2763 reduced = true;
2764 condition = left;
2765 continue;
2766 }
2767 }
2768 // (x >> k1) & k2 => x & (k2 << k1)
2769 {
2770 V<Word32> shift, k2_index, x;
2771 int k1_int;
2772 uint32_t k1, k2;
2773 if (matcher_.MatchBitwiseAnd(condition, &shift, &k2_index,
2774 WordRepresentation::Word32()) &&
2775 matcher_.MatchConstantRightShift(
2776 shift, &x, WordRepresentation::Word32(), &k1_int) &&
2777 matcher_.MatchIntegralWord32Constant(k2_index, &k2)) {
2778 k1 = static_cast<uint32_t>(k1_int);
2779 if (k1 <= base::bits::CountLeadingZeros(k2) &&
2780 (static_cast<uint64_t>(k2) << k1 <=
2781 std::numeric_limits<uint32_t>::max())) {
2782 return __ Word32BitwiseAnd(x, k2 << k1);
2783 }
2784 }
2785 }
2786 // Select(x, true, false) => x
2787 if (const SelectOp* select = matcher_.TryCast<SelectOp>(condition)) {
2788 auto left_val = MatchBoolConstant(select->vtrue());
2789 auto right_val = MatchBoolConstant(select->vfalse());
2790 if (left_val && right_val) {
2791 if (*left_val == *right_val) {
2792 // Select(x, v, v) => v
2793 return __ Word32Constant(*left_val);
2794 }
2795 if (*left_val == false) {
2796 // Select(x, false, true) => !x
2797 *negated = !*negated;
2798 }
2799 condition = select->cond();
2800 reduced = true;
2801 continue;
2802 }
2803 }
2804 break;
2805 }
2806 return reduced ? std::optional<V<Word32>>(condition) : std::nullopt;
2807 }
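 // For illustration (sketch of chained rewrites): a condition such as
 //   Word32Equal(Word32BitwiseAnd(Word32ShiftRightLogical(x, 4), 1), 0)
 // first loses the `== 0` (flipping *negated), and the remaining
 // `(x >> 4) & 1` is rewritten to `x & 0x10`, so the branch tests a single
 // bit without a shift.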
2808
2809 std::optional<bool> MatchBoolConstant(V<Any> condition) {
2810 if (uint32_t value;
2811 matcher_.MatchIntegralWord32Constant(condition, &value)) {
2812 return value != 0;
2813 }
2814 return std::nullopt;
2815 }
2816
2817 // Returns whether loading the map of an object whose map is {map} can be
2818 // constant-folded at compile time. For instance, doing this for strings
2819 // is not safe, since the map of a string could change during a GC, but
2820 // doing this for a HeapNumber is always safe.
2821 bool MapLoadCanBeConstantFolded(OptionalMapRef map) {
2822 if (!map.has_value()) return false;
2823
2824 if (map->IsJSObjectMap() && map->is_stable()) {
2825 broker->dependencies()->DependOnStableMap(*map);
2826 // For JS objects, this is only safe if the map is stable.
2827 return true;
2828 }
2829
2830 if (map->instance_type() ==
2831 any_of(BIG_INT_BASE_TYPE, HEAP_NUMBER_TYPE, ODDBALL_TYPE)) {
2832 return true;
2833 }
2834
2835 return false;
2836 }
2837
2838 static constexpr bool IsNegativePowerOfTwo(int64_t x) {
2839 if (x >= 0) return false;
2840 if (x == std::numeric_limits<int64_t>::min()) return true;
2841 int64_t x_abs = -x; // This can't overflow after the check above.
2842 DCHECK_GE(x_abs, 1); // The subtraction below can't underflow.
2843 return (x_abs & (x_abs - 1)) == 0;
2844 }
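 // For illustration: IsNegativePowerOfTwo(-8) is true (8 & 7 == 0), while
 // IsNegativePowerOfTwo(-6) is false; int64_t min gets an early-out because
 // negating it would overflow.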
2845
2846 static constexpr uint16_t CountLeadingSignBits(int64_t c,
2847 WordRepresentation rep) {
2848 return base::bits::CountLeadingSignBits(c) - (64 - rep.bit_width());
2849 }
2850
2851 JSHeapBroker* broker = __ data() -> broker();
2852 const OperationMatcher& matcher_ = __ matcher();
2853#if V8_ENABLE_WEBASSEMBLY
2854 const bool signalling_nan_possible = __ data() -> is_wasm();
2855#else
2856 static constexpr bool signalling_nan_possible = false;
2857#endif // V8_ENABLE_WEBASSEMBLY
2858};
2859
2860#include "src/compiler/turboshaft/undef-assembler-macros.inc"
2861
2862} // namespace v8::internal::compiler::turboshaft
2863
2864#endif // V8_COMPILER_TURBOSHAFT_MACHINE_OPTIMIZATION_REDUCER_H_