V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
simplified-lowering.cc
1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
7#include <limits>
8#include <optional>
9
11#include "src/base/logging.h"
35#include "src/flags/flags.h"
37#include "src/objects/objects.h"
38
39#if V8_ENABLE_WEBASSEMBLY
40#include "src/wasm/value-type.h"
41#endif // V8_ENABLE_WEBASSEMBLY
42
43namespace v8 {
44namespace internal {
45namespace compiler {
46
47// Macro for outputting trace information from representation inference.
48#define TRACE(...) \
49 do { \
50 if (v8_flags.trace_representation) PrintF(__VA_ARGS__); \
51 } while (false)
52
53const char* kSimplifiedLoweringReducerName = "SimplifiedLowering";
54
55// Representation selection and lowering of {Simplified} operators to machine
56// operators are intertwined. We use a fixpoint calculation to compute both the
57// output representation and the best possible lowering for {Simplified} nodes.
58// Representation change insertion ensures that all values are in the correct
59// machine representation after this phase, as dictated by the machine
60// operators themselves.
61enum Phase {
62 // 1.) PROPAGATE: Traverse the graph from the end, pushing usage information
63 // backwards from uses to definitions, around cycles in phis, according
64 // to local rules for each operator.
65 // During this phase, the usage information for a node determines the best
66 // possible lowering for each operator so far, and that in turn determines
67 // the output representation.
68 // Therefore, to be correct, this phase must iterate to a fixpoint before
69 // the next phase can begin.
70 PROPAGATE,
71
72 // 2.) RETYPE: Propagate types from type feedback forwards.
73 RETYPE,
74
75 // 3.) LOWER: perform lowering for all {Simplified} nodes by replacing some
76 // operators for some nodes, expanding some nodes to multiple nodes, or
77 // removing some (redundant) nodes.
78 // During this phase, use the {RepresentationChanger} to insert
79 // representation changes between uses that demand a particular
80 // representation and nodes that produce a different representation.
81 LOWER
82};
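// Illustrative sketch (not part of this file): the PROPAGATE phase is a
// backward dataflow fixpoint driven by a worklist. The toy graph below and
// the names ToyNode, Generalize and Propagate are hypothetical stand-ins for
// V8's Node, Truncation::Generalize and the per-operator propagation rules;
// only the join-until-stable pattern is meant to mirror the real phase.
#include <queue>
#include <vector>

struct ToyNode {
  std::vector<int> inputs;  // indices of input nodes
  int use = 0;              // toy lattice value: 0 = unused, larger = more general
};

// Lattice join: the result is at least as general as both arguments.
inline int Generalize(int a, int b) { return a > b ? a : b; }

inline void Propagate(std::vector<ToyNode>& graph, int end_id, int end_demand) {
  std::queue<int> worklist;
  graph[end_id].use = end_demand;
  worklist.push(end_id);
  while (!worklist.empty()) {
    int n = worklist.front();
    worklist.pop();
    for (int input : graph[n].inputs) {
      // Simplified local rule: a node demands from its inputs whatever is
      // demanded from it. Requeue an input only when its value changed; the
      // monotone join bounds the number of changes, so this terminates.
      int joined = Generalize(graph[input].use, graph[n].use);
      if (joined != graph[input].use) {
        graph[input].use = joined;
        worklist.push(input);
      }
    }
  }
}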
83
84namespace {
85
86MachineRepresentation MachineRepresentationFromArrayType(
87 ExternalArrayType array_type) {
88 switch (array_type) {
108 }
109 UNREACHABLE();
110}
111
112UseInfo CheckedUseInfoAsWord32FromHint(
113 NumberOperationHint hint, IdentifyZeros identify_zeros,
114 const FeedbackSource& feedback = FeedbackSource()) {
115 switch (hint) {
118 return UseInfo::CheckedSignedSmallAsWord32(identify_zeros, feedback);
121 DCHECK_EQ(identify_zeros, kIdentifyZeros);
122 return UseInfo::CheckedNumberAsWord32(feedback);
124 // Not used currently.
125 UNREACHABLE();
127 DCHECK_EQ(identify_zeros, kIdentifyZeros);
129 }
130 UNREACHABLE();
131}
132
133UseInfo CheckedUseInfoAsFloat64FromHint(
134 NumberOperationHint hint, const FeedbackSource& feedback,
135 IdentifyZeros identify_zeros = kDistinguishZeros) {
136 switch (hint) {
139 // Not used currently.
140 UNREACHABLE();
143 return UseInfo::CheckedNumberAsFloat64(identify_zeros, feedback);
145 return UseInfo::CheckedNumberOrBooleanAsFloat64(identify_zeros, feedback);
147 return UseInfo::CheckedNumberOrOddballAsFloat64(identify_zeros, feedback);
148 }
149 UNREACHABLE();
150}
151
152UseInfo TruncatingUseInfoFromRepresentation(MachineRepresentation rep) {
153 switch (rep) {
155 return UseInfo::TaggedSigned();
160 return UseInfo::AnyTagged();
164 return UseInfo::Float32();
174 return UseInfo::Bool();
183 UNREACHABLE();
184 }
185}
186
187UseInfo UseInfoForBasePointer(const FieldAccess& access) {
188 return access.tag() != 0 ? UseInfo::AnyTagged() : UseInfo::Word();
189}
190
191UseInfo UseInfoForBasePointer(const ElementAccess& access) {
192 return access.tag() != 0 ? UseInfo::AnyTagged() : UseInfo::Word();
193}
194
195void ReplaceEffectControlUses(Node* node, Node* effect, Node* control) {
196 for (Edge edge : node->use_edges()) {
198 edge.UpdateTo(control);
199 } else if (NodeProperties::IsEffectEdge(edge)) {
200 edge.UpdateTo(effect);
201 } else {
204 }
205 }
206}
207
208bool CanOverflowSigned32(const Operator* op, Type left, Type right,
209 TypeCache const* type_cache, Zone* type_zone) {
210 // We assume the inputs are checked Signed32 (or known statically to be
211 // Signed32). Technically, the inputs could also be minus zero, which we treat
212 // as 0 for the purpose of this function.
213 if (left.Maybe(Type::MinusZero())) {
214 left = Type::Union(left, type_cache->kSingletonZero, type_zone);
215 }
216 if (right.Maybe(Type::MinusZero())) {
217 right = Type::Union(right, type_cache->kSingletonZero, type_zone);
218 }
219 left = Type::Intersect(left, Type::Signed32(), type_zone);
220 right = Type::Intersect(right, Type::Signed32(), type_zone);
221 if (left.IsNone() || right.IsNone()) return false;
222 switch (op->opcode()) {
223 case IrOpcode::kSpeculativeSmallIntegerAdd:
224 return (left.Max() + right.Max() > kMaxInt) ||
225 (left.Min() + right.Min() < kMinInt);
226
227 case IrOpcode::kSpeculativeSmallIntegerSubtract:
228 return (left.Max() - right.Min() > kMaxInt) ||
229 (left.Min() - right.Max() < kMinInt);
230
231 default:
232 UNREACHABLE();
233 }
234}
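// Illustrative sketch (not part of this file): the test above compares the
// type bounds (doubles) of both operands against the int32 range. The same
// arithmetic for the addition case as a standalone, checkable helper;
// CanAddOverflowSigned32 is a hypothetical name.
constexpr bool CanAddOverflowSigned32(double left_min, double left_max,
                                      double right_min, double right_max) {
  // 2147483647 and -2147483648 correspond to kMaxInt and kMinInt above.
  return (left_max + right_max > 2147483647.0) ||
         (left_min + right_min < -2147483648.0);
}

// [0, 2^29] + [0, 2^29] cannot leave the int32 range, so no overflow check is
// needed; adding [1, 1] to [0, kMaxInt] can overflow.
static_assert(!CanAddOverflowSigned32(0.0, 536870912.0, 0.0, 536870912.0),
              "sum of two [0, 2^29] ranges stays inside int32");
static_assert(CanAddOverflowSigned32(0.0, 2147483647.0, 1.0, 1.0),
              "kMaxInt + 1 overflows int32");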
235
236bool IsSomePositiveOrderedNumber(Type type) {
237 return type.Is(Type::OrderedNumber()) && (type.IsNone() || type.Min() > 0);
238}
239
240inline bool IsLargeBigInt(Type type) {
241 return type.Is(Type::BigInt()) && !type.Is(Type::SignedBigInt64()) &&
242 !type.Is(Type::UnsignedBigInt64());
243}
244
245class JSONGraphWriterWithVerifierTypes : public JSONGraphWriter {
246 public:
247 JSONGraphWriterWithVerifierTypes(std::ostream& os, const TFGraph* graph,
248 const SourcePositionTable* positions,
249 const NodeOriginTable* origins,
250 SimplifiedLoweringVerifier* verifier)
251 : JSONGraphWriter(os, graph, positions, origins), verifier_(verifier) {}
252
253 protected:
254 std::optional<Type> GetType(Node* node) override {
255 return verifier_->GetType(node);
256 }
257
258 private:
259 SimplifiedLoweringVerifier* verifier_;
260};
261
262bool IsLoadFloat16ArrayElement(Node* node) {
263 Operator::Opcode opcode = node->op()->opcode();
264 return (opcode == IrOpcode::kLoadTypedElement ||
265 opcode == IrOpcode::kLoadDataViewElement) &&
266 ExternalArrayTypeOf(node->op()) == kExternalFloat16Array;
267}
268
269} // namespace
270
271#ifdef DEBUG
272// Helpers for monotonicity checking.
273class InputUseInfos {
274 public:
275 explicit InputUseInfos(Zone* zone) : input_use_infos_(zone) {}
276
277 void SetAndCheckInput(Node* node, int index, UseInfo use_info) {
278 if (input_use_infos_.empty()) {
279 input_use_infos_.resize(node->InputCount(), UseInfo::None());
280 }
281 // Check that the new use information is a super-type of the old
282 // one.
283 DCHECK(IsUseLessGeneral(input_use_infos_[index], use_info));
284 input_use_infos_[index] = use_info;
285 }
286
287 private:
288 ZoneVector<UseInfo> input_use_infos_;
289
290 static bool IsUseLessGeneral(UseInfo use1, UseInfo use2) {
291 return use1.truncation().IsLessGeneralThan(use2.truncation());
292 }
293};
294
295#endif // DEBUG
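// Illustrative sketch (not part of this file): the DEBUG-only check above
// asserts monotonicity, i.e. that the use recorded for an input only ever
// becomes more general across fixpoint iterations (which is what guarantees
// termination). A standalone version over a toy, totally ordered lattice;
// ToyTruncation and CheckMonotone are hypothetical names.
#include <cassert>
#include <vector>

enum class ToyTruncation { kNone = 0, kWord32 = 1, kAny = 2 };

inline bool IsLessGeneralOrEqual(ToyTruncation a, ToyTruncation b) {
  return static_cast<int>(a) <= static_cast<int>(b);
}

// Records the use seen for input {index} and asserts it never becomes less
// general than what was recorded before.
inline void CheckMonotone(std::vector<ToyTruncation>& recorded, size_t index,
                          ToyTruncation new_use) {
  assert(IsLessGeneralOrEqual(recorded[index], new_use));
  recorded[index] = new_use;
}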
296
297class RepresentationSelector {
298 // The purpose of this nested class is to hide method
299 // v8::internal::compiler::NodeProperties::ChangeOp which should not be
300 // directly used by code in RepresentationSelector and SimplifiedLowering.
301 // RepresentationSelector code should call RepresentationSelector::ChangeOp in
302 // place of NodeProperties::ChangeOp, in order to notify the changes to a
303 // registered ObserveNodeManager and support the %ObserveNode intrinsic.
304 class NodeProperties : public compiler::NodeProperties {
305 static void ChangeOp(Node* node, const Operator* new_op) { UNREACHABLE(); }
306 };
307
308 public:
309 // Information for each node tracked during the fixpoint.
310 class NodeInfo final {
311 public:
312 // Adds new use to the node. Returns true if something has changed
313 // and the node has to be requeued.
314 bool AddUse(UseInfo info) {
315 Truncation old_truncation = truncation_;
316 truncation_ = Truncation::Generalize(truncation_, info.truncation());
317 return truncation_ != old_truncation;
318 }
319
324 bool visited() const { return state_ == kVisited; }
325 bool queued() const { return state_ == kQueued; }
326 bool pushed() const { return state_ == kPushed; }
327 bool unvisited() const { return state_ == kUnvisited; }
330
332
333 // Helpers for feedback typing.
336 void set_weakened() { weakened_ = true; }
337 bool weakened() const { return weakened_; }
340
341 private:
342 // Fields are ordered to avoid mixing byte and word size fields to minimize
343 // padding.
344 enum State : uint8_t { kUnvisited, kPushed, kVisited, kQueued };
345 State state_ = kUnvisited;
346 MachineRepresentation representation_ =
347 MachineRepresentation::kNone; // Output representation.
348 Truncation truncation_ = Truncation::None(); // Information about uses.
349 bool weakened_ = false;
350
353 };
354
356 RepresentationChanger* changer,
358 NodeOriginTable* node_origins,
359 TickCounter* tick_counter, Linkage* linkage,
360 ObserveNodeManager* observe_node_manager,
362 : jsgraph_(jsgraph),
364 zone_(zone),
366 count_(jsgraph->graph()->NodeCount()),
367 info_(count_, zone),
368#ifdef DEBUG
369 node_input_use_infos_(count_, InputUseInfos(zone), zone),
370#endif
372 changer_(changer),
376 node_origins_(node_origins),
379 tick_counter_(tick_counter),
381 observe_node_manager_(observe_node_manager),
382 verifier_(verifier) {
384 Type::Constant(broker, broker->true_value(), graph_zone());
386 Type::Constant(broker, broker->false_value(), graph_zone());
387 }
388
389 bool verification_enabled() const { return verifier_ != nullptr; }
390
392 // Clean up for the next phase.
393 for (NodeInfo& info : info_) {
394 info.reset_state();
395 }
396 }
397
398 Type TypeOf(Node* node) {
399 Type type = GetInfo(node)->feedback_type();
400 return type.IsInvalid() ? NodeProperties::GetType(node) : type;
401 }
402
403 Type FeedbackTypeOf(Node* node) {
404 Type type = GetInfo(node)->feedback_type();
405 return type.IsInvalid() ? Type::None() : type;
406 }
407
408 Type TypePhi(Node* node) {
409 int arity = node->op()->ValueInputCount();
410 Type type = FeedbackTypeOf(node->InputAt(0));
411 for (int i = 1; i < arity; ++i) {
412 type = op_typer_.Merge(type, FeedbackTypeOf(node->InputAt(i)));
413 }
414 return type;
415 }
416
417 Type TypeSelect(Node* node) {
418 return op_typer_.Merge(FeedbackTypeOf(node->InputAt(1)),
419 FeedbackTypeOf(node->InputAt(2)));
420 }
421
422 bool UpdateFeedbackType(Node* node) {
423 if (node->op()->ValueOutputCount() == 0) return false;
424 if ((IrOpcode::IsMachineOpcode(node->opcode()) ||
425 IrOpcode::IsMachineConstantOpcode(node->opcode())) &&
426 node->opcode() != IrOpcode::kLoadFramePointer) {
427 DCHECK(NodeProperties::GetType(node).Is(Type::Machine()));
428 }
429
430 // For any non-phi node just wait until we get all inputs typed. We only
431 // allow untyped inputs for phi nodes because phis are the only places
432 // where cycles need to be broken.
433 if (node->opcode() != IrOpcode::kPhi) {
434 for (int i = 0; i < node->op()->ValueInputCount(); i++) {
435 if (GetInfo(node->InputAt(i))->feedback_type().IsInvalid()) {
436 return false;
437 }
438 }
439 }
440
441 NodeInfo* info = GetInfo(node);
442 Type type = info->feedback_type();
443 Type new_type = NodeProperties::GetType(node);
444
445 // We preload these values here to avoid increasing the binary size too
446 // much, which happens if we inline the calls into the macros below.
447 Type input0_type;
448 if (node->InputCount() > 0) input0_type = FeedbackTypeOf(node->InputAt(0));
449 Type input1_type;
450 if (node->InputCount() > 1) input1_type = FeedbackTypeOf(node->InputAt(1));
451
452 switch (node->opcode()) {
453#define DECLARE_CASE(Name) \
454 case IrOpcode::k##Name: { \
455 new_type = op_typer_.Name(input0_type, input1_type); \
456 break; \
457 }
459 DECLARE_CASE(SameValue)
460#undef DECLARE_CASE
461
462#define DECLARE_CASE(Name) \
463 case IrOpcode::k##Name: { \
464 new_type = Type::Intersect(op_typer_.Name(input0_type, input1_type), \
465 info->restriction_type(), graph_zone()); \
466 break; \
467 }
470#undef DECLARE_CASE
471
472#define DECLARE_CASE(Name) \
473 case IrOpcode::k##Name: { \
474 new_type = op_typer_.Name(input0_type); \
475 break; \
476 }
478#undef DECLARE_CASE
479
480#define DECLARE_CASE(Name) \
481 case IrOpcode::k##Name: { \
482 new_type = Type::Intersect(op_typer_.Name(input0_type), \
483 info->restriction_type(), graph_zone()); \
484 break; \
485 }
487#undef DECLARE_CASE
488
489 case IrOpcode::kConvertReceiver:
490 new_type = op_typer_.ConvertReceiver(input0_type);
491 break;
492
493 case IrOpcode::kPlainPrimitiveToNumber:
494 new_type = op_typer_.ToNumber(input0_type);
495 break;
496
497 case IrOpcode::kCheckBounds:
498 new_type =
499 Type::Intersect(op_typer_.CheckBounds(input0_type, input1_type),
500 info->restriction_type(), graph_zone());
501 break;
502
503 case IrOpcode::kCheckFloat64Hole:
504 new_type = Type::Intersect(op_typer_.CheckFloat64Hole(input0_type),
505 info->restriction_type(), graph_zone());
506 break;
507
508 case IrOpcode::kCheckNumber:
509 new_type = Type::Intersect(op_typer_.CheckNumber(input0_type),
510 info->restriction_type(), graph_zone());
511 break;
512
513 case IrOpcode::kCheckNumberFitsInt32:
514 new_type = Type::Intersect(op_typer_.CheckNumberFitsInt32(input0_type),
515 info->restriction_type(), graph_zone());
516 break;
517
518 case IrOpcode::kPhi: {
519 new_type = TypePhi(node);
520 if (!type.IsInvalid()) {
521 new_type = Weaken(node, type, new_type);
522 }
523 break;
524 }
525
526 case IrOpcode::kConvertTaggedHoleToUndefined:
528 FeedbackTypeOf(node->InputAt(0)));
529 break;
530
531 case IrOpcode::kTypeGuard: {
532 new_type = op_typer_.TypeTypeGuard(node->op(),
533 FeedbackTypeOf(node->InputAt(0)));
534 break;
535 }
536
537 case IrOpcode::kSelect: {
538 const auto& p = SelectParametersOf(node->op());
539 if (p.semantics() == BranchSemantics::kMachine) {
540 if (type.IsInvalid()) {
542 return true;
543 }
544 return false;
545 }
546 new_type = TypeSelect(node);
547 break;
548 }
549
550 default:
551 // Shortcut for operations that we do not handle.
552 if (type.IsInvalid()) {
554 return true;
555 }
556 return false;
557 }
558 // We need to guarantee that the feedback type is a subtype of the upper
559 // bound. Naively that should hold, but weakening can actually produce
560 // a bigger type if we are unlucky with ordering of phi typing. To be
561 // really sure, just intersect the upper bound with the feedback type.
562 new_type = Type::Intersect(GetUpperBound(node), new_type, graph_zone());
563
564 if (!type.IsInvalid() && new_type.Is(type)) return false;
565 GetInfo(node)->set_feedback_type(new_type);
566 if (v8_flags.trace_representation) {
568 }
569 return true;
570 }
571
573 StdoutStream os;
574 os << "#" << n->id() << ":" << *n->op() << "(";
575 int j = 0;
576 for (Node* const i : n->inputs()) {
577 if (j++ > 0) os << ", ";
578 os << "#" << i->id() << ":" << i->op()->mnemonic();
579 }
580 os << ")";
582 Type static_type = NodeProperties::GetType(n);
583 os << " [Static type: " << static_type;
584 Type feedback_type = GetInfo(n)->feedback_type();
585 if (!feedback_type.IsInvalid() && feedback_type != static_type) {
586 os << ", Feedback type: " << feedback_type;
587 }
588 os << "]";
589 }
590 os << std::endl;
591 }
592
593 Type Weaken(Node* node, Type previous_type, Type current_type) {
594 // If the types have nothing to do with integers, return the types.
595 Type const integer = type_cache_->kInteger;
596 if (!previous_type.Maybe(integer)) {
597 return current_type;
598 }
599 DCHECK(current_type.Maybe(integer));
600
601 Type current_integer = Type::Intersect(current_type, integer, graph_zone());
602 DCHECK(!current_integer.IsNone());
603 Type previous_integer =
604 Type::Intersect(previous_type, integer, graph_zone());
605 DCHECK(!previous_integer.IsNone());
606
607 // Once we start weakening a node, we should always weaken.
608 if (!GetInfo(node)->weakened()) {
609 // Only weaken if there is range involved; we should converge quickly
610 // for all other types (the exception is a union of many constants,
611 // but we currently do not increase the number of constants in unions).
612 Type previous = previous_integer.GetRange();
613 Type current = current_integer.GetRange();
614 if (current.IsInvalid() || previous.IsInvalid()) {
615 return current_type;
616 }
617 // Range is involved => we are weakening.
618 GetInfo(node)->set_weakened();
619 }
620
621 return Type::Union(current_type,
622 op_typer_.WeakenRange(previous_integer, current_integer),
623 graph_zone());
624 }
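// Illustrative sketch (not part of this file): Weaken() is range widening.
// If a phi's integer range kept creeping up by small amounts, the feedback
// typing fixpoint could take arbitrarily many iterations, so once a bound
// moves it is jumped to a much wider value. A standalone widening step over
// plain [min, max] ranges; ToyRange and WidenRange are hypothetical, and the
// real WeakenRange used above widens more gradually toward a set of
// predefined bounds rather than straight to infinity.
#include <limits>

struct ToyRange {
  double min;
  double max;
};

inline ToyRange WidenRange(const ToyRange& previous, const ToyRange& current) {
  ToyRange result = current;
  if (current.min < previous.min) {
    result.min = -std::numeric_limits<double>::infinity();
  }
  if (current.max > previous.max) {
    result.max = std::numeric_limits<double>::infinity();
  }
  return result;
}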
625
626 // Generates a pre-order traversal of the nodes, starting with End.
628 // Reset previous state.
631 count_ = graph()->NodeCount();
632 info_.resize(count_);
633
635
636 stack.push({graph()->end(), 0});
637 GetInfo(graph()->end())->set_pushed();
638 while (!stack.empty()) {
639 NodeState& current = stack.top();
640 Node* node = current.node;
641 // If there is an unvisited input, push it and continue with that node.
642 bool pushed_unvisited = false;
643 while (current.input_index < node->InputCount()) {
644 Node* input = node->InputAt(current.input_index);
645 NodeInfo* input_info = GetInfo(input);
646 current.input_index++;
647 if (input_info->unvisited()) {
648 input_info->set_pushed();
649 stack.push({input, 0});
650 pushed_unvisited = true;
651 break;
652 } else if (input_info->pushed()) {
653 // Optimization for the Retype phase.
654 // If we had already pushed (and not visited) an input, it means that
655 // the current node will be visited in the Retype phase before one of
656 // its inputs. If this happens, the current node might need to be
657 // revisited.
658 MarkAsPossibleRevisit(node, input);
659 }
660 }
661
662 if (pushed_unvisited) continue;
663
664 stack.pop();
665 NodeInfo* info = GetInfo(node);
666 info->set_visited();
667
668 // Generate the traversal
669 traversal_nodes_.push_back(node);
670 }
671 }
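// Illustrative sketch (not part of this file): GenerateTraversal() above is
// an iterative DFS with an explicit stack, emitting a node only after all of
// its inputs have been emitted or are still on the stack (a cycle). The same
// skeleton over a toy adjacency list; PostOrder and the local names are
// hypothetical, and the revisit bookkeeping is omitted.
#include <stack>
#include <vector>

inline std::vector<int> PostOrder(const std::vector<std::vector<int>>& inputs,
                                  int end_id) {
  enum State { kUnvisited, kPushed, kVisited };
  std::vector<State> state(inputs.size(), kUnvisited);
  std::vector<int> order;
  struct Frame {
    int node;
    size_t next_input;
  };
  std::stack<Frame> stack;
  stack.push({end_id, 0});
  state[end_id] = kPushed;
  while (!stack.empty()) {
    Frame& current = stack.top();
    // If there is an unvisited input, push it and continue with that node.
    bool pushed_unvisited = false;
    while (current.next_input < inputs[current.node].size()) {
      int input = inputs[current.node][current.next_input++];
      if (state[input] == kUnvisited) {
        state[input] = kPushed;
        stack.push({input, 0});
        pushed_unvisited = true;
        break;
      }
    }
    if (pushed_unvisited) continue;
    state[current.node] = kVisited;
    order.push_back(current.node);
    stack.pop();
  }
  return order;
}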
672
674 NodeInfo* info = GetInfo(node);
675 if (info->visited()) {
676 TRACE(" QUEUEING #%d: %s\n", node->id(), node->op()->mnemonic());
677 info->set_queued();
678 revisit_queue_.push(node);
679 }
680 }
681
682 // Tries to update the feedback type of the node, as well as setting its
683 // machine representation (in VisitNode). Returns true iff updating the
684 // feedback type is successful.
685 bool RetypeNode(Node* node) {
686 NodeInfo* info = GetInfo(node);
687 info->set_visited();
688 bool updated = UpdateFeedbackType(node);
689 TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
690 VisitNode<RETYPE>(node, info->truncation(), nullptr);
691 TRACE(" ==> output %s\n", MachineReprToString(info->representation()));
692 return updated;
693 }
694
695 // Visits the node and marks it as visited. Inside of VisitNode, we might
696 // change the truncation of one of our inputs (see EnqueueInput<PROPAGATE> for
697 // this). If we change the truncation of an already visited node, we will add
698 // it to the revisit queue.
700 NodeInfo* info = GetInfo(node);
701 info->set_visited();
702 TRACE(" visit #%d: %s (trunc: %s)\n", node->id(), node->op()->mnemonic(),
703 info->truncation().description());
704 VisitNode<PROPAGATE>(node, info->truncation(), nullptr);
705 }
706
707 // Backward propagation of truncations to a fixpoint.
709 TRACE("--{Propagate phase}--\n");
711 DCHECK(revisit_queue_.empty());
712
713 // Process nodes in reverse post order, with End as the root.
714 for (auto it = traversal_nodes_.crbegin(); it != traversal_nodes_.crend();
715 ++it) {
717
718 while (!revisit_queue_.empty()) {
719 Node* node = revisit_queue_.front();
720 revisit_queue_.pop();
722 }
723 }
724 }
725
726 // Forward propagation of types from type feedback to a fixpoint.
728 TRACE("--{Retype phase}--\n");
730 DCHECK(revisit_queue_.empty());
731
732 for (auto it = traversal_nodes_.cbegin(); it != traversal_nodes_.cend();
733 ++it) {
734 Node* node = *it;
735 if (!RetypeNode(node)) continue;
736
737 auto revisit_it = might_need_revisit_.find(node);
738 if (revisit_it == might_need_revisit_.end()) continue;
739
740 for (Node* const user : revisit_it->second) {
742 }
743
744 // Process the revisit queue.
745 while (!revisit_queue_.empty()) {
746 Node* revisit_node = revisit_queue_.front();
747 revisit_queue_.pop();
748 if (!RetypeNode(revisit_node)) continue;
749 // Here we need to check all uses since we can't easily know which
750 // nodes will need to be revisited due to having an input which was
751 // a revisited node.
752 for (Node* const user : revisit_node->uses()) {
754 }
755 }
756 }
757 }
758
759 // Lowering and change insertion phase.
761 TRACE("--{Lower phase}--\n");
762 for (auto it = traversal_nodes_.cbegin(); it != traversal_nodes_.cend();
763 ++it) {
764 Node* node = *it;
765 NodeInfo* info = GetInfo(node);
766 TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
767 // Reuse {VisitNode()} so the representation rules are in one place.
770 NodeOriginTable::Scope origin_scope(node_origins_, "simplified lowering",
771 node);
772 VisitNode<LOWER>(node, info->truncation(), lowering);
773 }
774
775 // Perform the final replacements.
777 i != replacements_.end(); ++i) {
778 Node* node = *i;
779 Node* replacement = *(++i);
780 node->ReplaceUses(replacement);
781 node->Kill();
782 // We also need to replace the node in the rest of the vector.
783 for (NodeVector::iterator j = i + 1; j != replacements_.end(); ++j) {
784 ++j;
785 if (*j == node) *j = replacement;
786 }
787 }
788 }
789
790 void RunVerifyPhase(OptimizedCompilationInfo* compilation_info) {
792
793 TRACE("--{Verify Phase}--\n");
794
795 // Patch pending type overrides.
796 for (const auto& [constant, uses] :
798 Node* typed_constant =
799 InsertTypeOverrideForVerifier(Type::Machine(), constant);
800 for (auto use : uses) {
801 for (int i = 0; i < use->InputCount(); ++i) {
802 if (use->InputAt(i) == constant) {
803 use->ReplaceInput(i, typed_constant);
804 }
805 }
806 }
807 }
808
809 // Generate a new traversal containing all the new nodes created during
810 // lowering.
812
813 // Set node types to the refined types computed during retyping.
814 for (Node* node : traversal_nodes_) {
815 NodeInfo* info = GetInfo(node);
816 if (!info->feedback_type().IsInvalid()) {
817 NodeProperties::SetType(node, info->feedback_type());
818 }
819 }
820
821 // Print graph.
822 if (compilation_info != nullptr && compilation_info->trace_turbo_json()) {
824 AllowHandleDereference allow_deref;
825
826 TurboJsonFile json_of(compilation_info, std::ios_base::app);
827 JSONGraphWriter writer(json_of, graph(), source_positions_,
829 writer.PrintPhase("V8.TFSimplifiedLowering [after lower]");
830 }
831
832 // Verify all nodes.
833 for (Node* node : traversal_nodes_) {
835 }
836
837 // Print graph.
838 if (compilation_info != nullptr && compilation_info->trace_turbo_json()) {
840 AllowHandleDereference allow_deref;
841
842 TurboJsonFile json_of(compilation_info, std::ios_base::app);
843 JSONGraphWriterWithVerifierTypes writer(
845 writer.PrintPhase("V8.TFSimplifiedLowering [after verify]");
846 }
847
848 // Eliminate all introduced hints.
849 for (Node* node : verifier_->inserted_hints()) {
850 Node* input = node->InputAt(0);
851 node->ReplaceUses(input);
852 node->Kill();
853 }
854 }
855
856 void Run(SimplifiedLowering* lowering) {
860 RunLowerPhase(lowering);
861 if (verification_enabled()) {
862 RunVerifyPhase(lowering->info_);
863 }
864 }
865
866 // Just assert for Retype and Lower. Propagate specialized below.
867 template <Phase T>
868 void EnqueueInput(Node* use_node, int index,
869 UseInfo use_info = UseInfo::None()) {
870 static_assert(retype<T>() || lower<T>(),
871 "This version of EnqueueInput has to be called in "
872 "the Retype or Lower phase.");
873 }
874
875 template <Phase T>
876 static constexpr bool propagate() {
877 return T == PROPAGATE;
878 }
879
880 template <Phase T>
881 static constexpr bool retype() {
882 return T == RETYPE;
883 }
884
885 template <Phase T>
886 static constexpr bool lower() {
887 return T == LOWER;
888 }
889
890 template <Phase T>
891 void SetOutput(Node* node, MachineRepresentation representation,
892 Type restriction_type = Type::Any());
893
895
896 bool InputCannotBe(Node* node, Type type) {
897 DCHECK_EQ(1, node->op()->ValueInputCount());
898 return !GetUpperBound(node->InputAt(0)).Maybe(type);
899 }
900
901 bool InputIs(Node* node, Type type) {
902 DCHECK_EQ(1, node->op()->ValueInputCount());
903 return GetUpperBound(node->InputAt(0)).Is(type);
904 }
905
907 return BothInputsAre(node, Type::Signed32());
908 }
909
911 return BothInputsAre(node, Type::Unsigned32());
912 }
913
914 bool BothInputsAre(Node* node, Type type) {
915 DCHECK_EQ(2, node->op()->ValueInputCount());
916 return GetUpperBound(node->InputAt(0)).Is(type) &&
917 GetUpperBound(node->InputAt(1)).Is(type);
918 }
919
921 MachineRepresentation representation = GetInfo(node)->representation();
922 return IsAnyTagged(representation);
923 }
924
925 bool OneInputCannotBe(Node* node, Type type) {
926 DCHECK_EQ(2, node->op()->ValueInputCount());
927 return !GetUpperBound(node->InputAt(0)).Maybe(type) ||
928 !GetUpperBound(node->InputAt(1)).Maybe(type);
929 }
930
931 void ChangeToDeadValue(Node* node, Node* effect, Node* control) {
932 DCHECK(TypeOf(node).IsNone());
933 // If the node is unreachable, insert an Unreachable node and mark the
934 // value dead.
935 // TODO(jarin,turbofan) Find a way to unify/merge this insertion with
936 // InsertUnreachableIfNecessary.
937 Node* unreachable = effect =
938 graph()->NewNode(common()->Unreachable(), effect, control);
939 const Operator* dead_value =
940 common()->DeadValue(GetInfo(node)->representation());
941 node->ReplaceInput(0, unreachable);
942 node->TrimInputCount(dead_value->ValueInputCount());
943 ReplaceEffectControlUses(node, effect, control);
944 ChangeOp(node, dead_value);
945 }
946
947 // This function is a generalization of ChangeToPureOp. It can be used to
948 // replace a node that is part of the effect and control chain by a pure node.
949 void ReplaceWithPureNode(Node* node, Node* pure_node) {
950 DCHECK(pure_node->op()->HasProperty(Operator::kPure));
951 if (node->op()->EffectInputCount() > 0) {
952 DCHECK_LT(0, node->op()->ControlInputCount());
953 Node* control = NodeProperties::GetControlInput(node);
954 Node* effect = NodeProperties::GetEffectInput(node);
955 if (TypeOf(node).IsNone()) {
956 ChangeToDeadValue(node, effect, control);
957 return;
958 }
959 // Rewire the effect and control chains.
960 ReplaceEffectControlUses(node, effect, control);
961 } else {
962 DCHECK_EQ(0, node->op()->ControlInputCount());
963 }
964 DeferReplacement(node, pure_node);
965 }
966
967 void ChangeToPureOp(Node* node, const Operator* new_op) {
969 DCHECK_EQ(new_op->ValueInputCount(), node->op()->ValueInputCount());
970 if (node->op()->EffectInputCount() > 0) {
971 DCHECK_LT(0, node->op()->ControlInputCount());
972 Node* control = NodeProperties::GetControlInput(node);
973 Node* effect = NodeProperties::GetEffectInput(node);
974 if (TypeOf(node).IsNone()) {
975 ChangeToDeadValue(node, effect, control);
976 return;
977 }
978 // Rewire the effect and control chains.
979 node->TrimInputCount(new_op->ValueInputCount());
980 ReplaceEffectControlUses(node, effect, control);
981 } else {
982 DCHECK_EQ(0, node->op()->ControlInputCount());
983 }
984 ChangeOp(node, new_op);
985 }
986
987 void ChangeUnaryToPureBinaryOp(Node* node, const Operator* new_op,
988 int new_input_index, Node* new_input) {
990 DCHECK_EQ(new_op->ValueInputCount(), 2);
991 DCHECK_EQ(node->op()->ValueInputCount(), 1);
992 DCHECK_LE(0, new_input_index);
993 DCHECK_LE(new_input_index, 1);
994 if (node->op()->EffectInputCount() > 0) {
995 DCHECK_LT(0, node->op()->ControlInputCount());
996 Node* control = NodeProperties::GetControlInput(node);
997 Node* effect = NodeProperties::GetEffectInput(node);
998 if (TypeOf(node).IsNone()) {
999 ChangeToDeadValue(node, effect, control);
1000 return;
1001 }
1002 node->TrimInputCount(node->op()->ValueInputCount());
1003 ReplaceEffectControlUses(node, effect, control);
1004 } else {
1005 DCHECK_EQ(0, node->op()->ControlInputCount());
1006 }
1007 if (new_input_index == 0) {
1008 node->InsertInput(jsgraph_->zone(), 0, new_input);
1009 } else {
1010 DCHECK_EQ(new_input_index, 1);
1011 DCHECK_EQ(node->InputCount(), 1);
1012 node->AppendInput(jsgraph_->zone(), new_input);
1013 }
1014 ChangeOp(node, new_op);
1015 }
1016
1017 // Converts input {index} of {node} according to given UseInfo {use},
1018 // assuming the type of the input is {input_type}. If {input_type} is invalid,
1019 // the type of the input node {TypeOf(node->InputAt(index))} is used instead.
1020 void ConvertInput(Node* node, int index, UseInfo use,
1021 Type input_type = Type::Invalid()) {
1022 // In the change phase, insert a change before the use if necessary.
1023 if (use.representation() == MachineRepresentation::kNone)
1024 return; // No input requirement on the use.
1025 Node* input = node->InputAt(index);
1026 DCHECK_NOT_NULL(input);
1027 NodeInfo* input_info = GetInfo(input);
1028 MachineRepresentation input_rep = input_info->representation();
1029 if (input_rep != use.representation() ||
1030 use.type_check() != TypeCheckKind::kNone) {
1031 // Output representation doesn't match usage.
1032 TRACE(" change: #%d:%s(@%d #%d:%s) ", node->id(), node->op()->mnemonic(),
1033 index, input->id(), input->op()->mnemonic());
1034 TRACE("from %s to %s:%s\n",
1035 MachineReprToString(input_info->representation()),
1036 MachineReprToString(use.representation()),
1037 use.truncation().description());
1038 if (input_type.IsInvalid()) {
1039 input_type = TypeOf(input);
1040 } else {
1041 // This case is reached when ConvertInput is called for TypeGuard nodes
1042 // which explicitly set the {input_type} for their input. In order to
1043 // correctly verify the resulting graph, we have to preserve this
1044 // forced type for the verifier.
1045 DCHECK_EQ(node->opcode(), IrOpcode::kTypeGuard);
1046 input = InsertTypeOverrideForVerifier(input_type, input);
1047 }
1048 Node* n = changer_->GetRepresentationFor(input, input_rep, input_type,
1049 node, use);
1050 node->ReplaceInput(index, n);
1051 }
1052 }
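// Illustrative sketch (not part of this file): ConvertInput() above rewires a
// single input edge through a freshly created conversion node whenever the
// producer's representation does not match what the use demands. The same
// pattern on a toy IR; ToyIrNode, MakeConversion and ConvertEdge are
// hypothetical, and the real RepresentationChanger also consults types and
// deopt checks when picking the conversion.
#include <memory>
#include <vector>

struct ToyIrNode {
  int representation;              // representation this node produces
  std::vector<ToyIrNode*> inputs;  // value inputs
};

// Creates a conversion node producing {to_representation} from {from}. The
// arena keeps ownership so raw pointers stored in the graph stay valid.
inline ToyIrNode* MakeConversion(std::vector<std::unique_ptr<ToyIrNode>>& arena,
                                 ToyIrNode* from, int to_representation) {
  arena.push_back(
      std::make_unique<ToyIrNode>(ToyIrNode{to_representation, {from}}));
  return arena.back().get();
}

inline void ConvertEdge(std::vector<std::unique_ptr<ToyIrNode>>& arena,
                        ToyIrNode* use, size_t index, int required_rep) {
  ToyIrNode* input = use->inputs[index];
  if (input->representation == required_rep) return;  // nothing to do
  use->inputs[index] = MakeConversion(arena, input, required_rep);
}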
1053
1054 template <Phase T>
1055 void ProcessInput(Node* node, int index, UseInfo use);
1056
1057 // Just assert for Retype and Lower. Propagate specialized below.
1058 template <Phase T>
1059 void ProcessRemainingInputs(Node* node, int index) {
1060 static_assert(retype<T>() || lower<T>(),
1061 "This version of ProcessRemainingInputs has to be called in "
1062 "the Retype or Lower phase.");
1065 }
1066
1067 // Marks {node} as a possible revisit, since it is a use of {input} but will
1068 // be visited before {input} is visited.
1069 void MarkAsPossibleRevisit(Node* node, Node* input) {
1070 auto it = might_need_revisit_.find(input);
1071 if (it == might_need_revisit_.end()) {
1072 it = might_need_revisit_.insert({input, ZoneVector<Node*>(zone())}).first;
1073 }
1074 it->second.push_back(node);
1075 TRACE(" Marking #%d: %s as needing revisit due to #%d: %s\n", node->id(),
1076 node->op()->mnemonic(), input->id(), input->op()->mnemonic());
1077 }
1078
1079 // Just assert for Retype. Propagate and Lower specialized below.
1080 template <Phase T>
1081 void VisitInputs(Node* node) {
1082 static_assert(
1083 retype<T>(),
1084 "This version of VisitInputs has to be called in the Retype phase.");
1085 }
1086
1087 template <Phase T>
1088 void VisitReturn(Node* node) {
1089 int first_effect_index = NodeProperties::FirstEffectIndex(node);
1090 // Visit integer slot count to pop
1092
1093 // Visit value, context and frame state inputs as tagged.
1094 for (int i = 1; i < first_effect_index; i++) {
1096 }
1097 // Only enqueue other inputs (effects, control).
1098 for (int i = first_effect_index; i < node->InputCount(); i++) {
1099 EnqueueInput<T>(node, i);
1100 }
1101 }
1102
1103 // Helper for an unused node.
1104 template <Phase T>
1105 void VisitUnused(Node* node) {
1106 int first_effect_index = NodeProperties::FirstEffectIndex(node);
1107 for (int i = 0; i < first_effect_index; i++) {
1109 }
1110 ProcessRemainingInputs<T>(node, first_effect_index);
1111
1112 if (lower<T>()) {
1113 TRACE("disconnecting unused #%d:%s\n", node->id(),
1114 node->op()->mnemonic());
1116 node->NullAllInputs(); // Node is now dead.
1117 DeferReplacement(node, graph()->NewNode(common()->Plug()));
1118 }
1119 }
1120
1121 // Helper for no-op node.
1122 template <Phase T>
1123 void VisitNoop(Node* node, Truncation truncation) {
1124 if (truncation.IsUnused()) return VisitUnused<T>(node);
1125 MachineRepresentation representation =
1126 GetOutputInfoForPhi(TypeOf(node), truncation);
1127 VisitUnop<T>(node, UseInfo(representation, truncation), representation);
1128 if (lower<T>()) DeferReplacement(node, node->InputAt(0));
1129 }
1130
1131 // Helper for binops of the R x L -> O variety.
1132 template <Phase T>
1133 void VisitBinop(Node* node, UseInfo left_use, UseInfo right_use,
1134 MachineRepresentation output,
1135 Type restriction_type = Type::Any()) {
1136 DCHECK_EQ(2, node->op()->ValueInputCount());
1137 ProcessInput<T>(node, 0, left_use);
1138 ProcessInput<T>(node, 1, right_use);
1139 for (int i = 2; i < node->InputCount(); i++) {
1140 EnqueueInput<T>(node, i);
1141 }
1142 SetOutput<T>(node, output, restriction_type);
1143 }
1144
1145 // Helper for binops of the I x I -> O variety.
1146 template <Phase T>
1147 void VisitBinop(Node* node, UseInfo input_use, MachineRepresentation output,
1148 Type restriction_type = Type::Any()) {
1149 VisitBinop<T>(node, input_use, input_use, output, restriction_type);
1150 }
1151
1152 template <Phase T>
1154 DCHECK_EQ(2, node->op()->ValueInputCount());
1155 if (BothInputsAre(node, Type::NumberOrOddball())) {
1158 }
1159 NumberOperationHint hint = NumberOperationHintOf(node->op());
1160 return VisitBinop<T>(node,
1161 CheckedUseInfoAsWord32FromHint(hint, kIdentifyZeros),
1163 }
1164
1165 // Helper for unops of the I -> O variety.
1166 template <Phase T>
1167 void VisitUnop(Node* node, UseInfo input_use, MachineRepresentation output,
1168 Type restriction_type = Type::Any()) {
1169 DCHECK_EQ(1, node->op()->ValueInputCount());
1170 ProcessInput<T>(node, 0, input_use);
1172 SetOutput<T>(node, output, restriction_type);
1173 }
1174
1175 // Helper for leaf nodes.
1176 template <Phase T>
1178 DCHECK_EQ(0, node->InputCount());
1179 SetOutput<T>(node, output);
1180 }
1181
1182 // Helpers for specific types of binops.
1183
1184 template <Phase T>
1189
1190 template <Phase T>
1194
1195 template <Phase T>
1200
1201 // Infer representation for phi-like nodes.
1203 // Compute the representation.
1204 if (type.Is(Type::None())) {
1206 } else if (type.Is(Type::Signed32()) || type.Is(Type::Unsigned32())) {
1208 } else if (type.Is(Type::NumberOrOddball()) && use.IsUsedAsWord32()) {
1210 } else if (type.Is(Type::Boolean())) {
1212 } else if (type.Is(Type::NumberOrOddball()) &&
1213 use.TruncatesOddballAndBigIntToNumber()) {
1215 } else if (type.Is(Type::Union(Type::SignedSmall(), Type::NaN(), zone()))) {
1216 // TODO(turbofan): For Phis that return either NaN or some Smi, it's
1217 // beneficial to not go all the way to double, unless the uses are
1218 // double uses. For tagging that just means some potentially expensive
1219 // allocation code; we might want to do the same for -0 as well?
1221 } else if (type.Is(Type::Number())) {
1223 } else if (type.Is(Type::BigInt()) && Is64() && use.IsUsedAsWord64()) {
1225 } else if (type.Is(Type::ExternalPointer()) ||
1226 type.Is(Type::SandboxedPointer())) {
1228 }
1230 }
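// Illustrative sketch (not part of this file): the function above picks a
// machine representation for a phi from its type and from how its uses
// truncate the value. A reduced classifier over a few toy type classes;
// ToyTypeClass, ToyRep and ChooseRepresentation are hypothetical, and the
// real rules above additionally handle oddballs, BigInts and pointer types.
enum class ToyTypeClass { kNone, kSigned32, kUnsigned32, kBoolean, kNumber, kOther };
enum class ToyRep { kNone, kWord32, kBit, kFloat64, kTagged };

inline ToyRep ChooseRepresentation(ToyTypeClass type, bool used_as_word32) {
  switch (type) {
    case ToyTypeClass::kNone:
      return ToyRep::kNone;
    case ToyTypeClass::kSigned32:
    case ToyTypeClass::kUnsigned32:
      return ToyRep::kWord32;
    case ToyTypeClass::kBoolean:
      return ToyRep::kBit;
    case ToyTypeClass::kNumber:
      // Word32 is enough if every use only looks at the low 32 bits;
      // otherwise keep the full double value.
      return used_as_word32 ? ToyRep::kWord32 : ToyRep::kFloat64;
    case ToyTypeClass::kOther:
      return ToyRep::kTagged;
  }
  return ToyRep::kTagged;  // unreachable, keeps compilers happy
}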
1231
1232 // Helper for handling selects.
1233 template <Phase T>
1234 void VisitSelect(Node* node, Truncation truncation,
1235 SimplifiedLowering* lowering) {
1236 DCHECK(TypeOf(node->InputAt(0)).Is(Type::Boolean()));
1237 ProcessInput<T>(node, 0, UseInfo::Bool());
1238
1239 MachineRepresentation output =
1240 GetOutputInfoForPhi(TypeOf(node), truncation);
1241 SetOutput<T>(node, output);
1242
1243 if (lower<T>()) {
1244 // Update the select operator.
1245 SelectParameters p = SelectParametersOf(node->op());
1246 if (output != p.representation()) {
1247 ChangeOp(node, lowering->common()->Select(output, p.hint()));
1248 }
1249 }
1250 // Convert inputs to the output representation of this phi, pass the
1251 // truncation along.
1252 UseInfo input_use(output, truncation);
1253 ProcessInput<T>(node, 1, input_use);
1254 ProcessInput<T>(node, 2, input_use);
1255 }
1256
1257 // Helper for handling phis.
1258 template <Phase T>
1259 void VisitPhi(Node* node, Truncation truncation,
1260 SimplifiedLowering* lowering) {
1261 // If we already have a non-tagged representation set in the Phi node, it
1262 // does come from subgraphs using machine operators we introduced early in
1263 // the pipeline. In this case, we just keep the representation.
1264 MachineRepresentation output = PhiRepresentationOf(node->op());
1265 if (output == MachineRepresentation::kTagged) {
1266 output = GetOutputInfoForPhi(TypeOf(node), truncation);
1267 }
1268 // Only set the output representation if not running with type
1269 // feedback. (Feedback typing will set the representation.)
1270 SetOutput<T>(node, output);
1271
1272 int values = node->op()->ValueInputCount();
1273 if (lower<T>()) {
1274 // Update the phi operator.
1275 if (output != PhiRepresentationOf(node->op())) {
1276 ChangeOp(node, lowering->common()->Phi(output, values));
1277 }
1278 }
1279
1280 // Convert inputs to the output representation of this phi, pass the
1281 // truncation along.
1282 UseInfo input_use(output, truncation);
1283 for (int i = 0; i < node->InputCount(); i++) {
1284 ProcessInput<T>(node, i, i < values ? input_use : UseInfo::None());
1285 }
1286 }
1287
1288 template <Phase T>
1289 void VisitObjectIs(Node* node, Type type, SimplifiedLowering* lowering) {
1290 Type const input_type = TypeOf(node->InputAt(0));
1291 if (input_type.Is(type)) {
1293 if (lower<T>()) {
1296 true_type(), lowering->jsgraph()->Int32Constant(1)));
1297 }
1298 } else {
1300 if (lower<T>() && !input_type.Maybe(type)) {
1303 false_type(), lowering->jsgraph()->Int32Constant(0)));
1304 }
1305 }
1306 }
1307
1308 template <Phase T>
1309 void VisitCheck(Node* node, Type type, SimplifiedLowering* lowering) {
1310 if (InputIs(node, type)) {
1313 if (lower<T>()) DeferReplacement(node, node->InputAt(0));
1314 } else {
1315 VisitUnop<T>(node,
1318 }
1319 }
1320
1321 template <Phase T>
1322 void VisitCall(Node* node, SimplifiedLowering* lowering) {
1323 auto call_descriptor = CallDescriptorOf(node->op());
1324 int params = static_cast<int>(call_descriptor->ParameterCount());
1325 int value_input_count = node->op()->ValueInputCount();
1326
1327 DCHECK_GT(value_input_count, 0);
1328 DCHECK_GE(value_input_count, params);
1329
1330 // The target of the call.
1331 ProcessInput<T>(node, 0, UseInfo::Any());
1332
1333 // For the parameters (indexes [1, ..., params]), propagate representation
1334 // information from call descriptor.
1335 for (int i = 1; i <= params; i++) {
1336 ProcessInput<T>(node, i,
1337 TruncatingUseInfoFromRepresentation(
1338 call_descriptor->GetInputType(i).representation()));
1339 }
1340
1341 // Rest of the value inputs.
1342 for (int i = params + 1; i < value_input_count; i++) {
1344 }
1345
1346 // Effect and Control.
1347 ProcessRemainingInputs<T>(node, value_input_count);
1348
1349 if (call_descriptor->ReturnCount() > 0) {
1350 SetOutput<T>(node, call_descriptor->GetReturnType(0).representation());
1351 } else {
1353 }
1354 }
1355
1356 void MaskShiftOperand(Node* node, Type rhs_type) {
1357 if (!rhs_type.Is(type_cache_->kZeroToThirtyOne)) {
1358 Node* const rhs = NodeProperties::GetValueInput(node, 1);
1359 node->ReplaceInput(1,
1360 graph()->NewNode(jsgraph_->machine()->Word32And(), rhs,
1361 jsgraph_->Int32Constant(0x1F)));
1362 }
1363 }
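// Illustrative sketch (not part of this file): JavaScript reduces shift
// counts modulo 32, while the machine-level Word32 shifts are only
// guaranteed for counts in [0, 31], so the right-hand side is masked with
// 0x1F above unless its type already proves it is in range. The same
// semantics in plain C++; JsShiftLeft is a hypothetical helper.
#include <cstdint>

constexpr int32_t JsShiftLeft(int32_t lhs, uint32_t rhs) {
  return static_cast<int32_t>(static_cast<uint32_t>(lhs) << (rhs & 0x1F));
}

// In JavaScript, (1 << 33) === 2, because the shift count 33 is reduced to 1.
static_assert(JsShiftLeft(1, 33) == 2, "shift counts are taken modulo 32");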
1364
1366 // We only need signedness to do deopt correctly.
1367 if (type.Is(Type::Signed32())) {
1369 } else if (type.Is(Type::Unsigned32())) {
1371 } else {
1372 return MachineSemantic::kAny;
1373 }
1374 }
1375
1377 if (type.IsNone()) {
1378 return MachineType::None();
1379 }
1380 // Do not distinguish between various Tagged variations.
1381 if (IsAnyTagged(rep)) {
1382 return MachineType::AnyTagged();
1383 }
1384 if (rep == MachineRepresentation::kWord64) {
1385 if (type.Is(Type::SignedBigInt64())) {
1387 }
1388
1389 if (type.Is(Type::UnsignedBigInt64())) {
1391 }
1392
1393 if (type.Is(Type::BigInt())) {
1394 return MachineType::AnyTagged();
1395 }
1396
1397 DCHECK(type.Is(TypeCache::Get()->kSafeInteger));
1399 }
1400 MachineType machine_type(rep, DeoptValueSemanticOf(type));
1403 machine_type.semantic() == MachineSemantic::kInt32 ||
1404 machine_type.semantic() == MachineSemantic::kUint32);
1406 type.Is(Type::Boolean()));
1407 return machine_type;
1408 }
1409
1410 template <Phase T>
1412 if (propagate<T>()) {
1413 for (int i = 0; i < node->InputCount(); i++) {
1414 if (IsLargeBigInt(TypeOf(node->InputAt(i)))) {
1415 // BigInt64s are rematerialized in deoptimization. The other BigInts
1416 // must be rematerialized before deoptimization. By propagating an
1417 // AnyTagged use, the RepresentationChanger is going to insert the
1418 // necessary conversions.
1420 } else if (IsLoadFloat16ArrayElement(node->InputAt(i))) {
1421 // Loads from Float16Arrays are raw bits as word16s but have the
1422 // Number type, since not all archs have native float16
1423 // representation. Rematerialize them as float64s in deoptimization.
1425 } else {
1426 EnqueueInput<T>(node, i, UseInfo::Any());
1427 }
1428 }
1429 } else if (lower<T>()) {
1430 Zone* zone = jsgraph_->zone();
1432 zone->New<ZoneVector<MachineType>>(node->InputCount(), zone);
1433 for (int i = 0; i < node->InputCount(); i++) {
1434 Node* input = node->InputAt(i);
1435 MachineRepresentation input_rep = GetInfo(input)->representation();
1436 if (IsLargeBigInt(TypeOf(input))) {
1438 } else if (IsLoadFloat16ArrayElement(input)) {
1441 }
1442 (*types)[i] = DeoptMachineTypeOf(input_rep, TypeOf(input));
1443 }
1445 ChangeOp(node, common()->TypedStateValues(types, mask));
1446 }
1448 }
1449
1450 template <Phase T>
1452 DCHECK_EQ(5, node->op()->ValueInputCount());
1454 DCHECK_EQ(FrameState::kFrameStateInputCount, node->InputCount());
1455
1460
1461 // Accumulator is a special flower - we need to remember its type in
1462 // a singleton typed-state-values node (as if it was a singleton
1463 // state-values node).
1464 Node* accumulator = node.stack();
1465 if (propagate<T>()) {
1466 if (IsLargeBigInt(TypeOf(accumulator))) {
1469 } else if (IsLoadFloat16ArrayElement(accumulator)) {
1472 } else {
1474 UseInfo::Any());
1475 }
1476 } else if (lower<T>()) {
1477 MachineRepresentation accumulator_rep =
1478 GetInfo(accumulator)->representation();
1479 Type accumulator_type = TypeOf(accumulator);
1480 if (IsLargeBigInt(accumulator_type)) {
1483 accumulator = node.stack();
1484 } else if (IsLoadFloat16ArrayElement(accumulator)) {
1487 accumulator = node.stack();
1488 accumulator_rep = MachineRepresentation::kFloat64;
1489 }
1490 Zone* zone = jsgraph_->zone();
1491 if (accumulator == jsgraph_->OptimizedOutConstant()) {
1492 node->ReplaceInput(FrameState::kFrameStateStackInput,
1493 jsgraph_->SingleDeadTypedStateValues());
1494 } else {
1497 (*types)[0] = DeoptMachineTypeOf(accumulator_rep, accumulator_type);
1498
1499 node->ReplaceInput(
1502 common()->TypedStateValues(types, SparseInputMask::Dense()),
1503 node.stack()));
1504 }
1505 }
1506
1514 }
1515
1516 template <Phase T>
1518 if (propagate<T>()) {
1519 for (int i = 0; i < node->InputCount(); i++) {
1520 if (IsLargeBigInt(TypeOf(node->InputAt(i)))) {
1522 } else if (IsLoadFloat16ArrayElement(node->InputAt(i))) {
1524 } else {
1525 EnqueueInput<T>(node, i, UseInfo::Any());
1526 }
1527 }
1528 } else if (lower<T>()) {
1529 Zone* zone = jsgraph_->zone();
1531 zone->New<ZoneVector<MachineType>>(node->InputCount(), zone);
1532 for (int i = 0; i < node->InputCount(); i++) {
1533 Node* input = node->InputAt(i);
1534 MachineRepresentation input_rep = GetInfo(input)->representation();
1535 if (IsLargeBigInt(TypeOf(input))) {
1537 } else if (IsLoadFloat16ArrayElement(input)) {
1540 }
1541 (*types)[i] = DeoptMachineTypeOf(input_rep, TypeOf(input));
1542 }
1543 ChangeOp(node, common()->TypedObjectState(ObjectIdOf(node->op()), types));
1544 }
1546 }
1547
1548 const Operator* Int32Op(Node* node) {
1549 return changer_->Int32OperatorFor(node->opcode());
1550 }
1551
1553 return changer_->Int32OverflowOperatorFor(node->opcode());
1554 }
1555
1559
1560 const Operator* Int64Op(Node* node) {
1561 return changer_->Int64OperatorFor(node->opcode());
1562 }
1563
1565 return changer_->Int64OverflowOperatorFor(node->opcode());
1566 }
1567
1568 const Operator* BigIntOp(Node* node) {
1569 return changer_->BigIntOperatorFor(node->opcode());
1570 }
1571
1572 const Operator* Uint32Op(Node* node) {
1573 return changer_->Uint32OperatorFor(node->opcode());
1574 }
1575
1577 return changer_->Uint32OverflowOperatorFor(node->opcode());
1578 }
1579
1580 const Operator* Float64Op(Node* node) {
1581 return changer_->Float64OperatorFor(node->opcode());
1582 }
1583
1585 BaseTaggedness base_taggedness,
1586 MachineRepresentation field_representation, Type field_type,
1587 MachineRepresentation value_representation, Node* value) {
1588 if (base_taggedness == kTaggedBase &&
1589 CanBeTaggedPointer(field_representation)) {
1590 Type value_type = NodeProperties::GetType(value);
1591 if (value_representation == MachineRepresentation::kTaggedSigned) {
1592 // Write barriers are only for stores of heap objects.
1593 return kNoWriteBarrier;
1594 }
1595 if (field_type.Is(Type::BooleanOrNullOrUndefined()) ||
1596 value_type.Is(Type::BooleanOrNullOrUndefined())) {
1597 // Write barriers are not necessary when storing true, false, null or
1598 // undefined, because these special oddballs are always in the root set.
1599 return kNoWriteBarrier;
1600 }
1601 if (value_type.IsHeapConstant()) {
1602 RootIndex root_index;
1603 const RootsTable& roots_table = jsgraph_->isolate()->roots_table();
1604 if (roots_table.IsRootHandle(value_type.AsHeapConstant()->Value(),
1605 &root_index)) {
1606 if (RootsTable::IsImmortalImmovable(root_index)) {
1607 // Write barriers are unnecessary for immortal immovable roots.
1608 return kNoWriteBarrier;
1609 }
1610 }
1611 }
1612 if (field_representation == MachineRepresentation::kTaggedPointer ||
1613 value_representation == MachineRepresentation::kTaggedPointer) {
1614 // Write barriers for heap objects are cheaper.
1615 return kPointerWriteBarrier;
1616 }
1617 NumberMatcher m(value);
1618 if (m.HasResolvedValue()) {
1619 if (IsSmiDouble(m.ResolvedValue())) {
1620 // Storing a smi doesn't need a write barrier.
1621 return kNoWriteBarrier;
1622 }
1623 // The NumberConstant will be represented as HeapNumber.
1624 return kPointerWriteBarrier;
1625 }
1626 return kFullWriteBarrier;
1627 }
1628 return kNoWriteBarrier;
1629 }
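// Illustrative sketch (not part of this file): the decision above amounts to
// "omit the write barrier whenever the GC can never need to know about this
// store, and prefer the cheaper pointer barrier when the value is known to
// be a heap pointer". A reduced version over boolean facts about the store;
// ToyWriteBarrierKind and ChooseWriteBarrier are hypothetical.
enum class ToyWriteBarrierKind { kNo, kPointer, kFull };

inline ToyWriteBarrierKind ChooseWriteBarrier(bool tagged_base,
                                              bool value_is_smi,
                                              bool value_is_immortal_root,
                                              bool value_is_heap_pointer) {
  if (!tagged_base) return ToyWriteBarrierKind::kNo;  // untagged (off-heap) base
  if (value_is_smi) return ToyWriteBarrierKind::kNo;  // no pointer is stored
  if (value_is_immortal_root) return ToyWriteBarrierKind::kNo;  // never moves
  if (value_is_heap_pointer) return ToyWriteBarrierKind::kPointer;  // cheaper
  return ToyWriteBarrierKind::kFull;  // unknown value: be conservative
}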
1630
1632 BaseTaggedness base_taggedness,
1633 MachineRepresentation field_representation, int field_offset,
1634 Type field_type, MachineRepresentation value_representation,
1635 Node* value) {
1636 WriteBarrierKind write_barrier_kind =
1637 WriteBarrierKindFor(base_taggedness, field_representation, field_type,
1638 value_representation, value);
1639 if (write_barrier_kind != kNoWriteBarrier) {
1640 if (base_taggedness == kTaggedBase &&
1641 field_offset == HeapObject::kMapOffset) {
1642 write_barrier_kind = kMapWriteBarrier;
1643 }
1644 }
1645 return write_barrier_kind;
1646 }
1647
1648 TFGraph* graph() const { return jsgraph_->graph(); }
1651 return jsgraph_->simplified();
1652 }
1653
1654 template <Phase T>
1656 Type input0_type, Type input1_type,
1657 UseInfo input_use) {
1658 DCHECK_EQ(node->opcode(), IrOpcode::kSpeculativeNumberMultiply);
1659 // A -0 input is impossible or will cause a deopt.
1660 DCHECK(BothInputsAre(node, Type::Signed32()) ||
1661 !input_use.truncation().IdentifiesZeroAndMinusZero());
1662
1663 CheckForMinusZeroMode mz_mode;
1664 Type restriction;
1665 if (IsSomePositiveOrderedNumber(input0_type) ||
1666 IsSomePositiveOrderedNumber(input1_type)) {
1668 restriction = Type::Signed32();
1669 } else if (truncation.IdentifiesZeroAndMinusZero()) {
1671 restriction = Type::Signed32OrMinusZero();
1672 } else {
1674 restriction = Type::Signed32();
1675 }
1676
1677 VisitBinop<T>(node, input_use, MachineRepresentation::kWord32, restriction);
1678 if (lower<T>()) ChangeOp(node, simplified()->CheckedInt32Mul(mz_mode));
1679 }
1680
1682 ChangeOp(node, Int32OverflowOp(node));
1683 }
1684
1686 ChangeOp(node, Uint32OverflowOp(node));
1687 }
1688
1689 template <Phase T>
1691 SimplifiedLowering* lowering) {
1692 Type left_upper = GetUpperBound(node->InputAt(0));
1693 Type right_upper = GetUpperBound(node->InputAt(1));
1694
1697 // Only eliminate the node if its typing rule can be satisfied, namely
1698 // that a safe integer is produced.
1699 if (truncation.IsUnused()) return VisitUnused<T>(node);
1700
1701 // If we know how to interpret the result or if the users only care
1702 // about the low 32-bits, we can truncate to Word32 and do a wrapping
1703 // addition.
1704 if (GetUpperBound(node).Is(Type::Signed32()) ||
1705 GetUpperBound(node).Is(Type::Unsigned32()) ||
1706 truncation.IsUsedAsWord32()) {
1707 // => Int32Add/Sub
1709 if (lower<T>()) ChangeToPureOp(node, Int32Op(node));
1710 return;
1711 }
1712 }
1713
1714 // Try to use type feedback.
1716 DCHECK_EQ(hint, NumberOperationHintOf(node->op()));
1717
1718 Type left_feedback_type = TypeOf(node->InputAt(0));
1719 Type right_feedback_type = TypeOf(node->InputAt(1));
1720
1721 // Using Signed32 as restriction type amounts to promising there won't be
1722 // signed overflow. This is incompatible with relying on a Word32 truncation
1723 // in order to skip the overflow check. Similarly, we must not drop -0 from
1724 // the result type unless we deopt for -0 inputs.
1725 Type const restriction =
1726 truncation.IsUsedAsWord32()
1727 ? Type::Any()
1728 : (truncation.identify_zeros() == kIdentifyZeros)
1729 ? Type::Signed32OrMinusZero()
1730 : Type::Signed32();
1731
1732 // Handle the case when no int32 checks on inputs are necessary (but
1733 // an overflow check is needed on the output). Note that we do not
1734 // have to do any check if at most one side can be minus zero. For
1735 // subtraction we need to handle the case of -0 - 0 properly, since
1736 // that can produce -0.
1737 Type left_constraint_type =
1738 node->opcode() == IrOpcode::kSpeculativeSmallIntegerAdd
1739 ? Type::Signed32OrMinusZero()
1740 : Type::Signed32();
1741 if (left_upper.Is(left_constraint_type) &&
1742 right_upper.Is(Type::Signed32OrMinusZero()) &&
1743 (left_upper.Is(Type::Signed32()) || right_upper.Is(Type::Signed32()))) {
1745 MachineRepresentation::kWord32, restriction);
1746 } else {
1747 // If the output's truncation is identify-zeros, we can pass it
1748 // along. Moreover, if the operation is addition and we know the
1749 // right-hand side is not minus zero, we do not have to distinguish
1750 // between 0 and -0.
1751 IdentifyZeros left_identify_zeros = truncation.identify_zeros();
1752 if (node->opcode() == IrOpcode::kSpeculativeSmallIntegerAdd &&
1753 !right_feedback_type.Maybe(Type::MinusZero())) {
1754 left_identify_zeros = kIdentifyZeros;
1755 }
1756 UseInfo left_use =
1757 CheckedUseInfoAsWord32FromHint(hint, left_identify_zeros);
1758 // For CheckedInt32Add and CheckedInt32Sub, we don't need to do
1759 // a minus zero check for the right hand side, since we already
1760 // know that the left hand side is a proper Signed32 value,
1761 // potentially guarded by a check.
1762 UseInfo right_use = CheckedUseInfoAsWord32FromHint(hint, kIdentifyZeros);
1763 VisitBinop<T>(node, left_use, right_use, MachineRepresentation::kWord32,
1764 restriction);
1765 }
1766
1767 if (lower<T>()) {
1768 if (truncation.IsUsedAsWord32() ||
1769 !CanOverflowSigned32(node->op(), left_feedback_type,
1770 right_feedback_type, type_cache_,
1771 graph_zone())) {
1772 ChangeToPureOp(node, Int32Op(node));
1773 } else {
1775 }
1776 }
1777 }
1778
1780 if (!v8_flags.additive_safe_int_feedback) return false;
1781 if (NumberOperationHintOf(node->op()) !=
1783 return false;
1784 }
1785 DCHECK_EQ(2, node->op()->ValueInputCount());
1786 Node* lhs = node->InputAt(0);
1787 auto lhs_restriction_type = GetInfo(lhs)->restriction_type();
1788 Node* rhs = node->InputAt(1);
1789 auto rhs_restriction_type = GetInfo(rhs)->restriction_type();
1790 // Only speculate AdditiveSafeInteger if one of the sides is already known
1791 // to be in the AdditiveSafeInteger range, since the check is relatively
1792 // expensive.
1795 lhs_restriction_type.Is(type_cache_->kAdditiveSafeInteger) ||
1796 rhs_restriction_type.Is(type_cache_->kAdditiveSafeInteger);
1797 }
1798
1799 template <Phase T>
1801 SimplifiedLowering* lowering) {
1802 if (GetUpperBound(node).Is(Type::Signed32()) ||
1803 GetUpperBound(node).Is(Type::Unsigned32()) ||
1804 truncation.IsUsedAsWord32()) {
1806 // => Int32Add/Sub
1809 if (lower<T>()) ChangeToPureOp(node, Int32Op(node));
1810 return;
1811 }
1812
1814 // This case handles addition where the result might be truncated to
1815 // word32. Even if the inputs might be larger than 2^32, we can safely
1816 // perform 32-bit addition *here* if the inputs are in the additive safe
1817 // range. We *must* propagate the CheckedSafeIntTruncatingWord32
1818 // information. This is because we need to ensure that we deoptimize if
1819 // either input is not an integer, or not in the range.
1820 // => Int32Add/Sub
1821 VisitBinop<T>(node,
1824 if (lower<T>()) ChangeToPureOp(node, Int32Op(node));
1825 return;
1826 }
1827 } else if (CanSpeculateAdditiveSafeInteger(node)) {
1828 // => AdditiveSafeIntegerAdd/Sub
1833 return;
1834 }
1835
1836 // Default case => Float64Add/Sub
1837 VisitBinop<T>(node,
1839 FeedbackSource()),
1840 MachineRepresentation::kFloat64, Type::Number());
1841 if (lower<T>()) {
1842 ChangeToPureOp(node, Float64Op(node));
1843 }
1844 }
1845
1846 template <Phase T>
1848 SimplifiedLowering* lowering) {
1849 if (BothInputsAre(node, Type::Unsigned32OrMinusZeroOrNaN()) &&
1850 (truncation.IsUsedAsWord32() ||
1851 NodeProperties::GetType(node).Is(Type::Unsigned32()))) {
1852 // => unsigned Uint32Mod
1854 if (lower<T>()) DeferReplacement(node, lowering->Uint32Mod(node));
1855 return;
1856 }
1857 if (BothInputsAre(node, Type::Signed32OrMinusZeroOrNaN()) &&
1858 (truncation.IsUsedAsWord32() ||
1859 NodeProperties::GetType(node).Is(Type::Signed32()))) {
1860 // => signed Int32Mod
1862 if (lower<T>()) DeferReplacement(node, lowering->Int32Mod(node));
1863 return;
1864 }
1865
1866 // Try to use type feedback.
1867 NumberOperationHint hint = NumberOperationHintOf(node->op());
1868
1869 // Handle the case when no uint32 checks on inputs are necessary
1870 // (but an overflow check is needed on the output).
1871 if (BothInputsAreUnsigned32(node)) {
1874 MachineRepresentation::kWord32, Type::Unsigned32());
1876 return;
1877 }
1878 }
1879
1880 // Handle the case when no int32 checks on inputs are necessary
1881 // (but an overflow check is needed on the output).
1882 if (BothInputsAre(node, Type::Signed32())) {
1883 // If both the inputs and the feedback are int32, use the overflow op.
1886 MachineRepresentation::kWord32, Type::Signed32());
1887 if (lower<T>()) ChangeToInt32OverflowOp(node);
1888 return;
1889 }
1890 }
1891
1893 // If the result is truncated, we only need to check the inputs.
1894 // For the left hand side we just propagate the identify zeros
1895 // mode of the {truncation}; and for modulus the sign of the
1896 // right hand side doesn't matter anyways, so in particular there's
1897 // no observable difference between a 0 and a -0 then.
1898 UseInfo const lhs_use =
1899 CheckedUseInfoAsWord32FromHint(hint, truncation.identify_zeros());
1900 UseInfo const rhs_use =
1901 CheckedUseInfoAsWord32FromHint(hint, kIdentifyZeros);
1902 if (truncation.IsUsedAsWord32()) {
1903 VisitBinop<T>(node, lhs_use, rhs_use, MachineRepresentation::kWord32);
1904 if (lower<T>()) DeferReplacement(node, lowering->Int32Mod(node));
1905 } else if (BothInputsAre(node, Type::Unsigned32OrMinusZeroOrNaN())) {
1906 Type const restriction =
1907 truncation.IdentifiesZeroAndMinusZero() &&
1908 TypeOf(node->InputAt(0)).Maybe(Type::MinusZero())
1909 ? Type::Unsigned32OrMinusZero()
1910 : Type::Unsigned32();
1911 VisitBinop<T>(node, lhs_use, rhs_use, MachineRepresentation::kWord32,
1912 restriction);
1914 } else {
1915 Type const restriction =
1916 truncation.IdentifiesZeroAndMinusZero() &&
1917 TypeOf(node->InputAt(0)).Maybe(Type::MinusZero())
1918 ? Type::Signed32OrMinusZero()
1919 : Type::Signed32();
1920 VisitBinop<T>(node, lhs_use, rhs_use, MachineRepresentation::kWord32,
1921 restriction);
1922 if (lower<T>()) ChangeToInt32OverflowOp(node);
1923 }
1924 return;
1925 }
1926
1927 if (TypeOf(node->InputAt(0)).Is(Type::Unsigned32()) &&
1928 TypeOf(node->InputAt(1)).Is(Type::Unsigned32()) &&
1929 (truncation.IsUsedAsWord32() ||
1930 NodeProperties::GetType(node).Is(Type::Unsigned32()))) {
1932 MachineRepresentation::kWord32, Type::Number());
1933 if (lower<T>()) DeferReplacement(node, lowering->Uint32Mod(node));
1934 return;
1935 }
1936 if (TypeOf(node->InputAt(0)).Is(Type::Signed32()) &&
1937 TypeOf(node->InputAt(1)).Is(Type::Signed32()) &&
1938 (truncation.IsUsedAsWord32() ||
1939 NodeProperties::GetType(node).Is(Type::Signed32()))) {
1941 MachineRepresentation::kWord32, Type::Number());
1942 if (lower<T>()) DeferReplacement(node, lowering->Int32Mod(node));
1943 return;
1944 }
1945
1946 // default case => Float64Mod
1947 // For the left hand side we just propagate the identify zeros
1948 // mode of the {truncation}; and for modulus the sign of the
1949 // right hand side doesn't matter anyways, so in particular there's
1950 // no observable difference between a 0 and a -0 then.
1952 truncation.identify_zeros(), FeedbackSource());
1955 VisitBinop<T>(node, lhs_use, rhs_use, MachineRepresentation::kFloat64,
1956 Type::Number());
1957 if (lower<T>()) ChangeToPureOp(node, Float64Op(node));
1958 }
1959
1960 // Just assert for Propagate and Retype. Lower specialized below.
1961 template <Phase T>
1963 static_assert(propagate<T>() || retype<T>(),
1964 "This version of InsertUnreachableIfNecessary has to be "
1965 "called in the Propagate or Retype phase.");
1966 }
1967
1968 template <Phase T>
1970 CheckBoundsParameters const& p = CheckBoundsParametersOf(node->op());
1971 FeedbackSource const& feedback = p.check_parameters().feedback();
1972 Type const index_type = TypeOf(node->InputAt(0));
1973 Type const length_type = TypeOf(node->InputAt(1));
1974
1975 // Conversions, if requested and needed, will be handled by the
1976 // representation changer, not by the lower-level Checked*Bounds operators.
1977 CheckBoundsFlags new_flags =
1979
1980 if (length_type.Is(Type::Unsigned31())) {
1981 if (index_type.Is(Type::Integral32()) ||
1982 (index_type.Is(Type::Integral32OrMinusZero()) &&
1984 // Map the values in the [-2^31,-1] range to the [2^31,2^32-1] range,
1985 // which will be considered out-of-bounds because the {length_type} is
1986 // limited to Unsigned31. This also converts -0 to 0.
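// A minimal sketch of the unsigned-compare trick relied on here
// (illustrative only; InBounds is not a V8 helper): reinterpreting the
// index as unsigned makes one comparison cover both "index < 0" and
// "index >= length":
//
//   bool InBounds(int32_t index, uint32_t length) {
//     // Negative indices wrap to values >= 2^31 and thus compare as
//     // out-of-bounds against any Unsigned31 length.
//     return static_cast<uint32_t>(index) < length;
//   }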
1989 if (lower<T>()) {
1990 if (index_type.IsNone() || length_type.IsNone() ||
1991 (index_type.Min() >= 0.0 &&
1992 index_type.Max() < length_type.Min())) {
1993 // The bounds check is redundant if we already know that
1994 // the index is within the bounds of [0.0, length[.
1995 // TODO(neis): Move this into TypedOptimization?
1996 if (v8_flags.turbo_typer_hardening) {
1998 } else {
2000 return;
2001 }
2002 }
2003 ChangeOp(node,
2004 simplified()->CheckedUint32Bounds(feedback, new_flags));
2005 }
2009 if (lower<T>()) {
2010 if (jsgraph_->machine()->Is64()) {
2011 ChangeOp(node,
2012 simplified()->CheckedUint64Bounds(feedback, new_flags));
2013 } else {
2014 ChangeOp(node,
2015 simplified()->CheckedUint32Bounds(feedback, new_flags));
2016 }
2017 }
2018 } else {
2022 if (lower<T>()) {
2023 ChangeOp(node,
2024 simplified()->CheckedUint32Bounds(feedback, new_flags));
2025 }
2026 }
2027 } else {
2029 IdentifyZeros zero_handling =
2033 VisitBinop<T>(node,
2034 UseInfo::CheckedSigned64AsWord64(zero_handling, feedback),
2036 if (lower<T>()) {
2037 ChangeOp(node, simplified()->CheckedUint64Bounds(feedback, new_flags));
2038 }
2039 }
2040 }
2041
2044 FeedbackSource const& feedback) {
2046 switch (type.GetSequenceType()) {
2047 case CTypeInfo::SequenceType::kScalar: {
2048 uint8_t flags = uint8_t(type.GetFlags());
2049 if (flags & uint8_t(CTypeInfo::Flags::kEnforceRangeBit) ||
2050 flags & uint8_t(CTypeInfo::Flags::kClampBit)) {
2052 // If the parameter is marked as `kEnforceRange` or `kClampBit`, then
2053 // special type conversion gets added explicitly to the generated
2054 // code. Therefore it is sufficient to only require here that the
2055 // value is a Float64, even though the C++ signature actually asks for
2056 // an `int32_t`.
2058 }
2059 switch (type.GetType()) {
2062 UNREACHABLE();
2064 return UseInfo::Bool();
2067 return UseInfo::CheckedNumberAsWord32(feedback);
2068 // TODO(mslekova): We deopt for unsafe integers, but ultimately we
2069 // want to make this less restrictive in order to stay on the fast
2070 // path.
2077 } else {
2078 UNREACHABLE();
2079 }
2089 return UseInfo::AnyTagged();
2090 }
2091 }
2092 case CTypeInfo::SequenceType::kIsSequence: {
2093 CHECK_EQ(type.GetType(), CTypeInfo::Type::kVoid);
2094 return UseInfo::AnyTagged();
2095 }
2096 default: {
2097 UNREACHABLE(); // TODO(mslekova): Implement array buffers.
2098 }
2099 }
2101 }
2102
2103 static constexpr int kInitialArgumentsCount = 10;
2104
2105 template <Phase T>
2107 FastApiCallParameters const& op_params =
2108 FastApiCallParametersOf(node->op());
2109 // We only consider the first function signature here. In case of function
2110 // overloads, we only support the case of two functions that differ in one
2111 // argument, which must be a JSArray in one function and a TypedArray in the
2112 // other function; both JSArrays and TypedArrays have the same UseInfo,
2113 // UseInfo::AnyTagged(). All the other argument types must match.
2114 const CFunctionInfo* c_signature = op_params.c_function().signature;
2115 const int c_arg_count = c_signature->ArgumentCount();
2116 CallDescriptor* call_descriptor = op_params.descriptor();
2117 // Arguments for CallApiCallbackOptimizedXXX builtin (including context)
2118 // plus JS arguments (including receiver).
2119 int slow_arg_count = static_cast<int>(call_descriptor->ParameterCount());
2120 const int value_input_count = node->op()->ValueInputCount();
2121 CHECK_EQ(FastApiCallNode::ArityForArgc(c_arg_count, slow_arg_count),
2122 value_input_count);
2123
2124 FastApiCallNode n(node);
2125
2127 c_arg_count);
2128 // Propagate representation information from TypeInfo.
2129 int cursor = 0;
2130 for (int i = 0; i < c_arg_count; i++) {
2131 arg_use_info[i] = UseInfoForFastApiCallArgument(
2132 c_signature->ArgumentInfo(i), c_signature->GetInt64Representation(),
2133 op_params.feedback());
2134 ProcessInput<T>(node, cursor++, arg_use_info[i]);
2135 }
2136 // Callback data for fast call.
2137 DCHECK_EQ(n.CallbackDataIndex(), cursor);
2138 ProcessInput<T>(node, cursor++, UseInfo::AnyTagged());
2139
2140 // The call code for the slow call.
2141 ProcessInput<T>(node, cursor++, UseInfo::AnyTagged());
2142 // For the slow builtin parameters (indexes [1, ..., params]), propagate
2143 // representation information from call descriptor.
2144 for (int i = 1; i <= slow_arg_count; i++) {
2145 ProcessInput<T>(node, cursor++,
2146 TruncatingUseInfoFromRepresentation(
2147 call_descriptor->GetInputType(i).representation()));
2148 }
2149 // Visit frame state input as tagged.
2150 DCHECK_EQ(n.FrameStateIndex(), cursor);
2151 ProcessInput<T>(node, cursor++, UseInfo::AnyTagged());
2152 DCHECK_EQ(cursor, value_input_count);
2153
2154 // Effect and Control.
2155 ProcessRemainingInputs<T>(node, value_input_count);
2156
2157 CTypeInfo return_type = op_params.c_function().signature->ReturnInfo();
2158 switch (return_type.GetType()) {
2161 return;
2164 return;
2167 return;
2170 return;
2173 if (c_signature->GetInt64Representation() ==
2176 return;
2177 }
2178 DCHECK_EQ(c_signature->GetInt64Representation(),
2181 return;
2184 return;
2187 return;
2190 return;
2192 // This type is only supposed to be used for parameters, not returns.
2193 UNREACHABLE();
2199 return;
2200 }
2201 }
2202
2203 template <Phase T>
2204 bool TryOptimizeBigInt64Shift(Node* node, const Truncation& truncation,
2205 SimplifiedLowering* lowering) {
2206 DCHECK(Is64());
2207 if (!truncation.IsUsedAsWord64()) return false;
2208
2209 Type input_type = GetUpperBound(node->InputAt(0));
2210 Type shift_amount_type = GetUpperBound(node->InputAt(1));
2211
2212 if (!shift_amount_type.IsHeapConstant()) return false;
2213 HeapObjectRef ref = shift_amount_type.AsHeapConstant()->Ref();
2214 if (!ref.IsBigInt()) return false;
2215 BigIntRef bigint = ref.AsBigInt();
2216 bool lossless = false;
2217 int64_t shift_amount = bigint.AsInt64(&lossless);
2218 // We bail out if we cannot represent the shift amount correctly.
2219 if (!lossless) return false;
2220
2221 // Canonicalize {shift_amount}.
2222 bool is_shift_left =
2223 node->opcode() == IrOpcode::kSpeculativeBigIntShiftLeft;
2224 if (shift_amount < 0) {
2225 // A shift amount of abs(std::numeric_limits<int64_t>::min()) is not
2226 // representable.
2227 if (shift_amount == std::numeric_limits<int64_t>::min()) return false;
2228 is_shift_left = !is_shift_left;
2229 shift_amount = -shift_amount;
2230 DCHECK_GT(shift_amount, 0);
2231 }
2232 DCHECK_GE(shift_amount, 0);
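// Illustrative check of the canonicalization above (not from this file): a
// BigInt shift by a negative amount is the opposite shift by its absolute
// value, e.g. in JavaScript
//
//   (8n << -2n) === (8n >> 2n)    // both are 2n
//   (-5n << -1n) === (-5n >> 1n)  // both are -3n (floor division by 2)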
2233
2234 // If the operation is a *real* left shift, propagate truncation.
2235 // If it is a *real* right shift, the output representation is
2236 // word64 only if we know the input type is BigInt64.
2237 // Otherwise, fall through to using BigIntOperationHint.
2238 if (is_shift_left) {
2239 VisitBinop<T>(node,
2242 if (lower<T>()) {
2243 if (shift_amount > 63) {
2245 } else if (shift_amount == 0) {
2246 DeferReplacement(node, node->InputAt(0));
2247 } else {
2248 DCHECK_GE(shift_amount, 1);
2249 DCHECK_LE(shift_amount, 63);
2251 node, graph()->NewNode(lowering->machine()->Word64Shl(),
2252 node->InputAt(0),
2253 jsgraph_->Int64Constant(shift_amount)));
2254 }
2255 }
2256 return true;
2257 } else if (input_type.Is(Type::SignedBigInt64())) {
2258 VisitBinop<T>(node,
2261 if (lower<T>()) {
2262 if (shift_amount > 63) {
2264 node,
2265 graph()->NewNode(lowering->machine()->Word64Sar(),
2266 node->InputAt(0), jsgraph_->Int64Constant(63)));
2267 } else if (shift_amount == 0) {
2268 DeferReplacement(node, node->InputAt(0));
2269 } else {
2270 DCHECK_GE(shift_amount, 1);
2271 DCHECK_LE(shift_amount, 63);
2273 node, graph()->NewNode(lowering->machine()->Word64Sar(),
2274 node->InputAt(0),
2275 jsgraph_->Int64Constant(shift_amount)));
2276 }
2277 }
2278 return true;
2279 } else if (input_type.Is(Type::UnsignedBigInt64())) {
2280 VisitBinop<T>(node,
2283 if (lower<T>()) {
2284 if (shift_amount > 63) {
2286 } else if (shift_amount == 0) {
2287 DeferReplacement(node, node->InputAt(0));
2288 } else {
2289 DCHECK_GE(shift_amount, 1);
2290 DCHECK_LE(shift_amount, 63);
2292 node, graph()->NewNode(lowering->machine()->Word64Shr(),
2293 node->InputAt(0),
2294 jsgraph_->Int64Constant(shift_amount)));
2295 }
2296 }
2297 return true;
2298 }
2299
2300 // None of the cases here can be optimized.
2301 return false;
2302 }
2303
2304#if V8_ENABLE_WEBASSEMBLY
2305 static MachineType MachineTypeForWasmReturnType(
2307 switch (type.kind()) {
2308 case wasm::kI32:
2309 return MachineType::Int32();
2310 case wasm::kI64:
2311 return MachineType::Int64();
2312 case wasm::kF32:
2313 return MachineType::Float32();
2314 case wasm::kF64:
2315 return MachineType::Float64();
2316 case wasm::kRef:
2317 case wasm::kRefNull:
2318 return MachineType::AnyTagged();
2319 default:
2320 UNREACHABLE();
2321 }
2322 }
2323
2324 UseInfo UseInfoForJSWasmCallArgument(Node* input,
2325 wasm::CanonicalValueType type,
2326 FeedbackSource const& feedback) {
2327 // If the input type is a Number or Oddball, we can directly convert the
2328 // input into the Wasm native type of the argument. If not, we return
2329 // UseInfo::AnyTagged to signal that WasmWrapperGraphBuilder will need to
2330 // add Nodes to perform the conversion (in WasmWrapperGraphBuilder::FromJS).
2331 switch (type.kind()) {
2332 case wasm::kI32:
2334 case wasm::kI64:
2336 case wasm::kF32:
2337 case wasm::kF64:
2338 // For Float32, TruncateFloat64ToFloat32 will be inserted later in
2339 // WasmWrapperGraphBuilder::BuildJSToWasmWrapper.
2341 feedback);
2342 case wasm::kRef:
2343 case wasm::kRefNull:
2344 return UseInfo::AnyTagged();
2345 default:
2346 UNREACHABLE();
2347 }
2348 }
2349
2350 template <Phase T>
2351 void VisitJSWasmCall(Node* node, SimplifiedLowering* lowering) {
2352 DCHECK_EQ(JSWasmCallNode::TargetIndex(), 0);
2353 DCHECK_EQ(JSWasmCallNode::ReceiverIndex(), 1);
2354 DCHECK_EQ(JSWasmCallNode::FirstArgumentIndex(), 2);
2355
2356 JSWasmCallNode n(node);
2357
2358 JSWasmCallParameters const& params = n.Parameters();
2359 const wasm::CanonicalSig* wasm_signature = params.signature();
2360 int wasm_arg_count = static_cast<int>(wasm_signature->parameter_count());
2361 DCHECK_EQ(wasm_arg_count, n.ArgumentCount());
2362
2363 base::SmallVector<UseInfo, kInitialArgumentsCount> arg_use_info(
2364 wasm_arg_count);
2365
2366 // Visit JSFunction and Receiver nodes.
2367 ProcessInput<T>(node, JSWasmCallNode::TargetIndex(), UseInfo::Any());
2368 ProcessInput<T>(node, JSWasmCallNode::ReceiverIndex(), UseInfo::Any());
2369
2370 // Propagate representation information from TypeInfo.
2371 for (int i = 0; i < wasm_arg_count; i++) {
2372 TNode<Object> input = n.Argument(i);
2373 DCHECK_NOT_NULL(input);
2374 arg_use_info[i] = UseInfoForJSWasmCallArgument(
2375 input, wasm_signature->GetParam(i), params.feedback());
2376 ProcessInput<T>(node, JSWasmCallNode::ArgumentIndex(i), arg_use_info[i]);
2377 }
2378
2379 // Visit value, context and frame state inputs as tagged.
2380 int first_effect_index = NodeProperties::FirstEffectIndex(node);
2381 DCHECK(first_effect_index >
2382 JSWasmCallNode::FirstArgumentIndex() + wasm_arg_count);
2383 for (int i = JSWasmCallNode::FirstArgumentIndex() + wasm_arg_count;
2384 i < first_effect_index; i++) {
2386 }
2387
2388 // Effect and Control.
2390
2391 if (wasm_signature->return_count() == 1) {
2392 MachineType return_type =
2393 MachineTypeForWasmReturnType(wasm_signature->GetReturn());
2395 node, return_type.representation(),
2396 JSWasmCallNode::TypeForWasmReturnType(wasm_signature->GetReturn()));
2397 } else {
2398 DCHECK_EQ(wasm_signature->return_count(), 0);
2400 }
2401
2402 // The actual lowering of JSWasmCall nodes happens later, in the subsequent
2403 // "wasm-inlining" phase.
2404 }
2405#endif // V8_ENABLE_WEBASSEMBLY
2406
2407 // Dispatching routine for visiting the node {node} with the usage {use}.
2408 // Depending on the operator, propagate new usage info to the inputs.
2409 template <Phase T>
2410 void VisitNode(Node* node, Truncation truncation,
2411 SimplifiedLowering* lowering) {
2413
2414 if (lower<T>()) {
2415 // Kill non-effectful operations that have a None-type input and are thus
2416 // dead code. Otherwise we might end up lowering the operation in a way,
2417 // e.g. by replacing it with a constant, that cuts the dependency on a
2418 // deopting operation (the producer of the None type), possibly resulting
2419 // in a nonsense schedule.
2420 if (node->op()->EffectOutputCount() == 0 &&
2421 node->op()->ControlOutputCount() == 0 &&
2422 node->opcode() != IrOpcode::kDeadValue &&
2423 node->opcode() != IrOpcode::kStateValues &&
2424 node->opcode() != IrOpcode::kFrameState &&
2425 node->opcode() != IrOpcode::kPhi) {
2426 for (int i = 0; i < node->op()->ValueInputCount(); i++) {
2427 Node* input = node->InputAt(i);
2428 if (TypeOf(input).IsNone()) {
2429 node->ReplaceInput(0, input);
2430 node->TrimInputCount(1);
2431 ChangeOp(node,
2432 common()->DeadValue(GetInfo(node)->representation()));
2433 return;
2434 }
2435 }
2436 } else {
2438 }
2439 }
2440
2441 // Unconditionally eliminate unused pure nodes (only relevant if there's
2442 // a pure operation in between two effectful ones, where the last one
2443 // is unused).
2444 // Note: We must not do this for constants, as they are cached and we
2445 // would thus kill the cached {node} during lowering (i.e. replace all
2446 // uses with Dead), but at that point some node lowering might have
2447 // already taken the constant {node} from the cache (while it was not
2448 // yet killed) and we would afterwards replace that use with Dead as well.
2449 if (node->op()->ValueInputCount() > 0 &&
2450 node->op()->HasProperty(Operator::kPure) && truncation.IsUnused()) {
2451 return VisitUnused<T>(node);
2452 }
2453
2454 switch (node->opcode()) {
2455 //------------------------------------------------------------------
2456 // Common operators.
2457 //------------------------------------------------------------------
2458 case IrOpcode::kStart:
2459 // We use Start as a terminator for the frame state chain, so even
2460 // though Start doesn't really produce a value, we have to say Tagged
2461 // here, otherwise the input conversion will fail.
2463 case IrOpcode::kParameter:
2464 return VisitUnop<T>(node, UseInfo::None(),
2465 linkage()
2466 ->GetParameterType(ParameterIndexOf(node->op()))
2467 .representation());
2468 case IrOpcode::kInt32Constant:
2469 DCHECK_EQ(0, node->InputCount());
2471 DCHECK(NodeProperties::GetType(node).Is(Type::Machine()));
2473 // During lowering, SimplifiedLowering generates Int32Constants which
2474 // need to be treated differently by the verifier than the
2475 // Int32Constants introduced explicitly in machine graphs. To be able
2476 // to distinguish them, we record those that are being visited here
2477 // because they were generated before SimplifiedLowering.
2478 if (propagate<T>()) {
2479 verifier_->RecordMachineUsesOfConstant(node, node->uses());
2480 }
2481 }
2482 return;
2483 case IrOpcode::kInt64Constant:
2485 case IrOpcode::kExternalConstant:
2487 case IrOpcode::kNumberConstant: {
2488 double const value = OpParameter<double>(node->op());
2489 int value_as_int;
2490 if (DoubleToSmiInteger(value, &value_as_int)) {
2492 if (lower<T>()) {
2493 intptr_t smi = base::bit_cast<intptr_t>(Smi::FromInt(value_as_int));
2496 lowering->jsgraph()->IntPtrConstant(smi));
2497 DeferReplacement(node, constant);
2498 }
2499 return;
2500 }
2502 return;
2503 }
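// Background sketch for the bit_cast above (illustrative only; details vary
// by build configuration): a Smi is an immediate whose low tag bit is 0, so
// reinterpreting the tagged word as an intptr_t yields an ordinary integer
// constant. Conceptually, on a configuration with 31-bit Smi payloads:
//
//   intptr_t TagSmi(int32_t value) {  // hypothetical helper
//     return static_cast<intptr_t>(value) << 1;  // low bit 0 == Smi tag
//   }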
2504 case IrOpcode::kHeapConstant:
2506 case IrOpcode::kTrustedHeapConstant:
2508 case IrOpcode::kPointerConstant: {
2510 if (lower<T>()) {
2511 intptr_t const value = OpParameter<intptr_t>(node->op());
2512 DeferReplacement(node, lowering->jsgraph()->IntPtrConstant(value));
2513 }
2514 return;
2515 }
2516
2517 case IrOpcode::kBranch: {
2518 const auto& p = BranchParametersOf(node->op());
2519 if (p.semantics() == BranchSemantics::kMachine) {
2520 // If this is a machine branch, the condition is a machine operator,
2521 // so we enter machine branch here.
2522 ProcessInput<T>(node, 0, UseInfo::Any());
2523 } else {
2524 DCHECK(TypeOf(node->InputAt(0)).Is(Type::Boolean()));
2525 ProcessInput<T>(node, 0, UseInfo::Bool());
2526 if (lower<T>()) {
2527 ChangeOp(node,
2528 common()->Branch(p.hint(), BranchSemantics::kMachine));
2529 }
2530 }
2532 return;
2533 }
2534 case IrOpcode::kSwitch:
2537 return;
2538 case IrOpcode::kSelect: {
2539 const auto& p = SelectParametersOf(node->op());
2540 if (p.semantics() == BranchSemantics::kMachine) {
2541 // If this is a machine select, all inputs are machine operators.
2542 ProcessInput<T>(node, 0, UseInfo::Any());
2543 ProcessInput<T>(node, 1, UseInfo::Any());
2544 ProcessInput<T>(node, 2, UseInfo::Any());
2545 SetOutput<T>(node, p.representation());
2546 } else {
2547 VisitSelect<T>(node, truncation, lowering);
2548 }
2549 return;
2550 }
2551 case IrOpcode::kPhi:
2552 return VisitPhi<T>(node, truncation, lowering);
2553 case IrOpcode::kCall:
2554 return VisitCall<T>(node, lowering);
2555 case IrOpcode::kAssert: {
2556 const auto& p = AssertParametersOf(node->op());
2557 if (p.semantics() == BranchSemantics::kMachine) {
2558 // If this is a machine condition already, we don't need to do
2559 // anything.
2560 ProcessInput<T>(node, 0, UseInfo::Any());
2561 } else {
2562 DCHECK(TypeOf(node->InputAt(0)).Is(Type::Boolean()));
2563 ProcessInput<T>(node, 0, UseInfo::Bool());
2564 if (lower<T>()) {
2566 p.condition_string(), p.file(),
2567 p.line()));
2568 }
2569 }
2571 return;
2572 }
2573
2574 //------------------------------------------------------------------
2575 // JavaScript operators.
2576 //------------------------------------------------------------------
2577 case IrOpcode::kJSToNumber:
2578 case IrOpcode::kJSToNumberConvertBigInt:
2579 case IrOpcode::kJSToNumeric: {
2581 Type::BigInt(), Type::NumberOrOddball(), graph()->zone())));
2582 VisitInputs<T>(node);
2583 // TODO(bmeurer): Optimize somewhat based on input type?
2584 if (truncation.IsUsedAsWord32()) {
2586 if (lower<T>())
2587 lowering->DoJSToNumberOrNumericTruncatesToWord32(node, this);
2588 } else if (truncation.TruncatesOddballAndBigIntToNumber()) {
2590 if (lower<T>())
2591 lowering->DoJSToNumberOrNumericTruncatesToFloat64(node, this);
2592 } else {
2594 }
2595 return;
2596 }
2597 case IrOpcode::kJSToBigInt:
2598 case IrOpcode::kJSToBigIntConvertNumber: {
2599 VisitInputs<T>(node);
2601 return;
2602 }
2603
2604 //------------------------------------------------------------------
2605 // Simplified operators.
2606 //------------------------------------------------------------------
2607 case IrOpcode::kToBoolean: {
2608 if (truncation.IsUsedAsBool()) {
2609 ProcessInput<T>(node, 0, UseInfo::Bool());
2611 if (lower<T>()) DeferReplacement(node, node->InputAt(0));
2612 } else {
2613 VisitInputs<T>(node);
2615 }
2616 return;
2617 }
2618 case IrOpcode::kBooleanNot: {
2619 if (lower<T>()) {
2620 NodeInfo* input_info = GetInfo(node->InputAt(0));
2621 if (input_info->representation() == MachineRepresentation::kBit) {
2622 // BooleanNot(x: kRepBit) => Word32Equal(x, #0)
2623 node->AppendInput(jsgraph_->zone(), jsgraph_->Int32Constant(0));
2624 ChangeOp(node, lowering->machine()->Word32Equal());
2625 } else if (CanBeTaggedPointer(input_info->representation())) {
2626 // BooleanNot(x: kRepTagged) => TaggedEqual(x, #false)
2627 node->AppendInput(jsgraph_->zone(), jsgraph_->FalseConstant());
2628 ChangeOp(node, lowering->machine()->TaggedEqual());
2629 } else {
2630 DCHECK(TypeOf(node->InputAt(0)).IsNone());
2631 DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
2632 }
2633 } else {
2634 // No input representation requirement; adapt during lowering.
2637 }
2638 return;
2639 }
2640 case IrOpcode::kNumberEqual: {
2641 Type const lhs_type = TypeOf(node->InputAt(0));
2642 Type const rhs_type = TypeOf(node->InputAt(1));
2643 // Regular number comparisons in JavaScript generally identify zeros,
2644 // so we always pass kIdentifyZeros for the inputs, and in addition
2645 // we can truncate -0 to 0 for otherwise Unsigned32 or Signed32 inputs.
2646 // For equality we also handle the case that one side is non-zero, in
2647 // which case we can truncate NaN to 0 on the other side.
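// Worked example of that case (illustrative only): suppose the right-hand
// side has type Range(1, 10), i.e. it can be neither 0, -0 nor NaN. Then
//
//   NumberEqual(NaN, 7) -> false
//   NumberEqual(0, 7)   -> false
//
// so truncating a NaN left-hand side to 0 before the word32 compare cannot
// change the outcome.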
2648 if ((lhs_type.Is(Type::Unsigned32OrMinusZero()) &&
2649 rhs_type.Is(Type::Unsigned32OrMinusZero())) ||
2650 (lhs_type.Is(Type::Unsigned32OrMinusZeroOrNaN()) &&
2651 rhs_type.Is(Type::Unsigned32OrMinusZeroOrNaN()) &&
2653 // => unsigned Int32Cmp
2656 if (lower<T>()) ChangeOp(node, Uint32Op(node));
2657 return;
2658 }
2659 if ((lhs_type.Is(Type::Signed32OrMinusZero()) &&
2660 rhs_type.Is(Type::Signed32OrMinusZero())) ||
2661 (lhs_type.Is(Type::Signed32OrMinusZeroOrNaN()) &&
2662 rhs_type.Is(Type::Signed32OrMinusZeroOrNaN()) &&
2664 // => signed Int32Cmp
2667 if (lower<T>()) ChangeOp(node, Int32Op(node));
2668 return;
2669 }
2670 if (lhs_type.Is(Type::Boolean()) && rhs_type.Is(Type::Boolean())) {
2672 if (lower<T>()) ChangeOp(node, lowering->machine()->Word32Equal());
2673 return;
2674 }
2675 // => Float64Cmp
2678 if (lower<T>()) ChangeOp(node, Float64Op(node));
2679 return;
2680 }
2681 case IrOpcode::kNumberLessThan:
2682 case IrOpcode::kNumberLessThanOrEqual: {
2683 Type const lhs_type = TypeOf(node->InputAt(0));
2684 Type const rhs_type = TypeOf(node->InputAt(1));
2685 // Regular number comparisons in JavaScript generally identify zeros,
2686 // so we always pass kIdentifyZeros for the inputs, and in addition
2687 // we can truncate -0 to 0 for otherwise Unsigned32 or Signed32 inputs.
2688 if (lhs_type.Is(Type::Unsigned32OrMinusZero()) &&
2689 rhs_type.Is(Type::Unsigned32OrMinusZero())) {
2690 // => unsigned Int32Cmp
2693 if (lower<T>()) ChangeOp(node, Uint32Op(node));
2694 } else if (lhs_type.Is(Type::Signed32OrMinusZero()) &&
2695 rhs_type.Is(Type::Signed32OrMinusZero())) {
2696 // => signed Int32Cmp
2699 if (lower<T>()) ChangeOp(node, Int32Op(node));
2700 } else {
2701 // => Float64Cmp
2704 if (lower<T>()) ChangeOp(node, Float64Op(node));
2705 }
2706 return;
2707 }
2708
2709 case IrOpcode::kSpeculativeSmallIntegerAdd:
2710 case IrOpcode::kSpeculativeSmallIntegerSubtract:
2711 return VisitSpeculativeSmallIntegerAdditiveOp<T>(node, truncation,
2712 lowering);
2713
2714 case IrOpcode::kSpeculativeAdditiveSafeIntegerAdd:
2715 case IrOpcode::kSpeculativeAdditiveSafeIntegerSubtract:
2716 case IrOpcode::kSpeculativeNumberAdd:
2717 case IrOpcode::kSpeculativeNumberSubtract:
2718 return VisitSpeculativeAdditiveOp<T>(node, truncation, lowering);
2719
2720 case IrOpcode::kSpeculativeNumberLessThan:
2721 case IrOpcode::kSpeculativeNumberLessThanOrEqual:
2722 case IrOpcode::kSpeculativeNumberEqual: {
2723 Type const lhs_type = TypeOf(node->InputAt(0));
2724 Type const rhs_type = TypeOf(node->InputAt(1));
2725 // Regular number comparisons in JavaScript generally identify zeros,
2726 // so we always pass kIdentifyZeros for the inputs, and in addition
2727 // we can truncate -0 to 0 for otherwise Unsigned32 or Signed32 inputs.
2728 if (lhs_type.Is(Type::Unsigned32OrMinusZero()) &&
2729 rhs_type.Is(Type::Unsigned32OrMinusZero())) {
2730 // => unsigned Int32Cmp
2733 if (lower<T>()) ChangeToPureOp(node, Uint32Op(node));
2734 return;
2735 } else if (lhs_type.Is(Type::Signed32OrMinusZero()) &&
2736 rhs_type.Is(Type::Signed32OrMinusZero())) {
2737 // => signed Int32Cmp
2740 if (lower<T>()) ChangeToPureOp(node, Int32Op(node));
2741 return;
2742 } else if (lhs_type.Is(Type::Boolean()) &&
2743 rhs_type.Is(Type::Boolean())) {
2745 if (lower<T>())
2746 ChangeToPureOp(node, lowering->machine()->Word32Equal());
2747 return;
2748 }
2749 // Try to use type feedback.
2750 NumberOperationHint hint = NumberOperationHintOf(node->op());
2751 switch (hint) {
2753 if (propagate<T>()) {
2755 node, CheckedUseInfoAsWord32FromHint(hint, kIdentifyZeros),
2757 } else if (retype<T>()) {
2759 } else {
2760 DCHECK(lower<T>());
2761 Node* lhs = node->InputAt(0);
2762 Node* rhs = node->InputAt(1);
2763 if (IsNodeRepresentationTagged(lhs) &&
2765 VisitBinop<T>(node,
2770 node, changer_->TaggedSignedOperatorFor(node->opcode()));
2771
2772 } else {
2774 node, CheckedUseInfoAsWord32FromHint(hint, kIdentifyZeros),
2776 ChangeToPureOp(node, Int32Op(node));
2777 }
2778 }
2779 return;
2782 // This doesn't make sense for compare operations.
2783 UNREACHABLE();
2785 // Abstract and strict equality don't perform ToNumber conversions
2786 // on Oddballs, so make sure we don't accidentally sneak in a
2787 // hint with Oddball feedback here.
2788 DCHECK_NE(IrOpcode::kSpeculativeNumberEqual, node->opcode());
2789 [[fallthrough]];
2792 VisitBinop<T>(node,
2793 CheckedUseInfoAsFloat64FromHint(
2796 if (lower<T>()) ChangeToPureOp(node, Float64Op(node));
2797 return;
2798 }
2799 UNREACHABLE();
2800 return;
2801 }
2802
2803 case IrOpcode::kNumberAdd:
2804 case IrOpcode::kNumberSubtract: {
2805 if (TypeOf(node->InputAt(0))
2807 TypeOf(node->InputAt(1))
2809 (TypeOf(node).Is(Type::Signed32()) ||
2810 TypeOf(node).Is(Type::Unsigned32()) ||
2811 truncation.IsUsedAsWord32())) {
2812 // => Int32Add/Sub
2814 if (lower<T>()) ChangeToPureOp(node, Int32Op(node));
2815 } else if (jsgraph_->machine()->Is64() &&
2818 // => Int64Add/Sub
2819 VisitInt64Binop<T>(node);
2820 if (lower<T>()) ChangeToPureOp(node, Int64Op(node));
2821 } else {
2822 // => Float64Add/Sub
2824 if (lower<T>()) ChangeToPureOp(node, Float64Op(node));
2825 }
2826 return;
2827 }
2828 case IrOpcode::kSpeculativeNumberMultiply: {
2829 if (BothInputsAre(node, Type::Integral32()) &&
2830 (NodeProperties::GetType(node).Is(Type::Signed32()) ||
2831 NodeProperties::GetType(node).Is(Type::Unsigned32()) ||
2832 (truncation.IsUsedAsWord32() &&
2835 // Multiply reduces to Int32Mul if the inputs are integers, and
2836 // (a) the output is either known to be Signed32, or
2837 // (b) the output is known to be Unsigned32, or
2838 // (c) the uses are truncating and the result is in the safe
2839 // integer range.
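// Worked example for case (c) (illustrative only): as long as the exact
// product stays in the safe integer range it is representable as a double,
// so truncating the Float64 result to word32 agrees with Int32Mul on the
// word32 bit patterns, e.g.
//
//   3000000000 * 3 = 9000000000
//   9000000000 mod 2^32 = 410065408   // == Int32Mul(3000000000, 3)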
2841 if (lower<T>()) ChangeToPureOp(node, Int32Op(node));
2842 return;
2843 }
2844 // Try to use type feedback.
2845 NumberOperationHint hint = NumberOperationHintOf(node->op());
2846 Type input0_type = TypeOf(node->InputAt(0));
2847 Type input1_type = TypeOf(node->InputAt(1));
2848
2849 // Handle the case when no int32 checks on inputs are necessary
2850 // (but an overflow check is needed on the output).
2851 if (BothInputsAre(node, Type::Signed32())) {
2852 // If both inputs and feedback are int32, use the overflow op.
2854 VisitForCheckedInt32Mul<T>(node, truncation, input0_type,
2855 input1_type,
2857 return;
2858 }
2859 }
2860
2862 VisitForCheckedInt32Mul<T>(node, truncation, input0_type, input1_type,
2863 CheckedUseInfoAsWord32FromHint(hint));
2864 return;
2865 }
2866
2867 // Checked float64 x float64 => float64
2868 VisitBinop<T>(node,
2871 MachineRepresentation::kFloat64, Type::Number());
2872 if (lower<T>()) ChangeToPureOp(node, Float64Op(node));
2873 return;
2874 }
2875 case IrOpcode::kNumberMultiply: {
2876 if (TypeOf(node->InputAt(0)).Is(Type::Integral32()) &&
2877 TypeOf(node->InputAt(1)).Is(Type::Integral32()) &&
2878 (TypeOf(node).Is(Type::Signed32()) ||
2879 TypeOf(node).Is(Type::Unsigned32()) ||
2880 (truncation.IsUsedAsWord32() &&
2882 // Multiply reduces to Int32Mul if the inputs are integers, and
2883 // (a) the output is either known to be Signed32, or
2884 // (b) the output is known to be Unsigned32, or
2885 // (c) the uses are truncating and the result is in the safe
2886 // integer range.
2888 if (lower<T>()) ChangeToPureOp(node, Int32Op(node));
2889 return;
2890 }
2891 // Number x Number => Float64Mul
2893 if (lower<T>()) ChangeToPureOp(node, Float64Op(node));
2894 return;
2895 }
2896 case IrOpcode::kSpeculativeNumberDivide: {
2897 if (BothInputsAreUnsigned32(node) && truncation.IsUsedAsWord32()) {
2898 // => unsigned Uint32Div
2900 if (lower<T>()) DeferReplacement(node, lowering->Uint32Div(node));
2901 return;
2902 }
2903 if (BothInputsAreSigned32(node)) {
2904 if (NodeProperties::GetType(node).Is(Type::Signed32())) {
2905 // => signed Int32Div
2907 if (lower<T>()) DeferReplacement(node, lowering->Int32Div(node));
2908 return;
2909 }
2910 if (truncation.IsUsedAsWord32()) {
2911 // => signed Int32Div
2913 if (lower<T>()) DeferReplacement(node, lowering->Int32Div(node));
2914 return;
2915 }
2916 }
2917
2918 // Try to use type feedback.
2919 NumberOperationHint hint = NumberOperationHintOf(node->op());
2920
2921 // Handle the case when no uint32 checks on inputs are necessary
2922 // (but an overflow check is needed on the output).
2923 if (BothInputsAreUnsigned32(node)) {
2926 MachineRepresentation::kWord32, Type::Unsigned32());
2928 return;
2929 }
2930 }
2931
2932 // Handle the case when no int32 checks on inputs are necessary
2933 // (but an overflow check is needed on the output).
2934 if (BothInputsAreSigned32(node)) {
2935 // If both the inputs and the feedback are int32, use the overflow op.
2938 MachineRepresentation::kWord32, Type::Signed32());
2939 if (lower<T>()) ChangeToInt32OverflowOp(node);
2940 return;
2941 }
2942 }
2943
2946 // If the result is truncated, we only need to check the inputs.
2947 if (truncation.IsUsedAsWord32()) {
2948 VisitBinop<T>(node, CheckedUseInfoAsWord32FromHint(hint),
2950 if (lower<T>()) DeferReplacement(node, lowering->Int32Div(node));
2951 return;
2952 } else if (hint != NumberOperationHint::kSignedSmallInputs) {
2953 VisitBinop<T>(node, CheckedUseInfoAsWord32FromHint(hint),
2954 MachineRepresentation::kWord32, Type::Signed32());
2955 if (lower<T>()) ChangeToInt32OverflowOp(node);
2956 return;
2957 }
2958 }
2959
2960 // default case => Float64Div
2961 VisitBinop<T>(node,
2964 MachineRepresentation::kFloat64, Type::Number());
2965 if (lower<T>()) ChangeToPureOp(node, Float64Op(node));
2966 return;
2967 }
2968 case IrOpcode::kNumberDivide: {
2969 if (TypeOf(node->InputAt(0)).Is(Type::Unsigned32()) &&
2970 TypeOf(node->InputAt(1)).Is(Type::Unsigned32()) &&
2971 (truncation.IsUsedAsWord32() ||
2972 TypeOf(node).Is(Type::Unsigned32()))) {
2973 // => unsigned Uint32Div
2975 if (lower<T>()) DeferReplacement(node, lowering->Uint32Div(node));
2976 return;
2977 }
2978 if (TypeOf(node->InputAt(0)).Is(Type::Signed32()) &&
2979 TypeOf(node->InputAt(1)).Is(Type::Signed32()) &&
2980 (truncation.IsUsedAsWord32() ||
2981 TypeOf(node).Is(Type::Signed32()))) {
2982 // => signed Int32Div
2984 if (lower<T>()) DeferReplacement(node, lowering->Int32Div(node));
2985 return;
2986 }
2987 // Number x Number => Float64Div
2989 if (lower<T>()) ChangeToPureOp(node, Float64Op(node));
2990 return;
2991 }
2992 case IrOpcode::kUnsigned32Divide: {
2993 CHECK(TypeOf(node->InputAt(0)).Is(Type::Unsigned32()));
2994 CHECK(TypeOf(node->InputAt(1)).Is(Type::Unsigned32()));
2995 // => unsigned Uint32Div
2997 if (lower<T>()) DeferReplacement(node, lowering->Uint32Div(node));
2998 return;
2999 }
3000 case IrOpcode::kSpeculativeNumberModulus:
3001 return VisitSpeculativeNumberModulus<T>(node, truncation, lowering);
3002 case IrOpcode::kNumberModulus: {
3003 Type const lhs_type = TypeOf(node->InputAt(0));
3004 Type const rhs_type = TypeOf(node->InputAt(1));
3005 if ((lhs_type.Is(Type::Unsigned32OrMinusZeroOrNaN()) &&
3006 rhs_type.Is(Type::Unsigned32OrMinusZeroOrNaN())) &&
3007 (truncation.IsUsedAsWord32() ||
3008 TypeOf(node).Is(Type::Unsigned32()))) {
3009 // => unsigned Uint32Mod
3011 if (lower<T>()) DeferReplacement(node, lowering->Uint32Mod(node));
3012 return;
3013 }
3014 if ((lhs_type.Is(Type::Signed32OrMinusZeroOrNaN()) &&
3015 rhs_type.Is(Type::Signed32OrMinusZeroOrNaN())) &&
3016 (truncation.IsUsedAsWord32() || TypeOf(node).Is(Type::Signed32()) ||
3017 (truncation.IdentifiesZeroAndMinusZero() &&
3018 TypeOf(node).Is(Type::Signed32OrMinusZero())))) {
3019 // => signed Int32Mod
3021 if (lower<T>()) DeferReplacement(node, lowering->Int32Mod(node));
3022 return;
3023 }
3024 // => Float64Mod
3025 // For the left hand side we just propagate the identify zeros
3026 // mode of the {truncation}; and for modulus the sign of the
3027 // right hand side doesn't matter anyways, so in particular there's
3028 // no observable difference between a 0 and a -0 then.
3029 UseInfo const lhs_use =
3032 VisitBinop<T>(node, lhs_use, rhs_use, MachineRepresentation::kFloat64);
3033 if (lower<T>()) ChangeToPureOp(node, Float64Op(node));
3034 return;
3035 }
3036 case IrOpcode::kNumberBitwiseOr:
3037 case IrOpcode::kNumberBitwiseXor:
3038 case IrOpcode::kNumberBitwiseAnd: {
3040 if (lower<T>()) ChangeOp(node, Int32Op(node));
3041 return;
3042 }
3043 case IrOpcode::kSpeculativeNumberBitwiseOr:
3044 case IrOpcode::kSpeculativeNumberBitwiseXor:
3045 case IrOpcode::kSpeculativeNumberBitwiseAnd:
3047 if (lower<T>()) {
3048 ChangeToPureOp(node, Int32Op(node));
3049 }
3050 return;
3051 case IrOpcode::kNumberShiftLeft: {
3052 Type rhs_type = GetUpperBound(node->InputAt(1));
3056 if (lower<T>()) {
3057 MaskShiftOperand(node, rhs_type);
3058 ChangeToPureOp(node, lowering->machine()->Word32Shl());
3059 }
3060 return;
3061 }
3062 case IrOpcode::kSpeculativeNumberShiftLeft: {
3063 if (BothInputsAre(node, Type::NumberOrOddball())) {
3064 Type rhs_type = GetUpperBound(node->InputAt(1));
3068 if (lower<T>()) {
3069 MaskShiftOperand(node, rhs_type);
3070 ChangeToPureOp(node, lowering->machine()->Word32Shl());
3071 }
3072 return;
3073 }
3074 NumberOperationHint hint = NumberOperationHintOf(node->op());
3075 Type rhs_type = GetUpperBound(node->InputAt(1));
3076 VisitBinop<T>(node,
3077 CheckedUseInfoAsWord32FromHint(hint, kIdentifyZeros),
3078 MachineRepresentation::kWord32, Type::Signed32());
3079 if (lower<T>()) {
3080 MaskShiftOperand(node, rhs_type);
3081 ChangeToPureOp(node, lowering->machine()->Word32Shl());
3082 }
3083 return;
3084 }
3085 case IrOpcode::kNumberShiftRight: {
3086 Type rhs_type = GetUpperBound(node->InputAt(1));
3090 if (lower<T>()) {
3091 MaskShiftOperand(node, rhs_type);
3092 ChangeToPureOp(node, lowering->machine()->Word32Sar());
3093 }
3094 return;
3095 }
3096 case IrOpcode::kSpeculativeNumberShiftRight: {
3097 if (BothInputsAre(node, Type::NumberOrOddball())) {
3098 Type rhs_type = GetUpperBound(node->InputAt(1));
3102 if (lower<T>()) {
3103 MaskShiftOperand(node, rhs_type);
3104 ChangeToPureOp(node, lowering->machine()->Word32Sar());
3105 }
3106 return;
3107 }
3108 NumberOperationHint hint = NumberOperationHintOf(node->op());
3109 Type rhs_type = GetUpperBound(node->InputAt(1));
3110 VisitBinop<T>(node,
3111 CheckedUseInfoAsWord32FromHint(hint, kIdentifyZeros),
3112 MachineRepresentation::kWord32, Type::Signed32());
3113 if (lower<T>()) {
3114 MaskShiftOperand(node, rhs_type);
3115 ChangeToPureOp(node, lowering->machine()->Word32Sar());
3116 }
3117 return;
3118 }
3119 case IrOpcode::kNumberShiftRightLogical: {
3120 Type rhs_type = GetUpperBound(node->InputAt(1));
3124 if (lower<T>()) {
3125 MaskShiftOperand(node, rhs_type);
3126 ChangeToPureOp(node, lowering->machine()->Word32Shr());
3127 }
3128 return;
3129 }
3130 case IrOpcode::kSpeculativeNumberShiftRightLogical: {
3131 NumberOperationHint hint = NumberOperationHintOf(node->op());
3132 Type rhs_type = GetUpperBound(node->InputAt(1));
3133 if (rhs_type.Is(type_cache_->kZeroish) &&
3135 !truncation.IsUsedAsWord32()) {
3136 // The SignedSmall or Signed32 feedback means that the results that we
3137 // have seen so far were of type Unsigned31. We speculate that this
3138 // will continue to hold. Moreover, since the RHS is 0, the result
3139 // will just be the (converted) LHS.
3140 VisitBinop<T>(node,
3141 CheckedUseInfoAsWord32FromHint(hint, kIdentifyZeros),
3142 MachineRepresentation::kWord32, Type::Unsigned31());
3143 if (lower<T>()) {
3144 node->RemoveInput(1);
3145 ChangeOp(node,
3146 simplified()->CheckedUint32ToInt32(FeedbackSource()));
3147 }
3148 return;
3149 }
3150 if (BothInputsAre(node, Type::NumberOrOddball())) {
3154 if (lower<T>()) {
3155 MaskShiftOperand(node, rhs_type);
3156 ChangeToPureOp(node, lowering->machine()->Word32Shr());
3157 }
3158 return;
3159 }
3160 VisitBinop<T>(node,
3161 CheckedUseInfoAsWord32FromHint(hint, kIdentifyZeros),
3162 MachineRepresentation::kWord32, Type::Unsigned32());
3163 if (lower<T>()) {
3164 MaskShiftOperand(node, rhs_type);
3165 ChangeToPureOp(node, lowering->machine()->Word32Shr());
3166 }
3167 return;
3168 }
3169 case IrOpcode::kNumberAbs: {
3170 // NumberAbs maps both 0 and -0 to 0, so we can generally
3171 // pass the kIdentifyZeros truncation to its input, and
3172 // choose to ignore minus zero in all cases.
3173 Type const input_type = TypeOf(node->InputAt(0));
3174 if (input_type.Is(Type::Unsigned32OrMinusZero())) {
3177 if (lower<T>()) {
3179 node,
3181 Type::Intersect(input_type, Type::Unsigned32(), zone()),
3182 node->InputAt(0)));
3183 }
3184 } else if (input_type.Is(Type::Signed32OrMinusZero())) {
3187 if (lower<T>()) {
3189 node,
3191 Type::Intersect(input_type, Type::Unsigned32(), zone()),
3192 lowering->Int32Abs(node)));
3193 }
3194 } else if (input_type.Is(type_cache_->kPositiveIntegerOrNaN)) {
3197 if (lower<T>()) DeferReplacement(node, node->InputAt(0));
3198 } else {
3201 if (lower<T>()) ChangeOp(node, Float64Op(node));
3202 }
3203 return;
3204 }
3205 case IrOpcode::kNumberClz32: {
3208 if (lower<T>()) ChangeOp(node, Uint32Op(node));
3209 return;
3210 }
3211 case IrOpcode::kNumberImul: {
3215 if (lower<T>()) ChangeOp(node, Uint32Op(node));
3216 return;
3217 }
3218 case IrOpcode::kNumberFround: {
3221 if (lower<T>()) ChangeOp(node, Float64Op(node));
3222 return;
3223 }
3224 case IrOpcode::kNumberMax: {
3225 // It is safe to use the feedback types for left and right hand side
3226 // here, since we can only narrow those types and thus we can only
3227 // promise a more specific truncation.
3228 // For NumberMax we generally propagate whether the truncation
3229 // identifies zeros to the inputs, and we choose to ignore minus
3230 // zero in those cases.
3231 Type const lhs_type = TypeOf(node->InputAt(0));
3232 Type const rhs_type = TypeOf(node->InputAt(1));
3233 if ((lhs_type.Is(Type::Unsigned32()) &&
3234 rhs_type.Is(Type::Unsigned32())) ||
3235 (lhs_type.Is(Type::Unsigned32OrMinusZero()) &&
3236 rhs_type.Is(Type::Unsigned32OrMinusZero()) &&
3237 truncation.IdentifiesZeroAndMinusZero())) {
3239 if (lower<T>()) {
3240 lowering->DoMax(node, lowering->machine()->Uint32LessThan(),
3242 }
3243 } else if ((lhs_type.Is(Type::Signed32()) &&
3244 rhs_type.Is(Type::Signed32())) ||
3245 (lhs_type.Is(Type::Signed32OrMinusZero()) &&
3246 rhs_type.Is(Type::Signed32OrMinusZero()) &&
3247 truncation.IdentifiesZeroAndMinusZero())) {
3249 if (lower<T>()) {
3250 lowering->DoMax(node, lowering->machine()->Int32LessThan(),
3252 }
3253 } else if (jsgraph_->machine()->Is64() &&
3254 lhs_type.Is(type_cache_->kSafeInteger) &&
3255 rhs_type.Is(type_cache_->kSafeInteger)) {
3256 VisitInt64Binop<T>(node);
3257 if (lower<T>()) {
3258 lowering->DoMax(node, lowering->machine()->Int64LessThan(),
3260 }
3261 } else {
3262 VisitBinop<T>(node,
3265 if (lower<T>()) {
3266 // If the right hand side is not NaN, and the left hand side
3267 // is not NaN (or -0 if the difference between the zeros is
3268 // observed), we can do a simple floating point comparison here.
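// Counterexamples the type checks below guard against (illustrative only),
// assuming a compare-and-select max of the form (lhs < rhs ? rhs : lhs):
//
//   lhs = 5,  rhs = NaN: 5 < NaN is false -> 5,  but Math.max(5, NaN) is NaN
//   lhs = -0, rhs = 0:   -0 < 0 is false  -> -0, but Math.max(-0, 0) is +0
//                        (only acceptable when 0 and -0 are identified)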
3269 if (lhs_type.Is(truncation.IdentifiesZeroAndMinusZero()
3270 ? Type::OrderedNumber()
3271 : Type::PlainNumber()) &&
3272 rhs_type.Is(Type::OrderedNumber())) {
3273 lowering->DoMax(node, lowering->machine()->Float64LessThan(),
3275 } else {
3276 ChangeOp(node, Float64Op(node));
3277 }
3278 }
3279 }
3280 return;
3281 }
3282 case IrOpcode::kNumberMin: {
3283 // It is safe to use the feedback types for left and right hand side
3284 // here, since we can only narrow those types and thus we can only
3285 // promise a more specific truncation.
3286 // For NumberMin we generally propagate whether the truncation
3287 // identifies zeros to the inputs, and we choose to ignore minus
3288 // zero in those cases.
3289 Type const lhs_type = TypeOf(node->InputAt(0));
3290 Type const rhs_type = TypeOf(node->InputAt(1));
3291 if ((lhs_type.Is(Type::Unsigned32()) &&
3292 rhs_type.Is(Type::Unsigned32())) ||
3293 (lhs_type.Is(Type::Unsigned32OrMinusZero()) &&
3294 rhs_type.Is(Type::Unsigned32OrMinusZero()) &&
3295 truncation.IdentifiesZeroAndMinusZero())) {
3297 if (lower<T>()) {
3298 lowering->DoMin(node, lowering->machine()->Uint32LessThan(),
3300 }
3301 } else if ((lhs_type.Is(Type::Signed32()) &&
3302 rhs_type.Is(Type::Signed32())) ||
3303 (lhs_type.Is(Type::Signed32OrMinusZero()) &&
3304 rhs_type.Is(Type::Signed32OrMinusZero()) &&
3305 truncation.IdentifiesZeroAndMinusZero())) {
3307 if (lower<T>()) {
3308 lowering->DoMin(node, lowering->machine()->Int32LessThan(),
3310 }
3311 } else if (jsgraph_->machine()->Is64() &&
3312 lhs_type.Is(type_cache_->kSafeInteger) &&
3313 rhs_type.Is(type_cache_->kSafeInteger)) {
3314 VisitInt64Binop<T>(node);
3315 if (lower<T>()) {
3316 lowering->DoMin(node, lowering->machine()->Int64LessThan(),
3318 }
3319 } else {
3320 VisitBinop<T>(node,
3323 if (lower<T>()) {
3324 // If the left hand side is not NaN, and the right hand side
3325 // is not NaN (or -0 if the difference between the zeros is
3326 // observed), we can do a simple floating point comparison here.
3327 if (lhs_type.Is(Type::OrderedNumber()) &&
3328 rhs_type.Is(truncation.IdentifiesZeroAndMinusZero()
3329 ? Type::OrderedNumber()
3330 : Type::PlainNumber())) {
3331 lowering->DoMin(node,
3332 lowering->machine()->Float64LessThanOrEqual(),
3334 } else {
3335 ChangeOp(node, Float64Op(node));
3336 }
3337 }
3338 }
3339 return;
3340 }
3341 case IrOpcode::kSpeculativeNumberPow: {
3342 // Checked float64 ** float64 => float64
3343 VisitBinop<T>(node,
3346 MachineRepresentation::kFloat64, Type::Number());
3347 if (lower<T>()) ChangeToPureOp(node, Float64Op(node));
3348 return;
3349 }
3350 case IrOpcode::kNumberAtan2:
3351 case IrOpcode::kNumberPow: {
3354 if (lower<T>()) ChangeOp(node, Float64Op(node));
3355 return;
3356 }
3357 case IrOpcode::kNumberCeil:
3358 case IrOpcode::kNumberFloor:
3359 case IrOpcode::kNumberRound:
3360 case IrOpcode::kNumberTrunc: {
3361 // For NumberCeil, NumberFloor, NumberRound and NumberTrunc we propagate
3362 // the zero identification part of the truncation, and we turn them into
3363 // no-ops if we figure out (late) that their input is already an
3364 // integer, NaN or -0.
3365 Type const input_type = TypeOf(node->InputAt(0));
3366 VisitUnop<T>(node,
3369 if (lower<T>()) {
3370 if (input_type.Is(type_cache_->kIntegerOrMinusZeroOrNaN)) {
3371 DeferReplacement(node, node->InputAt(0));
3372 } else if (node->opcode() == IrOpcode::kNumberRound) {
3373 DeferReplacement(node, lowering->Float64Round(node));
3374 } else {
3375 ChangeOp(node, Float64Op(node));
3376 }
3377 }
3378 return;
3379 }
3380 case IrOpcode::kSpeculativeBigIntAsIntN:
3381 case IrOpcode::kSpeculativeBigIntAsUintN: {
3382 const bool is_asuintn =
3383 node->opcode() == IrOpcode::kSpeculativeBigIntAsUintN;
3384 const auto p = SpeculativeBigIntAsNParametersOf(node->op());
3385 DCHECK_LE(0, p.bits());
3386 DCHECK_LE(p.bits(), 64);
3387
3388 ProcessInput<T>(node, 0,
3392 is_asuintn ? Type::UnsignedBigInt64() : Type::SignedBigInt64());
3393 if (lower<T>()) {
3394 if (p.bits() == 0) {
3396 Type::UnsignedBigInt63(),
3397 jsgraph_->Int64Constant(0)));
3398 } else if (p.bits() == 64) {
3400 is_asuintn ? Type::UnsignedBigInt64()
3401 : Type::SignedBigInt64(),
3402 node->InputAt(0)));
3403 } else {
3404 if (is_asuintn) {
3405 const uint64_t mask = (1ULL << p.bits()) - 1ULL;
3406 ChangeUnaryToPureBinaryOp(node, lowering->machine()->Word64And(),
3408 } else {
3409 // We truncate the value to N bits, but to correctly interpret
3410 // negative values, we have to fill the top (64-N) bits with the
3411 // sign. This is done by shifting the value left and then back
3412 // with an arithmetic right shift. E.g. for {value} =
3413 // 0..0'0001'1101 (29n) and N = 3: {shifted} is 1010'0000'0..0
3414 // after left shift by 61 bits, {unshifted} is 1..1'1111'1101
3415 // after arithmetic right shift by 61. This is the 64 bit
3416 // representation of -3 we expect for the signed 3 bit integer
3417 // 101.
3418 const uint64_t shift = 64 - p.bits();
3419 Node* value = node->InputAt(0);
3420 Node* shifted =
3421 graph()->NewNode(lowering->machine()->Word64Shl(), value,
3422 jsgraph_->Uint64Constant(shift));
3423 Node* unshifted =
3424 graph()->NewNode(lowering->machine()->Word64Sar(), shifted,
3425 jsgraph_->Uint64Constant(shift));
3426
3427 ReplaceWithPureNode(node, unshifted);
3428 }
3429 }
3430 }
3431 return;
3432 }
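// A minimal standalone sketch of the shift-based sign extension above
// (illustrative only; SignExtendToNBits is not a V8 helper):
//
//   int64_t SignExtendToNBits(uint64_t value, int bits) {  // 0 < bits < 64
//     const uint64_t shift = 64 - bits;
//     return static_cast<int64_t>(value << shift) >> shift;
//   }
//
//   // SignExtendToNBits(29, 3) == -3, matching BigInt.asIntN(3, 29n).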
3433 case IrOpcode::kNumberAcos:
3434 case IrOpcode::kNumberAcosh:
3435 case IrOpcode::kNumberAsin:
3436 case IrOpcode::kNumberAsinh:
3437 case IrOpcode::kNumberAtan:
3438 case IrOpcode::kNumberAtanh:
3439 case IrOpcode::kNumberCos:
3440 case IrOpcode::kNumberCosh:
3441 case IrOpcode::kNumberExp:
3442 case IrOpcode::kNumberExpm1:
3443 case IrOpcode::kNumberLog:
3444 case IrOpcode::kNumberLog1p:
3445 case IrOpcode::kNumberLog2:
3446 case IrOpcode::kNumberLog10:
3447 case IrOpcode::kNumberCbrt:
3448 case IrOpcode::kNumberSin:
3449 case IrOpcode::kNumberSinh:
3450 case IrOpcode::kNumberTan:
3451 case IrOpcode::kNumberTanh: {
3454 if (lower<T>()) ChangeOp(node, Float64Op(node));
3455 return;
3456 }
3457 case IrOpcode::kNumberSign: {
3458 if (InputIs(node, Type::Signed32())) {
3461 if (lower<T>()) DeferReplacement(node, lowering->Int32Sign(node));
3462 } else {
3465 if (lower<T>()) DeferReplacement(node, lowering->Float64Sign(node));
3466 }
3467 return;
3468 }
3469 case IrOpcode::kNumberSilenceNaN: {
3470 Type const input_type = TypeOf(node->InputAt(0));
3471 if (input_type.Is(Type::OrderedNumber())) {
3472 // No need to silence anything if the input cannot be NaN.
3475 if (lower<T>()) DeferReplacement(node, node->InputAt(0));
3476 } else {
3479 if (lower<T>()) ChangeOp(node, Float64Op(node));
3480 }
3481 return;
3482 }
3483 case IrOpcode::kNumberSqrt: {
3486 if (lower<T>()) ChangeOp(node, Float64Op(node));
3487 return;
3488 }
3489 case IrOpcode::kNumberToBoolean: {
3490 // For NumberToBoolean we don't care whether the input is 0 or
3491 // -0, since both of them are mapped to false anyways, so we
3492 // can generally pass kIdentifyZeros truncation.
3493 Type const input_type = TypeOf(node->InputAt(0));
3494 if (input_type.Is(Type::Integral32OrMinusZeroOrNaN())) {
3495 // 0, -0 and NaN all map to false, so we can safely truncate
3496 // all of them to zero here.
3499 if (lower<T>()) lowering->DoIntegral32ToBit(node);
3500 } else if (input_type.Is(Type::OrderedNumber())) {
3503 if (lower<T>()) lowering->DoOrderedNumberToBit(node);
3504 } else {
3507 if (lower<T>()) lowering->DoNumberToBit(node);
3508 }
3509 return;
3510 }
3511 case IrOpcode::kNumberToInt32: {
3512 // Just change representation if necessary.
3515 if (lower<T>()) DeferReplacement(node, node->InputAt(0));
3516 return;
3517 }
3518 case IrOpcode::kNumberToString: {
3521 return;
3522 }
3523 case IrOpcode::kNumberToUint32: {
3524 // Just change representation if necessary.
3527 if (lower<T>()) DeferReplacement(node, node->InputAt(0));
3528 return;
3529 }
3530 case IrOpcode::kNumberToUint8Clamped: {
3531 Type const input_type = TypeOf(node->InputAt(0));
3532 if (input_type.Is(type_cache_->kUint8OrMinusZeroOrNaN)) {
3535 if (lower<T>()) DeferReplacement(node, node->InputAt(0));
3536 } else if (input_type.Is(Type::Unsigned32OrMinusZeroOrNaN())) {
3539 if (lower<T>()) lowering->DoUnsigned32ToUint8Clamped(node);
3540 } else if (input_type.Is(Type::Signed32OrMinusZeroOrNaN())) {
3543 if (lower<T>()) lowering->DoSigned32ToUint8Clamped(node);
3544 } else if (input_type.Is(type_cache_->kIntegerOrMinusZeroOrNaN)) {
3547 if (lower<T>()) lowering->DoIntegerToUint8Clamped(node);
3548 } else {
3551 if (lower<T>()) lowering->DoNumberToUint8Clamped(node);
3552 }
3553 return;
3554 }
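// Reference semantics for the clamping above (per the ToUint8Clamp spec
// operation; illustrative values only): NaN and negative inputs map to 0,
// values of 255 or more map to 255, and non-integral values in range round
// to the nearest integer with ties going to the even neighbor, e.g.
//
//   -3    -> 0      300   -> 255
//   250.5 -> 250    253.5 -> 254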
3555 case IrOpcode::kIntegral32OrMinusZeroToBigInt: {
3558 if (lower<T>()) {
3561 node->InputAt(0)));
3562 }
3563 return;
3564 }
3565 case IrOpcode::kReferenceEqual: {
3567 if (lower<T>()) {
3569 ChangeOp(node, lowering->machine()->Word32Equal());
3570 } else {
3571 ChangeOp(node, lowering->machine()->WordEqual());
3572 }
3573 }
3574 return;
3575 }
3576 case IrOpcode::kSameValueNumbersOnly: {
3579 return;
3580 }
3581 case IrOpcode::kSameValue: {
3582 if (truncation.IsUnused()) return VisitUnused<T>(node);
3583 if (BothInputsAre(node, Type::Number())) {
3586 if (lower<T>()) {
3587 ChangeOp(node, lowering->simplified()->NumberSameValue());
3588 }
3589 } else {
3592 }
3593 return;
3594 }
3595 case IrOpcode::kTypeOf: {
3596 return VisitUnop<T>(node, UseInfo::AnyTagged(),
3598 }
3599 case IrOpcode::kNewConsString: {
3600 ProcessInput<T>(node, 0, UseInfo::TruncatingWord32()); // length
3601 ProcessInput<T>(node, 1, UseInfo::AnyTagged()); // first
3602 ProcessInput<T>(node, 2, UseInfo::AnyTagged()); // second
3604 return;
3605 }
3606 case IrOpcode::kSpeculativeBigIntAdd:
3607 case IrOpcode::kSpeculativeBigIntSubtract:
3608 case IrOpcode::kSpeculativeBigIntMultiply: {
3609 if (truncation.IsUnused() && BothInputsAre(node, Type::BigInt())) {
3610 VisitUnused<T>(node);
3611 return;
3612 }
3613 if (Is64() && truncation.IsUsedAsWord64()) {
3617 if (lower<T>()) {
3618 ChangeToPureOp(node, Int64Op(node));
3619 }
3620 return;
3621 }
3622 BigIntOperationHint hint = BigIntOperationHintOf(node->op());
3623 switch (hint) {
3627 MachineRepresentation::kWord64, Type::SignedBigInt64());
3628 if (lower<T>()) {
3629 ChangeOp(node, Int64OverflowOp(node));
3630 }
3631 return;
3632 }
3637 if (lower<T>()) {
3638 ChangeOp(node, BigIntOp(node));
3639 }
3640 return;
3641 }
3642 }
3643 }
3644 case IrOpcode::kSpeculativeBigIntDivide:
3645 case IrOpcode::kSpeculativeBigIntModulus: {
3646 if (truncation.IsUnused() && BothInputsAre(node, Type::BigInt())) {
3647 VisitUnused<T>(node);
3648 return;
3649 }
3650 BigIntOperationHint hint = BigIntOperationHintOf(node->op());
3651 switch (hint) {
3655 MachineRepresentation::kWord64, Type::SignedBigInt64());
3656 if (lower<T>()) {
3657 ChangeOp(node, Int64OverflowOp(node));
3658 }
3659 return;
3660 }
3665 if (lower<T>()) {
3666 ChangeOp(node, BigIntOp(node));
3667 }
3668 return;
3669 }
3670 }
3671 }
3672 case IrOpcode::kSpeculativeBigIntBitwiseAnd:
3673 case IrOpcode::kSpeculativeBigIntBitwiseOr:
3674 case IrOpcode::kSpeculativeBigIntBitwiseXor: {
3675 if (truncation.IsUnused() && BothInputsAre(node, Type::BigInt())) {
3676 VisitUnused<T>(node);
3677 return;
3678 }
3679 if (Is64() && truncation.IsUsedAsWord64()) {
3683 if (lower<T>()) {
3684 ChangeToPureOp(node, Int64Op(node));
3685 }
3686 return;
3687 }
3688 BigIntOperationHint hint = BigIntOperationHintOf(node->op());
3689 switch (hint) {
3693 MachineRepresentation::kWord64, Type::SignedBigInt64());
3694 if (lower<T>()) {
3695 ChangeToPureOp(node, Int64Op(node));
3696 }
3697 return;
3698 }
3703 if (lower<T>()) {
3704 ChangeOp(node, BigIntOp(node));
3705 }
3706 return;
3707 }
3708 }
3709 }
3710 case IrOpcode::kSpeculativeBigIntShiftLeft:
3711 case IrOpcode::kSpeculativeBigIntShiftRight: {
3712 if (truncation.IsUnused() && BothInputsAre(node, Type::BigInt())) {
3713 VisitUnused<T>(node);
3714 return;
3715 }
3716 if (Is64() && TryOptimizeBigInt64Shift<T>(node, truncation, lowering)) {
3717 return;
3718 }
3719 DCHECK_EQ(BigIntOperationHintOf(node->op()),
3721 VisitBinop<T>(node,
3724 if (lower<T>()) {
3725 ChangeOp(node, BigIntOp(node));
3726 }
3727 return;
3728 }
3729 case IrOpcode::kSpeculativeBigIntEqual:
3730 case IrOpcode::kSpeculativeBigIntLessThan:
3731 case IrOpcode::kSpeculativeBigIntLessThanOrEqual: {
3732 // Loose equality can throw a TypeError when failing to cast an object
3733 // operand to primitive.
3734 if (truncation.IsUnused() && BothInputsAre(node, Type::BigInt())) {
3735 VisitUnused<T>(node);
3736 return;
3737 }
3738 BigIntOperationHint hint = BigIntOperationHintOf(node->op());
3739 switch (hint) {
3741 VisitBinop<T>(node,
3744 if (lower<T>()) {
3745 ChangeToPureOp(node, Int64Op(node));
3746 }
3747 return;
3748 }
3753 if (lower<T>()) {
3754 ChangeToPureOp(node, BigIntOp(node));
3755 }
3756 return;
3757 }
3758 }
3759 }
3760 case IrOpcode::kSpeculativeBigIntNegate: {
3761 // NOTE: If truncation is Unused, we still need to preserve at least the
3762 // BigInt type check (see http://crbug.com/1431713 for some details).
3763 // We can use the standard lowering to word64 operations and have the
3764 // following phases remove the unused truncation and subtraction
3765 // operations.
3766 if (Is64() && truncation.IsUsedAsWord64()) {
3767 VisitUnop<T>(node,
3770 if (lower<T>()) {
3771 ChangeUnaryToPureBinaryOp(node, lowering->machine()->Int64Sub(), 0,
3773 }
3774 } else {
3775 VisitUnop<T>(node,
3778 if (lower<T>()) {
3779 ChangeToPureOp(node, lowering->simplified()->BigIntNegate());
3780 }
3781 }
3782 return;
3783 }
3784 case IrOpcode::kStringConcat: {
3785 // TODO(turbofan): We currently depend on having this first length input
3786 // to make sure that the overflow check is properly scheduled before the
3787 // actual string concatenation. We should also use the length to pass it
3788 // to the builtin or decide in optimized code how to construct the
3789 // resulting string (i.e. cons string or sequential string).
3790 ProcessInput<T>(node, 0, UseInfo::TaggedSigned()); // length
3791 ProcessInput<T>(node, 1, UseInfo::AnyTagged()); // first
3792 ProcessInput<T>(node, 2, UseInfo::AnyTagged()); // second
3794 return;
3795 }
3796 case IrOpcode::kStringEqual:
3797 case IrOpcode::kStringLessThan:
3798 case IrOpcode::kStringLessThanOrEqual: {
3799 return VisitBinop<T>(node, UseInfo::AnyTagged(),
3801 }
3802 case IrOpcode::kStringCharCodeAt: {
3805 }
3806 case IrOpcode::kStringCodePointAt: {
3809 }
3810 case IrOpcode::kStringFromSingleCharCode: {
3813 return;
3814 }
3815 case IrOpcode::kStringFromSingleCodePoint: {
3818 return;
3819 }
3820 case IrOpcode::kStringFromCodePointAt: {
3823 }
3824 case IrOpcode::kStringIndexOf: {
3829 return;
3830 }
3831 case IrOpcode::kStringLength:
3832 case IrOpcode::kStringWrapperLength: {
3833 // TODO(bmeurer): The input representation should be TaggedPointer.
3834 // Fix this once we have a dedicated StringConcat/JSStringAdd
3835 // operator, which marks its output as TaggedPointer properly.
3838 return;
3839 }
3840 case IrOpcode::kTypedArrayLength: {
3843 return;
3844 }
3845 case IrOpcode::kStringSubstring: {
3851 return;
3852 }
3853 case IrOpcode::kStringToLowerCaseIntl:
3854 case IrOpcode::kStringToUpperCaseIntl: {
3857 return;
3858 }
3859 case IrOpcode::kCheckBounds:
3860 return VisitCheckBounds<T>(node, lowering);
3861 case IrOpcode::kCheckHeapObject: {
3862 if (InputCannotBe(node, Type::SignedSmall())) {
3865 } else {
3869 }
3870 if (lower<T>()) DeferReplacement(node, node->InputAt(0));
3871 return;
3872 }
3873 case IrOpcode::kCheckIf: {
3874 ProcessInput<T>(node, 0, UseInfo::Bool());
3877 return;
3878 }
3879 case IrOpcode::kCheckInternalizedString: {
3880 VisitCheck<T>(node, Type::InternalizedString(), lowering);
3881 return;
3882 }
3883 case IrOpcode::kCheckNumber: {
3884 Type const input_type = TypeOf(node->InputAt(0));
3885 if (input_type.Is(Type::Number())) {
3886 VisitNoop<T>(node, truncation);
3887 } else {
3890 }
3891 return;
3892 }
3893 case IrOpcode::kCheckNumberFitsInt32: {
3894 Type const input_type = TypeOf(node->InputAt(0));
3895 if (input_type.Is(Type::Signed32())) {
3896 VisitNoop<T>(node, truncation);
3897 } else {
3900 }
3901 return;
3902 }
3903 case IrOpcode::kCheckReceiver: {
3904 VisitCheck<T>(node, Type::Receiver(), lowering);
3905 return;
3906 }
3907 case IrOpcode::kCheckReceiverOrNullOrUndefined: {
3908 VisitCheck<T>(node, Type::ReceiverOrNullOrUndefined(), lowering);
3909 return;
3910 }
3911 case IrOpcode::kCheckSmi: {
3912 const CheckParameters& params = CheckParametersOf(node->op());
3913 if (SmiValuesAre32Bits() && truncation.IsUsedAsWord32()) {
3914 VisitUnop<T>(node,
3916 params.feedback()),
3918 } else {
3920 node,
3923 }
3924 if (lower<T>()) DeferReplacement(node, node->InputAt(0));
3925 return;
3926 }
3927 case IrOpcode::kCheckString: {
3928 const CheckParameters& params = CheckParametersOf(node->op());
3929 if (InputIs(node, Type::String())) {
3932 if (lower<T>()) DeferReplacement(node, node->InputAt(0));
3933 } else {
3935 node,
3938 }
3939 return;
3940 }
3941 case IrOpcode::kCheckStringOrStringWrapper: {
3942 const CheckParameters& params = CheckParametersOf(node->op());
3943 if (InputIs(node, Type::StringOrStringWrapper())) {
3946 if (lower<T>()) DeferReplacement(node, node->InputAt(0));
3947 } else {
3949 node,
3952 }
3953 return;
3954 }
3955 case IrOpcode::kCheckSymbol: {
3956 VisitCheck<T>(node, Type::Symbol(), lowering);
3957 return;
3958 }
3959
3960 case IrOpcode::kAllocate: {
3961 ProcessInput<T>(node, 0, UseInfo::Word());
3964 return;
3965 }
3966 case IrOpcode::kLoadFramePointer: {
3968 return;
3969 }
3970#if V8_ENABLE_WEBASSEMBLY
3971 case IrOpcode::kLoadStackPointer: {
3973 return;
3974 }
3975 case IrOpcode::kSetStackPointer: {
3977 return;
3978 }
3979#endif // V8_ENABLE_WEBASSEMBLY
3980 case IrOpcode::kLoadMessage: {
3981 if (truncation.IsUnused()) return VisitUnused<T>(node);
3983 return;
3984 }
3985 case IrOpcode::kStoreMessage: {
3986 ProcessInput<T>(node, 0, UseInfo::Word());
3990 return;
3991 }
3992 case IrOpcode::kLoadFieldByIndex: {
3993 if (truncation.IsUnused()) return VisitUnused<T>(node);
3996 return;
3997 }
3998 case IrOpcode::kLoadField: {
3999 if (truncation.IsUnused()) return VisitUnused<T>(node);
4000 FieldAccess access = FieldAccessOf(node->op());
4001 MachineRepresentation const representation =
4002 access.machine_type.representation();
4003 VisitUnop<T>(node, UseInfoForBasePointer(access), representation);
4004 return;
4005 }
4006 case IrOpcode::kStoreField: {
4007 FieldAccess access = FieldAccessOf(node->op());
4008 Node* value_node = node->InputAt(1);
4009 NodeInfo* input_info = GetInfo(value_node);
4010 MachineRepresentation field_representation =
4011 access.machine_type.representation();
4012
4013 // Convert to Smi if possible, such that we can avoid a write barrier.
4014 if (field_representation == MachineRepresentation::kTagged &&
4015 TypeOf(value_node).Is(Type::SignedSmall())) {
4016 field_representation = MachineRepresentation::kTaggedSigned;
4017 }
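        // Illustrative example: a value statically typed as SignedSmall stored
        // into a field declared kTagged can be treated as a kTaggedSigned
        // store, so WriteBarrierKindFor below can drop to kNoWriteBarrier,
        // since Smi values never require a barrier.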
4018 WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
4019 access.base_is_tagged, field_representation, access.offset,
4020 access.type, input_info->representation(), value_node);
4021
4022 ProcessInput<T>(node, 0, UseInfoForBasePointer(access));
4024 node, 1, TruncatingUseInfoFromRepresentation(field_representation));
4027 if (lower<T>()) {
4028 if (write_barrier_kind < access.write_barrier_kind) {
4029 access.write_barrier_kind = write_barrier_kind;
4030 ChangeOp(node, jsgraph_->simplified()->StoreField(access));
4031 }
4032 }
4033 return;
4034 }
4035 case IrOpcode::kLoadElement: {
4036 if (truncation.IsUnused()) return VisitUnused<T>(node);
4037 ElementAccess access = ElementAccessOf(node->op());
4038 VisitBinop<T>(node, UseInfoForBasePointer(access), UseInfo::Word(),
4039 access.machine_type.representation());
4040 return;
4041 }
4042 case IrOpcode::kLoadStackArgument: {
4043 if (truncation.IsUnused()) return VisitUnused<T>(node);
4045 return;
4046 }
4047 case IrOpcode::kStoreElement: {
4048 ElementAccess access = ElementAccessOf(node->op());
4049 Node* value_node = node->InputAt(2);
4050 NodeInfo* input_info = GetInfo(value_node);
4051 MachineRepresentation element_representation =
4052 access.machine_type.representation();
4053
4054 // Convert to Smi if possible, such that we can avoid a write barrier.
4055 if (element_representation == MachineRepresentation::kTagged &&
4056 TypeOf(value_node).Is(Type::SignedSmall())) {
4057 element_representation = MachineRepresentation::kTaggedSigned;
4058 }
4059 WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
4060 access.base_is_tagged, element_representation, access.type,
4061 input_info->representation(), value_node);
4062 ProcessInput<T>(node, 0, UseInfoForBasePointer(access)); // base
4063 ProcessInput<T>(node, 1, UseInfo::Word()); // index
4064 ProcessInput<T>(node, 2,
4065 TruncatingUseInfoFromRepresentation(
4066 element_representation)); // value
4069 if (lower<T>()) {
4070 if (write_barrier_kind < access.write_barrier_kind) {
4071 access.write_barrier_kind = write_barrier_kind;
4072 ChangeOp(node, jsgraph_->simplified()->StoreElement(access));
4073 }
4074 }
4075 return;
4076 }
4077 case IrOpcode::kNumberIsFloat64Hole: {
4080 return;
4081 }
4082 case IrOpcode::kTransitionAndStoreElement: {
4083 Type value_type = TypeOf(node->InputAt(2));
4084
4085 ProcessInput<T>(node, 0, UseInfo::AnyTagged()); // array
4086 ProcessInput<T>(node, 1, UseInfo::Word()); // index
4087
4088 if (value_type.Is(Type::SignedSmall())) {
4089 ProcessInput<T>(node, 2, UseInfo::TruncatingWord32()); // value
4090 if (lower<T>()) {
4091 ChangeOp(node, simplified()->StoreSignedSmallElement());
4092 }
4093 } else if (value_type.Is(Type::Number())) {
4094 ProcessInput<T>(node, 2, UseInfo::TruncatingFloat64()); // value
4095 if (lower<T>()) {
4096 MapRef double_map = DoubleMapParameterOf(node->op());
4097 ChangeOp(node,
4098 simplified()->TransitionAndStoreNumberElement(double_map));
4099 }
4100 } else if (value_type.Is(Type::NonNumber())) {
4101 ProcessInput<T>(node, 2, UseInfo::AnyTagged()); // value
4102 if (lower<T>()) {
4103 MapRef fast_map = FastMapParameterOf(node->op());
4104 ChangeOp(node, simplified()->TransitionAndStoreNonNumberElement(
4105 fast_map, value_type));
4106 }
4107 } else {
4108 ProcessInput<T>(node, 2, UseInfo::AnyTagged()); // value
4109 }
4110
4113 return;
4114 }
4115 case IrOpcode::kLoadTypedElement: {
4116 MachineRepresentation const rep =
4117 MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
4118 ProcessInput<T>(node, 0, UseInfo::AnyTagged()); // buffer
4119 ProcessInput<T>(node, 1, UseInfo::AnyTagged()); // base pointer
4120 ProcessInput<T>(node, 2, UseInfo::Word()); // external pointer
4121 ProcessInput<T>(node, 3, UseInfo::Word()); // index
4123 SetOutput<T>(node, rep);
4124 return;
4125 }
4126 case IrOpcode::kLoadDataViewElement: {
4127 MachineRepresentation const rep =
4128 MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
4129 ProcessInput<T>(node, 0, UseInfo::AnyTagged()); // object
4130 ProcessInput<T>(node, 1, UseInfo::Word()); // base
4131 ProcessInput<T>(node, 2, UseInfo::Word()); // index
4132 ProcessInput<T>(node, 3, UseInfo::Bool()); // little-endian
4134 SetOutput<T>(node, rep);
4135 return;
4136 }
4137 case IrOpcode::kStoreTypedElement: {
4138 MachineRepresentation const rep =
4139 MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
4140 ProcessInput<T>(node, 0, UseInfo::AnyTagged()); // buffer
4141 ProcessInput<T>(node, 1, UseInfo::AnyTagged()); // base pointer
4142 ProcessInput<T>(node, 2, UseInfo::Word()); // external pointer
4143 ProcessInput<T>(node, 3, UseInfo::Word()); // index
4144 ProcessInput<T>(node, 4,
4145 TruncatingUseInfoFromRepresentation(rep)); // value
4148 return;
4149 }
4150 case IrOpcode::kStoreDataViewElement: {
4151 MachineRepresentation const rep =
4152 MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
4153 ProcessInput<T>(node, 0, UseInfo::AnyTagged()); // object
4154 ProcessInput<T>(node, 1, UseInfo::Word()); // base
4155 ProcessInput<T>(node, 2, UseInfo::Word()); // index
4156 ProcessInput<T>(node, 3,
4157 TruncatingUseInfoFromRepresentation(rep)); // value
4158 ProcessInput<T>(node, 4, UseInfo::Bool()); // little-endian
4161 return;
4162 }
4163 case IrOpcode::kConvertReceiver: {
4164 Type input_type = TypeOf(node->InputAt(0));
4165 ProcessInput<T>(node, 0, UseInfo::AnyTagged()); // object
4166 ProcessInput<T>(node, 1, UseInfo::AnyTagged()); // native_context
4167 ProcessInput<T>(node, 2, UseInfo::AnyTagged()); // global_proxy
4170 if (lower<T>()) {
4171 // Try to optimize the {node} based on the input type.
4172 if (input_type.Is(Type::Receiver())) {
4173 DeferReplacement(node, node->InputAt(0));
4174 } else if (input_type.Is(Type::NullOrUndefined())) {
4175 DeferReplacement(node, node->InputAt(2));
4176 } else if (!input_type.Maybe(Type::NullOrUndefined())) {
4177 ChangeOp(node, lowering->simplified()->ConvertReceiver(
4179 }
4180 }
4181 return;
4182 }
4183 case IrOpcode::kPlainPrimitiveToNumber: {
4184 if (InputIs(node, Type::Boolean())) {
4186 if (lower<T>()) {
4188 node->op(), node->InputAt(0)));
4189 }
4190 } else if (InputIs(node, Type::String())) {
4193 if (lower<T>()) {
4194 ChangeOp(node, simplified()->StringToNumber());
4195 }
4196 } else if (truncation.IsUsedAsWord32()) {
4197 if (InputIs(node, Type::NumberOrOddball())) {
4200 if (lower<T>()) {
4202 node->op(), node->InputAt(0)));
4203 }
4204 } else {
4207 if (lower<T>()) {
4208 ChangeOp(node, simplified()->PlainPrimitiveToWord32());
4209 }
4210 }
4211 } else if (truncation.TruncatesOddballAndBigIntToNumber()) {
4212 if (InputIs(node, Type::NumberOrOddball())) {
4215 if (lower<T>()) {
4217 node->op(), node->InputAt(0)));
4218 }
4219 } else {
4222 if (lower<T>()) {
4223 ChangeOp(node, simplified()->PlainPrimitiveToFloat64());
4224 }
4225 }
4226 } else {
4229 }
4230 return;
4231 }
4232 case IrOpcode::kSpeculativeToNumber: {
4233 NumberOperationParameters const& p =
4234 NumberOperationParametersOf(node->op());
4235 switch (p.hint()) {
4238 VisitUnop<T>(node,
4239 CheckedUseInfoAsWord32FromHint(
4240 p.hint(), kDistinguishZeros, p.feedback()),
4241 MachineRepresentation::kWord32, Type::Signed32());
4242 break;
4248 node, CheckedUseInfoAsFloat64FromHint(p.hint(), p.feedback()),
4250 break;
4251 }
4252 if (lower<T>()) DeferReplacement(node, node->InputAt(0));
4253 return;
4254 }
4255 case IrOpcode::kSpeculativeToBigInt: {
4256 if (truncation.IsUnused() && InputIs(node, Type::BigInt())) {
4257 VisitUnused<T>(node);
4258 return;
4259 }
4260 if (Is64() && truncation.IsUsedAsWord64()) {
4261 VisitUnop<T>(node,
4264 } else {
4265 BigIntOperationParameters const& p =
4266 BigIntOperationParametersOf(node->op());
4267 switch (p.hint()) {
4271 break;
4272 }
4274 VisitUnop<T>(node,
4277 }
4278 }
4279 }
4280 if (lower<T>()) DeferReplacement(node, node->InputAt(0));
4281 return;
4282 }
4283 case IrOpcode::kObjectIsArrayBufferView: {
4284 // TODO(turbofan): Introduce a Type::ArrayBufferView?
4286 return;
4287 }
4288 case IrOpcode::kObjectIsBigInt: {
4289 VisitObjectIs<T>(node, Type::BigInt(), lowering);
4290 return;
4291 }
4292 case IrOpcode::kObjectIsCallable: {
4293 VisitObjectIs<T>(node, Type::Callable(), lowering);
4294 return;
4295 }
4296 case IrOpcode::kObjectIsConstructor: {
4297 // TODO(turbofan): Introduce a Type::Constructor?
4299 return;
4300 }
4301 case IrOpcode::kObjectIsDetectableCallable: {
4302 VisitObjectIs<T>(node, Type::DetectableCallable(), lowering);
4303 return;
4304 }
4305 case IrOpcode::kObjectIsFiniteNumber: {
4306 Type const input_type = GetUpperBound(node->InputAt(0));
4307 if (input_type.Is(type_cache_->kSafeInteger)) {
4309 if (lower<T>()) {
4312 true_type(), lowering->jsgraph()->Int32Constant(1)));
4313 }
4314 } else if (!input_type.Maybe(Type::Number())) {
4316 if (lower<T>()) {
4319 false_type(), lowering->jsgraph()->Int32Constant(0)));
4320 }
4321 } else if (input_type.Is(Type::Number())) {
4324 if (lower<T>()) {
4325 ChangeOp(node, lowering->simplified()->NumberIsFinite());
4326 }
4327 } else {
4329 }
4330 return;
4331 }
4332 case IrOpcode::kNumberIsFinite: {
4335 return;
4336 }
4337 case IrOpcode::kObjectIsSafeInteger: {
4338 Type const input_type = GetUpperBound(node->InputAt(0));
4339 if (input_type.Is(type_cache_->kSafeInteger)) {
4341 if (lower<T>()) {
4344 true_type(), lowering->jsgraph()->Int32Constant(1)));
4345 }
4346 } else if (!input_type.Maybe(Type::Number())) {
4348 if (lower<T>()) {
4351 false_type(), lowering->jsgraph()->Int32Constant(0)));
4352 }
4353 } else if (input_type.Is(Type::Number())) {
4356 if (lower<T>()) {
4357 ChangeOp(node, lowering->simplified()->NumberIsSafeInteger());
4358 }
4359 } else {
4361 }
4362 return;
4363 }
4364 case IrOpcode::kNumberIsSafeInteger: {
4365 UNREACHABLE();
4366 }
4367 case IrOpcode::kObjectIsInteger: {
4368 Type const input_type = GetUpperBound(node->InputAt(0));
4369 if (input_type.Is(type_cache_->kSafeInteger)) {
4371 if (lower<T>()) {
4374 true_type(), lowering->jsgraph()->Int32Constant(1)));
4375 }
4376 } else if (!input_type.Maybe(Type::Number())) {
4378 if (lower<T>()) {
4381 false_type(), lowering->jsgraph()->Int32Constant(0)));
4382 }
4383 } else if (input_type.Is(Type::Number())) {
4386 if (lower<T>()) {
4387 ChangeOp(node, lowering->simplified()->NumberIsInteger());
4388 }
4389 } else {
4391 }
4392 return;
4393 }
4394 case IrOpcode::kNumberIsInteger: {
4397 return;
4398 }
4399 case IrOpcode::kObjectIsMinusZero: {
4400 Type const input_type = GetUpperBound(node->InputAt(0));
4401 if (input_type.Is(Type::MinusZero())) {
4403 if (lower<T>()) {
4406 true_type(), lowering->jsgraph()->Int32Constant(1)));
4407 }
4408 } else if (!input_type.Maybe(Type::MinusZero())) {
4410 if (lower<T>()) {
4413 false_type(), lowering->jsgraph()->Int32Constant(0)));
4414 }
4415 } else if (input_type.Is(Type::Number())) {
4418 if (lower<T>()) {
4419 ChangeOp(node, simplified()->NumberIsMinusZero());
4420 }
4421 } else {
4423 }
4424 return;
4425 }
4426 case IrOpcode::kObjectIsNaN: {
4427 Type const input_type = GetUpperBound(node->InputAt(0));
4428 if (input_type.Is(Type::NaN())) {
4430 if (lower<T>()) {
4433 true_type(), lowering->jsgraph()->Int32Constant(1)));
4434 }
4435 } else if (!input_type.Maybe(Type::NaN())) {
4437 if (lower<T>()) {
4440 false_type(), lowering->jsgraph()->Int32Constant(0)));
4441 }
4442 } else if (input_type.Is(Type::Number())) {
4445 if (lower<T>()) {
4446 ChangeOp(node, simplified()->NumberIsNaN());
4447 }
4448 } else {
4450 }
4451 return;
4452 }
4453 case IrOpcode::kNumberIsNaN: {
4456 return;
4457 }
4458 case IrOpcode::kObjectIsNonCallable: {
4459 VisitObjectIs<T>(node, Type::NonCallable(), lowering);
4460 return;
4461 }
4462 case IrOpcode::kObjectIsNumber: {
4463 VisitObjectIs<T>(node, Type::Number(), lowering);
4464 return;
4465 }
4466 case IrOpcode::kObjectIsReceiver: {
4467 VisitObjectIs<T>(node, Type::Receiver(), lowering);
4468 return;
4469 }
4470 case IrOpcode::kObjectIsSmi: {
4471 // TODO(turbofan): Optimize based on input representation.
4473 return;
4474 }
4475 case IrOpcode::kObjectIsString: {
4476 VisitObjectIs<T>(node, Type::String(), lowering);
4477 return;
4478 }
4479 case IrOpcode::kObjectIsSymbol: {
4480 VisitObjectIs<T>(node, Type::Symbol(), lowering);
4481 return;
4482 }
4483 case IrOpcode::kObjectIsUndetectable: {
4484 VisitObjectIs<T>(node, Type::Undetectable(), lowering);
4485 return;
4486 }
4487 case IrOpcode::kArgumentsLength:
4488 case IrOpcode::kRestLength: {
4490 return;
4491 }
4492 case IrOpcode::kNewDoubleElements:
4493 case IrOpcode::kNewSmiOrObjectElements: {
4496 return;
4497 }
4498 case IrOpcode::kNewArgumentsElements: {
4501 return;
4502 }
4503 case IrOpcode::kCheckFloat64Hole: {
4504 Type const input_type = TypeOf(node->InputAt(0));
4506 CheckFloat64HoleParametersOf(node->op()).mode();
4508 // If {mode} is allow-return-hole _and_ the {truncation}
4509 // identifies NaN and undefined, we can just pass along
4510 // the {truncation} and completely wipe the {node}.
4511 if (truncation.IsUnused()) return VisitUnused<T>(node);
4512 if (truncation.TruncatesOddballAndBigIntToNumber()) {
4515 if (lower<T>()) DeferReplacement(node, node->InputAt(0));
4516 return;
4517 }
4518 }
4521 MachineRepresentation::kFloat64, Type::Number());
4522 if (lower<T>() && input_type.Is(Type::Number())) {
4523 DeferReplacement(node, node->InputAt(0));
4524 }
4525 return;
4526 }
4527 case IrOpcode::kChangeFloat64HoleToTagged: {
4528 // If the {truncation} identifies NaN and undefined, we can just pass
4529 // along the {truncation} and completely wipe the {node}.
4530 if (truncation.IsUnused()) return VisitUnused<T>(node);
4531 if (truncation.TruncatesOddballAndBigIntToNumber()) {
4534 if (lower<T>()) DeferReplacement(node, node->InputAt(0));
4535 return;
4536 }
4540 return;
4541 }
4542 case IrOpcode::kCheckNotTaggedHole: {
4545 return;
4546 }
4547 case IrOpcode::kCheckClosure: {
4551 return;
4552 }
4553 case IrOpcode::kConvertTaggedHoleToUndefined: {
4554 if (InputIs(node, Type::NumberOrHole()) &&
4555 truncation.IsUsedAsWord32()) {
4556 // Propagate the Word32 truncation.
4559 if (lower<T>()) DeferReplacement(node, node->InputAt(0));
4560 } else if (InputIs(node, Type::NumberOrHole()) &&
4561 truncation.TruncatesOddballAndBigIntToNumber()) {
4562 // Propagate the Float64 truncation.
4565 if (lower<T>()) DeferReplacement(node, node->InputAt(0));
4566 } else if (InputIs(node, Type::NonInternal())) {
4569 if (lower<T>()) DeferReplacement(node, node->InputAt(0));
4570 } else {
4571 // TODO(turbofan): Add a (Tagged) truncation that identifies hole
4572 // and undefined, i.e. for a[i] === obj cases.
4575 }
4576 return;
4577 }
4578 case IrOpcode::kCheckEqualsSymbol:
4579 case IrOpcode::kCheckEqualsInternalizedString:
4580 return VisitBinop<T>(node, UseInfo::AnyTagged(),
4582 case IrOpcode::kMapGuard:
4583 // Eliminate MapGuard nodes here.
4584 return VisitUnused<T>(node);
4585 case IrOpcode::kCheckMaps: {
4586 CheckMapsParameters const& p = CheckMapsParametersOf(node->op());
4587 return VisitUnop<T>(
4590 }
4591 case IrOpcode::kTransitionElementsKind: {
4592 return VisitUnop<T>(
4595 }
4596 case IrOpcode::kTransitionElementsKindOrCheckMap: {
4597 return VisitUnop<T>(
4600 }
4601 case IrOpcode::kCompareMaps:
4602 return VisitUnop<T>(
4605 case IrOpcode::kEnsureWritableFastElements:
4606 return VisitBinop<T>(node, UseInfo::AnyTagged(),
4608 case IrOpcode::kMaybeGrowFastElements: {
4609 ProcessInput<T>(node, 0, UseInfo::AnyTagged()); // object
4610 ProcessInput<T>(node, 1, UseInfo::AnyTagged()); // elements
4611 ProcessInput<T>(node, 2, UseInfo::TruncatingWord32()); // index
4612 ProcessInput<T>(node, 3, UseInfo::TruncatingWord32()); // length
4615 return;
4616 }
4617
4618 case IrOpcode::kDateNow:
4619 VisitInputs<T>(node);
4621 case IrOpcode::kDoubleArrayMax: {
4622 return VisitUnop<T>(node, UseInfo::AnyTagged(),
4624 }
4625 case IrOpcode::kDoubleArrayMin: {
4626 return VisitUnop<T>(node, UseInfo::AnyTagged(),
4628 }
4629 case IrOpcode::kFrameState:
4630 return VisitFrameState<T>(FrameState{node});
4631 case IrOpcode::kStateValues:
4632 return VisitStateValues<T>(node);
4633 case IrOpcode::kObjectState:
4634 return VisitObjectState<T>(node);
4635 case IrOpcode::kObjectId:
4637
4638 case IrOpcode::kTypeGuard: {
4639 if (truncation.IsUnused()) return VisitUnused<T>(node);
4640
4641 // We just get rid of the sigma here, choosing the best representation
4642 // for the sigma's type.
4643 Type type = TypeOf(node);
4644 MachineRepresentation representation =
4645 GetOutputInfoForPhi(type, truncation);
4646
4647 // Here we pretend that the input has the sigma's type for the
4648 // conversion.
4649 UseInfo use(representation, truncation);
4650 if (propagate<T>()) {
4651 EnqueueInput<T>(node, 0, use);
4652 } else if (lower<T>()) {
4653 ConvertInput(node, 0, use, type);
4654 }
4656 SetOutput<T>(node, representation);
4657 return;
4658 }
4659
4660 case IrOpcode::kFinishRegion:
4661 VisitInputs<T>(node);
4662 // Assume the output is a tagged pointer.
4664
4665 case IrOpcode::kReturn:
4666 VisitReturn<T>(node);
4667 // Assume the output is tagged.
4669
4670 case IrOpcode::kFindOrderedHashMapEntry: {
4671 Type const key_type = TypeOf(node->InputAt(1));
4672 if (key_type.Is(Type::Signed32OrMinusZero())) {
4675 if (lower<T>()) {
4676 ChangeOp(
4677 node,
4678 lowering->simplified()->FindOrderedHashMapEntryForInt32Key());
4679 }
4680 } else {
4683 }
4684 return;
4685 }
4686
4687 case IrOpcode::kFindOrderedHashSetEntry:
4690 return;
4691
4692 case IrOpcode::kFastApiCall: {
4693 VisitFastApiCall<T>(node, lowering);
4694 return;
4695 }
4696
4697 // Operators whose inputs are all tagged and that produce either no output
4698 // or a tagged output have uniform handling.
4699 case IrOpcode::kEnd:
4700 case IrOpcode::kIfSuccess:
4701 case IrOpcode::kIfException:
4702 case IrOpcode::kIfTrue:
4703 case IrOpcode::kIfFalse:
4704 case IrOpcode::kIfValue:
4705 case IrOpcode::kIfDefault:
4706 case IrOpcode::kDeoptimize:
4707 case IrOpcode::kEffectPhi:
4708 case IrOpcode::kTerminate:
4709 case IrOpcode::kCheckpoint:
4710 case IrOpcode::kLoop:
4711 case IrOpcode::kMerge:
4712 case IrOpcode::kThrow:
4713 case IrOpcode::kBeginRegion:
4714 case IrOpcode::kProjection:
4715 case IrOpcode::kOsrValue:
4716 case IrOpcode::kArgumentsElementsState:
4717 case IrOpcode::kArgumentsLengthState:
4718 case IrOpcode::kUnreachable:
4719 case IrOpcode::kRuntimeAbort:
4720// All JavaScript operators except JSToNumber, JSToNumberConvertBigInt,
4721// JSToNumeric and JSWasmCall have uniform handling.
4722#define OPCODE_CASE(name, ...) case IrOpcode::k##name:
4727#undef OPCODE_CASE
4728 case IrOpcode::kJSBitwiseNot:
4729 case IrOpcode::kJSDecrement:
4730 case IrOpcode::kJSIncrement:
4731 case IrOpcode::kJSNegate:
4732 case IrOpcode::kJSToLength:
4733 case IrOpcode::kJSToName:
4734 case IrOpcode::kJSToObject:
4735 case IrOpcode::kJSToString:
4736 case IrOpcode::kJSParseInt:
4737#if V8_ENABLE_WEBASSEMBLY
4738 if (node->opcode() == IrOpcode::kJSWasmCall) {
4739 return VisitJSWasmCall<T>(node, lowering);
4740 }
4741#endif // V8_ENABLE_WEBASSEMBLY
4742 VisitInputs<T>(node);
4743 // Assume the output is tagged.
4745 case IrOpcode::kDeadValue:
4746 ProcessInput<T>(node, 0, UseInfo::Any());
4748 case IrOpcode::kStaticAssert:
4749 DCHECK(TypeOf(node->InputAt(0)).Is(Type::Boolean()));
4750 return VisitUnop<T>(node, UseInfo::Bool(),
4752 case IrOpcode::kAssertType:
4753 return VisitUnop<T>(node, UseInfo::AnyTagged(),
4755 case IrOpcode::kVerifyType: {
4756 Type inputType = TypeOf(node->InputAt(0));
4758 inputType);
4759 if (lower<T>()) {
4760 if (inputType.CanBeAsserted()) {
4761 ChangeOp(node, simplified()->AssertType(inputType));
4762 } else {
4763 if (!v8_flags.fuzzing) {
4764#ifdef DEBUG
4765 inputType.Print();
4766#endif
4767 FATAL("%%VerifyType: unsupported type");
4768 }
4770 }
4771 }
4772 return;
4773 }
4774 case IrOpcode::kCheckTurboshaftTypeOf: {
4775 NodeInfo* info = GetInfo(node->InputAt(0));
4776 MachineRepresentation input_rep = info->representation();
4777 ProcessInput<T>(node, 0, UseInfo{input_rep, Truncation::None()});
4778 ProcessInput<T>(node, 1, UseInfo::Any());
4779 SetOutput<T>(node, input_rep);
4780 return;
4781 }
4782 case IrOpcode::kDebugBreak:
4783 return;
4784
4785 // Nodes from machine graphs.
4786 case IrOpcode::kEnterMachineGraph: {
4787 DCHECK_EQ(1, node->op()->ValueInputCount());
4788 UseInfo use_info = OpParameter<UseInfo>(node->op());
4789 ProcessInput<T>(node, 0, use_info);
4790 SetOutput<T>(node, use_info.representation());
4791 if (lower<T>()) {
4793 Type::Machine(), node->InputAt(0)));
4794 }
4795 return;
4796 }
4797 case IrOpcode::kExitMachineGraph: {
4798 DCHECK_EQ(1, node->op()->ValueInputCount());
4799 ProcessInput<T>(node, 0, UseInfo::Any());
4800 const auto& p = ExitMachineGraphParametersOf(node->op());
4801 SetOutput<T>(node, p.output_representation(), p.output_type());
4802 if (lower<T>()) {
4804 p.output_type(), node->InputAt(0)));
4805 }
4806 return;
4807 }
4808 case IrOpcode::kInt32Add:
4809 case IrOpcode::kInt32LessThanOrEqual:
4810 case IrOpcode::kInt32Sub:
4811 case IrOpcode::kUint32LessThan:
4812 case IrOpcode::kUint32LessThanOrEqual:
4813 case IrOpcode::kUint64LessThan:
4814 case IrOpcode::kUint64LessThanOrEqual:
4815 case IrOpcode::kUint32Div:
4816 case IrOpcode::kWord32And:
4817 case IrOpcode::kWord32Equal:
4818 case IrOpcode::kWord32Or:
4819 case IrOpcode::kWord32Shl:
4820 case IrOpcode::kWord32Shr:
4821 for (int i = 0; i < node->InputCount(); ++i) {
4822 ProcessInput<T>(node, i, UseInfo::Any());
4823 }
4825 return;
4826 case IrOpcode::kInt64Add:
4827 case IrOpcode::kInt64Sub:
4828 case IrOpcode::kUint64Div:
4829 case IrOpcode::kWord64And:
4830 case IrOpcode::kWord64Shl:
4831 case IrOpcode::kWord64Shr:
4832 case IrOpcode::kChangeUint32ToUint64:
4833 for (int i = 0; i < node->InputCount(); ++i) {
4834 ProcessInput<T>(node, i, UseInfo::Any());
4835 }
4837 return;
4838 case IrOpcode::kLoad:
4839 for (int i = 0; i < node->InputCount(); ++i) {
4840 ProcessInput<T>(node, i, UseInfo::Any());
4841 }
4843 return;
4844
4845#ifdef V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA
4846 case IrOpcode::kGetContinuationPreservedEmbedderData:
4848 return;
4849
4850 case IrOpcode::kSetContinuationPreservedEmbedderData:
4853 return;
4854#endif // V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA
4855
4856 default:
4857 FATAL(
4858 "Representation inference: unsupported opcode %i (%s), node #%i\n.",
4859 node->opcode(), node->op()->mnemonic(), node->id());
4860 break;
4861 }
4862 UNREACHABLE();
4863 }
4864
4866 if (node->op()->EffectInputCount() == 1) {
4867 Node* control;
4868 if (node->op()->ControlInputCount() == 1) {
4869 control = NodeProperties::GetControlInput(node);
4870 } else {
4871 DCHECK_EQ(node->op()->ControlInputCount(), 0);
4872 control = nullptr;
4873 }
4874 Node* effect = NodeProperties::GetEffectInput(node);
4875 ReplaceEffectControlUses(node, effect, control);
4876 } else {
4877 DCHECK_EQ(0, node->op()->EffectInputCount());
4878 DCHECK_EQ(0, node->op()->ControlOutputCount());
4879 DCHECK_EQ(0, node->op()->EffectOutputCount());
4880 }
4881 }
4882
4883 void DeferReplacement(Node* node, Node* replacement) {
4884 TRACE("defer replacement #%d:%s with #%d:%s\n", node->id(),
4885 node->op()->mnemonic(), replacement->id(),
4886 replacement->op()->mnemonic());
4887
4889 node->NullAllInputs(); // Node is now dead.
4890
4892 replacements_.push_back(replacement);
4893
4894 NotifyNodeReplaced(node, replacement);
4895 }
4896
4899 DCHECK(!type.IsInvalid());
4900 node = graph()->NewNode(common()->SLVerifierHint(nullptr, type), node);
4901 verifier_->RecordHint(node);
4902 }
4903 return node;
4904 }
4905
4908 node = graph()->NewNode(common()->SLVerifierHint(semantics, {}), node);
4909 verifier_->RecordHint(node);
4910 }
4911 return node;
4912 }
4913
4914 private:
4915 void ChangeOp(Node* node, const Operator* new_op) {
4917
4918 if (V8_UNLIKELY(observe_node_manager_ != nullptr))
4920 node);
4921 }
4922
4923 void NotifyNodeReplaced(Node* node, Node* replacement) {
4924 if (V8_UNLIKELY(observe_node_manager_ != nullptr))
4926 replacement);
4927 }
4928
4929 Type true_type() const { return singleton_true_; }
4931
4934 Zone* zone_; // Temporary zone.
4935 // Map from node to its uses that might need to be revisited.
4937 size_t count_; // number of nodes in the graph
4938 ZoneVector<NodeInfo> info_; // node id -> usage information
4939#ifdef DEBUG
4940 ZoneVector<InputUseInfos> node_input_use_infos_; // Debug information about
4941 // requirements on inputs.
4942#endif // DEBUG
4943 NodeVector replacements_; // replacements to be done after lowering
4944 RepresentationChanger* changer_; // for inserting representation changes
4945 ZoneQueue<Node*> revisit_queue_; // Queue for revisiting nodes.
4946
4947 struct NodeState {
4950 };
4951 NodeVector traversal_nodes_; // Order in which to traverse the nodes.
4952 // TODO(danno): RepresentationSelector shouldn't know anything about the
4953 // source positions table, but must for now since there currently is no other
4954 // way to pass down source position information to nodes created during
4955 // lowering. Once this phase becomes a vanilla reducer, it should get source
4956 // position information via the SourcePositionWrapper like all other reducers.
4960 OperationTyper op_typer_; // helper for the feedback typer
4966 SimplifiedLoweringVerifier* verifier_; // Used to verify output graph.
4967
4969 DCHECK(node->id() < count_);
4970 return &info_[node->id()];
4971 }
4972 Zone* zone() { return zone_; }
4973 Zone* graph_zone() { return jsgraph_->zone(); }
4974 Linkage* linkage() { return linkage_; }
4975};
4976
4977// Template specializations
4978
4979// Enqueue {use_node}'s {index} input if the {use_info} contains new information
4980// for that input node.
4981template <>
4983 UseInfo use_info) {
4984 Node* node = use_node->InputAt(index);
4985 NodeInfo* info = GetInfo(node);
4986#ifdef DEBUG
4987 // Check monotonicity of input requirements.
4988 node_input_use_infos_[use_node->id()].SetAndCheckInput(use_node, index,
4989 use_info);
4990#endif // DEBUG
4991 if (info->unvisited()) {
4992 info->AddUse(use_info);
4993 TRACE(" initial #%i: %s\n", node->id(), info->truncation().description());
4994 return;
4995 }
4996 TRACE(" queue #%i?: %s\n", node->id(), info->truncation().description());
4997 if (info->AddUse(use_info)) {
4998 // New usage information for the node is available.
4999 if (!info->queued()) {
5000 DCHECK(info->visited());
5001 revisit_queue_.push(node);
5002 info->set_queued();
5003 TRACE(" added: %s\n", info->truncation().description());
5004 } else {
5005 TRACE(" inqueue: %s\n", info->truncation().description());
5006 }
5007 }
5008}
5009
5010template <>
5012 Node* node, MachineRepresentation representation, Type restriction_type) {
5013 NodeInfo* const info = GetInfo(node);
5014 info->set_restriction_type(restriction_type);
5015}
5016
5017template <>
5019 Node* node, MachineRepresentation representation, Type restriction_type) {
5020 NodeInfo* const info = GetInfo(node);
5021 Type node_static_type = NodeProperties::GetTypeOrAny(node);
5022 DCHECK(Type::Intersect(restriction_type, node_static_type, zone_)
5023 .Is(info->restriction_type()));
5024 USE(node_static_type);
5025 info->set_output(representation);
5026}
5027
5028template <>
5030 Node* node, MachineRepresentation representation, Type restriction_type) {
5031 NodeInfo* const info = GetInfo(node);
5032 Type node_static_type = NodeProperties::GetTypeOrAny(node);
5033 DCHECK_EQ(info->representation(), representation);
5034 DCHECK(Type::Intersect(restriction_type, node_static_type, zone_)
5035 .Is(info->restriction_type()));
5036 USE(node_static_type);
5037 USE(info);
5038}
5039
5040template <>
5042 UseInfo use) {
5043 DCHECK_IMPLIES(use.type_check() != TypeCheckKind::kNone,
5044 !node->op()->HasProperty(Operator::kNoDeopt) &&
5045 node->op()->EffectInputCount() > 0);
5046 EnqueueInput<PROPAGATE>(node, index, use);
5047}
5048
5049template <>
5051 UseInfo use) {
5052 DCHECK_IMPLIES(use.type_check() != TypeCheckKind::kNone,
5053 !node->op()->HasProperty(Operator::kNoDeopt) &&
5054 node->op()->EffectInputCount() > 0);
5055}
5056
5057template <>
5059 UseInfo use) {
5060 DCHECK_IMPLIES(use.type_check() != TypeCheckKind::kNone,
5061 !node->op()->HasProperty(Operator::kNoDeopt) &&
5062 node->op()->EffectInputCount() > 0);
5063 ConvertInput(node, index, use);
5064}
5065
5066template <>
5068 int index) {
5070
5071 // Enqueue other inputs (effects, control).
5072 for (int i = std::max(index, NodeProperties::FirstEffectIndex(node));
5073 i < node->InputCount(); ++i) {
5075 }
5076}
5077
5078// The default, most general visitation case. For {node}, process all value,
5079// context, frame state, effect, and control inputs, assuming that value
5080// inputs should have {kRepTagged} representation and can observe all output
5081// values {kTypeAny}.
5082template <>
5084 int first_effect_index = NodeProperties::FirstEffectIndex(node);
5085 // Visit value, context and frame state inputs as tagged.
5086 for (int i = 0; i < first_effect_index; i++) {
5088 }
5089 // Only enqueue other inputs (effects, control).
5090 for (int i = first_effect_index; i < node->InputCount(); i++) {
5092 }
5093}
5094
5095template <>
5097 int first_effect_index = NodeProperties::FirstEffectIndex(node);
5098 // Visit value, context and frame state inputs as tagged.
5099 for (int i = 0; i < first_effect_index; i++) {
5101 }
5102}
5103
5104template <>
5106 // If the node is effectful and produces an impossible value, then we
5107 // insert an Unreachable node after it.
5108 if (node->op()->ValueOutputCount() > 0 &&
5109 node->op()->EffectOutputCount() > 0 &&
5110 node->opcode() != IrOpcode::kUnreachable && TypeOf(node).IsNone()) {
5111 Node* control = (node->op()->ControlOutputCount() == 0)
5114
5115 Node* unreachable =
5116 graph()->NewNode(common()->Unreachable(), node, control);
5117
5118 // Insert the unreachable node and replace all the effect uses of the
5119 // {node} with the new unreachable node.
5120 for (Edge edge : node->use_edges()) {
5121 if (!NodeProperties::IsEffectEdge(edge)) continue;
5122 // Make sure to not overwrite the unreachable node's input. That would
5123 // create a cycle.
5124 if (edge.from() == unreachable) continue;
5125 // Avoid messing up the exceptional path.
5126 if (edge.from()->opcode() == IrOpcode::kIfException) {
5127 DCHECK(!node->op()->HasProperty(Operator::kNoThrow));
5128 DCHECK_EQ(NodeProperties::GetControlInput(edge.from()), node);
5129 continue;
5130 }
5131
5132 edge.UpdateTo(unreachable);
5133 }
5134 }
5135}
5136
5140 TickCounter* tick_counter, Linkage* linkage, OptimizedCompilationInfo* info,
5141 ObserveNodeManager* observe_node_manager)
5142 : jsgraph_(jsgraph),
5143 broker_(broker),
5144 zone_(zone),
5145 type_cache_(TypeCache::Get()),
5146 source_positions_(source_positions),
5147 node_origins_(node_origins),
5148 tick_counter_(tick_counter),
5150 info_(info),
5151 observe_node_manager_(observe_node_manager) {}
5152
5154 SimplifiedLoweringVerifier* verifier = nullptr;
5155 if (v8_flags.verify_simplified_lowering) {
5157 }
5158 RepresentationChanger changer(jsgraph(), broker_, verifier);
5159 RepresentationSelector selector(
5162 selector.Run(this);
5163}
5164
5166 Node* node, RepresentationSelector* selector) {
5167 DCHECK(node->opcode() == IrOpcode::kJSToNumber ||
5168 node->opcode() == IrOpcode::kJSToNumberConvertBigInt ||
5169 node->opcode() == IrOpcode::kJSToNumeric);
5170 Node* value = node->InputAt(0);
5171 Node* context = node->InputAt(1);
5172 Node* frame_state = node->InputAt(2);
5173 Node* effect = node->InputAt(3);
5174 Node* control = node->InputAt(4);
5175
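  // Rough shape of the graph built below (descriptive sketch only):
  //   if ObjectIsSmi(value):
  //     result = ChangeInt32ToFloat64(ChangeTaggedSignedToInt32(value))
  //   else:
  //     result = Call(ToNumber/ToNumberConvertBigInt/ToNumeric builtin)
  //     if ObjectIsSmi(result): convert as above
  //     else:                   result = LoadField(HeapNumber value)
  // with the effect/control paths merged by Merge/EffectPhi/Phi nodes.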
5176 Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), value);
5177 Node* branch0 = graph()->NewNode(
5179 control);
5180
5181 Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
5182 Node* etrue0 = effect;
5183 Node* vtrue0;
5184 {
5185 vtrue0 = graph()->NewNode(simplified()->ChangeTaggedSignedToInt32(), value);
5186 vtrue0 = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue0);
5187 }
5188
5189 Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
5190 Node* efalse0 = effect;
5191 Node* vfalse0;
5192 {
5193 Operator const* op =
5194 node->opcode() == IrOpcode::kJSToNumber
5195 ? (node->opcode() == IrOpcode::kJSToNumberConvertBigInt
5197 : ToNumberOperator())
5199 Node* code = node->opcode() == IrOpcode::kJSToNumber
5200 ? ToNumberCode()
5201 : (node->opcode() == IrOpcode::kJSToNumberConvertBigInt
5203 : ToNumericCode());
5204 vfalse0 = efalse0 = if_false0 = graph()->NewNode(
5205 op, code, value, context, frame_state, efalse0, if_false0);
5206
5207 // Update potential {IfException} uses of {node} to point to the above
5208 // stub call node instead.
5209 Node* on_exception = nullptr;
5210 if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
5211 NodeProperties::ReplaceControlInput(on_exception, vfalse0);
5212 NodeProperties::ReplaceEffectInput(on_exception, efalse0);
5213 if_false0 = graph()->NewNode(common()->IfSuccess(), vfalse0);
5214 }
5215
5216 Node* check1 = graph()->NewNode(simplified()->ObjectIsSmi(), vfalse0);
5217 Node* branch1 = graph()->NewNode(
5219 if_false0);
5220
5221 Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
5222 Node* etrue1 = efalse0;
5223 Node* vtrue1;
5224 {
5225 vtrue1 =
5226 graph()->NewNode(simplified()->ChangeTaggedSignedToInt32(), vfalse0);
5227 vtrue1 = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue1);
5228 }
5229
5230 Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
5231 Node* efalse1 = efalse0;
5232 Node* vfalse1;
5233 {
5234 vfalse1 = efalse1 = graph()->NewNode(
5235 simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), efalse0,
5236 efalse1, if_false1);
5237 }
5238
5239 if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
5240 efalse0 =
5241 graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
5242 vfalse0 =
5244 vtrue1, vfalse1, if_false0);
5245 }
5246
5247 control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
5248 effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
5250 vtrue0, vfalse0, control);
5251
5252 // Replace effect and control uses appropriately.
5253 for (Edge edge : node->use_edges()) {
5255 if (edge.from()->opcode() == IrOpcode::kIfSuccess) {
5256 edge.from()->ReplaceUses(control);
5257 edge.from()->Kill();
5258 } else {
5259 DCHECK_NE(IrOpcode::kIfException, edge.from()->opcode());
5260 edge.UpdateTo(control);
5261 }
5262 } else if (NodeProperties::IsEffectEdge(edge)) {
5263 edge.UpdateTo(effect);
5264 }
5265 }
5266
5267 selector->DeferReplacement(node, value);
5268}
5269
5271 Node* node, RepresentationSelector* selector) {
5272 DCHECK(node->opcode() == IrOpcode::kJSToNumber ||
5273 node->opcode() == IrOpcode::kJSToNumberConvertBigInt ||
5274 node->opcode() == IrOpcode::kJSToNumeric);
5275 Node* value = node->InputAt(0);
5276 Node* context = node->InputAt(1);
5277 Node* frame_state = node->InputAt(2);
5278 Node* effect = node->InputAt(3);
5279 Node* control = node->InputAt(4);
5280
5281 Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), value);
5282 Node* branch0 = graph()->NewNode(
5284 control);
5285
5286 Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
5287 Node* etrue0 = effect;
5288 Node* vtrue0 =
5289 graph()->NewNode(simplified()->ChangeTaggedSignedToInt32(), value);
5290
5291 Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
5292 Node* efalse0 = effect;
5293 Node* vfalse0;
5294 {
5295 Operator const* op =
5296 node->opcode() == IrOpcode::kJSToNumber
5297 ? (node->opcode() == IrOpcode::kJSToNumberConvertBigInt
5299 : ToNumberOperator())
5301 Node* code = node->opcode() == IrOpcode::kJSToNumber
5302 ? ToNumberCode()
5303 : (node->opcode() == IrOpcode::kJSToNumberConvertBigInt
5305 : ToNumericCode());
5306 vfalse0 = efalse0 = if_false0 = graph()->NewNode(
5307 op, code, value, context, frame_state, efalse0, if_false0);
5308
5309 // Update potential {IfException} uses of {node} to point to the above
5310 // stub call node instead.
5311 Node* on_exception = nullptr;
5312 if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
5313 NodeProperties::ReplaceControlInput(on_exception, vfalse0);
5314 NodeProperties::ReplaceEffectInput(on_exception, efalse0);
5315 if_false0 = graph()->NewNode(common()->IfSuccess(), vfalse0);
5316 }
5317
5318 Node* check1 = graph()->NewNode(simplified()->ObjectIsSmi(), vfalse0);
5319 Node* branch1 = graph()->NewNode(
5321 if_false0);
5322
5323 Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
5324 Node* etrue1 = efalse0;
5325 Node* vtrue1 =
5326 graph()->NewNode(simplified()->ChangeTaggedSignedToInt32(), vfalse0);
5327
5328 Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
5329 Node* efalse1 = efalse0;
5330 Node* vfalse1;
5331 {
5332 vfalse1 = efalse1 = graph()->NewNode(
5333 simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), efalse0,
5334 efalse1, if_false1);
5335 vfalse1 = graph()->NewNode(machine()->TruncateFloat64ToWord32(), vfalse1);
5336 }
5337
5338 if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
5339 efalse0 =
5340 graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
5341 vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
5342 vtrue1, vfalse1, if_false0);
5343 }
5344
5345 control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
5346 effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
5347 value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
5348 vtrue0, vfalse0, control);
5349
5350 // Replace effect and control uses appropriately.
5351 for (Edge edge : node->use_edges()) {
5353 if (edge.from()->opcode() == IrOpcode::kIfSuccess) {
5354 edge.from()->ReplaceUses(control);
5355 edge.from()->Kill();
5356 } else {
5357 DCHECK_NE(IrOpcode::kIfException, edge.from()->opcode());
5358 edge.UpdateTo(control);
5359 }
5360 } else if (NodeProperties::IsEffectEdge(edge)) {
5361 edge.UpdateTo(effect);
5362 }
5363 }
5364
5365 selector->DeferReplacement(node, value);
5366}
5367
5369 Node* const one = jsgraph()->Float64Constant(1.0);
5370 Node* const one_half = jsgraph()->Float64Constant(0.5);
5371 Node* const input = node->InputAt(0);
5372
5373 // Round up towards Infinity, and adjust if the difference exceeds 0.5.
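  // Worked examples (assuming this is the Math.round-style lowering, i.e.
  // rounding half towards +Infinity):
  //   input =  2.3: RoundUp ->  3,  3 - 0.5 =  2.5 >   2.3 => result - 1 =  2
  //   input =  2.5: RoundUp ->  3,  3 - 0.5 =  2.5 <=  2.5 => result     =  3
  //   input = -2.5: RoundUp -> -2, -2 - 0.5 = -2.5 <= -2.5 => result     = -2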
5374 Node* result = graph()->NewNode(machine()->Float64RoundUp().placeholder(),
5375 node->InputAt(0));
5376 return graph()->NewNode(
5378 graph()->NewNode(
5379 machine()->Float64LessThanOrEqual(),
5380 graph()->NewNode(machine()->Float64Sub(), result, one_half), input),
5381 result, graph()->NewNode(machine()->Float64Sub(), result, one));
5382}
5383
5385 Node* const minus_one = jsgraph()->Float64Constant(-1.0);
5386 Node* const zero = jsgraph()->Float64Constant(0.0);
5387 Node* const one = jsgraph()->Float64Constant(1.0);
5388
5389 Node* const input = node->InputAt(0);
5390
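  // Reads roughly as (the elided operators are float64 selects):
  //   input < 0.0 ? -1.0 : (0.0 < input ? 1.0 : input)
  // so -0, +0 and NaN fall through to the last case and are returned as-is.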
5391 return graph()->NewNode(
5393 graph()->NewNode(machine()->Float64LessThan(), input, zero), minus_one,
5394 graph()->NewNode(
5396 graph()->NewNode(machine()->Float64LessThan(), zero, input), one,
5397 input));
5398}
5399
5401 Node* const input = node->InputAt(0);
5402
5403 // Generate case for absolute integer value.
5404 //
5405 // let sign = input >> 31 in
5406 // (input ^ sign) - sign
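  // Worked example: for input = -5, sign = -5 >> 31 = -1 (all bits set),
  // (-5 ^ -1) = 4 and 4 - (-1) = 5; for input = 7, sign = 0 and the value is
  // unchanged. Note that kMinInt maps back to itself under int32 wrapping.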
5407
5408 Node* sign = graph()->NewNode(machine()->Word32Sar(), input,
5409 jsgraph()->Int32Constant(31));
5410 return graph()->NewNode(machine()->Int32Sub(),
5411 graph()->NewNode(machine()->Word32Xor(), input, sign),
5412 sign);
5413}
5414
5416 Int32BinopMatcher m(node);
5417 Node* const zero = jsgraph()->Int32Constant(0);
5418 Node* const minus_one = jsgraph()->Int32Constant(-1);
5419 Node* const lhs = m.left().node();
5420 Node* const rhs = m.right().node();
5421
5422 if (m.right().Is(-1)) {
5423 return graph()->NewNode(machine()->Int32Sub(), zero, lhs);
5424 } else if (m.right().Is(0)) {
5425 return rhs;
5426 } else if (machine()->Int32DivIsSafe() || m.right().HasResolvedValue()) {
5427 return graph()->NewNode(machine()->Int32Div(), lhs, rhs, graph()->start());
5428 }
5429
5430 // General case for signed integer division.
5431 //
5432 // if 0 < rhs then
5433 // lhs / rhs
5434 // else
5435 // if rhs < -1 then
5436 // lhs / rhs
5437 // else if rhs == 0 then
5438 // 0
5439 // else
5440 // 0 - lhs
5441 //
5442 // Note: We do not use the Diamond helper class here, because it really hurts
5443 // readability with nested diamonds.
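  // The rhs == -1 arm exists because kMinInt / -1 overflows int32 (and traps
  // on some architectures), while 0 - lhs wraps to the expected result; the
  // rhs == 0 arm yields 0, matching JavaScript's (lhs / rhs) | 0 semantics.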
5444 const Operator* const merge_op = common()->Merge(2);
5445 const Operator* const phi_op =
5447
5448 Node* check0 = graph()->NewNode(machine()->Int32LessThan(), zero, rhs);
5449 Node* branch0 = graph()->NewNode(
5451 graph()->start());
5452
5453 Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
5454 Node* true0 = graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_true0);
5455
5456 Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
5457 Node* false0;
5458 {
5459 Node* check1 = graph()->NewNode(machine()->Int32LessThan(), rhs, minus_one);
5460 Node* branch1 = graph()->NewNode(
5462 if_false0);
5463
5464 Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
5465 Node* true1 = graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_true1);
5466
5467 Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
5468 Node* false1;
5469 {
5470 Node* check2 = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
5471 Node* branch2 = graph()->NewNode(
5473 check2, if_false1);
5474
5475 Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
5476 Node* true2 = zero;
5477
5478 Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
5479 Node* false2 = graph()->NewNode(machine()->Int32Sub(), zero, lhs);
5480
5481 if_false1 = graph()->NewNode(merge_op, if_true2, if_false2);
5482 false1 = graph()->NewNode(phi_op, true2, false2, if_false1);
5483 }
5484
5485 if_false0 = graph()->NewNode(merge_op, if_true1, if_false1);
5486 false0 = graph()->NewNode(phi_op, true1, false1, if_false0);
5487 }
5488
5489 Node* merge0 = graph()->NewNode(merge_op, if_true0, if_false0);
5490 return graph()->NewNode(phi_op, true0, false0, merge0);
5491}
5492
5494 Int32BinopMatcher m(node);
5495 Node* const zero = jsgraph()->Int32Constant(0);
5496 Node* const minus_one = jsgraph()->Int32Constant(-1);
5497 Node* const lhs = m.left().node();
5498 Node* const rhs = m.right().node();
5499
5500 if (m.right().Is(-1) || m.right().Is(0)) {
5501 return zero;
5502 } else if (m.right().HasResolvedValue()) {
5503 return graph()->NewNode(machine()->Int32Mod(), lhs, rhs, graph()->start());
5504 }
5505
5506 // General case for signed integer modulus, with optimization for (unknown)
5507 // power of 2 right hand side.
5508 //
5509 // if 0 < rhs then
5510 // msk = rhs - 1
5511 // if rhs & msk != 0 then
5512 // lhs % rhs
5513 // else
5514 // if lhs < 0 then
5515 // -(-lhs & msk)
5516 // else
5517 // lhs & msk
5518 // else
5519 // if rhs < -1 then
5520 // lhs % rhs
5521 // else
5522 // zero
5523 //
5524 // Note: We do not use the Diamond helper class here, because it really hurts
5525 // readability with nested diamonds.
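  // Worked example for the mask path: rhs = 8 gives msk = 7 and rhs & msk == 0,
  // so lhs = 13 yields 13 & 7 = 5 and lhs = -13 yields -(13 & 7) = -5, matching
  // the sign-of-dividend semantics of the generic Int32Mod.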
5526 const Operator* const merge_op = common()->Merge(2);
5527 const Operator* const phi_op =
5529
5530 Node* check0 = graph()->NewNode(machine()->Int32LessThan(), zero, rhs);
5531 Node* branch0 = graph()->NewNode(
5533 graph()->start());
5534
5535 Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
5536 Node* true0;
5537 {
5538 Node* msk = graph()->NewNode(machine()->Int32Add(), rhs, minus_one);
5539
5540 Node* check1 = graph()->NewNode(machine()->Word32And(), rhs, msk);
5541 Node* branch1 = graph()->NewNode(
5543 if_true0);
5544
5545 Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
5546 Node* true1 = graph()->NewNode(machine()->Int32Mod(), lhs, rhs, if_true1);
5547
5548 Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
5549 Node* false1;
5550 {
5551 Node* check2 = graph()->NewNode(machine()->Int32LessThan(), lhs, zero);
5552 Node* branch2 = graph()->NewNode(
5554 check2, if_false1);
5555
5556 Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
5557 Node* true2 = graph()->NewNode(
5558 machine()->Int32Sub(), zero,
5559 graph()->NewNode(machine()->Word32And(),
5560 graph()->NewNode(machine()->Int32Sub(), zero, lhs),
5561 msk));
5562
5563 Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
5564 Node* false2 = graph()->NewNode(machine()->Word32And(), lhs, msk);
5565
5566 if_false1 = graph()->NewNode(merge_op, if_true2, if_false2);
5567 false1 = graph()->NewNode(phi_op, true2, false2, if_false1);
5568 }
5569
5570 if_true0 = graph()->NewNode(merge_op, if_true1, if_false1);
5571 true0 = graph()->NewNode(phi_op, true1, false1, if_true0);
5572 }
5573
5574 Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
5575 Node* false0;
5576 {
5577 Node* check1 = graph()->NewNode(machine()->Int32LessThan(), rhs, minus_one);
5578 Node* branch1 = graph()->NewNode(
5580 if_false0);
5581
5582 Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
5583 Node* true1 = graph()->NewNode(machine()->Int32Mod(), lhs, rhs, if_true1);
5584
5585 Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
5586 Node* false1 = zero;
5587
5588 if_false0 = graph()->NewNode(merge_op, if_true1, if_false1);
5589 false0 = graph()->NewNode(phi_op, true1, false1, if_false0);
5590 }
5591
5592 Node* merge0 = graph()->NewNode(merge_op, if_true0, if_false0);
5593 return graph()->NewNode(phi_op, true0, false0, merge0);
5594}
5595
5597 Node* const minus_one = jsgraph()->Int32Constant(-1);
5598 Node* const zero = jsgraph()->Int32Constant(0);
5599 Node* const one = jsgraph()->Int32Constant(1);
5600
5601 Node* const input = node->InputAt(0);
5602
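  // Reads roughly as (the elided operators are word32 selects):
  //   input < 0 ? -1 : (0 < input ? 1 : 0)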
5603 return graph()->NewNode(
5605 graph()->NewNode(machine()->Int32LessThan(), input, zero), minus_one,
5606 graph()->NewNode(
5608 graph()->NewNode(machine()->Int32LessThan(), zero, input), one,
5609 zero));
5610}
5611
5613 Uint32BinopMatcher m(node);
5614 Node* const zero = jsgraph()->Uint32Constant(0);
5615 Node* const lhs = m.left().node();
5616 Node* const rhs = m.right().node();
5617
5618 if (m.right().Is(0)) {
5619 return zero;
5620 } else if (machine()->Uint32DivIsSafe() || m.right().HasResolvedValue()) {
5621 return graph()->NewNode(machine()->Uint32Div(), lhs, rhs, graph()->start());
5622 }
5623
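  // General case: the machine Uint32Div must not see a zero divisor (division
  // by zero traps on some architectures), and the lowered JavaScript semantics
  // require a 0 result in that case, hence the explicit rhs == 0 diamond below.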
5624 Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
5625 Diamond d(graph(), common(), check, BranchHint::kFalse,
5627 Node* div = graph()->NewNode(machine()->Uint32Div(), lhs, rhs, d.if_false);
5628 return d.Phi(MachineRepresentation::kWord32, zero, div);
5629}
5630
5632 Uint32BinopMatcher m(node);
5633 Node* const minus_one = jsgraph()->Int32Constant(-1);
5634 Node* const zero = jsgraph()->Uint32Constant(0);
5635 Node* const lhs = m.left().node();
5636 Node* const rhs = m.right().node();
5637
5638 if (m.right().Is(0)) {
5639 return zero;
5640 } else if (m.right().HasResolvedValue()) {
5641 return graph()->NewNode(machine()->Uint32Mod(), lhs, rhs, graph()->start());
5642 }
5643
5644 // General case for unsigned integer modulus, with optimization for (unknown)
5645 // power of 2 right hand side.
5646 //
5647 // if rhs == 0 then
5648 // zero
5649 // else
5650 // msk = rhs - 1
5651 // if rhs & msk != 0 then
5652 // lhs % rhs
5653 // else
5654 // lhs & msk
5655 //
5656 // Note: We do not use the Diamond helper class here, because it really hurts
5657 // readability with nested diamonds.
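  // Worked example for the mask path: rhs = 16 gives msk = 15 and
  // rhs & msk == 0, so e.g. lhs = 37 yields 37 & 15 = 5; a non-power-of-two
  // rhs such as 12 has rhs & msk != 0 and falls back to the real Uint32Mod.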
5658 const Operator* const merge_op = common()->Merge(2);
5659 const Operator* const phi_op =
5661
5662 Node* check0 = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
5663 Node* branch0 = graph()->NewNode(
5665 graph()->start());
5666
5667 Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
5668 Node* true0 = zero;
5669
5670 Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
5671 Node* false0;
5672 {
5673 Node* msk = graph()->NewNode(machine()->Int32Add(), rhs, minus_one);
5674
5675 Node* check1 = graph()->NewNode(machine()->Word32And(), rhs, msk);
5676 Node* branch1 = graph()->NewNode(
5678 if_false0);
5679
5680 Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
5681 Node* true1 = graph()->NewNode(machine()->Uint32Mod(), lhs, rhs, if_true1);
5682
5683 Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
5684 Node* false1 = graph()->NewNode(machine()->Word32And(), lhs, msk);
5685
5686 if_false0 = graph()->NewNode(merge_op, if_true1, if_false1);
5687 false0 = graph()->NewNode(phi_op, true1, false1, if_false0);
5688 }
5689
5690 Node* merge0 = graph()->NewNode(merge_op, if_true0, if_false0);
5691 return graph()->NewNode(phi_op, true0, false0, merge0);
5692}
5693
5696 Node* const lhs = node->InputAt(0);
5697 Node* const rhs = node->InputAt(1);
5698
5699 node->ReplaceInput(0, graph()->NewNode(op, lhs, rhs));
5700 DCHECK_EQ(rhs, node->InputAt(1));
5701 node->AppendInput(graph()->zone(), lhs);
5702 ChangeOp(node, common()->Select(rep));
5703}
5704
5707 Node* const lhs = node->InputAt(0);
5708 Node* const rhs = node->InputAt(1);
5709
5710 node->InsertInput(graph()->zone(), 0, graph()->NewNode(op, lhs, rhs));
5711 DCHECK_EQ(lhs, node->InputAt(1));
5712 DCHECK_EQ(rhs, node->InputAt(2));
5713 ChangeOp(node, common()->Select(rep));
5714}
5715
5717 Node* const input = node->InputAt(0);
5718 Node* const zero = jsgraph()->Int32Constant(0);
5719 Operator const* const op = machine()->Word32Equal();
5720
5721 node->ReplaceInput(0, graph()->NewNode(op, input, zero));
5722 node->AppendInput(graph()->zone(), zero);
5723 ChangeOp(node, op);
5724}
5725
5727 Node* const input = node->InputAt(0);
5728
5729 node->ReplaceInput(0, graph()->NewNode(machine()->Float64Equal(), input,
5730 jsgraph()->Float64Constant(0.0)));
5731 node->AppendInput(graph()->zone(), jsgraph()->Int32Constant(0));
5732 ChangeOp(node, machine()->Word32Equal());
5733}
5734
5736 Node* const input = node->InputAt(0);
5737
5738 node->ReplaceInput(0, jsgraph()->Float64Constant(0.0));
5739 node->AppendInput(graph()->zone(),
5740 graph()->NewNode(machine()->Float64Abs(), input));
5741 ChangeOp(node, machine()->Float64LessThan());
5742}
5743
5745 Node* const input = node->InputAt(0);
5746 Node* const min = jsgraph()->Float64Constant(0.0);
5747 Node* const max = jsgraph()->Float64Constant(255.0);
5748
5749 node->ReplaceInput(
5750 0, graph()->NewNode(machine()->Float64LessThan(), min, input));
5751 node->AppendInput(
5752 graph()->zone(),
5753 graph()->NewNode(
5755 graph()->NewNode(machine()->Float64LessThan(), input, max), input,
5756 max));
5757 node->AppendInput(graph()->zone(), min);
5759}
5760
5762 Node* const input = node->InputAt(0);
5763 Node* const min = jsgraph()->Float64Constant(0.0);
5764 Node* const max = jsgraph()->Float64Constant(255.0);
5765
5766 node->ReplaceInput(
5767 0, graph()->NewNode(
5769 graph()->NewNode(machine()->Float64LessThan(), min, input),
5770 graph()->NewNode(
5772 graph()->NewNode(machine()->Float64LessThan(), input, max),
5773 input, max),
5774 min));
5775 ChangeOp(node, machine()->Float64RoundTiesEven().placeholder());
5776}
5777
5779 Node* const input = node->InputAt(0);
5780 Node* const min = jsgraph()->Int32Constant(0);
5781 Node* const max = jsgraph()->Int32Constant(255);
5782
5783 node->ReplaceInput(
5784 0, graph()->NewNode(machine()->Int32LessThanOrEqual(), input, max));
5785 node->AppendInput(
5786 graph()->zone(),
5787 graph()->NewNode(common()->Select(MachineRepresentation::kWord32),
5788 graph()->NewNode(machine()->Int32LessThan(), input, min),
5789 min, input));
5790 node->AppendInput(graph()->zone(), max);
5792}
5793
5795 Node* const input = node->InputAt(0);
5796 Node* const max = jsgraph()->Uint32Constant(255u);
5797
5798 node->ReplaceInput(
5799 0, graph()->NewNode(machine()->Uint32LessThanOrEqual(), input, max));
5800 node->AppendInput(graph()->zone(), input);
5801 node->AppendInput(graph()->zone(), max);
5803}
5804
5806 if (!to_number_code_.is_set()) {
5807 Callable callable = Builtins::CallableFor(isolate(), Builtin::kToNumber);
5808 to_number_code_.set(jsgraph()->HeapConstantNoHole(callable.code()));
5809 }
5810 return to_number_code_.get();
5811}
5812
5814 if (!to_number_convert_big_int_code_.is_set()) {
5815 Callable callable =
5816 Builtins::CallableFor(isolate(), Builtin::kToNumberConvertBigInt);
5818 jsgraph()->HeapConstantNoHole(callable.code()));
5819 }
5821}
5822
5824 if (!to_numeric_code_.is_set()) {
5825 Callable callable = Builtins::CallableFor(isolate(), Builtin::kToNumeric);
5826 to_numeric_code_.set(jsgraph()->HeapConstantNoHole(callable.code()));
5827 }
5828 return to_numeric_code_.get();
5829}
5830
5832 if (!to_number_operator_.is_set()) {
5833 Callable callable = Builtins::CallableFor(isolate(), Builtin::kToNumber);
5835 auto call_descriptor = Linkage::GetStubCallDescriptor(
5836 graph()->zone(), callable.descriptor(),
5837 callable.descriptor().GetStackParameterCount(), flags,
5839 to_number_operator_.set(common()->Call(call_descriptor));
5840 }
5841 return to_number_operator_.get();
5842}
5843
5846 Callable callable =
5847 Builtins::CallableFor(isolate(), Builtin::kToNumberConvertBigInt);
5849 auto call_descriptor = Linkage::GetStubCallDescriptor(
5850 graph()->zone(), callable.descriptor(),
5851 callable.descriptor().GetStackParameterCount(), flags,
5852 Operator::kNoProperties);
5853 to_number_convert_big_int_operator_.set(common()->Call(call_descriptor));
5854 }
5855 return to_number_convert_big_int_operator_.get();
5856}
5857
5858Operator const* SimplifiedLowering::ToNumericOperator() {
5859 if (!to_numeric_operator_.is_set()) {
5860 Callable callable = Builtins::CallableFor(isolate(), Builtin::kToNumeric);
5861 CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
5862 auto call_descriptor = Linkage::GetStubCallDescriptor(
5863 graph()->zone(), callable.descriptor(),
5864 callable.descriptor().GetStackParameterCount(), flags,
5865 Operator::kNoProperties);
5866 to_numeric_operator_.set(common()->Call(call_descriptor));
5867 }
5868 return to_numeric_operator_.get();
5869}
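// The *Operator() getters follow the same lazy pattern, but cache a
// common()->Call operator whose CallDescriptor is derived from the builtin's
// CallInterfaceDescriptor; call sites pair such an operator with the matching
// *Code() constant from above. Rough usage sketch (the exact input order is
// dictated by the linkage and is illustrative here):
//
//   Node* call = graph()->NewNode(ToNumberOperator(), ToNumberCode(), value,
//                                 context, frame_state, effect, control);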
5870
5871void SimplifiedLowering::ChangeOp(Node* node, const Operator* new_op) {
5872 compiler::NodeProperties::ChangeOp(node, new_op);
5873
5874 if (V8_UNLIKELY(observe_node_manager_ != nullptr))
5875 observe_node_manager_->OnNodeChanged(kSimplifiedLoweringReducerName, node,
5876 node);
5877}
5878
5879#undef TRACE
5880
5881} // namespace compiler
5882} // namespace internal
5883} // namespace v8