v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
instruction-selector.cc
1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
7#include <limits>
8#include <optional>
9
10#include "include/v8-internal.h"
11#include "src/base/iterator.h"
14#include "src/common/globals.h"
28
29#if V8_ENABLE_WEBASSEMBLY
31#endif // V8_ENABLE_WEBASSEMBLY
32
33namespace v8 {
34namespace internal {
35namespace compiler {
36
37#define VISIT_UNSUPPORTED_OP(op) \
38 void InstructionSelectorT::Visit##op(OpIndex) { UNIMPLEMENTED(); }
39
40using namespace turboshaft; // NOLINT(build/namespaces)
41
42namespace {
43// Here we really want the raw Bits of the mask, but the `.bits()` method is
44// not constexpr, and so users of this constant need to call it.
45// TODO(turboshaft): EffectDimensions could probably be defined via
46// base::Flags<> instead, which should solve this.
47constexpr EffectDimensions kTurboshaftEffectLevelMask =
48 OpEffects().CanReadMemory().produces;
49}
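// --- Illustrative sketch, not part of the original file ---
// The comment above notes that EffectDimensions could be defined via
// base::Flags<> so that the mask is usable in constexpr contexts. A minimal
// stand-in for that idea using a plain integer mask; every name below
// (effect_mask_sketch, SketchEffectDim, ...) is hypothetical.
namespace effect_mask_sketch {
enum SketchEffectDim : uint32_t {
  kLoadHeapMemory = 1u << 0,
  kLoadOffHeapMemory = 1u << 1,
};
// With a raw integer mask, callers can compare bits at compile time instead
// of calling a non-constexpr bits() accessor.
constexpr uint32_t kSketchEffectLevelMask = kLoadHeapMemory | kLoadOffHeapMemory;
static_assert((kSketchEffectLevelMask & kLoadHeapMemory) != 0);
}  // namespace effect_mask_sketch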
50
52 Zone* zone, size_t node_count, Linkage* linkage,
55 InstructionSelector::EnableSwitchJumpTable enable_switch_jump_table,
56 TickCounter* tick_counter, JSHeapBroker* broker,
57 size_t* max_unoptimized_frame_height, size_t* max_pushed_argument_count,
58 InstructionSelector::SourcePositionMode source_position_mode,
59 Features features, InstructionSelector::EnableScheduling enable_scheduling,
61 enable_roots_relative_addressing,
64 zone_(zone),
66 sequence_(sequence),
67 source_positions_(source_positions),
68 source_position_mode_(source_position_mode),
69 features_(features),
71 current_block_(nullptr),
72 instructions_(zone),
73 continuation_inputs_(sequence->zone()),
74 continuation_outputs_(sequence->zone()),
75 continuation_temps_(sequence->zone()),
76 defined_(static_cast<int>(node_count), zone),
77 used_(static_cast<int>(node_count), zone),
78 effect_level_(node_count, 0, zone),
79 virtual_registers_(node_count,
80 InstructionOperand::kInvalidVirtualRegister, zone),
81 virtual_register_rename_(zone),
82 scheduler_(nullptr),
83 enable_scheduling_(enable_scheduling),
84 enable_roots_relative_addressing_(enable_roots_relative_addressing),
85 enable_switch_jump_table_(enable_switch_jump_table),
86 state_values_cache_(zone),
87 frame_(frame),
88 instruction_selection_failed_(false),
89 instr_origins_(sequence->zone()),
90 trace_turbo_(trace_turbo),
91 tick_counter_(tick_counter),
93 max_unoptimized_frame_height_(max_unoptimized_frame_height),
94 max_pushed_argument_count_(max_pushed_argument_count)
95#if V8_TARGET_ARCH_64_BIT
96 ,
97 node_count_(node_count),
98 phi_states_(zone)
99#endif
100{
102 protected_loads_to_remove_.emplace(static_cast<int>(node_count), zone);
103 additional_protected_instructions_.emplace(static_cast<int>(node_count),
104 zone);
105
106 DCHECK_EQ(*max_unoptimized_frame_height, 0); // Caller-initialized.
107
108 instructions_.reserve(node_count);
111
113 instr_origins_.assign(node_count, {-1, 0});
114 }
115}
116
117std::optional<BailoutReason> InstructionSelectorT::SelectInstructions() {
118 // Mark the inputs of all phis in loop headers as used.
120 for (const Block* block : blocks) {
121 if (!IsLoopHeader(block)) continue;
122 DCHECK_LE(2u, PredecessorCount(block));
123 for (OpIndex node : nodes(block)) {
124 const PhiOp* phi = TryCast<PhiOp>(node);
125 if (!phi) continue;
126
127 // Mark all inputs as used.
128 for (OpIndex input : phi->inputs()) {
129 MarkAsUsed(input);
130 }
131 }
132 }
133
134 // Visit each basic block in post order.
135 for (auto i = blocks.rbegin(); i != blocks.rend(); ++i) {
136 VisitBlock(*i);
138 return BailoutReason::kCodeGenerationFailed;
139 }
140
141 // Schedule the selected instructions.
143 scheduler_ = zone()->template New<InstructionScheduler>(zone(), sequence());
144 }
145
146 for (const Block* block : blocks) {
147 InstructionBlock* instruction_block =
148 sequence()->InstructionBlockAt(this->rpo_number(block));
149 for (size_t i = 0; i < instruction_block->phis().size(); i++) {
150 UpdateRenamesInPhi(instruction_block->PhiAt(i));
151 }
152 size_t end = instruction_block->code_end();
153 size_t start = instruction_block->code_start();
155 StartBlock(this->rpo_number(block));
156 if (end != start) {
157 while (start-- > end + 1) {
160 }
163 }
164 EndBlock(this->rpo_number(block));
165 }
166#if DEBUG
168#endif
169 return std::nullopt;
170}
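// --- Illustrative sketch, not part of the original file ---
// SelectInstructions() above selects blocks in reverse RPO (so uses are seen
// before their definitions) and then emits the per-block instruction ranges
// in forward RPO order between StartBlock/EndBlock calls. A simplified model
// of that two-pass structure; SketchBlock and the callbacks are hypothetical
// and std::vector is assumed to be available via the existing includes.
namespace rpo_order_sketch {
struct SketchBlock { int rpo_number; };
template <typename SelectFn, typename EmitFn>
void SelectThenEmit(const std::vector<SketchBlock>& rpo_blocks,
                    SelectFn&& select, EmitFn&& emit) {
  // Pass 1: instruction selection walks the blocks backwards.
  for (auto it = rpo_blocks.rbegin(); it != rpo_blocks.rend(); ++it) {
    select(*it);
  }
  // Pass 2: emission walks the blocks forwards.
  for (const SketchBlock& block : rpo_blocks) {
    emit(block);
  }
}
}  // namespace rpo_order_sketch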
171
176 } else {
177 sequence()->StartBlock(rpo);
178 }
179}
180
184 scheduler_->EndBlock(rpo);
185 } else {
186 sequence()->EndBlock(rpo);
187 }
188}
189
198
207
209 InstructionOperand output,
210 size_t temp_count,
211 InstructionOperand* temps) {
212 size_t output_count = output.IsInvalid() ? 0 : 1;
213 return Emit(opcode, output_count, &output, 0, nullptr, temp_count, temps);
214}
215
217 InstructionOperand output,
218 InstructionOperand a, size_t temp_count,
219 InstructionOperand* temps) {
220 size_t output_count = output.IsInvalid() ? 0 : 1;
221 return Emit(opcode, output_count, &output, 1, &a, temp_count, temps);
222}
223
225 InstructionOperand output,
227 InstructionOperand b, size_t temp_count,
228 InstructionOperand* temps) {
229 size_t output_count = output.IsInvalid() ? 0 : 1;
230 InstructionOperand inputs[] = {a, b};
231 size_t input_count = arraysize(inputs);
232 return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
233 temps);
234}
235
237 InstructionOperand output,
240 InstructionOperand c, size_t temp_count,
241 InstructionOperand* temps) {
242 size_t output_count = output.IsInvalid() ? 0 : 1;
243 InstructionOperand inputs[] = {a, b, c};
244 size_t input_count = arraysize(inputs);
245 return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
246 temps);
247}
248
252 size_t temp_count, InstructionOperand* temps) {
253 size_t output_count = output.IsInvalid() ? 0 : 1;
254 InstructionOperand inputs[] = {a, b, c, d};
255 size_t input_count = arraysize(inputs);
256 return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
257 temps);
258}
259
263 InstructionOperand e, size_t temp_count, InstructionOperand* temps) {
264 size_t output_count = output.IsInvalid() ? 0 : 1;
265 InstructionOperand inputs[] = {a, b, c, d, e};
266 size_t input_count = arraysize(inputs);
267 return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
268 temps);
269}
270
274 InstructionOperand e, InstructionOperand f, size_t temp_count,
275 InstructionOperand* temps) {
276 size_t output_count = output.IsInvalid() ? 0 : 1;
277 InstructionOperand inputs[] = {a, b, c, d, e, f};
278 size_t input_count = arraysize(inputs);
279 return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
280 temps);
281}
282
287 InstructionOperand h, size_t temp_count, InstructionOperand* temps) {
288 size_t output_count = output.IsInvalid() ? 0 : 1;
289 InstructionOperand inputs[] = {a, b, c, d, e, f, g, h};
290 size_t input_count = arraysize(inputs);
291 return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
292 temps);
293}
294
296 InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
297 size_t input_count, InstructionOperand* inputs, size_t temp_count,
298 InstructionOperand* temps) {
299 if (output_count >= Instruction::kMaxOutputCount ||
300 input_count >= Instruction::kMaxInputCount ||
301 temp_count >= Instruction::kMaxTempCount) {
303 return nullptr;
304 }
305
306 Instruction* instr =
307 Instruction::New(instruction_zone(), opcode, output_count, outputs,
308 input_count, inputs, temp_count, temps);
309 return Emit(instr);
310}
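// --- Illustrative sketch, not part of the original file ---
// The fixed-arity Emit() overloads above all funnel into the single
// (output_count, input_count, temp_count) overload, which bails out when an
// operand count exceeds the instruction encoding limits. A generic stand-in
// for that "thin convenience overloads over one checked core" pattern; the
// names and the limit below are hypothetical.
namespace emit_overload_sketch {
struct SketchOperand { int value = 0; };
constexpr size_t kSketchMaxInputCount = 8;  // hypothetical limit
inline bool EmitCore(const SketchOperand* inputs, size_t input_count) {
  if (input_count >= kSketchMaxInputCount) return false;  // mirrors the bail-out above
  // ... record the instruction ...
  return true;
}
inline bool EmitTwo(SketchOperand a, SketchOperand b) {
  SketchOperand inputs[] = {a, b};
  return EmitCore(inputs, sizeof(inputs) / sizeof(inputs[0]));
}
}  // namespace emit_overload_sketch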
311
316
317namespace {
318bool is_exclusive_user_of(const Graph* graph, OpIndex user, OpIndex value) {
319 DCHECK(user.valid());
320 DCHECK(value.valid());
321 const Operation& value_op = graph->Get(value);
322 const Operation& user_op = graph->Get(user);
323 size_t use_count = base::count_if(
324 user_op.inputs(), [value](OpIndex input) { return input == value; });
325 if (V8_UNLIKELY(use_count == 0)) {
326 // We have a special case here:
327 //
328 // value
329 // |
330 // TruncateWord64ToWord32
331 // |
332 // user
333 //
334 // If emitting user performs the truncation implicitly, we end up calling
335 // CanCover with value and user such that user might have no (direct) uses
336 // of value. There are cases of other unnecessary operations that can lead
337 // to the same situation (e.g. bitwise and, ...). In this case, we still
338 // cover if value has only a single use and this is one of the direct
339 // inputs of user, which also only has a single use (in user).
340 // TODO(nicohartmann@): We might generalize this further if we see use
341 // cases.
342 if (!value_op.saturated_use_count.IsOne()) return false;
343 for (auto input : user_op.inputs()) {
344 const Operation& input_op = graph->Get(input);
345 const size_t indirect_use_count = base::count_if(
346 input_op.inputs(), [value](OpIndex input) { return input == value; });
347 if (indirect_use_count > 0) {
348 return input_op.saturated_use_count.IsOne();
349 }
350 }
351 return false;
352 }
353 if (value_op.Is<ProjectionOp>()) {
354 // Projections always have a Tuple use, but it shouldn't count as a use as
355 // far as is_exclusive_user_of is concerned, since no instructions are
356 // emitted for the TupleOp, which is just a Turboshaft "meta operation".
357 // We thus increase the use_count by 1, to attribute the TupleOp use to
358 // the current operation.
359 use_count++;
360 }
361 DCHECK_LE(use_count, graph->Get(value).saturated_use_count.Get());
362 return (value_op.saturated_use_count.Get() == use_count) &&
364}
365} // namespace
366
368 // 1. Both {user} and {node} must be in the same basic block.
369 if (block(schedule(), node) != current_block_) {
370 return false;
371 }
372
373 const Operation& op = Get(node);
374 // 2. If node does not produce anything, it can be covered.
375 if (op.Effects().produces.bits() == 0) {
376 return is_exclusive_user_of(schedule(), user, node);
377 }
378
379 // 3. Otherwise, the {node}'s effect level must match the {user}'s.
380 if (GetEffectLevel(node) != current_effect_level_) {
381 return false;
382 }
383
384 // 4. Only {node} must have value edges pointing to {user}.
385 return is_exclusive_user_of(schedule(), user, node);
386}
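// --- Illustrative sketch, not part of the original file ---
// CanCover() above combines three checks: the candidate must be in the same
// basic block as its user, its effect level must match the current one if it
// produces effects, and its only value uses must be in the user. A simplified
// stand-in over hypothetical per-node fields:
namespace can_cover_sketch {
struct SketchNode {
  int block_id;
  int effect_level;
  bool has_effects;
  bool exclusively_used_by_user;
};
inline bool CanCoverSketch(const SketchNode& user, const SketchNode& node,
                           int current_effect_level) {
  if (node.block_id != user.block_id) return false;                   // rule 1
  if (node.has_effects && node.effect_level != current_effect_level)  // rule 3
    return false;
  return node.exclusively_used_by_user;                               // rules 2/4
}
}  // namespace can_cover_sketch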
387
389 OpIndex node) const {
390 DCHECK(CanCover(user, node));
391 const Graph* graph = this->turboshaft_graph();
392 for (OpIndex next = graph->NextIndex(node); next.valid();
393 next = graph->NextIndex(next)) {
394 if (next == user) break;
395 const Operation& op = graph->Get(next);
396 OpEffects effects = op.Effects();
397 if (effects.produces.control_flow || effects.required_when_unused) {
398 return false;
399 }
400 }
401 return true;
402}
403
405 OpIndex node) const {
406 Block* bb_user = this->block(schedule(), user);
407 Block* bb_node = this->block(schedule(), node);
408 if (bb_user != bb_node) return false;
409
410 const Operation& node_op = this->turboshaft_graph()->Get(node);
411 if (node_op.saturated_use_count.Get() == 1) return true;
412 for (OpIndex use : turboshaft_uses(node)) {
413 if (use == user) continue;
414 if (this->block(schedule(), use) == bb_user) return false;
415 }
416 return true;
417}
418
420 size_t projection_index) {
421 const Graph* graph = this->turboshaft_graph();
422 // Projections are always emitted right after the operation.
423 for (OpIndex next = graph->NextIndex(node); next.valid();
424 next = graph->NextIndex(next)) {
425 const ProjectionOp* projection = graph->Get(next).TryCast<ProjectionOp>();
426 if (projection == nullptr) break;
427 DCHECK(!projection->saturated_use_count.IsZero());
428 if (projection->saturated_use_count.IsOne()) {
429 // If the projection has a single use, it is the following tuple, so we
430 // don't return it, since there is no point in emitting it.
431 DCHECK(turboshaft_uses(next).size() == 1 &&
432 graph->Get(turboshaft_uses(next)[0]).Is<TupleOp>());
433 continue;
434 }
435 if (projection->index == projection_index) return next;
436 }
437
438 // If there is no Projection with index {projection_index} following the
439 // operation, then there shouldn't be any such Projection in the graph. We
440 // verify this in Debug mode.
441#ifdef DEBUG
442 for (OpIndex use : turboshaft_uses(node)) {
443 if (const ProjectionOp* projection =
444 this->Get(use).TryCast<ProjectionOp>()) {
445 DCHECK_EQ(projection->input(), node);
446 if (projection->index == projection_index) {
447 // If we found the projection, it should have a single use: a Tuple
448 // (which doesn't count as a regular use since it is just an artifact of
449 // the Turboshaft graph).
450 DCHECK(turboshaft_uses(use).size() == 1 &&
451 graph->Get(turboshaft_uses(use)[0]).Is<TupleOp>());
452 }
453 }
454 }
455#endif // DEBUG
456 return OpIndex::Invalid();
457}
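// --- Illustrative sketch, not part of the original file ---
// FindProjection() exploits the invariant that projections are emitted
// directly after the producing operation, so a short forward scan is enough.
// A simplified model over a flat vector of records; SketchOp and the -1
// sentinel are hypothetical, and std::vector is assumed to be available.
namespace find_projection_sketch {
struct SketchOp { bool is_projection; size_t projection_index; };
inline int FindProjectionSketch(const std::vector<SketchOp>& ops, size_t node,
                                size_t wanted_index) {
  for (size_t next = node + 1; next < ops.size(); ++next) {
    if (!ops[next].is_projection) break;  // projections are contiguous
    if (ops[next].projection_index == wanted_index) {
      return static_cast<int>(next);
    }
  }
  return -1;  // stand-in for OpIndex::Invalid()
}
}  // namespace find_projection_sketch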
458
460 for (size_t i = 0; i < instruction->InputCount(); i++) {
461 TryRename(instruction->InputAt(i));
462 }
463}
464
466 for (size_t i = 0; i < phi->operands().size(); i++) {
467 int vreg = phi->operands()[i];
468 int renamed = GetRename(vreg);
469 if (vreg != renamed) {
470 phi->RenameInput(i, renamed);
471 }
472 }
473}
474
475int InstructionSelectorT::GetRename(int virtual_register) {
476 int rename = virtual_register;
477 while (true) {
478 if (static_cast<size_t>(rename) >= virtual_register_rename_.size()) break;
479 int next = virtual_register_rename_[rename];
480 if (next == InstructionOperand::kInvalidVirtualRegister) {
481 break;
482 }
483 rename = next;
484 }
485 return rename;
486}
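// --- Illustrative sketch, not part of the original file ---
// GetRename() follows a chain of virtual-register renames until it reaches an
// entry that is out of range or unmapped, much like find() in a union-find
// structure without path compression. A self-contained model; kSketchInvalid
// is a hypothetical sentinel and the table is assumed to be cycle-free (as
// the real rename table is).
namespace rename_chain_sketch {
constexpr int kSketchInvalid = -1;
inline int FollowRenames(const std::vector<int>& rename_table, int vreg) {
  int current = vreg;
  while (static_cast<size_t>(current) < rename_table.size() &&
         rename_table[current] != kSketchInvalid) {
    current = rename_table[current];
  }
  return current;
}
}  // namespace rename_chain_sketch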
487
489 if (!op->IsUnallocated()) return;
490 UnallocatedOperand* unalloc = UnallocatedOperand::cast(op);
491 int vreg = unalloc->virtual_register();
492 int rename = GetRename(vreg);
493 if (rename != vreg) {
494 *unalloc = UnallocatedOperand(*unalloc, rename);
495 }
496}
497
499 int vreg = GetVirtualRegister(node);
500 if (static_cast<size_t>(vreg) >= virtual_register_rename_.size()) {
502 virtual_register_rename_.resize(vreg + 1, invalid);
503 }
505}
506
508 DCHECK(node.valid());
509 size_t const id = node.id();
511 int virtual_register = virtual_registers_[id];
512 if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
513 virtual_register = sequence()->NextVirtualRegister();
514 virtual_registers_[id] = virtual_register;
515 }
516 return virtual_register;
517}
518
519const std::map<uint32_t, int>
521 std::map<uint32_t, int> virtual_registers;
522 for (size_t n = 0; n < virtual_registers_.size(); ++n) {
524 const uint32_t id = static_cast<uint32_t>(n);
525 virtual_registers.insert(std::make_pair(id, virtual_registers_[n]));
526 }
527 }
528 return virtual_registers;
529}
530
532 DCHECK(node.valid());
533 return defined_.Contains(node.id());
534}
535
537 DCHECK(node.valid());
538 defined_.Add(node.id());
539}
540
542 DCHECK(node.valid());
543 if (!ShouldSkipOptimizationStep() && ShouldSkipOperation(this->Get(node))) {
544 return false;
545 }
546 if (Get(node).IsRequiredWhenUnused()) return true;
547 return used_.Contains(node.id());
548}
549
551 DCHECK(node.valid());
552 if (!ShouldSkipOptimizationStep() && ShouldSkipOperation(this->Get(node))) {
553 return false;
554 }
555 return used_.Contains(node.id());
556}
557
559 DCHECK(node.valid());
560 used_.Add(node.id());
561}
562
564 DCHECK(node.valid());
565 size_t const id = node.id();
567 return effect_level_[id];
568}
569
571 FlagsContinuation* cont) const {
572 return cont->IsBranch() ? GetEffectLevel(this->block_terminator(
573 this->PredecessorAt(cont->true_block(), 0)))
574 : GetEffectLevel(node);
575}
576
577void InstructionSelectorT::SetEffectLevel(OpIndex node, int effect_level) {
578 DCHECK(node.valid());
579 size_t const id = node.id();
581 effect_level_[id] = effect_level;
582}
583
585 const ExternalReference& reference) const {
586 // There are three things to consider here:
587 // 1. CanUseRootsRegister: Is kRootRegister initialized?
588 const bool root_register_is_available_and_initialized = CanUseRootsRegister();
589 if (!root_register_is_available_and_initialized) return false;
590
591 // 2. enable_roots_relative_addressing_: Can we address everything on the heap
592 // through the root register, i.e. are root-relative addresses to arbitrary
593 // addresses guaranteed not to change between code generation and
594 // execution?
595 const bool all_root_relative_offsets_are_constant =
598 if (all_root_relative_offsets_are_constant) return true;
599
600 // 3. IsAddressableThroughRootRegister: Is the target address guaranteed to
601 // have a fixed root-relative offset? If so, we can ignore 2.
602 const bool this_root_relative_offset_is_constant =
604 reference);
605 return this_root_relative_offset_is_constant;
606}
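// --- Illustrative sketch, not part of the original file ---
// The decision above boils down to: the root register must be initialized,
// and then either every root-relative offset is a compile-time constant or
// this particular external reference always has a fixed root-relative offset.
// A boolean stand-in for that combination (names hypothetical):
namespace roots_addressing_sketch {
inline bool CanUseRootRelativeSketch(bool root_register_initialized,
                                     bool all_offsets_constant,
                                     bool this_offset_constant) {
  if (!root_register_initialized) return false;
  return all_offsets_constant || this_offset_constant;
}
}  // namespace roots_addressing_sketch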
607
612
614 const InstructionOperand& op) {
615 UnallocatedOperand unalloc = UnallocatedOperand::cast(op);
617}
618
623
624namespace {
625
626InstructionOperand OperandForDeopt(Isolate* isolate, OperandGeneratorT* g,
629 if (rep == MachineRepresentation::kNone) {
631 }
632
633 const Operation& op = g->turboshaft_graph()->Get(input);
634 if (const ConstantOp* constant = op.TryCast<ConstantOp>()) {
635 using Kind = ConstantOp::Kind;
636 switch (constant->kind) {
637 case Kind::kWord32:
638 case Kind::kWord64:
639 case Kind::kSmi:
640 case Kind::kFloat32:
641 case Kind::kFloat64:
642 return g->UseImmediate(input);
643 case Kind::kNumber:
645 const double d = constant->number().get_scalar();
646 Tagged<Smi> smi = Smi::FromInt(static_cast<int32_t>(d));
647 CHECK_EQ(smi.value(), d);
648 return g->UseImmediate(static_cast<int32_t>(smi.ptr()));
649 }
650 return g->UseImmediate(input);
651 case Kind::kHeapObject:
652 case Kind::kCompressedHeapObject:
653 case Kind::kTrustedHeapObject: {
655 // If we have inconsistent static and dynamic types, e.g. if we
656 // smi-check a string, we can get here with a heap object that
657 // says it is a smi. In that case, we return an invalid instruction
658 // operand, which will be interpreted as an optimized-out value.
659
660 // TODO(jarin) Ideally, we should turn the current instruction
661 // into an abort (we should never execute it).
662 return InstructionOperand();
663 }
664
665 Handle<HeapObject> object = constant->handle();
666 RootIndex root_index;
667 if (isolate->roots_table().IsRootHandle(object, &root_index) &&
668 root_index == RootIndex::kOptimizedOut) {
669 // For an optimized-out object we return an invalid instruction
670 // operand, so that we take the fast path for optimized-out values.
671 return InstructionOperand();
672 }
673
674 return g->UseImmediate(input);
675 }
676 default:
678 }
679 } else if (const TaggedBitcastOp* bitcast =
681 const Operation& bitcast_input = g->Get(bitcast->input());
682 if (const ConstantOp* cst =
683 bitcast_input.TryCast<Opmask::kWord32Constant>()) {
684 if constexpr (Is64()) {
685 return g->UseImmediate64(cst->word32());
686 } else {
687 return g->UseImmediate(cst->word32());
688 }
689 } else if (Is64() && bitcast_input.Is<Opmask::kWord64Constant>()) {
691 return g->UseImmediate(bitcast_input.Cast<ConstantOp>().word32());
692 } else {
693 return g->UseImmediate64(bitcast_input.Cast<ConstantOp>().word64());
694 }
695 }
696 }
697
698 switch (kind) {
700 return g->UseUniqueSlot(input);
702 // Currently deopts "wrap" other operations, so the deopt's inputs
703 // are potentially needed until the end of the deoptimising code.
704 return g->UseAnyAtEnd(input);
705 }
706}
707
708} // namespace
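// --- Illustrative sketch, not part of the original file ---
// The kNumber case in OperandForDeopt() above only uses an immediate when the
// double round-trips through an integer (the CHECK_EQ(smi.value(), d)). A
// self-contained version of that round-trip test; the Smi range check itself
// is target dependent and omitted here. std::numeric_limits comes from the
// <limits> include at the top of this file.
namespace deopt_operand_sketch {
inline bool DoubleRoundTripsThroughInt32(double d) {
  if (!(d >= std::numeric_limits<int32_t>::min() &&
        d <= std::numeric_limits<int32_t>::max())) {
    return false;  // also rejects NaN, avoiding UB in the cast below
  }
  const int32_t i = static_cast<int32_t>(d);
  return static_cast<double>(i) == d;
}
}  // namespace deopt_operand_sketch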
709
711
713 public:
716 static constexpr size_t kNotDuplicated = std::numeric_limits<size_t>::max();
717
718 size_t GetObjectId(uint32_t old_id, ObjectType type) {
719 auto& ids_map = GetMapForType(type);
720 auto it = ids_map.find(old_id);
721 if (it == ids_map.end()) return kNotDuplicated;
722 return it->second;
723 }
724
725 size_t InsertObject(uint32_t old_id, ObjectType type) {
726 auto& ids_map = GetMapForType(type);
727 uint32_t new_id = next_id_++;
728 ids_map.insert({old_id, new_id});
729 return new_id;
730 }
731
733
734 private:
743 uint32_t next_id_ = 0;
744
747};
748
750 public:
751 CachedStateValues(Zone* zone, StateValueList* values, size_t values_start,
752 InstructionOperandVector* inputs, size_t inputs_start)
753 : inputs_(inputs->begin() + inputs_start, inputs->end(), zone),
754 values_(values->MakeSlice(values_start)) {}
755
757 inputs->insert(inputs->end(), inputs_.begin(), inputs_.end());
758 values->PushCachedSlice(values_);
759 return inputs_.size();
760 }
761
762 private:
765};
766
768 InstructionSelectorT* selector, StateValueList* values,
772 switch (it->current_instr()) {
773 case FrameStateData::Instr::kUnusedRegister:
774 it->ConsumeUnusedRegister();
775 values->PushOptimizedOut();
776 return 0;
777 case FrameStateData::Instr::kInput: {
779 OpIndex input;
780 it->ConsumeInput(&type, &input);
781 const Operation& op = selector->Get(input);
783 type.representation() == MachineRepresentation::kWord32) {
784 // 64 to 32-bit conversion is implicit in turboshaft.
785 // TODO(nicohartmann@): Fix this once we have explicit truncations.
787 }
788 InstructionOperand instr_op = OperandForDeopt(
789 selector->isolate(), g, input, kind, type.representation());
790 if (instr_op.kind() == InstructionOperand::INVALID) {
791 // Invalid operand means the value is impossible or optimized-out.
792 values->PushOptimizedOut();
793 return 0;
794 } else {
795 inputs->push_back(instr_op);
796 values->PushPlain(type);
797 return 1;
798 }
799 }
800 case FrameStateData::Instr::kDematerializedObject: {
801 uint32_t obj_id;
802 uint32_t field_count;
803 it->ConsumeDematerializedObject(&obj_id, &field_count);
804 size_t id = deduplicator->GetObjectId(obj_id, ObjectType::kRegularObject);
806 id = deduplicator->InsertObject(obj_id, ObjectType::kRegularObject);
807 size_t entries = 0;
808 StateValueList* nested = values->PushRecursiveField(zone, id);
809 for (uint32_t i = 0; i < field_count; ++i) {
811 selector, nested, inputs, g, deduplicator, it, kind, zone);
812 }
813 return entries;
814 } else {
815 // Deoptimizer counts duplicate objects for the running id, so we have
816 // to push the input again.
817 deduplicator->InsertObject(obj_id, ObjectType::kRegularObject);
818 values->PushDuplicate(id);
819 return 0;
820 }
821 }
822 case FrameStateData::Instr::kDematerializedObjectReference: {
823 uint32_t obj_id;
824 it->ConsumeDematerializedObjectReference(&obj_id);
825 size_t id = deduplicator->GetObjectId(obj_id, ObjectType::kRegularObject);
827 // Deoptimizer counts duplicate objects for the running id, so we have
828 // to push the input again.
829 deduplicator->InsertObject(obj_id, ObjectType::kRegularObject);
830 values->PushDuplicate(id);
831 return 0;
832 }
833 case FrameStateData::Instr::kDematerializedStringConcat: {
834 DCHECK(v8_flags.turboshaft_string_concat_escape_analysis);
835 uint32_t obj_id;
836 it->ConsumeDematerializedStringConcat(&obj_id);
837 size_t id = deduplicator->GetObjectId(obj_id, ObjectType::kStringConcat);
839 id = deduplicator->InsertObject(obj_id, ObjectType::kStringConcat);
840 StateValueList* nested = values->PushStringConcat(zone, id);
841 static constexpr int kLeft = 1, kRight = 1;
842 static constexpr int kInputCount = kLeft + kRight;
843 size_t entries = 0;
844 for (uint32_t i = 0; i < kInputCount; i++) {
846 selector, nested, inputs, g, deduplicator, it, kind, zone);
847 }
848 return entries;
849 } else {
850 // Deoptimizer counts duplicate objects for the running id, so we have
851 // to push the input again.
852 deduplicator->InsertObject(obj_id, ObjectType::kStringConcat);
853 values->PushDuplicate(id);
854 return 0;
855 }
856 }
857 case FrameStateData::Instr::kDematerializedStringConcatReference: {
858 DCHECK(v8_flags.turboshaft_string_concat_escape_analysis);
859 uint32_t obj_id;
860 it->ConsumeDematerializedStringConcatReference(&obj_id);
861 size_t id = deduplicator->GetObjectId(obj_id, ObjectType::kStringConcat);
863 // Deoptimizer counts duplicate objects for the running id, so we have
864 // to push the input again.
865 deduplicator->InsertObject(obj_id, ObjectType::kStringConcat);
866 values->PushDuplicate(id);
867 return 0;
868 }
869 case FrameStateData::Instr::kArgumentsElements: {
871 it->ConsumeArgumentsElements(&type);
872 values->PushArgumentsElements(type);
873 // The elements backing store of an arguments object participates in the
874 // duplicate object counting, but can itself never appear duplicated.
875 deduplicator->InsertDummyForArgumentsElements();
876 return 0;
877 }
878 case FrameStateData::Instr::kArgumentsLength:
879 it->ConsumeArgumentsLength();
880 values->PushArgumentsLength();
881 return 0;
882 case FrameStateData::Instr::kRestLength:
883 it->ConsumeRestLength();
884 values->PushRestLength();
885 return 0;
886 }
887 UNREACHABLE();
888}
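// --- Illustrative sketch, not part of the original file ---
// The dematerialized-object cases above share one pattern: look the old id up
// in a per-type map, expand the object on a miss, and push a duplicate marker
// on a hit, while the running id counter advances either way (the deoptimizer
// counts duplicates too). A minimal stand-in for that deduplication step;
// names are hypothetical and std::map is assumed to be available.
namespace dedup_sketch {
class SketchDeduplicator {
 public:
  static constexpr size_t kNotDuplicated = std::numeric_limits<size_t>::max();
  size_t GetObjectId(uint32_t old_id) const {
    auto it = ids_.find(old_id);
    return it == ids_.end() ? kNotDuplicated : it->second;
  }
  size_t InsertObject(uint32_t old_id) {
    size_t new_id = next_id_++;     // duplicates still consume a running id
    ids_.insert({old_id, new_id});  // no-op if old_id is already mapped
    return new_id;
  }
 private:
  std::map<uint32_t, size_t> ids_;
  size_t next_id_ = 0;
};
}  // namespace dedup_sketch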
889
890// Returns the number of instruction operands added to inputs.
892 FrameStateDescriptor* descriptor, OpIndex state_node, OperandGenerator* g,
895 FrameStateOp& state =
896 schedule()->Get(state_node).template Cast<FrameStateOp>();
897 const FrameStateInfo& info = state.data->frame_state_info;
898 USE(info);
899 FrameStateData::Iterator it = state.data->iterator(state.state_values());
900
901 size_t entries = 0;
902 size_t initial_size = inputs->size();
903 USE(initial_size); // initial_size is only used for debug.
904 if (descriptor->outer_state()) {
906 descriptor->outer_state(), state.parent_frame_state(), g, deduplicator,
907 inputs, kind, zone);
908 }
909
910 DCHECK_EQ(descriptor->parameters_count(), info.parameter_count());
911 DCHECK_EQ(descriptor->locals_count(), info.local_count());
912 DCHECK_EQ(descriptor->stack_count(), info.stack_count());
913
914 StateValueList* values_descriptor = descriptor->GetStateValueDescriptors();
915
916 DCHECK_EQ(values_descriptor->size(), 0u);
917 values_descriptor->ReserveSize(descriptor->GetSize());
918
919 // Function
920 if (descriptor->HasClosure()) {
922 this, values_descriptor, inputs, g, deduplicator, &it,
924 } else {
925 // Advance the iterator either way.
926 MachineType unused_type;
927 OpIndex unused_input;
928 it.ConsumeInput(&unused_type, &unused_input);
929 }
930
931 // Parameters
932 for (size_t i = 0; i < descriptor->parameters_count(); ++i) {
934 this, values_descriptor, inputs, g, deduplicator, &it, kind, zone);
935 }
936
937 // Context
938 if (descriptor->HasContext()) {
940 this, values_descriptor, inputs, g, deduplicator, &it,
942 } else {
943 // Advance the iterator either way.
944 MachineType unused_type;
945 OpIndex unused_input;
946 it.ConsumeInput(&unused_type, &unused_input);
947 }
948
949 // Locals
950 for (size_t i = 0; i < descriptor->locals_count(); ++i) {
952 this, values_descriptor, inputs, g, deduplicator, &it, kind, zone);
953 }
954
955 // Stack
956 for (size_t i = 0; i < descriptor->stack_count(); ++i) {
958 this, values_descriptor, inputs, g, deduplicator, &it, kind, zone);
959 }
960
961 DCHECK_EQ(initial_size + entries, inputs->size());
962 return entries;
963}
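// --- Illustrative sketch, not part of the original file ---
// AddInputsToFrameStateDescriptor() above flattens a (possibly nested) frame
// state in a fixed order: outer frames recursively first, then the closure,
// parameters, context, locals and stack slots of the current frame. A compact
// model of that per-frame ordering; names are hypothetical and std::vector is
// assumed to be available.
namespace frame_state_layout_sketch {
struct SketchSectionCounts { size_t parameters, locals, stack; };
inline std::vector<const char*> FlattenFrameOrder(const SketchSectionCounts& c) {
  std::vector<const char*> order;
  order.push_back("closure");
  for (size_t i = 0; i < c.parameters; ++i) order.push_back("parameter");
  order.push_back("context");
  for (size_t i = 0; i < c.locals; ++i) order.push_back("local");
  for (size_t i = 0; i < c.stack; ++i) order.push_back("stack");
  return order;
}
}  // namespace frame_state_layout_sketch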
964
969
977
985
987 InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
988 size_t input_count, InstructionOperand* inputs, FlagsContinuation* cont) {
989 return EmitWithContinuation(opcode, output_count, outputs, input_count,
990 inputs, 0, nullptr, cont);
991}
992
994 InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
995 size_t input_count, InstructionOperand* inputs, size_t temp_count,
997 OperandGenerator g(this);
998
999 opcode = cont->Encode(opcode);
1000
1002 for (size_t i = 0; i < input_count; i++) {
1004 }
1005
1007 for (size_t i = 0; i < output_count; i++) {
1009 }
1010
1012 for (size_t i = 0; i < temp_count; i++) {
1014 }
1015
1016 if (cont->IsBranch() || cont->IsConditionalBranch()) {
1019 } else if (cont->IsDeoptimize()) {
1020 int immediate_args_count = 0;
1021 opcode |= DeoptImmedArgsCountField::encode(immediate_args_count) |
1022 DeoptFrameStateOffsetField::encode(static_cast<int>(input_count));
1024 cont->node_id(), cont->feedback(),
1025 cont->frame_state());
1026 } else if (cont->IsSet() || cont->IsConditionalSet()) {
1028 } else if (cont->IsSelect()) {
1029 // The {Select} should put one of two values into the output register,
1030 // depending on the result of the condition. The two result values are in
1031 // the last two input slots, the {false_value} in {input_count - 2}, and the
1032 // {true_value} in {input_count - 1}. The other inputs are used for the
1033 // condition.
1034 AddOutputToSelectContinuation(&g, static_cast<int>(input_count) - 2,
1035 cont->result());
1036 } else if (cont->IsTrap()) {
1037 int trap_id = static_cast<int>(cont->trap_id());
1039 } else {
1040 DCHECK(cont->IsNone());
1041 }
1042
1043 size_t const emit_inputs_size = continuation_inputs_.size();
1044 auto* emit_inputs =
1045 emit_inputs_size ? &continuation_inputs_.front() : nullptr;
1046 size_t const emit_outputs_size = continuation_outputs_.size();
1047 auto* emit_outputs =
1048 emit_outputs_size ? &continuation_outputs_.front() : nullptr;
1049 size_t const emit_temps_size = continuation_temps_.size();
1050 auto* emit_temps = emit_temps_size ? &continuation_temps_.front() : nullptr;
1051 return Emit(opcode, emit_outputs_size, emit_outputs, emit_inputs_size,
1052 emit_inputs, emit_temps_size, emit_temps);
1053}
1054
1056 InstructionOperandVector* args, DeoptimizeReason reason, uint32_t node_id,
1057 FeedbackSource const& feedback, OpIndex frame_state, DeoptimizeKind kind) {
1058 OperandGenerator g(this);
1059 FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state);
1060 int const state_id = sequence()->AddDeoptimizationEntry(
1061 descriptor, kind, reason, node_id, feedback);
1062 args->push_back(g.TempImmediate(state_id));
1064 AddInputsToFrameStateDescriptor(descriptor, frame_state, &g, &deduplicator,
1067}
1068
1069// An internal helper class for generating the operands to calls.
1070// TODO(bmeurer): Get rid of the CallBuffer business and make
1071// InstructionSelector::VisitCall platform independent instead.
1073 CallBufferT(Zone* zone, const CallDescriptor* call_descriptor,
1074 FrameStateDescriptor* frame_state)
1075 : descriptor(call_descriptor),
1076 frame_state_descriptor(frame_state),
1077 output_nodes(zone),
1078 outputs(zone),
1079 instruction_args(zone),
1080 pushed_nodes(zone) {
1081 output_nodes.reserve(call_descriptor->ReturnCount());
1082 outputs.reserve(call_descriptor->ReturnCount());
1083 pushed_nodes.reserve(input_count());
1085 }
1086
1093
1094 size_t input_count() const { return descriptor->InputCount(); }
1095
1096 size_t frame_state_count() const { return descriptor->FrameStateCount(); }
1097
1099 return (frame_state_descriptor == nullptr)
1100 ? 0
1102 1); // Include deopt id.
1103 }
1104};
1105
1106// TODO(bmeurer): Get rid of the CallBuffer business and make
1107// InstructionSelector::VisitCall platform independent instead.
1109 OpIndex node, CallBuffer* buffer, CallBufferFlags flags, OpIndex callee,
1110 OptionalOpIndex frame_state_opt, base::Vector<const OpIndex> arguments,
1111 int return_count, int stack_param_delta) {
1112 OperandGenerator g(this);
1113 size_t ret_count = buffer->descriptor->ReturnCount();
1114 bool is_tail_call = (flags & kCallTail) != 0;
1115 DCHECK_LE(return_count, ret_count);
1116
1117 if (ret_count > 0) {
1118 // Collect the projections that represent multiple outputs from this call.
1119 if (ret_count == 1) {
1120 PushParameter result = {node, buffer->descriptor->GetReturnLocation(0)};
1121 buffer->output_nodes.push_back(result);
1122 } else {
1123 buffer->output_nodes.resize(ret_count);
1124 for (size_t i = 0; i < ret_count; ++i) {
1125 LinkageLocation location = buffer->descriptor->GetReturnLocation(i);
1126 buffer->output_nodes[i] = PushParameter({}, location);
1127 }
1128 for (OpIndex call_use : turboshaft_uses(node)) {
1129 const Operation& use_op = this->Get(call_use);
1130 if (use_op.Is<DidntThrowOp>()) {
1131 for (OpIndex use : turboshaft_uses(call_use)) {
1132 const ProjectionOp& projection = Cast<ProjectionOp>(use);
1133 size_t index = projection.index;
1134 DCHECK_LT(index, buffer->output_nodes.size());
1135 DCHECK(!buffer->output_nodes[index].node.valid());
1136 buffer->output_nodes[index].node = use;
1137 }
1138 } else {
1139 DCHECK(use_op.Is<CheckExceptionOp>());
1140 }
1141 }
1143 static_cast<int>(buffer->descriptor->ReturnSlotCount()));
1144 }
1145
1146 // Filter out the outputs that aren't live because no projection uses them.
1147 size_t outputs_needed_by_framestate =
1148 buffer->frame_state_descriptor == nullptr
1149 ? 0
1152 for (size_t i = 0; i < buffer->output_nodes.size(); i++) {
1153 bool output_is_live = buffer->output_nodes[i].node.valid() ||
1154 i < outputs_needed_by_framestate;
1155 if (output_is_live) {
1156 LinkageLocation location = buffer->output_nodes[i].location;
1157 MachineRepresentation rep = location.GetType().representation();
1158
1159 OpIndex output = buffer->output_nodes[i].node;
1160 InstructionOperand op = !output.valid()
1161 ? g.TempLocation(location)
1162 : g.DefineAsLocation(output, location);
1163 MarkAsRepresentation(rep, op);
1164
1165 if (!UnallocatedOperand::cast(op).HasFixedSlotPolicy()) {
1166 buffer->outputs.push_back(op);
1167 buffer->output_nodes[i].node = {};
1168 }
1169 }
1170 }
1171 }
1172
1173 // The first argument is always the callee code.
1174 bool call_code_immediate = (flags & kCallCodeImmediate) != 0;
1175 bool call_address_immediate = (flags & kCallAddressImmediate) != 0;
1176 bool call_use_fixed_target_reg = (flags & kCallFixedTargetRegister) != 0;
1177 switch (buffer->descriptor->kind()) {
1180 (call_code_immediate && this->IsHeapConstant(callee))
1181 ? g.UseImmediate(callee)
1182 : call_use_fixed_target_reg
1184 : g.UseRegister(callee));
1185 break;
1188 (call_address_immediate && this->IsExternalConstant(callee))
1189 ? g.UseImmediate(callee)
1190 : call_use_fixed_target_reg
1192 : g.UseRegister(callee));
1193 break;
1194#if V8_ENABLE_WEBASSEMBLY
1195 case CallDescriptor::kCallWasmCapiFunction:
1196 case CallDescriptor::kCallWasmFunction:
1197 case CallDescriptor::kCallWasmFunctionIndirect:
1198 case CallDescriptor::kCallWasmImportWrapper:
1200 (call_address_immediate && this->IsRelocatableWasmConstant(callee))
1201 ? g.UseImmediate(callee)
1202 : call_use_fixed_target_reg
1204 : g.UseRegister(callee));
1205 break;
1206#endif // V8_ENABLE_WEBASSEMBLY
1208 // The common case for builtin pointers is to have the target in a
1209 // register. If we have a constant, we use a register anyway to simplify
1210 // related code.
1211 LinkageLocation location = buffer->descriptor->GetInputLocation(0);
1212 bool location_is_fixed_register =
1213 location.IsRegister() && !location.IsAnyRegister();
1215 // If earlier phases specified a particular register, don't override
1216 // their choice.
1217 if (location_is_fixed_register) {
1218 op = g.UseLocation(callee, location);
1219 } else if (call_use_fixed_target_reg) {
1221 } else {
1222 op = g.UseRegister(callee);
1223 }
1224 buffer->instruction_args.push_back(op);
1225 break;
1226 }
1229 g.UseLocation(callee, buffer->descriptor->GetInputLocation(0)));
1230 break;
1231 }
1232 DCHECK_EQ(1u, buffer->instruction_args.size());
1233
1234 // If the call needs a frame state, we insert the state information as
1235 // follows (n is the number of value inputs to the frame state):
1236 // arg 1 : deoptimization id.
1237 // arg 2 - arg (n + 2) : value inputs to the frame state.
1238 size_t frame_state_entries = 0;
1239 USE(frame_state_entries); // frame_state_entries is only used for debug.
1240 if (buffer->frame_state_descriptor != nullptr) {
1241 OpIndex frame_state = frame_state_opt.value();
1242
1243 // If it was a syntactic tail call we need to drop the current frame and
1244 // all the frames on top of it that are either inlined extra arguments
1245 // or a tail caller frame.
1246 if (is_tail_call) {
1247 frame_state = Cast<FrameStateOp>(frame_state).parent_frame_state();
1248 buffer->frame_state_descriptor =
1250 while (buffer->frame_state_descriptor != nullptr &&
1251 buffer->frame_state_descriptor->type() ==
1253 frame_state = Cast<FrameStateOp>(frame_state).parent_frame_state();
1254 buffer->frame_state_descriptor =
1256 }
1257 }
1258
1259 int const state_id = sequence()->AddDeoptimizationEntry(
1261 DeoptimizeReason::kUnknown, node.id(), FeedbackSource());
1262 buffer->instruction_args.push_back(g.TempImmediate(state_id));
1263
1265
1266 frame_state_entries =
1268 buffer->frame_state_descriptor, frame_state, &g, &deduplicator,
1271
1272 DCHECK_EQ(1 + frame_state_entries, buffer->instruction_args.size());
1273 }
1274
1275 size_t input_count = buffer->input_count();
1276
1277 // Split the arguments into pushed_nodes and instruction_args. Pushed
1278 // arguments require an explicit push instruction before the call and do
1279 // not appear as arguments to the call. Everything else ends up
1280 // as an InstructionOperand argument to the call.
1281 auto iter(arguments.begin());
1282 size_t pushed_count = 0;
1283 for (size_t index = 1; index < input_count; ++iter, ++index) {
1284 DCHECK_NE(iter, arguments.end());
1285
1286 LinkageLocation location = buffer->descriptor->GetInputLocation(index);
1287 if (is_tail_call) {
1289 location, stack_param_delta);
1290 }
1291 InstructionOperand op = g.UseLocation(*iter, location);
1292 UnallocatedOperand unallocated = UnallocatedOperand::cast(op);
1293 if (unallocated.HasFixedSlotPolicy() && !is_tail_call) {
1294 int stack_index = buffer->descriptor->GetStackIndexFromSlot(
1295 unallocated.fixed_slot_index());
1296 // This can insert empty slots before stack_index and will insert enough
1297 // slots after stack_index to store the parameter.
1298 if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
1299 int num_slots = location.GetSizeInPointers();
1300 buffer->pushed_nodes.resize(stack_index + num_slots);
1301 }
1302 PushParameter param = {*iter, location};
1303 buffer->pushed_nodes[stack_index] = param;
1304 pushed_count++;
1305 } else {
1306 if (location.IsNullRegister()) {
1307 EmitMoveFPRToParam(&op, location);
1308 }
1309 buffer->instruction_args.push_back(op);
1310 }
1311 }
1312 DCHECK_EQ(input_count, buffer->instruction_args.size() + pushed_count -
1313 frame_state_entries);
1314 USE(pushed_count);
1316 stack_param_delta != 0) {
1317 // For tail calls that change the size of their parameter list and keep
1318 // their return address on the stack, move the return address to just above
1319 // the parameters.
1320 LinkageLocation saved_return_location =
1322 InstructionOperand return_address =
1324 saved_return_location, stack_param_delta),
1325 saved_return_location);
1326 buffer->instruction_args.push_back(return_address);
1327 }
1328}
1329
1331 OpIndex node) {
1332 sequence()->SetSourcePosition(instruction, (*source_positions_)[node]);
1333}
1334
1337 return true;
1338 }
1339 const Operation& operation = this->Get(node);
1340 // DidntThrow is where the actual call is generated.
1341 if (operation.Is<DidntThrowOp>()) return true;
1342 if (const LoadOp* load = operation.TryCast<LoadOp>()) {
1343 return load->kind.with_trap_handler;
1344 }
1345 if (const StoreOp* store = operation.TryCast<StoreOp>()) {
1346 return store->kind.with_trap_handler;
1347 }
1348#if V8_ENABLE_WEBASSEMBLY
1349 if (operation.Is<TrapIfOp>()) return true;
1350 if (const AtomicRMWOp* rmw = operation.TryCast<AtomicRMWOp>()) {
1351 return rmw->memory_access_kind ==
1353 }
1354 if (const Simd128LoadTransformOp* lt =
1355 operation.TryCast<Simd128LoadTransformOp>()) {
1356 return lt->load_kind.with_trap_handler;
1357 }
1358#if V8_ENABLE_WASM_SIMD256_REVEC
1359 if (const Simd256LoadTransformOp* lt =
1360 operation.TryCast<Simd256LoadTransformOp>()) {
1361 return lt->load_kind.with_trap_handler;
1362 }
1363#endif // V8_ENABLE_WASM_SIMD256_REVEC
1364 if (const Simd128LaneMemoryOp* lm =
1365 operation.TryCast<Simd128LaneMemoryOp>()) {
1366 return lm->kind.with_trap_handler;
1367 }
1368#if V8_ENABLE_WASM_DEINTERLEAVED_MEM_OPS
1369 if (const Simd128LoadPairDeinterleaveOp* dl =
1370 operation.TryCast<Simd128LoadPairDeinterleaveOp>()) {
1371 return dl->load_kind.with_trap_handler;
1372 }
1373#endif // V8_ENABLE_WASM_DEINTERLEAVED_MEM_OPS
1374#endif
1375 if (additional_protected_instructions_->Contains(node.id())) {
1376 return true;
1377 }
1378 return false;
1379}
1380
1382 const turboshaft::Operation& op = Get(node);
1383 if (const auto word_binop = op.TryCast<turboshaft::WordBinopOp>()) {
1384 return turboshaft::WordBinopOp::IsCommutative(word_binop->kind);
1385 } else if (const auto overflow_binop =
1388 overflow_binop->kind);
1389 } else if (const auto float_binop = op.TryCast<turboshaft::FloatBinopOp>()) {
1390 return turboshaft::FloatBinopOp::IsCommutative(float_binop->kind);
1391 } else if (const auto comparison = op.TryCast<turboshaft::ComparisonOp>()) {
1392 return turboshaft::ComparisonOp::IsCommutative(comparison->kind);
1393 }
1394 return false;
1395}
1396namespace {
1397bool increment_effect_level_for_node(TurboshaftAdapter* adapter, OpIndex node) {
1398 // We need to increment the effect level if the operation consumes any of the
1399 // dimensions of the {kTurboshaftEffectLevelMask}.
1400 const Operation& op = adapter->Get(node);
1401 if (op.Is<RetainOp>()) {
1402 // Retain has CanWrite effect so that it's not reordered before the last
1403 // read it protects, but it shouldn't increment the effect level, since
1404 // doing a Load(x) after a Retain(x) is safe as long as there is no call
1405 // (or something that can trigger GC) in between Retain(x) and Load(x), and
1406 // if there were, then this call would increment the effect level, which
1407 // would prevent covering in the ISEL.
1408 return false;
1409 }
1410 return (op.Effects().consumes.bits() & kTurboshaftEffectLevelMask.bits()) !=
1411 0;
1412}
1413} // namespace
1414
1418 auto current_num_instructions = [&] {
1420 return static_cast<int>(instructions_.size());
1421 };
1422 int current_block_end = current_num_instructions();
1423
1424 int effect_level = 0;
1425 for (OpIndex node : this->nodes(block)) {
1426 SetEffectLevel(node, effect_level);
1427 if (increment_effect_level_for_node(this, node)) {
1428 ++effect_level;
1429 }
1430 }
1431
1432 // We visit the control first, then the nodes in the block, so the block's
1433 // control input should be on the same effect level as the last node.
1434 if (OpIndex terminator = this->block_terminator(block); terminator.valid()) {
1435 SetEffectLevel(terminator, effect_level);
1436 current_effect_level_ = effect_level;
1437 }
1438
1439 auto FinishEmittedInstructions = [&](OpIndex node, int instruction_start) {
1440 if (instruction_selection_failed()) return false;
1441 if (current_num_instructions() == instruction_start) return true;
1442 std::reverse(instructions_.begin() + instruction_start,
1443 instructions_.end());
1444 if (!node.valid()) return true;
1445 if (!source_positions_) return true;
1446
1447 SourcePosition source_position;
1448#if V8_ENABLE_WEBASSEMBLY && V8_TARGET_ARCH_X64
1449 if (const Simd128UnaryOp* op =
1451 V8_UNLIKELY(op)) {
1452 // On x64 there exists an optimization that folds
1453 // `kF64x2PromoteLowF32x4` and `kS128Load64Zero` together into a single
1454 // instruction. If the instruction causes an out-of-bounds memory
1455 // access exception, then the stack trace has to show the source
1456 // position of the `kS128Load64Zero` and not of the
1457 // `kF64x2PromoteLowF32x4`.
1458 if (CanOptimizeF64x2PromoteLowF32x4(node)) {
1459 node = op->input();
1460 }
1461 }
1462#endif // V8_ENABLE_WEBASSEMBLY && V8_TARGET_ARCH_X64
1463 source_position = (*source_positions_)[node];
1464 if (source_position.IsKnown() && IsSourcePositionUsed(node)) {
1465 sequence()->SetSourcePosition(instructions_.back(), source_position);
1466 }
1467 return true;
1468 };
1469
1470 // Generate code for the block control "top down", but schedule the code
1471 // "bottom up".
1472 VisitControl(block);
1473 if (!FinishEmittedInstructions(this->block_terminator(block),
1474 current_block_end)) {
1475 return;
1476 }
1477
1478 // Visit code in reverse control flow order, because architecture-specific
1479 // matching may cover more than one node at a time.
1480 for (OpIndex node : base::Reversed(this->nodes(block))) {
1481 int current_node_end = current_num_instructions();
1482
1483 if (protected_loads_to_remove_->Contains(node.id()) &&
1484 !IsReallyUsed(node)) {
1485 MarkAsDefined(node);
1486 }
1487
1488 if (!IsUsed(node)) {
1489 // Skip nodes that are unused, while marking them as Defined so that it's
1490 // clear that these unused nodes have been visited and will not be Defined
1491 // later.
1492 MarkAsDefined(node);
1493 } else if (!IsDefined(node)) {
1494 // Generate code for this node "top down", but schedule the code "bottom
1495 // up".
1497 VisitNode(node);
1498 if (!FinishEmittedInstructions(node, current_node_end)) return;
1499 }
1501 instr_origins_[node.id()] = {current_num_instructions(),
1502 current_node_end};
1503 }
1504 }
1505
1506 // We're done with the block.
1507 InstructionBlock* instruction_block =
1508 sequence()->InstructionBlockAt(this->rpo_number(block));
1509 if (current_num_instructions() == current_block_end) {
1510 // Avoid empty block: insert a {kArchNop} instruction.
1511 Emit(Instruction::New(sequence()->zone(), kArchNop));
1512 }
1513 instruction_block->set_code_start(current_num_instructions());
1514 instruction_block->set_code_end(current_block_end);
1515 current_block_ = nullptr;
1516}
1517
1519 const ComparisonOp& op) const {
1520 switch (op.kind) {
1521 case ComparisonOp::Kind::kEqual:
1522 return kEqual;
1523 case ComparisonOp::Kind::kSignedLessThan:
1524 return kSignedLessThan;
1525 case ComparisonOp::Kind::kSignedLessThanOrEqual:
1527 case ComparisonOp::Kind::kUnsignedLessThan:
1528 return kUnsignedLessThan;
1529 case ComparisonOp::Kind::kUnsignedLessThanOrEqual:
1531 }
1532}
1533
1535 OptionalOpIndex projection0 = FindProjection(node, 0);
1536 if (projection0.valid()) {
1537 MarkAsWord32(projection0.value());
1538 }
1539 OptionalOpIndex projection1 = FindProjection(node, 1);
1540 if (projection1.valid()) {
1541 MarkAsWord32(projection1.value());
1542 }
1543}
1544
1546 FlagsContinuation* cont) {
1547 // Try to combine with comparisons against 0 by simply inverting the branch.
1548 while (const ComparisonOp* equal =
1550 if (equal->rep == RegisterRepresentation::Word32()) {
1551 if (!MatchIntegralZero(equal->right())) return;
1552#ifdef V8_COMPRESS_POINTERS
1553 } else if (equal->rep == RegisterRepresentation::Tagged()) {
1556 if (!MatchSmiZero(equal->right())) return;
1557#endif // V8_COMPRESS_POINTERS
1558 } else {
1559 return;
1560 }
1561 if (!CanCover(*user, *value)) return;
1562
1563 *user = *value;
1564 *value = equal->left();
1565 cont->Negate();
1566 }
1567}
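// --- Illustrative sketch, not part of the original file ---
// The loop above peels `x == 0` (or Smi-zero) comparisons off a branch
// condition and compensates by negating the continuation, so a branch on
// `x == 0` becomes a branch on `x` with swapped targets. A boolean model of
// that transformation over a hypothetical branch record:
namespace compare_zero_sketch {
struct SketchBranch { int wrappers_of_eq_zero; bool negated; };
inline void PeelEqualZero(SketchBranch* branch) {
  while (branch->wrappers_of_eq_zero > 0) {
    --branch->wrappers_of_eq_zero;       // strip one `== 0` wrapper
    branch->negated = !branch->negated;  // and invert the branch instead
  }
}
}  // namespace compare_zero_sketch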
1568
1569#if V8_ENABLE_WEBASSEMBLY
1571 return VisitI8x16Swizzle(node);
1572}
1573#endif // V8_ENABLE_WEBASSEMBLY
1574
1576 FlagsContinuation cont =
1578 VisitStackPointerGreaterThan(node, &cont);
1579}
1580
1581void InstructionSelectorT::VisitLoadStackCheckOffset(OpIndex node) {
1582 OperandGenerator g(this);
1583 Emit(kArchStackCheckOffset, g.DefineAsRegister(node));
1584}
1585
1586void InstructionSelectorT::VisitLoadFramePointer(OpIndex node) {
1587 OperandGenerator g(this);
1588 Emit(kArchFramePointer, g.DefineAsRegister(node));
1589}
1590
1591#if V8_ENABLE_WEBASSEMBLY
1592void InstructionSelectorT::VisitLoadStackPointer(OpIndex node) {
1593 OperandGenerator g(this);
1594 Emit(kArchStackPointer, g.DefineAsRegister(node));
1595}
1596#endif // V8_ENABLE_WEBASSEMBLY
1597
1598void InstructionSelectorT::VisitLoadParentFramePointer(OpIndex node) {
1599 OperandGenerator g(this);
1600 Emit(kArchParentFramePointer, g.DefineAsRegister(node));
1601}
1602
1603void InstructionSelectorT::VisitLoadRootRegister(OpIndex node) {
1604 // Do nothing. Following loads/stores from this operator will use kMode_Root
1605 // to load/store from an offset of the root register.
1606 UNREACHABLE();
1607}
1608
1609void InstructionSelectorT::VisitFloat64Acos(OpIndex node) {
1610 VisitFloat64Ieee754Unop(node, kIeee754Float64Acos);
1611}
1612
1613void InstructionSelectorT::VisitFloat64Acosh(OpIndex node) {
1614 VisitFloat64Ieee754Unop(node, kIeee754Float64Acosh);
1615}
1616
1617void InstructionSelectorT::VisitFloat64Asin(OpIndex node) {
1618 VisitFloat64Ieee754Unop(node, kIeee754Float64Asin);
1619}
1620
1621void InstructionSelectorT::VisitFloat64Asinh(OpIndex node) {
1622 VisitFloat64Ieee754Unop(node, kIeee754Float64Asinh);
1623}
1624
1625void InstructionSelectorT::VisitFloat64Atan(OpIndex node) {
1626 VisitFloat64Ieee754Unop(node, kIeee754Float64Atan);
1627}
1628
1629void InstructionSelectorT::VisitFloat64Atanh(OpIndex node) {
1630 VisitFloat64Ieee754Unop(node, kIeee754Float64Atanh);
1631}
1632
1633void InstructionSelectorT::VisitFloat64Atan2(OpIndex node) {
1634 VisitFloat64Ieee754Binop(node, kIeee754Float64Atan2);
1635}
1636
1637void InstructionSelectorT::VisitFloat64Cbrt(OpIndex node) {
1638 VisitFloat64Ieee754Unop(node, kIeee754Float64Cbrt);
1639}
1640
1641void InstructionSelectorT::VisitFloat64Cos(OpIndex node) {
1642 VisitFloat64Ieee754Unop(node, kIeee754Float64Cos);
1643}
1644
1645void InstructionSelectorT::VisitFloat64Cosh(OpIndex node) {
1646 VisitFloat64Ieee754Unop(node, kIeee754Float64Cosh);
1647}
1648
1649void InstructionSelectorT::VisitFloat64Exp(OpIndex node) {
1650 VisitFloat64Ieee754Unop(node, kIeee754Float64Exp);
1651}
1652
1653void InstructionSelectorT::VisitFloat64Expm1(OpIndex node) {
1654 VisitFloat64Ieee754Unop(node, kIeee754Float64Expm1);
1655}
1656
1657void InstructionSelectorT::VisitFloat64Log(OpIndex node) {
1658 VisitFloat64Ieee754Unop(node, kIeee754Float64Log);
1659}
1660
1661void InstructionSelectorT::VisitFloat64Log1p(OpIndex node) {
1662 VisitFloat64Ieee754Unop(node, kIeee754Float64Log1p);
1663}
1664
1665void InstructionSelectorT::VisitFloat64Log2(OpIndex node) {
1666 VisitFloat64Ieee754Unop(node, kIeee754Float64Log2);
1667}
1668
1669void InstructionSelectorT::VisitFloat64Log10(OpIndex node) {
1670 VisitFloat64Ieee754Unop(node, kIeee754Float64Log10);
1671}
1672
1673void InstructionSelectorT::VisitFloat64Pow(OpIndex node) {
1674 VisitFloat64Ieee754Binop(node, kIeee754Float64Pow);
1675}
1676
1677void InstructionSelectorT::VisitFloat64Sin(OpIndex node) {
1678 VisitFloat64Ieee754Unop(node, kIeee754Float64Sin);
1679}
1680
1681void InstructionSelectorT::VisitFloat64Sinh(OpIndex node) {
1682 VisitFloat64Ieee754Unop(node, kIeee754Float64Sinh);
1683}
1684
1685void InstructionSelectorT::VisitFloat64Tan(OpIndex node) {
1686 VisitFloat64Ieee754Unop(node, kIeee754Float64Tan);
1687}
1688
1689void InstructionSelectorT::VisitFloat64Tanh(OpIndex node) {
1690 VisitFloat64Ieee754Unop(node, kIeee754Float64Tanh);
1691}
1692
1694 const SwitchInfo& sw, InstructionOperand const& index_operand) {
1695 OperandGenerator g(this);
1696 size_t input_count = 2 + sw.value_range();
1697 DCHECK_LE(sw.value_range(), std::numeric_limits<size_t>::max() - 2);
1698 auto* inputs =
1699 zone()->template AllocateArray<InstructionOperand>(input_count);
1700 inputs[0] = index_operand;
1701 InstructionOperand default_operand = g.Label(sw.default_branch());
1702 std::fill(&inputs[1], &inputs[input_count], default_operand);
1703 for (const CaseInfo& c : sw.CasesUnsorted()) {
1704 size_t value = c.value - sw.min_value();
1705 DCHECK_LE(0u, value);
1706 DCHECK_LT(value + 2, input_count);
1707 inputs[value + 2] = g.Label(c.branch);
1708 }
1709 Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
1710}
1711
1713 const SwitchInfo& sw, InstructionOperand const& value_operand) {
1714 OperandGenerator g(this);
1715 size_t input_count = 2 + sw.case_count() * 2;
1716 DCHECK_LE(sw.case_count(), (std::numeric_limits<size_t>::max() - 2) / 2);
1717 auto* inputs =
1718 zone()->template AllocateArray<InstructionOperand>(input_count);
1719 inputs[0] = value_operand;
1720 inputs[1] = g.Label(sw.default_branch());
1721 std::vector<CaseInfo> cases = sw.CasesSortedByValue();
1722 for (size_t index = 0; index < cases.size(); ++index) {
1723 const CaseInfo& c = cases[index];
1724 inputs[index * 2 + 2 + 0] = g.TempImmediate(c.value);
1725 inputs[index * 2 + 2 + 1] = g.Label(c.branch);
1726 }
1727 Emit(kArchBinarySearchSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
1728}
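// --- Illustrative sketch, not part of the original file ---
// The two switch lowerings above differ only in operand layout: a table
// switch passes [index, default-label, label(min) .. label(max)] with holes
// filled by the default label, while a binary-search switch passes
// [value, default-label, (case value, label) pairs] sorted by case value. A
// small stand-in that builds the table-switch layout over plain ints;
// std::vector/std::pair are assumed to be available.
namespace switch_layout_sketch {
inline std::vector<int> BuildTableSwitchInputs(
    int index_operand, int default_label,
    const std::vector<std::pair<size_t, int>>& cases,  // (value - min_value, label)
    size_t value_range) {
  std::vector<int> inputs(2 + value_range, default_label);
  inputs[0] = index_operand;
  for (const auto& entry : cases) {
    inputs[2 + entry.first] = entry.second;
  }
  return inputs;
}
}  // namespace switch_layout_sketch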
1729
1730void InstructionSelectorT::VisitBitcastTaggedToWord(OpIndex node) {
1731 EmitIdentity(node);
1732}
1733
1734void InstructionSelectorT::VisitBitcastWordToTagged(OpIndex node) {
1735 OperandGenerator g(this);
1736 Emit(kArchNop, g.DefineSameAsFirst(node),
1737 g.Use(this->Get(node).Cast<TaggedBitcastOp>().input()));
1738}
1739
1740void InstructionSelectorT::VisitBitcastSmiToWord(OpIndex node) {
1741 // TODO(dmercadier): using EmitIdentity here is not ideal, because users of
1742 // {node} will then use its input, which may not have the Word32
1743 // representation. This might in turn lead to the register allocator wrongly
1744 // tracking Tagged values that are in fact just Smis. However, using
1745 // Emit(kArchNop) hurts performance because it inserts a gap move which cannot
1746 // always be eliminated because the operands may have different sizes (and the
1747 // move is then truncating or extending). As a temporary work-around until the
1748 // register allocator is fixed, we use Emit(kArchNop) in DEBUG mode to silence
1749 // the register allocator verifier.
1750#ifdef DEBUG
1751 OperandGenerator g(this);
1752 Emit(kArchNop, g.DefineSameAsFirst(node),
1753 g.Use(this->Get(node).Cast<TaggedBitcastOp>().input()));
1754#else
1755 EmitIdentity(node);
1756#endif
1757}
1758
1759// 32 bit targets do not implement the following instructions.
1760#if V8_TARGET_ARCH_32_BIT
1761
1762VISIT_UNSUPPORTED_OP(Word64And)
1763VISIT_UNSUPPORTED_OP(Word64Or)
1764VISIT_UNSUPPORTED_OP(Word64Xor)
1765VISIT_UNSUPPORTED_OP(Word64Shl)
1766VISIT_UNSUPPORTED_OP(Word64Shr)
1767VISIT_UNSUPPORTED_OP(Word64Sar)
1768VISIT_UNSUPPORTED_OP(Word64Rol)
1769VISIT_UNSUPPORTED_OP(Word64Ror)
1770VISIT_UNSUPPORTED_OP(Word64Clz)
1771VISIT_UNSUPPORTED_OP(Word64Ctz)
1772VISIT_UNSUPPORTED_OP(Word64ReverseBits)
1773VISIT_UNSUPPORTED_OP(Word64Popcnt)
1774VISIT_UNSUPPORTED_OP(Word64Equal)
1775VISIT_UNSUPPORTED_OP(Int64Add)
1776VISIT_UNSUPPORTED_OP(Int64Sub)
1777VISIT_UNSUPPORTED_OP(Int64Mul)
1778VISIT_UNSUPPORTED_OP(Int64MulHigh)
1779VISIT_UNSUPPORTED_OP(Uint64MulHigh)
1780VISIT_UNSUPPORTED_OP(Int64Div)
1781VISIT_UNSUPPORTED_OP(Int64Mod)
1782VISIT_UNSUPPORTED_OP(Uint64Div)
1783VISIT_UNSUPPORTED_OP(Uint64Mod)
1784VISIT_UNSUPPORTED_OP(Int64AddWithOverflow)
1785VISIT_UNSUPPORTED_OP(Int64MulWithOverflow)
1786VISIT_UNSUPPORTED_OP(Int64SubWithOverflow)
1787VISIT_UNSUPPORTED_OP(Int64LessThan)
1788VISIT_UNSUPPORTED_OP(Int64LessThanOrEqual)
1789VISIT_UNSUPPORTED_OP(Uint64LessThan)
1790VISIT_UNSUPPORTED_OP(Uint64LessThanOrEqual)
1791VISIT_UNSUPPORTED_OP(BitcastWord32ToWord64)
1792VISIT_UNSUPPORTED_OP(ChangeInt32ToInt64)
1793VISIT_UNSUPPORTED_OP(ChangeInt64ToFloat64)
1794VISIT_UNSUPPORTED_OP(ChangeUint32ToUint64)
1795VISIT_UNSUPPORTED_OP(ChangeFloat64ToInt64)
1796VISIT_UNSUPPORTED_OP(ChangeFloat64ToUint64)
1797VISIT_UNSUPPORTED_OP(TruncateFloat64ToInt64)
1798VISIT_UNSUPPORTED_OP(TruncateInt64ToInt32)
1799VISIT_UNSUPPORTED_OP(TryTruncateFloat32ToInt64)
1800VISIT_UNSUPPORTED_OP(TryTruncateFloat64ToInt64)
1801VISIT_UNSUPPORTED_OP(TryTruncateFloat32ToUint64)
1802VISIT_UNSUPPORTED_OP(TryTruncateFloat64ToUint64)
1803VISIT_UNSUPPORTED_OP(TryTruncateFloat64ToInt32)
1804VISIT_UNSUPPORTED_OP(TryTruncateFloat64ToUint32)
1805VISIT_UNSUPPORTED_OP(RoundInt64ToFloat32)
1806VISIT_UNSUPPORTED_OP(RoundInt64ToFloat64)
1807VISIT_UNSUPPORTED_OP(RoundUint64ToFloat32)
1808VISIT_UNSUPPORTED_OP(RoundUint64ToFloat64)
1809VISIT_UNSUPPORTED_OP(BitcastFloat64ToInt64)
1810VISIT_UNSUPPORTED_OP(BitcastInt64ToFloat64)
1811VISIT_UNSUPPORTED_OP(SignExtendWord8ToInt64)
1812VISIT_UNSUPPORTED_OP(SignExtendWord16ToInt64)
1813VISIT_UNSUPPORTED_OP(SignExtendWord32ToInt64)
1814#endif // V8_TARGET_ARCH_32_BIT
1815
1816// 64 bit targets do not implement the following instructions.
1817#if V8_TARGET_ARCH_64_BIT
1818VISIT_UNSUPPORTED_OP(Int32PairAdd)
1819VISIT_UNSUPPORTED_OP(Int32PairSub)
1820VISIT_UNSUPPORTED_OP(Int32PairMul)
1821VISIT_UNSUPPORTED_OP(Word32PairShl)
1822VISIT_UNSUPPORTED_OP(Word32PairShr)
1823VISIT_UNSUPPORTED_OP(Word32PairSar)
1824#endif // V8_TARGET_ARCH_64_BIT
1825
1826#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_RISCV32
1827void InstructionSelectorT::VisitWord32AtomicPairLoad(OpIndex node) {
1828 UNIMPLEMENTED();
1829}
1830
1831void InstructionSelectorT::VisitWord32AtomicPairStore(OpIndex node) {
1832 UNIMPLEMENTED();
1833}
1834
1835void InstructionSelectorT::VisitWord32AtomicPairAdd(OpIndex node) {
1836 UNIMPLEMENTED();
1837}
1838
1839void InstructionSelectorT::VisitWord32AtomicPairSub(OpIndex node) {
1840 UNIMPLEMENTED();
1841}
1842
1843void InstructionSelectorT::VisitWord32AtomicPairAnd(OpIndex node) {
1844 UNIMPLEMENTED();
1845}
1846
1847void InstructionSelectorT::VisitWord32AtomicPairOr(OpIndex node) {
1848 UNIMPLEMENTED();
1849}
1850
1851void InstructionSelectorT::VisitWord32AtomicPairXor(OpIndex node) {
1852 UNIMPLEMENTED();
1853}
1854
1855void InstructionSelectorT::VisitWord32AtomicPairExchange(OpIndex node) {
1856 UNIMPLEMENTED();
1857}
1858
1859void InstructionSelectorT::VisitWord32AtomicPairCompareExchange(OpIndex node) {
1860 UNIMPLEMENTED();
1861}
1862#endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
1863 // && !V8_TARGET_ARCH_RISCV32
1864
1865#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \
1866 !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64 && \
1867 !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64
1868
1869VISIT_UNSUPPORTED_OP(Word64AtomicLoad)
1870VISIT_UNSUPPORTED_OP(Word64AtomicStore)
1871VISIT_UNSUPPORTED_OP(Word64AtomicAdd)
1872VISIT_UNSUPPORTED_OP(Word64AtomicSub)
1873VISIT_UNSUPPORTED_OP(Word64AtomicAnd)
1874VISIT_UNSUPPORTED_OP(Word64AtomicOr)
1875VISIT_UNSUPPORTED_OP(Word64AtomicXor)
1876VISIT_UNSUPPORTED_OP(Word64AtomicExchange)
1877VISIT_UNSUPPORTED_OP(Word64AtomicCompareExchange)
1878
1879#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_PPC64
1880 // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390X &&
1881 // !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64
1882
1883#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_RISCV32
1884// This is only needed on 32-bit to split the 64-bit value into two operands.
1885IF_WASM(VISIT_UNSUPPORTED_OP, I64x2SplatI32Pair)
1886IF_WASM(VISIT_UNSUPPORTED_OP, I64x2ReplaceLaneI32Pair)
1887#endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM &&
1888 // !V8_TARGET_ARCH_RISCV32
1889
1890#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64
1891#if !V8_TARGET_ARCH_ARM64
1892#if !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_LOONG64 && \
1893 !V8_TARGET_ARCH_RISCV32 && !V8_TARGET_ARCH_RISCV64
1894
1895IF_WASM(VISIT_UNSUPPORTED_OP, I64x2Splat)
1896IF_WASM(VISIT_UNSUPPORTED_OP, I64x2ExtractLane)
1897IF_WASM(VISIT_UNSUPPORTED_OP, I64x2ReplaceLane)
1898
1899#endif // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_LOONG64 &&
1900 // !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_RISCV32
1901#endif // !V8_TARGET_ARCH_ARM64
1902#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64
1903
1904#if !V8_TARGET_ARCH_ARM64
1905
1906IF_WASM(VISIT_UNSUPPORTED_OP, I8x16AddReduce)
1907IF_WASM(VISIT_UNSUPPORTED_OP, I16x8AddReduce)
1908IF_WASM(VISIT_UNSUPPORTED_OP, I32x4AddReduce)
1909IF_WASM(VISIT_UNSUPPORTED_OP, I64x2AddReduce)
1910IF_WASM(VISIT_UNSUPPORTED_OP, F32x4AddReduce)
1911IF_WASM(VISIT_UNSUPPORTED_OP, F64x2AddReduce)
1912
1913IF_WASM(VISIT_UNSUPPORTED_OP, I8x2Shuffle)
1914IF_WASM(VISIT_UNSUPPORTED_OP, I8x4Shuffle)
1915IF_WASM(VISIT_UNSUPPORTED_OP, I8x8Shuffle)
1916#endif // !V8_TARGET_ARCH_ARM64
1917
1918void InstructionSelectorT::VisitParameter(OpIndex node) {
1919 const ParameterOp& parameter = Cast<ParameterOp>(node);
1920 const int index = parameter.parameter_index;
1921 OperandGenerator g(this);
1922
1923 if (linkage()->GetParameterLocation(index).IsNullRegister()) {
1924 EmitMoveParamToFPR(node, index);
1925 } else {
1926 InstructionOperand op =
1927 linkage()->ParameterHasSecondaryLocation(index)
1928 ? g.DefineAsDualLocation(
1929 node, linkage()->GetParameterLocation(index),
1930 linkage()->GetParameterSecondaryLocation(index))
1931 : g.DefineAsLocation(node, linkage()->GetParameterLocation(index));
1932 Emit(kArchNop, op);
1933 }
1934}
1935
1936namespace {
1937
1938LinkageLocation ExceptionLocation() {
1939 return LinkageLocation::ForRegister(kReturnRegister0.code(),
1940 MachineType::TaggedPointer());
1941}
1942
1943constexpr InstructionCode EncodeCallDescriptorFlags(
1944 InstructionCode opcode, CallDescriptor::Flags flags) {
1945 // Note: Not all bits of `flags` are preserved.
1949 return opcode | MiscField::encode(flags & MiscField::kMax);
1950}
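// Illustrative arithmetic for the masking above (the concrete width of
// MiscField is an assumption here, not taken from this file): if
// MiscField::kMax were 0x3ff and `flags` were 0x1403, the encoded value is
// MiscField::encode(0x003); the 0x1400 bits are deliberately dropped, as the
// note above says.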
1951
1952} // namespace
1953
1954void InstructionSelectorT::VisitIfException(OpIndex node) {
1955 OperandGenerator g(this);
1956 Emit(kArchNop, g.DefineAsLocation(node, ExceptionLocation()));
1957}
1958
1959void InstructionSelectorT::VisitOsrValue(OpIndex node) {
1960 const OsrValueOp& osr_value = Cast<OsrValueOp>(node);
1961 OperandGenerator g(this);
1962 Emit(kArchNop, g.DefineAsLocation(
1963 node, linkage()->GetOsrValueLocation(osr_value.index)));
1964}
1965
1966void InstructionSelectorT::VisitPhi(OpIndex node) {
1967 const Operation& op = Get(node);
1969 PhiInstruction* phi = instruction_zone()->template New<PhiInstruction>(
1971 static_cast<size_t>(op.input_count));
1973 for (size_t i = 0; i < op.input_count; ++i) {
1974 OpIndex input = op.input(i);
1975 MarkAsUsed(input);
1976 phi->SetInput(i, GetVirtualRegister(input));
1977 }
1978}
1979
1980void InstructionSelectorT::VisitProjection(OpIndex node) {
1981 const ProjectionOp& projection = this->Get(node).Cast<ProjectionOp>();
1982 const Operation& value_op = this->Get(projection.input());
1983 if (value_op.Is<OverflowCheckedBinopOp>() ||
1984 value_op.Is<OverflowCheckedUnaryOp>() || value_op.Is<TryChangeOp>() ||
1985 value_op.Is<Word32PairBinopOp>()) {
1986 if (projection.index == 0u) {
1987 EmitIdentity(node);
1988 } else {
1989 DCHECK_EQ(1u, projection.index);
1990 MarkAsUsed(projection.input());
1991 }
1992 } else if (value_op.Is<DidntThrowOp>()) {
1993 // Nothing to do here?
1994 } else if (value_op.Is<CallOp>()) {
1995 // Call projections need to be behind the call's DidntThrow.
1996 UNREACHABLE();
1997 } else if (value_op.Is<AtomicWord32PairOp>()) {
1998 // Nothing to do here.
1999#if V8_ENABLE_WASM_DEINTERLEAVED_MEM_OPS
2000 } else if (value_op.Is<Simd128LoadPairDeinterleaveOp>()) {
2001 MarkAsUsed(projection.input());
2002#endif // V8_ENABLE_WASM_DEINTERLEAVED_MEM_OPS
2003 } else {
2004 UNIMPLEMENTED();
2005 }
2006}
2007
2008bool InstructionSelectorT::CanDoBranchIfOverflowFusion(OpIndex binop) {
2009 const Graph* graph = this->turboshaft_graph();
2010 DCHECK(graph->Get(binop).template Is<OverflowCheckedBinopOp>() ||
2011 graph->Get(binop).template Is<OverflowCheckedUnaryOp>());
2012
2013 // Getting the 1st projection. Projections are always emitted right after the
2014 // operation, in ascending order.
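 // For example (an illustrative graph layout, not taken from a real trace):
 // if {binop} sits at index i, Projection[0] sits at i+1 and, if present,
 // Projection[1] at i+2, so graph->NextIndex(binop) is exactly the first
 // projection.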
2015 OpIndex projection0_index = graph->NextIndex(binop);
2016 const ProjectionOp& projection0 =
2017 graph->Get(projection0_index).Cast<ProjectionOp>();
2018 DCHECK_EQ(projection0.index, 0);
2019
2020 if (IsDefined(projection0_index)) {
2021 // In Turboshaft, this can only happen if {projection0_index} has already
2022 // been eagerly scheduled somewhere else, like in
2023 // TryPrepareScheduleFirstProjection.
2024 return true;
2025 }
2026
2027 if (projection0.saturated_use_count.IsOne()) {
2028 // If the projection has a single use, it is the following tuple, so we
2029 // don't care about the value, and can do branch-if-overflow fusion.
2030 DCHECK(turboshaft_uses(projection0_index).size() == 1 &&
2031 graph->Get(turboshaft_uses(projection0_index)[0]).Is<TupleOp>());
2032 return true;
2033 }
2034
2035 if (this->block(schedule_, binop) != current_block_) {
2036 // {binop} is not supposed to be defined in the current block, so let's not
2037 // pull it in this block (the checks would need to be stronger, and it's
2038 // unlikely that it's doable because of effect levels and all).
2039 return false;
2040 }
2041
2042 // We now need to make sure that all uses of {projection0} are already
2043 // defined, which will imply that it's fine to define {projection0} and
2044 // {binop} now.
2045 for (OpIndex use : turboshaft_uses(projection0_index)) {
2046 if (this->Get(use).template Is<TupleOp>()) {
2047 // The Tuple won't have any uses since it would have to be accessed
2048 // through Projections, and Projections on Tuples return the original
2049 // Projection instead (see Assembler::ReduceProjection in
2050 // turboshaft/assembler.h).
2051 DCHECK(this->Get(use).saturated_use_count.IsZero());
2052 continue;
2053 }
2054 if (IsDefined(use)) continue;
2055 if (this->block(schedule_, use) != current_block_) {
2056 // {use} is in a later block, so it should already have been visited. Note
2057 // that operations that don't produce values are not marked as Defined,
2058 // like Return for instance, so it's possible that {use} has been visited
2059 // but the previous `IsDefined` check didn't match.
2060
2061#ifdef DEBUG
2062 if (this->block(schedule_, use)->index() < current_block_->index()) {
2063 // If {use} is in a previous block, then it has to be a loop Phi that
2064 // uses {projection0} as its backedge input. In that case, it's fine to
2065 // schedule the binop right now, even though it's after the use of its
2066 // 1st projection (since the use is conceptually after rather than
2067 // before because it goes through a backedge).
2068 DCHECK(this->Get(use).template Is<PhiOp>());
2069 DCHECK_EQ(this->Get(use).template Cast<PhiOp>().input(1),
2070 projection0_index);
2071 }
2072#endif
2073
2074 continue;
2075 }
2076
2077 if (this->Get(use).template Is<PhiOp>()) {
2079 // If {projection0} is used by a Phi in the current block, then it has to
2080 // be a loop phi, and {projection0} has to be its backedge value. This
2081 // doesn't prevent scheduling {projection0} now, since anyways it
2082 // necessarily needs to be scheduled after the Phi.
2083 DCHECK(current_block_->IsLoop());
2084 continue;
2085 }
2086
2087 // {use} is not defined yet (and is not a special case), which means that
2088 // {projection0} has a use that comes before {binop}, and we thus can't fuse
2089 // binop with a branch to do a branch-if-overflow.
2090 return false;
2091 }
2092
2093 VisitProjection(projection0_index);
2094 return true;
2095}
2096
2097void InstructionSelectorT::VisitConstant(OpIndex node) {
2098 // We must emit a NOP here because every live range needs a defining
2099 // instruction in the register allocator.
2100 OperandGenerator g(this);
2101 Emit(kArchNop, g.DefineAsConstant(node));
2102}
2103
2107
2108void InstructionSelectorT::VisitCall(OpIndex node, Block* handler) {
2109 OperandGenerator g(this);
2110 const CallOp& call_op = Cast<CallOp>(node);
2111 const CallDescriptor* call_descriptor = call_op.descriptor->descriptor;
2112 SaveFPRegsMode mode = call_descriptor->NeedsCallerSavedFPRegisters()
2113 ? SaveFPRegsMode::kSave
2114 : SaveFPRegsMode::kIgnore;
2115
2116 if (call_descriptor->NeedsCallerSavedRegisters()) {
2117 Emit(kArchSaveCallerRegisters | MiscField::encode(static_cast<int>(mode)),
2118 g.NoOutput());
2119 }
2120
2121 FrameStateDescriptor* frame_state_descriptor = nullptr;
2122 bool needs_frame_state = false;
2123 if (call_descriptor->NeedsFrameState()) {
2124 needs_frame_state = true;
2125 frame_state_descriptor =
2126 GetFrameStateDescriptor(call_op.frame_state().value());
2127 }
2128
2129 CallBuffer buffer(zone(), call_descriptor, frame_state_descriptor);
2130 CallDescriptor::Flags flags = call_descriptor->flags();
2131
2132 // Compute InstructionOperands for inputs and outputs.
2133 // TODO(turbofan): on some architectures it's probably better to use
2134 // the code object in a register if there are multiple uses of it.
2135 // Improve constant pool and the heuristics in the register allocator
2136 // for where to emit constants.
2137 CallBufferFlags call_buffer_flags(kCallCodeImmediate | kCallAddressImmediate);
2138 if (flags & CallDescriptor::kFixedTargetRegister) {
2139 call_buffer_flags |= kCallFixedTargetRegister;
2140 }
2141 InitializeCallBuffer(node, &buffer, call_buffer_flags, call_op.callee(),
2142 call_op.frame_state(), call_op.arguments(),
2143 static_cast<int>(call_op.results_rep().size()));
2144
2145 EmitPrepareArguments(&buffer.pushed_nodes, call_descriptor, node);
2147
2149
2150#if V8_ENABLE_WEBASSEMBLY
2151 if (call_descriptor->IsIndirectWasmFunctionCall()) {
2153 g.UseImmediate64(call_descriptor->signature_hash()));
2154 }
2155#endif
2156
2157 if (call_descriptor->RequiresEntrypointTagForCall()) {
2158 DCHECK(!call_descriptor->IsJSFunctionCall());
2160 g.TempImmediate(call_descriptor->shifted_tag()));
2161 } else if (call_descriptor->IsJSFunctionCall()) {
2162 // For JSFunctions we need to know the number of pushed parameters during
2163 // code generation.
2164 uint32_t parameter_count =
2165 static_cast<uint32_t>(buffer.pushed_nodes.size());
2167 }
2168
2169 // Pass label of exception handler block.
2170 if (handler) {
2172 buffer.instruction_args.push_back(g.Label(handler));
2173 } else {
2174 if (call_op.descriptor->lazy_deopt_on_throw == LazyDeoptOnThrow::kYes) {
2178 }
2179 }
2180
2181 // Select the appropriate opcode based on the call type.
2182 InstructionCode opcode;
2183 switch (call_descriptor->kind()) {
2184 case CallDescriptor::kCallAddress: {
2185 int gp_param_count =
2186 static_cast<int>(call_descriptor->GPParameterCount());
2187 int fp_param_count =
2188 static_cast<int>(call_descriptor->FPParameterCount());
2189#if ABI_USES_FUNCTION_DESCRIPTORS
2190 // Highest fp_param_count bit is used on AIX to indicate if a CFunction
2191 // call has function descriptor or not.
2193 if (!call_descriptor->NoFunctionDescriptor()) {
2194 fp_param_count |= 1 << kHasFunctionDescriptorBitShift;
2195 }
2196#endif
2197 opcode = needs_frame_state ? kArchCallCFunctionWithFrameState
2198 : kArchCallCFunction;
2199 opcode |= ParamField::encode(gp_param_count) |
2200 FPParamField::encode(fp_param_count);
2201 break;
2202 }
2203 case CallDescriptor::kCallCodeObject:
2204 opcode = EncodeCallDescriptorFlags(kArchCallCodeObject, flags);
2205 break;
2206 case CallDescriptor::kCallJSFunction:
2207 opcode = EncodeCallDescriptorFlags(kArchCallJSFunction, flags);
2208 break;
2209#if V8_ENABLE_WEBASSEMBLY
2210 case CallDescriptor::kCallWasmCapiFunction:
2211 case CallDescriptor::kCallWasmFunction:
2212 case CallDescriptor::kCallWasmImportWrapper:
2213 DCHECK(this->IsRelocatableWasmConstant(call_op.callee()));
2214 opcode = EncodeCallDescriptorFlags(kArchCallWasmFunction, flags);
2215 break;
2216 case CallDescriptor::kCallWasmFunctionIndirect:
2217 DCHECK(!this->IsRelocatableWasmConstant(call_op.callee()));
2218 opcode = EncodeCallDescriptorFlags(kArchCallWasmFunctionIndirect, flags);
2219 break;
2220#endif // V8_ENABLE_WEBASSEMBLY
2221 case CallDescriptor::kCallBuiltinPointer:
2222 opcode = EncodeCallDescriptorFlags(kArchCallBuiltinPointer, flags);
2223 break;
2224 }
2225
2226 // Emit the call instruction.
2227 size_t const output_count = buffer.outputs.size();
2228 auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
2229 Instruction* call_instr =
2230 Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
2231 &buffer.instruction_args.front());
2232 if (instruction_selection_failed()) return;
2233 call_instr->MarkAsCall();
2234
2235 EmitPrepareResults(&(buffer.output_nodes), call_descriptor, node);
2236
2237 if (call_descriptor->NeedsCallerSavedRegisters()) {
2238 Emit(
2239 kArchRestoreCallerRegisters | MiscField::encode(static_cast<int>(mode)),
2240 g.NoOutput());
2241 }
2242}
2243
2244void InstructionSelectorT::VisitTailCall(OpIndex node) {
2245 OperandGenerator g(this);
2246
2247 const TailCallOp& call_op = Cast<TailCallOp>(node);
2248 auto caller = linkage()->GetIncomingDescriptor();
2249 auto callee = call_op.descriptor->descriptor;
2250 DCHECK(caller->CanTailCall(callee));
2251 const int stack_param_delta = callee->GetStackParameterDelta(caller);
2252 CallBuffer buffer(zone(), callee, nullptr);
2253
2254 // Compute InstructionOperands for inputs and outputs.
2255 CallBufferFlags flags(kCallCodeImmediate | kCallTail);
2256 if (IsTailCallAddressImmediate()) {
2257 flags |= kCallAddressImmediate;
2258 }
2259 if (callee->flags() & CallDescriptor::kFixedTargetRegister) {
2260 flags |= kCallFixedTargetRegister;
2261 }
2262 InitializeCallBuffer(node, &buffer, flags, call_op.callee(),
2264 static_cast<int>(call_op.outputs_rep().size()),
2265 stack_param_delta);
2266 UpdateMaxPushedArgumentCount(stack_param_delta);
2267
2268 // Select the appropriate opcode based on the call type.
2269 InstructionCode opcode;
2270 InstructionOperandVector temps(zone());
2271 switch (callee->kind()) {
2272 case CallDescriptor::kCallCodeObject:
2273 opcode = kArchTailCallCodeObject;
2274 break;
2275 case CallDescriptor::kCallAddress:
2276 DCHECK(!caller->IsJSFunctionCall());
2277 opcode = kArchTailCallAddress;
2278 break;
2279#if V8_ENABLE_WEBASSEMBLY
2280 case CallDescriptor::kCallWasmFunction:
2281 DCHECK(!caller->IsJSFunctionCall());
2282 DCHECK(this->IsRelocatableWasmConstant(call_op.callee()));
2283 opcode = kArchTailCallWasm;
2284 break;
2285 case CallDescriptor::kCallWasmFunctionIndirect:
2286 DCHECK(!caller->IsJSFunctionCall());
2287 DCHECK(!this->IsRelocatableWasmConstant(call_op.callee()));
2288 opcode = kArchTailCallWasmIndirect;
2289 break;
2290#endif // V8_ENABLE_WEBASSEMBLY
2291 default:
2292 UNREACHABLE();
2293 }
2294 opcode = EncodeCallDescriptorFlags(opcode, callee->flags());
2295
2296 Emit(kArchPrepareTailCall, g.NoOutput());
2297
2298#if V8_ENABLE_WEBASSEMBLY
2299 if (callee->IsIndirectWasmFunctionCall()) {
2301 g.UseImmediate64(callee->signature_hash()));
2302 }
2303#endif
2304
2305 if (callee->RequiresEntrypointTagForCall()) {
2306 buffer.instruction_args.push_back(g.TempImmediate(callee->shifted_tag()));
2307 }
2308
2309 // Add an immediate operand that represents the offset to the first slot
2310 // that is unused with respect to the stack pointer that has been updated
2311 // for the tail call instruction. Backends that pad arguments can write the
2312 // padding value at this offset from the stack.
2313 const int optional_padding_offset =
2314 callee->GetOffsetToFirstUnusedStackSlot() - 1;
2315 buffer.instruction_args.push_back(g.TempImmediate(optional_padding_offset));
2316
2317 const int first_unused_slot_offset =
2318 kReturnAddressStackSlotCount + stack_param_delta;
2319 buffer.instruction_args.push_back(g.TempImmediate(first_unused_slot_offset));
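 // For example (illustrative numbers): on a target where
 // kReturnAddressStackSlotCount is 1, a tail call whose callee takes two more
 // stack parameters than the caller received (stack_param_delta == 2) gets
 // first_unused_slot_offset == 3.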
2320
2321 // Emit the tailcall instruction.
2322 Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
2323 &buffer.instruction_args.front(), temps.size(),
2324 temps.empty() ? nullptr : &temps.front());
2325}
2326
2327void InstructionSelectorT::VisitGoto(Block* target) {
2328 // jump to the next block.
2329 OperandGenerator g(this);
2330 Emit(kArchJmp, g.NoOutput(), g.Label(target));
2331}
2332
2333void InstructionSelectorT::VisitReturn(OpIndex node) {
2334 const ReturnOp& ret = schedule()->Get(node).Cast<ReturnOp>();
2335
2336 OperandGenerator g(this);
2337 const size_t return_count = linkage()->GetIncomingDescriptor()->ReturnCount();
2338 const int input_count =
2339 return_count == 0 ? 1
2340 : (1 + static_cast<int>(ret.return_values().size()));
2341 DCHECK_GE(input_count, 1);
2342
2343 auto value_locations =
2344 zone()->template AllocateArray<InstructionOperand>(input_count);
2345 const Operation& pop_count = schedule()->Get(ret.pop_count());
2346 if (pop_count.Is<Opmask::kWord32Constant>() ||
2347 pop_count.Is<Opmask::kWord64Constant>()) {
2348 value_locations[0] = g.UseImmediate(ret.pop_count());
2349 } else {
2350 value_locations[0] = g.UseRegister(ret.pop_count());
2351 }
2352 for (size_t i = 0, return_value_idx = 0; i < return_count; ++i) {
2354 // Return values passed via frame slots have already been stored
2355 // on the stack by the GrowableStacksReducer.
2357 continue;
2358 }
2359 value_locations[return_value_idx + 1] =
2360 g.UseLocation(ret.return_values()[return_value_idx], loc);
2361 return_value_idx++;
2362 }
2363 Emit(kArchRet, 0, nullptr, input_count, value_locations);
2364}
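// Illustrative shape of the kArchRet emitted above (operand names are just
// for exposition): a return with a constant pop count of 3 and two register
// return values v0, v1 produces
//   kArchRet imm(3), loc(v0), loc(v1)
// i.e. input_count == 1 + number of emitted return values, with slot 0
// always holding the pop count.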
2365
2366void InstructionSelectorT::VisitBranch(OpIndex branch_node, Block* tbranch,
2367 Block* fbranch) {
2368 const BranchOp& branch = Cast<BranchOp>(branch_node);
2369 TryPrepareScheduleFirstProjection(branch.condition());
2370
2371 FlagsContinuation cont =
2372 FlagsContinuation::ForBranch(kNotEqual, tbranch, fbranch);
2373 VisitWordCompareZero(branch_node, branch.condition(), &cont);
2374}
2375
2376// When a DeoptimizeIf/DeoptimizeUnless/Branch depends on a BinopOverflow, the
2377// InstructionSelector can sometimes generate a fused instruction covering both
2378// the BinopOverflow and the DeoptIf/Branch, and the final emitted code will
2379// look like:
2380//
2381// r = BinopOverflow
2382// jo branch_target/deopt_target
2383//
2384// When this fusing fails, the final code looks like:
2385//
2386// r = BinopOverflow
2387// o = sete // sets overflow bit
2388// cmp o, 0
2389// jnz branch_target/deopt_target
2390//
2391// To be able to fuse the BinopOverflow and the DeoptIf/Branch, the 1st
2392// projection (Projection[0], which contains the actual result) must already be
2393// scheduled (and a few other conditions must be satisfied, see
2394// InstructionSelectorXXX::VisitWordCompareZero).
2395// TryPrepareScheduleFirstProjection is thus called from
2396// VisitDeoptimizeIf/VisitBranch and detects if the 1st
2397// projection could be scheduled now, and, if so, defines it.
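// For example (an illustrative Turboshaft-level pseudo-graph, not a real
// trace):
//
//   r   = Int32AddCheckOverflow(a, b)   // OverflowCheckedBinopOp
//   val = Projection(r, 0)              // the arithmetic result
//   ovf = Projection(r, 1)              // the overflow bit
//   Branch(ovf, if_true, if_false)
//
// Fusion can emit the add and a jump-on-overflow directly, provided {val}
// can still be defined at this point, which is what the function below
// checks.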
2398void InstructionSelectorT::TryPrepareScheduleFirstProjection(
2399 OpIndex maybe_projection) {
2400 // The DeoptimizeIf/Branch condition is not a projection.
2401 const ProjectionOp* projection = TryCast<ProjectionOp>(maybe_projection);
2402 if (!projection) return;
2403
2404 if (projection->index != 1u) {
2405 // The DeoptimizeIf/Branch isn't on the Projection[1]
2406 // (ie, not on the overflow bit of a BinopOverflow).
2407 return;
2408 }
2409
2410 DCHECK_EQ(projection->input_count, 1);
2411 OpIndex node = projection->input();
2412 if (block(schedule_, node) != current_block_) {
2413 // The projection input is not in the current block, so it shouldn't be
2414 // emitted now, so we don't need to eagerly schedule its Projection[0].
2415 return;
2416 }
2417
2418 auto* binop = TryCast<OverflowCheckedBinopOp>(node);
2419 auto* unop = TryCast<OverflowCheckedUnaryOp>(node);
2420 if (binop == nullptr && unop == nullptr) return;
2421 if (binop) {
2422 DCHECK(binop->kind == OverflowCheckedBinopOp::Kind::kSignedAdd ||
2423 binop->kind == OverflowCheckedBinopOp::Kind::kSignedSub ||
2424 binop->kind == OverflowCheckedBinopOp::Kind::kSignedMul);
2425 } else {
2426 DCHECK_EQ(unop->kind, OverflowCheckedUnaryOp::Kind::kAbs);
2427 }
2428
2429 OptionalOpIndex result = FindProjection(node, 0);
2430 if (!result.valid() || IsDefined(result.value())) {
2431 // No Projection(0), or it's already defined.
2432 return;
2433 }
2434
2435 if (block(schedule_, result.value()) != current_block_) {
2436 // {result} wasn't planned to be scheduled in {current_block_}. To
2437 // avoid adding checks to see if it can still be scheduled now, we
2438 // just bail out.
2439 return;
2440 }
2441
2442 // Checking if all uses of {result} that are in the current block have
2443 // already been Defined.
2444 // We also ignore Phi uses: if {result} is used in a Phi in the block in
2445 // which it is defined, this means that this block is a loop header, and
2446 // {result} flows back into it through the back edge. In this case, it's
2447 // normal to schedule {result} before the Phi that uses it.
2448 for (OpIndex use : turboshaft_uses(result.value())) {
2449 // We ignore TupleOp uses, since TupleOp don't lead to emitted machine
2450 // instructions and are just Turboshaft "meta operations".
2451 if (!Is<TupleOp>(use) && !IsDefined(use) &&
2452 block(schedule_, use) == current_block_ && !Is<PhiOp>(use)) {
2453 return;
2454 }
2455 }
2456
2457 // Visiting the projection now. Note that this relies on the fact that
2458 // VisitProjection doesn't Emit something: if it did, then we could be
2459 // Emitting something after a Branch, which is invalid (Branch can only
2460 // be at the end of a block, and the end of a block must always be a
2461 // block terminator). (remember that we emit operations in reverse order,
2462 // so because we are doing TryPrepareScheduleFirstProjection before
2463 // actually emitting the Branch, it would be after in the final
2464 // instruction sequence, not before)
2465 VisitProjection(result.value());
2466}
2467
2468void InstructionSelectorT::VisitDeoptimizeIf(OpIndex node) {
2469 const DeoptimizeIfOp& deopt = Cast<DeoptimizeIfOp>(node);
2470
2471 TryPrepareScheduleFirstProjection(deopt.condition());
2472
2473 FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
2474 deopt.negated ? kEqual : kNotEqual, deopt.parameters->reason(), node.id(),
2475 deopt.parameters->feedback(), deopt.frame_state());
2476 VisitWordCompareZero(node, deopt.condition(), &cont);
2477}
2478
2479void InstructionSelectorT::VisitSelect(OpIndex node) {
2480 const SelectOp& select = Cast<SelectOp>(node);
2481 DCHECK_EQ(select.input_count, 3);
2482 FlagsContinuation cont = FlagsContinuation::ForSelect(
2483 kNotEqual, node, select.vtrue(), select.vfalse());
2484 VisitWordCompareZero(node, select.cond(), &cont);
2485}
2486
2487void InstructionSelectorT::VisitTrapIf(OpIndex node) {
2488#if V8_ENABLE_WEBASSEMBLY
2489 const TrapIfOp& trap_if = Cast<TrapIfOp>(node);
2490 // FrameStates are only used for wasm traps inlined in JS. In that case the
2491 // trap node will be lowered (replaced) before instruction selection.
2492 // Therefore any TrapIf node has only one input.
2493 DCHECK_EQ(trap_if.input_count, 1);
2494 FlagsContinuation cont = FlagsContinuation::ForTrap(
2495 trap_if.negated ? kEqual : kNotEqual, trap_if.trap_id);
2496 VisitWordCompareZero(node, trap_if.condition(), &cont);
2497#else
2498 UNREACHABLE();
2499#endif
2500}
2501
2502void InstructionSelectorT::EmitIdentity(OpIndex node) {
2503 const Operation& op = Get(node);
2504 MarkAsUsed(op.input(0));
2505 MarkAsDefined(node);
2506 SetRename(node, op.input(0));
2507}
2508
2509void InstructionSelectorT::VisitDeoptimize(DeoptimizeReason reason,
2510 uint32_t node_id,
2511 FeedbackSource const& feedback,
2512 OpIndex frame_state) {
2514 AppendDeoptimizeArguments(&args, reason, node_id, feedback, frame_state);
2515 Emit(kArchDeoptimize, 0, nullptr, args.size(), &args.front(), 0, nullptr);
2516}
2517
2519 OperandGenerator g(this);
2520 Emit(kArchThrowTerminator, g.NoOutput());
2521}
2522
2523void InstructionSelectorT::VisitDebugBreak(OpIndex node) {
2524 OperandGenerator g(this);
2525 Emit(kArchDebugBreak, g.NoOutput());
2526}
2527
2529 OperandGenerator g(this);
2530 Emit(kArchDebugBreak, g.NoOutput());
2531}
2532
2533void InstructionSelectorT::VisitStaticAssert(OpIndex node) {
2534 const StaticAssertOp& op = Cast<StaticAssertOp>(node);
2535 DCHECK_EQ(op.input_count, 1);
2536 OpIndex asserted = op.condition();
2538 AllowHandleDereference allow_handle_dereference;
2539 StdoutStream os;
2540 os << Get(asserted);
2541 FATAL(
2542 "Expected Turbofan static assert to hold, but got non-true input:\n "
2543 "%s",
2544 op.source);
2545}
2546
2547void InstructionSelectorT::VisitComment(OpIndex node) {
2548 OperandGenerator g(this);
2549 const CommentOp& comment =
2550 this->turboshaft_graph()->Get(node).template Cast<CommentOp>();
2551 using ptrsize_int_t =
2552 std::conditional_t<kSystemPointerSize == 8, int64_t, int32_t>;
2554 Constant{reinterpret_cast<ptrsize_int_t>(comment.message)});
2555 Emit(kArchComment, 0, nullptr, 1, &operand);
2556}
2557
2558void InstructionSelectorT::VisitRetain(OpIndex node) {
2559 const RetainOp& retain = Cast<RetainOp>(node);
2560 OperandGenerator g(this);
2561 DCHECK_EQ(retain.input_count, 1);
2562 Emit(kArchNop, g.NoOutput(), g.UseAny(retain.retained()));
2563}
2564
2565void InstructionSelectorT::VisitControl(const Block* block) {
2566#ifdef DEBUG
2567 // SSA deconstruction requires targets of branches not to have phis.
2568 // Edge split form guarantees this property, but is more strict.
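 // For example (illustrative): if a branch jumped straight into a merge
 // block containing a phi, the phi's register move would have to live on the
 // edge itself; splitting the edge inserts an empty block that gives that
 // move a place to go, which is why branch targets are required to be
 // phi-free here.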
2569 if (auto successors =
2570 SuccessorBlocks(block->LastOperation(*turboshaft_graph()));
2571 successors.size() > 1) {
2572 for (Block* successor : successors) {
2573 if (successor->HasPhis(*turboshaft_graph())) {
2574 std::ostringstream str;
2575 str << "You might have specified merged variables for a label with "
2576 << "only one predecessor." << std::endl
2577 << "# Current Block: " << successor->index() << std::endl;
2578 FATAL("%s", str.str().c_str());
2579 }
2580 }
2581 }
2582#endif // DEBUG
2583 const Operation& op = block->LastOperation(*schedule());
2584 OpIndex node = schedule()->Index(op);
2585 int instruction_end = static_cast<int>(instructions_.size());
2586 switch (op.opcode) {
2587 case Opcode::kGoto:
2588 VisitGoto(op.Cast<GotoOp>().destination);
2589 break;
2590 case Opcode::kReturn:
2591 VisitReturn(node);
2592 break;
2593 case Opcode::kTailCall:
2594 VisitTailCall(node);
2595 break;
2596 case Opcode::kDeoptimize: {
2597 const DeoptimizeOp& deoptimize = op.Cast<DeoptimizeOp>();
2598 VisitDeoptimize(deoptimize.parameters->reason(), node.id(),
2599 deoptimize.parameters->feedback(),
2600 deoptimize.frame_state());
2601 break;
2602 }
2603 case Opcode::kBranch: {
2604 const BranchOp& branch = op.Cast<BranchOp>();
2605 Block* tbranch = branch.if_true;
2606 Block* fbranch = branch.if_false;
2607 if (tbranch == fbranch) {
2608 VisitGoto(tbranch);
2609 } else {
2610 VisitBranch(node, tbranch, fbranch);
2611 }
2612 break;
2613 }
2614 case Opcode::kSwitch: {
2615 const SwitchOp& swtch = op.Cast<SwitchOp>();
2616 int32_t min_value = std::numeric_limits<int32_t>::max();
2617 int32_t max_value = std::numeric_limits<int32_t>::min();
2618
2619 ZoneVector<CaseInfo> cases(swtch.cases.size(), zone());
2620 for (size_t i = 0; i < swtch.cases.size(); ++i) {
2621 const SwitchOp::Case& c = swtch.cases[i];
2622 cases[i] = CaseInfo{c.value, 0, c.destination};
2623 if (min_value > c.value) min_value = c.value;
2624 if (max_value < c.value) max_value = c.value;
2625 }
2626 SwitchInfo sw(std::move(cases), min_value, max_value, swtch.default_case);
2627 return VisitSwitch(node, sw);
2628 }
2629 case Opcode::kCheckException: {
2630 const CheckExceptionOp& check = op.Cast<CheckExceptionOp>();
2631 VisitCall(check.throwing_operation(), check.catch_block);
2632 VisitGoto(check.didnt_throw_block);
2633 return;
2634 }
2635 case Opcode::kUnreachable:
2636 return VisitUnreachable(node);
2637 case Opcode::kStaticAssert:
2638 return VisitStaticAssert(node);
2639 default: {
2640 const std::string op_string = op.ToString();
2641 PrintF("\033[31mNo ISEL support for: %s\033[m\n", op_string.c_str());
2642 FATAL("Unexpected operation #%d:%s", node.id(), op_string.c_str());
2643 }
2644 }
2645
2647 DCHECK(node.valid());
2648 int instruction_start = static_cast<int>(instructions_.size());
2649 instr_origins_[node.id()] = {instruction_start, instruction_end};
2650 }
2651}
2652
2653void InstructionSelectorT::VisitNode(OpIndex node) {
2654 tick_counter_->TickAndMaybeEnterSafepoint();
2655 const Operation& op = this->Get(node);
2656 using Opcode = Opcode;
2657 using Rep = RegisterRepresentation;
2658 switch (op.opcode) {
2659 case Opcode::kBranch:
2660 case Opcode::kGoto:
2661 case Opcode::kReturn:
2662 case Opcode::kTailCall:
2663 case Opcode::kUnreachable:
2664 case Opcode::kDeoptimize:
2665 case Opcode::kSwitch:
2666 case Opcode::kCheckException:
2667 // Those are already handled in VisitControl.
2669 break;
2670 case Opcode::kParameter: {
2671 // Parameters should always be scheduled to the first block.
2672 DCHECK_EQ(this->rpo_number(this->block(schedule(), node)).ToInt(), 0);
2673 MachineType type =
2675 MarkAsRepresentation(type.representation(), node);
2676 return VisitParameter(node);
2677 }
2678 case Opcode::kChange: {
2679 const ChangeOp& change = op.Cast<ChangeOp>();
2681 switch (change.kind) {
2682 case ChangeOp::Kind::kFloatConversion:
2683 if (change.from == Rep::Float64()) {
2684 DCHECK_EQ(change.to, Rep::Float32());
2685 return VisitTruncateFloat64ToFloat32(node);
2686 } else {
2687 DCHECK_EQ(change.from, Rep::Float32());
2688 DCHECK_EQ(change.to, Rep::Float64());
2689 return VisitChangeFloat32ToFloat64(node);
2690 }
2691 case ChangeOp::Kind::kSignedFloatTruncateOverflowToMin:
2692 case ChangeOp::Kind::kUnsignedFloatTruncateOverflowToMin: {
2693 using A = ChangeOp::Assumption;
2694 bool is_signed =
2695 change.kind == ChangeOp::Kind::kSignedFloatTruncateOverflowToMin;
2696 switch (multi(change.from, change.to, is_signed, change.assumption)) {
2697 case multi(Rep::Float32(), Rep::Word32(), true, A::kNoOverflow):
2698 case multi(Rep::Float32(), Rep::Word32(), true, A::kNoAssumption):
2699 return VisitTruncateFloat32ToInt32(node);
2700 case multi(Rep::Float32(), Rep::Word32(), false, A::kNoOverflow):
2701 case multi(Rep::Float32(), Rep::Word32(), false, A::kNoAssumption):
2702 return VisitTruncateFloat32ToUint32(node);
2703 case multi(Rep::Float64(), Rep::Word32(), true, A::kReversible):
2704 return VisitChangeFloat64ToInt32(node);
2705 case multi(Rep::Float64(), Rep::Word32(), false, A::kReversible):
2706 return VisitChangeFloat64ToUint32(node);
2707 case multi(Rep::Float64(), Rep::Word32(), true, A::kNoOverflow):
2708 return VisitRoundFloat64ToInt32(node);
2709 case multi(Rep::Float64(), Rep::Word32(), false, A::kNoAssumption):
2710 case multi(Rep::Float64(), Rep::Word32(), false, A::kNoOverflow):
2711 return VisitTruncateFloat64ToUint32(node);
2712 case multi(Rep::Float64(), Rep::Word64(), true, A::kReversible):
2713 return VisitChangeFloat64ToInt64(node);
2714 case multi(Rep::Float64(), Rep::Word64(), false, A::kReversible):
2715 return VisitChangeFloat64ToUint64(node);
2716 case multi(Rep::Float64(), Rep::Word64(), true, A::kNoOverflow):
2717 case multi(Rep::Float64(), Rep::Word64(), true, A::kNoAssumption):
2718 return VisitTruncateFloat64ToInt64(node);
2719 default:
2720 // Invalid combination.
2721 UNREACHABLE();
2722 }
2723
2724 UNREACHABLE();
2725 }
2726 case ChangeOp::Kind::kJSFloatTruncate:
2727 DCHECK_EQ(change.from, Rep::Float64());
2728 DCHECK_EQ(change.to, Rep::Word32());
2729 return VisitTruncateFloat64ToWord32(node);
2730 case ChangeOp::Kind::kJSFloat16TruncateWithBitcast:
2731 DCHECK_EQ(Rep::Float64(), change.from);
2732 DCHECK_EQ(Rep::Word32(), change.to);
2733 return VisitTruncateFloat64ToFloat16RawBits(node);
2734 case ChangeOp::Kind::kJSFloat16ChangeWithBitcast:
2735 DCHECK_EQ(Rep::Word32(), change.from);
2736 DCHECK_EQ(Rep::Float64(), change.to);
2737 return VisitChangeFloat16RawBitsToFloat64(node);
2738 case ChangeOp::Kind::kSignedToFloat:
2739 if (change.from == Rep::Word32()) {
2740 if (change.to == Rep::Float32()) {
2741 return VisitRoundInt32ToFloat32(node);
2742 } else {
2743 DCHECK_EQ(change.to, Rep::Float64());
2744 DCHECK_EQ(change.assumption, ChangeOp::Assumption::kNoAssumption);
2745 return VisitChangeInt32ToFloat64(node);
2746 }
2747 } else {
2748 DCHECK_EQ(change.from, Rep::Word64());
2749 if (change.to == Rep::Float32()) {
2750 return VisitRoundInt64ToFloat32(node);
2751 } else {
2752 DCHECK_EQ(change.to, Rep::Float64());
2753 if (change.assumption == ChangeOp::Assumption::kReversible) {
2754 return VisitChangeInt64ToFloat64(node);
2755 } else {
2756 return VisitRoundInt64ToFloat64(node);
2757 }
2758 }
2759 }
2760 UNREACHABLE();
2761 case ChangeOp::Kind::kUnsignedToFloat:
2762 switch (multi(change.from, change.to)) {
2763 case multi(Rep::Word32(), Rep::Float32()):
2764 return VisitRoundUint32ToFloat32(node);
2765 case multi(Rep::Word32(), Rep::Float64()):
2766 return VisitChangeUint32ToFloat64(node);
2767 case multi(Rep::Word64(), Rep::Float32()):
2768 return VisitRoundUint64ToFloat32(node);
2769 case multi(Rep::Word64(), Rep::Float64()):
2770 return VisitRoundUint64ToFloat64(node);
2771 default:
2772 UNREACHABLE();
2773 }
2774 case ChangeOp::Kind::kExtractHighHalf:
2775 DCHECK_EQ(change.from, Rep::Float64());
2776 DCHECK_EQ(change.to, Rep::Word32());
2777 return VisitFloat64ExtractHighWord32(node);
2778 case ChangeOp::Kind::kExtractLowHalf:
2779 DCHECK_EQ(change.from, Rep::Float64());
2780 DCHECK_EQ(change.to, Rep::Word32());
2781 return VisitFloat64ExtractLowWord32(node);
2782 case ChangeOp::Kind::kZeroExtend:
2783 DCHECK_EQ(change.from, Rep::Word32());
2784 DCHECK_EQ(change.to, Rep::Word64());
2785 return VisitChangeUint32ToUint64(node);
2786 case ChangeOp::Kind::kSignExtend:
2787 DCHECK_EQ(change.from, Rep::Word32());
2788 DCHECK_EQ(change.to, Rep::Word64());
2789 return VisitChangeInt32ToInt64(node);
2790 case ChangeOp::Kind::kTruncate:
2791 DCHECK_EQ(change.from, Rep::Word64());
2792 DCHECK_EQ(change.to, Rep::Word32());
2793 MarkAsWord32(node);
2794 return VisitTruncateInt64ToInt32(node);
2795 case ChangeOp::Kind::kBitcast:
2796 switch (multi(change.from, change.to)) {
2797 case multi(Rep::Word32(), Rep::Word64()):
2798 return VisitBitcastWord32ToWord64(node);
2799 case multi(Rep::Word32(), Rep::Float32()):
2800 return VisitBitcastInt32ToFloat32(node);
2801 case multi(Rep::Word64(), Rep::Float64()):
2802 return VisitBitcastInt64ToFloat64(node);
2803 case multi(Rep::Float32(), Rep::Word32()):
2804 return VisitBitcastFloat32ToInt32(node);
2805 case multi(Rep::Float64(), Rep::Word64()):
2806 return VisitBitcastFloat64ToInt64(node);
2807 default:
2808 UNREACHABLE();
2809 }
2810 }
2811 UNREACHABLE();
2812 }
2813 case Opcode::kTryChange: {
2814 const TryChangeOp& try_change = op.Cast<TryChangeOp>();
2816 DCHECK(try_change.kind ==
2817 TryChangeOp::Kind::kSignedFloatTruncateOverflowUndefined ||
2818 try_change.kind ==
2819 TryChangeOp::Kind::kUnsignedFloatTruncateOverflowUndefined);
2820 const bool is_signed =
2821 try_change.kind ==
2822 TryChangeOp::Kind::kSignedFloatTruncateOverflowUndefined;
2823 switch (multi(try_change.from, try_change.to, is_signed)) {
2824 case multi(Rep::Float64(), Rep::Word64(), true):
2825 return VisitTryTruncateFloat64ToInt64(node);
2826 case multi(Rep::Float64(), Rep::Word64(), false):
2827 return VisitTryTruncateFloat64ToUint64(node);
2828 case multi(Rep::Float64(), Rep::Word32(), true):
2829 return VisitTryTruncateFloat64ToInt32(node);
2830 case multi(Rep::Float64(), Rep::Word32(), false):
2831 return VisitTryTruncateFloat64ToUint32(node);
2832 case multi(Rep::Float32(), Rep::Word64(), true):
2833 return VisitTryTruncateFloat32ToInt64(node);
2834 case multi(Rep::Float32(), Rep::Word64(), false):
2835 return VisitTryTruncateFloat32ToUint64(node);
2836 default:
2837 UNREACHABLE();
2838 }
2839 UNREACHABLE();
2840 }
2841 case Opcode::kConstant: {
2842 const ConstantOp& constant = op.Cast<ConstantOp>();
2843 switch (constant.kind) {
2844 case ConstantOp::Kind::kWord32:
2845 case ConstantOp::Kind::kWord64:
2846 case ConstantOp::Kind::kSmi:
2847 case ConstantOp::Kind::kTaggedIndex:
2848 case ConstantOp::Kind::kExternal:
2849 break;
2850 case ConstantOp::Kind::kFloat32:
2851 MarkAsFloat32(node);
2852 break;
2853 case ConstantOp::Kind::kFloat64:
2854 MarkAsFloat64(node);
2855 break;
2856 case ConstantOp::Kind::kHeapObject:
2857 case ConstantOp::Kind::kTrustedHeapObject:
2858 MarkAsTagged(node);
2859 break;
2860 case ConstantOp::Kind::kCompressedHeapObject:
2861 MarkAsCompressed(node);
2862 break;
2863 case ConstantOp::Kind::kNumber:
2864 if (!IsSmiDouble(constant.number().get_scalar())) MarkAsTagged(node);
2865 break;
2866 case ConstantOp::Kind::kRelocatableWasmCall:
2867 case ConstantOp::Kind::kRelocatableWasmStubCall:
2868 case ConstantOp::Kind::kRelocatableWasmCanonicalSignatureId:
2869 case ConstantOp::Kind::kRelocatableWasmIndirectCallTarget:
2870 break;
2871 }
2872 VisitConstant(node);
2873 break;
2874 }
2875 case Opcode::kWordUnary: {
2876 const WordUnaryOp& unop = op.Cast<WordUnaryOp>();
2877 if (unop.rep == WordRepresentation::Word32()) {
2878 MarkAsWord32(node);
2879 switch (unop.kind) {
2880 case WordUnaryOp::Kind::kReverseBytes:
2881 return VisitWord32ReverseBytes(node);
2882 case WordUnaryOp::Kind::kCountLeadingZeros:
2883 return VisitWord32Clz(node);
2884 case WordUnaryOp::Kind::kCountTrailingZeros:
2885 return VisitWord32Ctz(node);
2886 case WordUnaryOp::Kind::kPopCount:
2887 return VisitWord32Popcnt(node);
2888 case WordUnaryOp::Kind::kSignExtend8:
2889 return VisitSignExtendWord8ToInt32(node);
2890 case WordUnaryOp::Kind::kSignExtend16:
2891 return VisitSignExtendWord16ToInt32(node);
2892 }
2893 } else {
2895 MarkAsWord64(node);
2896 switch (unop.kind) {
2897 case WordUnaryOp::Kind::kReverseBytes:
2898 return VisitWord64ReverseBytes(node);
2899 case WordUnaryOp::Kind::kCountLeadingZeros:
2900 return VisitWord64Clz(node);
2901 case WordUnaryOp::Kind::kCountTrailingZeros:
2902 return VisitWord64Ctz(node);
2903 case WordUnaryOp::Kind::kPopCount:
2904 return VisitWord64Popcnt(node);
2905 case WordUnaryOp::Kind::kSignExtend8:
2906 return VisitSignExtendWord8ToInt64(node);
2907 case WordUnaryOp::Kind::kSignExtend16:
2908 return VisitSignExtendWord16ToInt64(node);
2909 }
2910 }
2911 UNREACHABLE();
2912 }
2913 case Opcode::kWordBinop: {
2914 const WordBinopOp& binop = op.Cast<WordBinopOp>();
2915 if (binop.rep == WordRepresentation::Word32()) {
2916 MarkAsWord32(node);
2917 switch (binop.kind) {
2918 case WordBinopOp::Kind::kAdd:
2919 return VisitInt32Add(node);
2920 case WordBinopOp::Kind::kMul:
2921 return VisitInt32Mul(node);
2922 case WordBinopOp::Kind::kSignedMulOverflownBits:
2923 return VisitInt32MulHigh(node);
2924 case WordBinopOp::Kind::kUnsignedMulOverflownBits:
2925 return VisitUint32MulHigh(node);
2926 case WordBinopOp::Kind::kBitwiseAnd:
2927 return VisitWord32And(node);
2928 case WordBinopOp::Kind::kBitwiseOr:
2929 return VisitWord32Or(node);
2930 case WordBinopOp::Kind::kBitwiseXor:
2931 return VisitWord32Xor(node);
2932 case WordBinopOp::Kind::kSub:
2933 return VisitInt32Sub(node);
2934 case WordBinopOp::Kind::kSignedDiv:
2935 return VisitInt32Div(node);
2936 case WordBinopOp::Kind::kUnsignedDiv:
2937 return VisitUint32Div(node);
2938 case WordBinopOp::Kind::kSignedMod:
2939 return VisitInt32Mod(node);
2940 case WordBinopOp::Kind::kUnsignedMod:
2941 return VisitUint32Mod(node);
2942 }
2943 } else {
2945 MarkAsWord64(node);
2946 switch (binop.kind) {
2947 case WordBinopOp::Kind::kAdd:
2948 return VisitInt64Add(node);
2949 case WordBinopOp::Kind::kMul:
2950 return VisitInt64Mul(node);
2951 case WordBinopOp::Kind::kSignedMulOverflownBits:
2952 return VisitInt64MulHigh(node);
2953 case WordBinopOp::Kind::kUnsignedMulOverflownBits:
2954 return VisitUint64MulHigh(node);
2955 case WordBinopOp::Kind::kBitwiseAnd:
2956 return VisitWord64And(node);
2957 case WordBinopOp::Kind::kBitwiseOr:
2958 return VisitWord64Or(node);
2959 case WordBinopOp::Kind::kBitwiseXor:
2960 return VisitWord64Xor(node);
2961 case WordBinopOp::Kind::kSub:
2962 return VisitInt64Sub(node);
2963 case WordBinopOp::Kind::kSignedDiv:
2964 return VisitInt64Div(node);
2965 case WordBinopOp::Kind::kUnsignedDiv:
2966 return VisitUint64Div(node);
2967 case WordBinopOp::Kind::kSignedMod:
2968 return VisitInt64Mod(node);
2969 case WordBinopOp::Kind::kUnsignedMod:
2970 return VisitUint64Mod(node);
2971 }
2972 }
2973 UNREACHABLE();
2974 }
2975 case Opcode::kFloatUnary: {
2976 const auto& unop = op.Cast<FloatUnaryOp>();
2977 if (unop.rep == Rep::Float32()) {
2978 MarkAsFloat32(node);
2979 switch (unop.kind) {
2980 case FloatUnaryOp::Kind::kAbs:
2981 return VisitFloat32Abs(node);
2982 case FloatUnaryOp::Kind::kNegate:
2983 return VisitFloat32Neg(node);
2984 case FloatUnaryOp::Kind::kRoundDown:
2985 return VisitFloat32RoundDown(node);
2986 case FloatUnaryOp::Kind::kRoundUp:
2987 return VisitFloat32RoundUp(node);
2988 case FloatUnaryOp::Kind::kRoundToZero:
2989 return VisitFloat32RoundTruncate(node);
2990 case FloatUnaryOp::Kind::kRoundTiesEven:
2991 return VisitFloat32RoundTiesEven(node);
2992 case FloatUnaryOp::Kind::kSqrt:
2993 return VisitFloat32Sqrt(node);
2994 // Those operations are only supported on 64 bit.
2995 case FloatUnaryOp::Kind::kSilenceNaN:
2996 case FloatUnaryOp::Kind::kLog:
2997 case FloatUnaryOp::Kind::kLog2:
2998 case FloatUnaryOp::Kind::kLog10:
2999 case FloatUnaryOp::Kind::kLog1p:
3000 case FloatUnaryOp::Kind::kCbrt:
3001 case FloatUnaryOp::Kind::kExp:
3002 case FloatUnaryOp::Kind::kExpm1:
3003 case FloatUnaryOp::Kind::kSin:
3004 case FloatUnaryOp::Kind::kCos:
3005 case FloatUnaryOp::Kind::kSinh:
3006 case FloatUnaryOp::Kind::kCosh:
3007 case FloatUnaryOp::Kind::kAcos:
3008 case FloatUnaryOp::Kind::kAsin:
3009 case FloatUnaryOp::Kind::kAsinh:
3010 case FloatUnaryOp::Kind::kAcosh:
3011 case FloatUnaryOp::Kind::kTan:
3012 case FloatUnaryOp::Kind::kTanh:
3013 case FloatUnaryOp::Kind::kAtan:
3014 case FloatUnaryOp::Kind::kAtanh:
3015 UNREACHABLE();
3016 }
3017 } else {
3018 DCHECK_EQ(unop.rep, Rep::Float64());
3019 MarkAsFloat64(node);
3020 switch (unop.kind) {
3021 case FloatUnaryOp::Kind::kAbs:
3022 return VisitFloat64Abs(node);
3023 case FloatUnaryOp::Kind::kNegate:
3024 return VisitFloat64Neg(node);
3025 case FloatUnaryOp::Kind::kSilenceNaN:
3026 return VisitFloat64SilenceNaN(node);
3027 case FloatUnaryOp::Kind::kRoundDown:
3028 return VisitFloat64RoundDown(node);
3029 case FloatUnaryOp::Kind::kRoundUp:
3030 return VisitFloat64RoundUp(node);
3031 case FloatUnaryOp::Kind::kRoundToZero:
3032 return VisitFloat64RoundTruncate(node);
3033 case FloatUnaryOp::Kind::kRoundTiesEven:
3034 return VisitFloat64RoundTiesEven(node);
3035 case FloatUnaryOp::Kind::kLog:
3036 return VisitFloat64Log(node);
3037 case FloatUnaryOp::Kind::kLog2:
3038 return VisitFloat64Log2(node);
3039 case FloatUnaryOp::Kind::kLog10:
3040 return VisitFloat64Log10(node);
3041 case FloatUnaryOp::Kind::kLog1p:
3042 return VisitFloat64Log1p(node);
3043 case FloatUnaryOp::Kind::kSqrt:
3044 return VisitFloat64Sqrt(node);
3045 case FloatUnaryOp::Kind::kCbrt:
3046 return VisitFloat64Cbrt(node);
3047 case FloatUnaryOp::Kind::kExp:
3048 return VisitFloat64Exp(node);
3049 case FloatUnaryOp::Kind::kExpm1:
3050 return VisitFloat64Expm1(node);
3051 case FloatUnaryOp::Kind::kSin:
3052 return VisitFloat64Sin(node);
3053 case FloatUnaryOp::Kind::kCos:
3054 return VisitFloat64Cos(node);
3055 case FloatUnaryOp::Kind::kSinh:
3056 return VisitFloat64Sinh(node);
3057 case FloatUnaryOp::Kind::kCosh:
3058 return VisitFloat64Cosh(node);
3059 case FloatUnaryOp::Kind::kAcos:
3060 return VisitFloat64Acos(node);
3061 case FloatUnaryOp::Kind::kAsin:
3062 return VisitFloat64Asin(node);
3063 case FloatUnaryOp::Kind::kAsinh:
3064 return VisitFloat64Asinh(node);
3065 case FloatUnaryOp::Kind::kAcosh:
3066 return VisitFloat64Acosh(node);
3067 case FloatUnaryOp::Kind::kTan:
3068 return VisitFloat64Tan(node);
3069 case FloatUnaryOp::Kind::kTanh:
3070 return VisitFloat64Tanh(node);
3071 case FloatUnaryOp::Kind::kAtan:
3072 return VisitFloat64Atan(node);
3073 case FloatUnaryOp::Kind::kAtanh:
3074 return VisitFloat64Atanh(node);
3075 }
3076 }
3077 UNREACHABLE();
3078 }
3079 case Opcode::kFloatBinop: {
3080 const auto& binop = op.Cast<FloatBinopOp>();
3081 if (binop.rep == Rep::Float32()) {
3082 MarkAsFloat32(node);
3083 switch (binop.kind) {
3084 case FloatBinopOp::Kind::kAdd:
3085 return VisitFloat32Add(node);
3086 case FloatBinopOp::Kind::kSub:
3087 return VisitFloat32Sub(node);
3088 case FloatBinopOp::Kind::kMul:
3089 return VisitFloat32Mul(node);
3090 case FloatBinopOp::Kind::kDiv:
3091 return VisitFloat32Div(node);
3092 case FloatBinopOp::Kind::kMin:
3093 return VisitFloat32Min(node);
3094 case FloatBinopOp::Kind::kMax:
3095 return VisitFloat32Max(node);
3096 case FloatBinopOp::Kind::kMod:
3097 case FloatBinopOp::Kind::kPower:
3098 case FloatBinopOp::Kind::kAtan2:
3099 UNREACHABLE();
3100 }
3101 } else {
3102 DCHECK_EQ(binop.rep, Rep::Float64());
3103 MarkAsFloat64(node);
3104 switch (binop.kind) {
3105 case FloatBinopOp::Kind::kAdd:
3106 return VisitFloat64Add(node);
3107 case FloatBinopOp::Kind::kSub:
3108 return VisitFloat64Sub(node);
3109 case FloatBinopOp::Kind::kMul:
3110 return VisitFloat64Mul(node);
3111 case FloatBinopOp::Kind::kDiv:
3112 return VisitFloat64Div(node);
3113 case FloatBinopOp::Kind::kMod:
3114 return VisitFloat64Mod(node);
3115 case FloatBinopOp::Kind::kMin:
3116 return VisitFloat64Min(node);
3117 case FloatBinopOp::Kind::kMax:
3118 return VisitFloat64Max(node);
3119 case FloatBinopOp::Kind::kPower:
3120 return VisitFloat64Pow(node);
3121 case FloatBinopOp::Kind::kAtan2:
3122 return VisitFloat64Atan2(node);
3123 }
3124 }
3125 UNREACHABLE();
3126 }
3127 case Opcode::kOverflowCheckedBinop: {
3128 const auto& binop = op.Cast<OverflowCheckedBinopOp>();
3129 if (binop.rep == WordRepresentation::Word32()) {
3130 MarkAsWord32(node);
3131 switch (binop.kind) {
3132 case OverflowCheckedBinopOp::Kind::kSignedAdd:
3133 return VisitInt32AddWithOverflow(node);
3134 case OverflowCheckedBinopOp::Kind::kSignedMul:
3135 return VisitInt32MulWithOverflow(node);
3136 case OverflowCheckedBinopOp::Kind::kSignedSub:
3137 return VisitInt32SubWithOverflow(node);
3138 }
3139 } else {
3141 MarkAsWord64(node);
3142 switch (binop.kind) {
3143 case OverflowCheckedBinopOp::Kind::kSignedAdd:
3144 return VisitInt64AddWithOverflow(node);
3145 case OverflowCheckedBinopOp::Kind::kSignedMul:
3146 return VisitInt64MulWithOverflow(node);
3147 case OverflowCheckedBinopOp::Kind::kSignedSub:
3148 return VisitInt64SubWithOverflow(node);
3149 }
3150 }
3151 UNREACHABLE();
3152 }
3153 case Opcode::kOverflowCheckedUnary: {
3154 const auto& unop = op.Cast<OverflowCheckedUnaryOp>();
3155 if (unop.rep == WordRepresentation::Word32()) {
3156 MarkAsWord32(node);
3157 switch (unop.kind) {
3158 case OverflowCheckedUnaryOp::Kind::kAbs:
3159 return VisitInt32AbsWithOverflow(node);
3160 }
3161 } else {
3163 MarkAsWord64(node);
3164 switch (unop.kind) {
3165 case OverflowCheckedUnaryOp::Kind::kAbs:
3166 return VisitInt64AbsWithOverflow(node);
3167 }
3168 }
3169 UNREACHABLE();
3170 }
3171 case Opcode::kShift: {
3172 const auto& shift = op.Cast<ShiftOp>();
3173 if (shift.rep == RegisterRepresentation::Word32()) {
3174 MarkAsWord32(node);
3175 switch (shift.kind) {
3176 case ShiftOp::Kind::kShiftRightArithmeticShiftOutZeros:
3177 case ShiftOp::Kind::kShiftRightArithmetic:
3178 return VisitWord32Sar(node);
3179 case ShiftOp::Kind::kShiftRightLogical:
3180 return VisitWord32Shr(node);
3181 case ShiftOp::Kind::kShiftLeft:
3182 return VisitWord32Shl(node);
3183 case ShiftOp::Kind::kRotateRight:
3184 return VisitWord32Ror(node);
3185 case ShiftOp::Kind::kRotateLeft:
3186 return VisitWord32Rol(node);
3187 }
3188 } else {
3190 MarkAsWord64(node);
3191 switch (shift.kind) {
3192 case ShiftOp::Kind::kShiftRightArithmeticShiftOutZeros:
3193 case ShiftOp::Kind::kShiftRightArithmetic:
3194 return VisitWord64Sar(node);
3195 case ShiftOp::Kind::kShiftRightLogical:
3196 return VisitWord64Shr(node);
3197 case ShiftOp::Kind::kShiftLeft:
3198 return VisitWord64Shl(node);
3199 case ShiftOp::Kind::kRotateRight:
3200 return VisitWord64Ror(node);
3201 case ShiftOp::Kind::kRotateLeft:
3202 return VisitWord64Rol(node);
3203 }
3204 }
3205 UNREACHABLE();
3206 }
3207 case Opcode::kCall:
3208 // Process the call at `DidntThrow`, when we know if exceptions are caught
3209 // or not.
3210 break;
3211 case Opcode::kDidntThrow:
3212 if (current_block_->begin() == node) {
3213 DCHECK_EQ(current_block_->PredecessorCount(), 1);
3214 DCHECK(current_block_->LastPredecessor()
3215 ->LastOperation(*this->turboshaft_graph())
3216 .Is<CheckExceptionOp>());
3217 // In this case, the Call has been generated at the `CheckException`
3218 // already.
3219 } else {
3221 }
3222 EmitIdentity(node);
3223 break;
3224 case Opcode::kFrameConstant: {
3225 const auto& constant = op.Cast<FrameConstantOp>();
3226 using Kind = FrameConstantOp::Kind;
3227 OperandGenerator g(this);
3228 switch (constant.kind) {
3229 case Kind::kStackCheckOffset:
3230 Emit(kArchStackCheckOffset, g.DefineAsRegister(node));
3231 break;
3232 case Kind::kFramePointer:
3233 Emit(kArchFramePointer, g.DefineAsRegister(node));
3234 break;
3235 case Kind::kParentFramePointer:
3236 Emit(kArchParentFramePointer, g.DefineAsRegister(node));
3237 break;
3238 }
3239 break;
3240 }
3241 case Opcode::kStackPointerGreaterThan:
3242 return VisitStackPointerGreaterThan(node);
3243 case Opcode::kComparison: {
3244 const ComparisonOp& comparison = op.Cast<ComparisonOp>();
3245 using Kind = ComparisonOp::Kind;
3246 switch (multi(comparison.kind, comparison.rep)) {
3247 case multi(Kind::kEqual, Rep::Word32()):
3248 return VisitWord32Equal(node);
3249 case multi(Kind::kEqual, Rep::Word64()):
3250 return VisitWord64Equal(node);
3251 case multi(Kind::kEqual, Rep::Float32()):
3252 return VisitFloat32Equal(node);
3253 case multi(Kind::kEqual, Rep::Float64()):
3254 return VisitFloat64Equal(node);
3255 case multi(Kind::kEqual, Rep::Tagged()):
3256 if constexpr (Is64() && !COMPRESS_POINTERS_BOOL) {
3257 return VisitWord64Equal(node);
3258 }
3259 return VisitWord32Equal(node);
3260 case multi(Kind::kSignedLessThan, Rep::Word32()):
3261 return VisitInt32LessThan(node);
3262 case multi(Kind::kSignedLessThan, Rep::Word64()):
3263 return VisitInt64LessThan(node);
3264 case multi(Kind::kSignedLessThan, Rep::Float32()):
3265 return VisitFloat32LessThan(node);
3266 case multi(Kind::kSignedLessThan, Rep::Float64()):
3267 return VisitFloat64LessThan(node);
3268 case multi(Kind::kSignedLessThanOrEqual, Rep::Word32()):
3269 return VisitInt32LessThanOrEqual(node);
3270 case multi(Kind::kSignedLessThanOrEqual, Rep::Word64()):
3271 return VisitInt64LessThanOrEqual(node);
3272 case multi(Kind::kSignedLessThanOrEqual, Rep::Float32()):
3273 return VisitFloat32LessThanOrEqual(node);
3274 case multi(Kind::kSignedLessThanOrEqual, Rep::Float64()):
3275 return VisitFloat64LessThanOrEqual(node);
3276 case multi(Kind::kUnsignedLessThan, Rep::Word32()):
3277 return VisitUint32LessThan(node);
3278 case multi(Kind::kUnsignedLessThan, Rep::Word64()):
3279 return VisitUint64LessThan(node);
3280 case multi(Kind::kUnsignedLessThanOrEqual, Rep::Word32()):
3281 return VisitUint32LessThanOrEqual(node);
3282 case multi(Kind::kUnsignedLessThanOrEqual, Rep::Word64()):
3283 return VisitUint64LessThanOrEqual(node);
3284 default:
3285 UNREACHABLE();
3286 }
3287 UNREACHABLE();
3288 }
3289 case Opcode::kLoad: {
3290 const LoadOp& load = op.Cast<LoadOp>();
3291 MachineType loaded_type = load.machine_type();
3292 MarkAsRepresentation(loaded_type.representation(), node);
3293 if (load.kind.maybe_unaligned) {
3294 DCHECK(!load.kind.with_trap_handler);
3295 if (loaded_type.representation() == MachineRepresentation::kWord8 ||
3297 .IsUnalignedLoadSupported(loaded_type.representation())) {
3298 return VisitLoad(node);
3299 } else {
3300 return VisitUnalignedLoad(node);
3301 }
3302 } else if (load.kind.is_atomic) {
3303 if (load.result_rep == Rep::Word32()) {
3304 return VisitWord32AtomicLoad(node);
3305 } else {
3306 DCHECK_EQ(load.result_rep, Rep::Word64());
3307 return VisitWord64AtomicLoad(node);
3308 }
3309 } else if (load.kind.with_trap_handler) {
3310 DCHECK(!load.kind.maybe_unaligned);
3311 return VisitProtectedLoad(node);
3312 } else {
3313 return VisitLoad(node);
3314 }
3315 UNREACHABLE();
3316 }
3317 case Opcode::kStore: {
3318 const StoreOp& store = op.Cast<StoreOp>();
3321 if (store.kind.maybe_unaligned) {
3322 DCHECK(!store.kind.with_trap_handler);
3323 DCHECK_EQ(store.write_barrier, WriteBarrierKind::kNoWriteBarrier);
3324 if (rep == MachineRepresentation::kWord8 ||
3326 .IsUnalignedStoreSupported(rep)) {
3327 return VisitStore(node);
3328 } else {
3329 return VisitUnalignedStore(node);
3330 }
3331 } else if (store.kind.is_atomic) {
3332 if (store.stored_rep.SizeInBytes() == 8) {
3333 return VisitWord64AtomicStore(node);
3334 } else {
3335 DCHECK_LE(store.stored_rep.SizeInBytes(), 4);
3336 return VisitWord32AtomicStore(node);
3337 }
3338 } else if (store.kind.with_trap_handler) {
3339 DCHECK(!store.kind.maybe_unaligned);
3340 return VisitProtectedStore(node);
3341 } else {
3342 return VisitStore(node);
3343 }
3344 UNREACHABLE();
3345 }
3346 case Opcode::kTaggedBitcast: {
3348 switch (multi(cast.from, cast.to)) {
3349 case multi(Rep::Tagged(), Rep::Word32()):
3350 MarkAsWord32(node);
3351 if constexpr (Is64()) {
3352 DCHECK_EQ(cast.kind, TaggedBitcastOp::Kind::kSmi);
3354 return VisitBitcastSmiToWord(node);
3355 } else {
3356 return VisitBitcastTaggedToWord(node);
3357 }
3358 case multi(Rep::Tagged(), Rep::Word64()):
3359 MarkAsWord64(node);
3360 return VisitBitcastTaggedToWord(node);
3361 case multi(Rep::Word32(), Rep::Tagged()):
3362 case multi(Rep::Word64(), Rep::Tagged()):
3363 if (cast.kind == TaggedBitcastOp::Kind::kSmi) {
3365 return EmitIdentity(node);
3366 } else {
3367 MarkAsTagged(node);
3368 return VisitBitcastWordToTagged(node);
3369 }
3370 case multi(Rep::Compressed(), Rep::Word32()):
3371 MarkAsWord32(node);
3372 if (cast.kind == TaggedBitcastOp::Kind::kSmi) {
3373 return VisitBitcastSmiToWord(node);
3374 } else {
3375 return VisitBitcastTaggedToWord(node);
3376 }
3377 default:
3378 UNIMPLEMENTED();
3379 }
3380 }
3381 case Opcode::kPhi:
3382 MarkAsRepresentation(op.Cast<PhiOp>().rep, node);
3383 return VisitPhi(node);
3384 case Opcode::kProjection:
3385 return VisitProjection(node);
3386 case Opcode::kDeoptimizeIf:
3387 return VisitDeoptimizeIf(node);
3388#if V8_ENABLE_WEBASSEMBLY
3389 case Opcode::kTrapIf:
3390 return VisitTrapIf(node);
3391#endif // V8_ENABLE_WEBASSEMBLY
3392 case Opcode::kCatchBlockBegin:
3393 MarkAsTagged(node);
3394 return VisitIfException(node);
3395 case Opcode::kRetain:
3396 return VisitRetain(node);
3397 case Opcode::kOsrValue:
3398 MarkAsTagged(node);
3399 return VisitOsrValue(node);
3400 case Opcode::kStackSlot:
3401 return VisitStackSlot(node);
3402 case Opcode::kFrameState:
3403 // FrameState is covered as part of calls.
3404 UNREACHABLE();
3405 case Opcode::kLoadRootRegister:
3406 return VisitLoadRootRegister(node);
3407 case Opcode::kAssumeMap:
3408 // AssumeMap is used as a hint for optimization phases but does not
3409 // produce any code.
3410 return;
3411 case Opcode::kDebugBreak:
3412 return VisitDebugBreak(node);
3413 case Opcode::kAbortCSADcheck:
3414 return VisitAbortCSADcheck(node);
3415 case Opcode::kSelect: {
3416 const SelectOp& select = op.Cast<SelectOp>();
3417 // If there is a Select, then it should only be one that is supported by
3418 // the machine, and it should be meant to be implemented with cmove.
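 // For example (illustrative lowering, x64-flavoured): such a Select
 // typically becomes something like
 //   cmp cond, 0
 //   mov dst, vfalse
 //   cmovnz dst, vtrue
 // rather than a branch diamond.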
3419 DCHECK_EQ(select.implem, SelectOp::Implementation::kCMove);
3420 MarkAsRepresentation(select.rep, node);
3421 return VisitSelect(node);
3422 }
3423 case Opcode::kWord32PairBinop: {
3424 const Word32PairBinopOp& binop = op.Cast<Word32PairBinopOp>();
3425 MarkAsWord32(node);
3427 switch (binop.kind) {
3428 case Word32PairBinopOp::Kind::kAdd:
3429 return VisitInt32PairAdd(node);
3430 case Word32PairBinopOp::Kind::kSub:
3431 return VisitInt32PairSub(node);
3432 case Word32PairBinopOp::Kind::kMul:
3433 return VisitInt32PairMul(node);
3434 case Word32PairBinopOp::Kind::kShiftLeft:
3435 return VisitWord32PairShl(node);
3436 case Word32PairBinopOp::Kind::kShiftRightLogical:
3437 return VisitWord32PairShr(node);
3438 case Word32PairBinopOp::Kind::kShiftRightArithmetic:
3439 return VisitWord32PairSar(node);
3440 }
3441 UNREACHABLE();
3442 }
3443 case Opcode::kAtomicWord32Pair: {
3444 const AtomicWord32PairOp& atomic_op = op.Cast<AtomicWord32PairOp>();
3445 if (atomic_op.kind != AtomicWord32PairOp::Kind::kStore) {
3446 MarkAsWord32(node);
3448 }
3449 switch (atomic_op.kind) {
3450 case AtomicWord32PairOp::Kind::kAdd:
3451 return VisitWord32AtomicPairAdd(node);
3452 case AtomicWord32PairOp::Kind::kAnd:
3453 return VisitWord32AtomicPairAnd(node);
3454 case AtomicWord32PairOp::Kind::kCompareExchange:
3455 return VisitWord32AtomicPairCompareExchange(node);
3456 case AtomicWord32PairOp::Kind::kExchange:
3457 return VisitWord32AtomicPairExchange(node);
3458 case AtomicWord32PairOp::Kind::kLoad:
3459 return VisitWord32AtomicPairLoad(node);
3460 case AtomicWord32PairOp::Kind::kOr:
3461 return VisitWord32AtomicPairOr(node);
3462 case AtomicWord32PairOp::Kind::kSub:
3463 return VisitWord32AtomicPairSub(node);
3464 case AtomicWord32PairOp::Kind::kXor:
3465 return VisitWord32AtomicPairXor(node);
3466 case AtomicWord32PairOp::Kind::kStore:
3467 return VisitWord32AtomicPairStore(node);
3468 }
3469 }
3470 case Opcode::kBitcastWord32PairToFloat64:
3472 case Opcode::kAtomicRMW: {
3473 const AtomicRMWOp& atomic_op = op.Cast<AtomicRMWOp>();
3475 node);
3476 if (atomic_op.in_out_rep == Rep::Word32()) {
3477 switch (atomic_op.bin_op) {
3478 case AtomicRMWOp::BinOp::kAdd:
3479 return VisitWord32AtomicAdd(node);
3480 case AtomicRMWOp::BinOp::kSub:
3481 return VisitWord32AtomicSub(node);
3482 case AtomicRMWOp::BinOp::kAnd:
3483 return VisitWord32AtomicAnd(node);
3484 case AtomicRMWOp::BinOp::kOr:
3485 return VisitWord32AtomicOr(node);
3486 case AtomicRMWOp::BinOp::kXor:
3487 return VisitWord32AtomicXor(node);
3488 case AtomicRMWOp::BinOp::kExchange:
3489 return VisitWord32AtomicExchange(node);
3490 case AtomicRMWOp::BinOp::kCompareExchange:
3491 return VisitWord32AtomicCompareExchange(node);
3492 }
3493 } else {
3494 DCHECK_EQ(atomic_op.in_out_rep, Rep::Word64());
3495 switch (atomic_op.bin_op) {
3496 case AtomicRMWOp::BinOp::kAdd:
3497 return VisitWord64AtomicAdd(node);
3498 case AtomicRMWOp::BinOp::kSub:
3499 return VisitWord64AtomicSub(node);
3500 case AtomicRMWOp::BinOp::kAnd:
3501 return VisitWord64AtomicAnd(node);
3502 case AtomicRMWOp::BinOp::kOr:
3503 return VisitWord64AtomicOr(node);
3504 case AtomicRMWOp::BinOp::kXor:
3505 return VisitWord64AtomicXor(node);
3506 case AtomicRMWOp::BinOp::kExchange:
3507 return VisitWord64AtomicExchange(node);
3508 case AtomicRMWOp::BinOp::kCompareExchange:
3509 return VisitWord64AtomicCompareExchange(node);
3510 }
3511 }
3512 UNREACHABLE();
3513 }
3514 case Opcode::kMemoryBarrier:
3515 return VisitMemoryBarrier(node);
3516
3517 case Opcode::kComment:
3518 return VisitComment(node);
3519
3520#ifdef V8_ENABLE_WEBASSEMBLY
3521 case Opcode::kSimd128Constant: {
3522 const Simd128ConstantOp& constant = op.Cast<Simd128ConstantOp>();
3523 MarkAsSimd128(node);
3524 if (constant.IsZero()) return VisitS128Zero(node);
3525 return VisitS128Const(node);
3526 }
3527 case Opcode::kSimd128Unary: {
3528 const Simd128UnaryOp& unary = op.Cast<Simd128UnaryOp>();
3529 MarkAsSimd128(node);
3530 switch (unary.kind) {
3531#define VISIT_SIMD_UNARY(kind) \
3532 case Simd128UnaryOp::Kind::k##kind: \
3533 return Visit##kind(node);
3534 FOREACH_SIMD_128_UNARY_OPCODE(VISIT_SIMD_UNARY)
3535#undef VISIT_SIMD_UNARY
3536 }
3537 }
3538 case Opcode::kSimd128Reduce: {
3539 const Simd128ReduceOp& reduce = op.Cast<Simd128ReduceOp>();
3540 MarkAsSimd128(node);
3541 switch (reduce.kind) {
3542 case Simd128ReduceOp::Kind::kI8x16AddReduce:
3543 return VisitI8x16AddReduce(node);
3544 case Simd128ReduceOp::Kind::kI16x8AddReduce:
3545 return VisitI16x8AddReduce(node);
3546 case Simd128ReduceOp::Kind::kI32x4AddReduce:
3547 return VisitI32x4AddReduce(node);
3548 case Simd128ReduceOp::Kind::kI64x2AddReduce:
3549 return VisitI64x2AddReduce(node);
3550 case Simd128ReduceOp::Kind::kF32x4AddReduce:
3551 return VisitF32x4AddReduce(node);
3552 case Simd128ReduceOp::Kind::kF64x2AddReduce:
3553 return VisitF64x2AddReduce(node);
3554 }
3555 }
3556 case Opcode::kSimd128Binop: {
3557 const Simd128BinopOp& binop = op.Cast<Simd128BinopOp>();
3558 MarkAsSimd128(node);
3559 switch (binop.kind) {
3560#define VISIT_SIMD_BINOP(kind) \
3561 case Simd128BinopOp::Kind::k##kind: \
3562 return Visit##kind(node);
3563 FOREACH_SIMD_128_BINARY_OPCODE(VISIT_SIMD_BINOP)
3564#undef VISIT_SIMD_BINOP
3565 }
3566 }
3567 case Opcode::kSimd128Shift: {
3568 const Simd128ShiftOp& shift = op.Cast<Simd128ShiftOp>();
3569 MarkAsSimd128(node);
3570 switch (shift.kind) {
3571#define VISIT_SIMD_SHIFT(kind) \
3572 case Simd128ShiftOp::Kind::k##kind: \
3573 return Visit##kind(node);
3574 FOREACH_SIMD_128_SHIFT_OPCODE(VISIT_SIMD_SHIFT)
3575#undef VISIT_SIMD_SHIFT
3576 }
3577 }
3578 case Opcode::kSimd128Test: {
3579 const Simd128TestOp& test = op.Cast<Simd128TestOp>();
3580 MarkAsWord32(node);
3581 switch (test.kind) {
3582#define VISIT_SIMD_TEST(kind) \
3583 case Simd128TestOp::Kind::k##kind: \
3584 return Visit##kind(node);
3585 FOREACH_SIMD_128_TEST_OPCODE(VISIT_SIMD_TEST)
3586#undef VISIT_SIMD_TEST
3587 }
3588 }
3589 case Opcode::kSimd128Splat: {
3590 const Simd128SplatOp& splat = op.Cast<Simd128SplatOp>();
3591 MarkAsSimd128(node);
3592 switch (splat.kind) {
3593#define VISIT_SIMD_SPLAT(kind) \
3594 case Simd128SplatOp::Kind::k##kind: \
3595 return Visit##kind##Splat(node);
3596 FOREACH_SIMD_128_SPLAT_OPCODE(VISIT_SIMD_SPLAT)
3597#undef VISIT_SIMD_SPLAT
3598 }
3599 }
3600 case Opcode::kSimd128Shuffle: {
3601 MarkAsSimd128(node);
3602 const Simd128ShuffleOp& shuffle = op.Cast<Simd128ShuffleOp>();
3603 switch (shuffle.kind) {
3604 case Simd128ShuffleOp::Kind::kI8x2:
3605 return VisitI8x2Shuffle(node);
3606 case Simd128ShuffleOp::Kind::kI8x4:
3607 return VisitI8x4Shuffle(node);
3608 case Simd128ShuffleOp::Kind::kI8x8:
3609 return VisitI8x8Shuffle(node);
3610 case Simd128ShuffleOp::Kind::kI8x16:
3611 return VisitI8x16Shuffle(node);
3612 }
3613 }
3614 case Opcode::kSimd128ReplaceLane: {
3615 const Simd128ReplaceLaneOp& replace = op.Cast<Simd128ReplaceLaneOp>();
3616 MarkAsSimd128(node);
3617 switch (replace.kind) {
3618 case Simd128ReplaceLaneOp::Kind::kI8x16:
3619 return VisitI8x16ReplaceLane(node);
3620 case Simd128ReplaceLaneOp::Kind::kI16x8:
3621 return VisitI16x8ReplaceLane(node);
3622 case Simd128ReplaceLaneOp::Kind::kI32x4:
3623 return VisitI32x4ReplaceLane(node);
3624 case Simd128ReplaceLaneOp::Kind::kI64x2:
3625 return VisitI64x2ReplaceLane(node);
3626 case Simd128ReplaceLaneOp::Kind::kF16x8:
3627 return VisitF16x8ReplaceLane(node);
3628 case Simd128ReplaceLaneOp::Kind::kF32x4:
3629 return VisitF32x4ReplaceLane(node);
3630 case Simd128ReplaceLaneOp::Kind::kF64x2:
3631 return VisitF64x2ReplaceLane(node);
3632 }
3633 }
3634 case Opcode::kSimd128ExtractLane: {
3635 const Simd128ExtractLaneOp& extract = op.Cast<Simd128ExtractLaneOp>();
3636 switch (extract.kind) {
3637 case Simd128ExtractLaneOp::Kind::kI8x16S:
3638 MarkAsWord32(node);
3639 return VisitI8x16ExtractLaneS(node);
3640 case Simd128ExtractLaneOp::Kind::kI8x16U:
3641 MarkAsWord32(node);
3642 return VisitI8x16ExtractLaneU(node);
3643 case Simd128ExtractLaneOp::Kind::kI16x8S:
3644 MarkAsWord32(node);
3645 return VisitI16x8ExtractLaneS(node);
3646 case Simd128ExtractLaneOp::Kind::kI16x8U:
3647 MarkAsWord32(node);
3648 return VisitI16x8ExtractLaneU(node);
3649 case Simd128ExtractLaneOp::Kind::kI32x4:
3650 MarkAsWord32(node);
3651 return VisitI32x4ExtractLane(node);
3652 case Simd128ExtractLaneOp::Kind::kI64x2:
3653 MarkAsWord64(node);
3654 return VisitI64x2ExtractLane(node);
3655 case Simd128ExtractLaneOp::Kind::kF16x8:
3656 MarkAsFloat32(node);
3657 return VisitF16x8ExtractLane(node);
3658 case Simd128ExtractLaneOp::Kind::kF32x4:
3659 MarkAsFloat32(node);
3660 return VisitF32x4ExtractLane(node);
3661 case Simd128ExtractLaneOp::Kind::kF64x2:
3662 MarkAsFloat64(node);
3663 return VisitF64x2ExtractLane(node);
3664 }
3665 }
3666 case Opcode::kSimd128LoadTransform:
3667 MarkAsSimd128(node);
3668 return VisitLoadTransform(node);
3669 case Opcode::kSimd128LaneMemory: {
3670 const Simd128LaneMemoryOp& memory = op.Cast<Simd128LaneMemoryOp>();
3671 MarkAsSimd128(node);
3672 if (memory.mode == Simd128LaneMemoryOp::Mode::kLoad) {
3673 return VisitLoadLane(node);
3674 } else {
3675 DCHECK_EQ(memory.mode, Simd128LaneMemoryOp::Mode::kStore);
3676 return VisitStoreLane(node);
3677 }
3678 }
3679 case Opcode::kSimd128Ternary: {
3680 const Simd128TernaryOp& ternary = op.Cast<Simd128TernaryOp>();
3681 MarkAsSimd128(node);
3682 switch (ternary.kind) {
3683#define VISIT_SIMD_TERNARY(kind) \
3684 case Simd128TernaryOp::Kind::k##kind: \
3685 return Visit##kind(node);
3686 FOREACH_SIMD_128_TERNARY_OPCODE(VISIT_SIMD_TERNARY)
3687#undef VISIT_SIMD_TERNARY
3688 }
3689 }
3690
3691#if V8_ENABLE_WASM_DEINTERLEAVED_MEM_OPS
3692 case Opcode::kSimd128LoadPairDeinterleave: {
3693 OptionalOpIndex projection0 = FindProjection(node, 0);
3694 DCHECK(projection0.valid());
3695 MarkAsSimd128(projection0.value());
3696 OptionalOpIndex projection1 = FindProjection(node, 1);
3697 DCHECK(projection1.valid());
3698 MarkAsSimd128(projection1.value());
3699 return VisitSimd128LoadPairDeinterleave(node);
3700 }
3701#endif // V8_ENABLE_WASM_DEINTERLEAVED_MEM_OPS
3702
3703 // SIMD256
3704#if V8_ENABLE_WASM_SIMD256_REVEC
3705 case Opcode::kSimd256Constant: {
3706 const Simd256ConstantOp& constant = op.Cast<Simd256ConstantOp>();
3707 MarkAsSimd256(node);
3708 if (constant.IsZero()) return VisitS256Zero(node);
3709 return VisitS256Const(node);
3710 }
3711 case Opcode::kSimd256Extract128Lane: {
3712 MarkAsSimd128(node);
3713 return VisitExtractF128(node);
3714 }
3715 case Opcode::kSimd256LoadTransform: {
3716 MarkAsSimd256(node);
3717 return VisitSimd256LoadTransform(node);
3718 }
3719 case Opcode::kSimd256Unary: {
3720 const Simd256UnaryOp& unary = op.Cast<Simd256UnaryOp>();
3721 MarkAsSimd256(node);
3722 switch (unary.kind) {
3723#define VISIT_SIMD_256_UNARY(kind) \
3724 case Simd256UnaryOp::Kind::k##kind: \
3725 return Visit##kind(node);
3726 FOREACH_SIMD_256_UNARY_OPCODE(VISIT_SIMD_256_UNARY)
3727#undef VISIT_SIMD_256_UNARY
3728 }
3729 }
3730 case Opcode::kSimd256Binop: {
3731 const Simd256BinopOp& binop = op.Cast<Simd256BinopOp>();
3732 MarkAsSimd256(node);
3733 switch (binop.kind) {
3734#define VISIT_SIMD_BINOP(kind) \
3735 case Simd256BinopOp::Kind::k##kind: \
3736 return Visit##kind(node);
3737 FOREACH_SIMD_256_BINARY_OPCODE(VISIT_SIMD_BINOP)
3738#undef VISIT_SIMD_BINOP
3739 }
3740 }
3741 case Opcode::kSimd256Shift: {
3742 const Simd256ShiftOp& shift = op.Cast<Simd256ShiftOp>();
3743 MarkAsSimd256(node);
3744 switch (shift.kind) {
3745#define VISIT_SIMD_SHIFT(kind) \
3746 case Simd256ShiftOp::Kind::k##kind: \
3747 return Visit##kind(node);
3748 FOREACH_SIMD_256_SHIFT_OPCODE(VISIT_SIMD_SHIFT)
3749#undef VISIT_SIMD_SHIFT
3750 }
3751 }
3752 case Opcode::kSimd256Ternary: {
3753 const Simd256TernaryOp& ternary = op.Cast<Simd256TernaryOp>();
3754 MarkAsSimd256(node);
3755 switch (ternary.kind) {
3756#define VISIT_SIMD_256_TERNARY(kind) \
3757 case Simd256TernaryOp::Kind::k##kind: \
3758 return Visit##kind(node);
3759 FOREACH_SIMD_256_TERNARY_OPCODE(VISIT_SIMD_256_TERNARY)
3760#undef VISIT_SIMD_256_TERNARY
3761 }
3762 }
3763 case Opcode::kSimd256Splat: {
3764 const Simd256SplatOp& splat = op.Cast<Simd256SplatOp>();
3765 MarkAsSimd256(node);
3766 switch (splat.kind) {
3767#define VISIT_SIMD_SPLAT(kind) \
3768 case Simd256SplatOp::Kind::k##kind: \
3769 return Visit##kind##Splat(node);
3770 FOREACH_SIMD_256_SPLAT_OPCODE(VISIT_SIMD_SPLAT)
3771#undef VISIT_SIMD_SPLAT
3772 }
3773 }
3774#ifdef V8_TARGET_ARCH_X64
3775 case Opcode::kSimd256Shufd: {
3776 MarkAsSimd256(node);
3777 return VisitSimd256Shufd(node);
3778 }
3779 case Opcode::kSimd256Shufps: {
3780 MarkAsSimd256(node);
3781 return VisitSimd256Shufps(node);
3782 }
3783 case Opcode::kSimd256Unpack: {
3784 MarkAsSimd256(node);
3785 return VisitSimd256Unpack(node);
3786 }
3787 case Opcode::kSimdPack128To256: {
3788 MarkAsSimd256(node);
3789 return VisitSimdPack128To256(node);
3790 }
3791#endif // V8_TARGET_ARCH_X64
3792#endif // V8_ENABLE_WASM_SIMD256_REVEC
3793
3794 case Opcode::kLoadStackPointer:
3795 return VisitLoadStackPointer(node);
3796
3797 case Opcode::kSetStackPointer:
3798 return VisitSetStackPointer(node);
3799
3800#endif // V8_ENABLE_WEBASSEMBLY
3801#define UNREACHABLE_CASE(op) case Opcode::k##op:
3806 UNREACHABLE_CASE(PendingLoopPhi)
3808 UNREACHABLE_CASE(Dead)
3809 UNREACHABLE();
3810#undef UNREACHABLE_CASE
3811 }
3812}
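A note on the kTaggedBitcast dispatch above: the case labels switch on multi(from, to), a turboshaft helper that folds several small enum values into a single uint64_t key so one switch statement can branch on the combination of representations. The sketch below shows the idiom in isolation; the helper and the enum here are illustrative and not V8's exact definitions.

#include <cstdint>
#include <cstdio>

namespace sketch {

enum class Rep : uint8_t { kWord32, kWord64, kTagged, kCompressed };

// Fold each enum value into the next byte of a 64-bit key so a pair (or
// triple, ...) of representations becomes a single switchable constant.
// Illustrative only; V8's multi() may pack values differently.
template <typename... Ts>
constexpr uint64_t multi(Ts... values) {
  uint64_t key = 0;
  ((key = (key << 8) | static_cast<uint64_t>(values)), ...);
  return key;
}

const char* Describe(Rep from, Rep to) {
  switch (multi(from, to)) {
    case multi(Rep::kWord32, Rep::kTagged):
    case multi(Rep::kWord64, Rep::kTagged):
      return "bitcast word to tagged";
    case multi(Rep::kCompressed, Rep::kWord32):
      return "bitcast compressed to word32";
    default:
      return "unhandled representation pair";
  }
}

}  // namespace sketch

int main() {
  std::printf("%s\n",
              sketch::Describe(sketch::Rep::kWord32, sketch::Rep::kTagged));
}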
3813
3815 // TODO(jarin) Improve the heuristic here.
3816 if (node->opcode() == IrOpcode::kFloat64Add ||
3817 node->opcode() == IrOpcode::kFloat64Sub ||
3818 node->opcode() == IrOpcode::kFloat64Mul) {
3819 return false;
3820 }
3821 return true;
3822}
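The heuristic just above exempts Float64 Add, Sub, and Mul, which is consistent with IEEE 754 arithmetic quieting NaNs: a signaling-NaN operand produces a quiet-NaN result, so these operations cannot themselves hand a signaling NaN downstream. A small standalone check of that quieting behavior follows; it assumes default IEEE semantics (no -ffast-math) and is illustrative, not part of V8.

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <limits>

// Returns true if the double's quiet bit (bit 51 of the binary64 mantissa,
// the convention on x86 and ARM) is set.
bool IsQuietNaN(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return (bits & (uint64_t{1} << 51)) != 0;
}

int main() {
  double snan = std::numeric_limits<double>::signaling_NaN();
  double sum = snan + 1.0;  // IEEE 754 arithmetic quiets the NaN.
  std::printf("operand quiet bit: %d\n", IsQuietNaN(snan));  // expected 0
  std::printf("result quiet bit:  %d\n", IsQuietNaN(sum));   // expected 1
}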
3823
3824#if V8_TARGET_ARCH_64_BIT
3825bool InstructionSelectorT::ZeroExtendsWord32ToWord64(OpIndex node,
3826 int recursion_depth) {
3827 // To compute whether a Node sets its upper 32 bits to zero, there are three
3828 // cases.
3829 // 1. Phi node, with a computed result already available in phi_states_:
3830 // Read the value from phi_states_.
3831 // 2. Phi node, with no result available in phi_states_ yet:
3832 // Recursively check its inputs, and store the result in phi_states_.
3833 // 3. Anything else:
3834 // Call the architecture-specific ZeroExtendsWord32ToWord64NoPhis.
3835
3836 // Limit recursion depth to avoid the possibility of stack overflow on very
3837 // large functions.
3838 const int kMaxRecursionDepth = 100;
3839
3840 if (const PhiOp* phi = TryCast<PhiOp>(node)) {
3841 if (recursion_depth == 0) {
3842 if (phi_states_.empty()) {
3843 // This vector is lazily allocated because the majority of compilations
3844 // never use it.
3845 phi_states_ = ZoneVector<Upper32BitsState>(
3846 node_count_, Upper32BitsState::kNotYetChecked, zone());
3847 }
3848 }
3849
3850 Upper32BitsState current = phi_states_[node.id()];
3851 if (current != Upper32BitsState::kNotYetChecked) {
3852 return current == Upper32BitsState::kZero;
3853 }
3854
3855 // If further recursion is prevented, we can't make any assumptions about
3856 // the output of this phi node.
3857 if (recursion_depth >= kMaxRecursionDepth) {
3858 return false;
3859 }
3860
3861 // Optimistically mark the current node as zero-extended so that we skip it
3862 // if we recursively visit it again due to a cycle. If this optimistic guess
3863 // is wrong, it will be corrected in MarkNodeAsNotZeroExtended.
3864 phi_states_[node.id()] = Upper32BitsState::kZero;
3865
3866 for (int i = 0; i < phi->input_count; ++i) {
3867 OpIndex input = phi->input(i);
3868 if (!ZeroExtendsWord32ToWord64(input, recursion_depth + 1)) {
3869 MarkNodeAsNotZeroExtended(node);
3870 return false;
3871 }
3872 }
3873
3874 return true;
3875 }
3876 return ZeroExtendsWord32ToWord64NoPhis(node);
3877}
3878
3879void InstructionSelectorT::MarkNodeAsNotZeroExtended(OpIndex node) {
3880 if (phi_states_[node.id()] == Upper32BitsState::kMayBeNonZero) return;
3881 phi_states_[node.id()] = Upper32BitsState::kMayBeNonZero;
3882 ZoneVector<OpIndex> worklist(zone_);
3883 worklist.push_back(node);
3884 while (!worklist.empty()) {
3885 node = worklist.back();
3886 worklist.pop_back();
3887 // We may have previously marked some uses of this node as zero-extended,
3888 // but that optimistic guess was proven incorrect.
3889 for (OpIndex use : turboshaft_uses(node)) {
3890 if (phi_states_[use.id()] == Upper32BitsState::kZero) {
3891 phi_states_[use.id()] = Upper32BitsState::kMayBeNonZero;
3892 worklist.push_back(use);
3893 }
3894 }
3895 }
3896}
3897#endif // V8_TARGET_ARCH_64_BIT
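The comments in ZeroExtendsWord32ToWord64 and MarkNodeAsNotZeroExtended describe the overall pattern: lazily allocate a per-node state table, optimistically mark a phi as zero-extending before recursing over its inputs so cycles terminate, and, when the guess turns out to be wrong, retract it through the use lists with a worklist. The condensed standalone sketch below shows that pattern on a toy graph; the node layout and names are illustrative, not V8's data structures.

#include <cstdio>
#include <vector>

namespace sketch {

enum class State { kNotYetChecked, kZero, kMayBeNonZero };

struct Node {
  bool is_phi = false;
  bool zero_extends_locally = false;  // stand-in for the non-phi check
  std::vector<int> inputs;            // indices into the graph
  std::vector<int> uses;
};

struct Graph {
  std::vector<Node> nodes;
  std::vector<State> states;  // lazily sized in the real code
};

void MarkNotZeroExtended(Graph& g, int id) {
  if (g.states[id] == State::kMayBeNonZero) return;
  g.states[id] = State::kMayBeNonZero;
  std::vector<int> worklist{id};
  while (!worklist.empty()) {
    int n = worklist.back();
    worklist.pop_back();
    // Retract optimistic guesses that depended on this node.
    for (int use : g.nodes[n].uses) {
      if (g.states[use] == State::kZero) {
        g.states[use] = State::kMayBeNonZero;
        worklist.push_back(use);
      }
    }
  }
}

bool ZeroExtends(Graph& g, int id, int depth = 0) {
  const Node& node = g.nodes[id];
  if (!node.is_phi) return node.zero_extends_locally;
  if (g.states[id] != State::kNotYetChecked) {
    return g.states[id] == State::kZero;
  }
  if (depth >= 100) return false;  // bound the recursion
  g.states[id] = State::kZero;     // optimistic guess; breaks cycles
  for (int input : node.inputs) {
    if (!ZeroExtends(g, input, depth + 1)) {
      MarkNotZeroExtended(g, id);
      return false;
    }
  }
  return true;
}

}  // namespace sketch

int main() {
  using namespace sketch;
  Graph g;
  g.nodes = {{false, true, {}, {2}},       // node 0: zero-extends
             {false, false, {}, {2}},      // node 1: does not
             {true, false, {0, 1}, {}}};   // node 2: phi(0, 1)
  g.states.assign(g.nodes.size(), State::kNotYetChecked);
  std::printf("phi zero-extends: %d\n", ZeroExtends(g, 2));  // expected 0
}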
3898
3899namespace {
3900
3901FrameStateDescriptor* GetFrameStateDescriptorInternal(
3902 Zone* zone, Graph* graph, const FrameStateOp& state) {
3903 const FrameStateInfo& state_info = state.data->frame_state_info;
3904 uint16_t parameters = state_info.parameter_count();
3905 uint16_t max_arguments = state_info.max_arguments();
3906 int locals = state_info.local_count();
3907 int stack = state_info.stack_count();
3908
3909 FrameStateDescriptor* outer_state = nullptr;
3910 if (state.inlined) {
3911 outer_state = GetFrameStateDescriptorInternal(
3912 zone, graph,
3913 graph->Get(state.parent_frame_state()).template Cast<FrameStateOp>());
3914 }
3915
3916#if V8_ENABLE_WEBASSEMBLY
3917 if (state_info.type() == FrameStateType::kJSToWasmBuiltinContinuation) {
3918 auto function_info = static_cast<const JSToWasmFrameStateFunctionInfo*>(
3919 state_info.function_info());
3920 return zone->New<JSToWasmFrameStateDescriptor>(
3921 zone, state_info.type(), state_info.bailout_id(),
3922 state_info.state_combine(), parameters, locals, stack,
3923 state_info.shared_info(), outer_state, function_info->signature());
3924 }
3925#endif // V8_ENABLE_WEBASSEMBLY
3926
3927 return zone->New<FrameStateDescriptor>(
3928 zone, state_info.type(), state_info.bailout_id(),
3929 state_info.state_combine(), parameters, max_arguments, locals, stack,
3930 state_info.shared_info(), state_info.bytecode_array(), outer_state,
3931 state_info.function_info()->wasm_liftoff_frame_size(),
3932 state_info.function_info()->wasm_function_index());
3933}
3934
3935} // namespace
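GetFrameStateDescriptorInternal above chains descriptors: when a frame state is marked inlined, the parent frame state is converted first and becomes the outer state of the new descriptor, so nested inlining yields a linked chain of descriptors from the innermost frame outward. The toy sketch below mirrors that chaining; the struct fields are illustrative placeholders, not V8's FrameStateDescriptor.

#include <cstdio>
#include <memory>

namespace sketch {

// Minimal stand-ins for a frame state node and its descriptor.
struct FrameState {
  int parameter_count;
  int local_count;
  const FrameState* parent = nullptr;  // set when this frame was inlined
};

struct Descriptor {
  int parameter_count;
  int local_count;
  std::unique_ptr<Descriptor> outer;  // descriptor of the inlining caller
};

std::unique_ptr<Descriptor> Build(const FrameState& state) {
  auto desc = std::make_unique<Descriptor>();
  desc->parameter_count = state.parameter_count;
  desc->local_count = state.local_count;
  if (state.parent != nullptr) {
    // Recurse for inlined frames, chaining the caller's descriptor as the
    // outer state, just like the 'if (state.inlined)' branch above.
    desc->outer = Build(*state.parent);
  }
  return desc;
}

}  // namespace sketch

int main() {
  using namespace sketch;
  FrameState outer{2, 3};
  FrameState inlined{1, 0, &outer};
  auto desc = Build(inlined);
  int depth = 0;
  for (Descriptor* d = desc.get(); d != nullptr; d = d->outer.get()) ++depth;
  std::printf("descriptor chain depth: %d\n", depth);  // expected 2
}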
3936
3938 OpIndex node) {
3939 const FrameStateOp& state =
3940 this->turboshaft_graph()->Get(node).template Cast<FrameStateOp>();
3941 auto* desc = GetFrameStateDescriptorInternal(instruction_zone(),
3942 this->turboshaft_graph(), state);
3945 desc->total_conservative_frame_size_in_bytes() +
3946 (desc->max_arguments() * kSystemPointerSize));
3947 return desc;
3948}
3949
3950#if V8_ENABLE_WEBASSEMBLY
3951// static
3952void InstructionSelectorT::SwapShuffleInputs(
3953 TurboshaftAdapter::SimdShuffleView& view) {
3954 view.SwapInputs();
3955}
3956#endif // V8_ENABLE_WEBASSEMBLY
3957
3959 Zone* zone, size_t node_count, Linkage* linkage,
3960 InstructionSequence* sequence, Graph* graph, Frame* frame,
3961 EnableSwitchJumpTable enable_switch_jump_table, TickCounter* tick_counter,
3962 JSHeapBroker* broker, size_t* max_unoptimized_frame_height,
3963 size_t* max_pushed_argument_count, SourcePositionMode source_position_mode,
3964 Features features, EnableScheduling enable_scheduling,
3965 EnableRootsRelativeAddressing enable_roots_relative_addressing,
3966 EnableTraceTurboJson trace_turbo) {
3967 return InstructionSelector(
3968 nullptr,
3970 zone, node_count, linkage, sequence, graph,
3971 &graph->source_positions(), frame, enable_switch_jump_table,
3972 tick_counter, broker, max_unoptimized_frame_height,
3973 max_pushed_argument_count, source_position_mode, features,
3974 enable_scheduling, enable_roots_relative_addressing, trace_turbo));
3975}
3976
3978 InstructionSelectorT* turboshaft_impl)
3979 : turboshaft_impl_(turboshaft_impl) {}
3980
3982
3983#define DISPATCH_TO_IMPL(...) return turboshaft_impl_->__VA_ARGS__;
3984
3988
3992
3997
4002
4003#undef DISPATCH_TO_IMPL
4004#undef VISIT_UNSUPPORTED_OP
4005
4006} // namespace compiler
4007} // namespace internal
4008} // namespace v8
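The tail of the file wraps InstructionSelectorT behind the public InstructionSelector: the wrapper stores the turboshaft implementation and forwards each public method through the DISPATCH_TO_IMPL macro (the forwarded method bodies are elided from this listing). Below is a minimal sketch of the same forwarding idiom, with made-up class and method names.

#include <cstdio>
#include <optional>

// Stand-in for the real implementation class (InstructionSelectorT).
class ImplT {
 public:
  std::optional<int> SelectInstructions() { return std::nullopt; }
  int instruction_count() const { return 42; }
};

// Thin public wrapper (stand-in for InstructionSelector).
class Wrapper {
 public:
  explicit Wrapper(ImplT* impl) : impl_(impl) {}

#define DISPATCH_TO_IMPL(...) return impl_->__VA_ARGS__;

  std::optional<int> SelectInstructions() {
    DISPATCH_TO_IMPL(SelectInstructions())
  }
  int instruction_count() const { DISPATCH_TO_IMPL(instruction_count()) }

#undef DISPATCH_TO_IMPL

 private:
  ImplT* impl_;
};

int main() {
  ImplT impl;
  Wrapper wrapper(&impl);
  std::printf("has bailout: %d, instructions: %d\n",
              wrapper.SelectInstructions().has_value(),
              wrapper.instruction_count());
}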