v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
graph-builder.cc
1// Copyright 2022 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/compiler/turboshaft/graph-builder.h"
6
7#include <limits>
8#include <numeric>
9#include <optional>
10#include <string_view>
11
13#include "src/base/logging.h"
16#include "src/base/vector.h"
19#include "src/common/globals.h"
43#include "src/flags/flags.h"
46#include "src/objects/map.h"
48
50
51namespace v8::internal::compiler::turboshaft {
52
53namespace {
54
55bool IsValidSmi(intptr_t c) {
56 Tagged<Object> as_obj = Tagged<Object>(static_cast<Address>(c));
57 if (!IsSmi(as_obj)) return false;
58
59 return Smi::FromInt(Smi::ToInt(as_obj)).ptr() == static_cast<uintptr_t>(c);
60}
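// Added annotation (illustrative, not part of the original source):
// `IsValidSmi` asks whether the raw word `c` survives a Smi round-trip, i.e.
// whether its tag bits mark it as a Smi and untagging + re-tagging reproduces
// the same bit pattern. A hedged usage sketch, assuming the standard Smi
// tagging scheme (Smi tag bit is 0):
//
//   intptr_t raw = static_cast<intptr_t>(Smi::FromInt(42).ptr());
//   DCHECK(IsValidSmi(raw));        // round-trips to the identical bits
//   DCHECK(!IsValidSmi(raw | 0x1)); // perturbed tag bits are rejected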
61
62struct GraphBuilder {
63 Zone* phase_zone;
64 Schedule& schedule;
65 Linkage* linkage;
66
67 Isolate* isolate;
68 JSHeapBroker* broker;
69 Zone* graph_zone;
70 using AssemblerT = TSAssembler<ExplicitTruncationReducer, VariableReducer>;
71 AssemblerT assembler;
72 SourcePositionTable* source_positions;
73 NodeOriginTable* origins;
74 JsWasmCallsSidetable* js_wasm_calls_sidetable;
75 TurboshaftPipelineKind pipeline_kind;
76
77 GraphBuilder(PipelineData* data, Zone* phase_zone, Schedule& schedule,
78 Linkage* linkage, JsWasmCallsSidetable* js_wasm_calls_sidetable)
79 : phase_zone(phase_zone),
80 schedule(schedule),
81 linkage(linkage),
82 isolate(data->isolate()),
83 broker(data->broker()),
84 graph_zone(data->graph_zone()),
85 assembler(data, data->graph(), data->graph(), phase_zone),
86 source_positions(data->source_positions()),
87 origins(data->node_origins()),
88 js_wasm_calls_sidetable(js_wasm_calls_sidetable),
89 pipeline_kind(data->pipeline_kind()) {}
90
91 struct BlockData {
92 Block* block;
93 OpIndex final_frame_state;
94 };
95 NodeAuxData<OpIndex> op_mapping{phase_zone};
96 ZoneVector<BlockData> block_mapping{schedule.RpoBlockCount(), phase_zone};
97 bool inside_region = false;
98
99 std::optional<BailoutReason> Run();
100 AssemblerT& Asm() { return assembler; }
101
102 private:
103 template <typename T>
104 V<T> Map(Node* old_node) {
105 V<T> result = V<T>::Cast(op_mapping.Get(old_node));
106 DCHECK(__ output_graph().IsValid(result));
107 return result;
108 }
109
110 OpIndex Map(Node* old_node) {
111 OpIndex result = op_mapping.Get(old_node);
112 DCHECK(__ output_graph().IsValid(result));
113 return result;
114 }
115
116 Block* Map(BasicBlock* block) {
117 Block* result = block_mapping[block->rpo_number()].block;
119 return result;
120 }
121
122 void FixLoopPhis(BasicBlock* loop) {
123 DCHECK(loop->IsLoopHeader());
124 for (Node* node : *loop->nodes()) {
125 if (node->opcode() != IrOpcode::kPhi) {
126 continue;
127 }
128 OpIndex phi_index = Map(node);
129 PendingLoopPhiOp& pending_phi =
130 __ output_graph().Get(phi_index).Cast<PendingLoopPhiOp>();
131 __ output_graph().Replace<PhiOp>(
132 phi_index,
133 base::VectorOf({pending_phi.first(), Map(node->InputAt(1))}),
134 pending_phi.rep);
135 }
136 }
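// Added annotation (illustrative, not part of the original source): loop phis
// are built in two steps. When a loop header is first visited only the forward
// input is known, so the kPhi case below emits a PendingLoopPhiOp. Once the
// backedge block has been translated and its Goto targets the already-bound
// loop header, FixLoopPhis patches each pending phi into a real two-input phi:
//
//   before: PendingLoopPhi(first)            // backedge value not yet known
//   after:  Phi(first, Map(backedge_input))  // rewritten by FixLoopPhis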
137
138 void ProcessDeoptInput(FrameStateData::Builder* builder, Node* input,
139 MachineType type) {
140 DCHECK_NE(input->opcode(), IrOpcode::kObjectState);
141 DCHECK_NE(input->opcode(), IrOpcode::kStateValues);
142 DCHECK_NE(input->opcode(), IrOpcode::kTypedStateValues);
143 if (input->opcode() == IrOpcode::kObjectId) {
144 builder->AddDematerializedObjectReference(ObjectIdOf(input->op()));
145 } else if (input->opcode() == IrOpcode::kTypedObjectState) {
146 const TypedObjectStateInfo& info =
147 TypedObjectStateInfoOf(input->op());
148 int field_count = input->op()->ValueInputCount();
149 builder->AddDematerializedObject(info.object_id(),
150 static_cast<uint32_t>(field_count));
151 for (int i = 0; i < field_count; ++i) {
152 ProcessDeoptInput(builder, input->InputAt(i),
153 (*info.machine_types())[i]);
154 }
155 } else if (input->opcode() == IrOpcode::kArgumentsElementsState) {
156 builder->AddArgumentsElements(ArgumentsStateTypeOf(input->op()));
157 } else if (input->opcode() == IrOpcode::kArgumentsLengthState) {
158 builder->AddArgumentsLength();
159 } else {
160 builder->AddInput(type, Map(input));
161 }
162 }
163
164 void ProcessStateValues(FrameStateData::Builder* builder,
165 Node* state_values) {
166 for (auto it = StateValuesAccess(state_values).begin(); !it.done(); ++it) {
167 if (Node* node = it.node()) {
168 ProcessDeoptInput(builder, node, (*it).type);
169 } else {
170 builder->AddUnusedRegister();
171 }
172 }
173 }
174
175 void BuildFrameStateData(FrameStateData::Builder* builder,
176 compiler::FrameState frame_state) {
177 if (frame_state.outer_frame_state()->opcode() != IrOpcode::kStart) {
178 builder->AddParentFrameState(Map(frame_state.outer_frame_state()));
179 }
180 ProcessDeoptInput(builder, frame_state.function(),
181 MachineType::AnyTagged());
182 ProcessStateValues(builder, frame_state.parameters());
183 ProcessDeoptInput(builder, frame_state.context(), MachineType::AnyTagged());
184 ProcessStateValues(builder, frame_state.locals());
185 Node* stack = frame_state.stack();
186 ProcessStateValues(builder, stack);
187 }
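// Added annotation (illustrative, not part of the original source):
// BuildFrameStateData serializes the frame state inputs in the order they are
// consumed above:
//
//   [parent frame state, if any], function, parameters..., context, locals..., stack
//
// ProcessStateValues flattens nested (Typed)StateValues nodes and records
// optimized-away slots via AddUnusedRegister().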
188
189 Block::Kind BlockKind(BasicBlock* block) {
190 switch (block->front()->opcode()) {
191 case IrOpcode::kStart:
192 case IrOpcode::kEnd:
193 case IrOpcode::kMerge:
194 return Block::Kind::kMerge;
195 case IrOpcode::kIfTrue:
196 case IrOpcode::kIfFalse:
197 case IrOpcode::kIfValue:
198 case IrOpcode::kIfDefault:
199 case IrOpcode::kIfSuccess:
200 case IrOpcode::kIfException:
201 return Block::Kind::kBranchTarget;
202 case IrOpcode::kLoop:
203 return Block::Kind::kLoopHeader;
204 default:
205 block->front()->Print();
206 UNIMPLEMENTED();
207 }
208 }
209 OpIndex Process(Node* node, BasicBlock* block,
210 const base::SmallVector<int, 16>& predecessor_permutation,
211 OpIndex& dominating_frame_state,
212 std::optional<BailoutReason>* bailout,
213 bool is_final_control = false);
214};
215
216std::optional<BailoutReason> GraphBuilder::Run() {
217 for (BasicBlock* block : *schedule.rpo_order()) {
218 block_mapping[block->rpo_number()].block =
219 block->IsLoopHeader() ? __ NewLoopHeader() : __ NewBlock();
220 }
221
222 for (BasicBlock* block : *schedule.rpo_order()) {
223 Block* target_block = Map(block);
224 if (!__ Bind(target_block)) continue;
225
226 // Since we visit blocks in rpo-order, the new block predecessors are sorted
227 // in rpo order too. However, the input schedule does not order
228 // predecessors, so we have to apply a corresponding permutation to phi
229 // inputs.
230 const BasicBlockVector& predecessors = block->predecessors();
231 base::SmallVector<int, 16> predecessor_permutation(predecessors.size());
232 std::iota(predecessor_permutation.begin(), predecessor_permutation.end(),
233 0);
234 std::sort(predecessor_permutation.begin(), predecessor_permutation.end(),
235 [&](size_t i, size_t j) {
236 return predecessors[i]->rpo_number() <
237 predecessors[j]->rpo_number();
238 });
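// Added annotation (illustrative, not part of the original source): a worked
// example of the permutation computed above. If the input schedule lists the
// predecessors as {B7, B3, B5} (rpo numbers 7, 3, 5), the sort yields
// predecessor_permutation = {1, 2, 0}: the new block's predecessors are
// ordered B3, B5, B7, and the kPhi case below reads the old phi input at
// predecessor_permutation[i] for new predecessor i.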
239
240 OpIndex dominating_frame_state = OpIndex::Invalid();
241 if (!predecessors.empty()) {
242 dominating_frame_state =
243 block_mapping[predecessors[0]->rpo_number()].final_frame_state;
244 for (size_t i = 1; i < predecessors.size(); ++i) {
245 if (block_mapping[predecessors[i]->rpo_number()].final_frame_state !=
246 dominating_frame_state) {
247 dominating_frame_state = OpIndex::Invalid();
248 break;
249 }
250 }
251 }
252 std::optional<BailoutReason> bailout = std::nullopt;
253 for (Node* node : *block->nodes()) {
254 if (V8_UNLIKELY(node->InputCount() >=
255 int{std::numeric_limits<
256 decltype(Operation::input_count)>::max()})) {
257 return BailoutReason::kTooManyArguments;
258 }
259 OpIndex i = Process(node, block, predecessor_permutation,
260 dominating_frame_state, &bailout);
261 if (V8_UNLIKELY(bailout)) return bailout;
262 if (!__ current_block()) break;
263 op_mapping.Set(node, i);
264 }
265 // We have terminated this block with `Unreachable`, so we stop generation
266 // here and continue with the next block.
267 if (!__ current_block()) continue;
268
269 if (Node* node = block->control_input()) {
270 if (V8_UNLIKELY(node->InputCount() >=
271 int{std::numeric_limits<
272 decltype(Operation::input_count)>::max()})) {
273 return BailoutReason::kTooManyArguments;
274 }
275 OpIndex i = Process(node, block, predecessor_permutation,
276 dominating_frame_state, &bailout, true);
277 if (V8_UNLIKELY(bailout)) return bailout;
278 op_mapping.Set(node, i);
279 }
280 switch (block->control()) {
281 case BasicBlock::kGoto: {
282 DCHECK_EQ(block->SuccessorCount(), 1);
283 Block* destination = Map(block->SuccessorAt(0));
284 __ Goto(destination);
285 if (destination->IsBound()) {
286 DCHECK(destination->IsLoop());
287 FixLoopPhis(block->SuccessorAt(0));
288 }
289 break;
290 }
291 case BasicBlock::kBranch:
292 case BasicBlock::kSwitch:
293 case BasicBlock::kReturn:
294 case BasicBlock::kDeoptimize:
295 case BasicBlock::kThrow:
296 case BasicBlock::kCall:
297 case BasicBlock::kTailCall:
298 break;
299 case BasicBlock::kNone:
300 UNREACHABLE();
301 }
302 DCHECK_NULL(__ current_block());
303
304 block_mapping[block->rpo_number()].final_frame_state =
305 dominating_frame_state;
306 }
307
308 if (source_positions && source_positions->IsEnabled()) {
309 for (OpIndex index : __ output_graph().AllOperationIndices()) {
310 compiler::NodeId origin =
311 __ output_graph().operation_origins()[index].DecodeTurbofanNodeId();
312 __ output_graph().source_positions()[index] =
313 source_positions->GetSourcePosition(origin);
314 }
315 }
316
317 if (origins) {
318 for (OpIndex index : __ output_graph().AllOperationIndices()) {
319 OpIndex origin = __ output_graph().operation_origins()[index];
320 origins->SetNodeOrigin(index.id(), origin.DecodeTurbofanNodeId());
321 }
322 }
323
324 return std::nullopt;
325}
326
327OpIndex GraphBuilder::Process(
328 Node* node, BasicBlock* block,
329 const base::SmallVector<int, 16>& predecessor_permutation,
330 OpIndex& dominating_frame_state, std::optional<BailoutReason>* bailout,
331 bool is_final_control) {
332 if (Asm().current_block() == nullptr) {
333 return OpIndex::Invalid();
334 }
335 __ SetCurrentOrigin(OpIndex::EncodeTurbofanNodeId(node->id()));
336 const Operator* op = node->op();
337 Operator::Opcode opcode = op->opcode();
338 switch (opcode) {
339 case IrOpcode::kStart:
340 case IrOpcode::kMerge:
341 case IrOpcode::kLoop:
342 case IrOpcode::kIfTrue:
343 case IrOpcode::kIfFalse:
344 case IrOpcode::kIfDefault:
345 case IrOpcode::kIfValue:
346 case IrOpcode::kStateValues:
347 case IrOpcode::kTypedStateValues:
348 case IrOpcode::kObjectId:
349 case IrOpcode::kTypedObjectState:
350 case IrOpcode::kArgumentsElementsState:
351 case IrOpcode::kArgumentsLengthState:
352 case IrOpcode::kEffectPhi:
353 case IrOpcode::kTerminate:
354 return OpIndex::Invalid();
355
356 case IrOpcode::kCheckpoint: {
357 // Preserve the frame state from this checkpoint for following nodes.
358 dominating_frame_state = Map(NodeProperties::GetFrameStateInput(node));
359 return OpIndex::Invalid();
360 }
361
362 case IrOpcode::kIfException: {
363 return __ CatchBlockBegin();
364 }
365
366 case IrOpcode::kIfSuccess: {
367 return OpIndex::Invalid();
368 }
369
370 case IrOpcode::kParameter: {
371 const ParameterInfo& info = ParameterInfoOf(op);
372 RegisterRepresentation rep =
373 RegisterRepresentation::FromMachineRepresentation(
374 linkage->GetParameterType(ParameterIndexOf(node->op()))
375 .representation());
376 return __ Parameter(info.index(), rep, info.debug_name());
377 }
378
379 case IrOpcode::kOsrValue: {
380 return __ OsrValue(OsrValueIndexOf(op));
381 }
382
383 case IrOpcode::kPhi: {
384 int input_count = op->ValueInputCount();
385 RegisterRepresentation rep =
386 RegisterRepresentation::FromMachineRepresentation(
387 PhiRepresentationOf(op));
388 if (__ current_block()->IsLoop()) {
389 DCHECK_EQ(input_count, 2);
390 return __ PendingLoopPhi(Map(node->InputAt(0)), rep);
391 } else {
392 base::SmallVector<OpIndex, 16> inputs;
393 for (int i = 0; i < input_count; ++i) {
394 // If this predecessor ends with an unreachable (and doesn't jump to
395 // this merge block), we skip its Phi input.
396 Block* pred = Map(block->PredecessorAt(predecessor_permutation[i]));
397 if (!pred->IsBound() ||
398 pred->LastOperation(__ output_graph()).Is<UnreachableOp>()) {
399 continue;
400 }
401 inputs.push_back(Map(node->InputAt(predecessor_permutation[i])));
402 }
403 return __ Phi(base::VectorOf(inputs), rep);
404 }
405 }
406
407 case IrOpcode::kInt64Constant:
408 return __ Word64Constant(static_cast<uint64_t>(OpParameter<int64_t>(op)));
409 case IrOpcode::kInt32Constant:
410 return __ Word32Constant(static_cast<uint32_t>(OpParameter<int32_t>(op)));
411 case IrOpcode::kFloat64Constant:
412 return __ Float64Constant(OpParameter<double>(op));
413 case IrOpcode::kFloat32Constant:
414 return __ Float32Constant(OpParameter<float>(op));
415 case IrOpcode::kNumberConstant:
416 return __ NumberConstant(OpParameter<double>(op));
417 case IrOpcode::kTaggedIndexConstant:
418 return __ TaggedIndexConstant(OpParameter<int32_t>(op));
419 case IrOpcode::kHeapConstant:
420 return __ HeapConstant(HeapConstantOf(op));
421 case IrOpcode::kCompressedHeapConstant:
422 return __ CompressedHeapConstant(HeapConstantOf(op));
423 case IrOpcode::kTrustedHeapConstant:
424 return __ TrustedHeapConstant(HeapConstantOf(op));
425 case IrOpcode::kExternalConstant:
426 return __ ExternalConstant(OpParameter<ExternalReference>(op));
427 case IrOpcode::kRelocatableInt64Constant:
428 return __ RelocatableConstant(
429 OpParameter<RelocatablePtrConstantInfo>(op).value(),
430 OpParameter<RelocatablePtrConstantInfo>(op).rmode());
431#define BINOP_CASE(opcode, assembler_op) \
432 case IrOpcode::k##opcode: \
433 return __ assembler_op(Map(node->InputAt(0)), Map(node->InputAt(1)));
434
435 BINOP_CASE(Int32Add, Word32Add)
436 BINOP_CASE(Int64Add, Word64Add)
437 BINOP_CASE(Int32Mul, Word32Mul)
438 BINOP_CASE(Int64Mul, Word64Mul)
439 BINOP_CASE(Word32And, Word32BitwiseAnd)
440 BINOP_CASE(Word64And, Word64BitwiseAnd)
441 BINOP_CASE(Word32Or, Word32BitwiseOr)
442 BINOP_CASE(Word64Or, Word64BitwiseOr)
443 BINOP_CASE(Word32Xor, Word32BitwiseXor)
444 BINOP_CASE(Word64Xor, Word64BitwiseXor)
445 BINOP_CASE(Int32Sub, Word32Sub)
446 BINOP_CASE(Int64Sub, Word64Sub)
447 BINOP_CASE(Int32Div, Int32Div)
448 BINOP_CASE(Uint32Div, Uint32Div)
449 BINOP_CASE(Int64Div, Int64Div)
450 BINOP_CASE(Uint64Div, Uint64Div)
451 BINOP_CASE(Int32Mod, Int32Mod)
452 BINOP_CASE(Uint32Mod, Uint32Mod)
453 BINOP_CASE(Int64Mod, Int64Mod)
454 BINOP_CASE(Uint64Mod, Uint64Mod)
455 BINOP_CASE(Int32MulHigh, Int32MulOverflownBits)
456 BINOP_CASE(Int64MulHigh, Int64MulOverflownBits)
457 BINOP_CASE(Uint32MulHigh, Uint32MulOverflownBits)
458 BINOP_CASE(Uint64MulHigh, Uint64MulOverflownBits)
459
460 BINOP_CASE(Float32Add, Float32Add)
461 BINOP_CASE(Float64Add, Float64Add)
462 BINOP_CASE(Float32Sub, Float32Sub)
463 BINOP_CASE(Float64Sub, Float64Sub)
464 BINOP_CASE(Float64Mul, Float64Mul)
465 BINOP_CASE(Float32Mul, Float32Mul)
466 BINOP_CASE(Float32Div, Float32Div)
467 BINOP_CASE(Float64Div, Float64Div)
468 BINOP_CASE(Float32Min, Float32Min)
469 BINOP_CASE(Float64Min, Float64Min)
470 BINOP_CASE(Float32Max, Float32Max)
471 BINOP_CASE(Float64Max, Float64Max)
472 BINOP_CASE(Float64Mod, Float64Mod)
473 BINOP_CASE(Float64Pow, Float64Power)
474 BINOP_CASE(Float64Atan2, Float64Atan2)
475
476 BINOP_CASE(Word32Shr, Word32ShiftRightLogical)
477 BINOP_CASE(Word64Shr, Word64ShiftRightLogical)
478
479 BINOP_CASE(Word32Shl, Word32ShiftLeft)
480 BINOP_CASE(Word64Shl, Word64ShiftLeft)
481
482 BINOP_CASE(Word32Rol, Word32RotateLeft)
483 BINOP_CASE(Word64Rol, Word64RotateLeft)
484
485 BINOP_CASE(Word32Ror, Word32RotateRight)
486 BINOP_CASE(Word64Ror, Word64RotateRight)
487
488 BINOP_CASE(Float32Equal, Float32Equal)
489 BINOP_CASE(Float64Equal, Float64Equal)
490
491 BINOP_CASE(Int32LessThan, Int32LessThan)
492 BINOP_CASE(Int64LessThan, Int64LessThan)
493 BINOP_CASE(Uint32LessThan, Uint32LessThan)
494 BINOP_CASE(Uint64LessThan, Uint64LessThan)
495 BINOP_CASE(Float32LessThan, Float32LessThan)
496 BINOP_CASE(Float64LessThan, Float64LessThan)
497
498 BINOP_CASE(Int32LessThanOrEqual, Int32LessThanOrEqual)
499 BINOP_CASE(Int64LessThanOrEqual, Int64LessThanOrEqual)
500 BINOP_CASE(Uint32LessThanOrEqual, Uint32LessThanOrEqual)
501 BINOP_CASE(Uint64LessThanOrEqual, Uint64LessThanOrEqual)
502 BINOP_CASE(Float32LessThanOrEqual, Float32LessThanOrEqual)
503 BINOP_CASE(Float64LessThanOrEqual, Float64LessThanOrEqual)
504
505 BINOP_CASE(Int32AddWithOverflow, Int32AddCheckOverflow)
506 BINOP_CASE(Int64AddWithOverflow, Int64AddCheckOverflow)
507 BINOP_CASE(Int32MulWithOverflow, Int32MulCheckOverflow)
508 BINOP_CASE(Int64MulWithOverflow, Int64MulCheckOverflow)
509 BINOP_CASE(Int32SubWithOverflow, Int32SubCheckOverflow)
510 BINOP_CASE(Int64SubWithOverflow, Int64SubCheckOverflow)
511#undef BINOP_CASE
512
513 case IrOpcode::kWord32Equal: {
514 OpIndex left = Map(node->InputAt(0));
515 OpIndex right = Map(node->InputAt(1));
516 if constexpr (kTaggedSize == kInt32Size) {
517 // Unfortunately, CSA produces Word32Equal for tagged comparison.
518 if (V8_UNLIKELY(pipeline_kind == TurboshaftPipelineKind::kCSA)) {
519 // We need to detect these cases and construct a consistent graph.
520 const bool left_is_tagged =
521 __ output_graph().Get(left).outputs_rep().at(0) ==
522 RegisterRepresentation::Tagged();
523 const bool right_is_tagged =
524 __ output_graph().Get(right).outputs_rep().at(0) ==
525 RegisterRepresentation::Tagged();
526 if (left_is_tagged && right_is_tagged) {
527 return __ TaggedEqual(V<Object>::Cast(left),
528 V<Object>::Cast(right));
529 } else if (left_is_tagged) {
530 return __ Word32Equal(
531 __ TruncateWordPtrToWord32(
532 __ BitcastTaggedToWordPtr(V<Object>::Cast(left))),
533 V<Word32>::Cast(right));
534 } else if (right_is_tagged) {
535 return __ Word32Equal(
536 V<Word32>::Cast(left),
537 __ TruncateWordPtrToWord32(
538 __ BitcastTaggedToWordPtr(V<Object>::Cast(right))));
539 }
540 }
541 }
542 return __ Word32Equal(V<Word32>::Cast(left), V<Word32>::Cast(right));
543 }
544
545 case IrOpcode::kWord64Equal: {
546 OpIndex left = Map(node->InputAt(0));
547 OpIndex right = Map(node->InputAt(1));
548 if constexpr (kTaggedSize == kInt64Size) {
549 // Unfortunately, CSA produces Word64Equal for tagged comparison.
550 if (V8_UNLIKELY(pipeline_kind == TurboshaftPipelineKind::kCSA)) {
551 // We need to detect these cases and construct a consistent graph.
552 const bool left_is_tagged =
553 __ output_graph().Get(left).outputs_rep().at(0) ==
554 RegisterRepresentation::Tagged();
555 const bool right_is_tagged =
556 __ output_graph().Get(right).outputs_rep().at(0) ==
557 RegisterRepresentation::Tagged();
558 if (left_is_tagged && right_is_tagged) {
559 return __ TaggedEqual(V<Object>::Cast(left),
560 V<Object>::Cast(right));
561 } else if (left_is_tagged) {
562 DCHECK((std::is_same_v<WordPtr, Word64>));
563 return __ Word64Equal(V<Word64>::Cast(__ BitcastTaggedToWordPtr(
564 V<Object>::Cast(left))),
565 V<Word64>::Cast(right));
566 } else if (right_is_tagged) {
567 DCHECK((std::is_same_v<WordPtr, Word64>));
568 return __ Word64Equal(V<Word64>::Cast(left),
569 V<Word64>::Cast(__ BitcastTaggedToWordPtr(
570 V<Object>::Cast(right))));
571 }
572 }
573 }
574 return __ Word64Equal(V<Word64>::Cast(left), V<Word64>::Cast(right));
575 }
576
577 case IrOpcode::kWord64Sar:
578 case IrOpcode::kWord32Sar: {
579 WordRepresentation rep = opcode == IrOpcode::kWord64Sar
580 ? WordRepresentation::Word64()
581 : WordRepresentation::Word32();
582 ShiftOp::Kind kind;
583 switch (ShiftKindOf(op)) {
584 case ShiftKind::kShiftOutZeros:
585 kind = ShiftOp::Kind::kShiftRightArithmeticShiftOutZeros;
586 break;
587 case ShiftKind::kNormal:
588 kind = ShiftOp::Kind::kShiftRightArithmetic;
589 break;
590 }
591 return __ Shift(Map(node->InputAt(0)), Map(node->InputAt(1)), kind, rep);
592 }
593
594#define UNARY_CASE(opcode, assembler_op) \
595 case IrOpcode::k##opcode: \
596 return __ assembler_op(Map(node->InputAt(0)));
597
598 UNARY_CASE(Word32ReverseBytes, Word32ReverseBytes)
599 UNARY_CASE(Word64ReverseBytes, Word64ReverseBytes)
600 UNARY_CASE(Word32Clz, Word32CountLeadingZeros)
601 UNARY_CASE(Word64Clz, Word64CountLeadingZeros)
602 UNARY_CASE(Word32Ctz, Word32CountTrailingZeros)
603 UNARY_CASE(Word64Ctz, Word64CountTrailingZeros)
604 UNARY_CASE(Word32Popcnt, Word32PopCount)
605 UNARY_CASE(Word64Popcnt, Word64PopCount)
606 UNARY_CASE(SignExtendWord8ToInt32, Word32SignExtend8)
607 UNARY_CASE(SignExtendWord16ToInt32, Word32SignExtend16)
608 UNARY_CASE(SignExtendWord8ToInt64, Word64SignExtend8)
609 UNARY_CASE(SignExtendWord16ToInt64, Word64SignExtend16)
610 UNARY_CASE(Int32AbsWithOverflow, Int32AbsCheckOverflow)
611 UNARY_CASE(Int64AbsWithOverflow, Int64AbsCheckOverflow)
612
613 UNARY_CASE(Float32Abs, Float32Abs)
614 UNARY_CASE(Float64Abs, Float64Abs)
615 UNARY_CASE(Float32Neg, Float32Negate)
616 UNARY_CASE(Float64Neg, Float64Negate)
617 UNARY_CASE(Float64SilenceNaN, Float64SilenceNaN)
618 UNARY_CASE(Float32RoundDown, Float32RoundDown)
619 UNARY_CASE(Float64RoundDown, Float64RoundDown)
620 UNARY_CASE(Float32RoundUp, Float32RoundUp)
621 UNARY_CASE(Float64RoundUp, Float64RoundUp)
622 UNARY_CASE(Float32RoundTruncate, Float32RoundToZero)
623 UNARY_CASE(Float64RoundTruncate, Float64RoundToZero)
624 UNARY_CASE(Float32RoundTiesEven, Float32RoundTiesEven)
625 UNARY_CASE(Float64RoundTiesEven, Float64RoundTiesEven)
626 UNARY_CASE(Float64Log, Float64Log)
627 UNARY_CASE(Float32Sqrt, Float32Sqrt)
628 UNARY_CASE(Float64Sqrt, Float64Sqrt)
629 UNARY_CASE(Float64Exp, Float64Exp)
630 UNARY_CASE(Float64Expm1, Float64Expm1)
631 UNARY_CASE(Float64Sin, Float64Sin)
632 UNARY_CASE(Float64Cos, Float64Cos)
633 UNARY_CASE(Float64Sinh, Float64Sinh)
634 UNARY_CASE(Float64Cosh, Float64Cosh)
635 UNARY_CASE(Float64Asin, Float64Asin)
636 UNARY_CASE(Float64Acos, Float64Acos)
637 UNARY_CASE(Float64Asinh, Float64Asinh)
638 UNARY_CASE(Float64Acosh, Float64Acosh)
639 UNARY_CASE(Float64Tan, Float64Tan)
640 UNARY_CASE(Float64Tanh, Float64Tanh)
641 UNARY_CASE(Float64Log2, Float64Log2)
642 UNARY_CASE(Float64Log10, Float64Log10)
643 UNARY_CASE(Float64Log1p, Float64Log1p)
644 UNARY_CASE(Float64Atan, Float64Atan)
645 UNARY_CASE(Float64Atanh, Float64Atanh)
646 UNARY_CASE(Float64Cbrt, Float64Cbrt)
647
648 UNARY_CASE(BitcastWord32ToWord64, BitcastWord32ToWord64)
649 UNARY_CASE(BitcastFloat32ToInt32, BitcastFloat32ToWord32)
650 UNARY_CASE(BitcastInt32ToFloat32, BitcastWord32ToFloat32)
651 UNARY_CASE(BitcastFloat64ToInt64, BitcastFloat64ToWord64)
652 UNARY_CASE(BitcastInt64ToFloat64, BitcastWord64ToFloat64)
653 UNARY_CASE(ChangeUint32ToUint64, ChangeUint32ToUint64)
654 UNARY_CASE(ChangeInt32ToInt64, ChangeInt32ToInt64)
655 UNARY_CASE(SignExtendWord32ToInt64, ChangeInt32ToInt64)
656
657 UNARY_CASE(ChangeFloat32ToFloat64, ChangeFloat32ToFloat64)
658
659 UNARY_CASE(ChangeFloat64ToInt32, ReversibleFloat64ToInt32)
660 UNARY_CASE(ChangeFloat64ToInt64, ReversibleFloat64ToInt64)
661 UNARY_CASE(ChangeFloat64ToUint32, ReversibleFloat64ToUint32)
662 UNARY_CASE(ChangeFloat64ToUint64, ReversibleFloat64ToUint64)
663
664 UNARY_CASE(ChangeInt32ToFloat64, ChangeInt32ToFloat64)
665 UNARY_CASE(ChangeInt64ToFloat64, ReversibleInt64ToFloat64)
666 UNARY_CASE(ChangeUint32ToFloat64, ChangeUint32ToFloat64)
667
668 UNARY_CASE(RoundFloat64ToInt32, TruncateFloat64ToInt32OverflowUndefined)
669 UNARY_CASE(RoundInt32ToFloat32, ChangeInt32ToFloat32)
670 UNARY_CASE(RoundInt64ToFloat32, ChangeInt64ToFloat32)
671 UNARY_CASE(RoundInt64ToFloat64, ChangeInt64ToFloat64)
672 UNARY_CASE(RoundUint32ToFloat32, ChangeUint32ToFloat32)
673 UNARY_CASE(RoundUint64ToFloat32, ChangeUint64ToFloat32)
674 UNARY_CASE(RoundUint64ToFloat64, ChangeUint64ToFloat64)
675 UNARY_CASE(TruncateFloat64ToFloat32, TruncateFloat64ToFloat32)
676 UNARY_CASE(TruncateFloat64ToUint32,
677 TruncateFloat64ToUint32OverflowUndefined)
678 UNARY_CASE(TruncateFloat64ToWord32, JSTruncateFloat64ToWord32)
679
680 UNARY_CASE(TryTruncateFloat32ToInt64, TryTruncateFloat32ToInt64)
681 UNARY_CASE(TryTruncateFloat32ToUint64, TryTruncateFloat32ToUint64)
682 UNARY_CASE(TryTruncateFloat64ToInt32, TryTruncateFloat64ToInt32)
683 UNARY_CASE(TryTruncateFloat64ToInt64, TryTruncateFloat64ToInt64)
684 UNARY_CASE(TryTruncateFloat64ToUint32, TryTruncateFloat64ToUint32)
685 UNARY_CASE(TryTruncateFloat64ToUint64, TryTruncateFloat64ToUint64)
686
687 UNARY_CASE(Float64ExtractLowWord32, Float64ExtractLowWord32)
688 UNARY_CASE(Float64ExtractHighWord32, Float64ExtractHighWord32)
689
690 UNARY_CASE(TruncateFloat64ToFloat16RawBits,
691 TruncateFloat64ToFloat16RawBits)
692 UNARY_CASE(ChangeFloat16RawBitsToFloat64, ChangeFloat16RawBitsToFloat64)
693#undef UNARY_CASE
694 case IrOpcode::kTruncateInt64ToInt32:
695 return __ TruncateWord64ToWord32(Map(node->InputAt(0)));
696 case IrOpcode::kTruncateFloat32ToInt32:
697 switch (OpParameter<TruncateKind>(node->op())) {
698 case TruncateKind::kArchitectureDefault:
699 return __ TruncateFloat32ToInt32OverflowUndefined(
700 Map(node->InputAt(0)));
701 case TruncateKind::kSetOverflowToMin:
702 return __ TruncateFloat32ToInt32OverflowToMin(Map(node->InputAt(0)));
703 }
704 case IrOpcode::kTruncateFloat32ToUint32:
705 switch (OpParameter<TruncateKind>(node->op())) {
706 case TruncateKind::kArchitectureDefault:
707 return __ TruncateFloat32ToUint32OverflowUndefined(
708 Map(node->InputAt(0)));
709 case TruncateKind::kSetOverflowToMin:
710 return __ TruncateFloat32ToUint32OverflowToMin(Map(node->InputAt(0)));
711 }
712 case IrOpcode::kTruncateFloat64ToInt64:
713 switch (OpParameter<TruncateKind>(node->op())) {
714 case TruncateKind::kArchitectureDefault:
715 return __ TruncateFloat64ToInt64OverflowUndefined(
716 Map(node->InputAt(0)));
717 case TruncateKind::kSetOverflowToMin:
718 return __ TruncateFloat64ToInt64OverflowToMin(Map(node->InputAt(0)));
719 }
720 case IrOpcode::kFloat64InsertLowWord32: {
721 V<Word32> high;
722 V<Word32> low = Map<Word32>(node->InputAt(1));
723 if (node->InputAt(0)->opcode() == IrOpcode::kFloat64InsertHighWord32) {
724 // We can turn this into a single operation.
725 high = Map<Word32>(node->InputAt(0)->InputAt(1));
726 } else {
727 // We need to extract the high word to combine it.
728 high = __ Float64ExtractHighWord32(Map(node->InputAt(0)));
729 }
730 return __ BitcastWord32PairToFloat64(high, low);
731 }
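// Added annotation (illustrative, not part of the original source): the fusion
// above turns the TurboFan pattern
//
//   Float64InsertLowWord32(Float64InsertHighWord32(x, hi), lo)
//
// into a single BitcastWord32PairToFloat64(hi, lo), since both 32-bit halves
// of the float64 are overwritten and the original value `x` is irrelevant.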
732 case IrOpcode::kFloat64InsertHighWord32: {
733 V<Word32> high = Map<Word32>(node->InputAt(1));
734 V<Word32> low;
735 if (node->InputAt(0)->opcode() == IrOpcode::kFloat64InsertLowWord32) {
736 // We can turn this into a single operation.
737 low = Map<Word32>(node->InputAt(0)->InputAt(1));
738 } else {
739 // We need to extract the low word to combine it.
740 low = __ Float64ExtractLowWord32(Map<Float64>(node->InputAt(0)));
741 }
742 return __ BitcastWord32PairToFloat64(high, low);
743 }
744 case IrOpcode::kBitcastTaggedToWord:
745 return __ BitcastTaggedToWordPtr(Map(node->InputAt(0)));
746 case IrOpcode::kBitcastWordToTagged: {
747 V<WordPtr> input = Map(node->InputAt(0));
748 if (V8_UNLIKELY(pipeline_kind == TurboshaftPipelineKind::kCSA)) {
749 // TODO(nicohartmann@): This is currently required to properly compile
750 // builtins. We should fix them and remove this.
751 if (LoadOp* load = __ output_graph().Get(input).TryCast<LoadOp>()) {
752 CHECK_EQ(2, node->InputAt(0)->UseCount());
753 CHECK(base::all_equal(node->InputAt(0)->uses(), node));
754 // CSA produces the pattern
755 // BitcastWordToTagged(Load<RawPtr>(...))
756 // which is not safe to translate to Turboshaft, because
757 // LateLoadElimination can potentially merge this with an identical
758 // untagged load that would be unsound in presence of a GC.
759 CHECK(load->loaded_rep == MemoryRepresentation::UintPtr() ||
760 load->loaded_rep == (Is64() ? MemoryRepresentation::Int64()
761 : MemoryRepresentation::Int32()));
762 CHECK_EQ(load->result_rep, RegisterRepresentation::WordPtr());
763 // In this case we turn the load into a tagged load directly...
764 load->loaded_rep = MemoryRepresentation::UncompressedTaggedPointer();
765 load->result_rep = RegisterRepresentation::Tagged();
766 // ... and skip the bitcast.
767 return input;
768 }
769 }
770 return __ BitcastWordPtrToTagged(Map(node->InputAt(0)));
771 }
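// Added annotation (illustrative, not part of the original source): sketch of
// the CSA pattern rewritten above,
//
//   p = Load<RawPtr>(base, offset)   // untagged load of a pointer-sized word
//   t = BitcastWordToTagged(p)
//
// which becomes a single tagged load
//
//   t = Load<UncompressedTaggedPointer>(base, offset)
//
// so that LateLoadElimination can never merge it with an equivalent untagged
// load across a point where the GC may move the object.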
772 case IrOpcode::kNumberIsFinite:
773 return __ Float64Is(Map(node->InputAt(0)), NumericKind::kFinite);
774 case IrOpcode::kNumberIsInteger:
775 return __ Float64Is(Map(node->InputAt(0)), NumericKind::kInteger);
776 case IrOpcode::kNumberIsSafeInteger:
777 return __ Float64Is(Map(node->InputAt(0)), NumericKind::kSafeInteger);
778 case IrOpcode::kNumberIsFloat64Hole:
779 return __ Float64Is(Map(node->InputAt(0)), NumericKind::kFloat64Hole);
780 case IrOpcode::kNumberIsMinusZero:
781 return __ Float64Is(Map(node->InputAt(0)), NumericKind::kMinusZero);
782 case IrOpcode::kNumberIsNaN:
783 return __ Float64Is(Map(node->InputAt(0)), NumericKind::kNaN);
784 case IrOpcode::kObjectIsMinusZero:
785 return __ ObjectIsNumericValue(Map(node->InputAt(0)),
786 NumericKind::kMinusZero,
787 FloatRepresentation::Float64());
788 case IrOpcode::kObjectIsNaN:
789 return __ ObjectIsNumericValue(Map(node->InputAt(0)), NumericKind::kNaN,
790 FloatRepresentation::Float64());
791 case IrOpcode::kObjectIsFiniteNumber:
792 return __ ObjectIsNumericValue(Map(node->InputAt(0)),
793 NumericKind::kFinite,
794 FloatRepresentation::Float64());
795 case IrOpcode::kObjectIsInteger:
796 return __ ObjectIsNumericValue(Map(node->InputAt(0)),
797 NumericKind::kInteger,
798 FloatRepresentation::Float64());
799 case IrOpcode::kObjectIsSafeInteger:
800 return __ ObjectIsNumericValue(Map(node->InputAt(0)),
801 NumericKind::kSafeInteger,
802 FloatRepresentation::Float64());
803
804#define OBJECT_IS_CASE(kind) \
805 case IrOpcode::kObjectIs##kind: { \
806 return __ ObjectIs(Map(node->InputAt(0)), ObjectIsOp::Kind::k##kind, \
807 ObjectIsOp::InputAssumptions::kNone); \
808 }
809 OBJECT_IS_CASE(ArrayBufferView)
810 OBJECT_IS_CASE(BigInt)
811 OBJECT_IS_CASE(Callable)
812 OBJECT_IS_CASE(Constructor)
813 OBJECT_IS_CASE(DetectableCallable)
814 OBJECT_IS_CASE(NonCallable)
815 OBJECT_IS_CASE(Number)
816 OBJECT_IS_CASE(Receiver)
817 OBJECT_IS_CASE(Smi)
818 OBJECT_IS_CASE(String)
819 OBJECT_IS_CASE(Symbol)
820 OBJECT_IS_CASE(Undetectable)
821#undef OBJECT_IS_CASE
822
823#define CHECK_OBJECT_IS_CASE(code, kind, input_assumptions, reason, feedback) \
824 case IrOpcode::k##code: { \
825 DCHECK(dominating_frame_state.valid()); \
826 V<Object> input = Map(node->InputAt(0)); \
827 V<Word32> check = \
828 __ ObjectIs(input, ObjectIsOp::Kind::k##kind, \
829 ObjectIsOp::InputAssumptions::k##input_assumptions); \
830 __ DeoptimizeIfNot(check, dominating_frame_state, \
831 DeoptimizeReason::k##reason, feedback); \
832 return input; \
833 }
834 CHECK_OBJECT_IS_CASE(CheckInternalizedString, InternalizedString,
835 HeapObject, WrongInstanceType, {})
836 CHECK_OBJECT_IS_CASE(CheckNumber, Number, None, NotANumber,
837 CheckParametersOf(op).feedback())
838 CHECK_OBJECT_IS_CASE(CheckNumberFitsInt32, NumberFitsInt32, None,
839 NotInt32, CheckParametersOf(op).feedback())
840 CHECK_OBJECT_IS_CASE(CheckReceiver, Receiver, HeapObject,
841 NotAJavaScriptObject, {})
842 CHECK_OBJECT_IS_CASE(CheckReceiverOrNullOrUndefined,
843 ReceiverOrNullOrUndefined, HeapObject,
844 NotAJavaScriptObjectOrNullOrUndefined, {})
845 CHECK_OBJECT_IS_CASE(CheckString, String, HeapObject, NotAString,
846 CheckParametersOf(op).feedback())
847 CHECK_OBJECT_IS_CASE(CheckStringOrStringWrapper, StringOrStringWrapper,
848 HeapObject, NotAStringOrStringWrapper,
849 CheckParametersOf(op).feedback())
850 CHECK_OBJECT_IS_CASE(CheckSymbol, Symbol, HeapObject, NotASymbol, {})
851 CHECK_OBJECT_IS_CASE(CheckBigInt, BigInt, None, NotABigInt,
852 CheckParametersOf(op).feedback())
853 CHECK_OBJECT_IS_CASE(CheckedBigIntToBigInt64, BigInt64, BigInt,
854 NotABigInt64, CheckParametersOf(op).feedback())
855#undef CHECK_OBJECT_IS_CASE
856
857 case IrOpcode::kPlainPrimitiveToNumber:
858 return __ ConvertPlainPrimitiveToNumber(Map(node->InputAt(0)));
859 case IrOpcode::kPlainPrimitiveToWord32:
860 return __ ConvertJSPrimitiveToUntagged(
861 Map(node->InputAt(0)),
862 ConvertJSPrimitiveToUntaggedOp::UntaggedKind::kInt32,
863 ConvertJSPrimitiveToUntaggedOp::InputAssumptions::kPlainPrimitive);
864 case IrOpcode::kPlainPrimitiveToFloat64:
865 return __ ConvertJSPrimitiveToUntagged(
866 Map(node->InputAt(0)),
867 ConvertJSPrimitiveToUntaggedOp::UntaggedKind::kFloat64,
868 ConvertJSPrimitiveToUntaggedOp::InputAssumptions::kPlainPrimitive);
869
870 case IrOpcode::kConvertTaggedHoleToUndefined: {
871 V<Object> input = Map(node->InputAt(0));
872 V<Word32> is_the_hole = __ TaggedEqual(
873 input, __ HeapConstant(isolate->factory()->the_hole_value()));
874 return __ Conditional(
875 is_the_hole, __ HeapConstant(isolate->factory()->undefined_value()),
876 input, BranchHint::kFalse);
877 }
878
879 case IrOpcode::kConvertReceiver:
880 return __ ConvertJSPrimitiveToObject(
881 Map(node->InputAt(0)), Map(node->InputAt(1)), Map(node->InputAt(2)),
882 ConvertReceiverModeOf(node->op()));
883
884 case IrOpcode::kToBoolean:
885 return __ ConvertToBoolean(Map(node->InputAt(0)));
886 case IrOpcode::kNumberToString:
887 return __ ConvertNumberToString(Map(node->InputAt(0)));
888 case IrOpcode::kStringToNumber:
889 return __ ConvertStringToNumber(Map(node->InputAt(0)));
890 case IrOpcode::kChangeTaggedToTaggedSigned:
891 return __ Convert(Map(node->InputAt(0)),
892 ConvertOp::Kind::kNumberOrOddball,
893 ConvertOp::Kind::kSmi);
894
895 case IrOpcode::kCheckedTaggedToTaggedSigned: {
896 DCHECK(dominating_frame_state.valid());
897 V<Object> input = Map(node->InputAt(0));
898 __ DeoptimizeIfNot(__ ObjectIsSmi(input), dominating_frame_state,
899 DeoptimizeReason::kNotASmi,
900 CheckParametersOf(node->op()).feedback());
901 return input;
902 }
903
904 case IrOpcode::kCheckedTaggedToTaggedPointer: {
905 DCHECK(dominating_frame_state.valid());
906 V<Object> input = Map(node->InputAt(0));
907 __ DeoptimizeIf(__ ObjectIsSmi(input), dominating_frame_state,
908 DeoptimizeReason::kSmi,
909 CheckParametersOf(node->op()).feedback());
910 return input;
911 }
912
913#define CONVERT_PRIMITIVE_TO_OBJECT_CASE(name, kind, input_type, \
914 input_interpretation) \
915 case IrOpcode::k##name: \
916 return __ ConvertUntaggedToJSPrimitive( \
917 Map(node->InputAt(0)), \
918 ConvertUntaggedToJSPrimitiveOp::JSPrimitiveKind::k##kind, \
919 V<input_type>::rep, \
920 ConvertUntaggedToJSPrimitiveOp::InputInterpretation:: \
921 k##input_interpretation, \
922 CheckForMinusZeroMode::kDontCheckForMinusZero);
923 CONVERT_PRIMITIVE_TO_OBJECT_CASE(ChangeInt32ToTagged, Number, Word32,
924 Signed)
925 CONVERT_PRIMITIVE_TO_OBJECT_CASE(ChangeUint32ToTagged, Number, Word32,
926 Unsigned)
927 CONVERT_PRIMITIVE_TO_OBJECT_CASE(ChangeInt64ToTagged, Number, Word64,
928 Signed)
929 CONVERT_PRIMITIVE_TO_OBJECT_CASE(ChangeUint64ToTagged, Number, Word64,
930 Unsigned)
931 CONVERT_PRIMITIVE_TO_OBJECT_CASE(ChangeFloat64ToTaggedPointer, HeapNumber,
932 Float64, Signed)
933 CONVERT_PRIMITIVE_TO_OBJECT_CASE(ChangeInt64ToBigInt, BigInt, Word64,
934 Signed)
935 CONVERT_PRIMITIVE_TO_OBJECT_CASE(ChangeUint64ToBigInt, BigInt, Word64,
936 Unsigned)
937 CONVERT_PRIMITIVE_TO_OBJECT_CASE(ChangeInt31ToTaggedSigned, Smi, Word32,
938 Signed)
939 CONVERT_PRIMITIVE_TO_OBJECT_CASE(ChangeBitToTagged, Boolean, Word32,
940 Signed)
941 CONVERT_PRIMITIVE_TO_OBJECT_CASE(StringFromSingleCharCode, String, Word32,
942 CharCode)
943 CONVERT_PRIMITIVE_TO_OBJECT_CASE(StringFromSingleCodePoint, String,
944 Word32, CodePoint)
945 CONVERT_PRIMITIVE_TO_OBJECT_CASE(ChangeFloat64HoleToTagged,
946 HeapNumberOrUndefined, Float64, Signed)
947
948 case IrOpcode::kChangeFloat64ToTagged:
949 return __ ConvertUntaggedToJSPrimitive(
950 Map(node->InputAt(0)),
951 ConvertUntaggedToJSPrimitiveOp::JSPrimitiveKind::kNumber,
952 RegisterRepresentation::Float64(),
953 ConvertUntaggedToJSPrimitiveOp::InputInterpretation::kSigned,
954 CheckMinusZeroModeOf(node->op()));
955#undef CONVERT_PRIMITIVE_TO_OBJECT_CASE
956
957#define CONVERT_PRIMITIVE_TO_OBJECT_OR_DEOPT_CASE(name, kind, input_type, \
958 input_interpretation) \
959 case IrOpcode::k##name: { \
960 DCHECK(dominating_frame_state.valid()); \
961 const CheckParameters& params = CheckParametersOf(node->op()); \
962 return __ ConvertUntaggedToJSPrimitiveOrDeopt( \
963 Map(node->InputAt(0)), dominating_frame_state, \
964 ConvertUntaggedToJSPrimitiveOrDeoptOp::JSPrimitiveKind::k##kind, \
965 V<input_type>::rep, \
966 ConvertUntaggedToJSPrimitiveOrDeoptOp::InputInterpretation:: \
967 k##input_interpretation, \
968 params.feedback()); \
969 }
970 CONVERT_PRIMITIVE_TO_OBJECT_OR_DEOPT_CASE(CheckedInt32ToTaggedSigned, Smi,
971 Word32, Signed)
972 CONVERT_PRIMITIVE_TO_OBJECT_OR_DEOPT_CASE(CheckedUint32ToTaggedSigned,
973 Smi, Word32, Unsigned)
974 CONVERT_PRIMITIVE_TO_OBJECT_OR_DEOPT_CASE(CheckedInt64ToTaggedSigned, Smi,
975 Word64, Signed)
976 CONVERT_PRIMITIVE_TO_OBJECT_OR_DEOPT_CASE(CheckedUint64ToTaggedSigned,
977 Smi, Word64, Unsigned)
978#undef CONVERT_PRIMITIVE_TO_OBJECT_OR_DEOPT_CASE
979
980#define CONVERT_OBJECT_TO_PRIMITIVE_CASE(name, kind, input_assumptions) \
981 case IrOpcode::k##name: \
982 return __ ConvertJSPrimitiveToUntagged( \
983 Map(node->InputAt(0)), \
984 ConvertJSPrimitiveToUntaggedOp::UntaggedKind::k##kind, \
985 ConvertJSPrimitiveToUntaggedOp::InputAssumptions:: \
986 k##input_assumptions);
987 CONVERT_OBJECT_TO_PRIMITIVE_CASE(ChangeTaggedSignedToInt32, Int32, Smi)
988 CONVERT_OBJECT_TO_PRIMITIVE_CASE(ChangeTaggedSignedToInt64, Int64, Smi)
989 CONVERT_OBJECT_TO_PRIMITIVE_CASE(ChangeTaggedToBit, Bit, Boolean)
990 CONVERT_OBJECT_TO_PRIMITIVE_CASE(ChangeTaggedToInt32, Int32,
991 NumberOrOddball)
992 CONVERT_OBJECT_TO_PRIMITIVE_CASE(ChangeTaggedToUint32, Uint32,
993 NumberOrOddball)
994 CONVERT_OBJECT_TO_PRIMITIVE_CASE(ChangeTaggedToInt64, Int64,
995 NumberOrOddball)
996 CONVERT_OBJECT_TO_PRIMITIVE_CASE(ChangeTaggedToFloat64, Float64,
997 NumberOrOddball)
998 CONVERT_OBJECT_TO_PRIMITIVE_CASE(TruncateTaggedToFloat64, Float64,
999 NumberOrOddball)
1000#undef CONVERT_OBJECT_TO_PRIMITIVE_CASE
1001
1002#define TRUNCATE_OBJECT_TO_PRIMITIVE_CASE(name, kind, input_assumptions) \
1003 case IrOpcode::k##name: \
1004 return __ TruncateJSPrimitiveToUntagged( \
1005 Map(node->InputAt(0)), \
1006 TruncateJSPrimitiveToUntaggedOp::UntaggedKind::k##kind, \
1007 TruncateJSPrimitiveToUntaggedOp::InputAssumptions:: \
1008 k##input_assumptions);
1009 TRUNCATE_OBJECT_TO_PRIMITIVE_CASE(TruncateTaggedToWord32, Int32,
1010 NumberOrOddball)
1011 TRUNCATE_OBJECT_TO_PRIMITIVE_CASE(TruncateBigIntToWord64, Int64, BigInt)
1012 TRUNCATE_OBJECT_TO_PRIMITIVE_CASE(TruncateTaggedToBit, Bit, Object)
1013 TRUNCATE_OBJECT_TO_PRIMITIVE_CASE(TruncateTaggedPointerToBit, Bit,
1014 HeapObject)
1015#undef TRUNCATE_OBJECT_TO_PRIMITIVE_CASE
1016
1017 case IrOpcode::kCheckedTruncateTaggedToWord32:
1018 DCHECK(dominating_frame_state.valid());
1019 using IR = TruncateJSPrimitiveToUntaggedOrDeoptOp::InputRequirement;
1020 IR input_requirement;
1021 switch (CheckTaggedInputParametersOf(node->op()).mode()) {
1022 case CheckTaggedInputMode::kAdditiveSafeInteger:
1023 input_requirement = IR::kAdditiveSafeInteger;
1024 break;
1025 case CheckTaggedInputMode::kNumber:
1026 input_requirement = IR::kNumber;
1027 break;
1028 case CheckTaggedInputMode::kNumberOrBoolean:
1029 input_requirement = IR::kNumberOrBoolean;
1030 break;
1031 case CheckTaggedInputMode::kNumberOrOddball:
1032 input_requirement = IR::kNumberOrOddball;
1033 break;
1034 }
1035 return __ TruncateJSPrimitiveToUntaggedOrDeopt(
1036 Map(node->InputAt(0)), dominating_frame_state,
1037 TruncateJSPrimitiveToUntaggedOrDeoptOp::UntaggedKind::kInt32,
1038 input_requirement,
1039 CheckTaggedInputParametersOf(node->op()).feedback());
1040
1041#define CHANGE_OR_DEOPT_INT_CASE(kind) \
1042 case IrOpcode::kChecked##kind: { \
1043 DCHECK(dominating_frame_state.valid()); \
1044 const CheckParameters& params = CheckParametersOf(node->op()); \
1045 return __ ChangeOrDeopt(Map(node->InputAt(0)), dominating_frame_state, \
1046 ChangeOrDeoptOp::Kind::k##kind, \
1047 CheckForMinusZeroMode::kDontCheckForMinusZero, \
1048 params.feedback()); \
1049 }
1050 CHANGE_OR_DEOPT_INT_CASE(Uint32ToInt32)
1051 CHANGE_OR_DEOPT_INT_CASE(Int64ToInt32)
1052 CHANGE_OR_DEOPT_INT_CASE(Uint64ToInt32)
1053 CHANGE_OR_DEOPT_INT_CASE(Uint64ToInt64)
1054#undef CHANGE_OR_DEOPT_INT_CASE
1055
1056 case IrOpcode::kCheckedFloat64ToInt32: {
1057 DCHECK(dominating_frame_state.valid());
1058 const CheckMinusZeroParameters& params =
1059 CheckMinusZeroParametersOf(node->op());
1060 return __ ChangeOrDeopt(Map(node->InputAt(0)), dominating_frame_state,
1061 ChangeOrDeoptOp::Kind::kFloat64ToInt32,
1062 params.mode(), params.feedback());
1063 }
1064
1065 case IrOpcode::kCheckedFloat64ToAdditiveSafeInteger: {
1066 DCHECK(dominating_frame_state.valid());
1067 const CheckMinusZeroParameters& params =
1068 CheckMinusZeroParametersOf(node->op());
1069 return __ ChangeOrDeopt(
1070 Map(node->InputAt(0)), dominating_frame_state,
1071 ChangeOrDeoptOp::Kind::kFloat64ToAdditiveSafeInteger, params.mode(),
1072 params.feedback());
1073 }
1074
1075 case IrOpcode::kCheckedFloat64ToInt64: {
1076 DCHECK(dominating_frame_state.valid());
1077 const CheckMinusZeroParameters& params =
1078 CheckMinusZeroParametersOf(node->op());
1079 return __ ChangeOrDeopt(Map(node->InputAt(0)), dominating_frame_state,
1080 ChangeOrDeoptOp::Kind::kFloat64ToInt64,
1081 params.mode(), params.feedback());
1082 }
1083
1084 case IrOpcode::kCheckedTaggedToInt32: {
1085 DCHECK(dominating_frame_state.valid());
1086 const CheckMinusZeroParameters& params =
1087 CheckMinusZeroParametersOf(node->op());
1088 return __ ConvertJSPrimitiveToUntaggedOrDeopt(
1089 Map(node->InputAt(0)), dominating_frame_state,
1090 ConvertJSPrimitiveToUntaggedOrDeoptOp::JSPrimitiveKind::kNumber,
1091 ConvertJSPrimitiveToUntaggedOrDeoptOp::UntaggedKind::kInt32,
1092 params.mode(), params.feedback());
1093 }
1094
1095 case IrOpcode::kCheckedTaggedToAdditiveSafeInteger: {
1096 DCHECK(dominating_frame_state.valid());
1097 const CheckMinusZeroParameters& params =
1098 CheckMinusZeroParametersOf(node->op());
1099 return __ ConvertJSPrimitiveToUntaggedOrDeopt(
1100 Map(node->InputAt(0)), dominating_frame_state,
1101 ConvertJSPrimitiveToUntaggedOrDeoptOp::JSPrimitiveKind::kNumber,
1102 ConvertJSPrimitiveToUntaggedOrDeoptOp::UntaggedKind::
1103 kAdditiveSafeInteger,
1104 params.mode(), params.feedback());
1105 }
1106
1107 case IrOpcode::kCheckedTaggedToInt64: {
1108 DCHECK(dominating_frame_state.valid());
1109 const CheckMinusZeroParameters& params =
1110 CheckMinusZeroParametersOf(node->op());
1111 return __ ConvertJSPrimitiveToUntaggedOrDeopt(
1112 Map(node->InputAt(0)), dominating_frame_state,
1113 ConvertJSPrimitiveToUntaggedOrDeoptOp::JSPrimitiveKind::kNumber,
1114 ConvertJSPrimitiveToUntaggedOrDeoptOp::UntaggedKind::kInt64,
1115 params.mode(), params.feedback());
1116 }
1117
1118 case IrOpcode::kCheckedTaggedToFloat64: {
1119 DCHECK(dominating_frame_state.valid());
1120 const CheckTaggedInputParameters& params =
1121 CheckTaggedInputParametersOf(node->op());
1122 ConvertJSPrimitiveToUntaggedOrDeoptOp::JSPrimitiveKind from_kind;
1123 switch (params.mode()) {
1124#define CASE(mode) \
1125 case CheckTaggedInputMode::k##mode: \
1126 from_kind = \
1127 ConvertJSPrimitiveToUntaggedOrDeoptOp::JSPrimitiveKind::k##mode; \
1128 break;
1129 CASE(AdditiveSafeInteger)
1130 CASE(Number)
1131 CASE(NumberOrBoolean)
1132 CASE(NumberOrOddball)
1133#undef CASE
1134 }
1135 return __ ConvertJSPrimitiveToUntaggedOrDeopt(
1136 Map(node->InputAt(0)), dominating_frame_state, from_kind,
1137 ConvertJSPrimitiveToUntaggedOrDeoptOp::UntaggedKind::kFloat64,
1138 CheckForMinusZeroMode::kDontCheckForMinusZero, params.feedback());
1139 }
1140
1141 case IrOpcode::kCheckedTaggedToArrayIndex: {
1142 DCHECK(dominating_frame_state.valid());
1143 const CheckParameters& params = CheckParametersOf(node->op());
1144 return __ ConvertJSPrimitiveToUntaggedOrDeopt(
1145 Map(node->InputAt(0)), dominating_frame_state,
1146 ConvertJSPrimitiveToUntaggedOrDeoptOp::JSPrimitiveKind::
1147 kNumberOrString,
1148 ConvertJSPrimitiveToUntaggedOrDeoptOp::UntaggedKind::kArrayIndex,
1149 CheckForMinusZeroMode::kCheckForMinusZero, params.feedback());
1150 }
1151
1152 case IrOpcode::kCheckedTaggedSignedToInt32: {
1153 DCHECK(dominating_frame_state.valid());
1154 const CheckParameters& params = CheckParametersOf(node->op());
1155 return __ ConvertJSPrimitiveToUntaggedOrDeopt(
1156 Map(node->InputAt(0)), dominating_frame_state,
1157 ConvertJSPrimitiveToUntaggedOrDeoptOp::JSPrimitiveKind::kSmi,
1158 ConvertJSPrimitiveToUntaggedOrDeoptOp::UntaggedKind::kInt32,
1159 CheckForMinusZeroMode::kDontCheckForMinusZero, params.feedback());
1160 }
1161
1162 case IrOpcode::kSelect: {
1163 V<Word32> cond = Map(node->InputAt(0));
1164 V<Any> vtrue = Map(node->InputAt(1));
1165 V<Any> vfalse = Map(node->InputAt(2));
1166 const SelectParameters& params = SelectParametersOf(op);
1167 return __ Select(cond, vtrue, vfalse,
1168 RegisterRepresentation::FromMachineRepresentation(
1169 params.representation()),
1170 params.hint(), SelectOp::Implementation::kBranch);
1171 }
1172 case IrOpcode::kWord32Select:
1173 return __ Select(
1174 Map<Word32>(node->InputAt(0)), Map<Word32>(node->InputAt(1)),
1175 Map<Word32>(node->InputAt(2)), RegisterRepresentation::Word32(),
1176 BranchHint::kNone, SelectOp::Implementation::kCMove);
1177 case IrOpcode::kWord64Select:
1178 return __ Select(
1179 Map<Word32>(node->InputAt(0)), Map<Word64>(node->InputAt(1)),
1180 Map<Word64>(node->InputAt(2)), RegisterRepresentation::Word64(),
1181 BranchHint::kNone, SelectOp::Implementation::kCMove);
1182 case IrOpcode::kFloat32Select:
1183 return __ Select(
1184 Map<Word32>(node->InputAt(0)), Map<Float32>(node->InputAt(1)),
1185 Map<Float32>(node->InputAt(2)), RegisterRepresentation::Float32(),
1186 BranchHint::kNone, SelectOp::Implementation::kCMove);
1187
1188 case IrOpcode::kLoad:
1189 case IrOpcode::kLoadImmutable:
1190 case IrOpcode::kUnalignedLoad: {
1191 MemoryRepresentation loaded_rep =
1192 MemoryRepresentation::FromMachineType(LoadRepresentationOf(op));
1193 Node* base = node->InputAt(0);
1194 Node* index = node->InputAt(1);
1195 // It's ok to merge LoadImmutable into Load after scheduling.
1196 LoadOp::Kind kind = opcode == IrOpcode::kUnalignedLoad
1197 ? LoadOp::Kind::RawUnaligned()
1198 : LoadOp::Kind::RawAligned();
1199 if (__ output_graph().Get(Map(base)).outputs_rep().at(0) ==
1200 RegisterRepresentation::Tagged()) {
1201 kind = LoadOp::Kind::TaggedBase();
1202 }
1203 if (index->opcode() == IrOpcode::kInt32Constant) {
1204 int32_t offset = OpParameter<int32_t>(index->op());
1205 if (kind.tagged_base) offset += kHeapObjectTag;
1206 return __ Load(Map(base), kind, loaded_rep, offset);
1207 }
1208 if (index->opcode() == IrOpcode::kInt64Constant) {
1209 int64_t offset = OpParameter<int64_t>(index->op());
1210 if (kind.tagged_base) offset += kHeapObjectTag;
1211 if (base::IsValueInRangeForNumericType<int32_t>(offset)) {
1212 return __ Load(Map(base), kind, loaded_rep,
1213 static_cast<int32_t>(offset));
1214 }
1215 }
1216 int32_t offset = kind.tagged_base ? kHeapObjectTag : 0;
1217 uint8_t element_size_log2 = 0;
1218 return __ Load(Map(base), Map(index), kind, loaded_rep, offset,
1219 element_size_log2);
1220 }
1221 case IrOpcode::kProtectedLoad: {
1222 MemoryRepresentation loaded_rep =
1223 MemoryRepresentation::FromMachineType(LoadRepresentationOf(op));
1224 return __ Load(Map(node->InputAt(0)), Map(node->InputAt(1)),
1225 LoadOp::Kind::Protected(), loaded_rep);
1226 }
1227
1228 case IrOpcode::kStore:
1229 case IrOpcode::kUnalignedStore: {
1230 OpIndex base = Map(node->InputAt(0));
1231 if (pipeline_kind == TurboshaftPipelineKind::kCSA) {
1232 // TODO(nicohartmann@): This is currently required to properly compile
1233 // builtins. We should fix them and remove this.
1234 if (__ output_graph().Get(base).outputs_rep()[0] ==
1235 RegisterRepresentation::Tagged()) {
1236 base = __ BitcastTaggedToWordPtr(base);
1237 }
1238 }
1239 bool aligned = opcode != IrOpcode::kUnalignedStore;
1240 StoreRepresentation store_rep =
1241 aligned ? StoreRepresentationOf(op)
1242 : StoreRepresentation(UnalignedStoreRepresentationOf(op),
1243 kNoWriteBarrier);
1244 StoreOp::Kind kind = opcode == IrOpcode::kStore
1245 ? StoreOp::Kind::RawAligned()
1246 : StoreOp::Kind::RawUnaligned();
1247 bool initializing_transitioning = inside_region;
1248
1249 Node* index = node->InputAt(1);
1250 Node* value = node->InputAt(2);
1251 if (index->opcode() == IrOpcode::kInt32Constant) {
1252 int32_t offset = OpParameter<int32_t>(index->op());
1253 __ Store(base, Map(value), kind,
1254 MemoryRepresentation::FromMachineRepresentation(
1255 store_rep.representation()),
1256 store_rep.write_barrier_kind(), offset,
1257 initializing_transitioning);
1258 return OpIndex::Invalid();
1259 }
1260 if (index->opcode() == IrOpcode::kInt64Constant) {
1261 int64_t offset = OpParameter<int64_t>(index->op());
1262 if (base::IsValueInRangeForNumericType<int32_t>(offset)) {
1263 __ Store(base, Map(value), kind,
1264 MemoryRepresentation::FromMachineRepresentation(
1265 store_rep.representation()),
1266 store_rep.write_barrier_kind(), static_cast<int32_t>(offset),
1267 initializing_transitioning);
1268 return OpIndex::Invalid();
1269 }
1270 }
1271 int32_t offset = 0;
1272 uint8_t element_size_log2 = 0;
1273 __ Store(base, Map(index), Map(value), kind,
1274 MemoryRepresentation::FromMachineRepresentation(
1275 store_rep.representation()),
1276 store_rep.write_barrier_kind(), offset, element_size_log2,
1277 initializing_transitioning);
1278 return OpIndex::Invalid();
1279 }
1280 case IrOpcode::kProtectedStore:
1281 // We don't mark ProtectedStores as initializing even when inside regions,
1282 // since we don't store-store eliminate them because they have a raw base.
1283 __ Store(Map(node->InputAt(0)), Map(node->InputAt(1)),
1284 Map(node->InputAt(2)), StoreOp::Kind::Protected(),
1285 MemoryRepresentation::FromMachineRepresentation(
1286 OpParameter<MachineRepresentation>(node->op())),
1287 WriteBarrierKind::kNoWriteBarrier);
1288 return OpIndex::Invalid();
1289
1290 case IrOpcode::kRetain:
1291 __ Retain(Map(node->InputAt(0)));
1292 return OpIndex::Invalid();
1293 case IrOpcode::kStackPointerGreaterThan:
1294 return __ StackPointerGreaterThan(Map<WordPtr>(node->InputAt(0)),
1295 StackCheckKindOf(op));
1296 case IrOpcode::kLoadStackCheckOffset:
1297 return __ StackCheckOffset();
1298 case IrOpcode::kLoadFramePointer:
1299 return __ FramePointer();
1300 case IrOpcode::kLoadParentFramePointer:
1301 return __ ParentFramePointer();
1302
1303 case IrOpcode::kStackSlot: {
1304 StackSlotRepresentation rep = StackSlotRepresentationOf(op);
1305 return __ StackSlot(rep.size(), rep.alignment(), rep.is_tagged());
1306 }
1307 case IrOpcode::kBranch:
1308 DCHECK_EQ(block->SuccessorCount(), 2);
1309 __ Branch(Map(node->InputAt(0)), Map(block->SuccessorAt(0)),
1310 Map(block->SuccessorAt(1)), BranchHintOf(node->op()));
1311 return OpIndex::Invalid();
1312
1313 case IrOpcode::kSwitch: {
1314 BasicBlock* default_branch = block->successors().back();
1315 DCHECK_EQ(IrOpcode::kIfDefault, default_branch->front()->opcode());
1316 size_t case_count = block->SuccessorCount() - 1;
1317 base::SmallVector<SwitchOp::Case, 16> cases;
1318 for (size_t i = 0; i < case_count; ++i) {
1319 BasicBlock* branch = block->SuccessorAt(i);
1320 const IfValueParameters& p = IfValueParametersOf(branch->front()->op());
1321 cases.emplace_back(p.value(), Map(branch), p.hint());
1322 }
1323 __ Switch(
1324 Map(node->InputAt(0)), graph_zone->CloneVector(base::VectorOf(cases)),
1325 Map(default_branch), BranchHintOf(default_branch->front()->op()));
1326 return OpIndex::Invalid();
1327 }
1328
1329 case IrOpcode::kCall: {
1330 auto call_descriptor = CallDescriptorOf(op);
1331 const JSWasmCallParameters* wasm_call_parameters = nullptr;
1332#if V8_ENABLE_WEBASSEMBLY
1333 if (call_descriptor->IsAnyWasmFunctionCall() &&
1334 v8_flags.turboshaft_wasm_in_js_inlining) {
1335 // A JS-to-Wasm call where the wrapper got inlined in TurboFan but the
1336 // actual Wasm body inlining was either not possible or is going to
1337 // happen later in Turboshaft. See https://crbug.com/353475584.
1338 // Make sure that for each not-yet-body-inlined call node, there is an
1339 // entry in the sidetable.
1341 auto it = js_wasm_calls_sidetable->find(node->id());
1342 CHECK_NE(it, js_wasm_calls_sidetable->end());
1343 wasm_call_parameters = it->second;
1344 }
1345#endif // V8_ENABLE_WEBASSEMBLY
1346 CanThrow can_throw =
1347 op->HasProperty(Operator::kNoThrow) ? CanThrow::kNo : CanThrow::kYes;
1348 const TSCallDescriptor* ts_descriptor = TSCallDescriptor::Create(
1349 call_descriptor, can_throw, LazyDeoptOnThrow::kNo, graph_zone,
1350 wasm_call_parameters);
1351
1352 base::SmallVector<OpIndex, 16> arguments;
1353 // The input `0` is the callee; the following value inputs are the
1354 // arguments. `CallDescriptor::InputCount()` counts the callee and
1355 // arguments, but excludes a possible `FrameState` input.
1356 OpIndex callee = Map(node->InputAt(0));
1357 for (int i = 1; i < static_cast<int>(call_descriptor->InputCount());
1358 ++i) {
1359 arguments.emplace_back(Map(node->InputAt(i)));
1360 }
1361
1362 OptionalV<FrameState> frame_state_idx = OptionalV<FrameState>::Nullopt();
1363 if (call_descriptor->NeedsFrameState()) {
1364 compiler::FrameState frame_state{
1365 node->InputAt(static_cast<int>(call_descriptor->InputCount()))};
1366 frame_state_idx = Map(frame_state);
1367 }
1368 std::optional<decltype(assembler)::CatchScope> catch_scope;
1369 if (is_final_control) {
1370 Block* catch_block = Map(block->SuccessorAt(1));
1371 catch_scope.emplace(assembler, catch_block);
1372 }
1373 OpEffects effects =
1374 OpEffects().CanDependOnChecks().CanChangeControlFlow().CanDeopt();
1375 if ((call_descriptor->flags() & CallDescriptor::kNoAllocate) == 0) {
1376 effects = effects.CanAllocate();
1377 }
1378 if (!op->HasProperty(Operator::kNoWrite)) {
1379 effects = effects.CanWriteMemory();
1380 }
1381 if (!op->HasProperty(Operator::kNoRead)) {
1382 effects = effects.CanReadMemory();
1383 }
1384 OpIndex result =
1385 __ Call(callee, frame_state_idx, base::VectorOf(arguments),
1386 ts_descriptor, effects);
1387 if (is_final_control) {
1388 // The `__ Call()` before has already created exceptional control flow
1389 // and bound a new block for the success case. So we can just `Goto` the
1390 // block that Turbofan designated as the `IfSuccess` successor.
1391 __ Goto(Map(block->SuccessorAt(0)));
1392 }
1393 return result;
1394 }
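// Added annotation (illustrative, not part of the original source): when a
// call terminates its block, TurboFan models exceptional control flow with
// two successor blocks:
//
//   Call --+-- IfSuccess  -> reached via the Goto emitted above
//          +-- IfException -> the catch block bound through CatchScope; its
//                             exception value comes from __ CatchBlockBegin()
//                             (see the kIfException case earlier).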
1395
1396 case IrOpcode::kTailCall: {
1397 auto call_descriptor = CallDescriptorOf(op);
1398 base::SmallVector<OpIndex, 16> arguments;
1399 // The input `0` is the callee; the following value inputs are the
1400 // arguments. `CallDescriptor::InputCount()` counts the callee and
1401 // arguments.
1402 OpIndex callee = Map(node->InputAt(0));
1403 for (int i = 1; i < static_cast<int>(call_descriptor->InputCount());
1404 ++i) {
1405 arguments.emplace_back(Map(node->InputAt(i)));
1406 }
1407
1408 CanThrow can_throw =
1409 op->HasProperty(Operator::kNoThrow) ? CanThrow::kNo : CanThrow::kYes;
1410 const TSCallDescriptor* ts_descriptor = TSCallDescriptor::Create(
1411 call_descriptor, can_throw, LazyDeoptOnThrow::kNo, graph_zone);
1412
1413 __ TailCall(callee, base::VectorOf(arguments), ts_descriptor);
1414 return OpIndex::Invalid();
1415 }
1416
1417 case IrOpcode::kFrameState: {
1418 compiler::FrameState frame_state{node};
1419 FrameStateData::Builder builder;
1420 BuildFrameStateData(&builder, frame_state);
1421 if (builder.Inputs().size() >
1422 std::numeric_limits<decltype(Operation::input_count)>::max() - 1) {
1423 *bailout = BailoutReason::kTooManyArguments;
1424 return OpIndex::Invalid();
1425 }
1426 return __ FrameState(builder.Inputs(), builder.inlined(),
1427 builder.AllocateFrameStateData(
1428 frame_state.frame_state_info(), graph_zone));
1429 }
1430
1431 case IrOpcode::kDeoptimizeIf:
1432 __ DeoptimizeIf(Map(node->InputAt(0)), Map(node->InputAt(1)),
1433 &DeoptimizeParametersOf(op));
1434 return OpIndex::Invalid();
1435 case IrOpcode::kDeoptimizeUnless:
1436 __ DeoptimizeIfNot(Map(node->InputAt(0)), Map(node->InputAt(1)),
1437 &DeoptimizeParametersOf(op));
1438 return OpIndex::Invalid();
1439
1440#if V8_ENABLE_WEBASSEMBLY
1441 case IrOpcode::kTrapIf:
1442 // For wasm the dominating_frame_state is invalid and will not be used.
1443 // For traps inlined into JS the dominating_frame_state is valid and is
1444 // needed for the trap.
1445 __ TrapIf(Map(node->InputAt(0)), dominating_frame_state, TrapIdOf(op));
1446 return OpIndex::Invalid();
1447
1448 case IrOpcode::kTrapUnless:
1449 // For wasm the dominating_frame_state is invalid and will not be used.
1450 // For traps inlined into JS the dominating_frame_state is valid and is
1451 // needed for the trap.
1452 __ TrapIfNot(Map(node->InputAt(0)), dominating_frame_state, TrapIdOf(op));
1453 return OpIndex::Invalid();
1454#endif // V8_ENABLE_WEBASSEMBLY
1455
1456 case IrOpcode::kDeoptimize: {
1457 V<FrameState> frame_state = Map(node->InputAt(0));
1458 __ Deoptimize(frame_state, &DeoptimizeParametersOf(op));
1459 return OpIndex::Invalid();
1460 }
1461
1462 case IrOpcode::kReturn: {
1463 Node* pop_count = node->InputAt(0);
1464 base::SmallVector<OpIndex, 4> return_values;
1465 for (int i = 1; i < node->op()->ValueInputCount(); ++i) {
1466 return_values.push_back(Map(node->InputAt(i)));
1467 }
1468 __ Return(Map(pop_count), base::VectorOf(return_values));
1469 return OpIndex::Invalid();
1470 }
1471 case IrOpcode::kUnreachable:
1472 case IrOpcode::kThrow:
1473 __ Unreachable();
1474 return OpIndex::Invalid();
1475
1476 case IrOpcode::kDeadValue:
1477 // Typically, DeadValue nodes have Unreachable as their input. In this
1478 // case, we would not get here because Unreachable already terminated the
1479 // block and we stopped generating additional operations.
1480 DCHECK_NE(node->InputAt(0)->opcode(), IrOpcode::kUnreachable);
1481 // If we find a DeadValue without an Unreachable input, we just generate
1482 // one here and stop.
1483 __ Unreachable();
1484 return OpIndex::Invalid();
1485
1486 case IrOpcode::kProjection: {
1487 Node* input = node->InputAt(0);
1488 size_t index = ProjectionIndexOf(op);
1489 RegisterRepresentation rep =
1490 RegisterRepresentation::FromMachineRepresentation(
1491 NodeProperties::GetProjectionType(node));
1492 return __ Projection(Map(input), index, rep);
1493 }
1494
1495 case IrOpcode::kStaticAssert:
1496 __ StaticAssert(Map(node->InputAt(0)), StaticAssertSourceOf(node->op()));
1497 return OpIndex::Invalid();
1498
1499 case IrOpcode::kAllocate: {
1500 AllocationType allocation = AllocationTypeOf(node->op());
1501 return __ FinishInitialization(
1502 __ Allocate(Map(node->InputAt(0)), allocation));
1503 }
1504 // TODO(nicohartmann@): We might not see AllocateRaw here anymore.
1505 case IrOpcode::kAllocateRaw: {
1506 Node* size = node->InputAt(0);
1507 const AllocateParameters& params = AllocateParametersOf(node->op());
1508 return __ FinishInitialization(
1509 __ Allocate(Map(size), params.allocation_type()));
1510 }
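// Both allocation opcodes are lowered to an `Allocate` wrapped in
// `FinishInitialization`, which (roughly) marks the point at which the new
// object counts as fully initialized for subsequent operations.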
1511 case IrOpcode::kStoreToObject: {
1512 Node* object = node->InputAt(0);
1513 Node* offset = node->InputAt(1);
1514 Node* value = node->InputAt(2);
1515 ObjectAccess const& access = ObjectAccessOf(node->op());
1516 bool initializing_transitioning = inside_region;
1517 __ Store(Map(object), Map(offset), Map(value),
1518 StoreOp::Kind::TaggedBase(),
1519 MemoryRepresentation::FromMachineType(access.machine_type),
1520 access.write_barrier_kind, kHeapObjectTag,
1521 initializing_transitioning);
1522 return OpIndex::Invalid();
1523 }
1524 case IrOpcode::kStoreElement: {
1525 Node* object = node->InputAt(0);
1526 Node* index = node->InputAt(1);
1527 Node* value = node->InputAt(2);
1528 ElementAccess const& access = ElementAccessOf(node->op());
1529 DCHECK(!access.machine_type.IsMapWord());
1530 StoreOp::Kind kind = StoreOp::Kind::Aligned(access.base_is_tagged);
1531 MemoryRepresentation rep =
1532 MemoryRepresentation::FromMachineType(access.machine_type);
1533 bool initializing_transitioning = inside_region;
1534 __ Store(Map(object), Map(index), Map(value), kind, rep,
1535 access.write_barrier_kind, access.header_size,
1536 rep.SizeInBytesLog2(), initializing_transitioning);
1537 return OpIndex::Invalid();
1538 }
1539 case IrOpcode::kStoreField: {
1540 OpIndex object = Map(node->InputAt(0));
1541 OpIndex value = Map(node->InputAt(1));
1542 FieldAccess const& access = FieldAccessOf(node->op());
1543 // External pointers must never be stored by optimized code.
1544 DCHECK(!access.type.Is(compiler::Type::ExternalPointer()) ||
1545 !V8_ENABLE_SANDBOX_BOOL);
1546 // SandboxedPointers are not currently stored by optimized code.
1547 DCHECK(!access.type.Is(compiler::Type::SandboxedPointer()));
1548
1549#ifdef V8_ENABLE_SANDBOX
1550 if (access.is_bounded_size_access) {
1551 value = __ ShiftLeft(value, kBoundedSizeShift,
1552 WordRepresentation::WordPtr());
1553 }
1554#endif // V8_ENABLE_SANDBOX
1555
1556 StoreOp::Kind kind = StoreOp::Kind::Aligned(access.base_is_tagged);
1557 MachineType machine_type = access.machine_type;
1558 if (machine_type.IsMapWord()) {
1559 machine_type = MachineType::TaggedPointer();
1560#ifdef V8_MAP_PACKING
1561 UNIMPLEMENTED();
1562#endif
1563 }
1564
1565 bool initializing_transitioning =
1566 access.maybe_initializing_or_transitioning_store;
1567 if (!inside_region) {
1568 // Mark stores outside a region as non-initializing and
1569 // non-transitioning.
1570 initializing_transitioning = false;
1571 }
1572
1573 MemoryRepresentation rep =
1574 MemoryRepresentation::FromMachineType(machine_type);
1575
1576 if (const ConstantOp* value_cst =
1580 // This is storing a Smi as a raw Word64. Instead, we'll convert the
1581 // raw Word64 to a proper Smi.
1582 if (IsValidSmi(value_cst->signed_integral())) {
1583 value = __ SmiConstant(Tagged<Smi>(value_cst->signed_integral()));
1584 }
1585 }
1586 }
1587
1588 __ Store(object, value, kind, rep, access.write_barrier_kind,
1589 access.offset, initializing_transitioning,
1590 access.indirect_pointer_tag);
1591 return OpIndex::Invalid();
1592 }
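// Summary of the field-store lowering above: the value is stored as an
// aligned store at `access.offset` with the access's write barrier kind;
// the initializing/transitioning bit is only preserved inside an
// allocation region, and raw Word64 constants that encode Smis are
// re-tagged via `SmiConstant` before being stored.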
1593 case IrOpcode::kLoadFromObject:
1594 case IrOpcode::kLoadImmutableFromObject: {
1595 Node* object = node->InputAt(0);
1596 Node* offset = node->InputAt(1);
1597 ObjectAccess const& access = ObjectAccessOf(node->op());
1598 MemoryRepresentation rep =
1599 MemoryRepresentation::FromMachineType(access.machine_type);
1600 return __ Load(Map(object), Map(offset), LoadOp::Kind::TaggedBase(), rep,
1601 kHeapObjectTag);
1602 }
1603 case IrOpcode::kLoadField: {
1604 Node* object = node->InputAt(0);
1605 FieldAccess const& access = FieldAccessOf(node->op());
1606 StoreOp::Kind kind = StoreOp::Kind::Aligned(access.base_is_tagged);
1607 MachineType machine_type = access.machine_type;
1608 if (machine_type.IsMapWord()) {
1609 machine_type = MachineType::TaggedPointer();
1610#ifdef V8_MAP_PACKING
1611 UNIMPLEMENTED();
1612#endif
1613 }
1614 MemoryRepresentation rep =
1615 MemoryRepresentation::FromMachineType(machine_type);
1616#ifdef V8_ENABLE_SANDBOX
1617 bool is_sandboxed_external =
1618 access.type.Is(compiler::Type::ExternalPointer());
1619 if (is_sandboxed_external) {
1620 // Fields for sandboxed external pointers contain a 32-bit handle, not a
1621 // 64-bit raw pointer.
1622 rep = MemoryRepresentation::Uint32();
1623 }
1624#endif // V8_ENABLE_SANDBOX
1625 OpIndex value = __ Load(Map(object), kind, rep, access.offset);
1626#ifdef V8_ENABLE_SANDBOX
1627 if (is_sandboxed_external) {
1628 value = __ DecodeExternalPointer(value, access.external_pointer_tag);
1629 }
1630 if (access.is_bounded_size_access) {
1631 DCHECK(!is_sandboxed_external);
1632 value = __ ShiftRightLogical(value, kBoundedSizeShift,
1633 WordRepresentation::WordPtr());
1634 }
1635#endif // V8_ENABLE_SANDBOX
1636 return value;
1637 }
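// Under the sandbox, external-pointer fields hold a 32-bit handle that is
// decoded with `DecodeExternalPointer` after the load, and bounded-size
// fields are stored shifted by `kBoundedSizeShift`, so the loaded value is
// shifted back to the right here.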
1638 case IrOpcode::kLoadElement: {
1639 Node* object = node->InputAt(0);
1640 Node* index = node->InputAt(1);
1641 ElementAccess const& access = ElementAccessOf(node->op());
1642 LoadOp::Kind kind = LoadOp::Kind::Aligned(access.base_is_tagged);
1643 MemoryRepresentation rep =
1644 MemoryRepresentation::FromMachineType(access.machine_type);
1645 return __ Load(Map(object), Map(index), kind, rep, access.header_size,
1646 rep.SizeInBytesLog2());
1647 }
1648 case IrOpcode::kCheckTurboshaftTypeOf: {
1649 Node* input = node->InputAt(0);
1650 Node* type_description = node->InputAt(1);
1651
1652 HeapObjectMatcher m(type_description);
1653 CHECK(m.HasResolvedValue() && m.Ref(broker).IsString() &&
1654 m.Ref(broker).AsString().IsContentAccessible());
1655 StringRef type_string = m.Ref(broker).AsString();
1656 DirectHandle<String> pattern_string =
1657 *type_string.ObjectIfContentAccessible(broker);
1658 std::unique_ptr<char[]> pattern = pattern_string->ToCString();
1659
1660 auto type_opt =
1661 Type::ParseFromString(std::string_view{pattern.get()}, graph_zone);
1662 if (type_opt == std::nullopt) {
1663 FATAL(
1664 "String '%s' (of %d:CheckTurboshaftTypeOf) is not a valid type "
1665 "description!",
1666 pattern.get(), node->id());
1667 }
1668
1669 OpIndex input_index = Map(input);
1670 RegisterRepresentation rep =
1671 __ output_graph().Get(input_index).outputs_rep()[0];
1672 return __ CheckTurboshaftTypeOf(input_index, rep, *type_opt, false);
1673 }
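// `CheckTurboshaftTypeOf` carries its expected type as a string constant:
// the string is parsed via `Type::ParseFromString` and re-emitted as a
// Turboshaft type check against the mapped input's register representation.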
1674
1675 case IrOpcode::kNewConsString:
1676 return __ NewConsString(Map(node->InputAt(0)), Map(node->InputAt(1)),
1677 Map(node->InputAt(2)));
1678 case IrOpcode::kNewDoubleElements:
1679 return __ NewArray(Map(node->InputAt(0)), NewArrayOp::Kind::kDouble,
1680 AllocationTypeOf(node->op()));
1681 case IrOpcode::kNewSmiOrObjectElements:
1682 return __ NewArray(Map(node->InputAt(0)), NewArrayOp::Kind::kObject,
1683 AllocationTypeOf(node->op()));
1684
1685 case IrOpcode::kDoubleArrayMin:
1686 return __ DoubleArrayMinMax(Map(node->InputAt(0)),
1687 DoubleArrayMinMaxOp::Kind::kMin);
1688 case IrOpcode::kDoubleArrayMax:
1689 return __ DoubleArrayMinMax(Map(node->InputAt(0)),
1690 DoubleArrayMinMaxOp::Kind::kMax);
1691
1692 case IrOpcode::kLoadFieldByIndex:
1693 return __ LoadFieldByIndex(Map(node->InputAt(0)), Map(node->InputAt(1)));
1694
1695 case IrOpcode::kCheckedAdditiveSafeIntegerAdd: {
1696 DCHECK(Is64());
1697 DCHECK(dominating_frame_state.valid());
1698 auto shifted_lhs =
1699 __ Word64ShiftLeft(Map(node->InputAt(0)), kAdditiveSafeIntegerShift);
1700 auto shifted_rhs =
1701 __ Word64ShiftLeft(Map(node->InputAt(1)), kAdditiveSafeIntegerShift);
1702 auto shifted_result = __ Word64SignedAddDeoptOnOverflow(
1703 shifted_lhs, shifted_rhs, dominating_frame_state, FeedbackSource{});
1704 return __ Word64ShiftRightArithmetic(shifted_result,
1705 kAdditiveSafeIntegerShift);
1706 }
1707 case IrOpcode::kCheckedAdditiveSafeIntegerSub: {
1708 DCHECK(Is64());
1709 DCHECK(dominating_frame_state.valid());
1710 auto shifted_lhs =
1711 __ Word64ShiftLeft(Map(node->InputAt(0)), kAdditiveSafeIntegerShift);
1712 auto shifted_rhs =
1713 __ Word64ShiftLeft(Map(node->InputAt(1)), kAdditiveSafeIntegerShift);
1714 auto shifted_result = __ Word64SignedSubDeoptOnOverflow(
1715 shifted_lhs, shifted_rhs, dominating_frame_state, FeedbackSource{});
1716 return __ Word64ShiftRightArithmetic(shifted_result,
1717 kAdditiveSafeIntegerShift);
1718 }
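// The two cases above implement the additive-safe-integer range check with
// a shift trick: both operands are shifted left by
// `kAdditiveSafeIntegerShift`, so any result outside the additive-safe
// range overflows the 64-bit signed add/sub and triggers the deopt; the
// surviving result is shifted back down arithmetically.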
1719 case IrOpcode::kCheckedInt64Add:
1720 DCHECK(Is64());
1721 DCHECK(dominating_frame_state.valid());
1722 return __ Word64SignedAddDeoptOnOverflow(
1723 Map(node->InputAt(0)), Map(node->InputAt(1)), dominating_frame_state,
1724 FeedbackSource{});
1725
1726 case IrOpcode::kCheckedInt64Sub:
1727 DCHECK(Is64());
1728 DCHECK(dominating_frame_state.valid());
1729 return __ Word64SignedSubDeoptOnOverflow(
1730 Map(node->InputAt(0)), Map(node->InputAt(1)), dominating_frame_state,
1731 FeedbackSource{});
1732
1733 case IrOpcode::kCheckedInt32Add:
1734 DCHECK(dominating_frame_state.valid());
1735 return __ Word32SignedAddDeoptOnOverflow(
1736 Map(node->InputAt(0)), Map(node->InputAt(1)), dominating_frame_state,
1737 FeedbackSource{});
1738
1739 case IrOpcode::kCheckedInt32Sub:
1740 DCHECK(dominating_frame_state.valid());
1741 return __ Word32SignedSubDeoptOnOverflow(
1742 Map(node->InputAt(0)), Map(node->InputAt(1)), dominating_frame_state,
1743 FeedbackSource{});
1744
1745 case IrOpcode::kCheckedInt32Mul: {
1746 DCHECK(dominating_frame_state.valid());
1747 CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
1748 return __ Word32SignedMulDeoptOnOverflow(
1749 Map(node->InputAt(0)), Map(node->InputAt(1)), dominating_frame_state,
1750 FeedbackSource{}, mode);
1751 }
1752
1753 case IrOpcode::kCheckedInt64Mul:
1754 DCHECK(Is64());
1755 DCHECK(dominating_frame_state.valid());
1756 return __ Word64SignedMulDeoptOnOverflow(
1757 Map(node->InputAt(0)), Map(node->InputAt(1)), dominating_frame_state,
1758 FeedbackSource{});
1759
1760 case IrOpcode::kCheckedInt32Div:
1761 DCHECK(dominating_frame_state.valid());
1762 return __ Word32SignedDivDeoptOnOverflow(
1763 Map(node->InputAt(0)), Map(node->InputAt(1)), dominating_frame_state,
1764 FeedbackSource{});
1765
1766 case IrOpcode::kCheckedInt64Div:
1767 DCHECK(Is64());
1768 DCHECK(dominating_frame_state.valid());
1769 return __ Word64SignedDivDeoptOnOverflow(
1770 Map(node->InputAt(0)), Map(node->InputAt(1)), dominating_frame_state,
1771 FeedbackSource{});
1772
1773 case IrOpcode::kCheckedUint32Div:
1774 DCHECK(dominating_frame_state.valid());
1775 return __ Word32UnsignedDivDeoptOnOverflow(
1776 Map(node->InputAt(0)), Map(node->InputAt(1)), dominating_frame_state,
1777 FeedbackSource{});
1778
1779 case IrOpcode::kCheckedInt32Mod:
1780 DCHECK(dominating_frame_state.valid());
1781 return __ Word32SignedModDeoptOnOverflow(
1782 Map(node->InputAt(0)), Map(node->InputAt(1)), dominating_frame_state,
1783 FeedbackSource{});
1784
1785 case IrOpcode::kCheckedInt64Mod:
1786 DCHECK(Is64());
1787 DCHECK(dominating_frame_state.valid());
1788 return __ Word64SignedModDeoptOnOverflow(
1789 Map(node->InputAt(0)), Map(node->InputAt(1)), dominating_frame_state,
1790 FeedbackSource{});
1791
1792 case IrOpcode::kCheckedUint32Mod:
1793 DCHECK(dominating_frame_state.valid());
1794 return __ Word32UnsignedModDeoptOnOverflow(
1795 Map(node->InputAt(0)), Map(node->InputAt(1)), dominating_frame_state,
1796 FeedbackSource{});
1797
1798#define BIGINT_BINOP_CASE(op, kind) \
1799 case IrOpcode::kBigInt##op: \
1800 DCHECK(dominating_frame_state.valid()); \
1801 return __ BigIntBinop(Map(node->InputAt(0)), Map(node->InputAt(1)), \
1802 dominating_frame_state, \
1803 BigIntBinopOp::Kind::k##kind);
1804 BIGINT_BINOP_CASE(Add, Add)
1805 BIGINT_BINOP_CASE(Subtract, Sub)
1806 BIGINT_BINOP_CASE(Multiply, Mul)
1807 BIGINT_BINOP_CASE(Divide, Div)
1808 BIGINT_BINOP_CASE(Modulus, Mod)
1809 BIGINT_BINOP_CASE(BitwiseAnd, BitwiseAnd)
1810 BIGINT_BINOP_CASE(BitwiseOr, BitwiseOr)
1811 BIGINT_BINOP_CASE(BitwiseXor, BitwiseXor)
1812 BIGINT_BINOP_CASE(ShiftLeft, ShiftLeft)
1813 BIGINT_BINOP_CASE(ShiftRight, ShiftRightArithmetic)
1814#undef BIGINT_BINOP_CASE
1815
1816 case IrOpcode::kBigIntEqual:
1817 return __ BigIntEqual(Map(node->InputAt(0)), Map(node->InputAt(1)));
1818
1819 case IrOpcode::kBigIntLessThan:
1820 return __ BigIntLessThan(Map(node->InputAt(0)), Map(node->InputAt(1)));
1821 case IrOpcode::kBigIntLessThanOrEqual:
1822 return __ BigIntLessThanOrEqual(Map(node->InputAt(0)),
1823 Map(node->InputAt(1)));
1824
1825 case IrOpcode::kBigIntNegate:
1826 return __ BigIntNegate(Map<BigInt>(node->InputAt(0)));
1827
1828 case IrOpcode::kLoadRootRegister:
1829 // Inlined usage of wasm root register operation in JS.
1830 return assembler.ReduceLoadRootRegister();
1831
1832 case IrOpcode::kStringCharCodeAt:
1833 return __ StringCharCodeAt(Map(node->InputAt(0)), Map(node->InputAt(1)));
1834 case IrOpcode::kStringCodePointAt:
1835 return __ StringCodePointAt(Map(node->InputAt(0)), Map(node->InputAt(1)));
1836
1837#ifdef V8_INTL_SUPPORT
1838 case IrOpcode::kStringToLowerCaseIntl:
1839 return __ StringToLowerCaseIntl(Map(node->InputAt(0)));
1840 case IrOpcode::kStringToUpperCaseIntl:
1841 return __ StringToUpperCaseIntl(Map(node->InputAt(0)));
1842#else
1843 case IrOpcode::kStringToLowerCaseIntl:
1844 case IrOpcode::kStringToUpperCaseIntl:
1845 UNREACHABLE();
1846#endif // V8_INTL_SUPPORT
1847
1848 case IrOpcode::kStringLength:
1849 return __ StringLength(Map(node->InputAt(0)));
1850
1851 case IrOpcode::kStringWrapperLength: {
1852 V<String> str =
1853 __ LoadField<String>(Map<JSPrimitiveWrapper>(node->InputAt(0)),
1854 AccessBuilder::ForJSPrimitiveWrapperValue());
1855 return __ StringLength(str);
1856 }
1857
1858 case IrOpcode::kTypedArrayLength: {
1859 ElementsKind elements_kind = OpParameter<ElementsKind>(node->op());
1860 return __ TypedArrayLength(Map(node->InputAt(0)), elements_kind);
1861 }
1862
1863 case IrOpcode::kStringIndexOf:
1864 return __ StringIndexOf(Map(node->InputAt(0)), Map(node->InputAt(1)),
1865 Map(node->InputAt(2)));
1866
1867 case IrOpcode::kStringFromCodePointAt:
1868 return __ StringFromCodePointAt(Map(node->InputAt(0)),
1869 Map(node->InputAt(1)));
1870
1871 case IrOpcode::kStringSubstring:
1872 return __ StringSubstring(Map(node->InputAt(0)), Map(node->InputAt(1)),
1873 Map(node->InputAt(2)));
1874
1875 case IrOpcode::kStringConcat:
1876 return __ StringConcat(Map(node->InputAt(0)), Map(node->InputAt(1)),
1877 Map(node->InputAt(2)));
1878
1879 case IrOpcode::kStringEqual:
1880 return __ StringEqual(Map(node->InputAt(0)), Map(node->InputAt(1)));
1881 case IrOpcode::kStringLessThan:
1882 return __ StringLessThan(Map(node->InputAt(0)), Map(node->InputAt(1)));
1883 case IrOpcode::kStringLessThanOrEqual:
1884 return __ StringLessThanOrEqual(Map(node->InputAt(0)),
1885 Map(node->InputAt(1)));
1886
1887 case IrOpcode::kArgumentsLength:
1888 return __ ArgumentsLength();
1889 case IrOpcode::kRestLength:
1890 return __ RestLength(FormalParameterCountOf(node->op()));
1891
1892 case IrOpcode::kNewArgumentsElements: {
1893 const auto& p = NewArgumentsElementsParametersOf(node->op());
1894 // EffectControlLinearizer used to use `node->op()->properties()` to
1895 // construct the builtin call descriptor for this operation. However, this
1896 // always seemed to be `kEliminatable`, so the Turboshaft
1897 // BuiltinCallDescriptors for those builtins have this property
1898 // hard-coded.
1899 DCHECK_EQ(node->op()->properties(), Operator::kEliminatable);
1900 return __ NewArgumentsElements(Map(node->InputAt(0)), p.arguments_type(),
1901 p.formal_parameter_count());
1902 }
1903
1904 case IrOpcode::kLoadTypedElement:
1905 return __ LoadTypedElement(Map(node->InputAt(0)), Map(node->InputAt(1)),
1906 Map(node->InputAt(2)), Map(node->InputAt(3)),
1907 ExternalArrayTypeOf(node->op()));
1908 case IrOpcode::kLoadDataViewElement:
1909 return __ LoadDataViewElement(
1910 Map(node->InputAt(0)), Map(node->InputAt(1)), Map(node->InputAt(2)),
1911 Map(node->InputAt(3)), ExternalArrayTypeOf(node->op()));
1912 case IrOpcode::kLoadStackArgument:
1913 return __ LoadStackArgument(Map(node->InputAt(0)), Map(node->InputAt(1)));
1914
1915 case IrOpcode::kStoreTypedElement:
1916 __ StoreTypedElement(Map(node->InputAt(0)), Map(node->InputAt(1)),
1917 Map(node->InputAt(2)), Map(node->InputAt(3)),
1918 Map(node->InputAt(4)),
1919 ExternalArrayTypeOf(node->op()));
1920 return OpIndex::Invalid();
1921 case IrOpcode::kStoreDataViewElement:
1922 __ StoreDataViewElement(Map(node->InputAt(0)), Map(node->InputAt(1)),
1923 Map(node->InputAt(2)), Map(node->InputAt(3)),
1924 Map(node->InputAt(4)),
1925 ExternalArrayTypeOf(node->op()));
1926 return OpIndex::Invalid();
1927 case IrOpcode::kTransitionAndStoreElement:
1928 __ TransitionAndStoreArrayElement(
1929 Map(node->InputAt(0)), Map(node->InputAt(1)), Map(node->InputAt(2)),
1930 TransitionAndStoreArrayElementOp::Kind::kElement,
1931 FastMapParameterOf(node->op()).object(),
1932 DoubleMapParameterOf(node->op()).object());
1933 return OpIndex::Invalid();
1934 case IrOpcode::kTransitionAndStoreNumberElement:
1935 __ TransitionAndStoreArrayElement(
1936 Map(node->InputAt(0)), Map(node->InputAt(1)), Map(node->InputAt(2)),
1937 TransitionAndStoreArrayElementOp::Kind::kNumberElement, {},
1938 DoubleMapParameterOf(node->op()).object());
1939 return OpIndex::Invalid();
1940 case IrOpcode::kTransitionAndStoreNonNumberElement: {
1941 auto kind =
1942 ValueTypeParameterOf(node->op())
1943 .Is(compiler::Type::BooleanOrNullOrUndefined())
1944 ? TransitionAndStoreArrayElementOp::Kind::kOddballElement
1945 : TransitionAndStoreArrayElementOp::Kind::kNonNumberElement;
1946 __ TransitionAndStoreArrayElement(
1947 Map(node->InputAt(0)), Map(node->InputAt(1)), Map(node->InputAt(2)),
1948 kind, FastMapParameterOf(node->op()).object(), {});
1949 return OpIndex::Invalid();
1950 }
1951 case IrOpcode::kStoreSignedSmallElement:
1952 __ StoreSignedSmallElement(Map(node->InputAt(0)), Map(node->InputAt(1)),
1953 Map(node->InputAt(2)));
1954 return OpIndex::Invalid();
1955
1956 case IrOpcode::kCompareMaps: {
1957 const ZoneRefSet<v8::internal::Map>& maps =
1958 CompareMapsParametersOf(node->op());
1959 return __ CompareMaps(Map(node->InputAt(0)), {}, maps);
1960 }
1961
1962 case IrOpcode::kCheckMaps: {
1963 DCHECK(dominating_frame_state.valid());
1964 const auto& p = CheckMapsParametersOf(node->op());
1965 __ CheckMaps(Map(node->InputAt(0)), dominating_frame_state, {}, p.maps(),
1966 p.flags(), p.feedback());
1967 return OpIndex{};
1968 }
1969
1970 case IrOpcode::kCheckedUint32Bounds:
1971 case IrOpcode::kCheckedUint64Bounds: {
1972 WordRepresentation rep = node->opcode() == IrOpcode::kCheckedUint32Bounds
1973 ? WordRepresentation::Word32()
1974 : WordRepresentation::Word64();
1975 const CheckBoundsParameters& params = CheckBoundsParametersOf(node->op());
1976 OpIndex index = Map(node->InputAt(0));
1977 OpIndex limit = Map(node->InputAt(1));
1978 V<Word32> check = __ UintLessThan(index, limit, rep);
1979 if ((params.flags() & CheckBoundsFlag::kAbortOnOutOfBounds) != 0) {
1980 IF_NOT(LIKELY(check)) { __ Unreachable(); }
1981
1982 } else {
1983 DCHECK(dominating_frame_state.valid());
1984 __ DeoptimizeIfNot(check, dominating_frame_state,
1985 DeoptimizeReason::kOutOfBounds,
1986 params.check_parameters().feedback());
1987 }
1988 return index;
1989 }
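// Checked bounds accesses come in two flavors: with
// `CheckBoundsFlag::kAbortOnOutOfBounds` the out-of-bounds path is marked
// unreachable, otherwise it deoptimizes with `kOutOfBounds`. In both cases
// the (unmodified) index is returned as the operation's result.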
1990
1991 case IrOpcode::kCheckIf: {
1992 DCHECK(dominating_frame_state.valid());
1993 const CheckIfParameters& params = CheckIfParametersOf(node->op());
1994 __ DeoptimizeIfNot(Map(node->InputAt(0)), dominating_frame_state,
1995 params.reason(), params.feedback());
1996 return OpIndex::Invalid();
1997 }
1998
1999 case IrOpcode::kCheckClosure:
2000 DCHECK(dominating_frame_state.valid());
2001 return __ CheckedClosure(Map(node->InputAt(0)), dominating_frame_state,
2002 FeedbackCellOf(node->op()));
2003
2004 case IrOpcode::kCheckEqualsSymbol:
2005 DCHECK(dominating_frame_state.valid());
2006 __ DeoptimizeIfNot(
2007 __ TaggedEqual(Map(node->InputAt(0)), Map(node->InputAt(1))),
2008 dominating_frame_state, DeoptimizeReason::kWrongName,
2009 FeedbackSource{});
2010 return OpIndex::Invalid();
2011
2012 case IrOpcode::kCheckEqualsInternalizedString:
2013 DCHECK(dominating_frame_state.valid());
2014 __ CheckEqualsInternalizedString(
2015 Map(node->InputAt(0)), Map(node->InputAt(1)), dominating_frame_state);
2016 return OpIndex::Invalid();
2017
2018 case IrOpcode::kCheckFloat64Hole: {
2019 DCHECK(dominating_frame_state.valid());
2020 V<Float64> value = Map(node->InputAt(0));
2021 // TODO(tebbi): If we did partial block cloning, we could emit a
2022 // `DeoptimizeIf` operation here. Alternatively, we could use a branch and
2023 // a separate block with an unconditional `Deoptimize`.
2024 return __ ChangeOrDeopt(
2025 value, dominating_frame_state, ChangeOrDeoptOp::Kind::kFloat64NotHole,
2026 CheckForMinusZeroMode::kDontCheckForMinusZero,
2027 CheckFloat64HoleParametersOf(node->op()).feedback());
2028 }
2029
2030 case IrOpcode::kCheckNotTaggedHole: {
2031 DCHECK(dominating_frame_state.valid());
2032 V<Object> value = Map(node->InputAt(0));
2033 __ DeoptimizeIf(
2034 __ TaggedEqual(value,
2035 __ HeapConstant(isolate->factory()->the_hole_value())),
2036 dominating_frame_state, DeoptimizeReason::kHole, FeedbackSource{});
2037 return value;
2038 }
2039
2040 case IrOpcode::kLoadMessage:
2041 return __ LoadMessage(Map(node->InputAt(0)));
2042 case IrOpcode::kStoreMessage:
2043 __ StoreMessage(Map(node->InputAt(0)), Map(node->InputAt(1)));
2044 return OpIndex::Invalid();
2045
2046 case IrOpcode::kSameValue:
2047 return __ SameValue(Map(node->InputAt(0)), Map(node->InputAt(1)),
2048 SameValueOp::Mode::kSameValue);
2049 case IrOpcode::kSameValueNumbersOnly:
2050 return __ SameValue(Map(node->InputAt(0)), Map(node->InputAt(1)),
2051 SameValueOp::Mode::kSameValueNumbersOnly);
2052 case IrOpcode::kNumberSameValue:
2053 return __ Float64SameValue(Map(node->InputAt(0)), Map(node->InputAt(1)));
2054
2055 case IrOpcode::kTypeOf:
2056 return __ CallBuiltin_Typeof(isolate, Map(node->InputAt(0)));
2057
2058 case IrOpcode::kFastApiCall: {
2059 DCHECK(dominating_frame_state.valid());
2060 FastApiCallNode n(node);
2061 const auto& params = n.Parameters();
2062 FastApiCallFunction c_function = params.c_function();
2063 const int c_arg_count = params.argument_count();
2064
2065 base::SmallVector<OpIndex, 16> slow_call_arguments;
2066 DCHECK_EQ(node->op()->ValueInputCount(),
2067 c_arg_count + FastApiCallNode::kCallbackData +
2068 n.SlowCallArgumentCount());
2069 OpIndex slow_call_callee = Map(n.SlowCallArgument(0));
2070 for (int i = 1; i < n.SlowCallArgumentCount(); ++i) {
2071 slow_call_arguments.push_back(Map(n.SlowCallArgument(i)));
2072 }
2073
2074 auto convert_fallback_return = [this](Variable value,
2076 int64_rep,
2077 CTypeInfo::Type return_type,
2078 V<Object> result) {
2079#define ELSE_UNREACHABLE \
2080 ELSE { \
2081 __ RuntimeAbort(AbortReason::kFastCallFallbackInvalid); \
2082 __ Unreachable(); \
2083 }
2084 switch (return_type) {
2086 __ SetVariable(value, __ UndefinedConstant());
2087 return;
2089 // Check that the return value is actually a boolean.
2090 IF (LIKELY(__ Word32BitwiseOr(
2091 __ TaggedEqual(result, __ TrueConstant()),
2092 __ TaggedEqual(result, __ FalseConstant())))) {
2093 __ SetVariable(
2094 value, __ ConvertJSPrimitiveToUntagged(
2098 kBoolean));
2099 }
2101 return;
2103 IF (LIKELY(__ ObjectIsNumber(result))) {
2104 __ SetVariable(
2105 value,
2106 __ ConvertJSPrimitiveToUntagged(
2111 }
2113 return;
2115 IF (LIKELY(__ ObjectIsNumber(result))) {
2116 __ SetVariable(
2117 value,
2118 __ ConvertJSPrimitiveToUntagged(
2123 }
2125 return;
2128 IF (LIKELY(__ ObjectIsBigInt(result))) {
2129 __ SetVariable(
2130 value,
2131 __ TruncateJSPrimitiveToUntagged(
2135 kBigInt));
2136 }
2138 } else {
2140 IF (LIKELY(__ ObjectIsNumber(result))) {
2142 __ TryTruncateFloat64ToInt64(
2143 V<Float64>::Cast(__ ConvertJSPrimitiveToUntagged(
2146 kFloat64,
2149 IF (__ Word32Equal(__ template Projection<1>(tuple),
2151 __ SetVariable(value, __ ChangeInt64ToFloat64(
2152 __ template Projection<0>(tuple)));
2153 }
2155 }
2157 }
2158 return;
2161 IF (LIKELY(__ ObjectIsBigInt(result))) {
2162 __ SetVariable(
2163 value,
2164 __ TruncateJSPrimitiveToUntagged(
2166 // Truncation from BigInt to int64 and uint64 is the
2167 // same.
2170 kBigInt));
2171 }
2173 } else {
2175 IF (LIKELY(__ ObjectIsNumber(result))) {
2177 __ TryTruncateFloat64ToUint64(
2178 V<Float64>::Cast(__ ConvertJSPrimitiveToUntagged(
2181 kFloat64,
2184 IF (__ Word32Equal(__ template Projection<1>(tuple),
2186 __ SetVariable(value, __ ChangeUint64ToFloat64(
2187 __ template Projection<0>(tuple)));
2188 }
2190 }
2192 }
2193 return;
2196 IF (LIKELY(__ ObjectIsNumber(result))) {
2197 V<Float64> f = V<Float64>::Cast(__ ConvertJSPrimitiveToUntagged(
2202 if (return_type == CTypeInfo::Type::kFloat32) {
2203 __ SetVariable(value, __ TruncateFloat64ToFloat32(f));
2204 } else {
2205 __ SetVariable(value, f);
2206 }
2207 }
2209 return;
2211 __ SetVariable(value, result);
2212 return;
2218 UNREACHABLE();
2219 }
2220
2221#undef ELSE_UNREACHABLE
2222 };
2223
2224 std::optional<decltype(assembler)::CatchScope> catch_scope;
2225 if (is_final_control) {
2226 Block* catch_block = Map(block->SuccessorAt(1));
2227 catch_scope.emplace(assembler, catch_block);
2228 }
2229 // Prepare FastCallApiOp parameters.
2230 base::SmallVector<OpIndex, 16> arguments;
2231 for (int i = 0; i < c_arg_count; ++i) {
2232 arguments.push_back(Map(NodeProperties::GetValueInput(node, i)));
2233 }
2234 V<Object> data_argument = Map(n.CallbackData());
2235
2236 V<Context> context = Map(n.Context());
2237
2238 const FastApiCallParameters* parameters =
2239 FastApiCallParameters::Create(c_function, graph_zone);
2240
2241 // There is one extra return value in addition to the return value of the
2242 // C function; it indicates whether the fast API call actually happened.
2243 CTypeInfo return_type = parameters->c_signature()->ReturnInfo();
2244 int return_count = 2;
2245
2246 // Allocate the out_reps vector in the zone, so that it lives through the
2247 // whole compilation.
2248 const base::Vector<RegisterRepresentation> out_reps =
2249 graph_zone->AllocateVector<RegisterRepresentation>(return_count);
2250 out_reps[0] = RegisterRepresentation::Word32();
2251 out_reps[1] = RegisterRepresentation::FromCTypeInfo(
2252 return_type, parameters->c_signature()->GetInt64Representation());
2253
2254 V<Tuple<Word32, Any>> fast_call_result =
2255 __ FastApiCall(dominating_frame_state, data_argument, context,
2256 base::VectorOf(arguments), parameters, out_reps);
2257
2258 V<Word32> result_state = __ template Projection<0>(fast_call_result);
2259 V<Any> result_value =
2260 __ template Projection<1>(fast_call_result, out_reps[1]);
2261 Variable result = __ NewVariable(out_reps[1]);
2262 __ SetVariable(result, result_value);
2263
2264 IF (UNLIKELY(
2266 // We need to generate a fallback (both fast and slow call) because
2267 // the generated code might fail, e.g. when a Smi was passed where
2268 // a JSObject was expected and an error must be thrown.
2269 // None of this usually holds true for Wasm functions with
2270 // primitive types only, so we avoid generating an extra branch here.
2271
2272 V<Object> fallback_result = V<Object>::Cast(__ Call(
2273 slow_call_callee, dominating_frame_state,
2274 base::VectorOf(slow_call_arguments),
2275 TSCallDescriptor::Create(params.descriptor(), CanThrow::kYes,
2276 LazyDeoptOnThrow::kNo, graph_zone)));
2277
2278 convert_fallback_return(
2279 result, parameters->c_signature()->GetInt64Representation(),
2280 return_type.GetType(), fallback_result);
2281 }
2282 V<Any> value = __ GetVariable(result);
2283 if (is_final_control) {
2284 // The `__ FastApiCall()` before has already created exceptional control
2285 // flow and bound a new block for the success case. So we can just
2286 // `Goto` the block that Turbofan designated as the `IfSuccess`
2287 // successor.
2288 __ Goto(Map(block->SuccessorAt(0)));
2289 }
2290 return value;
2291 }
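// Overall shape of the fast API call lowering above: the fast call is
// emitted first, and if its Word32 status projection signals that the fast
// path did not happen, the regular (slow) builtin call is performed
// instead; `convert_fallback_return` then converts the slow call's tagged
// result back to the representation the fast call would have produced, so
// both paths feed the same result variable.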
2292
2293 case IrOpcode::kRuntimeAbort:
2294 __ RuntimeAbort(AbortReasonOf(node->op()));
2295 return OpIndex::Invalid();
2296
2297 case IrOpcode::kDateNow:
2298 return __ CallRuntime_DateCurrentTime(isolate, __ NoContextConstant());
2299
2300 case IrOpcode::kEnsureWritableFastElements:
2301 return __ EnsureWritableFastElements(Map(node->InputAt(0)),
2302 Map(node->InputAt(1)));
2303
2304 case IrOpcode::kMaybeGrowFastElements: {
2305 DCHECK(dominating_frame_state.valid());
2306 const GrowFastElementsParameters& params =
2307 GrowFastElementsParametersOf(node->op());
2308 return __ MaybeGrowFastElements(
2309 Map(node->InputAt(0)), Map(node->InputAt(1)), Map(node->InputAt(2)),
2310 Map(node->InputAt(3)), dominating_frame_state, params.mode(),
2311 params.feedback());
2312 }
2313
2314 case IrOpcode::kTransitionElementsKind:
2315 __ TransitionElementsKind(Map(node->InputAt(0)),
2316 ElementsTransitionOf(node->op()));
2317 return OpIndex::Invalid();
2318 case IrOpcode::kTransitionElementsKindOrCheckMap: {
2319 DCHECK(dominating_frame_state.valid());
2320 V<HeapObject> receiver = Map(node->InputAt(0));
2321 V<i::Map> map = __ LoadMapField(receiver);
2322 __ TransitionElementsKindOrCheckMap(
2323 receiver, map, dominating_frame_state,
2324 ElementsTransitionWithMultipleSourcesOf(node->op()));
2325 return OpIndex::Invalid();
2326 }
2327 case IrOpcode::kAssertType: {
2328 compiler::Type type = OpParameter<compiler::Type>(node->op());
2329 CHECK(type.CanBeAsserted());
2330 V<TurbofanType> allocated_type;
2331 {
2332 DCHECK(isolate->CurrentLocalHeap()->is_main_thread());
2333 std::optional<UnparkedScope> unparked_scope;
2334 if (isolate->CurrentLocalHeap()->IsParked()) {
2335 unparked_scope.emplace(isolate->main_thread_local_isolate());
2336 }
2337 allocated_type =
2338 __ HeapConstant(type.AllocateOnHeap(isolate->factory()));
2339 }
2340 __ CallBuiltin_CheckTurbofanType(isolate, __ NoContextConstant(),
2341 Map(node->InputAt(0)), allocated_type,
2342 __ TagSmi(node->id()));
2343 return OpIndex::Invalid();
2344 }
2345
2346 case IrOpcode::kFindOrderedHashMapEntry:
2347 return __ FindOrderedHashMapEntry(Map(node->InputAt(0)),
2348 Map(node->InputAt(1)));
2349 case IrOpcode::kFindOrderedHashSetEntry:
2350 return __ FindOrderedHashSetEntry(Map(node->InputAt(0)),
2351 Map(node->InputAt(1)));
2352 case IrOpcode::kFindOrderedHashMapEntryForInt32Key:
2353 return __ FindOrderedHashMapEntryForInt32Key(Map(node->InputAt(0)),
2354 Map(node->InputAt(1)));
2355
2356 case IrOpcode::kBeginRegion:
2357 inside_region = true;
2358 return OpIndex::Invalid();
2359 case IrOpcode::kFinishRegion:
2360 inside_region = false;
2361 return Map(node->InputAt(0));
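// BeginRegion/FinishRegion only toggle the `inside_region` flag, which the
// store lowerings above consult to decide whether a store may keep its
// initializing/transitioning semantics.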
2362
2363 case IrOpcode::kTypeGuard:
2364 return Map(node->InputAt(0));
2365
2366 case IrOpcode::kAbortCSADcheck:
2367 __ AbortCSADcheck(Map(node->InputAt(0)));
2368 return OpIndex::Invalid();
2369
2370 case IrOpcode::kDebugBreak:
2371 __ DebugBreak();
2372 return OpIndex::Invalid();
2373
2374 case IrOpcode::kComment:
2375 __ Comment(OpParameter<const char*>(node->op()));
2376 return OpIndex::Invalid();
2377
2378 case IrOpcode::kAssert: {
2379 const AssertParameters& p = AssertParametersOf(node->op());
2380 __ AssertImpl(Map(node->InputAt(0)), p.condition_string(), p.file(),
2381 p.line());
2382 return OpIndex::Invalid();
2383 }
2384
2385 case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits:
2386 // Currently this is only used by the CSA pipeline.
2387 DCHECK_EQ(pipeline_kind, TurboshaftPipelineKind::kCSA);
2388 return __ BitcastTaggedToWordPtrForTagAndSmiBits(Map(node->InputAt(0)));
2389 case IrOpcode::kBitcastWordToTaggedSigned:
2390 return __ BitcastWordPtrToSmi(Map(node->InputAt(0)));
2391
2392 case IrOpcode::kWord32AtomicLoad:
2393 case IrOpcode::kWord64AtomicLoad: {
2394 OpIndex base = Map(node->InputAt(0));
2395 OpIndex offset = Map(node->InputAt(1));
2396 const AtomicLoadParameters& p = AtomicLoadParametersOf(node->op());
2397 DCHECK_EQ(__ output_graph().Get(base).outputs_rep()[0],
2399 LoadOp::Kind kind;
2400 switch (p.kind()) {
2403 break;
2405 UNREACHABLE();
2408 break;
2409 }
2410 RegisterRepresentation result_rep =
2411 RegisterRepresentation::FromMachineType(p.representation());
2412 if (result_rep == RegisterRepresentation::Tagged()) {
2413 // TODO(nicohartmann): Tagged loads are currently not supported by the
2414 // instruction selectors, but are emitted by some tests. We work around
2415 // this using bitcasts, which is safe because atomic loads are not
2416 // load-eliminated. We should still try to properly support tagged
2417 // atomic loads eventually and remove this workaround.
2418 result_rep = node->opcode() == IrOpcode::kWord32AtomicLoad
2420 : RegisterRepresentation::Word64();
2421 return __ TaggedBitcast(
2422 __ Load(base, offset, kind,
2424 true),
2425 result_rep, 0, 0),
2426 result_rep, RegisterRepresentation::Tagged(),
2428 }
2429 return __ Load(base, offset, kind,
2430 MemoryRepresentation::FromMachineType(p.representation()),
2431 result_rep, 0, 0);
2432 }
2433
2434 case IrOpcode::kWord32AtomicStore:
2435 case IrOpcode::kWord64AtomicStore: {
2436 OpIndex base = Map(node->InputAt(0));
2437 OpIndex offset = Map(node->InputAt(1));
2438 OpIndex value = Map(node->InputAt(2));
2439 const AtomicStoreParameters& p = AtomicStoreParametersOf(node->op());
2440 DCHECK_EQ(__ output_graph().Get(base).outputs_rep()[0],
2443 switch (p.kind()) {
2446 break;
2448 UNREACHABLE();
2451 break;
2452 }
2453 __ Store(
2454 base, offset, value, kind,
2456 p.write_barrier_kind(), 0, 0, true);
2457 return OpIndex::Invalid();
2458 }
2459
2460 case IrOpcode::kWord32AtomicAdd:
2461 case IrOpcode::kWord32AtomicSub:
2462 case IrOpcode::kWord32AtomicAnd:
2463 case IrOpcode::kWord32AtomicOr:
2464 case IrOpcode::kWord32AtomicXor:
2465 case IrOpcode::kWord32AtomicExchange:
2466 case IrOpcode::kWord32AtomicCompareExchange:
2467 case IrOpcode::kWord64AtomicAdd:
2468 case IrOpcode::kWord64AtomicSub:
2469 case IrOpcode::kWord64AtomicAnd:
2470 case IrOpcode::kWord64AtomicOr:
2471 case IrOpcode::kWord64AtomicXor:
2472 case IrOpcode::kWord64AtomicExchange:
2473 case IrOpcode::kWord64AtomicCompareExchange: {
2474 int input_index = 0;
2475 OpIndex base = Map(node->InputAt(input_index++));
2476 OpIndex offset = Map(node->InputAt(input_index++));
2477 OpIndex expected;
2478 if (node->opcode() == IrOpcode::kWord32AtomicCompareExchange ||
2479 node->opcode() == IrOpcode::kWord64AtomicCompareExchange) {
2480 expected = Map(node->InputAt(input_index++));
2481 }
2482 OpIndex value = Map(node->InputAt(input_index++));
2483 const AtomicOpParameters& p = AtomicOpParametersOf(node->op());
2484 switch (node->opcode()) {
2485#define BINOP(binop, size) \
2486 case IrOpcode::kWord##size##Atomic##binop: \
2487 return __ AtomicRMW(base, offset, value, AtomicRMWOp::BinOp::k##binop, \
2488 RegisterRepresentation::Word##size(), \
2489 MemoryRepresentation::FromMachineType(p.type()), \
2490 p.kind());
2491 BINOP(Add, 32)
2492 BINOP(Sub, 32)
2493 BINOP(And, 32)
2494 BINOP(Or, 32)
2495 BINOP(Xor, 32)
2496 BINOP(Exchange, 32)
2497 BINOP(Add, 64)
2498 BINOP(Sub, 64)
2499 BINOP(And, 64)
2500 BINOP(Or, 64)
2501 BINOP(Xor, 64)
2502 BINOP(Exchange, 64)
2503#undef BINOP
2504 case IrOpcode::kWord32AtomicCompareExchange:
2505 return __ AtomicCompareExchange(
2506 base, offset, expected, value, RegisterRepresentation::Word32(),
2507 MemoryRepresentation::FromMachineType(p.type()), p.kind());
2508 case IrOpcode::kWord64AtomicCompareExchange:
2509 return __ AtomicCompareExchange(
2510 base, offset, expected, value, RegisterRepresentation::Word64(),
2511 MemoryRepresentation::FromMachineType(p.type()), p.kind());
2512 default:
2513 UNREACHABLE();
2514 }
2515 }
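// Every word32/word64 atomic read-modify-write opcode maps onto a single
// AtomicRMW operation parameterized by the binop kind, the register
// representation and the memory representation derived from the access's
// machine type; compare-exchange is handled separately because it takes an
// additional `expected` input.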
2516
2517 case IrOpcode::kWord32AtomicPairLoad:
2518 return __ AtomicWord32PairLoad(Map(node->InputAt(0)),
2519 Map(node->InputAt(1)), 0);
2520 case IrOpcode::kWord32AtomicPairStore:
2521 return __ AtomicWord32PairStore(
2522 Map(node->InputAt(0)), Map(node->InputAt(1)), Map(node->InputAt(2)),
2523 Map(node->InputAt(3)), 0);
2524
2525#define ATOMIC_WORD32_PAIR_BINOP(kind) \
2526 case IrOpcode::kWord32AtomicPair##kind: \
2527 return __ AtomicWord32PairBinop( \
2528 Map(node->InputAt(0)), Map(node->InputAt(1)), Map(node->InputAt(2)), \
2529 Map(node->InputAt(3)), AtomicRMWOp::BinOp::k##kind, 0);
2530 ATOMIC_WORD32_PAIR_BINOP(Add)
2531 ATOMIC_WORD32_PAIR_BINOP(Sub)
2532 ATOMIC_WORD32_PAIR_BINOP(And)
2533 ATOMIC_WORD32_PAIR_BINOP(Or)
2534 ATOMIC_WORD32_PAIR_BINOP(Xor)
2535 ATOMIC_WORD32_PAIR_BINOP(Exchange)
2536 case IrOpcode::kWord32AtomicPairCompareExchange:
2537 return __ AtomicWord32PairCompareExchange(
2538 Map(node->InputAt(0)), Map(node->InputAt(1)), Map(node->InputAt(4)),
2539 Map(node->InputAt(5)), Map(node->InputAt(2)), Map(node->InputAt(3)),
2540 0);
2541
2542#ifdef V8_ENABLE_WEBASSEMBLY
2543#define SIMD128_BINOP(name) \
2544 case IrOpcode::k##name: \
2545 return __ Simd128Binop(Map(node->InputAt(0)), Map(node->InputAt(1)), \
2546 Simd128BinopOp::Kind::k##name);
2547 FOREACH_SIMD_128_BINARY_BASIC_OPCODE(SIMD128_BINOP)
2548#undef SIMD128_BINOP
2549 case IrOpcode::kI8x16Swizzle: {
2550 bool relaxed = OpParameter<bool>(node->op());
2551 return __ Simd128Binop(Map(node->InputAt(0)), Map(node->InputAt(1)),
2552 relaxed
2553 ? Simd128BinopOp::Kind::kI8x16RelaxedSwizzle
2554 : Simd128BinopOp::Kind::kI8x16Swizzle);
2555 }
2556
2557#define SIMD128_UNOP(name) \
2558 case IrOpcode::k##name: \
2559 return __ Simd128Unary(Map<Simd128>(node->InputAt(0)), \
2560 Simd128UnaryOp::Kind::k##name);
2561 FOREACH_SIMD_128_UNARY_OPCODE(SIMD128_UNOP)
2562#undef SIMD128_UNOP
2563
2564#define SIMD128_SHIFT(name) \
2565 case IrOpcode::k##name: \
2566 return __ Simd128Shift(Map<Simd128>(node->InputAt(0)), \
2567 Map<Word32>(node->InputAt(1)), \
2568 Simd128ShiftOp::Kind::k##name);
2569 FOREACH_SIMD_128_SHIFT_OPCODE(SIMD128_SHIFT)
2570#undef SIMD128_SHIFT
2571
2572#define SIMD128_TEST(name) \
2573 case IrOpcode::k##name: \
2574 return __ Simd128Test(Map<Simd128>(node->InputAt(0)), \
2575 Simd128TestOp::Kind::k##name);
2576 FOREACH_SIMD_128_TEST_OPCODE(SIMD128_TEST)
2577#undef SIMD128_TEST
2578
2579#define SIMD128_SPLAT(name) \
2580 case IrOpcode::k##name##Splat: \
2581 return __ Simd128Splat(Map<Any>(node->InputAt(0)), \
2582 Simd128SplatOp::Kind::k##name);
2583 FOREACH_SIMD_128_SPLAT_OPCODE(SIMD128_SPLAT)
2584#undef SIMD128_SPLAT
2585
2586#define SIMD128_TERNARY(name) \
2587 case IrOpcode::k##name: \
2588 return __ Simd128Ternary(Map(node->InputAt(0)), Map(node->InputAt(1)), \
2589 Map(node->InputAt(2)), \
2590 Simd128TernaryOp::Kind::k##name);
2591 FOREACH_SIMD_128_TERNARY_OPCODE(SIMD128_TERNARY)
2592#undef SIMD128_TERNARY
2593
2594#define SIMD128_EXTRACT_LANE(name, suffix) \
2595 case IrOpcode::k##name##ExtractLane##suffix: \
2596 return __ Simd128ExtractLane(Map<Simd128>(node->InputAt(0)), \
2597 Simd128ExtractLaneOp::Kind::k##name##suffix, \
2598 OpParameter<int32_t>(node->op()));
2599 SIMD128_EXTRACT_LANE(I8x16, S)
2600 SIMD128_EXTRACT_LANE(I8x16, U)
2601 SIMD128_EXTRACT_LANE(I16x8, S)
2602 SIMD128_EXTRACT_LANE(I16x8, U)
2603 SIMD128_EXTRACT_LANE(I32x4, )
2604 SIMD128_EXTRACT_LANE(I64x2, )
2605 SIMD128_EXTRACT_LANE(F32x4, )
2606 SIMD128_EXTRACT_LANE(F64x2, )
2607#undef SIMD128_EXTRACT_LANE
2608
2609#define SIMD128_REPLACE_LANE(name) \
2610 case IrOpcode::k##name##ReplaceLane: \
2611 return __ Simd128ReplaceLane(Map<Simd128>(node->InputAt(0)), \
2612 Map<Any>(node->InputAt(1)), \
2613 Simd128ReplaceLaneOp::Kind::k##name, \
2614 OpParameter<int32_t>(node->op()));
2615 SIMD128_REPLACE_LANE(I8x16)
2616 SIMD128_REPLACE_LANE(I16x8)
2617 SIMD128_REPLACE_LANE(I32x4)
2618 SIMD128_REPLACE_LANE(I64x2)
2619 SIMD128_REPLACE_LANE(F32x4)
2620 SIMD128_REPLACE_LANE(F64x2)
2621#undef SIMD128_REPLACE_LANE
2622
2623 case IrOpcode::kLoadStackPointer:
2624 return __ LoadStackPointer();
2625
2626 case IrOpcode::kSetStackPointer:
2627 __ SetStackPointer(Map(node->InputAt(0)));
2628 return OpIndex::Invalid();
2629
2630#endif // V8_ENABLE_WEBASSEMBLY
2631
2632 case IrOpcode::kJSStackCheck: {
2633 DCHECK_EQ(StackCheckKindOf(op),
2634 StackCheckKind::kJSFunctionEntry);
2635 V<Context> context = Map(node->InputAt(0));
2636 V<FrameState> frame_state = Map(node->InputAt(1));
2637 __ JSFunctionEntryStackCheck(context, frame_state);
2638 return OpIndex::Invalid();
2639 }
2640
2641 case IrOpcode::kInt32PairAdd:
2642 case IrOpcode::kInt32PairSub:
2643 case IrOpcode::kInt32PairMul:
2644 case IrOpcode::kWord32PairShl:
2645 case IrOpcode::kWord32PairSar:
2646 case IrOpcode::kWord32PairShr: {
2647 V<Word32> left_low = Map(node->InputAt(0));
2648 V<Word32> left_high = Map(node->InputAt(1));
2649 V<Word32> right_low = Map(node->InputAt(2));
2650 V<Word32> right_high = Map(node->InputAt(3));
2651 Word32PairBinopOp::Kind kind;
2652 switch (node->opcode()) {
2653 case IrOpcode::kInt32PairAdd:
2654 kind = Word32PairBinopOp::Kind::kAdd;
2655 break;
2656 case IrOpcode::kInt32PairSub:
2657 kind = Word32PairBinopOp::Kind::kSub;
2658 break;
2659 case IrOpcode::kInt32PairMul:
2660 kind = Word32PairBinopOp::Kind::kMul;
2661 break;
2662 case IrOpcode::kWord32PairShl:
2663 kind = Word32PairBinopOp::Kind::kShiftLeft;
2664 break;
2665 case IrOpcode::kWord32PairSar:
2666 kind = Word32PairBinopOp::Kind::kShiftRightArithmetic;
2667 break;
2668 case IrOpcode::kWord32PairShr:
2669 kind = Word32PairBinopOp::Kind::kShiftRightLogical;
2670 break;
2671 default:
2672 UNREACHABLE();
2673 }
2674 return __ Word32PairBinop(left_low, left_high, right_low, right_high,
2675 kind);
2676 }
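// The Int32Pair/Word32Pair opcodes implement 64-bit arithmetic on 32-bit
// targets: each operand is split into low and high Word32 halves, and
// Word32PairBinop yields the (low, high) pair of the combined result.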
2677
2678#ifdef V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA
2679 case IrOpcode::kGetContinuationPreservedEmbedderData:
2680 return __ GetContinuationPreservedEmbedderData();
2681 case IrOpcode::kSetContinuationPreservedEmbedderData:
2682 __ SetContinuationPreservedEmbedderData(Map(node->InputAt(0)));
2683 return OpIndex::Invalid();
2684#endif // V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA
2685
2686 default:
2687 std::cerr << "unsupported node type: " << *node->op() << "\n";
2688 node->Print(std::cerr);
2689 UNIMPLEMENTED();
2690 }
2691}
2692
2693} // namespace
2694
2695 std::optional<BailoutReason> BuildGraph(
2696 PipelineData* data, Schedule* schedule, Zone* phase_zone, Linkage* linkage,
2697 JsWasmCallsSidetable* js_wasm_calls_sidetable) {
2698 GraphBuilder builder{data, phase_zone, *schedule, linkage,
2699 js_wasm_calls_sidetable};
2700#if DEBUG
2701 data->graph().SetCreatedFromTurbofan();
2702#endif
2703 return builder.Run();
2704}
2705
2707
2708} // namespace v8::internal::compiler::turboshaft