turboshaft-graph-interface.cc
1// Copyright 2023 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
7#include <optional>
8
9#include "absl/container/btree_map.h"
11#include "src/base/logging.h"
14#include "src/common/globals.h"
34
35namespace v8::internal::wasm {
36
38
39using compiler::AccessBuilder;
40using compiler::CallDescriptor;
42using compiler::Operator;
43using compiler::TrapId;
67using compiler::turboshaft::Simd128ConstantOp;
74using compiler::turboshaft::WasmArrayNullable;
75using compiler::turboshaft::WasmStackCheckOp;
76using compiler::turboshaft::WasmStringRefNullable;
77using compiler::turboshaft::WasmStructNullable;
78using compiler::turboshaft::WasmTypeAnnotationOp;
79using compiler::turboshaft::WasmTypeCastOp;
83
84namespace {
85
86ExternalArrayType GetExternalArrayType(DataViewOp op_type) {
87 switch (op_type) {
88#define V(Name) \
89 case DataViewOp::kGet##Name: \
90 case DataViewOp::kSet##Name: \
91 return kExternal##Name##Array;
93#undef V
96 }
97}
98
99size_t GetTypeSize(DataViewOp op_type) {
100 ExternalArrayType array_type = GetExternalArrayType(op_type);
101 switch (array_type) {
102#define ELEMENTS_KIND_TO_ELEMENT_SIZE(Type, type, TYPE, ctype) \
103 case kExternal##Type##Array: \
104 return sizeof(ctype);
105
107#undef ELEMENTS_KIND_TO_ELEMENT_SIZE
108 }
109}
110
111bool ReverseBytesSupported(size_t size_in_bytes) {
112 switch (size_in_bytes) {
113 case 4:
114 case 16:
115 return true;
116 case 8:
117 return Is64();
118 default:
119 return false;
120 }
121}
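// Note: there is intentionally no 2-byte case here; the endianness-conversion
// helpers below instead widen 16-bit values to a 4-byte reverse (shifting the
// value left by 16 bits first).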
122
123enum class BranchHintingMode {
124 kNone,
125 kModuleProvided,
126 kStress,
127};
128
129class BranchHintingStresser {
130 public:
131 BranchHint GetNextHint() {
132 // To strike a balance between randomness and simplicity, we simply
133 // iterate over the bits of the random seed.
134 int bit = v8_flags.random_seed & (1 << cursor_);
135 static_assert(sizeof(v8_flags.random_seed) * kBitsPerByte == 32);
136 cursor_ = (cursor_ + 1) & 31;
137 return bit == 0 ? BranchHint::kFalse : BranchHint::kTrue;
138 }
139
140 private:
141 int cursor_{0};
142};
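// (Sketch, assumption: the exact wiring lives in GetBranchHint, which is not
// shown in full here.) When --stress-branch-hinting is enabled, StartFunction
// below selects BranchHintingMode::kStress, and branch hints are then
// presumably drawn from GetNextHint(), cycling through the 32 bits of
// --random-seed.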
143
144} // namespace
145
146// TODO(14108): Annotate runtime functions as not having side effects
147// where appropriate.
148OpIndex WasmGraphBuilderBase::CallRuntime(
149 Zone* zone, Runtime::FunctionId f,
150 std::initializer_list<const OpIndex> args, V<Context> context) {
151 const Runtime::Function* fun = Runtime::FunctionForId(f);
152 OpIndex isolate_root = __ LoadRootRegister();
153 DCHECK_EQ(1, fun->result_size);
154 int builtin_slot_offset =
155 IsolateData::BuiltinSlotOffset(Builtin::kWasmCEntry);
156 OpIndex centry_stub =
157 __ Load(isolate_root, LoadOp::Kind::RawAligned(),
158 MemoryRepresentation::UintPtr(), builtin_slot_offset);
159 // CallRuntime is always called with 0 or 1 argument, so a vector of size 4
160 // always suffices.
161 SmallZoneVector<OpIndex, 4> centry_args(zone);
162 for (OpIndex arg : args) centry_args.emplace_back(arg);
163 centry_args.emplace_back(__ ExternalConstant(ExternalReference::Create(f)));
164 centry_args.emplace_back(__ Word32Constant(fun->nargs));
165 centry_args.emplace_back(context);
166 const CallDescriptor* call_descriptor =
170 const TSCallDescriptor* ts_call_descriptor = TSCallDescriptor::Create(
171 call_descriptor, compiler::CanThrow::kYes,
173 return __ Call(centry_stub, OpIndex::Invalid(), base::VectorOf(centry_args),
174 ts_call_descriptor);
175}
176
178 static_assert(std::is_same<Smi, BuiltinPtr>(), "BuiltinPtr must be Smi");
179 return __ SmiConstant(Smi::FromInt(static_cast<int>(builtin)));
180}
181
183 Builtin builtin, StubCallMode stub_mode) {
184 return stub_mode == StubCallMode::kCallWasmRuntimeStub
185 ? __ RelocatableWasmBuiltinCallTarget(builtin)
186 : GetBuiltinPointerTarget(builtin);
187}
188
190 V<Word64> input, StubCallMode stub_mode) {
191 Builtin builtin = Is64() ? Builtin::kI64ToBigInt : Builtin::kI32PairToBigInt;
192 V<WordPtr> target = GetTargetForBuiltinCall(builtin, stub_mode);
193 CallInterfaceDescriptor interface_descriptor =
195 const CallDescriptor* call_descriptor =
197 __ graph_zone(), // zone
198 interface_descriptor,
199 0, // stack parameter count
201 Operator::kNoProperties, // properties
202 stub_mode);
203 const TSCallDescriptor* ts_call_descriptor = TSCallDescriptor::Create(
205 __ graph_zone());
206 if constexpr (Is64()) {
207 return V<BigInt>::Cast(__ Call(target, {input}, ts_call_descriptor));
208 }
209 V<Word32> low_word = __ TruncateWord64ToWord32(input);
210 V<Word32> high_word = __ TruncateWord64ToWord32(__ ShiftRightLogical(
211 input, __ Word32Constant(32), WordRepresentation::Word64()));
212 return V<BigInt>::Cast(
213 __ Call(target, {low_word, high_word}, ts_call_descriptor));
214}
215
216std::pair<V<Word32>, V<HeapObject>>
218 ConstOrV<Word32> func_index,
219 V<WasmTrustedInstanceData> trusted_instance_data) {
221 trusted_instance_data, DispatchTableForImports, WasmDispatchTable);
222 // Handle constant indexes specially to reduce graph size, even though later
223 // optimization passes would produce the same result anyway.
224 if (func_index.is_constant()) {
226 V<Word32> target = __ Load(dispatch_table, LoadOp::Kind::TaggedBase(),
229 V<ExposedTrustedObject> implicit_arg =
230 V<ExposedTrustedObject>::Cast(__ LoadProtectedPointerField(
231 dispatch_table, LoadOp::Kind::TaggedBase(),
233 return {target, implicit_arg};
234 }
235
236 V<WordPtr> dispatch_table_entry_offset =
237 __ WordPtrMul(__ ChangeUint32ToUintPtr(func_index.value()),
239 V<Word32> target = __ Load(
240 dispatch_table, dispatch_table_entry_offset, LoadOp::Kind::TaggedBase(),
244 __ LoadProtectedPointerField(dispatch_table, dispatch_table_entry_offset,
248 0));
249 return {target, implicit_arg};
250}
251
252std::pair<V<Word32>, V<ExposedTrustedObject>>
254 V<WasmInternalFunction> internal_function) {
255 V<ExposedTrustedObject> implicit_arg =
256 V<ExposedTrustedObject>::Cast(__ LoadProtectedPointerField(
257 internal_function, LoadOp::Kind::TaggedBase().Immutable(),
258 WasmInternalFunction::kProtectedImplicitArgOffset));
259
260 V<Word32> target = __ Load(internal_function, LoadOp::Kind::TaggedBase(),
262 WasmInternalFunction::kRawCallTargetOffset);
263
264 return {target, implicit_arg};
265}
266
268 ValueTypeBase type) {
269 switch (type.kind()) {
270 case kI8:
271 case kI16:
272 case kI32:
274 case kI64:
276 case kF16:
277 case kF32:
279 case kF64:
281 case kRefNull:
282 case kRef:
284 case kS128:
286 case kVoid:
287 case kTop:
288 case kBottom:
289 UNREACHABLE();
290 }
291}
292
293// Load the trusted data from a WasmInstanceObject.
297 return V<WasmTrustedInstanceData>::Cast(__ LoadTrustedPointerField(
299 kWasmTrustedInstanceDataIndirectPointerTag,
300 WasmInstanceObject::kTrustedDataOffset));
301}
302
304 Zone* zone, OpIndex thread_in_wasm_flag_address, bool new_value) {
305 if (v8_flags.debug_code) {
306 V<Word32> flag_value =
307 __ Load(thread_in_wasm_flag_address, LoadOp::Kind::RawAligned(),
309
310 IF (UNLIKELY(__ Word32Equal(flag_value, new_value))) {
311 OpIndex message_id = __ TaggedIndexConstant(static_cast<int32_t>(
312 new_value ? AbortReason::kUnexpectedThreadInWasmSet
313 : AbortReason::kUnexpectedThreadInWasmUnset));
314 CallRuntime(zone, Runtime::kAbort, {message_id}, __ NoContextConstant());
315 __ Unreachable();
316 }
317 }
318
319 __ Store(thread_in_wasm_flag_address, __ Word32Constant(new_value),
322}
323
325 bool new_value) {
327
328 OpIndex isolate_root = __ LoadRootRegister();
329 OpIndex thread_in_wasm_flag_address =
330 __ Load(isolate_root, LoadOp::Kind::RawAligned().Immutable(),
333 BuildModifyThreadInWasmFlagHelper(zone, thread_in_wasm_flag_address,
334 new_value);
335}
336
337// TODO(14108): Annotate C functions as not having side effects where
338// appropriate.
341 std::initializer_list<OpIndex> args) {
342 return WasmGraphBuilderBase::CallC(sig, __ ExternalConstant(ref), args);
343}
344
346 OpIndex function,
347 std::initializer_list<OpIndex> args) {
348 DCHECK_LE(sig->return_count(), 1);
349 DCHECK_EQ(sig->parameter_count(), args.size());
350 const CallDescriptor* call_descriptor =
352 const TSCallDescriptor* ts_call_descriptor = TSCallDescriptor::Create(
354 __ graph_zone());
355 return __ Call(function, OpIndex::Invalid(), base::VectorOf(args),
356 ts_call_descriptor);
357}
358
360 V<WordPtr> new_limit) {
361 // Set the new interrupt limit and real limit. Use a compare-and-swap for
362 // the interrupt limit to avoid overwriting a pending interrupt.
363 __ AtomicCompareExchange(
364 __ IsolateField(IsolateFieldId::kJsLimitAddress), __ UintPtrConstant(0),
365 old_limit, new_limit, RegisterRepresentation::WordPtr(),
367 __ Store(__ LoadRootRegister(), new_limit, StoreOp::Kind::RawAligned(),
370}
371
373 V<WordPtr> old_limit) {
374 // Set the is_on_central_stack flag.
375 OpIndex isolate_root = __ LoadRootRegister();
376 __ Store(isolate_root, __ Word32Constant(1), LoadOp::Kind::RawAligned(),
378 IsolateData::is_on_central_stack_flag_offset());
379
380 // Save the old fp and the target sp in the StackMemory's stack switch info.
381 // We are not on the main stack, so the active stack must be set.
382 V<WordPtr> active_stack = __ Load(isolate_root, LoadOp::Kind::RawAligned(),
384 IsolateData::active_stack_offset());
385 __ Store(active_stack, __ FramePointer(), StoreOp::Kind::RawAligned(),
388 V<WordPtr> central_stack_sp = __ Load(
391 __ Store(active_stack, central_stack_sp, StoreOp::Kind::RawAligned(),
394
395 // Switch the stack limit and the stack pointer.
396 V<WordPtr> central_stack_limit = __ Load(
399 BuildSetNewStackLimit(old_limit, central_stack_limit);
400 OpIndex old_sp = __ LoadStackPointer();
401 __ SetStackPointer(central_stack_sp);
402 return old_sp;
403}
404
405// Returns the old (secondary stack's) sp and stack limit.
406std::pair<V<WordPtr>, V<WordPtr>>
408 V<WordPtr> isolate_root = __ LoadRootRegister();
409 V<Word32> is_on_central_stack_flag = __ Load(
411 IsolateData::is_on_central_stack_flag_offset());
412 ScopedVar<WordPtr> old_sp_var(this, __ IntPtrConstant(0));
413 ScopedVar<WordPtr> old_limit_var(this, __ IntPtrConstant(0));
414 IF_NOT (LIKELY(is_on_central_stack_flag)) {
415 V<WordPtr> old_limit = __ Load(isolate_root, LoadOp::Kind::RawAligned(),
418 V<WordPtr> old_sp = BuildSwitchToTheCentralStack(old_limit);
419 old_sp_var = old_sp;
420 old_limit_var = old_limit;
421 }
422 return {old_sp_var, old_limit_var};
423}
424
426 V<WordPtr> old_sp, V<WordPtr> old_limit) {
427 IF_NOT (LIKELY(__ WordPtrEqual(old_sp, __ IntPtrConstant(0)))) {
428 // Reset is_on_central_stack flag.
429 V<WordPtr> isolate_root = __ LoadRootRegister();
430 __ Store(isolate_root, __ Word32Constant(0), StoreOp::Kind::RawAligned(),
432 IsolateData::is_on_central_stack_flag_offset());
433
434 // Clear stack switch info.
435 // We are not on the main stack, so the active stack must be set.
436 V<WordPtr> active_stack = __ Load(isolate_root, LoadOp::Kind::RawAligned(),
438 IsolateData::active_stack_offset());
439 __ Store(active_stack, __ UintPtrConstant(0), StoreOp::Kind::RawAligned(),
442
443 // Restore the old stack limit and stack pointer.
444 V<WordPtr> real_jslimit = __ Load(isolate_root, LoadOp::Kind::RawAligned(),
447 BuildSetNewStackLimit(real_jslimit, old_limit);
448 __ SetStackPointer(old_sp);
449 }
450}
451
453 private:
454 class BlockPhis;
455 class InstanceCache;
456
457 public:
467 static constexpr bool kUsesPoppedArgs = true;
468
469 struct Value : public ValueBase<ValidationTag> {
471 template <typename... Args>
472 explicit Value(Args&&... args) V8_NOEXCEPT
473 : ValueBase(std::forward<Args>(args)...) {}
474 };
475
476 struct Control : public ControlBase<Value, ValidationTag> {
478 // for 'if', loops, and 'try'/'try-table' respectively.
480 BitVector* assigned = nullptr; // Only for loops.
481 V<Object> exception = OpIndex::Invalid(); // Only for 'try-catch'.
482
483 template <typename... Args>
484 explicit Control(Args&&... args) V8_NOEXCEPT
485 : ControlBase(std::forward<Args>(args)...) {}
486 };
487
488 public:
489 // For non-inlined functions.
491 Zone* zone, CompilationEnv* env, Assembler& assembler,
492 std::unique_ptr<AssumptionsJournal>* assumptions,
493 ZoneVector<WasmInliningPosition>* inlining_positions, int func_index,
494 bool shared, const WireBytesStorage* wire_bytes)
495 : WasmGraphBuilderBase(zone, assembler),
497 block_phis_(zone),
498 env_(env),
499 owned_instance_cache_(std::make_unique<InstanceCache>(assembler)),
501 assumptions_(assumptions),
502 inlining_positions_(inlining_positions),
503 ssa_env_(zone),
504 func_index_(func_index),
505 shared_(shared),
506 wire_bytes_(wire_bytes),
507 return_phis_(nullptr),
508 is_inlined_tail_call_(false) {
511 }
512
513 // For inlined functions.
515 Zone* zone, CompilationEnv* env, Assembler& assembler, Mode mode,
516 InstanceCache& instance_cache,
517 std::unique_ptr<AssumptionsJournal>* assumptions,
518 ZoneVector<WasmInliningPosition>* inlining_positions, int func_index,
519 bool shared, const WireBytesStorage* wire_bytes,
520 base::Vector<OpIndex> real_parameters, TSBlock* return_block,
521 BlockPhis* return_phis, TSBlock* catch_block, bool is_inlined_tail_call,
522 OptionalV<FrameState> parent_frame_state)
523 : WasmGraphBuilderBase(zone, assembler),
524 mode_(mode),
525 block_phis_(zone),
526 env_(env),
527 instance_cache_(instance_cache),
528 assumptions_(assumptions),
529 inlining_positions_(inlining_positions),
530 ssa_env_(zone),
531 func_index_(func_index),
532 shared_(shared),
533 wire_bytes_(wire_bytes),
534 real_parameters_(real_parameters),
535 return_block_(return_block),
537 return_catch_block_(catch_block),
538 is_inlined_tail_call_(is_inlined_tail_call),
539 parent_frame_state_(parent_frame_state) {
541 DCHECK_EQ(return_block == nullptr, mode == kInlinedTailCall);
542 DCHECK_EQ(catch_block != nullptr, mode == kInlinedWithCatch);
543 }
544
545 void StartFunction(FullDecoder* decoder) {
546 if (mode_ == kRegular) __ Bind(__ NewBlock());
547 // Set 0 as the current source position (before locals declarations).
548 __ SetCurrentOrigin(WasmPositionToOpIndex(0, inlining_id_));
549 ssa_env_.resize(decoder->num_locals());
550 uint32_t index = 0;
552 if (mode_ == kRegular) {
553 static_assert(kWasmInstanceDataParameterIndex == 0);
554 trusted_instance_data = __ WasmInstanceDataParameter();
555 for (; index < decoder->sig_->parameter_count(); index++) {
556 // Parameter indices are shifted by 1 because parameter 0 is the
557 // instance.
558 ssa_env_[index] = __ Parameter(
559 index + 1, RepresentationFor(decoder->sig_->GetParam(index)));
560 }
562 } else {
564 for (; index < decoder->sig_->parameter_count(); index++) {
565 // Parameter indices are shifted by 1 because parameter 0 is the
566 // instance.
567 ssa_env_[index] = real_parameters_[index + 1];
568 }
570 return_phis_->InitReturnPhis(decoder->sig_->returns());
571 }
572 }
573 while (index < decoder->num_locals()) {
574 ValueType type = decoder->local_type(index);
575 OpIndex op;
576 if (!type.is_defaultable()) {
577 DCHECK(type.is_reference());
578 op = __ RootConstant(RootIndex::kOptimizedOut);
579 } else {
580 op = DefaultValue(type);
581 }
582 while (index < decoder->num_locals() &&
583 decoder->local_type(index) == type) {
584 ssa_env_[index++] = op;
585 }
586 }
587
588 if (v8_flags.wasm_inlining) {
589 if (mode_ == kRegular) {
590 if (v8_flags.liftoff) {
592 decoder->zone_, decoder->module_, func_index_);
593 } else {
596 }
597 } else {
598#if DEBUG
599 // We don't have support for inlining asm.js functions; those should
600 // never be selected in `InliningTree`.
602
603 if (v8_flags.liftoff && inlining_decisions_) {
604 // DCHECK that `inlining_decisions_` is consistent.
607 base::MutexGuard mutex_guard(&decoder->module_->type_feedback.mutex);
609 DCHECK_NE(
610 decoder->module_->type_feedback.feedback_for_function.find(
612 decoder->module_->type_feedback.feedback_for_function.end());
614 decoder->module_->type_feedback.feedback_for_function
615 .find(func_index_)
616 ->second.feedback_vector.size());
618 decoder->module_->type_feedback.feedback_for_function
619 .find(func_index_)
620 ->second.call_targets.size());
621 }
622 }
623#endif
624 }
625 }
626
627 if (v8_flags.debug_code) {
628 IF_NOT (LIKELY(__ HasInstanceType(trusted_instance_data,
629 WASM_TRUSTED_INSTANCE_DATA_TYPE))) {
630 OpIndex message_id = __ TaggedIndexConstant(
631 static_cast<int32_t>(AbortReason::kUnexpectedInstanceType));
632 CallRuntime(decoder->zone(), Runtime::kAbort, {message_id},
633 __ NoContextConstant());
634 __ Unreachable();
635 }
636 }
637
638 if (mode_ == kRegular) {
639 StackCheck(WasmStackCheckOp::Kind::kFunctionEntry, decoder);
640 }
641
642 if (v8_flags.trace_wasm) {
643 __ SetCurrentOrigin(
645 CallRuntime(decoder->zone(), Runtime::kWasmTraceEnter, {},
646 __ NoContextConstant());
647 }
648
649 auto branch_hints_it = decoder->module_->branch_hints.find(func_index_);
650 if (v8_flags.stress_branch_hinting) {
651 // Stress mode takes precedence over a branch hints section.
652 branch_hinting_mode_ = BranchHintingMode::kStress;
653 } else if (branch_hints_it != decoder->module_->branch_hints.end()) {
654 branch_hints_ = &branch_hints_it->second;
655 branch_hinting_mode_ = BranchHintingMode::kModuleProvided;
656 } else {
657 branch_hinting_mode_ = BranchHintingMode::kNone;
658 }
659 }
660
661 void StartFunctionBody(FullDecoder* decoder, Control* block) {}
662
664 if (v8_flags.liftoff && inlining_decisions_ &&
666 DCHECK_EQ(
668 static_cast<int>(inlining_decisions_->function_calls().size()) - 1);
669 }
670 if (mode_ == kRegular) {
671 // Just accessing `source_positions` at the maximum `OpIndex` already
672 // pre-allocates the underlying storage such that we avoid repeatedly
673 // resizing/copying in the following loop.
674 __ output_graph().source_positions()[__ output_graph().EndIndex()];
675
676 for (OpIndex index : __ output_graph().AllOperationIndices()) {
678 __ output_graph().operation_origins()[index]);
679 __ output_graph().source_positions()[index] = position;
680 }
681 if (v8_flags.trace_wasm_inlining) {
682 uint32_t node_count =
683 __ output_graph().NumberOfOperationsForDebugging();
684 PrintF("[function %d: emitted %d nodes]\n", func_index_, node_count);
685 }
686 }
687 }
688
690
692 __ SetCurrentOrigin(
694 }
695
696 // ******** Control Flow ********
697 // The basic structure of control flow is {block_phis_}. It contains a mapping
698 // from blocks to phi inputs corresponding to the SSA values plus the stack
699 // merge values at the beginning of the block.
700 // - When we create a new block (to be bound in the future), we register it to
701 // {block_phis_} with {NewBlockWithPhis}.
702 // - When we encounter a jump to a block, we invoke {SetupControlFlowEdge}.
703 // - Finally, when we bind a block, we setup its phis, the SSA environment,
704 // and its merge values, with {BindBlockAndGeneratePhis}.
705 // - When we create a loop, we generate PendingLoopPhis for the SSA state and
706 // the incoming stack values. We also create a block which will act as a
707 // merge block for all loop backedges (since a loop in Turboshaft can only
708 // have one backedge). When we PopControl a loop, we enter the merge block
709 // to create its Phis for all backedges as necessary, and use those values
710 // to patch the backedge of the PendingLoopPhis of the loop.
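// A rough sketch of that protocol for a wasm 'if ... else ... end', using the
// methods defined below (names as in this file):
//   If():         false_block = NewBlockWithPhis(); merge = NewBlockWithPhis();
//                 SetupControlFlowEdge(false_block);
//                 Branch(cond, true_block, false_block); Bind(true_block);
//   Else():       SetupControlFlowEdge(merge); Goto(merge);
//                 BindBlockAndGeneratePhis(false_block);
//   PopControl(): SetupControlFlowEdge(merge); Goto(merge);
//                 BindBlockAndGeneratePhis(merge);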
711
712 void Block(FullDecoder* decoder, Control* block) {
713 block->merge_block = NewBlockWithPhis(decoder, block->br_merge());
714 }
715
716 void Loop(FullDecoder* decoder, Control* block) {
717 TSBlock* loop = __ NewLoopHeader();
718 __ Goto(loop);
719 __ Bind(loop);
720
721 bool can_be_innermost = false; // unused
723 decoder, decoder->pc(), decoder->num_locals(), decoder->zone(),
724 &can_be_innermost);
725 block->assigned = assigned;
726
727 for (uint32_t i = 0; i < decoder->num_locals(); i++) {
728 if (!assigned->Contains(i)) continue;
729 OpIndex phi = __ PendingLoopPhi(
731 ssa_env_[i] = phi;
732 }
733 uint32_t arity = block->start_merge.arity;
734 Value* stack_base = arity > 0 ? decoder->stack_value(arity) : nullptr;
735 for (uint32_t i = 0; i < arity; i++) {
736 OpIndex phi = __ PendingLoopPhi(stack_base[i].op,
737 RepresentationFor(stack_base[i].type));
738 block->start_merge[i].op = phi;
739 }
740
741 StackCheck(WasmStackCheckOp::Kind::kLoop, decoder);
742
743 TSBlock* loop_merge = NewBlockWithPhis(decoder, &block->start_merge);
744 block->merge_block = loop_merge;
745 block->false_or_loop_or_catch_block = loop;
746 }
747
748 void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
749 TSBlock* true_block = __ NewBlock();
750 TSBlock* false_block = NewBlockWithPhis(decoder, nullptr);
751 TSBlock* merge_block = NewBlockWithPhis(decoder, &if_block->end_merge);
752 if_block->false_or_loop_or_catch_block = false_block;
753 if_block->merge_block = merge_block;
754 SetupControlFlowEdge(decoder, false_block);
755 __ Branch({cond.op, GetBranchHint(decoder)}, true_block, false_block);
756 __ Bind(true_block);
757 }
758
759 void Else(FullDecoder* decoder, Control* if_block) {
760 if (if_block->reachable()) {
761 SetupControlFlowEdge(decoder, if_block->merge_block);
762 __ Goto(if_block->merge_block);
763 }
765 nullptr);
766 }
767
768 void BrOrRet(FullDecoder* decoder, uint32_t depth, uint32_t drop_values = 0) {
769 if (depth == decoder->control_depth() - 1) {
770 DoReturn(decoder, drop_values);
771 } else {
772 Control* target = decoder->control_at(depth);
773 SetupControlFlowEdge(decoder, target->merge_block, drop_values);
774 __ Goto(target->merge_block);
775 }
776 }
777
778 void BrIf(FullDecoder* decoder, const Value& cond, uint32_t depth) {
779 BranchHint hint = GetBranchHint(decoder);
780 if (depth == decoder->control_depth() - 1) {
781 IF ({cond.op, hint}) {
782 DoReturn(decoder, 0);
783 }
784 } else {
785 Control* target = decoder->control_at(depth);
786 SetupControlFlowEdge(decoder, target->merge_block);
787 TSBlock* non_branching = __ NewBlock();
788 __ Branch({cond.op, hint}, target->merge_block, non_branching);
789 __ Bind(non_branching);
790 }
791 }
792
793 // An analysis to determine whether a br_table should be lowered to a switch
794 // or to a series of compares and branches. This can be done for small tables or larger
795 // 'sparse' ones, which include many cases but few targets. A sparse table may
796 // look like this: br_table [ 1, 0, 0, 0, 0, 0, 2, 0 ] which can be lowered to
797 // two conditional branches followed by an unconditional one. The advantages
798 // of this are reducing the space required for the table and reducing the
799 // latency.
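// Worked example (sketch): with default target 0, the table above,
// br_table [1, 0, 0, 0, 0, 0, 2, 0], has primary target 0 and
// other_targets {1 -> {0}, 2 -> {6}}, so BrTable() below would emit roughly:
//   if (key == 0) br 1;
//   if (key == 6) br 2;
//   br 0;  // primary == default, so no range check is needed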
800 template <typename ValidationTag>
802 public:
803 static constexpr int32_t kMaxComparesPerTarget = 2;
804 static constexpr uint32_t kMaxTargets = 3;
805 static constexpr int32_t kMaxTableCount = 20;
807 using TargetMap = absl::btree_map<uint32_t, CaseVector>;
808
809 bool LowerToBranches(Decoder* decoder, const BranchTableImmediate& imm) {
810 BranchTableIterator<ValidationTag> iterator(decoder, imm);
811 while (iterator.has_next()) {
812 uint32_t i = iterator.cur_index();
813 uint32_t target = iterator.next();
814
815 if (i == imm.table_count) {
816 AddDefault(target);
817 } else if (!TryAddTarget(target, i)) {
818 return false;
819 }
820 }
823 size_t total_targets = other_targets_.size() + 1;
824 if (default_target() != primary_target() &&
825 !other_targets_.count(default_target())) {
826 total_targets++;
827 }
828 return total_targets <= kMaxTargets;
829 }
830 // The most often occurring target, or the default if there is no other
831 // target with multiple cases.
832 uint32_t primary_target() const { return primary_target_.value(); }
833 // The default target, for when the br_table index is out-of-range.
834 uint32_t default_target() const { return default_target_.value(); }
835 // other_targets doesn't include the primary target, nor the default if it
836 // isn't an in-range target.
837 const TargetMap& other_targets() const { return other_targets_; }
838 // All the indices which target the primary target.
839 const CaseVector& primary_indices() const { return primary_indices_; }
840
841 private:
842 bool TryAddTarget(uint32_t target, uint32_t index) {
845 if (other_targets_.size() > kMaxTargets) {
846 return false;
847 }
848 if (cases.size() == kMaxComparesPerTarget) {
849 if (primary_target_.has_value() && target != primary_target()) {
850 return false;
851 }
853 }
854 cases.push_back(index);
855 return true;
856 }
857 void AddDefault(uint32_t target) {
859 if (!primary_target_.has_value()) {
861 }
862 }
863
864 std::optional<uint32_t> default_target_;
865 std::optional<uint32_t> primary_target_;
868 };
869
870 void BrTable(FullDecoder* decoder, const BranchTableImmediate& imm,
871 const Value& key) {
873 BrTableAnalysis<ValidationTag> table_analysis;
874 if (table_analysis.LowerToBranches(decoder, imm)) {
875 auto generate_cond =
876 [this](const Value& key,
878 -> OpIndex {
879 switch (cases.size()) {
880 default:
881 static_assert(
883 UNREACHABLE();
884 case 1:
885 return __ Word32Equal(key.op, __ Word32Constant(cases[0]));
886 case 2: {
887 return __ Word32BitwiseOr(__ Word32Equal(key.op, cases[0]),
888 __ Word32Equal(key.op, cases[1]));
889 }
890 }
891 };
892 auto insert_cond_branch = [this, &decoder](OpIndex cond,
893 uint32_t depth) {
894 BranchHint hint = GetBranchHint(decoder);
895 if (depth == decoder->control_depth() - 1) {
896 IF ({cond, hint}) {
897 DoReturn(decoder, 0);
898 }
899 } else {
900 Control* target = decoder->control_at(depth);
901 SetupControlFlowEdge(decoder, target->merge_block);
902 TSBlock* non_branching = __ NewBlock();
903 __ Branch({cond, hint}, target->merge_block, non_branching);
904 __ Bind(non_branching);
905 }
906 };
907 // Insert conditional branches to the other targets.
908 for (auto const& [target, cases] : table_analysis.other_targets()) {
909 DCHECK_LE(cases.size(),
911 insert_cond_branch(generate_cond(key, cases), target);
912 }
913 // If needed, insert the range check for the primary target.
914 if (table_analysis.primary_target() !=
915 table_analysis.default_target()) {
916 OpIndex lower = __ Word32Equal(__ Int32LessThan(key.op, 0), 0);
917 OpIndex upper =
918 __ Int32LessThan(key.op, __ Word32Constant(imm.table_count));
919 OpIndex cond = __ Word32BitwiseAnd(lower, upper);
920 insert_cond_branch(cond, table_analysis.primary_target());
921 }
922 // Always fallthrough and branch to the default case.
923 BrOrRet(decoder, table_analysis.default_target());
924 return;
925 }
926 }
927 compiler::turboshaft::SwitchOp::Case* cases =
928 __ output_graph().graph_zone()
929 ->AllocateArray<compiler::turboshaft::SwitchOp::Case>(
930 imm.table_count);
931 BranchTableIterator<ValidationTag> new_block_iterator(decoder, imm);
932 SmallZoneVector<TSBlock*, 16> intermediate_blocks(decoder->zone_);
933 TSBlock* default_case = nullptr;
934 while (new_block_iterator.has_next()) {
935 TSBlock* intermediate = __ NewBlock();
936 intermediate_blocks.emplace_back(intermediate);
937 uint32_t i = new_block_iterator.cur_index();
938 if (i == imm.table_count) {
939 default_case = intermediate;
940 } else {
941 cases[i] = {static_cast<int>(i), intermediate, BranchHint::kNone};
942 }
943 new_block_iterator.next();
944 }
946 __ Switch(key.op, base::VectorOf(cases, imm.table_count), default_case);
947
948 int i = 0;
949 BranchTableIterator<ValidationTag> branch_iterator(decoder, imm);
950 while (branch_iterator.has_next()) {
951 TSBlock* intermediate = intermediate_blocks[i];
952 i++;
953 __ Bind(intermediate);
954 BrOrRet(decoder, branch_iterator.next());
955 }
956 }
957
958 void FallThruTo(FullDecoder* decoder, Control* block) {
959 // TODO(14108): Why is {block->reachable()} not reliable here? Maybe it is
960 // not in other spots as well.
961 if (__ current_block() != nullptr) {
962 SetupControlFlowEdge(decoder, block->merge_block);
963 __ Goto(block->merge_block);
964 }
965 }
966
967 void PopControl(FullDecoder* decoder, Control* block) {
968 switch (block->kind) {
969 case kControlIf:
970 if (block->reachable()) {
971 SetupControlFlowEdge(decoder, block->merge_block);
972 __ Goto(block->merge_block);
973 }
974 BindBlockAndGeneratePhis(decoder, block->false_or_loop_or_catch_block,
975 nullptr);
976 // Exceptionally for one-armed if, we cannot take the values from the
977 // stack; we have to pass the stack values at the beginning of the
978 // if-block.
979 SetupControlFlowEdge(decoder, block->merge_block, 0, OpIndex::Invalid(),
980 &block->start_merge);
981 __ Goto(block->merge_block);
982 BindBlockAndGeneratePhis(decoder, block->merge_block,
983 block->br_merge());
984 break;
985 case kControlIfElse:
986 case kControlBlock:
987 case kControlTry:
988 case kControlTryCatch:
990 // {block->reachable()} is not reliable here for exceptions, because
991 // the decoder sets the reachability to the upper block's reachability
992 // before calling this interface function.
993 if (__ current_block() != nullptr) {
994 SetupControlFlowEdge(decoder, block->merge_block);
995 __ Goto(block->merge_block);
996 }
997 BindBlockAndGeneratePhis(decoder, block->merge_block,
998 block->br_merge());
999 break;
1000 case kControlTryTable:
1001 DCHECK_EQ(__ current_block(), nullptr);
1002 BindBlockAndGeneratePhis(decoder, block->merge_block,
1003 block->br_merge());
1004 break;
1005 case kControlLoop: {
1006 TSBlock* post_loop = NewBlockWithPhis(decoder, nullptr);
1007 if (block->reachable()) {
1008 SetupControlFlowEdge(decoder, post_loop);
1009 __ Goto(post_loop);
1010 }
1011 if (!block->false_or_loop_or_catch_block->IsBound()) {
1012 // The loop is unreachable. In this case, no operations have been
1013 // emitted for it. Do nothing.
1014 } else if (block->merge_block->PredecessorCount() == 0) {
1015 // Turns out, the loop has no backedges, i.e. it is not quite a loop
1016 // at all. Replace it with a merge, and its PendingPhis with one-input
1017 // phis.
1018 block->false_or_loop_or_catch_block->SetKind(
1020 for (auto& op : __ output_graph().operations(
1021 *block->false_or_loop_or_catch_block)) {
1022 PendingLoopPhiOp* pending_phi = op.TryCast<PendingLoopPhiOp>();
1023 if (!pending_phi) break;
1024 OpIndex replaced = __ output_graph().Index(op);
1025 __ output_graph().Replace<compiler::turboshaft::PhiOp>(
1026 replaced, base::VectorOf({pending_phi->first()}),
1027 pending_phi->rep);
1028 }
1029 } else {
1030 // We abuse the start merge of the loop, which is not used otherwise
1031 // anymore, to store backedge inputs for the pending phi stack values
1032 // of the loop.
1033 BindBlockAndGeneratePhis(decoder, block->merge_block,
1034 block->br_merge());
1035 __ Goto(block->false_or_loop_or_catch_block);
1036 auto operations = __ output_graph().operations(
1037 *block->false_or_loop_or_catch_block);
1038 auto to = operations.begin();
1039 // The VariableReducer can introduce loop phis as well which are at
1040 // the beginning of the block. We need to skip them.
1041 while (to != operations.end() &&
1042 to->Is<compiler::turboshaft::PhiOp>()) {
1043 ++to;
1044 }
1045 for (auto it = block->assigned->begin(); it != block->assigned->end();
1046 ++it, ++to) {
1047 // The last bit represents the instance cache.
1048 if (*it == static_cast<int>(ssa_env_.size())) break;
1049 PendingLoopPhiOp& pending_phi = to->Cast<PendingLoopPhiOp>();
1050 OpIndex replaced = __ output_graph().Index(*to);
1051 __ output_graph().Replace<compiler::turboshaft::PhiOp>(
1052 replaced, base::VectorOf({pending_phi.first(), ssa_env_[*it]}),
1053 pending_phi.rep);
1054 }
1055 for (uint32_t i = 0; i < block->br_merge()->arity; ++i, ++to) {
1056 PendingLoopPhiOp& pending_phi = to->Cast<PendingLoopPhiOp>();
1057 OpIndex replaced = __ output_graph().Index(*to);
1058 __ output_graph().Replace<compiler::turboshaft::PhiOp>(
1059 replaced,
1061 {pending_phi.first(), (*block->br_merge())[i].op}),
1062 pending_phi.rep);
1063 }
1064 }
1065 BindBlockAndGeneratePhis(decoder, post_loop, nullptr);
1066 break;
1067 }
1068 }
1069 }
1070
1071 void DoReturn(FullDecoder* decoder, uint32_t drop_values) {
1072 size_t return_count = decoder->sig_->return_count();
1073 SmallZoneVector<OpIndex, 16> return_values(return_count, decoder->zone_);
1074 Value* stack_base = return_count == 0
1075 ? nullptr
1076 : decoder->stack_value(static_cast<uint32_t>(
1077 return_count + drop_values));
1078 for (size_t i = 0; i < return_count; i++) {
1079 return_values[i] = stack_base[i].op;
1080 }
1081 if (v8_flags.trace_wasm) {
1082 V<WordPtr> info = __ IntPtrConstant(0);
1083 if (return_count == 1) {
1084 wasm::ValueType return_type = decoder->sig_->GetReturn(0);
1085 int size = return_type.value_kind_size();
1086 // TODO(14108): This won't fit everything.
1087 info = __ StackSlot(size, size);
1088 // TODO(14108): Write barrier might be needed.
1089 __ Store(
1090 info, return_values[0], StoreOp::Kind::RawAligned(),
1093 }
1094 CallRuntime(decoder->zone(), Runtime::kWasmTraceExit, {info},
1095 __ NoContextConstant());
1096 }
1097 if (mode_ == kRegular || mode_ == kInlinedTailCall) {
1098 __ Return(__ Word32Constant(0), base::VectorOf(return_values),
1099 v8_flags.experimental_wasm_growable_stacks);
1100 } else {
1101 // Do not add return values if we are in unreachable code.
1102 if (__ generating_unreachable_operations()) return;
1103 for (size_t i = 0; i < return_count; i++) {
1104 return_phis_->AddInputForPhi(i, return_values[i]);
1105 }
1106 __ Goto(return_block_);
1107 }
1108 }
1109
1110 void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value,
1111 Value* result) {
1112 result->op = UnOpImpl(opcode, value.op, value.type);
1113 }
1114
1115 void BinOp(FullDecoder* decoder, WasmOpcode opcode, const Value& lhs,
1116 const Value& rhs, Value* result) {
1117 result->op = BinOpImpl(opcode, lhs.op, rhs.op);
1118 }
1119
1120 void TraceInstruction(FullDecoder* decoder, uint32_t markid) {
1121 // TODO(14108): Implement.
1122 }
1123
1124 void I32Const(FullDecoder* decoder, Value* result, int32_t value) {
1125 result->op = __ Word32Constant(value);
1126 }
1127
1128 void I64Const(FullDecoder* decoder, Value* result, int64_t value) {
1129 result->op = __ Word64Constant(value);
1130 }
1131
1132 void F32Const(FullDecoder* decoder, Value* result, float value) {
1133 result->op = __ Float32Constant(value);
1134 }
1135
1136 void F64Const(FullDecoder* decoder, Value* result, double value) {
1137 result->op = __ Float64Constant(value);
1138 }
1139
1140 void S128Const(FullDecoder* decoder, const Simd128Immediate& imm,
1141 Value* result) {
1142 result->op = __ Simd128Constant(imm.value);
1143 }
1144
1145 void RefNull(FullDecoder* decoder, ValueType type, Value* result) {
1146 result->op = __ Null(type);
1147 }
1148
1149 void RefFunc(FullDecoder* decoder, uint32_t function_index, Value* result) {
1150 ModuleTypeIndex sig_index =
1151 decoder->module_->functions[function_index].sig_index;
1152 bool shared = decoder->module_->type(sig_index).is_shared;
1153 result->op = __ WasmRefFunc(trusted_instance_data(shared), function_index);
1154 }
1155
1156 void RefAsNonNull(FullDecoder* decoder, const Value& arg, Value* result) {
1157 result->op =
1158 __ AssertNotNull(arg.op, arg.type, TrapId::kTrapNullDereference);
1159 }
1160
1161 void Drop(FullDecoder* decoder) {}
1162
1164 const IndexImmediate& imm) {
1165 result->op = ssa_env_[imm.index];
1166 }
1167
1168 void LocalSet(FullDecoder* decoder, const Value& value,
1169 const IndexImmediate& imm) {
1170 ssa_env_[imm.index] = value.op;
1171 }
1172
1173 void LocalTee(FullDecoder* decoder, const Value& value, Value* result,
1174 const IndexImmediate& imm) {
1175 ssa_env_[imm.index] = result->op = value.op;
1176 }
1177
1179 const GlobalIndexImmediate& imm) {
1180 bool shared = decoder->module_->globals[imm.index].shared;
1181 result->op = __ GlobalGet(trusted_instance_data(shared), imm.global);
1182 }
1183
1184 void GlobalSet(FullDecoder* decoder, const Value& value,
1185 const GlobalIndexImmediate& imm) {
1186 bool shared = decoder->module_->globals[imm.index].shared;
1187 __ GlobalSet(trusted_instance_data(shared), value.op, imm.global);
1188 }
1189
1190 void Trap(FullDecoder* decoder, TrapReason reason) {
1191 __ TrapIfNot(__ Word32Constant(0), GetTrapIdForTrap(reason));
1192 __ Unreachable();
1193 }
1194
1195 void AssertNullTypecheck(FullDecoder* decoder, const Value& obj,
1196 Value* result) {
1197 __ TrapIfNot(__ IsNull(obj.op, obj.type), TrapId::kTrapIllegalCast);
1198 Forward(decoder, obj, result);
1199 }
1200
1201 void AssertNotNullTypecheck(FullDecoder* decoder, const Value& obj,
1202 Value* result) {
1203 __ AssertNotNull(obj.op, obj.type, TrapId::kTrapIllegalCast);
1204 Forward(decoder, obj, result);
1205 }
1206
1208 // This is just for testing bailouts in Liftoff; here it's just a nop.
1209 }
1210
1211 void Select(FullDecoder* decoder, const Value& cond, const Value& fval,
1212 const Value& tval, Value* result) {
1214 bool use_select = false;
1215 switch (tval.type.kind()) {
1216 case kI32:
1217 if (SupportedOperations::word32_select()) use_select = true;
1218 break;
1219 case kI64:
1220 if (SupportedOperations::word64_select()) use_select = true;
1221 break;
1222 case kF32:
1223 if (SupportedOperations::float32_select()) use_select = true;
1224 break;
1225 case kF64:
1226 if (SupportedOperations::float64_select()) use_select = true;
1227 break;
1228 case kRef:
1229 case kRefNull:
1230 case kS128:
1231 break;
1232 case kI8:
1233 case kI16:
1234 case kF16:
1235 case kVoid:
1236 case kTop:
1237 case kBottom:
1238 UNREACHABLE();
1239 }
1240 result->op = __ Select(
1241 cond.op, tval.op, fval.op, RepresentationFor(tval.type),
1243 use_select ? Implementation::kCMove : Implementation::kBranch);
1244 }
1245
1247 MachineRepresentation mem_rep,
1248 wasm::ValueType wasmtype) {
1250 OpIndex value = node;
1251 int value_size_in_bytes = wasmtype.value_kind_size();
1252 int value_size_in_bits = 8 * value_size_in_bytes;
1253 bool is_float = false;
1254
1255 switch (wasmtype.kind()) {
1256 case wasm::kF64:
1257 value = __ BitcastFloat64ToWord64(node);
1258 is_float = true;
1259 [[fallthrough]];
1260 case wasm::kI64:
1261 result = __ Word64Constant(static_cast<uint64_t>(0));
1262 break;
1263 case wasm::kF32:
1264 value = __ BitcastFloat32ToWord32(node);
1265 is_float = true;
1266 [[fallthrough]];
1267 case wasm::kI32:
1268 result = __ Word32Constant(0);
1269 break;
1270 case wasm::kS128:
1271 DCHECK(ReverseBytesSupported(value_size_in_bytes));
1272 break;
1273 default:
1274 UNREACHABLE();
1275 }
1276
1277 if (mem_rep == MachineRepresentation::kWord8) {
1278 // No need to change endianness for byte size, return original node
1279 return node;
1280 }
1281 if (wasmtype == wasm::kWasmI64 &&
1283 // In case we store the lower part of a WasmI64 expression, we can truncate
1284 // the upper 32 bits.
1285 value_size_in_bytes = wasm::kWasmI32.value_kind_size();
1286 value_size_in_bits = 8 * value_size_in_bytes;
1287 if (mem_rep == MachineRepresentation::kWord16) {
1288 value = __ Word32ShiftLeft(value, 16);
1289 }
1290 } else if (wasmtype == wasm::kWasmI32 &&
1291 mem_rep == MachineRepresentation::kWord16) {
1292 value = __ Word32ShiftLeft(value, 16);
1293 }
1294
1295 int i;
1296 uint32_t shift_count;
1297
1298 if (ReverseBytesSupported(value_size_in_bytes)) {
1299 switch (value_size_in_bytes) {
1300 case 4:
1301 result = __ Word32ReverseBytes(V<Word32>::Cast(value));
1302 break;
1303 case 8:
1304 result = __ Word64ReverseBytes(V<Word64>::Cast(value));
1305 break;
1306 case 16:
1307 result = __ Simd128ReverseBytes(
1309 break;
1310 default:
1311 UNREACHABLE();
1312 }
1313 } else {
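// Generic fallback: each iteration swaps one symmetric pair of bytes. The
// byte moving towards the MSB is masked out of (value << shift_count), the
// byte moving towards the LSB out of (value >> shift_count), and both are
// ORed into result.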
1314 for (i = 0, shift_count = value_size_in_bits - 8;
1315 i < value_size_in_bits / 2; i += 8, shift_count -= 16) {
1316 OpIndex shift_lower;
1317 OpIndex shift_higher;
1318 OpIndex lower_byte;
1319 OpIndex higher_byte;
1320
1321 DCHECK_LT(0, shift_count);
1322 DCHECK_EQ(0, (shift_count + 8) % 16);
1323
1324 if (value_size_in_bits > 32) {
1325 shift_lower = __ Word64ShiftLeft(value, shift_count);
1326 shift_higher = __ Word64ShiftRightLogical(value, shift_count);
1327 lower_byte = __ Word64BitwiseAnd(shift_lower,
1328 static_cast<uint64_t>(0xFF)
1329 << (value_size_in_bits - 8 - i));
1330 higher_byte = __ Word64BitwiseAnd(shift_higher,
1331 static_cast<uint64_t>(0xFF) << i);
1332 result = __ Word64BitwiseOr(result, lower_byte);
1333 result = __ Word64BitwiseOr(result, higher_byte);
1334 } else {
1335 shift_lower = __ Word32ShiftLeft(value, shift_count);
1336 shift_higher = __ Word32ShiftRightLogical(value, shift_count);
1337 lower_byte = __ Word32BitwiseAnd(shift_lower,
1338 static_cast<uint32_t>(0xFF)
1339 << (value_size_in_bits - 8 - i));
1340 higher_byte = __ Word32BitwiseAnd(shift_higher,
1341 static_cast<uint32_t>(0xFF) << i);
1342 result = __ Word32BitwiseOr(result, lower_byte);
1343 result = __ Word32BitwiseOr(result, higher_byte);
1344 }
1345 }
1346 }
1347
1348 if (is_float) {
1349 switch (wasmtype.kind()) {
1350 case wasm::kF64:
1351 result = __ BitcastWord64ToFloat64(result);
1352 break;
1353 case wasm::kF32:
1354 result = __ BitcastWord32ToFloat32(result);
1355 break;
1356 default:
1357 UNREACHABLE();
1358 }
1359 }
1360
1361 return result;
1362 }
1363
1365 wasm::ValueType wasmtype) {
1367 OpIndex value = node;
1368 int value_size_in_bytes = ElementSizeInBytes(memtype.representation());
1369 int value_size_in_bits = 8 * value_size_in_bytes;
1370 bool is_float = false;
1371
1372 switch (memtype.representation()) {
1374 value = __ BitcastFloat64ToWord64(node);
1375 is_float = true;
1376 [[fallthrough]];
1378 result = __ Word64Constant(static_cast<uint64_t>(0));
1379 break;
1381 value = __ BitcastFloat32ToWord32(node);
1382 is_float = true;
1383 [[fallthrough]];
1386 result = __ Word32Constant(0);
1387 break;
1389 // No need to change endianness for byte size, return original node.
1390 return node;
1392 DCHECK(ReverseBytesSupported(value_size_in_bytes));
1393 break;
1394 default:
1395 UNREACHABLE();
1396 }
1397
1398 int i;
1399 uint32_t shift_count;
1400
1401 if (ReverseBytesSupported(value_size_in_bytes < 4 ? 4
1402 : value_size_in_bytes)) {
1403 switch (value_size_in_bytes) {
1404 case 2:
1405 result = __ Word32ReverseBytes(__ Word32ShiftLeft(value, 16));
1406 break;
1407 case 4:
1408 result = __ Word32ReverseBytes(value);
1409 break;
1410 case 8:
1411 result = __ Word64ReverseBytes(value);
1412 break;
1413 case 16:
1414 result = __ Simd128ReverseBytes(value);
1415 break;
1416 default:
1417 UNREACHABLE();
1418 }
1419 } else {
1420 for (i = 0, shift_count = value_size_in_bits - 8;
1421 i < value_size_in_bits / 2; i += 8, shift_count -= 16) {
1422 OpIndex shift_lower;
1423 OpIndex shift_higher;
1424 OpIndex lower_byte;
1425 OpIndex higher_byte;
1426
1427 DCHECK_LT(0, shift_count);
1428 DCHECK_EQ(0, (shift_count + 8) % 16);
1429
1430 if (value_size_in_bits > 32) {
1431 shift_lower = __ Word64ShiftLeft(value, shift_count);
1432 shift_higher = __ Word64ShiftRightLogical(value, shift_count);
1433 lower_byte = __ Word64BitwiseAnd(shift_lower,
1434 static_cast<uint64_t>(0xFF)
1435 << (value_size_in_bits - 8 - i));
1436 higher_byte = __ Word64BitwiseAnd(shift_higher,
1437 static_cast<uint64_t>(0xFF) << i);
1438 result = __ Word64BitwiseOr(result, lower_byte);
1439 result = __ Word64BitwiseOr(result, higher_byte);
1440 } else {
1441 shift_lower = __ Word32ShiftLeft(value, shift_count);
1442 shift_higher = __ Word32ShiftRightLogical(value, shift_count);
1443 lower_byte = __ Word32BitwiseAnd(shift_lower,
1444 static_cast<uint32_t>(0xFF)
1445 << (value_size_in_bits - 8 - i));
1446 higher_byte = __ Word32BitwiseAnd(shift_higher,
1447 static_cast<uint32_t>(0xFF) << i);
1448 result = __ Word32BitwiseOr(result, lower_byte);
1449 result = __ Word32BitwiseOr(result, higher_byte);
1450 }
1451 }
1452 }
1453
1454 if (is_float) {
1455 switch (memtype.representation()) {
1457 result = __ BitcastWord64ToFloat64(result);
1458 break;
1460 result = __ BitcastWord32ToFloat32(result);
1461 break;
1462 default:
1463 UNREACHABLE();
1464 }
1465 }
1466
1467 // We need to sign- or zero-extend values narrower than 32 bits here;
1468 // values with size >= 32 bits may need to be sign/zero-extended after
1469 // calling this function.
1470 if (value_size_in_bits < 32) {
1471 DCHECK(!is_float);
1472 int shift_bit_count = 32 - value_size_in_bits;
1473 result = __ Word32ShiftLeft(result, shift_bit_count);
1474 if (memtype.IsSigned()) {
1475 result =
1476 __ Word32ShiftRightArithmeticShiftOutZeros(result, shift_bit_count);
1477 } else {
1478 result = __ Word32ShiftRightLogical(result, shift_bit_count);
1479 }
1480 }
1481
1482 return result;
1483 }
1484
1485 void LoadMem(FullDecoder* decoder, LoadType type,
1486 const MemoryAccessImmediate& imm, const Value& index,
1487 Value* result) {
1488 bool needs_f16_to_f32_conv = false;
1489 if (type.value() == LoadType::kF32LoadF16 &&
1490 !SupportedOperations::float16()) {
1491 needs_f16_to_f32_conv = true;
1492 type = LoadType::kI32Load16U;
1493 }
1496
1497 auto [final_index, strategy] =
1498 BoundsCheckMem(imm.memory, repr, index.op, imm.offset,
1501
1502 V<WordPtr> mem_start = MemStart(imm.memory->index);
1503
1504 LoadOp::Kind load_kind = GetMemoryAccessKind(repr, strategy);
1505
1506 const bool offset_in_int_range =
1507 imm.offset <= std::numeric_limits<int32_t>::max();
1508 OpIndex base =
1509 offset_in_int_range ? mem_start : __ WordPtrAdd(mem_start, imm.offset);
1510 int32_t offset = offset_in_int_range ? static_cast<int32_t>(imm.offset) : 0;
1511 OpIndex load = __ Load(base, final_index, load_kind, repr, offset);
1512
1513#if V8_TARGET_BIG_ENDIAN
1514 load = BuildChangeEndiannessLoad(load, type.mem_type(), type.value_type());
1515#endif
1516
1517 if (type.value_type() == kWasmI64 && repr.SizeInBytes() < 8) {
1518 load = repr.IsSigned() ? __ ChangeInt32ToInt64(load)
1519 : __ ChangeUint32ToUint64(load);
1520 }
1521
1522 if (needs_f16_to_f32_conv) {
1524 load, ExternalReference::wasm_float16_to_float32(),
1526 }
1527
1528 if (v8_flags.trace_wasm_memory) {
1529 // TODO(14259): Implement memory tracing for multiple memories.
1530 CHECK_EQ(0, imm.memory->index);
1531 TraceMemoryOperation(decoder, false, repr, final_index, imm.offset);
1532 }
1533
1534 result->op = load;
1535 }
1536
1538 LoadTransformationKind transform,
1539 const MemoryAccessImmediate& imm, const Value& index,
1540 Value* result) {
1544 : MemoryRepresentation::FromMachineType(type.mem_type());
1545
1546 auto [final_index, strategy] =
1547 BoundsCheckMem(imm.memory, repr, index.op, imm.offset,
1550
1551 compiler::turboshaft::Simd128LoadTransformOp::LoadKind load_kind =
1552 GetMemoryAccessKind(repr, strategy);
1553
1554 using TransformKind =
1555 compiler::turboshaft::Simd128LoadTransformOp::TransformKind;
1556
1557 TransformKind transform_kind;
1558
1559 if (transform == LoadTransformationKind::kExtend) {
1560 if (type.mem_type() == MachineType::Int8()) {
1561 transform_kind = TransformKind::k8x8S;
1562 } else if (type.mem_type() == MachineType::Uint8()) {
1563 transform_kind = TransformKind::k8x8U;
1564 } else if (type.mem_type() == MachineType::Int16()) {
1565 transform_kind = TransformKind::k16x4S;
1566 } else if (type.mem_type() == MachineType::Uint16()) {
1567 transform_kind = TransformKind::k16x4U;
1568 } else if (type.mem_type() == MachineType::Int32()) {
1569 transform_kind = TransformKind::k32x2S;
1570 } else if (type.mem_type() == MachineType::Uint32()) {
1571 transform_kind = TransformKind::k32x2U;
1572 } else {
1573 UNREACHABLE();
1574 }
1575 } else if (transform == LoadTransformationKind::kSplat) {
1576 if (type.mem_type() == MachineType::Int8()) {
1577 transform_kind = TransformKind::k8Splat;
1578 } else if (type.mem_type() == MachineType::Int16()) {
1579 transform_kind = TransformKind::k16Splat;
1580 } else if (type.mem_type() == MachineType::Int32()) {
1581 transform_kind = TransformKind::k32Splat;
1582 } else if (type.mem_type() == MachineType::Int64()) {
1583 transform_kind = TransformKind::k64Splat;
1584 } else {
1585 UNREACHABLE();
1586 }
1587 } else {
1588 if (type.mem_type() == MachineType::Int32()) {
1589 transform_kind = TransformKind::k32Zero;
1590 } else if (type.mem_type() == MachineType::Int64()) {
1591 transform_kind = TransformKind::k64Zero;
1592 } else {
1593 UNREACHABLE();
1594 }
1595 }
1596
1597 V<compiler::turboshaft::Simd128> load = __ Simd128LoadTransform(
1598 __ WordPtrAdd(MemStart(imm.mem_index), imm.offset), final_index,
1599 load_kind, transform_kind, 0);
1600
1601 if (v8_flags.trace_wasm_memory) {
1602 TraceMemoryOperation(decoder, false, repr, final_index, imm.offset);
1603 }
1604
1605 result->op = load;
1606 }
1607
1608 void LoadLane(FullDecoder* decoder, LoadType type, const Value& value,
1609 const Value& index, const MemoryAccessImmediate& imm,
1610 const uint8_t laneidx, Value* result) {
1611 using compiler::turboshaft::Simd128LaneMemoryOp;
1612
1615
1616 auto [final_index, strategy] =
1617 BoundsCheckMem(imm.memory, repr, index.op, imm.offset,
1620 Simd128LaneMemoryOp::Kind kind = GetMemoryAccessKind(repr, strategy);
1621
1622 Simd128LaneMemoryOp::LaneKind lane_kind;
1623
1624 switch (repr) {
1626 lane_kind = Simd128LaneMemoryOp::LaneKind::k8;
1627 break;
1629 lane_kind = Simd128LaneMemoryOp::LaneKind::k16;
1630 break;
1632 lane_kind = Simd128LaneMemoryOp::LaneKind::k32;
1633 break;
1635 lane_kind = Simd128LaneMemoryOp::LaneKind::k64;
1636 break;
1637 default:
1638 UNREACHABLE();
1639 }
1640
1641 // TODO(14108): If `offset` is in int range, use it as static offset, or
1642 // consider using a larger type as offset.
1643 OpIndex load = __ Simd128LaneMemory(
1644 __ WordPtrAdd(MemStart(imm.mem_index), imm.offset), final_index,
1645 value.op, Simd128LaneMemoryOp::Mode::kLoad, kind, lane_kind, laneidx,
1646 0);
1647
1648 if (v8_flags.trace_wasm_memory) {
1649 TraceMemoryOperation(decoder, false, repr, final_index, imm.offset);
1650 }
1651
1652 result->op = load;
1653 }
1654
1655 void StoreMem(FullDecoder* decoder, StoreType type,
1656 const MemoryAccessImmediate& imm, const Value& index,
1657 const Value& value) {
1658 bool needs_f32_to_f16_conv = false;
1659 if (type.value() == StoreType::kF32StoreF16 &&
1660 !SupportedOperations::float16()) {
1661 needs_f32_to_f16_conv = true;
1662 type = StoreType::kI32Store16;
1663 }
1666
1667 compiler::EnforceBoundsCheck enforce_bounds_check =
1668 (wasm::kPartialOOBWritesAreNoops || type.size() == 1)
1671
1672 auto [final_index, strategy] =
1673 BoundsCheckMem(imm.memory, repr, index.op, imm.offset,
1674 enforce_bounds_check, compiler::AlignmentCheck::kNo);
1675
1676 V<WordPtr> mem_start = MemStart(imm.memory->index);
1677
1678 StoreOp::Kind store_kind = GetMemoryAccessKind(repr, strategy);
1679
1680 OpIndex store_value = value.op;
1681 if (value.type == kWasmI64 && repr.SizeInBytes() <= 4) {
1682 store_value = __ TruncateWord64ToWord32(store_value);
1683 }
1684 if (needs_f32_to_f16_conv) {
1685 store_value = CallCStackSlotToStackSlot(
1686 store_value, ExternalReference::wasm_float32_to_float16(),
1688 }
1689
1690#if defined(V8_TARGET_BIG_ENDIAN)
1691 store_value = BuildChangeEndiannessStore(store_value, type.mem_rep(),
1692 type.value_type());
1693#endif
1694 const bool offset_in_int_range =
1695 imm.offset <= std::numeric_limits<int32_t>::max();
1696 OpIndex base =
1697 offset_in_int_range ? mem_start : __ WordPtrAdd(mem_start, imm.offset);
1698 int32_t offset = offset_in_int_range ? static_cast<int32_t>(imm.offset) : 0;
1699 __ Store(base, final_index, store_value, store_kind, repr,
1701
1702 if (v8_flags.trace_wasm_memory) {
1703 // TODO(14259): Implement memory tracing for multiple memories.
1704 CHECK_EQ(0, imm.memory->index);
1705 TraceMemoryOperation(decoder, true, repr, final_index, imm.offset);
1706 }
1707 }
1708
1709 void StoreLane(FullDecoder* decoder, StoreType type,
1710 const MemoryAccessImmediate& imm, const Value& index,
1711 const Value& value, const uint8_t laneidx) {
1712 using compiler::turboshaft::Simd128LaneMemoryOp;
1713
1716
1717 compiler::EnforceBoundsCheck enforce_bounds_check =
1718 (wasm::kPartialOOBWritesAreNoops || type.size() == 1)
1721
1722 auto [final_index, strategy] =
1723 BoundsCheckMem(imm.memory, repr, index.op, imm.offset,
1724 enforce_bounds_check, compiler::AlignmentCheck::kNo);
1725 Simd128LaneMemoryOp::Kind kind = GetMemoryAccessKind(repr, strategy);
1726
1727 Simd128LaneMemoryOp::LaneKind lane_kind;
1728
1729 switch (repr) {
1730 // TODO(manoskouk): Why use unsigned representations here as opposed to
1731 // LoadLane?
1733 lane_kind = Simd128LaneMemoryOp::LaneKind::k8;
1734 break;
1736 lane_kind = Simd128LaneMemoryOp::LaneKind::k16;
1737 break;
1739 lane_kind = Simd128LaneMemoryOp::LaneKind::k32;
1740 break;
1742 lane_kind = Simd128LaneMemoryOp::LaneKind::k64;
1743 break;
1744 default:
1745 UNREACHABLE();
1746 }
1747
1748 // TODO(14108): If `offset` is in int range, use it as static offset, or
1749 // consider using a larger type as offset.
1750 __ Simd128LaneMemory(__ WordPtrAdd(MemStart(imm.mem_index), imm.offset),
1751 final_index, value.op,
1752 Simd128LaneMemoryOp::Mode::kStore, kind, lane_kind,
1753 laneidx, 0);
1754
1755 if (v8_flags.trace_wasm_memory) {
1756 TraceMemoryOperation(decoder, true, repr, final_index, imm.offset);
1757 }
1758 }
1759
1761 Value* result) {
1762 V<WordPtr> result_wordptr =
1763 __ WordPtrShiftRightArithmetic(MemSize(imm.index), kWasmPageSizeLog2);
1764 // In the 32-bit case, truncation happens implicitly.
1765 if (imm.memory->is_memory64()) {
1766 result->op = __ ChangeIntPtrToInt64(result_wordptr);
1767 } else {
1768 result->op = __ TruncateWordPtrToWord32(result_wordptr);
1769 }
1770 }
1771
1773 const Value& value, Value* result) {
1774 if (!imm.memory->is_memory64()) {
1775 result->op =
1777 decoder, {__ Word32Constant(imm.index), value.op});
1778 } else {
1779 Label<Word64> done(&asm_);
1780
1781 IF (LIKELY(__ Uint64LessThanOrEqual(
1782 value.op, __ Word64Constant(static_cast<int64_t>(kMaxInt))))) {
1783 GOTO(done, __ ChangeInt32ToInt64(CallBuiltinThroughJumptable<
1784 BuiltinCallDescriptor::WasmMemoryGrow>(
1785 decoder, {__ Word32Constant(imm.index),
1786 __ TruncateWord64ToWord32(value.op)})));
1787 } ELSE {
1788 GOTO(done, __ Word64Constant(int64_t{-1}));
1789 }
1790
1791 BIND(done, result_64);
1792
1793 result->op = result_64;
1794 }
1796 }
1797
1800 V<Map> rtt = OpIndex::Invalid();
1801 return __ WasmTypeCheck(value.op, rtt, config);
1802 }
1803
1804 V<String> ExternRefToString(const Value value, bool null_succeeds = false) {
1805 wasm::ValueType target_type =
1807 compiler::WasmTypeCheckConfig config{value.type, target_type};
1808 V<Map> rtt = OpIndex::Invalid();
1809 return V<String>::Cast(__ WasmTypeCast(value.op, rtt, config));
1810 }
1811
1812 bool IsExplicitStringCast(const Value value) {
1813 if (__ generating_unreachable_operations()) return false;
1814 const WasmTypeCastOp* cast =
1815 __ output_graph().Get(value.op).TryCast<WasmTypeCastOp>();
1816 return cast && cast->config.to == kWasmRefExternString;
1817 }
1818
1820 V<String> search, V<Word32> start) {
1821 // Clamp the start index.
1822 Label<Word32> clamped_start_label(&asm_);
1823 GOTO_IF(__ Int32LessThan(start, 0), clamped_start_label,
1824 __ Word32Constant(0));
1825 V<Word32> length = __ template LoadField<Word32>(
1827 GOTO_IF(__ Int32LessThan(start, length), clamped_start_label, start);
1828 GOTO(clamped_start_label, length);
1829 BIND(clamped_start_label, clamped_start);
1830 start = clamped_start;
1831
1832 // This can't overflow because we've clamped `start` above.
1833 V<Smi> start_smi = __ TagSmi(start);
1834 BuildModifyThreadInWasmFlag(decoder->zone(), false);
1835
1836 V<Smi> result_value =
1838 decoder, {string, search, start_smi});
1839 BuildModifyThreadInWasmFlag(decoder->zone(), true);
1840
1841 return __ UntagSmi(result_value);
1842 }
1843
1844#if V8_INTL_SUPPORT
1845 V<String> CallStringToLowercase(FullDecoder* decoder, V<String> string) {
1846 BuildModifyThreadInWasmFlag(decoder->zone(), false);
1848 BuiltinCallDescriptor::StringToLowerCaseIntl>(
1849 decoder, __ NoContextConstant(), {string});
1850 BuildModifyThreadInWasmFlag(decoder->zone(), true);
1851 return result;
1852 }
1853#endif
1854
1856 OpIndex isolate_root = __ LoadRootRegister();
1857 __ Store(isolate_root, __ Word32Constant(op_type),
1860 }
1861
1863 DataViewOp op_type) {
1866 decoder, {V<JSDataView>::Cast(dataview)});
1867 __ Unreachable();
1868 }
1869
1873 BuiltinCallDescriptor::ThrowDataViewOutOfBounds>(decoder, {});
1874 __ Unreachable();
1875 }
1876
1880 BuiltinCallDescriptor::ThrowDataViewDetachedError>(decoder, {});
1881 __ Unreachable();
1882 }
1883
1885 V<WordPtr> right, DataViewOp op_type) {
1886 IF (UNLIKELY(__ IntPtrLessThan(left, right))) {
1887 ThrowDataViewOutOfBoundsError(decoder, op_type);
1888 }
1889 }
1890
1892 V<WordPtr> right, DataViewOp op_type) {
1893 IF (UNLIKELY(__ IntPtrLessThan(left, right))) {
1894 ThrowDataViewDetachedError(decoder, op_type);
1895 }
1896 }
1897
1898  void DataViewDetachedBufferCheck(FullDecoder* decoder, V<Object> dataview,
1899                                   DataViewOp op_type) {
1900 IF (UNLIKELY(
1901 __ ArrayBufferIsDetached(V<JSArrayBufferView>::Cast(dataview)))) {
1902 ThrowDataViewDetachedError(decoder, op_type);
1903 }
1904 }
1905
1906  V<WordPtr> GetDataViewByteLength(FullDecoder* decoder, V<Object> dataview,
1907                                   DataViewOp op_type) {
1909 return GetDataViewByteLength(decoder, dataview, __ IntPtrConstant(0),
1910 op_type);
1911 }
1912
1913 // Converts a Smi or HeapNumber to an intptr. The input is not validated.
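  // (A Smi already carries the integer in its tagged word and is simply
  // untagged; a HeapNumber stores a float64, which is truncated to an integer
  // below.)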
1914  V<WordPtr> ChangeTaggedNumberToIntPtr(V<Object> tagged) {
1915    Label<> smi_label(&asm_);
1916 Label<> heapnumber_label(&asm_);
1917 Label<WordPtr> done_label(&asm_);
1918
1919 GOTO_IF(LIKELY(__ IsSmi(tagged)), smi_label);
1920 GOTO(heapnumber_label);
1921
1922 BIND(smi_label);
1923 V<WordPtr> smi_length =
1924 __ ChangeInt32ToIntPtr(__ UntagSmi(V<Smi>::Cast(tagged)));
1925 GOTO(done_label, smi_length);
1926
1927 BIND(heapnumber_label);
1928 V<Float64> float_value = __ template LoadField<Float64>(
1930 if constexpr (Is64()) {
1931 DCHECK_EQ(WordPtr::bits, Word64::bits);
1932 GOTO(done_label,
1934 __ TruncateFloat64ToInt64OverflowUndefined(float_value)));
1935 } else {
1936 GOTO(done_label,
1937 __ ChangeInt32ToIntPtr(
1938 __ TruncateFloat64ToInt32OverflowUndefined(float_value)));
1939 }
1940
1941 BIND(done_label, length);
1942 return length;
1943 }
1944
1945 // An `ArrayBuffer` can be resizable, i.e. it can shrink or grow.
1946 // A `SharedArrayBuffer` can be growable, i.e. it can only grow. A `DataView`
1947  // can be length-tracking or non-length-tracking. A length-tracking `DataView`
1948 // is tracking the length of the underlying buffer, i.e. it doesn't have a
1949 // `byteLength` specified, which means that the length of the `DataView` is
1950 // the length (or remaining length if `byteOffset != 0`) of the underlying
1951 // array buffer. On the other hand, a non-length-tracking `DataView` has a
1952 // `byteLength`.
1953 // Depending on whether the buffer is resizable or growable and the `DataView`
1954 // is length-tracking or non-length-tracking, getting the byte length has to
1955 // be handled differently.
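  // For illustration at the JS level: `new DataView(buf)` without an explicit
  // length is length-tracking, whereas `new DataView(buf, 8, 16)` has a fixed
  // byteLength of 16 (and goes out of bounds if `buf` shrinks below
  // offset + length).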
1956  V<WordPtr> GetDataViewByteLength(FullDecoder* decoder, V<Object> dataview,
1957                                   V<WordPtr> offset, DataViewOp op_type) {
1958 Label<WordPtr> done_label(&asm_);
1959 Label<> type_error_label(&asm_);
1960
1961 GOTO_IF(UNLIKELY(__ IsSmi(dataview)), type_error_label);
1962
1963 // Case 1):
1964 // - non-resizable ArrayBuffers, length-tracking and non-length-tracking
1965 // - non-growable SharedArrayBuffers, length-tracking and non-length-tr.
1966 // - growable SharedArrayBuffers, non-length-tracking
1967 IF (LIKELY(__ HasInstanceType(dataview, InstanceType::JS_DATA_VIEW_TYPE))) {
1968 if (op_type != DataViewOp::kByteLength) {
1969 DataViewRangeCheck(decoder, offset, __ IntPtrConstant(0), op_type);
1970 }
1971 DataViewDetachedBufferCheck(decoder, dataview, op_type);
1972 V<WordPtr> view_byte_length = __ LoadField<WordPtr>(
1974 GOTO(done_label, view_byte_length);
1975 }
1976
1977 // Case 2):
1978 // - resizable ArrayBuffers, length-tracking and non-length-tracking
1979 // - growable SharedArrayBuffers, length-tracking
1980 GOTO_IF_NOT(LIKELY(__ HasInstanceType(
1981 dataview, InstanceType::JS_RAB_GSAB_DATA_VIEW_TYPE)),
1982 type_error_label);
1983 if (op_type != DataViewOp::kByteLength) {
1984 DataViewRangeCheck(decoder, offset, __ IntPtrConstant(0), op_type);
1985 }
1986 DataViewDetachedBufferCheck(decoder, dataview, op_type);
1987
1988 V<Word32> bit_field = __ LoadField<Word32>(
1990 V<Word32> length_tracking = __ Word32BitwiseAnd(
1991 bit_field, JSArrayBufferView::IsLengthTrackingBit::kMask);
1992 V<Word32> backed_by_rab_bit = __ Word32BitwiseAnd(
1993 bit_field, JSArrayBufferView::IsBackedByRabBit::kMask);
1994
1995 V<Object> buffer = __ LoadField<Object>(
1997 V<WordPtr> buffer_byte_length = __ LoadField<WordPtr>(
1999 V<WordPtr> view_byte_offset = __ LoadField<WordPtr>(
2001
2002 // The final length for each case in Case 2) is calculated differently.
2003 // Case: resizable ArrayBuffers, LT and non-LT.
2004 IF (backed_by_rab_bit) {
2005 // DataViews with resizable ArrayBuffers can go out of bounds.
2006 IF (length_tracking) {
2007 ScopedVar<WordPtr> final_length(this, 0);
2008 IF (LIKELY(__ UintPtrLessThanOrEqual(view_byte_offset,
2009 buffer_byte_length))) {
2010 final_length = __ WordPtrSub(buffer_byte_length, view_byte_offset);
2011 }
2012 DataViewBoundsCheck(decoder, buffer_byte_length, view_byte_offset,
2013 op_type);
2014 GOTO(done_label, final_length);
2015 } ELSE {
2016 V<WordPtr> view_byte_length = __ LoadField<WordPtr>(
2018 DataViewBoundsCheck(decoder, buffer_byte_length,
2019 __ WordPtrAdd(view_byte_offset, view_byte_length),
2020 op_type);
2021
2022 GOTO(done_label, view_byte_length);
2023 }
2024 }
2025 // Case: growable SharedArrayBuffers, LT.
2026 ELSE {
2027 V<Object> gsab_length_tagged = CallRuntime(
2028 decoder->zone(), Runtime::kGrowableSharedArrayBufferByteLength,
2029 {buffer}, __ NoContextConstant());
2030 V<WordPtr> gsab_length = ChangeTaggedNumberToIntPtr(gsab_length_tagged);
2031 ScopedVar<WordPtr> gsab_buffer_byte_length(this, 0);
2032 IF (LIKELY(__ UintPtrLessThanOrEqual(view_byte_offset, gsab_length))) {
2033 gsab_buffer_byte_length = __ WordPtrSub(gsab_length, view_byte_offset);
2034 }
2035 GOTO(done_label, gsab_buffer_byte_length);
2036 }
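    // All branches above jump to {done_label}, so this point is never reached.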
2037 __ Unreachable();
2038
2039 BIND(type_error_label);
2040 ThrowDataViewTypeError(decoder, dataview, op_type);
2041
2042 BIND(done_label, final_view_byte_length);
2043 return final_view_byte_length;
2044 }
2045
2046  V<WordPtr> GetDataViewDataPtr(FullDecoder* decoder, V<Object> dataview,
2047                                V<WordPtr> offset, DataViewOp op_type) {
2048 V<WordPtr> view_byte_length =
2049 GetDataViewByteLength(decoder, dataview, offset, op_type);
2050 V<WordPtr> view_byte_length_minus_size =
2051 __ WordPtrSub(view_byte_length, GetTypeSize(op_type));
2052 DataViewRangeCheck(decoder, view_byte_length_minus_size, offset, op_type);
2053 return __ LoadField<WordPtr>(
2055 }
2056
2057  OpIndex DataViewGetter(FullDecoder* decoder, const Value args[],
2058                         DataViewOp op_type) {
2059 V<Object> dataview = args[0].op;
2060 V<WordPtr> offset = __ ChangeInt32ToIntPtr(args[1].op);
2061 V<Word32> is_little_endian =
2062 (op_type == DataViewOp::kGetInt8 || op_type == DataViewOp::kGetUint8)
2063 ? __ Word32Constant(1)
2064 : args[2].op;
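    // For the 8-bit accessors endianness is irrelevant (the corresponding JS
    // getters take no littleEndian argument), so a constant is used instead of
    // reading args[2].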
2065
2066 V<WordPtr> data_ptr =
2067 GetDataViewDataPtr(decoder, dataview, offset, op_type);
2068 return __ LoadDataViewElement(dataview, data_ptr, offset, is_little_endian,
2069 GetExternalArrayType(op_type));
2070 }
2071
2072 void DataViewSetter(FullDecoder* decoder, const Value args[],
2073 DataViewOp op_type) {
2074 V<Object> dataview = args[0].op;
2075 V<WordPtr> offset = __ ChangeInt32ToIntPtr(args[1].op);
2076 V<Word32> value = args[2].op;
2077 V<Word32> is_little_endian =
2078 (op_type == DataViewOp::kSetInt8 || op_type == DataViewOp::kSetUint8)
2079 ? __ Word32Constant(1)
2080 : args[3].op;
2081
2082 V<WordPtr> data_ptr =
2083 GetDataViewDataPtr(decoder, dataview, offset, op_type);
2084 __ StoreDataViewElement(dataview, data_ptr, offset, value, is_little_endian,
2085 GetExternalArrayType(op_type));
2086 }
2087
2088 // Adds a wasm type annotation to the graph and replaces any extern type with
2089 // the extern string type.
2090 template <typename T>
2091  V<T> AnnotateAsString(V<T> value, ValueType type) {
2092    DCHECK(type.is_reference_to(HeapType::kString) ||
2093 type.is_reference_to(HeapType::kExternString) ||
2094 type.is_reference_to(HeapType::kExtern));
2095 if (type.is_reference_to(HeapType::kExtern)) {
2097 }
2098 return __ AnnotateWasmType(value, type);
2099 }
2100
2101  void WellKnown_FastApi(FullDecoder* decoder, const CallFunctionImmediate& imm,
2102                         const Value args[], Value returns[]) {
2103 uint32_t func_index = imm.index;
2104 V<Object> receiver = args[0].op;
2105 // TODO(14616): Fix this.
2107 trusted_instance_data(false), WellKnownImports,
2109 V<Object> data = __ LoadFixedArrayElement(imports_array, func_index);
2110 V<Object> cached_map = __ Load(data, LoadOp::Kind::TaggedBase(),
2112 WasmFastApiCallData::kCachedMapOffset);
2113
2114 Label<> if_equal_maps(&asm_);
2115 Label<> if_unknown_receiver(&asm_);
2116 GOTO_IF(__ IsSmi(receiver), if_unknown_receiver);
2117
2118 V<Map> map = __ LoadMapField(V<Object>::Cast(receiver));
2119
2120 // Clear the weak bit.
2121 cached_map = __ BitcastWordPtrToTagged(__ WordPtrBitwiseAnd(
2122 __ BitcastTaggedToWordPtr(cached_map), ~kWeakHeapObjectMask));
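    // The cached map is held as a weak reference; masking off the weak tag bit
    // yields a value that can be compared directly against the receiver's map.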
2123 GOTO_IF(__ TaggedEqual(map, cached_map), if_equal_maps);
2124 GOTO(if_unknown_receiver);
2125
2126 BIND(if_unknown_receiver);
2129 BuiltinCallDescriptor::WasmFastApiCallTypeCheckAndUpdateIC>(
2130 decoder, context, {data, receiver});
2131 GOTO(if_equal_maps);
2132
2133 BIND(if_equal_maps);
2134 OpIndex receiver_handle = __ AdaptLocalArgument(receiver);
2135
2136 const wasm::FunctionSig* sig = decoder->module_->functions[func_index].sig;
2137 size_t param_count = sig->parameter_count();
2138 DCHECK_LE(sig->return_count(), 1);
2139
2140 const MachineSignature* callback_sig =
2141 env_->fast_api_signatures[func_index];
2142    // All normal parameters, plus the options object as an additional parameter at the end.
2143 MachineSignature::Builder builder(decoder->zone(), sig->return_count(),
2144 param_count + 1);
2145 if (sig->return_count()) {
2146 builder.AddReturn(callback_sig->GetReturn());
2147 }
2148 // The first parameter is the receiver. Because of the fake handle on the
2149 // stack the type is `Pointer`.
2150 builder.AddParam(MachineType::Pointer());
2151
2152 for (size_t i = 0; i < callback_sig->parameter_count(); ++i) {
2153 builder.AddParam(callback_sig->GetParam(i));
2154 }
2155 // Options object.
2156 builder.AddParam(MachineType::Pointer());
2157
2158 base::SmallVector<OpIndex, 16> inputs(param_count + 1);
2159
2160 inputs[0] = receiver_handle;
2161
2162 Label<> value_out_of_range(&asm_);
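    // Convert each wasm argument to the machine type the fast API callback
    // expects. Float arguments that do not fit the target integer type jump to
    // {value_out_of_range}, which falls back to the regular (non-fast) call.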
2163 for (size_t i = 1; i < param_count; ++i) {
2164 if (sig->GetParam(i).is_reference()) {
2165 inputs[i] = __ AdaptLocalArgument(args[i].op);
2166 } else if (callback_sig->GetParam(i - 1).representation() ==
2168 if (sig->GetParam(i) == kWasmI64) {
2169          // If we already have an I64, then no conversion is needed for
2170          // either int64 or uint64.
2171 inputs[i] = args[i].op;
2172 } else if (callback_sig->GetParam(i - 1) == MachineType::Int64()) {
2173 if (sig->GetParam(i) == kWasmF64) {
2174 V<Tuple<Word64, Word32>> truncate =
2175 __ TryTruncateFloat64ToInt64(args[i].op);
2176 inputs[i] = __ template Projection<0>(truncate);
2178 __ Word32Equal(__ template Projection<1>(truncate), 0)),
2179 value_out_of_range);
2180 } else if (sig->GetParam(i) == kWasmI32) {
2181 inputs[i] = __ ChangeInt32ToInt64(args[i].op);
2182 } else {
2183 // TODO(ahaas): Handle values that are out of range of int64.
2184 CHECK_EQ(sig->GetParam(i), kWasmF32);
2185 V<Tuple<Word64, Word32>> truncate =
2186 __ TryTruncateFloat32ToInt64(args[i].op);
2187 inputs[i] = __ template Projection<0>(truncate);
2189 __ Word32Equal(__ template Projection<1>(truncate), 0)),
2190 value_out_of_range);
2191 }
2192 } else if (callback_sig->GetParam(i - 1) == MachineType::Uint64()) {
2193 if (sig->GetParam(i) == kWasmF64) {
2194 V<Tuple<Word64, Word32>> truncate =
2195 __ TryTruncateFloat64ToUint64(args[i].op);
2196 inputs[i] = __ template Projection<0>(truncate);
2198 __ Word32Equal(__ template Projection<1>(truncate), 0)),
2199 value_out_of_range);
2200 } else if (sig->GetParam(i) == kWasmI32) {
2201 inputs[i] = __ ChangeUint32ToUint64(args[i].op);
2202 } else {
2203 // TODO(ahaas): Handle values that are out of range of int64.
2204 CHECK_EQ(sig->GetParam(i), kWasmF32);
2205 V<Tuple<Word64, Word32>> truncate =
2206 __ TryTruncateFloat32ToUint64(args[i].op);
2207 inputs[i] = __ template Projection<0>(truncate);
2209 __ Word32Equal(__ template Projection<1>(truncate), 0)),
2210 value_out_of_range);
2211 }
2212 }
2213 } else {
2214 inputs[i] = args[i].op;
2215 }
2216 }
2217
2218 OpIndex options_object;
2219 {
2220 const int kAlign = alignof(v8::FastApiCallbackOptions);
2221 const int kSize = sizeof(v8::FastApiCallbackOptions);
2222
2223 options_object = __ StackSlot(kSize, kAlign);
2224
2225 static_assert(
2226 sizeof(v8::FastApiCallbackOptions::isolate) == sizeof(intptr_t),
2227 "We expected 'isolate' to be pointer sized, but it is not.");
2228 __ StoreOffHeap(options_object,
2229 __ IsolateField(IsolateFieldId::kIsolateAddress),
2231 offsetof(v8::FastApiCallbackOptions, isolate));
2232
2233 V<Object> callback_data =
2236 WasmFastApiCallData::kCallbackDataOffset);
2237 V<WordPtr> data_argument_to_pass = __ AdaptLocalArgument(callback_data);
2238
2239 __ StoreOffHeap(options_object, data_argument_to_pass,
2241 offsetof(v8::FastApiCallbackOptions, data));
2242 }
2243
2244 inputs[param_count] = options_object;
2245
2246 const CallDescriptor* call_descriptor =
2248 builder.Get());
2249 const TSCallDescriptor* ts_call_descriptor = TSCallDescriptor::Create(
2250 call_descriptor, compiler::CanThrow::kNo,
2252 OpIndex target_address = __ ExternalConstant(ExternalReference::Create(
2253 env_->fast_api_targets[func_index].load(std::memory_order_relaxed),
2255
2257
2258 __ Store(__ LoadRootRegister(),
2259 __ BitcastHeapObjectToWordPtr(native_context),
2263 auto [old_sp, old_limit] = BuildSwitchToTheCentralStackIfNeeded();
2264 OpIndex ret_val = __ Call(target_address, OpIndex::Invalid(),
2265 base::VectorOf(inputs), ts_call_descriptor);
2266 BuildSwitchBackFromCentralStack(old_sp, old_limit);
2267
2268#if DEBUG
2269 // Reset the context again after the call, to make sure nobody is using the
2270 // leftover context in the isolate.
2271 __ Store(__ LoadRootRegister(),
2272 __ WordPtrConstant(Context::kInvalidContext),
2275#endif
2276
2277 V<Object> exception = __ Load(
2278 __ LoadRootRegister(), LoadOp::Kind::RawAligned(),
2280
2281 IF_NOT (LIKELY(
2282 __ TaggedEqual(exception, LOAD_ROOT(TheHoleValue)))) {
2284 BuiltinCallDescriptor::WasmPropagateException>(
2286 }
2288
2289 if (callback_sig->return_count() > 0) {
2290 if (callback_sig->GetReturn() == MachineType::Bool()) {
2291 ret_val = __ WordBitwiseAnd(ret_val, __ Word32Constant(0xff),
2293 } else if (callback_sig->GetReturn() == MachineType::Int64()) {
2294 if (sig->GetReturn() == kWasmF64) {
2295 ret_val = __ ChangeInt64ToFloat64(ret_val);
2296 } else if (sig->GetReturn() == kWasmI32) {
2297 ret_val = __ TruncateWord64ToWord32(ret_val);
2298 } else if (sig->GetReturn() == kWasmF32) {
2299 ret_val = __ ChangeInt64ToFloat32(ret_val);
2300 }
2301 } else if (callback_sig->GetReturn() == MachineType::Uint64()) {
2302 if (sig->GetReturn() == kWasmF64) {
2303 ret_val = __ ChangeUint64ToFloat64(ret_val);
2304 } else if (sig->GetReturn() == kWasmI32) {
2305 ret_val = __ TruncateWord64ToWord32(ret_val);
2306 } else if (sig->GetReturn() == kWasmF32) {
2307 ret_val = __ ChangeUint64ToFloat32(ret_val);
2308 }
2309 }
2310 }
2311 Label<> done(&asm_);
2312 GOTO(done);
2313 BIND(value_out_of_range);
2314 auto [target, implicit_arg] =
2316 BuildWasmCall(decoder, imm.sig, target, implicit_arg, args, returns,
2318 __ Unreachable();
2319 BIND(done);
2320 if (sig->return_count()) {
2321 returns[0].op = ret_val;
2322 }
2323 }
2324
2325  bool HandleWellKnownImport(FullDecoder* decoder,
2326                             const CallFunctionImmediate& imm,
2327 const Value args[], Value returns[]) {
2328 uint32_t index = imm.index;
2329 if (!decoder->module_) return false; // Only needed for tests.
2330 const WellKnownImportsList& well_known_imports =
2331 decoder->module_->type_feedback.well_known_imports;
2332 using WKI = WellKnownImport;
2333 WKI imported_op = well_known_imports.get(index);
2335 switch (imported_op) {
2336 case WKI::kUninstantiated:
2337 case WKI::kGeneric:
2338 case WKI::kLinkError:
2339 return false;
2340
2341 // JS String Builtins proposal.
2342 case WKI::kStringCast: {
2344 decoder->detected_->add_imported_strings();
2345 break;
2346 }
2347 case WKI::kStringTest: {
2349 decoder->detected_->add_imported_strings();
2350 break;
2351 }
2352 case WKI::kStringCharCodeAt: {
2353 V<String> string = ExternRefToString(args[0]);
2354 V<String> view = __ StringAsWtf16(string);
2355 // TODO(14108): Annotate `view`'s type.
2356 result = GetCodeUnitImpl(decoder, view, args[1].op);
2357 decoder->detected_->add_imported_strings();
2358 break;
2359 }
2360 case WKI::kStringCodePointAt: {
2361 V<String> string = ExternRefToString(args[0]);
2362 V<String> view = __ StringAsWtf16(string);
2363 // TODO(14108): Annotate `view`'s type.
2364 result = StringCodePointAt(decoder, view, args[1].op);
2365 decoder->detected_->add_imported_strings();
2366 break;
2367 }
2368 case WKI::kStringCompare: {
2369 V<String> a_string = ExternRefToString(args[0]);
2370 V<String> b_string = ExternRefToString(args[1]);
2371 result = __ UntagSmi(
2373 decoder, {a_string, b_string}));
2374 decoder->detected_->add_imported_strings();
2375 break;
2376 }
2377 case WKI::kStringConcat: {
2378 V<String> head_string = ExternRefToString(args[0]);
2379 V<String> tail_string = ExternRefToString(args[1]);
2384 {head_string, tail_string});
2385 result = __ AnnotateWasmType(result_value, kWasmRefExternString);
2386 decoder->detected_->add_imported_strings();
2387 break;
2388 }
2389 case WKI::kStringEquals: {
2390 // Using nullable type guards here because this instruction needs to
2391 // handle {null} without trapping.
2392 static constexpr bool kNullSucceeds = true;
2393 V<String> a_string = ExternRefToString(args[0], kNullSucceeds);
2394 V<String> b_string = ExternRefToString(args[1], kNullSucceeds);
2395 result = StringEqImpl(decoder, a_string, b_string, kWasmExternRef,
2397 decoder->detected_->add_imported_strings();
2398 break;
2399 }
2400 case WKI::kStringFromCharCode: {
2401 V<Word32> capped = __ Word32BitwiseAnd(args[0].op, 0xFFFF);
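        // String.fromCharCode applies ToUint16 to its argument, hence the
        // 0xFFFF mask before calling the builtin.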
2403 BuiltinCallDescriptor::WasmStringFromCodePoint>(decoder, {capped});
2404 result = __ AnnotateWasmType(result_value, kWasmRefExternString);
2405 decoder->detected_->add_imported_strings();
2406 break;
2407 }
2408 case WKI::kStringFromCodePoint: {
2410 BuiltinCallDescriptor::WasmStringFromCodePoint>(decoder,
2411 {args[0].op});
2412 result = __ AnnotateWasmType(result_value, kWasmRefExternString);
2413 decoder->detected_->add_imported_strings();
2414 break;
2415 }
2416 case WKI::kStringFromWtf16Array: {
2418 BuiltinCallDescriptor::WasmStringNewWtf16Array>(
2419 decoder,
2420 {V<WasmArray>::Cast(NullCheck(args[0])), args[1].op, args[2].op});
2421 result = __ AnnotateWasmType(result_value, kWasmRefExternString);
2422 decoder->detected_->add_imported_strings();
2423 break;
2424 }
2425 case WKI::kStringFromUtf8Array:
2429 decoder->detected_->add_imported_strings();
2430 break;
2431 case WKI::kStringIntoUtf8Array: {
2432 V<String> string = ExternRefToString(args[0]);
2434 decoder, unibrow::Utf8Variant::kLossyUtf8, string,
2436 decoder->detected_->add_imported_strings();
2437 break;
2438 }
2439 case WKI::kStringToUtf8Array: {
2440 V<String> string = ExternRefToString(args[0]);
2442 BuiltinCallDescriptor::WasmStringToUtf8Array>(decoder, {string});
2443 result = __ AnnotateWasmType(result_value, returns[0].type);
2444 decoder->detected_->add_imported_strings();
2445 break;
2446 }
2447 case WKI::kStringLength: {
2448 V<Object> string = ExternRefToString(args[0]);
2449 result = __ template LoadField<Word32>(
2451 decoder->detected_->add_imported_strings();
2452 break;
2453 }
2454 case WKI::kStringMeasureUtf8: {
2455 V<String> string = ExternRefToString(args[0]);
2457 decoder, unibrow::Utf8Variant::kLossyUtf8, string);
2458 decoder->detected_->add_imported_strings();
2459 break;
2460 }
2461 case WKI::kStringSubstring: {
2462 V<String> string = ExternRefToString(args[0]);
2463 V<String> view = __ StringAsWtf16(string);
2464 // TODO(12868): Consider annotating {view}'s type when the typing story
2465 // for string views has been settled.
2467 BuiltinCallDescriptor::WasmStringViewWtf16Slice>(
2468 decoder, {view, args[1].op, args[2].op});
2469 result = __ AnnotateWasmType(result_value, kWasmRefExternString);
2470 decoder->detected_->add_imported_strings();
2471 break;
2472 }
2473 case WKI::kStringToWtf16Array: {
2474 V<String> string = ExternRefToString(args[0]);
2476 BuiltinCallDescriptor::WasmStringEncodeWtf16Array>(
2477 decoder,
2479 decoder->detected_->add_imported_strings();
2480 break;
2481 }
2482
2483 // Other string-related imports.
2484 case WKI::kDoubleToString: {
2485 BuildModifyThreadInWasmFlag(decoder->zone(), false);
2487 BuiltinCallDescriptor::WasmFloat64ToString>(decoder, {args[0].op});
2488 result = AnnotateAsString(result_value, returns[0].type);
2489 BuildModifyThreadInWasmFlag(decoder->zone(), true);
2490 decoder->detected_->Add(
2491 returns[0].type.is_reference_to(wasm::HeapType::kString)
2492 ? WasmDetectedFeature::stringref
2493 : WasmDetectedFeature::imported_strings);
2494 break;
2495 }
2496 case WKI::kIntToString: {
2497 BuildModifyThreadInWasmFlag(decoder->zone(), false);
2498 V<String> result_value =
2500 decoder, {args[0].op, args[1].op});
2501 result = AnnotateAsString(result_value, returns[0].type);
2502 BuildModifyThreadInWasmFlag(decoder->zone(), true);
2503 decoder->detected_->Add(
2504 returns[0].type.is_reference_to(wasm::HeapType::kString)
2505 ? WasmDetectedFeature::stringref
2506 : WasmDetectedFeature::imported_strings);
2507 break;
2508 }
2509 case WKI::kParseFloat: {
2510 if (args[0].type.is_nullable()) {
2511 Label<Float64> done(&asm_);
2512 GOTO_IF(__ IsNull(args[0].op, args[0].type), done,
2513 __ Float64Constant(std::numeric_limits<double>::quiet_NaN()));
2514
2515 BuildModifyThreadInWasmFlag(decoder->zone(), false);
2517 BuiltinCallDescriptor::WasmStringToDouble>(decoder, {args[0].op});
2518 BuildModifyThreadInWasmFlag(decoder->zone(), true);
2519 GOTO(done, not_null_res);
2520
2521 BIND(done, result_f64);
2522 result = result_f64;
2523 } else {
2524 BuildModifyThreadInWasmFlag(decoder->zone(), false);
2526 BuiltinCallDescriptor::WasmStringToDouble>(decoder, {args[0].op});
2527 BuildModifyThreadInWasmFlag(decoder->zone(), true);
2528 }
2529 decoder->detected_->add_stringref();
2530 break;
2531 }
2532 case WKI::kStringIndexOf: {
2533 V<String> string = args[0].op;
2534 V<String> search = args[1].op;
2535 V<Word32> start = args[2].op;
2536
2537 // If string is null, throw.
2538 if (args[0].type.is_nullable()) {
2539 IF (__ IsNull(string, args[0].type)) {
2541 BuiltinCallDescriptor::ThrowIndexOfCalledOnNull>(decoder, {});
2542 __ Unreachable();
2543 }
2544 }
2545
2546 // If search is null, replace it with "null".
2547 if (args[1].type.is_nullable()) {
2548 Label<String> search_done_label(&asm_);
2549 GOTO_IF_NOT(__ IsNull(search, args[1].type), search_done_label,
2550 search);
2551 GOTO(search_done_label, LOAD_ROOT(null_string));
2552 BIND(search_done_label, search_value);
2553 search = search_value;
2554 }
2555
2556 result = GetStringIndexOf(decoder, string, search, start);
2557 decoder->detected_->add_stringref();
2558 break;
2559 }
2560 case WKI::kStringIndexOfImported: {
2561 // As the `string` and `search` parameters are externrefs, we have to
2562 // make sure they are strings. To enforce this, we inline only if a
2563 // (successful) `"js-string":"cast"` was performed before.
2564        if (!IsExplicitStringCast(args[0]) || !IsExplicitStringCast(args[1])) {
2565          return false;
2566 }
2567 V<String> string = args[0].op;
2568 V<String> search = args[1].op;
2569 V<Word32> start = args[2].op;
2570
2571 result = GetStringIndexOf(decoder, string, search, start);
2572 decoder->detected_->add_imported_strings();
2573 break;
2574 }
2575 case WKI::kStringToLocaleLowerCaseStringref:
2576 // TODO(14108): Implement.
2577 return false;
2578 case WKI::kStringToLowerCaseStringref: {
2579#if V8_INTL_SUPPORT
2580 V<String> string = args[0].op;
2581 if (args[0].type.is_nullable()) {
2582 IF (__ IsNull(string, args[0].type)) {
2584 BuiltinCallDescriptor::ThrowToLowerCaseCalledOnNull>(decoder,
2585 {});
2586 __ Unreachable();
2587 }
2588 }
2589 V<String> result_value = CallStringToLowercase(decoder, string);
2590 result = __ AnnotateWasmType(result_value, kWasmRefString);
2591 decoder->detected_->add_stringref();
2592 break;
2593#else
2594 return false;
2595#endif
2596 }
2597 case WKI::kStringToLowerCaseImported: {
2598 // We have to make sure that the externref `string` parameter is a
2599 // string. To enforce this, we inline only if a (successful)
2600 // `"js-string":"cast"` was performed before.
2601#if V8_INTL_SUPPORT
2602 if (!IsExplicitStringCast(args[0])) {
2603 return false;
2604 }
2605 V<String> string = args[0].op;
2606 V<String> result_value = CallStringToLowercase(decoder, string);
2607 result = __ AnnotateWasmType(result_value, kWasmRefExternString);
2608 decoder->detected_->add_imported_strings();
2609 break;
2610#else
2611 return false;
2612#endif
2613 }
2614
2615 // DataView related imports.
2616 // Note that we don't support DataView imports for resizable ArrayBuffers.
2617 case WKI::kDataViewGetBigInt64: {
2618 result = DataViewGetter(decoder, args, DataViewOp::kGetBigInt64);
2619 break;
2620 }
2621 case WKI::kDataViewGetBigUint64:
2622 result = DataViewGetter(decoder, args, DataViewOp::kGetBigUint64);
2623 break;
2624 case WKI::kDataViewGetFloat32:
2625 result = DataViewGetter(decoder, args, DataViewOp::kGetFloat32);
2626 break;
2627 case WKI::kDataViewGetFloat64:
2628 result = DataViewGetter(decoder, args, DataViewOp::kGetFloat64);
2629 break;
2630 case WKI::kDataViewGetInt8:
2631 result = DataViewGetter(decoder, args, DataViewOp::kGetInt8);
2632 break;
2633 case WKI::kDataViewGetInt16:
2634 result = DataViewGetter(decoder, args, DataViewOp::kGetInt16);
2635 break;
2636 case WKI::kDataViewGetInt32:
2637 result = DataViewGetter(decoder, args, DataViewOp::kGetInt32);
2638 break;
2639 case WKI::kDataViewGetUint8:
2640 result = DataViewGetter(decoder, args, DataViewOp::kGetUint8);
2641 break;
2642 case WKI::kDataViewGetUint16:
2643 result = DataViewGetter(decoder, args, DataViewOp::kGetUint16);
2644 break;
2645 case WKI::kDataViewGetUint32:
2646 result = DataViewGetter(decoder, args, DataViewOp::kGetUint32);
2647 break;
2648 case WKI::kDataViewSetBigInt64:
2649 DataViewSetter(decoder, args, DataViewOp::kSetBigInt64);
2650 break;
2651 case WKI::kDataViewSetBigUint64:
2652 DataViewSetter(decoder, args, DataViewOp::kSetBigUint64);
2653 break;
2654 case WKI::kDataViewSetFloat32:
2655 DataViewSetter(decoder, args, DataViewOp::kSetFloat32);
2656 break;
2657 case WKI::kDataViewSetFloat64:
2658 DataViewSetter(decoder, args, DataViewOp::kSetFloat64);
2659 break;
2660 case WKI::kDataViewSetInt8:
2661 DataViewSetter(decoder, args, DataViewOp::kSetInt8);
2662 break;
2663 case WKI::kDataViewSetInt16:
2664 DataViewSetter(decoder, args, DataViewOp::kSetInt16);
2665 break;
2666 case WKI::kDataViewSetInt32:
2667 DataViewSetter(decoder, args, DataViewOp::kSetInt32);
2668 break;
2669 case WKI::kDataViewSetUint8:
2670 DataViewSetter(decoder, args, DataViewOp::kSetUint8);
2671 break;
2672 case WKI::kDataViewSetUint16:
2673 DataViewSetter(decoder, args, DataViewOp::kSetUint16);
2674 break;
2675 case WKI::kDataViewSetUint32:
2676 DataViewSetter(decoder, args, DataViewOp::kSetUint32);
2677 break;
2678 case WKI::kDataViewByteLength: {
2679 V<Object> dataview = args[0].op;
2680 V<WordPtr> view_byte_length =
2682 if constexpr (Is64()) {
2683 result =
2684 __ ChangeInt64ToFloat64(__ ChangeIntPtrToInt64(view_byte_length));
2685 } else {
2686 result = __ ChangeInt32ToFloat64(
2687 __ TruncateWordPtrToWord32(view_byte_length));
2688 }
2689 break;
2690 }
2691
2692 // Math functions.
2693 case WKI::kMathF64Acos:
2694 result = __ Float64Acos(args[0].op);
2695 break;
2696 case WKI::kMathF64Asin:
2697 result = __ Float64Asin(args[0].op);
2698 break;
2699 case WKI::kMathF64Atan:
2700 result = __ Float64Atan(args[0].op);
2701 break;
2702 case WKI::kMathF64Atan2:
2703 result = __ Float64Atan2(args[0].op, args[1].op);
2704 break;
2705 case WKI::kMathF64Cos:
2706 result = __ Float64Cos(args[0].op);
2707 break;
2708 case WKI::kMathF64Sin:
2709 result = __ Float64Sin(args[0].op);
2710 break;
2711 case WKI::kMathF64Tan:
2712 result = __ Float64Tan(args[0].op);
2713 break;
2714 case WKI::kMathF64Exp:
2715 result = __ Float64Exp(args[0].op);
2716 break;
2717 case WKI::kMathF64Log:
2718 result = __ Float64Log(args[0].op);
2719 break;
2720 case WKI::kMathF64Pow:
2721 result = __ Float64Power(args[0].op, args[1].op);
2722 break;
2723 case WKI::kMathF64Sqrt:
2724 result = __ Float64Sqrt(args[0].op);
2725 break;
2726
2727 // Fast API calls.
2728 case WKI::kFastAPICall: {
2729 WellKnown_FastApi(decoder, imm, args, returns);
2730 result = returns[0].op;
2731 break;
2732 }
2733 }
2734 if (v8_flags.trace_wasm_inlining) {
2735 PrintF("[function %d: call to %d is well-known %s]\n", func_index_, index,
2736 WellKnownImportName(imported_op));
2737 }
2738 if (!*assumptions_) *assumptions_ = std::make_unique<AssumptionsJournal>();
2739 (*assumptions_)->RecordAssumption(index, imported_op);
2740 returns[0].op = result;
2741 return true;
2742 }
2743
2744  void CallDirect(FullDecoder* decoder, const CallFunctionImmediate& imm,
2745                  const Value args[], Value returns[]) {
2747 if (imm.index < decoder->module_->num_imported_functions) {
2748 if (HandleWellKnownImport(decoder, imm, args, returns)) {
2749 return;
2750 }
2751 auto [target, implicit_arg] =
2753 BuildWasmCall(decoder, imm.sig, target, implicit_arg, args, returns,
2755 } else {
2756 // Locally defined function.
2757 if (should_inline(decoder, feedback_slot_,
2758 decoder->module_->functions[imm.index].code.length())) {
2759 if (v8_flags.trace_wasm_inlining) {
2760 PrintF("[function %d%s: inlining direct call #%d to function %d]\n",
2761 func_index_, mode_ == kRegular ? "" : " (inlined)",
2762 feedback_slot_, imm.index);
2763 }
2764 InlineWasmCall(decoder, imm.index, imm.sig, 0, false, args, returns);
2765 } else {
2766 V<WordPtr> callee =
2767 __ RelocatableConstant(imm.index, RelocInfo::WASM_CALL);
2768 BuildWasmCall(decoder, imm.sig, callee,
2770 decoder->module_->function_is_shared(imm.index)),
2771 args, returns);
2772 }
2773 }
2774 }
2775
2776  void ReturnCall(FullDecoder* decoder, const CallFunctionImmediate& imm,
2777                  const Value args[]) {
2779 if (imm.index < decoder->module_->num_imported_functions) {
2780 auto [target, implicit_arg] =
2782 BuildWasmMaybeReturnCall(decoder, imm.sig, target, implicit_arg, args,
2784 } else {
2785 // Locally defined function.
2786 if (should_inline(decoder, feedback_slot_,
2787 decoder->module_->functions[imm.index].code.length())) {
2788 if (v8_flags.trace_wasm_inlining) {
2789 PrintF(
2790 "[function %d%s: inlining direct tail call #%d to function %d]\n",
2791 func_index_, mode_ == kRegular ? "" : " (inlined)",
2792 feedback_slot_, imm.index);
2793 }
2794 InlineWasmCall(decoder, imm.index, imm.sig, 0, true, args, nullptr);
2795 } else {
2797 decoder, imm.sig,
2798 __ RelocatableConstant(imm.index, RelocInfo::WASM_CALL),
2800 decoder->module_->function_is_shared(imm.index)),
2801 args);
2802 }
2803 }
2804 }
2805
2806 void CallIndirect(FullDecoder* decoder, const Value& index,
2807 const CallIndirectImmediate& imm, const Value args[],
2808 Value returns[]) {
2809 if (v8_flags.wasm_inlining_call_indirect) {
2810 CHECK(v8_flags.wasm_inlining);
2812      // If we are in unreachable code, skip this, because it would try to
2813      // access nodes which might be non-existent (OpIndex::Invalid()).
2814 if (__ generating_unreachable_operations()) return;
2815
2816 if (should_inline(decoder, feedback_slot_,
2817 std::numeric_limits<int>::max())) {
2819 imm.table_imm.table->address_type, index.op);
2820
2821 DCHECK(!shared_);
2822 // Load the instance here even though it's only used below, in the hope
2823 // that load elimination can use it when fetching the target next.
2825
2826 // We are only interested in the target here for comparison against
2827 // the inlined call target below.
2828 // In particular, we don't need a dynamic type or null check: If the
2829 // actual call target (at runtime) is equal to the inlined call target,
2830 // we know already from the static check on the inlinee (see below) that
2831 // the inlined code has the right signature.
2832 constexpr bool kNeedsTypeOrNullCheck = false;
2833 auto [target, implicit_arg] = BuildIndirectCallTargetAndImplicitArg(
2834 decoder, index_wordptr, imm, kNeedsTypeOrNullCheck);
2835
2836 size_t return_count = imm.sig->return_count();
2837 base::Vector<InliningTree*> feedback_cases =
2839 std::vector<base::SmallVector<OpIndex, 2>> case_returns(return_count);
2840 // The slow path is the non-inlined generic `call_indirect`,
2841 // or a deopt node if that is enabled.
2842 constexpr int kSlowpathCase = 1;
2844 case_blocks;
2845 for (size_t i = 0; i < feedback_cases.size() + kSlowpathCase; i++) {
2846 case_blocks.push_back(__ NewBlock());
2847 }
2848 // Block for the slowpath, i.e., the not-inlined call or deopt.
2849 TSBlock* no_inline_block = case_blocks.back();
2850 // Block for merging results after the inlined code.
2851 TSBlock* merge = __ NewBlock();
2852
2853 // Always create a frame state, but rely on DCE to remove it in case we
2854 // end up not using deopts. This allows us to share the frame state
2855 // between a deopt due to wrong instance and deopt due to wrong target.
2856 V<FrameState> frame_state =
2857 CreateFrameState(decoder, imm.sig, &index, args);
2858 bool use_deopt_slowpath = deopts_enabled();
2859 DCHECK_IMPLIES(use_deopt_slowpath, frame_state.valid());
2860 if (use_deopt_slowpath &&
2862 if (v8_flags.trace_wasm_inlining) {
2863 PrintF(
2864 "[function %d%s: Not emitting deopt slow-path for "
2865 "call_indirect #%d as feedback contains non-inlineable "
2866 "targets]\n",
2867 func_index_, mode_ == kRegular ? "" : " (inlined)",
2869 }
2870 use_deopt_slowpath = false;
2871 }
2872
2873 // Wasm functions are semantically closures over the instance, but
2874 // when we inline a target in the following, we implicitly assume the
2875 // inlinee instance is the same as the caller's instance.
2876 // Directly jump to the non-inlined slowpath if that's violated.
2877 // Note that for `call_ref` this isn't necessary, because the funcref
2878 // equality check already captures both code and instance equality.
2879 constexpr BranchHint kUnlikelyCrossInstanceCall = BranchHint::kTrue;
2880 // Note that the `implicit_arg` can never be a `WasmImportData`,
2881 // since we don't inline imported functions right now.
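        // The hint below marks the same-instance (true) branch as likely,
        // since cross-instance calls through a shared table are expected to
        // be rare.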
2882 __ Branch({__ TaggedEqual(implicit_arg, instance),
2883 kUnlikelyCrossInstanceCall},
2884 case_blocks[0], no_inline_block);
2885
2886 for (size_t i = 0; i < feedback_cases.size(); i++) {
2887 __ Bind(case_blocks[i]);
2888 InliningTree* tree = feedback_cases[i];
2889 if (!tree || !tree->is_inlined()) {
2890 // Fall through to the next case.
2891 __ Goto(case_blocks[i + 1]);
2892 // Do not use the deopt slowpath if we decided to not inline (at
2893 // least) one call target.
2894 // Otherwise, this could lead to a deopt loop.
2895 use_deopt_slowpath = false;
2896 continue;
2897 }
2898 uint32_t inlined_index = tree->function_index();
2899 // Ensure that we only inline if the inlinee's signature is compatible
2900 // with the call_indirect. In other words, perform the type check that
2901 // would normally be done dynamically (see above
2902 // `BuildIndirectCallTargetAndImplicitArg`) statically on the inlined
2903 // target. This can fail, e.g., because the mapping of feedback back
2904 // to function indices may produce spurious targets, or because the
2905 // feedback in the JS heap has been corrupted by a vulnerability.
2907 decoder->module_, imm.sig,
2908 decoder->module_->functions[inlined_index].sig)) {
2909 __ Goto(case_blocks[i + 1]);
2910 continue;
2911 }
2912
2913 V<Word32> inlined_target =
2914 __ RelocatableWasmIndirectCallTarget(inlined_index);
2915
2916 bool is_last_feedback_case = (i == feedback_cases.size() - 1);
2917 if (use_deopt_slowpath && is_last_feedback_case) {
2918 DeoptIfNot(decoder, __ Word32Equal(target, inlined_target),
2919 frame_state);
2920 } else {
2921 TSBlock* inline_block = __ NewBlock();
2922 BranchHint hint =
2923 is_last_feedback_case ? BranchHint::kTrue : BranchHint::kNone;
2924 __ Branch({__ Word32Equal(target, inlined_target), hint},
2925 inline_block, case_blocks[i + 1]);
2926 __ Bind(inline_block);
2927 }
2928
2929 SmallZoneVector<Value, 4> direct_returns(return_count,
2930 decoder->zone_);
2931 if (v8_flags.trace_wasm_inlining) {
2932 PrintF(
2933 "[function %d%s: Speculatively inlining call_indirect #%d, "
2934 "case #%zu, to function %d]\n",
2935 func_index_, mode_ == kRegular ? "" : " (inlined)",
2936 feedback_slot_, i, inlined_index);
2937 }
2938 InlineWasmCall(decoder, inlined_index, imm.sig,
2939 static_cast<uint32_t>(i), false, args,
2940 direct_returns.data());
2941
2942 if (__ current_block() != nullptr) {
2943 // Only add phi inputs and a Goto to {merge} if the current_block is
2944 // not nullptr. If the current_block is nullptr, it means that the
2945 // inlined body unconditionally exits early (likely an unconditional
2946 // trap or throw).
2947 for (size_t ret = 0; ret < direct_returns.size(); ret++) {
2948 case_returns[ret].push_back(direct_returns[ret].op);
2949 }
2950 __ Goto(merge);
2951 }
2952 }
2953
2954 __ Bind(no_inline_block);
2955 if (use_deopt_slowpath) {
2956 // We need this unconditional deopt only for the "instance check",
2957 // as the last "target check" already uses a `DeoptIfNot` node.
2958 Deopt(decoder, frame_state);
2959 } else {
2960 auto [call_target, call_implicit_arg] =
2961 BuildIndirectCallTargetAndImplicitArg(decoder, index_wordptr,
2962 imm);
2963 SmallZoneVector<Value, 4> indirect_returns(return_count,
2964 decoder->zone_);
2965 BuildWasmCall(decoder, imm.sig, call_target, call_implicit_arg, args,
2966 indirect_returns.data(),
2968 for (size_t ret = 0; ret < indirect_returns.size(); ret++) {
2969 case_returns[ret].push_back(indirect_returns[ret].op);
2970 }
2971 __ Goto(merge);
2972 }
2973
2974 __ Bind(merge);
2975 for (size_t i = 0; i < case_returns.size(); i++) {
2976 returns[i].op = __ Phi(base::VectorOf(case_returns[i]),
2977 RepresentationFor(imm.sig->GetReturn(i)));
2978 }
2979
2980 return;
2981 } // should_inline
2982 } // v8_flags.wasm_inlining_call_indirect
2983
2984 // Didn't inline.
2986 imm.table_imm.table->address_type, index.op);
2987 auto [target, implicit_arg] =
2988 BuildIndirectCallTargetAndImplicitArg(decoder, index_wordptr, imm);
2989 BuildWasmCall(decoder, imm.sig, target, implicit_arg, args, returns,
2991 }
2992
2993 void ReturnCallIndirect(FullDecoder* decoder, const Value& index,
2994 const CallIndirectImmediate& imm,
2995 const Value args[]) {
2996 if (v8_flags.wasm_inlining_call_indirect) {
2997 CHECK(v8_flags.wasm_inlining);
2999
3000 if (should_inline(decoder, feedback_slot_,
3001 std::numeric_limits<int>::max())) {
3003 imm.table_imm.table->address_type, index.op);
3004
3005 DCHECK(!shared_);
3006 // Load the instance here even though it's only used below, in the hope
3007 // that load elimination can use it when fetching the target next.
3009
3010 // We are only interested in the target here for comparison against
3011 // the inlined call target below.
3012 // In particular, we don't need a dynamic type or null check: If the
3013 // actual call target (at runtime) is equal to the inlined call target,
3014 // we know already from the static check on the inlinee (see below) that
3015 // the inlined code has the right signature.
3016 constexpr bool kNeedsTypeOrNullCheck = false;
3017 auto [target, implicit_arg] = BuildIndirectCallTargetAndImplicitArg(
3018 decoder, index_wordptr, imm, kNeedsTypeOrNullCheck);
3019
3020 base::Vector<InliningTree*> feedback_cases =
3022 constexpr int kSlowpathCase = 1;
3024 case_blocks;
3025 for (size_t i = 0; i < feedback_cases.size() + kSlowpathCase; i++) {
3026 case_blocks.push_back(__ NewBlock());
3027 }
3028 // Block for the slowpath, i.e., the not-inlined call.
3029 TSBlock* no_inline_block = case_blocks.back();
3030
3031 // Wasm functions are semantically closures over the instance, but
3032 // when we inline a target in the following, we implicitly assume the
3033 // inlinee instance is the same as the caller's instance.
3034 // Directly jump to the non-inlined slowpath if that's violated.
3035 // Note that for `call_ref` this isn't necessary, because the funcref
3036 // equality check already captures both code and instance equality.
3037 constexpr BranchHint kUnlikelyCrossInstanceCall = BranchHint::kTrue;
3038 // Note that the `implicit_arg` can never be a `WasmImportData`,
3039 // since we don't inline imported functions right now.
3040 __ Branch({__ TaggedEqual(implicit_arg, instance),
3041 kUnlikelyCrossInstanceCall},
3042 case_blocks[0], no_inline_block);
3043
3044 for (size_t i = 0; i < feedback_cases.size(); i++) {
3045 __ Bind(case_blocks[i]);
3046 InliningTree* tree = feedback_cases[i];
3047 if (!tree || !tree->is_inlined()) {
3048 // Fall through to the next case.
3049 __ Goto(case_blocks[i + 1]);
3050 continue;
3051 }
3052 uint32_t inlined_index = tree->function_index();
3053 // Ensure that we only inline if the inlinee's signature is compatible
3054 // with the call_indirect. In other words, perform the type check that
3055 // would normally be done dynamically (see above
3056 // `BuildIndirectCallTargetAndImplicitArg`) statically on the inlined
3057 // target. This can fail, e.g., because the mapping of feedback back
3058 // to function indices may produce spurious targets, or because the
3059 // feedback in the JS heap has been corrupted by a vulnerability.
3061 decoder->module_, imm.sig,
3062 decoder->module_->functions[inlined_index].sig)) {
3063 __ Goto(case_blocks[i + 1]);
3064 continue;
3065 }
3066
3067 V<Word32> inlined_target =
3068 __ RelocatableWasmIndirectCallTarget(inlined_index);
3069
3070 TSBlock* inline_block = __ NewBlock();
3071 bool is_last_case = (i == feedback_cases.size() - 1);
3072 BranchHint hint =
3073 is_last_case ? BranchHint::kTrue : BranchHint::kNone;
3074 __ Branch({__ Word32Equal(target, inlined_target), hint},
3075 inline_block, case_blocks[i + 1]);
3076 __ Bind(inline_block);
3077 if (v8_flags.trace_wasm_inlining) {
3078 PrintF(
3079 "[function %d%s: Speculatively inlining return_call_indirect "
3080 "#%d, case #%zu, to function %d]\n",
3081 func_index_, mode_ == kRegular ? "" : " (inlined)",
3082 feedback_slot_, i, inlined_index);
3083 }
3084 InlineWasmCall(decoder, inlined_index, imm.sig,
3085 static_cast<uint32_t>(i), true, args, nullptr);
3086
3087 // An inlined tail call should still terminate execution.
3088 DCHECK_NULL(__ current_block());
3089 }
3090
3091 __ Bind(no_inline_block);
3092 } // should_inline
3093 } // v8_flags.wasm_inlining_call_indirect
3094
3095 // Didn't inline.
3097 imm.table_imm.table->address_type, index.op);
3098 auto [target, implicit_arg] =
3099 BuildIndirectCallTargetAndImplicitArg(decoder, index_wordptr, imm);
3100 BuildWasmMaybeReturnCall(decoder, imm.sig, target, implicit_arg, args,
3102 }
3103
3104 void CallRef(FullDecoder* decoder, const Value& func_ref,
3105 const FunctionSig* sig, const Value args[], Value returns[]) {
3106 // TODO(14108): As the slot needs to be aligned with Liftoff, ideally the
3107 // stack slot index would be provided by the decoder and passed to both
3108 // Liftoff and Turbofan.
3110      // If we are in unreachable code, skip this, because it would try to
3111      // access nodes which might be non-existent (OpIndex::Invalid()).
3112 if (__ generating_unreachable_operations()) return;
3113
3114 if (should_inline(decoder, feedback_slot_,
3115 std::numeric_limits<int>::max())) {
3116 DCHECK(!shared_);
3120
3121 size_t return_count = sig->return_count();
3122 base::Vector<InliningTree*> feedback_cases =
3124 std::vector<base::SmallVector<OpIndex, 2>> case_returns(return_count);
3125 // The slow path is the non-inlined generic `call_ref`,
3126 // or a deopt node if that is enabled.
3127 constexpr int kSlowpathCase = 1;
3129 case_blocks;
3130 for (size_t i = 0; i < feedback_cases.size() + kSlowpathCase; i++) {
3131 case_blocks.push_back(__ NewBlock());
3132 }
3133 TSBlock* merge = __ NewBlock();
3134 __ Goto(case_blocks[0]);
3135
3136 bool use_deopt_slowpath = deopts_enabled();
3137 for (size_t i = 0; i < feedback_cases.size(); i++) {
3138 __ Bind(case_blocks[i]);
3139 InliningTree* tree = feedback_cases[i];
3140 if (!tree || !tree->is_inlined()) {
3141 // Fall through to the next case.
3142 __ Goto(case_blocks[i + 1]);
3143 // Do not use the deopt slowpath if we decided to not inline (at
3144 // least) one call target. Otherwise, this could lead to a deopt loop.
3145 use_deopt_slowpath = false;
3146 continue;
3147 }
3148 uint32_t inlined_index = tree->function_index();
3149 DCHECK(!decoder->module_->function_is_shared(inlined_index));
3150 V<Object> inlined_func_ref =
3151 __ LoadFixedArrayElement(func_refs, inlined_index);
3152
3153 bool is_last_feedback_case = (i == feedback_cases.size() - 1);
3154 if (use_deopt_slowpath && is_last_feedback_case) {
3156 ->has_non_inlineable_targets()[feedback_slot_]) {
3157 if (v8_flags.trace_wasm_inlining) {
3158 PrintF(
3159 "[function %d%s: Not emitting deopt slow-path for "
3160 "call_ref #%d as feedback contains non-inlineable "
3161 "targets]\n",
3162 func_index_, mode_ == kRegular ? "" : " (inlined)",
3164 }
3165 use_deopt_slowpath = false;
3166 }
3167 }
3168 bool emit_deopt = use_deopt_slowpath && is_last_feedback_case;
3169 if (emit_deopt) {
3170 V<FrameState> frame_state =
3171 CreateFrameState(decoder, sig, &func_ref, args);
3172 if (frame_state.valid()) {
3173 DeoptIfNot(decoder, __ TaggedEqual(func_ref.op, inlined_func_ref),
3174 frame_state);
3175 } else {
3176 emit_deopt = false;
3177 use_deopt_slowpath = false;
3178 }
3179 }
3180 if (!emit_deopt) {
3181 TSBlock* inline_block = __ NewBlock();
3182 BranchHint hint =
3183 is_last_feedback_case ? BranchHint::kTrue : BranchHint::kNone;
3184 __ Branch({__ TaggedEqual(func_ref.op, inlined_func_ref), hint},
3185 inline_block, case_blocks[i + 1]);
3186 __ Bind(inline_block);
3187 }
3188
3189 SmallZoneVector<Value, 4> direct_returns(return_count, decoder->zone_);
3190 if (v8_flags.trace_wasm_inlining) {
3191 PrintF(
3192 "[function %d%s: Speculatively inlining call_ref #%d, case #%zu, "
3193 "to function %d]\n",
3194 func_index_, mode_ == kRegular ? "" : " (inlined)",
3195 feedback_slot_, i, inlined_index);
3196 }
3197 InlineWasmCall(decoder, inlined_index, sig, static_cast<uint32_t>(i),
3198 false, args, direct_returns.data());
3199
3200 if (__ current_block() != nullptr) {
3201 // Only add phi inputs and a Goto to {merge} if the current_block is
3202 // not nullptr. If the current_block is nullptr, it means that the
3203 // inlined body unconditionally exits early (likely an unconditional
3204 // trap or throw).
3205 for (size_t ret = 0; ret < direct_returns.size(); ret++) {
3206 case_returns[ret].push_back(direct_returns[ret].op);
3207 }
3208 __ Goto(merge);
3209 }
3210 }
3211
3212 if (!use_deopt_slowpath) {
3213 TSBlock* no_inline_block = case_blocks.back();
3214 __ Bind(no_inline_block);
3215 auto [target, implicit_arg] =
3217 func_ref.type);
3218 SmallZoneVector<Value, 4> ref_returns(return_count, decoder->zone_);
3219 BuildWasmCall(decoder, sig, target, implicit_arg, args,
3220 ref_returns.data(), compiler::kWasmIndirectFunction);
3221 for (size_t ret = 0; ret < ref_returns.size(); ret++) {
3222 case_returns[ret].push_back(ref_returns[ret].op);
3223 }
3224 __ Goto(merge);
3225 }
3226
3227 __ Bind(merge);
3228 for (size_t i = 0; i < case_returns.size(); i++) {
3229 returns[i].op = __ Phi(base::VectorOf(case_returns[i]),
3230 RepresentationFor(sig->GetReturn(i)));
3231 }
3232 } else {
3234 func_ref.op, func_ref.type);
3235 BuildWasmCall(decoder, sig, target, implicit_arg, args, returns,
3237 }
3238 }
3239
3240 void ReturnCallRef(FullDecoder* decoder, const Value& func_ref,
3241 const FunctionSig* sig, const Value args[]) {
3243
3244 if (should_inline(decoder, feedback_slot_,
3245 std::numeric_limits<int>::max())) {
3246 DCHECK(!shared_);
3250
3251 base::Vector<InliningTree*> feedback_cases =
3253 constexpr int kSlowpathCase = 1;
3255 case_blocks;
3256
3257 for (size_t i = 0; i < feedback_cases.size() + kSlowpathCase; i++) {
3258 case_blocks.push_back(__ NewBlock());
3259 }
3260 __ Goto(case_blocks[0]);
3261
3262 for (size_t i = 0; i < feedback_cases.size(); i++) {
3263 __ Bind(case_blocks[i]);
3264 InliningTree* tree = feedback_cases[i];
3265 if (!tree || !tree->is_inlined()) {
3266 // Fall through to the next case.
3267 __ Goto(case_blocks[i + 1]);
3268 continue;
3269 }
3270 uint32_t inlined_index = tree->function_index();
3271 DCHECK(!decoder->module_->function_is_shared(inlined_index));
3272 V<Object> inlined_func_ref =
3273 __ LoadFixedArrayElement(func_refs, inlined_index);
3274
3275 TSBlock* inline_block = __ NewBlock();
3276 bool is_last_case = (i == feedback_cases.size() - 1);
3277 BranchHint hint = is_last_case ? BranchHint::kTrue : BranchHint::kNone;
3278 __ Branch({__ TaggedEqual(func_ref.op, inlined_func_ref), hint},
3279 inline_block, case_blocks[i + 1]);
3280 __ Bind(inline_block);
3281 if (v8_flags.trace_wasm_inlining) {
3282 PrintF(
3283 "[function %d%s: Speculatively inlining return_call_ref #%d, "
3284 "case #%zu, to function %d]\n",
3285 func_index_, mode_ == kRegular ? "" : " (inlined)",
3286 feedback_slot_, i, inlined_index);
3287 }
3288 InlineWasmCall(decoder, inlined_index, sig, static_cast<uint32_t>(i),
3289 true, args, nullptr);
3290
3291 // An inlined tail call should still terminate execution.
3292 DCHECK_NULL(__ current_block());
3293 }
3294
3295 TSBlock* no_inline_block = case_blocks.back();
3296 __ Bind(no_inline_block);
3297 }
3298 auto [target, implicit_arg] =
3300 BuildWasmMaybeReturnCall(decoder, sig, target, implicit_arg, args,
3302 }
3303
3304 void BrOnNull(FullDecoder* decoder, const Value& ref_object, uint32_t depth,
3305 bool pass_null_along_branch, Value* result_on_fallthrough) {
3306 result_on_fallthrough->op = ref_object.op;
3307 IF (UNLIKELY(__ IsNull(ref_object.op, ref_object.type))) {
3308 int drop_values = pass_null_along_branch ? 0 : 1;
3309 BrOrRet(decoder, depth, drop_values);
3310 }
3311 }
3312
3313 void BrOnNonNull(FullDecoder* decoder, const Value& ref_object, Value* result,
3314 uint32_t depth, bool /* drop_null_on_fallthrough */) {
3315 result->op = ref_object.op;
3316 IF_NOT (UNLIKELY(__ IsNull(ref_object.op, ref_object.type))) {
3317 BrOrRet(decoder, depth);
3318 }
3319 }
3320
3321 void SimdOp(FullDecoder* decoder, WasmOpcode opcode, const Value* args,
3322 Value* result) {
3323 switch (opcode) {
3324#define HANDLE_BINARY_OPCODE(kind) \
3325 case kExpr##kind: \
3326 result->op = \
3327 __ Simd128Binop(V<compiler::turboshaft::Simd128>::Cast(args[0].op), \
3328 V<compiler::turboshaft::Simd128>::Cast(args[1].op), \
3329 compiler::turboshaft::Simd128BinopOp::Kind::k##kind); \
3330 break;
3331 FOREACH_SIMD_128_BINARY_MANDATORY_OPCODE(HANDLE_BINARY_OPCODE)
3332#undef HANDLE_BINARY_OPCODE
3333#define HANDLE_F16X8_BIN_OPTIONAL_OPCODE(kind, extern_ref) \
3334 case kExprF16x8##kind: \
3335 if (SupportedOperations::float16()) { \
3336 result->op = __ Simd128Binop( \
3337 V<compiler::turboshaft::Simd128>::Cast(args[0].op), \
3338 V<compiler::turboshaft::Simd128>::Cast(args[1].op), \
3339 compiler::turboshaft::Simd128BinopOp::Kind::kF16x8##kind); \
3340 } else { \
3341 result->op = CallCStackSlotToStackSlot(args[0].op, args[1].op, \
3342 ExternalReference::extern_ref(), \
3343 MemoryRepresentation::Simd128()); \
3344 } \
3345 break;
3346
3347 HANDLE_F16X8_BIN_OPTIONAL_OPCODE(Add, wasm_f16x8_add)
3348 HANDLE_F16X8_BIN_OPTIONAL_OPCODE(Sub, wasm_f16x8_sub)
3349 HANDLE_F16X8_BIN_OPTIONAL_OPCODE(Mul, wasm_f16x8_mul)
3350 HANDLE_F16X8_BIN_OPTIONAL_OPCODE(Div, wasm_f16x8_div)
3351 HANDLE_F16X8_BIN_OPTIONAL_OPCODE(Min, wasm_f16x8_min)
3352 HANDLE_F16X8_BIN_OPTIONAL_OPCODE(Max, wasm_f16x8_max)
3353 HANDLE_F16X8_BIN_OPTIONAL_OPCODE(Pmin, wasm_f16x8_pmin)
3354 HANDLE_F16X8_BIN_OPTIONAL_OPCODE(Pmax, wasm_f16x8_pmax)
3355 HANDLE_F16X8_BIN_OPTIONAL_OPCODE(Eq, wasm_f16x8_eq)
3356 HANDLE_F16X8_BIN_OPTIONAL_OPCODE(Ne, wasm_f16x8_ne)
3357 HANDLE_F16X8_BIN_OPTIONAL_OPCODE(Lt, wasm_f16x8_lt)
3358 HANDLE_F16X8_BIN_OPTIONAL_OPCODE(Le, wasm_f16x8_le)
3359#undef HANDLE_F16X8_BIN_OPTIONAL_OPCODE
3360
3361#define HANDLE_F16X8_INVERSE_COMPARISON(kind, ts_kind, extern_ref) \
3362 case kExprF16x8##kind: \
3363 if (SupportedOperations::float16()) { \
3364 result->op = __ Simd128Binop( \
3365 V<compiler::turboshaft::Simd128>::Cast(args[1].op), \
3366 V<compiler::turboshaft::Simd128>::Cast(args[0].op), \
3367 compiler::turboshaft::Simd128BinopOp::Kind::kF16x8##ts_kind); \
3368 } else { \
3369 result->op = CallCStackSlotToStackSlot(args[1].op, args[0].op, \
3370 ExternalReference::extern_ref(), \
3371 MemoryRepresentation::Simd128()); \
3372 } \
3373 break;
3374
3375 HANDLE_F16X8_INVERSE_COMPARISON(Gt, Lt, wasm_f16x8_lt)
3376 HANDLE_F16X8_INVERSE_COMPARISON(Ge, Le, wasm_f16x8_le)
3377#undef HANDLE_F16X8_INVERSE_COMPARISON
3378
3379#define HANDLE_INVERSE_COMPARISON(wasm_kind, ts_kind) \
3380 case kExpr##wasm_kind: \
3381 result->op = __ Simd128Binop( \
3382 V<compiler::turboshaft::Simd128>::Cast(args[1].op), \
3383 V<compiler::turboshaft::Simd128>::Cast(args[0].op), \
3384 compiler::turboshaft::Simd128BinopOp::Kind::k##ts_kind); \
3385 break;
3386
3387 HANDLE_INVERSE_COMPARISON(I8x16LtS, I8x16GtS)
3388 HANDLE_INVERSE_COMPARISON(I8x16LtU, I8x16GtU)
3389 HANDLE_INVERSE_COMPARISON(I8x16LeS, I8x16GeS)
3390 HANDLE_INVERSE_COMPARISON(I8x16LeU, I8x16GeU)
3391
3392 HANDLE_INVERSE_COMPARISON(I16x8LtS, I16x8GtS)
3393 HANDLE_INVERSE_COMPARISON(I16x8LtU, I16x8GtU)
3394 HANDLE_INVERSE_COMPARISON(I16x8LeS, I16x8GeS)
3395 HANDLE_INVERSE_COMPARISON(I16x8LeU, I16x8GeU)
3396
3397 HANDLE_INVERSE_COMPARISON(I32x4LtS, I32x4GtS)
3398 HANDLE_INVERSE_COMPARISON(I32x4LtU, I32x4GtU)
3399 HANDLE_INVERSE_COMPARISON(I32x4LeS, I32x4GeS)
3400 HANDLE_INVERSE_COMPARISON(I32x4LeU, I32x4GeU)
3401
3402 HANDLE_INVERSE_COMPARISON(I64x2LtS, I64x2GtS)
3403 HANDLE_INVERSE_COMPARISON(I64x2LeS, I64x2GeS)
3404
3405 HANDLE_INVERSE_COMPARISON(F32x4Gt, F32x4Lt)
3406 HANDLE_INVERSE_COMPARISON(F32x4Ge, F32x4Le)
3407 HANDLE_INVERSE_COMPARISON(F64x2Gt, F64x2Lt)
3408 HANDLE_INVERSE_COMPARISON(F64x2Ge, F64x2Le)
3409
3410#undef HANDLE_INVERSE_COMPARISON
3411
3412#define HANDLE_UNARY_NON_OPTIONAL_OPCODE(kind) \
3413 case kExpr##kind: \
3414 result->op = \
3415 __ Simd128Unary(V<compiler::turboshaft::Simd128>::Cast(args[0].op), \
3416 compiler::turboshaft::Simd128UnaryOp::Kind::k##kind); \
3417 break;
3418 FOREACH_SIMD_128_UNARY_NON_OPTIONAL_OPCODE(
3420#undef HANDLE_UNARY_NON_OPTIONAL_OPCODE
3421
3422#define HANDLE_UNARY_OPTIONAL_OPCODE(kind, feature, external_ref) \
3423 case kExpr##kind: \
3424 if (SupportedOperations::feature()) { \
3425 result->op = __ Simd128Unary( \
3426 V<compiler::turboshaft::Simd128>::Cast(args[0].op), \
3427 compiler::turboshaft::Simd128UnaryOp::Kind::k##kind); \
3428 } else { \
3429 result->op = CallCStackSlotToStackSlot( \
3430 args[0].op, ExternalReference::external_ref(), \
3431 MemoryRepresentation::Simd128()); \
3432 } \
3433 break;
3434 HANDLE_UNARY_OPTIONAL_OPCODE(F16x8Abs, float16, wasm_f16x8_abs)
3435 HANDLE_UNARY_OPTIONAL_OPCODE(F16x8Neg, float16, wasm_f16x8_neg)
3436 HANDLE_UNARY_OPTIONAL_OPCODE(F16x8Sqrt, float16, wasm_f16x8_sqrt)
3437 HANDLE_UNARY_OPTIONAL_OPCODE(F16x8Ceil, float16, wasm_f16x8_ceil)
3438 HANDLE_UNARY_OPTIONAL_OPCODE(F16x8Floor, float16, wasm_f16x8_floor)
3439 HANDLE_UNARY_OPTIONAL_OPCODE(F16x8Trunc, float16, wasm_f16x8_trunc)
3440 HANDLE_UNARY_OPTIONAL_OPCODE(F16x8NearestInt, float16,
3441 wasm_f16x8_nearest_int)
3442 HANDLE_UNARY_OPTIONAL_OPCODE(I16x8SConvertF16x8, float16,
3443 wasm_i16x8_sconvert_f16x8)
3444 HANDLE_UNARY_OPTIONAL_OPCODE(I16x8UConvertF16x8, float16,
3445 wasm_i16x8_uconvert_f16x8)
3446 HANDLE_UNARY_OPTIONAL_OPCODE(F16x8SConvertI16x8, float16,
3447 wasm_f16x8_sconvert_i16x8)
3448 HANDLE_UNARY_OPTIONAL_OPCODE(F16x8UConvertI16x8, float16,
3449 wasm_f16x8_uconvert_i16x8)
3450 HANDLE_UNARY_OPTIONAL_OPCODE(F16x8DemoteF32x4Zero, float16,
3451 wasm_f16x8_demote_f32x4_zero)
3452 HANDLE_UNARY_OPTIONAL_OPCODE(F16x8DemoteF64x2Zero,
3453 float64_to_float16_raw_bits,
3454 wasm_f16x8_demote_f64x2_zero)
3455 HANDLE_UNARY_OPTIONAL_OPCODE(F32x4PromoteLowF16x8, float16,
3456 wasm_f32x4_promote_low_f16x8)
3457 HANDLE_UNARY_OPTIONAL_OPCODE(F32x4Ceil, float32_round_up, wasm_f32x4_ceil)
3458 HANDLE_UNARY_OPTIONAL_OPCODE(F32x4Floor, float32_round_down,
3459 wasm_f32x4_floor)
3460 HANDLE_UNARY_OPTIONAL_OPCODE(F32x4Trunc, float32_round_to_zero,
3461 wasm_f32x4_trunc)
3462 HANDLE_UNARY_OPTIONAL_OPCODE(F32x4NearestInt, float32_round_ties_even,
3463 wasm_f32x4_nearest_int)
3464 HANDLE_UNARY_OPTIONAL_OPCODE(F64x2Ceil, float64_round_up, wasm_f64x2_ceil)
3465 HANDLE_UNARY_OPTIONAL_OPCODE(F64x2Floor, float64_round_down,
3466 wasm_f64x2_floor)
3467 HANDLE_UNARY_OPTIONAL_OPCODE(F64x2Trunc, float64_round_to_zero,
3468 wasm_f64x2_trunc)
3469 HANDLE_UNARY_OPTIONAL_OPCODE(F64x2NearestInt, float64_round_ties_even,
3470 wasm_f64x2_nearest_int)
3471#undef HANDLE_UNARY_OPTIONAL_OPCODE
3472
3473#define HANDLE_SHIFT_OPCODE(kind) \
3474 case kExpr##kind: \
3475 result->op = \
3476 __ Simd128Shift(V<compiler::turboshaft::Simd128>::Cast(args[0].op), \
3477 V<Word32>::Cast(args[1].op), \
3478 compiler::turboshaft::Simd128ShiftOp::Kind::k##kind); \
3479 break;
3480 FOREACH_SIMD_128_SHIFT_OPCODE(HANDLE_SHIFT_OPCODE)
3481#undef HANDLE_SHIFT_OPCODE
3482
3483#define HANDLE_TEST_OPCODE(kind) \
3484 case kExpr##kind: \
3485 result->op = \
3486 __ Simd128Test(V<compiler::turboshaft::Simd128>::Cast(args[0].op), \
3487 compiler::turboshaft::Simd128TestOp::Kind::k##kind); \
3488 break;
3489 FOREACH_SIMD_128_TEST_OPCODE(HANDLE_TEST_OPCODE)
3490#undef HANDLE_TEST_OPCODE
3491
3492#define HANDLE_SPLAT_OPCODE(kind) \
3493 case kExpr##kind##Splat: \
3494 result->op = \
3495 __ Simd128Splat(V<Any>::Cast(args[0].op), \
3496 compiler::turboshaft::Simd128SplatOp::Kind::k##kind); \
3497 break;
3498 FOREACH_SIMD_128_SPLAT_MANDATORY_OPCODE(HANDLE_SPLAT_OPCODE)
3499#undef HANDLE_SPLAT_OPCODE
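 // F16x8Splat is handled outside the mandatory splat macro: without hardware
 // float16 support, the scalar is first converted to its f16 bit pattern via
 // a C call and then splatted as i16x8 lanes carrying the same bits.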
3500 case kExprF16x8Splat:
3501 if (SupportedOperations::float16()) {
3502 result->op = __ Simd128Splat(
3503 V<Any>::Cast(args[0].op),
3504 compiler::turboshaft::Simd128SplatOp::Kind::kF16x8);
3505 } else {
3506 auto f16 = CallCStackSlotToStackSlot(
3507 args[0].op, ExternalReference::wasm_float32_to_float16(),
3508 MemoryRepresentation::Float32(), MemoryRepresentation::Int16());
3509 result->op = __ Simd128Splat(
3510 V<Any>::Cast(f16),
3511 compiler::turboshaft::Simd128SplatOp::Kind::kI16x8);
3512 }
3513 break;
3514
3515// Ternary mask operators put the mask as first input.
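// The decoder supplies the operands in wasm order (a, b, mask), so args[2]
// (the mask) is moved to the front here.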
3516#define HANDLE_TERNARY_MASK_OPCODE(kind) \
3517 case kExpr##kind: \
3518 result->op = __ Simd128Ternary( \
3519 V<compiler::turboshaft::Simd128>::Cast(args[2].op), \
3520 V<compiler::turboshaft::Simd128>::Cast(args[0].op), \
3521 V<compiler::turboshaft::Simd128>::Cast(args[1].op), \
3522 compiler::turboshaft::Simd128TernaryOp::Kind::k##kind); \
3523 break;
3524 FOREACH_SIMD_128_TERNARY_MASK_OPCODE(HANDLE_TERNARY_MASK_OPCODE)
3525#undef HANDLE_TERNARY_MASK_OPCODE
3526
3527#define HANDLE_TERNARY_OTHER_OPCODE(kind) \
3528 case kExpr##kind: \
3529 result->op = __ Simd128Ternary( \
3530 V<compiler::turboshaft::Simd128>::Cast(args[0].op), \
3531 V<compiler::turboshaft::Simd128>::Cast(args[1].op), \
3532 V<compiler::turboshaft::Simd128>::Cast(args[2].op), \
3533 compiler::turboshaft::Simd128TernaryOp::Kind::k##kind); \
3534 break;
3535 FOREACH_SIMD_128_TERNARY_OTHER_OPCODE(HANDLE_TERNARY_OTHER_OPCODE)
3536#undef HANDLE_TERNARY_OTHER_OPCODE
3537
3538#define HANDLE_F16X8_TERN_OPCODE(kind, extern_ref) \
3539 case kExpr##kind: \
3540 if (SupportedOperations::float16()) { \
3541 result->op = __ Simd128Ternary( \
3542 V<compiler::turboshaft::Simd128>::Cast(args[0].op), \
3543 V<compiler::turboshaft::Simd128>::Cast(args[1].op), \
3544 V<compiler::turboshaft::Simd128>::Cast(args[2].op), \
3545 compiler::turboshaft::Simd128TernaryOp::Kind::k##kind); \
3546 } else { \
3547 result->op = CallCStackSlotToStackSlot( \
3548 ExternalReference::extern_ref(), MemoryRepresentation::Simd128(), \
3549 {{args[0].op, MemoryRepresentation::Simd128()}, \
3550 {args[1].op, MemoryRepresentation::Simd128()}, \
3551 {args[2].op, MemoryRepresentation::Simd128()}}); \
3552 } \
3553 break;
3554 HANDLE_F16X8_TERN_OPCODE(F16x8Qfma, wasm_f16x8_qfma)
3555 HANDLE_F16X8_TERN_OPCODE(F16x8Qfms, wasm_f16x8_qfms)
3556#undef HANDLE_F16X8_TERN_OPCODE
3557 default:
3558 UNREACHABLE();
3559 }
3560 }
3561
3562 void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
3563 const SimdLaneImmediate& imm,
3564 base::Vector<const Value> inputs, Value* result) {
3565 using compiler::turboshaft::Simd128ExtractLaneOp;
3566 using compiler::turboshaft::Simd128ReplaceLaneOp;
3568 V<Simd128> input_val = V<Simd128>::Cast(inputs[0].op);
3569 switch (opcode) {
3570 case kExprI8x16ExtractLaneS:
3571 result->op = __ Simd128ExtractLane(
3572 input_val, Simd128ExtractLaneOp::Kind::kI8x16S, imm.lane);
3573 break;
3574 case kExprI8x16ExtractLaneU:
3575 result->op = __ Simd128ExtractLane(
3576 input_val, Simd128ExtractLaneOp::Kind::kI8x16U, imm.lane);
3577 break;
3578 case kExprI16x8ExtractLaneS:
3579 result->op = __ Simd128ExtractLane(
3580 input_val, Simd128ExtractLaneOp::Kind::kI16x8S, imm.lane);
3581 break;
3582 case kExprI16x8ExtractLaneU:
3583 result->op = __ Simd128ExtractLane(
3584 input_val, Simd128ExtractLaneOp::Kind::kI16x8U, imm.lane);
3585 break;
3586 case kExprI32x4ExtractLane:
3587 result->op = __ Simd128ExtractLane(
3588 input_val, Simd128ExtractLaneOp::Kind::kI32x4, imm.lane);
3589 break;
3590 case kExprI64x2ExtractLane:
3591 result->op = __ Simd128ExtractLane(
3592 input_val, Simd128ExtractLaneOp::Kind::kI64x2, imm.lane);
3593 break;
3594 case kExprF16x8ExtractLane:
3595 if (SupportedOperations::float16()) {
3596 result->op = __ Simd128ExtractLane(
3597 input_val, Simd128ExtractLaneOp::Kind::kF16x8, imm.lane);
3598 } else {
3599 auto f16 = __ Simd128ExtractLane(
3600 input_val, Simd128ExtractLaneOp::Kind::kI16x8S, imm.lane);
3601 result->op = CallCStackSlotToStackSlot(
3602 f16, ExternalReference::wasm_float16_to_float32(),
3603 MemoryRepresentation::Int16(), MemoryRepresentation::Float32());
3604 }
3605 break;
3606 case kExprF32x4ExtractLane:
3607 result->op = __ Simd128ExtractLane(
3608 input_val, Simd128ExtractLaneOp::Kind::kF32x4, imm.lane);
3609 break;
3610 case kExprF64x2ExtractLane:
3611 result->op = __ Simd128ExtractLane(
3612 input_val, Simd128ExtractLaneOp::Kind::kF64x2, imm.lane);
3613 break;
3614 case kExprI8x16ReplaceLane:
3615 result->op =
3616 __ Simd128ReplaceLane(input_val, V<Any>::Cast(inputs[1].op),
3617 Simd128ReplaceLaneOp::Kind::kI8x16, imm.lane);
3618 break;
3619 case kExprI16x8ReplaceLane:
3620 result->op =
3621 __ Simd128ReplaceLane(input_val, V<Simd128>::Cast(inputs[1].op),
3622 Simd128ReplaceLaneOp::Kind::kI16x8, imm.lane);
3623 break;
3624 case kExprI32x4ReplaceLane:
3625 result->op =
3626 __ Simd128ReplaceLane(input_val, V<Any>::Cast(inputs[1].op),
3627 Simd128ReplaceLaneOp::Kind::kI32x4, imm.lane);
3628 break;
3629 case kExprI64x2ReplaceLane:
3630 result->op =
3631 __ Simd128ReplaceLane(input_val, V<Any>::Cast(inputs[1].op),
3632 Simd128ReplaceLaneOp::Kind::kI64x2, imm.lane);
3633 break;
3634 case kExprF16x8ReplaceLane:
3635 if (SupportedOperations::float16()) {
3636 result->op = __ Simd128ReplaceLane(
3637 input_val, V<Any>::Cast(inputs[1].op),
3638 Simd128ReplaceLaneOp::Kind::kF16x8, imm.lane);
3639 } else {
3640 auto f16 = CallCStackSlotToStackSlot(
3641 inputs[1].op, ExternalReference::wasm_float32_to_float16(),
3642 MemoryRepresentation::Float32(), MemoryRepresentation::Int16());
3643 result->op = __ Simd128ReplaceLane(input_val, V<Any>::Cast(f16),
3644 Simd128ReplaceLaneOp::Kind::kI16x8,
3645 imm.lane);
3646 }
3647 break;
3648 case kExprF32x4ReplaceLane:
3649 result->op =
3650 __ Simd128ReplaceLane(input_val, V<Any>::Cast(inputs[1].op),
3651 Simd128ReplaceLaneOp::Kind::kF32x4, imm.lane);
3652 break;
3653 case kExprF64x2ReplaceLane:
3654 result->op =
3655 __ Simd128ReplaceLane(input_val, V<Any>::Cast(inputs[1].op),
3656 Simd128ReplaceLaneOp::Kind::kF64x2, imm.lane);
3657 break;
3658 default:
3659 UNREACHABLE();
3660 }
3661 }
3662
3663 void Simd8x16ShuffleOp(FullDecoder* decoder, const Simd128Immediate& imm,
3664 const Value& input0, const Value& input1,
3665 Value* result) {
3666 result->op = __ Simd128Shuffle(
3667 V<compiler::turboshaft::Simd128>::Cast(input0.op),
3668 V<compiler::turboshaft::Simd128>::Cast(input1.op),
3669 compiler::turboshaft::Simd128ShuffleOp::Kind::kI8x16, imm.value);
3670 }
3671
3672 void Try(FullDecoder* decoder, Control* block) {
3673 block->false_or_loop_or_catch_block = NewBlockWithPhis(decoder, nullptr);
3674 block->merge_block = NewBlockWithPhis(decoder, block->br_merge());
3675 }
3676
3677 void Throw(FullDecoder* decoder, const TagIndexImmediate& imm,
3678 const Value arg_values[]) {
3679 size_t count = imm.tag->sig->parameter_count();
3680 SmallZoneVector<OpIndex, 16> values(count, decoder->zone_);
3681 for (size_t index = 0; index < count; index++) {
3682 values[index] = arg_values[index].op;
3683 }
3684
3685 uint32_t encoded_size = WasmExceptionPackage::GetEncodedSize(imm.tag);
3686
3687 V<FixedArray> values_array = CallBuiltinThroughJumptable<
3688 BuiltinCallDescriptor::WasmAllocateFixedArray>(
3689 decoder, {__ IntPtrConstant(encoded_size)});
3690 uint32_t index = 0;
3691 const wasm::WasmTagSig* sig = imm.tag->sig;
3692
3693 // Encode the exception values in {values_array}.
3694 for (size_t i = 0; i < count; i++) {
3695 OpIndex value = values[i];
3696 switch (sig->GetParam(i).kind()) {
3697 case kF32:
3698 value = __ BitcastFloat32ToWord32(value);
3699 [[fallthrough]];
3700 case kI32:
3701 BuildEncodeException32BitValue(values_array, index, value);
3702 // We need 2 Smis to encode a 32-bit value.
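 // (Each Smi carries one 16-bit half, so the encoding fits Smi range on
 // all configurations.)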
3703 index += 2;
3704 break;
3705 case kF64:
3706 value = __ BitcastFloat64ToWord64(value);
3707 [[fallthrough]];
3708 case kI64: {
3709 OpIndex upper_half =
3710 __ TruncateWord64ToWord32(__ Word64ShiftRightLogical(value, 32));
3711 BuildEncodeException32BitValue(values_array, index, upper_half);
3712 index += 2;
3713 OpIndex lower_half = __ TruncateWord64ToWord32(value);
3714 BuildEncodeException32BitValue(values_array, index, lower_half);
3715 index += 2;
3716 break;
3717 }
3718 case wasm::kRef:
3719 case wasm::kRefNull:
3720 __ StoreFixedArrayElement(values_array, index, value,
3721 compiler::kFullWriteBarrier);
3722 index++;
3723 break;
3724 case kS128: {
3726 V<Simd128> value_s128 = V<Simd128>::Cast(value);
3727 using Kind = compiler::turboshaft::Simd128ExtractLaneOp::Kind;
3728 BuildEncodeException32BitValue(values_array, index,
3729 V<Word32>::Cast(__ Simd128ExtractLane(
3730 value_s128, Kind::kI32x4, 0)));
3731 index += 2;
3732 BuildEncodeException32BitValue(values_array, index,
3733 V<Word32>::Cast(__ Simd128ExtractLane(
3734 value_s128, Kind::kI32x4, 1)));
3735 index += 2;
3736 BuildEncodeException32BitValue(values_array, index,
3737 V<Word32>::Cast(__ Simd128ExtractLane(
3738 value_s128, Kind::kI32x4, 2)));
3739 index += 2;
3740 BuildEncodeException32BitValue(values_array, index,
3741 V<Word32>::Cast(__ Simd128ExtractLane(
3742 value_s128, Kind::kI32x4, 3)));
3743 index += 2;
3744 break;
3745 }
3746 case kI8:
3747 case kI16:
3748 case kF16:
3749 case kVoid:
3750 case kTop:
3751 case kBottom:
3752 UNREACHABLE();
3753 }
3754 }
3755
3756 // TODO(14616): Support shared tags.
3757 V<FixedArray> instance_tags =
3760 auto tag = V<WasmTagObject>::Cast(
3761 __ LoadFixedArrayElement(instance_tags, imm.index));
3762
3763 CallBuiltinThroughJumptable<BuiltinCallDescriptor::WasmThrow>(
3764 decoder, {tag, values_array}, CheckForException::kCatchInThisFrame);
3765 __ Unreachable();
3766 }
3767
3768 void Rethrow(FullDecoder* decoder, Control* block) {
3769 CallBuiltinThroughJumptable<BuiltinCallDescriptor::WasmRethrow>(
3770 decoder, {block->exception}, CheckForException::kCatchInThisFrame);
3771 __ Unreachable();
3772 }
3773
3774 void CatchException(FullDecoder* decoder, const TagIndexImmediate& imm,
3775 Control* block, base::Vector<Value> values) {
3776 if (deopts_enabled()) {
3777 if (v8_flags.trace_wasm_inlining) {
3778 PrintF(
3779 "[function %d%s: Disabling deoptimizations for speculative "
3780 "inlining due to legacy exception handling usage]\n",
3781 func_index_, mode_ == kRegular ? "" : " (inlined)");
3782 }
3784 }
3785
3786 BindBlockAndGeneratePhis(decoder, block->false_or_loop_or_catch_block,
3787 nullptr, &block->exception);
3791 decoder, native_context,
3792 {block->exception, LOAD_ROOT(wasm_exception_tag_symbol)}));
3793 // TODO(14616): Support shared tags.
3794 V<FixedArray> instance_tags =
3797 auto expected_tag = V<WasmTagObject>::Cast(
3798 __ LoadFixedArrayElement(instance_tags, imm.index));
3799 TSBlock* if_no_catch = NewBlockWithPhis(decoder, nullptr);
3800 SetupControlFlowEdge(decoder, if_no_catch);
3801
3802 // If the tags don't match we continue with the next tag by setting the
3803 // no-catch environment as the new {block->false_or_loop_or_catch_block}
3804 // here.
3805 block->false_or_loop_or_catch_block = if_no_catch;
3806
3807 if (imm.tag->sig->parameter_count() == 1 &&
3808 imm.tag->sig->GetParam(0).is_reference_to(HeapType::kExtern)) {
3809 // Check for the special case where the tag is WebAssembly.JSTag and the
3810 // exception is not a WebAssembly.Exception. In this case the exception is
3811 // caught and pushed on the operand stack.
3812 // Only perform this check if the tag signature is the same as
3813 // the JSTag signature, i.e. a single externref or (ref extern), otherwise
3814 // we know statically that it cannot be the JSTag.
3815 V<Word32> caught_tag_undefined =
3816 __ TaggedEqual(caught_tag, LOAD_ROOT(UndefinedValue));
3817 Label<Object> if_catch(&asm_);
3818 Label<> no_catch_merge(&asm_);
3819
3820 IF (UNLIKELY(caught_tag_undefined)) {
3821 V<Object> tag_object = __ Load(
3822 native_context, LoadOp::Kind::TaggedBase(),
3823 MemoryRepresentation::TaggedPointer(),
3824 NativeContext::OffsetOfElementAt(Context::WASM_JS_TAG_INDEX));
3825 V<Object> js_tag = __ Load(tag_object, LoadOp::Kind::TaggedBase(),
3826 MemoryRepresentation::TaggedPointer(),
3827 WasmTagObject::kTagOffset);
3828 GOTO_IF(__ TaggedEqual(expected_tag, js_tag), if_catch,
3829 block->exception);
3830 GOTO(no_catch_merge);
3831 } ELSE {
3832 IF (__ TaggedEqual(caught_tag, expected_tag)) {
3833 UnpackWasmException(decoder, block->exception, values);
3834 GOTO(if_catch, values[0].op);
3835 }
3836 GOTO(no_catch_merge);
3837 }
3838
3839 BIND(no_catch_merge);
3840 __ Goto(if_no_catch);
3841
3842 BIND(if_catch, caught_exception);
3843 // The first unpacked value is the exception itself in the case of a JS
3844 // exception.
3845 values[0].op = caught_exception;
3846 } else {
3847 TSBlock* if_catch = __ NewBlock();
3848 __ Branch(ConditionWithHint(__ TaggedEqual(caught_tag, expected_tag)),
3849 if_catch, if_no_catch);
3850 __ Bind(if_catch);
3851 UnpackWasmException(decoder, block->exception, values);
3852 }
3853 }
3854
3855 void Delegate(FullDecoder* decoder, uint32_t depth, Control* block) {
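 // Route the pending exception either to the try block at {depth}, or, when
 // the delegate targets the function level, to the inlined caller's catch
 // block or back to the caller via a rethrow.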
3856 BindBlockAndGeneratePhis(decoder, block->false_or_loop_or_catch_block,
3857 nullptr, &block->exception);
3858 if (depth == decoder->control_depth() - 1) {
3859 if (mode_ == kInlinedWithCatch) {
3860 if (block->exception.valid()) {
3861 return_phis_->AddIncomingException(block->exception);
3862 }
3863 __ Goto(return_catch_block_);
3864 } else {
3865 // We just throw to the caller, no need to handle the exception in this
3866 // frame.
3867 CallBuiltinThroughJumptable<BuiltinCallDescriptor::WasmRethrow>(
3868 decoder, {block->exception});
3869 __ Unreachable();
3870 }
3871 } else {
3872 DCHECK(decoder->control_at(depth)->is_try());
3873 TSBlock* target_catch =
3874 decoder->control_at(depth)->false_or_loop_or_catch_block;
3875 SetupControlFlowEdge(decoder, target_catch, 0, block->exception);
3876 __ Goto(target_catch);
3877 }
3878 }
3879
3880 void CatchAll(FullDecoder* decoder, Control* block) {
3881 DCHECK(block->is_try_catchall() || block->is_try_catch());
3882 DCHECK_EQ(decoder->control_at(0), block);
3883
3884 if (deopts_enabled()) {
3885 if (v8_flags.trace_wasm_inlining) {
3886 // TODO(42204618): Would it be worthwhile to add support for this?
3887 // The difficulty is the handling of the exception which is handled as a
3888 // value on the value stack in Liftoff but handled very differently in
3889 // Turboshaft (and it would need to be passed on in the FrameState).
3890 PrintF(
3891 "[function %d%s: Disabling deoptimizations for speculative "
3892 "inlining due to legacy exception handling usage]\n",
3893 func_index_, mode_ == kRegular ? "" : " (inlined)");
3894 }
3896 }
3897
3898 BindBlockAndGeneratePhis(decoder, block->false_or_loop_or_catch_block,
3899 nullptr, &block->exception);
3900 }
3901
3902 void TryTable(FullDecoder* decoder, Control* block) { Try(decoder, block); }
3903
3904 void CatchCase(FullDecoder* decoder, Control* block,
3905 const CatchCase& catch_case, base::Vector<Value> values) {
3906 // If this is the first catch case, {block->false_or_loop_or_catch_block} is
3907 // the block that was created on block entry, and is where all throwing
3908 // instructions in the try-table jump to if they throw.
3909 // Otherwise, {block->false_or_loop_or_catch_block} has been overwritten by
3910 // the previous handler, and is where we jump to if we did not catch the
3911 // exception yet.
3912 BindBlockAndGeneratePhis(decoder, block->false_or_loop_or_catch_block,
3913 nullptr, &block->exception);
3914 if (catch_case.kind == kCatchAll || catch_case.kind == kCatchAllRef) {
3915 if (catch_case.kind == kCatchAllRef) {
3916 DCHECK_EQ(values.size(), 1);
3917 values.last().op = block->exception;
3918 }
3919 BrOrRet(decoder, catch_case.br_imm.depth);
3920 return;
3921 }
3925 decoder, native_context,
3926 {block->exception, LOAD_ROOT(wasm_exception_tag_symbol)}));
3927 // TODO(14616): Support shared tags.
3928 V<FixedArray> instance_tags =
3931 auto expected_tag = V<WasmTagObject>::Cast(__ LoadFixedArrayElement(
3932 instance_tags, catch_case.maybe_tag.tag_imm.index));
3933 TSBlock* if_no_catch = NewBlockWithPhis(decoder, nullptr);
3934 SetupControlFlowEdge(decoder, if_no_catch);
3935
3936 // If the tags don't match we continue with the next tag by setting the
3937 // no-catch environment as the new {block->false_or_loop_or_catch_block}
3938 // here.
3939 block->false_or_loop_or_catch_block = if_no_catch;
3940
3941 if (catch_case.maybe_tag.tag_imm.tag->sig->parameter_count() == 1 &&
3942 catch_case.maybe_tag.tag_imm.tag->sig->GetParam(0) == kWasmExternRef) {
3943 // Check for the special case where the tag is WebAssembly.JSTag and the
3944 // exception is not a WebAssembly.Exception. In this case the exception is
3945 // caught and pushed on the operand stack.
3946 // Only perform this check if the tag signature is the same as
3947 // the JSTag signature, i.e. a single externref, otherwise
3948 // we know statically that it cannot be the JSTag.
3949 V<Word32> caught_tag_undefined =
3950 __ TaggedEqual(caught_tag, LOAD_ROOT(UndefinedValue));
3951 Label<Object> if_catch(&asm_);
3952 Label<> no_catch_merge(&asm_);
3953
3954 IF (UNLIKELY(caught_tag_undefined)) {
3955 V<Object> tag_object = __ Load(
3956 native_context, LoadOp::Kind::TaggedBase(),
3957 MemoryRepresentation::TaggedPointer(),
3958 NativeContext::OffsetOfElementAt(Context::WASM_JS_TAG_INDEX));
3959 V<Object> js_tag = __ Load(tag_object, LoadOp::Kind::TaggedBase(),
3960 MemoryRepresentation::TaggedPointer(),
3961 WasmTagObject::kTagOffset);
3962 GOTO_IF(__ TaggedEqual(expected_tag, js_tag), if_catch,
3963 block->exception);
3964 GOTO(no_catch_merge);
3965 } ELSE {
3966 IF (__ TaggedEqual(caught_tag, expected_tag)) {
3967 if (catch_case.kind == kCatchRef) {
3968 UnpackWasmException(decoder, block->exception,
3969 values.SubVector(0, values.size() - 1));
3970 values.last().op = block->exception;
3971 } else {
3972 UnpackWasmException(decoder, block->exception, values);
3973 }
3974 GOTO(if_catch, values[0].op);
3975 }
3976 GOTO(no_catch_merge);
3977 }
3978
3979 BIND(no_catch_merge);
3980 __ Goto(if_no_catch);
3981
3982 BIND(if_catch, caught_exception);
3983 // The first unpacked value is the exception itself in the case of a JS
3984 // exception.
3985 values[0].op = caught_exception;
3986 } else {
3987 TSBlock* if_catch = __ NewBlock();
3988 __ Branch(ConditionWithHint(__ TaggedEqual(caught_tag, expected_tag)),
3989 if_catch, if_no_catch);
3990 __ Bind(if_catch);
3991 if (catch_case.kind == kCatchRef) {
3992 UnpackWasmException(decoder, block->exception,
3993 values.SubVector(0, values.size() - 1));
3994 values.last().op = block->exception;
3995 } else {
3996 UnpackWasmException(decoder, block->exception, values);
3997 }
3998 }
3999
4000 BrOrRet(decoder, catch_case.br_imm.depth);
4001
4002 bool is_last = &catch_case == &block->catch_cases.last();
4003 if (is_last && !decoder->HasCatchAll(block)) {
4004 BindBlockAndGeneratePhis(decoder, block->false_or_loop_or_catch_block,
4005 nullptr, &block->exception);
4006 ThrowRef(decoder, block->exception);
4007 }
4008 }
4009
4010 void ThrowRef(FullDecoder* decoder, Value* value) {
4011 ThrowRef(decoder, value->op);
4012 }
4013
4014 void AtomicNotify(FullDecoder* decoder, const MemoryAccessImmediate& imm,
4015 OpIndex index, OpIndex num_waiters_to_wake, Value* result) {
4016 V<WordPtr> converted_index;
4017 compiler::BoundsCheckResult bounds_check_result;
4018 std::tie(converted_index, bounds_check_result) = BoundsCheckMem(
4019 imm.memory, MemoryRepresentation::Int32(), index, imm.offset,
4022
4023 OpIndex effective_offset = __ WordPtrAdd(converted_index, imm.offset);
4024 OpIndex addr = __ WordPtrAdd(MemStart(imm.mem_index), effective_offset);
4025
4028 result->op = CallC(&sig, ExternalReference::wasm_atomic_notify(),
4029 {addr, num_waiters_to_wake});
4030 }
4031
4032 void AtomicWait(FullDecoder* decoder, WasmOpcode opcode,
4033 const MemoryAccessImmediate& imm, OpIndex index,
4034 OpIndex expected, V<Word64> timeout, Value* result) {
4035 constexpr StubCallMode kStubMode = StubCallMode::kCallWasmRuntimeStub;
4036 V<WordPtr> converted_index;
4037 compiler::BoundsCheckResult bounds_check_result;
4038 std::tie(converted_index, bounds_check_result) = BoundsCheckMem(
4039 imm.memory,
4040 opcode == kExprI32AtomicWait ? MemoryRepresentation::Int32()
4044
4045 OpIndex effective_offset = __ WordPtrAdd(converted_index, imm.offset);
4046 V<BigInt> bigint_timeout = BuildChangeInt64ToBigInt(timeout, kStubMode);
4047
4048 if (opcode == kExprI32AtomicWait) {
4049 result->op =
4050 CallBuiltinThroughJumptable<BuiltinCallDescriptor::WasmI32AtomicWait>(
4051 decoder, {__ Word32Constant(imm.memory->index), effective_offset,
4052 expected, bigint_timeout});
4053 return;
4054 }
4055 DCHECK_EQ(opcode, kExprI64AtomicWait);
4056 V<BigInt> bigint_expected = BuildChangeInt64ToBigInt(expected, kStubMode);
4057 result->op =
4058 CallBuiltinThroughJumptable<BuiltinCallDescriptor::WasmI64AtomicWait>(
4059 decoder, {__ Word32Constant(imm.memory->index), effective_offset,
4060 bigint_expected, bigint_timeout});
4061 }
4062
4063 void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, const Value args[],
4064 const size_t argc, const MemoryAccessImmediate& imm,
4065 Value* result) {
4066 if (opcode == WasmOpcode::kExprAtomicNotify) {
4067 return AtomicNotify(decoder, imm, args[0].op, args[1].op, result);
4068 }
4069 if (opcode == WasmOpcode::kExprI32AtomicWait ||
4070 opcode == WasmOpcode::kExprI64AtomicWait) {
4071 return AtomicWait(decoder, opcode, imm, args[0].op, args[1].op,
4072 args[2].op, result);
4073 }
4074 using Binop = compiler::turboshaft::AtomicRMWOp::BinOp;
4075 enum OpType { kBinop, kLoad, kStore };
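 // AtomicOpInfo maps an atomic opcode to its operation class (RMW binop,
 // load, or store), the register representation of its value operands, and
 // the memory representation it accesses.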
4076 struct AtomicOpInfo {
4077 OpType op_type;
4078 // Initialize with a default value, to allow constexpr constructors.
4079 Binop bin_op = Binop::kAdd;
4080 RegisterRepresentation in_out_rep;
4081 MemoryRepresentation memory_rep;
4082
4083 constexpr AtomicOpInfo(Binop bin_op, RegisterRepresentation in_out_rep,
4084 MemoryRepresentation memory_rep)
4085 : op_type(kBinop),
4086 bin_op(bin_op),
4087 in_out_rep(in_out_rep),
4088 memory_rep(memory_rep) {}
4089
4090 constexpr AtomicOpInfo(OpType op_type, RegisterRepresentation in_out_rep,
4091 MemoryRepresentation memory_rep)
4092 : op_type(op_type), in_out_rep(in_out_rep), memory_rep(memory_rep) {}
4093
4094 static constexpr AtomicOpInfo Get(wasm::WasmOpcode opcode) {
4095 switch (opcode) {
4096#define CASE_BINOP(OPCODE, BINOP, RESULT, INPUT) \
4097 case WasmOpcode::kExpr##OPCODE: \
4098 return AtomicOpInfo(Binop::k##BINOP, RegisterRepresentation::RESULT(), \
4099 MemoryRepresentation::INPUT());
4100#define RMW_OPERATION(V) \
4101 V(I32AtomicAdd, Add, Word32, Uint32) \
4102 V(I32AtomicAdd8U, Add, Word32, Uint8) \
4103 V(I32AtomicAdd16U, Add, Word32, Uint16) \
4104 V(I32AtomicSub, Sub, Word32, Uint32) \
4105 V(I32AtomicSub8U, Sub, Word32, Uint8) \
4106 V(I32AtomicSub16U, Sub, Word32, Uint16) \
4107 V(I32AtomicAnd, And, Word32, Uint32) \
4108 V(I32AtomicAnd8U, And, Word32, Uint8) \
4109 V(I32AtomicAnd16U, And, Word32, Uint16) \
4110 V(I32AtomicOr, Or, Word32, Uint32) \
4111 V(I32AtomicOr8U, Or, Word32, Uint8) \
4112 V(I32AtomicOr16U, Or, Word32, Uint16) \
4113 V(I32AtomicXor, Xor, Word32, Uint32) \
4114 V(I32AtomicXor8U, Xor, Word32, Uint8) \
4115 V(I32AtomicXor16U, Xor, Word32, Uint16) \
4116 V(I32AtomicExchange, Exchange, Word32, Uint32) \
4117 V(I32AtomicExchange8U, Exchange, Word32, Uint8) \
4118 V(I32AtomicExchange16U, Exchange, Word32, Uint16) \
4119 V(I32AtomicCompareExchange, CompareExchange, Word32, Uint32) \
4120 V(I32AtomicCompareExchange8U, CompareExchange, Word32, Uint8) \
4121 V(I32AtomicCompareExchange16U, CompareExchange, Word32, Uint16) \
4122 V(I64AtomicAdd, Add, Word64, Uint64) \
4123 V(I64AtomicAdd8U, Add, Word64, Uint8) \
4124 V(I64AtomicAdd16U, Add, Word64, Uint16) \
4125 V(I64AtomicAdd32U, Add, Word64, Uint32) \
4126 V(I64AtomicSub, Sub, Word64, Uint64) \
4127 V(I64AtomicSub8U, Sub, Word64, Uint8) \
4128 V(I64AtomicSub16U, Sub, Word64, Uint16) \
4129 V(I64AtomicSub32U, Sub, Word64, Uint32) \
4130 V(I64AtomicAnd, And, Word64, Uint64) \
4131 V(I64AtomicAnd8U, And, Word64, Uint8) \
4132 V(I64AtomicAnd16U, And, Word64, Uint16) \
4133 V(I64AtomicAnd32U, And, Word64, Uint32) \
4134 V(I64AtomicOr, Or, Word64, Uint64) \
4135 V(I64AtomicOr8U, Or, Word64, Uint8) \
4136 V(I64AtomicOr16U, Or, Word64, Uint16) \
4137 V(I64AtomicOr32U, Or, Word64, Uint32) \
4138 V(I64AtomicXor, Xor, Word64, Uint64) \
4139 V(I64AtomicXor8U, Xor, Word64, Uint8) \
4140 V(I64AtomicXor16U, Xor, Word64, Uint16) \
4141 V(I64AtomicXor32U, Xor, Word64, Uint32) \
4142 V(I64AtomicExchange, Exchange, Word64, Uint64) \
4143 V(I64AtomicExchange8U, Exchange, Word64, Uint8) \
4144 V(I64AtomicExchange16U, Exchange, Word64, Uint16) \
4145 V(I64AtomicExchange32U, Exchange, Word64, Uint32) \
4146 V(I64AtomicCompareExchange, CompareExchange, Word64, Uint64) \
4147 V(I64AtomicCompareExchange8U, CompareExchange, Word64, Uint8) \
4148 V(I64AtomicCompareExchange16U, CompareExchange, Word64, Uint16) \
4149 V(I64AtomicCompareExchange32U, CompareExchange, Word64, Uint32)
4150
4151 RMW_OPERATION(CASE_BINOP)
4152#undef RMW_OPERATION
4153#undef CASE_BINOP
4154#define CASE_LOAD(OPCODE, RESULT, INPUT) \
4155 case WasmOpcode::kExpr##OPCODE: \
4156 return AtomicOpInfo(kLoad, RegisterRepresentation::RESULT(), \
4157 MemoryRepresentation::INPUT());
4158#define LOAD_OPERATION(V) \
4159 V(I32AtomicLoad, Word32, Uint32) \
4160 V(I32AtomicLoad16U, Word32, Uint16) \
4161 V(I32AtomicLoad8U, Word32, Uint8) \
4162 V(I64AtomicLoad, Word64, Uint64) \
4163 V(I64AtomicLoad32U, Word64, Uint32) \
4164 V(I64AtomicLoad16U, Word64, Uint16) \
4165 V(I64AtomicLoad8U, Word64, Uint8)
4166 LOAD_OPERATION(CASE_LOAD)
4167#undef LOAD_OPERATION
4168#undef CASE_LOAD
4169#define CASE_STORE(OPCODE, INPUT, OUTPUT) \
4170 case WasmOpcode::kExpr##OPCODE: \
4171 return AtomicOpInfo(kStore, RegisterRepresentation::INPUT(), \
4172 MemoryRepresentation::OUTPUT());
4173#define STORE_OPERATION(V) \
4174 V(I32AtomicStore, Word32, Uint32) \
4175 V(I32AtomicStore16U, Word32, Uint16) \
4176 V(I32AtomicStore8U, Word32, Uint8) \
4177 V(I64AtomicStore, Word64, Uint64) \
4178 V(I64AtomicStore32U, Word64, Uint32) \
4179 V(I64AtomicStore16U, Word64, Uint16) \
4180 V(I64AtomicStore8U, Word64, Uint8)
4181 STORE_OPERATION(CASE_STORE)
4182#undef STORE_OPERATION
4183#undef CASE_STORE
4184 default:
4185 UNREACHABLE();
4186 }
4187 }
4188 };
4189
4190 AtomicOpInfo info = AtomicOpInfo::Get(opcode);
4191 V<WordPtr> index;
4192 compiler::BoundsCheckResult bounds_check_result;
4193 std::tie(index, bounds_check_result) =
4194 BoundsCheckMem(imm.memory, info.memory_rep, args[0].op, imm.offset,
4197 // MemoryAccessKind::kUnaligned is impossible due to explicit alignment
4198 // check.
4199 MemoryAccessKind access_kind =
4200 bounds_check_result == compiler::BoundsCheckResult::kTrapHandler
4201 ? MemoryAccessKind::kProtectedByTrapHandler
4202 : MemoryAccessKind::kNormal;
4203
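 // Dispatch on the operation class: RMW binops become AtomicRMW (or
 // AtomicCompareExchange), stores become atomic Stores (narrow i64 stores
 // are truncated first), and loads become atomic Loads.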
4204 if (info.op_type == kBinop) {
4205 if (info.bin_op == Binop::kCompareExchange) {
4206 result->op = __ AtomicCompareExchange(
4207 MemBuffer(imm.memory->index, imm.offset), index, args[1].op,
4208 args[2].op, info.in_out_rep, info.memory_rep, access_kind);
4209 return;
4210 }
4211 result->op = __ AtomicRMW(MemBuffer(imm.memory->index, imm.offset), index,
4212 args[1].op, info.bin_op, info.in_out_rep,
4213 info.memory_rep, access_kind);
4214 return;
4215 }
4216 if (info.op_type == kStore) {
4217 OpIndex value = args[1].op;
4218 if (info.in_out_rep == RegisterRepresentation::Word64() &&
4219 info.memory_rep != MemoryRepresentation::Uint64()) {
4220 value = __ TruncateWord64ToWord32(value);
4221 }
4222#ifdef V8_TARGET_BIG_ENDIAN
4223 // Reverse the value bytes before storing.
4224 DCHECK(info.in_out_rep == RegisterRepresentation::Word32() ||
4225 info.in_out_rep == RegisterRepresentation::Word64());
4226 wasm::ValueType wasm_type =
4227 info.in_out_rep == RegisterRepresentation::Word32() ? wasm::kWasmI32
4228 : wasm::kWasmI64;
4229 value = BuildChangeEndiannessStore(
4230 value, info.memory_rep.ToMachineType().representation(), wasm_type);
4231#endif
4232 __ Store(MemBuffer(imm.memory->index, imm.offset), index, value,
4236 info.memory_rep, compiler::kNoWriteBarrier);
4237 return;
4238 }
4239 DCHECK_EQ(info.op_type, kLoad);
4240 RegisterRepresentation loaded_value_rep = info.in_out_rep;
4241#if V8_TARGET_BIG_ENDIAN
4242 // Do not sign-extend / zero-extend the value to 64 bits as the bytes need
4243 // to be reversed first to keep little-endian load / store semantics. Still
4244 // extend for 1 byte loads as it doesn't require reversing any bytes.
4245 bool needs_zero_extension_64 = false;
4246 if (info.in_out_rep == RegisterRepresentation::Word64() &&
4247 info.memory_rep.SizeInBytes() < 8 &&
4248 info.memory_rep.SizeInBytes() != 1) {
4249 needs_zero_extension_64 = true;
4250 loaded_value_rep = RegisterRepresentation::Word32();
4251 }
4252#endif
4253 result->op =
4254 __ Load(MemBuffer(imm.memory->index, imm.offset), index,
4258 info.memory_rep, loaded_value_rep);
4259
4260#ifdef V8_TARGET_BIG_ENDIAN
4261 // Reverse the value bytes after load.
4262 DCHECK(info.in_out_rep == RegisterRepresentation::Word32() ||
4263 info.in_out_rep == RegisterRepresentation::Word64());
4264 wasm::ValueType wasm_type =
4265 info.in_out_rep == RegisterRepresentation::Word32() ? wasm::kWasmI32
4266 : wasm::kWasmI64;
4267 result->op = BuildChangeEndiannessLoad(
4268 result->op, info.memory_rep.ToMachineType(), wasm_type);
4269
4270 if (needs_zero_extension_64) {
4271 result->op = __ ChangeUint32ToUint64(result->op);
4272 }
4273#endif
4274 }
4275
4276 void AtomicFence(FullDecoder* decoder) {
4277 __ MemoryBarrier(AtomicMemoryOrder::kSeqCst);
4278 }
4279
4280 void MemoryInit(FullDecoder* decoder, const MemoryInitImmediate& imm,
4281 const Value& dst, const Value& src, const Value& size) {
4282 V<WordPtr> dst_uintptr = MemoryAddressToUintPtrOrOOBTrap(
4283 imm.memory.memory->address_type, dst.op);
4284 DCHECK_EQ(size.type, kWasmI32);
4289 // TODO(14616): Fix sharedness.
4290 V<Word32> result =
4291 CallC(&sig, ExternalReference::wasm_memory_init(),
4292 {__ BitcastHeapObjectToWordPtr(trusted_instance_data(false)),
4293 __ Word32Constant(imm.memory.index), dst_uintptr, src.op,
4294 __ Word32Constant(imm.data_segment.index), size.op});
4295 __ TrapIfNot(result, TrapId::kTrapMemOutOfBounds);
4296 }
4297
4298 void MemoryCopy(FullDecoder* decoder, const MemoryCopyImmediate& imm,
4299 const Value& dst, const Value& src, const Value& size) {
4300 const WasmMemory* dst_memory = imm.memory_dst.memory;
4301 const WasmMemory* src_memory = imm.memory_src.memory;
4302 V<WordPtr> dst_uintptr =
4303 MemoryAddressToUintPtrOrOOBTrap(dst_memory->address_type, dst.op);
4304 V<WordPtr> src_uintptr =
4305 MemoryAddressToUintPtrOrOOBTrap(src_memory->address_type, src.op);
4306 AddressType min_address_type =
4307 dst_memory->is_memory64() && src_memory->is_memory64()
4308 ? AddressType::kI64
4309 : AddressType::kI32;
4310 V<WordPtr> size_uintptr =
4311 MemoryAddressToUintPtrOrOOBTrap(min_address_type, size.op);
4316 // TODO(14616): Fix sharedness.
4317 V<Word32> result =
4318 CallC(&sig, ExternalReference::wasm_memory_copy(),
4319 {__ BitcastHeapObjectToWordPtr(trusted_instance_data(false)),
4320 __ Word32Constant(imm.memory_dst.index),
4321 __ Word32Constant(imm.memory_src.index), dst_uintptr,
4322 src_uintptr, size_uintptr});
4323 __ TrapIfNot(result, TrapId::kTrapMemOutOfBounds);
4324 }
4325
4326 void MemoryFill(FullDecoder* decoder, const MemoryIndexImmediate& imm,
4327 const Value& dst, const Value& value, const Value& size) {
4328 AddressType address_type = imm.memory->address_type;
4329 V<WordPtr> dst_uintptr =
4330 MemoryAddressToUintPtrOrOOBTrap(address_type, dst.op);
4331 V<WordPtr> size_uintptr =
4332 MemoryAddressToUintPtrOrOOBTrap(address_type, size.op);
4337 // TODO(14616): Fix sharedness.
4338 V<Word32> result = CallC(
4339 &sig, ExternalReference::wasm_memory_fill(),
4340 {__ BitcastHeapObjectToWordPtr(trusted_instance_data(false)),
4341 __ Word32Constant(imm.index), dst_uintptr, value.op, size_uintptr});
4342
4343 __ TrapIfNot(result, TrapId::kTrapMemOutOfBounds);
4344 }
4345
4346 void DataDrop(FullDecoder* decoder, const IndexImmediate& imm) {
4347 // TODO(14616): Data segments aren't available during streaming compilation.
4348 // Discussion: github.com/WebAssembly/shared-everything-threads/issues/83
4349 bool shared = decoder->enabled_.has_shared() &&
4350 decoder->module_->data_segments[imm.index].shared;
4352 trusted_instance_data(shared), DataSegmentSizes,
4354 __ Store(data_segment_sizes, __ Word32Constant(0),
4358 }
4359
4360 void TableGet(FullDecoder* decoder, const Value& index, Value* result,
4361 const TableIndexImmediate& imm) {
4362 V<WasmTableObject> table = LoadTable(decoder, imm);
4363 V<Smi> size_smi = __ Load(table, LoadOp::Kind::TaggedBase(),
4364 MemoryRepresentation::TaggedSigned(),
4365 WasmTableObject::kCurrentLengthOffset);
4366 V<WordPtr> index_wordptr =
4367 TableAddressToUintPtrOrOOBTrap(imm.table->address_type, index.op);
4369 V<Word32> in_bounds = __ UintPtrLessThan(
4370 index_wordptr, __ ChangeUint32ToUintPtr(__ UntagSmi(size_smi)));
4371 __ TrapIfNot(in_bounds, TrapId::kTrapTableOutOfBounds);
4372 V<FixedArray> entries = __ Load(table, LoadOp::Kind::TaggedBase(),
4373 MemoryRepresentation::TaggedPointer(),
4374 WasmTableObject::kEntriesOffset);
4375 OpIndex entry = __ LoadFixedArrayElement(entries, index_wordptr);
4376
4377 if (imm.table->type.ref_type_kind() == RefTypeKind::kFunction) {
4378 // If the entry has map type Tuple2, call WasmFunctionTableGet which will
4379 // initialize the function table entry.
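 // The instance-type check lets already-initialized entries (WasmFuncRef or
 // WasmNull) take the fast path without the runtime call.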
4380 Label<Object> resolved(&asm_);
4381 Label<> call_runtime(&asm_);
4382 // The entry is a WasmFuncRef, WasmNull, or Tuple2. Hence
4383 // it is safe to cast it to HeapObject.
4384 V<Map> entry_map = __ LoadMapField(V<HeapObject>::Cast(entry));
4385 V<Word32> instance_type = __ LoadInstanceTypeField(entry_map);
4386 GOTO_IF(
4387 UNLIKELY(__ Word32Equal(instance_type, InstanceType::TUPLE2_TYPE)),
4388 call_runtime);
4389 // Otherwise the entry is WasmFuncRef or WasmNull; we are done.
4390 GOTO(resolved, entry);
4391
4392 BIND(call_runtime);
4393 bool extract_shared_data = !shared_ && imm.table->shared;
4394 GOTO(resolved,
4395 CallBuiltinThroughJumptable<
4396 BuiltinCallDescriptor::WasmFunctionTableGet>(
4397 decoder, {__ IntPtrConstant(imm.index), index_wordptr,
4398 __ Word32Constant(extract_shared_data ? 1 : 0)}));
4399
4400 BIND(resolved, resolved_entry);
4401 result->op = resolved_entry;
4402 } else {
4403 result->op = entry;
4404 }
4405 result->op = AnnotateResultIfReference(result->op, imm.table->type);
4406 }
4407
4408 void TableSet(FullDecoder* decoder, const Value& index, const Value& value,
4409 const TableIndexImmediate& imm) {
4410 bool extract_shared_data = !shared_ && imm.table->shared;
4411
4412 V<WordPtr> index_wordptr =
4413 TableAddressToUintPtrOrOOBTrap(imm.table->address_type, index.op);
4414
4415 if (imm.table->type.ref_type_kind() == RefTypeKind::kFunction) {
4416 CallBuiltinThroughJumptable<BuiltinCallDescriptor::WasmTableSetFuncRef>(
4417 decoder, {__ IntPtrConstant(imm.index),
4418 __ Word32Constant(extract_shared_data ? 1 : 0),
4419 index_wordptr, value.op});
4420 } else {
4421 CallBuiltinThroughJumptable<BuiltinCallDescriptor::WasmTableSet>(
4422 decoder, {__ IntPtrConstant(imm.index),
4423 __ Word32Constant(extract_shared_data ? 1 : 0),
4424 index_wordptr, value.op});
4425 }
4426 }
4427
4428 void TableInit(FullDecoder* decoder, const TableInitImmediate& imm,
4429 const Value& dst_val, const Value& src_val,
4430 const Value& size_val) {
4431 const WasmTable* table = imm.table.table;
4432 V<WordPtr> dst_wordptr =
4433 TableAddressToUintPtrOrOOBTrap(table->address_type, dst_val.op);
4434 V<Word32> src = src_val.op;
4435 V<Word32> size = size_val.op;
4436 DCHECK_EQ(table->shared, table->shared);
4437 CallBuiltinThroughJumptable<BuiltinCallDescriptor::WasmTableInit>(
4438 decoder, {
4439 dst_wordptr,
4440 src,
4441 size,
4442 __ NumberConstant(imm.table.index),
4443 __ NumberConstant(imm.element_segment.index),
4444 __ NumberConstant((!shared_ && table->shared) ? 1 : 0),
4445 });
4446 }
4447
4448 void TableCopy(FullDecoder* decoder, const TableCopyImmediate& imm,
4449 const Value& dst_val, const Value& src_val,
4450 const Value& size_val) {
4451 const WasmTable* dst_table = imm.table_dst.table;
4452 const WasmTable* src_table = imm.table_src.table;
4453 V<WordPtr> dst_wordptr =
4454 TableAddressToUintPtrOrOOBTrap(dst_table->address_type, dst_val.op);
4455 V<WordPtr> src_wordptr =
4456 TableAddressToUintPtrOrOOBTrap(src_table->address_type, src_val.op);
4457 AddressType min_address_type =
4458 dst_table->is_table64() && src_table->is_table64() ? AddressType::kI64
4459 : AddressType::kI32;
4460 V<WordPtr> size_wordptr =
4461 TableAddressToUintPtrOrOOBTrap(min_address_type, size_val.op);
4462 bool table_is_shared = imm.table_dst.table->shared;
4463 // TODO(14616): Is this too restrictive?
4464 DCHECK_EQ(table_is_shared, imm.table_src.table->shared);
4465 CallBuiltinThroughJumptable<BuiltinCallDescriptor::WasmTableCopy>(
4466 decoder, {dst_wordptr, src_wordptr, size_wordptr,
4467 __ NumberConstant(imm.table_dst.index),
4468 __ NumberConstant(imm.table_src.index),
4469 __ NumberConstant((!shared_ && table_is_shared) ? 1 : 0)});
4470 }
4471
4472 void TableGrow(FullDecoder* decoder, const TableIndexImmediate& imm,
4473 const Value& value, const Value& delta, Value* result) {
4474 Label<Word32> end(&asm_);
4475 V<WordPtr> delta_wordptr;
4476
4477 // If `delta` is OOB, return -1.
4478 if (!imm.table->is_table64()) {
4479 delta_wordptr = __ ChangeUint32ToUintPtr(delta.op);
4480 } else if constexpr (Is64()) {
4481 delta_wordptr = delta.op;
4482 } else {
4483 GOTO_IF(UNLIKELY(__ TruncateWord64ToWord32(
4484 __ Word64ShiftRightLogical(delta.op, 32))),
4485 end, __ Word32Constant(-1));
4486 delta_wordptr = V<WordPtr>::Cast(__ TruncateWord64ToWord32(delta.op));
4487 }
4488
4489 bool extract_shared_data = !shared_ && imm.table->shared;
4491 V<Word32> call_result = __ UntagSmi(
4492 CallBuiltinThroughJumptable<BuiltinCallDescriptor::WasmTableGrow>(
4493 decoder, {__ NumberConstant(imm.index), delta_wordptr,
4494 __ Word32Constant(extract_shared_data), value.op}));
4495 GOTO(end, call_result);
4496
4497 BIND(end, result_i32);
4498 if (imm.table->is_table64()) {
4499 result->op = __ ChangeInt32ToInt64(result_i32);
4500 } else {
4501 result->op = result_i32;
4502 }
4503 }
4504
4505 void TableFill(FullDecoder* decoder, const TableIndexImmediate& imm,
4506 const Value& start, const Value& value, const Value& count) {
4507 V<WordPtr> start_wordptr =
4508 TableAddressToUintPtrOrOOBTrap(imm.table->address_type, start.op);
4509 V<WordPtr> count_wordptr =
4510 TableAddressToUintPtrOrOOBTrap(imm.table->address_type, count.op);
4511 bool extract_shared_data = !shared_ && imm.table->shared;
4512 CallBuiltinThroughJumptable<BuiltinCallDescriptor::WasmTableFill>(
4513 decoder,
4514 {start_wordptr, count_wordptr, __ Word32Constant(extract_shared_data),
4515 __ NumberConstant(imm.index), value.op});
4516 }
4517
4518 V<WasmTableObject> LoadTable(FullDecoder* decoder,
4519 const TableIndexImmediate& imm) {
4521 trusted_instance_data(imm.table->shared), Tables,
4524 __ LoadFixedArrayElement(tables, imm.index));
4525 }
4526
4527 void TableSize(FullDecoder* decoder, const TableIndexImmediate& imm,
4528 Value* result) {
4529 V<WasmTableObject> table = LoadTable(decoder, imm);
4530 V<Word32> size_word32 = __ UntagSmi(__ Load(
4531 table, LoadOp::Kind::TaggedBase(), MemoryRepresentation::TaggedSigned(),
4532 WasmTableObject::kCurrentLengthOffset));
4533 if (imm.table->is_table64()) {
4534 result->op = __ ChangeUint32ToUint64(size_word32);
4535 } else {
4536 result->op = size_word32;
4537 }
4538 }
4539
4540 void ElemDrop(FullDecoder* decoder, const IndexImmediate& imm) {
4541 // Note: Contrary to data segments, elem segments occur before the code
4542 // section, so we can be sure that they're available even during streaming
4543 // compilation.
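 // Dropping overwrites the segment's slot in the ElementSegments table with
 // the empty fixed array, so later accesses see a zero-length segment.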
4544 bool shared = decoder->module_->elem_segments[imm.index].shared;
4546 trusted_instance_data(shared), ElementSegments,
4548 __ StoreFixedArrayElement(elem_segments, imm.index,
4549 LOAD_ROOT(EmptyFixedArray),
4551 }
4552
4553 void StructNew(FullDecoder* decoder, const StructIndexImmediate& imm,
4554 const Value& descriptor, const Value args[], Value* result) {
4555 uint32_t field_count = imm.struct_type->field_count();
4556 SmallZoneVector<OpIndex, 16> args_vector(field_count, decoder->zone_);
4557 for (uint32_t i = 0; i < field_count; ++i) {
4558 args_vector[i] = args[i].op;
4559 }
4560 result->op = StructNewImpl(decoder, imm, descriptor, args_vector.data());
4561 }
4562
4563 void StructNewDefault(FullDecoder* decoder, const StructIndexImmediate& imm,
4564 const Value& descriptor, Value* result) {
4565 uint32_t field_count = imm.struct_type->field_count();
4566 SmallZoneVector<OpIndex, 16> args(field_count, decoder->zone_);
4567 for (uint32_t i = 0; i < field_count; i++) {
4568 ValueType field_type = imm.struct_type->field(i);
4569 args[i] = DefaultValue(field_type);
4570 }
4571 result->op = StructNewImpl(decoder, imm, descriptor, args.data());
4572 }
4573
4574 void StructGet(FullDecoder* decoder, const Value& struct_object,
4575 const FieldImmediate& field, bool is_signed, Value* result) {
4576 result->op = __ StructGet(
4577 V<WasmStructNullable>::Cast(struct_object.op),
4579 field.field_imm.index, is_signed,
4580 struct_object.type.is_nullable() ? compiler::kWithNullCheck
4581 : compiler::kWithoutNullCheck);
4582 }
4583
4584 void StructSet(FullDecoder* decoder, const Value& struct_object,
4585 const FieldImmediate& field, const Value& field_value) {
4586 __ StructSet(V<WasmStructNullable>::Cast(struct_object.op), field_value.op,
4588 field.field_imm.index,
4589 struct_object.type.is_nullable()
4590 ? compiler::kWithNullCheck
4591 : compiler::kWithoutNullCheck);
4592 }
4593
4594 void ArrayNew(FullDecoder* decoder, const ArrayIndexImmediate& imm,
4595 const Value& length, const Value& initial_value,
4596 Value* result) {
4597 result->op = ArrayNewImpl(decoder, imm.index, imm.array_type,
4598 V<Word32>::Cast(length.op),
4599 V<Any>::Cast(initial_value.op));
4600 }
4601
4602 void ArrayNewDefault(FullDecoder* decoder, const ArrayIndexImmediate& imm,
4603 const Value& length, Value* result) {
4604 V<Any> initial_value = DefaultValue(imm.array_type->element_type());
4605 result->op = ArrayNewImpl(decoder, imm.index, imm.array_type,
4606 V<Word32>::Cast(length.op), initial_value);
4607 }
4608
4609 void ArrayGet(FullDecoder* decoder, const Value& array_obj,
4610 const ArrayIndexImmediate& imm, const Value& index,
4611 bool is_signed, Value* result) {
4612 auto array_value = V<WasmArrayNullable>::Cast(array_obj.op);
4613 BoundsCheckArray(array_value, index.op, array_obj.type);
4614 result->op = __ ArrayGet(array_value, V<Word32>::Cast(index.op),
4615 imm.array_type, is_signed);
4616 }
4617
4618 void ArraySet(FullDecoder* decoder, const Value& array_obj,
4619 const ArrayIndexImmediate& imm, const Value& index,
4620 const Value& value) {
4621 auto array_value = V<WasmArrayNullable>::Cast(array_obj.op);
4622 BoundsCheckArray(array_value, index.op, array_obj.type);
4623 __ ArraySet(array_value, V<Word32>::Cast(index.op), V<Any>::Cast(value.op),
4624 imm.array_type->element_type());
4625 }
4626
4627 void ArrayLen(FullDecoder* decoder, const Value& array_obj, Value* result) {
4628 result->op = __ ArrayLength(V<WasmArrayNullable>::Cast(array_obj.op),
4629 array_obj.type.is_nullable()
4630 ? compiler::kWithNullCheck
4631 : compiler::kWithoutNullCheck);
4632 }
4633
4634 void ArrayCopy(FullDecoder* decoder, const Value& dst, const Value& dst_index,
4635 const Value& src, const Value& src_index,
4636 const ArrayIndexImmediate& src_imm, const Value& length) {
4637 V<WasmArrayNullable> dst_array = V<WasmArrayNullable>::Cast(dst.op);
4638 V<WasmArrayNullable> src_array = V<WasmArrayNullable>::Cast(src.op);
4639 BoundsCheckArrayWithLength(dst_array, dst_index.op, length.op,
4640 dst.type.is_nullable()
4641 ? compiler::kWithNullCheck
4642 : compiler::kWithoutNullCheck);
4643 BoundsCheckArrayWithLength(src_array, src_index.op, length.op,
4644 src.type.is_nullable()
4645 ? compiler::kWithNullCheck
4646 : compiler::kWithoutNullCheck);
4647
4648 ValueType element_type = src_imm.array_type->element_type();
4649
4650 IF_NOT (__ Word32Equal(length.op, 0)) {
4651 // Values determined by test/mjsunit/wasm/array-copy-benchmark.js on x64.
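 // Copies up to this length are emitted as an inline element-wise loop;
 // longer copies call the generic C function wasm_array_copy below.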
4652 int array_copy_max_loop_length;
4653 switch (element_type.kind()) {
4654 case wasm::kI32:
4655 case wasm::kI64:
4656 case wasm::kI8:
4657 case wasm::kI16:
4658 array_copy_max_loop_length = 20;
4659 break;
4660 case wasm::kF16: // TODO(irezvov): verify the threshold for F16.
4661 case wasm::kF32:
4662 case wasm::kF64:
4663 array_copy_max_loop_length = 35;
4664 break;
4665 case wasm::kS128:
4666 array_copy_max_loop_length = 100;
4667 break;
4668 case wasm::kRef:
4669 case wasm::kRefNull:
4670 array_copy_max_loop_length = 15;
4671 break;
4672 case wasm::kVoid:
4673 case kTop:
4674 case wasm::kBottom:
4675 UNREACHABLE();
4676 }
4677
4678 IF (__ Uint32LessThan(array_copy_max_loop_length, length.op)) {
4679 // Builtin
4684 MachineSignature sig(0, 5, arg_types);
4685
4686 CallC(&sig, ExternalReference::wasm_array_copy(),
4687 {dst_array, dst_index.op, src_array, src_index.op, length.op});
4688 } ELSE {
4689 V<Word32> src_end_index =
4690 __ Word32Sub(__ Word32Add(src_index.op, length.op), 1);
4691
4692 IF (__ Uint32LessThan(src_index.op, dst_index.op)) {
4693 // Reverse
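 // The destination starts above the source, so copy backwards: if the two
 // ranges overlap within the same array, elements are read before they are
 // overwritten.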
4694 V<Word32> dst_end_index =
4695 __ Word32Sub(__ Word32Add(dst_index.op, length.op), 1);
4696 ScopedVar<Word32> src_index_loop(this, src_end_index);
4697 ScopedVar<Word32> dst_index_loop(this, dst_end_index);
4698
4699 WHILE(__ Word32Constant(1)) {
4700 V<Any> value = __ ArrayGet(src_array, src_index_loop,
4701 src_imm.array_type, true);
4702 __ ArraySet(dst_array, dst_index_loop, value, element_type);
4703
4704 IF_NOT (__ Uint32LessThan(src_index.op, src_index_loop)) BREAK;
4705
4706 src_index_loop = __ Word32Sub(src_index_loop, 1);
4707 dst_index_loop = __ Word32Sub(dst_index_loop, 1);
4708 }
4709 } ELSE {
4710 ScopedVar<Word32> src_index_loop(this, src_index.op);
4711 ScopedVar<Word32> dst_index_loop(this, dst_index.op);
4712
4713 WHILE(__ Word32Constant(1)) {
4714 V<Any> value = __ ArrayGet(src_array, src_index_loop,
4715 src_imm.array_type, true);
4716 __ ArraySet(dst_array, dst_index_loop, value, element_type);
4717
4718 IF_NOT (__ Uint32LessThan(src_index_loop, src_end_index)) BREAK;
4719
4720 src_index_loop = __ Word32Add(src_index_loop, 1);
4721 dst_index_loop = __ Word32Add(dst_index_loop, 1);
4722 }
4723 }
4724 }
4725 }
4726 }
4727
4728 void ArrayFill(FullDecoder* decoder, const ArrayIndexImmediate& imm,
4729 const Value& array, const Value& index, const Value& value,
4730 const Value& length) {
4731 const bool emit_write_barrier =
4732 imm.array_type->element_type().is_reference();
4733 auto array_value = V<WasmArrayNullable>::Cast(array.op);
4734 V<WasmArray> array_not_null = BoundsCheckArrayWithLength(
4735 array_value, index.op, length.op,
4736 array.type.is_nullable() ? compiler::kWithNullCheck
4737 : compiler::kWithoutNullCheck);
4738 ArrayFillImpl(array_not_null, V<Word32>::Cast(index.op),
4739 V<Any>::Cast(value.op), V<Word32>::Cast(length.op),
4740 imm.array_type, emit_write_barrier);
4741 }
4742
4743 void ArrayNewFixed(FullDecoder* decoder, const ArrayIndexImmediate& array_imm,
4744 const IndexImmediate& length_imm, const Value elements[],
4745 Value* result) {
4746 const wasm::ArrayType* type = array_imm.array_type;
4747 wasm::ValueType element_type = type->element_type();
4748 int element_count = length_imm.index;
4749 // Initialize the array header.
4750 bool shared = decoder->module_->type(array_imm.index).is_shared;
4751 V<Map> rtt = __ RttCanon(managed_object_maps(shared), array_imm.index);
4752 V<WasmArray> array = __ WasmAllocateArray(rtt, element_count, type);
4753 // Initialize all elements.
4754 for (int i = 0; i < element_count; i++) {
4755 __ ArraySet(array, __ Word32Constant(i), elements[i].op, element_type);
4756 }
4757 result->op = array;
4758 }
4759
4760 void ArrayNewSegment(FullDecoder* decoder,
4761 const ArrayIndexImmediate& array_imm,
4762 const IndexImmediate& segment_imm, const Value& offset,
4763 const Value& length, Value* result) {
4764 bool is_element = array_imm.array_type->element_type().is_reference();
4765 // TODO(14616): Data segments aren't available during streaming compilation.
4766 // Discussion: github.com/WebAssembly/shared-everything-threads/issues/83
4767 bool segment_is_shared =
4768 decoder->enabled_.has_shared() &&
4769 (is_element
4770 ? decoder->module_->elem_segments[segment_imm.index].shared
4771 : decoder->module_->data_segments[segment_imm.index].shared);
4772 // TODO(14616): Add DCHECK that array sharedness is equal to `shared`?
4773 V<WasmArray> result_value =
4774 CallBuiltinThroughJumptable<BuiltinCallDescriptor::WasmArrayNewSegment>(
4775 decoder,
4776 {__ Word32Constant(segment_imm.index), offset.op, length.op,
4777 __ SmiConstant(Smi::FromInt(is_element ? 1 : 0)),
4778 __ SmiConstant(Smi::FromInt(!shared_ && segment_is_shared)),
4779 __ RttCanon(managed_object_maps(segment_is_shared),
4780 array_imm.index)});
4781 result->op = __ AnnotateWasmType(result_value, result->type);
4782 }
4783
4784 void ArrayInitSegment(FullDecoder* decoder,
4785 const ArrayIndexImmediate& array_imm,
4786 const IndexImmediate& segment_imm, const Value& array,
4787 const Value& array_index, const Value& segment_offset,
4788 const Value& length) {
4789 bool is_element = array_imm.array_type->element_type().is_reference();
4790 // TODO(14616): Segments aren't available during streaming compilation.
4791 bool segment_is_shared =
4792 decoder->enabled_.has_shared() &&
4793 (is_element
4794 ? decoder->module_->elem_segments[segment_imm.index].shared
4795 : decoder->module_->data_segments[segment_imm.index].shared);
4796 // TODO(14616): Is this too restrictive?
4797 DCHECK_EQ(segment_is_shared,
4798 decoder->module_->type(array_imm.index).is_shared);
4799 CallBuiltinThroughJumptable<BuiltinCallDescriptor::WasmArrayInitSegment>(
4800 decoder,
4801 {array_index.op, segment_offset.op, length.op,
4802 __ SmiConstant(Smi::FromInt(segment_imm.index)),
4803 __ SmiConstant(Smi::FromInt(is_element ? 1 : 0)),
4804 __ SmiConstant(Smi::FromInt((!shared_ && segment_is_shared) ? 1 : 0)),
4805 array.op});
4806 }
4807
4808 void RefI31(FullDecoder* decoder, const Value& input, Value* result) {
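 // i31ref values are encoded as Smis: the low 31 bits of the input are
 // shifted into the Smi payload, so no allocation is needed.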
4809 if constexpr (SmiValuesAre31Bits()) {
4810 V<Word32> shifted =
4811 __ Word32ShiftLeft(input.op, kSmiTagSize + kSmiShiftSize);
4812 if constexpr (Is64()) {
4813 // The uppermost bits don't matter.
4814 result->op = __ BitcastWord32ToWord64(shifted);
4815 } else {
4816 result->op = shifted;
4817 }
4818 } else {
4819 // Set the topmost bit to sign-extend the second bit. This way,
4820 // interpretation in JS (if this value escapes there) will be the same as
4821 // i31.get_s.
4822 V<WordPtr> input_wordptr = __ ChangeUint32ToUintPtr(input.op);
4823 result->op = __ WordPtrShiftRightArithmetic(
4824 __ WordPtrShiftLeft(input_wordptr, kSmiShiftSize + kSmiTagSize + 1),
4825 1);
4826 }
4827 result->op =
4828 __ AnnotateWasmType(__ BitcastWordPtrToSmi(result->op), kWasmRefI31);
4829 }
4830
4831 void I31GetS(FullDecoder* decoder, const Value& input, Value* result) {
4832 V<Object> input_non_null = NullCheck(input);
4833 if constexpr (SmiValuesAre31Bits()) {
4834 result->op = __ Word32ShiftRightArithmeticShiftOutZeros(
4835 __ TruncateWordPtrToWord32(__ BitcastTaggedToWordPtr(input_non_null)),
4836 kSmiTagSize + kSmiShiftSize);
4837 } else {
4838 // Topmost bit is already sign-extended.
4839 result->op = __ TruncateWordPtrToWord32(
4840 __ WordPtrShiftRightArithmeticShiftOutZeros(
4841 __ BitcastTaggedToWordPtr(input_non_null),
4842 kSmiTagSize + kSmiShiftSize));
4843 }
4844 }
4845
4846 void I31GetU(FullDecoder* decoder, const Value& input, Value* result) {
4847 V<Object> input_non_null = NullCheck(input);
4848 if constexpr (SmiValuesAre31Bits()) {
4849 result->op = __ Word32ShiftRightLogical(
4850 __ TruncateWordPtrToWord32(__ BitcastTaggedToWordPtr(input_non_null)),
4851 kSmiTagSize + kSmiShiftSize);
4852 } else {
4853 // Topmost bit is sign-extended, remove it.
4854 result->op = __ TruncateWordPtrToWord32(__ WordPtrShiftRightLogical(
4855 __ WordPtrShiftLeft(__ BitcastTaggedToWordPtr(input_non_null), 1),
4856 kSmiTagSize + kSmiShiftSize + 1));
4857 }
4858 }
4859
4860 void RefGetDesc(FullDecoder* decoder, const Value& ref_val, Value* result) {
4861 // Implicit null checks don't cover the map load.
4862 V<Object> ref = NullCheck(ref_val);
4863 V<Map> map = __ LoadMapField(ref);
4864 result->op = __ Load(map, LoadOp::Kind::TaggedBase().Immutable(),
4865 MemoryRepresentation::TaggedPointer(),
4866 Map::kInstanceDescriptorsOffset);
4867 }
4868
4870 // For exact target types, an exact match is needed for correctness;
4871 // for final target types, it's a performance optimization.
4872 if (target.is_exact() ||
4873 decoder->module_->type(target.ref_index()).is_final) {
4875 }
4877 }
4878
4879 void RefTest(FullDecoder* decoder, HeapType target, const Value& object,
4880 Value* result, bool null_succeeds) {
4881 V<Map> rtt = __ RttCanon(managed_object_maps(target.is_shared()),
4882 target.ref_index());
4883 compiler::WasmTypeCheckConfig config{
4884 object.type,
4885 ValueType::RefMaybeNull(target,
4886 null_succeeds ? kNullable : kNonNullable),
4887 GetExactness(decoder, target)};
4888 result->op = __ WasmTypeCheck(object.op, rtt, config);
4889 }
4890
4891 void RefTestAbstract(FullDecoder* decoder, const Value& object, HeapType type,
4892 Value* result, bool null_succeeds) {
4893 compiler::WasmTypeCheckConfig config{
4894 object.type, ValueType::RefMaybeNull(
4895 type, null_succeeds ? kNullable : kNonNullable)};
4896 V<Map> rtt = OpIndex::Invalid();
4897 result->op = __ WasmTypeCheck(object.op, rtt, config);
4898 }
4899
4900 void RefCast(FullDecoder* decoder, const Value& object, Value* result) {
4901 if (v8_flags.experimental_wasm_assume_ref_cast_succeeds) {
4902 // TODO(14108): Implement type guards.
4903 Forward(decoder, object, result);
4904 return;
4905 }
4906 ValueType target = result->type;
4907 V<Map> rtt = __ RttCanon(managed_object_maps(target.is_shared()),
4908 target.ref_index());
4909 compiler::WasmTypeCheckConfig config{
4910 object.type, target, GetExactness(decoder, target.heap_type())};
4911 result->op = __ WasmTypeCast(object.op, rtt, config);
4912 }
4913
4914 void RefCastDesc(FullDecoder* decoder, const Value& object, const Value& desc,
4915 Value* result) {
4916 if (v8_flags.experimental_wasm_assume_ref_cast_succeeds) {
4917 // TODO(14108): Implement type guards.
4918 Forward(decoder, object, result);
4919 return;
4920 }
4921 V<Map> rtt = GetRttFromDescriptor(desc);
4922 compiler::WasmTypeCheckConfig config{object.type, result->type,
4924 result->op = __ WasmTypeCast(object.op, rtt, config);
4925 }
4926
4927 void RefCastAbstract(FullDecoder* decoder, const Value& object, HeapType type,
4928 Value* result, bool null_succeeds) {
4929 if (v8_flags.experimental_wasm_assume_ref_cast_succeeds) {
4930 // TODO(14108): Implement type guards.
4931 Forward(decoder, object, result);
4932 return;
4933 }
4934 // TODO(jkummerow): {type} is redundant.
4935 DCHECK_IMPLIES(null_succeeds, result->type.is_nullable());
4936 DCHECK_EQ(type, result->type.heap_type());
4937 compiler::WasmTypeCheckConfig config{
4938 object.type, ValueType::RefMaybeNull(
4939 type, null_succeeds ? kNullable : kNonNullable)};
4940 V<Map> rtt = OpIndex::Invalid();
4941 result->op = __ WasmTypeCast(object.op, rtt, config);
4942 }
4943
4944 // TODO(jkummerow): Pass {target} to the interface.
4945 void BrOnCast(FullDecoder* decoder, HeapType target_type, const Value& object,
4946 Value* value_on_branch, uint32_t br_depth, bool null_succeeds) {
4947 // Note: we cannot just take {ref_index} and {null_succeeds} from
4948 // {value_on_branch->type} because of
4949 // https://github.com/WebAssembly/gc/issues/516.
4950 ValueType target = ValueType::RefMaybeNull(
4951 target_type, null_succeeds ? kNullable : kNonNullable);
4952 V<Map> rtt = __ RttCanon(managed_object_maps(target.is_shared()),
4953 target_type.ref_index());
4954 compiler::WasmTypeCheckConfig config{object.type, target,
4955 GetExactness(decoder, target_type)};
4956 return BrOnCastImpl(decoder, rtt, config, object, value_on_branch, br_depth,
4957 null_succeeds);
4958 }
4959
4960 void BrOnCastDesc(FullDecoder* decoder, HeapType target_type,
4961 const Value& object, const Value& descriptor,
4962 Value* value_on_branch, uint32_t br_depth,
4963 bool null_succeeds) {
4964 ValueType target = ValueType::RefMaybeNull(
4965 target_type, null_succeeds ? kNullable : kNonNullable);
4966 V<Map> rtt = GetRttFromDescriptor(descriptor);
4967 compiler::WasmTypeCheckConfig config{object.type, target,
4969 return BrOnCastImpl(decoder, rtt, config, object, value_on_branch, br_depth,
4970 null_succeeds);
4971 }
4972
4973 void BrOnCastAbstract(FullDecoder* decoder, const Value& object,
4974 HeapType type, Value* value_on_branch,
4975 uint32_t br_depth, bool null_succeeds) {
4976 V<Map> rtt = OpIndex::Invalid();
4977 compiler::WasmTypeCheckConfig config{
4978 object.type, ValueType::RefMaybeNull(
4979 type, null_succeeds ? kNullable : kNonNullable)};
4980 return BrOnCastImpl(decoder, rtt, config, object, value_on_branch, br_depth,
4981 null_succeeds);
4982 }
4983
4984 void BrOnCastFail(FullDecoder* decoder, HeapType target_type,
4985 const Value& object, Value* value_on_fallthrough,
4986 uint32_t br_depth, bool null_succeeds) {
4987 ValueType target = ValueType::RefMaybeNull(
4988 target_type, null_succeeds ? kNullable : kNonNullable);
4989 V<Map> rtt = __ RttCanon(managed_object_maps(target.is_shared()),
4990 target_type.ref_index());
4991 compiler::WasmTypeCheckConfig config{object.type, target,
4992 GetExactness(decoder, target_type)};
4993 return BrOnCastFailImpl(decoder, rtt, config, object, value_on_fallthrough,
4994 br_depth, null_succeeds);
4995 }
4996
4997 void BrOnCastDescFail(FullDecoder* decoder, HeapType target_type,
4998 const Value& object, const Value& descriptor,
4999 Value* value_on_fallthrough, uint32_t br_depth,
5000 bool null_succeeds) {
5001 ValueType target = ValueType::RefMaybeNull(
5002 target_type, null_succeeds ? kNullable : kNonNullable);
5003 V<Map> rtt = GetRttFromDescriptor(descriptor);
5004 compiler::WasmTypeCheckConfig config{object.type, target,
5006 return BrOnCastFailImpl(decoder, rtt, config, object, value_on_fallthrough,
5007 br_depth, null_succeeds);
5008 }
5009
5010 void BrOnCastFailAbstract(FullDecoder* decoder, const Value& object,
5011 HeapType type, Value* value_on_fallthrough,
5012 uint32_t br_depth, bool null_succeeds) {
5013 V<Map> rtt = OpIndex::Invalid();
5014 compiler::WasmTypeCheckConfig config{
5015 object.type, ValueType::RefMaybeNull(
5016 type, null_succeeds ? kNullable : kNonNullable)};
5017 return BrOnCastFailImpl(decoder, rtt, config, object, value_on_fallthrough,
5018 br_depth, null_succeeds);
5019 }
5020
5021 void StringNewWtf8(FullDecoder* decoder, const MemoryIndexImmediate& imm,
5022 const unibrow::Utf8Variant variant, const Value& offset,
5023 const Value& size, Value* result) {
5024 V<Word32> memory = __ Word32Constant(imm.index);
5025 V<Smi> variant_smi =
5026 __ SmiConstant(Smi::FromInt(static_cast<int>(variant)));
5027 V<WordPtr> index =
5028 MemoryAddressToUintPtrOrOOBTrap(imm.memory->address_type, offset.op);
5029 V<WasmStringRefNullable> result_value =
5030 CallBuiltinThroughJumptable<BuiltinCallDescriptor::WasmStringNewWtf8>(
5031 decoder, {index, size.op, memory, variant_smi});
5032 result->op = __ AnnotateWasmType(result_value, result->type);
5033 }
5034
5035 // TODO(jkummerow): This check would be more elegant if we made
5036 // {ArrayNewSegment} a high-level node that's lowered later.
5037 // Returns the call on success, nullptr otherwise (like `TryCast`).
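 // The value may still be wrapped in WasmTypeAnnotation and DidntThrow
 // operations; these are peeled off before matching the underlying call
 // against the WasmArrayNewSegment builtin.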
5038 const CallOp* IsArrayNewSegment(V<Object> array) {
5039 DCHECK_IMPLIES(!array.valid(), __ generating_unreachable_operations());
5040 if (__ generating_unreachable_operations()) return nullptr;
5041 if (const WasmTypeAnnotationOp* annotation =
5042 __ output_graph().Get(array).TryCast<WasmTypeAnnotationOp>()) {
5043 array = annotation->value();
5044 }
5045 if (const DidntThrowOp* didnt_throw =
5046 __ output_graph().Get(array).TryCast<DidntThrowOp>()) {
5047 array = didnt_throw->throwing_operation();
5048 }
5049 const CallOp* call = __ output_graph().Get(array).TryCast<CallOp>();
5050 if (call == nullptr) return nullptr;
5051 uint64_t stub_id{};
5052 if (!OperationMatcher(__ output_graph())
5053 .MatchWasmStubCallConstant(call->callee(), &stub_id)) {
5054 return nullptr;
5055 }
5056 DCHECK_LT(stub_id, static_cast<uint64_t>(Builtin::kFirstBytecodeHandler));
5057 if (stub_id == static_cast<uint64_t>(Builtin::kWasmArrayNewSegment)) {
5058 return call;
5059 }
5060 return nullptr;
5061 }
5062
5064 const unibrow::Utf8Variant variant,
5065 const Value& array, const Value& start,
5066 const Value& end,
5067 ValueType result_type) {
5068 // Special case: shortcut a sequence "array from data segment" + "string
5069 // from wtf8 array" to directly create a string from the segment.
5071 if (const CallOp* array_new = IsArrayNewSegment(array.op)) {
5072 // We can only pass 3 untagged parameters to the builtin (on 32-bit
5073 // platforms). The segment index is easy to tag: if it validated, it must
5074 // be in Smi range.
5075 OpIndex segment_index = array_new->input(1);
5076 int32_t index_val;
5077 OperationMatcher(__ output_graph())
5078 .MatchIntegralWord32Constant(segment_index, &index_val);
5079 V<Smi> index_smi = __ SmiConstant(Smi::FromInt(index_val));
5080 // Arbitrary choice for the second tagged parameter: the segment offset.
5081 OpIndex segment_offset = array_new->input(2);
5082 __ TrapIfNot(
5083 __ Uint32LessThan(segment_offset, __ Word32Constant(Smi::kMaxValue)),
5084 OpIndex::Invalid(), TrapId::kTrapDataSegmentOutOfBounds);
5085 V<Smi> offset_smi = __ TagSmi(segment_offset);
5086 OpIndex segment_length = array_new->input(3);
5087 V<Smi> variant_smi =
5088 __ SmiConstant(Smi::FromInt(static_cast<int32_t>(variant)));
5090 BuiltinCallDescriptor::WasmStringFromDataSegment>(
5091 decoder, {segment_length, start.op, end.op, index_smi, offset_smi,
5092 variant_smi});
5093 } else {
5094 // Regular path if the shortcut wasn't taken.
5096 BuiltinCallDescriptor::WasmStringNewWtf8Array>(
5097 decoder,
5098 {start.op, end.op, V<WasmArray>::Cast(NullCheck(array)),
5099 __ SmiConstant(Smi::FromInt(static_cast<int32_t>(variant)))});
5100 }
5101 DCHECK_IMPLIES(variant == unibrow::Utf8Variant::kUtf8NoTrap,
5102 result_type.is_nullable());
5103 // The builtin returns a WasmNull for kUtf8NoTrap, so nullable values in
5104 // combination with extern strings are not supported.
5105 DCHECK_NE(result_type, wasm::kWasmExternRef);
5106 return AnnotateAsString(call, result_type);
5107 }
5108
5110 const unibrow::Utf8Variant variant,
5111 const Value& array, const Value& start,
5112 const Value& end, Value* result) {
5113 result->op = StringNewWtf8ArrayImpl(decoder, variant, array, start, end,
5114 result->type);
5115 }
5116
5118 const Value& offset, const Value& size, Value* result) {
5119 V<WordPtr> index =
5120 MemoryAddressToUintPtrOrOOBTrap(imm.memory->address_type, offset.op);
5121 V<String> result_value =
5123 decoder, {__ Word32Constant(imm.index), index, size.op});
5124 result->op = __ AnnotateWasmType(result_value, result->type);
5125 }
5126
5127 void StringNewWtf16Array(FullDecoder* decoder, const Value& array,
5128 const Value& start, const Value& end,
5129 Value* result) {
5131 BuiltinCallDescriptor::WasmStringNewWtf16Array>(
5132 decoder, {V<WasmArray>::Cast(NullCheck(array)), start.op, end.op});
5133 result->op = __ AnnotateWasmType(result_value, result->type);
5134 }
5135
5137 Value* result) {
5138 V<String> result_value =
5140 decoder, {__ Word32Constant(imm.index)});
5141 result->op = __ AnnotateWasmType(result_value, result->type);
5142 }
5143
5144 void StringMeasureWtf8(FullDecoder* decoder,
5145 const unibrow::Utf8Variant variant, const Value& str,
5146 Value* result) {
5147 result->op = StringMeasureWtf8Impl(decoder, variant,
5148 V<String>::Cast(NullCheck(str)));
5149 }
5150
5152 const unibrow::Utf8Variant variant,
5153 V<String> string) {
5154 switch (variant) {
5155 case unibrow::Utf8Variant::kUtf8:
5157 BuiltinCallDescriptor::WasmStringMeasureUtf8>(decoder, {string});
5159 case unibrow::Utf8Variant::kWtf8:
5161 BuiltinCallDescriptor::WasmStringMeasureWtf8>(decoder, {string});
5162 case unibrow::Utf8Variant::kUtf8NoTrap:
5163 UNREACHABLE();
5164 }
5165 }
5166
5167 V<Word32> LoadStringLength(V<Object> string) {
5168 return __ template LoadField<Word32>(
5169 string, AccessBuilder::ForStringLength());
5170 }
5171
5172 void StringMeasureWtf16(FullDecoder* decoder, const Value& str,
5173 Value* result) {
5174 result->op = LoadStringLength(NullCheck(str));
5175 }
5176
5178 const MemoryIndexImmediate& memory,
5179 const unibrow::Utf8Variant variant, const Value& str,
5180 const Value& offset, Value* result) {
5181 V<WordPtr> address =
5182 MemoryAddressToUintPtrOrOOBTrap(memory.memory->address_type, offset.op);
5183 V<Word32> mem_index = __ Word32Constant(memory.index);
5184 V<Word32> utf8 = __ Word32Constant(static_cast<int32_t>(variant));
5186 BuiltinCallDescriptor::WasmStringEncodeWtf8>(
5187 decoder, {address, mem_index, utf8, V<String>::Cast(NullCheck(str))});
5188 }
5189
5191 const unibrow::Utf8Variant variant,
5192 const Value& str, const Value& array,
5193 const Value& start, Value* result) {
5195 decoder, variant, V<String>::Cast(NullCheck(str)),
5196 V<WasmArray>::Cast(NullCheck(array)), start.op);
5197 }
5198
5200 const unibrow::Utf8Variant variant,
5201 V<String> str, V<WasmArray> array,
5202 V<Word32> start) {
5203 V<Smi> utf8 = __ SmiConstant(Smi::FromInt(static_cast<int32_t>(variant)));
5205 BuiltinCallDescriptor::WasmStringEncodeWtf8Array>(
5206 decoder, {str, array, start, utf8});
5207 }
5208
5210 const Value& str, const Value& offset, Value* result) {
5211 V<WordPtr> address =
5212 MemoryAddressToUintPtrOrOOBTrap(imm.memory->address_type, offset.op);
5213 V<Word32> mem_index = __ Word32Constant(static_cast<int32_t>(imm.index));
5215 BuiltinCallDescriptor::WasmStringEncodeWtf16>(
5216 decoder, {V<String>::Cast(NullCheck(str)), address, mem_index});
5217 }
5218
5219 void StringEncodeWtf16Array(FullDecoder* decoder, const Value& str,
5220 const Value& array, const Value& start,
5221 Value* result) {
5223 BuiltinCallDescriptor::WasmStringEncodeWtf16Array>(
5224 decoder, {V<String>::Cast(NullCheck(str)),
5225 V<WasmArray>::Cast(NullCheck(array)), start.op});
5226 }
5227
5228 void StringConcat(FullDecoder* decoder, const Value& head, const Value& tail,
5229 Value* result) {
5231 V<String> result_value =
5233 decoder, native_context,
5234 {V<String>::Cast(NullCheck(head)),
5235 V<String>::Cast(NullCheck(tail))});
5236 result->op = __ AnnotateWasmType(result_value, result->type);
5237 }
5238
5240 ValueType a_type, ValueType b_type) {
5241 Label<Word32> done(&asm_);
5242 // Covers "identical string pointer" and "both are null" cases.
5243 GOTO_IF(__ TaggedEqual(a, b), done, __ Word32Constant(1));
5244 if (a_type.is_nullable()) {
5245 GOTO_IF(__ IsNull(a, a_type), done, __ Word32Constant(0));
5246 }
5247 if (b_type.is_nullable()) {
5248 GOTO_IF(__ IsNull(b, b_type), done, __ Word32Constant(0));
5249 }
5250
5251 // Strings with unequal lengths can't be equal.
5252 V<Word32> a_length =
5253 __ LoadField<Word32>(a, AccessBuilder::ForStringLength());
5254 V<Word32> b_length =
5255 __ LoadField<Word32>(b, AccessBuilder::ForStringLength());
5256 GOTO_IF_NOT(__ Word32Equal(a_length, b_length), done, __ Word32Constant(0));
5257
5258 // Comparing the strings' hashes before calling the builtin has been tried,
5259 // but isn't likely to be useful unless Wasm gains an ability to trigger
5260 // string hash computation (or internalization, for that matter).
5261
5262 V<WordPtr> length = __ ChangeInt32ToIntPtr(a_length);
5265 decoder, {a, b, length});
5266
5267 GOTO(done, __ IsRootConstant(result, RootIndex::kTrueValue));
5268
5269 BIND(done, eq_result);
5270 return eq_result;
5271 }
5272
5273 void StringEq(FullDecoder* decoder, const Value& a, const Value& b,
5274 Value* result) {
5275 result->op = StringEqImpl(decoder, a.op, b.op, a.type, b.type);
5276 }
5277
5278 void StringIsUSVSequence(FullDecoder* decoder, const Value& str,
5279 Value* result) {
5281 BuiltinCallDescriptor::WasmStringIsUSVSequence>(
5282 decoder, {V<String>::Cast(NullCheck(str))});
5283 }
5284
5285 void StringAsWtf8(FullDecoder* decoder, const Value& str, Value* result) {
5286 V<ByteArray> result_value =
5288 decoder, {V<String>::Cast(NullCheck(str))});
5289 result->op = __ AnnotateWasmType(result_value, result->type);
5290 }
5291
5292 void StringViewWtf8Advance(FullDecoder* decoder, const Value& view,
5293 const Value& pos, const Value& bytes,
5294 Value* result) {
5296 BuiltinCallDescriptor::WasmStringViewWtf8Advance>(
5297 decoder, {V<ByteArray>::Cast(NullCheck(view)), pos.op, bytes.op});
5298 }
5299
5301 const MemoryIndexImmediate& memory,
5302 const unibrow::Utf8Variant variant,
5303 const Value& view, const Value& addr,
5304 const Value& pos, const Value& bytes,
5305 Value* next_pos, Value* bytes_written) {
5306 V<WordPtr> address =
5307 MemoryAddressToUintPtrOrOOBTrap(memory.memory->address_type, addr.op);
5308 V<Smi> mem_index = __ SmiConstant(Smi::FromInt(memory.index));
5309 V<Smi> utf8 = __ SmiConstant(Smi::FromInt(static_cast<int32_t>(variant)));
5311 BuiltinCallDescriptor::WasmStringViewWtf8Encode>(
5312 decoder, {address, pos.op, bytes.op,
5313 V<ByteArray>::Cast(NullCheck(view)), mem_index, utf8});
5314 next_pos->op = __ Projection(result, 0, RepresentationFor(next_pos->type));
5315 bytes_written->op =
5316 __ Projection(result, 1, RepresentationFor(bytes_written->type));
5317 }
5318
5319 void StringViewWtf8Slice(FullDecoder* decoder, const Value& view,
5320 const Value& start, const Value& end,
5321 Value* result) {
5323 BuiltinCallDescriptor::WasmStringViewWtf8Slice>(
5324 decoder, {V<ByteArray>::Cast(NullCheck(view)), start.op, end.op});
5325 result->op = __ AnnotateWasmType(result_value, result->type);
5326 }
5327
5328 void StringAsWtf16(FullDecoder* decoder, const Value& str, Value* result) {
5330 }
5331
5333 V<Word32> offset) {
5334 auto prepare = __ StringPrepareForGetCodeUnit(string);
5335 V<Object> base = __ template Projection<0>(prepare);
5336 V<WordPtr> base_offset = __ template Projection<1>(prepare);
5337 V<Word32> charwidth_shift = __ template Projection<2>(prepare);
5338
5339 // Bounds check.
5340 V<Word32> length = LoadStringLength(string);
5341 __ TrapIfNot(__ Uint32LessThan(offset, length),
5342 TrapId::kTrapStringOffsetOutOfBounds);
5343
5344 Label<> onebyte(&asm_);
5345 Label<> bailout(&asm_);
5346 Label<Word32> done(&asm_);
5347 GOTO_IF(UNLIKELY(__ Word32Equal(charwidth_shift,
5349 bailout);
5350 GOTO_IF(__ Word32Equal(charwidth_shift, 0), onebyte);
5351
5352 // Two-byte.
5353 V<WordPtr> object_offset = __ WordPtrAdd(
5354 __ WordPtrMul(__ ChangeInt32ToIntPtr(offset), 2), base_offset);
5355 // Bitcast the tagged to a wordptr as the offset already contains the
5356 // kHeapObjectTag handling. Furthermore, in case of external strings the
5357 // tagged value is a smi 0, which doesn't really encode a tagged load.
5358 V<WordPtr> base_ptr = __ BitcastTaggedToWordPtr(base);
5359 V<Word32> result_value =
5360 __ Load(base_ptr, object_offset, LoadOp::Kind::RawAligned().Immutable(),
5362 GOTO(done, result_value);
5363
5364 // One-byte.
5365 BIND(onebyte);
5366 object_offset = __ WordPtrAdd(__ ChangeInt32ToIntPtr(offset), base_offset);
5367 // Bitcast the tagged to a wordptr as the offset already contains the
5368 // kHeapObjectTag handling. Furthermore, in case of external strings the
5369 // tagged value is a smi 0, which doesn't really encode a tagged load.
5370 base_ptr = __ BitcastTaggedToWordPtr(base);
5371 result_value =
5372 __ Load(base_ptr, object_offset, LoadOp::Kind::RawAligned().Immutable(),
5374 GOTO(done, result_value);
5375
5376 BIND(bailout);
5378 BuiltinCallDescriptor::WasmStringViewWtf16GetCodeUnit>(
5379 decoder, {string, offset}));
5380
5381 BIND(done, final_result);
5382 // Make sure the original string is kept alive as long as we're operating
5383 // on pointers extracted from it (otherwise e.g. external strings' resources
5384 // might get freed prematurely).
5385 __ Retain(string);
5386 return final_result;
5387 }
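// Summary of the fast path above (illustrative, restating the code rather
// than adding to it): {StringPrepareForGetCodeUnit} yields (base, base_offset,
// charwidth_shift), and the code unit at index i lives at
//   base_offset + (i << charwidth_shift)
// from base, i.e. shift 0 for one-byte and shift 1 for two-byte strings; the
// kCharWidthBailoutSentinel value means the characters are not directly
// addressable, so the builtin slow path is taken instead.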
5388
5390 const Value& pos, Value* result) {
5391 result->op =
5392 GetCodeUnitImpl(decoder, V<String>::Cast(NullCheck(view)), pos.op);
5393 }
5394
5396 V<Word32> offset) {
5397 auto prepare = __ StringPrepareForGetCodeUnit(string);
5398 V<Object> base = __ template Projection<0>(prepare);
5399 V<WordPtr> base_offset = __ template Projection<1>(prepare);
5400 V<Word32> charwidth_shift = __ template Projection<2>(prepare);
5401
5402 // Bounds check.
5403 V<Word32> length = LoadStringLength(string);
5404 __ TrapIfNot(__ Uint32LessThan(offset, length),
5405 TrapId::kTrapStringOffsetOutOfBounds);
5406
5407 Label<> onebyte(&asm_);
5408 Label<> bailout(&asm_);
5409 Label<Word32> done(&asm_);
5410 GOTO_IF(
5411 __ Word32Equal(charwidth_shift, compiler::kCharWidthBailoutSentinel),
5412 bailout);
5413 GOTO_IF(__ Word32Equal(charwidth_shift, 0), onebyte);
5414
5415 // Two-byte.
5416 V<WordPtr> object_offset = __ WordPtrAdd(
5417 __ WordPtrMul(__ ChangeInt32ToIntPtr(offset), 2), base_offset);
5418 // Bitcast the tagged to a wordptr as the offset already contains the
5419 // kHeapObjectTag handling. Furthermore, in case of external strings the
5420 // tagged value is a smi 0, which doesn't really encode a tagged load.
5421 V<WordPtr> base_ptr = __ BitcastTaggedToWordPtr(base);
5422 V<Word32> lead =
5423 __ Load(base_ptr, object_offset, LoadOp::Kind::RawAligned().Immutable(),
5425 V<Word32> is_lead_surrogate =
5426 __ Word32Equal(__ Word32BitwiseAnd(lead, 0xFC00), 0xD800);
5427 GOTO_IF_NOT(is_lead_surrogate, done, lead);
5428 V<Word32> trail_offset = __ Word32Add(offset, 1);
5429 GOTO_IF_NOT(__ Uint32LessThan(trail_offset, length), done, lead);
5430 V<Word32> trail = __ Load(
5431 base_ptr, __ WordPtrAdd(object_offset, __ IntPtrConstant(2)),
5433 V<Word32> is_trail_surrogate =
5434 __ Word32Equal(__ Word32BitwiseAnd(trail, 0xFC00), 0xDC00);
5435 GOTO_IF_NOT(is_trail_surrogate, done, lead);
5436 V<Word32> surrogate_bias =
5437 __ Word32Constant(0x10000 - (0xD800 << 10) - 0xDC00);
5438 V<Word32> result = __ Word32Add(__ Word32ShiftLeft(lead, 10),
5439 __ Word32Add(trail, surrogate_bias));
5440 GOTO(done, result);
5441
5442 // One-byte.
5443 BIND(onebyte);
5444 object_offset = __ WordPtrAdd(__ ChangeInt32ToIntPtr(offset), base_offset);
5445 // Bitcast the tagged to a wordptr as the offset already contains the
5446 // kHeapObjectTag handling. Furthermore, in case of external strings the
5447 // tagged value is a smi 0, which doesn't really encode a tagged load.
5448 base_ptr = __ BitcastTaggedToWordPtr(base);
5449 result =
5450 __ Load(base_ptr, object_offset, LoadOp::Kind::RawAligned().Immutable(),
5452 GOTO(done, result);
5453
5454 BIND(bailout);
5456 BuiltinCallDescriptor::WasmStringCodePointAt>(
5457 decoder, {string, offset}));
5458
5459 BIND(done, final_result);
5460 // Make sure the original string is kept alive as long as we're operating
5461 // on pointers extracted from it (otherwise e.g. external strings' resources
5462 // might get freed prematurely).
5463 __ Retain(string);
5464 return final_result;
5465 }
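// Worked example for the surrogate combination above (illustrative; the
// sample pair is an assumption, not taken from the source): with
//   surrogate_bias = 0x10000 - (0xD800 << 10) - 0xDC00,
// the result (lead << 10) + trail + surrogate_bias equals
// ((lead - 0xD800) << 10) + (trail - 0xDC00) + 0x10000; e.g. lead = 0xD83D,
// trail = 0xDE00 yields 0xF400 + 0x200 + 0x10000 = 0x1F600 (U+1F600).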
5466
5468 const MemoryIndexImmediate& imm, const Value& view,
5469 const Value& offset, const Value& pos,
5470 const Value& codeunits, Value* result) {
5471 V<String> string = V<String>::Cast(NullCheck(view));
5472 V<WordPtr> address =
5473 MemoryAddressToUintPtrOrOOBTrap(imm.memory->address_type, offset.op);
5474 V<Smi> mem_index = __ SmiConstant(Smi::FromInt(imm.index));
5476 BuiltinCallDescriptor::WasmStringViewWtf16Encode>(
5477 decoder, {address, pos.op, codeunits.op, string, mem_index});
5478 }
5479
5480 void StringViewWtf16Slice(FullDecoder* decoder, const Value& view,
5481 const Value& start, const Value& end,
5482 Value* result) {
5483 V<String> string = V<String>::Cast(NullCheck(view));
5485 BuiltinCallDescriptor::WasmStringViewWtf16Slice>(
5486 decoder, {string, start.op, end.op});
5487 result->op = __ AnnotateWasmType(result_value, result->type);
5488 }
5489
5490 void StringAsIter(FullDecoder* decoder, const Value& str, Value* result) {
5491 V<String> string = V<String>::Cast(NullCheck(str));
5492 V<WasmStringViewIter> result_value =
5494 decoder, {string});
5495 result->op = __ AnnotateWasmType(result_value, result->type);
5496 }
5497
5498 void StringViewIterNext(FullDecoder* decoder, const Value& view,
5499 Value* result) {
5502 BuiltinCallDescriptor::WasmStringViewIterNext>(decoder, {iter});
5503 }
5504
5505 void StringViewIterAdvance(FullDecoder* decoder, const Value& view,
5506 const Value& codepoints, Value* result) {
5509 BuiltinCallDescriptor::WasmStringViewIterAdvance>(
5510 decoder, {iter, codepoints.op});
5511 }
5512
5513 void StringViewIterRewind(FullDecoder* decoder, const Value& view,
5514 const Value& codepoints, Value* result) {
5517 BuiltinCallDescriptor::WasmStringViewIterRewind>(decoder,
5518 {iter, codepoints.op});
5519 }
5520
5521 void StringViewIterSlice(FullDecoder* decoder, const Value& view,
5522 const Value& codepoints, Value* result) {
5525 BuiltinCallDescriptor::WasmStringViewIterSlice>(decoder,
5526 {iter, codepoints.op});
5527 result->op = __ AnnotateWasmType(result_value, result->type);
5528 }
5529
5530 void StringCompare(FullDecoder* decoder, const Value& lhs, const Value& rhs,
5531 Value* result) {
5532 V<String> lhs_val = V<String>::Cast(NullCheck(lhs));
5533 V<String> rhs_val = V<String>::Cast(NullCheck(rhs));
5534 result->op = __ UntagSmi(
5536 decoder, {lhs_val, rhs_val}));
5537 }
5538
5539 void StringFromCodePoint(FullDecoder* decoder, const Value& code_point,
5540 Value* result) {
5542 BuiltinCallDescriptor::WasmStringFromCodePoint>(decoder,
5543 {code_point.op});
5544 result->op = __ AnnotateWasmType(result_value, result->type);
5545 }
5546
5547 void StringHash(FullDecoder* decoder, const Value& string, Value* result) {
5548 V<String> string_val = V<String>::Cast(NullCheck(string));
5549
5550 Label<> runtime_label(&Asm());
5551 Label<Word32> end_label(&Asm());
5552
5553 V<Word32> raw_hash = __ template LoadField<Word32>(
5555 V<Word32> hash_not_computed_mask =
5556 __ Word32Constant(static_cast<int32_t>(Name::kHashNotComputedMask));
5557 static_assert(Name::HashFieldTypeBits::kShift == 0);
5558 V<Word32> hash_not_computed =
5559 __ Word32BitwiseAnd(raw_hash, hash_not_computed_mask);
5560 GOTO_IF(hash_not_computed, runtime_label);
5561
5562 // Fast path if hash is already computed: Decode raw hash value.
5563 static_assert(Name::HashBits::kLastUsedBit == kBitsPerInt - 1);
5564 V<Word32> hash = __ Word32ShiftRightLogical(
5565 raw_hash, static_cast<int32_t>(Name::HashBits::kShift));
5566 GOTO(end_label, hash);
5567
5568 BIND(runtime_label);
5569 V<Word32> hash_runtime =
5571 decoder, {string_val});
5572 GOTO(end_label, hash_runtime);
5573
5574 BIND(end_label, hash_val);
5575 result->op = hash_val;
5576 }
5577
5578 void Forward(FullDecoder* decoder, const Value& from, Value* to) {
5579 to->op = from.op;
5580 }
5581
5582 private:
5583 // The InstanceCache caches commonly used fields of the
5584 // WasmTrustedInstanceData.
5585 // We can extend the set of cached fields as needed.
5586 // This caching serves two purposes:
5587 // (1) It makes sure that the respective fields are loaded early on, as
5588 // opposed to within conditional branches, so the values are easily
5589 // reusable.
5590 // (2) It makes sure that the loaded values are actually reused.
5591 // It achieves these effects more reliably and more cheaply than general-
5592 // purpose optimizations could (loop peeling isn't always used; load
5593 // elimination struggles with arbitrary side effects of indexed stores;
5594 // we don't currently have a generic mechanism for hoisting loads out of
5595 // conditional branches).
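// A minimal sketch of the intended effect (illustrative only; the helper and
// field names are placeholders, not the real API):
//   // Without the cache, each branch may reload the instance field:
//   if (cond) { Use(LoadInstanceField(kMemStart)); }
//   else      { Use(LoadInstanceField(kMemStart)); }
//   // With the cache, the field is loaded once and its SSA value is reused:
//   V<WordPtr> mem_start = LoadInstanceField(kMemStart);
//   if (cond) { Use(mem_start); } else { Use(mem_start); }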
5596 class InstanceCache {
5597 public:
5598 explicit InstanceCache(Assembler& assembler)
5599 : mem_start_(assembler), mem_size_(assembler), asm_(assembler) {}
5600
5602 const WasmModule* mod) {
5603 DCHECK(!trusted_data_.valid()); // Only call {Initialize()} once.
5604 trusted_data_ = trusted_instance_data;
5605 managed_object_maps_ =
5608 WasmTrustedInstanceData::kManagedObjectMapsOffset);
5612 WasmTrustedInstanceData::kNativeContextOffset);
5613
5614 if (!mod->memories.empty()) {
5615#if DEBUG
5616 has_memory_ = true;
5617#endif
5618 const WasmMemory& mem = mod->memories[0];
5619 memory_can_grow_ = mem.initial_pages != mem.maximum_pages;
5620 // For now, we don't cache the size of shared growable memories.
5621 // If we wanted to support this case, we would have to reload the
5622 // memory size when loop stack checks detect an interrupt request.
5623 // Since memory size caching is particularly important for asm.js,
5624 // which never uses growable or shared memories, this limitation is
5625 // considered acceptable for now.
5626 memory_size_cached_ = !mem.is_shared || !memory_can_grow_;
5627 // Trap handler enabled memories never move.
5628 // Memories that can't grow have no reason to move.
5629 // Shared memories can only be grown in-place.
5630 memory_can_move_ = mem.bounds_checks != kTrapHandler &&
5631 memory_can_grow_ && !mem.is_shared;
5632 memory_is_shared_ = mem.is_shared;
5633 if (memory_size_cached_) {
5634 mem_size_ = LoadMemSize();
5635 }
5636 mem_start_ = LoadMemStart();
5637 }
5638 }
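// Worked example for these flags (illustrative module, not from the source):
// a non-shared memory with initial == maximum pages and trap-handler bounds
// checks gets memory_can_grow_ = false, memory_size_cached_ = true, and
// memory_can_move_ = false, so both the cached start and size can be treated
// as immutable for the whole function.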
5639
5640 // TODO(14108): Port the dynamic "cached_memory_index" infrastructure
5641 // from Turbofan.
5643 if (memory_can_move()) mem_start_ = LoadMemStart();
5644 if (memory_can_grow_ && memory_size_cached_) mem_size_ = LoadMemSize();
5645 }
5646
5648 V<FixedArray> managed_object_maps() { return managed_object_maps_; }
5651 DCHECK(has_memory_);
5652 return mem_start_;
5653 }
5655 DCHECK(has_memory_);
5656 if (!memory_size_cached_) return LoadMemSize();
5657 return mem_size_;
5658 }
5659
5660 private:
5661 static constexpr uint8_t kUnused = ~uint8_t{0};
5662
5664 DCHECK(has_memory_);
5665 // In contrast to memory size loads, we can mark memory start loads as
5666 // eliminable: shared memories never move, and non-shared memories can't
5667 // have their start modified by other threads.
5669 if (!memory_can_move()) kind = kind.Immutable();
5670 return __ Load(trusted_data_, kind, MemoryRepresentation::UintPtr(),
5671 WasmTrustedInstanceData::kMemory0StartOffset);
5672 }
5673
5675 DCHECK(has_memory_);
5677 if (memory_is_shared_ && memory_can_grow_) {
5678 // Memory size loads should not be load-eliminated as the memory size
5679 // can be modified by another thread.
5680 kind = kind.NotLoadEliminable();
5681 }
5682 if (!memory_can_grow_) kind = kind.Immutable();
5683 return __ Load(trusted_data_, kind, MemoryRepresentation::UintPtr(),
5684 WasmTrustedInstanceData::kMemory0SizeOffset);
5685 }
5686
5687 bool memory_can_move() { return memory_can_move_; }
5688
5689 // For compatibility with `__` macro.
5690 Assembler& Asm() { return asm_; }
5691
5692 // Cached immutable fields (need no Phi nodes):
5696
5697 // Cached mutable fields:
5700
5701 // Other fields for internal usage.
5703 bool memory_is_shared_{false};
5704 bool memory_can_grow_{false};
5705 bool memory_can_move_{false};
5706 bool memory_size_cached_{false};
5707#if DEBUG
5708 bool has_memory_{false};
5709#endif
5710 };
5711
5713
5714 private:
5715 // Holds phi inputs for a specific block. These include SSA values, stack
5716 // merge values, and cached fields from the instance.
5717 // Conceptually, this is a two-dimensional, rectangular array of size
5718 // `phi_count * inputs_per_phi`, since each phi has the same number of inputs,
5719 // namely the number of incoming edges for this block.
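// Small example (illustrative numbers): a block with 3 locals and a merge of
// arity 1 has phi_count == 4; if three forward edges target the block, each
// of those 4 phis ends up with 3 inputs, one per incoming edge.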
5720 class BlockPhis {
5721 public:
5722 // Ctor for regular blocks.
5723 BlockPhis(FullDecoder* decoder, Merge<Value>* merge)
5724 : incoming_exceptions_(decoder->zone()) {
5725 // Allocate space and initialize the types of all phis.
5726 uint32_t num_locals = decoder->num_locals();
5727 uint32_t merge_arity = merge != nullptr ? merge->arity : 0;
5728
5729 phi_count_ = num_locals + merge_arity;
5730 phi_types_ = decoder->zone()->AllocateArray<ValueType>(phi_count_);
5731
5732 base::Vector<ValueType> locals = decoder->local_types();
5733 std::uninitialized_copy(locals.begin(), locals.end(), phi_types_);
5734 for (uint32_t i = 0; i < merge_arity; i++) {
5735 new (&phi_types_[num_locals + i]) ValueType((*merge)[i].type);
5736 }
5737 AllocatePhiInputs(decoder->zone());
5738 }
5739
5740 // Consider this "private"; it's next to the constructors (where it's
5741 // called) for context.
5742 void AllocatePhiInputs(Zone* zone) {
5743 // Only reserve some space for the inputs to be added later.
5744 phi_inputs_capacity_total_ = phi_count_ * input_capacity_per_phi_;
5745 phi_inputs_ = zone->AllocateArray<OpIndex>(phi_inputs_capacity_total_);
5746
5747#ifdef DEBUG
5748 constexpr uint32_t kNoInputs = 0;
5749 input_count_per_phi_ = std::vector(phi_count_, kNoInputs);
5750#endif
5751 }
5752
5753 // Default ctor and later initialization for function returns.
5754 explicit BlockPhis(Zone* zone) : incoming_exceptions_(zone) {}
5755 void InitReturnPhis(base::Vector<const ValueType> return_types) {
5756 // For `return_phis_`, nobody should have inserted into `this` before
5757 // calling `InitReturnPhis`.
5758 DCHECK_EQ(phi_count_, 0);
5759 DCHECK_EQ(inputs_per_phi_, 0);
5760
5761 uint32_t return_count = static_cast<uint32_t>(return_types.size());
5762 phi_count_ = return_count;
5763 phi_types_ = zone()->AllocateArray<ValueType>(phi_count_);
5764
5765 std::uninitialized_copy(return_types.begin(), return_types.end(),
5766 phi_types_);
5767 AllocatePhiInputs(zone());
5768 }
5769
5770 void AddInputForPhi(size_t phi_i, OpIndex input) {
5771 if (V8_UNLIKELY(phi_inputs_total_ >= phi_inputs_capacity_total_)) {
5772 GrowInputsVector();
5773 }
5774
5775#ifdef DEBUG
5776 // We rely on adding inputs in the order of phis, i.e.,
5777 // `AddInputForPhi(0, ...); AddInputForPhi(1, ...); ...`.
5778 size_t phi_inputs_start = phi_i * input_capacity_per_phi_;
5779 size_t phi_input_offset_from_start = inputs_per_phi_;
5780 CHECK_EQ(input_count_per_phi_[phi_i]++, phi_input_offset_from_start);
5781 size_t phi_input_offset = phi_inputs_start + phi_input_offset_from_start;
5782 CHECK_EQ(next_phi_input_add_offset_, phi_input_offset);
5783#endif
5784 new (&phi_inputs_[next_phi_input_add_offset_]) OpIndex(input);
5785
5786 phi_inputs_total_++;
5787 next_phi_input_add_offset_ += input_capacity_per_phi_;
5788 if (next_phi_input_add_offset_ >= phi_inputs_capacity_total_) {
5789 // We have finished adding the last input for all phis.
5790 inputs_per_phi_++;
5791 next_phi_input_add_offset_ = inputs_per_phi_;
5792#ifdef DEBUG
5793 EnsureAllPhisHaveSameInputCount();
5794#endif
5795 }
5796 }
5797
5798 uint32_t phi_count() const { return phi_count_; }
5799
5800 ValueType phi_type(size_t phi_i) const { return phi_types_[phi_i]; }
5801
5803 size_t phi_inputs_start = phi_i * input_capacity_per_phi_;
5804 return base::VectorOf(&phi_inputs_[phi_inputs_start], inputs_per_phi_);
5805 }
5806
5807 void AddIncomingException(OpIndex exception) {
5808 incoming_exceptions_.push_back(exception);
5809 }
5810
5812 return base::VectorOf(incoming_exceptions_);
5813 }
5814
5815#if DEBUG
5816 void DcheckConsistency() { EnsureAllPhisHaveSameInputCount(); }
5817#endif
5818
5819 private:
5820 // Invariants:
5821 // The number of phis for a given block (e.g., locals, merged stack values,
5822 // and cached instance fields) is known when constructing the `BlockPhis`
5823 // and doesn't grow afterwards.
5824 // The number of _inputs_ for each phi is however _not_ yet known when
5825 // constructing this, but grows over time as new incoming edges for a given
5826 // block are created.
5827 // After such an edge is created, each phi has the same number of inputs.
5828 // When eventually creating a phi, we also need all inputs laid out
5829 // contiguously.
5830 // Due to those requirements, we write our own little container, see below.
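// Layout sketch of this container (illustrative numbers): with phi_count_ == 3
// and input_capacity_per_phi_ == 2, phi_inputs_ is laid out as
//   [ p0i0 p0i1 | p1i0 p1i1 | p2i0 p2i1 ],
// where only the first inputs_per_phi_ slots of each stride are initialized.
// When a phi runs out of capacity, the array is reallocated with twice the
// stride and the initialized prefix of each stride is copied over.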
5831
5832 // First the backing storage:
5833 // Of size `phi_count_`, one type per phi.
5834 ValueType* phi_types_ = nullptr;
5835 // Of size `phi_inputs_capacity_total_ == phi_count_ *
5836 // input_capacity_per_phi_`, of which `phi_inputs_total_ == phi_count_ *
5837 // inputs_per_phi_` are set/initialized. All inputs for a given phi are
5838 // stored contiguously, but between them are uninitialized elements for
5839 // adding new inputs without reallocating.
5840 OpIndex* phi_inputs_ = nullptr;
5841
5842 // Stored explicitly to save multiplications in the hot `AddInputForPhi()`.
5843 // Also pulled up to be in the same cache-line as `phi_inputs_`.
5844 uint32_t phi_inputs_capacity_total_ = 0; // Updated with `phi_inputs_`.
5845 uint32_t phi_inputs_total_ = 0;
5846 uint32_t next_phi_input_add_offset_ = 0;
5847
5848 // The dimensions.
5849 uint32_t phi_count_ = 0;
5850 uint32_t inputs_per_phi_ = 0;
5851 static constexpr uint32_t kInitialInputCapacityPerPhi = 2;
5852 uint32_t input_capacity_per_phi_ = kInitialInputCapacityPerPhi;
5853
5854#ifdef DEBUG
5855 std::vector<uint32_t> input_count_per_phi_;
5856 void EnsureAllPhisHaveSameInputCount() const {
5857 CHECK_EQ(phi_inputs_total_, phi_count() * inputs_per_phi_);
5858 CHECK_EQ(phi_count(), input_count_per_phi_.size());
5859 CHECK(std::all_of(input_count_per_phi_.begin(),
5860 input_count_per_phi_.end(),
5861 [=, this](uint32_t input_count) {
5862 return input_count == inputs_per_phi_;
5863 }));
5864 }
5865#endif
5866
5867 // The number of `incoming_exceptions` is also not known when constructing
5868 // the block, but at least it is only one-dimensional, so we can use a
5869 // simple `ZoneVector`.
5871
5872 Zone* zone() { return incoming_exceptions_.zone(); }
5873
5875 // We should have always initialized some storage, see
5876 // `kInitialInputCapacityPerPhi`.
5877 DCHECK_NOT_NULL(phi_inputs_);
5878 DCHECK_NE(phi_inputs_capacity_total_, 0);
5879
5880 OpIndex* old_phi_inputs = phi_inputs_;
5881 uint32_t old_input_capacity_per_phi = input_capacity_per_phi_;
5882 uint32_t old_phi_inputs_capacity_total = phi_inputs_capacity_total_;
5883
5884 input_capacity_per_phi_ *= 2;
5885 phi_inputs_capacity_total_ *= 2;
5886 phi_inputs_ = zone()->AllocateArray<OpIndex>(phi_inputs_capacity_total_);
5887
5888 // This is essentially a strided copy, where we expand the storage by
5889 // "inserting" uninitialized elements in between contiguous stretches of
5890 // inputs belonging to the same phi.
5891#ifdef DEBUG
5892 EnsureAllPhisHaveSameInputCount();
5893#endif
5894 for (size_t phi_i = 0; phi_i < phi_count(); ++phi_i) {
5895 const OpIndex* old_begin =
5896 &old_phi_inputs[phi_i * old_input_capacity_per_phi];
5897 const OpIndex* old_end = old_begin + inputs_per_phi_;
5898 OpIndex* begin = &phi_inputs_[phi_i * input_capacity_per_phi_];
5899 std::uninitialized_copy(old_begin, old_end, begin);
5900 }
5901
5902 zone()->DeleteArray(old_phi_inputs, old_phi_inputs_capacity_total);
5903 }
5904 };
5905
5906 // Perform a null check if the input type is nullable.
5907 V<Object> NullCheck(const Value& value,
5908 TrapId trap_id = TrapId::kTrapNullDereference) {
5909 V<Object> not_null_value = V<Object>::Cast(value.op);
5910 if (value.type.is_nullable()) {
5911 not_null_value = __ AssertNotNull(value.op, value.type, trap_id);
5912 }
5913 return not_null_value;
5914 }
5915
5916 // Creates a new block, initializes a {BlockPhis} for it, and registers it
5917 // with block_phis_. We pass a {merge} only if we later need to recover values
5918 // for that merge.
5919 TSBlock* NewBlockWithPhis(FullDecoder* decoder, Merge<Value>* merge) {
5920 TSBlock* block = __ NewBlock();
5921 block_phis_.emplace(block, BlockPhis(decoder, merge));
5922 return block;
5923 }
5924
5925 // Sets up a control flow edge from the current SSA environment and a stack to
5926 // {block}. The stack is {stack_values} if present, otherwise the current
5927 // decoder stack.
5928 void SetupControlFlowEdge(FullDecoder* decoder, TSBlock* block,
5929 uint32_t drop_values = 0,
5930 V<Object> exception = OpIndex::Invalid(),
5931 Merge<Value>* stack_values = nullptr) {
5932 if (__ current_block() == nullptr) return;
5933 // It is guaranteed that this element exists.
5934 DCHECK_NE(block_phis_.find(block), block_phis_.end());
5935 BlockPhis& phis_for_block = block_phis_.find(block)->second;
5936 uint32_t merge_arity = static_cast<uint32_t>(phis_for_block.phi_count()) -
5937 decoder->num_locals();
5938
5939 for (size_t i = 0; i < ssa_env_.size(); i++) {
5940 phis_for_block.AddInputForPhi(i, ssa_env_[i]);
5941 }
5942 // We never drop values from an explicit merge.
5943 DCHECK_IMPLIES(stack_values != nullptr, drop_values == 0);
5944 Value* stack_base = merge_arity == 0 ? nullptr
5945 : stack_values != nullptr
5946 ? &(*stack_values)[0]
5947 : decoder->stack_value(merge_arity + drop_values);
5948 for (size_t i = 0; i < merge_arity; i++) {
5949 DCHECK(stack_base[i].op.valid());
5950 phis_for_block.AddInputForPhi(decoder->num_locals() + i,
5951 stack_base[i].op);
5952 }
5953 if (exception.valid()) {
5954 phis_for_block.AddIncomingException(exception);
5955 }
5956 }
5957
5958 OpIndex MaybePhi(base::Vector<const OpIndex> elements, ValueType type) {
5959 if (elements.empty()) return OpIndex::Invalid();
5960 for (size_t i = 1; i < elements.size(); i++) {
5961 if (elements[i] != elements[0]) {
5962 return __ Phi(elements, RepresentationFor(type));
5963 }
5964 }
5965 return elements[0];
5966 }
5967
5968 // Binds a block, initializes phis for its SSA environment from its entry in
5969 // {block_phis_}, and sets values to its {merge} (if available) from
5970 // its entry in {block_phis_}.
5971 void BindBlockAndGeneratePhis(FullDecoder* decoder, TSBlock* tsblock,
5972 Merge<Value>* merge,
5973 OpIndex* exception = nullptr) {
5974 __ Bind(tsblock);
5975 auto block_phis_it = block_phis_.find(tsblock);
5976 DCHECK_NE(block_phis_it, block_phis_.end());
5977 BlockPhis& block_phis = block_phis_it->second;
5978
5979 uint32_t merge_arity = merge != nullptr ? merge->arity : 0;
5980 DCHECK_EQ(decoder->num_locals() + merge_arity, block_phis.phi_count());
5981
5982#ifdef DEBUG
5983 // Check consistency of Phi storage. We do this here rather than inside
5984 // {block_phis.phi_inputs()} to avoid overall O(n²) complexity.
5985 block_phis.DcheckConsistency();
5986#endif
5987
5988 for (uint32_t i = 0; i < decoder->num_locals(); i++) {
5989 ssa_env_[i] = MaybePhi(block_phis.phi_inputs(i), block_phis.phi_type(i));
5990 }
5991 for (uint32_t i = 0; i < merge_arity; i++) {
5992 uint32_t phi_index = decoder->num_locals() + i;
5993 (*merge)[i].op = MaybePhi(block_phis.phi_inputs(phi_index),
5994 block_phis.phi_type(phi_index));
5995 }
5996 DCHECK_IMPLIES(exception == nullptr,
5997 block_phis.incoming_exceptions().empty());
5998 if (exception != nullptr && !exception->valid()) {
5999 *exception = MaybePhi(block_phis.incoming_exceptions(), kWasmExternRef);
6000 }
6001 block_phis_.erase(block_phis_it);
6002 }
6003
6004 OpIndex DefaultValue(ValueType type) {
6005 switch (type.kind()) {
6006 case kI8:
6007 case kI16:
6008 case kI32:
6009 return __ Word32Constant(int32_t{0});
6010 case kI64:
6011 return __ Word64Constant(int64_t{0});
6012 case kF16:
6013 case kF32:
6014 return __ Float32Constant(0.0f);
6015 case kF64:
6016 return __ Float64Constant(0.0);
6017 case kRefNull:
6018 return __ Null(type);
6019 case kS128: {
6020 uint8_t value[kSimd128Size] = {};
6021 return __ Simd128Constant(value);
6022 }
6023 case kVoid:
6024 case kRef:
6025 case kTop:
6026 case kBottom:
6027 UNREACHABLE();
6028 }
6029 }
6030
6031 private:
6033 const FunctionSig* callee_sig,
6034 const Value* func_ref_or_index,
6035 const Value args[]) {
6037 if (parent_frame_state_.valid()) {
6039 }
6040 // The first input is the closure for JS. (The instruction selector will
6041 // just skip this input as the liftoff frame doesn't have a closure.)
6042 V<Object> dummy_tagged = __ SmiZeroConstant();
6043 builder.AddInput(MachineType::AnyTagged(), dummy_tagged);
6044 // Add the parameters.
6045 size_t param_count = decoder->sig_->parameter_count();
6046 for (size_t i = 0; i < param_count; ++i) {
6047 builder.AddInput(decoder->sig_->GetParam(i).machine_type(), ssa_env_[i]);
6048 }
6049 // Add the context. Wasm doesn't have a JS context, so this is another
6050 // value skipped by the instruction selector.
6051 builder.AddInput(MachineType::AnyTagged(), dummy_tagged);
6052
6053 // Add the wasm locals.
6054 for (size_t i = param_count; i < ssa_env_.size(); ++i) {
6055 builder.AddInput(
6056 decoder->local_type(static_cast<uint32_t>(i)).machine_type(),
6057 ssa_env_[i]);
6058 }
6059 // Add the wasm stack values.
6060 // Note that the decoder stack is already in the state after the call, i.e.
6061 // the callee and the arguments were already popped from the stack and the
6062 // returns are pushed. Therefore skip the results and manually add the
6063 // call_ref stack values.
6064 for (int32_t i = decoder->stack_size();
6065 i > static_cast<int32_t>(callee_sig->return_count()); --i) {
6066 Value* val = decoder->stack_value(i);
6067 builder.AddInput(val->type.machine_type(), val->op);
6068 }
6069 // Add the call_ref or call_indirect stack values.
6070 if (args != nullptr) {
6071 for (const Value& arg :
6072 base::VectorOf(args, callee_sig->parameter_count())) {
6073 builder.AddInput(arg.type.machine_type(), arg.op);
6074 }
6075 }
6076 if (func_ref_or_index) {
6077 builder.AddInput(func_ref_or_index->type.machine_type(),
6078 func_ref_or_index->op);
6079 }
6080 // The call_ref (callee) or the table index.
6081 const size_t kExtraLocals = func_ref_or_index != nullptr ? 1 : 0;
6082 size_t wasm_local_count = ssa_env_.size() - param_count;
6083 size_t local_count = kExtraLocals + decoder->stack_size() +
6084 wasm_local_count - callee_sig->return_count();
6085 local_count += args != nullptr ? callee_sig->parameter_count() : 0;
6086 Zone* zone = Asm().data()->compilation_zone();
6087 auto* function_info = zone->New<compiler::FrameStateFunctionInfo>(
6088 compiler::FrameStateType::kLiftoffFunction,
6089 static_cast<uint16_t>(param_count), 0, static_cast<int>(local_count),
6092 auto* frame_state_info = zone->New<compiler::FrameStateInfo>(
6093 BytecodeOffset(decoder->pc_offset()),
6095
6096 // Limit the maximum amount of inputs to a deopt node to a reasonably low
6097 // value. For each of these values extra metadata needs to be stored for the
6098 // deoptimizer while the performance upside of a deoptimization node doesn't
6099 // scale with the amount of inputs, so there are diminishing returns.
6100 constexpr size_t max_deopt_input_count = 500;
6101 // We don't want to include the instruction selector just for this limit.
6102 constexpr size_t max_isel_input_count =
6103 std::numeric_limits<uint16_t>::max();
6104 // Note that the instruction selector has limits on the maximum amount of
6105 // inputs for an instruction while nested FrameStates are unfolded, so the
6106 // total number of inputs after unfolding needs to be lower than
6107 // uint16_t::max (adding a random 42 here for additional "lower-level"
6108 // inputs that might not be accounted for in the FrameState inputs.)
6109 // The number of inputs might be doubled by the Int64Lowering on 32 bit
6110 // platforms.
6111 static_assert((max_deopt_input_count * 2 + 42) *
6113 max_isel_input_count);
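// For scale (assuming the doubling mentioned above): (500 * 2 + 42) * 2 =
// 2084, comfortably below uint16_t::max == 65535.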
6114 if (builder.Inputs().size() >= max_deopt_input_count) {
6115 if (v8_flags.trace_wasm_inlining) {
6116 PrintF(
6117 "[function %d%s: Disabling deoptimizations for speculative "
6118 "inlining as the deoptimization FrameState takes too many inputs "
6119 "(%zu vs. %zu)]\n",
6120 func_index_, mode_ == kRegular ? "" : " (inlined)",
6121 builder.Inputs().size(), max_deopt_input_count);
6122 }
6123 // If there are too many inputs, disable deopts completely for the rest of
6124 // the function as the FrameState could be needed by other deoptimization
6125 // points in case of nested inlining.
6127 return OpIndex::Invalid();
6128 }
6129
6130 return __ FrameState(
6131 builder.Inputs(), builder.inlined(),
6132 builder.AllocateFrameStateData(*frame_state_info, zone));
6133 }
6134
6135 void DeoptIfNot(FullDecoder* decoder, OpIndex deopt_condition,
6136 V<FrameState> frame_state) {
6138 DCHECK(frame_state.valid());
6139 __ DeoptimizeIfNot(deopt_condition, frame_state,
6140 DeoptimizeReason::kWrongCallTarget,
6142 }
6143
6144 void Deopt(FullDecoder* decoder, V<FrameState> frame_state) {
6146 DCHECK(frame_state.valid());
6147 __ Deoptimize(frame_state, DeoptimizeReason::kWrongCallTarget,
6149 }
6150
6151 uint32_t GetLiftoffFrameSize(const FullDecoder* decoder) {
6152 if (liftoff_frame_size_ !=
6154 return liftoff_frame_size_;
6155 }
6156 const TypeFeedbackStorage& feedback = decoder->module_->type_feedback;
6157 base::MutexGuard mutex_guard(&feedback.mutex);
6158 auto function_feedback = feedback.feedback_for_function.find(func_index_);
6159 CHECK_NE(function_feedback, feedback.feedback_for_function.end());
6160 liftoff_frame_size_ = function_feedback->second.liftoff_frame_size;
6161 // The liftoff frame size is strictly required. If it is not properly set,
6162 // calling the function embedding the deopt node will always fail on the
6163 // stack check.
6166 return liftoff_frame_size_;
6167 }
6168
6170 V<Word64> result = __ template Projection<0>(truncated);
6171 V<Word32> check = __ template Projection<1>(truncated);
6172 __ TrapIf(__ Word32Equal(check, 0), TrapId::kTrapFloatUnrepresentable);
6173 return result;
6174 }
6175
6176 std::pair<OpIndex, V<Word32>> BuildCCallForFloatConversion(
6178 ExternalReference ccall_ref) {
6179 uint8_t slot_size = MemoryRepresentation::Int64().SizeInBytes();
6180 V<WordPtr> stack_slot = __ StackSlot(slot_size, slot_size);
6181 __ Store(stack_slot, arg, StoreOp::Kind::RawAligned(), float_type,
6184 MachineSignature sig(1, 1, reps);
6185 V<Word32> overflow = CallC(&sig, ccall_ref, stack_slot);
6186 return {stack_slot, overflow};
6187 }
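// Sketch of the stack-slot convention used by these conversion helpers
// (illustrative only; the buffer handling and helper name are assumptions,
// not the real C signatures):
//   alignas(8) uint8_t slot[8];
//   std::memcpy(slot, &float_arg, sizeof(float_arg));  // store the argument
//   int32_t ok = wasm_conversion_helper(slot);  // reads the float, writes the
//                                               // converted i64 back in place
//   int64_t result;
//   std::memcpy(&result, slot, sizeof(result));        // load the result
//   if (!ok) Trap(kTrapFloatUnrepresentable);          // 0 signals failure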
6188
6190 ExternalReference ccall_ref) {
6191 auto [stack_slot, overflow] =
6193 __ TrapIf(__ Word32Equal(overflow, 0),
6194 compiler::TrapId::kTrapFloatUnrepresentable);
6196 return __ Load(stack_slot, LoadOp::Kind::RawAligned(), int64);
6197 }
6198
6201 ExternalReference ccall_ref,
6202 bool is_signed) {
6204 uint8_t slot_size = int64.SizeInBytes();
6205 V<WordPtr> stack_slot = __ StackSlot(slot_size, slot_size);
6206 __ Store(stack_slot, arg, StoreOp::Kind::RawAligned(), float_type,
6209 MachineSignature sig(0, 1, reps);
6210 CallC(&sig, ccall_ref, stack_slot);
6211 return __ Load(stack_slot, LoadOp::Kind::RawAligned(), int64);
6212 }
6213
6215 OpIndex input, ExternalReference ccall_ref,
6216 MemoryRepresentation input_representation,
6217 MemoryRepresentation result_representation) {
6218 uint8_t slot_size = std::max(input_representation.SizeInBytes(),
6219 result_representation.SizeInBytes());
6220 V<WordPtr> stack_slot = __ StackSlot(slot_size, slot_size);
6221 __ Store(stack_slot, input, StoreOp::Kind::RawAligned(),
6222 input_representation, compiler::WriteBarrierKind::kNoWriteBarrier);
6224 MachineSignature sig(0, 1, reps);
6225 CallC(&sig, ccall_ref, stack_slot);
6226 return __ Load(stack_slot, LoadOp::Kind::RawAligned(),
6227 result_representation);
6228 }
6229
6231 wasm::TrapId trap_zero) {
6233 V<WordPtr> stack_slot =
6234 __ StackSlot(2 * int64_rep.SizeInBytes(), int64_rep.SizeInBytes());
6235 __ Store(stack_slot, lhs, StoreOp::Kind::RawAligned(), int64_rep,
6237 __ Store(stack_slot, rhs, StoreOp::Kind::RawAligned(), int64_rep,
6239 int64_rep.SizeInBytes());
6240
6242 MachineSignature sig(1, 1, sig_types);
6243 OpIndex rc = CallC(&sig, ccall_ref, stack_slot);
6244 __ TrapIf(__ Word32Equal(rc, 0), trap_zero);
6245 __ TrapIf(__ Word32Equal(rc, -1), TrapId::kTrapDivUnrepresentable);
6246 return __ Load(stack_slot, LoadOp::Kind::RawAligned(), int64_rep);
6247 }
6248
6250 ValueType input_type /* for ref.is_null only*/) {
6251 switch (opcode) {
6252 case kExprI32Eqz:
6253 return __ Word32Equal(arg, 0);
6254 case kExprF32Abs:
6255 return __ Float32Abs(arg);
6256 case kExprF32Neg:
6257 return __ Float32Negate(arg);
6258 case kExprF32Sqrt:
6259 return __ Float32Sqrt(arg);
6260 case kExprF64Abs:
6261 return __ Float64Abs(arg);
6262 case kExprF64Neg:
6263 return __ Float64Negate(arg);
6264 case kExprF64Sqrt:
6265 return __ Float64Sqrt(arg);
6266 case kExprI32SConvertF32: {
6267 V<Float32> truncated = UnOpImpl(kExprF32Trunc, arg, kWasmF32);
6268 V<Word32> result = __ TruncateFloat32ToInt32OverflowToMin(truncated);
6269 V<Float32> converted_back = __ ChangeInt32ToFloat32(result);
6270 __ TrapIf(__ Word32Equal(__ Float32Equal(converted_back, truncated), 0),
6271 TrapId::kTrapFloatUnrepresentable);
6272 return result;
6273 }
6274 case kExprI32UConvertF32: {
6275 V<Float32> truncated = UnOpImpl(kExprF32Trunc, arg, kWasmF32);
6276 V<Word32> result = __ TruncateFloat32ToUint32OverflowToMin(truncated);
6277 V<Float32> converted_back = __ ChangeUint32ToFloat32(result);
6278 __ TrapIf(__ Word32Equal(__ Float32Equal(converted_back, truncated), 0),
6279 TrapId::kTrapFloatUnrepresentable);
6280 return result;
6281 }
6282 case kExprI32SConvertF64: {
6283 V<Float64> truncated = UnOpImpl(kExprF64Trunc, arg, kWasmF64);
6285 __ TruncateFloat64ToInt32OverflowUndefined(truncated);
6286 V<Float64> converted_back = __ ChangeInt32ToFloat64(result);
6287 __ TrapIf(__ Word32Equal(__ Float64Equal(converted_back, truncated), 0),
6288 TrapId::kTrapFloatUnrepresentable);
6289 return result;
6290 }
6291 case kExprI32UConvertF64: {
6292 V<Float64> truncated = UnOpImpl(kExprF64Trunc, arg, kWasmF64);
6293 V<Word32> result = __ TruncateFloat64ToUint32OverflowToMin(truncated);
6294 V<Float64> converted_back = __ ChangeUint32ToFloat64(result);
6295 __ TrapIf(__ Word32Equal(__ Float64Equal(converted_back, truncated), 0),
6296 TrapId::kTrapFloatUnrepresentable);
6297 return result;
6298 }
6299 case kExprI64SConvertF32:
6301 __ TryTruncateFloat32ToInt64(arg))
6304 ExternalReference::wasm_float32_to_int64());
6305 case kExprI64UConvertF32:
6307 __ TryTruncateFloat32ToUint64(arg))
6310 ExternalReference::wasm_float32_to_uint64());
6311 case kExprI64SConvertF64:
6313 __ TryTruncateFloat64ToInt64(arg))
6316 ExternalReference::wasm_float64_to_int64());
6317 case kExprI64UConvertF64:
6319 __ TryTruncateFloat64ToUint64(arg))
6322 ExternalReference::wasm_float64_to_uint64());
6323 case kExprF64SConvertI32:
6324 return __ ChangeInt32ToFloat64(arg);
6325 case kExprF64UConvertI32:
6326 return __ ChangeUint32ToFloat64(arg);
6327 case kExprF32SConvertI32:
6328 return __ ChangeInt32ToFloat32(arg);
6329 case kExprF32UConvertI32:
6330 return __ ChangeUint32ToFloat32(arg);
6331 case kExprI32SConvertSatF32: {
6332 V<Float32> truncated = UnOpImpl(kExprF32Trunc, arg, kWasmF32);
6333 V<Word32> converted =
6334 __ TruncateFloat32ToInt32OverflowUndefined(truncated);
6335 V<Float32> converted_back = __ ChangeInt32ToFloat32(converted);
6336
6337 Label<Word32> done(&asm_);
6338
6339 IF (LIKELY(__ Float32Equal(truncated, converted_back))) {
6340 GOTO(done, converted);
6341 } ELSE {
6342 // Overflow.
6343 IF (__ Float32Equal(arg, arg)) {
6344 // Not NaN.
6345 IF (__ Float32LessThan(arg, 0)) {
6346 // Negative arg.
6347 GOTO(done,
6348 __ Word32Constant(std::numeric_limits<int32_t>::min()));
6349 } ELSE {
6350 // Positive arg.
6351 GOTO(done,
6352 __ Word32Constant(std::numeric_limits<int32_t>::max()));
6353 }
6354 } ELSE {
6355 // NaN.
6356 GOTO(done, __ Word32Constant(0));
6357 }
6358 }
6359 BIND(done, result);
6360
6361 return result;
6362 }
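// For reference, a scalar sketch of the saturating semantics implemented by
// this and the following ConvertSat cases (illustrative only, assuming only
// <cmath> and <limits>; not the actual lowering):
//   int32_t TruncSatF32ToI32(float x) {
//     if (std::isnan(x)) return 0;  // NaN maps to 0.
//     if (x <= static_cast<float>(std::numeric_limits<int32_t>::min()))
//       return std::numeric_limits<int32_t>::min();  // Saturate low.
//     if (x >= static_cast<float>(std::numeric_limits<int32_t>::max()))
//       return std::numeric_limits<int32_t>::max();  // Saturate high.
//     return static_cast<int32_t>(x);  // In range: truncate toward zero.
//   }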
6363 case kExprI32UConvertSatF32: {
6364 V<Float32> truncated = UnOpImpl(kExprF32Trunc, arg, kWasmF32);
6365 V<Word32> converted =
6366 __ TruncateFloat32ToUint32OverflowUndefined(truncated);
6367 V<Float32> converted_back = __ ChangeUint32ToFloat32(converted);
6368
6369 Label<Word32> done(&asm_);
6370
6371 IF (LIKELY(__ Float32Equal(truncated, converted_back))) {
6372 GOTO(done, converted);
6373 } ELSE {
6374 // Overflow.
6375 IF (__ Float32Equal(arg, arg)) {
6376 // Not NaN.
6377 IF (__ Float32LessThan(arg, 0)) {
6378 // Negative arg.
6379 GOTO(done, __ Word32Constant(0));
6380 } ELSE {
6381 // Positive arg.
6382 GOTO(done,
6383 __ Word32Constant(std::numeric_limits<uint32_t>::max()));
6384 }
6385 } ELSE {
6386 // NaN.
6387 GOTO(done, __ Word32Constant(0));
6388 }
6389 }
6390 BIND(done, result);
6391
6392 return result;
6393 }
6394 case kExprI32SConvertSatF64: {
6395 V<Float64> truncated = UnOpImpl(kExprF64Trunc, arg, kWasmF64);
6396 V<Word32> converted =
6397 __ TruncateFloat64ToInt32OverflowUndefined(truncated);
6398 V<Float64> converted_back = __ ChangeInt32ToFloat64(converted);
6399
6400 Label<Word32> done(&asm_);
6401
6402 IF (LIKELY(__ Float64Equal(truncated, converted_back))) {
6403 GOTO(done, converted);
6404 } ELSE {
6405 // Overflow.
6406 IF (__ Float64Equal(arg, arg)) {
6407 // Not NaN.
6408 IF (__ Float64LessThan(arg, 0)) {
6409 // Negative arg.
6410 GOTO(done,
6411 __ Word32Constant(std::numeric_limits<int32_t>::min()));
6412 } ELSE {
6413 // Positive arg.
6414 GOTO(done,
6415 __ Word32Constant(std::numeric_limits<int32_t>::max()));
6416 }
6417 } ELSE {
6418 // NaN.
6419 GOTO(done, __ Word32Constant(0));
6420 }
6421 }
6422 BIND(done, result);
6423
6424 return result;
6425 }
6426 case kExprI32UConvertSatF64: {
6427 V<Float64> truncated = UnOpImpl(kExprF64Trunc, arg, kWasmF64);
6428 V<Word32> converted =
6429 __ TruncateFloat64ToUint32OverflowUndefined(truncated);
6430 V<Float64> converted_back = __ ChangeUint32ToFloat64(converted);
6431
6432 Label<Word32> done(&asm_);
6433
6434 IF (LIKELY(__ Float64Equal(truncated, converted_back))) {
6435 GOTO(done, converted);
6436 } ELSE {
6437 // Overflow.
6438 IF (__ Float64Equal(arg, arg)) {
6439 // Not NaN.
6440 IF (__ Float64LessThan(arg, 0)) {
6441 // Negative arg.
6442 GOTO(done, __ Word32Constant(0));
6443 } ELSE {
6444 // Positive arg.
6445 GOTO(done,
6446 __ Word32Constant(std::numeric_limits<uint32_t>::max()));
6447 }
6448 } ELSE {
6449 // NaN.
6450 GOTO(done, __ Word32Constant(0));
6451 }
6452 }
6453 BIND(done, result);
6454
6455 return result;
6456 }
6457 case kExprI64SConvertSatF32: {
6458 if constexpr (!Is64()) {
6459 bool is_signed = true;
6462 ExternalReference::wasm_float32_to_int64_sat(), is_signed);
6463 }
6464 V<Tuple<Word64, Word32>> converted = __ TryTruncateFloat32ToInt64(arg);
6466
6467 if (SupportedOperations::sat_conversion_is_safe()) {
6468 return __ Projection<0>(converted);
6469 }
6470 IF (LIKELY(__ Projection<1>(converted))) {
6471 GOTO(done, __ Projection<0>(converted));
6472 } ELSE {
6473 // Overflow.
6474 IF (__ Float32Equal(arg, arg)) {
6475 // Not NaN.
6476 IF (__ Float32LessThan(arg, 0)) {
6477 // Negative arg.
6478 GOTO(done,
6479 __ Word64Constant(std::numeric_limits<int64_t>::min()));
6480 } ELSE {
6481 // Positive arg.
6482 GOTO(done,
6483 __ Word64Constant(std::numeric_limits<int64_t>::max()));
6484 }
6485 } ELSE {
6486 // NaN.
6487 GOTO(done, __ Word64Constant(int64_t{0}));
6488 }
6489 }
6490 BIND(done, result);
6491
6492 return result;
6493 }
6494 case kExprI64UConvertSatF32: {
6495 if constexpr (!Is64()) {
6496 bool is_signed = false;
6499 ExternalReference::wasm_float32_to_uint64_sat(), is_signed);
6500 }
6501 V<Tuple<Word64, Word32>> converted = __ TryTruncateFloat32ToUint64(arg);
6503
6504 if (SupportedOperations::sat_conversion_is_safe()) {
6505 return __ template Projection<0>(converted);
6506 }
6507
6508 IF (LIKELY(__ template Projection<1>(converted))) {
6509 GOTO(done, __ template Projection<0>(converted));
6510 } ELSE {
6511 // Overflow.
6512 IF (__ Float32Equal(arg, arg)) {
6513 // Not NaN.
6514 IF (__ Float32LessThan(arg, 0)) {
6515 // Negative arg.
6516 GOTO(done, __ Word64Constant(int64_t{0}));
6517 } ELSE {
6518 // Positive arg.
6519 GOTO(done,
6520 __ Word64Constant(std::numeric_limits<uint64_t>::max()));
6521 }
6522 } ELSE {
6523 // NaN.
6524 GOTO(done, __ Word64Constant(int64_t{0}));
6525 }
6526 }
6527 BIND(done, result);
6528
6529 return result;
6530 }
6531 case kExprI64SConvertSatF64: {
6532 if constexpr (!Is64()) {
6533 bool is_signed = true;
6536 ExternalReference::wasm_float64_to_int64_sat(), is_signed);
6537 }
6538 V<Tuple<Word64, Word32>> converted = __ TryTruncateFloat64ToInt64(arg);
6540
6541 if (SupportedOperations::sat_conversion_is_safe()) {
6542 return __ template Projection<0>(converted);
6543 }
6544
6545 IF (LIKELY(__ template Projection<1>(converted))) {
6546 GOTO(done, __ template Projection<0>(converted));
6547 } ELSE {
6548 // Overflow.
6549 IF (__ Float64Equal(arg, arg)) {
6550 // Not NaN.
6551 IF (__ Float64LessThan(arg, 0)) {
6552 // Negative arg.
6553 GOTO(done,
6554 __ Word64Constant(std::numeric_limits<int64_t>::min()));
6555 } ELSE {
6556 // Positive arg.
6557 GOTO(done,
6558 __ Word64Constant(std::numeric_limits<int64_t>::max()));
6559 }
6560 } ELSE {
6561 // NaN.
6562 GOTO(done, __ Word64Constant(int64_t{0}));
6563 }
6564 }
6565 BIND(done, result);
6566
6567 return result;
6568 }
6569 case kExprI64UConvertSatF64: {
6570 if constexpr (!Is64()) {
6571 bool is_signed = false;
6574 ExternalReference::wasm_float64_to_uint64_sat(), is_signed);
6575 }
6576 V<Tuple<Word64, Word32>> converted = __ TryTruncateFloat64ToUint64(arg);
6578
6579 if (SupportedOperations::sat_conversion_is_safe()) {
6580 return __ template Projection<0>(converted);
6581 }
6582
6583 IF (LIKELY(__ template Projection<1>(converted))) {
6584 GOTO(done, __ template Projection<0>(converted));
6585 } ELSE {
6586 // Overflow.
6587 IF (__ Float64Equal(arg, arg)) {
6588 // Not NaN.
6589 IF (__ Float64LessThan(arg, 0)) {
6590 // Negative arg.
6591 GOTO(done, __ Word64Constant(int64_t{0}));
6592 } ELSE {
6593 // Positive arg.
6594 GOTO(done,
6595 __ Word64Constant(std::numeric_limits<uint64_t>::max()));
6596 }
6597 } ELSE {
6598 // NaN.
6599 GOTO(done, __ Word64Constant(int64_t{0}));
6600 }
6601 }
6602 BIND(done, result);
6603
6604 return result;
6605 }
6606 case kExprF32ConvertF64:
6607 return __ TruncateFloat64ToFloat32(arg);
6608 case kExprF64ConvertF32:
6609 return __ ChangeFloat32ToFloat64(arg);
6610 case kExprF32ReinterpretI32:
6611 return __ BitcastWord32ToFloat32(arg);
6612 case kExprI32ReinterpretF32:
6613 return __ BitcastFloat32ToWord32(arg);
6614 case kExprI32Clz:
6615 return __ Word32CountLeadingZeros(arg);
6616 case kExprI32Ctz:
6617 if (SupportedOperations::word32_ctz()) {
6618 return __ Word32CountTrailingZeros(arg);
6619 } else {
6620 // TODO(14108): Use reverse_bits if supported.
6621 auto sig =
6623 .Params(MachineType::Uint32());
6624 return CallC(&sig, ExternalReference::wasm_word32_ctz(), arg);
6625 }
6626 case kExprI32Popcnt:
6627 if (SupportedOperations::word32_popcnt()) {
6628 return __ Word32PopCount(arg);
6629 } else {
6630 auto sig =
6632 .Params(MachineType::Uint32());
6633 return CallC(&sig, ExternalReference::wasm_word32_popcnt(), arg);
6634 }
6635 case kExprF32Floor:
6636 if (SupportedOperations::float32_round_down()) {
6637 return __ Float32RoundDown(arg);
6638 } else {
6639 return CallCStackSlotToStackSlot(arg,
6640 ExternalReference::wasm_f32_floor(),
6642 }
6643 case kExprF32Ceil:
6644 if (SupportedOperations::float32_round_up()) {
6645 return __ Float32RoundUp(arg);
6646 } else {
6647 return CallCStackSlotToStackSlot(arg,
6648 ExternalReference::wasm_f32_ceil(),
6650 }
6651 case kExprF32Trunc:
6652 if (SupportedOperations::float32_round_to_zero()) {
6653 return __ Float32RoundToZero(arg);
6654 } else {
6655 return CallCStackSlotToStackSlot(arg,
6656 ExternalReference::wasm_f32_trunc(),
6658 }
6659 case kExprF32NearestInt:
6660 if (SupportedOperations::float32_round_ties_even()) {
6661 return __ Float32RoundTiesEven(arg);
6662 } else {
6664 arg, ExternalReference::wasm_f32_nearest_int(),
6666 }
6667 case kExprF64Floor:
6668 if (SupportedOperations::float64_round_down()) {
6669 return __ Float64RoundDown(arg);
6670 } else {
6671 return CallCStackSlotToStackSlot(arg,
6672 ExternalReference::wasm_f64_floor(),
6674 }
6675 case kExprF64Ceil:
6676 if (SupportedOperations::float64_round_up()) {
6677 return __ Float64RoundUp(arg);
6678 } else {
6679 return CallCStackSlotToStackSlot(arg,
6680 ExternalReference::wasm_f64_ceil(),
6682 }
6683 case kExprF64Trunc:
6684 if (SupportedOperations::float64_round_to_zero()) {
6685 return __ Float64RoundToZero(arg);
6686 } else {
6687 return CallCStackSlotToStackSlot(arg,
6688 ExternalReference::wasm_f64_trunc(),
6690 }
6691 case kExprF64NearestInt:
6692 if (SupportedOperations::float64_round_ties_even()) {
6693 return __ Float64RoundTiesEven(arg);
6694 } else {
6696 arg, ExternalReference::wasm_f64_nearest_int(),
6698 }
6699 case kExprF64Acos:
6701 arg, ExternalReference::f64_acos_wrapper_function(),
6703 case kExprF64Asin:
6705 arg, ExternalReference::f64_asin_wrapper_function(),
6707 case kExprF64Atan:
6708 return __ Float64Atan(arg);
6709 case kExprF64Cos:
6710 return __ Float64Cos(arg);
6711 case kExprF64Sin:
6712 return __ Float64Sin(arg);
6713 case kExprF64Tan:
6714 return __ Float64Tan(arg);
6715 case kExprF64Exp:
6716 return __ Float64Exp(arg);
6717 case kExprF64Log:
6718 return __ Float64Log(arg);
6719 case kExprI32ConvertI64:
6720 return __ TruncateWord64ToWord32(arg);
6721 case kExprI64SConvertI32:
6722 return __ ChangeInt32ToInt64(arg);
6723 case kExprI64UConvertI32:
6724 return __ ChangeUint32ToUint64(arg);
6725 case kExprF64ReinterpretI64:
6726 return __ BitcastWord64ToFloat64(arg);
6727 case kExprI64ReinterpretF64:
6728 return __ BitcastFloat64ToWord64(arg);
6729 case kExprI64Clz:
6730 return __ Word64CountLeadingZeros(arg);
6731 case kExprI64Ctz:
6732 if (SupportedOperations::word64_ctz() ||
6733 (!Is64() && SupportedOperations::word32_ctz())) {
6734 return __ Word64CountTrailingZeros(arg);
6735 } else if (Is64()) {
6736 // TODO(14108): Use reverse_bits if supported.
6737 auto sig =
6739 .Params(MachineType::Uint64());
6740 return __ ChangeUint32ToUint64(
6741 CallC(&sig, ExternalReference::wasm_word64_ctz(), arg));
6742 } else {
6743 // lower_word == 0 ? 32 + CTZ32(upper_word) : CTZ32(lower_word);
6744 OpIndex upper_word =
6745 __ TruncateWord64ToWord32(__ Word64ShiftRightLogical(arg, 32));
6746 OpIndex lower_word = __ TruncateWord64ToWord32(arg);
6747 auto sig =
6749 .Params(MachineType::Uint32());
6750 Label<Word32> done(&asm_);
6751 IF (__ Word32Equal(lower_word, 0)) {
6752 GOTO(done,
6753 __ Word32Add(CallC(&sig, ExternalReference::wasm_word32_ctz(),
6754 upper_word),
6755 32));
6756 } ELSE {
6757 GOTO(done,
6758 CallC(&sig, ExternalReference::wasm_word32_ctz(), lower_word));
6759 }
6760 BIND(done, result);
6761 return __ ChangeUint32ToUint64(result);
6762 }
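// Illustrative sketch (not part of the original source) of the 32-bit
// decomposition above, in plain C++. Here `ctz32` stands for any 32-bit
// count-trailing-zeros helper, assuming ctz32(0) == 32, which the code
// above also relies on when both halves are zero:
//
//   uint32_t ctz64_via_ctz32(uint64_t x) {
//     uint32_t lower = static_cast<uint32_t>(x);
//     uint32_t upper = static_cast<uint32_t>(x >> 32);
//     return lower == 0 ? 32 + ctz32(upper) : ctz32(lower);
//   }
//
// For example, x == 0x100000000 has lower == 0, so the result is
// 32 + ctz32(1) == 32.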
6763 case kExprI64Popcnt:
6764 if (SupportedOperations::word64_popcnt() ||
6765 (!Is64() && SupportedOperations::word32_popcnt())) {
6766 return __ Word64PopCount(arg);
6767 } else if (Is64()) {
6768 // Call wasm_word64_popcnt.
6769 auto sig =
6771 .Params(MachineType::Uint64());
6772 return __ ChangeUint32ToUint64(
6773 CallC(&sig, ExternalReference::wasm_word64_popcnt(), arg));
6774 } else {
6775 // Emit two calls to wasm_word32_popcnt.
6776 OpIndex upper_word =
6777 __ TruncateWord64ToWord32(__ Word64ShiftRightLogical(arg, 32));
6778 OpIndex lower_word = __ TruncateWord64ToWord32(arg);
6779 auto sig =
6781 .Params(MachineType::Uint32());
6782 return __ ChangeUint32ToUint64(__ Word32Add(
6783 CallC(&sig, ExternalReference::wasm_word32_popcnt(), lower_word),
6784 CallC(&sig, ExternalReference::wasm_word32_popcnt(),
6785 upper_word)));
6786 }
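// The 32-bit fallback above uses the identity
//   popcnt64(x) == popcnt32(uint32_t(x)) + popcnt32(uint32_t(x >> 32)),
// i.e. the population count of a 64-bit value is the sum of the population
// counts of its two 32-bit halves.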
6787 case kExprI64Eqz:
6788 return __ Word64Equal(arg, 0);
6789 case kExprF32SConvertI64:
6790 if constexpr (!Is64()) {
6791 return CallCStackSlotToStackSlot(
6792 arg, ExternalReference::wasm_int64_to_float32(),
6793 MemoryRepresentation::Int64(), MemoryRepresentation::Float32());
6794 }
6795 return __ ChangeInt64ToFloat32(arg);
6796 case kExprF32UConvertI64:
6797 if constexpr (!Is64()) {
6798 return CallCStackSlotToStackSlot(
6799 arg, ExternalReference::wasm_uint64_to_float32(),
6800 MemoryRepresentation::Uint64(), MemoryRepresentation::Float32());
6801 }
6802 return __ ChangeUint64ToFloat32(arg);
6803 case kExprF64SConvertI64:
6804 if constexpr (!Is64()) {
6805 return CallCStackSlotToStackSlot(
6806 arg, ExternalReference::wasm_int64_to_float64(),
6807 MemoryRepresentation::Int64(), MemoryRepresentation::Float64());
6808 }
6809 return __ ChangeInt64ToFloat64(arg);
6810 case kExprF64UConvertI64:
6811 if constexpr (!Is64()) {
6812 return CallCStackSlotToStackSlot(
6813 arg, ExternalReference::wasm_uint64_to_float64(),
6814 MemoryRepresentation::Uint64(), MemoryRepresentation::Float64());
6815 }
6816 return __ ChangeUint64ToFloat64(arg);
6817 case kExprI32SExtendI8:
6818 return __ Word32SignExtend8(arg);
6819 case kExprI32SExtendI16:
6820 return __ Word32SignExtend16(arg);
6821 case kExprI64SExtendI8:
6822 return __ Word64SignExtend8(arg);
6823 case kExprI64SExtendI16:
6824 return __ Word64SignExtend16(arg);
6825 case kExprI64SExtendI32:
6826 return __ ChangeInt32ToInt64(__ TruncateWord64ToWord32(arg));
6827 case kExprRefIsNull:
6828 return __ IsNull(arg, input_type);
6829 case kExprI32AsmjsLoadMem8S:
6830 return AsmjsLoadMem(arg, MemoryRepresentation::Int8());
6831 case kExprI32AsmjsLoadMem8U:
6832 return AsmjsLoadMem(arg, MemoryRepresentation::Uint8());
6833 case kExprI32AsmjsLoadMem16S:
6834 return AsmjsLoadMem(arg, MemoryRepresentation::Int16());
6835 case kExprI32AsmjsLoadMem16U:
6836 return AsmjsLoadMem(arg, MemoryRepresentation::Uint16());
6837 case kExprI32AsmjsLoadMem:
6838 return AsmjsLoadMem(arg, MemoryRepresentation::Int32());
6839 case kExprF32AsmjsLoadMem:
6840 return AsmjsLoadMem(arg, MemoryRepresentation::Float32());
6841 case kExprF64AsmjsLoadMem:
6842 return AsmjsLoadMem(arg, MemoryRepresentation::Float64());
6843 case kExprI32AsmjsSConvertF32:
6844 case kExprI32AsmjsUConvertF32:
6845 return __ JSTruncateFloat64ToWord32(__ ChangeFloat32ToFloat64(arg));
6846 case kExprI32AsmjsSConvertF64:
6847 case kExprI32AsmjsUConvertF64:
6848 return __ JSTruncateFloat64ToWord32(arg);
6849 case kExprRefAsNonNull:
6850 // We abuse ref.as_non_null, which isn't otherwise used in this switch,
6851 // as a sentinel for the negation of ref.is_null.
6852 return __ Word32Equal(__ IsNull(arg, input_type), 0);
6853 case kExprAnyConvertExtern:
6854 return __ AnyConvertExtern(arg);
6855 case kExprExternConvertAny:
6856 return __ ExternConvertAny(arg);
6857 default:
6858 UNREACHABLE();
6859 }
6860 }
6861
6863 switch (opcode) {
6864 case kExprI32Add:
6865 return __ Word32Add(lhs, rhs);
6866 case kExprI32Sub:
6867 return __ Word32Sub(lhs, rhs);
6868 case kExprI32Mul:
6869 return __ Word32Mul(lhs, rhs);
6870 case kExprI32DivS: {
6871 __ TrapIf(__ Word32Equal(rhs, 0), TrapId::kTrapDivByZero);
6872 V<Word32> unrepresentable_condition = __ Word32BitwiseAnd(
6873 __ Word32Equal(rhs, -1), __ Word32Equal(lhs, kMinInt));
6874 __ TrapIf(unrepresentable_condition, TrapId::kTrapDivUnrepresentable);
6875 return __ Int32Div(lhs, rhs);
6876 }
6877 case kExprI32DivU:
6878 __ TrapIf(__ Word32Equal(rhs, 0), TrapId::kTrapDivByZero);
6879 return __ Uint32Div(lhs, rhs);
6880 case kExprI32RemS: {
6881 __ TrapIf(__ Word32Equal(rhs, 0), TrapId::kTrapRemByZero);
6882 Label<Word32> done(&asm_);
6883 IF (UNLIKELY(__ Word32Equal(rhs, -1))) {
6884 GOTO(done, __ Word32Constant(0));
6885 } ELSE {
6886 GOTO(done, __ Int32Mod(lhs, rhs));
6887 };
6888
6889 BIND(done, result);
6890 return result;
6891 }
6892 case kExprI32RemU:
6893 __ TrapIf(__ Word32Equal(rhs, 0), TrapId::kTrapRemByZero);
6894 return __ Uint32Mod(lhs, rhs);
6895 case kExprI32And:
6896 return __ Word32BitwiseAnd(lhs, rhs);
6897 case kExprI32Ior:
6898 return __ Word32BitwiseOr(lhs, rhs);
6899 case kExprI32Xor:
6900 return __ Word32BitwiseXor(lhs, rhs);
6901 case kExprI32Shl:
6902 // If possible, the bitwise-and gets optimized away later.
6903 return __ Word32ShiftLeft(lhs, __ Word32BitwiseAnd(rhs, 0x1f));
6904 case kExprI32ShrS:
6905 return __ Word32ShiftRightArithmetic(lhs,
6906 __ Word32BitwiseAnd(rhs, 0x1f));
6907 case kExprI32ShrU:
6908 return __ Word32ShiftRightLogical(lhs, __ Word32BitwiseAnd(rhs, 0x1f));
6909 case kExprI32Ror:
6910 return __ Word32RotateRight(lhs, __ Word32BitwiseAnd(rhs, 0x1f));
6911 case kExprI32Rol:
6912 if (SupportedOperations::word32_rol()) {
6913 return __ Word32RotateLeft(lhs, __ Word32BitwiseAnd(rhs, 0x1f));
6914 } else {
6915 return __ Word32RotateRight(
6916 lhs, __ Word32Sub(32, __ Word32BitwiseAnd(rhs, 0x1f)));
6917 }
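// The fallback above uses the identity rotl32(x, n) == rotr32(x, 32 - n)
// (rotation amounts taken modulo 32). For example, rotating 0x80000001 left
// by 1 and right by 31 both yield 0x00000003.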
6918 case kExprI32Eq:
6919 return __ Word32Equal(lhs, rhs);
6920 case kExprI32Ne:
6921 return __ Word32Equal(__ Word32Equal(lhs, rhs), 0);
6922 case kExprI32LtS:
6923 return __ Int32LessThan(lhs, rhs);
6924 case kExprI32LeS:
6925 return __ Int32LessThanOrEqual(lhs, rhs);
6926 case kExprI32LtU:
6927 return __ Uint32LessThan(lhs, rhs);
6928 case kExprI32LeU:
6929 return __ Uint32LessThanOrEqual(lhs, rhs);
6930 case kExprI32GtS:
6931 return __ Int32LessThan(rhs, lhs);
6932 case kExprI32GeS:
6933 return __ Int32LessThanOrEqual(rhs, lhs);
6934 case kExprI32GtU:
6935 return __ Uint32LessThan(rhs, lhs);
6936 case kExprI32GeU:
6937 return __ Uint32LessThanOrEqual(rhs, lhs);
6938 case kExprI64Add:
6939 return __ Word64Add(lhs, rhs);
6940 case kExprI64Sub:
6941 return __ Word64Sub(lhs, rhs);
6942 case kExprI64Mul:
6943 return __ Word64Mul(lhs, rhs);
6944 case kExprI64DivS: {
6945 if constexpr (!Is64()) {
6946 return BuildDiv64Call(lhs, rhs, ExternalReference::wasm_int64_div(),
6947 wasm::TrapId::kTrapDivByZero);
6948 }
6949 __ TrapIf(__ Word64Equal(rhs, 0), TrapId::kTrapDivByZero);
6950 V<Word32> unrepresentable_condition = __ Word32BitwiseAnd(
6951 __ Word64Equal(rhs, -1),
6952 __ Word64Equal(lhs, std::numeric_limits<int64_t>::min()));
6953 __ TrapIf(unrepresentable_condition, TrapId::kTrapDivUnrepresentable);
6954 return __ Int64Div(lhs, rhs);
6955 }
6956 case kExprI64DivU:
6957 if constexpr (!Is64()) {
6958 return BuildDiv64Call(lhs, rhs, ExternalReference::wasm_uint64_div(),
6959 wasm::TrapId::kTrapDivByZero);
6960 }
6961 __ TrapIf(__ Word64Equal(rhs, 0), TrapId::kTrapDivByZero);
6962 return __ Uint64Div(lhs, rhs);
6963 case kExprI64RemS: {
6964 if constexpr (!Is64()) {
6965 return BuildDiv64Call(lhs, rhs, ExternalReference::wasm_int64_mod(),
6966 wasm::TrapId::kTrapRemByZero);
6967 }
6968 __ TrapIf(__ Word64Equal(rhs, 0), TrapId::kTrapRemByZero);
6969 Label<Word64> done(&asm_);
6970 IF (UNLIKELY(__ Word64Equal(rhs, -1))) {
6971 GOTO(done, __ Word64Constant(int64_t{0}));
6972 } ELSE {
6973 GOTO(done, __ Int64Mod(lhs, rhs));
6974 };
6975
6976 BIND(done, result);
6977 return result;
6978 }
6979 case kExprI64RemU:
6980 if constexpr (!Is64()) {
6981 return BuildDiv64Call(lhs, rhs, ExternalReference::wasm_uint64_mod(),
6982 wasm::TrapId::kTrapRemByZero);
6983 }
6984 __ TrapIf(__ Word64Equal(rhs, 0), TrapId::kTrapRemByZero);
6985 return __ Uint64Mod(lhs, rhs);
6986 case kExprI64And:
6987 return __ Word64BitwiseAnd(lhs, rhs);
6988 case kExprI64Ior:
6989 return __ Word64BitwiseOr(lhs, rhs);
6990 case kExprI64Xor:
6991 return __ Word64BitwiseXor(lhs, rhs);
6992 case kExprI64Shl:
6993 // If possible, the bitwise-and gets optimized away later.
6994 return __ Word64ShiftLeft(
6995 lhs, __ Word32BitwiseAnd(__ TruncateWord64ToWord32(rhs), 0x3f));
6996 case kExprI64ShrS:
6997 return __ Word64ShiftRightArithmetic(
6998 lhs, __ Word32BitwiseAnd(__ TruncateWord64ToWord32(rhs), 0x3f));
6999 case kExprI64ShrU:
7000 return __ Word64ShiftRightLogical(
7001 lhs, __ Word32BitwiseAnd(__ TruncateWord64ToWord32(rhs), 0x3f));
7002 case kExprI64Ror:
7003 return __ Word64RotateRight(
7004 lhs, __ Word32BitwiseAnd(__ TruncateWord64ToWord32(rhs), 0x3f));
7005 case kExprI64Rol:
7006 if (SupportedOperations::word64_rol()) {
7007 return __ Word64RotateLeft(
7008 lhs, __ Word32BitwiseAnd(__ TruncateWord64ToWord32(rhs), 0x3f));
7009 } else {
7010 return __ Word64RotateRight(
7011 lhs, __ Word32BitwiseAnd(
7012 __ Word32Sub(64, __ TruncateWord64ToWord32(rhs)), 0x3f));
7013 }
7014 case kExprI64Eq:
7015 return __ Word64Equal(lhs, rhs);
7016 case kExprI64Ne:
7017 return __ Word32Equal(__ Word64Equal(lhs, rhs), 0);
7018 case kExprI64LtS:
7019 return __ Int64LessThan(lhs, rhs);
7020 case kExprI64LeS:
7021 return __ Int64LessThanOrEqual(lhs, rhs);
7022 case kExprI64LtU:
7023 return __ Uint64LessThan(lhs, rhs);
7024 case kExprI64LeU:
7025 return __ Uint64LessThanOrEqual(lhs, rhs);
7026 case kExprI64GtS:
7027 return __ Int64LessThan(rhs, lhs);
7028 case kExprI64GeS:
7029 return __ Int64LessThanOrEqual(rhs, lhs);
7030 case kExprI64GtU:
7031 return __ Uint64LessThan(rhs, lhs);
7032 case kExprI64GeU:
7033 return __ Uint64LessThanOrEqual(rhs, lhs);
7034 case kExprF32CopySign: {
7035 V<Word32> lhs_without_sign =
7036 __ Word32BitwiseAnd(__ BitcastFloat32ToWord32(lhs), 0x7fffffff);
7037 V<Word32> rhs_sign =
7038 __ Word32BitwiseAnd(__ BitcastFloat32ToWord32(rhs), 0x80000000);
7039 return __ BitcastWord32ToFloat32(
7040 __ Word32BitwiseOr(lhs_without_sign, rhs_sign));
7041 }
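// Self-contained sketch of the same bit manipulation in plain C++
// (illustrative only, not turboshaft operations):
//
//   float copysign_f32(float lhs, float rhs) {
//     uint32_t l, r;
//     std::memcpy(&l, &lhs, sizeof l);
//     std::memcpy(&r, &rhs, sizeof r);
//     uint32_t bits = (l & 0x7fffffffu) | (r & 0x80000000u);
//     float result;
//     std::memcpy(&result, &bits, sizeof result);
//     return result;  // magnitude of lhs, sign of rhs
//   }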
7042 case kExprF32Add:
7043 return __ Float32Add(lhs, rhs);
7044 case kExprF32Sub:
7045 return __ Float32Sub(lhs, rhs);
7046 case kExprF32Mul:
7047 return __ Float32Mul(lhs, rhs);
7048 case kExprF32Div:
7049 return __ Float32Div(lhs, rhs);
7050 case kExprF32Eq:
7051 return __ Float32Equal(lhs, rhs);
7052 case kExprF32Ne:
7053 return __ Word32Equal(__ Float32Equal(lhs, rhs), 0);
7054 case kExprF32Lt:
7055 return __ Float32LessThan(lhs, rhs);
7056 case kExprF32Le:
7057 return __ Float32LessThanOrEqual(lhs, rhs);
7058 case kExprF32Gt:
7059 return __ Float32LessThan(rhs, lhs);
7060 case kExprF32Ge:
7061 return __ Float32LessThanOrEqual(rhs, lhs);
7062 case kExprF32Min:
7063 return __ Float32Min(rhs, lhs);
7064 case kExprF32Max:
7065 return __ Float32Max(rhs, lhs);
7066 case kExprF64CopySign: {
7067 V<Word64> lhs_without_sign = __ Word64BitwiseAnd(
7068 __ BitcastFloat64ToWord64(lhs), 0x7fffffffffffffff);
7069 V<Word64> rhs_sign = __ Word64BitwiseAnd(__ BitcastFloat64ToWord64(rhs),
7070 0x8000000000000000);
7071 return __ BitcastWord64ToFloat64(
7072 __ Word64BitwiseOr(lhs_without_sign, rhs_sign));
7073 }
7074 case kExprF64Add:
7075 return __ Float64Add(lhs, rhs);
7076 case kExprF64Sub:
7077 return __ Float64Sub(lhs, rhs);
7078 case kExprF64Mul:
7079 return __ Float64Mul(lhs, rhs);
7080 case kExprF64Div:
7081 return __ Float64Div(lhs, rhs);
7082 case kExprF64Eq:
7083 return __ Float64Equal(lhs, rhs);
7084 case kExprF64Ne:
7085 return __ Word32Equal(__ Float64Equal(lhs, rhs), 0);
7086 case kExprF64Lt:
7087 return __ Float64LessThan(lhs, rhs);
7088 case kExprF64Le:
7089 return __ Float64LessThanOrEqual(lhs, rhs);
7090 case kExprF64Gt:
7091 return __ Float64LessThan(rhs, lhs);
7092 case kExprF64Ge:
7093 return __ Float64LessThanOrEqual(rhs, lhs);
7094 case kExprF64Min:
7095 return __ Float64Min(lhs, rhs);
7096 case kExprF64Max:
7097 return __ Float64Max(lhs, rhs);
7098 case kExprF64Pow:
7099 return __ Float64Power(lhs, rhs);
7100 case kExprF64Atan2:
7101 return __ Float64Atan2(lhs, rhs);
7102 case kExprF64Mod:
7103 return CallCStackSlotToStackSlot(
7104 lhs, rhs, ExternalReference::f64_mod_wrapper_function(),
7105 MemoryRepresentation::Float64());
7106 case kExprRefEq:
7107 return __ TaggedEqual(lhs, rhs);
7108 case kExprI32AsmjsDivS: {
7109 // asmjs semantics return 0 when dividing by 0.
7110 if (SupportedOperations::int32_div_is_safe()) {
7111 return __ Int32Div(lhs, rhs);
7112 }
7113 Label<Word32> done(&asm_);
7114 IF (UNLIKELY(__ Word32Equal(rhs, 0))) {
7115 GOTO(done, __ Word32Constant(0));
7116 } ELSE {
7117 IF (UNLIKELY(__ Word32Equal(rhs, -1))) {
7118 GOTO(done, __ Word32Sub(0, lhs));
7119 } ELSE {
7120 GOTO(done, __ Int32Div(lhs, rhs));
7121 }
7122 }
7123 BIND(done, result);
7124 return result;
7125 }
7126 case kExprI32AsmjsDivU: {
7127 // asmjs semantics return 0 when dividing by 0.
7128 if (SupportedOperations::uint32_div_is_safe()) {
7129 return __ Uint32Div(lhs, rhs);
7130 }
7131 Label<Word32> done(&asm_);
7132 IF (UNLIKELY(__ Word32Equal(rhs, 0))) {
7133 GOTO(done, __ Word32Constant(0));
7134 } ELSE {
7135 GOTO(done, __ Uint32Div(lhs, rhs));
7136 }
7137 BIND(done, result);
7138 return result;
7139 }
7140 case kExprI32AsmjsRemS: {
7141 // General case for signed integer modulus, with optimization for
7142 // (unknown) power of 2 right hand side.
7143 //
7144 // if 0 < rhs then
7145 // mask = rhs - 1
7146 // if rhs & mask != 0 then
7147 // lhs % rhs
7148 // else
7149 // if lhs < 0 then
7150 // -(-lhs & mask)
7151 // else
7152 // lhs & mask
7153 // else
7154 // if rhs < -1 then
7155 // lhs % rhs
7156 // else
7157 // zero
7158 Label<Word32> done(&asm_);
7159 IF (__ Int32LessThan(0, rhs)) {
7160 V<Word32> mask = __ Word32Sub(rhs, 1);
7161 IF (__ Word32Equal(__ Word32BitwiseAnd(rhs, mask), 0)) {
7162 IF (UNLIKELY(__ Int32LessThan(lhs, 0))) {
7163 V<Word32> neg_lhs = __ Word32Sub(0, lhs);
7164 V<Word32> combined = __ Word32BitwiseAnd(neg_lhs, mask);
7165 GOTO(done, __ Word32Sub(0, combined));
7166 } ELSE {
7167 GOTO(done, __ Word32BitwiseAnd(lhs, mask));
7168 }
7169 } ELSE {
7170 GOTO(done, __ Int32Mod(lhs, rhs));
7171 }
7172 } ELSE {
7173 IF (__ Int32LessThan(rhs, -1)) {
7174 GOTO(done, __ Int32Mod(lhs, rhs));
7175 } ELSE {
7176 GOTO(done, __ Word32Constant(0));
7177 }
7178 }
7179 BIND(done, result);
7180 return result;
7181 }
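// Worked example for the power-of-two fast path above: for lhs == -13 and
// rhs == 8, mask == 7 and (rhs & mask) == 0, so the negative-lhs branch
// computes -(-(-13) & 7) == -(13 & 7) == -5, matching truncated division
// semantics where -13 % 8 == -5.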
7182 case kExprI32AsmjsRemU: {
7183 // asmjs semantics return 0 for mod with 0.
7184 Label<Word32> done(&asm_);
7185 IF (UNLIKELY(__ Word32Equal(rhs, 0))) {
7186 GOTO(done, __ Word32Constant(0));
7187 } ELSE {
7188 GOTO(done, __ Uint32Mod(lhs, rhs));
7189 }
7190 BIND(done, result);
7191 return result;
7192 }
7193 case kExprI32AsmjsStoreMem8:
7194 AsmjsStoreMem(lhs, rhs, MemoryRepresentation::Int8());
7195 return rhs;
7196 case kExprI32AsmjsStoreMem16:
7197 AsmjsStoreMem(lhs, rhs, MemoryRepresentation::Int16());
7198 return rhs;
7199 case kExprI32AsmjsStoreMem:
7200 AsmjsStoreMem(lhs, rhs, MemoryRepresentation::Int32());
7201 return rhs;
7202 case kExprF32AsmjsStoreMem:
7203 AsmjsStoreMem(lhs, rhs, MemoryRepresentation::Float32());
7204 return rhs;
7205 case kExprF64AsmjsStoreMem:
7206 AsmjsStoreMem(lhs, rhs, MemoryRepresentation::Float64());
7207 return rhs;
7208 default:
7209 UNREACHABLE();
7210 }
7211 }
7212
7214 const wasm::WasmMemory* memory, MemoryRepresentation repr, OpIndex index,
7215 uintptr_t offset, compiler::EnforceBoundsCheck enforce_bounds_check,
7216 compiler::AlignmentCheck alignment_check) {
7217 // The function body decoder already validated that the access is not
7218 // statically OOB.
7219 DCHECK(base::IsInBounds(offset, static_cast<uintptr_t>(repr.SizeInBytes()),
7220 memory->max_memory_size));
7221
7222 wasm::BoundsCheckStrategy bounds_checks = memory->bounds_checks;
7223 // Convert the index to uintptr.
7224 // TODO(jkummerow): This should reuse MemoryAddressToUintPtrOrOOBTrap.
7225 V<WordPtr> converted_index = index;
7226 if (!memory->is_memory64()) {
7227 // Note: this doesn't just satisfy the compiler's internal consistency
7228 // checks; it's also load-bearing to prevent escaping from a compromised
7229 // sandbox (where in-sandbox corruption can cause the high word of
7230 // what's supposed to be an i32 to be non-zero).
7231 converted_index = __ ChangeUint32ToUintPtr(index);
7232 } else if (kSystemPointerSize == kInt32Size) {
7233 // Truncate index to 32-bit.
7234 converted_index = V<WordPtr>::Cast(__ TruncateWord64ToWord32(index));
7235 }
7236
7237 const uintptr_t align_mask = repr.SizeInBytes() - 1;
7238 // Do alignment checks only for > 1 byte accesses (otherwise they trivially
7239 // pass).
7240 if (static_cast<bool>(alignment_check) && align_mask != 0) {
7241 // TODO(14108): Optimize constant index as per wasm-compiler.cc.
7242
7243 // Unlike regular memory accesses, atomic memory accesses should trap if
7244 // the effective offset is misaligned.
7245 // TODO(wasm): this addition is redundant with one inserted by
7246 // {MemBuffer}.
7247 OpIndex effective_offset =
7248 __ WordPtrAdd(MemBuffer(memory->index, offset), converted_index);
7249
7250 V<Word32> cond = __ TruncateWordPtrToWord32(__ WordPtrBitwiseAnd(
7251 effective_offset, __ IntPtrConstant(align_mask)));
7252 __ TrapIfNot(__ Word32Equal(cond, __ Word32Constant(0)),
7253 TrapId::kTrapUnalignedAccess);
7254 }
7255
7256 // If no bounds checks should be performed (for testing), just return the
7257 // converted index and assume it to be in-bounds.
7258 if (bounds_checks == wasm::kNoBoundsChecks) {
7259 return {converted_index, compiler::BoundsCheckResult::kInBounds};
7260 }
7261
7262 if (memory->is_memory64() && kSystemPointerSize == kInt32Size) {
7263 // In memory64 mode on 32-bit systems, the upper 32 bits need to be zero
7264 // to succeed the bounds check.
7265 DCHECK_EQ(kExplicitBoundsChecks, bounds_checks);
7266 V<Word32> high_word =
7267 __ TruncateWord64ToWord32(__ Word64ShiftRightLogical(index, 32));
7268 __ TrapIf(high_word, TrapId::kTrapMemOutOfBounds);
7269 }
7270
7271 uintptr_t end_offset = offset + repr.SizeInBytes() - 1u;
7272 DCHECK_LT(end_offset, memory->max_memory_size);
7273
7274 // The index can be invalid if we are generating unreachable operations.
7275 if (end_offset <= memory->min_memory_size && index.valid() &&
7276 __ output_graph().Get(index).Is<ConstantOp>()) {
7277 ConstantOp& constant_index_op =
7278 __ output_graph().Get(index).Cast<ConstantOp>();
7279 uintptr_t constant_index = memory->is_memory64()
7280 ? constant_index_op.word64()
7281 : constant_index_op.word32();
7282 if (constant_index < memory->min_memory_size - end_offset) {
7283 return {converted_index, compiler::BoundsCheckResult::kInBounds};
7284 }
7285 }
7286
7287#if V8_TRAP_HANDLER_SUPPORTED
7288 if (bounds_checks == kTrapHandler &&
7289 enforce_bounds_check ==
7290 compiler::EnforceBoundsCheck::kCanOmitBoundsCheck) {
7291 if (memory->is_memory64()) {
7292 // Bounds check `index` against `kMaxMemory64Size - end_offset`, such
7293 // that at runtime `index + end_offset` will be within
7294 // `kMaxMemory64Size`, where the trap handler can handle out-of-bound
7295 // accesses.
7296 V<Word32> cond = __ Uint64LessThan(
7297 V<Word64>::Cast(converted_index),
7298 __ Word64Constant(uint64_t{wasm::kMaxMemory64Size - end_offset}));
7299 __ TrapIfNot(cond, TrapId::kTrapMemOutOfBounds);
7300 }
7301 return {converted_index, compiler::BoundsCheckResult::kTrapHandler};
7302 }
7303#else
7304 CHECK_NE(bounds_checks, kTrapHandler);
7305#endif // V8_TRAP_HANDLER_SUPPORTED
7306
7307 V<WordPtr> memory_size = MemSize(memory->index);
7308 if (end_offset > memory->min_memory_size) {
7309 // The end offset is larger than the smallest memory.
7310 // Dynamically check the end offset against the dynamic memory size.
7311 __ TrapIfNot(
7312 __ UintPtrLessThan(__ UintPtrConstant(end_offset), memory_size),
7313 TrapId::kTrapMemOutOfBounds);
7314 }
7315
7316 // This produces a positive number since {end_offset <= min_size <=
7317 // mem_size}.
7318 V<WordPtr> effective_size = __ WordPtrSub(memory_size, end_offset);
7319 __ TrapIfNot(__ UintPtrLessThan(converted_index, effective_size),
7320 TrapId::kTrapMemOutOfBounds);
7321 return {converted_index, compiler::BoundsCheckResult::kDynamicallyChecked};
7322 }
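// Summary of the dynamic check above (nothing beyond the arithmetic already
// in the code): with end_offset == offset + access_size - 1 and
// end_offset <= mem_size, the condition index < mem_size - end_offset
// implies index + offset + access_size <= mem_size, so the whole access is
// in bounds.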
7323
7324 V<WordPtr> MemStart(uint32_t index) {
7325 if (index == 0) {
7326 // TODO(14108): Port TF's dynamic "cached_memory_index" infrastructure.
7328 } else {
7329 // TODO(14616): Fix sharedness.
7330 V<TrustedFixedAddressArray> instance_memories =
7332 MemoryBasesAndSizes,
7334 return __ Load(instance_memories, LoadOp::Kind::TaggedBase(),
7337 }
7338 }
7339
7340 V<WordPtr> MemBuffer(uint32_t mem_index, uintptr_t offset) {
7341 V<WordPtr> mem_start = MemStart(mem_index);
7342 if (offset == 0) return mem_start;
7343 return __ WordPtrAdd(mem_start, offset);
7344 }
7345
7346 V<WordPtr> MemSize(uint32_t index) {
7347 if (index == 0) {
7348 // TODO(14108): Port TF's dynamic "cached_memory_index" infrastructure.
7350 } else {
7351 // TODO(14616): Fix sharedness.
7352 V<TrustedByteArray> instance_memories =
7354 MemoryBasesAndSizes,
7356 return __ Load(
7357 instance_memories, LoadOp::Kind::TaggedBase().NotLoadEliminable(),
7360 }
7361 }
7362
7381
7382 void TraceMemoryOperation(FullDecoder* decoder, bool is_store,
7383 MemoryRepresentation repr, V<WordPtr> index,
7384 uintptr_t offset) {
7385 int kAlign = 4; // Ensure that the LSB is 0, like a Smi.
7386 V<WordPtr> info = __ StackSlot(sizeof(MemoryTracingInfo), kAlign);
7387 V<WordPtr> effective_offset = __ WordPtrAdd(index, offset);
7388 __ Store(info, effective_offset, StoreOp::Kind::RawAligned(),
7390 offsetof(MemoryTracingInfo, offset));
7391 __ Store(info, __ Word32Constant(is_store ? 1 : 0),
7393 compiler::kNoWriteBarrier, offsetof(MemoryTracingInfo, is_store));
7394 V<Word32> rep_as_int = __ Word32Constant(
7395 static_cast<int>(repr.ToMachineType().representation()));
7396 __ Store(info, rep_as_int, StoreOp::Kind::RawAligned(),
7398 offsetof(MemoryTracingInfo, mem_rep));
7399 CallRuntime(decoder->zone(), Runtime::kWasmTraceMemory, {info},
7400 __ NoContextConstant());
7401 }
7402
7403 void StackCheck(WasmStackCheckOp::Kind kind, FullDecoder* decoder) {
7404 if (V8_UNLIKELY(!v8_flags.wasm_stack_checks)) return;
7405 __ WasmStackCheck(kind);
7406 }
7407
7408 private:
7410 FullDecoder* decoder, uint32_t function_index) {
7411 ModuleTypeIndex sig_index =
7412 decoder->module_->functions[function_index].sig_index;
7413 bool shared = decoder->module_->type(sig_index).is_shared;
7415 function_index, trusted_instance_data(shared));
7416 }
7417
7418 // Returns the call target and the implicit argument (WasmTrustedInstanceData
7419 // or WasmImportData) for an indirect call.
7420 std::pair<V<Word32>, V<ExposedTrustedObject>>
7422 V<WordPtr> index_wordptr,
7424 bool needs_type_or_null_check = true) {
7425 static_assert(kV8MaxWasmTableSize < size_t{kMaxInt});
7426 const WasmTable* table = imm.table_imm.table;
7427
7428 /* Step 1: Load the indirect function tables for this table. */
7429 V<WasmDispatchTable> dispatch_table;
7430 if (imm.table_imm.index == 0) {
7431 dispatch_table =
7433 DispatchTable0, WasmDispatchTable);
7434 } else {
7435 V<ProtectedFixedArray> dispatch_tables =
7437 trusted_instance_data(table->shared), DispatchTables,
7439 dispatch_table =
7440 V<WasmDispatchTable>::Cast(__ LoadProtectedFixedArrayElement(
7441 dispatch_tables, imm.table_imm.index));
7442 }
7443
7444 /* Step 2: Bounds check against the table size. */
7445 V<Word32> table_length;
7446 bool needs_dynamic_size =
7447 !table->has_maximum_size || table->maximum_size != table->initial_size;
7448 if (needs_dynamic_size) {
7449 table_length = __ LoadField<Word32>(
7450 dispatch_table, AccessBuilder::ForWasmDispatchTableLength());
7451 } else {
7452 table_length = __ Word32Constant(table->initial_size);
7453 }
7454 V<Word32> in_bounds = __ UintPtrLessThan(
7455 index_wordptr, __ ChangeUint32ToUintPtr(table_length));
7456 __ TrapIfNot(in_bounds, TrapId::kTrapTableOutOfBounds);
7457
7458 /* Step 3: Check the canonical real signature against the canonical declared
7459 * signature. */
7460 ModuleTypeIndex sig_index = imm.sig_imm.index;
7461 bool needs_type_check =
7462 needs_type_or_null_check &&
7463 !EquivalentTypes(table->type.AsNonNull(),
7465 decoder->module_, decoder->module_);
7466 bool needs_null_check =
7467 needs_type_or_null_check && table->type.is_nullable();
7468
7469 V<WordPtr> dispatch_table_entry_offset = __ WordPtrAdd(
7470 __ WordPtrMul(index_wordptr, WasmDispatchTable::kEntrySize),
7472
7473 if (needs_type_check) {
7474 CanonicalTypeIndex sig_id = env_->module->canonical_sig_id(sig_index);
7475 V<Word32> expected_canonical_sig =
7476 __ RelocatableWasmCanonicalSignatureId(sig_id.index);
7477
7478 V<Word32> loaded_sig =
7479 __ Load(dispatch_table, dispatch_table_entry_offset,
7482 V<Word32> sigs_match = __ Word32Equal(expected_canonical_sig, loaded_sig);
7483 if (!decoder->module_->type(sig_index).is_final) {
7484 // In this case, a full type check is needed.
7485 Label<> end(&asm_);
7486
7487 // First, check if signatures happen to match exactly.
7488 GOTO_IF(sigs_match, end);
7489
7490 if (needs_null_check) {
7491 // Trap on null element.
7492 __ TrapIf(__ Word32Equal(loaded_sig, -1),
7493 TrapId::kTrapFuncSigMismatch);
7494 }
7495 bool shared = decoder->module_->type(sig_index).is_shared;
7496 V<Map> formal_rtt = __ RttCanon(managed_object_maps(shared), sig_index);
7497 int rtt_depth = GetSubtypingDepth(decoder->module_, sig_index);
7498 DCHECK_GE(rtt_depth, 0);
7499
7500 // Since we have the canonical index of the real rtt, we have to load it
7501 // from the isolate rtt-array (which is canonically indexed). Since this
7502 // reference is weak, we have to promote it to a strong reference.
7503 // Note: The reference cannot have been cleared: Since the loaded_sig
7504 // corresponds to a function of the same canonical type, that function
7505 // will have kept the type alive.
7506 V<WeakFixedArray> rtts = LOAD_ROOT(WasmCanonicalRtts);
7507 V<Object> weak_rtt = __ Load(
7508 rtts, __ ChangeInt32ToIntPtr(loaded_sig),
7511 V<Map> real_rtt =
7512 V<Map>::Cast(__ BitcastWordPtrToTagged(__ WordPtrBitwiseAnd(
7513 __ BitcastHeapObjectToWordPtr(V<HeapObject>::Cast(weak_rtt)),
7515 V<WasmTypeInfo> type_info =
7516 __ Load(real_rtt, LoadOp::Kind::TaggedBase(),
7518 Map::kConstructorOrBackPointerOrNativeContextOffset);
7519 // If the depth of the rtt is known to be less than the minimum
7520 // supertype array length, we can access the supertype without
7521 // bounds-checking the supertype array.
7522 if (static_cast<uint32_t>(rtt_depth) >=
7524 V<Word32> supertypes_length =
7525 __ UntagSmi(__ Load(type_info, LoadOp::Kind::TaggedBase(),
7527 WasmTypeInfo::kSupertypesLengthOffset));
7528 __ TrapIfNot(__ Uint32LessThan(rtt_depth, supertypes_length),
7529 OpIndex::Invalid(), TrapId::kTrapFuncSigMismatch);
7530 }
7531 V<Map> maybe_match =
7532 __ Load(type_info, LoadOp::Kind::TaggedBase(),
7534 WasmTypeInfo::kSupertypesOffset + kTaggedSize * rtt_depth);
7535 __ TrapIfNot(__ TaggedEqual(maybe_match, formal_rtt),
7536 OpIndex::Invalid(), TrapId::kTrapFuncSigMismatch);
7537 GOTO(end);
7538 BIND(end);
7539 } else {
7540 // In this case, signatures must match exactly.
7541 __ TrapIfNot(sigs_match, TrapId::kTrapFuncSigMismatch);
7542 }
7543 } else if (needs_null_check) {
7544 V<Word32> loaded_sig =
7545 __ Load(dispatch_table, dispatch_table_entry_offset,
7548 __ TrapIf(__ Word32Equal(-1, loaded_sig), TrapId::kTrapFuncSigMismatch);
7549 }
7550
7551 /* Step 4: Extract ref and target. */
7552 V<Word32> target = __ Load(
7553 dispatch_table, dispatch_table_entry_offset, LoadOp::Kind::TaggedBase(),
7555 V<ExposedTrustedObject> implicit_arg =
7556 V<ExposedTrustedObject>::Cast(__ LoadProtectedPointerField(
7557 dispatch_table, dispatch_table_entry_offset,
7559 0));
7560
7561 return {target, implicit_arg};
7562 }
7563
7564 // Load the call target and implicit arg (WasmTrustedInstanceData or
7565 // WasmImportData) from a function reference.
7566 std::pair<V<Word32>, V<ExposedTrustedObject>>
7568 ValueType type) {
7569 if (type.is_nullable() &&
7571 func_ref = V<WasmFuncRef>::Cast(
7572 __ AssertNotNull(func_ref, type, TrapId::kTrapNullDereference));
7573 }
7574
7575 LoadOp::Kind load_kind =
7576 type.is_nullable() && null_check_strategy_ ==
7580
7581 V<WasmInternalFunction> internal_function =
7582 V<WasmInternalFunction>::Cast(__ LoadTrustedPointerField(
7583 func_ref, load_kind, kWasmInternalFunctionIndirectPointerTag,
7584 WasmFuncRef::kTrustedInternalOffset));
7585
7586 return BuildFunctionTargetAndImplicitArg(internal_function);
7587 }
7588
7590 return type.is_object_reference()
7591 ? __ AnnotateWasmType(V<Object>::Cast(result), type)
7592 : result;
7593 }
7594
7596 FullDecoder* decoder, const FunctionSig* sig, V<CallTarget> callee,
7597 V<HeapObject> ref, const Value args[], Value returns[],
7599 CheckForException check_for_exception =
7601 const TSCallDescriptor* descriptor = TSCallDescriptor::Create(
7604 __ graph_zone());
7605
7606 SmallZoneVector<OpIndex, 16> arg_indices(sig->parameter_count() + 1,
7607 decoder->zone());
7608 arg_indices[0] = ref;
7609 for (uint32_t i = 0; i < sig->parameter_count(); i++) {
7610 arg_indices[i + 1] = args[i].op;
7611 }
7612
7614 decoder, callee, base::VectorOf(arg_indices), descriptor,
7615 check_for_exception, OpEffects().CanCallAnything());
7616
7617 if (sig->return_count() == 1) {
7618 returns[0].op = AnnotateResultIfReference(call, sig->GetReturn(0));
7619 } else if (sig->return_count() > 1) {
7620 for (uint32_t i = 0; i < sig->return_count(); i++) {
7621 wasm::ValueType type = sig->GetReturn(i);
7622 returns[i].op = AnnotateResultIfReference(
7623 __ Projection(call, i, RepresentationFor(type)), type);
7624 }
7625 }
7626 // Calls might mutate cached instance fields.
7628 }
7629
7630 private:
7632 FullDecoder* decoder, const FunctionSig* sig, V<CallTarget> callee,
7633 V<HeapObject> ref, const Value args[],
7635 if (mode_ == kRegular || mode_ == kInlinedTailCall) {
7636 const TSCallDescriptor* descriptor = TSCallDescriptor::Create(
7639 __ graph_zone());
7640
7641 SmallZoneVector<OpIndex, 16> arg_indices(sig->parameter_count() + 1,
7642 decoder->zone_);
7643 arg_indices[0] = ref;
7644 for (uint32_t i = 0; i < sig->parameter_count(); i++) {
7645 arg_indices[i + 1] = args[i].op;
7646 }
7647 __ TailCall(callee, base::VectorOf(arg_indices), descriptor);
7648 } else {
7649 if (__ generating_unreachable_operations()) return;
7650 // This is a tail call in the inlinee, which in turn was a regular call.
7651 // Transform the tail call into a regular call, and return the return
7652 // values to the caller.
7653 size_t return_count = sig->return_count();
7654 SmallZoneVector<Value, 16> returns(return_count, decoder->zone_);
7655 // Since an exception in a tail call cannot be caught in this frame, we
7656 // should only catch exceptions in the generated call if this is a
7657 // recursively inlined function, and the parent frame provides a handler.
7658 BuildWasmCall(decoder, sig, callee, ref, args, returns.data(), call_kind,
7660 for (size_t i = 0; i < return_count; i++) {
7661 return_phis_->AddInputForPhi(i, returns[i].op);
7662 }
7663 __ Goto(return_block_);
7664 }
7665 }
7666
7667 template <typename Descriptor>
7670 FullDecoder* decoder, const typename Descriptor::arguments_t& args,
7671 CheckForException check_for_exception = CheckForException::kNo)
7672 requires(!Descriptor::kNeedsContext)
7673 {
7675
7676 V<WordPtr> callee =
7677 __ RelocatableWasmBuiltinCallTarget(Descriptor::kFunction);
7678 auto arguments = std::apply(
7679 [](auto&&... as) {
7680 return base::SmallVector<
7681 OpIndex, std::tuple_size_v<typename Descriptor::arguments_t> + 1>{
7682 std::forward<decltype(as)>(as)...};
7683 },
7684 args);
7685
7687 decoder, callee, base::VectorOf(arguments),
7688 Descriptor::Create(StubCallMode::kCallWasmRuntimeStub,
7689 __ output_graph().graph_zone()),
7690 check_for_exception, Descriptor::kEffects);
7691 }
7692
7693 template <typename Descriptor>
7696 FullDecoder* decoder, V<Context> context,
7697 const typename Descriptor::arguments_t& args,
7698 CheckForException check_for_exception = CheckForException::kNo)
7699 requires Descriptor::kNeedsContext
7700 {
7702
7703 V<WordPtr> callee =
7704 __ RelocatableWasmBuiltinCallTarget(Descriptor::kFunction);
7705 auto arguments = std::apply(
7706 [context](auto&&... as) {
7707 return base::SmallVector<
7708 OpIndex, std::tuple_size_v<typename Descriptor::arguments_t> + 1>{
7709 std::forward<decltype(as)>(as)..., context};
7710 },
7711 args);
7712
7714 decoder, callee, base::VectorOf(arguments),
7715 Descriptor::Create(StubCallMode::kCallWasmRuntimeStub,
7716 __ output_graph().graph_zone()),
7717 check_for_exception, Descriptor::kEffects);
7718 }
7719
7720 template <typename Descriptor>
7723 FullDecoder* decoder, const typename Descriptor::arguments_t& args,
7724 CheckForException check_for_exception = CheckForException::kNo)
7725 requires(!Descriptor::kNeedsContext)
7726 {
7728
7729 V<WordPtr> callee = GetBuiltinPointerTarget(Descriptor::kFunction);
7730 auto arguments = std::apply(
7731 [](auto&&... as) {
7732 return base::SmallVector<
7733 OpIndex, std::tuple_size_v<typename Descriptor::arguments_t> + 1>{
7734 std::forward<decltype(as)>(as)...};
7735 },
7736 args);
7737
7739 decoder, callee, base::VectorOf(arguments),
7740 Descriptor::Create(StubCallMode::kCallBuiltinPointer,
7741 __ output_graph().graph_zone()),
7742 check_for_exception, Descriptor::kEffects);
7743 }
7744
7745 private:
7747 CheckForException check_for_exception) {
7748 // For tail calls that we transform to regular calls, we need to set the
7749 // call's position to that of the inlined call node to get correct stack
7750 // traces.
7751 if (check_for_exception == CheckForException::kCatchInParentFrame) {
7752 __ output_graph().operation_origins()[call] = WasmPositionToOpIndex(
7756 }
7757 }
7758
7761 const TSCallDescriptor* descriptor,
7762 CheckForException check_for_exception,
7763 OpEffects effects) {
7764 if (check_for_exception == CheckForException::kNo) {
7765 return __ Call(callee, OpIndex::Invalid(), args, descriptor, effects);
7766 }
7767 bool handled_in_this_frame =
7768 decoder && decoder->current_catch() != -1 &&
7769 check_for_exception == CheckForException::kCatchInThisFrame;
7770 if (!handled_in_this_frame && mode_ != kInlinedWithCatch) {
7771 OpIndex call =
7772 __ Call(callee, OpIndex::Invalid(), args, descriptor, effects);
7773 MaybeSetPositionToParent(call, check_for_exception);
7774 return call;
7775 }
7776
7777 TSBlock* catch_block;
7778 if (handled_in_this_frame) {
7779 Control* current_catch =
7780 decoder->control_at(decoder->control_depth_of_current_catch());
7781 catch_block = current_catch->false_or_loop_or_catch_block;
7782 } else {
7784 catch_block = return_catch_block_;
7785 }
7786 TSBlock* success_block = __ NewBlock();
7787 TSBlock* exception_block = __ NewBlock();
7788 OpIndex call;
7789 {
7790 Assembler::CatchScope scope(asm_, exception_block);
7791
7792 call = __ Call(callee, OpIndex::Invalid(), args, descriptor, effects);
7793 __ Goto(success_block);
7794 }
7795
7796 __ Bind(exception_block);
7797 OpIndex exception = __ CatchBlockBegin();
7798 if (handled_in_this_frame) {
7799 // The exceptional operation could have modified memory size; we need
7800 // to reload the memory context into the exceptional control path.
7802 SetupControlFlowEdge(decoder, catch_block, 0, exception);
7803 } else {
7805 if (exception.valid()) return_phis_->AddIncomingException(exception);
7806 // Reloading the InstanceCache will happen when {return_exception_phis_}
7807 // are retrieved.
7808 }
7809 __ Goto(catch_block);
7810
7811 __ Bind(success_block);
7812
7813 MaybeSetPositionToParent(call, check_for_exception);
7814
7815 return call;
7816 }
7817
7819 MemoryRepresentation arg_type) {
7820 OpIndex stack_slot_param =
7821 __ StackSlot(arg_type.SizeInBytes(), arg_type.SizeInBytes());
7822 __ Store(stack_slot_param, arg, StoreOp::Kind::RawAligned(), arg_type,
7825 MachineSignature sig(1, 1, reps);
7826 return CallC(&sig, ref, stack_slot_param);
7827 }
7828
7831 std::initializer_list<std::pair<OpIndex, MemoryRepresentation>> args) {
7832 int slot_size = 0;
7833 for (auto arg : args) slot_size += arg.second.SizeInBytes();
7834 // Since we are storing the arguments unaligned anyway, we do not need
7835 // alignment > 0.
7836 V<WordPtr> stack_slot_param = __ StackSlot(slot_size, 0);
7837 int offset = 0;
7838 for (auto arg : args) {
7839 __ Store(stack_slot_param, arg.first,
7840 StoreOp::Kind::MaybeUnaligned(arg.second), arg.second,
7842 offset += arg.second.SizeInBytes();
7843 }
7845 MachineSignature sig(1, 1, reps);
7846 return CallC(&sig, ref, stack_slot_param);
7847 }
7848
7851 std::initializer_list<std::pair<OpIndex, MemoryRepresentation>> args) {
7852 int slot_size = 0;
7853 for (auto arg : args) slot_size += arg.second.SizeInBytes();
7854 // Since we are storing the arguments unaligned anyway, we do not need
7855 // alignment > 0.
7856 slot_size = std::max<int>(slot_size, res_type.SizeInBytes());
7857 V<WordPtr> stack_slot_param = __ StackSlot(slot_size, 0);
7858 int offset = 0;
7859 for (auto arg : args) {
7860 __ Store(stack_slot_param, arg.first,
7861 StoreOp::Kind::MaybeUnaligned(arg.second), arg.second,
7863 offset += arg.second.SizeInBytes();
7864 }
7866 MachineSignature sig(0, 1, reps);
7867 CallC(&sig, ref, stack_slot_param);
7868 return __ Load(stack_slot_param, LoadOp::Kind::RawAligned(), res_type);
7869 }
7870
7872 MemoryRepresentation arg_type) {
7873 return CallCStackSlotToStackSlot(arg, ref, arg_type, arg_type);
7874 }
7875
7877 MemoryRepresentation arg_type,
7878 MemoryRepresentation res_type) {
7879 return CallCStackSlotToStackSlot(ref, res_type, {{arg, arg_type}});
7880 }
7881
7884 MemoryRepresentation arg_type) {
7885 return CallCStackSlotToStackSlot(ref, arg_type,
7886 {{arg0, arg_type}, {arg1, arg_type}});
7887 }
7888
7890 V<Word> index,
7891 TrapId trap_reason) {
7892 // Note: this {ChangeUint32ToUintPtr} doesn't just satisfy the compiler's
7893 // consistency checks; it's also load-bearing to prevent escaping from a
7894 // compromised sandbox (where in-sandbox corruption can cause the high
7895 // word of what's supposed to be an i32 to be non-zero).
7896 if (address_type == AddressType::kI32) {
7897 return __ ChangeUint32ToUintPtr(V<Word32>::Cast(index));
7898 }
7899 if constexpr (Is64()) {
7900 return V<WordPtr>::Cast(index);
7901 }
7902 __ TrapIf(__ TruncateWord64ToWord32(
7903 __ Word64ShiftRightLogical(V<Word64>::Cast(index), 32)),
7904 OpIndex::Invalid(), trap_reason);
7905 return V<WordPtr>::Cast(__ TruncateWord64ToWord32(V<Word64>::Cast(index)));
7906 }
7907
7909 V<Word> index) {
7910 return MemOrTableAddressToUintPtrOrOOBTrap(address_type, index,
7911 TrapId::kTrapMemOutOfBounds);
7912 }
7913
7915 V<Word> index) {
7916 return MemOrTableAddressToUintPtrOrOOBTrap(address_type, index,
7917 TrapId::kTrapTableOutOfBounds);
7918 }
7919
7921 if constexpr (COMPRESS_POINTERS_BOOL) {
7922 return V<Smi>::Cast(
7923 __ Word32ShiftLeft(value, kSmiShiftSize + kSmiTagSize));
7924 } else {
7925 return V<Smi>::Cast(__ WordPtrShiftLeft(__ ChangeUint32ToUintPtr(value),
7927 }
7928 }
7929
7931 if constexpr (COMPRESS_POINTERS_BOOL) {
7932 return __ Word32ShiftRightLogical(V<Word32>::Cast(value),
7934 } else {
7935 return __ TruncateWordPtrToWord32(__ WordPtrShiftRightLogical(
7937 }
7938 }
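// Both helpers above implement the usual Smi encoding: the 31-bit payload is
// shifted left past the tag bit (and, without pointer compression, past the
// upper 32-bit shift as well). Rough sketch of the compressed case, assuming
// kSmiTagSize == 1 and kSmiShiftSize == 0:
//
//   uint32_t tag(uint32_t value) { return value << 1; }  // tag bit is 0
//   uint32_t untag(uint32_t smi) { return smi >> 1; }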
7939
7941 uint32_t index, V<Word32> value) {
7942 V<Smi> upper_half =
7943 ChangeUint31ToSmi(__ Word32ShiftRightLogical(value, 16));
7944 __ StoreFixedArrayElement(values_array, index, upper_half,
7946 V<Smi> lower_half = ChangeUint31ToSmi(__ Word32BitwiseAnd(value, 0xffffu));
7947 __ StoreFixedArrayElement(values_array, index + 1, lower_half,
7949 }
7950
7952 int index) {
7953 V<Word32> upper_half = __ Word32ShiftLeft(
7955 __ LoadFixedArrayElement(exception_values_array, index))),
7956 16);
7958 __ LoadFixedArrayElement(exception_values_array, index + 1)));
7959 return __ Word32BitwiseOr(upper_half, lower_half);
7960 }
7961
7963 int index) {
7964 V<Word64> upper_half = __ Word64ShiftLeft(
7965 __ ChangeUint32ToUint64(
7966 BuildDecodeException32BitValue(exception_values_array, index)),
7967 32);
7968 V<Word64> lower_half = __ ChangeUint32ToUint64(
7969 BuildDecodeException32BitValue(exception_values_array, index + 2));
7970 return __ Word64BitwiseOr(upper_half, lower_half);
7971 }
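// The helpers above split each 32-bit exception value into two 16-bit
// Smi-sized halves on encode and reassemble them on decode (64-bit values
// use four halves). Round trip in plain C++:
//
//   uint32_t upper = value >> 16;              // stored at index
//   uint32_t lower = value & 0xffff;           // stored at index + 1
//   uint32_t decoded = (upper << 16) | lower;  // == value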
7972
7974 base::Vector<Value> values) {
7975 V<FixedArray> exception_values_array = V<FixedArray>::Cast(
7978 {exception, LOAD_ROOT(wasm_exception_values_symbol)}));
7979
7980 int index = 0;
7981 for (Value& value : values) {
7982 switch (value.type.kind()) {
7983 case kI32:
7984 value.op =
7985 BuildDecodeException32BitValue(exception_values_array, index);
7986 index += 2;
7987 break;
7988 case kI64:
7989 value.op =
7990 BuildDecodeException64BitValue(exception_values_array, index);
7991 index += 4;
7992 break;
7993 case kF32:
7994 value.op = __ BitcastWord32ToFloat32(
7995 BuildDecodeException32BitValue(exception_values_array, index));
7996 index += 2;
7997 break;
7998 case kF64:
7999 value.op = __ BitcastWord64ToFloat64(
8000 BuildDecodeException64BitValue(exception_values_array, index));
8001 index += 4;
8002 break;
8003 case kS128: {
8005 value_s128 = __ Simd128Splat(
8006 BuildDecodeException32BitValue(exception_values_array, index),
8007 compiler::turboshaft::Simd128SplatOp::Kind::kI32x4);
8008 index += 2;
8009 using Kind = compiler::turboshaft::Simd128ReplaceLaneOp::Kind;
8010 value_s128 = __ Simd128ReplaceLane(
8011 value_s128,
8012 BuildDecodeException32BitValue(exception_values_array, index),
8013 Kind::kI32x4, 1);
8014 index += 2;
8015 value_s128 = __ Simd128ReplaceLane(
8016 value_s128,
8017 BuildDecodeException32BitValue(exception_values_array, index),
8018 Kind::kI32x4, 2);
8019 index += 2;
8020 value.op = __ Simd128ReplaceLane(
8021 value_s128,
8022 BuildDecodeException32BitValue(exception_values_array, index),
8023 Kind::kI32x4, 3);
8024 index += 2;
8025 break;
8026 }
8027 case kRef:
8028 case kRefNull:
8029 value.op = __ LoadFixedArrayElement(exception_values_array, index);
8030 index++;
8031 break;
8032 case kI8:
8033 case kI16:
8034 case kF16:
8035 case kVoid:
8036 case kTop:
8037 case kBottom:
8038 UNREACHABLE();
8039 }
8040 }
8041 }
8042
8048
8050 MemoryRepresentation repr) {
8051 // Since asmjs does not support unaligned accesses, we can bounds-check
8052 // ignoring the access size.
8053 // Technically, we should do a signed 32-to-ptr extension here. However,
8054 // that is an explicit instruction, whereas unsigned extension is implicit.
8055 // Since the difference is only observable for memories larger than 2 GiB,
8056 // and since we disallow such memories, we can use unsigned extension.
8057 V<WordPtr> index_ptr = __ ChangeUint32ToUintPtr(index);
8058 IF (LIKELY(__ UintPtrLessThan(index_ptr, MemSize(0)))) {
8059 __ Store(MemStart(0), index_ptr, value, StoreOp::Kind::RawAligned(), repr,
8061 }
8062 }
8063
8065 // Since asmjs does not support unaligned accesses, we can bounds-check
8066 // ignoring the access size.
8067 Variable result = __ NewVariable(repr.ToRegisterRepresentation());
8068
8069 // Technically, we should do a signed 32-to-ptr extension here. However,
8070 // that is an explicit instruction, whereas unsigned extension is implicit.
8071 // Since the difference is only observable for memories larger than 2 GiB,
8072 // and since we disallow such memories, we can use unsigned extension.
8073 V<WordPtr> index_ptr = __ ChangeUint32ToUintPtr(index);
8074 IF (LIKELY(__ UintPtrLessThan(index_ptr, MemSize(0)))) {
8075 __ SetVariable(result, __ Load(MemStart(0), index_ptr,
8076 LoadOp::Kind::RawAligned(), repr));
8077 } ELSE {
8078 switch (repr) {
8085 __ SetVariable(result, __ Word32Constant(0));
8086 break;
8088 __ SetVariable(result, __ Float32Constant(
8089 std::numeric_limits<float>::quiet_NaN()));
8090 break;
8092 __ SetVariable(result, __ Float64Constant(
8093 std::numeric_limits<double>::quiet_NaN()));
8094 break;
8095 default:
8096 UNREACHABLE();
8097 }
8098 }
8099
8100 OpIndex result_op = __ GetVariable(result);
8101 __ SetVariable(result, OpIndex::Invalid());
8102 return result_op;
8103 }
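// Note on the ELSE branch above: per asm.js semantics an out-of-bounds load
// does not trap; integer loads evaluate to 0 and floating-point loads to
// NaN, which is exactly what is materialized here.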
8104
8106 ValueType array_type) {
8107 if (V8_UNLIKELY(v8_flags.experimental_wasm_skip_bounds_checks)) {
8108 if (array_type.is_nullable()) {
8109 __ AssertNotNull(array, array_type, TrapId::kTrapNullDereference);
8110 }
8111 } else {
8112 OpIndex length = __ ArrayLength(array, array_type.is_nullable()
8115 __ TrapIfNot(__ Uint32LessThan(index, length),
8116 TrapId::kTrapArrayOutOfBounds);
8117 }
8118 }
8119
8121 V<Word32> index, V<Word32> length,
8122 compiler::CheckForNull null_check) {
8123 if (V8_UNLIKELY(v8_flags.experimental_wasm_skip_bounds_checks)) {
8124 return V<WasmArray>::Cast(array);
8125 }
8126 V<Word32> array_length = __ ArrayLength(array, null_check);
8127 V<Word32> range_end = __ Word32Add(index, length);
8128 V<Word32> range_valid = __ Word32BitwiseAnd(
8129 // OOB if (index + length > array.len).
8130 __ Uint32LessThanOrEqual(range_end, array_length),
8131 // OOB if (index + length) overflows.
8132 __ Uint32LessThanOrEqual(index, range_end));
8133 __ TrapIfNot(range_valid, TrapId::kTrapArrayOutOfBounds);
8134 // The array is now guaranteed to be non-null.
8135 return V<WasmArray>::Cast(array);
8136 }
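// Example of why the second comparison above is needed: for a 100-element
// array, index == 0xfffffff0 and length == 0x20 wrap around to
// range_end == 0x10, so range_end <= array_length alone would pass; the
// additional index <= range_end check catches the overflow and traps.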
8137
8138 void BrOnCastImpl(FullDecoder* decoder, V<Map> rtt,
8139 compiler::WasmTypeCheckConfig config, const Value& object,
8140 Value* value_on_branch, uint32_t br_depth,
8141 bool null_succeeds) {
8142 OpIndex cast_succeeds = __ WasmTypeCheck(object.op, rtt, config);
8143 IF (cast_succeeds) {
8144 // Narrow type for the successful cast target branch.
8145 Forward(decoder, object, value_on_branch);
8146 BrOrRet(decoder, br_depth);
8147 }
8148 // Note: unlike br_on_cast_fail below, we do not Forward
8149 // the value here to perform a TypeGuard. It can't be done here due to
8150 // asymmetric decoder code. A Forward here would be popped from the stack
8151 // and ignored by the decoder. Therefore the decoder has to call Forward
8152 // itself.
8153 }
8154
8157 const Value& object, Value* value_on_fallthrough,
8158 uint32_t br_depth, bool null_succeeds) {
8159 OpIndex cast_succeeds = __ WasmTypeCheck(object.op, rtt, config);
8160 IF (__ Word32Equal(cast_succeeds, 0)) {
8161 // It is necessary in case of {null_succeeds} to forward the value.
8162 // This will add a TypeGuard to the non-null type (as in this case the
8163 // object is non-nullable).
8164 Forward(decoder, object, decoder->stack_value(1));
8165 BrOrRet(decoder, br_depth);
8166 }
8167 // Narrow type for the successful cast fallthrough branch.
8168 value_on_fallthrough->op =
8169 __ AnnotateWasmType(V<Object>::Cast(object.op), config.to);
8170 }
8171
8173 const ArrayType* array_type, V<Word32> length,
8174 V<Any> initial_value) {
8175 // Initialize the array header.
8176 bool shared = decoder->module_->type(index).is_shared;
8177 V<Map> rtt = __ RttCanon(managed_object_maps(shared), index);
8178 V<WasmArray> array = __ WasmAllocateArray(rtt, length, array_type);
8179 // Initialize the elements.
8180 ArrayFillImpl(array, __ Word32Constant(0), initial_value, length,
8181 array_type, false);
8182 return array;
8183 }
8184
8185 V<Map> GetRttFromDescriptor(const Value& descriptor) {
8186 V<WasmStructNullable> descriptor_struct =
8187 V<WasmStructNullable>::Cast(descriptor.op);
8188 // TODO(jkummerow): Refactor this to share the logic with
8189 // wasm-lowering-reducer.h.
8190 bool force_explicit_null_check =
8192 bool explicit_null_check =
8193 descriptor.type.is_nullable() && force_explicit_null_check;
8194 bool implicit_null_check =
8195 descriptor.type.is_nullable() && !force_explicit_null_check;
8196
8197 if (explicit_null_check) {
8198 __ TrapIf(__ IsNull(descriptor_struct, wasm::kWasmAnyRef),
8199 TrapId::kTrapNullDereference);
8200 }
8201 LoadOp::Kind load_kind = implicit_null_check ? LoadOp::Kind::TrapOnNull()
8203 load_kind = load_kind.Immutable();
8204 return __ Load(descriptor_struct, load_kind,
8206 WasmStruct::kHeaderSize);
8207 }
8208
8210 const StructIndexImmediate& imm,
8211 const Value& descriptor, OpIndex args[]) {
8212 const TypeDefinition& type = decoder->module_->type(imm.index);
8213 DCHECK_EQ(type.has_descriptor(), descriptor.op.valid());
8214 V<Map> rtt;
8215 if (type.has_descriptor()) {
8216 rtt = GetRttFromDescriptor(descriptor);
8217 } else {
8218 rtt = __ RttCanon(managed_object_maps(type.is_shared), imm.index);
8219 }
8220
8221 V<WasmStruct> struct_value;
8222 if (type.is_descriptor()) {
8223 struct_value = CallBuiltinThroughJumptable<
8224 BuiltinCallDescriptor::WasmAllocateDescriptorStruct>(
8225 decoder, {rtt, __ Word32Constant(imm.index.index)});
8226 } else {
8227 struct_value = __ WasmAllocateStruct(rtt, imm.struct_type);
8228 }
8229
8230 for (uint32_t i = 0; i < imm.struct_type->field_count(); ++i) {
8231 __ StructSet(struct_value, args[i], imm.struct_type, imm.index, i,
8233 }
8234 // If this assert fails, then initialization of the padding field might
8235 // be necessary.
8236 static_assert(Heap::kMinObjectSizeInTaggedWords == 2 &&
8237 WasmStruct::kHeaderSize == 2 * kTaggedSize,
8238 "empty struct might require initialization of padding field");
8239 return struct_value;
8240 }
8241
8243 DCHECK_IMPLIES(!op.valid(), __ generating_unreachable_operations());
8244 if (__ generating_unreachable_operations()) return false;
8245 const Simd128ConstantOp* s128_op =
8246 __ output_graph().Get(op).TryCast<Simd128ConstantOp>();
8247 return s128_op && s128_op->IsZero();
8248 }
8249
8250 void ArrayFillImpl(V<WasmArray> array, V<Word32> index, V<Any> value,
8251 OpIndex length, const wasm::ArrayType* type,
8252 bool emit_write_barrier) {
8253 wasm::ValueType element_type = type->element_type();
8254
8255 // Initialize the array. Use an external function for large arrays with
8256 // null/number initializer. Use a loop for small arrays and reference arrays
8257 // with a non-null initial value.
8258 Label<> done(&asm_);
8259
8260 // The builtin cannot handle s128 values other than 0.
8261 if (!(element_type == wasm::kWasmS128 && !IsSimd128ZeroConstant(value))) {
8262 constexpr uint32_t kArrayNewMinimumSizeForMemSet = 16;
8263 IF_NOT (__ Uint32LessThan(
8264 length, __ Word32Constant(kArrayNewMinimumSizeForMemSet))) {
8265 OpIndex stack_slot = StoreInStackSlot(value, element_type);
8266 MachineType arg_types[]{
8270 MachineSignature sig(0, 6, arg_types);
8271 CallC(&sig, ExternalReference::wasm_array_fill(),
8272 {array, index, length,
8273 __ Word32Constant(emit_write_barrier ? 1 : 0),
8274 __ Word32Constant(element_type.raw_bit_field()), stack_slot});
8275 GOTO(done);
8276 }
8277 }
8278
8279 ScopedVar<Word32> current_index(this, index);
8280
8281 WHILE(__ Uint32LessThan(current_index, __ Word32Add(index, length))) {
8282 __ ArraySet(array, current_index, value, type->element_type());
8283 current_index = __ Word32Add(current_index, 1);
8284 }
8285
8286 GOTO(done);
8287
8288 BIND(done);
8289 }
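// Strategy of ArrayFillImpl above, summarized: arrays with at least
// kArrayNewMinimumSizeForMemSet (16) elements are filled by the external C
// function wasm_array_fill; smaller arrays, and non-zero s128 fill values
// (which that helper cannot handle), fall back to a simple ArraySet loop.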
8290
8292 // The input is unpacked.
8293 ValueType input_type = type.Unpacked();
8294
8295 switch (type.kind()) {
8296 case wasm::kI32:
8297 case wasm::kI8:
8298 case wasm::kI16:
8299 case wasm::kI64:
8300 case wasm::kF32:
8301 case wasm::kF64:
8302 case wasm::kRefNull:
8303 case wasm::kRef:
8304 break;
8305 case wasm::kS128:
8306 // We can only get here if {value} is the constant 0.
8307 DCHECK(__ output_graph().Get(value).Cast<Simd128ConstantOp>().IsZero());
8308 value = __ Word64Constant(uint64_t{0});
8309 input_type = kWasmI64;
8310 break;
8311 case wasm::kF16:
8312 UNIMPLEMENTED();
8313 case wasm::kVoid:
8314 case kTop:
8315 case wasm::kBottom:
8316 UNREACHABLE();
8317 }
8318
8319 // Unlike values on the heap, stack slots are always uncompressed.
8320 MemoryRepresentation memory_rep =
8323 input_type.machine_representation());
8324 V<WordPtr> stack_slot =
8325 __ StackSlot(memory_rep.SizeInBytes(), memory_rep.SizeInBytes(),
8326 type.is_reference());
8327 __ Store(stack_slot, value, StoreOp::Kind::RawAligned(), memory_rep,
8329 return stack_slot;
8330 }
8331
8333 const FunctionSig* sig,
8334 const FunctionSig* inlinee) {
8335 if (sig->parameter_count() != inlinee->parameter_count()) return false;
8336 if (sig->return_count() != inlinee->return_count()) return false;
8337 for (size_t i = 0; i < sig->return_count(); ++i) {
8338 if (!IsSubtypeOf(inlinee->GetReturn(i), sig->GetReturn(i), module))
8339 return false;
8340 }
8341 for (size_t i = 0; i < sig->parameter_count(); ++i) {
8342 if (!IsSubtypeOf(sig->GetParam(i), inlinee->GetParam(i), module))
8343 return false;
8344 }
8345 return true;
8346 }
8347
8348 void InlineWasmCall(FullDecoder* decoder, uint32_t func_index,
8349 const FunctionSig* sig, uint32_t feedback_case,
8350 bool is_tail_call, const Value args[], Value returns[]) {
8351 DCHECK_IMPLIES(is_tail_call, returns == nullptr);
8352 const WasmFunction& inlinee = decoder->module_->functions[func_index];
8353 // In a corrupted sandbox, we can't trust the collected feedback.
8355
8356 SmallZoneVector<OpIndex, 16> inlinee_args(
8357 inlinee.sig->parameter_count() + 1, decoder->zone_);
8358 bool inlinee_is_shared = decoder->module_->function_is_shared(func_index);
8359 inlinee_args[0] = trusted_instance_data(inlinee_is_shared);
8360 for (size_t i = 0; i < inlinee.sig->parameter_count(); i++) {
8361 inlinee_args[i + 1] = args[i].op;
8362 }
8363
8364 base::Vector<const uint8_t> function_bytes =
8365 wire_bytes_->GetCode(inlinee.code);
8366
8367 const wasm::FunctionBody inlinee_body{
8368 inlinee.sig, inlinee.code.offset(), function_bytes.begin(),
8369 function_bytes.end(), inlinee_is_shared};
8370
8371 // If the inlinee was not validated before, do that now.
8372 if (V8_UNLIKELY(!decoder->module_->function_was_validated(func_index))) {
8373 if (ValidateFunctionBody(decoder->zone_, decoder->enabled_,
8374 decoder->module_, decoder->detected_,
8375 inlinee_body)
8376 .failed()) {
8377 // At this point we cannot easily raise a compilation error any more.
8378 // Since this situation is highly unlikely though, we just ignore this
8379 // inlinee, emit a regular call, and move on. The same validation error
8380 // will be triggered again when actually compiling the invalid function.
8381 V<WordPtr> callee =
8382 __ RelocatableConstant(func_index, RelocInfo::WASM_CALL);
8383 if (is_tail_call) {
8385 decoder, sig, callee,
8387 decoder->module_->function_is_shared(func_index)),
8388 args);
8389 } else {
8390 BuildWasmCall(decoder, sig, callee,
8392 decoder->module_->function_is_shared(func_index)),
8393 args, returns);
8394 }
8395 return;
8396 }
8397 decoder->module_->set_function_validated(func_index);
8398 }
8399
8400 BlockPhis fresh_return_phis(decoder->zone_);
8401
8402 Mode inlinee_mode;
8403 TSBlock* callee_catch_block = nullptr;
8404 TSBlock* callee_return_block;
8405 BlockPhis* inlinee_return_phis;
8406
8407 if (is_tail_call) {
8408 if (mode_ == kInlinedTailCall || mode_ == kRegular) {
8409 inlinee_mode = kInlinedTailCall;
8410 callee_return_block = nullptr;
8411 inlinee_return_phis = nullptr;
8412 } else {
8413 // A tail call inlined inside a regular call inherits its settings,
8414 // as any `return` statement returns to the nearest non-tail caller.
8415 inlinee_mode = mode_;
8416 callee_return_block = return_block_;
8417 inlinee_return_phis = return_phis_;
8418 if (mode_ == kInlinedWithCatch) {
8419 callee_catch_block = return_catch_block_;
8420 }
8421 }
8422 } else {
8423 // Regular call (i.e. not a tail call).
8424 if (mode_ == kInlinedWithCatch || decoder->current_catch() != -1) {
8425 inlinee_mode = kInlinedWithCatch;
8426 // TODO(14108): If this is a nested inlining, can we forward the
8427 // caller's catch block instead?
8428 callee_catch_block = __ NewBlock();
8429 } else {
8430 inlinee_mode = kInlinedUnhandled;
8431 }
8432 callee_return_block = __ NewBlock();
8433 inlinee_return_phis = &fresh_return_phis;
8434 }
8435
8436 OptionalV<FrameState> frame_state;
8437 if (deopts_enabled()) {
8438 frame_state = is_tail_call
8440 : CreateFrameState(decoder, sig, /*funcref*/ nullptr,
8441 /*args*/ nullptr);
8442 }
8443
8446 inlinee_decoder(decoder->zone_, decoder->module_, decoder->enabled_,
8447 decoder->detected_, inlinee_body, decoder->zone_, env_,
8448 asm_, inlinee_mode, instance_cache_, assumptions_,
8449 inlining_positions_, func_index, inlinee_is_shared,
8450 wire_bytes_, base::VectorOf(inlinee_args),
8451 callee_return_block, inlinee_return_phis,
8452 callee_catch_block, is_tail_call, frame_state);
8453 SourcePosition call_position =
8456 : inlining_id_);
8457 inlining_positions_->push_back(
8458 {static_cast<int>(func_index), is_tail_call, call_position});
8459 inlinee_decoder.interface().set_inlining_id(
8460 static_cast<uint8_t>(inlining_positions_->size() - 1));
8461 inlinee_decoder.interface().set_parent_position(call_position);
8462 // Explicitly disable deopts if it has already been disabled for this
8463 // function.
8464 if (!deopts_enabled()) {
8465 inlinee_decoder.interface().disable_deopts();
8466 }
8467 if (v8_flags.liftoff) {
8469 inlinee_decoder.interface().set_inlining_decisions(
8471 ->function_calls()[feedback_slot_][feedback_case]);
8472 }
8473 } else {
8475 inlinee_decoder.interface().set_no_liftoff_inlining_budget(
8477 }
8478 inlinee_decoder.Decode();
8479 // The function was already validated above.
8480 DCHECK(inlinee_decoder.ok());
8481
8482 DCHECK_IMPLIES(!is_tail_call && inlinee_mode == kInlinedWithCatch,
8483 inlinee_return_phis != nullptr);
8484
8485 if (!is_tail_call && inlinee_mode == kInlinedWithCatch &&
8486 !inlinee_return_phis->incoming_exceptions().empty()) {
8487 // We need to handle exceptions in the inlined call.
8488 __ Bind(callee_catch_block);
8489 OpIndex exception =
8490 MaybePhi(inlinee_return_phis->incoming_exceptions(), kWasmExternRef);
8491 bool handled_in_this_frame = decoder->current_catch() != -1;
8492 TSBlock* catch_block;
8493 if (handled_in_this_frame) {
8494 Control* current_catch =
8495 decoder->control_at(decoder->control_depth_of_current_catch());
8496 catch_block = current_catch->false_or_loop_or_catch_block;
8497 // The exceptional operation could have modified memory size; we need
8498 // to reload the memory context into the exceptional control path.
8500 SetupControlFlowEdge(decoder, catch_block, 0, exception);
8501 } else {
8503 catch_block = return_catch_block_;
8504 if (exception.valid()) return_phis_->AddIncomingException(exception);
8505 // Reloading the InstanceCache will happen when {return_exception_phis_}
8506 // are retrieved.
8507 }
8508 __ Goto(catch_block);
8509 }
8510
8511 if (!is_tail_call) {
8512 __ Bind(callee_return_block);
8513 BlockPhis* return_phis = inlinee_decoder.interface().return_phis();
8514 size_t return_count = inlinee.sig->return_count();
8515 for (size_t i = 0; i < return_count; i++) {
8516 returns[i].op =
8518 }
8519 }
8520
8521 if (!v8_flags.liftoff) {
8522 set_no_liftoff_inlining_budget(
8523 inlinee_decoder.interface().no_liftoff_inlining_budget());
8524 }
8525 }
8526
8528 switch (reason) {
8529#define TRAPREASON_TO_TRAPID(name) \
8530 case wasm::k##name: \
8531 static_assert(static_cast<int>(TrapId::k##name) == \
8532 static_cast<int>(Builtin::kThrowWasm##name), \
8533 "trap id mismatch"); \
8534 return TrapId::k##name;
8535 FOREACH_WASM_TRAPREASON(TRAPREASON_TO_TRAPID)
8536#undef TRAPREASON_TO_TRAPID
8537 default:
8538 UNREACHABLE();
8539 }
8540 }
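The mapping above is an X-macro: FOREACH_WASM_TRAPREASON expands TRAPREASON_TO_TRAPID once per trap reason, and the static_assert ties the TrapId and Builtin numbering together at compile time. Below is a minimal standalone sketch of the same pattern with made-up enums; nothing in it is V8 API.

// Standalone illustration of the X-macro switch-with-static_assert pattern.
#include <cstdio>

#define FOREACH_COLOR(V) V(Red) V(Green) V(Blue)

enum class Color { kRed, kGreen, kBlue };
enum class Paint { kRed, kGreen, kBlue };

Paint ToPaint(Color c) {
  switch (c) {
#define COLOR_TO_PAINT(name)                                  \
  case Color::k##name:                                        \
    static_assert(static_cast<int>(Color::k##name) ==         \
                      static_cast<int>(Paint::k##name),       \
                  "enum mismatch");                           \
    return Paint::k##name;
    FOREACH_COLOR(COLOR_TO_PAINT)
#undef COLOR_TO_PAINT
  }
  return Paint::kRed;  // Unreachable for valid enum values.
}

int main() { std::printf("%d\n", static_cast<int>(ToPaint(Color::kGreen))); }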
8541
8542 // We need this shift so that resulting OpIndex offsets are multiples of
8543 // `sizeof(OperationStorageSlot)`.
8544 static constexpr int kPositionFieldShift = 3;
8545 static_assert(sizeof(compiler::turboshaft::OperationStorageSlot) ==
8546 1 << kPositionFieldShift);
8547 static constexpr int kPositionFieldSize = 23;
8548 static_assert(kV8MaxWasmFunctionSize < (1 << kPositionFieldSize));
8549 static constexpr int kInliningIdFieldSize = 6;
8550 static constexpr uint8_t kNoInliningId = 63;
8551 static_assert((1 << kInliningIdFieldSize) - 1 == kNoInliningId);
8552 // We need to assign inlining_ids to inlined nodes.
8553 static_assert(InliningTree::kMaxInlinedCount < kNoInliningId);
8554
8555 // We encode the wasm code position and the inlining index in an OpIndex
8556 // stored in the output graph's node origins.
8557 using PositionField =
8558 base::BitField<WasmCodePosition, kPositionFieldShift, kPositionFieldSize>;
8559 using InliningIdField = PositionField::Next<uint8_t, kInliningIdFieldSize>;
8560
8561 OpIndex WasmPositionToOpIndex(WasmCodePosition position, int inlining_id) {
8562 return OpIndex::FromOffset(PositionField::encode(position) |
8563 InliningIdField::encode(inlining_id));
8564 }
8565
8566 SourcePosition OpIndexToSourcePosition(OpIndex index) {
8567 DCHECK(index.valid());
8568 uint8_t inlining_id = InliningIdField::decode(index.offset());
8569 return SourcePosition(PositionField::decode(index.offset()),
8570 inlining_id == kNoInliningId
8571 ? SourcePosition::kNotInlined
8572 : inlining_id);
8573 }
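The two bit fields above pack a wasm code position and an inlining id into a single OpIndex offset: the low kPositionFieldShift (3) bits stay zero so the offset remains a multiple of the 8-byte OperationStorageSlot, bits 3..25 carry the position, and bits 26..31 carry the inlining id, with 63 meaning "not inlined". The following is a standalone sketch of the same packing using plain shifts instead of base::BitField; the constants mirror the listing, but the function names are illustrative only.

// Standalone worked example of the position/inlining-id packing.
#include <cassert>
#include <cstdint>

constexpr uint32_t kShift = 3, kPosBits = 23, kIdBits = 6, kNoId = 63;

uint32_t Encode(uint32_t position, uint32_t inlining_id) {
  assert(position < (1u << kPosBits) && inlining_id < (1u << kIdBits));
  return (position << kShift) | (inlining_id << (kShift + kPosBits));
}

uint32_t DecodePosition(uint32_t offset) {
  return (offset >> kShift) & ((1u << kPosBits) - 1);
}

uint32_t DecodeInliningId(uint32_t offset) { return offset >> (kShift + kPosBits); }

int main() {
  // Position 1234 in inlinee #5 round-trips, and the offset stays 8-byte aligned.
  uint32_t offset = Encode(1234, 5);
  assert(offset % 8 == 0);
  assert(DecodePosition(offset) == 1234 && DecodeInliningId(offset) == 5);
  // kNoId (63) marks a position in the top-level, non-inlined function.
  assert(DecodeInliningId(Encode(1234, kNoId)) == kNoId);
}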
8574
8575 BranchHint GetBranchHint(FullDecoder* decoder) {
8576 // Minimize overhead when branch hints aren't being used.
8577 if (branch_hinting_mode_ == BranchHintingMode::kNone) {
8578 return BranchHint::kNone;
8579 }
8580 if (branch_hinting_mode_ == BranchHintingMode::kModuleProvided) {
8581 return branch_hints_->GetHintFor(decoder->pc_relative_offset());
8582 }
8583 if (branch_hinting_mode_ == BranchHintingMode::kStress) {
8584 return branch_hinting_stresser_.GetNextHint();
8585 }
8586 UNREACHABLE();
8587 }
8588
8589 private:
8590 bool should_inline(FullDecoder* decoder, int feedback_slot, int size) {
8591 if (!v8_flags.wasm_inlining) return false;
8592 // TODO(42204563,41480394,335082212): Do not inline if the current function
8593 // is shared (which also implies the target cannot be shared either).
8594 if (shared_) return false;
8595
8596 // Configuration without Liftoff and feedback, e.g., for testing.
8597 if (!v8_flags.liftoff) {
8598 return size < no_liftoff_inlining_budget_ &&
8599 // In a production configuration, `InliningTree` decides what to
8600 // (not) inline, e.g., asm.js functions or to not exceed
8601 // `kMaxInlinedCount`. But without Liftoff, we need to "manually"
8602 // comply with these constraints here.
8603 !is_asmjs_module(decoder->module_) &&
8605 }
8606
8607 // Default, production configuration: Liftoff collects feedback, which
8608 // decides whether we inline:
8610 DCHECK_GT(inlining_decisions_->function_calls().size(), feedback_slot);
8611 // We should inline if at least one case for this feedback slot needs
8612 // to be inlined.
8613 for (InliningTree* tree :
8614 inlining_decisions_->function_calls()[feedback_slot]) {
8615 if (tree && tree->is_inlined()) {
8616 DCHECK(!decoder->module_->function_is_shared(tree->function_index()));
8617 return true;
8618 }
8619 }
8620 return false;
8621 }
8622 return false;
8623 }
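should_inline above has two regimes: with --no-liftoff there is no call-site feedback, so a plain size budget (plus the asm.js exclusion) decides; otherwise the decision comes from the Liftoff-collected feedback, inlining if any recorded target for this feedback slot was picked by the inlining heuristics. Below is a simplified standalone sketch of that decision shape; the types are stand-ins, not V8's InliningTree.

// Simplified stand-in for the per-call-site inlining decision.
#include <vector>

struct Decision { bool inlined; };

bool ShouldInline(bool have_feedback, int callee_size, int budget,
                  const std::vector<const Decision*>& cases_for_slot) {
  if (!have_feedback) {
    // --no-liftoff configuration: purely budget-driven.
    return callee_size < budget;
  }
  // Production configuration: inline if at least one recorded case for this
  // feedback slot was selected for inlining.
  for (const Decision* d : cases_for_slot) {
    if (d != nullptr && d->inlined) return true;
  }
  return false;
}

int main() {
  Decision hot{true}, cold{false};
  return ShouldInline(true, 0, 0, {&cold, &hot}) ? 0 : 1;  // Returns 0: inline.
}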
8624
8625 void set_inlining_decisions(InliningTree* inlining_decisions) {
8626 inlining_decisions_ = inlining_decisions;
8627 }
8628
8630 void set_inlining_id(uint8_t inlining_id) {
8631 DCHECK_NE(inlining_id, kNoInliningId);
8632 inlining_id_ = inlining_id;
8633 }
8641
8643 bool deopts_enabled() {
8644 if (!deopts_enabled_.has_value()) {
8645 deopts_enabled_ = v8_flags.wasm_deopt;
8646 if (v8_flags.wasm_deopt) {
8647 const wasm::TypeFeedbackStorage& feedback = env_->module->type_feedback;
8648 base::MutexGuard mutex_guard(&feedback.mutex);
8649 auto iter = feedback.deopt_count_for_function.find(func_index_);
8650 if (iter != feedback.deopt_count_for_function.end() &&
8651 iter->second >= v8_flags.wasm_deopts_per_function_limit) {
8652 deopts_enabled_ = false;
8653 if (v8_flags.trace_wasm_inlining) {
8654 PrintF(
8655 "[function %d%s: Disabling deoptimizations for speculative "
8656 "inlining as the deoptimization limit (%u) for this function "
8657 "is reached or exceeded (%zu)]\n",
8658 func_index_, mode_ == kRegular ? "" : " (inlined)",
8659 iter->second, v8_flags.wasm_deopts_per_function_limit.value());
8660 }
8661 }
8662 }
8663 }
8664 return deopts_enabled_.value();
8665 }
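deopts_enabled above computes its answer once per function and caches it in a std::optional: deopt-based speculative inlining stays enabled until the per-function deopt counter reaches --wasm-deopts-per-function-limit. The following is a standalone sketch of that lazily cached, limit-capped flag; the names are illustrative, not V8's.

// Standalone sketch of a lazily computed, limit-capped deopt policy.
#include <cstdint>
#include <optional>
#include <unordered_map>

struct DeoptPolicy {
  std::optional<bool> enabled_;  // Computed on first query, then cached.

  bool Enabled(int func_index,
               const std::unordered_map<int, uint32_t>& deopt_counts,
               uint32_t limit) {
    if (!enabled_.has_value()) {
      enabled_ = true;
      auto it = deopt_counts.find(func_index);
      if (it != deopt_counts.end() && it->second >= limit) {
        // This function already deoptimized too often; stop speculating.
        enabled_ = false;
      }
    }
    return *enabled_;
  }
};

int main() {
  std::unordered_map<int, uint32_t> counts{{7, 3}};
  DeoptPolicy policy;
  // Function 7 has deopted 3 times with a limit of 2, so deopts are disabled.
  return policy.Enabled(7, counts, 2) ? 1 : 0;
}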
8666
8667 V<WasmTrustedInstanceData> trusted_instance_data(bool element_is_shared) {
8668 DCHECK_IMPLIES(shared_, element_is_shared);
8669 return (element_is_shared && !shared_)
8674 }
8675
8676 V<FixedArray> managed_object_maps(bool type_is_shared) {
8677 DCHECK_IMPLIES(shared_, type_is_shared);
8678 if (type_is_shared && !shared_) {
8679 V<WasmTrustedInstanceData> shared_instance = trusted_instance_data(true);
8680 return LOAD_IMMUTABLE_INSTANCE_FIELD(
8681 shared_instance, ManagedObjectMaps,
8682 MemoryRepresentation::TaggedPointer());
8683 } else {
8685 }
8686 }
8687
8688 private:
8692 // Only used for "top-level" instantiations, not for inlining.
8693 std::unique_ptr<InstanceCache> owned_instance_cache_;
8694
8695 // The instance cache to use (may be owned or passed in).
8696 InstanceCache& instance_cache_;
8697
8698 // Yes, a *pointer* to a `unique_ptr`. The `unique_ptr` is allocated lazily
8699 // when adding the first assumption.
8700 std::unique_ptr<AssumptionsJournal>* assumptions_;
8711 BranchHintingMode branch_hinting_mode_;
8713 BranchHintingStresser branch_hinting_stresser_;
8716 // Inlining budget in case of --no-liftoff.
8720
8721 /* Used for inlining modes */
8722 // Contains real parameters for this inlined function, including the instance.
8723 // Used only in StartFunction();
8724 base::Vector<OpIndex> real_parameters_;
8725 // The block where this function returns its values (passed by the caller).
8726 TSBlock* return_block_;
8727 // The return values and exception values for this function.
8728 // The caller will reconstruct each one with a Phi.
8729 BlockPhis* return_phis_;
8730 // The block where exceptions from this function are caught (passed by the
8731 // caller).
8732 TSBlock* return_catch_block_;
8733 // The position of the call that is being inlined.
8736
8737 std::optional<bool> deopts_enabled_;
8739};
8740
8741 void BuildTSGraph(compiler::turboshaft::PipelineData* data,
8742 AccountingAllocator* allocator,
8743 CompilationEnv* env, WasmDetectedFeatures* detected, Graph& graph,
8744 const FunctionBody& func_body, const WireBytesStorage* wire_bytes,
8745 std::unique_ptr<AssumptionsJournal>* assumptions,
8746 ZoneVector<WasmInliningPosition>* inlining_positions, int func_index) {
8747 DCHECK(env->module->function_was_validated(func_index));
8748 Zone zone(allocator, ZONE_NAME);
8749 WasmGraphBuilderBase::Assembler assembler(data, graph, graph, &zone);
8750 WasmFullDecoder<TurboshaftGraphBuildingInterface::ValidationTag,
8751 TurboshaftGraphBuildingInterface>
8752 decoder(&zone, env->module, env->enabled_features, detected, func_body,
8753 &zone, env, assembler, assumptions, inlining_positions,
8754 func_index, func_body.is_shared, wire_bytes);
8755 decoder.Decode();
8756 // The function was already validated, so graph building must always succeed.
8757 DCHECK(decoder.ok());
8758}
8759
8760#undef LOAD_IMMUTABLE_INSTANCE_FIELD
8761#undef LOAD_INSTANCE_FIELD
8762#undef LOAD_ROOT
8764
8765} // namespace v8::internal::wasm