block == &__ output_graph().StartBlock());
for (auto it = graph->rbegin(); it != graph->rend(); ++it) {
if ((*it)->is_loop()) {
if (backedge_block == header) {
for (; *it != backedge_block; --it) {
constexpr int kGeneratorSwitchBLockIndex = 2;
graph->blocks()[kGeneratorSwitchBLockIndex];
if (innermost_header) {
std::unordered_map<const maglev::BasicBlock*, const maglev::BasicBlock*>
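// Helper macros for the node processor below. Building a Turboshaft
// FrameState from a Maglev deopt info can fail (e.g. when it would need too
// many inputs); in that case processing aborts and the phase records a
// bailout reason instead of producing a graph.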
#define GET_FRAME_STATE_MAYBE_ABORT(name, deopt_info)                       \
  V<FrameState> name;                                                       \
  {                                                                         \
    OptionalV<FrameState> _maybe_frame_state = BuildFrameState(deopt_info); \
    if (!_maybe_frame_state.has_value()) {                                  \
      DCHECK(bailout_->has_value());                                        \
      return maglev::ProcessResult::kAbort;                                 \
    }                                                                       \
    name = _maybe_frame_state.value();                                      \
  }

constexpr int kCalleeCount = 1;
constexpr int kFrameStateCount = 1;
return (arguments_count + kCalleeCount + kFrameStateCount) >

#define BAILOUT_IF_TOO_MANY_ARGUMENTS_FOR_CALL(count) \
  if (TooManyArgumentsForCall(count)) {               \
    *bailout_ = BailoutReason::kTooManyArguments;     \
    return maglev::ProcessResult::kAbort;             \
  }

#define GENERATE_AND_MAP_BUILTIN_CALL(node, builtin, frame_state, arguments, \
                                      ...)                                   \
  BAILOUT_IF_TOO_MANY_ARGUMENTS_FOR_CALL(arguments.size());                  \
  SetMap(node, GenerateBuiltinCall(node, builtin, frame_state, arguments,    \
                                   ##__VA_ARGS__));

#define RETURN_IF_UNREACHABLE()                 \
  if (__ generating_unreachable_operations()) { \
    return maglev::ProcessResult::kContinue;    \
  }
std::optional<BailoutReason>* bailout)
block->is_loop() ? __ NewLoopHeader() : __ NewBlock();
__ Bind(__ NewBlock());
if (graph->has_resumable_generator()) {
for (Block& block : __ output_graph().blocks()) {
if (block.IsLoop() && block.PredecessorCount() == 1) {
__ output_graph().TurnLoopIntoMerge(&block);
constexpr int kMainSwitchBlockId = 2;
bool is_main_switch_block = maglev_block->id() == kMainSwitchBlockId;
return is_main_switch_block;
__ SetMaglevInputBlock(maglev_block);
Block* turboshaft_block = Map(maglev_block);
if (__ current_block() != nullptr) {
__ Goto(turboshaft_block);
if (!__ Bind(turboshaft_block)) {
Block* turboshaft_block,
bool ignore_last_predecessor) {
if (ignore_last_predecessor &&
Block* turboshaft_catch_handler = Map(maglev_catch_handler);
turboshaft_catch_handler);
turboshaft_catch_handler);
Block* turboshaft_catch_handler) {
if (!__ Bind(turboshaft_catch_handler)) return;
if (!maglev_catch_handler->has_phi()) return;
V<Any> ts_idx = __ GetVariable(var);
ConversionMode::kCanonicalizeSmi));
Block* turboshaft_catch_handler) {
if (!maglev_catch_handler->has_phi()) {
if (!__ Bind(turboshaft_catch_handler)) return;
auto predecessors = turboshaft_catch_handler->Predecessors();
for (Block* predecessor : predecessors) {
V<Object> catch_begin = predecessor->begin();
catch_block_begins.push_back(catch_begin);
turboshaft_catch_handler);
__ Bind(turboshaft_catch_handler);
Block* turboshaft_catch_handler) {
Block* new_block = __ NewBlock();
__ output_graph().Replace<GotoOp>(__ output_graph().Index(old_goto),
__ AddPredecessor(old_block, new_block, false);
__ BindReachable(new_block);
__ Goto(turboshaft_catch_handler);
bool has_special_generator_handling = false;
std::vector<GeneratorSplitEdge>& generator_preds =
Block* pred_for_generator = __ NewBlock();
__ Bind(pred.pre_loop_dst);
__ Word32Constant(pred.switch_value));
__ Goto(pred_for_generator);
__ Bind(pred_for_generator);
has_special_generator_handling = true;
if (maglev_loop_header->has_phi()) {
has_special_generator_handling);
constexpr int kSkipBackedge = 1;
int input_count = phi->input_count() - kSkipBackedge;
if (has_special_generator_handling) {
switch (phi->value_representation()) {
if (has_special_generator_handling) {
constexpr int kSkipGeneratorPredecessor = 1;
int input_count_without_generator =
inputs.insert(inputs.begin(), input_count_without_generator,
inputs.push_back(switch_var_first_input);
__ Goto(Map(maglev_loop_header));
std::vector<GeneratorSplitEdge>& generator_preds =
__ output_graph().graph_zone()
->AllocateArray<compiler::turboshaft::SwitchOp::Case>(
generator_preds.size());
for (int i = 0; static_cast<unsigned int>(i) < generator_preds.size();
Block* default_block = __ NewBlock();
__ Bind(default_block);
SetMap(node, __ HeapConstant(node->object().object()));
SetMap(node, __ Word32Constant(node->value()));
SetMap(node, __ Word32SignHintUnsigned(__ Word32Constant(node->value())));
SetMap(node, __ Float64Constant(node->value()));
SetMap(node, __ SmiConstant(node->value()));
SetMap(node, __ TrustedHeapConstant(node->object().object()));
std::string reg_string_name = node->source().ToString();
snprintf(debug_name_arr.data(), debug_name_arr.length(), "%s",
reg_string_name.c_str());
char* debug_name = debug_name_arr.data();
char* debug_name = nullptr;
if (source.is_function_closure()) {
if (source.is_current_context()) {
} else if (source.is_parameter()) {
index = source.ToParameterIndex();
index = source.index() + InterpreterFrameConstants::kExtraSlotCount +
value = __ OsrValue(index);
int index = source.ToParameterIndex();
if (source.is_current_context()) {
index = source.ToParameterIndex();
int input_count = node->input_count();
if (node->is_exception_phi()) {
if (__ current_block()->IsLoop()) {
DCHECK(state.block()->is_loop());
if (state.block()->predecessor_count() > 2 ||
static_cast<size_t>(state.block()->phis()->LengthForTest()));
__ current_block()->LastPredecessor());
DCHECK_EQ(state.block()->predecessor_count(), 2);
first_phi_input = Map(node->input(0));
SetMap(node, __ PendingLoopPhi(first_phi_input, rep));
inputs.resize(__ current_block()->PredecessorCount(), {});
for (int i = 0; i < maglev_input_count; ++i) {
if (additional_input.has_value()) {
inputs[inputs.size() - 1] = additional_input.value();
switch (node->target_type()) {
switch (node->receiver_mode()) {
builtin = Builtin::kCall_ReceiverIsNullOrUndefined;
builtin = Builtin::kCall_ReceiverIsNotNullOrUndefined;
builtin = Builtin::kCall_ReceiverIsAny;
switch (node->receiver_mode()) {
builtin = Builtin::kCallFunction_ReceiverIsNullOrUndefined;
builtin = Builtin::kCallFunction_ReceiverIsNotNullOrUndefined;
builtin = Builtin::kCallFunction_ReceiverIsAny;
arguments.push_back(__ Word32Constant(node->num_args()));
for (auto arg : node->args()) {
arguments.push_back(Map(arg));
arguments.push_back(context);
if (node->shared_function_info().HasBuiltinId()) {
arguments.push_back(Map(node->new_target()));
arguments.push_back(__ Word32Constant(actual_parameter_count));
#ifdef V8_JS_LINKAGE_INCLUDES_DISPATCH_HANDLE
arguments.push_back(
arguments.push_back(Map(node->receiver()));
for (int i = 0; i < node->num_args(); i++) {
arguments.push_back(Map(node->arg(i)));
for (int i = actual_parameter_count; i < node->expected_parameter_count();
arguments.push_back(Map(node->context()));
node, node->shared_function_info().builtin_id(), frame_state,
std::max<int>(actual_parameter_count,
node->expected_parameter_count()));
for (int i = 0; i < node->num_args(); i++) {
arguments.push_back(Map(node->arg(i)));
for (int i = actual_parameter_count; i < node->expected_parameter_count();
arguments.push_back(Map(node->new_target()));
arguments.push_back(__ Word32Constant(actual_parameter_count));
#ifdef V8_JS_LINKAGE_INCLUDES_DISPATCH_HANDLE
arguments.push_back(
arguments.push_back(context);
std::max<int>(actual_parameter_count,
node->expected_parameter_count()),
lazy_deopt_on_throw,
if (node->inline_builtin()) {
__ HeapConstant(node->function_template_info().AsHeapObject().object());
arguments.push_back(__ ExternalConstant(function_ref));
arguments.push_back(__ Word32Constant(node->num_args()));
arguments.push_back(target);
arguments.push_back(Map(node->receiver()));
arguments.push_back(Map(arg));
arguments.push_back(Map(node->context()));
switch (node->mode()) {
builtin = Builtin::kCallApiCallbackOptimizedNoProfiling;
builtin = Builtin::kCallApiCallbackOptimized;
int stack_arg_count = node->num_args() + 1;
std::optional<int> stack_arg_count = std::nullopt) {
stack_arg_count.has_value() ? stack_arg_count.value()
for (int i = 0; i < node->InputCountWithoutContext(); i++) {
arguments.push_back(Map(node->input(i)));
if (node->has_feedback()) {
switch (node->slot_type()) {
feedback_slot = __ TaggedIndexConstant(node->feedback().index());
feedback_slot = __ WordPtrConstant(node->feedback().index());
arguments.push_back(feedback_slot);
arguments.push_back(__ HeapConstant(node->feedback().vector));
if (descriptor.HasContextParameter()) {
arguments.push_back(Map(node->context_input()));
int stack_arg_count =
node->InputCountWithoutContext() - node->InputsInRegisterCount();
if (node->has_feedback()) {
int slot_index = node->InputCountWithoutContext();
int vector_index = slot_index + 1;
if (vector_index < descriptor.GetRegisterParameterCount()) {
} else if (vector_index == descriptor.GetRegisterParameterCount()) {
stack_arg_count += 1;
stack_arg_count += 2;
auto c_entry_stub = __ CEntryStubConstant(isolate_, node->ReturnCount());
graph_zone(), node->function_id(), node->num_args(),
lazy_deopt_on_throw);
for (int i = 0; i < node->num_args(); i++) {
arguments.push_back(Map(node->arg(i)));
arguments.push_back(__ Word32Constant(node->num_args()));
arguments.push_back(Map(node->context()));
frame_state = frame_state_value;
__ CallRuntime_ThrowAccessedUninitializedVariable(
__ HeapConstant(node->name().object()));
static_assert(Map::kBitFieldOffsetEnd + 1 - Map::kBitFieldOffset == 1);
Map::Bits1::IsConstructorBit::kMask))) {
__ CallRuntime_ThrowNotSuperConstructor(
constructor, Map(node->function()));
__ CallRuntime_ThrowSuperAlreadyCalledError(isolate_, frame_state,
__ CallRuntime_ThrowSuperNotCalled(isolate_, frame_state,
__ CallRuntime_ThrowCalledNonCallable(
V<ScopeInfo> scope_info = __ HeapConstant(node->scope_info().object());
SetMap(node, __ CallBuiltin_FastNewFunctionContextFunction(
isolate_, frame_state, context, scope_info,
SetMap(node, __ CallBuiltin_FastNewFunctionContextEval(
isolate_, frame_state, context, scope_info,
__ HeapConstant(node->shared_function_info().object());
__ HeapConstant(node->feedback_cell().object());
__ CallBuiltin_FastNewClosure(isolate_, frame_state, context,
shared_function_info, feedback_cell));
__ HeapConstant(node->shared_function_info().object());
__ HeapConstant(node->feedback_cell().object());
if (node->pretenured()) {
closure = __ CallRuntime_NewClosure_Tenured(
isolate_, context, shared_function_info, feedback_cell);
closure = __ CallRuntime_NewClosure(isolate_, context,
shared_function_info, feedback_cell);
V<Object> arguments_list = Map(node->arguments_list());
SetMap(node, __ CallBuiltin_CallWithArrayLike(
for (auto arg : node->args_no_spread()) {
SetMap(node, __ CallBuiltin_CallWithSpread(
node->num_args_no_spread(), spread,
for (auto arg : node->args()) {
arguments.push_back(Map(arg));
DCHECK_EQ(node->num_args(), arguments.size());
switch (node->target_type()) {
builtin = Builtin::kCallFunctionForwardVarargs;
builtin = Builtin::kCallForwardVarargs;
V<Object> call = __ CallBuiltin_CallForwardVarargs(
node->num_args(), node->start_index(), base::VectorOf(arguments),
arguments.push_back(Map(node->new_target()));
arguments.push_back(__ Word32Constant(node->num_args()));
#ifndef V8_TARGET_ARCH_ARM64
arguments.push_back(__ WordPtrConstant(node->feedback().index()));
arguments.push_back(__ HeapConstant(node->feedback().vector));
for (auto arg : node->args()) {
arguments.push_back(Map(arg));
arguments.push_back(Map(node->context()));
#ifndef V8_TARGET_ARCH_ARM64
static constexpr int kFeedbackVector = 1;
int stack_arg_count = node->num_args() + kFeedbackVector;
Builtin builtin = Builtin::kConstruct_WithFeedback;
int stack_arg_count = node->num_args();
Builtin builtin = Builtin::kConstruct;
arguments.push_back(Map(node->new_target()));
arguments.push_back(__ Word32Constant(node->num_args_no_spread()));
arguments.push_back(Map(node->spread()));
for (auto arg : node->args_no_spread()) {
arguments.push_back(Map(arg));
arguments.push_back(Map(node->context()));
node->num_args_no_spread());
SetMap(node, __ CheckConstructResult(Map(node->construct_result_input()),
Map(node->implicit_receiver_input())));
V<Object> construct_result = Map(node->construct_result_input());
__ CheckDerivedConstructResult(construct_result, frame_state,
SetMap(node, construct_result);
OpIndex arguments[] = {Map(node->object_input()),
Map(node->key_input()),
Map(node->value_input()),
__ TaggedIndexConstant(node->feedback().index()),
__ HeapConstant(node->feedback().vector),
Map(node->context())};
OpIndex arguments[] = {Map(node->object_input()), Map(node->key_input()),
__ TaggedIndexConstant(node->feedback().index()),
__ HeapConstant(node->feedback().vector),
Map(node->context())};
OpIndex arguments[] = {Map(node->object_input()),
__ HeapConstant(node->name().object()),
Map(node->value_input()),
__ TaggedIndexConstant(node->feedback().index()),
__ HeapConstant(node->feedback().vector),
Map(node->context())};
Map(node->object_input()), __ HeapConstant(node->name().object()),
__ TaggedIndexConstant(node->feedback().index()),
__ HeapConstant(node->feedback().vector), Map(node->context())};
OpIndex arguments[] = {Map(node->receiver()),
Map(node->lookup_start_object()),
__ HeapConstant(node->name().object()),
__ TaggedIndexConstant(node->feedback().index()),
__ HeapConstant(node->feedback().vector),
Map(node->context())};
OpIndex arguments[] = {__ HeapConstant(node->name().object()),
__ TaggedIndexConstant(node->feedback().index()),
__ HeapConstant(node->feedback().vector),
Map(node->context())};
switch (node->typeof_mode()) {
builtin = Builtin::kLoadGlobalICInsideTypeof;
builtin = Builtin::kLoadGlobalIC;
__ HeapConstant(node->name().object()), Map(node->value()),
__ TaggedIndexConstant(node->feedback().index()),
__ HeapConstant(node->feedback().vector), Map(node->context())};
OpIndex arguments[] = {Map(node->object_input()),
Map(node->key_input()),
Map(node->value_input()),
Map(node->flags_input()),
__ TaggedIndexConstant(node->feedback().index()),
__ HeapConstant(node->feedback().vector),
Map(node->context())};
OpIndex arguments[] = {Map(node->object_input()),
__ HeapConstant(node->name().object()),
Map(node->value_input()),
__ TaggedIndexConstant(node->feedback().index()),
__ HeapConstant(node->feedback().vector),
Map(node->context())};
Map(node->receiver()), __ TaggedIndexConstant(node->load_slot()),
__ TaggedIndexConstant(node->call_slot()),
__ HeapConstant(node->feedback()), Map(node->context())};
__ HeapConstant(node->feedback().vector),
__ TaggedIndexConstant(node->feedback().index()),
__ HeapConstant(node->boilerplate_descriptor().object()),
OpIndex arguments[] = {__ HeapConstant(node->feedback().vector),
__ TaggedIndexConstant(node->feedback().index()),
__ HeapConstant(node->constant_elements().object()),
OpIndex arguments[] = {Map(node->object_input()),
Map(node->name_input()),
Map(node->value_input()),
__ TaggedIndexConstant(node->feedback().index()),
__ HeapConstant(node->feedback().vector),
V<Smi> entry = __ FindOrderedHashMapEntry(table, key);
__ LoadElement(table, AccessBuilderTS::ForOrderedHashMapEntryValue(),
__ ChangeInt32ToIntPtr(__ UntagSmi(entry)));
IF_NOT (__ Word32Equal(__ TruncateWordPtrToWord32(entry), -1)) {
table, AccessBuilderTS::ForOrderedHashMapEntryValue(), entry);
V<Smi> entry = __ FindOrderedHashSetEntry(table, key);
OpIndex arguments[] = {Map(node->object()), Map(node->callable()),
Map(node->context())};
Map(node->object()), Map(node->key()),
Map(node->context())};
OpIndex arguments[] = {Map(node->value_input()), Map(node->context())};
OpIndex arguments[] = {__ HeapConstant(node->feedback().vector),
__ TaggedIndexConstant(node->feedback().index()),
__ HeapConstant(node->pattern().object()),
__ HeapConstant(node->shared_function_info().object()),
Map(node->description()), __ WordPtrConstant(node->feedback().index()),
__ HeapConstant(node->feedback().vector),
__ TaggedIndexConstant(node->feedback().index()),
__ HeapConstant(node->boilerplate_descriptor().object()),
Builtin::kCreateObjectFromSlowBoilerplate,
OpIndex arguments[] = {__ HeapConstant(node->feedback().vector),
__ TaggedIndexConstant(node->feedback().index()),
__ HeapConstant(node->constant_elements().object()),
Builtin::kCreateArrayFromSlowBoilerplate,
OpIndex arguments[] = {Map(node->enumerator()),
__ TaggedIndexConstant(node->feedback().index()),
__ HeapConstant(node->feedback().vector),
Map(node->context())};
OpIndex arguments[] = {__ WordPtrConstant(node->feedback().index()),
Map(node->receiver()),
Map(node->cache_array()),
Map(node->cache_type()),
Map(node->cache_index()),
__ HeapConstant(node->feedback().vector),
Map(node->context())};
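// Check-style Maglev nodes lower to DeoptimizeIf/DeoptimizeIfNot guards; each
// deopt reuses the frame state built for the node and the feedback recorded
// on its eager deopt info.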
__ DeoptimizeIfNot(__ ObjectIsSmi(Map(node->receiver_input())), frame_state,
DeoptimizeReason::kNotASmi,
node->eager_deopt_info()->feedback_to_update());
node->eager_deopt_info()->feedback_to_update());
frame_state, DeoptimizeReason::kNotASmi,
node->eager_deopt_info()->feedback_to_update());
frame_state, DeoptimizeReason::kNotASmi,
node->eager_deopt_info()->feedback_to_update());
Map(node->input()), frame_state,
node->eager_deopt_info()->feedback_to_update());
node->eager_deopt_info()->feedback_to_update());
check = __ ObjectIsNumberOrBigInt(input);
check = __ ObjectIsNumber(input);
__ DeoptimizeIfNot(check, frame_state, DeoptimizeReason::kNotANumber,
node->eager_deopt_info()->feedback_to_update());
__ DeoptimizeIf(__ ObjectIsSmi(Map(node->receiver_input())), frame_state,
DeoptimizeReason::kSmi,
node->eager_deopt_info()->feedback_to_update());
__ DeoptimizeIfNot(__ IsHeapNumberMap(__ LoadMapField(input)),
frame_state, DeoptimizeReason::kNotInt32,
node->eager_deopt_info()->feedback_to_update());
value = __ ChangeFloat64ToInt32OrDeopt(
node->eager_deopt_info()->feedback_to_update());
if (check_heap_object) {
__ DeoptimizeIf(is_smi, frame_state, DeoptimizeReason::kWrongMap,
bool has_migration_targets = false;
for (MapRef map : maps) {
if (map.object()->is_migration_target()) {
has_migration_targets = true;
DCHECK(has_migration_targets);
maps, flags, feedback);
CheckMaps(Map(node->receiver_input()), frame_state, {},
node->eager_deopt_info()->feedback_to_update(),
CheckMaps(Map(node->object_input()), frame_state, Map(node->map_input()),
node->eager_deopt_info()->feedback_to_update(),
CheckMaps(Map(node->receiver_input()), frame_state, {},
node->eager_deopt_info()->feedback_to_update(),
CheckMaps(Map(node->receiver_input()), frame_state, {},
node->eager_deopt_info()->feedback_to_update(),
__ MigrateMapIfNeeded(
Map(node->object_input()), Map(node->map_input()), frame_state,
node->eager_deopt_info()->feedback_to_update()));
__ DeoptimizeIfNot(__ TaggedEqual(Map(node->target_input()),
__ HeapConstant(node->value().object())),
frame_state, node->deoptimize_reason(),
node->eager_deopt_info()->feedback_to_update());
__ DeoptimizeIfNot(__ Word32Equal(Map(node->target_input()), node->value()),
frame_state, node->deoptimize_reason(),
node->eager_deopt_info()->feedback_to_update());
__ DeoptimizeIfNot(__ Float64SameValue(Map(node->target_input()),
node->value().get_scalar()),
frame_state, node->deoptimize_reason(),
node->eager_deopt_info()->feedback_to_update());
__ DeoptimizeIfNot(check, frame_state, DeoptimizeReason::kNotAString,
node->eager_deopt_info()->feedback_to_update());
__ DeoptimizeIfNot(check, frame_state,
DeoptimizeReason::kNotAStringOrStringWrapper,
node->eager_deopt_info()->feedback_to_update());
__ DeoptimizeIfNot(check, frame_state, DeoptimizeReason::kNotASymbol,
node->eager_deopt_info()->feedback_to_update());
__ CheckInstanceType(
Map(node->receiver_input()), frame_state,
node->eager_deopt_info()->feedback_to_update(),
node->first_instance_type(), node->last_instance_type(),
__ TaggedEqual(Map(node->first_input()), Map(node->second_input())),
frame_state, node->deoptimize_reason(),
node->eager_deopt_info()->feedback_to_update());
node->eager_deopt_info()->feedback_to_update());
__ DeoptimizeIf(RootEqual(node->object_input(), RootIndex::kTheHoleValue),
frame_state, DeoptimizeReason::kHole,
node->eager_deopt_info()->feedback_to_update());
__ DeoptimizeIf(__ Float64IsHole(Map(node->float64_input())), frame_state,
DeoptimizeReason::kHole,
node->eager_deopt_info()->feedback_to_update());
bool negate_result = false;
node->condition(), &negate_result);
if (negate_result) {
__ DeoptimizeIf(cmp, frame_state, node->deoptimize_reason(),
node->eager_deopt_info()->feedback_to_update());
__ DeoptimizeIfNot(cmp, frame_state, node->deoptimize_reason(),
node->eager_deopt_info()->feedback_to_update());
for (auto alloc : node->allocation_list()) {
if (!alloc->HasBeenAnalysed() || alloc->HasEscaped()) {
alloc->set_offset(size);
size += alloc->size();
node->set_size(size);
SetMap(node, __ FinishInitialization(
__ Allocate<HeapObject>(size, node->allocation_type())));
DCHECK(node->HasBeenAnalysed() &&
node->HasEscaped());
SetMap(node, __ BitcastWordPtrToHeapObject(__ WordPtrAdd(
__ BitcastHeapObjectToWordPtr(alloc), node->offset())));
SetMap(node, __ EnsureWritableFastElements(Map(node->object_input()),
Map(node->elements_input())));
SetMap(node, __ MaybeGrowFastElements(
Map(node->object_input()), Map(node->elements_input()),
Map(node->index_input()),
Map(node->elements_length_input()), frame_state, mode,
node->eager_deopt_info()->feedback_to_update()));
SetMap(node, __ ExtendPropertiesBackingStore(
Map(node->property_array_input()),
Map(node->object_input()), node->old_length(), frame_state,
node->eager_deopt_info()->feedback_to_update()));
__ TransitionAndStoreArrayElement(
Map(node->array_input()),
__ ChangeInt32ToIntPtr(Map(node->index_input())),
Map(node->value_input()),
node->fast_map().object(), node->double_map().object());
node->transition_sources().end(), graph_zone());
__ TransitionElementsKindOrCheckMap(
Map(node->object_input()), Map(node->map_input()), frame_state,
sources, node->transition_target(),
node->eager_deopt_info()->feedback_to_update()));
SetMap(node, __ TransitionMultipleElementsKind(
Map(node->object_input()), Map(node->map_input()),
node->transition_sources(), node->transition_target()));
SetMap(node, __ HasInPrototypeChain(Map(node->object()), node->prototype(),
SetMap(node, __ UpdateJSArrayLength(Map(node->length_input()),
Map(node->object_input()),
Map(node->index_input())));
frame_state, DeoptimizeReason::kGreaterThanMaxFastElementArray,
node->eager_deopt_info()->feedback_to_update());
template <typename Node>
__ Int32AddCheckOverflow(left_len, right_len);
Label<> throw_invalid_length(this);
GOTO(throw_invalid_length);
BIND(throw_invalid_length);
__ CallRuntime_ThrowInvalidStringLength(isolate_, frame_state,
SetMap(node, __ StringConcat(__ TagSmi(len), left, right));
IF (__ ObjectIsString(string_or_wrapper)) {
JSPrimitiveWrapper::kValueOffset));
SetMap(node, __ StringEqual(Map(node->lhs()), Map(node->rhs())));
SetMap(node, __ StringLength(Map(node->object_input())));
__ StringCharCodeAt(Map(node->string_input()),
__ ChangeUint32ToUintPtr(Map(node->index_input())));
SetMap(node, __ ConvertCharCodeToString(char_code));
SetMap(node, __ CheckedInternalizedString(
Map(node->object_input()), frame_state,
node->eager_deopt_info()->feedback_to_update()));
__ CheckValueEqualsString(Map(node->target_input()), node->value(),
node->eager_deopt_info()->feedback_to_update());
SetMap(node, __ ConvertCharCodeToString(Map(node->code_input())));
Mode::kCharCodeAt) {
Map(node->string_input()),
__ ChangeUint32ToUintPtr(Map(node->index_input()))));
Map(node->string_input()),
__ ChangeUint32ToUintPtr(Map(node->index_input()))));
V<Word32> instance_type = __ LoadInstanceTypeField(map);
IF (__ Word32Equal(instance_type, SYMBOL_TYPE)) {
GOTO(done, __ CallRuntime_SymbolDescriptiveString(
__ CallBuiltin_ToString(isolate_, frame_state, Map(node->context()),
__ CallBuiltin_NumberToString(isolate_, Map(node->value_input())));
SetMap(node, __ UntagSmi(__ ArgumentsLength()));
SetMap(node, __ NewArgumentsElements(Map(node->arguments_count_input()),
node->formal_parameter_count()));
SetMap(node, __ RestLength(node->formal_parameter_count()));
template <typename T>
__ LoadTaggedField(Map(node->object_input()), node->offset());
node->offset() == JSGeneratorObject::kContextOffset) {
result = __ LoadHeapNumberFromScriptContext(script_context,
node->index(), heap_number);
Map(node->object_input()), node->offset());
SetMap(node, __ LoadHeapNumberValue(field));
Map(node->object_input()), node->offset());
SetMap(node, __ LoadHeapInt32Value(field));
SetMap(node, __ LoadFixedArrayElement(
Map(node->elements_input()),
__ ChangeInt32ToIntPtr(Map(node->index_input()))));
SetMap(node, __ LoadFixedDoubleArrayElement(
Map(node->elements_input()),
__ ChangeInt32ToIntPtr(Map(node->index_input()))));
SetMap(node, __ LoadFixedDoubleArrayElement(
Map(node->elements_input()),
__ ChangeInt32ToIntPtr(Map(node->index_input()))));
Map(node->elements_input()),
__ ChangeInt32ToIntPtr(Map(node->index_input()));
__ DeoptimizeIf(__ Float64IsHole(result), frame_state,
DeoptimizeReason::kHole,
node->eager_deopt_info()->feedback_to_update());
__ Store(Map(node->object_input()), Map(node->value_input()),
node->initializing_or_transitioning());
__ Store(Map(node->object_input()), Map(node->value_input()),
node->initializing_or_transitioning());
V<Object> old_value = __ LoadTaggedField(context, node->offset());
IF_NOT (__ TaggedEqual(old_value, new_value)) {
__ LoadScriptContextSideData(context, node->index());
__ StoreScriptContextSlowPath(
context, old_value, new_value, side_data, frame_state,
node->eager_deopt_info()->feedback_to_update(), done);
Map(node->object_input()), node->offset());
Map(node->value_input()));
Map(node->object_input()), node->offset());
Map(node->value_input()));
__ Store(Map(node->object_input()), Map(node->value_input()),
node->initializing_or_transitioning(), node->tag());
__ StoreFixedArrayElement(Map(node->elements_input()),
__ ChangeInt32ToIntPtr(Map(node->index_input())),
Map(node->value_input()),
__ StoreFixedArrayElement(Map(node->elements_input()),
__ ChangeInt32ToIntPtr(Map(node->index_input())),
Map(node->value_input()),
__ StoreFixedDoubleArrayElement(
Map(node->elements_input()),
__ ChangeInt32ToIntPtr(Map(node->index_input())),
Map(node->value_input());
__ Store(Map(node->object_input()), __ HeapConstant(node->map().object()),
__ Store(Map(node->object_input()), Map(node->value_input()),
__ Store(Map(node->object_input()), Map(node->value_input()),
__ Word32BitwiseAnd(bitfield3, Map::Bits3::EnumLengthBits::kMask),
Map::Bits3::EnumLengthBits::kShift);
IF_NOT (__ Word32Equal(Map(node->length_input()), 0)) {
RootEqual(node->indices_input(), RootIndex::kEmptyFixedArray);
DeoptimizeReason::kWrongEnumIndices,
node->eager_deopt_info()->feedback_to_update());
__ LoadFieldByIndex(Map(node->object_input()),
__ UntagSmi(Map<Smi>(node->index_input()))));
if (shift_size > 0) {
DCHECK(shift_size == 1 || shift_size == 2 || shift_size == 3);
length = __ WordPtrShiftRightLogical(length, shift_size);
__ UintPtrLessThan(__ ChangeUint32ToUintPtr(Map(node->index_input())),
Map(node->length_input())),
frame_state, DeoptimizeReason::kOutOfBounds,
node->eager_deopt_info()->feedback_to_update());
node->elements_kind()));
node->elements_kind()));
any_of(FLOAT32_ELEMENTS, FLOAT64_ELEMENTS));
Map<Word32>(node->index_input()), node->elements_kind()));
if (node->elements_kind() == FLOAT32_ELEMENTS) {
node->elements_kind());
any_of(FLOAT32_ELEMENTS, FLOAT64_ELEMENTS));
if (node->elements_kind() == FLOAT32_ELEMENTS) {
value = __ TruncateFloat64ToFloat32(Map(node->value_input()));
node->elements_kind());
if (element_size > 1) {
DCHECK(element_size == 2 || element_size == 4 || element_size == 8);
byte_length = __ WordPtrSub(byte_length, element_size - 1);
__ DeoptimizeIf(__ IntPtrLessThan(byte_length, 0), frame_state,
DeoptimizeReason::kOutOfBounds,
node->eager_deopt_info()->feedback_to_update());
__ TruncateWordPtrToWord32(byte_length)),
frame_state, DeoptimizeReason::kOutOfBounds,
node->eager_deopt_info()->feedback_to_update());
ToBit(node->is_little_endian_input(),
SetMap(node, __ LoadDataViewElement(
is_little_endian, node->type()));
ToBit(node->is_little_endian_input(),
__ LoadDataViewElement(
ToBit(node->is_little_endian_input(),
__ StoreDataViewElement(
ToBit(node->is_little_endian_input(),
__ StoreDataViewElement(
frame_state, DeoptimizeReason::kArrayBufferWasDetached,
node->eager_deopt_info()->feedback_to_update());
if (target->is_loop() && (target->predecessor_count() > 2 ||
Block* loop_only_pred = __ NewBlock();
__ Goto(Map(node->target()));
__ TaggedEqual(Map(node->lhs()), Map(node->rhs()))));
__ TaggedEqual(Map(node->lhs()), Map(node->rhs())),
switch (node->check_type()) {
__ ObjectIs(Map(node->value()),
switch (node->literal()) {
case interpreter::TestTypeOfFlags::LiteralFlag::kNumber:
case interpreter::TestTypeOfFlags::LiteralFlag::kString:
case interpreter::TestTypeOfFlags::LiteralFlag::kSymbol:
case interpreter::TestTypeOfFlags::LiteralFlag::kBigInt:
case interpreter::TestTypeOfFlags::LiteralFlag::kFunction:
case interpreter::TestTypeOfFlags::LiteralFlag::kBoolean:
input, RootIndex::kFalseValue, isolate_)),
case interpreter::TestTypeOfFlags::LiteralFlag::kUndefined:
case interpreter::TestTypeOfFlags::LiteralFlag::kObject:
input, RootIndex::kNullValue, isolate_)),
case interpreter::TestTypeOfFlags::LiteralFlag::kOther:
switch (node->check_type()) {
frame_state, DeoptimizeReason::kNotDetectableReceiver,
node->eager_deopt_info()->feedback_to_update());
switch (node->check_type()) {
frame_state, DeoptimizeReason::kNotAJavaScriptObjectOrNullOrUndefined,
node->eager_deopt_info()->feedback_to_update());
__ TaggedEqual(Map(node->left_input()), Map(node->right_input()));
RootEqual(node->condition_input(), node->root_index());
__ GotoIf(RootEqual(node->condition_input(), RootIndex::kUndefinedValue),
Map(node->if_true()));
__ Branch(RootEqual(node->condition_input(), RootIndex::kNullValue),
Map(node->if_true()), Map(node->if_false()));
switch (node->check_type()) {
__ Branch(__ ObjectIs(Map(node->condition_input()),
Map(node->if_true()), Map(node->if_false()));
__ Branch(__ IsSmi(Map(node->condition_input())), Map(node->if_true()),
Map(node->if_false()));
__ GotoIf(__ IsSmi(Map(node->condition_input())), Map(node->if_false()));
__ Branch(__ JSAnyIsNotPrimitive(Map(node->condition_input())),
Map(node->if_true()), Map(node->if_false()));
__ output_graph().graph_zone()
->AllocateArray<compiler::turboshaft::SwitchOp::Case>(
for (int i = 0; i < node->size(); i++) {
Block* new_dst = __ NewBlock();
{new_dst, Map(target), i});
bypassed_header != nullptr;
Block* prev_loop_dst = __ NewBlock();
{prev_loop_dst, new_dst, i});
new_dst = prev_loop_dst;
Block* default_block = __ NewBlock();
__ Bind(default_block);
__ output_graph().graph_zone()
->AllocateArray<compiler::turboshaft::SwitchOp::Case>(
int case_value_base = node->value_base();
for (int i = 0; i < node->size(); i++) {
cases[i] = {i + case_value_base, Map(node->targets()[i].block_ptr()),
Block* default_block;
bool emit_default_block = false;
if (node->has_fallthrough()) {
default_block = Map(state.next_block());
default_block = __ NewBlock();
emit_default_block = true;
if (emit_default_block) {
__ Bind(default_block);
__ CheckedSmiUntag(Map(node->input()), frame_state,
node->eager_deopt_info()->feedback_to_update()));
__ ConvertUntaggedToJSPrimitiveOrDeopt(
Map(node->input()), frame_state,
node->eager_deopt_info()->feedback_to_update()));
__ ConvertUntaggedToJSPrimitiveOrDeopt(
Map(node->input()), frame_state,
node->eager_deopt_info()->feedback_to_update()));
__ ConvertUntaggedToJSPrimitiveOrDeopt(
Map(node->input()), frame_state,
node->eager_deopt_info()->feedback_to_update()));
V<Word32> as_int32 = __ ChangeFloat64ToInt32OrDeopt(
Map(node->input()), frame_state,
node->eager_deopt_info()->feedback_to_update());
__ ConvertUntaggedToJSPrimitiveOrDeopt(
as_int32, frame_state,
node->eager_deopt_info()->feedback_to_update()));
SetMap(node, __ TagSmi(__ TruncateWordPtrToWord32(Map(node->input()))));
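// Int32 arithmetic with overflow checks: each Maglev Int32<Op>WithOverflow
// node lowers to the corresponding Word32<Op>DeoptOnOverflow operation, which
// deoptimizes using the node's eager deopt feedback.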
#define PROCESS_BINOP_WITH_OVERFLOW(MaglevName, TurboshaftName,                \
                                    minus_zero_mode)                           \
  maglev::ProcessResult Process(maglev::Int32##MaglevName##WithOverflow* node, \
                                const maglev::ProcessingState& state) {        \
    GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->eager_deopt_info());        \
    SetMap(node,                                                               \
           __ Word32##TurboshaftName##DeoptOnOverflow(                         \
               Map(node->left_input()), Map(node->right_input()), frame_state, \
               node->eager_deopt_info()->feedback_to_update(),                 \
               CheckForMinusZeroMode::k##minus_zero_mode));                    \
    return maglev::ProcessResult::kContinue;                                   \
  }
#undef PROCESS_BINOP_WITH_OVERFLOW
SetMap(node, __ Word32SignedAddDeoptOnOverflow(
Map(node->value_input()), 1, frame_state,
node->eager_deopt_info()->feedback_to_update()));
SetMap(node, __ Word32SignedSubDeoptOnOverflow(
Map(node->value_input()), 1, frame_state,
node->eager_deopt_info()->feedback_to_update()));
SetMap(node, __ Word32SignedMulDeoptOnOverflow(
Map(node->value_input()), -1, frame_state,
node->eager_deopt_info()->feedback_to_update(),
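// Plain Float64 binops and Int32 bitwise/shift binops map one-to-one onto
// Turboshaft operations; these macros build no frame state since the
// operations cannot deoptimize.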
#define PROCESS_FLOAT64_BINOP(MaglevName, TurboshaftName)               \
  maglev::ProcessResult Process(maglev::Float64##MaglevName* node,      \
                                const maglev::ProcessingState& state) { \
    SetMap(node, __ Float64##TurboshaftName(Map(node->left_input()),    \
                                            Map(node->right_input()))); \
    return maglev::ProcessResult::kContinue;                            \
  }
#undef PROCESS_FLOAT64_BINOP

#define PROCESS_INT32_BITWISE_BINOP(Name)                               \
  maglev::ProcessResult Process(maglev::Int32Bitwise##Name* node,       \
                                const maglev::ProcessingState& state) { \
    SetMap(node, __ Word32Bitwise##Name(Map(node->left_input()),        \
                                        Map(node->right_input())));     \
    return maglev::ProcessResult::kContinue;                            \
  }
#undef PROCESS_INT32_BITWISE_BINOP

#define PROCESS_INT32_SHIFT(MaglevName, TurboshaftName)                      \
  maglev::ProcessResult Process(maglev::Int32##MaglevName* node,             \
                                const maglev::ProcessingState& state) {      \
    V<Word32> right = Map(node->right_input());                              \
    if (!SupportedOperations::word32_shift_is_safe()) {                      \
      right = __ Word32BitwiseAnd(right, 0x1f);                              \
    }                                                                        \
    SetMap(node, __ Word32##TurboshaftName(Map(node->left_input()), right)); \
    return maglev::ProcessResult::kContinue;                                 \
  }
#undef PROCESS_INT32_SHIFT
if (!SupportedOperations::word32_shift_is_safe()) {
right = __ Word32BitwiseAnd(right, 0x1f);
__ Word32ShiftRightLogical(Map(node->left_input()), right);
SetMap(node, __ Word32SignHintUnsigned(ts_op));
SetMap(node, __ Word32BitwiseXor(Map(node->value_input()),
IF (__ Int32LessThan(input, 0)) {
__ Int32MulCheckOverflow(input, -1);
__ DeoptimizeIf(__ Projection<1>(result_with_ovf), frame_state,
DeoptimizeReason::kOverflow,
node->eager_deopt_info()->feedback_to_update());
result = __ Projection<0>(result_with_ovf);
SetMap(node, __ Float64RoundDown(Map(node->input())));
SetMap(node, __ Float64RoundUp(Map(node->input())));
switch (node->ieee_function()) {
#define CASE(MathName, ExpName, EnumName)                         \
  case maglev::Float64Ieee754Unary::Ieee754Function::k##EnumName: \
    kind = FloatUnaryOp::Kind::k##EnumName;                       \
result = __ BitcastWord32ToSmi(__ Word32SignedAddDeoptOnOverflow(
__ BitcastSmiToWord32(Map(node->value_input())),
node->eager_deopt_info()->feedback_to_update()));
result = __ BitcastWordPtrToSmi(__ WordPtrSignedAddDeoptOnOverflow(
__ BitcastSmiToWordPtr(Map(node->value_input())),
node->eager_deopt_info()->feedback_to_update()));
result = __ BitcastWord32ToSmi(__ Word32SignedSubDeoptOnOverflow(
__ BitcastSmiToWord32(Map(node->value_input())),
node->eager_deopt_info()->feedback_to_update()));
result = __ BitcastWordPtrToSmi(__ WordPtrSignedSubDeoptOnOverflow(
__ BitcastSmiToWordPtr(Map(node->value_input())),
node->eager_deopt_info()->feedback_to_update()));
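// Generic (unspecialized) binary and unary JS operations call out to the
// corresponding Generic* operation with a lazy-deopt frame state, since they
// can throw.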
#define PROCESS_GENERIC_BINOP(Name)                                            \
  maglev::ProcessResult Process(maglev::Generic##Name* node,                   \
                                const maglev::ProcessingState& state) {        \
    ThrowingScope throwing_scope(this, node);                                  \
    GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());         \
    SetMap(node,                                                               \
           __ Generic##Name(Map(node->left_input()), Map(node->right_input()), \
                            frame_state, native_context(),                     \
                            ShouldLazyDeoptOnThrow(node)));                    \
    return maglev::ProcessResult::kContinue;                                   \
  }
#undef PROCESS_GENERIC_BINOP

#define PROCESS_GENERIC_UNOP(Name)                                           \
  maglev::ProcessResult Process(maglev::Generic##Name* node,                  \
                                const maglev::ProcessingState& state) {       \
    ThrowingScope throwing_scope(this, node);                                 \
    GET_FRAME_STATE_MAYBE_ABORT(frame_state, node->lazy_deopt_info());        \
    SetMap(node,                                                              \
           __ Generic##Name(Map(node->operand_input()), frame_state,          \
                            native_context(), ShouldLazyDeoptOnThrow(node))); \
    return maglev::ProcessResult::kContinue;                                  \
  }
#undef PROCESS_GENERIC_UNOP
SetMap(node, __ ToNumberOrNumeric(Map(node->value_input()), frame_state,
switch (node->check_type()) {
SetMap(node, __ ConvertUint32ToNumber(Map(node->input())));
SetMap(node, __ ConvertIntPtrToNumber(Map(node->input())));
SetMap(node, __ AllocateHeapNumberWithValue(Map(node->input()),
template <typename NumberToFloat64Op>
requires(std::is_same_v<NumberToFloat64Op,
std::is_same_v<NumberToFloat64Op,
switch (node->conversion_type()) {
__ ConvertJSPrimitiveToUntaggedOrDeopt(
Map(node->input()), frame_state, kind,
node->eager_deopt_info()->feedback_to_update()));
SetMap(node, __ ConvertJSPrimitiveToUntagged(
__ DeoptimizeIf(__ Int32LessThan(Map(node->input()), 0), frame_state,
DeoptimizeReason::kNotUint32,
node->eager_deopt_info()->feedback_to_update());
SetMap(node, __ Word32SignHintUnsigned(Map(node->input())));
__ UintPtrLessThanOrEqual(Map(node->input()),
std::numeric_limits<uint32_t>::max()),
frame_state, DeoptimizeReason::kNotUint32,
node->eager_deopt_info()->feedback_to_update());
SetMap(node, __ Word32SignHintUnsigned(
__ TruncateWordPtrToWord32(Map(node->input()))));
__ DeoptimizeIf(__ Int32LessThan(Map(node->input()), 0), frame_state,
DeoptimizeReason::kNotInt32,
node->eager_deopt_info()->feedback_to_update());
SetMap(node, __ Word32SignHintSigned(Map(node->input())));
__ UintPtrLessThanOrEqual(Map(node->input()),
std::numeric_limits<int32_t>::max()),
frame_state, DeoptimizeReason::kNotInt32,
node->eager_deopt_info()->feedback_to_update());
SetMap(node, __ Word32SignHintSigned(
__ TruncateWordPtrToWord32(Map(node->input()))));
node->eager_deopt_info()->feedback_to_update();
Map(node->object_input()), frame_state,
if constexpr (Is64()) {
__ DeoptimizeIfNot(__ Uint64LessThanOrEqual(
result, std::numeric_limits<uint32_t>::max()),
frame_state, DeoptimizeReason::kNotInt32, feedback);
SetMap(node, __ ChangeUint32ToFloat64(Map(node->input())));
SetMap(node, __ ChangeIntPtrToFloat64(Map(node->input())));
SetMap(node, __ ChangeFloat64ToInt32OrDeopt(
Map(node->input()), frame_state,
node->eager_deopt_info()->feedback_to_update()));
SetMap(node, __ ChangeFloat64ToUint32OrDeopt(
Map(node->input()), frame_state,
node->eager_deopt_info()->feedback_to_update()));
switch (node->conversion_type()) {
TruncateJSPrimitiveToUntaggedOrDeoptOp::InputRequirement::kNumber;
input_requirement = TruncateJSPrimitiveToUntaggedOrDeoptOp::
InputRequirement::kNumberOrBoolean;
input_requirement = TruncateJSPrimitiveToUntaggedOrDeoptOp::
InputRequirement::kNumberOrOddball;
__ TruncateJSPrimitiveToUntaggedOrDeopt(
Map(node->input()), frame_state,
input_requirement, node->eager_deopt_info()->feedback_to_update()));
SetMap(node, __ TruncateJSPrimitiveToUntagged(
__ DeoptimizeIf(__ Float64IsHole(input), frame_state,
DeoptimizeReason::kHole,
node->eager_deopt_info()->feedback_to_update());
NoThrowingScopeRequired no_throws(node);
Label<> non_js_receiver(this);
Label<> convert_global_proxy(this);
convert_global_proxy);
GOTO(convert_global_proxy);
BIND(convert_global_proxy);
node->native_context().global_proxy_object(broker_).object()));
GOTO(non_js_receiver);
BIND(non_js_receiver);
GOTO(done, __ CallBuiltin_ToObject(
isolate_, __ HeapConstant(node->native_context().object()),
result = __ JSTruncateFloat64ToWord32(__ Float64RoundTiesEven(value));
frame_state, DeoptimizeReason::kNotAHeapNumber,
node->eager_deopt_info()->feedback_to_update());
OpIndex arguments[] = {Map(node->value_input()), Map(node->context())};
__ Return(Map(node->value_input()));
__ Deoptimize(frame_state, node->deoptimize_reason(),
node->eager_deopt_info()->feedback_to_update());
V<Object> old_message = __ LoadMessage(message_address);
__ StoreMessage(message_address, Map(node->value()));
SetMap(node, old_message);
int num_parameters_and_registers = node->num_parameters_and_registers();
for (int i = 0; i < num_parameters_and_registers; i++) {
Map(node->parameters_and_registers(i)));
__ GeneratorStore(Map(node->context_input()), Map(node->generator_input()),
parameters_and_registers, node->suspend_id(),
node->bytecode_offset());
__ RuntimeAbort(node->reason());
#ifdef V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA
V<Object> data = __ GetContinuationPreservedEmbedderData();
__ SetContinuationPreservedEmbedderData(data);
bool negate_result = false;
node->condition(), &negate_result);
if (negate_result) {
__ RuntimeAbort(node->reason());
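// Frame state construction: each Maglev deopt frame is translated into a
// Turboshaft FrameState. If a frame would require more inputs than the
// FrameState operation supports, the phase bails out with kTooManyArguments.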
const int result_size = 0;
virtual_objects, result_location, result_size);
const int result_size = 0;
switch (frame.type()) {
result_location, result_size);
if (frame.parent() != nullptr) {
V<Any> fake_closure_input = __ SmiZeroConstant();
if (builder.Inputs().size() >
*bailout_ = BailoutReason::kTooManyArguments;
return __ FrameState(
if (frame.parent() != nullptr) {
V<Any> fake_context_input = __ SmiZeroConstant();
if (builder.Inputs().size() >
*bailout_ = BailoutReason::kTooManyArguments;
return __ FrameState(
if (frame.parent() != nullptr) {
for (maglev::ValueNode* param : frame.parameters()) {
constexpr int kExtraFixedJSFrameParameters =
kExtraFixedJSFrameParameters);
static_assert(kExtraFixedJSFrameParameters ==
#ifdef V8_JS_LINKAGE_INCLUDES_DISPATCH_HANDLE
if (builder.Inputs().size() >
*bailout_ = BailoutReason::kTooManyArguments;
return __ FrameState(
if (frame.parent() != nullptr) {
AddDeoptInput(builder, virtual_objects, value, reg, result_location,
int local_index = 0;
while (local_index < reg.index()) {
builder.AddUnusedRegister();
for (; local_index < frame.unit().register_count(); local_index++) {
if (frame.frame_state()->liveness()->AccumulatorIsLive()) {
result_location, result_size);
OutputFrameStateCombine combine =
if (builder.Inputs().size() >
*bailout_ = BailoutReason::kTooManyArguments;
return __ FrameState(
DCHECK(alloc->HasBeenAnalysed());
if (alloc->HasBeenElided()) {
AddVirtualObjectInput(builder, virtual_objects,
node = ident_obj->input(0).node();
builder.AddInput(MachineTypeFor(node->value_representation()), Map(node));
reg, result_location, result_size)) {
AddDeoptInput(builder, virtual_objects, node);
constexpr int kNumberOfField = 2;
__ HeapConstant(local_factory_->heap_number_map()));
__ Float64Constant(vobj->number()));
Deduplicator::DuplicatedId dup_id = deduplicator_.GetDuplicatedId(vobj);
if (dup_id.duplicated) {
switch (vobj->type()) {
constexpr int kMapAndLengthFieldCount = 2;
uint32_t field_count = length + kMapAndLengthFieldCount;
i::Float64 value = elements.GetFromImmutableFixedDoubleArray(i);
if (value.is_hole_nan()) {
constexpr int kMapFieldCount = 1;
uint32_t field_count = vobj->slot_count() + kMapFieldCount;
vobj->ForEachInput([&](maglev::ValueNode* value_node) {
AddVirtualObjectNestedValue(builder, virtual_objects, value_node);
void AddVirtualObjectNestedValue(
switch (value->opcode()) {
case maglev::Opcode::kConstant:
case maglev::Opcode::kFloat64Constant:
case maglev::Opcode::kInt32Constant:
case maglev::Opcode::kUint32Constant:
case maglev::Opcode::kRootConstant:
case maglev::Opcode::kSmiConstant:
case maglev::Opcode::kTrustedConstant:
__ TrustedHeapConstant(
case maglev::Opcode::kTaggedIndexConstant:
case maglev::Opcode::kExternalConstant:
switch (value->opcode()) {
case maglev::Opcode::kArgumentsElements:
value->Cast<maglev::ArgumentsElements>()->type());
case maglev::Opcode::kArgumentsLength:
case maglev::Opcode::kRestLength:
case maglev::Opcode::kVirtualObject:
AddDeoptInput(builder, virtual_objects, value);
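// The Deduplicator assigns ids to virtual objects so that an object that is
// referenced more than once by a frame state is only described once.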
for (uint32_t idx = 0; idx < object_ids_.size(); idx++) {
return {next_id_++, false};
DuplicatedId CreateFreshId() { return {next_id_++, false}; }
std::vector<const maglev::VirtualObject*> object_ids_;
uint32_t next_id_ = 0;
if (result_size == 0) {
uint16_t max_arguments = maglev_frame.unit().max_arguments();
int local_count = maglev_frame.unit().register_count();
maglev_frame.unit().shared_function_info().object();
maglev_frame.unit().bytecode().object();
uint16_t max_arguments = 0;
int local_count = 0;
maglev_frame.unit().shared_function_info().object();
maglev_frame.unit().bytecode().object();
maglev_frame.unit().shared_function_info().object();
maglev_frame.unit().bytecode().object();
constexpr uint16_t kParameterCount = 1;
constexpr uint16_t kMaxArguments = 0;
constexpr int kLocalCount = 0;
type, kParameterCount, kMaxArguments, kLocalCount, shared_info,
constexpr int kExtraFixedJSFrameParameters =
kExtraFixedJSFrameParameters);
GetSharedFunctionInfo(maglev_frame).object();
constexpr int kLocalCount = 0;
constexpr uint16_t kMaxArguments = 0;
switch (deopt_frame.type()) {
return deopt_frame.as_interpreted().unit().shared_function_info();
return GetSharedFunctionInfo(*deopt_frame.parent());
template <typename rep>
(std::is_same_v<rep, Float64> || std::is_same_v<rep, Float32>),
sign == Sign::kSigned);
bool swap_inputs = false;
switch (operation) {
case ::Operation::kEqual:
case ::Operation::kStrictEqual:
case ::Operation::kLessThan:
case ::Operation::kLessThanOrEqual:
case ::Operation::kGreaterThan:
case ::Operation::kGreaterThanOrEqual:
if (swap_inputs) std::swap(left, right);
bool* negate_result) {
bool swap_inputs = false;
case maglev::AssertCondition::kEqual:
case maglev::AssertCondition::kNotEqual:
*negate_result = true;
case maglev::AssertCondition::kLessThan:
case maglev::AssertCondition::kLessThanEqual:
case maglev::AssertCondition::kGreaterThan:
case maglev::AssertCondition::kGreaterThanEqual:
case maglev::AssertCondition::kUnsignedLessThan:
case maglev::AssertCondition::kUnsignedLessThanEqual:
case maglev::AssertCondition::kUnsignedGreaterThan:
case maglev::AssertCondition::kUnsignedGreaterThanEqual:
if (swap_inputs) std::swap(left, right);
__ DeoptimizeIf(check, frame_state, DeoptimizeReason::kNotASmi, feedback);
std::pair<V<WordPtr>, V<Object>> GetTypedArrayDataAndBasePointers(
return {data_pointer, base_pointer};
auto [data_pointer, base_pointer] =
GetTypedArrayDataAndBasePointers(typed_array);
return __ LoadTypedElement(typed_array, base_pointer, data_pointer,
__ ChangeUint32ToUintPtr(index),
auto [data_pointer, base_pointer] =
GetTypedArrayDataAndBasePointers(typed_array);
__ StoreTypedElement(typed_array, base_pointer, data_pointer,
__ ChangeUint32ToUintPtr(index), value,
if (conversion_mode ==
IF (__ Float64IsSmi(input)) {
V<Word32> as_int32 = __ TruncateFloat64ToInt32OverflowUndefined(input);
kHeapNumberOrUndefined,
if (done.has_incoming_jump()) {
constexpr bool kIndexCanBeInvalid = false;
OpIndex phi_index = Map(maglev_phi, kIndexCanBeInvalid);
__ output_graph().Replace<PhiOp>(
{pending_phi.first(),
Map(maglev_phi->backedge_input(), kIndexCanBeInvalid)}),
switch (value_rep) {
V<Boolean> true_idx = __ HeapConstant(local_factory_->true_value());
V<Boolean> false_idx = __ HeapConstant(local_factory_->false_value());
if (flip) std::swap(true_idx, false_idx);
V<Boolean> true_idx = __ HeapConstant(local_factory_->true_value());
V<Boolean> false_idx = __ HeapConstant(local_factory_->false_value());
if (flip) std::swap(true_idx, false_idx);
return __ Select(__ WordPtrEqual(b, __ WordPtrConstant(0)), false_idx,
return __ Float64LessThan(0.0, __ Float64Abs(input));
__ set_current_catch_block(builder_.Map(catch_block_));
if (!catch_block_->has_phi()) {
interpreted_frame.unit();
catch_block_, [this, compact_frame, maglev_unit](
DCHECK_NE(owner, interpreter::Register::virtual_accumulator());
compact_frame->GetValueOf(owner, maglev_unit);
__ SetVariable(var, ts_value);
builder_.RecordRepresentation(ts_value,
__ set_current_catch_block(nullptr);
if (catch_block_ == nullptr) return;
if (!catch_block_->has_phi()) return;
DCHECK(!node->properties().can_throw());
template <typename Function>
for (auto phi : *catch_block->phis()) {
DCHECK(phi->is_exception_phi());
auto it = regs_to_vars_.find(owner.index());
if (it == regs_to_vars_.end()) {
regs_to_vars_.insert({owner.index(), var});
return MapPhiInput(input.node(), input_index);
if (V8_UNLIKELY(node == maglev_generator_context_node_)) {
OpIndex generator_context = __ GetVariable(generator_context_);
if (__ current_block()->Contains(generator_context)) {
DCHECK(!__ current_block()->IsLoop());
return __ GetPredecessorValue(generator_context_, input_index);
return generator_context;
template <typename T>
return Map(input.node(), can_be_invalid);
if (can_be_invalid && __ generating_unreachable_operations()) {
if (V8_UNLIKELY(node == maglev_generator_context_node_)) {
return __ GetVariable(generator_context_);
return node_mapping_[node];
if (__ generating_unreachable_operations()) return;
SetMap(node, tuple->input(0));
second_return_value_ = tuple->input<Object>(1);
maglev_representations_[idx] == repr);
maglev_representations_[idx] = repr;
loop_single_edge_predecessors_;
int loop_phis_first_input_index_ = -1;
maglev_representations_;
static constexpr int kDefaultSwitchVarValue = -1;
bool is_visiting_generator_main_switch_ = false;
bool on_generator_switch_loop_ = false;
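// GeneratorSplitEdge records the blocks introduced when an edge from the
// generator resume switch into a loop header is split (see the generator
// handling earlier in this file).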
5962 Block* pre_loop_dst;
5963 Block* inside_loop_target;
5966 std::unordered_map<const maglev::BasicBlock*, std::vector<GeneratorSplitEdge>>
5967 pre_loop_generator_blocks_;
5973 std::optional<BailoutReason>* bailout_;
5981 using GraphBuildingNodeProcessor::GraphBuildingNodeProcessor;
5985 std::optional<BailoutReason>* bailout)
5987 maglev_compilation_unit, bailout),
5989 labeller_(maglev_compilation_unit->graph_labeller()) {}
5991 template <
typename NodeT>
5994 if (GraphBuildingNodeProcessor::Asm().generating_unreachable_operations()) {
5999 return maglev::ProcessResult::kContinue;
6004 GraphBuildingNodeProcessor::Process(node, state);
6006 !GraphBuildingNodeProcessor::Asm()
6007 .generating_unreachable_operations() &&
6008 maglev::IsValueNode(node->opcode()),
6012 SourcePosition source = labeller_->GetNodeProvenance(node).position;
6013 for (
OpIndex idx = end_index_before; idx !=
graph_.EndIndex();
6014 idx =
graph_.NextIndex(idx)) {
6028 DCHECK(data.info()->trace_turbo_graph());
6031 CodeTracer* code_tracer = data.GetCodeTracer();
6034 <<
"\n----- Bytecode before MaglevGraphBuilding -----\n"
6036 tracing_scope.
stream() <<
"Function: "
6047 CodeTracer* code_tracer = data.GetCodeTracer();
6049 tracing_scope.stream() <<
"\n----- " << msg <<
" -----" << std::endl;
6050 maglev::PrintGraph(tracing_scope.stream(), compilation_info, maglev_graph);
6064 if (
v8_flags.turbolev_non_eager_inlining) {
6066 inliner.
Run(data->info()->trace_turbo_graph());
6077 &maglev_graph_builder);
6078 processor.ProcessGraph(maglev_graph);
6081 if (
V8_UNLIKELY(data->info()->trace_turbo_graph())) {
6083 "After phi untagging");
6106 if (
V8_UNLIKELY(data->info()->trace_turbo_graph())) {
6108 "After escape analysis and dead node sweeping");
std::optional<BailoutReason> MaglevGraphBuildingPhase::Run(PipelineData* data,
                                                           /* ... */) {
  // ...
  // Set up a Maglev compilation info/unit for the function being compiled.
  std::unique_ptr<maglev::MaglevCompilationInfo> compilation_info =
      maglev::MaglevCompilationInfo::NewForTurboshaft(
          data->isolate(), broker, data->info()->closure(),
          data->info()->osr_offset(),
          data->info()->function_context_specializing());
  // ...
  // Sandbox check: the parameter count recorded for the compilation unit must
  // match the incoming call descriptor.
  SBXCHECK_EQ(compilation_info->toplevel_compilation_unit()->parameter_count(),
              linkage->GetIncomingDescriptor()->ParameterSlotCount());

  if (V8_UNLIKELY(data->info()->trace_turbo_graph())) {
    // ...
  }

  // Build the Maglev graph.
  // ...
      maglev::Graph::New(temp_zone, data->info()->is_osr());
  // ...
      local_isolate, compilation_info->toplevel_compilation_unit(),
  // ...
  maglev_graph_builder.Build();

  if (V8_UNLIKELY(data->info()->trace_turbo_graph())) {
    PrintMaglevGraph(*data, compilation_info.get(), maglev_graph,
                     "After graph building");
  }

  RunMaglevOptimizations(data, compilation_info.get(), maglev_graph_builder,
                         /* ... */);

  data->InitializeGraphComponent(nullptr);

  // Translate the Maglev graph into the Turboshaft graph, recording a bailout
  // reason if translation has to abort.
  std::optional<BailoutReason> bailout;
  maglev::GraphProcessor<NodeProcessorBase, true> builder(
      data, data->graph(), temp_zone,
      compilation_info->toplevel_compilation_unit(), &bailout);
  builder.ProcessGraph(maglev_graph);

  // Record the functions that Maglev inlined so that later phases see them.
  for (OptimizedCompilationInfo::InlinedFunctionHolder holder :
       /* ... */) {
    data->info()->inlined_functions().push_back(holder);
  }

  // If translation bailed out, the Turboshaft graph may have been left in an
  // inconsistent state; reset it so that tracing does not try to print it.
  if (V8_UNLIKELY(/* ... */
      (v8_flags.trace_turbo || v8_flags.trace_turbo_graph))) {
    // ...
    data->graph().Reset();
  }
  // ...
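A standalone sketch of the bailout plumbing used throughout this phase: a processor writes into the caller's std::optional<BailoutReason> through a pointer and signals an abort, and the caller returns the optional reason. The Processor class, the ProcessNode method and the argument limit here are hypothetical, not the V8 API.

#include <cstdio>
#include <optional>

// Hypothetical stand-ins for the bailout reason and the process result.
enum class BailoutReason { kTooManyArguments };
enum class ProcessResult { kContinue, kAbort };

// Mirrors the pattern above: the processor holds a pointer to the caller's
// std::optional<BailoutReason>, fills it in, and returns an abort result.
class Processor {
 public:
  explicit Processor(std::optional<BailoutReason>* bailout)
      : bailout_(bailout) {}

  ProcessResult ProcessNode(int argument_count) {
    if (argument_count > 4) {  // arbitrary limit for the sketch
      *bailout_ = BailoutReason::kTooManyArguments;
      return ProcessResult::kAbort;
    }
    return ProcessResult::kContinue;
  }

 private:
  std::optional<BailoutReason>* bailout_;
};

std::optional<BailoutReason> RunPhase() {
  std::optional<BailoutReason> bailout;
  Processor processor(&bailout);
  if (processor.ProcessNode(10) == ProcessResult::kAbort) {
    // On abort, any partially built state would be discarded here.
  }
  return bailout;  // the caller checks has_value() to detect the bailout
}

int main() { std::printf("%d\n", RunPhase().has_value()); }  // prints 1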