#ifndef V8_MAGLEV_MAGLEV_GRAPH_BUILDER_H_
#define V8_MAGLEV_MAGLEV_GRAPH_BUILDER_H_
    return payload_.GetPointerWithKnownPayload(kDoneWithValue);

  bool IsDone() const { return !IsFail(); }

  Kind kind() const { return payload_.GetPayload(); }

      : payload_(payload) {}
#define RETURN_IF_DONE(result) \
  do {                         \
    auto res = (result);       \
    if (res.IsDone()) {        \
      return res.Checked();    \
    }                          \
  } while (false)

#define RETURN_IF_ABORT(result)             \
  do {                                      \
    if ((result).IsDoneWithAbort()) {       \
      return ReduceResult::DoneWithAbort(); \
    }                                       \
  } while (false)

#define PROCESS_AND_RETURN_IF_DONE(result, value_processor) \
  do {                                                      \
    auto res = (result);                                    \
    if (res.IsDone()) {                                     \
      if (res.IsDoneWithValue()) {                          \
        value_processor(res.value());                       \
      }                                                     \
      return res.Checked();                                 \
    }                                                       \
  } while (false)

#define GET_VALUE_OR_ABORT(variable, result)                           \
  do {                                                                 \
    MaybeReduceResult res = (result);                                  \
    if (res.IsDoneWithAbort()) {                                       \
      return ReduceResult::DoneWithAbort();                            \
    }                                                                  \
    DCHECK(res.IsDoneWithValue());                                     \
    using T = std::remove_pointer_t<std::decay_t<decltype(variable)>>; \
    variable = res.value()->Cast<T>();                                 \
  } while (false)
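// Illustrative usage sketch (not part of the original header): a bytecode
// visitor typically threads MaybeReduceResult values through these macros.
// VisitFooBytecode/TryReduceFoo/TryBuildFoo below are hypothetical names.
//
//   ReduceResult MaglevGraphBuilder::VisitFooBytecode() {
//     MaybeReduceResult result = TryReduceFoo();
//     RETURN_IF_DONE(result);  // Finished (with value or abort): propagate it.
//     ValueNode* node;
//     GET_VALUE_OR_ABORT(node, TryBuildFoo());  // Unwrap the value or abort.
//     SetAccumulator(node);
//     return ReduceResult::Done();
//   }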
    new (function_entry_stack_check->lazy_deopt_info()) LazyDeoptInfo(

              interpreter::Bytecode::kJumpLoop);
           v8_flags.maglev_non_eager_inlining ||
           v8_flags.turbolev_non_eager_inlining);

    return v8_flags.maglev_speculative_hoist_phi_untagging ||

    phi->RecordUseReprHint(reprs);

    if (Phi* phi = node->TryCast<Phi>()) {
  bool is_non_eager_inlining_enabled() const {
    if (is_turbolev()) return v8_flags.turbolev_non_eager_inlining;
    return v8_flags.maglev_non_eager_inlining;
  }
  int max_inlined_bytecode_size() {
    if (is_turbolev()) return v8_flags.max_inlined_bytecode_size;
    return v8_flags.max_maglev_inlined_bytecode_size;
  }
  int max_inlined_bytecode_size_small() {
    if (is_turbolev()) return v8_flags.max_inlined_bytecode_size_small;
    return v8_flags.max_maglev_inlined_bytecode_size_small;
  }
  float min_inlining_frequency() {
    if (is_turbolev()) return v8_flags.min_inlining_frequency;
    return v8_flags.min_maglev_inlining_frequency;
  }
  int max_inlined_bytecode_size_cumulative() {
    if (is_turbolev()) return v8_flags.max_inlined_bytecode_size_cumulative;
    return v8_flags.max_maglev_inlined_bytecode_size_cumulative;
  }
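// Illustrative note (assumption, not from this file): these accessors let one
// set of inlining heuristics serve both tiers by reading either the Turbolev
// or the Maglev flavor of each flag. A caller-side budget check is conceptually
//
//   if (inlinee_bytecode_size > max_inlined_bytecode_size()) {
//     return MaybeReduceResult::Fail();  // hypothetical bail-out: keep the generic call
//   }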
  template <typename ControlNodeT, typename... Args>
      std::initializer_list<ValueNode*> control_inputs,
  template <typename ControlNodeT, typename... Args>
      std::initializer_list<ValueNode*> control_inputs,
  template <typename FCond, typename FTrue, typename FFalse>
      FTrue if_true, FFalse if_false);
  class CallSpeculationScope;

  template <typename Function>
    if (v8_flags.trace_maglev_graph_building) {
      std::cout << "== New empty block ==" << std::endl;

    if (v8_flags.trace_maglev_graph_building) {
      std::cout << "  " << phi << "  "

        preserve_known_node_aspects, zone());

    while (old_jump_targets != nullptr) {

      old_jump_targets = old_jump_targets->MoveToRefList(&jump_targets);

    for (Phi* phi : *merge_state.phis()) {

      if (v8_flags.trace_maglev_graph_building) {
        std::cout << "  " << phi << "  "

    DCHECK_NE(bc, interpreter::Bytecode::kJumpLoop);

    for (int i = 0; i < peelings; ++i) {

    if (v8_flags.trace_maglev_graph_building) {
      std::cout << "== Dead ==\n"

    std::cout << std::endl;

    } else if (bytecode == interpreter::Bytecode::kJumpLoop) {

    if (!v8_flags.trace_maglev_graph_building) return;

        std::cout, "* VOs (Interpreter Frame State): ",

    if (v8_flags.trace_maglev_graph_building) {

      std::cout << std::endl;

    DCHECK(!preserve_known_node_aspects);

    if (v8_flags.trace_maglev_graph_building) {
          : merge_state->is_loop() ? "loop header"
      std::cout << "== New block (" << detail << " @" << merge_state
                << "==" << std::endl;

    if (bytecode().handler_table_size() > 0) {
#define BYTECODE_CASE(name, ...)           \
  case interpreter::Bytecode::k##name: {   \
    if (Visit##name().IsDoneWithAbort()) { \
      MarkBytecodeDead();                  \

#define BYTECODE_VISITOR(name, ...) ReduceResult Visit##name();

#undef BYTECODE_VISITOR

#define DECLARE_VISITOR(name, ...) \
  ReduceResult VisitIntrinsic##name(interpreter::RegisterList args);

#undef DECLARE_VISITOR
    if (v8_flags.trace_maglev_graph_building) {
      std::cout << "  " << node << "  "

    new_nodes_.insert(node);
  template <typename NodeT, typename Function, typename... Args>
                   Function&& post_create_input_initializer, Args&&... args) {

    post_create_input_initializer(node);
  template <typename NodeT, typename... Args>

      using options_result =
          typename std::invoke_result<decltype(&NodeT::options),
          std::is_assignable<options_result, std::tuple<Args...>>::value,
          "Instruction participating in CSE needs options() returning "
          "a tuple matching the constructor arguments");

      static_assert(NodeT::kInputCount <= 3);

      std::array<ValueNode*, NodeT::kInputCount> inputs;

      if constexpr (NodeT::kInputCount > 0) {

        for (ValueNode* raw_input : raw_inputs) {

          static_assert(NodeT::kInputCount == 2);
          if ((IsConstantNode(inputs[0]->opcode()) || inputs[0] > inputs[1]) &&
            std::swap(inputs[0], inputs[1]);

      uint32_t value_number;

        for (const auto& inp : inputs) {

        value_number = static_cast<uint32_t>(tmp_value_number);

        auto candidate = exists->second.node;
        const bool sanity_check =
            candidate->Is<NodeT>() &&
            static_cast<size_t>(candidate->input_count()) == inputs.size();
                       candidate->properties()) == candidate->properties());
        const bool epoch_check =
        if (sanity_check && epoch_check) {
                std::tuple{std::forward<Args>(args)...}) {
            for (const auto& inp : inputs) {
              if (inp != candidate->input(i).node()) {

            if (static_cast<size_t>(i) == inputs.size()) {
              return static_cast<NodeT*>(candidate);

        std::forward<Args>(args)...);

      node->set_input(i++, input);

    DCHECK_EQ(node->options(), std::tuple{std::forward<Args>(args)...});
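// Illustrative note (not part of the original header): AddNewNodeOrGetEquivalent
// is the CSE/GVN entry point. Conceptually, two lookups such as
//
//   ValueNode* a = AddNewNodeOrGetEquivalent<Int32Add>({x, y});
//   ValueNode* b = AddNewNodeOrGetEquivalent<Int32Add>({x, y});
//
// yield the same node as long as the inputs, the options() tuple, and the
// effect epoch still match; otherwise a fresh node is created and recorded
// under the computed value number. (Int32Add is only an example of a
// CSE-eligible node.)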
  template <typename NodeT, typename... Args>
          std::forward<Args>(args)...);
          std::forward<Args>(args)...);
  template <typename NodeT, typename... Args>

    static_assert(!NodeT::kProperties.can_eager_deopt());
    static_assert(!NodeT::kProperties.can_lazy_deopt());
    static_assert(!NodeT::kProperties.can_throw());
    static_assert(!NodeT::kProperties.can_write());

    if (v8_flags.trace_maglev_graph_building) {
      std::cout << "  " << node << "  "
  template <typename NodeT>
    static_assert(NodeT::kProperties.is_deopt_checkpoint() +
                      NodeT::kProperties.can_eager_deopt() +
                      NodeT::kProperties.can_lazy_deopt() <=

  template <typename NodeT>
    if constexpr (NodeT::kProperties.is_deopt_checkpoint()) {

  template <typename NodeT>
    if constexpr (NodeT::kProperties.can_eager_deopt()) {

  template <typename NodeT>
    if constexpr (NodeT::kProperties.can_lazy_deopt()) {

  template <typename NodeT>
    if constexpr (NodeT::kProperties.can_throw()) {
      if (catch_block.ref) {
          new (node->exception_handler_info())
          DCHECK(node->exception_handler_info()->HasExceptionHandler());
          DCHECK(node->exception_handler_info()->ShouldLazyDeopt());
          if constexpr (std::is_same_v<NodeT, CallKnownJSFunction>) {
                node->exception_handler_info());

          DCHECK(node->exception_handler_info()->HasExceptionHandler());
          DCHECK(!node->exception_handler_info()->ShouldLazyDeopt());

        DCHECK(!node->exception_handler_info()->HasExceptionHandler());
        if constexpr (std::is_same_v<NodeT, CallKnownJSFunction>) {

    return {info->catch_block_ref_address(), !info->ShouldLazyDeopt(), 0};
                        compiler::OptionalScopeInfoRef scope_info);
  template <Builtin kBuiltin>
    if constexpr (Descriptor::HasContextParameter()) {

      for (auto* input : inputs) {
        call_builtin->set_arg(arg_index++, input);

      for (auto* input : inputs) {
        call_builtin->set_arg(arg_index++, input);

  template <Builtin kBuiltin>
      std::initializer_list<ValueNode*> inputs,

    CallBuiltin* call_builtin = BuildCallBuiltin<kBuiltin>(inputs);

    int vector_index = slot_index + 1;
    DCHECK_EQ(slot_index, Descriptor::kSlot);

    DCHECK_EQ(vector_index, Descriptor::kVector);

    return call_builtin;

      std::initializer_list<ValueNode*> inputs) {

    const size_t input_count = inputs.size() + CallCPPBuiltin::kFixedInputCount;
    return AddNewNode<CallCPPBuiltin>(

          for (auto* input : inputs) {
            call_builtin->set_arg(arg_index++, input);

        GetTaggedValue(GetContext()));
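// Illustrative sketch (assumption, not from this file): builtin calls are
// emitted through these helpers. A feedback-less call might look like
//
//   CallBuiltin* call = BuildCallBuiltin<Builtin::kToString>({GetTaggedValue(value)});
//
// (Builtin::kToString is only an example.) The feedback overload additionally
// appends the slot index and the feedback vector as the trailing
// Descriptor::kSlot / Descriptor::kVector arguments before returning the node,
// while BuildCallCPPBuiltin goes through the C++ builtin calling convention.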
#define BAILOUT(name, ...)               \
  if (function_id == Runtime::k##name) { \

      std::initializer_list<ValueNode*> inputs) {
        inputs.size() + CallRuntime::kFixedInputCount,

    for (auto* input : inputs) {
      call_runtime->set_arg(arg_index++, GetTaggedValue(input));

        function_id, GetContext());

    if (RuntimeFunctionCanThrow(function_id)) {
      return BuildAbort(AbortReason::kUnexpectedReturnFromThrow);

    FinishBlock<Abort>({}, reason);
    return ReduceResult::DoneWithAbort();

        local_isolate()->factory()->NewStringFromAsciiChecked(
            str, AllocationType::kOld);
    ValueNode* string_node = GetConstant(MakeRefAssumeMemoryFence(
        broker(), broker()->CanonicalPersistentHandle(string_handle)));
    CHECK(BuildCallRuntime(Runtime::kGlobalPrint, {string_node}).IsDone());

    CHECK(BuildCallRuntime(Runtime::kDebugPrint, {value}).IsDone());
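// Illustrative note (not part of the original header): the Print helpers are
// debugging aids. Print("message") routes through Runtime::kGlobalPrint and
// Print(value) through Runtime::kDebugPrint, so a hypothetical call site like
//
//   Print("reached slow path", some_value);
//
// simply emits the corresponding CallRuntime nodes into the graph being built.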
        compilation_unit_->GetTopLevelCompilationUnit()->feedback_cell());

    return current_interpreter_frame_.get(
        interpreter::Register::function_closure());

    return current_interpreter_frame_.get(
        interpreter::Register::current_context());

    current_interpreter_frame_.set(interpreter::Register::current_context(),

    return iterator_.GetSlotOperand(operand_index);

    return iterator_.GetFlag8Operand(operand_index);

    return iterator_.GetFlag16Operand(operand_index);

    requires(is_taggable_v<T>)

    return MakeRefAssumeMemoryFence(
        Cast<T>(iterator_.GetConstantForIndexOperand(
            operand_index, local_isolate()))));
    auto it = graph_->external_references().find(reference.address());
    if (it == graph_->external_references().end()) {
          CreateNewConstantNode<ExternalConstant>(0, reference);
      graph_->external_references().emplace(reference.address(), node);

    auto it = graph_->root().find(index);
    if (it == graph_->root().end()) {
      RootConstant* node = CreateNewConstantNode<RootConstant>(0, index);
      graph_->root().emplace(index, node);

    return GetRootConstant(value ? RootIndex::kTrueValue
                                 : RootIndex::kFalseValue);

  ValueNode* GetConstant(compiler::ObjectRef ref);

    return AddNewNode<RegisterInput>({}, reg);
#define DEFINE_IS_ROOT_OBJECT(type, name, CamelName)               \
  bool Is##CamelName(ValueNode* value) const {                     \
    if (RootConstant* constant = value->TryCast<RootConstant>()) { \
      return constant->index() == RootIndex::k##CamelName;         \
    }                                                              \
    return false;                                                  \
  }
  ROOT_LIST(DEFINE_IS_ROOT_OBJECT)
#undef DEFINE_IS_ROOT_OBJECT
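// Illustrative note (assumption): the macro above expands once per entry in
// ROOT_LIST, producing predicates such as
//
//   bool IsUndefinedValue(ValueNode* value) const;
//   bool IsTheHoleValue(ValueNode* value) const;
//
// which answer true only for values already known to be the given RootConstant.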
    DCHECK(!IsNodeCreatedForThisBytecode(current_interpreter_frame_.get(src)));

    current_interpreter_frame_.set(dst, current_interpreter_frame_.get(src));

          UseReprHintRecording::kRecord);
          UseReprHintRecording::kRecord);
          UseReprHintRecording::kRecord) {
    ValueNode* value = current_interpreter_frame_.get(reg);
    return GetSmiValue(value, record_use_repr_hint);

          UseReprHintRecording::kRecord) {
    ValueNode* value = current_interpreter_frame_.get(reg);
    return GetTaggedValue(value, record_use_repr_hint);

  ValueNode* GetTruncatedInt32ForToNumber(

    return GetTruncatedInt32ForToNumber(current_interpreter_frame_.get(reg),
                                        allowed_input_type, conversion_type);

    return GetUint8ClampedForToNumber(current_interpreter_frame_.get(reg));
  std::optional<int32_t> TryGetInt32Constant(ValueNode* value);
  std::optional<uint32_t> TryGetUint32Constant(ValueNode* value);

    GetInt32(value, can_be_heap_number);

    EnsureInt32(current_interpreter_frame_.get(reg));

  std::optional<double> TryGetFloat64Constant(

    return GetFloat64(current_interpreter_frame_.get(reg));

    return GetFloat64ForToNumber(current_interpreter_frame_.get(reg),
                                 allowed_input_type, conversion_type);

    return GetHoleyFloat64ForToNumber(current_interpreter_frame_.get(reg),
                                      allowed_input_type, conversion_type);

    return current_interpreter_frame_.get(
        interpreter::Register::virtual_accumulator());

          UseReprHintRecording::kRecord) {
    return GetSmiValue(interpreter::Register::virtual_accumulator(),
                       record_use_repr_hint);
    return GetTruncatedInt32ForToNumber(
        interpreter::Register::virtual_accumulator(), allowed_input_type,

    return GetUint8ClampedForToNumber(
        interpreter::Register::virtual_accumulator());

    return GetHoleyFloat64ForToNumber(
        interpreter::Register::virtual_accumulator(), allowed_input_type,

    DCHECK_EQ(value->properties().value_representation(),
              ValueRepresentation::kFloat64);

    if (value->properties().is_conversion()) {

      if (!value->input(0).node()->properties().is_tagged()) {

      constexpr double quiet_NaN = std::numeric_limits<double>::quiet_NaN();
      if (!constant->value().is_nan()) return constant;
      return GetFloat64Constant(quiet_NaN);

    return AddNewNode<HoleyFloat64ToMaybeNanFloat64>({value});
    return current_interpreter_frame_.get(source) ==
           current_interpreter_frame_.accumulator();

    return current_interpreter_frame_.get(
        iterator_.GetRegisterOperand(operand_index));

      int operand_index, NodeType allowed_input_type,
    return GetHoleyFloat64ForToNumber(
        iterator_.GetRegisterOperand(operand_index), allowed_input_type,

  template <typename NodeT>
    StoreRegister(interpreter::Register::virtual_accumulator(), node);

    DCHECK(interpreter::Bytecodes::ClobbersAccumulator(
        iterator_.current_bytecode()));
    current_interpreter_frame_.set_accumulator(
        GetRootConstant(RootIndex::kOptimizedOut));
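// Illustrative sketch (simplified, not from the original source): most bytecode
// visitors end by materializing a node and publishing it into the interpreter
// frame state, roughly
//
//   ReduceResult MaglevGraphBuilder::VisitAdd() {
//     ValueNode* result = GetInt32Constant(42);  // placeholder for the real value
//     SetAccumulator(result);  // writes the virtual accumulator register
//     return ReduceResult::Done();
//   }
//
// ClobberAccumulator() is the counterpart for bytecodes that overwrite the
// accumulator without producing a value Maglev wants to keep.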
    if (result->opcode() == Opcode::kCallRuntime) {
    } else if (result->opcode() == Opcode::kCallBuiltin) {

    return AddNewNode<GetSecondReturnedValue>({});

  template <typename NodeT>
    static_assert(std::is_base_of_v<ValueNode, NodeT>);
    DCHECK(HasOutputRegister(target));
    current_interpreter_frame_.set(target, value);

        IsNodeCreatedForThisBytecode(value),
        value->lazy_deopt_info()->IsResultRegister(target));

        !IsNodeCreatedForThisBytecode(value));
    current_interpreter_frame_.set(interpreter::Register::virtual_accumulator(),

  template <typename NodeT>
      std::pair<interpreter::Register, interpreter::Register> target,

    DCHECK(HasOutputRegister(target0));
    current_interpreter_frame_.set(target0, value);

    ValueNode* second_value = GetSecondValue(value);

    DCHECK_NE(0, new_nodes_.count(second_value));

    DCHECK(HasOutputRegister(target1));
    current_interpreter_frame_.set(target1, second_value);

        IsNodeCreatedForThisBytecode(value),
        value->lazy_deopt_info()->IsResultRegister(target0));
        IsNodeCreatedForThisBytecode(value),
        value->lazy_deopt_info()->IsResultRegister(target1));

  std::pair<interpreter::Register, int> GetResultLocationAndSize() const;
                                     DeoptFrameScope* scope,
                                     bool mark_accumulator_dead);

  template <typename NodeT>
    if constexpr (NodeT::kProperties.can_read() ||
                  NodeT::kProperties.can_deopt() ||
                  NodeT::kProperties.can_throw()) {
      unobserved_context_slot_stores_.clear();

    if constexpr (Node::opcode_of<NodeT> != Opcode::kAllocationBlock &&
                  (NodeT::kProperties.can_deopt() ||
                   NodeT::kProperties.can_throw() ||
                   NodeT::kProperties.can_allocate())) {
      ClearCurrentAllocationBlock();

    if constexpr (!NodeT::kProperties.can_write()) return;

    known_node_aspects().increment_effect_epoch();

      node->ClearElementsProperties(known_node_aspects());
      if (is_loop_effect_tracking()) {
        loop_effects_->keys_cleared.insert(
            KnownNodeAspects::LoadedPropertyMapKey::Elements());

      node->ClearUnstableNodeAspects(known_node_aspects());
      if (is_loop_effect_tracking()) {
        loop_effects_->unstable_aspects_cleared = true;

    static constexpr bool is_possible_map_change =

    ResetBuilderCachedState<is_possible_map_change>();
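// Illustrative note (assumption, not from this file): MarkPossibleSideEffect is
// what keeps the builder's caches honest. Emitting a node whose kProperties
// include can_write() bumps the known-node-aspects effect epoch, so a later
// CSE lookup such as
//
//   AddNewNodeOrGetEquivalent<LoadTaggedField>({object}, offset);  // sketch only
//
// will not be merged with an equivalent load recorded before the write, and
// map-changing effects additionally reset the cached checkpoint frame and the
// for-in receiver map state.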
  template <bool is_possible_map_change = true>
    latest_checkpointed_frame_.reset();

    if constexpr (is_possible_map_change) {
      current_for_in_state.receiver_needs_map_check = true;

    return iterator_.current_offset() + iterator_.current_bytecode_size();

    return GetInLivenessFor(iterator_.current_offset());

    return bytecode_analysis().GetInLivenessFor(offset);

    return GetOutLivenessFor(iterator_.current_offset());

    return bytecode_analysis().GetOutLivenessFor(offset);

    StartNewBlock(predecessor, merge_states_[offset], jump_targets_[offset]);

    if (merge_state == nullptr) {
  template <UseReprHintRecording hint = UseReprHintRecording::kRecord>
    if (repr == expected) return input;
      case ValueRepresentation::kTagged:
        return GetTaggedValue(input, hint);
      case ValueRepresentation::kInt32:
        return GetInt32(input);
      case ValueRepresentation::kFloat64:
      case ValueRepresentation::kHoleyFloat64:
        return GetFloat64(input);
      case ValueRepresentation::kUint32:
      case ValueRepresentation::kIntPtr:
  template <typename NodeT>
    if constexpr (std::is_same_v<NodeT, Return>) {
      return UseReprHintRecording::kDoNotRecord;
      return UseReprHintRecording::kRecord;

  template <typename NodeT>
    if constexpr (NodeT::kInputCount > 0) {
        node->set_input(i, ConvertInputTo<hint>(input, NodeT::kInputTypes[i]));

    size_t old_size = nodes.size();
    nodes.resize(old_size + node_buffer().size());
    std::copy(node_buffer().begin(), node_buffer().end(),
              nodes.begin() + old_size);
    node_buffer().clear();
  template <typename ControlNodeT, typename... Args>
    ControlNodeT* control_node = NodeBase::New<ControlNodeT>(
        zone(), control_inputs.size(), std::forward<Args>(args)...);
    SetNodeInputs(control_node, control_inputs);
    AttachEagerDeoptInfo(control_node);
    AttachDeoptCheckpoint(control_node);
    static_assert(!ControlNodeT::kProperties.can_lazy_deopt());
    static_assert(!ControlNodeT::kProperties.can_throw());
    static_assert(!ControlNodeT::kProperties.can_write());

    unobserved_context_slot_stores_.clear();

    ClearCurrentAllocationBlock();

    FlushNodesToBlock();

    graph()->Add(block);
    if (has_graph_labeller()) {
      graph_labeller()->RegisterNode(control_node, compilation_unit_,
                                     current_source_position_);
      if (v8_flags.trace_maglev_graph_building) {
        bool kSkipTargets = true;
        std::cout << "  " << control_node << "  "
                  << PrintNode(graph_labeller(), control_node, kSkipTargets)
    if (predecessor_count(next_block_offset) == 1) {
      if (v8_flags.trace_maglev_graph_building) {
        std::cout << "== New block (single fallthrough) at "
                  << *compilation_unit_->shared_function_info().object()
                  << "==" << std::endl;
        PrintVirtualObjects();

      StartNewBlock(next_block_offset, predecessor);

    MergeIntoFrameState(predecessor, next_block_offset);

    if (maybe_value == nullptr) {
      return GetRootConstant(RootIndex::kUndefinedValue);
  compiler::OptionalHeapObjectRef TryGetConstant(

  std::optional<ValueNode*> TryGetConstantAlternative(ValueNode* node);

  template <typename LoadNode>

  template <typename StoreNode, typename Function>
#ifdef V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA
#define CONTINUATION_PRESERVED_EMBEDDER_DATA_LIST(V) \
  V(GetContinuationPreservedEmbedderData)            \
  V(SetContinuationPreservedEmbedderData)

#define CONTINUATION_PRESERVED_EMBEDDER_DATA_LIST(V)

#define MAGLEV_REDUCED_BUILTIN(V)              \
  V(ArrayConstructor)                          \
  V(ArrayIteratorPrototypeNext)                \
  V(ArrayPrototypeEntries)                     \
  V(ArrayPrototypeKeys)                        \
  V(ArrayPrototypeValues)                      \
  V(ArrayPrototypePush)                        \
  V(ArrayPrototypePop)                         \
  V(DataViewPrototypeGetInt8)                  \
  V(DataViewPrototypeSetInt8)                  \
  V(DataViewPrototypeGetInt16)                 \
  V(DataViewPrototypeSetInt16)                 \
  V(DataViewPrototypeGetInt32)                 \
  V(DataViewPrototypeSetInt32)                 \
  V(DataViewPrototypeGetFloat64)               \
  V(DataViewPrototypeSetFloat64)               \
  V(FunctionPrototypeApply)                    \
  V(FunctionPrototypeCall)                     \
  V(FunctionPrototypeHasInstance)              \
  V(MapPrototypeGet)                           \
  V(ObjectPrototypeGetProto)                   \
  V(ObjectGetPrototypeOf)                      \
  V(ReflectGetPrototypeOf)                     \
  V(ObjectPrototypeHasOwnProperty)             \
  V(SetPrototypeHas)                           \
  V(StringConstructor)                         \
  V(StringFromCharCode)                        \
  V(StringPrototypeCharCodeAt)                 \
  V(StringPrototypeCodePointAt)                \
  V(StringPrototypeIterator)                   \
  IF_INTL(V, StringPrototypeLocaleCompareIntl) \
  CONTINUATION_PRESERVED_EMBEDDER_DATA_LIST(V) \
  IEEE_754_UNARY_LIST(V)

#define DEFINE_BUILTIN_REDUCER(Name, ...)                           \
  MaybeReduceResult TryReduce##Name(compiler::JSFunctionRef target, \
                                    CallArguments& args);

#undef DEFINE_BUILTIN_REDUCER
      const std::optional<InitialCallback>& initial_callback = {},
      const std::optional<ProcessElementCallback>& process_element_callback =

  MaybeReduceResult TryReduceGetProto(ValueNode* node);

  template <typename MapKindsT, typename IndexToElementsKindFunc,
            typename BuildKindSpecificFunc>
  MaybeReduceResult BuildJSArrayBuiltinMapSwitchOnElementsKind(
      ValueNode* receiver, const MapKindsT& map_kinds,
      MaglevSubGraphBuilder& sub_graph,
      std::optional<MaglevSubGraphBuilder::Label>& do_return,
      int unique_kind_count, IndexToElementsKindFunc&& index_to_elements_kind,
      BuildKindSpecificFunc&& build_kind_specific);
  MaybeReduceResult DoTryReduceMathRound(CallArguments& args,
                                         Float64Round::Kind kind);

  template <typename CallNode, typename... Args>
  CallNode* AddNewCallNode(const CallArguments& args, Args&&... extra_args);

  MaybeReduceResult TryReduceGetIterator(ValueNode* receiver, int load_slot,

  ValueNode* BuildCallSelf(ValueNode* context, ValueNode* function,
                           CallArguments& args);
  MaybeReduceResult TryReduceBuiltin(

  CallKnownJSFunction* BuildCallKnownJSFunction(
      ValueNode* context, ValueNode* function, ValueNode* new_target,
#ifdef V8_ENABLE_LEAPTIERING

  CallKnownJSFunction* BuildCallKnownJSFunction(
      ValueNode* context, ValueNode* function, ValueNode* new_target,
#ifdef V8_ENABLE_LEAPTIERING

  MaybeReduceResult TryBuildCallKnownJSFunction(

  MaybeReduceResult TryBuildCallKnownJSFunction(
      ValueNode* context, ValueNode* function, ValueNode* new_target,
#ifdef V8_ENABLE_LEAPTIERING

      float call_frequency);
  ReduceResult BuildEagerInlineCall(ValueNode* context, ValueNode* function,
                                    CallArguments& args, float call_frequency);
  MaybeReduceResult TryBuildInlineCall(
      ValueNode* context, ValueNode* function, ValueNode* new_target,
#ifdef V8_ENABLE_LEAPTIERING

  ValueNode* BuildGenericCall(ValueNode* target, Call::TargetType target_type,
                              const CallArguments& args);
  MaybeReduceResult TryReduceCallForConstant(

  MaybeReduceResult TryReduceCallForTarget(

  MaybeReduceResult TryReduceCallForNewClosure(
      ValueNode* target_node, ValueNode* target_context,
#ifdef V8_ENABLE_LEAPTIERING

  MaybeReduceResult TryBuildCallKnownApiFunction(
      CallArguments& args);

  MaybeReduceResult TryReduceCallForApiFunction(
      compiler::OptionalSharedFunctionInfoRef maybe_shared,
      CallArguments& args);
  MaybeReduceResult TryReduceFunctionPrototypeApplyCallWithReceiver(
      compiler::OptionalHeapObjectRef maybe_receiver, CallArguments& args,

  ReduceResult ReduceCallWithArrayLikeForArgumentsObject(
      ValueNode* target_node, CallArguments& args,
      VirtualObject* arguments_object,

  ReduceResult ReduceCallWithArrayLike(
      ValueNode* target_node, CallArguments& args,

  ReduceResult ReduceCall(ValueNode* target_node, CallArguments& args,

  ReduceResult BuildCallWithFeedback(
      ValueNode* target_node, CallArguments& args,

  ReduceResult BuildCallFromRegisters(int argc_count,

  ValueNode* BuildElementsArray(int length);
  ReduceResult BuildAndAllocateKeyValueArray(ValueNode* key, ValueNode* value);
  ReduceResult BuildAndAllocateJSArray(

  ValueNode* BuildAndAllocateJSArrayIterator(ValueNode* array,

  MaybeReduceResult TryBuildAndAllocateJSGeneratorObject(ValueNode* closure,

  ValueNode* BuildGenericConstruct(
      ValueNode* target, ValueNode* new_target, ValueNode* context,
      const CallArguments& args,

  MaybeReduceResult TryReduceConstructArrayConstructor(
      compiler::OptionalAllocationSiteRef maybe_allocation_site = {});
  MaybeReduceResult TryReduceConstructBuiltin(
      compiler::JSFunctionRef builtin,
      compiler::SharedFunctionInfoRef shared_function_info, ValueNode* target,
      CallArguments& args);
  MaybeReduceResult TryReduceConstructGeneric(
      compiler::JSFunctionRef function,
      compiler::SharedFunctionInfoRef shared_function_info, ValueNode* target,
      compiler::FeedbackSource& feedback_source);
  MaybeReduceResult TryReduceConstruct(
      compiler::HeapObjectRef feedback_target, ValueNode* target,
      compiler::FeedbackSource& feedback_source);
  ReduceResult BuildConstruct(ValueNode* target, ValueNode* new_target,
                              CallArguments& args,
                              compiler::FeedbackSource& feedback_source);

  MaybeReduceResult TryBuildScriptContextStore(
      const compiler::GlobalAccessFeedback& global_access_feedback);
  MaybeReduceResult TryBuildPropertyCellStore(
      const compiler::GlobalAccessFeedback& global_access_feedback);
  MaybeReduceResult TryBuildGlobalStore(
      const compiler::GlobalAccessFeedback& global_access_feedback);

  MaybeReduceResult TryBuildScriptContextConstantLoad(
      const compiler::GlobalAccessFeedback& global_access_feedback);
  MaybeReduceResult TryBuildScriptContextLoad(
      const compiler::GlobalAccessFeedback& global_access_feedback);
  MaybeReduceResult TryBuildPropertyCellLoad(
      const compiler::GlobalAccessFeedback& global_access_feedback);
  MaybeReduceResult TryBuildGlobalLoad(
      const compiler::GlobalAccessFeedback& global_access_feedback);

  bool TryBuildFindNonDefaultConstructorOrConstruct(
      ValueNode* this_function, ValueNode* new_target,
      std::pair<interpreter::Register, interpreter::Register> result);
  ValueNode* BuildSmiUntag(ValueNode* node);
  ValueNode* BuildNumberOrOddballToFloat64(
      ValueNode* node, NodeType allowed_input_type,
      TaggedToFloat64ConversionType conversion_type);

  ReduceResult BuildCheckSmi(ValueNode* object, bool elidable = true);
  ReduceResult BuildCheckNumber(ValueNode* object);
  ReduceResult BuildCheckHeapObject(ValueNode* object);
  ReduceResult BuildCheckJSReceiver(ValueNode* object);
  ReduceResult BuildCheckJSReceiverOrNullOrUndefined(ValueNode* object);
  ReduceResult BuildCheckString(ValueNode* object);
  ReduceResult BuildCheckStringOrStringWrapper(ValueNode* object);
  ReduceResult BuildCheckSymbol(ValueNode* object);
  ReduceResult BuildCheckMaps(
      std::optional<ValueNode*> map = {},
      bool has_deprecated_map_without_migration_target = false);
  ReduceResult BuildTransitionElementsKindOrCheckMap(
      ValueNode* heap_object, ValueNode* object_map,
      const ZoneVector<compiler::MapRef>& transition_sources,
      compiler::MapRef transition_target);
  ReduceResult BuildCompareMaps(
      ValueNode* heap_object, ValueNode* object_map,
      base::Vector<const compiler::MapRef> maps,
      MaglevSubGraphBuilder* sub_graph,
      std::optional<MaglevSubGraphBuilder::Label>& if_not_matched);
  ReduceResult BuildTransitionElementsKindAndCompareMaps(
      ValueNode* heap_object, ValueNode* object_map,
      const ZoneVector<compiler::MapRef>& transition_sources,
      compiler::MapRef transition_target, MaglevSubGraphBuilder* sub_graph,
      std::optional<MaglevSubGraphBuilder::Label>& if_not_matched);

  ReduceResult BuildCheckInternalizedStringValueOrByReference(
      ValueNode* node, compiler::HeapObjectRef ref, DeoptimizeReason reason);
  ReduceResult BuildCheckNumericalValueOrByReference(ValueNode* node,
                                                     compiler::ObjectRef ref,
                                                     DeoptimizeReason reason);
  ReduceResult BuildCheckValueByReference(ValueNode* node,
                                          compiler::HeapObjectRef ref,
                                          DeoptimizeReason reason);
  ReduceResult BuildCheckNumericalValue(ValueNode* node,
                                        compiler::ObjectRef ref,
                                        DeoptimizeReason reason);

  ValueNode* BuildConvertHoleToUndefined(ValueNode* node);
  ReduceResult BuildCheckNotHole(ValueNode* node);

  template <bool flip = false>
  ValueNode* BuildToBoolean(ValueNode* node);
  ValueNode* BuildLogicalNot(ValueNode* value);
  ValueNode* BuildTestUndetectable(ValueNode* value);
  ReduceResult BuildToNumberOrToNumeric(Object::Conversion mode);
  bool CanTrackObjectChanges(ValueNode* object, TrackObjectMode mode);

    if (offset != HeapObject::kMapOffset &&
        CanTrackObjectChanges(object, TrackObjectMode::kLoad)) {

      if (vobject->type() == VirtualObject::kDefault) {

        DCHECK_EQ(vobject->type(), VirtualObject::kFixedDoubleArray);

        if (v8_flags.trace_maglev_object_tracking) {
          std::cout << "  * Reusing value in virtual object "
                    << "]: " << PrintNode(graph_labeller(), value) << std::endl;

    return AddNewNode<Instruction>({object}, offset,
                                   std::forward<Args>(args)...);
    ValueNode* index_object = current_interpreter_frame_.get(reg);
    return GetInt32ElementIndex(index_object);

    ValueNode* index_object = current_interpreter_frame_.get(reg);
    return GetUint32ElementIndex(index_object);

  bool CanTreatHoleAsUndefined(

  compiler::OptionalObjectRef TryFoldLoadDictPrototypeConstant(
  compiler::OptionalJSObjectRef TryGetConstantDataFieldHolder(
  compiler::OptionalObjectRef TryFoldLoadConstantDataField(
  std::optional<Float64> TryFoldLoadConstantDoubleField(

      NodeType length_type = NodeType::kSmi);
  template <typename GenericAccessFunc>
      GenericAccessFunc&& build_generic_access);
  template <typename GenericAccessFunc>
      GenericAccessFunc&& build_generic_access);

  template <typename GenericAccessFunc>
      GenericAccessFunc&& build_generic_access);
  template <typename GenericAccessFunc>
      GenericAccessFunc&& build_generic_access);
  template <typename GenericAccessFunc>
      GenericAccessFunc&& build_generic_access);

  void RecordKnownProperty(ValueNode* lookup_start_object,

    kIsNotInPrototypeChain

  InferHasInPrototypeChainResult InferHasInPrototypeChain(

      uint32_t slot_count_including_map);
  VirtualObject* CreateDoubleFixedArray(uint32_t elements_length,

      std::optional<ValueNode*> extension = {});
  VirtualObject* CreateArgumentsObject(compiler::MapRef map, ValueNode* length,
                                       ValueNode* elements,
                                       std::optional<ValueNode*> callee = {});
  VirtualObject* CreateMappedArgumentsElements(compiler::MapRef map,
                                               ValueNode* unmapped_elements);
  VirtualObject* CreateRegExpLiteralObject(
      compiler::MapRef map, compiler::RegExpBoilerplateDescriptionRef literal);
  VirtualObject* CreateJSGeneratorObject(compiler::MapRef map,
                                         int instance_size, ValueNode* context,
                                         ValueNode* register_file);
  VirtualObject* CreateJSIteratorResult(compiler::MapRef map, ValueNode* value,
  VirtualObject* CreateJSStringIterator(compiler::MapRef map,

  InlinedAllocation* ExtendOrReallocateCurrentAllocationBlock(
      AllocationType allocation_type, VirtualObject* value);
  void ClearCurrentAllocationBlock();

    if (node == nullptr) return;

        current_interpreter_frame_.virtual_objects().FindAllocatedWith(alloc);

      AddDeoptUse(vobject);

      AddNonEscapingUses(alloc, 1);

      DCHECK(alloc->is_returned_value_from_inline_call());

  std::optional<VirtualObject*> TryGetNonEscapingArgumentsObject(

  std::optional<VirtualObject*> TryReadBoilerplateForFastLiteral(
      int max_depth, int* max_properties);

  ValueNode* BuildInlinedArgumentsElements(int start_index, int length);
  ValueNode* BuildInlinedUnmappedArgumentsElements(int mapped_count);

  template <CreateArgumentsType type>
  template <CreateArgumentsType type>
  ValueNode* BuildAndAllocateArgumentsObject();

  bool CanAllocateSloppyArgumentElements();
  bool CanAllocateInlinedArgumentElements();
  template <Operation kOperation>
  void BuildGenericUnaryOperationNode();
  template <Operation kOperation>
  void BuildGenericBinaryOperationNode();
  template <Operation kOperation>
  void BuildGenericBinarySmiOperationNode();

  template <Operation kOperation>
  bool TryReduceCompareEqualAgainstConstant();

  template <Operation kOperation>
  template <Operation kOperation>
  template <Operation kOperation>
  template <Operation kOperation>
  ReduceResult BuildTruncatingInt32BitwiseNotForToNumber(
  template <Operation kOperation>
  template <Operation kOperation>
  template <Operation kOperation>
  ReduceResult BuildTruncatingInt32BinaryOperationNodeForToNumber(
  template <Operation kOperation>
  ReduceResult BuildTruncatingInt32BinarySmiOperationNodeForToNumber(
  template <Operation kOperation>
  template <Operation kOperation>
  template <Operation kOperation>
  template <Operation kOperation>
  ReduceResult BuildFloat64UnaryOperationNodeForToNumber(
  template <Operation kOperation>
  ReduceResult BuildFloat64BinaryOperationNodeForToNumber(
  template <Operation kOperation>
  ReduceResult BuildFloat64BinarySmiOperationNodeForToNumber(
  template <Operation kOperation>
  template <Operation kOperation>
  template <Operation kOperation>

  size_t StringLengthStaticLowerBound(ValueNode* string, int max_depth = 2);
  template <Operation kOperation>

  template <typename Function>

  void BeginLoopEffects(int loop_header);
  void EndLoopEffects(int loop_header);

  void MergeIntoFrameState(BasicBlock* block, int target);
  void MergeDeadIntoFrameState(int target);
  void MergeDeadLoopIntoFrameState(int target);
  void MergeIntoInlinedReturnFrameState(BasicBlock* block);
    switch (jump_type) {
      case BranchType::kBranchIfTrue:
        return BranchType::kBranchIfFalse;
      case BranchType::kBranchIfFalse:
        return BranchType::kBranchIfTrue;
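// Example (illustrative): NegateBranchType(BranchType::kBranchIfTrue) yields
// BranchType::kBranchIfFalse and vice versa, so branch emitters can flip the
// jump sense (e.g. for JumpIfFalse-style bytecodes) without duplicating both
// code paths.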
          root_index_(root_index),
          jump_type_(builder.GetCurrentBranchType()) {
      if (builder.mode() == kBytecodeJumpTarget) {
        builder_.data_.bytecode_target.patch_accumulator_scope = this;

      builder_.data_.bytecode_target.patch_accumulator_scope = nullptr;

          : jump_target_offset(jump_target_offset),
            fallthrough_offset(fallthrough_offset),
            patch_accumulator_scope(nullptr) {}

          : jump_label(jump_label), fallthrough() {}

      Data(int jump_target_offset, int fallthrough_offset)
          : bytecode_target(jump_target_offset, fallthrough_offset) {}

          : label_target(jump_label) {}

          sub_builder_(nullptr),
          jump_type_(jump_type),
          data_(builder->iterator_.GetJumpTargetOffset(),
                builder->iterator_.next_offset()) {}

          sub_builder_(sub_builder),
          jump_type_(jump_type),
          data_(jump_label) {}

      return sub_builder_ == nullptr ? kBytecodeJumpTarget : kLabelJumpTarget;
      branch_specialization_mode_ = mode;

    template <typename NodeT, typename... Args>

        BranchSpecializationMode::kDefault;

    void StartFallthroughBlock(BasicBlock* predecessor);
    void SetAccumulatorInBranch(BranchType jump_type) const;

      BranchType jump_type = BranchType::kBranchIfTrue) {

      BranchType jump_type = BranchType::kBranchIfTrue) {
    return BranchBuilder(this, subgraph, jump_type, jump_label);
  BranchResult BuildBranchIfRootConstant(BranchBuilder& builder,
  BranchResult BuildBranchIfToBooleanTrue(BranchBuilder& builder,
  BranchResult BuildBranchIfInt32ToBooleanTrue(BranchBuilder& builder,
  BranchResult BuildBranchIfIntPtrToBooleanTrue(BranchBuilder& builder,
  BranchResult BuildBranchIfFloat64ToBooleanTrue(BranchBuilder& builder,
  BranchResult BuildBranchIfFloat64IsHole(BranchBuilder& builder,
  BranchResult BuildBranchIfReferenceEqual(BranchBuilder& builder,
  BranchResult BuildBranchIfInt32Compare(BranchBuilder& builder, Operation op,
  BranchResult BuildBranchIfUint32Compare(BranchBuilder& builder, Operation op,
  BranchResult BuildBranchIfUndefinedOrNull(BranchBuilder& builder,
  BranchResult BuildBranchIfUndetectable(BranchBuilder& builder,
  BranchResult BuildBranchIfJSReceiver(BranchBuilder& builder,

  BranchResult BuildBranchIfTrue(BranchBuilder& builder, ValueNode* node);
  BranchResult BuildBranchIfNull(BranchBuilder& builder, ValueNode* node);
  BranchResult BuildBranchIfUndefined(BranchBuilder& builder, ValueNode* node);

  template <typename FCond, typename FTrue, typename FFalse>
  ValueNode* Select(FCond cond, FTrue if_true, FFalse if_false);
  template <typename FCond, typename FTrue, typename FFalse>

  void MarkBranchDeadAndJumpIfNeeded(bool is_jump_taken);
    uint32_t array_length = bytecode().length() + 1;
    predecessor_count_ = zone()->AllocateArray<uint32_t>(array_length);
                  array_length - entrypoint_);

    const int max_peelings = v8_flags.maglev_optimistic_peeled_loops ? 2 : 1;

    bool is_loop_peeling_iteration = false;
    std::optional<int> peeled_loop_end;

      if (allow_loop_peeling_ &&
            size < v8_flags.maglev_loop_peeling_max_size &&
            size + graph_->total_peeled_bytecode_size() <
                v8_flags.maglev_loop_peeling_max_size_cumulative) {
          DCHECK(!is_loop_peeling_iteration);
          graph_->add_peeled_bytecode_size(size);
          is_loop_peeling_iteration = true;

          peeled_loop_end = bytecode_analysis().GetLoopEndOffsetForInnermost(

      if (interpreter::Bytecodes::IsJump(bytecode)) {
        if (is_loop_peeling_iteration &&
            bytecode == interpreter::Bytecode::kJumpLoop) {

          is_loop_peeling_iteration = false;
          peeled_loop_end = {};

          static_assert(kLoopsMustBeEnteredThroughHeader);

        if (is_loop_peeling_iteration &&

        if (!interpreter::Bytecodes::IsConditionalJump(bytecode)) {
          UpdatePredecessorCount(iterator.next_offset(), -1);

      } else if (interpreter::Bytecodes::IsSwitch(bytecode)) {
          UpdatePredecessorCount(offset.target_offset, 1);

      } else if (interpreter::Bytecodes::Returns(bytecode) ||
                 interpreter::Bytecodes::UnconditionallyThrows(bytecode)) {
        UpdatePredecessorCount(iterator.next_offset(), -1);

        if (is_inline() && interpreter::Bytecodes::Returns(bytecode)) {
          UpdatePredecessorCount(array_length - 1, 1);
          if (is_loop_peeling_iteration) {
            UpdatePredecessorCount(array_length - 1, max_peelings);
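// Illustrative note (assumption, not from this file): predecessor counting
// walks the bytecode once and adjusts the per-offset array by control-flow
// shape. An unconditional forward jump removes the implicit fallthrough edge
// (UpdatePredecessorCount(iterator.next_offset(), -1)) and credits the jump
// target, while Return/Throw inside an inlined function instead credit the
// synthetic inline exit slot at array_length - 1, with extra counts reserved
// for peeled loop iterations.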
    return compilation_unit_->feedback();

        GetSlotOperand(slot_operand_index),
        broker()->feedback_nexus_config());

        broker()->feedback_nexus_config());

    return compilation_unit_->bytecode();

    return bytecode_analysis_;

    return *current_interpreter_frame_.known_node_aspects();

    if (!is_inline()) return 1.0f;
    return caller_details_->call_frequency;

    return static_cast<int>(caller_details_->arguments.size());

    if (is_inline() && caller_details()->is_inside_loop) return true;
    int loop_header_offset =
        bytecode_analysis().GetLoopOffsetFor(iterator_.current_offset());
    if (loop_header_offset != -1) {
          bytecode_analysis().GetLoopInfoFor(loop_header_offset);

    return !in_peeled_iteration() || in_optimistic_peeling_iteration();
               merge_states_[offset]->predecessor_count() ==
                   predecessor_count_[offset] - 1);
    predecessor_count_[offset]--;
    if (in_peeled_iteration()) {
      decremented_predecessor_offsets_.push_back(offset);
      DCHECK(decremented_predecessor_offsets_.empty());

    predecessor_count_[offset] = amount;

    DCHECK_LE(0, static_cast<int64_t>(predecessor_count_[offset]) + diff);
                   merge_states_[offset]->predecessor_count() ==
                       predecessor_count_[offset] + diff);
    predecessor_count_[offset] += diff;

                   in_peeled_iteration());
    uint32_t actual = predecessor_count_[offset];
                   merge_states_[offset]->predecessor_count() == actual);
  int peeled_iteration_count_ = 0;
  bool any_peeled_loop_ = false;

    return peeled_iteration_count_ > 0;

    return v8_flags.maglev_optimistic_peeled_loops &&
           peeled_iteration_count_ == 1;

  bool in_prologue_ = true;

  bool receiver_needs_map_check = false;

  bool is_turbolev_ = false;

  int next_handler_table_index_ = 0;
  bool IsNodeCreatedForThisBytecode(ValueNode* node) const {
    return new_nodes_.find(node) != new_nodes_.end();

  std::unordered_set<Node*> new_nodes_;

    return h + 0x9e3779b9 + (seed << 6) + (seed >> 2);
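// Illustrative sketch (not from the original source): the GVN hash of a node is
// built by folding the opcode, the options, and each input through
// fast_hash_combine, conceptually
//
//   size_t hash = gvn_hash_value(static_cast<size_t>(opcode));  // hypothetical seed
//   for (const auto& inp : inputs) {
//     hash = fast_hash_combine(hash, gvn_hash_value(inp));
//   }
//
// using the boost-style 0x9e3779b9 mixing constant seen above.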
  template <typename T>

    return map.hash_value();

  template <typename T>
    for (auto e : vector) {

  template <typename T>
    for (auto e : vector) {

    return current_speculation_feedback_.IsValid();

    for (int i = 0; i < node->input_count(); ++i) {
      node->input(i).clear();

    node->OverwriteWith(Opcode::kDead);
#define DECLARE_VISITOR(name,...)
#define BYTECODE_CASE(name,...)
uint8_t data_[MAX_STACK_LENGTH]
interpreter::Bytecode bytecode
#define BYTECODE_LIST(V, V_TSA)
bool Contains(int i) const
static V8_EXPORT_PRIVATE bool IsCpp(Builtin builtin)
constexpr int ToInt() const
V8_EXPORT_PRIVATE Address address() const
constexpr Kind kind() const
static constexpr Tagged< Smi > FromInt(int value)
static bool constexpr IsValid(T value)
SourcePosition source_position() const
static const int kNotInlined
static Tagged< TaggedIndex > FromIntptr(intptr_t value)
static bool constexpr IsValid(intptr_t value)
void push_back(const T &value)
BytecodeOffset osr_bailout_id() const
bool HasOuterScopeInfo() const
ScopeInfoRef OuterScopeInfo(JSHeapBroker *broker) const
IndirectHandle< SharedFunctionInfo > object() const
ScopeInfoRef scope_info(JSHeapBroker *broker) const
Bytecode current_bytecode() const
int GetJumpTargetOffset() const
uint8_t * current_address() const
int current_offset() const
void AdvanceTo(int offset)
JumpTableTargetOffsets GetJumpTableTargetOffsets() const
void SetOffset(int offset)
static std::ostream & Decode(std::ostream &os, const uint8_t *bytecode_start, bool with_hex=true)
static constexpr bool UnconditionallyThrows(Bytecode bytecode)
static constexpr bool Returns(Bytecode bytecode)
static constexpr bool IsForwardJump(Bytecode bytecode)
static constexpr bool IsSwitch(Bytecode bytecode)
static constexpr bool IsConditionalJump(Bytecode bytecode)
static constexpr bool IsJump(Bytecode bytecode)
static constexpr Register FromParameterIndex(int index)
constexpr int index() const
static constexpr Register invalid_value()
BasicBlock * block_ptr() const
void Bind(BasicBlock *block)
BasicBlockRef * MoveToRefList(BasicBlockRef *ref_list_head)
BasicBlockRef * SetToBlockAndReturnNext(BasicBlock *block)
void set_deferred(bool deferred)
void set_predecessor_id(int id)
void AddExceptionHandler(ExceptionHandlerInfo *handler)
void set_edge_split_block(BasicBlock *predecessor)
ControlNode * control_node() const
int InputCountWithoutContext() const
void set_feedback(compiler::FeedbackSource const &feedback, FeedbackSlotType slot_type)
void set_arg(int i, ValueNode *node)
ZoneMap< int, TaggedIndexConstant * > & tagged_index()
void record_scope_info(ValueNode *context, compiler::OptionalScopeInfoRef scope_info)
ZoneMap< uint32_t, Uint32Constant * > & uint32()
ZoneVector< Node * > & node_buffer()
ZoneMap< int32_t, Int32Constant * > & int32()
ZoneVector< InitialValue * > & parameters()
ZoneMap< uint64_t, Float64Constant * > & float64()
ZoneMap< int, SmiConstant * > & smi()
void CopyFrom(const MaglevCompilationUnit &info, MergePointInterpreterFrameState &state, bool preserve_known_node_aspects, Zone *zone)
KnownNodeAspects * known_node_aspects()
const VirtualObjectList & virtual_objects() const
compiler::SharedFunctionInfoRef shared_function_info() const
MaglevGraphLabeller * graph_labeller() const
compiler::JSHeapBroker * broker() const
bool has_graph_labeller() const
int inlining_depth() const
~PatchAccumulatorInBranchScope()
PatchAccumulatorInBranchScope(BranchBuilder &builder, ValueNode *node, RootIndex root_index)
MaglevGraphBuilder::MaglevSubGraphBuilder * sub_builder_
BranchResult AlwaysFalse() const
BranchBuilder(MaglevGraphBuilder *builder, MaglevSubGraphBuilder *sub_builder, BranchType jump_type, MaglevSubGraphBuilder::Label *jump_label)
BranchResult Build(std::initializer_list< ValueNode * > inputs, Args &&... args)
BranchType GetCurrentBranchType() const
BranchResult AlwaysTrue() const
void SetBranchSpecializationMode(BranchSpecializationMode mode)
BranchBuilder(MaglevGraphBuilder *builder, BranchType jump_type)
MaglevGraphBuilder * builder_
MaglevGraphBuilder * builder_
LazyDeoptResultLocationScope * previous_
interpreter::Register result_location_
interpreter::Register result_location()
void set(Variable &var, ValueNode *value)
MaglevCompilationUnit * compilation_unit_
V8_NODISCARD ReduceResult TrimPredecessorsAndBind(Label *label)
void GotoIfTrue(Label *true_target, std::initializer_list< ValueNode * > control_inputs, Args &&... args)
void GotoOrTrim(Label *label)
void MergeIntoLabel(Label *label, BasicBlock *predecessor)
void GotoIfFalse(Label *false_target, std::initializer_list< ValueNode * > control_inputs, Args &&... args)
MaglevGraphBuilder * builder_
ValueNode * get(const Variable &var) const
void ReducePredecessorCount(Label *label, unsigned num=1)
void TakeKnownNodeAspectsAndVOsFromParent()
InterpreterFrameState pseudo_frame_
void EndLoop(LoopLabel *loop_label)
void MoveKnownNodeAspectsAndVOsToParent()
MaglevSubGraphBuilder(MaglevGraphBuilder *builder, int variable_count)
ReduceResult Branch(std::initializer_list< Variable * > vars, FCond cond, FTrue if_true, FFalse if_false)
LoopLabel BeginLoop(std::initializer_list< Variable * > loop_vars)
void RecordUseReprHint(Phi *phi, UseRepresentationSet reprs)
ValueNode * BuildLoadTaggedField(ValueNode *object, uint32_t offset, Args &&... args)
void DecrementDeadPredecessorAndAccountForPeeling(uint32_t offset)
int argument_count() const
static compiler::OptionalHeapObjectRef TryGetConstant(compiler::JSHeapBroker *broker, LocalIsolate *isolate, ValueNode *node)
ValueNode * TrySpecializeLoadScriptContextSlot(ValueNode *context, int index)
ValueNode * GetTruncatedInt32ForToNumber(interpreter::Register reg, NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
bool IsInsideLoop() const
static BranchType NegateBranchType(BranchType jump_type)
bool HasDisjointType(ValueNode *lhs, NodeType rhs_type)
void SetNodeInputs(NodeT *node, std::initializer_list< ValueNode * > inputs)
BasicBlock * current_block_
const compiler::BytecodeLivenessState * GetInLiveness() const
const FeedbackNexus FeedbackNexusForOperand(int slot_operand_index) const
NodeT * AddNewNode(std::initializer_list< ValueNode * > inputs, Args &&... args)
compiler::JSHeapBroker * broker_
Uint32Constant * GetUint32Constant(int constant)
compiler::FeedbackSource current_speculation_feedback_
void UpdatePredecessorCount(uint32_t offset, int diff)
bool in_optimistic_peeling_iteration() const
ValueNode * GetContext() const
ValueNode * ConvertInputTo(ValueNode *input, ValueRepresentation expected)
bool HaveDisjointTypes(ValueNode *lhs, ValueNode *rhs)
void SetArgument(int i, ValueNode *value)
NodeT * CreateNewConstantNode(Args &&... args) const
ZoneVector< Node * > & node_buffer()
compiler::ref_traits< T >::ref_type GetRefOperand(int operand_index)
int bailout_for_entrypoint()
NodeType CheckTypes(ValueNode *node, std::initializer_list< NodeType > types)
static size_t fast_hash_combine(size_t seed, size_t h)
ReduceResult BuildInlineFunction(SourcePosition call_site_position, ValueNode *context, ValueNode *function, ValueNode *new_target)
std::optional< InterpretedDeoptFrame > entry_stack_check_frame_
BasicBlock * BuildBranchIfReferenceEqual(ValueNode *lhs, ValueNode *rhs, BasicBlockRef *true_target, BasicBlockRef *false_target)
const DeoptFrameScope * current_deopt_scope() const
const InterpreterFrameState & current_interpreter_frame() const
LocalIsolate *const local_isolate_
ValueNode * GetRegisterInput(Register reg)
ZoneVector< int > decremented_predecessor_offsets_
ExternalConstant * GetExternalConstant(ExternalReference reference)
SourcePosition current_source_position_
bool TopLevelFunctionPassMaglevPrintFilter()
int register_count() const
void Print(ValueNode *value)
ReduceResult StoreAndCacheContextSlot(ValueNode *context, int index, ValueNode *value, ContextKind context_kind)
void StartFallthroughBlock(int next_block_offset, BasicBlock *predecessor)
void EnsureInt32(ValueNode *value, bool can_be_heap_number=false)
uint32_t GetFlag16Operand(int operand_index) const
static size_t gvn_hash_value(const v8::internal::ZoneVector< T > &vector)
ValueNode * GetHoleyFloat64ForToNumber(interpreter::Register reg, NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
ValueNode * GetContextAtDepth(ValueNode *context, size_t depth)
void MergeDeadIntoFrameState(int target)
BasicBlock * CreateEdgeSplitBlock(BasicBlockRef &jump_targets, BasicBlock *predecessor)
bool MayBeNullOrUndefined(ValueNode *node)
MaglevCallerDetails * caller_details_
const compiler::BytecodeLivenessState * GetInLivenessFor(int offset) const
SourcePositionTableIterator source_position_iterator_
CatchBlockDetails GetCurrentTryCatchBlock()
bool is_non_eager_inlining_enabled() const
void AddDeoptUse(ValueNode *node)
BasicBlock * FinishBlock(std::initializer_list< ValueNode * > control_inputs, Args &&... args)
DeoptFrame GetLatestCheckpointedFrame()
int max_inlined_bytecode_size()
bool IsRegisterEqualToAccumulator(int operand_index)
ValueNode * GetAccumulatorTruncatedInt32ForToNumber(NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
bool ShouldEmitOsrInterruptBudgetChecks()
static size_t gvn_hash_value(const interpreter::Register ®)
NodeT * AddNewNode(size_t input_count, Function &&post_create_input_initializer, Args &&... args)
void StoreRegister(interpreter::Register target, NodeT *value)
AllocationBlock * current_allocation_block_
int argument_count_without_receiver() const
uint32_t GetFlag8Operand(int operand_index) const
CallCPPBuiltin * BuildCallCPPBuiltin(Builtin builtin, ValueNode *target, ValueNode *new_target, std::initializer_list< ValueNode * > inputs)
BasicBlock * EndPrologue()
ValueNode * GetTaggedValue(interpreter::Register reg, UseReprHintRecording record_use_repr_hint=UseReprHintRecording::kRecord)
BitVector loop_headers_to_peel_
static constexpr UseReprHintRecording ShouldRecordUseReprHint()
NodeInfo * GetOrCreateInfoFor(ValueNode *node)
ValueNode * GetFloat64(interpreter::Register reg)
static size_t gvn_hash_value(const v8::internal::ZoneCompactSet< T > &vector)
void RegisterPhisWithGraphLabeller(MergePointInterpreterFrameState &merge_state)
void CalculatePredecessorCounts()
ValueNode * GetAccumulator()
ValueNode * GetUint8ClampedForToNumber(interpreter::Register reg)
int parameter_count() const
int parameter_count_without_receiver() const
void ProcessMergePointPredecessors(MergePointInterpreterFrameState &merge_state, BasicBlockRef &jump_targets)
ReduceResult EmitUnconditionalDeopt(DeoptimizeReason reason)
compiler::BytecodeAnalysis bytecode_analysis_
MaglevGraphLabeller * graph_labeller() const
void ClobberAccumulator()
DeoptFrame * AddInlinedArgumentsToDeoptFrame(DeoptFrame *deopt_frame, const MaglevCompilationUnit *unit, ValueNode *closure, base::Vector< ValueNode * > args)
void ProcessMergePoint(int offset, bool preserve_known_node_aspects)
void MinimizeContextChainDepth(ValueNode **context, size_t *depth)
float GetCurrentCallFrequency()
ValueNode * GetAccumulatorUint8ClampedForToNumber()
void SetContext(ValueNode *context)
ValueNode * GetNumberConstant(double constant)
void MoveNodeBetweenRegisters(interpreter::Register src, interpreter::Register dst)
void Print(const char *str)
RootConstant * GetRootConstant(RootIndex index)
LocalIsolate * local_isolate() const
BranchBuilder CreateBranchBuilder(BranchType jump_type=BranchType::kBranchIfTrue)
void ProcessMergePointAtExceptionHandlerStart(int offset)
DeoptFrame GetDeoptFrameForLazyDeopt(interpreter::Register result_location, int result_size)
ZoneDeque< LoopEffects * > loop_effects_stack_
void RecordUseReprHint(Phi *phi, UseRepresentation repr)
bool MaglevIsTopTier() const
BranchBuilder CreateBranchBuilder(MaglevSubGraphBuilder *subgraph, MaglevSubGraphBuilder::Label *jump_label, BranchType jump_type=BranchType::kBranchIfTrue)
bool EnsureType(ValueNode *node, NodeType type, NodeType *old=nullptr)
void UpdateSourceAndBytecodePosition(int offset)
int max_inlined_bytecode_size_small()
int max_inlined_bytecode_size_cumulative()
void SetAccumulatorInBranch(ValueNode *value)
compiler::FeedbackVectorRef feedback() const
static size_t gvn_hash_value(const ExternalReference &ref)
TaggedIndexConstant * GetTaggedIndexConstant(int constant)
void InitializeRegister(interpreter::Register reg, ValueNode *value)
interpreter::BytecodeArrayIterator iterator_
static size_t gvn_hash_value(const T &in)
InterpretedDeoptFrame GetDeoptFrameForEntryStackCheck()
constexpr bool RuntimeFunctionCanThrow(Runtime::FunctionId function_id)
void MergeDeadLoopIntoFrameState(int target)
int inline_exit_offset() const
void AttachExceptionHandlerInfo(NodeT *node)
bool ShouldEmitInterruptBudgetChecks()
RootConstant * GetBooleanConstant(bool value)
int next_handler_table_index_
void BuildStoreMap(ValueNode *object, compiler::MapRef map, StoreMap::Kind kind)
CatchBlockDetails GetTryCatchBlockFromInfo(ExceptionHandlerInfo *info)
void BuildRegisterFrameInitialization(ValueNode *context=nullptr, ValueNode *closure=nullptr, ValueNode *new_target=nullptr)
void AttachEagerDeoptInfo(NodeT *node)
CallBuiltin * BuildCallBuiltin(std::initializer_list< ValueNode * > inputs, compiler::FeedbackSource const &feedback, CallBuiltin::FeedbackSlotType slot_type=CallBuiltin::kTaggedIndex)
void StartNewBlock(BasicBlock *predecessor, MergePointInterpreterFrameState *merge_state, BasicBlockRef &refs_to_block)
NodeT * AttachExtraInfoAndAddToGraph(NodeT *node)
void StartNewBlock(int offset, BasicBlock *predecessor)
void BuildLoadContextSlot(ValueNode *context, size_t depth, int slot_index, ContextSlotMutability slot_mutability, ContextKind context_kind)
DeoptFrameScope * current_deopt_scope_
compiler::BytecodeArrayRef bytecode() const
InterpreterFrameState current_interpreter_frame_
bool is_loop_effect_tracking()
const compiler::BytecodeLivenessState * GetOutLiveness() const
void StoreRegisterPair(std::pair< interpreter::Register, interpreter::Register > target, NodeT *value)
ValueNode * LoadAndCacheContextSlot(ValueNode *context, int offset, ContextSlotMutability slot_mutability, ContextKind context_kind)
void SetKnownValue(ValueNode *node, compiler::ObjectRef constant, NodeType new_node_type)
void MarkNodeDead(Node *node)
void ResetBuilderCachedState()
bool IsOffsetAMergePoint(int offset)
LazyDeoptResultLocationScope * lazy_deopt_result_location_scope_
ValueNode * GetFloat64ForToNumber(interpreter::Register reg, NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
uint32_t * predecessor_count_
MergePointInterpreterFrameState ** merge_states_
MaglevCallerDetails * caller_details() const
MergePointInterpreterFrameState * GetCatchBlockFrameState()
BasicBlock * FinishInlinedBlockForCaller(ControlNode *control_node, ZoneVector< Node * > rem_nodes_in_call_block)
ReduceResult GetUint32ElementIndex(interpreter::Register reg)
void MarkPossibleSideEffect(NodeT *node)
bool need_checkpointed_loop_entry()
ReduceResult BuildCallRuntime(Runtime::FunctionId function_id, std::initializer_list< ValueNode * > inputs)
BasicBlockRef * jump_targets_
Float64Constant * GetFloat64Constant(double constant)
ValueNode * GetClosure() const
ValueNode * GetInlinedArgument(int i)
void Print(const char *str, ValueNode *value)
MaybeReduceResult GetSmiValue(interpreter::Register reg, UseReprHintRecording record_use_repr_hint=UseReprHintRecording::kRecord)
void SetAccumulator(NodeT *node)
const FeedbackNexus FeedbackNexusForSlot(FeedbackSlot slot) const
MaybeReduceResult TrySpecializeStoreScriptContextSlot(ValueNode *context, int index, ValueNode *value, Node **store)
static size_t gvn_hash_value(const compiler::MapRef &map)
void EnsureInt32(interpreter::Register reg)
MaglevGraphBuilder(LocalIsolate *local_isolate, MaglevCompilationUnit *compilation_unit, Graph *graph, MaglevCallerDetails *caller_details=nullptr)
ValueNode * GetAccumulatorHoleyFloat64ForToNumber(NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
void KillPeeledLoopTargets(int peelings)
void PrintVirtualObjects()
ZoneStack< HandlerTableEntry > catch_block_stack_
ValueNode * TryGetParentContext(ValueNode *node)
void AttachDeoptCheckpoint(NodeT *node)
static size_t gvn_hash_value(const PolymorphicAccessInfo &access_info)
void BuildLoopForPeeling()
ValueNode * GetArgument(int i)
void AttachLazyDeoptInfo(NodeT *node)
void BeginLoopEffects(int loop_header)
bool ContextMayAlias(ValueNode *context, compiler::OptionalScopeInfoRef scope_info)
ValueNode * LoadRegisterHoleyFloat64ForToNumber(int operand_index, NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
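The ...ForToNumber loaders thread both the statically allowed input type and the tagged-to-float64 conversion kind down to the conversion node. A sketch with assumed enumerator choices (real visitors derive these from the binary-op feedback rather than hard-coding them):

// NodeType::kNumber and kOnlyNumber are assumed values for illustration.
ValueNode* lhs = LoadRegisterHoleyFloat64ForToNumber(
    0, NodeType::kNumber, TaggedToFloat64ConversionType::kOnlyNumber);
ValueNode* rhs = GetAccumulatorHoleyFloat64ForToNumber(
    NodeType::kNumber, TaggedToFloat64ConversionType::kOnlyNumber);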
void VisitSingleBytecode()
int inlining_depth() const
std::pair< interpreter::Register, int > GetResultLocationAndSize() const
bool is_eager_inline() const
KnownNodeAspects & known_node_aspects()
MaybeReduceResult GetAccumulatorSmi(UseReprHintRecording record_use_repr_hint=UseReprHintRecording::kRecord)
bool in_peeled_iteration() const
bool IsInsideTryBlock() const
ReduceResult BuildAbort(AbortReason reason)
bool has_graph_labeller() const
bool CheckContextExtensions(size_t depth)
ZoneUnorderedMap< KnownNodeAspects::LoadedContextSlotsKey, Node * > unobserved_context_slot_stores_
MaglevCompilationUnit * compilation_unit() const
CallBuiltin * BuildCallBuiltin(std::initializer_list< ValueNode * > inputs)
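Both BuildCallBuiltin overloads hand back the CallBuiltin node itself; the builtin being called is presumably selected through a template argument that this listing does not show. A hedged sketch, with Builtin::kToString picked purely for illustration:

// Assumes the template form BuildCallBuiltin<Builtin::...>; the overload with
// a FeedbackSource additionally wires up the builtin's feedback slot input.
CallBuiltin* call = BuildCallBuiltin<Builtin::kToString>({LoadRegister(0)});
SetAccumulator(call);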
NodeType GetType(ValueNode *node)
InferHasInPrototypeChainResult
bool CanSpeculateCall() const
static constexpr bool kLoopsMustBeEnteredThroughHeader
void InitializePredecessorCount(uint32_t offset, int amount)
void AddInitializedNodeToGraph(Node *node)
ReduceResult BuildStoreContextSlot(ValueNode *context, size_t depth, int slot_index, ValueNode *value, ContextKind context_kind)
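BuildStoreContextSlot walks `depth` parent links before storing and returns a ReduceResult because script-context slots may need specialized handling (cf. TrySpecializeStoreScriptContextSlot above). A sketch inside a hypothetical visitor; the depth, slot and ContextKind values are illustrative, and `context` stands for whatever node currently holds the context:

// Illustrative operands; real callers read depth/slot from bytecode operands.
ValueNode* value = LoadRegister(0);
ReduceResult store = BuildStoreContextSlot(
    context, /*depth=*/1, /*slot_index=*/2, value, ContextKind::kDefault);
if (store.IsDoneWithAbort()) return store;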
std::function< DeoptFrameScope( compiler::JSFunctionRef, ValueNode *, ValueNode *, ValueNode *, ValueNode *, ValueNode *, ValueNode *)> GetDeoptScopeCallback
Float64Constant * GetFloat64Constant(Float64 constant)
ValueNode * GetFeedbackCell()
MaglevCompilationUnit *const compilation_unit_
ValueNode * GetSilencedNaN(ValueNode *value)
const compiler::BytecodeAnalysis & bytecode_analysis() const
FeedbackSlot GetSlotOperand(int operand_index) const
ForInState current_for_in_state
const compiler::BytecodeLivenessState * GetOutLivenessFor(int offset) const
ValueNode * LoadRegister(int operand_index)
ValueNode * BuildExtendPropertiesBackingStore(compiler::MapRef map, ValueNode *receiver, ValueNode *property_array)
bool is_loop_effect_tracking_enabled()
NodeT * AddNewNodeOrGetEquivalent(std::initializer_list< ValueNode * > raw_inputs, Args &&... args)
void set_current_block(BasicBlock *block)
ValueNode * GetSecondValue(ValueNode *result)
ValueNode * GetValueOrUndefined(ValueNode *maybe_value)
SmiConstant * GetSmiConstant(int constant) const
std::function< ReduceResult(ValueNode *)> InitialCallback
static size_t gvn_hash_value(const Representation &rep)
ValueNode * GetInt32ElementIndex(interpreter::Register reg)
compiler::JSHeapBroker * broker() const
uint32_t predecessor_count(uint32_t offset)
Int32Constant * GetInt32Constant(int32_t constant)
bool CheckStaticType(ValueNode *node, NodeType type, NodeType *old=nullptr)
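CheckStaticType answers purely from what the node statically is, while GetType and known_node_aspects() also fold in facts learned earlier in the block. A sketch of the usual guard pattern, with NodeType::kSmi as an assumed enumerator:

// Skip emitting a check when the input is statically known to be a Smi;
// otherwise consult the flow-sensitive information instead.
NodeType old_type;
if (!CheckStaticType(node, NodeType::kSmi, &old_type)) {
  NodeType current = GetType(node);  // includes per-block knowledge
  // ... emit a checked conversion if `current` is still too weak ...
}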
float min_inlining_frequency()
std::optional< DeoptFrame > latest_checkpointed_frame_
bool TrySpecializeLoadContextSlotToFunctionContext(ValueNode *context, int slot_index, ContextSlotMutability slot_mutability)
std::function< void(ValueNode *, ValueNode *)> ProcessElementCallback
void RecordUseReprHintIfPhi(ValueNode *node, UseRepresentation repr)
void RegisterNode(const NodeBase *node, const MaglevCompilationUnit *unit, BytecodeOffset bytecode_offset, SourcePosition position)
bool IsDoneWithoutValue() const
static MaybeReduceResult Fail()
MaybeReduceResult(base::PointerWithPayload< ValueNode, Kind, 3 > payload)
MaybeReduceResult & operator=(const MaybeReduceResult &) V8_NOEXCEPT=default
base::PointerWithPayload< ValueNode, Kind, 3 > payload_
ValueNode * value() const
bool IsDoneWithValue() const
MaybeReduceResult(ValueNode *value)
MaybeReduceResult(const MaybeReduceResult &) V8_NOEXCEPT=default
bool IsDoneWithAbort() const
MaybeReduceResult(Kind kind)
base::PointerWithPayload< ValueNode, Kind, 3 > GetPayload() const
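Taken together, these members describe a small protocol: a TryReduce-style helper returns Fail() when it cannot beat the generic lowering, an abort when the continuation is unreachable, or a ValueNode* (implicitly a done-with-value result). A producer-side sketch with hypothetical names:

// Hypothetical reduction helper; CanSpeculateCall and GetBooleanConstant are
// the builder members listed earlier.
MaybeReduceResult TryReduceAlwaysTrue(MaglevGraphBuilder* builder) {
  if (!builder->CanSpeculateCall()) return MaybeReduceResult::Fail();
  return builder->GetBooleanConstant(true);  // ValueNode* -> done-with-value
}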
BasicBlock * predecessor_at(int i) const
void MergeDead(const MaglevCompilationUnit &compilation_unit, unsigned num=1)
void InitializeWithBasicBlock(BasicBlock *current_block)
uint32_t predecessors_so_far() const
bool is_unmerged_loop() const
bool is_exception_handler() const
void Merge(MaglevGraphBuilder *graph_builder, InterpreterFrameState &unmerged, BasicBlock *predecessor)
bool is_unmerged_unreachable_loop() const
bool is_resumable_loop() const
void set_predecessor_at(int i, BasicBlock *val)
bool exception_handler_was_used() const
uint32_t predecessor_count() const
constexpr bool Is() const
static constexpr Opcode opcode_of
static Derived * New(Zone *zone, std::initializer_list< ValueNode * > inputs, Args &&... args)
constexpr OpProperties properties() const
static constexpr bool needs_epoch_check(Opcode op)
static constexpr bool participate_in_cse(Opcode op)
constexpr ValueRepresentation value_representation() const
size_t hash_value() const
static ReduceResult Done(ValueNode *value)
static ReduceResult DoneWithAbort()
static ReduceResult Done()
ReduceResult(const MaybeReduceResult &other)
ReduceResult(ValueNode *value)
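On the consumer side, the MaybeReduceResult is folded back into the ReduceResult a visitor must return, falling through to the generic path on Fail(). A sketch reusing the hypothetical TryReduceAlwaysTrue from above:

MaybeReduceResult maybe = TryReduceAlwaysTrue(this);
if (maybe.IsDoneWithAbort()) return ReduceResult::DoneWithAbort();
if (maybe.IsDoneWithValue()) {
  SetAccumulator(maybe.value());
  return ReduceResult::Done();
}
// Fail(): continue with the generic lowering below.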
ValueNode * get(uint32_t offset) const
uint32_t double_elements_length() const
JSHeapBroker *const broker_
#define INTRINSICS_LIST(V)
BasicBlock * current_block_
#define DEFINE_BUILTIN_REDUCER(Name,...)
#define MAGLEV_REDUCED_BUILTIN(V)
#define BAILOUT(name,...)
#define BYTECODE_VISITOR(name,...)
#define DEFINE_IS_ROOT_OBJECT(type, name, CamelName)
V8_INLINE size_t hash_value(unsigned int v)
constexpr uint64_t double_to_uint64(double d)
V8_INLINE size_t fast_hash_combine()
constexpr OpProperties StaticPropertiesForOpcode(Opcode opcode)
constexpr bool IsConstantNode(Opcode opcode)
NodeType StaticTypeForNode(compiler::JSHeapBroker *broker, LocalIsolate *isolate, ValueNode *node)
constexpr bool IsCommutativeNode(Opcode opcode)
constexpr bool IsSimpleFieldStore(Opcode opcode)
constexpr bool IsTypedArrayStore(Opcode opcode)
TaggedToFloat64ConversionType
NodeTMixin< Node, Derived > NodeT
constexpr bool IsElementsArrayWrite(Opcode opcode)
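The constexpr opcode predicates above can be evaluated at compile time, which makes them usable as guards in templated node helpers. A small sketch, assuming the node class exposes opcode_of as listed earlier:

// Hypothetical compile-time guard built on the predicates above.
template <typename NodeClass>
constexpr bool CanReorderInputs() {
  return IsCommutativeNode(NodeClass::opcode_of);
}
static_assert(!IsConstantNode(Opcode::kPhi));  // Phi is not a constant node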
constexpr int kFunctionEntryBytecodeOffset
V8_EXPORT_PRIVATE FlagValues v8_flags
void MemsetUint32(uint32_t *dest, uint32_t value, size_t counter)
#define FOR_EACH_THROWING_INTRINSIC(F)
#define DCHECK_LE(v1, v2)
#define DCHECK_NOT_NULL(val)
#define CHECK_NOT_NULL(val)
#define DCHECK_IMPLIES(v1, v2)
#define DCHECK_NE(v1, v2)
#define CHECK_NE(lhs, rhs)
#define DCHECK_GE(v1, v2)
#define CHECK_EQ(lhs, rhs)
#define DCHECK(condition)
#define DCHECK_EQ(v1, v2)
#define DCHECK_GT(v1, v2)
int parent_offset() const
bool exception_handler_was_used
ZoneMap< uint32_t, AvailableExpression > available_expressions
void ClearAvailableExpressions()
NodeInfo * GetOrCreateInfoFor(ValueNode *node, compiler::JSHeapBroker *broker, LocalIsolate *isolate)
uint32_t effect_epoch() const
static constexpr uint32_t kEffectEpochForPureInstructions
static constexpr uint32_t kEffectEpochOverflow
CallKnownJSFunction * generic_call_node
MaglevCallerDetails caller_details
compiler::FeedbackCellRef feedback_cell
ZoneUnorderedMap< KnownNodeAspects::LoadedContextSlotsKey, Node * > unobserved_context_slot_stores
KnownNodeAspects * known_node_aspects
base::Vector< ValueNode * > arguments
CatchBlockDetails catch_block
LoopEffects * loop_effects
BytecodeJumpTarget(int jump_target_offset, int fallthrough_offset)
PatchAccumulatorInBranchScope * patch_accumulator_scope
BasicBlockRef fallthrough
LabelJumpTarget(MaglevSubGraphBuilder::Label *jump_label)
MaglevSubGraphBuilder::Label * jump_label
ValueNode * enum_cache_indices
Data(MaglevSubGraphBuilder::Label *jump_label)
Data(int jump_target_offset, int fallthrough_offset)
BytecodeJumpTarget bytecode_target
LabelJumpTarget label_target
#define V8_UNLIKELY(condition)