UseMap::UseMap(const Graph& graph, Zone* zone, FunctionType filter)
    : table_(graph.op_id_count(), zone, &graph),
      uses_(zone),
      saturated_uses_(zone) {
  // Holds (back-edge input, loop phi) pairs; type inferred for this excerpt.
  ZoneVector<std::pair<OpIndex, OpIndex>> delayed_phi_uses(zone);

  // Preallocate room for roughly two uses per operation.
  uses_.reserve(graph.op_id_count() * 2);

  for (uint32_t index = 0; index < graph.block_count(); ++index) {
    BlockIndex block_index(index);
    const Block& block = graph.Get(block_index);

    auto block_ops = graph.OperationIndices(block);
    for (OpIndex op_index : block_ops) {
      const Operation& op = graph.Get(op_index);
      // ... (the table_ entry for {op_index} is initialized here; operations
      //      whose use count saturated get a slot in saturated_uses_) ...
      if (filter(op, zone)) continue;
      // ... (loop-phi back edges are pushed onto delayed_phi_uses here,
      //      since their producers have not been visited yet) ...
      for (OpIndex input_index : op.inputs()) {
        AddUse(&graph, input_index, op_index);
      }
    }
  }

  // Record the deferred back-edge uses of loop phis.
  for (auto [input_index, op_index] : delayed_phi_uses) {
    AddUse(&graph, input_index, op_index);
  }
}

void UseMap::AddUse(const Graph* graph, OpIndex node, OpIndex use) {
  // ... ({node}'s offset and running use count are looked up in table_) ...
    DCHECK_LT(input_count, graph->Get(node).saturated_use_count.Get());
    uses_[input_offset + input_count] = use;
  // ... (operations with a saturated use count append to their growable
  //      vector in saturated_uses_ instead) ...
    uses.emplace_back(use);
}
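The storage scheme that AddUse relies on can be illustrated outside the V8 tree with a small standalone sketch. Everything below (ToyUseMap, Slot, kSaturated, plain std::vector containers, 32-bit node ids) is invented for illustration; the real class uses Zone-allocated containers, OpIndex handles, and the SaturatedUint8 use counter listed further down.

#include <cstdint>
#include <iostream>
#include <limits>
#include <vector>

// Standalone sketch (not V8 code): each node gets a pre-sized slice of a flat
// `uses_` array when its predicted use count is small; nodes whose 8-bit use
// counter saturated instead get a growable vector in an overflow table.
class ToyUseMap {
 public:
  static constexpr uint8_t kSaturated = std::numeric_limits<uint8_t>::max();

  // use_counts[n] is the (possibly saturated) predicted use count of node n.
  explicit ToyUseMap(const std::vector<uint8_t>& use_counts)
      : table_(use_counts.size()) {
    for (size_t n = 0; n < use_counts.size(); ++n) {
      if (use_counts[n] == kSaturated) {
        // Overflow path: a negative offset encodes an index into overflow_.
        table_[n].offset = -static_cast<int32_t>(overflow_.size()) - 1;
        overflow_.emplace_back();
      } else {
        // Fast path: reserve a fixed slice at the end of the flat array.
        table_[n].offset = static_cast<int32_t>(uses_.size());
        uses_.resize(uses_.size() + use_counts[n]);
      }
    }
  }

  void AddUse(uint32_t node, uint32_t use) {
    Slot& slot = table_[node];
    if (slot.offset >= 0) {
      uses_[slot.offset + slot.count] = use;       // write into the slice
    } else {
      overflow_[-slot.offset - 1].push_back(use);  // growable fallback
    }
    ++slot.count;
  }

  std::vector<uint32_t> uses(uint32_t node) const {
    const Slot& slot = table_[node];
    if (slot.offset >= 0) {
      return {uses_.begin() + slot.offset,
              uses_.begin() + slot.offset + slot.count};
    }
    return overflow_[-slot.offset - 1];
  }

 private:
  struct Slot {
    int32_t offset = 0;   // >= 0: index into uses_; < 0: -offset - 1 into overflow_
    uint32_t count = 0;
  };
  std::vector<Slot> table_;
  std::vector<uint32_t> uses_;
  std::vector<std::vector<uint32_t>> overflow_;
};

int main() {
  ToyUseMap map({/*node 0*/ 2, /*node 1*/ ToyUseMap::kSaturated});
  map.AddUse(0, 5);
  map.AddUse(0, 7);
  map.AddUse(1, 9);
  for (uint32_t u : map.uses(0)) std::cout << u << ' ';  // prints: 5 7
}

The negative-offset encoding lets one 32-bit field serve both paths, which is presumably why the real table_ stores a per-operation offset/count pair rather than a pointer per operation.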
The UseMap interface:

  using FunctionType = bool (*)(const Operation& op, Zone* zone);

  UseMap(const Graph& graph, Zone* zone, FunctionType filter);
  base::Vector<const OpIndex> uses(OpIndex index) const;
  void AddUse(const Graph* graph, OpIndex node, OpIndex use);

  FixedOpIndexSidetable<PerOperationUses> table_;
  ZoneVector<OpIndex> uses_;
  ZoneVector<ZoneVector<OpIndex>> saturated_uses_;
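As a usage sketch, assuming the V8-internal types named above (Graph, Block, BlockIndex, Zone, Operation, OpIndex) plus Turboshaft's ConstantOp, a pass could build the map with a filter and then query it. The helper names SkipIfConstant and CountAllUses are hypothetical, and this fragment only compiles inside the V8 source tree.

// Hypothetical helpers; only the UseMap interface above is taken from the source.
bool SkipIfConstant(const Operation& op, Zone* zone) {
  // Returning true makes the constructor skip {op}, so it is never recorded
  // as a use of its inputs.
  return op.Is<ConstantOp>();
}

size_t CountAllUses(const Graph& graph, Zone* zone) {
  UseMap use_map(graph, zone, SkipIfConstant);
  size_t total = 0;
  for (uint32_t i = 0; i < graph.block_count(); ++i) {
    const Block& block = graph.Get(BlockIndex(i));
    for (OpIndex op_index : graph.OperationIndices(block)) {
      // uses(op_index) lists every operation consuming {op_index} as an input.
      total += use_map.uses(op_index).size();
    }
  }
  return total;
}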
Operation and PhiOp members used by this code:

  V8_INLINE OpIndex input(size_t i) const;
  const uint16_t input_count;
  base::Vector<const OpIndex> inputs() const;
  SaturatedUint8 saturated_use_count;
  static constexpr size_t kLoopPhiBackEdgeIndex;