v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
instruction-scheduler.h
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_BACKEND_INSTRUCTION_SCHEDULER_H_
#define V8_COMPILER_BACKEND_INSTRUCTION_SCHEDULER_H_

#include <optional>

#include "src/base/utils/random-number-generator.h"
#include "src/compiler/backend/instruction.h"
#include "src/zone/zone-containers.h"

namespace v8 {
namespace internal {
namespace compiler {

// A set of flags describing properties of the instructions so that the
// scheduler is aware of dependencies between instructions.
enum ArchOpcodeFlags {
  kNoOpcodeFlags = 0,
  kHasSideEffect = 1,    // The instruction has some side effects (memory
                         // store, function call...)
  kIsLoadOperation = 2,  // The instruction is a memory load.
  kMayNeedDeoptOrTrapCheck = 4,  // The instruction may be associated with a
                                 // deopt or trap check which must be run
                                 // before the instruction, e.g. div on Intel
                                 // platforms, which raises an exception when
                                 // the divisor is zero.
  kIsBarrier = 8,  // The instruction can cause GC or it reads/writes registers
                   // that are not explicitly given. Nothing can be reordered
                   // across such an instruction.
};

class InstructionScheduler final : public ZoneObject {
 public:
  V8_EXPORT_PRIVATE InstructionScheduler(Zone* zone,
                                         InstructionSequence* sequence);

  V8_EXPORT_PRIVATE void StartBlock(RpoNumber rpo);
  V8_EXPORT_PRIVATE void EndBlock(RpoNumber rpo);

  V8_EXPORT_PRIVATE void AddInstruction(Instruction* instr);
  V8_EXPORT_PRIVATE void AddTerminator(Instruction* instr);

  static bool SchedulerSupported();

 private:
  // A scheduling graph node.
  // Represents an instruction and its dependencies.
  class ScheduleGraphNode : public ZoneObject {
   public:
    ScheduleGraphNode(Zone* zone, Instruction* instr);

    // Mark the instruction represented by 'node' as a dependency of this one.
    // The current instruction will be registered as an unscheduled predecessor
    // of 'node' (i.e. it must be scheduled before 'node').
    void AddSuccessor(ScheduleGraphNode* node);

    // Check if all the predecessors of this instruction have been scheduled.
    bool HasUnscheduledPredecessor() {
      return unscheduled_predecessors_count_ != 0;
    }

    // Record that we have scheduled one of the predecessors of this node.
    void DropUnscheduledPredecessor() {
      DCHECK_LT(0, unscheduled_predecessors_count_);
      unscheduled_predecessors_count_--;
    }

    Instruction* instruction() { return instr_; }
    ZoneDeque<ScheduleGraphNode*>& successors() { return successors_; }
    int latency() const { return latency_; }

    int total_latency() const { return total_latency_; }
    void set_total_latency(int latency) { total_latency_ = latency; }

    int start_cycle() const { return start_cycle_; }
    void set_start_cycle(int start_cycle) { start_cycle_ = start_cycle; }

   private:
    Instruction* instr_;
    ZoneDeque<ScheduleGraphNode*> successors_;

    // Number of unscheduled predecessors for this node.
    int unscheduled_predecessors_count_;

    // Estimate of the instruction latency (the number of cycles it takes for
    // the instruction to complete).
    int latency_;

    // The sum of all the latencies on the path from this node to the end of
    // the graph (i.e. a node with no successor).
    int total_latency_;

    // The scheduler keeps a nominal cycle count to keep track of when the
    // result of an instruction is available. This field is updated by the
    // scheduler to indicate when the value of all the operands of this
    // instruction will be available.
    int start_cycle_;
  };

  // Keep track of all nodes ready to be scheduled (i.e. all their dependencies
  // have been scheduled). Note that this class is intended to be extended by
  // concrete implementations of the scheduling queue which define the policy
  // to pop nodes from the queue.
  class SchedulingQueueBase {
   public:
    explicit SchedulingQueueBase(InstructionScheduler* scheduler)
        : scheduler_(scheduler), nodes_(scheduler->zone()) {}

    void AddNode(ScheduleGraphNode* node);

    bool IsEmpty() const { return nodes_.empty(); }

   protected:
    InstructionScheduler* scheduler_;
    ZoneLinkedList<ScheduleGraphNode*> nodes_;
  };

  // A scheduling queue which prioritizes nodes on the critical path (we look
  // for the instruction with the highest latency on the path to reach the end
  // of the graph).
  class CriticalPathFirstQueue : public SchedulingQueueBase {
   public:
    explicit CriticalPathFirstQueue(InstructionScheduler* scheduler)
        : SchedulingQueueBase(scheduler) {}

    // Look for the best candidate to schedule, remove it from the queue and
    // return it.
    ScheduleGraphNode* PopBestCandidate(int cycle);
  };

  // A queue which pops a random node from the queue to perform stress tests on
  // the scheduler.
  class StressSchedulerQueue : public SchedulingQueueBase {
   public:
    explicit StressSchedulerQueue(InstructionScheduler* scheduler)
        : SchedulingQueueBase(scheduler) {}

    ScheduleGraphNode* PopBestCandidate(int cycle);

   private:
    base::RandomNumberGenerator* random_number_generator() {
      return scheduler_->random_number_generator();
    }
  };

  // Perform scheduling for the current block specifying the queue type to
  // use to determine the next best candidate.
  template <typename QueueType>
  void Schedule();

  // Return the scheduling properties of the given instruction.
  V8_EXPORT_PRIVATE int GetInstructionFlags(const Instruction* instr) const;
  int GetTargetInstructionFlags(const Instruction* instr) const;

  bool IsBarrier(const Instruction* instr) const {
    return (GetInstructionFlags(instr) & kIsBarrier) != 0;
  }

  // Check whether the given instruction has side effects (e.g. function call,
  // memory store).
  bool HasSideEffect(const Instruction* instr) const {
    return (GetInstructionFlags(instr) & kHasSideEffect) != 0;
  }

  // Return true if the instruction is a memory load.
  bool IsLoadOperation(const Instruction* instr) const {
    return (GetInstructionFlags(instr) & kIsLoadOperation) != 0;
  }

  bool CanTrap(const Instruction* instr) const {
    return instr->IsTrap() ||
           (instr->HasMemoryAccessMode() &&
            instr->memory_access_mode() != kMemoryAccessDirect);
  }

  // The scheduler will not move the following instructions before the last
  // deopt/trap check:
  //  * loads (this is conservative)
  //  * instructions with side effects
  //  * other deopts/traps
  // Any other instruction can be moved, apart from those that raise exceptions
  // on specific inputs - these are filtered out by the deopt/trap check.
  bool MayNeedDeoptOrTrapCheck(const Instruction* instr) const {
    return (GetInstructionFlags(instr) & kMayNeedDeoptOrTrapCheck) != 0;
  }

  // Return true if the instruction cannot be moved before the last deopt or
  // trap point we encountered.
  bool DependsOnDeoptOrTrap(const Instruction* instr) const {
    return MayNeedDeoptOrTrapCheck(instr) || instr->IsDeoptimizeCall() ||
           CanTrap(instr) || HasSideEffect(instr) || IsLoadOperation(instr);
  }

  // Identify nops used as a definition point for live-in registers at
  // function entry.
  bool IsFixedRegisterParameter(const Instruction* instr) const {
    return (instr->arch_opcode() == kArchNop) && (instr->OutputCount() == 1) &&
           (instr->OutputAt(0)->IsUnallocated()) &&
           (UnallocatedOperand::cast(instr->OutputAt(0))
                ->HasFixedRegisterPolicy() ||
            UnallocatedOperand::cast(instr->OutputAt(0))
                ->HasFixedFPRegisterPolicy());
  }

  void ComputeTotalLatencies();

  static int GetInstructionLatency(const Instruction* instr);

  Zone* zone() { return zone_; }
  InstructionSequence* sequence() { return sequence_; }
  base::RandomNumberGenerator* random_number_generator() {
    return &random_number_generator_.value();
  }

  Zone* zone_;
  InstructionSequence* sequence_;
  ZoneVector<ScheduleGraphNode*> graph_;

  // Last side effect instruction encountered while building the graph.
  ScheduleGraphNode* last_side_effect_instr_;

  // Set of load instructions encountered since the last side effect instruction
  // which will be added as predecessors of the next instruction with side
  // effects.
  ZoneVector<ScheduleGraphNode*> pending_loads_;

  // Live-in register markers are nop instructions which are emitted at the
  // beginning of a basic block so that the register allocator will find a
  // defining instruction for live-in values. They must not be moved.
  // All these nops are chained together and added as a predecessor of every
  // other instruction in the basic block.
  ScheduleGraphNode* last_live_in_reg_marker_;

  // Last deoptimization or trap instruction encountered while building the
  // graph.
  ScheduleGraphNode* last_deopt_or_trap_;

  // Keep track of definition points for virtual registers. This is used to
  // record operand dependencies in the scheduling graph.
  ZoneMap<int32_t, ScheduleGraphNode*> operands_map_;

  std::optional<base::RandomNumberGenerator> random_number_generator_;
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_BACKEND_INSTRUCTION_SCHEDULER_H_
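The flag values in the enum at the top of the header are distinct bits, so a backend can OR several properties together for one opcode and the scheduler can query each property independently with a bitwise AND, exactly as the IsBarrier/HasSideEffect/IsLoadOperation helpers do. A minimal standalone sketch of that idiom follows; the MiniOpcodeFlags names and the example classification are illustrative, not part of V8.

#include <cstdio>

// Illustrative flag set mirroring the header's enum: each property occupies a
// distinct bit, so properties combine with '|' and are tested with '&'.
enum MiniOpcodeFlags {
  kNoFlags = 0,
  kSideEffectBit = 1,
  kLoadBit = 2,
  kDeoptOrTrapCheckBit = 4,
  kBarrierBit = 8,
};

bool HasSideEffect(int flags) { return (flags & kSideEffectBit) != 0; }
bool IsLoadOperation(int flags) { return (flags & kLoadBit) != 0; }
bool IsBarrier(int flags) { return (flags & kBarrierBit) != 0; }

int main() {
  // A plain load only sets the load bit; a call-like instruction could set
  // both the side-effect and barrier bits (e.g. because it may cause GC),
  // so nothing would be reordered across it.
  int load_flags = kLoadBit;
  int call_flags = kSideEffectBit | kBarrierBit;

  std::printf("load: load=%d barrier=%d\n", IsLoadOperation(load_flags),
              IsBarrier(load_flags));
  std::printf("call: side effect=%d barrier=%d\n", HasSideEffect(call_flags),
              IsBarrier(call_flags));
  return 0;
}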
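For intuition about the total_latency field and the critical-path-first policy described in the comments, here is a self-contained sketch, not V8 code: total latency is the length of the longest path from a node to a graph exit, and among the ready nodes the queue pops the one with the largest total latency so the longest dependency chain starts as early as possible. The real PopBestCandidate also takes the current cycle and uses start_cycle to prefer nodes whose operands are already available; that refinement is omitted here for brevity.

#include <algorithm>
#include <cstdio>
#include <vector>

// Illustrative node mirroring the ScheduleGraphNode fields described above.
struct Node {
  const char* name;
  int latency;                    // cycles for this instruction to complete
  int total_latency;              // latency of the longest path to a graph exit
  std::vector<Node*> successors;  // instructions that must run after this one
};

// total_latency = own latency plus the largest total latency among the
// successors, i.e. the sum of latencies on the longest path from this node
// to a node with no successor.
int ComputeTotalLatency(Node* n) {
  int max_successor = 0;
  for (Node* s : n->successors)
    max_successor = std::max(max_successor, ComputeTotalLatency(s));
  n->total_latency = n->latency + max_successor;
  return n->total_latency;
}

// Critical-path-first selection: among the ready nodes, pop the one with the
// highest total latency so the longest dependency chain is started early.
Node* PopBestCandidate(std::vector<Node*>* ready) {
  auto best = std::max_element(
      ready->begin(), ready->end(),
      [](Node* a, Node* b) { return a->total_latency < b->total_latency; });
  Node* node = *best;
  ready->erase(best);
  return node;
}

int main() {
  Node end{"end", 1, 0, {}};
  Node slow_chain{"slow_chain", 4, 0, {&end}};
  Node quick{"quick", 1, 0, {&end}};
  Node root{"root", 1, 0, {&slow_chain, &quick}};
  ComputeTotalLatency(&root);  // end=1, quick=2, slow_chain=5, root=6

  std::vector<Node*> ready = {&quick, &slow_chain};
  std::printf("scheduled first: %s\n", PopBestCandidate(&ready)->name);
  std::printf("scheduled next:  %s\n", PopBestCandidate(&ready)->name);
  return 0;
}

In this tiny graph, quick and slow_chain become ready at the same time, but slow_chain heads the longer remaining path (total latency 5 versus 2), so a critical-path-first queue schedules it before quick.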