jump-threading.cc
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/backend/jump-threading.h"
#include "src/compiler/backend/code-generator-impl.h"

namespace v8 {
namespace internal {
namespace compiler {

#define TRACE(...)                                    \
  do {                                                \
    if (v8_flags.trace_turbo_jt) PrintF(__VA_ARGS__); \
  } while (false)

namespace {

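// State for the DFS that discovers forwardable jumps: |result| maps each
// block's RPO number to the block it can be forwarded to (or to itself),
// |stack| holds the blocks currently being visited, and |forwarded| records
// whether any forwarding opportunity was found.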
struct JumpThreadingState {
  bool forwarded;
  ZoneVector<RpoNumber>& result;
  ZoneStack<RpoNumber>& stack;

  void Clear(size_t count) { result.assign(count, unvisited()); }
  void PushIfUnvisited(RpoNumber num) {
    if (result[num.ToInt()] == unvisited()) {
      stack.push(num);
      result[num.ToInt()] = onstack();
    }
  }
  void Forward(RpoNumber to) {
    RpoNumber from = stack.top();
    RpoNumber to_to = result[to.ToInt()];
    bool pop = true;
    if (to == from) {
      TRACE("  xx %d\n", from.ToInt());
      result[from.ToInt()] = from;
    } else if (to_to == unvisited()) {
      TRACE("  fw %d -> %d (recurse)\n", from.ToInt(), to.ToInt());
      stack.push(to);
      result[to.ToInt()] = onstack();
      pop = false;  // Recurse into |to|.
    } else if (to_to == onstack()) {
      TRACE("  fw %d -> %d (cycle)\n", from.ToInt(), to.ToInt());
      result[from.ToInt()] = to;  // Break the cycle.
      forwarded = true;
    } else {
      TRACE("  fw %d -> %d (forward)\n", from.ToInt(), to.ToInt());
      result[from.ToInt()] = to_to;  // Forward the block.
      forwarded = true;
    }
    if (pop) stack.pop();
  }
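  // Sentinel values stored in |result| for blocks that have not been
  // visited yet or that are currently on the DFS stack.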
  RpoNumber unvisited() { return RpoNumber::FromInt(-1); }
  RpoNumber onstack() { return RpoNumber::FromInt(-2); }
};

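// Remembers, per jump target, the kArchJmp instructions (and their gap
// moves) already seen jumping there, so that a later jump carrying
// identical gap moves can be forwarded to the earlier, equivalent block.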
struct GapJumpRecord {
  explicit GapJumpRecord(Zone* zone) : zone_(zone), gap_jump_records_(zone) {}

  struct Record {
    RpoNumber block;
    Instruction* instr;
  };

  struct RpoNumberHash {
    std::size_t operator()(const RpoNumber& key) const {
      return std::hash<int>()(key.ToInt());
    }
  };

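  // Checks whether a kArchJmp to |target_block| with the same gap moves as
  // |instr| has already been recorded. If so, sets *|forward_to| to the
  // block containing that jump and returns true; otherwise records |instr|
  // for later comparisons and returns false.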
  bool CanForwardGapJump(Instruction* instr, RpoNumber instr_block,
                         RpoNumber target_block, RpoNumber* forward_to) {
    DCHECK_EQ(instr->arch_opcode(), kArchJmp);
    bool can_forward = false;
    auto search = gap_jump_records_.find(target_block);
    if (search != gap_jump_records_.end()) {
      for (Record& record : search->second) {
        Instruction* record_instr = record.instr;
        DCHECK_EQ(record_instr->arch_opcode(), kArchJmp);
        bool is_same_instr = true;
        for (int i = Instruction::FIRST_GAP_POSITION;
             i <= Instruction::LAST_GAP_POSITION; i++) {
          Instruction::GapPosition pos =
              static_cast<Instruction::GapPosition>(i);
          ParallelMove* record_move = record_instr->GetParallelMove(pos);
          ParallelMove* instr_move = instr->GetParallelMove(pos);
          if (record_move == nullptr && instr_move == nullptr) continue;
          if (((record_move == nullptr) != (instr_move == nullptr)) ||
              !record_move->Equals(*instr_move)) {
            is_same_instr = false;
            break;
          }
        }
        if (is_same_instr) {
          // Found an instruction identical to the recorded one.
          *forward_to = record.block;
          can_forward = true;
          break;
        }
      }
      if (!can_forward) {
        // None of the recorded jumps to this target block matched, so
        // record the given instruction as well.
        search->second.push_back({instr_block, instr});
      }
    } else {
      // This is the first explored gap jump to the target block.
      auto ins =
          gap_jump_records_.insert({target_block, ZoneVector<Record>(zone_)});
      if (ins.second) {
        ins.first->second.reserve(4);
        ins.first->second.push_back({instr_block, instr});
      }
    }
    return can_forward;
  }

  Zone* zone_;
  ZoneUnorderedMap<RpoNumber, ZoneVector<Record>, RpoNumberHash>
      gap_jump_records_;
};

}  // namespace

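// Computes, for every block, the block its control flow can be forwarded to
// by skipping over empty blocks, storing the mapping in |result|. Returns
// true if at least one block can be forwarded.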
bool JumpThreading::ComputeForwarding(Zone* local_zone,
                                      ZoneVector<RpoNumber>* result,
                                      InstructionSequence* code,
                                      bool frame_at_start) {
  ZoneStack<RpoNumber> stack(local_zone);
  JumpThreadingState state = {false, *result, stack};
  state.Clear(code->InstructionBlockCount());
  RpoNumber empty_deconstruct_frame_return_block = RpoNumber::Invalid();
  int32_t empty_deconstruct_frame_return_size;
  RpoNumber empty_no_deconstruct_frame_return_block = RpoNumber::Invalid();
  int32_t empty_no_deconstruct_frame_return_size;
  GapJumpRecord record(local_zone);

  // Iterate over the blocks forward, pushing the blocks onto the stack.
  for (auto const instruction_block : code->instruction_blocks()) {
    RpoNumber current = instruction_block->rpo_number();
    state.PushIfUnvisited(current);

    // Process the stack, which implements DFS through empty blocks.
    while (!state.stack.empty()) {
      InstructionBlock* block = code->InstructionBlockAt(state.stack.top());
      // Process the instructions in a block up to a non-empty instruction.
      TRACE("jt [%d] B%d\n", static_cast<int>(stack.size()),
            block->rpo_number().ToInt());
      RpoNumber fw = block->rpo_number();
      for (int i = block->code_start(); i < block->code_end(); ++i) {
        Instruction* instr = code->InstructionAt(i);
        if (!instr->AreMovesRedundant()) {
          TRACE("  parallel move");
          // Can't skip instructions with non-redundant moves, except when we
          // can forward to a block with identical gap moves.
          if (instr->arch_opcode() == kArchJmp) {
            TRACE(" jmp");
            RpoNumber forward_to;
            if ((frame_at_start || !(block->must_deconstruct_frame() ||
                                     block->must_construct_frame())) &&
                record.CanForwardGapJump(instr, block->rpo_number(),
                                         code->InputRpo(instr, 0),
                                         &forward_to)) {
              DCHECK(forward_to.IsValid());
              fw = forward_to;
              TRACE("\n  merge B%d into B%d", block->rpo_number().ToInt(),
                    forward_to.ToInt());
            }
          }
          TRACE("\n");
        } else if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
          // Can't skip instructions with flags continuations.
          TRACE("  flags\n");
        } else if (instr->IsNop()) {
          // Skip nops.
          TRACE("  nop\n");
          continue;
        } else if (instr->arch_opcode() == kArchJmp) {
          // Try to forward the jump instruction.
          TRACE("  jmp\n");
          // If this block deconstructs the frame, we can't forward it.
          // TODO(mtrofin): we can still forward if we end up building
          // the frame at start. So we should move the decision of whether
          // to build a frame or not into the register allocator, and trickle
          // it here and to the code generator.
          if (frame_at_start || !(block->must_deconstruct_frame() ||
                                  block->must_construct_frame())) {
            fw = code->InputRpo(instr, 0);
          }
        } else if (instr->IsRet()) {
          TRACE("  ret\n");
          CHECK_IMPLIES(block->must_construct_frame(),
                        block->must_deconstruct_frame());
          // Only handle returns with immediate/constant operands, since
          // they must always be the same for all returns in a function.
          // Dynamic return values might use different registers at
          // different return sites and therefore cannot be shared.
          if (instr->InputAt(0)->IsImmediate()) {
            int32_t return_size =
                ImmediateOperand::cast(instr->InputAt(0))->inline_int32_value();
            // Instructions can be shared only for blocks that share
            // the same |must_deconstruct_frame| attribute.
            if (block->must_deconstruct_frame()) {
              if (empty_deconstruct_frame_return_block ==
                  RpoNumber::Invalid()) {
                empty_deconstruct_frame_return_block = block->rpo_number();
                empty_deconstruct_frame_return_size = return_size;
              } else if (empty_deconstruct_frame_return_size == return_size) {
                fw = empty_deconstruct_frame_return_block;
                block->clear_must_deconstruct_frame();
              }
            } else {
              if (empty_no_deconstruct_frame_return_block ==
                  RpoNumber::Invalid()) {
                empty_no_deconstruct_frame_return_block = block->rpo_number();
                empty_no_deconstruct_frame_return_size = return_size;
              } else if (empty_no_deconstruct_frame_return_size ==
                         return_size) {
                fw = empty_no_deconstruct_frame_return_block;
              }
            }
          }
        } else {
          // Can't skip other instructions.
          TRACE("  other\n");
        }
        break;
      }
      state.Forward(fw);
    }
  }

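  // By now, every block must have been assigned a valid forwarding target.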
#ifdef DEBUG
  for (RpoNumber num : *result) {
    DCHECK(num.IsValid());
  }
#endif

  if (v8_flags.trace_turbo_jt) {
    for (int i = 0; i < static_cast<int>(result->size()); i++) {
      TRACE("B%d ", i);
      int to = (*result)[i].ToInt();
      if (i != to) {
        TRACE("-> B%d\n", to);
      } else {
        TRACE("\n");
      }
    }
  }

  return state.forwarded;
}

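// Applies the forwarding computed by ComputeForwarding: jumps and returns in
// skipped blocks are overwritten with nops, assembly-order numbers are
// renumbered to stay consecutive, and RPO immediates are patched to point at
// the forwarded targets.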
void JumpThreading::ApplyForwarding(Zone* local_zone,
                                    ZoneVector<RpoNumber> const& result,
                                    InstructionSequence* code) {
  if (!v8_flags.turbo_jt) return;

  // Skip empty blocks except for the first block.
  int ao = 0;
  for (auto const block : code->ao_blocks()) {
    RpoNumber block_rpo = block->rpo_number();
    int block_num = block_rpo.ToInt();
    RpoNumber result_rpo = result[block_num];
    bool skip = block_rpo != RpoNumber::FromInt(0) && result_rpo != block_rpo;

    if (result_rpo != block_rpo) {
      // We need the handler and switch target information to be propagated, so
      // that branch targets are annotated as necessary for control flow
      // integrity checks (when enabled).
      if (code->InstructionBlockAt(block_rpo)->IsHandler()) {
        code->InstructionBlockAt(result_rpo)->MarkHandler();
      }
      if (code->InstructionBlockAt(block_rpo)->IsSwitchTarget()) {
        code->InstructionBlockAt(result_rpo)->set_switch_target(true);
      }
    }

    if (skip) {
      for (int instr_idx = block->code_start(); instr_idx < block->code_end();
           ++instr_idx) {
        Instruction* instr = code->InstructionAt(instr_idx);
        DCHECK_NE(FlagsModeField::decode(instr->opcode()), kFlags_branch);
        if (instr->arch_opcode() == kArchJmp ||
            instr->arch_opcode() == kArchRet) {
          // Overwrite a redundant jump with a nop.
          TRACE("jt-fw nop @%d\n", instr_idx);
          instr->OverwriteWithNop();
          // Eliminate all the ParallelMoves.
          for (int i = Instruction::FIRST_GAP_POSITION;
               i <= Instruction::LAST_GAP_POSITION; i++) {
            Instruction::GapPosition pos =
                static_cast<Instruction::GapPosition>(i);
            ParallelMove* instr_move = instr->GetParallelMove(pos);
            if (instr_move != nullptr) {
              instr_move->Eliminate();
            }
          }
          // If this block was marked as a handler or a switch target, it can
          // be unmarked now.
          code->InstructionBlockAt(block_rpo)->UnmarkHandler();
          code->InstructionBlockAt(block_rpo)->set_switch_target(false);
          code->InstructionBlockAt(block_rpo)->set_omitted_by_jump_threading();
        }
      }
    }

    // Renumber the blocks so that IsNextInAssemblyOrder() will return true,
    // even if there are skipped blocks in-between.
    block->set_ao_number(RpoNumber::FromInt(ao));
    if (!skip) ao++;
  }

  // Patch RPO immediates.
  InstructionSequence::RpoImmediates& rpo_immediates = code->rpo_immediates();
  for (size_t i = 0; i < rpo_immediates.size(); i++) {
    RpoNumber rpo = rpo_immediates[i];
    if (rpo.IsValid()) {
      RpoNumber fw = result[rpo.ToInt()];
      if (fw != rpo) rpo_immediates[i] = fw;
    }
  }
}

#undef TRACE

}  // namespace compiler
}  // namespace internal
}  // namespace v8