code-generator.h
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_BACKEND_CODE_GENERATOR_H_
#define V8_COMPILER_BACKEND_CODE_GENERATOR_H_

#include <memory>
#include <optional>

#include "src/compiler/osr.h"

#if V8_ENABLE_WEBASSEMBLY
#endif  // V8_ENABLE_WEBASSEMBLY

namespace v8::internal::compiler {

// Forward declarations.
class DeoptimizationExit;
class FrameAccessState;
class Linkage;
class OutOfLineCode;

class InstructionOperandIterator {
 public:
  InstructionOperandIterator(Instruction* instr, size_t pos)
      : instr_(instr), pos_(pos) {}

  Instruction* instruction() const { return instr_; }

 private:
  Instruction* instr_;
  size_t pos_;
};

// These structs hold pc offsets for generated instructions and are only used
// when tracing for turbolizer is enabled.

// Generates native code for a sequence of instructions.
class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
 public:
  explicit CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
                         InstructionSequence* instructions,
                         OptimizedCompilationInfo* info, Isolate* isolate,
                         std::optional<OsrHelper> osr_helper,
                         int start_source_position,
                         JumpOptimizationInfo* jump_opt,
                         const AssemblerOptions& options, Builtin builtin,
                         size_t max_unoptimized_frame_height,
                         size_t max_pushed_argument_count,
                         const char* debug_name = nullptr);

  // Generate native code. After calling AssembleCode, call FinalizeCode to
  // produce the actual code object. If an error occurs during either phase,
  // FinalizeCode returns an empty MaybeHandle.
  void AssembleCode();  // Does not need to run on main thread.
  MaybeHandle<Code> FinalizeCode();
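  //
  // Illustrative usage sketch (not verbatim V8 code): a driver that owns a
  // fully constructed CodeGenerator `gen` would typically run
  //
  //   gen.AssembleCode();                 // may run off the main thread
  //   MaybeHandle<Code> maybe_code = gen.FinalizeCode();
  //   Handle<Code> code;
  //   if (!maybe_code.ToHandle(&code)) {
  //     // Bail out of optimization; lower-tier code remains usable.
  //   }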

#if V8_ENABLE_WEBASSEMBLY
  base::OwnedVector<uint8_t> GenerateWasmDeoptimizationData();
#endif

  base::OwnedVector<uint8_t> GetSourcePositionTable();
  base::OwnedVector<uint8_t> GetProtectedInstructionsData();

  InstructionSequence* instructions() const { return instructions_; }
  FrameAccessState* frame_access_state() const { return frame_access_state_; }
  const Frame* frame() const { return frame_access_state_->frame(); }
  Isolate* isolate() const { return isolate_; }
  Linkage* linkage() const { return linkage_; }

  Label* GetLabel(RpoNumber rpo) { return &labels_[rpo.ToSize()]; }

  void RecordProtectedInstruction(uint32_t instr_offset);

  SourcePosition start_source_position() const {
    return start_source_position_;
  }

  void AssembleSourcePosition(Instruction* instr);
  void AssembleSourcePosition(SourcePosition source_position);

  // Record a safepoint with the given pointer map. When pc_offset is 0, then
  // the current pc is used to define the safepoint. Otherwise the provided
  // pc_offset is used.
  void RecordSafepoint(ReferenceMap* references, int pc_offset = 0);
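  //
  // Illustrative sketch (not verbatim V8 code), assuming the instruction
  // carries a ReferenceMap and `saved_pc` is an offset remembered earlier:
  //
  //   RecordSafepoint(instr->reference_map());            // at the current pc
  //   RecordSafepoint(instr->reference_map(), saved_pc);  // at a saved offset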

  Zone* zone() const { return zone_; }
  MacroAssembler* masm() { return &masm_; }
  SafepointTableBuilder* safepoint_table_builder() { return &safepoints_; }

  const ZoneVector<int>& block_starts() const { return block_starts_; }
  const ZoneVector<TurbolizerInstructionStartInfo>& instr_starts() const {
    return instr_starts_;
  }

  const TurbolizerCodeOffsetsInfo& offsets_info() const {
    return offsets_info_;
  }

#if V8_ENABLE_WEBASSEMBLY
  bool IsWasm() const { return info()->IsWasm(); }
#endif

  static constexpr int kBinarySearchSwitchMinimalCases = 4;

  // Returns true if an offset should be applied to the given stack check.
  // There are two reasons that this could happen:
  // 1. The optimized frame is smaller than the corresponding deoptimized
  //    frames and an offset must be applied in order to be able to deopt
  //    safely.
  // 2. The current function pushes a large number of arguments to the stack.
  //    These are not accounted for by the initial frame setup.
  bool ShouldApplyOffsetToStackCheck(Instruction* instr, uint32_t* offset);
  uint32_t GetStackCheckOffset();
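  //
  // Illustrative sketch (not verbatim V8 code): a backend assembling a
  // stack-limit check could consult these helpers roughly as follows:
  //
  //   uint32_t offset = 0;
  //   if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
  //     // Compare sp against (stack limit + offset) instead of the raw
  //     // limit, so deopted frames and pushed arguments still fit.
  //   }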

  CodeKind code_kind() const { return info_->code_kind(); }

 private:
  SafepointTableBuilder* safepoints() { return &safepoints_; }
  OptimizedCompilationInfo* info() const { return info_; }
  OsrHelper* osr_helper() { return &(*osr_helper_); }

  // Create the FrameAccessState object. The Frame is immutable from here on.
  void CreateFrameAccessState(Frame* frame);

  // Architecture-specific frame finalization.
  void FinishFrame(Frame* frame);

  // Checks if {block} will appear directly after {current_block_} when
  // assembling code, in which case a fall-through can be used.
  bool IsNextInAssemblyOrder(RpoNumber block) const;
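  //
  // Illustrative sketch (not verbatim V8 code): a block-terminating jump can
  // be elided when the target is next in assembly order, e.g.
  //
  //   if (!IsNextInAssemblyOrder(target)) {
  //     // emit an architecture-specific unconditional jump to
  //     // GetLabel(target); otherwise simply fall through.
  //   }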

  // Check if a heap object can be materialized by loading from a heap root,
  // which is cheaper on some platforms than materializing the actual heap
  // object constant.
  bool IsMaterializableFromRoot(Handle<HeapObject> object,
                                RootIndex* index_return);

  enum CodeGenResult { kSuccess, kTooManyDeoptimizationBailouts };

  // Assemble instructions for the specified block.
  CodeGenResult AssembleBlock(const InstructionBlock* block);

  // Assemble code for the specified instruction.
  CodeGenResult AssembleInstruction(int instruction_index,
                                    const InstructionBlock* block);
  void AssembleGaps(Instruction* instr);

  // Computes branch info from the given instruction. Returns a valid rpo
  // number if the branch is redundant; the returned rpo number points to the
  // target basic block.
  RpoNumber ComputeBranchInfo(BranchInfo* branch, FlagsCondition condition,
                              Instruction* instr);

  // Returns true if an instruction is a tail call that needs to adjust the
  // stack pointer before execution. The stack slot index of the empty slot
  // above the adjusted stack pointer is returned in |slot|.
  bool GetSlotAboveSPBeforeTailCall(Instruction* instr, int* slot);

  // Determines how to call helper stubs depending on the code kind.
  StubCallMode DetermineStubCallMode() const;

  CodeGenResult AssembleDeoptimizerCall(DeoptimizationExit* exit);

  DeoptimizationExit* BuildTranslation(Instruction* instr, int pc_offset,
                                       size_t frame_state_offset,
                                       size_t immediate_args_count,
                                       OutputFrameStateCombine state_combine);

  // ===========================================================================
  // ============= Architecture-specific code generation methods. =============
  // ===========================================================================

  CodeGenResult AssembleArchInstruction(Instruction* instr);
  void AssembleArchJump(RpoNumber target);
  void AssembleArchJumpRegardlessOfAssemblyOrder(RpoNumber target);
  void AssembleArchBranch(Instruction* instr, BranchInfo* branch);
  void AssembleArchConditionalBranch(Instruction* instr, BranchInfo* branch);

  // Generates special branch for deoptimization condition.
  void AssembleArchDeoptBranch(Instruction* instr, BranchInfo* branch);

  void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
  void AssembleArchConditionalBoolean(Instruction* instr);
  void AssembleArchSelect(Instruction* instr, FlagsCondition condition);
#if V8_ENABLE_WEBASSEMBLY
  void AssembleArchTrap(Instruction* instr, FlagsCondition condition);
#endif  // V8_ENABLE_WEBASSEMBLY
#if V8_TARGET_ARCH_X64
  void AssembleArchBinarySearchSwitchRange(
      Register input, RpoNumber def_block, std::pair<int32_t, Label*>* begin,
      std::pair<int32_t, Label*>* end, std::optional<int32_t>& last_cmp_value);
#else
  void AssembleArchBinarySearchSwitchRange(Register input, RpoNumber def_block,
                                           std::pair<int32_t, Label*>* begin,
                                           std::pair<int32_t, Label*>* end);
#endif  // V8_TARGET_ARCH_X64
  void AssembleArchBinarySearchSwitch(Instruction* instr);
  void AssembleArchTableSwitch(Instruction* instr);
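  //
  // Illustrative sketch (not verbatim V8 code) of the binary-search switch:
  // ranges with fewer than kBinarySearchSwitchMinimalCases cases are emitted
  // as a linear compare-and-branch sequence; larger ranges are split at the
  // midpoint and each half is handled recursively, so dispatch takes
  // O(log n) comparisons. Roughly:
  //
  //   void EmitRange(std::pair<int32_t, Label*>* begin,
  //                  std::pair<int32_t, Label*>* end) {
  //     if (end - begin < kBinarySearchSwitchMinimalCases) {
  //       for (auto* p = begin; p != end; ++p) {
  //         // compare the input with p->first; branch to p->second on match
  //       }
  //       return;  // fall through to the default block
  //     }
  //     auto* mid = begin + (end - begin) / 2;
  //     // if input < mid->first: handle [begin, mid), else handle [mid, end)
  //     EmitRange(begin, mid);
  //     EmitRange(mid, end);
  //   }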

  // Generates code to check whether the {kJavaScriptCallCodeStartRegister}
  // contains the expected pointer to the start of the instruction stream.
  void AssembleCodeStartRegisterCheck();

#ifdef V8_ENABLE_LEAPTIERING
  // Generates code to check whether the {kJavaScriptCallDispatchHandleRegister}
  // references a valid entry compatible with this code.
  void AssembleDispatchHandleRegisterCheck();
#endif  // V8_ENABLE_LEAPTIERING

  // When entering code that is marked for deoptimization, rather than
  // continuing with its execution, we jump to lazily compiled code. We need
  // to do this because this code has already been deoptimized and needs to be
  // unlinked from the JS functions referring to it.
  // TODO(olivf, 42204201) Rename this to AssertNotDeoptimized once
  // non-leaptiering is removed from the codebase.
  void BailoutIfDeoptimized();

  // Assembles a NOP instruction for lazy deoptimization. This site will later
  // be patched with a jump to the deoptimization trampoline.
  void AssemblePlaceHolderForLazyDeopt(Instruction* instr);

  // Generates an architecture-specific, descriptor-specific prologue
  // to set up a stack frame.
  void AssembleConstructFrame();

  // Generates an architecture-specific, descriptor-specific return sequence
  // to tear down a stack frame.
  void AssembleReturn(InstructionOperand* pop);

  void AssembleDeconstructFrame();

  // Generates code to manipulate the stack in preparation for a tail call.
  void AssemblePrepareTailCall();

  enum PushTypeFlag {
    kImmediatePush = 0x1,
    kRegisterPush = 0x2,
    kStackSlotPush = 0x4,
    kScalarPush = kRegisterPush | kStackSlotPush
  };

  using PushTypeFlags = base::Flags<PushTypeFlag>;

  static bool IsValidPush(InstructionOperand source, PushTypeFlags push_type);

  // Generate a list of moves from an instruction that are candidates to be
  // turned into push instructions on platforms that support them. In general,
  // the list of push candidates consists of moves to a set of contiguous
  // destination InstructionOperand locations on the stack that don't clobber
  // values that are needed to resolve the gap or use values generated by the
  // gap, i.e. moves that can be hoisted together before the actual gap and
  // assembled together.
  static void GetPushCompatibleMoves(Instruction* instr,
                                     PushTypeFlags push_type,
                                     ZoneVector<MoveOperands*>* pushes);

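  // Illustrative sketch (not verbatim V8 code): before a tail call, gap moves
  // that fill the outgoing argument slots contiguously, e.g.
  //
  //   some_register    -> [stack slot 0]
  //   another_register -> [stack slot 1]
  //
  // can be collected by GetPushCompatibleMoves and assembled as a pair of
  // pushes instead of individual stores, provided no source is clobbered by
  // the remaining gap moves. (Register names and slot indices are purely
  // illustrative.)
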
  class MoveType {
   public:
    enum Type {
      kRegisterToRegister,
      kRegisterToStack,
      kStackToRegister,
      kStackToStack,
      kConstantToRegister,
      kConstantToStack
    };

    // Detect what type of move or swap needs to be performed. Note that these
    // functions do not take into account the representation (Tagged, FP,
    // etc.).

    static Type InferMove(InstructionOperand* source,
                          InstructionOperand* destination);
    static Type InferSwap(InstructionOperand* source,
                          InstructionOperand* destination);
  };
  // Called before a tail call |instr|'s gap moves are assembled and allows
  // gap-specific pre-processing, e.g. adjustment of the sp for tail calls that
  // need it before gap moves or conversion of certain gap moves into pushes.
  void AssembleTailCallBeforeGap(Instruction* instr,
                                 int first_unused_stack_slot);
  // Called after a tail call |instr|'s gap moves are assembled and allows
  // gap-specific post-processing, e.g. adjustment of the sp for tail calls
  // that need it after gap moves.
  void AssembleTailCallAfterGap(Instruction* instr,
                                int first_unused_stack_slot);
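  //
  // Illustrative ordering sketch (not verbatim V8 code) for a tail call that
  // needs stack adjustment: AssembleTailCallBeforeGap runs first, then the
  // gap moves (possibly converted into pushes), then AssembleTailCallAfterGap,
  // and finally the tail call itself is emitted as the instruction proper.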

  void FinishCode();
  void MaybeEmitOutOfLineConstantPool();

  void IncrementStackAccessCounter(InstructionOperand* source,
                                   InstructionOperand* destination);

  // ===========================================================================
  // ============== Architecture-specific gap resolver methods. ===============
  // ===========================================================================

  // Interface used by the gap resolver to emit moves and swaps.
  void AssembleMove(InstructionOperand* source,
                    InstructionOperand* destination) final;
  void AssembleSwap(InstructionOperand* source,
                    InstructionOperand* destination) final;
  AllocatedOperand Push(InstructionOperand* src) final;
  void Pop(InstructionOperand* src, MachineRepresentation rep) final;
  void PopTempStackSlots() final;
  void MoveToTempLocation(InstructionOperand* src,
                          MachineRepresentation rep) final;
  void MoveTempLocationTo(InstructionOperand* dst,
                          MachineRepresentation rep) final;
  void SetPendingMove(MoveOperands* move) final;

  // ===========================================================================
  // =================== Jump table construction methods. =====================
  // ===========================================================================

  class JumpTable;
  // Adds a jump table that is emitted after the actual code. Returns label
  // pointing to the beginning of the table. {targets} is assumed to be static
  // or zone allocated.
  Label* AddJumpTable(base::Vector<Label*> targets);
  // Emits a jump table.
  void AssembleJumpTable(base::Vector<Label*> targets);
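  //
  // Illustrative sketch (not verbatim V8 code): a table switch could collect
  // one GetLabel(rpo) per case into a zone-allocated vector and register it:
  //
  //   Label* table_start = AddJumpTable(targets);
  //   // The architecture-specific code then indexes into the table relative
  //   // to table_start; the table body itself is emitted later through
  //   // AssembleJumpTable.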

  // ===========================================================================
  // ================== Deoptimization table construction. ====================
  // ===========================================================================

  void RecordCallPosition(Instruction* instr);
  void RecordDeoptInfo(Instruction* instr, int pc_offset);
  Handle<DeoptimizationData> GenerateDeoptimizationData();
  int DefineProtectedDeoptimizationLiteral(
      IndirectHandle<TrustedObject> object);
  int DefineDeoptimizationLiteral(DeoptimizationLiteral literal);
  bool HasProtectedDeoptimizationLiteral(
      IndirectHandle<TrustedObject> object) const;
  DeoptimizationEntry const& GetDeoptimizationEntry(Instruction* instr,
                                                    size_t frame_state_offset);

  void BuildTranslationForFrameStateDescriptor(
      FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
      OutputFrameStateCombine state_combine);
  void TranslateStateValueDescriptor(StateValueDescriptor* desc,
                                     StateValueList* nested,
                                     InstructionOperandIterator* iter);
  void TranslateFrameStateDescriptorOperands(FrameStateDescriptor* desc,
                                             InstructionOperandIterator* iter);
  void AddTranslationForOperand(Instruction* instr, InstructionOperand* op,
                                MachineType type);

  void PrepareForDeoptimizationExits(ZoneDeque<DeoptimizationExit*>* exits);
  DeoptimizationExit* AddDeoptimizationExit(Instruction* instr,
                                            size_t frame_state_offset,
                                            size_t immediate_args_count);

  // ===========================================================================

  struct HandlerInfo {
    // {handler} is nullptr if the Call should lazy deopt on exceptions.
    Label* handler;
    int pc_offset;
  };

  friend class OutOfLineCode;
  friend class CodeGeneratorTester;

  int next_deoptimization_id_ = 0;
  int deopt_exit_start_offset_ = 0;
  int eager_deopt_count_ = 0;
  int lazy_deopt_count_ = 0;
  ZoneDeque<DeoptimizationExit*> deoptimization_exits_;
  ZoneDeque<IndirectHandle<TrustedObject>> protected_deoptimization_literals_;
  ZoneDeque<DeoptimizationLiteral> deoptimization_literals_;
  size_t inlined_function_count_ = 0;
  FrameTranslationBuilder translations_;
  int handler_table_offset_ = 0;

  // Deoptimization exits must be as small as possible, since their count grows
  // with function size. {jump_deoptimization_entry_labels_} is an optimization
  // to that effect, which extracts the (potentially large) instruction
  // sequence for the final jump to the deoptimization entry into a single spot
  // per InstructionStream object. All deopt exits can then near-call to this
  // label. Note: not used on all architectures.
  Label jump_deoptimization_entry_labels_[kDeoptimizeKindCount];

  // The maximal combined height of all frames produced upon deoptimization,
  // and the maximal number of pushed arguments for function calls. Applied as
  // an offset to the first stack check of an optimized function.
  const size_t max_unoptimized_frame_height_;
  const size_t max_pushed_argument_count_;

  // The number of incoming parameters for code using JS linkage (i.e.
  // JavaScript functions). Only computed during AssembleCode.
  uint16_t parameter_count_ = 0;

  // A kArchCallCFunction instruction can be reached either on its own:
  //   kArchCallCFunction;
  // or wrapped in caller-register save/restore instructions:
  //   kArchSaveCallerRegisters;
  //   kArchCallCFunction;
  //   kArchRestoreCallerRegisters;
  // The boolean is used to distinguish the two cases. In the latter case, we
  // also need to decide if FP registers need to be saved, which is controlled
  // by fp_mode_.
  bool caller_registers_saved_;
  SaveFPRegsMode fp_mode_;

  std::optional<OsrHelper> osr_helper_;

#if V8_ENABLE_WEBASSEMBLY
  ZoneVector<trap_handler::ProtectedInstructionData> protected_instructions_;
#endif  // V8_ENABLE_WEBASSEMBLY

  const char* debug_name_ = nullptr;
};

}  // namespace v8::internal::compiler

#endif  // V8_COMPILER_BACKEND_CODE_GENERATOR_H_