v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
maglev-code-generator.cc
1// Copyright 2022 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
7#include <algorithm>
8
9#include "src/base/hashmap.h"
10#include "src/base/logging.h"
16#include "src/codegen/reglist.h"
19#include "src/common/globals.h"
25#include "src/flags/flags.h"
41
42namespace v8 {
43namespace internal {
44namespace maglev {
45
46#define __ masm()->
47
48namespace {
49
50template <typename RegisterT>
51struct RegisterTHelper;
52template <>
53struct RegisterTHelper<Register> {
54 static constexpr RegList kAllocatableRegisters =
55 MaglevAssembler::GetAllocatableRegisters();
56};
57template <>
58struct RegisterTHelper<DoubleRegister> {
59 static constexpr DoubleRegList kAllocatableRegisters =
60 MaglevAssembler::GetAllocatableDoubleRegisters();
61};
62
63enum NeedsDecompression { kDoesNotNeedDecompression, kNeedsDecompression };
64
65// The ParallelMoveResolver is used to resolve multiple moves between registers
66// and stack slots that are intended to happen, semantically, in parallel. It
67// finds chains of moves that would clobber each other, and emits them in a non-
68// clobbering order; it also detects cycles of moves and breaks them by moving
69// to a temporary.
70//
71// For example, given the moves:
72//
73// r1 -> r2
74// r2 -> r3
75// r3 -> r4
76// r4 -> r1
77// r4 -> r5
78//
79// These can be represented as a move graph
80//
81// r2 → r3
82// ↑ ↓
83// r1 ← r4 → r5
84//
85// and safely emitted (breaking the cycle with a temporary) as
86//
87// r1 -> tmp
88// r4 -> r1
89// r4 -> r5
90// r3 -> r4
91// r2 -> r3
92// tmp -> r2
93//
94// It additionally keeps track of materialising moves, which don't have a stack
95// slot but rather materialise a value from, e.g., a constant. These can safely
96// be emitted at the end, once all the parallel moves are done.
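// Illustrative sketch (not part of this file): the cycle-breaking strategy
// described above, reduced to plain ints standing in for registers and a
// printf in place of real move emission. All names here (StartChain,
// ContinueChain, PopTargets, EmitMove, kScratch) are invented for the sketch
// and are not V8 APIs.
//
//   #include <cstdio>
//   #include <map>
//   #include <vector>
//
//   constexpr int kScratch = -1;  // plays the role of the scratch register
//
//   void EmitMove(int target, int source) {
//     std::printf("%d <- %d\n", target, source);
//   }
//
//   // Removes and returns all outgoing edges of `source` in the move graph.
//   std::vector<int> PopTargets(int source, std::multimap<int, int>& targets_of) {
//     auto range = targets_of.equal_range(source);
//     std::vector<int> targets;
//     for (auto it = range.first; it != range.second; ++it) {
//       targets.push_back(it->second);
//     }
//     targets_of.erase(source);
//     return targets;
//   }
//
//   // Follows the chain through `source`; returns true if it loops back to
//   // `chain_start`, i.e. the chain is a cycle.
//   bool ContinueChain(int chain_start, int source,
//                      std::multimap<int, int>& targets_of) {
//     if (source == chain_start) {
//       EmitMove(kScratch, chain_start);  // break the cycle via the scratch
//       return true;
//     }
//     std::vector<int> targets = PopTargets(source, targets_of);
//     bool has_cycle = false;
//     for (int t : targets) has_cycle |= ContinueChain(chain_start, t, targets_of);
//     // `source` has not been clobbered yet, so its outgoing moves go now.
//     for (int t : targets) EmitMove(t, source);
//     return has_cycle;
//   }
//
//   void StartChain(int source, std::multimap<int, int>& targets_of) {
//     std::vector<int> targets = PopTargets(source, targets_of);
//     bool has_cycle = false;
//     for (int t : targets) has_cycle |= ContinueChain(source, t, targets_of);
//     // If the chain looped back here, the original value of `source` now
//     // lives in the scratch location.
//     for (int t : targets) EmitMove(t, has_cycle ? kScratch : source);
//   }
//
//   int main() {
//     // The example above: r1->r2, r2->r3, r3->r4, r4->r1, r4->r5.
//     std::multimap<int, int> targets_of{{1, 2}, {2, 3}, {3, 4}, {4, 1}, {4, 5}};
//     for (int reg = 1; reg <= 5; ++reg) StartChain(reg, targets_of);
//     // Prints: -1 <- 1, 1 <- 4, 5 <- 4, 4 <- 3, 3 <- 2, 2 <- -1,
//     // matching the sequence shown in the comment above.
//   }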
97template <typename RegisterT, bool DecompressIfNeeded>
98class ParallelMoveResolver {
99 static constexpr auto kAllocatableRegistersT =
100 RegisterTHelper<RegisterT>::kAllocatableRegisters;
101 static_assert(!DecompressIfNeeded || std::is_same_v<Register, RegisterT>);
102 static_assert(!DecompressIfNeeded || COMPRESS_POINTERS_BOOL);
103
104 public:
105 explicit ParallelMoveResolver(MaglevAssembler* masm)
106 : masm_(masm), scratch_(RegisterT::no_reg()) {}
107
108 void RecordMove(ValueNode* source_node, compiler::InstructionOperand source,
109 compiler::AllocatedOperand target,
110 bool target_needs_to_be_decompressed) {
111 if (target.IsAnyRegister()) {
112 RecordMoveToRegister(source_node, source, ToRegisterT<RegisterT>(target),
113 target_needs_to_be_decompressed);
114 } else {
115 RecordMoveToStackSlot(source_node, source,
116 masm_->GetFramePointerOffsetForStackSlot(target),
117 target_needs_to_be_decompressed);
118 }
119 }
120
121 void RecordMove(ValueNode* source_node, compiler::InstructionOperand source,
122 RegisterT target_reg,
123 NeedsDecompression target_needs_to_be_decompressed) {
124 RecordMoveToRegister(source_node, source, target_reg,
125 target_needs_to_be_decompressed);
126 }
127
128 void EmitMoves(RegisterT scratch) {
129 DCHECK(!scratch_.is_valid());
130 scratch_ = scratch;
131 for (RegisterT reg : kAllocatableRegistersT) {
132 StartEmitMoveChain(reg);
133 ValueNode* materializing_register_move =
134 materializing_register_moves_[reg.code()];
135 if (materializing_register_move) {
136 materializing_register_move->LoadToRegister(masm_, reg);
137 }
138 }
139 // Emit stack moves until the move set is empty -- each EmitMoveChain will
140 // pop entries off the moves_from_stack_slot map so we can't use a simple
141 // iteration here.
142 while (!moves_from_stack_slot_.empty()) {
143 StartEmitMoveChain(moves_from_stack_slot_.begin()->first);
144 }
145 for (auto [stack_slot, node] : materializing_stack_slot_moves_) {
146 node->LoadToRegister(masm_, scratch_);
147 __ Move(StackSlot{stack_slot}, scratch_);
148 }
149 }
150
151 ParallelMoveResolver(ParallelMoveResolver&&) = delete;
152 ParallelMoveResolver operator=(ParallelMoveResolver&&) = delete;
153 ParallelMoveResolver(const ParallelMoveResolver&) = delete;
154 ParallelMoveResolver operator=(const ParallelMoveResolver&) = delete;
155
156 private:
157 // For the GapMoveTargets::needs_decompression member when DecompressIfNeeded
158 // is false.
159 struct DummyNeedsDecompression {
160 // NOLINTNEXTLINE
161 DummyNeedsDecompression(NeedsDecompression) {}
162 };
163
164 // The targets of moves from a source, i.e. the set of outgoing edges for
165 // a node in the move graph.
166 struct GapMoveTargets {
167 base::SmallVector<int32_t, 1> stack_slots = base::SmallVector<int32_t, 1>{};
168 RegListBase<RegisterT> registers;
169
170 // We only need this field for DecompressIfNeeded, otherwise use an empty
171 // dummy value.
172 V8_NO_UNIQUE_ADDRESS
173 std::conditional_t<DecompressIfNeeded, NeedsDecompression,
174 DummyNeedsDecompression>
175 needs_decompression = kDoesNotNeedDecompression;
176
177 GapMoveTargets() = default;
178 GapMoveTargets(GapMoveTargets&&) V8_NOEXCEPT = default;
179 GapMoveTargets& operator=(GapMoveTargets&&) V8_NOEXCEPT = default;
180 GapMoveTargets(const GapMoveTargets&) = delete;
181 GapMoveTargets& operator=(const GapMoveTargets&) = delete;
182
183 bool is_empty() const {
184 return registers.is_empty() && stack_slots.empty();
185 }
186 };
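// Illustrative note (not part of this file): for the example in the class
// comment (r1->r2, r2->r3, r3->r4, r4->r1, r4->r5), RecordMove populates the
// move graph roughly as
//
//   moves_from_register_[r1].registers = {r2}
//   moves_from_register_[r2].registers = {r3}
//   moves_from_register_[r3].registers = {r4}
//   moves_from_register_[r4].registers = {r1, r5}
//
// i.e. each GapMoveTargets value is the outgoing-edge set of one node of the
// move graph; moves whose target is a stack slot land in `stack_slots`.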
187
188#ifdef DEBUG
189 void CheckNoExistingMoveToRegister(RegisterT target_reg) {
190 for (RegisterT reg : kAllocatableRegistersT) {
191 if (moves_from_register_[reg.code()].registers.has(target_reg)) {
192 FATAL("Existing move from %s to %s", RegisterName(reg),
193 RegisterName(target_reg));
194 }
195 }
196 for (auto& [stack_slot, targets] : moves_from_stack_slot_) {
197 if (targets.registers.has(target_reg)) {
198 FATAL("Existing move from stack slot %d to %s", stack_slot,
199 RegisterName(target_reg));
200 }
201 }
202 if (materializing_register_moves_[target_reg.code()] != nullptr) {
203 FATAL("Existing materialization of %p to %s",
204 materializing_register_moves_[target_reg.code()],
205 RegisterName(target_reg));
206 }
207 }
208
209 void CheckNoExistingMoveToStackSlot(int32_t target_slot) {
210 for (RegisterT reg : kAllocatableRegistersT) {
211 auto& stack_slots = moves_from_register_[reg.code()].stack_slots;
212 if (std::any_of(stack_slots.begin(), stack_slots.end(),
213 [&](int32_t slot) { return slot == target_slot; })) {
214 FATAL("Existing move from %s to stack slot %d", RegisterName(reg),
215 target_slot);
216 }
217 }
218 for (auto& [stack_slot, targets] : moves_from_stack_slot_) {
219 auto& stack_slots = targets.stack_slots;
220 if (std::any_of(stack_slots.begin(), stack_slots.end(),
221 [&](int32_t slot) { return slot == target_slot; })) {
222 FATAL("Existing move from stack slot %d to stack slot %d", stack_slot,
223 target_slot);
224 }
225 }
226 for (auto& [stack_slot, node] : materializing_stack_slot_moves_) {
227 if (stack_slot == target_slot) {
228 FATAL("Existing materialization of %p to stack slot %d", node,
229 stack_slot);
230 }
231 }
232 }
233#else
234 void CheckNoExistingMoveToRegister(RegisterT target_reg) {}
235 void CheckNoExistingMoveToStackSlot(int32_t target_slot) {}
236#endif
237
238 void RecordMoveToRegister(ValueNode* node,
239 compiler::InstructionOperand source,
240 RegisterT target_reg,
241 bool target_needs_to_be_decompressed) {
242 // There shouldn't have been another move to this register already.
243 CheckNoExistingMoveToRegister(target_reg);
244
245 NeedsDecompression needs_decompression = kDoesNotNeedDecompression;
246 if constexpr (DecompressIfNeeded) {
247 if (target_needs_to_be_decompressed &&
248 !node->decompresses_tagged_result()) {
249 needs_decompression = kNeedsDecompression;
250 }
251 } else {
252 DCHECK_IMPLIES(target_needs_to_be_decompressed,
253 node->decompresses_tagged_result());
254 }
255
256 GapMoveTargets* targets;
257 if (source.IsAnyRegister()) {
258 RegisterT source_reg = ToRegisterT<RegisterT>(source);
259 if (target_reg == source_reg) {
260 // We should never have a register aliasing case that needs
261 // decompression, since this path is only used by exception phis and
262 // they have no reg->reg moves.
263 DCHECK_EQ(needs_decompression, kDoesNotNeedDecompression);
264 return;
265 }
266 targets = &moves_from_register_[source_reg.code()];
267 } else if (source.IsAnyStackSlot()) {
268 int32_t source_slot = masm_->GetFramePointerOffsetForStackSlot(
269 compiler::AllocatedOperand::cast(source));
270 targets = &moves_from_stack_slot_[source_slot];
271 } else {
272 DCHECK(source.IsConstant());
273 DCHECK(IsConstantNode(node->opcode()));
274 materializing_register_moves_[target_reg.code()] = node;
275 // No need to update `targets.needs_decompression`, materialization is
276 // always decompressed.
277 return;
278 }
279
280 targets->registers.set(target_reg);
281 if (needs_decompression == kNeedsDecompression) {
282 targets->needs_decompression = kNeedsDecompression;
283 }
284 }
285
286 void RecordMoveToStackSlot(ValueNode* node,
287 compiler::InstructionOperand source,
288 int32_t target_slot,
289 bool target_needs_to_be_decompressed) {
290 // There shouldn't have been another move to this stack slot already.
291 CheckNoExistingMoveToStackSlot(target_slot);
292
293 NeedsDecompression needs_decompression = kDoesNotNeedDecompression;
294 if constexpr (DecompressIfNeeded) {
295 if (target_needs_to_be_decompressed &&
296 !node->decompresses_tagged_result()) {
297 needs_decompression = kNeedsDecompression;
298 }
299 } else {
300 DCHECK_IMPLIES(target_needs_to_be_decompressed,
301 node->decompresses_tagged_result());
302 }
303
304 GapMoveTargets* targets;
305 if (source.IsAnyRegister()) {
306 RegisterT source_reg = ToRegisterT<RegisterT>(source);
307 targets = &moves_from_register_[source_reg.code()];
308 } else if (source.IsAnyStackSlot()) {
309 int32_t source_slot = masm_->GetFramePointerOffsetForStackSlot(
310 compiler::AllocatedOperand::cast(source));
311 if (source_slot == target_slot &&
312 needs_decompression == kDoesNotNeedDecompression) {
313 return;
314 }
315 targets = &moves_from_stack_slot_[source_slot];
316 } else {
317 DCHECK(source.IsConstant());
318 DCHECK(IsConstantNode(node->opcode()));
319 materializing_stack_slot_moves_.emplace_back(target_slot, node);
320 // No need to update `targets.needs_decompression`, materialization is
321 // always decompressed.
322 return;
323 }
324
325 targets->stack_slots.push_back(target_slot);
326 if (needs_decompression == kNeedsDecompression) {
327 targets->needs_decompression = kNeedsDecompression;
328 }
329 }
330
331 // Finds and clears the targets for a given source. In terms of move graph,
332 // this returns and removes all outgoing edges from the source.
333 GapMoveTargets PopTargets(RegisterT source_reg) {
334 return std::exchange(moves_from_register_[source_reg.code()],
335 GapMoveTargets{});
336 }
337 GapMoveTargets PopTargets(int32_t source_slot) {
338 auto handle = moves_from_stack_slot_.extract(source_slot);
339 if (handle.empty()) return {};
340 DCHECK(!handle.mapped().is_empty());
341 return std::move(handle.mapped());
342 }
343
344 // Emit a single move chain starting at the given source (either a register or
345 // a stack slot). This is a destructive operation on the move graph, and
346 // removes the emitted edges from the graph. Subsequent calls with the same
347 // source should emit no code.
348 template <typename SourceT>
349 void StartEmitMoveChain(SourceT source) {
350 DCHECK(!scratch_has_cycle_start_);
351 GapMoveTargets targets = PopTargets(source);
352 if (targets.is_empty()) return;
353
354 // Start recursively emitting the move chain, with this source as the start
355 // of the chain.
356 bool has_cycle = RecursivelyEmitMoveChainTargets(source, targets);
357
358 // Each connected component in the move graph can only have one cycle
359 // (proof: each target can only have one incoming edge, so cycles in the
360 // graph can only have outgoing edges, so there's no way to connect two
361 // cycles). This means that if there's a cycle, the saved value must be the
362 // chain start.
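    // Illustrative note (not part of this file): e.g. once r1 -> r2 -> r1
    // forms a cycle, no further move can re-enter it, because that move would
    // need r1 or r2 as its target and each target has exactly one source.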
363 if (has_cycle) {
364 if (!scratch_has_cycle_start_) {
365 Pop(scratch_);
366 scratch_has_cycle_start_ = true;
367 }
368 EmitMovesFromSource(scratch_, std::move(targets));
369 scratch_has_cycle_start_ = false;
370 __ RecordComment("-- * End of cycle");
371 } else {
372 EmitMovesFromSource(source, std::move(targets));
373 __ RecordComment("-- * Chain emitted with no cycles");
374 }
375 }
376
377 template <typename ChainStartT, typename SourceT>
378 bool ContinueEmitMoveChain(ChainStartT chain_start, SourceT source) {
379 if constexpr (std::is_same_v<ChainStartT, SourceT>) {
380 // If the recursion has returned to the start of the chain, then this must
381 // be a cycle.
382 if (chain_start == source) {
383 __ RecordComment("-- * Cycle");
384 DCHECK(!scratch_has_cycle_start_);
385 if constexpr (std::is_same_v<ChainStartT, int32_t>) {
386 __ Move(scratch_, StackSlot{chain_start});
387 } else {
388 __ Move(scratch_, chain_start);
389 }
390 scratch_has_cycle_start_ = true;
391 return true;
392 }
393 }
394
395 GapMoveTargets targets = PopTargets(source);
396 if (targets.is_empty()) {
397 __ RecordComment("-- * End of chain");
398 return false;
399 }
400
401 bool has_cycle = RecursivelyEmitMoveChainTargets(chain_start, targets);
402
403 EmitMovesFromSource(source, std::move(targets));
404 return has_cycle;
405 }
406
407 // Calls ContinueEmitMoveChain for each target of a source. This is used to
408 // share target visiting code between StartEmitMoveChain and
409 // ContinueEmitMoveChain.
410 template <typename ChainStartT>
411 bool RecursivelyEmitMoveChainTargets(ChainStartT chain_start,
412 GapMoveTargets& targets) {
413 bool has_cycle = false;
414 for (auto target : targets.registers) {
415 has_cycle |= ContinueEmitMoveChain(chain_start, target);
416 }
417 for (int32_t target_slot : targets.stack_slots) {
418 has_cycle |= ContinueEmitMoveChain(chain_start, target_slot);
419 }
420 return has_cycle;
421 }
422
423 void EmitMovesFromSource(RegisterT source_reg, GapMoveTargets&& targets) {
424 DCHECK(moves_from_register_[source_reg.code()].is_empty());
425 if constexpr (DecompressIfNeeded) {
426 // The DecompressIfNeeded clause is redundant with the if-constexpr above,
427 // but otherwise this code cannot be compiled by compilers not yet
428 // implementing CWG2518.
429 static_assert(DecompressIfNeeded && COMPRESS_POINTERS_BOOL);
430
431 if (targets.needs_decompression == kNeedsDecompression) {
432 __ DecompressTagged(source_reg, source_reg);
433 }
434 }
435 for (RegisterT target_reg : targets.registers) {
436 DCHECK(moves_from_register_[target_reg.code()].is_empty());
437 __ Move(target_reg, source_reg);
438 }
439 for (int32_t target_slot : targets.stack_slots) {
440 DCHECK_EQ(moves_from_stack_slot_.find(target_slot),
441 moves_from_stack_slot_.end());
442 __ Move(StackSlot{target_slot}, source_reg);
443 }
444 }
445
446 void EmitMovesFromSource(int32_t source_slot, GapMoveTargets&& targets) {
447 DCHECK_EQ(moves_from_stack_slot_.find(source_slot),
448 moves_from_stack_slot_.end());
449
450 // Cache the slot value on a register.
451 RegisterT register_with_slot_value = RegisterT::no_reg();
452 if (!targets.registers.is_empty()) {
453 // If one of the targets is a register, we can move our value into it and
454 // optimize the moves from this stack slot to always be via that register.
455 register_with_slot_value = targets.registers.PopFirst();
456 } else {
457 DCHECK(!targets.stack_slots.empty());
458 // Otherwise, cache the slot value on the scratch register, clobbering it
459 // if necessary.
460 if (scratch_has_cycle_start_) {
461 Push(scratch_);
462 scratch_has_cycle_start_ = false;
463 }
464 register_with_slot_value = scratch_;
465 }
466 // Now emit moves from that cached register instead of from the stack slot.
467 DCHECK(register_with_slot_value.is_valid());
468 DCHECK(moves_from_register_[register_with_slot_value.code()].is_empty());
469 __ Move(register_with_slot_value, StackSlot{source_slot});
470 // Decompress after the first move, subsequent moves reuse this register so
471 // they're guaranteed to be decompressed.
472 if constexpr (DecompressIfNeeded) {
473 // The DecompressIfNeeded clause is redundant with the if-constexpr above,
474 // but otherwise this code cannot be compiled by compilers not yet
475 // implementing CWG2518.
476 static_assert(DecompressIfNeeded && COMPRESS_POINTERS_BOOL);
477
478 if (targets.needs_decompression == kNeedsDecompression) {
479 __ DecompressTagged(register_with_slot_value, register_with_slot_value);
480 targets.needs_decompression = kDoesNotNeedDecompression;
481 }
482 }
483 EmitMovesFromSource(register_with_slot_value, std::move(targets));
484 }
485
486 void Push(Register reg) { __ Push(reg); }
487 void Push(DoubleRegister reg) { __ PushAll({reg}); }
488 void Pop(Register reg) { __ Pop(reg); }
489 void Pop(DoubleRegister reg) { __ PopAll({reg}); }
490
491 MaglevAssembler* masm() const { return masm_; }
492
493 MaglevAssembler* const masm_;
494 RegisterT scratch_;
495
496 // Keep moves to/from registers and stack slots separate -- there are a fixed
497 // number of registers but an infinite number of stack slots, so the register
498 // moves can be kept in a fixed size array while the stack slot moves need a
499 // map.
500
501 // moves_from_register_[source] = target.
502 std::array<GapMoveTargets, RegisterT::kNumRegisters> moves_from_register_ =
503 {};
504
505 // TODO(victorgomes): Use MaglevAssembler::StackSlot instead of int32_t.
506 // moves_from_stack_slot_[source] = target.
507 std::unordered_map<int32_t, GapMoveTargets> moves_from_stack_slot_;
508
509 // materializing_register_moves[target] = node.
510 std::array<ValueNode*, RegisterT::kNumRegisters>
511 materializing_register_moves_ = {};
512
513 // materializing_stack_slot_moves = {(node,target), ... }.
514 std::vector<std::pair<int32_t, ValueNode*>> materializing_stack_slot_moves_;
515
516 bool scratch_has_cycle_start_ = false;
517};
518
519class ExceptionHandlerTrampolineBuilder {
520 public:
521 static void Build(MaglevAssembler* masm, NodeBase* node) {
522 ExceptionHandlerTrampolineBuilder builder(masm);
523 builder.EmitTrampolineFor(node);
524 }
525
526 private:
527 explicit ExceptionHandlerTrampolineBuilder(MaglevAssembler* masm)
528 : masm_(masm) {}
529
530 struct Move {
531 explicit Move(const ValueLocation& target, ValueNode* source)
532 : target(target), source(source) {}
533 const ValueLocation& target;
534 ValueNode* const source;
535 };
536 using MoveVector = base::SmallVector<Move, 16>;
537
538 void EmitTrampolineFor(NodeBase* node) {
539 DCHECK(node->properties().can_throw());
540
541 ExceptionHandlerInfo* const handler_info = node->exception_handler_info();
542 if (handler_info->ShouldLazyDeopt()) return;
543 DCHECK(handler_info->HasExceptionHandler());
544 BasicBlock* const catch_block = handler_info->catch_block();
545 LazyDeoptInfo* const deopt_info = node->lazy_deopt_info();
546
547 // The exception handler trampoline resolves moves for exception phis and
548 // then jumps to the actual catch block. There are a few points worth
549 // noting:
550 //
551 // - All source locations are assumed to be stack slots, except the
552 // accumulator which is stored in kReturnRegister0. We don't emit an
553 // explicit move for it, instead it is pushed and popped at the boundaries
554 // of the entire move sequence (necessary due to materialisation).
555 //
556 // - Some values may require materialisation, i.e. heap number construction
557 // through calls to the NewHeapNumber builtin. To avoid potential conflicts
558 // with other moves (which may happen due to stack slot reuse, i.e. a
559 // target location of move A may equal source location of move B), we
560 // materialise and push results to new temporary stack slots before the
561 // main move sequence, and then pop results into their final target
562 // locations afterwards. Note this is only safe because a) materialised
563 // values are tagged and b) the stack walk treats unknown stack slots as
564 // tagged.
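    // Illustrative sketch (not part of this file): for a catch block with two
    // exception phis, p1 needing a materialised HeapNumber and p2 being a
    // plain stack-to-stack move (p1/p2 are hypothetical), the trampoline
    // emitted below is shaped roughly like:
    //
    //   Push(kReturnRegister0)         // only if the accumulator is live
    //   <call NewHeapNumber for p1>    // EmitMaterialisationsAndPushResults
    //   Push(kReturnRegister0)         //   park the result on the stack
    //   <parallel move for p2>         // direct_moves.EmitMoves(scratch)
    //   Pop(<p1 target>)               // EmitPopMaterialisedResults
    //   Pop(kReturnRegister0)          // restore the exception object
    //   Jump(catch_block)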
565
566 const InterpretedDeoptFrame& lazy_frame =
567 deopt_info->GetFrameForExceptionHandler(handler_info);
568
569 // TODO(v8:7700): Handle inlining.
570 ParallelMoveResolver<Register, COMPRESS_POINTERS_BOOL> direct_moves(masm_);
571 MoveVector materialising_moves;
572 bool save_accumulator = false;
573 RecordMoves(lazy_frame.unit(), catch_block, lazy_frame.frame_state(),
574 &direct_moves, &materialising_moves, &save_accumulator);
575 __ BindJumpTarget(&handler_info->trampoline_entry());
576 __ RecordComment("-- Exception handler trampoline START");
577 EmitMaterialisationsAndPushResults(materialising_moves, save_accumulator);
578
579 __ RecordComment("EmitMoves");
580 MaglevAssembler::TemporaryRegisterScope temps(masm_);
581 Register scratch = temps.AcquireScratch();
582 direct_moves.EmitMoves(scratch);
583 EmitPopMaterialisedResults(materialising_moves, save_accumulator, scratch);
584 __ Jump(catch_block->label());
585 __ RecordComment("-- Exception handler trampoline END");
586 }
587
588 MaglevAssembler* masm() const { return masm_; }
589
590 void RecordMoves(
591 const MaglevCompilationUnit& unit, BasicBlock* catch_block,
592 const CompactInterpreterFrameState* register_frame,
593 ParallelMoveResolver<Register, COMPRESS_POINTERS_BOOL>* direct_moves,
594 MoveVector* materialising_moves, bool* save_accumulator) {
595 if (!catch_block->has_phi()) return;
596 for (Phi* phi : *catch_block->phis()) {
597 DCHECK(phi->is_exception_phi());
598 if (!phi->has_valid_live_range()) continue;
599
600 const ValueLocation& target = phi->result();
601 if (phi->owner() == interpreter::Register::virtual_accumulator()) {
602 // If the accumulator is live, then it is the exception object located
603 // at kReturnRegister0. We don't emit a move for it since the value is
604 // already in the right spot, but we do have to ensure it isn't
605 // clobbered by calls to the NewHeapNumber builtin during
606 // materialisation.
607 DCHECK_EQ(target.AssignedGeneralRegister(), kReturnRegister0);
608 *save_accumulator = true;
609 continue;
610 }
611
612 ValueNode* source = register_frame->GetValueOf(phi->owner(), unit);
613 DCHECK_NOT_NULL(source);
614 if (VirtualObject* vobj = source->TryCast<VirtualObject>()) {
615 DCHECK(vobj->allocation()->HasEscaped());
616 source = vobj->allocation();
617 }
618 // All registers must have been spilled due to the call.
619 // TODO(jgruber): Which call? Because any throw requires at least a call
620 // to Runtime::kThrowFoo?
621 DCHECK(!source->allocation().IsRegister());
622
623 switch (source->properties().value_representation()) {
624 case ValueRepresentation::kTagged:
625 direct_moves->RecordMove(
626 source, source->allocation(),
627 compiler::AllocatedOperand::cast(target.operand()),
628 phi->decompresses_tagged_result() ? kNeedsDecompression
629 : kDoesNotNeedDecompression);
630 break;
631 case ValueRepresentation::kIntPtr:
632 case ValueRepresentation::kInt32:
633 case ValueRepresentation::kUint32:
634 materialising_moves->emplace_back(target, source);
635 break;
636 case ValueRepresentation::kFloat64:
637 case ValueRepresentation::kHoleyFloat64:
638 materialising_moves->emplace_back(target, source);
639 break;
640 UNREACHABLE();
641 }
642 }
643 }
644
645 void EmitMaterialisationsAndPushResults(const MoveVector& moves,
646 bool save_accumulator) const {
647 if (moves.empty()) return;
648
649 // It's possible to optimize this further, at the cost of additional
650 // complexity:
651 //
652 // - If the target location is a register, we could theoretically move the
653 // materialised result there immediately, with the additional complication
654 // that following calls to NewHeapNumber may clobber the register.
655 //
656 // - If the target location is a stack slot which is neither a source nor
657 // target slot for any other moves (direct or materialising), we could move
658 // the result there directly instead of pushing and later popping it. This
659 // doesn't seem worth the extra code complexity though, given we are
660 // talking about a presumably infrequent case for exception handlers.
661
662 __ RecordComment("EmitMaterialisationsAndPushResults");
663
664 if (save_accumulator) __ Push(kReturnRegister0);
665
666#ifdef DEBUG
667 // Allow calls in these materialisations.
668 __ set_allow_call(true);
669#endif
670 for (const Move& move : moves) {
671 // We consider constants after all other operations, since constants
672 // don't need to call NewHeapNumber.
673 if (IsConstantNode(move.source->opcode())) continue;
674 __ MaterialiseValueNode(kReturnRegister0, move.source);
675 __ Push(kReturnRegister0);
676 }
677#ifdef DEBUG
678 __ set_allow_call(false);
679#endif
680 }
681
682 void EmitPopMaterialisedResults(const MoveVector& moves,
683 bool save_accumulator,
684 Register scratch) const {
685 if (moves.empty()) return;
686 __ RecordComment("EmitPopMaterialisedResults");
687 for (const Move& move : base::Reversed(moves)) {
688 const ValueLocation& target = move.target;
689 Register target_reg = target.operand().IsAnyRegister()
690 ? target.AssignedGeneralRegister()
691 : scratch;
692 if (IsConstantNode(move.source->opcode())) {
693 __ MaterialiseValueNode(target_reg, move.source);
694 } else {
695 __ Pop(target_reg);
696 }
697 if (target_reg == scratch) {
698 __ Move(masm_->ToMemOperand(target.operand()), scratch);
699 }
700 }
701 if (save_accumulator) __ Pop(kReturnRegister0);
702 }
703
704 MaglevAssembler* const masm_;
705};
706
707class MaglevCodeGeneratingNodeProcessor {
708 public:
709 MaglevCodeGeneratingNodeProcessor(MaglevAssembler* masm, Zone* zone)
710 : masm_(masm), zone_(zone) {}
711
712 void PreProcessGraph(Graph* graph) {
713 // TODO(victorgomes): I wonder if we want to create a struct that shares
714 // these fields between graph and code_gen_state.
715 code_gen_state()->set_untagged_slots(graph->untagged_stack_slots());
716 code_gen_state()->set_tagged_slots(graph->tagged_stack_slots());
717 code_gen_state()->set_max_deopted_stack_size(
718 graph->max_deopted_stack_size());
719 code_gen_state()->set_max_call_stack_args_(graph->max_call_stack_args());
720
721 if (v8_flags.maglev_break_on_entry) {
722 __ DebugBreak();
723 }
724
725 if (graph->is_osr()) {
726 __ OSRPrologue(graph);
727 } else {
728 __ Prologue(graph);
729 }
730
731 // "Deferred" computation has to be done before block removal, because
732 // block removal doesn't propagate deferredness of removed blocks.
733 int deferred_count = ComputeDeferred(graph);
734
735 // If we deferred the first block, un-defer it. This can happen because we
736 // defer a block if all its successors are deferred (i.e., lead to an
737 // unconditional deopt). E.g., if we only executed exception throwing code
738 // paths, the non-exception code paths might be untaken, and thus contain
739 // unconditional deopts, so we end up deferring all non-exception code
740 // paths, including the first block.
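    // Illustrative note (not part of this file): this happens, for instance,
    // when both successors of the entry block end in unconditional deopts, so
    // ComputeDeferred marks the entry block itself deferred; it must be
    // un-deferred because the reordering below moves deferred blocks to the
    // end, and the entry block has to stay first.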
741 if (graph->blocks()[0]->is_deferred()) {
742 graph->blocks()[0]->set_deferred(false);
743 --deferred_count;
744 }
745
746 // Reorder the blocks so that deferred blocks are at the end.
747 int non_deferred_count = graph->num_blocks() - deferred_count;
748
749 ZoneVector<BasicBlock*> new_blocks(graph->num_blocks(), zone_);
750
751 size_t ix_non_deferred = 0;
752 size_t ix_deferred = non_deferred_count;
753 for (auto block_it = graph->begin(); block_it != graph->end(); ++block_it) {
754 BasicBlock* block = *block_it;
755 if (block->is_deferred()) {
756 new_blocks[ix_deferred++] = block;
757 } else {
758 new_blocks[ix_non_deferred++] = block;
759 }
760 }
761 CHECK_EQ(ix_deferred, graph->num_blocks());
762 CHECK_EQ(ix_non_deferred, non_deferred_count);
763 graph->set_blocks(new_blocks);
764
765 // Remove empty blocks.
766 ZoneVector<BasicBlock*>& blocks = graph->blocks();
767 size_t current_ix = 0;
768 for (size_t i = 0; i < blocks.size(); ++i) {
769 BasicBlock* block = blocks[i];
770 if (code_gen_state()->RealJumpTarget(block) == block) {
771 // This block cannot be replaced.
772 blocks[current_ix++] = block;
773 }
774 }
775 blocks.resize(current_ix);
776 }
777
778 void PostProcessGraph(Graph* graph) {}
779 void PostProcessBasicBlock(BasicBlock* block) {}
780 void PostPhiProcessing() {}
781
782 BlockProcessResult PreProcessBasicBlock(BasicBlock* block) {
783 if (block->is_loop()) {
784 __ LoopHeaderAlign();
785 }
786 if (v8_flags.code_comments) {
787 std::stringstream ss;
788 ss << "-- Block b" << block->id();
789 __ RecordComment(ss.str());
790 }
791 __ BindBlock(block);
792 return BlockProcessResult::kContinue;
793 }
794
795 template <typename NodeT>
796 ProcessResult Process(NodeT* node, const ProcessingState& state) {
797 if (v8_flags.code_comments) {
798 std::stringstream ss;
799 ss << "-- " << graph_labeller()->NodeId(node) << ": "
800 << PrintNode(graph_labeller(), node);
801 __ RecordComment(ss.str());
802 }
803
804 if (v8_flags.maglev_assert_stack_size) {
805 __ AssertStackSizeCorrect();
806 }
807
808 PatchJumps(node);
809
810 // Emit Phi moves before visiting the control node.
811 if (std::is_base_of_v<UnconditionalControlNode, NodeT>) {
812 EmitBlockEndGapMoves(node->template Cast<UnconditionalControlNode>(),
813 state);
814 }
815
816 if (v8_flags.slow_debug_code && !std::is_same_v<NodeT, Phi>) {
817 // Check that all int32/uint32 inputs are zero extended.
818 // Note that we don't do this for Phis, since they are virtual operations
819 // whose inputs aren't actual inputs but are injected on incoming
820 // branches. There's thus nothing to verify for the inputs we see for the
821 // phi.
822 for (Input& input : *node) {
823 ValueRepresentation rep =
824 input.node()->properties().value_representation();
825 if (rep == ValueRepresentation::kInt32 || rep == ValueRepresentation::kUint32) {
826 // TODO(leszeks): Ideally we'd check non-register inputs too, but
827 // AssertZeroExtended needs the scratch register, so we'd have to do
828 // some manual push/pop here to free up another register.
829 if (input.IsGeneralRegister()) {
830 __ AssertZeroExtended(ToRegister(input));
831 }
832 }
833 }
834 }
835
836 MaglevAssembler::TemporaryRegisterScope scratch_scope(masm());
837 scratch_scope.Include(node->general_temporaries());
838 scratch_scope.IncludeDouble(node->double_temporaries());
839
840#ifdef DEBUG
841 masm()->set_allow_allocate(node->properties().can_allocate());
842 masm()->set_allow_call(node->properties().is_call());
843 masm()->set_allow_deferred_call(node->properties().is_deferred_call());
844#endif
845
846 node->GenerateCode(masm(), state);
847
848#ifdef DEBUG
849 masm()->set_allow_allocate(false);
850 masm()->set_allow_call(false);
851 masm()->set_allow_deferred_call(false);
852#endif
853
854 if (std::is_base_of_v<ValueNode, NodeT>) {
855 ValueNode* value_node = node->template Cast<ValueNode>();
856 if (value_node->has_valid_live_range() && value_node->is_spilled()) {
857 compiler::AllocatedOperand source =
858 compiler::AllocatedOperand::cast(value_node->result().operand());
859 // We shouldn't spill nodes which already output to the stack.
860 if (!source.IsAnyStackSlot()) {
861 if (v8_flags.code_comments) __ RecordComment("-- Spill:");
862 if (source.IsRegister()) {
863 __ Move(masm()->GetStackSlot(value_node->spill_slot()),
864 ToRegister(source));
865 } else {
866 __ StoreFloat64(masm()->GetStackSlot(value_node->spill_slot()),
867 ToDoubleRegister(source));
868 }
869 } else {
870 // Otherwise, the result source stack slot should be equal to the
871 // spill slot.
872 DCHECK_EQ(source.index(), value_node->spill_slot().index());
873 }
874 }
875 }
876 return ProcessResult::kContinue;
877 }
878
879 void EmitBlockEndGapMoves(UnconditionalControlNode* node,
880 const ProcessingState& state) {
881 BasicBlock* target = node->target();
882 if (!target->has_state()) {
883 __ RecordComment("-- Target has no state, must be a fallthrough");
884 return;
885 }
886
887 int predecessor_id = state.block()->predecessor_id();
888
889 MaglevAssembler::TemporaryRegisterScope temps(masm_);
890 Register scratch = temps.AcquireScratch();
891 DoubleRegister double_scratch = temps.AcquireScratchDouble();
892
893 // TODO(leszeks): Move these to fields, to allow their data structure
894 // allocations to be reused. Will need some sort of state resetting.
895 ParallelMoveResolver<Register, false> register_moves(masm_);
896 ParallelMoveResolver<DoubleRegister, false> double_register_moves(masm_);
897
898 // Remember what registers were assigned to by a Phi, to avoid clobbering
899 // them with RegisterMoves.
900 RegList registers_set_by_phis;
901 DoubleRegList double_registers_set_by_phis;
902
903 __ RecordComment("-- Gap moves:");
904
905 if (target->has_phi()) {
906 Phi::List* phis = target->phis();
907 for (Phi* phi : *phis) {
908 // Ignore dead phis.
909 // TODO(leszeks): We should remove dead phis entirely and turn this into
910 // a DCHECK.
911 if (!phi->has_valid_live_range()) {
912 if (v8_flags.code_comments) {
913 std::stringstream ss;
914 ss << "-- * "
915 << phi->input(state.block()->predecessor_id()).operand() << " → "
916 << target << " (n" << graph_labeller()->NodeId(phi)
917 << ") [DEAD]";
918 __ RecordComment(ss.str());
919 }
920 continue;
921 }
922 Input& input = phi->input(state.block()->predecessor_id());
923 ValueNode* input_node = input.node();
924 compiler::InstructionOperand source = input.operand();
925 compiler::AllocatedOperand target_operand =
926 compiler::AllocatedOperand::cast(phi->result().operand());
927 if (v8_flags.code_comments) {
928 std::stringstream ss;
929 ss << "-- * " << source << " → " << target << " (n"
930 << graph_labeller()->NodeId(phi) << ")";
931 __ RecordComment(ss.str());
932 }
933 if (phi->use_double_register()) {
934 DCHECK(!phi->decompresses_tagged_result());
935 double_register_moves.RecordMove(input_node, source, target_operand,
936 false);
937 } else {
938 register_moves.RecordMove(input_node, source, target_operand,
939 kDoesNotNeedDecompression);
940 }
941 if (target_operand.IsAnyRegister()) {
942 if (phi->use_double_register()) {
943 double_registers_set_by_phis.set(
944 target_operand.GetDoubleRegister());
945 } else {
946 registers_set_by_phis.set(target_operand.GetRegister());
947 }
948 }
949 }
950 }
951
952 target->state()->register_state().ForEachGeneralRegister(
953 [&](Register reg, RegisterState& state) {
954 // Don't clobber registers set by a Phi.
955 if (registers_set_by_phis.has(reg)) return;
956
957 ValueNode* node;
958 RegisterMerge* merge;
959 if (LoadMergeState(state, &node, &merge)) {
960 compiler::InstructionOperand source =
961 merge->operand(predecessor_id);
962 if (v8_flags.code_comments) {
963 std::stringstream ss;
964 ss << "-- * " << source << " → " << reg;
965 __ RecordComment(ss.str());
966 }
967 register_moves.RecordMove(node, source, reg,
968 kDoesNotNeedDecompression);
969 }
970 });
971
972 register_moves.EmitMoves(scratch);
973
974 __ RecordComment("-- Double gap moves:");
975
976 target->state()->register_state().ForEachDoubleRegister(
977 [&](DoubleRegister reg, RegisterState& state) {
978 // Don't clobber registers set by a Phi.
979 if (double_registers_set_by_phis.has(reg)) return;
980
981 ValueNode* node;
982 RegisterMerge* merge;
983 if (LoadMergeState(state, &node, &merge)) {
984 compiler::InstructionOperand source =
985 merge->operand(predecessor_id);
986 if (v8_flags.code_comments) {
987 std::stringstream ss;
988 ss << "-- * " << source << " → " << reg;
989 __ RecordComment(ss.str());
990 }
991 double_register_moves.RecordMove(node, source, reg,
992 kDoesNotNeedDecompression);
993 }
994 });
995
996 double_register_moves.EmitMoves(double_scratch);
997 }
998
999 Isolate* isolate() const { return masm_->isolate(); }
1000 MaglevAssembler* masm() const { return masm_; }
1001 MaglevCodeGenState* code_gen_state() const {
1002 return masm()->code_gen_state();
1003 }
1004 MaglevGraphLabeller* graph_labeller() const {
1005 return code_gen_state()->graph_labeller();
1006 }
1007
1008 private:
1009 // Jump threading: instead of jumping to an empty block A which just
1010 // unconditionally jumps to B, redirect the jump to B directly.
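  // Illustrative note (not part of this file): e.g. for
  //   b1: ...; Jump b2
  //   b2: Jump b3      (empty apart from the jump)
  //   b3: ...
  // RealJumpTarget(b2) is b3, so the Jump in b1 is retargeted to b3 and the
  // empty b2 is later dropped by the empty-block removal in PreProcessGraph.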
1011 template <typename NodeT>
1012 void PatchJumps(NodeT* node) {
1013 if constexpr (IsUnconditionalControlNode(Node::opcode_of<NodeT>)) {
1014 UnconditionalControlNode* control_node =
1015 node->template Cast<UnconditionalControlNode>();
1016 control_node->set_target(
1017 code_gen_state()->RealJumpTarget(control_node->target()));
1018 } else if constexpr (IsBranchControlNode(Node::opcode_of<NodeT>)) {
1019 BranchControlNode* control_node =
1020 node->template Cast<BranchControlNode>();
1021 control_node->set_if_true(
1022 code_gen_state()->RealJumpTarget(control_node->if_true()));
1023 control_node->set_if_false(
1024 code_gen_state()->RealJumpTarget(control_node->if_false()));
1025 } else if constexpr (Node::opcode_of<NodeT> == Opcode::kSwitch) {
1026 Switch* switch_node = node->template Cast<Switch>();
1027 BasicBlockRef* targets = switch_node->targets();
1028 for (int i = 0; i < switch_node->size(); ++i) {
1029 targets[i].set_block_ptr(
1030 code_gen_state()->RealJumpTarget(targets[i].block_ptr()));
1031 }
1032 if (switch_node->has_fallthrough()) {
1033 switch_node->set_fallthrough(
1034 code_gen_state()->RealJumpTarget(switch_node->fallthrough()));
1035 }
1036 }
1037 }
1038
1039 int ComputeDeferred(Graph* graph) {
1040 int deferred_count = 0;
1041 // Propagate deferredness: If a block is deferred, defer all its successors,
1042 // except if a successor has another predecessor which is not deferred.
1043
1044 // In addition, if all successors of a block are deferred, defer it too.
1045
1046 // Work queue is a queue of blocks which are deferred, so we'll need to
1047 // check whether to defer their successors and predecessors.
1048 SmallZoneVector<BasicBlock*, 32> work_queue(zone_);
1049 for (auto block_it = graph->begin(); block_it != graph->end(); ++block_it) {
1050 BasicBlock* block = *block_it;
1051 if (block->is_deferred()) {
1052 ++deferred_count;
1053 work_queue.emplace_back(block);
1054 }
1055 }
1056
1057 // The algorithm below is O(N * e^2) where e is the maximum number of
1058 // predecessors / successors. We check whether we should defer a block at
1059 // most e times. When doing the check, we check each predecessor / successor
1060 // once.
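    // Illustrative note (not part of this file): suppose only b3 is deferred
    // in the diamond b1 -> {b2, b3} -> b4. Popping b3 defers neither b4 (it
    // still has the non-deferred predecessor b2) nor b1 (it still has the
    // non-deferred successor b2), so the fixed point is reached with b3 as
    // the only deferred block.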
1061 while (!work_queue.empty()) {
1062 BasicBlock* block = work_queue.back();
1063 work_queue.pop_back();
1064 DCHECK(block->is_deferred());
1065
1066 // Check if we should defer any successor.
1067 block->ForEachSuccessor([&work_queue,
1068 &deferred_count](BasicBlock* successor) {
1069 if (successor->is_deferred()) {
1070 return;
1071 }
1072 bool should_defer = true;
1073 successor->ForEachPredecessor([&should_defer](BasicBlock* predecessor) {
1074 if (!predecessor->is_deferred()) {
1075 should_defer = false;
1076 }
1077 });
1078 if (should_defer) {
1079 ++deferred_count;
1080 work_queue.emplace_back(successor);
1081 successor->set_deferred(true);
1082 }
1083 });
1084
1085 // Check if we should defer any predecessor.
1086 block->ForEachPredecessor([&work_queue,
1087 &deferred_count](BasicBlock* predecessor) {
1088 if (predecessor->is_deferred()) {
1089 return;
1090 }
1091 bool should_defer = true;
1092 predecessor->ForEachSuccessor([&should_defer](BasicBlock* successor) {
1093 if (!successor->is_deferred()) {
1094 should_defer = false;
1095 }
1096 });
1097 if (should_defer) {
1098 ++deferred_count;
1099 work_queue.emplace_back(predecessor);
1100 predecessor->set_deferred(true);
1101 }
1102 });
1103 }
1104 return deferred_count;
1105 }
1106 MaglevAssembler* const masm_;
1107 Zone* zone_;
1108};
1109
1110class SafepointingNodeProcessor {
1111 public:
1112 explicit SafepointingNodeProcessor(LocalIsolate* local_isolate)
1113 : local_isolate_(local_isolate) {}
1114
1115 void PreProcessGraph(Graph* graph) {}
1116 void PostProcessGraph(Graph* graph) {}
1117 void PostProcessBasicBlock(BasicBlock* block) {}
1118 BlockProcessResult PreProcessBasicBlock(BasicBlock* block) {
1119 return BlockProcessResult::kContinue;
1120 }
1121 void PostPhiProcessing() {}
1122 ProcessResult Process(NodeBase* node, const ProcessingState& state) {
1123 local_isolate_->heap()->Safepoint();
1124 return ProcessResult::kContinue;
1125 }
1126
1127 private:
1128 LocalIsolate* local_isolate_;
1129};
1130
1131namespace {
1132DeoptimizationFrameTranslation::FrameCount GetFrameCount(
1133 const DeoptFrame* deopt_frame) {
1134 int total = 0;
1135 int js_frame = 0;
1136 do {
1137 if (deopt_frame->IsJsFrame()) {
1138 js_frame++;
1139 }
1140 total++;
1141 deopt_frame = deopt_frame->parent();
1142 } while (deopt_frame);
1143 return {total, js_frame};
1144}
1145} // namespace
1146
1147class MaglevFrameTranslationBuilder {
1148 public:
1149 MaglevFrameTranslationBuilder(
1150 LocalIsolate* local_isolate, MaglevAssembler* masm,
1151 FrameTranslationBuilder* translation_array_builder,
1152 IdentityMap<int, base::DefaultAllocationPolicy>* protected_deopt_literals,
1153 IdentityMap<int, base::DefaultAllocationPolicy>* deopt_literals)
1154 : local_isolate_(local_isolate),
1155 masm_(masm),
1156 translation_array_builder_(translation_array_builder),
1157 protected_deopt_literals_(protected_deopt_literals),
1158 deopt_literals_(deopt_literals),
1159 object_ids_(10) {}
1160
1161 void BuildEagerDeopt(EagerDeoptInfo* deopt_info) {
1162 BuildBeginDeopt(deopt_info);
1163
1164 const InputLocation* current_input_location = deopt_info->input_locations();
1165 const VirtualObjectList& virtual_objects =
1166 deopt_info->top_frame().GetVirtualObjects();
1167 RecursiveBuildDeoptFrame(deopt_info->top_frame(), current_input_location,
1168 virtual_objects);
1169 }
1170
1171 void BuildLazyDeopt(LazyDeoptInfo* deopt_info) {
1172 BuildBeginDeopt(deopt_info);
1173
1174 const InputLocation* current_input_location = deopt_info->input_locations();
1175 const VirtualObjectList& virtual_objects =
1176 deopt_info->top_frame().GetVirtualObjects();
1177
1178 if (deopt_info->top_frame().parent()) {
1179 // Deopt input locations are in the order of deopt frame emission, so
1180 // update the pointer after emitting the parent frame.
1181 RecursiveBuildDeoptFrame(*deopt_info->top_frame().parent(),
1182 current_input_location, virtual_objects);
1183 }
1184
1185 const DeoptFrame& top_frame = deopt_info->top_frame();
1186 switch (top_frame.type()) {
1187 case DeoptFrame::FrameType::kInterpretedFrame:
1188 return BuildSingleDeoptFrame(
1189 top_frame.as_interpreted(), current_input_location, virtual_objects,
1190 deopt_info->result_location(), deopt_info->result_size());
1191 case DeoptFrame::FrameType::kInlinedArgumentsFrame:
1192 // The inlined arguments frame can never be the top frame.
1193 UNREACHABLE();
1194 case DeoptFrame::FrameType::kConstructInvokeStubFrame:
1195 return BuildSingleDeoptFrame(top_frame.as_construct_stub(),
1196 current_input_location, virtual_objects);
1197 case DeoptFrame::FrameType::kBuiltinContinuationFrame:
1198 return BuildSingleDeoptFrame(top_frame.as_builtin_continuation(),
1199 current_input_location, virtual_objects);
1200 }
1201 }
1202
1203 private:
1204 constexpr int DeoptStackSlotIndexFromFPOffset(int offset) {
1205 return 1 - offset / kSystemPointerSize;
1206 }
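  // Illustrative note (not part of this file): on a 64-bit target
  // (kSystemPointerSize == 8), an fp-relative offset of -16 maps to deopt
  // slot index 1 - (-16 / 8) = 3 and an offset of +8 maps to 1 - 1 = 0, so
  // slots further below the frame pointer get larger translation indices.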
1207
1208 int DeoptStackSlotFromStackSlot(const compiler::AllocatedOperand& operand) {
1209 return DeoptStackSlotIndexFromFPOffset(
1210 masm_->GetFramePointerOffsetForStackSlot(operand));
1211 }
1212
1213 void BuildBeginDeopt(DeoptInfo* deopt_info) {
1214 object_ids_.clear();
1215 auto [frame_count, jsframe_count] = GetFrameCount(&deopt_info->top_frame());
1216 deopt_info->set_translation_index(
1217 translation_array_builder_->BeginTranslation(
1218 frame_count, jsframe_count,
1219 deopt_info->feedback_to_update().IsValid()));
1220 if (deopt_info->feedback_to_update().IsValid()) {
1221 translation_array_builder_->AddUpdateFeedback(
1222 GetDeoptLiteral(*deopt_info->feedback_to_update().vector),
1223 deopt_info->feedback_to_update().index());
1224 }
1225 }
1226
1227 void RecursiveBuildDeoptFrame(const DeoptFrame& frame,
1228 const InputLocation*& current_input_location,
1229 const VirtualObjectList& virtual_objects) {
1230 if (frame.parent()) {
1231 // Deopt input locations are in the order of deopt frame emission, so
1232 // update the pointer after emitting the parent frame.
1233 RecursiveBuildDeoptFrame(*frame.parent(), current_input_location,
1234 virtual_objects);
1235 }
1236
1237 switch (frame.type()) {
1238 case DeoptFrame::FrameType::kInterpretedFrame:
1239 return BuildSingleDeoptFrame(frame.as_interpreted(),
1240 current_input_location, virtual_objects);
1241 case DeoptFrame::FrameType::kInlinedArgumentsFrame:
1242 return BuildSingleDeoptFrame(frame.as_inlined_arguments(),
1243 current_input_location, virtual_objects);
1244 case DeoptFrame::FrameType::kConstructInvokeStubFrame:
1245 return BuildSingleDeoptFrame(frame.as_construct_stub(),
1246 current_input_location, virtual_objects);
1247 case DeoptFrame::FrameType::kBuiltinContinuationFrame:
1248 return BuildSingleDeoptFrame(frame.as_builtin_continuation(),
1249 current_input_location, virtual_objects);
1250 }
1251 }
1252
1253 void BuildSingleDeoptFrame(const InterpretedDeoptFrame& frame,
1254 const InputLocation*& current_input_location,
1255 const VirtualObjectList& virtual_objects,
1256 interpreter::Register result_location,
1257 int result_size) {
1258 int return_offset = frame.ComputeReturnOffset(result_location, result_size);
1259 translation_array_builder_->BeginInterpretedFrame(
1260 frame.bytecode_position(),
1261 GetDeoptLiteral(frame.GetSharedFunctionInfo()),
1262 GetProtectedDeoptLiteral(*frame.GetBytecodeArray().object()),
1263 frame.unit().register_count(), return_offset, result_size);
1264
1265 BuildDeoptFrameValues(frame.unit(), frame.frame_state(), frame.closure(),
1266 current_input_location, virtual_objects,
1267 result_location, result_size);
1268 }
1269
1270 void BuildSingleDeoptFrame(const InterpretedDeoptFrame& frame,
1271 const InputLocation*& current_input_location,
1272 const VirtualObjectList& virtual_objects) {
1273 // The return offset/count is used for updating an accumulator or register
1274 // after a lazy deopt -- this function is overloaded to allow them to be
1275 // passed in.
1276 const int return_offset = 0;
1277 const int return_count = 0;
1278 translation_array_builder_->BeginInterpretedFrame(
1279 frame.bytecode_position(),
1280 GetDeoptLiteral(frame.GetSharedFunctionInfo()),
1281 GetProtectedDeoptLiteral(*frame.GetBytecodeArray().object()),
1282 frame.unit().register_count(), return_offset, return_count);
1283
1284 BuildDeoptFrameValues(frame.unit(), frame.frame_state(), frame.closure(),
1285 current_input_location, virtual_objects,
1286 interpreter::Register::invalid_value(), return_count);
1287 }
1288
1289 void BuildSingleDeoptFrame(const InlinedArgumentsDeoptFrame& frame,
1290 const InputLocation*& current_input_location,
1291 const VirtualObjectList& virtual_objects) {
1292 translation_array_builder_->BeginInlinedExtraArguments(
1293 GetDeoptLiteral(frame.GetSharedFunctionInfo()),
1294 static_cast<uint32_t>(frame.arguments().size()),
1295 frame.GetBytecodeArray().parameter_count());
1296
1297 // Closure
1298 BuildDeoptFrameSingleValue(frame.closure(), current_input_location,
1299 virtual_objects);
1300
1301 // Arguments
1302 // TODO(victorgomes): Technically we don't need all arguments, only the
1303 // extra ones. But doing this at the moment, since it matches the
1304 // TurboFan behaviour.
1305 for (ValueNode* value : frame.arguments()) {
1306 BuildDeoptFrameSingleValue(value, current_input_location,
1307 virtual_objects);
1308 }
1309 }
1310
1311 void BuildSingleDeoptFrame(const ConstructInvokeStubDeoptFrame& frame,
1312 const InputLocation*& current_input_location,
1313 const VirtualObjectList& virtual_objects) {
1314 translation_array_builder_->BeginConstructInvokeStubFrame(
1315 GetDeoptLiteral(frame.GetSharedFunctionInfo()));
1316
1317 // Implicit receiver
1318 BuildDeoptFrameSingleValue(frame.receiver(), current_input_location,
1319 virtual_objects);
1320
1321 // Context
1322 BuildDeoptFrameSingleValue(frame.context(), current_input_location,
1323 virtual_objects);
1324 }
1325
1326 void BuildSingleDeoptFrame(const BuiltinContinuationDeoptFrame& frame,
1327 const InputLocation*& current_input_location,
1328 const VirtualObjectList& virtual_objects) {
1329 BytecodeOffset bailout_id =
1330 Builtins::GetContinuationBytecodeOffset(frame.builtin_id());
1331 int literal_id = GetDeoptLiteral(frame.GetSharedFunctionInfo());
1332 int height = frame.parameters().length();
1333
1334 constexpr int kExtraFixedJSFrameParameters =
1336 if (frame.is_javascript()) {
1337 translation_array_builder_->BeginJavaScriptBuiltinContinuationFrame(
1338 bailout_id, literal_id, height + kExtraFixedJSFrameParameters);
1339 } else {
1340 translation_array_builder_->BeginBuiltinContinuationFrame(
1341 bailout_id, literal_id, height);
1342 }
1343
1344 // Closure
1345 if (frame.is_javascript()) {
1346 translation_array_builder_->StoreLiteral(
1347 GetDeoptLiteral(frame.javascript_target()));
1348 } else {
1349 translation_array_builder_->StoreOptimizedOut();
1350 }
1351
1352 // Parameters
1353 for (ValueNode* value : frame.parameters()) {
1354 BuildDeoptFrameSingleValue(value, current_input_location,
1355 virtual_objects);
1356 }
1357
1358 // Extra fixed JS frame parameters. These are at the end since JS builtins
1359 // push their parameters in reverse order.
1360 if (frame.is_javascript()) {
1363 kExtraFixedJSFrameParameters);
1364 static_assert(kExtraFixedJSFrameParameters ==
1366 // kJavaScriptCallTargetRegister
1367 translation_array_builder_->StoreLiteral(
1368 GetDeoptLiteral(frame.javascript_target()));
1369 // kJavaScriptCallNewTargetRegister
1370 translation_array_builder_->StoreLiteral(
1371 GetDeoptLiteral(ReadOnlyRoots(local_isolate_).undefined_value()));
1372 // kJavaScriptCallArgCountRegister
1373 translation_array_builder_->StoreLiteral(GetDeoptLiteral(
1374 Smi::FromInt(Builtins::GetStackParameterCount(frame.builtin_id()))));
1375#ifdef V8_JS_LINKAGE_INCLUDES_DISPATCH_HANDLE
1376 // kJavaScriptCallDispatchHandleRegister
1377 translation_array_builder_->StoreLiteral(
1378 GetDeoptLiteral(Smi::FromInt(kInvalidDispatchHandle.value())));
1379#endif
1380 }
1381
1382 // Context
1383 ValueNode* value = frame.context();
1384 BuildDeoptFrameSingleValue(value, current_input_location, virtual_objects);
1385 }
1386
1387 void BuildDeoptStoreRegister(const compiler::AllocatedOperand& operand,
1388 ValueRepresentation repr) {
1389 switch (repr) {
1390 case ValueRepresentation::kIntPtr:
1391 translation_array_builder_->StoreIntPtrRegister(operand.GetRegister());
1392 break;
1393 case ValueRepresentation::kTagged:
1394 translation_array_builder_->StoreRegister(operand.GetRegister());
1395 break;
1396 case ValueRepresentation::kInt32:
1397 translation_array_builder_->StoreInt32Register(operand.GetRegister());
1398 break;
1399 case ValueRepresentation::kUint32:
1400 translation_array_builder_->StoreUint32Register(operand.GetRegister());
1401 break;
1402 case ValueRepresentation::kFloat64:
1403 translation_array_builder_->StoreDoubleRegister(
1404 operand.GetDoubleRegister());
1405 break;
1406 case ValueRepresentation::kHoleyFloat64:
1407 translation_array_builder_->StoreHoleyDoubleRegister(
1408 operand.GetDoubleRegister());
1409 break;
1410 }
1411 }
1412
1413 void BuildDeoptStoreStackSlot(const compiler::AllocatedOperand& operand,
1414 ValueRepresentation repr) {
1415 int stack_slot = DeoptStackSlotFromStackSlot(operand);
1416 switch (repr) {
1417 case ValueRepresentation::kIntPtr:
1418 translation_array_builder_->StoreIntPtrStackSlot(stack_slot);
1419 break;
1420 case ValueRepresentation::kTagged:
1421 translation_array_builder_->StoreStackSlot(stack_slot);
1422 break;
1423 case ValueRepresentation::kInt32:
1424 translation_array_builder_->StoreInt32StackSlot(stack_slot);
1425 break;
1426 case ValueRepresentation::kUint32:
1427 translation_array_builder_->StoreUint32StackSlot(stack_slot);
1428 break;
1429 case ValueRepresentation::kFloat64:
1430 translation_array_builder_->StoreDoubleStackSlot(stack_slot);
1431 break;
1432 case ValueRepresentation::kHoleyFloat64:
1433 translation_array_builder_->StoreHoleyDoubleStackSlot(stack_slot);
1434 break;
1435 }
1436 }
1437
1438 int GetDuplicatedId(intptr_t id) {
1439 for (int idx = 0; idx < static_cast<int>(object_ids_.size()); idx++) {
1440 if (object_ids_[idx] == id) {
1441 // Although this is not technically necessary, the translated state
1442 // machinery assigns ids to duplicates, so we need to push something to
1443 // get fresh ids.
1444 object_ids_.push_back(id);
1445 return idx;
1446 }
1447 }
1448 object_ids_.push_back(id);
1449 return kNotDuplicated;
1450 }
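  // Illustrative note (not part of this file): for allocation ids A, B, A
  // (A and B hypothetical), successive calls return kNotDuplicated,
  // kNotDuplicated and then 0 (the index of the first A), while object_ids_
  // grows to {A, B, A} so that later duplicates still receive fresh
  // positions, mirroring the deoptimizer's own id assignment.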
1451
1452 void BuildHeapNumber(Float64 number) {
1453 DirectHandle<Object> value =
1454 local_isolate_->factory()->NewHeapNumberFromBits<AllocationType::kOld>(
1455 number.get_bits());
1456 translation_array_builder_->StoreLiteral(GetDeoptLiteral(*value));
1457 }
1458
1459 void BuildConsString(const VirtualObject* object,
1460 const InputLocation*& input_location,
1461 const VirtualObjectList& virtual_objects) {
1462 auto cons_string = object->cons_string();
1463 translation_array_builder_->StringConcat();
1464 BuildNestedValue(cons_string.first(), input_location, virtual_objects);
1465 BuildNestedValue(cons_string.second(), input_location, virtual_objects);
1466 }
1467
1468 void BuildFixedDoubleArray(uint32_t length,
1469 compiler::FixedDoubleArrayRef array) {
1470 translation_array_builder_->BeginCapturedObject(length + 2);
1471 translation_array_builder_->StoreLiteral(
1472 GetDeoptLiteral(*local_isolate_->factory()->fixed_double_array_map()));
1473 translation_array_builder_->StoreLiteral(
1474 GetDeoptLiteral(Smi::FromInt(length)));
1475 for (uint32_t i = 0; i < length; i++) {
1476 Float64 value = array.GetFromImmutableFixedDoubleArray(i);
1477 if (value.is_hole_nan()) {
1478 translation_array_builder_->StoreLiteral(
1479 GetDeoptLiteral(ReadOnlyRoots(local_isolate_).the_hole_value()));
1480 } else {
1481 BuildHeapNumber(value);
1482 }
1483 }
1484 }
1485
1486 void BuildNestedValue(const ValueNode* value,
1487 const InputLocation*& input_location,
1488 const VirtualObjectList& virtual_objects) {
1489 if (IsConstantNode(value->opcode())) {
1490 translation_array_builder_->StoreLiteral(
1491 GetDeoptLiteral(*value->Reify(local_isolate_)));
1492 return;
1493 }
1494 // Special nodes.
1495 switch (value->opcode()) {
1496 case Opcode::kArgumentsElements:
1497 translation_array_builder_->ArgumentsElements(
1498 value->Cast<ArgumentsElements>()->type());
1499 // We simulate the deoptimizer deduplication machinery, which will give
1500 // a fresh id to the ArgumentsElements. For that, we need to push
1501 // something to object_ids_. We push -1, since no object should have id -1.
1502 object_ids_.push_back(-1);
1503 break;
1504 case Opcode::kArgumentsLength:
1505 translation_array_builder_->ArgumentsLength();
1506 break;
1507 case Opcode::kRestLength:
1508 translation_array_builder_->RestLength();
1509 break;
1510 case Opcode::kVirtualObject:
1511 UNREACHABLE();
1512 default:
1513 BuildDeoptFrameSingleValue(value, input_location, virtual_objects);
1514 break;
1515 }
1516 }
1517
1518 void BuildVirtualObject(const VirtualObject* object,
1519 const InputLocation*& input_location,
1520 const VirtualObjectList& virtual_objects) {
1521 if (object->type() == VirtualObject::kHeapNumber) {
1522 return BuildHeapNumber(object->number());
1523 }
1524 int dup_id =
1525 GetDuplicatedId(reinterpret_cast<intptr_t>(object->allocation()));
1526 if (dup_id != kNotDuplicated) {
1527 translation_array_builder_->DuplicateObject(dup_id);
1528 object->ForEachNestedRuntimeInput(virtual_objects,
1529 [&](ValueNode*) { input_location++; });
1530 return;
1531 }
1532 switch (object->type()) {
1533 case VirtualObject::kHeapNumber:
1534 // Handled above.
1535 UNREACHABLE();
1536 case VirtualObject::kConsString:
1537 return BuildConsString(object, input_location, virtual_objects);
1538 case VirtualObject::kFixedDoubleArray:
1539 return BuildFixedDoubleArray(object->double_elements_length(),
1540 object->double_elements());
1541 case VirtualObject::kDefault:
1542 translation_array_builder_->BeginCapturedObject(object->slot_count() +
1543 1);
1544 DCHECK(object->has_static_map());
1545 translation_array_builder_->StoreLiteral(
1546 GetDeoptLiteral(*object->map().object()));
1547 object->ForEachInput([&](ValueNode* node) {
1548 BuildNestedValue(node, input_location, virtual_objects);
1549 });
1550 }
1551 }
1552
1553 void BuildDeoptFrameSingleValue(const ValueNode* value,
1554 const InputLocation*& input_location,
1555 const VirtualObjectList& virtual_objects) {
1556 if (value->Is<Identity>()) {
1557 value = value->input(0).node();
1558 }
1559 DCHECK(!value->Is<VirtualObject>());
1560 if (const InlinedAllocation* alloc = value->TryCast<InlinedAllocation>()) {
1561 VirtualObject* vobject = virtual_objects.FindAllocatedWith(alloc);
1562 if (vobject && alloc->HasBeenElided()) {
1563 DCHECK(alloc->HasBeenAnalysed());
1564 BuildVirtualObject(vobject, input_location, virtual_objects);
1565 return;
1566 }
1567 }
1568 if (input_location->operand().IsConstant()) {
1569 translation_array_builder_->StoreLiteral(
1570 GetDeoptLiteral(*value->Reify(local_isolate_)));
1571 } else {
1572 const compiler::AllocatedOperand& operand =
1573 compiler::AllocatedOperand::cast(input_location->operand());
1574 ValueRepresentation repr = value->properties().value_representation();
1575 if (operand.IsAnyRegister()) {
1576 BuildDeoptStoreRegister(operand, repr);
1577 } else {
1578 BuildDeoptStoreStackSlot(operand, repr);
1579 }
1580 }
1581 input_location++;
1582 }
1583
1584 void BuildDeoptFrameValues(
1585 const MaglevCompilationUnit& compilation_unit,
1586 const CompactInterpreterFrameState* checkpoint_state,
1587 const ValueNode* closure, const InputLocation*& input_location,
1588 const VirtualObjectList& virtual_objects,
1589 interpreter::Register result_location, int result_size) {
1590 // TODO(leszeks): The input locations array happens to be in the same
1591 // order as closure+parameters+context+locals+accumulator are accessed
1592 // here. We should make this clearer and guard against this invariant
1593 // failing.
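    // Illustrative note (not part of this file): for one interpreted frame
    // the values emitted below are laid out as
    //
    //   closure, parameters a0..aN, context, locals r0..rM, accumulator
    //
    // with StoreOptimizedOut() filling every slot that is dead or covered by
    // the lazy deopt's own result registers (result_location/result_size).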
1594
1595 // Closure
1596 BuildDeoptFrameSingleValue(closure, input_location, virtual_objects);
1597
1598 // Parameters
1599 {
1600 int i = 0;
1601 checkpoint_state->ForEachParameter(
1602 compilation_unit, [&](ValueNode* value, interpreter::Register reg) {
1603 DCHECK_EQ(reg.ToParameterIndex(), i);
1604 if (LazyDeoptInfo::InReturnValues(reg, result_location,
1605 result_size)) {
1606 translation_array_builder_->StoreOptimizedOut();
1607 } else {
1608 BuildDeoptFrameSingleValue(value, input_location,
1609 virtual_objects);
1610 }
1611 i++;
1612 });
1613 }
1614
1615 // Context
1616 ValueNode* context_value = checkpoint_state->context(compilation_unit);
1617 BuildDeoptFrameSingleValue(context_value, input_location, virtual_objects);
1618
1619 // Locals
1620 {
1621 int i = 0;
1622 checkpoint_state->ForEachLocal(
1623 compilation_unit, [&](ValueNode* value, interpreter::Register reg) {
1624 DCHECK_LE(i, reg.index());
1625 if (LazyDeoptInfo::InReturnValues(reg, result_location,
1626 result_size))
1627 return;
1628 while (i < reg.index()) {
1629 translation_array_builder_->StoreOptimizedOut();
1630 i++;
1631 }
1632 DCHECK_EQ(i, reg.index());
1633 BuildDeoptFrameSingleValue(value, input_location, virtual_objects);
1634 i++;
1635 });
1636 while (i < compilation_unit.register_count()) {
1637 translation_array_builder_->StoreOptimizedOut();
1638 i++;
1639 }
1640 }
1641
1642 // Accumulator
1643 {
1644 if (checkpoint_state->liveness()->AccumulatorIsLive() &&
1645 !LazyDeoptInfo::InReturnValues(
1646 interpreter::Register::virtual_accumulator(), result_location,
1647 result_size)) {
1648 ValueNode* value = checkpoint_state->accumulator(compilation_unit);
1649 BuildDeoptFrameSingleValue(value, input_location, virtual_objects);
1650 } else {
1651 translation_array_builder_->StoreOptimizedOut();
1652 }
1653 }
1654 }
1655
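 // The deopt literal pools are deduplicated: a literal gets the next free
 // index on first use and the same index on every later use.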
1656 int GetProtectedDeoptLiteral(Tagged<TrustedObject> obj) {
1657 IdentityMapFindResult<int> res =
1658 protected_deopt_literals_->FindOrInsert(obj);
1659 if (!res.already_exists) {
1660 DCHECK_EQ(0, *res.entry);
1661 *res.entry = protected_deopt_literals_->size() - 1;
1662 }
1663 return *res.entry;
1664 }
1665
1666 int GetDeoptLiteral(Tagged<Object> obj) {
1667 IdentityMapFindResult<int> res = deopt_literals_->FindOrInsert(obj);
1668 if (!res.already_exists) {
1669 DCHECK_EQ(0, *res.entry);
1670 *res.entry = deopt_literals_->size() - 1;
1671 }
1672 return *res.entry;
1673 }
1674
1675 int GetDeoptLiteral(compiler::HeapObjectRef ref) {
1676 return GetDeoptLiteral(*ref.object());
1677 }
1678
1679 LocalIsolate* local_isolate_;
1680 MaglevAssembler* masm_;
1681 FrameTranslationBuilder* translation_array_builder_;
1682 IdentityMap<int, base::DefaultAllocationPolicy>* protected_deopt_literals_;
1683 IdentityMap<int, base::DefaultAllocationPolicy>* deopt_literals_;
1684
1685 static const int kNotDuplicated = -1;
1686 std::vector<intptr_t> object_ids_;
1687};
1688
1689} // namespace
1690
1691MaglevCodeGenerator::MaglevCodeGenerator(
1692 LocalIsolate* isolate, MaglevCompilationInfo* compilation_info,
1693 Graph* graph)
1694 : local_isolate_(isolate),
1695 safepoint_table_builder_(compilation_info->zone(),
1696 graph->tagged_stack_slots()),
1697 frame_translation_builder_(compilation_info->zone()),
1698 code_gen_state_(compilation_info, &safepoint_table_builder_,
1699 graph->max_block_id()),
1700 masm_(isolate->GetMainThreadIsolateUnsafe(), compilation_info->zone(),
1701 &code_gen_state_),
1702 graph_(graph),
1703 protected_deopt_literals_(isolate->heap()->heap()),
1704 deopt_literals_(isolate->heap()->heap()),
1705 retained_maps_(isolate->heap()),
1706 is_context_specialized_(
1707 compilation_info->specialize_to_function_context()),
1708 zone_(compilation_info->zone()) {
1710 DCHECK_IMPLIES(compilation_info->toplevel_is_osr(),
1711 IsMaglevOsrEnabled());
1712}
1713
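// Emits the code and metadata for the graph; when the corresponding flags are
// set, the Code object or the deoptimization data is additionally built here
// on the background thread.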
1714bool MaglevCodeGenerator::Assemble() {
1715 if (!EmitCode()) {
1716#ifdef V8_TARGET_ARCH_ARM
1717 // Even if we fail, we force emit the constant pool, so that it is empty.
1718 __ CheckConstPool(true, false);
1719#endif
1720 return false;
1721 }
1722
1723 EmitMetadata();
1724
1725 if (v8_flags.maglev_build_code_on_background) {
1729 if (code_.ToHandle(&code)) {
1731 }
1732 } else if (v8_flags.maglev_deopt_data_on_background) {
1733 // Only do this if not --maglev-build-code-on-background, since that will do
1734 // it itself.
1737 }
1738 return true;
1739}
1740
1741MaybeHandle<Code> MaglevCodeGenerator::Generate(Isolate* isolate) {
1742 if (v8_flags.maglev_build_code_on_background) {
1744 if (code_.ToHandle(&code)) {
1745 return handle(*code, isolate);
1746 }
1747 return kNullMaybeHandle;
1748 }
1749
1750 return BuildCodeObject(isolate->main_thread_local_isolate());
1751}
1752
1753GlobalHandleVector<Map> MaglevCodeGenerator::RetainedMaps(Isolate* isolate) {
1755 GlobalHandleVector<Map> maps(isolate->heap());
1756 maps.Reserve(retained_maps_.size());
1757 for (DirectHandle<Map> map : retained_maps_) maps.Push(*map);
1758 return maps;
1759}
1760
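// Runs the code-generating processors over the graph and emits the
// deoptimization exits; fails if there are more deopts than the deoptimizer
// supports.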
1761bool MaglevCodeGenerator::EmitCode() {
1762 GraphProcessor<NodeMultiProcessor<SafepointingNodeProcessor,
1763 MaglevCodeGeneratingNodeProcessor>>
1764 processor(SafepointingNodeProcessor{local_isolate_},
1765 MaglevCodeGeneratingNodeProcessor{masm(), zone_});
1767
1768 if (graph_->is_osr()) {
1769 masm_.Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
1770 masm_.RecordComment("-- OSR entrypoint --");
1772 }
1773
1774 processor.ProcessGraph(graph_);
1776 if (!EmitDeopts()) return false;
1778 __ FinishCode();
1779
1780 code_gen_succeeded_ = true;
1781 return true;
1782}
1783
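// Seeds the deopt literal table with the SharedFunctionInfos of all inlined
// functions so that they occupy the first literal indices, and remembers the
// resulting count.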
1784void MaglevCodeGenerator::RecordInlinedFunctions() {
1785 // The inlined functions should be the first literals.
1786 DCHECK_EQ(0u, deopt_literals_.size());
1790 deopt_literals_.FindOrInsert(inlined.shared_info);
1791 if (!res.already_exists) {
1792 DCHECK_EQ(0, *res.entry);
1793 *res.entry = deopt_literals_.size() - 1;
1794 }
1795 inlined.RegisterInlinedFunctionId(*res.entry);
1796 }
1797 inlined_function_count_ = static_cast<int>(deopt_literals_.size());
1798}
1799
1800void MaglevCodeGenerator::EmitDeferredCode() {
1801 // Loop over deferred_code() multiple times, clearing the vector on each
1802 // outer loop, so that deferred code can itself emit deferred code.
1803 while (!code_gen_state_.deferred_code().empty()) {
1804 for (DeferredCodeInfo* deferred_code : code_gen_state_.TakeDeferredCode()) {
1805 __ RecordComment("-- Deferred block");
1806 __ bind(&deferred_code->deferred_code_label);
1807 deferred_code->Generate(masm());
1808 __ Trap();
1809 }
1810 }
1811}
1812
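// Emits one deopt exit per recorded eager and lazy deopt, building a frame
// translation for each; lazy deopt exits are also wired into the safepoint
// table.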
1813bool MaglevCodeGenerator::EmitDeopts() {
1814 const size_t num_deopts = code_gen_state_.eager_deopts().size() +
1815 code_gen_state_.lazy_deopts().size();
1816 if (num_deopts > Deoptimizer::kMaxNumberOfEntries) {
1817 return false;
1818 }
1819
1820 MaglevFrameTranslationBuilder translation_builder(
1823
1824 // Deoptimization exits must be as small as possible, since their count grows
1825 // with function size. These labels are an optimization which extracts the
1826 // (potentially large) instruction sequence for the final jump to the
1827 // deoptimization entry into a single spot per InstructionStream object. All
1828 // deopt exits can then near-call to this label. Note: not used on all
1829 // architectures.
1830 Label eager_deopt_entry;
1831 Label lazy_deopt_entry;
1832 __ MaybeEmitDeoptBuiltinsCall(
1833 code_gen_state_.eager_deopts().size(), &eager_deopt_entry,
1834 code_gen_state_.lazy_deopts().size(), &lazy_deopt_entry);
1835
1837
1838 int deopt_index = 0;
1839
1840 __ RecordComment("-- Non-lazy deopts");
1841 for (EagerDeoptInfo* deopt_info : code_gen_state_.eager_deopts()) {
1843 translation_builder.BuildEagerDeopt(deopt_info);
1844
1846 IsDeoptimizationWithoutCodeInvalidation(deopt_info->reason())) {
1847 // Note: Maglev uses the deopt_reason to tell the deoptimizer not to
1848 // discard optimized code on deopt during ML-TF OSR. This is why we
1849 // unconditionally emit the deopt_reason when
1850 // IsDeoptimizationWithoutCodeInvalidation is true.
1851 __ RecordDeoptReason(deopt_info->reason(), 0,
1852 deopt_info->top_frame().GetSourcePosition(),
1853 deopt_index);
1854 }
1855 __ bind(deopt_info->deopt_entry_label());
1856
1857 __ CallForDeoptimization(Builtin::kDeoptimizationEntry_Eager, deopt_index,
1858 deopt_info->deopt_entry_label(),
1859 DeoptimizeKind::kEager, nullptr,
1860 &eager_deopt_entry);
1861
1862 deopt_index++;
1863 }
1864
1865 __ RecordComment("-- Lazy deopts");
1866 int last_updated_safepoint = 0;
1867 for (LazyDeoptInfo* deopt_info : code_gen_state_.lazy_deopts()) {
1869 translation_builder.BuildLazyDeopt(deopt_info);
1870
1872 __ RecordDeoptReason(DeoptimizeReason::kUnknown, 0,
1873 deopt_info->top_frame().GetSourcePosition(),
1874 deopt_index);
1875 }
1876 __ BindExceptionHandler(deopt_info->deopt_entry_label());
1877
1878 __ CallForDeoptimization(Builtin::kDeoptimizationEntry_Lazy, deopt_index,
1879 deopt_info->deopt_entry_label(),
1880 DeoptimizeKind::kLazy, nullptr, &lazy_deopt_entry);
1881
1882 last_updated_safepoint = safepoint_table_builder_.UpdateDeoptimizationInfo(
1883 deopt_info->deopting_call_return_pc(),
1884 deopt_info->deopt_entry_label()->pos(), last_updated_safepoint,
1885 deopt_index);
1886 deopt_index++;
1887 }
1888
1889 return true;
1890}
1891
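// Emits a trampoline for every node that has an associated exception handler.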
1892void MaglevCodeGenerator::EmitExceptionHandlerTrampolines() {
1893 if (code_gen_state_.handlers().empty()) return;
1894 __ RecordComment("-- Exception handler trampolines");
1895 for (NodeBase* node : code_gen_state_.handlers()) {
1896 ExceptionHandlerTrampolineBuilder::Build(masm(), node);
1897 }
1898}
1899
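// Emits the trailing metadata, in particular the exception handler table
// entries for all recorded handlers.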
1900void MaglevCodeGenerator::EmitMetadata() {
1901 // Final alignment before starting on the metadata section.
1903
1905
1906 // Exception handler table.
1908 for (NodeBase* node : code_gen_state_.handlers()) {
1909 ExceptionHandlerInfo* info = node->exception_handler_info();
1910 DCHECK_IMPLIES(info->ShouldLazyDeopt(),
1911 !info->trampoline_entry().is_bound());
1912 int pos = info->ShouldLazyDeopt() ? HandlerTable::kLazyDeopt
1913 : info->trampoline_entry().pos();
1915 }
1916}
1917
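// Builds the final Code object via Factory::CodeBuilder, attaching the
// deoptimization data (reusing data already generated on the background
// thread when available).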
1918MaybeHandle<Code> MaglevCodeGenerator::BuildCodeObject(
1919 LocalIsolate* local_isolate) {
1920 if (!code_gen_succeeded_) return {};
1921
1922 Handle<DeoptimizationData> deopt_data =
1923 (v8_flags.maglev_deopt_data_on_background &&
1924 !v8_flags.maglev_build_code_on_background)
1925 ? deopt_data_
1926 : GenerateDeoptimizationData(local_isolate);
1927 CHECK(!deopt_data.is_null());
1928
1929 CodeDesc desc;
1930 masm()->GetCode(local_isolate, &desc, &safepoint_table_builder_,
1932 auto builder =
1933 Factory::CodeBuilder{local_isolate, desc, CodeKind::MAGLEV}
1936 .set_deoptimization_data(deopt_data)
1941
1942 if (is_context_specialized_) {
1943 builder.set_is_context_specialized();
1944 }
1945
1946 return builder.TryBuild();
1947}
1948
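// Walks the embedded objects of the generated code and collects every map
// that the optimized code only references weakly.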
1949GlobalHandleVector<Map> MaglevCodeGenerator::CollectRetainedMaps(
1950 DirectHandle<Code> code) {
1951 DCHECK(code->is_optimized_code());
1952
1956 int const mode_mask = RelocInfo::EmbeddedObjectModeMask();
1957 for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
1958 DCHECK(RelocInfo::IsEmbeddedObjectMode(it.rinfo()->rmode()));
1959 Tagged<HeapObject> target_object = it.rinfo()->target_object(cage_base);
1960 if (code->IsWeakObjectInOptimizedCode(target_object)) {
1961 if (IsMap(target_object, cage_base)) {
1962 maps.Push(Cast<Map>(target_object));
1963 }
1964 }
1965 }
1966 return maps;
1967}
1968
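// Packages the frame translations, literal arrays, inlining positions and the
// per-deopt bytecode offsets, translation indices and PCs into a
// DeoptimizationData object.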
1969Handle<DeoptimizationData> MaglevCodeGenerator::GenerateDeoptimizationData(
1970 LocalIsolate* local_isolate) {
1971 int eager_deopt_count =
1972 static_cast<int>(code_gen_state_.eager_deopts().size());
1973 int lazy_deopt_count = static_cast<int>(code_gen_state_.lazy_deopts().size());
1974 int deopt_count = lazy_deopt_count + eager_deopt_count;
1975 if (deopt_count == 0 && !graph_->is_osr()) {
1976 return DeoptimizationData::Empty(local_isolate);
1977 }
1978 Handle<DeoptimizationData> data =
1979 DeoptimizationData::New(local_isolate, deopt_count);
1980
1983
1985 local_isolate->factory()->NewSharedFunctionInfoWrapper(
1989 .object());
1990
1991 {
1993 Tagged<DeoptimizationData> raw_data = *data;
1994
1995 raw_data->SetFrameTranslation(*translations);
1996 raw_data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
1997 raw_data->SetOptimizationId(
1998 Smi::FromInt(local_isolate->NextOptimizationId()));
1999
2001 raw_data->SetDeoptExitStart(Smi::FromInt(deopt_exit_start_offset_));
2002 raw_data->SetEagerDeoptCount(Smi::FromInt(eager_deopt_count));
2003 raw_data->SetLazyDeoptCount(Smi::FromInt(lazy_deopt_count));
2004 raw_data->SetWrappedSharedFunctionInfo(*sfi_wrapper);
2005 }
2006
2007 int inlined_functions_size =
2008 static_cast<int>(graph_->inlined_functions().size());
2010 local_isolate->factory()->NewProtectedFixedArray(
2013 local_isolate->factory()->NewDeoptimizationLiteralArray(
2014 deopt_literals_.size());
2017 inlined_functions_size);
2018
2020
2021 Tagged<ProtectedDeoptimizationLiteralArray> raw_protected_literals =
2022 *protected_literals;
2023 {
2026 for (auto it = iterate.begin(); it != iterate.end(); ++it) {
2027 raw_protected_literals->set(*it.entry(), Cast<TrustedObject>(it.key()));
2028 }
2029 }
2030
2031 Tagged<DeoptimizationLiteralArray> raw_literals = *literals;
2032 {
2035 for (auto it = iterate.begin(); it != iterate.end(); ++it) {
2036 raw_literals->set(*it.entry(), it.key());
2037 }
2038 }
2039
2040 for (int i = 0; i < inlined_functions_size; i++) {
2041 auto inlined_function_info = graph_->inlined_functions()[i];
2042 inlining_positions->set(i, inlined_function_info.position);
2043 }
2044
2045 Tagged<DeoptimizationData> raw_data = *data;
2046 raw_data->SetProtectedLiteralArray(raw_protected_literals);
2047 raw_data->SetLiteralArray(raw_literals);
2048 raw_data->SetInliningPositions(*inlining_positions);
2049
2050 auto info = code_gen_state_.compilation_info();
2051 raw_data->SetOsrBytecodeOffset(
2052 Smi::FromInt(info->toplevel_osr_offset().ToInt()));
2053 if (graph_->is_osr()) {
2054 raw_data->SetOsrPcOffset(Smi::FromInt(code_gen_state_.osr_entry()->pos()));
2055 } else {
2056 raw_data->SetOsrPcOffset(Smi::FromInt(-1));
2057 }
2058
2059 // Populate deoptimization entries.
2060 int i = 0;
2061 for (EagerDeoptInfo* deopt_info : code_gen_state_.eager_deopts()) {
2062 DCHECK_NE(deopt_info->translation_index(), -1);
2063 raw_data->SetBytecodeOffset(i, deopt_info->top_frame().GetBytecodeOffset());
2064 raw_data->SetTranslationIndex(
2065 i, Smi::FromInt(deopt_info->translation_index()));
2066 raw_data->SetPc(i, Smi::FromInt(deopt_info->deopt_entry_label()->pos()));
2067#ifdef DEBUG
2068 raw_data->SetNodeId(i, Smi::FromInt(i));
2069#endif // DEBUG
2070 i++;
2071 }
2072 for (LazyDeoptInfo* deopt_info : code_gen_state_.lazy_deopts()) {
2073 DCHECK_NE(deopt_info->translation_index(), -1);
2074 raw_data->SetBytecodeOffset(i, deopt_info->top_frame().GetBytecodeOffset());
2075 raw_data->SetTranslationIndex(
2076 i, Smi::FromInt(deopt_info->translation_index()));
2077 raw_data->SetPc(i, Smi::FromInt(deopt_info->deopt_entry_label()->pos()));
2078#ifdef DEBUG
2079 raw_data->SetNodeId(i, Smi::FromInt(i));
2080#endif // DEBUG
2081 i++;
2082 }
2083
2084#ifdef DEBUG
2085 raw_data->Verify(code_gen_state_.compilation_info()
2086 ->toplevel_compilation_unit()
2087 ->bytecode()
2088 .object());
2089#endif
2090
2091 return data;
2092}
2093
2094} // namespace maglev
2095} // namespace internal
2096} // namespace v8