baseline-compiler.cc
1// Copyright 2021 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
7#include <algorithm>
8#include <optional>
9#include <type_traits>
10
11#include "src/base/bits.h"
22#include "src/common/globals.h"
28#include "src/objects/code.h"
33#include "src/roots/roots.h"
34
35#if V8_TARGET_ARCH_X64
37#elif V8_TARGET_ARCH_ARM64
39#elif V8_TARGET_ARCH_IA32
41#elif V8_TARGET_ARCH_ARM
43#elif V8_TARGET_ARCH_PPC64
45#elif V8_TARGET_ARCH_S390X
47#elif V8_TARGET_ARCH_RISCV64
49#elif V8_TARGET_ARCH_RISCV32
51#elif V8_TARGET_ARCH_MIPS64
53#elif V8_TARGET_ARCH_LOONG64
55#else
56#error Unsupported target architecture.
57#endif
58
59namespace v8 {
60namespace internal {
61namespace baseline {
62
63#define __ basm_.
64
65#define RCS_BASELINE_SCOPE(rcs) \
66 RCS_SCOPE(stats_, \
67 local_isolate_->is_main_thread() \
68 ? RuntimeCallCounterId::kCompileBaseline##rcs \
69 : RuntimeCallCounterId::kCompileBackgroundBaseline##rcs)
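// For example, RCS_BASELINE_SCOPE(Visit) opens an RCS_SCOPE on stats_ that
// counts into RuntimeCallCounterId::kCompileBaselineVisit when compiling on
// the main thread, and into kCompileBackgroundBaselineVisit otherwise.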
70
71template <typename IsolateT>
73 IsolateT* isolate) {
74 if (bytes_.empty()) return isolate->factory()->empty_trusted_byte_array();
76 isolate->factory()->NewTrustedByteArray(static_cast<int>(bytes_.size()));
77 MemCopy(table->begin(), bytes_.data(), bytes_.size());
78 return table;
79}
80
81namespace detail {
82
83#ifdef DEBUG
84bool Clobbers(Register target, Register reg) { return target == reg; }
85bool Clobbers(Register target, DirectHandle<Object> handle) { return false; }
86bool Clobbers(Register target, Tagged<Smi> smi) { return false; }
87bool Clobbers(Register target, Tagged<TaggedIndex> index) { return false; }
88bool Clobbers(Register target, int32_t imm) { return false; }
89bool Clobbers(Register target, RootIndex index) { return false; }
90bool Clobbers(Register target, interpreter::Register reg) { return false; }
91bool Clobbers(Register target, interpreter::RegisterList list) { return false; }
92
93// We don't know what's inside machine registers or operands, so assume they
94// match.
95bool MachineTypeMatches(MachineType type, Register reg) { return true; }
96bool MachineTypeMatches(MachineType type, MemOperand reg) { return true; }
97bool MachineTypeMatches(MachineType type, DirectHandle<HeapObject> handle) {
98 return type.IsTagged() && !type.IsTaggedSigned();
99}
100bool MachineTypeMatches(MachineType type, Tagged<Smi> handle) {
101 return type.IsTagged() && !type.IsTaggedPointer();
102}
103bool MachineTypeMatches(MachineType type, Tagged<TaggedIndex> handle) {
104 // Tagged<TaggedIndex> doesn't have a separate type, so check for the same
105 // type as for Smis.
106 return type.IsTagged() && !type.IsTaggedPointer();
107}
108bool MachineTypeMatches(MachineType type, int32_t imm) {
109 // 32-bit immediates can be used for 64-bit params -- they'll be
110 // zero-extended.
111 return type.representation() == MachineRepresentation::kWord32 ||
112 type.representation() == MachineRepresentation::kWord64;
113}
114bool MachineTypeMatches(MachineType type, RootIndex index) {
115 return type.IsTagged() && !type.IsTaggedSigned();
116}
117bool MachineTypeMatches(MachineType type, interpreter::Register reg) {
118 return type.IsTagged();
119}
120
121template <typename Descriptor, typename... Args>
122struct CheckArgsHelper;
123
124template <typename Descriptor>
125struct CheckArgsHelper<Descriptor> {
126 static void Check(BaselineAssembler* masm, int i) {
127 if (Descriptor::AllowVarArgs()) {
128 CHECK_GE(i, Descriptor::GetParameterCount());
129 } else {
130 CHECK_EQ(i, Descriptor::GetParameterCount());
131 }
132 }
133};
134
135template <typename Descriptor, typename Arg, typename... Args>
136struct CheckArgsHelper<Descriptor, Arg, Args...> {
137 static void Check(BaselineAssembler* masm, int i, Arg arg, Args... args) {
138 if (i >= Descriptor::GetParameterCount()) {
139 CHECK(Descriptor::AllowVarArgs());
140 return;
141 }
142 CHECK(MachineTypeMatches(Descriptor().GetParameterType(i), arg));
143 CheckArgsHelper<Descriptor, Args...>::Check(masm, i + 1, args...);
144 }
145};
146
147template <typename Descriptor, typename... Args>
148struct CheckArgsHelper<Descriptor, interpreter::RegisterList, Args...> {
149 static void Check(BaselineAssembler* masm, int i,
150 interpreter::RegisterList list, Args... args) {
151 for (int reg_index = 0; reg_index < list.register_count();
152 ++reg_index, ++i) {
153 if (i >= Descriptor::GetParameterCount()) {
154 CHECK(Descriptor::AllowVarArgs());
155 return;
156 }
157 CHECK(MachineTypeMatches(Descriptor().GetParameterType(i),
158 list[reg_index]));
159 }
160 CheckArgsHelper<Descriptor, Args...>::Check(masm, i, args...);
161 }
162};
163
164template <typename Descriptor, typename... Args>
165void CheckArgs(BaselineAssembler* masm, Args... args) {
166 CheckArgsHelper<Descriptor, Args...>::Check(masm, 0, args...);
167}
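// A sketch of what the debug check amounts to, with hypothetical names: for
// a descriptor whose parameter 0 is MachineType::AnyTagged() and parameter 1
// is MachineType::Int32(), a call like
//   CheckArgs<MyDescriptor>(masm, some_register, int32_t{42});
// walks the argument pack, CHECKing each argument against the corresponding
// parameter type via MachineTypeMatches, and only tolerates surplus
// arguments if the descriptor allows varargs.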
168
169void CheckSettingDoesntClobber(Register target) {}
170template <typename Arg, typename... Args>
171void CheckSettingDoesntClobber(Register target, Arg arg, Args... args) {
172 DCHECK(!Clobbers(target, arg));
174}
175
176#else // DEBUG
177
178template <typename Descriptor, typename... Args>
179void CheckArgs(Args... args) {}
180
181template <typename... Args>
183
184#endif // DEBUG
185
186template <typename Descriptor, int ArgIndex, bool kIsRegister, typename... Args>
188
189template <typename Descriptor, int ArgIndex, bool kIsRegister>
190struct ArgumentSettingHelper<Descriptor, ArgIndex, kIsRegister> {
191 static void Set(BaselineAssembler* masm) {
192 // Should only ever be called for the end of register arguments.
193 static_assert(ArgIndex == Descriptor::GetRegisterParameterCount());
194 }
195};
196
197template <typename Descriptor, int ArgIndex, typename Arg, typename... Args>
198struct ArgumentSettingHelper<Descriptor, ArgIndex, true, Arg, Args...> {
199 static void Set(BaselineAssembler* masm, Arg arg, Args... args) {
200 static_assert(ArgIndex < Descriptor::GetRegisterParameterCount());
201 Register target = Descriptor::GetRegisterParameter(ArgIndex);
203 masm->Move(target, arg);
204 ArgumentSettingHelper<Descriptor, ArgIndex + 1,
205 (ArgIndex + 1 <
206 Descriptor::GetRegisterParameterCount()),
207 Args...>::Set(masm, args...);
208 }
209};
210
211template <typename Descriptor, int ArgIndex>
212struct ArgumentSettingHelper<Descriptor, ArgIndex, true,
213 interpreter::RegisterList> {
215 static_assert(ArgIndex < Descriptor::GetRegisterParameterCount());
216 DCHECK_EQ(ArgIndex + list.register_count(),
217 Descriptor::GetRegisterParameterCount());
218 for (int i = 0; ArgIndex + i < Descriptor::GetRegisterParameterCount();
219 ++i) {
220 Register target = Descriptor::GetRegisterParameter(ArgIndex + i);
221 masm->Move(target, masm->RegisterFrameOperand(list[i]));
222 }
223 }
224};
225
226template <typename Descriptor, int ArgIndex, typename Arg, typename... Args>
227struct ArgumentSettingHelper<Descriptor, ArgIndex, false, Arg, Args...> {
228 static void Set(BaselineAssembler* masm, Arg arg, Args... args) {
229 if (Descriptor::kStackArgumentOrder == StackArgumentOrder::kDefault) {
230 masm->Push(arg, args...);
231 } else {
232 masm->PushReverse(arg, args...);
233 }
234 }
235};
236
237template <Builtin kBuiltin, typename... Args>
240 CheckArgs<Descriptor>(masm, args...);
242 (0 < Descriptor::GetRegisterParameterCount()),
243 Args...>::Set(masm, args...);
244 if (Descriptor::HasContextParameter()) {
245 masm->LoadContext(Descriptor::ContextRegister());
246 }
247}
248
249} // namespace detail
250
251namespace {
252
253AssemblerOptions BaselineAssemblerOptions(Isolate* isolate) {
255 options.builtin_call_jump_mode =
256 isolate->is_short_builtin_calls_enabled()
259 return options;
260}
261
262// Rough upper-bound estimate. Copying the data is most likely more expensive
263// than pre-allocating a large enough buffer.
264#ifdef V8_TARGET_ARCH_IA32
265const int kAverageBytecodeToInstructionRatio = 5;
266#else
267const int kAverageBytecodeToInstructionRatio = 7;
268#endif
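// As a rough illustration of the estimate: on a non-IA32 target, a 1000-byte
// BytecodeArray yields 1000 * 7 = 7000 bytes, which AllocateBuffer below
// rounds up to the next 4 KB boundary, i.e. an 8 KB assembler buffer.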
269std::unique_ptr<AssemblerBuffer> AllocateBuffer(
270 DirectHandle<BytecodeArray> bytecodes) {
271 int estimated_size;
272 {
274 estimated_size = BaselineCompiler::EstimateInstructionSize(*bytecodes);
275 }
276 return NewAssemblerBuffer(RoundUp(estimated_size, 4 * KB));
277}
278} // namespace
279
281 LocalIsolate* local_isolate,
282 Handle<SharedFunctionInfo> shared_function_info,
283 Handle<BytecodeArray> bytecode)
284 : local_isolate_(local_isolate),
285 stats_(local_isolate->runtime_call_stats()),
286 shared_function_info_(shared_function_info),
287 bytecode_(bytecode),
288 zone_(local_isolate->allocator(), ZONE_NAME),
289 masm_(
290 local_isolate->GetMainThreadIsolateUnsafe(), &zone_,
291 BaselineAssemblerOptions(local_isolate->GetMainThreadIsolateUnsafe()),
292 CodeObjectRequired::kNo, AllocateBuffer(bytecode)),
293 basm_(&masm_),
294 iterator_(bytecode_),
295 labels_(zone_.AllocateArray<Label>(bytecode_->length())),
296 label_tags_(2 * bytecode_->length(), &zone_) {
297 // Empirically determined expected size of the offset table at the 95th %ile,
298 // based on the size of the bytecode, to be:
299 //
300 // 16 + (bytecode size) / 4
303}
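// As a worked example of the formula above: for a 400-byte bytecode array,
// the expected offset-table size is roughly 16 + 400 / 4 = 116 bytes.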
304
306 {
307 RCS_BASELINE_SCOPE(PreVisit);
308 // Mark exception handlers as valid indirect jump targets. This is required
309 // when CFI is enabled, to allow indirect jumps into baseline code.
310 HandlerTable table(*bytecode_);
311 for (int i = 0; i < table.NumberOfRangeEntries(); ++i) {
312 MarkIndirectJumpTarget(table.GetRangeHandler(i));
313 }
314 for (; !iterator_.done(); iterator_.Advance()) {
316 }
318 }
319
320 // No code generated yet.
321 DCHECK_EQ(__ pc_offset(), 0);
322 __ CodeEntry();
323
324 {
325 RCS_BASELINE_SCOPE(Visit);
326 Prologue();
327 AddPosition();
328 for (; !iterator_.done(); iterator_.Advance()) {
330 AddPosition();
331 }
332 }
333}
334
337 CodeDesc desc;
338 __ GetCode(local_isolate_, &desc);
339
340 // Allocate the bytecode offset table.
341 Handle<TrustedByteArray> bytecode_offset_table =
343
344 Factory::CodeBuilder code_builder(local_isolate_, desc, CodeKind::BASELINE);
345 code_builder.set_bytecode_offset_table(bytecode_offset_table);
346 if (shared_function_info_->HasInterpreterData(local_isolate_)) {
347 code_builder.set_interpreter_data(
350 } else {
351 code_builder.set_interpreter_data(bytecode_);
352 }
353 code_builder.set_parameter_count(bytecode_->parameter_count());
354 return code_builder.TryBuild();
355}
356
358 return bytecode->length() * kAverageBytecodeToInstructionRatio;
359}
360
362 return iterator().GetRegisterOperand(operand_index);
363}
364
365void BaselineCompiler::LoadRegister(Register output, int operand_index) {
366 __ LoadRegister(output, RegisterOperand(operand_index));
367}
368
369void BaselineCompiler::StoreRegister(int operand_index, Register value) {
370#ifdef DEBUG
371 effect_state_.CheckEffect();
372#endif
373 __ Move(RegisterOperand(operand_index), value);
374}
375
377 Register val1) {
378#ifdef DEBUG
379 effect_state_.CheckEffect();
380#endif
381 interpreter::Register reg0, reg1;
382 std::tie(reg0, reg1) = iterator().GetRegisterPairOperand(operand_index);
383 __ StoreRegister(reg0, val0);
384 __ StoreRegister(reg1, val1);
385}
386template <typename Type>
388 return Cast<Type>(
389 iterator().GetConstantForIndexOperand(operand_index, local_isolate_));
390}
392 return iterator().GetConstantAtIndexAsSmi(operand_index);
393}
394template <typename Type>
395void BaselineCompiler::LoadConstant(Register output, int operand_index) {
396 __ Move(output, Constant<Type>(operand_index));
397}
398uint32_t BaselineCompiler::Uint(int operand_index) {
399 return iterator().GetUnsignedImmediateOperand(operand_index);
400}
401int32_t BaselineCompiler::Int(int operand_index) {
402 return iterator().GetImmediateOperand(operand_index);
403}
404uint32_t BaselineCompiler::Index(int operand_index) {
405 return iterator().GetIndexOperand(operand_index);
406}
407uint32_t BaselineCompiler::Flag8(int operand_index) {
408 return iterator().GetFlag8Operand(operand_index);
409}
410uint32_t BaselineCompiler::Flag16(int operand_index) {
411 return iterator().GetFlag16Operand(operand_index);
412}
413uint32_t BaselineCompiler::RegisterCount(int operand_index) {
414 return iterator().GetRegisterCountOperand(operand_index);
415}
417 return TaggedIndex::FromIntptr(Index(operand_index));
418}
420 return TaggedIndex::FromIntptr(Uint(operand_index));
421}
423 return Smi::FromInt(Index(operand_index));
424}
426 return Smi::FromInt(Int(operand_index));
427}
429 return Smi::FromInt(Uint(operand_index));
430}
432 return Smi::FromInt(Flag8(operand_index));
433}
435 return Smi::FromInt(Flag16(operand_index));
436}
437
439 return __ FeedbackVectorOperand();
440}
441
444 __ Move(output, __ FeedbackVectorOperand());
445}
446
448 LoadFeedbackVector(output);
449 __ LoadTaggedField(output, output,
450 FeedbackVector::kClosureFeedbackCellArrayOffset);
451}
452
454 Register output, std::function<void(Label*, Label::Distance)> jump_func) {
455 Label done, set_true;
456 jump_func(&set_true, Label::kNear);
457 __ LoadRoot(output, RootIndex::kFalseValue);
458 __ Jump(&done, Label::kNear);
459 __ Bind(&set_true);
460 __ LoadRoot(output, RootIndex::kTrueValue);
461 __ Bind(&done);
462}
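// Illustrative use of SelectBooleanConstant (mirroring the VisitLogicalNot
// and VisitTest* bytecode handlers below): the callback receives the "true"
// label and emits the branch, e.g.
//   SelectBooleanConstant(kInterpreterAccumulatorRegister,
//                         [&](Label* is_true, Label::Distance distance) {
//                           __ JumpIfSmi(kInterpreterAccumulatorRegister,
//                                        is_true, distance);
//                         });
// leaves kTrueValue in the output register when the accumulator is a Smi and
// kFalseValue otherwise.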
463
467
469 switch (iterator().current_bytecode()) {
470 case interpreter::Bytecode::kJumpLoop:
471 EnsureLabel(iterator().GetJumpTargetOffset(),
473 break;
474 default:
475 break;
476 }
477}
478
480#ifdef DEBUG
481 effect_state_.clear();
482#endif
483 int offset = iterator().current_offset();
484 if (IsJumpTarget(offset)) __ Bind(&labels_[offset]);
485 // This is required when CFI is enabled.
487 __ JumpTarget();
488 }
489
491 std::ostringstream str;
492 iterator().PrintTo(str);
493 return str.str();
494 });
495
496 VerifyFrame();
497
498#ifdef V8_TRACE_UNOPTIMIZED
499 TraceBytecode(Runtime::kTraceUnoptimizedBytecodeEntry);
500#endif
501
502 {
503 interpreter::Bytecode bytecode = iterator().current_bytecode();
504
505#ifdef DEBUG
506 std::optional<EnsureAccumulatorPreservedScope> accumulator_preserved_scope;
507 // We should make sure to preserve the accumulator whenever the bytecode
508 // isn't registered as writing to it. We can't do this for jumps or switches
509 // though, since the control flow would not match the control flow of this
510 // scope.
511 if (v8_flags.slow_debug_code &&
515 accumulator_preserved_scope.emplace(&basm_);
516 }
517#endif // DEBUG
518
519 switch (bytecode) {
520#define BYTECODE_CASE(name, ...) \
521 case interpreter::Bytecode::k##name: \
522 Visit##name(); \
523 break;
525#undef BYTECODE_CASE
526 }
527 }
528
529#ifdef V8_TRACE_UNOPTIMIZED
530 TraceBytecode(Runtime::kTraceUnoptimizedBytecodeExit);
531#endif
532}
533
535 if (v8_flags.slow_debug_code) {
537 __ RecordComment(" -- Verify frame size");
539
540 __ RecordComment(" -- Verify feedback vector");
541 {
543 Register scratch = temps.AcquireScratch();
544 __ Move(scratch, __ FeedbackVectorOperand());
545 Label is_smi, is_ok;
546 __ JumpIfSmi(scratch, &is_smi);
547 __ JumpIfObjectTypeFast(kEqual, scratch, FEEDBACK_VECTOR_TYPE, &is_ok);
548 __ Bind(&is_smi);
549 __ masm()->Abort(AbortReason::kExpectedFeedbackVector);
550 __ Bind(&is_ok);
551 }
552
553 // TODO(leszeks): More verification.
554 }
555}
556
557#ifdef V8_TRACE_UNOPTIMIZED
558void BaselineCompiler::TraceBytecode(Runtime::FunctionId function_id) {
559 if (!v8_flags.trace_baseline_exec) return;
561 function_id == Runtime::kTraceUnoptimizedBytecodeEntry
562 ? "Trace bytecode entry"
563 : "Trace bytecode exit");
564 SaveAccumulatorScope accumulator_scope(this, &basm_);
565 CallRuntime(function_id, bytecode_,
567 iterator().current_offset()),
569}
570#endif
571
572#define DECLARE_VISITOR(name, ...) void Visit##name();
574#undef DECLARE_VISITOR
575
576#define DECLARE_VISITOR(name, ...) \
577 void VisitIntrinsic##name(interpreter::RegisterList args);
579#undef DECLARE_VISITOR
580
582 int weight, Label* label, Label* skip_interrupt_label,
583 StackCheckBehavior stack_check_behavior) {
584 if (weight != 0) {
586 __ AddToInterruptBudgetAndJumpIfNotExceeded(weight, skip_interrupt_label);
587
588 DCHECK_LT(weight, 0);
589 CallRuntime(stack_check_behavior == kEnableStackCheck
590 ? Runtime::kBytecodeBudgetInterruptWithStackCheck_Sparkplug
591 : Runtime::kBytecodeBudgetInterrupt_Sparkplug,
592 __ FunctionOperand());
593 }
594 if (label) __ Jump(label);
595}
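// Usage sketch: the back-edge handling in VisitJumpLoop below computes a
// negative weight (the back-edge distance in bytecodes), so each loop
// iteration decrements the interrupt budget and, once it is exhausted, calls
// the Sparkplug bytecode-budget-interrupt runtime function (with or without
// a stack check) before taking the jump.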
596
598 Label dont_jump;
602 __ Bind(&dont_jump);
603}
604
606 Label dont_jump;
610 __ Bind(&dont_jump);
611}
612
614 int target_offset = iterator().GetJumpTargetOffset();
615 return EnsureLabel(target_offset);
616}
617
618#if defined(DEBUG) || defined(V8_ENABLE_CET_SHADOW_STACK)
619// Allowlist to mark builtin calls during which it is impossible that the
620// sparkplug frame would have to be deoptimized, either because they don't
621// execute any user code, or because they would replace the current frame
622// anyway, e.g., due to OSR.
623constexpr static bool BuiltinMayDeopt(Builtin id) {
624 switch (id) {
625 case Builtin::kSuspendGeneratorBaseline:
626 case Builtin::kBaselineOutOfLinePrologue:
627 case Builtin::kIncBlockCounter:
628 case Builtin::kToObject:
629 case Builtin::kStoreScriptContextSlotBaseline:
630 case Builtin::kStoreCurrentScriptContextSlotBaseline:
631 // This one explicitly skips the construct if the debugger is enabled.
632 case Builtin::kFindNonDefaultConstructorOrConstruct:
633 return false;
634 default:
635 return true;
636 }
637}
638#endif // DEBUG || V8_ENABLE_CET_SHADOW_STACK
639
640template <Builtin kBuiltin, typename... Args>
642#ifdef DEBUG
643 effect_state_.CheckEffect();
644 if (BuiltinMayDeopt(kBuiltin)) {
645 effect_state_.MayDeopt();
646 }
647#endif
650 __ CallBuiltin(kBuiltin);
651#ifdef V8_ENABLE_CET_SHADOW_STACK
652 if (BuiltinMayDeopt(kBuiltin)) {
653 __ MaybeEmitPlaceHolderForDeopt();
654 }
655#endif // V8_ENABLE_CET_SHADOW_STACK
656}
657
658template <Builtin kBuiltin, typename... Args>
660#ifdef DEBUG
661 effect_state_.CheckEffect();
662#endif
664 __ TailCallBuiltin(kBuiltin);
665}
666
667template <typename... Args>
669#ifdef DEBUG
670 effect_state_.CheckEffect();
671 effect_state_.MayDeopt();
672#endif
673 __ LoadContext(kContextRegister);
674 int nargs = __ Push(args...);
675 __ CallRuntime(function, nargs);
676#ifdef V8_ENABLE_CET_SHADOW_STACK
677 __ MaybeEmitPlaceHolderForDeopt();
678#endif // V8_ENABLE_CET_SHADOW_STACK
679}
680
681// Returns into kInterpreterAccumulatorRegister
683 Label::Distance distance) {
686 // ToBooleanForBaselineJump returns the ToBoolean value into return reg 1, and
687 // the original value into kInterpreterAccumulatorRegister, so we don't have
688 // to worry about it getting clobbered.
690 __ JumpIfSmi(do_jump_if_true ? kNotEqual : kEqual, kReturnRegister1,
691 Smi::FromInt(0), label, distance);
692}
693
694void BaselineCompiler::VisitLdaZero() {
696}
697
698void BaselineCompiler::VisitLdaSmi() {
699 Tagged<Smi> constant = Smi::FromInt(iterator().GetImmediateOperand(0));
700 __ Move(kInterpreterAccumulatorRegister, constant);
701}
702
703void BaselineCompiler::VisitLdaUndefined() {
704 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
705}
706
707void BaselineCompiler::VisitLdaNull() {
708 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kNullValue);
709}
710
711void BaselineCompiler::VisitLdaTheHole() {
712 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTheHoleValue);
713}
714
715void BaselineCompiler::VisitLdaTrue() {
716 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
717}
718
719void BaselineCompiler::VisitLdaFalse() {
720 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
721}
722
723void BaselineCompiler::VisitLdaConstant() {
725}
726
727void BaselineCompiler::VisitLdaGlobal() {
729 IndexAsTagged(1)); // slot
730}
731
732void BaselineCompiler::VisitLdaGlobalInsideTypeof() {
734 Constant<Name>(0), // name
735 IndexAsTagged(1)); // slot
736}
737
738void BaselineCompiler::VisitStaGlobal() {
740 Constant<Name>(0), // name
742 IndexAsTagged(1)); // slot
743}
744
745void BaselineCompiler::VisitPushContext() {
746 BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
747 Register context = scratch_scope.AcquireScratch();
748 __ LoadContext(context);
750 StoreRegister(0, context);
751}
752
753void BaselineCompiler::VisitPopContext() {
754 BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
755 Register context = scratch_scope.AcquireScratch();
756 LoadRegister(context, 0);
757 __ StoreContext(context);
758}
759
760void BaselineCompiler::VisitLdaContextSlot() {
761 BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
762 Register context = scratch_scope.AcquireScratch();
763 LoadRegister(context, 0);
764 uint32_t index = Index(1);
765 uint32_t depth = Uint(2);
766 __ LdaContextSlot(context, index, depth);
767}
768
769void BaselineCompiler::VisitLdaScriptContextSlot() {
770 BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
771 Register context = scratch_scope.AcquireScratch();
772 Label done;
773 LoadRegister(context, 0);
774 uint32_t index = Index(1);
775 uint32_t depth = Uint(2);
776 __ LdaContextSlot(context, index, depth,
778 __ JumpIfSmi(kInterpreterAccumulatorRegister, &done);
779 __ JumpIfObjectTypeFast(kNotEqual, kInterpreterAccumulatorRegister,
780 HEAP_NUMBER_TYPE, &done, Label::kNear);
782 kInterpreterAccumulatorRegister, // heap number
783 context, // context
784 Smi::FromInt(index)); // slot
785 __ Bind(&done);
786}
787
788void BaselineCompiler::VisitLdaImmutableContextSlot() { VisitLdaContextSlot(); }
789
790void BaselineCompiler::VisitLdaCurrentContextSlot() {
791 BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
792 Register context = scratch_scope.AcquireScratch();
793 __ LoadContext(context);
794 __ LoadTaggedField(kInterpreterAccumulatorRegister, context,
796}
797
798void BaselineCompiler::VisitLdaCurrentScriptContextSlot() {
799 BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
800 Register context = scratch_scope.AcquireScratch();
801 Label done;
802 uint32_t index = Index(0);
803 __ LoadContext(context);
804 __ LoadTaggedField(kInterpreterAccumulatorRegister, context,
806 __ JumpIfSmi(kInterpreterAccumulatorRegister, &done);
807 __ JumpIfObjectTypeFast(kNotEqual, kInterpreterAccumulatorRegister,
808 HEAP_NUMBER_TYPE, &done, Label::kNear);
810 kInterpreterAccumulatorRegister, // heap number
811 context, // context
812 Smi::FromInt(index)); // slot
813 __ Bind(&done);
814}
815
816void BaselineCompiler::VisitLdaImmutableCurrentContextSlot() {
817 VisitLdaCurrentContextSlot();
818}
819
820void BaselineCompiler::VisitStaContextSlot() {
825 LoadRegister(context, 0);
826 uint32_t index = Index(1);
827 uint32_t depth = Uint(2);
828 __ StaContextSlot(context, value, index, depth);
829}
830
831void BaselineCompiler::VisitStaCurrentContextSlot() {
836 __ LoadContext(context);
837 __ StoreTaggedFieldWithWriteBarrier(
838 context, Context::OffsetOfElementAt(Index(0)), value);
839}
840
841void BaselineCompiler::VisitStaScriptContextSlot() {
846 LoadRegister(context, 0);
847 SaveAccumulatorScope accumulator_scope(this, &basm_);
849 context, // context
850 value, // value
851 IndexAsSmi(1), // slot
852 UintAsTagged(2)); // depth
853}
854
855void BaselineCompiler::VisitStaCurrentScriptContextSlot() {
858 SaveAccumulatorScope accumulator_scope(this, &basm_);
861 value, // value
862 IndexAsSmi(0)); // slot
863}
864
865void BaselineCompiler::VisitLdaLookupSlot() {
866 CallRuntime(Runtime::kLoadLookupSlot, Constant<Name>(0));
867}
868
869void BaselineCompiler::VisitLdaLookupContextSlot() {
872}
873
874void BaselineCompiler::VisitLdaLookupScriptContextSlot() {
877}
878
879void BaselineCompiler::VisitLdaLookupGlobalSlot() {
882}
883
884void BaselineCompiler::VisitLdaLookupSlotInsideTypeof() {
885 CallRuntime(Runtime::kLoadLookupSlotInsideTypeof, Constant<Name>(0));
886}
887
888void BaselineCompiler::VisitLdaLookupContextSlotInsideTypeof() {
891}
892
893void BaselineCompiler::VisitLdaLookupScriptContextSlotInsideTypeof() {
896}
897
898void BaselineCompiler::VisitLdaLookupGlobalSlotInsideTypeof() {
901}
902
903void BaselineCompiler::VisitStaLookupSlot() {
904 uint32_t flags = Flag8(1);
905 Runtime::FunctionId function_id;
907 function_id = Runtime::kStoreLookupSlot_Strict;
908 } else if (flags &
910 function_id = Runtime::kStoreLookupSlot_SloppyHoisting;
911 } else {
912 function_id = Runtime::kStoreLookupSlot_Sloppy;
913 }
914 CallRuntime(function_id, Constant<Name>(0), // name
916}
917
918void BaselineCompiler::VisitLdar() {
920}
921
922void BaselineCompiler::VisitStar() {
924}
925
926#define SHORT_STAR_VISITOR(Name, ...) \
927 void BaselineCompiler::Visit##Name() { \
928 __ StoreRegister( \
929 interpreter::Register::FromShortStar(interpreter::Bytecode::k##Name), \
930 kInterpreterAccumulatorRegister); \
931 }
933#undef SHORT_STAR_VISITOR
934
935void BaselineCompiler::VisitMov() {
936 BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
937 Register scratch = scratch_scope.AcquireScratch();
938 LoadRegister(scratch, 0);
939 StoreRegister(1, scratch);
940}
941
942void BaselineCompiler::VisitGetNamedProperty() {
944 Constant<Name>(1), // name
945 IndexAsTagged(2)); // slot
946}
947
948void BaselineCompiler::VisitGetNamedPropertyFromSuper() {
949 __ LoadPrototype(
950 LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister(),
952
954 RegisterOperand(0), // object
955 LoadWithReceiverAndVectorDescriptor::
956 LookupStartObjectRegister(), // lookup start
957 Constant<Name>(1), // name
958 IndexAsTagged(2)); // slot
959}
960
961void BaselineCompiler::VisitGetKeyedProperty() {
963 RegisterOperand(0), // object
965 IndexAsTagged(1)); // slot
966}
967
968void BaselineCompiler::VisitGetEnumeratedKeyedProperty() {
969 DCHECK(v8_flags.enable_enumerated_keyed_access_bytecode);
971 RegisterOperand(0), // object
973 RegisterOperand(1), // enum index
974 RegisterOperand(2), // cache type
975 IndexAsTagged(3)); // slot
976}
977
978void BaselineCompiler::VisitLdaModuleVariable() {
979 BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
980 Register scratch = scratch_scope.AcquireScratch();
981 __ LoadContext(scratch);
982 int cell_index = Int(0);
983 int depth = Uint(1);
984 __ LdaModuleVariable(scratch, cell_index, depth);
985}
986
987void BaselineCompiler::VisitStaModuleVariable() {
988 int cell_index = Int(0);
989 if (V8_UNLIKELY(cell_index < 0)) {
990 // Not supported (probably never).
991 CallRuntime(Runtime::kAbort,
992 Smi::FromInt(static_cast<int>(
993 AbortReason::kUnsupportedModuleOperation)));
994 __ Trap();
995 }
1000 __ LoadContext(scratch);
1001 int depth = Uint(1);
1002 __ StaModuleVariable(scratch, value, cell_index, depth);
1003}
1004
1005void BaselineCompiler::VisitSetNamedProperty() {
1006 // StoreIC is currently a base class for multiple property store operations
1007 // and contains mixed logic for named and keyed, set and define operations;
1008 // the paths are controlled by feedback.
1009 // TODO(v8:12548): refactor SetNamedIC as a subclass of StoreIC, which can be
1010 // called here.
1012 RegisterOperand(0), // object
1013 Constant<Name>(1), // name
1015 IndexAsTagged(2)); // slot
1016}
1017
1018void BaselineCompiler::VisitDefineNamedOwnProperty() {
1020 RegisterOperand(0), // object
1021 Constant<Name>(1), // name
1023 IndexAsTagged(2)); // slot
1024}
1025
1026void BaselineCompiler::VisitSetKeyedProperty() {
1027 // KeyedStoreIC is currently a base class for multiple keyed property store
1028 // operations and contains mixed logic for set and define operations;
1029 // the paths are controlled by feedback.
1030 // TODO(v8:12548): refactor SetKeyedIC as a subclass of KeyedStoreIC, which
1031 // can be called here.
1033 RegisterOperand(0), // object
1034 RegisterOperand(1), // key
1036 IndexAsTagged(2)); // slot
1037}
1038
1039void BaselineCompiler::VisitDefineKeyedOwnProperty() {
1041 RegisterOperand(0), // object
1042 RegisterOperand(1), // key
1044 Flag8AsSmi(2), // flags
1045 IndexAsTagged(3)); // slot
1046}
1047
1048void BaselineCompiler::VisitStaInArrayLiteral() {
1050 RegisterOperand(0), // object
1051 RegisterOperand(1), // name
1053 IndexAsTagged(2)); // slot
1054}
1055
1056void BaselineCompiler::VisitDefineKeyedOwnPropertyInLiteral() {
1057 // Strictly, we should save the accumulator here, since the
1058 // DefineKeyedOwnPropertyInLiteral bytecode doesn't write the accumulator.
1059 // However, Runtime::kDefineKeyedOwnPropertyInLiteral returns the value it
1060 // received from the accumulator, so this still works without saving it.
1061 CallRuntime(Runtime::kDefineKeyedOwnPropertyInLiteral,
1062 RegisterOperand(0), // object
1063 RegisterOperand(1), // name
1065 Flag8AsSmi(2), // flags
1066 FeedbackVector(), // feedback vector
1067 IndexAsTagged(3)); // slot
1068}
1069
1070void BaselineCompiler::VisitAdd() {
1073}
1074
1075void BaselineCompiler::VisitSub() {
1078}
1079
1080void BaselineCompiler::VisitMul() {
1083}
1084
1085void BaselineCompiler::VisitDiv() {
1088}
1089
1090void BaselineCompiler::VisitMod() {
1093}
1094
1095void BaselineCompiler::VisitExp() {
1098}
1099
1100void BaselineCompiler::VisitBitwiseOr() {
1103}
1104
1105void BaselineCompiler::VisitBitwiseXor() {
1108}
1109
1110void BaselineCompiler::VisitBitwiseAnd() {
1113}
1114
1115void BaselineCompiler::VisitShiftLeft() {
1118}
1119
1120void BaselineCompiler::VisitShiftRight() {
1123}
1124
1125void BaselineCompiler::VisitShiftRightLogical() {
1128}
1129
1130void BaselineCompiler::VisitAddSmi() {
1132 IntAsSmi(0), Index(1));
1133}
1134
1135void BaselineCompiler::VisitSubSmi() {
1137 IntAsSmi(0), Index(1));
1138}
1139
1140void BaselineCompiler::VisitMulSmi() {
1142 IntAsSmi(0), Index(1));
1143}
1144
1145void BaselineCompiler::VisitDivSmi() {
1147 IntAsSmi(0), Index(1));
1148}
1149
1150void BaselineCompiler::VisitModSmi() {
1152 IntAsSmi(0), Index(1));
1153}
1154
1155void BaselineCompiler::VisitExpSmi() {
1158}
1159
1160void BaselineCompiler::VisitBitwiseOrSmi() {
1162 IntAsSmi(0), Index(1));
1163}
1164
1165void BaselineCompiler::VisitBitwiseXorSmi() {
1167 IntAsSmi(0), Index(1));
1168}
1169
1170void BaselineCompiler::VisitBitwiseAndSmi() {
1172 IntAsSmi(0), Index(1));
1173}
1174
1175void BaselineCompiler::VisitShiftLeftSmi() {
1177 IntAsSmi(0), Index(1));
1178}
1179
1180void BaselineCompiler::VisitShiftRightSmi() {
1182 IntAsSmi(0), Index(1));
1183}
1184
1185void BaselineCompiler::VisitShiftRightLogicalSmi() {
1188}
1189
1190void BaselineCompiler::VisitInc() {
1192 Index(0));
1193}
1194
1195void BaselineCompiler::VisitDec() {
1197 Index(0));
1198}
1199
1200void BaselineCompiler::VisitNegate() {
1202 Index(0));
1203}
1204
1205void BaselineCompiler::VisitBitwiseNot() {
1207 Index(0));
1208}
1209
1210void BaselineCompiler::VisitToBooleanLogicalNot() {
1212 [&](Label* if_true, Label::Distance distance) {
1213 JumpIfToBoolean(false, if_true, distance);
1214 });
1215}
1216
1217void BaselineCompiler::VisitLogicalNot() {
1219 [&](Label* if_true, Label::Distance distance) {
1221 RootIndex::kFalseValue, if_true,
1222 distance);
1223 });
1224}
1225
1226void BaselineCompiler::VisitTypeOf() {
1228 Index(0));
1229}
1230
1231void BaselineCompiler::VisitDeletePropertyStrict() {
1232 BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
1233 Register scratch = scratch_scope.AcquireScratch();
1234 __ Move(scratch, kInterpreterAccumulatorRegister);
1237}
1238
1239void BaselineCompiler::VisitDeletePropertySloppy() {
1240 BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
1241 Register scratch = scratch_scope.AcquireScratch();
1242 __ Move(scratch, kInterpreterAccumulatorRegister);
1245}
1246
1247void BaselineCompiler::VisitGetSuperConstructor() {
1248 BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
1249 Register prototype = scratch_scope.AcquireScratch();
1250 __ LoadPrototype(prototype, kInterpreterAccumulatorRegister);
1251 StoreRegister(0, prototype);
1252}
1253
1254void BaselineCompiler::VisitFindNonDefaultConstructorOrConstruct() {
1255 SaveAccumulatorScope accumulator_scope(this, &basm_);
1259}
1260
1261namespace {
1262constexpr Builtin ConvertReceiverModeToCompactBuiltin(
1263 ConvertReceiverMode mode) {
1264 switch (mode) {
1266 return Builtin::kCall_ReceiverIsAny_Baseline_Compact;
1268 return Builtin::kCall_ReceiverIsNullOrUndefined_Baseline_Compact;
1270 return Builtin::kCall_ReceiverIsNotNullOrUndefined_Baseline_Compact;
1271 }
1272}
1273constexpr Builtin ConvertReceiverModeToBuiltin(ConvertReceiverMode mode) {
1274 switch (mode) {
1276 return Builtin::kCall_ReceiverIsAny_Baseline;
1278 return Builtin::kCall_ReceiverIsNullOrUndefined_Baseline;
1280 return Builtin::kCall_ReceiverIsNotNullOrUndefined_Baseline;
1281 }
1282}
1283} // namespace
1284
1285template <ConvertReceiverMode kMode, typename... Args>
1286void BaselineCompiler::BuildCall(uint32_t slot, uint32_t arg_count,
1287 Args... args) {
1288 uint32_t bitfield;
1290 &bitfield)) {
1292 RegisterOperand(0), // kFunction
1293 bitfield, // kActualArgumentsCount | kSlot
1294 args...); // Arguments
1295 } else {
1297 RegisterOperand(0), // kFunction
1298 arg_count, // kActualArgumentsCount
1299 slot, // kSlot
1300 args...); // Arguments
1301 }
1302}
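// Note on the two paths above: when kActualArgumentsCount and kSlot can be
// packed into the single compact bitfield, the *_Baseline_Compact variant of
// the Call builtin is used (taking one packed operand); otherwise the regular
// *_Baseline builtin receives kActualArgumentsCount and kSlot separately.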
1303
1304void BaselineCompiler::VisitCallAnyReceiver() {
1305 interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
1306 uint32_t arg_count = args.register_count();
1308}
1309
1310void BaselineCompiler::VisitCallProperty() {
1311 interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
1312 uint32_t arg_count = args.register_count();
1314 args);
1315}
1316
1317void BaselineCompiler::VisitCallProperty0() {
1320}
1321
1322void BaselineCompiler::VisitCallProperty1() {
1325}
1326
1327void BaselineCompiler::VisitCallProperty2() {
1330 RegisterOperand(3));
1331}
1332
1333void BaselineCompiler::VisitCallUndefinedReceiver() {
1334 interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
1335 uint32_t arg_count = JSParameterCount(args.register_count());
1337 Index(3), arg_count, RootIndex::kUndefinedValue, args);
1338}
1339
1340void BaselineCompiler::VisitCallUndefinedReceiver0() {
1342 Index(1), JSParameterCount(0), RootIndex::kUndefinedValue);
1343}
1344
1345void BaselineCompiler::VisitCallUndefinedReceiver1() {
1347 Index(2), JSParameterCount(1), RootIndex::kUndefinedValue,
1348 RegisterOperand(1));
1349}
1350
1351void BaselineCompiler::VisitCallUndefinedReceiver2() {
1353 Index(3), JSParameterCount(2), RootIndex::kUndefinedValue,
1355}
1356
1357void BaselineCompiler::VisitCallWithSpread() {
1358 interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
1359
1360 // Do not push the spread argument
1361 interpreter::Register spread_register = args.last_register();
1362 args = args.Truncate(args.register_count() - 1);
1363
1364 uint32_t arg_count = args.register_count();
1365
1367 RegisterOperand(0), // kFunction
1368 arg_count, // kActualArgumentsCount
1369 spread_register, // kSpread
1370 Index(3), // kSlot
1371 args);
1372}
1373
1374void BaselineCompiler::VisitCallRuntime() {
1375 CallRuntime(iterator().GetRuntimeIdOperand(0),
1376 iterator().GetRegisterListOperand(1));
1377}
1378
1379void BaselineCompiler::VisitCallRuntimeForPair() {
1380 auto builtin = iterator().GetRuntimeIdOperand(0);
1381 switch (builtin) {
1382 case Runtime::kLoadLookupSlotForCall: {
1383 // TODO(olivf): Once we have more builtins to support here, we should find
1384 // out how to do this generically.
1385 auto in = iterator().GetRegisterListOperand(1);
1386 auto out = iterator().GetRegisterPairOperand(3);
1387 BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
1388 Register out_reg = scratch_scope.AcquireScratch();
1389 __ RegisterFrameAddress(out.first, out_reg);
1390 DCHECK_EQ(in.register_count(), 1);
1391 CallRuntime(Runtime::kLoadLookupSlotForCall_Baseline, in.first_register(),
1392 out_reg);
1393 break;
1394 }
1395 default:
1396 UNREACHABLE();
1397 }
1398}
1399
1400void BaselineCompiler::VisitCallJSRuntime() {
1401 interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
1402 uint32_t arg_count = JSParameterCount(args.register_count());
1403
1404 // Load context for LoadNativeContextSlot.
1405 __ LoadContext(kContextRegister);
1406 __ LoadNativeContextSlot(kJavaScriptCallTargetRegister,
1407 iterator().GetNativeContextIndexOperand(0));
1409 kJavaScriptCallTargetRegister, // kFunction
1410 arg_count, // kActualArgumentsCount
1411 RootIndex::kUndefinedValue, // kReceiver
1412 args);
1413}
1414
1415void BaselineCompiler::VisitInvokeIntrinsic() {
1416 Runtime::FunctionId intrinsic_id = iterator().GetIntrinsicIdOperand(0);
1417 interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
1418 switch (intrinsic_id) {
1419#define CASE(Name, ...) \
1420 case Runtime::kInline##Name: \
1421 VisitIntrinsic##Name(args); \
1422 break;
1424#undef CASE
1425
1426 default:
1427 UNREACHABLE();
1428 }
1429}
1430
1431void BaselineCompiler::VisitIntrinsicCopyDataProperties(
1432 interpreter::RegisterList args) {
1434}
1435
1436void BaselineCompiler::
1437 VisitIntrinsicCopyDataPropertiesWithExcludedPropertiesOnStack(
1438 interpreter::RegisterList args) {
1439 BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
1440 Register rscratch = scratch_scope.AcquireScratch();
1441 // Use an offset from args[0] instead of args[1] to pass a valid "end of"
1442 // pointer in the case where args.register_count() == 1.
1443 basm_.RegisterFrameAddress(interpreter::Register(args[0].index() + 1),
1444 rscratch);
1446 args[0], args.register_count() - 1, rscratch);
1447}
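// Concrete case of the comment above: when args.register_count() == 1 there
// is no args[1], so rscratch is computed as the frame address of the register
// one past args[0]; the builtin then receives an excluded-property count of 0
// together with a pointer that is still a valid "end of excluded properties"
// address.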
1448
1449void BaselineCompiler::VisitIntrinsicCreateIterResultObject(
1450 interpreter::RegisterList args) {
1452}
1453
1454void BaselineCompiler::VisitIntrinsicCreateAsyncFromSyncIterator(
1455 interpreter::RegisterList args) {
1457}
1458
1459void BaselineCompiler::VisitIntrinsicCreateJSGeneratorObject(
1460 interpreter::RegisterList args) {
1462}
1463
1464void BaselineCompiler::VisitIntrinsicGeneratorGetResumeMode(
1465 interpreter::RegisterList args) {
1467 __ LoadTaggedField(kInterpreterAccumulatorRegister,
1469 JSGeneratorObject::kResumeModeOffset);
1470}
1471
1472void BaselineCompiler::VisitIntrinsicGeneratorClose(
1473 interpreter::RegisterList args) {
1475 __ StoreTaggedSignedField(kInterpreterAccumulatorRegister,
1476 JSGeneratorObject::kContinuationOffset,
1478 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1479}
1480
1481void BaselineCompiler::VisitIntrinsicGetImportMetaObject(
1482 interpreter::RegisterList args) {
1484}
1485
1486void BaselineCompiler::VisitIntrinsicAsyncFunctionAwait(
1487 interpreter::RegisterList args) {
1489}
1490
1491void BaselineCompiler::VisitIntrinsicAsyncFunctionEnter(
1492 interpreter::RegisterList args) {
1494}
1495
1496void BaselineCompiler::VisitIntrinsicAsyncFunctionReject(
1497 interpreter::RegisterList args) {
1499}
1500
1501void BaselineCompiler::VisitIntrinsicAsyncFunctionResolve(
1502 interpreter::RegisterList args) {
1504}
1505
1506void BaselineCompiler::VisitIntrinsicAsyncGeneratorAwait(
1507 interpreter::RegisterList args) {
1509}
1510
1511void BaselineCompiler::VisitIntrinsicAsyncGeneratorReject(
1512 interpreter::RegisterList args) {
1514}
1515
1516void BaselineCompiler::VisitIntrinsicAsyncGeneratorResolve(
1517 interpreter::RegisterList args) {
1519}
1520
1521void BaselineCompiler::VisitIntrinsicAsyncGeneratorYieldWithAwait(
1522 interpreter::RegisterList args) {
1524}
1525
1526void BaselineCompiler::VisitConstruct() {
1527 interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
1528 uint32_t arg_count = JSParameterCount(args.register_count());
1530 RegisterOperand(0), // kFunction
1531 kInterpreterAccumulatorRegister, // kNewTarget
1532 arg_count, // kActualArgumentsCount
1533 Index(3), // kSlot
1534 RootIndex::kUndefinedValue, // kReceiver
1535 args);
1536}
1537
1538void BaselineCompiler::VisitConstructWithSpread() {
1539 interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
1540
1541 // Do not push the spread argument
1542 interpreter::Register spread_register = args.last_register();
1543 args = args.Truncate(args.register_count() - 1);
1544
1545 uint32_t arg_count = JSParameterCount(args.register_count());
1546
1547 using Descriptor =
1548 CallInterfaceDescriptorFor<Builtin::kConstructWithSpread_Baseline>::type;
1550 Descriptor::GetRegisterParameter(Descriptor::kNewTarget);
1552
1554 RegisterOperand(0), // kFunction
1555 new_target, // kNewTarget
1556 arg_count, // kActualArgumentsCount
1557 spread_register, // kSpread
1558 IndexAsTagged(3), // kSlot
1559 RootIndex::kUndefinedValue, // kReceiver
1560 args);
1561}
1562
1563void BaselineCompiler::VisitConstructForwardAllArgs() {
1564 using Descriptor = CallInterfaceDescriptorFor<
1565 Builtin::kConstructForwardAllArgs_Baseline>::type;
1567 Descriptor::GetRegisterParameter(Descriptor::kNewTarget);
1569
1571 RegisterOperand(0), // kFunction
1572 new_target, // kNewTarget
1573 IndexAsTagged(1)); // kSlot
1574}
1575
1576void BaselineCompiler::VisitTestEqual() {
1579}
1580
1581void BaselineCompiler::VisitTestEqualStrict() {
1584}
1585
1586void BaselineCompiler::VisitTestLessThan() {
1589}
1590
1591void BaselineCompiler::VisitTestGreaterThan() {
1594}
1595
1596void BaselineCompiler::VisitTestLessThanOrEqual() {
1599}
1600
1601void BaselineCompiler::VisitTestGreaterThanOrEqual() {
1604}
1605
1606void BaselineCompiler::VisitTestReferenceEqual() {
1609 [&](Label* is_true, Label::Distance distance) {
1610 __ JumpIfTagged(kEqual, __ RegisterFrameOperand(RegisterOperand(0)),
1611 kInterpreterAccumulatorRegister, is_true, distance);
1612 });
1613}
1614
1615void BaselineCompiler::VisitTestInstanceOf() {
1616 using Descriptor =
1617 CallInterfaceDescriptorFor<Builtin::kInstanceOf_Baseline>::type;
1618 Register callable = Descriptor::GetRegisterParameter(Descriptor::kRight);
1619 __ Move(callable, kInterpreterAccumulatorRegister);
1620
1622 callable, // callable
1623 Index(1)); // slot
1624}
1625
1626void BaselineCompiler::VisitTestIn() {
1629 RegisterOperand(0), // name
1630 IndexAsTagged(1)); // slot
1631}
1632
1633void BaselineCompiler::VisitTestUndetectable() {
1634 Label done, is_smi, not_undetectable;
1635 __ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
1636
1638 __ LoadMap(map_bit_field, kInterpreterAccumulatorRegister);
1639 __ LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
1640 __ TestAndBranch(map_bit_field, Map::Bits1::IsUndetectableBit::kMask, kZero,
1641 &not_undetectable, Label::kNear);
1642
1643 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
1644 __ Jump(&done, Label::kNear);
1645
1646 __ Bind(&is_smi);
1647 __ Bind(&not_undetectable);
1648 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
1649 __ Bind(&done);
1650}
1651
1652void BaselineCompiler::VisitTestNull() {
1654 [&](Label* is_true, Label::Distance distance) {
1656 RootIndex::kNullValue, is_true,
1657 distance);
1658 });
1659}
1660
1661void BaselineCompiler::VisitTestUndefined() {
1663 [&](Label* is_true, Label::Distance distance) {
1665 RootIndex::kUndefinedValue, is_true,
1666 distance);
1667 });
1668}
1669
1670void BaselineCompiler::VisitTestTypeOf() {
1671 BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
1672
1673 auto literal_flag =
1675
1676 Label done;
1677 switch (literal_flag) {
1678 case interpreter::TestTypeOfFlags::LiteralFlag::kNumber: {
1679 Label is_smi, is_heap_number;
1680 __ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
1681 __ JumpIfObjectTypeFast(kEqual, kInterpreterAccumulatorRegister,
1682 HEAP_NUMBER_TYPE, &is_heap_number, Label::kNear);
1683
1684 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
1685 __ Jump(&done, Label::kNear);
1686
1687 __ Bind(&is_smi);
1688 __ Bind(&is_heap_number);
1689 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
1690 break;
1691 }
1692 case interpreter::TestTypeOfFlags::LiteralFlag::kString: {
1693 Label is_smi, bad_instance_type;
1694 __ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
1697 FIRST_NONSTRING_TYPE, scratch_scope.AcquireScratch(),
1698 &bad_instance_type, Label::kNear);
1699
1700 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
1701 __ Jump(&done, Label::kNear);
1702
1703 __ Bind(&is_smi);
1704 __ Bind(&bad_instance_type);
1705 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
1706 break;
1707 }
1708 case interpreter::TestTypeOfFlags::LiteralFlag::kSymbol: {
1709 Label is_smi, bad_instance_type;
1710 __ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
1711 __ JumpIfObjectTypeFast(kNotEqual, kInterpreterAccumulatorRegister,
1712 SYMBOL_TYPE, &bad_instance_type, Label::kNear);
1713
1714 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
1715 __ Jump(&done, Label::kNear);
1716
1717 __ Bind(&is_smi);
1718 __ Bind(&bad_instance_type);
1719 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
1720 break;
1721 }
1722 case interpreter::TestTypeOfFlags::LiteralFlag::kBoolean: {
1723 Label is_true, is_false;
1724 __ JumpIfRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue,
1725 &is_true, Label::kNear);
1726 __ JumpIfRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue,
1727 &is_false, Label::kNear);
1728
1729 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
1730 __ Jump(&done, Label::kNear);
1731
1732 __ Bind(&is_true);
1733 __ Bind(&is_false);
1734 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
1735 break;
1736 }
1737 case interpreter::TestTypeOfFlags::LiteralFlag::kBigInt: {
1738 Label is_smi, bad_instance_type;
1739 __ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
1740 __ JumpIfObjectTypeFast(kNotEqual, kInterpreterAccumulatorRegister,
1741 BIGINT_TYPE, &bad_instance_type, Label::kNear);
1742
1743 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
1744 __ Jump(&done, Label::kNear);
1745
1746 __ Bind(&is_smi);
1747 __ Bind(&bad_instance_type);
1748 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
1749 break;
1750 }
1751 case interpreter::TestTypeOfFlags::LiteralFlag::kUndefined: {
1752 Label is_smi, is_null, not_undetectable;
1753 __ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
1754
1755 // null is undetectable, so test it explicitly, and return false.
1756 __ JumpIfRoot(kInterpreterAccumulatorRegister, RootIndex::kNullValue,
1757 &is_null, Label::kNear);
1758
1759 // All other undetectable maps are typeof undefined.
1761 __ LoadMap(map_bit_field, kInterpreterAccumulatorRegister);
1762 __ LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
1763 __ TestAndBranch(map_bit_field, Map::Bits1::IsUndetectableBit::kMask,
1764 kZero, &not_undetectable, Label::kNear);
1765
1766 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
1767 __ Jump(&done, Label::kNear);
1768
1769 __ Bind(&is_smi);
1770 __ Bind(&is_null);
1771 __ Bind(&not_undetectable);
1772 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
1773 break;
1774 }
1775 case interpreter::TestTypeOfFlags::LiteralFlag::kFunction: {
1776 Label is_smi, not_callable, undetectable;
1777 __ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
1778
1779 // Check if the map is callable but not undetectable.
1781 __ LoadMap(map_bit_field, kInterpreterAccumulatorRegister);
1782 __ LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
1783 __ TestAndBranch(map_bit_field, Map::Bits1::IsCallableBit::kMask, kZero,
1784 &not_callable, Label::kNear);
1785 __ TestAndBranch(map_bit_field, Map::Bits1::IsUndetectableBit::kMask,
1786 kNotZero, &undetectable, Label::kNear);
1787
1788 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
1789 __ Jump(&done, Label::kNear);
1790
1791 __ Bind(&is_smi);
1792 __ Bind(&not_callable);
1793 __ Bind(&undetectable);
1794 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
1795 break;
1796 }
1797 case interpreter::TestTypeOfFlags::LiteralFlag::kObject: {
1798 Label is_smi, is_null, bad_instance_type, undetectable_or_callable;
1799 __ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
1800
1801 // If the object is null, return true.
1802 __ JumpIfRoot(kInterpreterAccumulatorRegister, RootIndex::kNullValue,
1803 &is_null, Label::kNear);
1804
1805 // If the object's instance type isn't within the range, return false.
1806 static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
1807 Register map = scratch_scope.AcquireScratch();
1809 FIRST_JS_RECEIVER_TYPE, map, &bad_instance_type,
1810 Label::kNear);
1811
1812 // If the map is undetectable or callable, return false.
1814 __ LoadWord8Field(map_bit_field, map, Map::kBitFieldOffset);
1815 __ TestAndBranch(map_bit_field,
1816 Map::Bits1::IsUndetectableBit::kMask |
1817 Map::Bits1::IsCallableBit::kMask,
1818 kNotZero, &undetectable_or_callable, Label::kNear);
1819
1820 __ Bind(&is_null);
1821 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
1822 __ Jump(&done, Label::kNear);
1823
1824 __ Bind(&is_smi);
1825 __ Bind(&bad_instance_type);
1826 __ Bind(&undetectable_or_callable);
1827 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
1828 break;
1829 }
1830 case interpreter::TestTypeOfFlags::LiteralFlag::kOther:
1831 default:
1832 UNREACHABLE();
1833 }
1834 __ Bind(&done);
1835}
1836
1837void BaselineCompiler::VisitToName() {
1839}
1840
1841void BaselineCompiler::VisitToNumber() {
1843 Index(0));
1844}
1845
1846void BaselineCompiler::VisitToNumeric() {
1848 Index(0));
1849}
1850
1851void BaselineCompiler::VisitToObject() {
1852 SaveAccumulatorScope save_accumulator(this, &basm_);
1855}
1856
1857void BaselineCompiler::VisitToString() {
1859}
1860
1861void BaselineCompiler::VisitToBoolean() {
1863}
1864
1865void BaselineCompiler::VisitCreateRegExpLiteral() {
1867 FeedbackVector(), // feedback vector
1868 IndexAsTagged(1), // slot
1869 Constant<HeapObject>(0), // pattern
1870 Flag16AsSmi(2)); // flags
1871}
1872
1873void BaselineCompiler::VisitCreateArrayLiteral() {
1874 uint32_t flags = Flag8(2);
1875 int32_t flags_raw = static_cast<int32_t>(
1877 if (flags &
1880 FeedbackVector(), // feedback vector
1881 IndexAsTagged(1), // slot
1882 Constant<HeapObject>(0), // constant elements
1883 Smi::FromInt(flags_raw)); // flags
1884 } else {
1886 FeedbackVector(), // feedback vector
1887 IndexAsTagged(1), // slot
1888 Constant<HeapObject>(0), // constant elements
1889 Smi::FromInt(flags_raw)); // flags
1890 }
1891}
1892
1893void BaselineCompiler::VisitCreateArrayFromIterable() {
1896}
1897
1898void BaselineCompiler::VisitCreateEmptyArrayLiteral() {
1900 IndexAsTagged(0));
1901}
1902
1903void BaselineCompiler::VisitCreateObjectLiteral() {
1904 uint32_t flags = Flag8(2);
1905 int32_t flags_raw = static_cast<int32_t>(
1907 if (flags &
1910 FeedbackVector(), // feedback vector
1911 IndexAsTagged(1), // slot
1913 Smi::FromInt(flags_raw)); // flags
1914 } else {
1916 FeedbackVector(), // feedback vector
1917 IndexAsTagged(1), // slot
1919 Smi::FromInt(flags_raw)); // flags
1920 }
1921}
1922
1923void BaselineCompiler::VisitCreateEmptyObjectLiteral() {
1925}
1926
1927void BaselineCompiler::VisitCloneObject() {
1928 uint32_t flags = Flag8(1);
1929 int32_t raw_flags =
1932 RegisterOperand(0), // source
1933 Smi::FromInt(raw_flags), // flags
1934 IndexAsTagged(2)); // slot
1935}
1936
1937void BaselineCompiler::VisitGetTemplateObject() {
1938 BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
1940 shared_function_info_, // shared function info
1941 Constant<HeapObject>(0), // description
1942 Index(1), // slot
1943 FeedbackVector()); // feedback_vector
1944}
1945
1946void BaselineCompiler::VisitCreateClosure() {
1947 Register feedback_cell =
1948 FastNewClosureBaselineDescriptor::GetRegisterParameter(
1949 FastNewClosureBaselineDescriptor::kFeedbackCell);
1950 LoadClosureFeedbackArray(feedback_cell);
1951 __ LoadFixedArrayElement(feedback_cell, feedback_cell, Index(1));
1952
1953 uint32_t flags = Flag8(2);
1956 Constant<SharedFunctionInfo>(0), feedback_cell);
1957 } else {
1958 Runtime::FunctionId function_id =
1960 ? Runtime::kNewClosure_Tenured
1961 : Runtime::kNewClosure;
1962 CallRuntime(function_id, Constant<SharedFunctionInfo>(0), feedback_cell);
1963 }
1964}
1965
1966void BaselineCompiler::VisitCreateBlockContext() {
1967 CallRuntime(Runtime::kPushBlockContext, Constant<ScopeInfo>(0));
1968}
1969
1970void BaselineCompiler::VisitCreateCatchContext() {
1971 CallRuntime(Runtime::kPushCatchContext,
1972 RegisterOperand(0), // exception
1974}
1975
1976void BaselineCompiler::VisitCreateFunctionContext() {
1978 uint32_t slot_count = Uint(1);
1980 DCHECK_EQ(info->scope_type(), ScopeType::FUNCTION_SCOPE);
1982}
1983
1984void BaselineCompiler::VisitCreateEvalContext() {
1986 uint32_t slot_count = Uint(1);
1987 if (slot_count < static_cast<uint32_t>(
1989 DCHECK_EQ(info->scope_type(), ScopeType::EVAL_SCOPE);
1991 } else {
1992 CallRuntime(Runtime::kNewFunctionContext, Constant<ScopeInfo>(0));
1993 }
1994}
1995
1996void BaselineCompiler::VisitCreateWithContext() {
1997 CallRuntime(Runtime::kPushWithContext,
1998 RegisterOperand(0), // object
2000}
2001
2002void BaselineCompiler::VisitCreateMappedArguments() {
2003 if (shared_function_info_->has_duplicate_parameters()) {
2004 CallRuntime(Runtime::kNewSloppyArguments, __ FunctionOperand());
2005 } else {
2007 }
2008}
2009
2010void BaselineCompiler::VisitCreateUnmappedArguments() {
2012}
2013
2014void BaselineCompiler::VisitCreateRestParameter() {
2016}
2017
2018void BaselineCompiler::VisitJumpLoop() {
2019#ifndef V8_JITLESS
2020 Label osr_armed, osr_not_armed;
2021 using D = OnStackReplacementDescriptor;
2022 Register feedback_vector = Register::no_reg();
2023 Register osr_state = Register::no_reg();
2024 const int loop_depth = iterator().GetImmediateOperand(1);
2025 {
2026 ASM_CODE_COMMENT_STRING(&masm_, "OSR Check Armed");
2027 BaselineAssembler::ScratchRegisterScope temps(&basm_);
2028 feedback_vector = temps.AcquireScratch();
2029 osr_state = temps.AcquireScratch();
2030 LoadFeedbackVector(feedback_vector);
2031 __ LoadWord8Field(osr_state, feedback_vector,
2032 FeedbackVector::kOsrStateOffset);
2033 static_assert(FeedbackVector::MaybeHasMaglevOsrCodeBit::encode(true) >
2035 static_assert(FeedbackVector::MaybeHasTurbofanOsrCodeBit::encode(true) >
2037 __ JumpIfByte(kUnsignedGreaterThan, osr_state, loop_depth, &osr_armed,
2038 Label::kNear);
2039 }
2040
2041 __ Bind(&osr_not_armed);
2042#endif // !V8_JITLESS
2043 Label* label = &labels_[iterator().GetJumpTargetOffset()];
2044 int weight = iterator().GetRelativeJumpTargetOffset() -
2045 iterator().current_bytecode_size_without_prefix();
2046 // We can pass in the same label twice since it's a back edge and thus already
2047 // bound.
2048 DCHECK(label->is_bound());
2050
2051#ifndef V8_JITLESS
2052 {
2053 // If we deopt during the above interrupt check, this part of the jump
2054 // loop is skipped. This is not a problem, as nothing observable happens
2055 // here.
2056#ifdef DEBUG
2057 effect_state_.safe_to_skip = true;
2058#endif
2059
2060 ASM_CODE_COMMENT_STRING(&masm_, "OSR Handle Armed");
2061 __ Bind(&osr_armed);
2062 Register maybe_target_code = D::MaybeTargetCodeRegister();
2063 Label osr;
2064 {
2065 BaselineAssembler::ScratchRegisterScope temps(&basm_);
2066 Register scratch0 = temps.AcquireScratch();
2067 Register scratch1 = temps.AcquireScratch();
2068 DCHECK_EQ(scratch0, feedback_vector);
2069 DCHECK_EQ(scratch1, osr_state);
2070 DCHECK(!AreAliased(maybe_target_code, scratch0, scratch1));
2071 __ TryLoadOptimizedOsrCode(maybe_target_code, scratch0,
2072 iterator().GetSlotOperand(2), &osr,
2073 Label::kNear);
2074 __ DecodeField<FeedbackVector::OsrUrgencyBits>(scratch1);
2075 __ JumpIfByte(kUnsignedLessThanEqual, scratch1, loop_depth,
2076 &osr_not_armed, Label::kNear);
2077 }
2078
2079 __ Bind(&osr);
2080 Label do_osr;
2081 weight = bytecode_->length() * v8_flags.osr_to_tierup;
2082 __ Push(maybe_target_code);
2083 UpdateInterruptBudgetAndJumpToLabel(-weight, nullptr, &do_osr,
2085 __ Bind(&do_osr);
2086 Register expected_param_count = D::ExpectedParameterCountRegister();
2087 __ Move(expected_param_count, Smi::FromInt(bytecode_->parameter_count()));
2088 __ Pop(maybe_target_code);
2090 expected_param_count);
2091 __ AddToInterruptBudgetAndJumpIfNotExceeded(weight, nullptr);
2092 __ Jump(&osr_not_armed, Label::kNear);
2093
2094#ifdef DEBUG
2095 effect_state_.safe_to_skip = false;
2096#endif
2097 }
2098#endif // !V8_JITLESS
2099}
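// --- Editor's note (not part of baseline-compiler.cc) -----------------------
// A minimal sketch of why the single unsigned byte compare in the OSR check
// above (osr_state > loop_depth) covers both "urgency exceeded" and "cached
// OSR code available": the maybe-has-Maglev/Turbofan-code bits encode to
// values above the maximum urgency, which is exactly what the two
// static_asserts guarantee. The concrete bit positions, widths, and the
// assumption that the loop-depth operand never exceeds the maximum urgency
// are illustrative only, not V8's actual constants.
#include <cassert>
#include <cstdint>

namespace osr_sketch {
constexpr uint8_t kMaxOsrUrgency = 0x7;               // assumed: urgency in the low 3 bits
constexpr uint8_t kMaybeHasMaglevOsrCode = 1 << 3;    // encodes above kMaxOsrUrgency
constexpr uint8_t kMaybeHasTurbofanOsrCode = 1 << 4;  // likewise

inline bool OsrCheckArmed(uint8_t osr_state, uint8_t loop_depth) {
  // Assumed: loop_depth <= kMaxOsrUrgency. Then any set "has code" bit alone
  // already makes osr_state larger than loop_depth; otherwise the compare
  // reduces to "urgency > loop depth".
  assert(loop_depth <= kMaxOsrUrgency);
  return osr_state > loop_depth;
}
}  // namespace osr_sketch
// -----------------------------------------------------------------------------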
2100
2101void BaselineCompiler::VisitJump() { __ Jump(BuildForwardJumpLabel()); }
2102
2103void BaselineCompiler::VisitJumpConstant() { VisitJump(); }
2104
2105void BaselineCompiler::VisitJumpIfNullConstant() { VisitJumpIfNull(); }
2106
2107void BaselineCompiler::VisitJumpIfNotNullConstant() { VisitJumpIfNotNull(); }
2108
2109void BaselineCompiler::VisitJumpIfUndefinedConstant() {
2110 VisitJumpIfUndefined();
2111}
2112
2113void BaselineCompiler::VisitJumpIfNotUndefinedConstant() {
2114 VisitJumpIfNotUndefined();
2115}
2116
2117void BaselineCompiler::VisitJumpIfUndefinedOrNullConstant() {
2118 VisitJumpIfUndefinedOrNull();
2119}
2120
2121void BaselineCompiler::VisitJumpIfTrueConstant() { VisitJumpIfTrue(); }
2122
2123void BaselineCompiler::VisitJumpIfFalseConstant() { VisitJumpIfFalse(); }
2124
2125void BaselineCompiler::VisitJumpIfJSReceiverConstant() {
2126 VisitJumpIfJSReceiver();
2127}
2128
2129void BaselineCompiler::VisitJumpIfForInDoneConstant() {
2130 VisitJumpIfForInDone();
2131}
2132
2133void BaselineCompiler::VisitJumpIfToBooleanTrueConstant() {
2134 VisitJumpIfToBooleanTrue();
2135}
2136
2137void BaselineCompiler::VisitJumpIfToBooleanFalseConstant() {
2138 VisitJumpIfToBooleanFalse();
2139}
2140
2141void BaselineCompiler::VisitJumpIfToBooleanTrue() {
2142 Label dont_jump;
2143 JumpIfToBoolean(false, &dont_jump, Label::kNear);
2144 __ Jump(BuildForwardJumpLabel());
2145 __ Bind(&dont_jump);
2146}
2147
2148void BaselineCompiler::VisitJumpIfToBooleanFalse() {
2149 Label dont_jump;
2150 JumpIfToBoolean(true, &dont_jump, Label::kNear);
2151 __ Jump(BuildForwardJumpLabel());
2152 __ Bind(&dont_jump);
2153}
2154
2155void BaselineCompiler::VisitJumpIfTrue() { JumpIfRoot(RootIndex::kTrueValue); }
2156
2157void BaselineCompiler::VisitJumpIfFalse() {
2158 JumpIfRoot(RootIndex::kFalseValue);
2159}
2160
2161void BaselineCompiler::VisitJumpIfNull() { JumpIfRoot(RootIndex::kNullValue); }
2162
2163void BaselineCompiler::VisitJumpIfNotNull() {
2164 JumpIfNotRoot(RootIndex::kNullValue);
2165}
2166
2167void BaselineCompiler::VisitJumpIfUndefined() {
2168 JumpIfRoot(RootIndex::kUndefinedValue);
2169}
2170
2171void BaselineCompiler::VisitJumpIfNotUndefined() {
2172 JumpIfNotRoot(RootIndex::kUndefinedValue);
2173}
2174
2175void BaselineCompiler::VisitJumpIfUndefinedOrNull() {
2176 Label do_jump, dont_jump;
2177 __ JumpIfRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue,
2178 &do_jump);
2179 __ JumpIfNotRoot(kInterpreterAccumulatorRegister, RootIndex::kNullValue,
2180 &dont_jump, Label::kNear);
2181 __ Bind(&do_jump);
2182 __ Jump(BuildForwardJumpLabel());
2183 __ Bind(&dont_jump);
2184}
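// --- Editor's note (not part of baseline-compiler.cc) -----------------------
// Sketch of the branch structure VisitJumpIfUndefinedOrNull emits: an
// "A or B" jump built only from root-equality branches, by letting the
// undefined case jump into the shared do_jump label while the not-null check
// skips it. Plain C++ gotos are used deliberately to mirror the assembler
// labels; the function and its parameters are invented for illustration.
inline bool jump_if_undefined_or_null_sketch(bool is_undefined, bool is_null) {
  if (is_undefined) goto do_jump;  // __ JumpIfRoot(acc, kUndefinedValue, &do_jump)
  if (!is_null) goto dont_jump;    // __ JumpIfNotRoot(acc, kNullValue, &dont_jump)
do_jump:
  return true;                     // __ Jump(BuildForwardJumpLabel())
dont_jump:
  return false;
}
// -----------------------------------------------------------------------------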
2185
2186void BaselineCompiler::VisitJumpIfJSReceiver() {
2187 Label is_smi, dont_jump;
2188 __ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
2189
2190#if V8_STATIC_ROOTS_BOOL
2191 __ JumpIfJSAnyIsPrimitive(kInterpreterAccumulatorRegister, &dont_jump,
 2192 Label::Distance::kNear);
 2193#else
2194 __ JumpIfObjectTypeFast(kLessThan, kInterpreterAccumulatorRegister,
2195 FIRST_JS_RECEIVER_TYPE, &dont_jump);
2196#endif
2197 __ Jump(BuildForwardJumpLabel());
2198
2199 __ Bind(&is_smi);
2200 __ Bind(&dont_jump);
2201}
2202
2203void BaselineCompiler::VisitJumpIfForInDone() {
2204 BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
2205 Register index = scratch_scope.AcquireScratch();
2206 LoadRegister(index, 1);
2207 __ JumpIfTagged(kEqual, index, __ RegisterFrameOperand(RegisterOperand(2)),
 2208 BuildForwardJumpLabel());
 2209}
2210
2211void BaselineCompiler::VisitSwitchOnSmiNoFeedback() {
2212 BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
2213 interpreter::JumpTableTargetOffsets offsets =
2214 iterator().GetJumpTableTargetOffsets();
2215
2216 if (offsets.size() == 0) return;
2217
2218 int case_value_base = (*offsets.begin()).case_value;
2219
2220 std::unique_ptr<Label*[]> labels = std::make_unique<Label*[]>(offsets.size());
2221 for (interpreter::JumpTableTargetOffset offset : offsets) {
2222 labels[offset.case_value - case_value_base] =
2223 EnsureLabel(offset.target_offset);
2224 }
2225 Register case_value = scratch_scope.AcquireScratch();
 2226 __ SmiUntag(case_value, kInterpreterAccumulatorRegister);
 2227 __ Switch(case_value, case_value_base, labels.get(), offsets.size());
2228}
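// --- Editor's note (not part of baseline-compiler.cc) -----------------------
// Sketch of the dense dispatch VisitSwitchOnSmiNoFeedback builds: case values
// need not start at zero, so the first case value becomes the table base and
// each target lands at index (case_value - base); the Switch above then does
// one bounds check plus an indexed jump. The helper below is invented for
// illustration and assumes, as the indexing above implies, contiguous
// ascending case values.
#include <utility>
#include <vector>

inline int switch_dispatch_sketch(
    const std::vector<std::pair<int, int>>& cases,  // {case_value, target_offset}
    int case_value) {
  const int base = cases.front().first;
  std::vector<int> table(cases.size());
  for (const auto& [value, target] : cases) table[value - base] = target;
  return table[case_value - base];  // O(1): one subtraction, one load
}
// -----------------------------------------------------------------------------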
2229
2230void BaselineCompiler::VisitForInEnumerate() {
 2231 CallBuiltin<Builtin::kForInEnumerate>(RegisterOperand(0));
 2232}
2233
2234void BaselineCompiler::VisitForInPrepare() {
 2235 StoreRegister(0, kInterpreterAccumulatorRegister);
 2236 CallBuiltin<Builtin::kForInPrepare>(kInterpreterAccumulatorRegister,
 2237 IndexAsTagged(1), FeedbackVector());
 2238 interpreter::Register first = iterator().GetRegisterOperand(0);
2239 interpreter::Register second(first.index() + 1);
2240 interpreter::Register third(first.index() + 2);
 2241 __ StoreRegister(second, kReturnRegister0);
 2242 __ StoreRegister(third, kReturnRegister1);
 2243}
2244
2245void BaselineCompiler::VisitForInNext() {
2246 interpreter::Register cache_type, cache_array;
2247 std::tie(cache_type, cache_array) = iterator().GetRegisterPairOperand(2);
2248 CallBuiltin<Builtin::kForInNext>(Index(3), // vector slot
2249 RegisterOperand(0), // object
2250 cache_array, // cache array
2251 cache_type, // cache type
2252 RegisterOperand(1), // index
2253 FeedbackVector()); // feedback vector
2254}
2255
2256void BaselineCompiler::VisitForInStep() {
2257 __ IncrementSmi(__ RegisterFrameOperand(RegisterOperand(0)));
2258}
2259
2260void BaselineCompiler::VisitSetPendingMessage() {
2261 BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
2262 Register pending_message = scratch_scope.AcquireScratch();
2263 __ Move(pending_message,
 2264 ExternalReference::address_of_pending_message(local_isolate_));
 2265 Register tmp = scratch_scope.AcquireScratch();
 2266 __ Move(tmp, kInterpreterAccumulatorRegister);
2267 __ Move(kInterpreterAccumulatorRegister, MemOperand(pending_message, 0));
2268 __ Move(MemOperand(pending_message, 0), tmp);
2269}
2270
2271void BaselineCompiler::VisitThrow() {
 2272 CallRuntime(Runtime::kThrow, kInterpreterAccumulatorRegister);
 2273 __ Trap();
2274}
2275
2276void BaselineCompiler::VisitReThrow() {
2277 CallRuntime(Runtime::kReThrow, kInterpreterAccumulatorRegister);
2278 __ Trap();
2279}
2280
2281void BaselineCompiler::VisitReturn() {
2282 ASM_CODE_COMMENT_STRING(&masm_, "Return");
2283 int profiling_weight = iterator().current_offset() +
2284 iterator().current_bytecode_size_without_prefix();
2285 int parameter_count = bytecode_->parameter_count();
2286
 2287 TailCallBuiltin<Builtin::kBaselineLeaveFrame>(parameter_count,
 2288 -profiling_weight);
2289}
2290
2291void BaselineCompiler::VisitThrowReferenceErrorIfHole() {
2292 Label done;
2293 __ JumpIfNotRoot(kInterpreterAccumulatorRegister, RootIndex::kTheHoleValue,
2294 &done);
2295 CallRuntime(Runtime::kThrowAccessedUninitializedVariable, Constant<Name>(0));
2296 // Unreachable.
2297 __ Trap();
2298 __ Bind(&done);
2299}
2300
2301void BaselineCompiler::VisitThrowSuperNotCalledIfHole() {
2302 Label done;
2303 __ JumpIfNotRoot(kInterpreterAccumulatorRegister, RootIndex::kTheHoleValue,
2304 &done);
2305 CallRuntime(Runtime::kThrowSuperNotCalled);
2306 // Unreachable.
2307 __ Trap();
2308 __ Bind(&done);
2309}
2310
2311void BaselineCompiler::VisitThrowSuperAlreadyCalledIfNotHole() {
2312 Label done;
2313 __ JumpIfRoot(kInterpreterAccumulatorRegister, RootIndex::kTheHoleValue,
2314 &done);
2315 CallRuntime(Runtime::kThrowSuperAlreadyCalledError);
2316 // Unreachable.
2317 __ Trap();
2318 __ Bind(&done);
2319}
2320
2321void BaselineCompiler::VisitThrowIfNotSuperConstructor() {
2322 Label done;
2323
2324 BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
2325 Register reg = scratch_scope.AcquireScratch();
2326 LoadRegister(reg, 0);
2327 Register map_bit_field = scratch_scope.AcquireScratch();
2328 __ LoadMap(map_bit_field, reg);
2329 __ LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
2330 __ TestAndBranch(map_bit_field, Map::Bits1::IsConstructorBit::kMask, kNotZero,
2331 &done, Label::kNear);
2332
2333 CallRuntime(Runtime::kThrowNotSuperConstructor, reg, __ FunctionOperand());
2334
2335 __ Bind(&done);
2336}
2337
2338void BaselineCompiler::VisitSwitchOnGeneratorState() {
2339 BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
2340
2341 Label fallthrough;
2342
2343 Register generator_object = scratch_scope.AcquireScratch();
2344 LoadRegister(generator_object, 0);
2345 __ JumpIfRoot(generator_object, RootIndex::kUndefinedValue, &fallthrough);
2346
2347 Register continuation = scratch_scope.AcquireScratch();
2348 __ LoadTaggedSignedFieldAndUntag(continuation, generator_object,
2349 JSGeneratorObject::kContinuationOffset);
2350 __ StoreTaggedSignedField(
2351 generator_object, JSGeneratorObject::kContinuationOffset,
 2352 Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
 2353
2354 Register context = scratch_scope.AcquireScratch();
2355 __ LoadTaggedField(context, generator_object,
2356 JSGeneratorObject::kContextOffset);
2357 __ StoreContext(context);
2358
2359 interpreter::JumpTableTargetOffsets offsets =
2360 iterator().GetJumpTableTargetOffsets();
2361
2362 if (0 < offsets.size()) {
2363 DCHECK_EQ(0, (*offsets.begin()).case_value);
2364
2365 std::unique_ptr<Label*[]> labels =
2366 std::make_unique<Label*[]>(offsets.size());
2367 for (interpreter::JumpTableTargetOffset offset : offsets) {
2368 labels[offset.case_value] = EnsureLabel(offset.target_offset);
2369 }
2370 __ Switch(continuation, 0, labels.get(), offsets.size());
2371 // We should never fall through this switch.
2372 // TODO(v8:11429,leszeks): Maybe remove the fallthrough check in the Switch?
2373 __ Trap();
2374 }
2375
2376 __ Bind(&fallthrough);
2377}
2378
2379void BaselineCompiler::VisitSuspendGenerator() {
2380 DCHECK_EQ(iterator().GetRegisterOperand(1), interpreter::Register(0));
2381 BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
2382 Register generator_object = scratch_scope.AcquireScratch();
2383 LoadRegister(generator_object, 0);
2384 {
2385 SaveAccumulatorScope accumulator_scope(this, &basm_);
2386
2387 int bytecode_offset =
2388 BytecodeArray::kHeaderSize + iterator().current_offset();
 2389 CallBuiltin<Builtin::kSuspendGeneratorBaseline>(
 2390 generator_object,
2391 static_cast<int>(Uint(3)), // suspend_id
2392 bytecode_offset,
2393 static_cast<int>(RegisterCount(2))); // register_count
2394 }
2395 int parameter_count = bytecode_->parameter_count();
2396
 2397 TailCallBuiltin<Builtin::kBaselineLeaveFrame>(parameter_count, 0);
 2398}
2399
2400void BaselineCompiler::VisitResumeGenerator() {
2401 DCHECK_EQ(iterator().GetRegisterOperand(1), interpreter::Register(0));
2402 BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
2403 Register generator_object = scratch_scope.AcquireScratch();
2404 LoadRegister(generator_object, 0);
 2405 CallBuiltin<Builtin::kResumeGeneratorBaseline>(
 2406 generator_object,
2407 static_cast<int>(RegisterCount(2))); // register_count
2408}
2409
2410void BaselineCompiler::VisitGetIterator() {
 2411 CallBuiltin<Builtin::kGetIteratorBaseline>(RegisterOperand(0), // receiver
 2412 IndexAsTagged(1), // load_slot
2413 IndexAsTagged(2)); // call_slot
2414}
2415
2416void BaselineCompiler::VisitDebugger() {
2417 CallRuntime(Runtime::kHandleDebuggerStatement);
2418}
2419
2420void BaselineCompiler::VisitIncBlockCounter() {
2421 SaveAccumulatorScope accumulator_scope(this, &basm_);
 2422 CallBuiltin<Builtin::kIncBlockCounter>(__ FunctionOperand(),
 2423 IndexAsSmi(0)); // coverage array slot
2424}
2425
2426void BaselineCompiler::VisitAbort() {
2427 CallRuntime(Runtime::kAbort, Smi::FromInt(Index(0)));
2428 __ Trap();
2429}
2430
2431void BaselineCompiler::VisitWide() {
2432 // Consumed by the BytecodeArrayIterator.
2433 UNREACHABLE();
2434}
2435
2436void BaselineCompiler::VisitExtraWide() {
2437 // Consumed by the BytecodeArrayIterator.
2438 UNREACHABLE();
2439}
2440
2441void BaselineCompiler::VisitIllegal() {
2442 // Not emitted in valid bytecode.
2443 UNREACHABLE();
2444}
2445#define DEBUG_BREAK(Name, ...) \
2446 void BaselineCompiler::Visit##Name() { UNREACHABLE(); }
 2447 DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK)
 2448#undef DEBUG_BREAK
2449
 2450 SaveAccumulatorScope::SaveAccumulatorScope(BaselineCompiler* compiler,
 2451 BaselineAssembler* assembler)
2452 :
2453#ifdef DEBUG
2454 compiler_(compiler),
2455#endif
2456 assembler_(assembler) {
2457#ifdef DEBUG
2458 DCHECK(!compiler_->effect_state_.accumulator_on_stack);
2459 compiler_->effect_state_.accumulator_on_stack = true;
2460#endif // DEBUG
 2461 ASM_CODE_COMMENT(assembler_->masm());
 2462 assembler_->Push(kInterpreterAccumulatorRegister);
 2463}
2464
 2465 SaveAccumulatorScope::~SaveAccumulatorScope() {
 2466#ifdef DEBUG
2467 DCHECK(compiler_->effect_state_.accumulator_on_stack);
2468 compiler_->effect_state_.accumulator_on_stack = false;
2469#endif // DEBUG
 2470 ASM_CODE_COMMENT(assembler_->masm());
 2471 assembler_->Pop(kInterpreterAccumulatorRegister);
 2472}
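// --- Editor's note (not part of baseline-compiler.cc) -----------------------
// Sketch of the RAII pattern SaveAccumulatorScope implements: push the
// accumulator on scope entry, pop it on scope exit, so the wrapped call may
// clobber the register freely. FakeAssembler and FakeRegister are stand-ins
// invented for illustration; they are not V8 types.
#include <stack>

struct FakeRegister { int value = 0; };
struct FakeAssembler {
  std::stack<int> machine_stack;
  void Push(const FakeRegister& r) { machine_stack.push(r.value); }
  void Pop(FakeRegister& r) { r.value = machine_stack.top(); machine_stack.pop(); }
};

class SaveAccumulatorSketch {
 public:
  SaveAccumulatorSketch(FakeAssembler* masm, FakeRegister& acc)
      : masm_(masm), acc_(acc) {
    masm_->Push(acc_);  // spill before the clobbering call
  }
  ~SaveAccumulatorSketch() { masm_->Pop(acc_); }  // restore afterwards
 private:
  FakeAssembler* masm_;
  FakeRegister& acc_;
};
// -----------------------------------------------------------------------------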
2473
2474#undef RCS_BASELINE_SCOPE
2475#undef __
2476
2477} // namespace baseline
2478} // namespace internal
2479} // namespace v8