interpreter-assembler.cc
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/interpreter/interpreter-assembler.h"

#include <limits>
#include <ostream>

namespace v8 {
namespace internal {
namespace interpreter {

using compiler::CodeAssemblerState;

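// Each bytecode handler is generated with a fresh InterpreterAssembler. The
// interpreter's machine state (accumulator, bytecode offset, bytecode array,
// and dispatch table) arrives via the InterpreterDispatchDescriptor and is
// threaded through the handler in the CSA variables initialized below.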
InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
                                           Bytecode bytecode,
                                           OperandScale operand_scale)
    : CodeStubAssembler(state),
      bytecode_(bytecode),
      operand_scale_(operand_scale),
      TVARIABLE_CONSTRUCTOR(interpreted_frame_pointer_),
      TVARIABLE_CONSTRUCTOR(bytecode_array_,
                            Parameter<BytecodeArray>(
                                InterpreterDispatchDescriptor::kBytecodeArray)),
      TVARIABLE_CONSTRUCTOR(
          bytecode_offset_,
          UncheckedParameter<IntPtrT>(
              InterpreterDispatchDescriptor::kBytecodeOffset)),
      TVARIABLE_CONSTRUCTOR(dispatch_table_,
                            UncheckedParameter<ExternalReference>(
                                InterpreterDispatchDescriptor::kDispatchTable)),
      TVARIABLE_CONSTRUCTOR(
          accumulator_,
          Parameter<Object>(InterpreterDispatchDescriptor::kAccumulator)),
      implicit_register_use_(ImplicitRegisterUse::kNone),
      made_call_(false),
      reloaded_frame_ptr_(false),
      bytecode_array_valid_(true) {
#ifdef V8_TRACE_UNOPTIMIZED
  TraceBytecode(Runtime::kTraceUnoptimizedBytecodeEntry);
#endif
  RegisterCallGenerationCallbacks([this] { CallPrologue(); },
                                  [this] { CallEpilogue(); });

  // Save the bytecode offset immediately if the bytecode will make a call
  // along the critical path, or if it is a return bytecode.
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode) ||
      Bytecodes::Returns(bytecode)) {
    SaveBytecodeOffset();
  }
}

InterpreterAssembler::~InterpreterAssembler() {
  // If the following check fails the handler does not use the
  // accumulator in the way described in the bytecode definitions in
  // bytecodes.h.
  DCHECK_EQ(implicit_register_use_,
            Bytecodes::GetImplicitRegisterUse(bytecode_));
  UnregisterCallGenerationCallbacks();
}

TNode<RawPtrT> InterpreterAssembler::GetInterpretedFramePointer() {
  if (!interpreted_frame_pointer_.IsBound()) {
    interpreted_frame_pointer_ = LoadParentFramePointer();
  } else if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
             !reloaded_frame_ptr_) {
    interpreted_frame_pointer_ = LoadParentFramePointer();
    reloaded_frame_ptr_ = true;
  }
  return interpreted_frame_pointer_.value();
}

TNode<IntPtrT> InterpreterAssembler::BytecodeOffset() {
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
      (bytecode_offset_.value() ==
       UncheckedParameter<IntPtrT>(
           InterpreterDispatchDescriptor::kBytecodeOffset))) {
    bytecode_offset_ = ReloadBytecodeOffset();
  }
  return bytecode_offset_.value();
}

TNode<IntPtrT> InterpreterAssembler::ReloadBytecodeOffset() {
  TNode<IntPtrT> offset = LoadAndUntagRegister(Register::bytecode_offset());
  if (operand_scale() != OperandScale::kSingle) {
    // Add one to the offset such that it points to the actual bytecode rather
    // than the Wide / ExtraWide prefix bytecode.
    offset = IntPtrAdd(offset, IntPtrConstant(1));
  }
  return offset;
}
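
// SaveBytecodeOffset spills the current bytecode offset into the interpreter
// frame as a Smi so that it survives calls. With 32-bit Smis on a 64-bit
// target the tagged slot is written as two 32-bit halves (a zero word and the
// payload word) instead of building a full Smi.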

void InterpreterAssembler::SaveBytecodeOffset() {
  TNode<IntPtrT> bytecode_offset = BytecodeOffset();
  if (operand_scale() != OperandScale::kSingle) {
    // Subtract one from the bytecode_offset such that it points to the Wide /
    // ExtraWide prefix bytecode.
    bytecode_offset = IntPtrSub(BytecodeOffset(), IntPtrConstant(1));
  }
  int store_offset =
      Register::bytecode_offset().ToOperand() * kSystemPointerSize;
  TNode<RawPtrT> base = GetInterpretedFramePointer();

  if (SmiValuesAre32Bits()) {
    int zero_offset = store_offset + 4;
    int payload_offset = store_offset;
#if V8_TARGET_LITTLE_ENDIAN
    std::swap(zero_offset, payload_offset);
#endif
    StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
                        IntPtrConstant(zero_offset), Int32Constant(0));
    StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
                        IntPtrConstant(payload_offset),
                        TruncateIntPtrToInt32(bytecode_offset));
  } else {
    StoreFullTaggedNoWriteBarrier(base, IntPtrConstant(store_offset),
                                  SmiTag(bytecode_offset));
  }
}

TNode<BytecodeArray> InterpreterAssembler::BytecodeArrayTaggedPointer() {
  // Force a re-load of the bytecode array after every call in case the
  // debugger has been activated.
  if (!bytecode_array_valid_) {
    bytecode_array_ = CAST(LoadRegister(Register::bytecode_array()));
    bytecode_array_valid_ = true;
  }
  return bytecode_array_.value();
}

TNode<ExternalReference> InterpreterAssembler::DispatchTablePointer() {
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
      (dispatch_table_.value() ==
       UncheckedParameter<ExternalReference>(
           InterpreterDispatchDescriptor::kDispatchTable))) {
    dispatch_table_ = ExternalConstant(
        ExternalReference::interpreter_dispatch_table_address(isolate()));
  }
  return dispatch_table_.value();
}

TNode<Object> InterpreterAssembler::GetAccumulatorUnchecked() {
  return accumulator_.value();
}

TNode<Object> InterpreterAssembler::GetAccumulator() {
  DCHECK(Bytecodes::ReadsAccumulator(bytecode_));
  implicit_register_use_ =
      implicit_register_use_ | ImplicitRegisterUse::kReadAccumulator;
  return GetAccumulatorUnchecked();
}

void InterpreterAssembler::SetAccumulator(TNode<Object> value) {
  DCHECK(Bytecodes::WritesAccumulator(bytecode_));
  implicit_register_use_ =
      implicit_register_use_ | ImplicitRegisterUse::kWriteAccumulator;
  accumulator_ = value;
}

void InterpreterAssembler::ClobberAccumulator(TNode<Object> clobber_value) {
  DCHECK(Bytecodes::ClobbersAccumulator(bytecode_));
  implicit_register_use_ =
      implicit_register_use_ | ImplicitRegisterUse::kClobberAccumulator;
  accumulator_ = clobber_value;
}

TNode<Context> InterpreterAssembler::GetContext() {
  return CAST(LoadRegister(Register::current_context()));
}

void InterpreterAssembler::SetContext(TNode<Context> value) {
  StoreRegister(value, Register::current_context());
}

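// Walks up the context chain: starting from |context|, follow
// Context::PREVIOUS_INDEX |depth| times. A depth of zero returns the input
// context unchanged.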
TNode<Context> InterpreterAssembler::GetContextAtDepth(TNode<Context> context,
                                                       TNode<Uint32T> depth) {
  TVARIABLE(Context, cur_context, context);
  TVARIABLE(Uint32T, cur_depth, depth);

  Label context_found(this);

  Label context_search(this, {&cur_depth, &cur_context});

  // Fast path if the depth is 0.
  Branch(Word32Equal(depth, Int32Constant(0)), &context_found, &context_search);

  // Loop until the depth is 0.
  BIND(&context_search);
  {
    cur_depth = Unsigned(Int32Sub(cur_depth.value(), Int32Constant(1)));
    cur_context =
        CAST(LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));

    Branch(Word32Equal(cur_depth.value(), Int32Constant(0)), &context_found,
           &context_search);
  }

  BIND(&context_found);
  return cur_context.value();
}
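
// Interpreter registers live in the interpreter's stack frame. A register
// operand is an index that is scaled by kSystemPointerSize and added to the
// frame pointer; local registers have negative indices, so they sit below the
// frame pointer.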

TNode<IntPtrT> InterpreterAssembler::RegisterLocation(
    TNode<IntPtrT> reg_index) {
  return Signed(IntPtrAdd(GetInterpretedFramePointer(),
                          RegisterFrameOffset(reg_index)));
}

TNode<IntPtrT> InterpreterAssembler::RegisterLocation(Register reg) {
  return RegisterLocation(IntPtrConstant(reg.ToOperand()));
}

TNode<IntPtrT> InterpreterAssembler::RegisterFrameOffset(TNode<IntPtrT> index) {
  return TimesSystemPointerSize(index);
}

TNode<Object> InterpreterAssembler::LoadRegister(TNode<IntPtrT> reg_index) {
  return LoadFullTagged(GetInterpretedFramePointer(),
                        RegisterFrameOffset(reg_index));
}

TNode<Object> InterpreterAssembler::LoadRegister(Register reg) {
  return LoadFullTagged(GetInterpretedFramePointer(),
                        IntPtrConstant(reg.ToOperand() * kSystemPointerSize));
}

TNode<IntPtrT> InterpreterAssembler::LoadAndUntagRegister(Register reg) {
  TNode<RawPtrT> base = GetInterpretedFramePointer();
  int index = reg.ToOperand() * kSystemPointerSize;
  if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
    index += 4;
#endif
    return ChangeInt32ToIntPtr(Load<Int32T>(base, IntPtrConstant(index)));
  } else {
    return SmiToIntPtr(CAST(LoadFullTagged(base, IntPtrConstant(index))));
  }
}

TNode<Object> InterpreterAssembler::LoadRegisterAtOperandIndex(
    int operand_index) {
  return LoadRegister(BytecodeOperandReg(operand_index));
}

std::pair<TNode<Object>, TNode<Object>>
InterpreterAssembler::LoadRegisterPairAtOperandIndex(int operand_index) {
  DCHECK_EQ(OperandType::kRegPair,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  TNode<IntPtrT> first_reg_index = BytecodeOperandReg(operand_index);
  TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
  return std::make_pair(LoadRegister(first_reg_index),
                        LoadRegister(second_reg_index));
}

InterpreterAssembler::RegListNodePair
InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) {
  DCHECK(Bytecodes::IsRegisterListOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  DCHECK_EQ(OperandType::kRegCount,
            Bytecodes::GetOperandType(bytecode_, operand_index + 1));
  TNode<IntPtrT> base_reg = RegisterLocation(BytecodeOperandReg(operand_index));
  TNode<Uint32T> reg_count = BytecodeOperandCount(operand_index + 1);
  return RegListNodePair(base_reg, reg_count);
}

TNode<Object> InterpreterAssembler::LoadRegisterFromRegisterList(
    const RegListNodePair& reg_list, int index) {
  TNode<IntPtrT> location = RegisterLocationInRegisterList(reg_list, index);
  return LoadFullTagged(location);
}

TNode<IntPtrT> InterpreterAssembler::RegisterLocationInRegisterList(
    const RegListNodePair& reg_list, int index) {
  CSA_DCHECK(this,
             Uint32GreaterThan(reg_list.reg_count(), Int32Constant(index)));
  TNode<IntPtrT> offset = RegisterFrameOffset(IntPtrConstant(index));
  // Register indexes are negative, so subtract index from base location to get
  // location.
  return Signed(IntPtrSub(reg_list.base_reg_location(), offset));
}

void InterpreterAssembler::StoreRegister(TNode<Object> value, Register reg) {
  StoreFullTaggedNoWriteBarrier(
      GetInterpretedFramePointer(),
      IntPtrConstant(reg.ToOperand() * kSystemPointerSize), value);
}

void InterpreterAssembler::StoreRegister(TNode<Object> value,
                                         TNode<IntPtrT> reg_index) {
  StoreFullTaggedNoWriteBarrier(GetInterpretedFramePointer(),
                                RegisterFrameOffset(reg_index), value);
}

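// The short Star0..Star15 bytecodes encode their destination register in the
// opcode itself rather than in an operand byte, so this store derives the
// register's frame offset directly from the opcode value.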
void InterpreterAssembler::StoreRegisterForShortStar(TNode<Object> value,
                                                     TNode<WordT> opcode) {
  DCHECK(Bytecodes::IsShortStar(bytecode_));
  implicit_register_use_ =
      implicit_register_use_ | ImplicitRegisterUse::kWriteShortStar;

  CSA_DCHECK(
      this, UintPtrGreaterThanOrEqual(opcode, UintPtrConstant(static_cast<int>(
                                                  Bytecode::kFirstShortStar))));
  CSA_DCHECK(
      this,
      UintPtrLessThanOrEqual(
          opcode, UintPtrConstant(static_cast<int>(Bytecode::kLastShortStar))));

  // Compute the constant that we can add to a Bytecode value to map the range
  // [Bytecode::kStar15, Bytecode::kStar0] to the range
  // [Register(15).ToOperand(), Register(0).ToOperand()].
  constexpr int short_star_to_operand =
      Register(0).ToOperand() - static_cast<int>(Bytecode::kStar0);
  // Make sure the values count in the right direction.
  static_assert(short_star_to_operand ==
                Register(1).ToOperand() - static_cast<int>(Bytecode::kStar1));

  TNode<IntPtrT> offset =
      IntPtrAdd(RegisterFrameOffset(Signed(opcode)),
                IntPtrConstant(short_star_to_operand * kSystemPointerSize));
  StoreFullTaggedNoWriteBarrier(GetInterpretedFramePointer(), offset, value);
}

void InterpreterAssembler::StoreRegisterAtOperandIndex(TNode<Object> value,
                                                       int operand_index) {
  StoreRegister(value, BytecodeOperandReg(operand_index));
}

void InterpreterAssembler::StoreRegisterPairAtOperandIndex(TNode<Object> value1,
                                                           TNode<Object> value2,
                                                           int operand_index) {
  DCHECK_EQ(OperandType::kRegOutPair,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  TNode<IntPtrT> first_reg_index = BytecodeOperandReg(operand_index);
  StoreRegister(value1, first_reg_index);
  TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
  StoreRegister(value2, second_reg_index);
}

void InterpreterAssembler::StoreRegisterTripleAtOperandIndex(
    TNode<Object> value1, TNode<Object> value2, TNode<Object> value3,
    int operand_index) {
  DCHECK_EQ(OperandType::kRegOutTriple,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  TNode<IntPtrT> first_reg_index = BytecodeOperandReg(operand_index);
  StoreRegister(value1, first_reg_index);
  TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
  StoreRegister(value2, second_reg_index);
  TNode<IntPtrT> third_reg_index = NextRegister(second_reg_index);
  StoreRegister(value3, third_reg_index);
}

TNode<IntPtrT> InterpreterAssembler::NextRegister(TNode<IntPtrT> reg_index) {
  // Register indexes are negative, so the next index is minus one.
  return Signed(IntPtrAdd(reg_index, IntPtrConstant(-1)));
}

TNode<IntPtrT> InterpreterAssembler::OperandOffset(int operand_index) {
  return IntPtrConstant(
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()));
}

TNode<Uint8T> InterpreterAssembler::BytecodeOperandUnsignedByte(
    int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  TNode<IntPtrT> operand_offset = OperandOffset(operand_index);
  return Load<Uint8T>(BytecodeArrayTaggedPointer(),
                      IntPtrAdd(BytecodeOffset(), operand_offset));
}

TNode<Int8T> InterpreterAssembler::BytecodeOperandSignedByte(
    int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  TNode<IntPtrT> operand_offset = OperandOffset(operand_index);
  return Load<Int8T>(BytecodeArrayTaggedPointer(),
                     IntPtrAdd(BytecodeOffset(), operand_offset));
}

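// On targets without unaligned access, multi-byte operands are read one byte
// at a time, most-significant byte first, and packed into a word with shifts
// and ors.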
TNode<Word32T> InterpreterAssembler::BytecodeOperandReadUnaligned(
    int relative_offset, MachineType result_type) {
  static const int kMaxCount = 4;
  DCHECK(!TargetSupportsUnalignedAccess());

  int count;
  switch (result_type.representation()) {
    case MachineRepresentation::kWord16:
      count = 2;
      break;
    case MachineRepresentation::kWord32:
      count = 4;
      break;
    default:
      UNREACHABLE();
  }
  MachineType msb_type =
      result_type.IsSigned() ? MachineType::Int8() : MachineType::Uint8();

#if V8_TARGET_LITTLE_ENDIAN
  const int kStep = -1;
  int msb_offset = count - 1;
#elif V8_TARGET_BIG_ENDIAN
  const int kStep = 1;
  int msb_offset = 0;
#else
#error "Unknown Architecture"
#endif

  // Read the most significant bytecode into bytes[0] and then in order
  // down to least significant in bytes[count - 1].
  DCHECK_LE(count, kMaxCount);
  TNode<Word32T> bytes[kMaxCount];
  for (int i = 0; i < count; i++) {
    MachineType machine_type = (i == 0) ? msb_type : MachineType::Uint8();
    TNode<IntPtrT> offset =
        IntPtrConstant(relative_offset + msb_offset + i * kStep);
    TNode<IntPtrT> array_offset = IntPtrAdd(BytecodeOffset(), offset);
    bytes[i] = UncheckedCast<Word32T>(
        Load(machine_type, BytecodeArrayTaggedPointer(), array_offset));
  }

  // Pack LSB to MSB.
  TNode<Word32T> result = bytes[--count];
  for (int i = 1; --count >= 0; i++) {
    TNode<Int32T> shift = Int32Constant(i * kBitsPerByte);
    TNode<Word32T> value = Word32Shl(bytes[count], shift);
    result = Word32Or(value, result);
  }
  return result;
}

TNode<Uint16T> InterpreterAssembler::BytecodeOperandUnsignedShort(
    int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(
      OperandSize::kShort,
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load<Uint16T>(
        BytecodeArrayTaggedPointer(),
        IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
  } else {
    return UncheckedCast<Uint16T>(
        BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint16()));
  }
}

TNode<Int16T> InterpreterAssembler::BytecodeOperandSignedShort(
    int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(
      OperandSize::kShort,
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load<Int16T>(
        BytecodeArrayTaggedPointer(),
        IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
  } else {
    return UncheckedCast<Int16T>(
        BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16()));
  }
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandUnsignedQuad(
    int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load<Uint32T>(
        BytecodeArrayTaggedPointer(),
        IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
  } else {
    return UncheckedCast<Uint32T>(
        BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint32()));
  }
}

TNode<Int32T> InterpreterAssembler::BytecodeOperandSignedQuad(
    int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load<Int32T>(
        BytecodeArrayTaggedPointer(),
        IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
  } else {
    return UncheckedCast<Int32T>(
        BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32()));
  }
}

TNode<Int32T> InterpreterAssembler::BytecodeSignedOperand(
    int operand_index, OperandSize operand_size) {
  DCHECK(!Bytecodes::IsUnsignedOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  switch (operand_size) {
    case OperandSize::kByte:
      return BytecodeOperandSignedByte(operand_index);
    case OperandSize::kShort:
      return BytecodeOperandSignedShort(operand_index);
    case OperandSize::kQuad:
      return BytecodeOperandSignedQuad(operand_index);
    case OperandSize::kNone:
      UNREACHABLE();
  }
}

TNode<Uint32T> InterpreterAssembler::BytecodeUnsignedOperand(
    int operand_index, OperandSize operand_size) {
  DCHECK(Bytecodes::IsUnsignedOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  switch (operand_size) {
    case OperandSize::kByte:
      return BytecodeOperandUnsignedByte(operand_index);
    case OperandSize::kShort:
      return BytecodeOperandUnsignedShort(operand_index);
    case OperandSize::kQuad:
      return BytecodeOperandUnsignedQuad(operand_index);
    case OperandSize::kNone:
      UNREACHABLE();
  }
}
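
// The typed operand accessors below (Count, Flag8, Flag16, UImm, Imm, Idx,
// and friends) check the operand type against the bytecode's declared operand
// layout in debug builds, then read the operand at the size implied by the
// current operand scale.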

TNode<Uint32T> InterpreterAssembler::BytecodeOperandCount(int operand_index) {
  DCHECK_EQ(OperandType::kRegCount,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandFlag8(int operand_index) {
  DCHECK_EQ(OperandType::kFlag8,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kByte);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandFlag16(int operand_index) {
  DCHECK_EQ(OperandType::kFlag16,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kShort);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandUImm(int operand_index) {
  DCHECK_EQ(OperandType::kUImm,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<UintPtrT> InterpreterAssembler::BytecodeOperandUImmWord(
    int operand_index) {
  return ChangeUint32ToWord(BytecodeOperandUImm(operand_index));
}

TNode<Smi> InterpreterAssembler::BytecodeOperandUImmSmi(int operand_index) {
  return SmiFromUint32(BytecodeOperandUImm(operand_index));
}

TNode<Int32T> InterpreterAssembler::BytecodeOperandImm(int operand_index) {
  DCHECK_EQ(OperandType::kImm,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeSignedOperand(operand_index, operand_size);
}

TNode<IntPtrT> InterpreterAssembler::BytecodeOperandImmIntPtr(
    int operand_index) {
  return ChangeInt32ToIntPtr(BytecodeOperandImm(operand_index));
}

TNode<Smi> InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) {
  return SmiFromInt32(BytecodeOperandImm(operand_index));
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandIdxInt32(
    int operand_index) {
  DCHECK_EQ(OperandType::kIdx,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<UintPtrT> InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
  return ChangeUint32ToWord(BytecodeOperandIdxInt32(operand_index));
}

TNode<Smi> InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) {
  return SmiTag(Signed(BytecodeOperandIdx(operand_index)));
}

TNode<TaggedIndex> InterpreterAssembler::BytecodeOperandIdxTaggedIndex(
    int operand_index) {
  TNode<IntPtrT> index =
      ChangeInt32ToIntPtr(Signed(BytecodeOperandIdxInt32(operand_index)));
  return IntPtrToTaggedIndex(index);
}

TNode<UintPtrT> InterpreterAssembler::BytecodeOperandConstantPoolIdx(
    int operand_index) {
  DCHECK_EQ(OperandType::kIdx,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeUint32ToWord(
      BytecodeUnsignedOperand(operand_index, operand_size));
}

TNode<IntPtrT> InterpreterAssembler::BytecodeOperandReg(int operand_index) {
  DCHECK(Bytecodes::IsRegisterOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeInt32ToIntPtr(
      BytecodeSignedOperand(operand_index, operand_size));
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandRuntimeId(
    int operand_index) {
  DCHECK_EQ(OperandType::kRuntimeId,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kShort);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<UintPtrT> InterpreterAssembler::BytecodeOperandNativeContextIndex(
    int operand_index) {
  DCHECK_EQ(OperandType::kNativeContextIndex,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeUint32ToWord(
      BytecodeUnsignedOperand(operand_index, operand_size));
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandIntrinsicId(
    int operand_index) {
  DCHECK_EQ(OperandType::kIntrinsicId,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kByte);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<Object> InterpreterAssembler::LoadConstantPoolEntry(TNode<WordT> index) {
  TNode<TrustedFixedArray> constant_pool = CAST(LoadProtectedPointerField(
      BytecodeArrayTaggedPointer(), BytecodeArray::kConstantPoolOffset));
  return CAST(LoadArrayElement(constant_pool,
                               OFFSET_OF_DATA_START(TrustedFixedArray),
                               UncheckedCast<IntPtrT>(index), 0));
}

TNode<IntPtrT> InterpreterAssembler::LoadAndUntagConstantPoolEntry(
    TNode<WordT> index) {
  return SmiUntag(CAST(LoadConstantPoolEntry(index)));
}

TNode<Object> InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex(
    int operand_index) {
  TNode<UintPtrT> index = BytecodeOperandConstantPoolIdx(operand_index);
  return LoadConstantPoolEntry(index);
}

TNode<IntPtrT>
InterpreterAssembler::LoadAndUntagConstantPoolEntryAtOperandIndex(
    int operand_index) {
  return SmiUntag(CAST(LoadConstantPoolEntryAtOperandIndex(operand_index)));
}

TNode<JSFunction> InterpreterAssembler::LoadFunctionClosure() {
  return CAST(LoadRegister(Register::function_closure()));
}

TNode<Union<FeedbackVector, Undefined>>
InterpreterAssembler::LoadFeedbackVector() {
  return CodeStubAssembler::LoadFeedbackVector(LoadFunctionClosure());
}

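// Bookkeeping around calls: the prologue spills the bytecode offset for
// bytecodes that don't already save it on the critical path, and every call
// invalidates the cached bytecode array pointer, which is re-loaded lazily in
// case the debugger has replaced the bytecode array.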
void InterpreterAssembler::CallPrologue() {
  if (!Bytecodes::MakesCallAlongCriticalPath(bytecode_)) {
    // Bytecodes that make a call along the critical path save the bytecode
    // offset in the bytecode handler's prologue. For other bytecodes, if
    // there are multiple calls in the bytecode handler, you need to spill
    // before each of them, unless SaveBytecodeOffset has explicitly been called
    // in a path that dominates _all_ of those calls (which we don't track).
    SaveBytecodeOffset();
  }

  bytecode_array_valid_ = false;
  made_call_ = true;
}

void InterpreterAssembler::CallEpilogue() {}

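// JS calls from bytecode handlers do not return to the handler: the handler
// tail-calls a push-args builtin which performs the call and then re-enters
// the dispatch loop at the next bytecode, with the call result in the
// accumulator.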
void InterpreterAssembler::CallJSAndDispatch(
    TNode<JSAny> function, TNode<Context> context, const RegListNodePair& args,
    ConvertReceiverMode receiver_mode) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
         bytecode_ == Bytecode::kInvokeIntrinsic);
  DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);

  TNode<Word32T> args_count = args.reg_count();
  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
    // Add receiver. It is not included in args as it is implicit.
    args_count = Int32Add(args_count, Int32Constant(kJSArgcReceiverSlots));
  }

  Builtin builtin = Builtins::InterpreterPushArgsThenCall(
      receiver_mode, InterpreterPushArgsMode::kOther);

  TailCallBuiltinThenBytecodeDispatch(builtin, context, args_count,
                                      args.base_reg_location(), function);
  // TailCallStubThenDispatch updates accumulator with result.
  implicit_register_use_ =
      implicit_register_use_ | ImplicitRegisterUse::kWriteAccumulator;
}

template <class... TArgs>
void InterpreterAssembler::CallJSAndDispatch(TNode<JSAny> function,
                                             TNode<Context> context,
                                             TNode<Word32T> arg_count,
                                             ConvertReceiverMode receiver_mode,
                                             TArgs... args) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
         bytecode_ == Bytecode::kInvokeIntrinsic);
  DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
  Builtin builtin = Builtins::Call();

  arg_count = JSParameterCount(arg_count);
  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
    // The first argument parameter (the receiver) is implied to be undefined.
    TailCallBuiltinThenBytecodeDispatch(builtin, context, function, arg_count,
                                        args..., UndefinedConstant());
  } else {
    TailCallBuiltinThenBytecodeDispatch(builtin, context, function, arg_count,
                                        args...);
  }
  // TailCallStubThenDispatch updates accumulator with result.
  implicit_register_use_ =
      implicit_register_use_ | ImplicitRegisterUse::kWriteAccumulator;
}

// Instantiate CallJSAndDispatch() for argument counts used by interpreter
// generator.
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    TNode<JSAny> function, TNode<Context> context, TNode<Word32T> arg_count,
    ConvertReceiverMode receiver_mode);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    TNode<JSAny> function, TNode<Context> context, TNode<Word32T> arg_count,
    ConvertReceiverMode receiver_mode, TNode<Object>);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    TNode<JSAny> function, TNode<Context> context, TNode<Word32T> arg_count,
    ConvertReceiverMode receiver_mode, TNode<Object>, TNode<Object>);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    TNode<JSAny> function, TNode<Context> context, TNode<Word32T> arg_count,
    ConvertReceiverMode receiver_mode, TNode<Object>, TNode<Object>,
    TNode<Object>);

void InterpreterAssembler::CallJSWithSpreadAndDispatch(
    TNode<JSAny> function, TNode<Context> context, const RegListNodePair& args,
    TNode<UintPtrT> slot_id) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), ConvertReceiverMode::kAny);

#ifndef V8_JITLESS
  TNode<Union<FeedbackVector, Undefined>> maybe_feedback_vector =
      LoadFeedbackVector();
  LazyNode<JSAny> receiver = [=, this] {
    return CAST(LoadRegisterAtOperandIndex(1));
  };
  CollectCallFeedback(function, receiver, context, maybe_feedback_vector,
                      slot_id);
#endif  // !V8_JITLESS

  Comment("call using CallWithSpread builtin");
  Builtin builtin = Builtins::InterpreterPushArgsThenCall(
      ConvertReceiverMode::kAny, InterpreterPushArgsMode::kWithFinalSpread);

  TNode<Word32T> args_count = args.reg_count();
  TailCallBuiltinThenBytecodeDispatch(builtin, context, args_count,
                                      args.base_reg_location(), function);
  // TailCallStubThenDispatch updates accumulator with result.
  implicit_register_use_ =
      implicit_register_use_ | ImplicitRegisterUse::kWriteAccumulator;
}

TNode<Object> InterpreterAssembler::Construct(
    TNode<JSAny> target, TNode<Context> context, TNode<JSAny> new_target,
    const RegListNodePair& args, TNode<UintPtrT> slot_id,
    TNode<Union<FeedbackVector, Undefined>> maybe_feedback_vector) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  TVARIABLE(Object, var_result);
  TVARIABLE(AllocationSite, var_site);
  Label return_result(this), try_fast_construct(this), construct_generic(this),
      construct_array(this, &var_site);

  TNode<Word32T> args_count = JSParameterCount(args.reg_count());
  // TODO(42200059): Propagate TaggedIndex usage.
  CollectConstructFeedback(context, target, new_target, maybe_feedback_vector,
                           IntPtrToTaggedIndex(Signed(slot_id)),
                           UpdateFeedbackMode::kOptionalFeedback,
                           &try_fast_construct, &construct_array, &var_site);

  BIND(&try_fast_construct);
  {
    Comment("call using FastConstruct builtin");
    GotoIf(TaggedIsSmi(target), &construct_generic);
    GotoIfNot(IsJSFunction(CAST(target)), &construct_generic);
    var_result =
        CallBuiltin(Builtin::kInterpreterPushArgsThenFastConstructFunction,
                    context, args_count, args.base_reg_location(), target,
                    new_target, UndefinedConstant());
    Goto(&return_result);
  }

  BIND(&construct_generic);
  {
    // TODO(bmeurer): Remove the generic type_info parameter from the Construct.
    Comment("call using Construct builtin");
    Builtin builtin = Builtins::InterpreterPushArgsThenConstruct(
        InterpreterPushArgsMode::kOther);
    var_result =
        CallBuiltin(builtin, context, args_count, args.base_reg_location(),
                    target, new_target, UndefinedConstant());
    Goto(&return_result);
  }

  BIND(&construct_array);
  {
    // TODO(bmeurer): Introduce a dedicated builtin to deal with the Array
    // constructor feedback collection inside of Ignition.
    Comment("call using ConstructArray builtin");
    Builtin builtin = Builtins::InterpreterPushArgsThenConstruct(
        InterpreterPushArgsMode::kArrayFunction);
    var_result =
        CallBuiltin(builtin, context, args_count, args.base_reg_location(),
                    target, new_target, var_site.value());
    Goto(&return_result);
  }

  BIND(&return_result);
  return var_result.value();
}

TNode<Object> InterpreterAssembler::ConstructWithSpread(
    TNode<JSAny> target, TNode<Context> context, TNode<JSAny> new_target,
    const RegListNodePair& args, TNode<UintPtrT> slot_id) {
  // TODO(bmeurer): Unify this with the Construct bytecode feedback
  // above once we have a way to pass the AllocationSite to the Array
  // constructor _and_ spread the last argument at the same time.
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));

#ifndef V8_JITLESS
  // TODO(syg): Is the feedback collection logic here the same as
  // CollectConstructFeedback?
  Label extra_checks(this, Label::kDeferred), construct(this);
  TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
  GotoIf(IsUndefined(maybe_feedback_vector), &construct);
  TNode<FeedbackVector> feedback_vector = CAST(maybe_feedback_vector);

  // Increment the call count.
  IncrementCallCount(feedback_vector, slot_id);

  // Check if we have monomorphic {new_target} feedback already.
  TNode<HeapObjectReference> feedback =
      CAST(LoadFeedbackVectorSlot(feedback_vector, slot_id));
  Branch(IsWeakReferenceToObject(feedback, new_target), &construct,
         &extra_checks);

  BIND(&extra_checks);
  {
    Label check_initialized(this), initialize(this), mark_megamorphic(this);

    // Check if it is a megamorphic {new_target}.
    Comment("check if megamorphic");
    TNode<BoolT> is_megamorphic = TaggedEqual(
        feedback,
        HeapConstantNoHole(FeedbackVector::MegamorphicSentinel(isolate())));
    GotoIf(is_megamorphic, &construct);

    Comment("check if weak reference");
    GotoIfNot(IsWeakOrCleared(feedback), &check_initialized);

    // If the weak reference is cleared, we have a new chance to become
    // monomorphic.
    Comment("check if weak reference is cleared");
    Branch(IsCleared(feedback), &initialize, &mark_megamorphic);

    BIND(&check_initialized);
    {
      // Check if it is uninitialized.
      Comment("check if uninitialized");
      TNode<BoolT> is_uninitialized =
          TaggedEqual(feedback, UninitializedSymbolConstant());
      Branch(is_uninitialized, &initialize, &mark_megamorphic);
    }

    BIND(&initialize);
    {
      Comment("check if function in same native context");
      GotoIf(TaggedIsSmi(new_target), &mark_megamorphic);
      // Check if the {new_target} is a JSFunction or JSBoundFunction
      // in the current native context.
      TVARIABLE(HeapObject, var_current, CAST(new_target));
      Label loop(this, &var_current), done_loop(this);
      Goto(&loop);
      BIND(&loop);
      {
        Label if_boundfunction(this), if_function(this);
        TNode<HeapObject> current = var_current.value();
        TNode<Uint16T> current_instance_type = LoadInstanceType(current);
        GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
               &if_boundfunction);
        Branch(IsJSFunctionInstanceType(current_instance_type), &if_function,
               &mark_megamorphic);

        BIND(&if_function);
        {
          // Check that the JSFunction {current} is in the current native
          // context.
          TNode<Context> current_context =
              CAST(LoadObjectField(current, JSFunction::kContextOffset));
          TNode<NativeContext> current_native_context =
              LoadNativeContext(current_context);
          Branch(
              TaggedEqual(LoadNativeContext(context), current_native_context),
              &done_loop, &mark_megamorphic);
        }

        BIND(&if_boundfunction);
        {
          // Continue with the [[BoundTargetFunction]] of {current}.
          var_current = LoadObjectField<HeapObject>(
              current, JSBoundFunction::kBoundTargetFunctionOffset);
          Goto(&loop);
        }
      }
      BIND(&done_loop);
      StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id,
                                         CAST(new_target));
      ReportFeedbackUpdate(feedback_vector, slot_id,
                           "ConstructWithSpread:Initialize");
      Goto(&construct);
    }

    BIND(&mark_megamorphic);
    {
      // MegamorphicSentinel is an immortal immovable object so
      // write-barrier is not needed.
      Comment("transition to megamorphic");
      DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol));
      StoreFeedbackVectorSlot(
          feedback_vector, slot_id,
          HeapConstantNoHole(FeedbackVector::MegamorphicSentinel(isolate())),
          SKIP_WRITE_BARRIER);
      ReportFeedbackUpdate(feedback_vector, slot_id,
                           "ConstructWithSpread:TransitionMegamorphic");
      Goto(&construct);
    }
  }

  BIND(&construct);
#endif  // !V8_JITLESS
  Comment("call using ConstructWithSpread builtin");
  Builtin builtin = Builtins::InterpreterPushArgsThenConstruct(
      InterpreterPushArgsMode::kWithFinalSpread);
  TNode<Word32T> args_count = JSParameterCount(args.reg_count());
  return CallBuiltin(builtin, context, args_count, args.base_reg_location(),
                     target, new_target, UndefinedConstant());
}

// TODO(v8:13249): Add a FastConstruct variant to avoid pushing arguments twice
// (once here, and once again in construct stub).
TNode<Object> InterpreterAssembler::ConstructForwardAllArgs(
    TNode<JSAny> target, TNode<Context> context, TNode<JSAny> new_target,
    TNode<TaggedIndex> slot_id) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  TVARIABLE(Object, var_result);
  TVARIABLE(AllocationSite, var_site);

#ifndef V8_JITLESS
  Label construct(this);

  TNode<Union<FeedbackVector, Undefined>> maybe_feedback_vector =
      LoadFeedbackVector();
  GotoIf(IsUndefined(maybe_feedback_vector), &construct);

  CollectConstructFeedback(context, target, new_target, maybe_feedback_vector,
                           slot_id, UpdateFeedbackMode::kOptionalFeedback,
                           &construct, &construct, &var_site);
  BIND(&construct);
#endif  // !V8_JITLESS

  return CallBuiltin(Builtin::kInterpreterForwardAllArgsThenConstruct, context,
                     target, new_target);
}

template <class T>
TNode<T> InterpreterAssembler::CallRuntimeN(TNode<Uint32T> function_id,
                                            TNode<Context> context,
                                            const RegListNodePair& args,
                                            int return_count) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK(Bytecodes::IsCallRuntime(bytecode_));

  // Get the function entry from the function id.
  TNode<RawPtrT> function_table = ReinterpretCast<RawPtrT>(ExternalConstant(
      ExternalReference::runtime_function_table_address(isolate())));
  TNode<Word32T> function_offset =
      Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function)));
  TNode<WordT> function =
      IntPtrAdd(function_table, ChangeUint32ToWord(function_offset));
  TNode<RawPtrT> function_entry = Load<RawPtrT>(
      function, IntPtrConstant(offsetof(Runtime::Function, entry)));

  Builtin centry = Builtins::InterpreterCEntry(return_count);
  return CallBuiltin<T>(centry, context, args.reg_count(),
                        args.base_reg_location(), function_entry);
}

template V8_EXPORT_PRIVATE TNode<Object> InterpreterAssembler::CallRuntimeN(
    TNode<Uint32T> function_id, TNode<Context> context,
    const RegListNodePair& args, int return_count);
template V8_EXPORT_PRIVATE TNode<PairT<Object, Object>>
InterpreterAssembler::CallRuntimeN(TNode<Uint32T> function_id,
                                   TNode<Context> context,
                                   const RegListNodePair& args,
                                   int return_count);
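
// Tiering decisions are driven by an interrupt budget stored in the
// function's FeedbackCell. Handlers subtract weights from the budget and,
// once it goes negative, call the bytecode-budget-interrupt runtime, which
// may trigger a tier-up (and optionally a stack check).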

TNode<Int32T> InterpreterAssembler::UpdateInterruptBudget(
    TNode<Int32T> weight) {
  TNode<JSFunction> function = LoadFunctionClosure();
  TNode<FeedbackCell> feedback_cell =
      LoadObjectField<FeedbackCell>(function, JSFunction::kFeedbackCellOffset);
  TNode<Int32T> old_budget = LoadObjectField<Int32T>(
      feedback_cell, FeedbackCell::kInterruptBudgetOffset);

  // Update budget by |weight| and check if it reaches zero.
  TNode<Int32T> new_budget = Int32Sub(old_budget, weight);
  // Update budget.
  StoreObjectFieldNoWriteBarrier(
      feedback_cell, FeedbackCell::kInterruptBudgetOffset, new_budget);
  return new_budget;
}

void InterpreterAssembler::DecreaseInterruptBudget(
    TNode<Int32T> weight, StackCheckBehavior stack_check_behavior) {
  Comment("[ DecreaseInterruptBudget");
  Label done(this), interrupt_check(this);

  // Assert that the weight is positive.
  CSA_DCHECK(this, Int32GreaterThanOrEqual(weight, Int32Constant(0)));

  // Make sure we include the current bytecode in the budget calculation.
  TNode<Int32T> weight_after_bytecode =
      Int32Add(weight, Int32Constant(CurrentBytecodeSize()));
  TNode<Int32T> new_budget = UpdateInterruptBudget(weight_after_bytecode);
  Branch(Int32GreaterThanOrEqual(new_budget, Int32Constant(0)), &done,
         &interrupt_check);

  BIND(&interrupt_check);
  TNode<JSFunction> function = LoadFunctionClosure();
  CallRuntime(stack_check_behavior == kEnableStackCheck
                  ? Runtime::kBytecodeBudgetInterruptWithStackCheck_Ignition
                  : Runtime::kBytecodeBudgetInterrupt_Ignition,
              GetContext(), function);
  Goto(&done);

  BIND(&done);

  Comment("] DecreaseInterruptBudget");
}

TNode<IntPtrT> InterpreterAssembler::Advance() {
  return Advance(CurrentBytecodeSize());
}

TNode<IntPtrT> InterpreterAssembler::Advance(int delta) {
  return Advance(IntPtrConstant(delta));
}

TNode<IntPtrT> InterpreterAssembler::Advance(TNode<IntPtrT> delta) {
  TNode<IntPtrT> next_offset = IntPtrAdd(BytecodeOffset(), delta);
  bytecode_offset_ = next_offset;
  return next_offset;
}

void InterpreterAssembler::JumpToOffset(TNode<IntPtrT> new_bytecode_offset) {
  DCHECK(!Bytecodes::IsStarLookahead(bytecode_, operand_scale_));
#ifdef V8_TRACE_UNOPTIMIZED
  TraceBytecode(Runtime::kTraceUnoptimizedBytecodeExit);
#endif
  bytecode_offset_ = new_bytecode_offset;
  TNode<RawPtrT> target_bytecode =
      UncheckedCast<RawPtrT>(LoadBytecode(new_bytecode_offset));
  DispatchToBytecode(target_bytecode, new_bytecode_offset);
}

void InterpreterAssembler::Jump(TNode<IntPtrT> jump_offset) {
  JumpToOffset(IntPtrAdd(BytecodeOffset(), jump_offset));
}

void InterpreterAssembler::JumpBackward(TNode<IntPtrT> jump_offset) {
  DecreaseInterruptBudget(TruncateWordToInt32(jump_offset),
                          kEnableStackCheck);
  JumpToOffset(IntPtrSub(BytecodeOffset(), jump_offset));
}

void InterpreterAssembler::JumpConditional(TNode<BoolT> condition,
                                           TNode<IntPtrT> jump_offset) {
  Label match(this), no_match(this);

  Branch(condition, &match, &no_match);
  BIND(&match);
  Jump(jump_offset);
  BIND(&no_match);
  Dispatch();
}

void InterpreterAssembler::JumpConditionalByImmediateOperand(
    TNode<BoolT> condition, int operand_index) {
  Label match(this), no_match(this);

  Branch(condition, &match, &no_match);
  BIND(&match);
  TNode<IntPtrT> jump_offset = Signed(BytecodeOperandUImmWord(operand_index));
  Jump(jump_offset);
  BIND(&no_match);
  Dispatch();
}

void InterpreterAssembler::JumpConditionalByConstantOperand(
    TNode<BoolT> condition, int operand_index) {
  Label match(this), no_match(this);

  Branch(condition, &match, &no_match);
  BIND(&match);
  TNode<IntPtrT> jump_offset =
      LoadAndUntagConstantPoolEntryAtOperandIndex(operand_index);
  Jump(jump_offset);
  BIND(&no_match);
  Dispatch();
}

void InterpreterAssembler::JumpIfTaggedEqual(TNode<Object> lhs,
                                             TNode<Object> rhs,
                                             TNode<IntPtrT> jump_offset) {
  JumpConditional(TaggedEqual(lhs, rhs), jump_offset);
}

void InterpreterAssembler::JumpIfTaggedEqual(TNode<Object> lhs,
                                             TNode<Object> rhs,
                                             int operand_index) {
  JumpConditionalByImmediateOperand(TaggedEqual(lhs, rhs), operand_index);
}

void InterpreterAssembler::JumpIfTaggedEqualConstant(TNode<Object> lhs,
                                                     TNode<Object> rhs,
                                                     int operand_index) {
  JumpConditionalByConstantOperand(TaggedEqual(lhs, rhs), operand_index);
}

void InterpreterAssembler::JumpIfTaggedNotEqual(TNode<Object> lhs,
                                                TNode<Object> rhs,
                                                TNode<IntPtrT> jump_offset) {
  JumpConditional(TaggedNotEqual(lhs, rhs), jump_offset);
}

void InterpreterAssembler::JumpIfTaggedNotEqual(TNode<Object> lhs,
                                                TNode<Object> rhs,
                                                int operand_index) {
  JumpConditionalByImmediateOperand(TaggedNotEqual(lhs, rhs), operand_index);
}

void InterpreterAssembler::JumpIfTaggedNotEqualConstant(TNode<Object> lhs,
                                                        TNode<Object> rhs,
                                                        int operand_index) {
  JumpConditionalByConstantOperand(TaggedNotEqual(lhs, rhs), operand_index);
}

TNode<WordT> InterpreterAssembler::LoadBytecode(
    TNode<IntPtrT> bytecode_offset) {
  TNode<Uint8T> bytecode =
      Load<Uint8T>(BytecodeArrayTaggedPointer(), bytecode_offset);
  return ChangeUint32ToWord(bytecode);
}

TNode<IntPtrT> InterpreterAssembler::LoadParameterCountWithoutReceiver() {
  TNode<Int32T> parameter_count =
      LoadBytecodeArrayParameterCountWithoutReceiver(
          BytecodeArrayTaggedPointer());
  return ChangeInt32ToIntPtr(parameter_count);
}

void InterpreterAssembler::StarDispatchLookahead(
    TNode<WordT> target_bytecode) {
  Label do_inline_star(this), done(this);

  // Check whether the following opcode is one of the short Star codes. All
  // opcodes higher than the short Star variants are invalid, and invalid
  // opcodes are never deliberately written, so we can use a one-sided check.
  // This is no less secure than the normal-length Star handler, which performs
  // no validation on its operand.
  static_assert(static_cast<int>(Bytecode::kLastShortStar) + 1 ==
                static_cast<int>(Bytecode::kIllegal));
  static_assert(Bytecode::kIllegal == Bytecode::kLast);
  TNode<Int32T> first_short_star_bytecode =
      Int32Constant(static_cast<int>(Bytecode::kFirstShortStar));
  TNode<BoolT> is_star = Uint32GreaterThanOrEqual(
      TruncateWordToInt32(target_bytecode), first_short_star_bytecode);
  Branch(is_star, &do_inline_star, &done);

  BIND(&do_inline_star);
  {
    InlineShortStar(target_bytecode);

    // Rather than merging control flow to a single indirect jump, we can get
    // better branch prediction by duplicating it. This is because the
    // instruction following a merged X + StarN is a bad predictor of the
    // instruction following a non-merged X, and vice versa.
    DispatchToBytecode(LoadBytecode(BytecodeOffset()), BytecodeOffset());
  }
  BIND(&done);
}

void InterpreterAssembler::InlineShortStar(TNode<WordT> target_bytecode) {
  Bytecode previous_bytecode = bytecode_;
  ImplicitRegisterUse previous_acc_use = implicit_register_use_;

  // At this point we don't know statically what bytecode we're executing, but
  // kStar0 has the right attributes (namely, no operands) for any of the short
  // Star codes.
  bytecode_ = Bytecode::kStar0;
  implicit_register_use_ = ImplicitRegisterUse::kNone;

#ifdef V8_TRACE_UNOPTIMIZED
  TraceBytecode(Runtime::kTraceUnoptimizedBytecodeEntry);
#endif

  StoreRegisterForShortStar(GetAccumulator(), target_bytecode);

  DCHECK_EQ(implicit_register_use_,
            Bytecodes::GetImplicitRegisterUse(bytecode_));

  Advance();
  bytecode_ = previous_bytecode;
  implicit_register_use_ = previous_acc_use;
}
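
// The dispatch loop: advance the bytecode offset past the current bytecode,
// load the next opcode, and tail-call its handler through the dispatch table.
// Control never returns to the current handler.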

void InterpreterAssembler::Dispatch() {
  Comment("========= Dispatch");
  DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
  TNode<IntPtrT> target_offset = Advance();
  TNode<WordT> target_bytecode = LoadBytecode(target_offset);
  DispatchToBytecodeWithOptionalStarLookahead(target_bytecode);
}

void InterpreterAssembler::DispatchToBytecodeWithOptionalStarLookahead(
    TNode<WordT> target_bytecode) {
  if (Bytecodes::IsStarLookahead(bytecode_, operand_scale_)) {
    StarDispatchLookahead(target_bytecode);
  } else {
    DispatchToBytecode(target_bytecode, BytecodeOffset());
  }
}

void InterpreterAssembler::DispatchToBytecode(
    TNode<WordT> target_bytecode, TNode<IntPtrT> new_bytecode_offset) {
  if (V8_IGNITION_DISPATCH_COUNTING_BOOL) {
    TraceBytecodeDispatch(target_bytecode);
  }

  TNode<RawPtrT> target_code_entry = Load<RawPtrT>(
      DispatchTablePointer(), TimesSystemPointerSize(target_bytecode));

  DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset);
}

void InterpreterAssembler::DispatchToBytecodeHandlerEntry(
    TNode<RawPtrT> handler_entry, TNode<IntPtrT> bytecode_offset) {
  TailCallBytecodeDispatch(InterpreterDispatchDescriptor{}, handler_entry,
                           GetAccumulatorUnchecked(), bytecode_offset,
                           BytecodeArrayTaggedPointer(),
                           DispatchTablePointer());
}

void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
  // Dispatching a wide bytecode requires treating the prefix
  // bytecode as a base pointer into the dispatch table and dispatching
  // the bytecode that follows relative to this base.
  //
  //   Indices 0-255 correspond to bytecodes with operand_scale == 0
  //   Indices 256-511 correspond to bytecodes with operand_scale == 1
  //   Indices 512-767 correspond to bytecodes with operand_scale == 2
  DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
  TNode<IntPtrT> next_bytecode_offset = Advance(1);
  TNode<WordT> next_bytecode = LoadBytecode(next_bytecode_offset);

  if (V8_IGNITION_DISPATCH_COUNTING_BOOL) {
    TraceBytecodeDispatch(next_bytecode);
  }

  TNode<IntPtrT> base_index;
  switch (operand_scale) {
    case OperandScale::kDouble:
      base_index = IntPtrConstant(1 << kBitsPerByte);
      break;
    case OperandScale::kQuadruple:
      base_index = IntPtrConstant(2 << kBitsPerByte);
      break;
    default:
      UNREACHABLE();
  }
  TNode<WordT> target_index = IntPtrAdd(base_index, next_bytecode);
  TNode<RawPtrT> target_code_entry = Load<RawPtrT>(
      DispatchTablePointer(), TimesSystemPointerSize(target_index));

  DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset);
}

void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
  // TODO(rmcilroy): Investigate whether it is worth supporting self
  // optimization of primitive functions like FullCodegen.

  // Update profiling count by the number of bytes between the end of the
  // current bytecode and the start of the first one, to simulate backedge to
  // start of function.
  //
  // With headers and current offset, the bytecode array layout looks like:
  //
  //           <---------- simulated backedge ----------
  // | header | first bytecode | .... | return bytecode |
  //  |<------ current offset ------->
  //  ^ tagged bytecode array pointer
  //
  // UpdateInterruptBudget already handles adding the bytecode size to the
  // length of the back-edge, so we just have to correct for the non-zero
  // offset of the first bytecode.

  TNode<Int32T> profiling_weight =
      Int32Sub(TruncateIntPtrToInt32(BytecodeOffset()),
               Int32Constant(kFirstBytecodeOffset));
  DecreaseInterruptBudget(profiling_weight, kDisableStackCheck);
}

TNode<Int8T> InterpreterAssembler::LoadOsrState(
    TNode<FeedbackVector> feedback_vector) {
  // We're loading an 8-bit field, mask it.
  return UncheckedCast<Int8T>(Word32And(
      LoadObjectField<Int8T>(feedback_vector, FeedbackVector::kOsrStateOffset),
      0xFF));
}

void InterpreterAssembler::Abort(AbortReason abort_reason) {
  TNode<Smi> abort_id = SmiConstant(abort_reason);
  CallRuntime(Runtime::kAbort, GetContext(), abort_id);
}

void InterpreterAssembler::AbortIfWordNotEqual(TNode<WordT> lhs,
                                               TNode<WordT> rhs,
                                               AbortReason abort_reason) {
  Label ok(this), abort(this, Label::kDeferred);
  Branch(WordEqual(lhs, rhs), &ok, &abort);

  BIND(&abort);
  Abort(abort_reason);
  Goto(&ok);

  BIND(&ok);
}

void InterpreterAssembler::OnStackReplacement(
    TNode<Context> context, TNode<FeedbackVector> feedback_vector,
    TNode<IntPtrT> relative_jump, TNode<Int32T> loop_depth,
    TNode<IntPtrT> feedback_slot, TNode<Int8T> osr_state,
    OnStackReplacementParams params) {
  // Three cases may cause us to attempt OSR, in the following order:
  //
  // 1) Presence of cached OSR Turbofan/Maglev code.
  // 2) The OSR urgency exceeds the current loop depth - in that case, trigger
  //    a Turbofan/Maglev OSR compilation.
  // 3) Presence of cached OSR Sparkplug code.

  TVARIABLE(Object, maybe_target_code, SmiConstant(0));
  Label osr_to_opt(this), osr_to_sparkplug(this);

  // Case 1).
  {
    Label next(this);
    TNode<MaybeObject> maybe_cached_osr_code =
        LoadFeedbackVectorSlot(feedback_vector, feedback_slot);
    GotoIf(IsCleared(maybe_cached_osr_code), &next);
    maybe_target_code = GetHeapObjectAssumeWeak(maybe_cached_osr_code);

    // Is it marked_for_deoptimization? If yes, clear the slot.
    TNode<CodeWrapper> code_wrapper = CAST(maybe_target_code.value());
    maybe_target_code =
        LoadCodePointerFromObject(code_wrapper, CodeWrapper::kCodeOffset);
    GotoIfNot(IsMarkedForDeoptimization(CAST(maybe_target_code.value())),
              &osr_to_opt);
    StoreFeedbackVectorSlot(feedback_vector, Unsigned(feedback_slot),
                            ClearedValue());
    maybe_target_code = SmiConstant(0);

    Goto(&next);
    BIND(&next);
  }

  // Case 2).
  {
    static_assert(FeedbackVector::OsrUrgencyBits::kShift == 0);
    TNode<Int32T> osr_urgency = Word32And(
        osr_state, Int32Constant(FeedbackVector::OsrUrgencyBits::kMask));
    GotoIf(Uint32LessThan(loop_depth, osr_urgency), &osr_to_opt);

    // Case 3).
    if (params == OnStackReplacementParams::kBaselineCodeIsCached) {
      Goto(&osr_to_sparkplug);
    } else {
      DCHECK_EQ(params, OnStackReplacementParams::kDefault);
      TNode<SharedFunctionInfo> sfi = LoadObjectField<SharedFunctionInfo>(
          LoadFunctionClosure(), JSFunction::kSharedFunctionInfoOffset);
      GotoIf(SharedFunctionInfoHasBaselineCode(sfi), &osr_to_sparkplug);
      JumpBackward(relative_jump);
    }
  }

  BIND(&osr_to_opt);
  {
    TNode<Uint32T> length =
        LoadAndUntagBytecodeArrayLength(BytecodeArrayTaggedPointer());
    TNode<Uint32T> weight =
        Uint32Mul(length, Uint32Constant(v8_flags.osr_to_tierup));
    DecreaseInterruptBudget(Signed(weight), kDisableStackCheck);
    TNode<Smi> expected_param_count = SmiFromInt32(
        LoadBytecodeArrayParameterCount(BytecodeArrayTaggedPointer()));
    CallBuiltin(Builtin::kInterpreterOnStackReplacement, context,
                maybe_target_code.value(), expected_param_count);
    UpdateInterruptBudget(Int32Mul(Signed(weight), Int32Constant(-1)));
    JumpBackward(relative_jump);
  }

  BIND(&osr_to_sparkplug);
  {
    // We already compiled the baseline code, so we don't need to handle failed
    // compilation as in the Ignition -> Turbofan case. Therefore we can just
    // tailcall to the OSR builtin.
    SaveBytecodeOffset();
    TailCallBuiltin(Builtin::kInterpreterOnStackReplacement_ToBaseline,
                    context);
  }
}

void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
  CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(),
              SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
}

void InterpreterAssembler::TraceBytecodeDispatch(
    TNode<WordT> target_bytecode) {
  TNode<ExternalReference> counters_table = ExternalConstant(
      ExternalReference::interpreter_dispatch_counters(isolate()));
  TNode<IntPtrT> source_bytecode_table_index = IntPtrConstant(
      static_cast<int>(bytecode_) * (static_cast<int>(Bytecode::kLast) + 1));

  TNode<WordT> counter_offset = TimesSystemPointerSize(
      IntPtrAdd(source_bytecode_table_index, target_bytecode));
  TNode<IntPtrT> old_counter = Load<IntPtrT>(counters_table, counter_offset);

  Label counter_ok(this), counter_saturated(this, Label::kDeferred);

  TNode<BoolT> counter_reached_max = WordEqual(
      old_counter, IntPtrConstant(std::numeric_limits<uintptr_t>::max()));
  Branch(counter_reached_max, &counter_saturated, &counter_ok);

  BIND(&counter_ok);
  {
    TNode<IntPtrT> new_counter = IntPtrAdd(old_counter, IntPtrConstant(1));
    StoreNoWriteBarrier(MachineType::PointerRepresentation(), counters_table,
                        counter_offset, new_counter);
    Goto(&counter_saturated);
  }

  BIND(&counter_saturated);
}

// static
bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_RISCV32
  return false;
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X || \
    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC64 ||  \
    V8_TARGET_ARCH_LOONG64
  return true;
#else
#error "Unknown Architecture"
#endif
}

void InterpreterAssembler::AbortIfRegisterCountInvalid(
    TNode<FixedArray> parameters_and_registers, TNode<IntPtrT> parameter_count,
    TNode<UintPtrT> register_count) {
  TNode<IntPtrT> array_size =
      LoadAndUntagFixedArrayBaseLength(parameters_and_registers);

  Label ok(this), abort(this, Label::kDeferred);
  Branch(IntPtrEqual(IntPtrAdd(parameter_count, Signed(register_count)),
                     array_size),
         &ok, &abort);

  BIND(&abort);
  Abort(AbortReason::kInvalidParametersAndRegistersInGenerator);
  Goto(&ok);

  BIND(&ok);
}
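
// Generator suspend/resume support: on suspend, the formal parameters
// (without the receiver) followed by the live interpreter registers are
// copied into the generator's parameters_and_registers array; on resume the
// registers are copied back and the array slots are overwritten with the
// stale-register sentinel.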

TNode<FixedArray> InterpreterAssembler::ExportParametersAndRegisterFile(
    TNode<FixedArray> array, const RegListNodePair& registers) {
  // Store the formal parameters (without receiver) followed by the
  // registers into the generator's internal parameters_and_registers field.
  TNode<IntPtrT> parameter_count = LoadParameterCountWithoutReceiver();
  TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count());
  if (v8_flags.debug_code) {
    CSA_DCHECK(this, IntPtrEqual(registers.base_reg_location(),
                                 RegisterLocation(Register(0))));
    AbortIfRegisterCountInvalid(array, parameter_count, register_count);
  }

  {
    TVARIABLE(IntPtrT, var_index);
    var_index = IntPtrConstant(0);

    // Iterate over parameters and write them into the array.
    Label loop(this, &var_index), done_loop(this);

    TNode<IntPtrT> reg_base =
        IntPtrConstant(Register::FromParameterIndex(0).ToOperand() + 1);

    Goto(&loop);
    BIND(&loop);
    {
      TNode<IntPtrT> index = var_index.value();
      GotoIfNot(UintPtrLessThan(index, parameter_count), &done_loop);

      TNode<IntPtrT> reg_index = IntPtrAdd(reg_base, index);
      TNode<Object> value = LoadRegister(reg_index);

      StoreFixedArrayElement(array, index, value);

      var_index = IntPtrAdd(index, IntPtrConstant(1));
      Goto(&loop);
    }
    BIND(&done_loop);
  }

  {
    // Iterate over register file and write values into array.
    // The mapping of register to array index must match that used in
    // BytecodeGraphBuilder::VisitResumeGenerator.
    TVARIABLE(IntPtrT, var_index);
    var_index = IntPtrConstant(0);

    Label loop(this, &var_index), done_loop(this);
    Goto(&loop);
    BIND(&loop);
    {
      TNode<IntPtrT> index = var_index.value();
      GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);

      TNode<IntPtrT> reg_index =
          IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
      TNode<Object> value = LoadRegister(reg_index);

      TNode<IntPtrT> array_index = IntPtrAdd(parameter_count, index);
      StoreFixedArrayElement(array, array_index, value);

      var_index = IntPtrAdd(index, IntPtrConstant(1));
      Goto(&loop);
    }
    BIND(&done_loop);
  }

  return array;
}

TNode<FixedArray> InterpreterAssembler::ImportRegisterFile(
    TNode<FixedArray> array, const RegListNodePair& registers) {
  TNode<IntPtrT> parameter_count = LoadParameterCountWithoutReceiver();
  TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count());
  if (v8_flags.debug_code) {
    CSA_DCHECK(this, IntPtrEqual(registers.base_reg_location(),
                                 RegisterLocation(Register(0))));
    AbortIfRegisterCountInvalid(array, parameter_count, register_count);
  }

  TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));

  // Iterate over array and write values into register file. Also erase the
  // array contents to not keep them alive artificially.
  Label loop(this, &var_index), done_loop(this);
  Goto(&loop);
  BIND(&loop);
  {
    TNode<IntPtrT> index = var_index.value();
    GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);

    TNode<IntPtrT> array_index = IntPtrAdd(parameter_count, index);
    TNode<Object> value = LoadFixedArrayElement(array, array_index);

    TNode<IntPtrT> reg_index =
        IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
    StoreRegister(value, reg_index);

    StoreFixedArrayElement(array, array_index, StaleRegisterConstant());

    var_index = IntPtrAdd(index, IntPtrConstant(1));
    Goto(&loop);
  }
  BIND(&done_loop);

  return array;
}
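
// ToNumber/ToNumeric with feedback: Smis record kSignedSmall, HeapNumbers
// record kNumber, BigInts record kBigInt (in kToNumeric mode), and anything
// else falls back to the NonNumberToNumber/NonNumberToNumeric builtin and
// records kAny.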

int InterpreterAssembler::CurrentBytecodeSize() const {
  return Bytecodes::Size(bytecode_, operand_scale_);
}

void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) {
  TNode<Object> object = GetAccumulator();
  TNode<Context> context = GetContext();

  TVARIABLE(Smi, var_type_feedback);
  TVARIABLE(Numeric, var_result);
  Label if_done(this), if_objectissmi(this), if_objectisheapnumber(this),
      if_objectisother(this, Label::kDeferred);

  GotoIf(TaggedIsSmi(object), &if_objectissmi);
  Branch(IsHeapNumber(CAST(object)), &if_objectisheapnumber, &if_objectisother);

  BIND(&if_objectissmi);
  {
    var_result = CAST(object);
    var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall);
    Goto(&if_done);
  }

  BIND(&if_objectisheapnumber);
  {
    var_result = CAST(object);
    var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber);
    Goto(&if_done);
  }

  BIND(&if_objectisother);
  {
    auto builtin = Builtin::kNonNumberToNumber;
    if (mode == Object::Conversion::kToNumeric) {
      builtin = Builtin::kNonNumberToNumeric;
      // Special case for collecting BigInt feedback.
      Label not_bigint(this);
      GotoIfNot(IsBigInt(CAST(object)), &not_bigint);
      {
        var_result = CAST(object);
        var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt);
        Goto(&if_done);
      }
      BIND(&not_bigint);
    }

    // Convert {object} by calling out to the appropriate builtin.
    var_result = CAST(CallBuiltin(builtin, context, object));
    var_type_feedback = SmiConstant(BinaryOperationFeedback::kAny);
    Goto(&if_done);
  }

  BIND(&if_done);

  // Record the type feedback collected for {object}.
  TNode<UintPtrT> slot_index = BytecodeOperandIdx(0);
  TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();

  MaybeUpdateFeedback(var_type_feedback.value(), maybe_feedback_vector,
                      slot_index);

  SetAccumulator(var_result.value());
  Dispatch();
}

#undef TVARIABLE_CONSTRUCTOR

}  // namespace interpreter
}  // namespace internal
}  // namespace v8