v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
code-assembler.cc
Go to the documentation of this file.
1// Copyright 2015 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
#include <optional>
#include <ostream>
#include <thread> // NOLINT(build/c++11) (for this_thread::yield())
#include <utility>
10
16#include "src/codegen/tnode.h"
29#include "src/objects/smi.h"
30#include "src/utils/memcopy.h"
31#include "src/zone/zone.h"
32
33namespace v8 {
34namespace internal {
35
// Out-of-line definitions for the static constexpr MachineTypeOf<T>::value
// members (presumably declared in src/codegen/tnode.h, included above).
// Under C++17 these are redundant — static constexpr data members are
// implicitly inline — but they remain for older-toolchain compatibility.
constexpr MachineType MachineTypeOf<Smi>::value;
constexpr MachineType MachineTypeOf<Object>::value;
constexpr MachineType MachineTypeOf<MaybeObject>::value;
39
40namespace compiler {
41
// Compile-time checks of TNode's subtyping rules: a Number node converts
// upward to Object and to the Smi|HeapObject union, but the wider union does
// not implicitly convert back down to Number.
static_assert(std::is_convertible_v<TNode<Number>, TNode<Object>>,
              "test subtyping");
static_assert(
    std::is_convertible_v<TNode<Number>, TNode<UnionOf<Smi, HeapObject>>>,
    "test subtyping");
static_assert(
    !std::is_convertible_v<TNode<UnionOf<Smi, HeapObject>>, TNode<Number>>,
    "test subtyping");
50
52 Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
53 CodeKind kind, const char* name, Builtin builtin)
54 // TODO(rmcilroy): Should we use Linkage::GetBytecodeDispatchDescriptor for
55 // bytecode handlers?
57 isolate, zone,
58 Linkage::GetStubCallDescriptor(
59 zone, descriptor, descriptor.GetStackParameterCount(),
60 CallDescriptor::kNoFlags, Operator::kNoProperties),
61 kind, name, builtin) {}
62
64 CallDescriptor* call_descriptor,
65 CodeKind kind, const char* name,
66 Builtin builtin)
67 : raw_assembler_(new RawMachineAssembler(
68 isolate, zone->New<TFGraph>(zone), call_descriptor,
69 MachineType::PointerRepresentation(),
70 InstructionSelector::SupportedMachineOperatorFlags(),
71 InstructionSelector::AlignmentRequirements())),
72 kind_(kind),
73 name_(name),
74 builtin_(builtin),
75 code_generated_(false),
76 variables_(zone),
77 jsgraph_(zone->New<JSGraph>(
78 isolate, raw_assembler_->graph(), raw_assembler_->common(),
79 zone->New<JSOperatorBuilder>(zone), raw_assembler_->simplified(),
80 raw_assembler_->machine())) {}
81
83
85 return static_cast<int>(raw_assembler_->parameter_count());
86}
87
89
90#if DEBUG
// Debug-only helper (guarded by the surrounding #if DEBUG): prints the basic
// block currently under construction to |os| by delegating to the underlying
// RawMachineAssembler.
void CodeAssemblerState::PrintCurrentBlock(std::ostream& os) {
  raw_assembler_->PrintCurrentBlock(os);
}
94#endif
95
// Delegates the InsideBlock query to the underlying RawMachineAssembler
// (presumably: whether there is a current basic block to append to — confirm
// against RawMachineAssembler).
bool CodeAssemblerState::InsideBlock() { return raw_assembler_->InsideBlock(); }
97
99 const char* file,
100 int line) {
101#if DEBUG
102 AssemblerDebugInfo debug_info = {msg, file, line};
103 raw_assembler_->SetCurrentExternalSourcePosition({file, line});
104 raw_assembler_->SetInitialDebugInformation(debug_info);
105#endif // DEBUG
106}
107
109 public:
110 explicit BreakOnNodeDecorator(NodeId node_id) : node_id_(node_id) {}
111
112 void Decorate(Node* node) final {
113 if (node->id() == node_id_) {
115 }
116 }
117
118 private:
120};
121
122void CodeAssembler::BreakOnNode(int node_id) {
123 TFGraph* graph = raw_assembler()->graph();
124 Zone* zone = graph->zone();
125 GraphDecorator* decorator =
126 zone->New<BreakOnNodeDecorator>(static_cast<NodeId>(node_id));
127 graph->AddDecorator(decorator);
128}
129
131 const CodeAssemblerCallback& call_prologue,
132 const CodeAssemblerCallback& call_epilogue) {
133 // The callback can be registered only once.
136 state_->call_prologue_ = call_prologue;
137 state_->call_epilogue_ = call_epilogue;
138}
139
144
150
156
160
162 // Did you forget to call AwaitAndFinalizeCurrentBatch()?
165}
166
168 Isolate* isolate, std::unique_ptr<TurbofanCompilationJob> job) {
169#ifdef V8_USE_ADDRESS_SANITIZER
170 constexpr size_t kInputZoneBatchSize = 128UL * MB;
171#else // !V8_USE_ADDRESS_SANITIZER
172 constexpr size_t kInputZoneBatchSize = 1536UL * MB;
173#endif // V8_USE_ADDRESS_SANITIZER
174
175 // This must be called from the main thread.
176 DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
177
178 DCHECK(job->compilation_info()->code_kind() == CodeKind::BUILTIN ||
179 job->compilation_info()->code_kind() == CodeKind::BYTECODE_HANDLER);
180
181 CHECK_EQ(CompilationJob::SUCCEEDED, job->PrepareJob(isolate));
182
183 if (current_batch_zone_size_ >= kInputZoneBatchSize) {
184 AwaitAndFinalizeCurrentBatch(isolate);
185 }
186
187 QueueJob(isolate, std::move(job));
188}
189
191 Isolate* isolate, std::unique_ptr<TurbofanCompilationJob> job) {
192 current_batch_zone_size_ +=
193 job->compilation_info()->zone()->allocation_size();
194 if (v8_flags.concurrent_builtin_generation) {
195 auto* dispatcher = isolate->optimizing_compile_dispatcher();
196 // Spin until we can queue the job.
197 while (!dispatcher->TryQueueForOptimization(job)) {
198 std::this_thread::yield();
199 }
200 } else {
202 job->ExecuteJob(isolate->counters()->runtime_call_stats(),
203 isolate->main_thread_local_isolate()));
204 if (!v8_flags.turbo_profiling) {
205 main_thread_output_queue_.push_back(std::move(job));
206 } else {
207 // When profiling builtins for PGO, each builtin must be completely
208 // generated one at a time (i.e. PrepareJob, ExecuteJob, and FinalizeJob)
209 // instead of batched.
210 FinalizeJobOnMainThread(isolate, job.get());
211 }
212 }
213}
214
220
222 Isolate* isolate) {
223 if (v8_flags.concurrent_builtin_generation) {
224 auto* dispatcher = isolate->optimizing_compile_dispatcher();
225 dispatcher->WaitUntilCompilationJobsDone();
226 builtins_installed_count_ =
227 dispatcher->InstallGeneratedBuiltins(builtins_installed_count_);
228 } else {
229 DCHECK_IMPLIES(v8_flags.turbo_profiling, main_thread_output_queue_.empty());
230 while (!main_thread_output_queue_.empty()) {
231 FinalizeJobOnMainThread(isolate, main_thread_output_queue_.front().get());
232 main_thread_output_queue_.pop_front();
233 }
234 }
235 current_batch_zone_size_ = 0;
236}
237
// True when the target's machine operator builder reports a 64-bit machine.
bool CodeAssembler::Is64() const { return raw_assembler()->machine()->Is64(); }
// True when the target's machine operator builder reports a 32-bit machine.
bool CodeAssembler::Is32() const { return raw_assembler()->machine()->Is32(); }
240
242 return raw_assembler()->machine()->Float64RoundUp().IsSupported();
243}
244
246 return raw_assembler()->machine()->Float64RoundDown().IsSupported();
247}
248
252
256
263
267
271
276
278 return raw_assembler()->machine()->Word32Popcnt().IsSupported();
279}
280
282 return raw_assembler()->machine()->Word64Popcnt().IsSupported();
283}
284
286 return raw_assembler()->machine()->Word32Ctz().IsSupported();
287}
288
290 return raw_assembler()->machine()->Word64Ctz().IsSupported();
291}
292
296
300
304
308
312
316
321
323 int smi_value;
324 if (DoubleToSmiInteger(value, &smi_value)) {
325 return UncheckedCast<Number>(SmiConstant(smi_value));
326 } else {
327 // We allocate the heap number constant eagerly at this point instead of
328 // deferring allocation to code generation
329 // (see AllocateAndInstallRequestedHeapNumbers) since that makes it easier
330 // to generate constant lookups for embedded builtins.
332 isolate()->factory()->NewHeapNumberForCodeAssembler(value)));
333 }
334}
335
337 return UncheckedCast<Smi>(BitcastWordToTaggedSigned(
338 IntPtrConstant(static_cast<intptr_t>(value.ptr()))));
339}
340
342 return SmiConstant(Smi::FromInt(value));
343}
344
346 Handle<HeapObject> object) {
347 // This must be called on the main thread so that the builtins constant
348 // indices are reproducible from run to run of mksnapshot.
349 DCHECK_EQ(ThreadId::Current(), isolate()->thread_id());
350 RootIndex dummy_root;
351 Builtin dummy_builtin;
352 if (isolate()->IsGeneratingEmbeddedBuiltins() &&
353 !isolate()->roots_table().IsRootHandle(object, &dummy_root) &&
354 !isolate()->builtins()->IsBuiltinHandle(object, &dummy_builtin) &&
355 !IsInstructionStream(*object)) {
357 }
358}
359
360// This emits an untyped heap constant that is never a hole.
362 Handle<HeapObject> object) {
363 // jsgraph()->HeapConstantNoHole does a CHECK that it is in fact a hole
364 // value.
367}
368
369// This is used to emit untyped heap constants that can be a hole value.
370// Only use this if you really need to and cannot use *NoHole or *Hole.
376
377// This is used to emit an untyped heap constant that can only be Hole values.
383
385 Handle<String> internalized_string =
388 return UncheckedCast<String>(HeapConstantNoHole(internalized_string));
389}
390
396
402
406
410
414
418
420 int32_t* out_value) {
421 {
422 Int64Matcher m(node);
423 if (m.HasResolvedValue() &&
424 m.IsInRange(std::numeric_limits<int32_t>::min(),
425 std::numeric_limits<int32_t>::max())) {
426 *out_value = static_cast<int32_t>(m.ResolvedValue());
427 return true;
428 }
429 }
430
431 {
432 Int32Matcher m(node);
433 if (m.HasResolvedValue()) {
434 *out_value = m.ResolvedValue();
435 return true;
436 }
437 }
438
439 return false;
440}
441
443 int64_t* out_value) {
444 Int64Matcher m(node);
445 if (m.HasResolvedValue()) *out_value = m.ResolvedValue();
446 return m.HasResolvedValue();
447}
448
450 Node* node = tnode;
451 if (node->opcode() == IrOpcode::kBitcastWordToTaggedSigned) {
452 node = node->InputAt(0);
453 }
454 return TryToSmiConstant(ReinterpretCast<IntPtrT>(tnode), out_value);
455}
456
458 Tagged<Smi>* out_value) {
459 IntPtrMatcher m(node);
460 if (m.HasResolvedValue()) {
461 intptr_t value = m.ResolvedValue();
462 // Make sure that the value is actually a smi
463 CHECK_EQ(0, value & ((static_cast<intptr_t>(1) << kSmiShiftSize) - 1));
464 *out_value = Tagged<Smi>(static_cast<Address>(value));
465 return true;
466 }
467 return false;
468}
469
470bool CodeAssembler::TryToIntPtrConstant(TNode<Smi> tnode, intptr_t* out_value) {
471 Node* node = tnode;
472 if (node->opcode() == IrOpcode::kBitcastWordToTaggedSigned ||
473 node->opcode() == IrOpcode::kBitcastWordToTagged) {
474 node = node->InputAt(0);
475 }
476 return TryToIntPtrConstant(ReinterpretCast<IntPtrT>(tnode), out_value);
477}
478
480 intptr_t* out_value) {
481 IntPtrMatcher m(node);
482 if (m.HasResolvedValue()) *out_value = m.ResolvedValue();
483 return m.HasResolvedValue();
484}
485
488 return m.Is(isolate()->factory()->undefined_value());
489}
490
493 return m.Is(isolate()->factory()->null_value());
494}
495
497 if (index == kTargetParameterIndex) return raw_assembler()->TargetParameter();
498 return raw_assembler()->Parameter(index);
499}
500
502 auto call_descriptor = raw_assembler()->call_descriptor();
503 return call_descriptor->IsJSFunctionCall();
504}
505
507 auto call_descriptor = raw_assembler()->call_descriptor();
508 DCHECK(call_descriptor->IsJSFunctionCall());
510 static_cast<int>(call_descriptor->JSParameterCount())));
511}
512
516
521
524 // For code to support a dynamic parameter count, it's static parameter count
525 // must currently be zero, i.e. varargs. Otherwise we'd also need to ensure
526 // that the dynamic parameter count is not smaller than the static one.
527 //
528 // TODO(saelo): it would probably be a bit nicer if we could assert here that
529 // IsJSFunctionCall() is true and then use the JSParameterCount() of the
530 // descriptor instead, but that doesn't work because not all users of this
531 // feature are TFJ builtins (some are TFC builtins).
532 DCHECK_EQ(raw_assembler()->call_descriptor()->ParameterSlotCount(), 0);
534}
535
537 DCHECK_EQ(1, raw_assembler()->call_descriptor()->ReturnCount());
538 DCHECK(raw_assembler()->call_descriptor()->GetReturnType(0).IsTagged());
539 return raw_assembler()->Return(value);
540}
541
543 DCHECK_EQ(2, raw_assembler()->call_descriptor()->ReturnCount());
544 DCHECK(raw_assembler()->call_descriptor()->GetReturnType(0).IsTagged());
545 DCHECK(raw_assembler()->call_descriptor()->GetReturnType(1).IsTagged());
546 return raw_assembler()->Return(value1, value2);
547}
548
550 TNode<Object> value3) {
551 DCHECK_EQ(3, raw_assembler()->call_descriptor()->ReturnCount());
552 DCHECK(raw_assembler()->call_descriptor()->GetReturnType(0).IsTagged());
553 DCHECK(raw_assembler()->call_descriptor()->GetReturnType(1).IsTagged());
554 DCHECK(raw_assembler()->call_descriptor()->GetReturnType(2).IsTagged());
555 return raw_assembler()->Return(value1, value2, value3);
556}
557
559 DCHECK_EQ(1, raw_assembler()->call_descriptor()->ReturnCount());
561 raw_assembler()->call_descriptor()->GetReturnType(0));
562 return raw_assembler()->Return(value);
563}
564
566 DCHECK_EQ(1, raw_assembler()->call_descriptor()->ReturnCount());
568 raw_assembler()->call_descriptor()->GetReturnType(0));
569 return raw_assembler()->Return(value);
570}
571
573 DCHECK_EQ(1, raw_assembler()->call_descriptor()->ReturnCount());
574 DCHECK_EQ(
576 raw_assembler()->call_descriptor()->GetReturnType(0).representation());
577 return raw_assembler()->Return(value);
578}
579
581 DCHECK_EQ(1, raw_assembler()->call_descriptor()->ReturnCount());
583 raw_assembler()->call_descriptor()->GetReturnType(0));
584 return raw_assembler()->Return(value);
585}
586
588 DCHECK_EQ(1, raw_assembler()->call_descriptor()->ReturnCount());
590 raw_assembler()->call_descriptor()->GetReturnType(0));
591 return raw_assembler()->Return(value);
592}
593
595 DCHECK_EQ(2, raw_assembler()->call_descriptor()->ReturnCount());
596 DCHECK_EQ(
598 raw_assembler()->call_descriptor()->GetReturnType(0).representation());
599 DCHECK_EQ(
601 raw_assembler()->call_descriptor()->GetReturnType(1).representation());
602 return raw_assembler()->Return(value1, value2);
603}
604
606 DCHECK_EQ(2, raw_assembler()->call_descriptor()->ReturnCount());
607 DCHECK_EQ(
609 raw_assembler()->call_descriptor()->GetReturnType(0).representation());
610 DCHECK_EQ(
612 raw_assembler()->call_descriptor()->GetReturnType(1).representation());
613 return raw_assembler()->Return(value1, value2);
614}
615
617 DCHECK_EQ(2, raw_assembler()->call_descriptor()->ReturnCount());
618 DCHECK_EQ(
620 raw_assembler()->call_descriptor()->GetReturnType(0).representation());
621 DCHECK(raw_assembler()->call_descriptor()->GetReturnType(1).IsTagged());
622 return raw_assembler()->Return(value1, value2);
623}
624
626 DCHECK_EQ(2, raw_assembler()->call_descriptor()->ReturnCount());
627 DCHECK_EQ(
629 raw_assembler()->call_descriptor()->GetReturnType(0).representation());
630 DCHECK(raw_assembler()->call_descriptor()->GetReturnType(1).IsTagged());
631 return raw_assembler()->Return(value1, value2);
632}
633
635 DCHECK_EQ(1, raw_assembler()->call_descriptor()->ReturnCount());
636 return raw_assembler()->PopAndReturn(pop, value);
637}
638
// Returns the four given values after dropping stack slots (|pop| presumably
// counts the arguments to pop — confirm against RawMachineAssembler's
// PopAndReturn contract). The call descriptor must declare exactly four
// return values.
void CodeAssembler::PopAndReturn(Node* pop, Node* value1, Node* value2,
                                 Node* value3, Node* value4) {
  DCHECK_EQ(4, raw_assembler()->call_descriptor()->ReturnCount());
  return raw_assembler()->PopAndReturn(pop, value1, value2, value3, value4);
}
644
646 Label if_return(this), if_continue(this);
647 Branch(condition, &if_return, &if_continue);
648 Bind(&if_return);
649 Return(value);
650 Bind(&if_continue);
651}
652
654 raw_assembler()->AbortCSADcheck(message);
655}
656
658
663
664void CodeAssembler::EmitComment(std::string str) {
665 if (!v8_flags.code_comments) return;
666 raw_assembler()->Comment(str);
667}
668
// Forwards a static assertion on |value| to the RawMachineAssembler,
// carrying the |source| text along for diagnostics.
void CodeAssembler::StaticAssert(TNode<BoolT> value, const char* source) {
  raw_assembler()->StaticAssert(value, source);
}
672
673void CodeAssembler::SetSourcePosition(const char* file, int line) {
675}
676
681
685
686const std::vector<FileAndLine>& CodeAssembler::GetMacroSourcePositionStack()
687 const {
689}
690
692
693#if DEBUG
695 return label->Bind(debug_info);
696}
697#endif // DEBUG
698
702
706
707#if V8_ENABLE_WEBASSEMBLY
// Reads the machine stack pointer via the raw assembler. Only compiled in
// WebAssembly-enabled builds (see the enclosing #if V8_ENABLE_WEBASSEMBLY).
TNode<RawPtrT> CodeAssembler::LoadStackPointer() {
  return UncheckedCast<RawPtrT>(raw_assembler()->LoadStackPointer());
}
711
// Overwrites the machine stack pointer with |ptr| via the raw assembler.
// Only compiled in WebAssembly-enabled builds (enclosing
// #if V8_ENABLE_WEBASSEMBLY).
void CodeAssembler::SetStackPointer(TNode<RawPtrT> ptr) {
  raw_assembler()->SetStackPointer(ptr);
}
715#endif
716
722
727
729 return UncheckedCast<RawPtrT>(raw_assembler()->StackSlot(size, alignment));
730}
731
// Generates the CodeAssembler binary-operation wrappers: each method forwards
// its two operands to the same-named RawMachineAssembler operation and casts
// the untyped result back to TNode<ResType>. (The invocation list lives in
// the corresponding header macro.)
#define DEFINE_CODE_ASSEMBLER_BINARY_OP(name, ResType, Arg1Type, Arg2Type) \
  TNode<ResType> CodeAssembler::name(TNode<Arg1Type> a, TNode<Arg2Type> b) { \
    return UncheckedCast<ResType>(raw_assembler()->name(a, b)); \
  }
737#undef DEFINE_CODE_ASSEMBLER_BINARY_OP
738
740 TNode<Word32T> lhs_lo_word, TNode<Word32T> lhs_hi_word,
741 TNode<Word32T> rhs_lo_word, TNode<Word32T> rhs_hi_word) {
743 lhs_lo_word, lhs_hi_word, rhs_lo_word, rhs_hi_word));
744}
745
747 TNode<Word32T> lhs_lo_word, TNode<Word32T> lhs_hi_word,
748 TNode<Word32T> rhs_lo_word, TNode<Word32T> rhs_hi_word) {
750 lhs_lo_word, lhs_hi_word, rhs_lo_word, rhs_hi_word));
751}
752
754 return (shift != 0) ? WordShl(value, IntPtrConstant(shift)) : value;
755}
756
758 return (shift != 0) ? WordShr(value, IntPtrConstant(shift)) : value;
759}
760
762 return (shift != 0) ? WordSar(value, IntPtrConstant(shift)) : value;
763}
764
766 return (shift != 0) ? Word32Shr(value, Int32Constant(shift)) : value;
767}
768
770 return (shift != 0) ? Word32Sar(value, Int32Constant(shift)) : value;
771}
772
// Generates comparison wrappers that fold at graph-construction time when
// possible: if both operands resolve to constants via the supplied ToConstant
// matcher, the result is emitted directly as a BoolConstant; otherwise the
// comparison node is built by the RawMachineAssembler.
#define CODE_ASSEMBLER_COMPARE(Name, ArgT, VarT, ToConstant, op) \
  TNode<BoolT> CodeAssembler::Name(TNode<ArgT> left, TNode<ArgT> right) { \
    VarT lhs, rhs; \
    if (ToConstant(left, &lhs) && ToConstant(right, &rhs)) { \
      return BoolConstant(lhs op rhs); \
    } \
    return UncheckedCast<BoolT>(raw_assembler()->Name(left, right)); \
  }
781
789#undef CODE_ASSEMBLER_COMPARE
790
792 if (raw_assembler()->machine()->Is64()) {
794 raw_assembler()->ChangeUint32ToUint64(value));
795 }
796 return ReinterpretCast<UintPtrT>(value);
797}
798
800 if (raw_assembler()->machine()->Is64()) {
801 return UncheckedCast<IntPtrT>(raw_assembler()->ChangeInt32ToInt64(value));
802 }
803 return ReinterpretCast<IntPtrT>(value);
804}
805
807 if (raw_assembler()->machine()->Is64()) {
808 return UncheckedCast<IntPtrT>(raw_assembler()->ChangeFloat64ToInt64(value));
809 }
810 return UncheckedCast<IntPtrT>(raw_assembler()->ChangeFloat64ToInt32(value));
811}
812
814 if (raw_assembler()->machine()->Is64()) {
816 raw_assembler()->ChangeFloat64ToUint64(value));
817 }
818 return UncheckedCast<UintPtrT>(raw_assembler()->ChangeFloat64ToUint32(value));
819}
820
822 if (raw_assembler()->machine()->Is64()) {
823 // TODO(turbofan): Maybe we should introduce a ChangeUint64ToFloat64
824 // machine operator to TurboFan here?
826 raw_assembler()->RoundUint64ToFloat64(value));
827 }
828 return UncheckedCast<Float64T>(raw_assembler()->ChangeUint32ToFloat64(value));
829}
830
832 if (raw_assembler()->machine()->Is64()) {
833 return UncheckedCast<Float64T>(raw_assembler()->RoundInt64ToFloat64(value));
834 }
835 return UncheckedCast<Float64T>(raw_assembler()->ChangeInt32ToFloat64(value));
836}
837
846#define DEFINE_CODE_ASSEMBLER_UNARY_OP(name, ResType, ArgType) \
847 TNode<ResType> CodeAssembler::name(TNode<ArgType> a) { \
848 return UncheckedCast<ResType>(raw_assembler()->name(a)); \
849 }
851#undef DEFINE_CODE_ASSEMBLER_UNARY_OP
852
854 return raw_assembler()->Load(type, base);
855}
856
858 return raw_assembler()->Load(type, base, offset);
859}
860
862 return BitcastWordToTagged(Load<RawPtrT>(base));
863}
864
866 // Please use LoadFromObject(MachineType::MapInHeader(), object,
867 // IntPtrConstant(-kHeapObjectTag)) instead.
868 DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
869 return BitcastWordToTagged(Load<RawPtrT>(base, offset));
870}
871
874 DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
875 return raw_assembler()->AtomicLoad(AtomicLoadParameters(type, order), base,
876 offset);
877}
878
879template <class Type>
886
891
896
901
902#ifdef V8_MAP_PACKING
903Node* CodeAssembler::PackMapWord(Node* value) {
904 TNode<IntPtrT> map_word =
905 BitcastTaggedToWordForTagAndSmiBits(UncheckedCast<AnyTaggedT>(value));
906 TNode<WordT> packed = WordXor(UncheckedCast<WordT>(map_word),
907 IntPtrConstant(Internals::kMapWordXorMask));
908 return BitcastWordToTaggedSigned(packed);
909}
910#endif
911
913#ifdef V8_MAP_PACKING
914 Handle<Object> root = isolate()->root_handle(root_index);
915 Node* map = HeapConstantNoHole(Cast<Map>(root));
916 map = PackMapWord(map);
917 return ReinterpretCast<AnyTaggedT>(map);
918#else
919 return LoadRoot(root_index);
920#endif
921}
922
924 if (RootsTable::IsImmortalImmovable(root_index)) {
925 Handle<Object> root = isolate()->root_handle(root_index);
926 if (IsSmi(*root)) {
927 return SmiConstant(i::Cast<Smi>(*root));
928 } else {
930 }
931 }
932
933 // TODO(jgruber): In theory we could generate better code for this by
934 // letting the macro assembler decide how to load from the roots list. In most
935 // cases, it would boil down to loading from a fixed kRootRegister offset.
936 TNode<ExternalReference> isolate_root =
937 ExternalConstant(ExternalReference::isolate_root(isolate()));
938 int offset = IsolateData::root_slot_offset(root_index);
940 LoadFullTagged(isolate_root, IntPtrConstant(offset)));
941}
942
947
952
955 Node* value,
956 StoreToObjectWriteBarrier write_barrier) {
957 WriteBarrierKind write_barrier_kind;
958 switch (write_barrier) {
960 write_barrier_kind = WriteBarrierKind::kFullWriteBarrier;
961 break;
963 write_barrier_kind = WriteBarrierKind::kMapWriteBarrier;
964 break;
966 if (CanBeTaggedPointer(rep)) {
967 write_barrier_kind = WriteBarrierKind::kAssertNoWriteBarrier;
968 } else {
969 write_barrier_kind = WriteBarrierKind::kNoWriteBarrier;
970 }
971 break;
972 }
973 raw_assembler()->StoreToObject(rep, object, offset, value,
974 write_barrier_kind);
975}
976
983
992
998
1005
1012
1017
1019 // Please use OptimizedStoreMap(base, value) instead.
1020 DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
1023}
1024
1026 DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
1029}
1030
1037
1039 Node* offset, Node* value) {
1040 // Please use OptimizedStoreMap(base, value) instead.
1041 DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
1043 rep, base, offset, value,
1045}
1046
1051
1053 Node* base, Node* offset,
1054 Node* value) {
1055 // Please use OptimizedStoreMap(base, value) instead.
1056 DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
1057 raw_assembler()->Store(rep, base, offset, value, kNoWriteBarrier);
1058}
1059
1065
1068 TNode<Object> tagged_value) {
1069 // Please use OptimizedStoreMap(base, tagged_value) instead.
1070 DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
1072 BitcastTaggedToWord(tagged_value));
1073}
1074
1083
1092
// For each atomic read-modify-write operation this macro emits:
//   (1) a 32-bit wrapper forwarding to the RawMachineAssembler, and
//   (2) a 64-bit member template taking the value as a low/high word pair,
//       explicitly instantiated for AtomicInt64 and AtomicUint64.
// (Comments cannot appear on the continuation lines themselves.)
#define ATOMIC_FUNCTION(name) \
  TNode<Word32T> CodeAssembler::Atomic##name( \
      MachineType type, TNode<RawPtrT> base, TNode<UintPtrT> offset, \
      TNode<Word32T> value) { \
    return UncheckedCast<Word32T>( \
        raw_assembler()->Atomic##name(type, base, offset, value)); \
  } \
  template <class Type> \
  TNode<Type> CodeAssembler::Atomic##name##64( \
      TNode<RawPtrT> base, TNode<UintPtrT> offset, TNode<UintPtrT> value, \
      TNode<UintPtrT> value_high) { \
    return UncheckedCast<Type>( \
        raw_assembler()->Atomic##name##64(base, offset, value, value_high)); \
  } \
  template TNode<AtomicInt64> CodeAssembler::Atomic##name##64 < AtomicInt64 > \
      (TNode<RawPtrT> base, TNode<UintPtrT> offset, TNode<UintPtrT> value, \
       TNode<UintPtrT> value_high); \
  template TNode<AtomicUint64> CodeAssembler::Atomic##name##64 < \
      AtomicUint64 > (TNode<RawPtrT> base, TNode<UintPtrT> offset, \
                      TNode<UintPtrT> value, TNode<UintPtrT> value_high);
1113ATOMIC_FUNCTION(Add)
1114ATOMIC_FUNCTION(Sub)
1115ATOMIC_FUNCTION(And)
1117ATOMIC_FUNCTION(Xor)
1118ATOMIC_FUNCTION(Exchange)
1119#undef ATOMIC_FUNCTION
1120
1122 TNode<RawPtrT> base,
1124 TNode<Word32T> old_value,
1125 TNode<Word32T> new_value) {
1127 type, base, offset, old_value, new_value));
1128}
1129
1130template <class Type>
1133 TNode<UintPtrT> new_value, TNode<UintPtrT> old_value_high,
1134 TNode<UintPtrT> new_value_high) {
1135 // This uses Uint64() intentionally: AtomicCompareExchange is not implemented
1136 // for Int64(), which is fine because the machine instruction only cares
1137 // about words.
1139 base, offset, old_value, old_value_high, new_value, new_value_high));
1140}
1141
1144 TNode<UintPtrT> new_value, TNode<UintPtrT> old_value_high,
1145 TNode<UintPtrT> new_value_high);
1146template TNode<AtomicUint64>
1149 TNode<UintPtrT> new_value, TNode<UintPtrT> old_value_high,
1150 TNode<UintPtrT> new_value_high);
1151
1155
1158 TNode<ExternalReference> isolate_root =
1159 ExternalConstant(ExternalReference::isolate_root(isolate()));
1160 int offset = IsolateData::root_slot_offset(root_index);
1162}
1163
1165 DCHECK_LT(index, value->op()->ValueOutputCount());
1166 return raw_assembler()->Projection(index, value);
1167}
1168
1174
1176 if (state_->exception_handler_labels_.empty()) return;
1179
1180 if (node->op()->HasProperty(Operator::kNoThrow)) {
1181 return;
1182 }
1183
1184 Label success(this), exception(this, Label::kDeferred);
1185 success.MergeVariables();
1186 exception.MergeVariables();
1187
1188 raw_assembler()->Continuations(node, success.label_, exception.label_);
1189
1190 Bind(&exception);
1191 const Operator* op = raw_assembler()->common()->IfException();
1192 Node* exception_value = raw_assembler()->AddNode(op, node, node);
1193 label->AddInputs({CAST(exception_value)});
1194 Goto(label->plain_label());
1195
1196 Bind(&success);
1197 raw_assembler()->AddNode(raw_assembler()->common()->IfSuccess(), node);
1198}
1199
1200namespace {
1201template <size_t kMaxSize>
1202class NodeArray {
1203 public:
1204 void Add(Node* node) {
1206 *ptr_++ = node;
1207 }
1208
1209 Node* const* data() const { return arr_; }
1210 int size() const { return static_cast<int>(ptr_ - arr_); }
1211
1212 private:
1214 Node** ptr_ = arr_;
1215};
1216
1217#ifdef DEBUG
1218bool IsValidArgumentCountFor(const CallInterfaceDescriptor& descriptor,
1219 size_t argument_count) {
1220 size_t parameter_count = descriptor.GetParameterCount();
1221 if (descriptor.AllowVarArgs()) {
1222 return argument_count >= parameter_count;
1223 } else {
1224 return argument_count == parameter_count;
1225 }
1226}
1227#endif // DEBUG
1228} // namespace
1229
1231 Runtime::FunctionId function, TNode<Object> context,
1232 std::initializer_list<TNode<Object>> args) {
1233 int result_size = Runtime::FunctionForId(function)->result_size;
1234#if V8_ENABLE_WEBASSEMBLY
1235 bool switch_to_the_central_stack =
1236 state_->kind_ == CodeKind::WASM_FUNCTION ||
1237 state_->kind_ == CodeKind::WASM_TO_JS_FUNCTION ||
1238 state_->kind_ == CodeKind::JS_TO_WASM_FUNCTION ||
1239 state_->builtin_ == Builtin::kJSToWasmWrapper ||
1240 state_->builtin_ == Builtin::kJSToWasmHandleReturns ||
1241 state_->builtin_ == Builtin::kWasmToJsWrapperCSA ||
1243#else
1244 bool switch_to_the_central_stack = false;
1245#endif
1246 Builtin centry =
1247 Builtins::RuntimeCEntry(result_size, switch_to_the_central_stack);
1248 TNode<Code> centry_code =
1249 HeapConstantNoHole(isolate()->builtins()->code_handle(centry));
1250 constexpr size_t kMaxNumArgs = 7;
1251 DCHECK_GE(kMaxNumArgs, args.size());
1252 int argc = static_cast<int>(args.size());
1253 auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
1254 zone(), function, argc, Operator::kNoProperties,
1257
1260 TNode<Int32T> arity = Int32Constant(argc);
1261
1262 NodeArray<kMaxNumArgs + 4> inputs;
1263 inputs.Add(centry_code);
1264 for (const auto& arg : args) inputs.Add(arg);
1265 inputs.Add(ref);
1266 inputs.Add(arity);
1267 inputs.Add(context);
1268
1269 CallPrologue();
1270 Node* return_value =
1271 raw_assembler()->CallN(call_descriptor, inputs.size(), inputs.data());
1272 HandleException(return_value);
1273 CallEpilogue();
1274 return return_value;
1275}
1276
1278
1279#if V8_ENABLE_WEBASSEMBLY
1282 ExternalReference::wasm_switch_to_the_central_stack_for_js());
1284 do_switch, MachineType::Pointer(),
1285 std::make_pair(MachineType::Pointer(),
1287 std::make_pair(MachineType::Pointer(), LoadFramePointer())));
1288
1289 TNode<RawPtrT> old_sp = LoadStackPointer();
1290 SetStackPointer(central_stack_sp);
1291 return old_sp;
1292}
1293
1296 ExternalReference::wasm_switch_from_the_central_stack_for_js());
1297 CodeAssemblerLabel skip(this);
1298 GotoIf(IntPtrEqual(old_sp, UintPtrConstant(0)), &skip);
1300 do_switch, MachineType::Pointer(),
1301 std::make_pair(MachineType::Pointer(),
1303 SetStackPointer(old_sp);
1304 Goto(&skip);
1305 Bind(&skip);
1306}
1307
1309 TVariable<RawPtrT> old_sp(PointerConstant(nullptr), this);
1310 Label no_switch(this);
1311 Label end(this); // -> return value of the call (kTaggedPointer)
1312 TNode<Uint8T> is_on_central_stack_flag = LoadUint8FromRootRegister(
1313 IntPtrConstant(IsolateData::is_on_central_stack_flag_offset()));
1314 GotoIf(is_on_central_stack_flag, &no_switch);
1315 old_sp = SwitchToTheCentralStack();
1316 Goto(&no_switch);
1317 Bind(&no_switch);
1318 return old_sp.value();
1319}
1320#endif
1321
1323 Runtime::FunctionId function, TNode<Int32T> arity, TNode<Object> context,
1324 std::initializer_list<TNode<Object>> args) {
1325 int result_size = Runtime::FunctionForId(function)->result_size;
1326#if V8_ENABLE_WEBASSEMBLY
1327 bool switch_to_the_central_stack =
1328 state_->kind_ == CodeKind::WASM_FUNCTION ||
1329 state_->kind_ == CodeKind::WASM_TO_JS_FUNCTION ||
1330 state_->kind_ == CodeKind::JS_TO_WASM_FUNCTION ||
1331 state_->builtin_ == Builtin::kJSToWasmWrapper ||
1332 state_->builtin_ == Builtin::kJSToWasmHandleReturns ||
1333 state_->builtin_ == Builtin::kWasmToJsWrapperCSA ||
1335#else
1336 bool switch_to_the_central_stack = false;
1337#endif
1338 Builtin centry =
1339 Builtins::RuntimeCEntry(result_size, switch_to_the_central_stack);
1340 TNode<Code> centry_code =
1341 HeapConstantNoHole(isolate()->builtins()->code_handle(centry));
1342
1343 constexpr size_t kMaxNumArgs = 6;
1344 DCHECK_GE(kMaxNumArgs, args.size());
1345 int argc = static_cast<int>(args.size());
1346 auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
1347 zone(), function, argc, Operator::kNoProperties,
1349
1352
1353 NodeArray<kMaxNumArgs + 4> inputs;
1354 inputs.Add(centry_code);
1355 for (const auto& arg : args) inputs.Add(arg);
1356 inputs.Add(ref);
1357 inputs.Add(arity);
1358 inputs.Add(context);
1359
1360 raw_assembler()->TailCallN(call_descriptor, inputs.size(), inputs.data());
1361}
1362
1364 const CallInterfaceDescriptor& descriptor,
1365 int input_count, Node* const* inputs) {
1368
1369 // implicit nodes are target and optionally context.
1370 int implicit_nodes = descriptor.HasContextParameter() ? 2 : 1;
1371 DCHECK_LE(implicit_nodes, input_count);
1372 int argc = input_count - implicit_nodes;
1373 DCHECK(IsValidArgumentCountFor(descriptor, argc));
1374 // Extra arguments not mentioned in the descriptor are passed on the stack.
1375 int stack_parameter_count = argc - descriptor.GetRegisterParameterCount();
1376 DCHECK_LE(descriptor.GetStackParameterCount(), stack_parameter_count);
1377
1378 auto call_descriptor = Linkage::GetStubCallDescriptor(
1379 zone(), descriptor, stack_parameter_count, CallDescriptor::kNoFlags,
1380 Operator::kNoProperties, call_mode);
1381
1382 CallPrologue();
1383 Node* return_value =
1384 raw_assembler()->CallN(call_descriptor, input_count, inputs);
1385 HandleException(return_value);
1386 CallEpilogue();
1387 return return_value;
1388}
1389
1391 TNode<Code> target, TNode<Object> context,
1392 std::initializer_list<Node*> args) {
1393 constexpr size_t kMaxNumArgs = 11;
1394 DCHECK_GE(kMaxNumArgs, args.size());
1395 DCHECK(IsValidArgumentCountFor(descriptor, args.size()));
1396 auto call_descriptor = Linkage::GetStubCallDescriptor(
1397 zone(), descriptor, descriptor.GetStackParameterCount(),
1399
1400 NodeArray<kMaxNumArgs + 2> inputs;
1401 inputs.Add(target);
1402 for (auto arg : args) inputs.Add(arg);
1403 if (descriptor.HasContextParameter()) {
1404 inputs.Add(context);
1405 }
1406
1407 raw_assembler()->TailCallN(call_descriptor, inputs.size(), inputs.data());
1408}
1409
// Assembles the [target, args..., (context)] input array for a stub call and
// delegates to CallStubN(). Returns the call's result node.
// NOTE(review): the opening signature line
// (`Node* CodeAssembler::CallStubRImpl(StubCallMode call_mode, ...`, per the
// declaration index) and a check at original lines 1414-1415 were dropped by
// the doc extraction — TODO restore.
    const CallInterfaceDescriptor& descriptor,
    TNode<Object> target, TNode<Object> context,
    std::initializer_list<Node*> args) {
  DCHECK(IsValidArgumentCountFor(descriptor, args.size()));

  // Fixed upper bound so {inputs} can live on the stack.
  constexpr size_t kMaxNumArgs = 10;
  DCHECK_GE(kMaxNumArgs, args.size());

  // +2 for target and context.
  NodeArray<kMaxNumArgs + 2> inputs;
  inputs.Add(target);
  for (auto arg : args) inputs.Add(arg);
  if (descriptor.HasContextParameter()) {
    inputs.Add(context);
  }

  return CallStubN(call_mode, descriptor, inputs.size(), inputs.data());
}
1430
// Builds the input array for a JS-linkage stub call:
// [target, function, (new_target), arity, (dispatch_handle), args...,
// (context)] and delegates to CallStubN(). {new_target} and
// {dispatch_handle} are optional; the dispatch handle is only used when the
// JS linkage includes one (V8_JS_LINKAGE_INCLUDES_DISPATCH_HANDLE).
// NOTE(review): the opening signature line
// (`Node* CodeAssembler::CallJSStubImpl(`, per the declaration index) was
// dropped by the doc extraction — TODO restore.
    const CallInterfaceDescriptor& descriptor, TNode<Object> target,
    TNode<Object> context, TNode<Object> function,
    std::optional<TNode<Object>> new_target, TNode<Int32T> arity,
    std::optional<TNode<JSDispatchHandleT>> dispatch_handle,
    std::initializer_list<Node*> args) {
  // Fixed upper bound so {inputs} can live on the stack; +6 covers target,
  // function, new_target, arity, dispatch_handle and context.
  constexpr size_t kMaxNumArgs = 10;
  DCHECK_GE(kMaxNumArgs, args.size());
  NodeArray<kMaxNumArgs + 6> inputs;

  inputs.Add(target);
  inputs.Add(function);
  if (new_target) {
    inputs.Add(*new_target);
  }
  inputs.Add(arity);
#ifdef V8_JS_LINKAGE_INCLUDES_DISPATCH_HANDLE
  if (dispatch_handle) {
    inputs.Add(*dispatch_handle);
  }
#endif
  for (auto arg : args) inputs.Add(arg);
  // Context argument is implicit so isn't counted.
  DCHECK(IsValidArgumentCountFor(descriptor, inputs.size()));
  if (descriptor.HasContextParameter()) {
    inputs.Add(context);
  }

  return CallStubN(StubCallMode::kCallCodeObject, descriptor, inputs.size(),
                   inputs.data());
}
1462
// Tail-calls a stub whose continuation re-enters the bytecode dispatch loop.
// Unlike TailCallStubImpl, the context is appended unconditionally.
// NOTE(review): the opening signature line
// (`void CodeAssembler::TailCallStubThenBytecodeDispatchImpl(`, per the
// declaration index) and the trailing arguments of the
// GetStubCallDescriptor call (original line 1476) were dropped by the doc
// extraction — TODO restore.
    const CallInterfaceDescriptor& descriptor, Node* target, Node* context,
    std::initializer_list<Node*> args) {
  // Fixed upper bound so {inputs} can live on the stack.
  constexpr size_t kMaxNumArgs = 6;
  DCHECK_GE(kMaxNumArgs, args.size());
  DCHECK(IsValidArgumentCountFor(descriptor, args.size()));

  int argc = static_cast<int>(args.size());
  // Extra arguments not mentioned in the descriptor are passed on the stack.
  int stack_parameter_count = argc - descriptor.GetRegisterParameterCount();
  DCHECK_LE(descriptor.GetStackParameterCount(), stack_parameter_count);
  auto call_descriptor = Linkage::GetStubCallDescriptor(
      zone(), descriptor, stack_parameter_count, CallDescriptor::kNoFlags,

  // +2 for target and context.
  NodeArray<kMaxNumArgs + 2> inputs;
  inputs.Add(target);
  for (auto arg : args) inputs.Add(arg);
  inputs.Add(context);

  raw_assembler()->TailCallN(call_descriptor, inputs.size(), inputs.data());
}
1485
1486template <class... TArgs>
1488 const CallInterfaceDescriptor& descriptor, TNode<RawPtrT> target,
1489 TArgs... args) {
1490 DCHECK_EQ(descriptor.GetParameterCount(), sizeof...(args));
1491 auto call_descriptor = Linkage::GetBytecodeDispatchCallDescriptor(
1492 zone(), descriptor, descriptor.GetStackParameterCount());
1493
1494 Node* nodes[] = {target, args...};
1495 CHECK_EQ(descriptor.GetParameterCount() + 1, arraysize(nodes));
1496 raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes);
1497}
1498
1499// Instantiate TailCallBytecodeDispatch() for argument counts used by
1500// CSA-generated code
1502 const CallInterfaceDescriptor& descriptor, TNode<RawPtrT> target,
1505
// Tail-calls JS {code} with the standard JS trampoline linkage:
// [code, function, new_target, arg_count, (dispatch_handle), context].
// NOTE(review): the opening signature lines (per the declaration index:
// `void CodeAssembler::TailCallJSCode(TNode<Code> code, TNode<Context>
// context, TNode<JSFunction> function, TNode<Object> new_target, ...`) and
// the trailing arguments of the GetStubCallDescriptor call (original lines
// 1514-1515) were dropped by the doc extraction — TODO restore.
    TNode<JSFunction> function,
    TNode<Int32T> arg_count,
    TNode<JSDispatchHandleT> dispatch_handle) {
  JSTrampolineDescriptor descriptor;
  auto call_descriptor = Linkage::GetStubCallDescriptor(
      zone(), descriptor, descriptor.GetStackParameterCount(),

#ifdef V8_JS_LINKAGE_INCLUDES_DISPATCH_HANDLE
  Node* nodes[] = {code, function, new_target,
                   arg_count, dispatch_handle, context};
#else
  Node* nodes[] = {code, function, new_target, arg_count, context};
#endif
  // + 2 for code and context.
  CHECK_EQ(descriptor.GetParameterCount() + 2, arraysize(nodes));
  raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes);
}
1527
1529 int input_count, Node* const* inputs) {
1530 auto call_descriptor = Linkage::GetSimplifiedCDescriptor(zone(), signature);
1531 return raw_assembler()->CallN(call_descriptor, input_count, inputs);
1532}
1533
// Thin delegate: emits a C function call through the raw assembler.
// A {return_type} of std::nullopt denotes a void C function.
// NOTE(review): the opening signature line
// (`Node* CodeAssembler::CallCFunction(`, per the declaration index) was
// dropped by the doc extraction — TODO restore.
    Node* function, std::optional<MachineType> return_type,
    std::initializer_list<CodeAssembler::CFunctionArg> args) {
  return raw_assembler()->CallCFunction(function, return_type, args);
}
1539
1541 Node* function, MachineType return_type,
1542 std::initializer_list<CodeAssembler::CFunctionArg> args) {
1544 function, return_type, args);
1545}
1546
1548 Node* function, MachineType return_type, SaveFPRegsMode mode,
1549 std::initializer_list<CodeAssembler::CFunctionArg> args) {
1550 DCHECK(return_type.LessThanOrEqualPointerSize());
1552 function, return_type, mode, args);
1553}
1554
1559
1561 GotoHint goto_hint) {
1562 Label false_label(this);
1563 BranchHint branch_hint = BranchHint::kNone;
1564 if (goto_hint == GotoHint::kLabel) {
1565 branch_hint = BranchHint::kTrue;
1566 } else if (goto_hint == GotoHint::kFallthrough) {
1567 branch_hint = BranchHint::kFalse;
1568 }
1569 Branch(condition, true_label, &false_label, branch_hint);
1570 Bind(&false_label);
1571}
1572
1574 GotoHint goto_hint) {
1575 Label true_label(this);
1576 BranchHint branch_hint = BranchHint::kNone;
1577 if (goto_hint == GotoHint::kLabel) {
1578 branch_hint = BranchHint::kFalse;
1579 } else if (goto_hint == GotoHint::kFallthrough) {
1580 branch_hint = BranchHint::kTrue;
1581 }
1582 Branch(condition, &true_label, false_label, branch_hint);
1583 Bind(&true_label);
1584}
1585
// Emits a two-way branch on {condition} to CSA-level labels.
// NOTE(review): the opening signature line
// (`void CodeAssembler::Branch(TNode<IntegralT> condition, Label* true_label,`
// per the declaration index) was dropped by the doc extraction — TODO
// restore.
    Label* false_label, BranchHint branch_hint) {
  int32_t constant;
  // Constant-fold: if the condition is statically known and both targets are
  // already in use or bound, emit an unconditional jump instead.
  if (TryToInt32Constant(condition, &constant)) {
    if ((true_label->is_used() || true_label->is_bound()) &&
        (false_label->is_used() || false_label->is_bound())) {
      return Goto(constant ? true_label : false_label);
    }
  }
  // Merge variable state into both successors before lowering to the raw
  // assembler's branch.
  true_label->MergeVariables();
  false_label->MergeVariables();
  return raw_assembler()->Branch(condition, true_label->label_,
                                 false_label->label_, branch_hint);
}
1600
// Branch overload taking both arms as callbacks; each arm is emitted into
// its own bound block. A statically-known condition emits only the taken
// arm.
// NOTE(review): the opening signature line (with the function name and the
// {condition} parameter) was dropped by the doc extraction — TODO restore.
    const std::function<void()>& true_body,
    const std::function<void()>& false_body) {
  int32_t constant;
  if (TryToInt32Constant(condition, &constant)) {
    return constant ? true_body() : false_body();
  }

  Label vtrue(this), vfalse(this);
  Branch(condition, &vtrue, &vfalse);

  Bind(&vtrue);
  true_body();

  Bind(&vfalse);
  false_body();
}
1618
// Branch overload with an explicit true target and a callback false arm.
// A statically-known condition emits only the taken side.
// NOTE(review): the opening signature line (with the function name, the
// {condition} parameter and {true_label}) was dropped by the doc
// extraction — TODO restore.
    const std::function<void()>& false_body) {
  int32_t constant;
  if (TryToInt32Constant(condition, &constant)) {
    return constant ? Goto(true_label) : false_body();
  }

  Label vfalse(this);
  Branch(condition, true_label, &vfalse);
  Bind(&vfalse);
  false_body();
}
1631
// Branch overload with a callback true arm and an explicit false target.
// A statically-known condition emits only the taken side.
// NOTE(review): the opening signature line (with the function name and the
// {condition} parameter) was dropped by the doc extraction — TODO restore.
    const std::function<void()>& true_body,
    Label* false_label) {
  int32_t constant;
  if (TryToInt32Constant(condition, &constant)) {
    return constant ? true_body() : Goto(false_label);
  }

  Label vtrue(this);
  Branch(condition, &vtrue, false_label);
  Bind(&vtrue);
  true_body();
}
1645
1646void CodeAssembler::Switch(Node* index, Label* default_label,
1647 const int32_t* case_values, Label** case_labels,
1648 size_t case_count) {
1649 RawMachineLabel** labels =
1650 zone()->AllocateArray<RawMachineLabel*>(case_count);
1651 for (size_t i = 0; i < case_count; ++i) {
1652 labels[i] = case_labels[i]->label_;
1653 case_labels[i]->MergeVariables();
1654 }
1655 default_label->MergeVariables();
1656 return raw_assembler()->Switch(index, default_label->label_, case_values,
1657 labels, case_count);
1658}
1659
1666
1667// RawMachineAssembler delegate helpers:
1669
1671
1673
1677
1679 return state_->raw_assembler_.get();
1680}
1681
1683
1684// The core implementation of Variable is stored through an indirection so
1685// that it can outlive the often block-scoped Variable declarations. This is
1686// needed to ensure that variable binding and merging through phis can
1687// properly be verified.
1689 public:
1691 :
1692#if DEBUG
1693 debug_info_(AssemblerDebugInfo(nullptr, nullptr, -1)),
1694#endif
1695 value_(nullptr),
1696 rep_(rep),
1697 var_id_(id) {
1698 }
1699
1700#if DEBUG
1701 AssemblerDebugInfo debug_info() const { return debug_info_; }
1702 void set_debug_info(AssemblerDebugInfo debug_info) {
1703 debug_info_ = debug_info;
1704 }
1705
1706 AssemblerDebugInfo debug_info_;
1707#endif // DEBUG
1708 bool operator<(const CodeAssemblerVariable::Impl& other) const {
1709 return var_id_ < other.var_id_;
1710 }
1714};
1715
1718 const CodeAssemblerVariable::Impl* b) const {
1719 return *a < *b;
1720}
1721
1724 : impl_(assembler->zone()->New<Impl>(rep,
1725 assembler->state()->NextVariableId())),
1726 state_(assembler->state()) {
1727 state_->variables_.insert(impl_);
1728}
1729
1732 Node* initial_value)
1733 : CodeAssemblerVariable(assembler, rep) {
1734 Bind(initial_value);
1735}
1736
1737#if DEBUG
1739 AssemblerDebugInfo debug_info,
1741 : impl_(assembler->zone()->New<Impl>(rep,
1742 assembler->state()->NextVariableId())),
1743 state_(assembler->state()) {
1744 impl_->set_debug_info(debug_info);
1745 state_->variables_.insert(impl_);
1746}
1747
1748CodeAssemblerVariable::CodeAssemblerVariable(CodeAssembler* assembler,
1749 AssemblerDebugInfo debug_info,
1751 Node* initial_value)
1752 : CodeAssemblerVariable(assembler, debug_info, rep) {
1753 impl_->set_debug_info(debug_info);
1754 Bind(initial_value);
1755}
1756#endif // DEBUG
1757
1761
1763
// Returns the node currently bound to this variable. In debug builds it is a
// fatal error to read an unbound variable or to read outside a block.
// NOTE(review): the opening signature line
// (`Node* CodeAssemblerVariable::value() const {` — consistent with the
// `return impl_->value_` below and IsBound()) was dropped by the doc
// extraction — TODO restore.
#if DEBUG
  if (!IsBound()) {
    std::stringstream str;
    str << "#Use of unbound variable:"
        << "#\n Variable: " << *this << "#\n Current Block: ";
    state_->PrintCurrentBlock(str);
    FATAL("%s", str.str().c_str());
  }
  if (!state_->InsideBlock()) {
    std::stringstream str;
    str << "#Accessing variable value outside a block:"
        << "#\n Variable: " << *this;
    FATAL("%s", str.str().c_str());
  }
#endif  // DEBUG
  return impl_->value_;
}
1782
1784
1785bool CodeAssemblerVariable::IsBound() const { return impl_->value_ != nullptr; }
1786
1787std::ostream& operator<<(std::ostream& os,
1788 const CodeAssemblerVariable::Impl& impl) {
1789#if DEBUG
1790 AssemblerDebugInfo info = impl.debug_info();
1791 if (info.name) os << "V" << info;
1792#endif // DEBUG
1793 return os;
1794}
1795
1796std::ostream& operator<<(std::ostream& os,
1797 const CodeAssemblerVariable& variable) {
1798 os << *variable.impl_;
1799 return os;
1800}
1801
1803 size_t vars_count,
1804 CodeAssemblerVariable* const* vars,
1806 : bound_(false),
1807 merge_count_(0),
1808 state_(assembler->state()),
1809 label_(nullptr) {
1810 label_ = assembler->zone()->New<RawMachineLabel>(
1813 for (size_t i = 0; i < vars_count; ++i) {
1814 variable_phis_[vars[i]->impl_] = nullptr;
1815 }
1816}
1817
1819
// Records one incoming control-flow edge into this label and merges the
// current value of every tracked variable into the label's merge lists (or,
// if the label is already bound, directly into the existing phi nodes).
// NOTE(review): the opening signature line
// (`void CodeAssemblerLabel::MergeVariables() {`) and the loop header over
// the tracked variables (which introduces {var}) were dropped by the doc
// extraction — TODO restore from upstream before compiling.
  ++merge_count_;
    size_t count = 0;
    Node* node = var->value_;
    if (node != nullptr) {
      // Append this edge's value to the variable's merge list, creating the
      // list on first use.
      auto i = variable_merges_.find(var);
      if (i != variable_merges_.end()) {
        i->second.push_back(node);
        count = i->second.size();
      } else {
        count = 1;
        variable_merges_[var] = std::vector<Node*>(1, node);
      }
    }
    // If the following asserts, then you've jumped to a label without a bound
    // variable along that path that expects to merge its value into a phi.
    // This can also occur if a label is bound that is never jumped to.
    DCHECK(variable_phis_.find(var) == variable_phis_.end() ||
           count == merge_count_);
    USE(count);

    // If the label is already bound, we already know the set of variables to
    // merge and phi nodes have already been created.
    if (bound_) {
      auto phi = variable_phis_.find(var);
      if (phi != variable_phis_.end()) {
        DCHECK_NOT_NULL(phi->second);
        state_->raw_assembler_->AppendPhiInput(phi->second, node);
      } else {
        auto i = variable_merges_.find(var);
        if (i != variable_merges_.end()) {
          // If the following assert fires, then you've declared a variable that
          // has the same bound value along all paths up until the point you
          // bound this label, but then later merged a path with a new value for
          // the variable after the label bind (it's not possible to add phis to
          // the bound label after the fact, just make sure to list the variable
          // in the label's constructor's list of merged variables).
#if DEBUG
          if (find_if(i->second.begin(), i->second.end(),
                      [node](Node* e) -> bool { return node != e; }) !=
              i->second.end()) {
            std::stringstream str;
            str << "Unmerged variable found when jumping to block. \n"
                << "# Variable: " << *var;
            if (bound_) {
              str << "\n# Target block: " << *label_->block();
            }
            str << "\n# Current Block: ";
            state_->PrintCurrentBlock(str);
            FATAL("%s", str.str().c_str());
          }
#endif  // DEBUG
        }
      }
    }
  }
}
1878
1879#if DEBUG
1881 if (bound_) {
1882 std::stringstream str;
1883 str << "Cannot bind the same label twice:"
1884 << "\n# current: " << debug_info
1885 << "\n# previous: " << *label_->block();
1886 FATAL("%s", str.str().c_str());
1887 }
1888 if (v8_flags.enable_source_at_csa_bind) {
1889 state_->raw_assembler_->SetCurrentExternalSourcePosition(
1890 {debug_info.file, debug_info.line});
1891 }
1892 state_->raw_assembler_->Bind(label_, debug_info);
1894}
1895#endif // DEBUG
1896
1902
1904 // Make sure that all variables that have changed along any path up to this
1905 // point are marked as merge variables.
1906 for (auto var : state_->variables_) {
1907 Node* shared_value = nullptr;
1908 auto i = variable_merges_.find(var);
1909 if (i != variable_merges_.end()) {
1910 for (auto value : i->second) {
1911 DCHECK_NOT_NULL(value);
1912 if (value != shared_value) {
1913 if (shared_value == nullptr) {
1914 shared_value = value;
1915 } else {
1916 variable_phis_[var] = nullptr;
1917 }
1918 }
1919 }
1920 }
1921 }
1922
1923 for (auto var : variable_phis_) {
1924 CodeAssemblerVariable::Impl* var_impl = var.first;
1925 auto i = variable_merges_.find(var_impl);
1926#if DEBUG
1927 bool not_found = i == variable_merges_.end();
1928 if (not_found || i->second.size() != merge_count_) {
1929 std::stringstream str;
1930 str << "A variable that has been marked as beeing merged at the label"
1931 << "\n# doesn't have a bound value along all of the paths that "
1932 << "\n# have been merged into the label up to this point."
1933 << "\n#"
1934 << "\n# This can happen in the following cases:"
1935 << "\n# - By explicitly marking it so in the label constructor"
1936 << "\n# - By having seen different bound values at branches"
1937 << "\n#"
1938 << "\n# Merge count: expected=" << merge_count_
1939 << " vs. found=" << (not_found ? 0 : i->second.size())
1940 << "\n# Variable: " << *var_impl
1941 << "\n# Current Block: " << *label_->block();
1942 FATAL("%s", str.str().c_str());
1943 }
1944#endif // DEBUG
1945 Node* phi = state_->raw_assembler_->Phi(
1946 var.first->rep_, static_cast<int>(merge_count_), &(i->second[0]));
1947 variable_phis_[var_impl] = phi;
1948 }
1949
1950 // Bind all variables to a merge phi, the common value along all paths or
1951 // null.
1952 for (auto var : state_->variables_) {
1953 auto i = variable_phis_.find(var);
1954 if (i != variable_phis_.end()) {
1955 var->value_ = i->second;
1956 } else {
1957 auto j = variable_merges_.find(var);
1958 if (j != variable_merges_.end() && j->second.size() == merge_count_) {
1959 var->value_ = j->second.back();
1960 } else {
1961 var->value_ = nullptr;
1962 }
1963 }
1964 }
1965
1966 bound_ = true;
1967}
1968
1969void CodeAssemblerParameterizedLabelBase::AddInputs(std::vector<Node*> inputs) {
1970 if (!phi_nodes_.empty()) {
1971 DCHECK_EQ(inputs.size(), phi_nodes_.size());
1972 for (size_t i = 0; i < inputs.size(); ++i) {
1973 // We use {nullptr} as a sentinel for an uninitialized value.
1974 if (phi_nodes_[i] == nullptr) continue;
1975 state_->raw_assembler_->AppendPhiInput(phi_nodes_[i], inputs[i]);
1976 }
1977 } else {
1978 DCHECK_EQ(inputs.size(), phi_inputs_.size());
1979 for (size_t i = 0; i < inputs.size(); ++i) {
1980 phi_inputs_[i].push_back(inputs[i]);
1981 }
1982 }
1983}
1984
// Creates a phi of representation {rep} over {inputs}, or nullptr if any
// input is the uninitialized-value sentinel.
// NOTE(review): the opening signature line
// (`Node* CodeAssemblerParameterizedLabelBase::CreatePhi(`, per the
// declaration index) was dropped by the doc extraction — TODO restore.
    MachineRepresentation rep, const std::vector<Node*>& inputs) {
  for (Node* input : inputs) {
    // We use {nullptr} as a sentinel for an uninitialized value. We must not
    // create phi nodes for these.
    if (input == nullptr) return nullptr;
  }
  return state_->raw_assembler_->Phi(rep, static_cast<int>(inputs.size()),
                                     &inputs.front());
}
1995
// Materializes one phi per buffered parameter slot (see AddInputs) using the
// given per-slot representations; returns the resulting phi list.
// NOTE(review): the opening signature line
// (`const std::vector<Node*>& CodeAssemblerParameterizedLabelBase::CreatePhis(`,
// per the declaration index) was dropped by the doc extraction — TODO
// restore.
    std::vector<MachineRepresentation> representations) {
  DCHECK(is_used());
  DCHECK(phi_nodes_.empty());
  phi_nodes_.reserve(phi_inputs_.size());
  DCHECK_EQ(representations.size(), phi_inputs_.size());
  for (size_t i = 0; i < phi_inputs_.size(); ++i) {
    phi_nodes_.push_back(CreatePhi(representations[i], phi_inputs_[i]));
  }
  return phi_nodes_;
}
2007
2012
2016
2019 : has_handler_(label != nullptr),
2020 assembler_(assembler),
2021 compatibility_label_(nullptr),
2022 exception_(nullptr) {
2023 if (has_handler_) {
2025 }
2026}
2027
2031 : has_handler_(label != nullptr),
2032 assembler_(assembler),
2033 compatibility_label_(label),
2034 exception_(exception) {
2035 if (has_handler_) {
2036 label_ = std::make_unique<CodeAssemblerExceptionHandlerLabel>(
2039 }
2040}
2041
// Tears down the scoped handler: pops it from the assembler state and, if
// the handler label was actually jumped to, binds it here and forwards the
// caught exception (into {*exception_} and/or the compatibility label).
// NOTE(review): the destructor's signature line and several interior lines
// (original 2042, 2044, 2047, 2055 — presumably the pop call, the {skip}
// label declaration, and the Goto to {compatibility_label_}) were dropped by
// the doc extraction — TODO restore from upstream before compiling.
  if (has_handler_) {
  }
  if (label_ && label_->is_used()) {
    // Only emit the skip jump when we are inside an open block; otherwise
    // control cannot fall into the handler anyway.
    bool inside_block = assembler_->state()->InsideBlock();
    if (inside_block) {
      assembler_->Goto(&skip);
    }
    TNode<JSAny> e;
    assembler_->Bind(label_.get(), &e);
    if (exception_ != nullptr) *exception_ = e;
    if (inside_block) {
      assembler_->Bind(&skip);
    }
  }
}
2061
2062} // namespace compiler
2063
2064} // namespace internal
2065} // namespace v8
int16_t parameter_count
Definition builtins.cc:67
union v8::internal::@341::BuiltinMetadata::KindSpecificData data
Builtins::Kind kind
Definition builtins.cc:40
static void DebugBreak()
static constexpr Builtin RuntimeCEntry(int result_size, bool switch_to_central_stack=false)
static V8_EXPORT_PRIVATE ExternalReference isolate_address()
static ExternalReference Create(const SCTableReference &table_ref)
Handle< Boolean > ToBoolean(bool value)
Handle< String > InternalizeString(base::Vector< const char > str, bool convert_encoding=false)
Definition factory.h:216
static constexpr int root_slot_offset(RootIndex root_index)
BuiltinsConstantsTableBuilder * builtins_constants_table_builder() const
Definition isolate.h:1901
v8::internal::Factory * factory()
Definition isolate.h:1527
Handle< Object > root_handle(RootIndex index)
Definition isolate.h:1269
static constexpr MachineType Float64()
static constexpr MachineType Pointer()
static constexpr MachineType Uint8()
static constexpr MachineType Int32()
static constexpr MachineType Uint64()
static constexpr MachineType Uint32()
static constexpr MachineType Float32()
static constexpr MachineRepresentation PointerRepresentation()
static constexpr MachineType IntPtr()
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT Status FinalizeJob(Isolate *isolate)
Definition compiler.cc:474
static constexpr bool IsImmortalImmovable(RootIndex root_index)
Definition roots.h:616
static V8_EXPORT_PRIVATE const Function * FunctionForId(FunctionId id)
Definition runtime.cc:350
static bool MayAllocate(FunctionId id)
Definition runtime.cc:186
static constexpr Tagged< Smi > FromInt(int value)
Definition smi.h:38
static TNode UncheckedCast(compiler::Node *node)
Definition tnode.h:413
static bool constexpr IsValid(intptr_t value)
static ThreadId Current()
Definition thread-id.h:32
T * AllocateArray(size_t length)
Definition zone.h:127
T * New(Args &&... args)
Definition zone.h:114
std::map< CodeAssemblerVariable::Impl *, std::vector< Node * >, CodeAssemblerVariable::ImplComparator > variable_merges_
CodeAssemblerLabel(CodeAssembler *assembler, CodeAssemblerLabel::Type type=CodeAssemblerLabel::kNonDeferred)
std::map< CodeAssemblerVariable::Impl *, Node *, CodeAssemblerVariable::ImplComparator > variable_phis_
const std::vector< Node * > & CreatePhis(std::vector< MachineRepresentation > representations)
Node * CreatePhi(MachineRepresentation rep, const std::vector< Node * > &inputs)
void PushExceptionHandler(CodeAssemblerExceptionHandlerLabel *label)
std::vector< FileAndLine > macro_call_stack_
std::vector< CodeAssemblerExceptionHandlerLabel * > exception_handler_labels_
std::unique_ptr< RawMachineAssembler > raw_assembler_
CodeAssemblerState(Isolate *isolate, Zone *zone, const CallInterfaceDescriptor &descriptor, CodeKind kind, const char *name, Builtin builtin=Builtin::kNoBuiltinId)
ZoneSet< CodeAssemblerVariable::Impl *, CodeAssemblerVariable::ImplComparator > variables_
void SetInitialDebugInformation(const char *msg, const char *file, int line)
bool operator<(const CodeAssemblerVariable::Impl &other) const
Impl(MachineRepresentation rep, CodeAssemblerState::VariableId id)
CodeAssemblerVariable(const CodeAssemblerVariable &)=delete
void CompileCode(Isolate *isolate, std::unique_ptr< TurbofanCompilationJob > job)
void QueueJob(Isolate *isolate, std::unique_ptr< TurbofanCompilationJob > job)
std::deque< std::unique_ptr< TurbofanCompilationJob > > main_thread_output_queue_
void FinalizeJobOnMainThread(Isolate *isolate, TurbofanCompilationJob *job)
Node * CallStubN(StubCallMode call_mode, const CallInterfaceDescriptor &descriptor, int input_count, Node *const *inputs)
TNode< IntPtrT > UniqueIntPtrConstant(intptr_t value)
TNode< BoolT > Word32NotEqual(TNode< Word32T > left, TNode< Word32T > right)
Node * CallCFunctionWithoutFunctionDescriptor(Node *function, MachineType return_type, CArgs... cargs)
void OptimizedStoreFieldAssertNoWriteBarrier(MachineRepresentation rep, TNode< HeapObject > object, int offset, Node *value)
const std::vector< FileAndLine > & GetMacroSourcePositionStack() const
Node * CallRuntimeImpl(Runtime::FunctionId function, TNode< Object > context, std::initializer_list< TNode< Object > > args)
TNode< PairT< Word32T, Word32T > > Int32PairAdd(TNode< Word32T > lhs_lo_word, TNode< Word32T > lhs_hi_word, TNode< Word32T > rhs_lo_word, TNode< Word32T > rhs_hi_word)
void SetDynamicJSParameterCount(TNode< Uint16T > parameter_count)
void StaticAssert(TNode< BoolT > value, const char *source="unknown position")
TNode< Type > AtomicLoad64(AtomicMemoryOrder order, TNode< RawPtrT > base, TNode< WordT > offset)
bool IsUndefinedConstant(TNode< Object > node)
TNode< RawPtrT > SwitchToTheCentralStack()
Node * LoadProtectedPointerFromObject(TNode< Object > object, TNode< IntPtrT > offset)
void TailCallRuntimeImpl(Runtime::FunctionId function, TNode< Int32T > arity, TNode< Object > context, std::initializer_list< TNode< Object > > args)
TNode< IntPtrT > IntPtrConstant(intptr_t value)
Node * CallCFunctionWithCallerSavedRegisters(Node *function, MachineType return_type, SaveFPRegsMode mode, CArgs... cargs)
void OptimizedStoreFieldUnsafeNoWriteBarrier(MachineRepresentation rep, TNode< HeapObject > object, int offset, Node *value)
TNode< Int64T > Int64Constant(int64_t value)
void StoreEphemeronKey(Node *base, Node *offset, Node *value)
void UnsafeStoreNoWriteBarrier(MachineRepresentation rep, Node *base, Node *value)
TNode< UintPtrT > ChangeUint32ToWord(TNode< Word32T > value)
TNode< AnyTaggedT > LoadRootMapWord(RootIndex root_index)
void ReturnIf(TNode< BoolT > condition, TNode< Object > value)
TNode< T > UncheckedCast(Node *value)
TNode< IntPtrT > WordShl(TNode< IntPtrT > left, TNode< IntegralT > right)
TNode< UintPtrT > ChangeFloat64ToUintPtr(TNode< Float64T > value)
TNode< Float32T > Float32Constant(double value)
TNode< Uint8T > LoadUint8FromRootRegister(TNode< IntPtrT > offset)
TNode< Type > HeapConstantHole(Handle< Type > object)
bool UnalignedStoreSupported(MachineRepresentation rep) const
TNode< BoolT > WordEqual(TNode< WordT > left, TNode< WordT > right)
void GotoIfNot(TNode< IntegralT > condition, Label *false_label, GotoHint goto_hint=GotoHint::kNone)
void Return(TNode< Object > value)
void PopAndReturn(Node *pop, Node *value)
TNode< HeapObject > UntypedHeapConstantMaybeHole(Handle< HeapObject > object)
TNode< IntPtrT > WordSar(TNode< IntPtrT > left, TNode< IntegralT > right)
TNode< RawPtrT > SwitchToTheCentralStackIfNeeded()
void OptimizedStoreIndirectPointerField(TNode< HeapObject > object, int offset, IndirectPointerTag tag, Node *value)
void StoreToObject(MachineRepresentation rep, TNode< Object > object, TNode< IntPtrT > offset, Node *value, StoreToObjectWriteBarrier write_barrier)
Node * LoadFromObject(MachineType type, TNode< Object > object, TNode< IntPtrT > offset)
TNode< Object > LoadFullTagged(Node *base)
TNode< Type > HeapConstantMaybeHole(Handle< Type > object)
TNode< Int32T > UniqueInt32Constant(int32_t value)
bool IsNullConstant(TNode< Object > node)
void RegisterCallGenerationCallbacks(const CodeAssemblerCallback &call_prologue, const CodeAssemblerCallback &call_epilogue)
void TailCallBytecodeDispatch(const CallInterfaceDescriptor &descriptor, TNode< RawPtrT > target, TArgs... args)
bool UnalignedLoadSupported(MachineRepresentation rep) const
TNode< Boolean > BooleanConstant(bool value)
bool TryToSmiConstant(TNode< IntegralT > node, Tagged< Smi > *out_value)
TNode< T > ReinterpretCast(Node *value)
TNode< Type > AtomicCompareExchange64(TNode< RawPtrT > base, TNode< WordT > offset, TNode< UintPtrT > old_value, TNode< UintPtrT > new_value, TNode< UintPtrT > old_value_high, TNode< UintPtrT > new_value_high)
TNode< HeapObject > UntypedHeapConstantHole(Handle< HeapObject > object)
TNode< IntPtrT > BitcastTaggedToWord(TNode< Smi > node)
TNode< String > StringConstant(const char *str)
Node * CallCFunctionN(Signature< MachineType > *signature, int input_count, Node *const *inputs)
TNode< Int64T > TruncateFloat64ToInt64(TNode< Float64T > value)
TNode< RawPtrT > LoadPointerFromRootRegister(TNode< IntPtrT > offset)
void AtomicStore(MachineRepresentation rep, AtomicMemoryOrder order, TNode< RawPtrT > base, TNode< WordT > offset, TNode< Word32T > value)
void StoreFullTaggedNoWriteBarrier(TNode< RawPtrT > base, TNode< Object > tagged_value)
TNode< BoolT > Word64NotEqual(TNode< Word64T > left, TNode< Word64T > right)
TNode< Smi > SmiConstant(Tagged< Smi > value)
void TailCallStubThenBytecodeDispatchImpl(const CallInterfaceDescriptor &descriptor, Node *target, Node *context, std::initializer_list< Node * > args)
Node * CallStubRImpl(StubCallMode call_mode, const CallInterfaceDescriptor &descriptor, TNode< Object > target, TNode< Object > context, std::initializer_list< Node * > args)
TNode< Word32T > AtomicCompareExchange(MachineType type, TNode< RawPtrT > base, TNode< WordT > offset, TNode< Word32T > old_value, TNode< Word32T > new_value)
void GotoIf(TNode< IntegralT > condition, Label *true_label, GotoHint goto_hint=GotoHint::kNone)
TNode< RawPtrT > StackSlotPtr(int size, int alignment)
Node * Load(MachineType type, Node *base)
void OptimizedStoreField(MachineRepresentation rep, TNode< HeapObject > object, int offset, Node *value)
void OptimizedStoreMap(TNode< HeapObject > object, TNode< Map >)
TNode< Float64T > RoundIntPtrToFloat64(Node *value)
TNode< IntPtrT > ChangeFloat64ToIntPtr(TNode< Float64T > value)
TNode< IntPtrT > ChangeInt32ToIntPtr(TNode< Word32T > value)
void TailCallJSCode(TNode< Code > code, TNode< Context > context, TNode< JSFunction > function, TNode< Object > new_target, TNode< Int32T > arg_count, TNode< JSDispatchHandleT > dispatch_handle)
TNode< BoolT > IntPtrEqual(TNode< WordT > left, TNode< WordT > right)
bool TryToIntPtrConstant(TNode< IntegralT > node, intptr_t *out_value)
TNode< Int64T > UniqueInt64Constant(int64_t value)
TNode< Int32T > TruncateFloat32ToInt32(TNode< Float32T > value)
void Switch(Node *index, Label *default_label, const int32_t *case_values, Label **case_labels, size_t case_count)
bool TryToInt32Constant(TNode< IntegralT > node, int32_t *out_value)
bool TryToInt64Constant(TNode< IntegralT > node, int64_t *out_value)
RawMachineAssembler * raw_assembler() const
TNode< typename std::tuple_element< index, std::tuple< T1, T2 > >::type > Projection(TNode< PairT< T1, T2 > > value)
TNode< Object > LoadRoot(RootIndex root_index)
void OptimizedStoreIndirectPointerFieldNoWriteBarrier(TNode< HeapObject > object, int offset, IndirectPointerTag tag, Node *value)
TNode< Float64T > Float64Constant(double value)
void CanonicalizeEmbeddedBuiltinsConstantIfNeeded(Handle< HeapObject > object)
TNode< Float64T > ChangeUintPtrToFloat64(TNode< UintPtrT > value)
void SetSourcePosition(const char *file, int line)
TNode< Uint32T > Word32Shr(TNode< Uint32T > left, TNode< Uint32T > right)
TNode< ExternalReference > ExternalConstant(ExternalReference address)
void StoreRoot(RootIndex root_index, TNode< Object > value)
TNode< TaggedIndex > TaggedIndexConstant(intptr_t value)
void AtomicStore64(AtomicMemoryOrder order, TNode< RawPtrT > base, TNode< WordT > offset, TNode< UintPtrT > value, TNode< UintPtrT > value_high)
TNode< Int32T > Int32Constant(int32_t value)
TNode< BoolT > WordNotEqual(TNode< WordT > left, TNode< WordT > right)
Node * CallCFunction(Node *function, std::optional< MachineType > return_type, CArgs... cargs)
TNode< RawPtrT > PointerConstant(void *value)
TNode< Type > HeapConstantNoHole(Handle< Type > object)
void MemoryBarrier(AtomicMemoryOrder order)
TNode< Type > UnalignedLoad(TNode< RawPtrT > base, TNode< IntPtrT > offset)
TNode< Int32T > Word32Sar(TNode< Int32T > left, TNode< Int32T > right)
TNode< BoolT > Word32Equal(TNode< Word32T > left, TNode< Word32T > right)
void SwitchFromTheCentralStack(TNode< RawPtrT > old_sp)
TNode< PairT< Word32T, Word32T > > Int32PairSub(TNode< Word32T > lhs_lo_word, TNode< Word32T > lhs_hi_word, TNode< Word32T > rhs_lo_word, TNode< Word32T > rhs_hi_word)
TNode< UintPtrT > UintPtrConstant(uintptr_t value)
TNode< Number > NumberConstant(double value)
TNode< ExternalReference > IsolateField(IsolateFieldId id)
TypedCodeAssemblerVariable< T > TVariable
TNode< Type > AtomicLoad(AtomicMemoryOrder order, TNode< RawPtrT > base, TNode< WordT > offset)
void Store(Node *base, Node *value)
TNode< UintPtrT > WordShr(TNode< UintPtrT > left, TNode< IntegralT > right)
Node * CallJSStubImpl(const CallInterfaceDescriptor &descriptor, TNode< Object > target, TNode< Object > context, TNode< Object > function, std::optional< TNode< Object > > new_target, TNode< Int32T > arity, std::optional< TNode< JSDispatchHandleT > > dispatch_handle, std::initializer_list< Node * > args)
void TailCallStubImpl(const CallInterfaceDescriptor &descriptor, TNode< Code > target, TNode< Object > context, std::initializer_list< Node * > args)
TNode< HeapObject > UntypedHeapConstantNoHole(Handle< HeapObject > object)
void Branch(TNode< IntegralT > condition, Label *true_label, Label *false_label, BranchHint branch_hint=BranchHint::kNone)
TNode< HeapObject > OptimizedAllocate(TNode< IntPtrT > size, AllocationType allocation)
void StoreNoWriteBarrier(MachineRepresentation rep, Node *base, Node *value)
TNode< T > Parameter(int value, const SourceLocation &loc=SourceLocation::Current())
CheckedNode< Object, false > Cast(Node *value, const char *location="")
TNode< BoolT > Word64Equal(TNode< Word64T > left, TNode< Word64T > right)
static constexpr int GetJSCallContextParamIndex(int parameter_count)
Definition linkage.h:495
static CallDescriptor * GetSimplifiedCDescriptor(Zone *zone, const MachineSignature *sig, CallDescriptor::Flags flags=CallDescriptor::kNoFlags, Operator::Properties properties=Operator::kNoThrow)
Definition c-linkage.cc:269
static CallDescriptor * GetStubCallDescriptor(Zone *zone, const CallInterfaceDescriptor &descriptor, int stack_parameter_count, CallDescriptor::Flags flags, Operator::Properties properties=Operator::kNoProperties, StubCallMode stub_mode=StubCallMode::kCallCodeObject)
Definition linkage.cc:587
static CallDescriptor * GetRuntimeCallDescriptor(Zone *zone, Runtime::FunctionId function, int js_parameter_count, Operator::Properties properties, CallDescriptor::Flags flags, LazyDeoptOnThrow lazy_deopt_on_throw=LazyDeoptOnThrow::kNo)
Definition linkage.cc:426
static CallDescriptor * GetBytecodeDispatchCallDescriptor(Zone *zone, const CallInterfaceDescriptor &descriptor, int stack_parameter_count)
Definition linkage.cc:686
bool UnalignedLoadSupported(MachineRepresentation rep)
bool UnalignedStoreSupported(MachineRepresentation rep)
void StoreToObject(MachineRepresentation rep, Node *object, Node *offset, Node *value, WriteBarrierKind write_barrier)
void OptimizedStoreIndirectPointerField(Node *object, int offset, IndirectPointerTag tag, Node *value, WriteBarrierKind write_barrier)
void OptimizedStoreField(MachineRepresentation rep, Node *object, int offset, Node *value, WriteBarrierKind write_barrier)
void OptimizedStoreMap(Node *object, Node *value, WriteBarrierKind write_barrier=kMapWriteBarrier)
void Branch(Node *condition, RawMachineLabel *true_val, RawMachineLabel *false_val, BranchHint branch_hint=BranchHint::kNone)
void Continuations(Node *call, RawMachineLabel *if_success, RawMachineLabel *if_exception)
Node * AddNode(const Operator *op, int input_count, Node *const *inputs)
Node * AtomicLoad(AtomicLoadParameters rep, Node *base, Node *index)
Node * AtomicStore64(AtomicStoreParameters params, Node *base, Node *index, Node *value, Node *value_high)
Node * Load(MachineType type, Node *base)
Node * Store(MachineRepresentation rep, Node *base, Node *value, WriteBarrierKind write_barrier)
Node * Int32PairAdd(Node *a_low, Node *a_high, Node *b_low, Node *b_high)
void TailCallN(CallDescriptor *call_descriptor, int input_count, Node *const *inputs)
Node * UnalignedLoad(MachineType type, Node *base)
void SetCurrentExternalSourcePosition(FileAndLine file_and_line)
Node * AtomicStore(AtomicStoreParameters params, Node *base, Node *index, Node *value)
Node * Int32PairSub(Node *a_low, Node *a_high, Node *b_low, Node *b_high)
Node * CallCFunctionWithCallerSavedRegisters(Node *function, MachineType return_type, SaveFPRegsMode mode, CArgs... cargs)
void Switch(Node *index, RawMachineLabel *default_label, const int32_t *case_values, RawMachineLabel **case_labels, size_t case_count)
Node * CallCFunction(Node *function, std::optional< MachineType > return_type, CArgs... cargs)
Node * LoadFromObject(MachineType type, Node *base, Node *offset)
void set_dynamic_js_parameter_count(Node *parameter_count)
Node * CallN(CallDescriptor *call_descriptor, int input_count, Node *const *inputs)
void StaticAssert(Node *value, const char *source)
Node * LoadProtectedPointerFromObject(Node *base, Node *offset)
Node * CallCFunctionWithoutFunctionDescriptor(Node *function, MachineType return_type, CArgs... cargs)
TypedCodeAssemblerVariable< Object > * exception_
ScopedExceptionHandler(CodeAssembler *assembler, CodeAssemblerExceptionHandlerLabel *label)
std::unique_ptr< CodeAssemblerExceptionHandlerLabel > label_
static bool IsWasmBuiltinId(Builtin id)
Node * arr_[kMaxSize]
#define CODE_ASSEMBLER_COMPARE(Name, ArgT, VarT, ToConstant, op)
Node ** ptr_
#define DEFINE_CODE_ASSEMBLER_UNARY_OP(name, ResType, ArgType)
#define ATOMIC_FUNCTION(name)
#define DEFINE_CODE_ASSEMBLER_BINARY_OP(name, ResType, Arg1Type, Arg2Type)
#define CODE_ASSEMBLER_UNARY_OP_LIST(V)
#define CODE_ASSEMBLER_BINARY_OP_LIST(V)
#define CAST(x)
Register const value_
Handle< Code > code
const PropertyKind kind_
Handle< SharedFunctionInfo > info
int end
enum v8::internal::@1270::DeoptimizableCodeIterator::@67 state_
base::Vector< const DirectHandle< Object > > args
Definition execution.cc:74
DirectHandle< Object > new_target
Definition execution.cc:75
Label label
BytecodeAssembler & assembler_
std::ostream & impl_
int32_t offset
TNode< Object > target
Node * node
Label label_
int position
Definition liveedit.cc:290
const char * name_
int m
Definition mul-fft.cc:294
Vector< const uint8_t > OneByteVector(const char *data, size_t length)
Definition vector.h:337
std::function< void()> CodeAssemblerCallback
std::ostream & operator<<(std::ostream &os, AccessMode access_mode)
bool DoubleToSmiInteger(double value, int *smi_int_value)
V8_INLINE constexpr bool IsSmi(TaggedImpl< kRefType, StorageType > obj)
Definition objects.h:665
constexpr bool CanBeTaggedPointer(MachineRepresentation rep)
const int kSmiShiftSize
V8_EXPORT_PRIVATE FlagValues v8_flags
return value
Definition map-inl.h:893
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage * MB
Definition flags.cc:2197
Tagged< To > Cast(Tagged< From > value, const v8::SourceLocation &loc=INIT_SOURCE_LOCATION_IN_DEBUG)
Definition casting.h:150
static constexpr int kMaxSize
#define FATAL(...)
Definition logging.h:47
#define DCHECK_LE(v1, v2)
Definition logging.h:490
#define CHECK(condition)
Definition logging.h:124
#define DCHECK_NOT_NULL(val)
Definition logging.h:492
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define CHECK_EQ(lhs, rhs)
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define DCHECK_GT(v1, v2)
Definition logging.h:487
#define USE(...)
Definition macros.h:293
#define V8_EXPORT_PRIVATE
Definition macros.h:460
#define arraysize(array)
Definition macros.h:67
static constexpr MachineType value
Definition tnode.h:198
static constexpr MachineType value
Definition tnode.h:194
static constexpr MachineType value
Definition tnode.h:211
bool operator()(const CodeAssemblerVariable::Impl *a, const CodeAssemblerVariable::Impl *b) const
Symbol file