v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
pipeline.cc
1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
7#include <fstream>
8#include <iostream>
9#include <memory>
10#include <optional>
11#include <sstream>
12
13#include "src/base/macros.h"
23#include "src/common/globals.h"
68#include "src/compiler/osr.h"
70#include "src/compiler/phase.h"
98#include "src/flags/flags.h"
100#include "src/heap/local-heap.h"
102#include "src/logging/counters.h"
109#include "src/utils/ostreams.h"
110#include "src/utils/utils.h"
111
112#if V8_ENABLE_WEBASSEMBLY
132#include "src/wasm/wasm-engine.h"
133
134#if V8_TARGET_ARCH_ARM64
136#endif // V8_TARGET_ARCH_ARM64
137
138#endif // V8_ENABLE_WEBASSEMBLY
139
140#if V8_ENABLE_WASM_SIMD256_REVEC
143#endif // V8_ENABLE_WASM_SIMD256_REVEC
144
145namespace v8 {
146namespace internal {
147namespace compiler {
148
149static constexpr char kPipelineCompilationJobZoneName[] =
150 "pipeline-compilation-job-zone";
151
152#define RUN_MAYBE_ABORT(phase, ...) \
153 if (V8_UNLIKELY(!Run<phase>(__VA_ARGS__))) return {};
154
155#define PIPELINE_RUN_MAYBE_ABORT(pipeline, phase, ...) \
156 if (V8_UNLIKELY(!(pipeline).Run<phase>(__VA_ARGS__))) return {};
157
158class PipelineImpl final {
159 public:
160 explicit PipelineImpl(TFPipelineData* data) : data_(data) {}
161
162 // Helpers for executing pipeline phases.
163 template <turboshaft::TurbofanPhase Phase, typename... Args>
164 V8_WARN_UNUSED_RESULT bool Run(Args&&... args);
165
166 // Step A.1. Initialize the heap broker.
168
169 // Step A.2. Run the graph creation and initial optimization passes.
171
172 // Step B. Run the concurrent optimization passes.
174
175 // Substep B.1. Produce a scheduled graph.
177
178#if V8_ENABLE_WASM_SIMD256_REVEC
179 V8_WARN_UNUSED_RESULT bool Revectorize();
180#endif // V8_ENABLE_WASM_SIMD256_REVEC
181
182 // Substep B.3. Run register allocation on the instruction sequence.
184 bool has_dummy_end_block);
185
186 // Step C. Run the code finalization pass.
187 MaybeDirectHandle<Code> FinalizeCode(bool retire_broker = true);
188
189 // Step D. Install any code dependencies.
191
192 void RunPrintAndVerify(const char* phase, bool untyped = false);
193
194 TFPipelineData* data() const { return data_; }
196 Isolate* isolate() const;
198
200
201 private:
202 TFPipelineData* const data_;
203};
204
205namespace {
206
207class SourcePositionWrapper final : public Reducer {
208 public:
209 SourcePositionWrapper(Reducer* reducer, SourcePositionTable* table)
210 : reducer_(reducer), table_(table) {}
211 ~SourcePositionWrapper() final = default;
212 SourcePositionWrapper(const SourcePositionWrapper&) = delete;
213 SourcePositionWrapper& operator=(const SourcePositionWrapper&) = delete;
214
215 const char* reducer_name() const override { return reducer_->reducer_name(); }
216
217 Reduction Reduce(Node* node) final {
218 SourcePosition const pos = table_->GetSourcePosition(node);
219 SourcePositionTable::Scope position(table_, pos);
220 return reducer_->Reduce(node, nullptr);
221 }
222
223 void Finalize() final { reducer_->Finalize(); }
224
225 private:
226 Reducer* const reducer_;
227 SourcePositionTable* const table_;
228};
229
230class NodeOriginsWrapper final : public Reducer {
231 public:
232 NodeOriginsWrapper(Reducer* reducer, NodeOriginTable* table)
233 : reducer_(reducer), table_(table) {}
234 ~NodeOriginsWrapper() final = default;
235 NodeOriginsWrapper(const NodeOriginsWrapper&) = delete;
236 NodeOriginsWrapper& operator=(const NodeOriginsWrapper&) = delete;
237
238 const char* reducer_name() const override { return reducer_->reducer_name(); }
239
240 Reduction Reduce(Node* node) final {
241 NodeOriginTable::Scope position(table_, reducer_name(), node);
242 return reducer_->Reduce(node, nullptr);
243 }
244
245 void Finalize() final { reducer_->Finalize(); }
246
247 private:
248 Reducer* const reducer_;
249 NodeOriginTable* const table_;
250};
251
252class V8_NODISCARD PipelineRunScope {
253 public:
254#ifdef V8_RUNTIME_CALL_STATS
255 PipelineRunScope(
256 TFPipelineData* data, const char* phase_name,
257 RuntimeCallCounterId runtime_call_counter_id,
258 RuntimeCallStats::CounterMode counter_mode = RuntimeCallStats::kExact)
259 : phase_scope_(data->pipeline_statistics(), phase_name),
260 zone_scope_(data->zone_stats(), phase_name),
261 origin_scope_(data->node_origins(), phase_name),
262 runtime_call_timer_scope(data->runtime_call_stats(),
263 runtime_call_counter_id, counter_mode) {
264 DCHECK_NOT_NULL(phase_name);
265 }
266#else // V8_RUNTIME_CALL_STATS
267 PipelineRunScope(TFPipelineData* data, const char* phase_name)
268 : phase_scope_(data->pipeline_statistics(), phase_name),
269 zone_scope_(data->zone_stats(), phase_name),
270 origin_scope_(data->node_origins(), phase_name) {
271 DCHECK_NOT_NULL(phase_name);
272 }
273#endif // V8_RUNTIME_CALL_STATS
274
275 Zone* zone() { return zone_scope_.zone(); }
276
277 private:
278 PhaseScope phase_scope_;
279 ZoneStats::Scope zone_scope_;
280 NodeOriginTable::PhaseScope origin_scope_;
281#ifdef V8_RUNTIME_CALL_STATS
282 RuntimeCallTimerScope runtime_call_timer_scope;
283#endif // V8_RUNTIME_CALL_STATS
284};
285
286// LocalIsolateScope encapsulates the phase where persistent handles are
287// attached to the LocalHeap inside {local_isolate}.
288class V8_NODISCARD LocalIsolateScope {
289 public:
290 explicit LocalIsolateScope(JSHeapBroker* broker,
291 OptimizedCompilationInfo* info,
292 LocalIsolate* local_isolate)
293 : broker_(broker), info_(info) {
294 broker_->AttachLocalIsolate(info_, local_isolate);
295 info_->tick_counter().AttachLocalHeap(local_isolate->heap());
296 }
297
298 ~LocalIsolateScope() {
299 info_->tick_counter().DetachLocalHeap();
300 broker_->DetachLocalIsolate(info_);
301 }
302
303 private:
304 JSHeapBroker* broker_;
305 OptimizedCompilationInfo* info_;
306};
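// Usage sketch for LocalIsolateScope (mirrors its use in the compilation
// job's execute step further down; {local_isolate} is assumed to be the
// worker's LocalIsolate):
//
//   {
//     LocalIsolateScope local_isolate_scope(data_.broker(), data_.info(),
//                                           local_isolate);
//     // Persistent handles stay attached to {local_isolate} for the whole
//     // scope and are detached again in the destructor.
//   }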
307
308void PrintFunctionSource(OptimizedCompilationInfo* info, Isolate* isolate,
309 int source_id,
310 DirectHandle<SharedFunctionInfo> shared) {
311 if (!IsUndefined(shared->script(), isolate)) {
312 DirectHandle<Script> script(Cast<Script>(shared->script()), isolate);
313
314 if (!IsUndefined(script->source(), isolate)) {
315 CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
316 Tagged<Object> source_name = script->name();
317 auto& os = tracing_scope.stream();
318 os << "--- FUNCTION SOURCE (";
319 if (IsString(source_name)) {
320 os << Cast<String>(source_name)->ToCString().get() << ":";
321 }
322 os << shared->DebugNameCStr().get() << ") id{";
323 os << info->optimization_id() << "," << source_id << "} start{";
324 os << shared->StartPosition() << "} ---\n";
325 {
326 DisallowGarbageCollection no_gc;
327 int start = shared->StartPosition();
328 int len = shared->EndPosition() - start;
329 SubStringRange source(Cast<String>(script->source()), no_gc, start,
330 len);
331 for (auto c : source) {
332 os << AsReversiblyEscapedUC16(c);
333 }
334 }
335
336 os << "\n--- END ---\n";
337 }
338 }
339}
340
341// Print information for the given inlining: which function was inlined and
342// where the inlining occurred.
343void PrintInlinedFunctionInfo(
344 OptimizedCompilationInfo* info, Isolate* isolate, int source_id,
345 int inlining_id, const OptimizedCompilationInfo::InlinedFunctionHolder& h) {
346 CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
347 auto& os = tracing_scope.stream();
348 os << "INLINE (" << h.shared_info->DebugNameCStr().get() << ") id{"
349 << info->optimization_id() << "," << source_id << "} AS " << inlining_id
350 << " AT ";
351 const SourcePosition position = h.position.position;
352 if (position.IsKnown()) {
353 os << "<" << position.InliningId() << ":" << position.ScriptOffset() << ">";
354 } else {
355 os << "<?>";
356 }
357 os << std::endl;
358}
359
360// Print the source of all functions that participated in this optimizing
361// compilation. For inlined functions print source position of their inlining.
362void PrintParticipatingSource(OptimizedCompilationInfo* info,
363 Isolate* isolate) {
364 SourceIdAssigner id_assigner(info->inlined_functions().size());
365 PrintFunctionSource(info, isolate, -1, info->shared_info());
366 const auto& inlined = info->inlined_functions();
367 for (unsigned id = 0; id < inlined.size(); id++) {
368 const int source_id = id_assigner.GetIdFor(inlined[id].shared_info);
369 PrintFunctionSource(info, isolate, source_id, inlined[id].shared_info);
370 PrintInlinedFunctionInfo(info, isolate, source_id, id, inlined[id]);
371 }
372}
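// Illustrative trace output produced by the helpers above when
// --print-opt-source is enabled (names, ids and positions are made up; the
// format follows the streaming statements above):
//
//   --- FUNCTION SOURCE (example.js:foo) id{3,-1} start{120} ---
//   function foo(x) { return bar(x) + 1; }
//   --- END ---
//   --- FUNCTION SOURCE (example.js:bar) id{3,0} start{80} ---
//   function bar(x) { return x * 2; }
//   --- END ---
//   INLINE (bar) id{3,0} AS 0 AT <0:25>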
373
374void TraceScheduleAndVerify(OptimizedCompilationInfo* info,
375 TFPipelineData* data, Schedule* schedule,
376 const char* phase_name) {
377 RCS_SCOPE(data->runtime_call_stats(),
378 RuntimeCallCounterId::kOptimizeTraceScheduleAndVerify,
379 RuntimeCallStats::kThreadSpecific);
381 "V8.TraceScheduleAndVerify");
382
383 TraceSchedule(info, data, schedule, phase_name);
384
385 if (v8_flags.turbo_verify) ScheduleVerifier::Run(schedule);
386}
387
388void AddReducer(TFPipelineData* data, GraphReducer* graph_reducer,
389 Reducer* reducer) {
390 if (data->info()->source_positions()) {
391 SourcePositionWrapper* const wrapper =
392 data->graph_zone()->New<SourcePositionWrapper>(
393 reducer, data->source_positions());
394 reducer = wrapper;
395 }
396 if (data->info()->trace_turbo_json()) {
397 NodeOriginsWrapper* const wrapper =
398 data->graph_zone()->New<NodeOriginsWrapper>(reducer,
399 data->node_origins());
400 reducer = wrapper;
401 }
402
403 graph_reducer->AddReducer(reducer);
404}
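// In effect, AddReducer registers a small decorator chain around {reducer}.
// Sketch of the resulting wrapping when source positions and
// --trace-turbo-json are both enabled:
//
//   GraphReducer
//     -> NodeOriginsWrapper          // scopes node origins per Reduce() call
//          -> SourcePositionWrapper  // scopes source positions per Reduce() call
//               -> the actual reducer (e.g. DeadCodeElimination)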
405
406TurbofanPipelineStatistics* CreatePipelineStatistics(
407 DirectHandle<Script> script, OptimizedCompilationInfo* info,
408 Isolate* isolate, ZoneStats* zone_stats) {
409 TurbofanPipelineStatistics* pipeline_statistics = nullptr;
410
411 bool tracing_enabled;
413 &tracing_enabled);
414 if (tracing_enabled || v8_flags.turbo_stats || v8_flags.turbo_stats_nvp) {
415 pipeline_statistics = new TurbofanPipelineStatistics(
416 info, isolate->GetTurboStatistics(), zone_stats);
417 pipeline_statistics->BeginPhaseKind("V8.TFInitializing");
418 }
419
420 if (info->trace_turbo_json()) {
421 TurboJsonFile json_of(info, std::ios_base::trunc);
422 json_of << "{\"function\" : ";
423 JsonPrintFunctionSource(json_of, -1, info->GetDebugName(), script, isolate,
424 info->shared_info());
425 json_of << ",\n\"phases\":[";
426 }
427
428 return pipeline_statistics;
429}
430
431#if V8_ENABLE_WEBASSEMBLY
432TurbofanPipelineStatistics* CreatePipelineStatistics(
433 WasmCompilationData& compilation_data, const wasm::WasmModule* wasm_module,
434 OptimizedCompilationInfo* info, ZoneStats* zone_stats) {
435 TurbofanPipelineStatistics* pipeline_statistics = nullptr;
436
437 bool tracing_enabled;
439 TRACE_DISABLED_BY_DEFAULT("v8.wasm.turbofan"), &tracing_enabled);
440 if (tracing_enabled || v8_flags.turbo_stats_wasm) {
441 pipeline_statistics = new TurbofanPipelineStatistics(
442 info, wasm::GetWasmEngine()->GetOrCreateTurboStatistics(), zone_stats);
443 pipeline_statistics->BeginPhaseKind("V8.WasmInitializing");
444 }
445
446 if (info->trace_turbo_json()) {
447 TurboJsonFile json_of(info, std::ios_base::trunc);
448 std::unique_ptr<char[]> function_name = info->GetDebugName();
449 json_of << "{\"function\":\"" << function_name.get() << "\", \"source\":\"";
450 std::ostringstream disassembly;
451 std::vector<uint32_t> source_positions;
452 base::Vector<const uint8_t> function_bytes{compilation_data.func_body.start,
453 compilation_data.body_size()};
454 base::Vector<const uint8_t> module_bytes{nullptr, 0};
455 std::optional<wasm::ModuleWireBytes> maybe_wire_bytes =
456 compilation_data.wire_bytes_storage->GetModuleBytes();
457 if (maybe_wire_bytes) module_bytes = maybe_wire_bytes->module_bytes();
458
460 wasm_module, compilation_data.func_index, function_bytes, module_bytes,
461 compilation_data.func_body.offset, disassembly, &source_positions);
462 for (const auto& c : disassembly.str()) {
463 json_of << AsEscapedUC16ForJSON(c);
464 }
465 json_of << "\",\n\"sourceLineToBytecodePosition\" : [";
466 bool insert_comma = false;
467 for (auto val : source_positions) {
468 if (insert_comma) {
469 json_of << ", ";
470 }
471 json_of << val;
472 insert_comma = true;
473 }
474 json_of << "],\n\"phases\":[";
475 }
476
477 return pipeline_statistics;
478}
479#endif // V8_ENABLE_WEBASSEMBLY
480
481// This runs instruction selection, register allocation and code generation.
482[[nodiscard]] bool GenerateCodeFromTurboshaftGraph(
483 Linkage* linkage, turboshaft::Pipeline& turboshaft_pipeline,
484 PipelineImpl* turbofan_pipeline = nullptr,
485 std::shared_ptr<OsrHelper> osr_helper = {}) {
486 turboshaft::PipelineData* turboshaft_data = turboshaft_pipeline.data();
487 turboshaft_data->InitializeCodegenComponent(osr_helper);
488 // Run Turboshaft instruction selection.
489 if (!turboshaft_pipeline.PrepareForInstructionSelection()) return false;
490 if (!turboshaft_pipeline.SelectInstructions(linkage)) return false;
491 // We can release the graph now.
492 turboshaft_data->ClearGraphComponent();
493
494 if (!turboshaft_pipeline.AllocateRegisters(
496 return false;
497 }
498 if (!turboshaft_pipeline.AssembleCode(linkage)) {
499 return false;
500 }
501 return true;
502}
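// Call sketch, as used by the JS compilation job's execute step further down
// (other callers may omit the Turbofan pipeline and the OSR helper):
//
//   const bool success = GenerateCodeFromTurboshaftGraph(
//       linkage_, turboshaft_pipeline, &pipeline_, data_.osr_helper_ptr());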
503
504} // namespace
505
532
534 Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
535 Handle<JSFunction> function, BytecodeOffset osr_offset, CodeKind code_kind)
536 // Note that the OptimizedCompilationInfo is not initialized at the time
537 // we pass it to the CompilationJob constructor, but it is not
538 // dereferenced there.
541 zone_(isolate->allocator(), kPipelineCompilationJobZoneName),
542 zone_stats_(isolate->allocator()),
543 compilation_info_(&zone_, isolate, shared_info, function, code_kind,
544 osr_offset),
545 pipeline_statistics_(CreatePipelineStatistics(
546 direct_handle(Cast<Script>(shared_info->script()), isolate),
547 compilation_info(), isolate, &zone_stats_)),
551 isolate, compilation_info(),
552 AssemblerOptions::Default(isolate)),
554 linkage_(nullptr) {
556}
557
559
560void TraceSchedule(OptimizedCompilationInfo* info, TFPipelineData* data,
561 Schedule* schedule, const char* phase_name) {
562 if (info->trace_turbo_json()) {
563 UnparkedScopeIfNeeded scope(data->broker());
564 AllowHandleDereference allow_deref;
565
566 TurboJsonFile json_of(info, std::ios_base::app);
567 json_of << "{\"name\":\"" << phase_name << "\",\"type\":\"schedule\""
568 << ",\"data\":\"";
569 std::stringstream schedule_stream;
570 schedule_stream << *schedule;
571 std::string schedule_string(schedule_stream.str());
572 for (const auto& c : schedule_string) {
573 json_of << AsEscapedUC16ForJSON(c);
574 }
575 json_of << "\"},\n";
576 }
577
578 if (info->trace_turbo_graph() || v8_flags.trace_turbo_scheduler) {
579 UnparkedScopeIfNeeded scope(data->broker());
580 AllowHandleDereference allow_deref;
581
582 CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
583 tracing_scope.stream() << "----- " << phase_name << " -----\n" << *schedule;
584 }
585}
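// Output sketch: with --trace-turbo-json, each traced schedule is appended to
// the TurboJsonFile as one entry of the "phases" array (phase name and data
// are illustrative):
//
//   {"name":"scheduling","type":"schedule","data":"...escaped schedule..."},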
586
587// Print the code after compiling it.
590 if (v8_flags.print_opt_source && info->IsOptimizing()) {
591 PrintParticipatingSource(info, isolate);
592 }
593
594#ifdef ENABLE_DISASSEMBLER
595 const bool print_code =
596 v8_flags.print_code ||
597 (info->IsOptimizing() && v8_flags.print_opt_code &&
598 info->shared_info()->PassesFilter(v8_flags.print_opt_code_filter));
599 if (print_code) {
600 std::unique_ptr<char[]> debug_name = info->GetDebugName();
601 CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
602 std::ostream& os = tracing_scope.stream();
603
604 // Print the source code if available.
605 const bool print_source = info->IsOptimizing();
606 if (print_source) {
607 DirectHandle<SharedFunctionInfo> shared = info->shared_info();
608 if (IsScript(shared->script()) &&
609 !IsUndefined(Cast<Script>(shared->script())->source(), isolate)) {
610 os << "--- Raw source ---\n";
612 Cast<String>(Cast<Script>(shared->script())->source()),
613 shared->StartPosition());
614 // fun->end_position() points to the last character in the stream. We
615 // need to compensate by adding one to calculate the length.
616 int source_len = shared->EndPosition() - shared->StartPosition() + 1;
617 for (int i = 0; i < source_len; i++) {
618 if (stream.HasMore()) {
619 os << AsReversiblyEscapedUC16(stream.GetNext());
620 }
621 }
622 os << "\n\n";
623 }
624 }
625 if (info->IsOptimizing()) {
626 os << "--- Optimized code ---\n"
627 << "optimization_id = " << info->optimization_id() << "\n";
628 } else {
629 os << "--- Code ---\n";
630 }
631 if (print_source) {
632 DirectHandle<SharedFunctionInfo> shared = info->shared_info();
633 os << "source_position = " << shared->StartPosition() << "\n";
634 }
635 code->Disassemble(debug_name.get(), os, isolate);
636 os << "--- End code ---\n";
637 }
638#endif // ENABLE_DISASSEMBLER
639}
640
641// The CheckMaps node can migrate objects with deprecated maps. Afterwards, we
642// check the resulting object against a fixed list of maps known at compile
643// time. This is problematic if we made any assumptions about an object with the
644// deprecated map, as it has now changed shape. Therefore, we want to avoid
645// embedding deprecated maps, as objects with these maps can be changed by
646// CheckMaps.
647// The following code only checks for deprecated maps at the end of compilation,
648// but doesn't protect us against the embedded maps becoming deprecated later.
649// However, this is enough, since if the map becomes deprecated later, it will
650// migrate to a new map not yet known at compile time, so if we migrate to it as
651// part of a CheckMaps, this check will always fail afterwards and deoptimize.
652// This in turn relies on a runtime invariant that map migrations always target
653// newly allocated maps.
654bool CheckNoDeprecatedMaps(DirectHandle<Code> code, Isolate* isolate) {
655 int mode_mask = RelocInfo::EmbeddedObjectModeMask();
656 for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
657 DCHECK(RelocInfo::IsEmbeddedObjectMode(it.rinfo()->rmode()));
658 Tagged<HeapObject> obj = it.rinfo()->target_object(isolate);
659 if (IsMap(obj) && Cast<Map>(obj)->is_deprecated()) {
660 return false;
661 }
662 }
663 return true;
664}
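// Usage sketch: the finalization code below retries the whole optimization
// when an embedded deprecated map is found, relying on the invariant
// described above:
//
//   if (!CheckNoDeprecatedMaps(code, isolate)) {
//     return RetryOptimization(BailoutReason::kConcurrentMapDeprecation);
//   }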
665
666namespace {
667// Ensure that the RuntimeCallStats table is set on the PipelineData for the
668// duration of the job phase and unset immediately afterwards. Each job
669// needs to set the correct RuntimeCallStats table depending on whether it
670// is running on a background or foreground thread.
671class V8_NODISCARD PipelineJobScope {
672 public:
673 PipelineJobScope(TFPipelineData* data, RuntimeCallStats* stats)
674 : data_(data), current_broker_(data_->broker()) {
675 data_->set_runtime_call_stats(stats);
676 }
677 PipelineJobScope(turboshaft::PipelineData* turboshaft_data,
678 RuntimeCallStats* stats)
679 : turboshaft_data_(turboshaft_data),
680 current_broker_(turboshaft_data_->broker()) {
681 turboshaft_data_->set_runtime_call_stats(stats);
682 }
683
684 ~PipelineJobScope() {
685 if (data_) data_->set_runtime_call_stats(nullptr);
686 if (turboshaft_data_) turboshaft_data_->set_runtime_call_stats(nullptr);
687 }
688
689 private:
690 HighAllocationThroughputScope high_throughput_scope_{
691 V8::GetCurrentPlatform()};
692 TFPipelineData* data_ = nullptr;
693 turboshaft::PipelineData* turboshaft_data_ = nullptr;
694 CurrentHeapBrokerScope current_broker_;
695};
696} // namespace
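// Usage sketch for PipelineJobScope: the job entry points below install the
// thread-appropriate table only for the duration of their phase, e.g.
//
//   // Main thread (prepare/finalize):
//   PipelineJobScope scope(&data_, isolate->counters()->runtime_call_stats());
//   // Background thread (execute): the worker-specific {stats} table.
//   PipelineJobScope scope(&data_, stats);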
697
699 Isolate* isolate) {
700 // Ensure that the RuntimeCallStats table of main thread is available for
701 // phases happening during PrepareJob.
702 PipelineJobScope scope(&data_, isolate->counters()->runtime_call_stats());
703
704 if (compilation_info()->bytecode_array()->length() >
705 v8_flags.max_optimized_bytecode_size) {
706 return AbortOptimization(BailoutReason::kFunctionTooBig);
707 }
708
709 if (!v8_flags.always_turbofan) {
710 compilation_info()->set_bailout_on_uninitialized();
711 }
712 if (v8_flags.turbo_loop_peeling) {
713 compilation_info()->set_loop_peeling();
714 }
715 if (v8_flags.turbo_inlining) {
716 compilation_info()->set_inlining();
717 }
718 if (v8_flags.turbo_allocation_folding) {
719 compilation_info()->set_allocation_folding();
720 }
721
722 // Determine whether to specialize the code for the function's context.
723 // We can't do this in the case of OSR, because we want to cache the
724 // generated code on the native context keyed on SharedFunctionInfo.
725 // TODO(mythria): Check if it is better to key the OSR cache on JSFunction and
726 // allow context specialization for OSR code.
727 if (!compilation_info()
728 ->shared_info()
729 ->function_context_independent_compiled() &&
730 compilation_info()->closure()->raw_feedback_cell()->map() ==
731 ReadOnlyRoots(isolate).one_closure_cell_map() &&
732 !compilation_info()->is_osr()) {
733 compilation_info()->set_function_context_specializing();
735 }
736
739 isolate, compilation_info()->shared_info());
740 }
741
743 compilation_info()->shared_info()->StartPosition());
744
747
748 if (compilation_info()->is_osr()) data_.InitializeOsrHelper();
749
750 // InitializeHeapBroker() and CreateGraph() may already use
751 // IsPendingAllocation.
752 isolate->heap()->PublishMainThreadPendingAllocations();
753
755 return FAILED;
756 }
757
758 // Serialization may have allocated.
759 isolate->heap()->PublishMainThreadPendingAllocations();
760
761 return SUCCEEDED;
762}
763
765 RuntimeCallStats* stats, LocalIsolate* local_isolate) {
766 // Ensure that the RuntimeCallStats table is only available during execution
767 // and not during finalization as that might be on a different thread.
768 PipelineJobScope scope(&data_, stats);
769 LocalIsolateScope local_isolate_scope(data_.broker(), data_.info(),
770 local_isolate);
771
774 turboshaft::Pipeline turboshaft_pipeline(&turboshaft_data_);
776 return AbortOptimization(BailoutReason::kCancelled);
777 }
778
779 if (V8_UNLIKELY(v8_flags.turbolev)) {
780 if (!turboshaft_pipeline.CreateGraphWithMaglev(linkage_)) {
781 return AbortOptimization(BailoutReason::kGraphBuildingFailed);
782 }
783 } else {
785 return AbortOptimization(BailoutReason::kGraphBuildingFailed);
786 }
787
788 // We selectively Unpark inside OptimizeTurbofanGraph.
790
791 // We convert the turbofan graph to turboshaft.
792 if (!turboshaft_pipeline.CreateGraphFromTurbofan(&data_, linkage_)) {
794 return FAILED;
795 }
796 }
797
798 if (!turboshaft_pipeline.OptimizeTurboshaftGraph(linkage_)) {
799 return FAILED;
800 }
801
802 const bool success = GenerateCodeFromTurboshaftGraph(
803 linkage_, turboshaft_pipeline, &pipeline_, data_.osr_helper_ptr());
804 return success ? SUCCEEDED : FAILED;
805}
806
808 Isolate* isolate) {
809 // Ensure that the RuntimeCallStats table of main thread is available for
810 // phases happening during PrepareJob.
811 PipelineJobScope scope(&data_, isolate->counters()->runtime_call_stats());
812 RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeFinalizePipelineJob);
815 turboshaft::Pipeline turboshaft_pipeline(&turboshaft_data_);
816 MaybeHandle<Code> maybe_code = turboshaft_pipeline.FinalizeCode();
817 if (!maybe_code.ToHandle(&code)) {
818 if (compilation_info()->bailout_reason() == BailoutReason::kNoReason) {
819 return AbortOptimization(BailoutReason::kCodeGenerationFailed);
820 }
821 return FAILED;
822 }
823 DirectHandle<NativeContext> context(compilation_info()->native_context(),
824 isolate);
825 if (context->IsDetached()) {
826 return AbortOptimization(BailoutReason::kDetachedNativeContext);
827 }
828 if (!CheckNoDeprecatedMaps(code, isolate)) {
829 return RetryOptimization(BailoutReason::kConcurrentMapDeprecation);
830 }
831 if (!turboshaft_pipeline.CommitDependencies(code)) {
832 return RetryOptimization(BailoutReason::kBailedOutDueToDependencyChange);
833 }
834 compilation_info()->SetCode(code);
835 GlobalHandleVector<Map> maps = CollectRetainedMaps(isolate, code);
836 RegisterWeakObjectsInOptimizedCode(isolate, context, code, std::move(maps));
837 return SUCCEEDED;
838}
839
840template <turboshaft::TurbofanPhase Phase, typename... Args>
841bool PipelineImpl::Run(Args&&... args) {
842#ifdef V8_RUNTIME_CALL_STATS
843 PipelineRunScope scope(this->data_, Phase::phase_name(),
844 Phase::kRuntimeCallCounterId, Phase::kCounterMode);
845#else
846 PipelineRunScope scope(this->data_, Phase::phase_name());
847#endif
848 Phase phase;
849 static_assert(Phase::kKind == PhaseKind::kTurbofan);
850 phase.Run(this->data_, scope.zone(), std::forward<Args>(args)...);
851 return !info()->was_cancelled();
852}
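// Usage sketch for the RUN_MAYBE_ABORT macros defined near the top of this
// file ({SomePhase} stands for any of the TurbofanPhase structs below):
//
//   RUN_MAYBE_ABORT(SomePhase, some_arg);
//   // expands to:
//   if (V8_UNLIKELY(!Run<SomePhase>(some_arg))) return {};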
853
856
857 void Run(TFPipelineData* data, Zone* temp_zone, Linkage* linkage) {
859 if (data->info()->analyze_environment_liveness()) {
860 flags |= BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness;
861 }
862 if (data->info()->bailout_on_uninitialized()) {
863 flags |= BytecodeGraphBuilderFlag::kBailoutOnUninitialized;
864 }
865
866 JSHeapBroker* broker = data->broker();
868 JSFunctionRef closure = MakeRef(broker, data->info()->closure());
869 BytecodeArrayRef bytecode = MakeRef(broker, data->info()->bytecode_array());
870 CallFrequency frequency(1.0f);
871 BuildGraphFromBytecode(
872 broker, temp_zone, closure.shared(broker), bytecode,
873 closure.raw_feedback_cell(broker), data->info()->osr_offset(),
874 data->jsgraph(), frequency, data->source_positions(),
875 data->node_origins(), SourcePosition::kNotInlined,
876 data->info()->code_kind(), flags, &data->info()->tick_counter(),
877 ObserveNodeInfo{data->observe_node_manager(),
878 data->info()->node_observer()});
879
880 // We need to be certain that the parameter count reported by our output
881 // Code object matches what the code we compile expects. Otherwise, this
882 // may effectively lead to signature mismatches during function calls. This
883 // CHECK is a defense-in-depth measure to ensure this doesn't happen.
884 CHECK_EQ(
885 StartNode(data->jsgraph()->graph()->start()).FormalParameterCount(),
886 linkage->GetIncomingDescriptor()->ParameterSlotCount());
887 }
888};
889
892
893 void Run(TFPipelineData* data, Zone* temp_zone) {
894 OptimizedCompilationInfo* info = data->info();
895 GraphReducer graph_reducer(temp_zone, data->graph(), &info->tick_counter(),
896 data->broker(), data->jsgraph()->Dead(),
897 data->observe_node_manager());
898 DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
899 data->common(), temp_zone);
900 CheckpointElimination checkpoint_elimination(&graph_reducer);
901 CommonOperatorReducer common_reducer(
902 &graph_reducer, data->graph(), data->broker(), data->common(),
903 data->machine(), temp_zone, BranchSemantics::kJS);
904 JSCallReducer::Flags call_reducer_flags = JSCallReducer::kNoFlags;
905 if (data->info()->bailout_on_uninitialized()) {
906 call_reducer_flags |= JSCallReducer::kBailoutOnUninitialized;
907 }
908 if (data->info()->inline_js_wasm_calls() && data->info()->inlining()) {
909 call_reducer_flags |= JSCallReducer::kInlineJSToWasmCalls;
910 }
911 JSCallReducer call_reducer(&graph_reducer, data->jsgraph(), data->broker(),
912 temp_zone, call_reducer_flags);
913 JSContextSpecialization context_specialization(
914 &graph_reducer, data->jsgraph(), data->broker(),
915 data->specialization_context(),
916 data->info()->function_context_specializing()
917 ? data->info()->closure()
920 JSNativeContextSpecialization::kNoFlags;
921 if (data->info()->bailout_on_uninitialized()) {
922 flags |= JSNativeContextSpecialization::kBailoutOnUninitialized;
923 }
924 // Passing the OptimizedCompilationInfo's shared zone here as
925 // JSNativeContextSpecialization allocates out-of-heap objects
926 // that need to live until code generation.
927 JSNativeContextSpecialization native_context_specialization(
928 &graph_reducer, data->jsgraph(), data->broker(), flags, temp_zone,
929 info->zone());
930 JSInliningHeuristic inlining(
931 &graph_reducer, temp_zone, data->info(), data->jsgraph(),
932 data->broker(), data->source_positions(), data->node_origins(),
933 JSInliningHeuristic::kJSOnly, nullptr, nullptr);
934
935 JSIntrinsicLowering intrinsic_lowering(&graph_reducer, data->jsgraph(),
936 data->broker());
937 AddReducer(data, &graph_reducer, &dead_code_elimination);
938 AddReducer(data, &graph_reducer, &checkpoint_elimination);
939 AddReducer(data, &graph_reducer, &common_reducer);
940 AddReducer(data, &graph_reducer, &native_context_specialization);
941 AddReducer(data, &graph_reducer, &context_specialization);
942 AddReducer(data, &graph_reducer, &intrinsic_lowering);
943 AddReducer(data, &graph_reducer, &call_reducer);
944 if (data->info()->inlining()) {
945 AddReducer(data, &graph_reducer, &inlining);
946 }
947 graph_reducer.ReduceGraph();
948 info->set_inlined_bytecode_size(inlining.total_inlined_bytecode_size());
949
950#if V8_ENABLE_WEBASSEMBLY
951 // Not forwarding this information to the TurboFan pipeline data here later
952 // skips `JSWasmInliningPhase` if there are no JS-to-Wasm function calls.
953 if (call_reducer.has_js_wasm_calls()) {
954 const wasm::WasmModule* wasm_module =
955 call_reducer.wasm_module_for_inlining();
956 DCHECK_NOT_NULL(wasm_module);
957 data->set_wasm_module_for_inlining(wasm_module);
958 // Enable source positions if not enabled yet. While JS only uses the
959 // source position table for tracing, profiling, ..., wasm needs it at
960 // compile time for keeping track of source locations for wasm traps.
961 // Note: Since data->info()->set_source_positions() is not called here,
962 // source positions are not kept alive after compilation finishes, even
963 // with wasm inlining (unless they are needed for tracing, ...).
964 if (!data->source_positions()->IsEnabled()) {
965 data->source_positions()->Enable();
966 data->source_positions()->AddDecorator();
967 }
968 }
969#endif
970 }
971};
972
973#if V8_ENABLE_WEBASSEMBLY
974struct JSWasmInliningPhase {
975 DECL_PIPELINE_PHASE_CONSTANTS(JSWasmInlining)
976 void Run(TFPipelineData* data, Zone* temp_zone) {
977 DCHECK(data->has_js_wasm_calls());
978 DCHECK_NOT_NULL(data->wasm_module_for_inlining());
979
980 OptimizedCompilationInfo* info = data->info();
981 GraphReducer graph_reducer(temp_zone, data->graph(), &info->tick_counter(),
982 data->broker(), data->jsgraph()->Dead());
983 DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
984 data->common(), temp_zone);
985 CommonOperatorReducer common_reducer(
986 &graph_reducer, data->graph(), data->broker(), data->common(),
987 data->machine(), temp_zone, BranchSemantics::kMachine);
988 // If we want to inline in Turboshaft instead (i.e., later in the
989 // pipeline), only inline the wrapper here in TurboFan.
990 // TODO(dlehmann,353475584): Long-term, also inline the JS-to-Wasm wrappers
991 // in Turboshaft (or in Maglev, depending on the shared frontend).
992 JSInliningHeuristic::Mode mode =
993 (v8_flags.turboshaft_wasm_in_js_inlining)
994 ? JSInliningHeuristic::kWasmWrappersOnly
995 : JSInliningHeuristic::kWasmFullInlining;
996 JSInliningHeuristic inlining(
997 &graph_reducer, temp_zone, data->info(), data->jsgraph(),
998 data->broker(), data->source_positions(), data->node_origins(), mode,
999 data->wasm_module_for_inlining(), data->js_wasm_calls_sidetable());
1000 AddReducer(data, &graph_reducer, &dead_code_elimination);
1001 AddReducer(data, &graph_reducer, &common_reducer);
1002 AddReducer(data, &graph_reducer, &inlining);
1003 graph_reducer.ReduceGraph();
1004 }
1005};
1006
1007struct JSWasmLoweringPhase {
1008 DECL_PIPELINE_PHASE_CONSTANTS(JSWasmLowering)
1009 void Run(TFPipelineData* data, Zone* temp_zone) {
1010 DCHECK(data->has_js_wasm_calls());
1011 DCHECK_NOT_NULL(data->wasm_module_for_inlining());
1012
1013 OptimizedCompilationInfo* info = data->info();
1014 GraphReducer graph_reducer(temp_zone, data->graph(), &info->tick_counter(),
1015 data->broker(), data->jsgraph()->Dead());
1016 // The Wasm trap handler is not supported in JavaScript.
1017 const bool disable_trap_handler = true;
1018 WasmGCLowering lowering(&graph_reducer, data->jsgraph(),
1019 data->wasm_module_for_inlining(),
1020 disable_trap_handler, data->source_positions());
1021 AddReducer(data, &graph_reducer, &lowering);
1022 graph_reducer.ReduceGraph();
1023 }
1024};
1025#endif // V8_ENABLE_WEBASSEMBLY
1026
1027struct EarlyGraphTrimmingPhase {
1028 DECL_PIPELINE_PHASE_CONSTANTS(EarlyGraphTrimming)
1029
1030 void Run(TFPipelineData* data, Zone* temp_zone) {
1031 GraphTrimmer trimmer(temp_zone, data->graph());
1032 NodeVector roots(temp_zone);
1033 data->jsgraph()->GetCachedNodes(&roots);
1034 UnparkedScopeIfNeeded scope(data->broker(), v8_flags.trace_turbo_trimming);
1035 trimmer.TrimGraph(roots.begin(), roots.end());
1036 }
1037};
1038
1041
1042 void Run(TFPipelineData* data, Zone* temp_zone, Typer* typer) {
1043 NodeVector roots(temp_zone);
1044 data->jsgraph()->GetCachedNodes(&roots);
1045
1046 // Make sure we always type True and False. Needed for escape analysis.
1047 roots.push_back(data->jsgraph()->TrueConstant());
1048 roots.push_back(data->jsgraph()->FalseConstant());
1049
1050 LoopVariableOptimizer induction_vars(data->jsgraph()->graph(),
1051 data->common(), temp_zone);
1052 if (v8_flags.turbo_loop_variable) induction_vars.Run();
1053
1054 // The typer inspects heap objects, so we need to unpark the local heap.
1055 UnparkedScopeIfNeeded scope(data->broker());
1056 typer->Run(roots, &induction_vars);
1057 }
1058};
1059
1062
1063 void Run(TFPipelineData* data, Zone* temp_zone) {
1064 class RemoveTypeReducer final : public Reducer {
1065 public:
1066 const char* reducer_name() const override { return "RemoveTypeReducer"; }
1067 Reduction Reduce(Node* node) final {
1068 if (NodeProperties::IsTyped(node)) {
1069 NodeProperties::RemoveType(node);
1070 return Changed(node);
1071 }
1072 return NoChange();
1073 }
1074 };
1075
1076 NodeVector roots(temp_zone);
1077 data->jsgraph()->GetCachedNodes(&roots);
1078 for (Node* node : roots) {
1079 NodeProperties::RemoveType(node);
1080 }
1081
1082 GraphReducer graph_reducer(
1083 temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
1084 data->jsgraph()->Dead(), data->observe_node_manager());
1085 RemoveTypeReducer remove_type_reducer;
1086 AddReducer(data, &graph_reducer, &remove_type_reducer);
1087 graph_reducer.ReduceGraph();
1088 }
1089};
1090
1091struct HeapBrokerInitializationPhase {
1092 DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS(HeapBrokerInitialization)
1093
1094 void Run(TFPipelineData* data, Zone* temp_zone) {
1095 data->broker()->AttachCompilationInfo(data->info());
1096 data->broker()->InitializeAndStartSerializing(data->native_context());
1097 }
1098};
1099
1100struct TypedLoweringPhase {
1101 DECL_PIPELINE_PHASE_CONSTANTS(TypedLowering)
1102
1103 void Run(TFPipelineData* data, Zone* temp_zone) {
1104 GraphReducer graph_reducer(
1105 temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
1106 data->jsgraph()->Dead(), data->observe_node_manager());
1107 DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1108 data->common(), temp_zone);
1109 JSCreateLowering create_lowering(&graph_reducer, data->jsgraph(),
1110 data->broker(), temp_zone);
1111 JSTypedLowering typed_lowering(&graph_reducer, data->jsgraph(),
1112 data->broker(), temp_zone);
1113 ConstantFoldingReducer constant_folding_reducer(
1114 &graph_reducer, data->jsgraph(), data->broker());
1115 TypedOptimization typed_optimization(&graph_reducer, data->dependencies(),
1116 data->jsgraph(), data->broker());
1117 SimplifiedOperatorReducer simple_reducer(
1118 &graph_reducer, data->jsgraph(), data->broker(), BranchSemantics::kJS);
1119 CheckpointElimination checkpoint_elimination(&graph_reducer);
1120 CommonOperatorReducer common_reducer(
1121 &graph_reducer, data->graph(), data->broker(), data->common(),
1122 data->machine(), temp_zone, BranchSemantics::kJS);
1123 AddReducer(data, &graph_reducer, &dead_code_elimination);
1124
1125 AddReducer(data, &graph_reducer, &create_lowering);
1126 AddReducer(data, &graph_reducer, &constant_folding_reducer);
1127 AddReducer(data, &graph_reducer, &typed_lowering);
1128 AddReducer(data, &graph_reducer, &typed_optimization);
1129 AddReducer(data, &graph_reducer, &simple_reducer);
1130 AddReducer(data, &graph_reducer, &checkpoint_elimination);
1131 AddReducer(data, &graph_reducer, &common_reducer);
1132
1133 // ConstantFoldingReducer, JSCreateLowering, JSTypedLowering, and
1134 // TypedOptimization access the heap.
1135 UnparkedScopeIfNeeded scope(data->broker());
1136
1137 graph_reducer.ReduceGraph();
1138 }
1139};
1140
1143
1144 void Run(TFPipelineData* data, Zone* temp_zone) {
1145 EscapeAnalysis escape_analysis(data->jsgraph(),
1146 &data->info()->tick_counter(), temp_zone);
1147 escape_analysis.ReduceGraph();
1148
1149 GraphReducer reducer(temp_zone, data->graph(),
1150 &data->info()->tick_counter(), data->broker(),
1151 data->jsgraph()->Dead(), data->observe_node_manager());
1152 EscapeAnalysisReducer escape_reducer(
1153 &reducer, data->jsgraph(), data->broker(),
1154 escape_analysis.analysis_result(), temp_zone);
1155
1156 AddReducer(data, &reducer, &escape_reducer);
1157
1158 // EscapeAnalysisReducer accesses the heap.
1159 UnparkedScopeIfNeeded scope(data->broker());
1160
1161 reducer.ReduceGraph();
1162 // TODO(turbofan): Turn this into a debug mode check once we have
1163 // confidence.
1164 escape_reducer.VerifyReplacement();
1165 }
1166};
1167
1168struct TypeAssertionsPhase {
1169 DECL_PIPELINE_PHASE_CONSTANTS(TypeAssertions)
1170
1171 void Run(TFPipelineData* data, Zone* temp_zone) {
1172 Schedule* schedule = Scheduler::ComputeSchedule(
1173 temp_zone, data->graph(), Scheduler::kTempSchedule,
1174 &data->info()->tick_counter(), data->profile_data());
1175
1176 AddTypeAssertions(data->jsgraph(), schedule, temp_zone);
1177 }
1178};
1179
1182
1183 void Run(TFPipelineData* data, Zone* temp_zone, Linkage* linkage) {
1184 SimplifiedLowering lowering(data->jsgraph(), data->broker(), temp_zone,
1185 data->source_positions(), data->node_origins(),
1186 &data->info()->tick_counter(), linkage,
1187 data->info(), data->observe_node_manager());
1188
1189 // RepresentationChanger accesses the heap.
1190 UnparkedScopeIfNeeded scope(data->broker());
1191
1192 lowering.LowerAllNodes();
1193 }
1194};
1195
1198
1199 void Run(TFPipelineData* data, Zone* temp_zone) {
1200 GraphTrimmer trimmer(temp_zone, data->graph());
1201 NodeVector roots(temp_zone);
1202 data->jsgraph()->GetCachedNodes(&roots);
1203 {
1204 UnparkedScopeIfNeeded scope(data->broker(),
1205 v8_flags.trace_turbo_trimming);
1206 trimmer.TrimGraph(roots.begin(), roots.end());
1207 }
1208
1209 LoopTree* loop_tree = LoopFinder::BuildLoopTree(
1210 data->jsgraph()->graph(), &data->info()->tick_counter(), temp_zone);
1211 // We call the typer inside of PeelInnerLoopsOfTree which inspects heap
1212 // objects, so we need to unpark the local heap.
1213 UnparkedScopeIfNeeded scope(data->broker());
1214 LoopPeeler(data->graph(), data->common(), loop_tree, temp_zone,
1215 data->source_positions(), data->node_origins())
1216 .PeelInnerLoopsOfTree();
1217 }
1218};
1219
1220struct LoopExitEliminationPhase {
1221 DECL_PIPELINE_PHASE_CONSTANTS(LoopExitElimination)
1222
1223 void Run(TFPipelineData* data, Zone* temp_zone) {
1224 LoopPeeler::EliminateLoopExits(data->graph(), temp_zone);
1225 }
1226};
1227
1228struct GenericLoweringPhase {
1229 DECL_PIPELINE_PHASE_CONSTANTS(GenericLowering)
1230
1231 void Run(TFPipelineData* data, Zone* temp_zone) {
1232 GraphReducer graph_reducer(
1233 temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
1234 data->jsgraph()->Dead(), data->observe_node_manager());
1235 JSGenericLowering generic_lowering(data->jsgraph(), &graph_reducer,
1236 data->broker());
1237 AddReducer(data, &graph_reducer, &generic_lowering);
1238
1239 // JSGenericLowering accesses the heap due to ObjectRef's type checks.
1240 UnparkedScopeIfNeeded scope(data->broker());
1241
1242 graph_reducer.ReduceGraph();
1243 }
1244};
1245
1246struct EarlyOptimizationPhase {
1247 DECL_PIPELINE_PHASE_CONSTANTS(EarlyOptimization)
1248
1249 void Run(TFPipelineData* data, Zone* temp_zone) {
1250 GraphReducer graph_reducer(
1251 temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
1252 data->jsgraph()->Dead(), data->observe_node_manager());
1253 DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1254 data->common(), temp_zone);
1255 SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph(),
1256 data->broker(),
1257 BranchSemantics::kMachine);
1258 RedundancyElimination redundancy_elimination(&graph_reducer,
1259 data->jsgraph(), temp_zone);
1260 ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
1261 MachineOperatorReducer machine_reducer(
1262 &graph_reducer, data->jsgraph(),
1263 MachineOperatorReducer::kPropagateSignallingNan);
1264 CommonOperatorReducer common_reducer(
1265 &graph_reducer, data->graph(), data->broker(), data->common(),
1266 data->machine(), temp_zone, BranchSemantics::kMachine);
1267 AddReducer(data, &graph_reducer, &dead_code_elimination);
1268 AddReducer(data, &graph_reducer, &simple_reducer);
1269 AddReducer(data, &graph_reducer, &redundancy_elimination);
1270 AddReducer(data, &graph_reducer, &machine_reducer);
1271 AddReducer(data, &graph_reducer, &common_reducer);
1272 AddReducer(data, &graph_reducer, &value_numbering);
1273 graph_reducer.ReduceGraph();
1274 }
1275};
1276
1279
1280 void Run(TFPipelineData* data, Zone* temp_zone) {
1281 GraphReducer graph_reducer(
1282 temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
1283 data->jsgraph()->Dead(), data->observe_node_manager());
1284 BranchElimination branch_condition_elimination(
1285 &graph_reducer, data->jsgraph(), temp_zone, BranchElimination::kEARLY);
1286 DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1287 data->common(), temp_zone);
1288 RedundancyElimination redundancy_elimination(&graph_reducer,
1289 data->jsgraph(), temp_zone);
1290 LoadElimination load_elimination(&graph_reducer, data->broker(),
1291 data->jsgraph(), temp_zone);
1292 CheckpointElimination checkpoint_elimination(&graph_reducer);
1293 ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
1294 CommonOperatorReducer common_reducer(
1295 &graph_reducer, data->graph(), data->broker(), data->common(),
1296 data->machine(), temp_zone, BranchSemantics::kJS);
1297 TypedOptimization typed_optimization(&graph_reducer, data->dependencies(),
1298 data->jsgraph(), data->broker());
1299 ConstantFoldingReducer constant_folding_reducer(
1300 &graph_reducer, data->jsgraph(), data->broker());
1301 TypeNarrowingReducer type_narrowing_reducer(&graph_reducer, data->jsgraph(),
1302 data->broker());
1303
1304 AddReducer(data, &graph_reducer, &branch_condition_elimination);
1305 AddReducer(data, &graph_reducer, &dead_code_elimination);
1306 AddReducer(data, &graph_reducer, &redundancy_elimination);
1307 AddReducer(data, &graph_reducer, &load_elimination);
1308 AddReducer(data, &graph_reducer, &type_narrowing_reducer);
1309 AddReducer(data, &graph_reducer, &constant_folding_reducer);
1310 AddReducer(data, &graph_reducer, &typed_optimization);
1311 AddReducer(data, &graph_reducer, &checkpoint_elimination);
1312 AddReducer(data, &graph_reducer, &common_reducer);
1313 AddReducer(data, &graph_reducer, &value_numbering);
1314
1315 // ConstantFoldingReducer and TypedOptimization access the heap.
1316 UnparkedScopeIfNeeded scope(data->broker());
1317
1318 graph_reducer.ReduceGraph();
1319 }
1320};
1321
1322struct MemoryOptimizationPhase {
1323 DECL_PIPELINE_PHASE_CONSTANTS(MemoryOptimization)
1324
1325 void Run(TFPipelineData* data, Zone* temp_zone) {
1326 // The memory optimizer requires the graphs to be trimmed, so trim now.
1327 GraphTrimmer trimmer(temp_zone, data->graph());
1328 NodeVector roots(temp_zone);
1329 data->jsgraph()->GetCachedNodes(&roots);
1330 {
1331 UnparkedScopeIfNeeded scope(data->broker(),
1332 v8_flags.trace_turbo_trimming);
1333 trimmer.TrimGraph(roots.begin(), roots.end());
1334 }
1335
1336 // Optimize allocations and load/store operations.
1337#if V8_ENABLE_WEBASSEMBLY
1338 bool is_wasm = data->info()->IsWasm() || data->info()->IsWasmBuiltin();
1339#else
1340 bool is_wasm = false;
1341#endif
1342 MemoryOptimizer optimizer(
1343 data->broker(), data->jsgraph(), temp_zone,
1344 data->info()->allocation_folding()
1345 ? MemoryLowering::AllocationFolding::kDoAllocationFolding
1346 : MemoryLowering::AllocationFolding::kDontAllocationFolding,
1347 data->debug_name(), &data->info()->tick_counter(), is_wasm);
1348 optimizer.Optimize();
1349 }
1350};
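// Flag plumbing sketch: --turbo-allocation-folding sets
// set_allocation_folding() on the OptimizedCompilationInfo during job
// preparation above, which is what selects kDoAllocationFolding here:
//
//   data->info()->allocation_folding()
//       ? MemoryLowering::AllocationFolding::kDoAllocationFolding
//       : MemoryLowering::AllocationFolding::kDontAllocationFolding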
1351
1352#if V8_ENABLE_WEBASSEMBLY
1353struct WasmTypingPhase {
1354 DECL_PIPELINE_PHASE_CONSTANTS(WasmTyping)
1355
1356 void Run(TFPipelineData* data, Zone* temp_zone, uint32_t function_index) {
1357 MachineGraph* mcgraph = data->mcgraph() ? data->mcgraph() : data->jsgraph();
1358 GraphReducer graph_reducer(
1359 temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
1360 data->jsgraph()->Dead(), data->observe_node_manager());
1361 WasmTyper typer(&graph_reducer, mcgraph, function_index);
1362 AddReducer(data, &graph_reducer, &typer);
1363 graph_reducer.ReduceGraph();
1364 }
1365};
1366
1367struct WasmGCOptimizationPhase {
1368 DECL_PIPELINE_PHASE_CONSTANTS(WasmGCOptimization)
1369
1370 void Run(TFPipelineData* data, Zone* temp_zone,
1371 const wasm::WasmModule* module, MachineGraph* mcgraph) {
1372 GraphReducer graph_reducer(
1373 temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
1374 data->jsgraph()->Dead(), data->observe_node_manager());
1375 WasmLoadElimination load_elimination(&graph_reducer, data->jsgraph(),
1376 temp_zone);
1377 WasmGCOperatorReducer wasm_gc(&graph_reducer, temp_zone, mcgraph, module,
1378 data->source_positions());
1379 DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1380 data->common(), temp_zone);
1381 AddReducer(data, &graph_reducer, &load_elimination);
1382 AddReducer(data, &graph_reducer, &wasm_gc);
1383 AddReducer(data, &graph_reducer, &dead_code_elimination);
1384 graph_reducer.ReduceGraph();
1385 }
1386};
1387
1388struct SimplifyLoopsPhase {
1389 DECL_PIPELINE_PHASE_CONSTANTS(SimplifyLoops)
1390
1391 void Run(TFPipelineData* data, Zone* temp_zone) {
1392 GraphReducer graph_reducer(
1393 temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
1394 data->jsgraph()->Dead(), data->observe_node_manager());
1395 SimplifyTFLoops simplify_loops(&graph_reducer, data->mcgraph());
1396 AddReducer(data, &graph_reducer, &simplify_loops);
1397 graph_reducer.ReduceGraph();
1398 }
1399};
1400
1401struct WasmGCLoweringPhase {
1402 DECL_PIPELINE_PHASE_CONSTANTS(WasmGCLowering)
1403
1404 void Run(TFPipelineData* data, Zone* temp_zone,
1405 const wasm::WasmModule* module) {
1406 GraphReducer graph_reducer(
1407 temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
1408 data->jsgraph()->Dead(), data->observe_node_manager());
1409 WasmGCLowering lowering(&graph_reducer, data->mcgraph(), module, false,
1410 data->source_positions());
1411 DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1412 data->common(), temp_zone);
1413 AddReducer(data, &graph_reducer, &lowering);
1414 AddReducer(data, &graph_reducer, &dead_code_elimination);
1415 graph_reducer.ReduceGraph();
1416 }
1417};
1418
1419struct WasmOptimizationPhase {
1420 DECL_PIPELINE_PHASE_CONSTANTS(WasmOptimization)
1421
1422 void Run(TFPipelineData* data, Zone* temp_zone,
1423 MachineOperatorReducer::SignallingNanPropagation
1424 signalling_nan_propagation,
1425 wasm::WasmDetectedFeatures detected_features) {
1426 // Run optimizations in two rounds: First one around load elimination and
1427 // then one around branch elimination. This is because those two
1428 // optimizations sometimes display quadratic complexity when run together.
1429 // We only need load elimination for managed objects.
1430 if (detected_features.has_gc()) {
1431 GraphReducer graph_reducer(temp_zone, data->graph(),
1432 &data->info()->tick_counter(), data->broker(),
1433 data->jsgraph()->Dead(),
1434 data->observe_node_manager());
1435 MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph(),
1436 signalling_nan_propagation);
1437 DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1438 data->common(), temp_zone);
1439 CommonOperatorReducer common_reducer(
1440 &graph_reducer, data->graph(), data->broker(), data->common(),
1441 data->machine(), temp_zone, BranchSemantics::kMachine);
1442 ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
1443 CsaLoadElimination load_elimination(&graph_reducer, data->jsgraph(),
1444 temp_zone);
1445 WasmEscapeAnalysis escape(&graph_reducer, data->mcgraph());
1446 AddReducer(data, &graph_reducer, &machine_reducer);
1447 AddReducer(data, &graph_reducer, &dead_code_elimination);
1448 AddReducer(data, &graph_reducer, &common_reducer);
1449 AddReducer(data, &graph_reducer, &value_numbering);
1450 AddReducer(data, &graph_reducer, &load_elimination);
1451 AddReducer(data, &graph_reducer, &escape);
1452 graph_reducer.ReduceGraph();
1453 }
1454 {
1455 GraphReducer graph_reducer(temp_zone, data->graph(),
1456 &data->info()->tick_counter(), data->broker(),
1457 data->jsgraph()->Dead(),
1458 data->observe_node_manager());
1459 MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph(),
1460 signalling_nan_propagation);
1461 DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1462 data->common(), temp_zone);
1463 CommonOperatorReducer common_reducer(
1464 &graph_reducer, data->graph(), data->broker(), data->common(),
1465 data->machine(), temp_zone, BranchSemantics::kMachine);
1466 ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
1467 BranchElimination branch_condition_elimination(
1468 &graph_reducer, data->jsgraph(), temp_zone);
1469 AddReducer(data, &graph_reducer, &machine_reducer);
1470 AddReducer(data, &graph_reducer, &dead_code_elimination);
1471 AddReducer(data, &graph_reducer, &common_reducer);
1472 AddReducer(data, &graph_reducer, &value_numbering);
1473 AddReducer(data, &graph_reducer, &branch_condition_elimination);
1474 graph_reducer.ReduceGraph();
1475 }
1476 }
1477};
1478#endif // V8_ENABLE_WEBASSEMBLY
1479
1482
1483 void Run(TFPipelineData* data, Zone* temp_zone) {
1484 Schedule* schedule = Scheduler::ComputeSchedule(
1485 temp_zone, data->graph(),
1486 data->info()->splitting() ? Scheduler::kSplitNodes
1487 : Scheduler::kNoFlags,
1488 &data->info()->tick_counter(), data->profile_data());
1489 data->set_schedule(schedule);
1490 }
1491};
1492
1493#if V8_ENABLE_WASM_SIMD256_REVEC
1494struct RevectorizePhase {
1495 DECL_PIPELINE_PHASE_CONSTANTS(Revectorizer)
1496
1497 void Run(TFPipelineData* data, Zone* temp_zone) {
1498 Revectorizer revec(temp_zone, data->graph(), data->mcgraph(),
1499 data->source_positions());
1500 revec.TryRevectorize(data->info()->GetDebugName().get());
1501 }
1502};
1503#endif // V8_ENABLE_WASM_SIMD256_REVEC
1504
1507
1508 void Run(TFPipelineData* data, Zone* temp_zone, const char* phase) {
1509 OptimizedCompilationInfo* info = data->info();
1510 TFGraph* graph = data->graph();
1511 if (info->trace_turbo_json()) { // Print JSON.
1512 UnparkedScopeIfNeeded scope(data->broker());
1513 AllowHandleDereference allow_deref;
1514
1515 TurboJsonFile json_of(info, std::ios_base::app);
1516 json_of << "{\"name\":\"" << phase << "\",\"type\":\"graph\",\"data\":"
1517 << AsJSON(*graph, data->source_positions(), data->node_origins())
1518 << "},\n";
1519 }
1520
1521 if (info->trace_turbo_scheduled()) {
1522 AccountingAllocator allocator;
1523 Schedule* schedule = data->schedule();
1524 if (schedule == nullptr) {
1525 schedule = Scheduler::ComputeSchedule(
1526 temp_zone, data->graph(), Scheduler::kNoFlags,
1527 &info->tick_counter(), data->profile_data());
1528 }
1529
1530 UnparkedScopeIfNeeded scope(data->broker());
1531 AllowHandleDereference allow_deref;
1532 CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
1533 tracing_scope.stream()
1534 << "----- Graph after " << phase << " ----- " << std::endl
1536 } else if (info->trace_turbo_graph()) { // Simple textual RPO.
1537 UnparkedScopeIfNeeded scope(data->broker());
1538 AllowHandleDereference allow_deref;
1539 CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
1540 tracing_scope.stream()
1541 << "----- Graph after " << phase << " ----- " << std::endl
1542 << AsRPO(*graph);
1543 }
1544 }
1545};
1546
1549
1550 void Run(TFPipelineData* data, Zone* temp_zone, const bool untyped,
1551 bool values_only = false) {
1552 Verifier::CodeType code_type;
1553 switch (data->info()->code_kind()) {
1554 case CodeKind::WASM_FUNCTION:
1555 case CodeKind::WASM_TO_CAPI_FUNCTION:
1556 case CodeKind::WASM_TO_JS_FUNCTION:
1557 case CodeKind::JS_TO_WASM_FUNCTION:
1558 case CodeKind::C_WASM_ENTRY:
1559 code_type = Verifier::kWasm;
1560 break;
1561 default:
1562 code_type = Verifier::kDefault;
1563 }
1564 Verifier::Run(data->graph(), !untyped ? Verifier::TYPED : Verifier::UNTYPED,
1565 values_only ? Verifier::kValuesOnly : Verifier::kAll,
1566 code_type);
1567 }
1568};
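// Dispatch sketch for the switch above (hypothetical invocation; only the
// resulting Verifier::Run call is shown):
//
//   // untyped == true, values_only == false, a JS code kind:
//   Verifier::Run(data->graph(), Verifier::UNTYPED, Verifier::kAll,
//                 Verifier::kDefault);
//   // Any WASM_* or JS_TO_WASM_FUNCTION code kind selects Verifier::kWasm
//   // instead of Verifier::kDefault.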
1569
1570#undef DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS
1571#undef DECL_PIPELINE_PHASE_CONSTANTS
1572#undef DECL_PIPELINE_PHASE_CONSTANTS_HELPER
1573
1574#if V8_ENABLE_WEBASSEMBLY
1575class WasmHeapStubCompilationJob final : public TurbofanCompilationJob {
1576 public:
1577 WasmHeapStubCompilationJob(Isolate* isolate, CallDescriptor* call_descriptor,
1578 std::unique_ptr<Zone> zone, TFGraph* graph,
1579 CodeKind kind, std::unique_ptr<char[]> debug_name,
1580 const AssemblerOptions& options)
1581 // Note that the OptimizedCompilationInfo is not initialized at the time
1582 // we pass it to the CompilationJob constructor, but it is not
1583 // dereferenced there.
1584 : TurbofanCompilationJob(isolate, &info_,
1585 CompilationJob::State::kReadyToExecute),
1586 debug_name_(std::move(debug_name)),
1587 info_(base::CStrVector(debug_name_.get()), graph->zone(), kind),
1588 call_descriptor_(call_descriptor),
1589 zone_stats_(zone->allocator()),
1590 zone_(std::move(zone)),
1591 graph_(graph),
1592 turboshaft_data_(&zone_stats_,
1593 turboshaft::TurboshaftPipelineKind::kWasm, isolate,
1594 &info_, options),
1595 data_(&zone_stats_, &info_, isolate, wasm::GetWasmEngine()->allocator(),
1596 graph_, nullptr, nullptr, nullptr,
1597 zone_->New<NodeOriginTable>(graph_), nullptr, options, nullptr),
1598 pipeline_(&data_) {}
1599
1600 WasmHeapStubCompilationJob(const WasmHeapStubCompilationJob&) = delete;
1601 WasmHeapStubCompilationJob& operator=(const WasmHeapStubCompilationJob&) =
1602 delete;
1603
1604 protected:
1605 Status PrepareJobImpl(Isolate* isolate) final;
1606 Status ExecuteJobImpl(RuntimeCallStats* stats,
1607 LocalIsolate* local_isolate) final;
1608 Status FinalizeJobImpl(Isolate* isolate) final;
1609
1610 private:
1611 const std::unique_ptr<char[]> debug_name_;
1612 OptimizedCompilationInfo info_;
1613 CallDescriptor* const call_descriptor_;
1614 ZoneStats zone_stats_;
1615 const std::unique_ptr<Zone> zone_;
1616 TFGraph* const graph_;
1617 turboshaft::PipelineData turboshaft_data_;
1618 TFPipelineData data_;
1619 PipelineImpl pipeline_;
1620};
1621
1622#if V8_ENABLE_WEBASSEMBLY
1623class WasmTurboshaftWrapperCompilationJob final
1624 : public turboshaft::TurboshaftCompilationJob {
1625 public:
1626 WasmTurboshaftWrapperCompilationJob(Isolate* isolate,
1627 const wasm::CanonicalSig* sig,
1628 wasm::WrapperCompilationInfo wrapper_info,
1629 std::unique_ptr<char[]> debug_name,
1630 const AssemblerOptions& options)
1631 // Note that the OptimizedCompilationInfo is not initialized at the time
1632 // we pass it to the CompilationJob constructor, but it is not
1633 // dereferenced there.
1634 : TurboshaftCompilationJob(&info_,
1635 CompilationJob::State::kReadyToExecute),
1636 zone_(wasm::GetWasmEngine()->allocator(), ZONE_NAME),
1637 debug_name_(std::move(debug_name)),
1638 info_(base::CStrVector(debug_name_.get()), &zone_,
1639 wrapper_info.code_kind),
1640 sig_(sig),
1641 wrapper_info_(wrapper_info),
1642 call_descriptor_(
1643 GetCallDescriptor(&zone_, sig, wrapper_info_.code_kind)),
1644 zone_stats_(zone_.allocator()),
1645 turboshaft_data_(
1646 &zone_stats_,
1647 wrapper_info_.code_kind == CodeKind::JS_TO_WASM_FUNCTION
1648 ? turboshaft::TurboshaftPipelineKind::kJSToWasm
1649 : turboshaft::TurboshaftPipelineKind::kWasm,
1650 isolate, &info_, options),
1651 data_(&zone_stats_, &info_, isolate, wasm::GetWasmEngine()->allocator(),
1652 nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, options,
1653 nullptr),
1654 pipeline_(&data_) {}
1655
1656 WasmTurboshaftWrapperCompilationJob(
1657 const WasmTurboshaftWrapperCompilationJob&) = delete;
1658 WasmTurboshaftWrapperCompilationJob& operator=(
1659 const WasmTurboshaftWrapperCompilationJob&) = delete;
1660
1661 protected:
1662 Status PrepareJobImpl(Isolate* isolate) final;
1663 Status ExecuteJobImpl(RuntimeCallStats* stats,
1664 LocalIsolate* local_isolate) final;
1665 Status FinalizeJobImpl(Isolate* isolate) final;
1666
1667 private:
1668 static CallDescriptor* GetCallDescriptor(Zone* zone,
1669 const wasm::CanonicalSig* sig,
1670 CodeKind code_kind) {
1671 if (code_kind == CodeKind::WASM_TO_JS_FUNCTION) {
1672 CallDescriptor* call_descriptor = compiler::GetWasmCallDescriptor(
1673 zone, sig, WasmCallKind::kWasmImportWrapper);
1674 return Is64() ? call_descriptor
1675 : GetI32WasmCallDescriptor(zone, call_descriptor);
1676 }
1677 DCHECK_EQ(code_kind, CodeKind::JS_TO_WASM_FUNCTION);
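 // The parameter count is sig->parameter_count() + 1 because the JS calling
 // convention counts the receiver as an extra parameter.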
1678 return Linkage::GetJSCallDescriptor(
1679 zone, false, static_cast<int>(sig->parameter_count()) + 1,
1680 CallDescriptor::kNoFlags);
1681 }
1682
1683 Zone zone_;
1684 const std::unique_ptr<char[]> debug_name_;
1685 OptimizedCompilationInfo info_;
1686 const wasm::CanonicalSig* const sig_;
1687 const wasm::WrapperCompilationInfo wrapper_info_;
1688 CallDescriptor* const call_descriptor_; // Incoming call descriptor.
1689 ZoneStats zone_stats_;
1690 turboshaft::PipelineData turboshaft_data_;
1691 TFPipelineData data_;
1692 PipelineImpl pipeline_;
1693 std::unique_ptr<TurbofanPipelineStatistics> pipeline_statistics_;
1694};
1695
1696// static
1697std::unique_ptr<TurbofanCompilationJob> Pipeline::NewWasmHeapStubCompilationJob(
1698 Isolate* isolate, CallDescriptor* call_descriptor,
1699 std::unique_ptr<Zone> zone, TFGraph* graph, CodeKind kind,
1700 std::unique_ptr<char[]> debug_name, const AssemblerOptions& options) {
1701 return std::make_unique<WasmHeapStubCompilationJob>(
1702 isolate, call_descriptor, std::move(zone), graph, kind,
1703 std::move(debug_name), options);
1704}
1705
1706// static
1707std::unique_ptr<turboshaft::TurboshaftCompilationJob>
1708Pipeline::NewWasmTurboshaftWrapperCompilationJob(
1709 Isolate* isolate, const wasm::CanonicalSig* sig,
1710 wasm::WrapperCompilationInfo wrapper_info,
1711 std::unique_ptr<char[]> debug_name, const AssemblerOptions& options) {
1712 return std::make_unique<WasmTurboshaftWrapperCompilationJob>(
1713 isolate, sig, wrapper_info, std::move(debug_name), options);
1714}
1715#endif
1716
1717CompilationJob::Status WasmHeapStubCompilationJob::PrepareJobImpl(
1718 Isolate* isolate) {
1719 UNREACHABLE();
1720}
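// Both wrapper jobs skip the prepare step: the WasmTurboshaftWrapperCompilationJob
// constructor above creates the job directly in the kReadyToExecute state, and the
// heap stub job presumably does the same, so PrepareJobImpl is never reached and
// callers go straight to execution and finalization.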
1721
1722namespace {
1723// Temporary helpers for logic shared by the TurboFan and Turboshaft wrapper
1724// compilation jobs. Remove them once wrappers are fully ported to Turboshaft.
1725void TraceWrapperCompilation(const char* compiler,
1726 OptimizedCompilationInfo* info,
1727 TFPipelineData* data) {
1728 if (info->trace_turbo_json() || info->trace_turbo_graph()) {
1729 CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
1730 tracing_scope.stream()
1731 << "---------------------------------------------------\n"
1732 << "Begin compiling method " << info->GetDebugName().get() << " using "
1733 << compiler << std::endl;
1734 }
1735
1736 if (info->trace_turbo_json()) {
1737 TurboJsonFile json_of(info, std::ios_base::trunc);
1738 json_of << "{\"function\":\"" << info->GetDebugName().get()
1739 << "\", \"source\":\"\",\n\"phases\":[";
1740 }
1741}
1742
1743void TraceWrapperCompilation(OptimizedCompilationInfo* info,
1744 turboshaft::PipelineData* data) {
1745 if (info->trace_turbo_json() || info->trace_turbo_graph()) {
1746 CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
1747 tracing_scope.stream()
1748 << "---------------------------------------------------\n"
1749 << "Begin compiling method " << info->GetDebugName().get()
1750 << " using Turboshaft" << std::endl;
1751 }
1752
1753 if (info->trace_turbo_json()) {
1754 TurboJsonFile json_of(info, std::ios_base::trunc);
1755 json_of << "{\"function\":\"" << info->GetDebugName().get()
1756 << "\", \"source\":\"\",\n\"phases\":[";
1757 }
1758}
1759
1760CompilationJob::Status FinalizeWrapperCompilation(
1761 turboshaft::PipelineData* turboshaft_data, OptimizedCompilationInfo* info,
1762 CallDescriptor* call_descriptor, Isolate* isolate,
1763 const char* method_name) {
1765 turboshaft::Pipeline pipeline(turboshaft_data);
1766 if (!pipeline.FinalizeCode(call_descriptor).ToHandle(&code)) {
1767 V8::FatalProcessOutOfMemory(isolate, method_name);
1768 }
1769 DCHECK_NULL(turboshaft_data->depedencies());
1770 info->SetCode(code);
1771#ifdef ENABLE_DISASSEMBLER
1772 if (v8_flags.print_wasm_code) {
1773 CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
1774 code->Disassemble(info->GetDebugName().get(), tracing_scope.stream(),
1775 isolate);
1776 }
1777#endif
1778
1779 if (isolate->IsLoggingCodeCreation()) {
1780 PROFILE(isolate, CodeCreateEvent(LogEventListener::CodeTag::kStub,
1781 Cast<AbstractCode>(code),
1782 info->GetDebugName().get()));
1783 }
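 // GetTaggedParameterSlots() packs two 16-bit values: the number of tagged
 // parameter slots in the low half and the index of the first tagged slot in
 // the high half; the two setters below unpack them for the wasm-to-JS wrapper.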
1784 if (code->kind() == CodeKind::WASM_TO_JS_FUNCTION) {
1785 code->set_wasm_js_tagged_parameter_count(
1786 call_descriptor->GetTaggedParameterSlots() & 0xffff);
1787 code->set_wasm_js_first_tagged_parameter(
1788 call_descriptor->GetTaggedParameterSlots() >> 16);
1789 }
 1790 return CompilationJob::SUCCEEDED;
 1791}
1792} // namespace
1793
1794CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl(
1795 RuntimeCallStats* stats, LocalIsolate* local_isolate) {
1796 std::unique_ptr<TurbofanPipelineStatistics> pipeline_statistics;
1797 if (v8_flags.turbo_stats || v8_flags.turbo_stats_nvp) {
1798 pipeline_statistics.reset(new TurbofanPipelineStatistics(
1799 &info_, wasm::GetWasmEngine()->GetOrCreateTurboStatistics(),
1800 &zone_stats_));
1801 pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen");
1802 }
1803 TraceWrapperCompilation("Turbofan", &info_, &data_);
1804 pipeline_.RunPrintAndVerify("V8.WasmMachineCode", true);
1805 if (!pipeline_.Run<MemoryOptimizationPhase>()) return FAILED;
1806 if (!pipeline_.ComputeScheduledGraph()) return FAILED;
1807
1808 turboshaft::Pipeline turboshaft_pipeline(&turboshaft_data_);
1809
1810 // We convert the turbofan graph to turboshaft.
1811 Linkage linkage(call_descriptor_);
1812 if (!turboshaft_pipeline.CreateGraphFromTurbofan(&data_, &linkage)) {
1813 return FAILED;
1814 }
1815
1816 // We need to run simplification to normalize some patterns for instruction
1817 // selection (e.g. loads and stores).
1818 if (!turboshaft_pipeline.RunSimplificationAndNormalizationPhase()) {
1819 return FAILED;
1820 }
1821
1822 const bool success = GenerateCodeFromTurboshaftGraph(
1823 &linkage, turboshaft_pipeline, &pipeline_, data_.osr_helper_ptr());
1824 return success ? SUCCEEDED : FAILED;
1825}
1826
1827CompilationJob::Status WasmHeapStubCompilationJob::FinalizeJobImpl(
1828 Isolate* isolate) {
1829 return FinalizeWrapperCompilation(
1830 &turboshaft_data_, &info_, call_descriptor_, isolate,
1831 "WasmHeapStubCompilationJob::FinalizeJobImpl");
1832}
1833
1834CompilationJob::Status WasmTurboshaftWrapperCompilationJob::PrepareJobImpl(
1835 Isolate* isolate) {
1836 UNREACHABLE();
1837}
1838
1839CompilationJob::Status WasmTurboshaftWrapperCompilationJob::ExecuteJobImpl(
1840 RuntimeCallStats* stats, LocalIsolate* local_isolate) {
1841 if (v8_flags.turbo_stats || v8_flags.turbo_stats_nvp) {
1842 DCHECK_NULL(pipeline_statistics_);
1843 pipeline_statistics_ = std::make_unique<TurbofanPipelineStatistics>(
1844 &info_, wasm::GetWasmEngine()->GetOrCreateTurboStatistics(),
1845 &zone_stats_);
1846 pipeline_statistics_->BeginPhaseKind("V8.WasmStubCodegen");
1847 }
1848 TraceWrapperCompilation(&info_, &turboshaft_data_);
1849 Linkage linkage(call_descriptor_);
1850
1851 turboshaft_data_.set_pipeline_statistics(pipeline_statistics_.get());
1852 turboshaft_data_.SetIsWasmWrapper(sig_);
1853
1854 AccountingAllocator allocator;
1857 sig_, wrapper_info_);
1858 CodeTracer* code_tracer = nullptr;
1859 if (info_.trace_turbo_graph()) {
1860 // NOTE: We must not call `GetCodeTracer` if tracing is not enabled,
1861 // because it may not yet be initialized then and doing so from the
1862 // background thread is not threadsafe.
1863 code_tracer = turboshaft_data_.GetCodeTracer();
1864 }
1865 Zone printing_zone(&allocator, ZONE_NAME);
1867 code_tracer, "Graph generation");
1868
1869 turboshaft::Pipeline turboshaft_pipeline(&turboshaft_data_);
1870 // Skip the LoopUnrolling, WasmGCOptimize and WasmLowering phases for
1871 // wrappers.
1872 // TODO(14108): Do we need value numbering if wasm_opt is turned off?
1873 if (v8_flags.wasm_opt) {
1874 if (!turboshaft_pipeline.Run<turboshaft::WasmOptimizePhase>()) {
1875 return FAILED;
1876 }
1877 }
1878#if DEBUG
1879 if (!v8_flags.wasm_opt) {
1880 // We still need to lower allocation operations even with optimizations
1881 // being turned off.
1882 if (!turboshaft_pipeline.Run<turboshaft::WasmDebugMemoryLoweringPhase>()) {
1883 return FAILED;
1884 }
1885 }
1886#endif
1887
1888 if (!Is64()) {
1889 if (!turboshaft_pipeline.Run<turboshaft::Int64LoweringPhase>()) {
1890 return FAILED;
1891 }
1892 }
1893
1894 if (!turboshaft_pipeline.Run<turboshaft::WasmDeadCodeEliminationPhase>()) {
1895 return FAILED;
1896 }
1897
1898 if (V8_UNLIKELY(v8_flags.turboshaft_enable_debug_features)) {
1899 // This phase has to run very late to allow all previous phases to use
1900 // debug features.
1901 if (!turboshaft_pipeline.Run<turboshaft::DebugFeatureLoweringPhase>()) {
1902 return FAILED;
1903 }
1904 }
1905
1906 turboshaft_pipeline.BeginPhaseKind("V8.InstructionSelection");
1907
1908 const bool success = GenerateCodeFromTurboshaftGraph(
1909 &linkage, turboshaft_pipeline, &pipeline_);
1910 return success ? SUCCEEDED : FAILED;
1911}
1912
1913CompilationJob::Status WasmTurboshaftWrapperCompilationJob::FinalizeJobImpl(
1914 Isolate* isolate) {
1915 return FinalizeWrapperCompilation(
1916 &turboshaft_data_, &info_, call_descriptor_, isolate,
1917 "WasmTurboshaftWrapperCompilationJob::FinalizeJobImpl");
1918}
1919
1920#endif // V8_ENABLE_WEBASSEMBLY
1921
1922void PipelineImpl::RunPrintAndVerify(const char* phase, bool untyped) {
1923 if (info()->trace_turbo_json() || info()->trace_turbo_graph()) {
1924 USE(Run<PrintGraphPhase>(phase));
1925 }
1926 if (v8_flags.turbo_verify) {
1927 USE(Run<VerifyGraphPhase>(untyped));
1928 }
1929}
1930
1932 TFPipelineData* data = data_;
1933
1934 data->BeginPhaseKind("V8.TFBrokerInitAndSerialization");
1935
1936 if (info()->trace_turbo_json() || info()->trace_turbo_graph()) {
1937 CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
1938 tracing_scope.stream()
1939 << "---------------------------------------------------\n"
1940 << "Begin compiling method " << info()->GetDebugName().get()
1941 << " using TurboFan" << std::endl;
1942 }
1943 if (info()->trace_turbo_json()) {
1944 TurboCfgFile tcf(isolate());
1945 tcf << AsC1VCompilation(info());
1946 }
1947 if (data->info()->has_bytecode_array()) {
1948 if (data->info()->bytecode_array()->SourcePositionTable()->DataSize() ==
1949 0) {
1950 data->source_positions()->Disable();
1951 }
1952 data->source_positions()->AddDecorator();
1953 }
1954 if (data->info()->trace_turbo_json()) {
1955 data->node_origins()->AddDecorator();
1956 }
1957
1959 data->broker()->StopSerializing();
1960 data->EndPhaseKind();
1961 return !info()->was_cancelled();
1962}
1963
1965 DCHECK(!v8_flags.turbolev);
1966 TFPipelineData* data = this->data_;
1967 UnparkedScopeIfNeeded unparked_scope(data->broker());
1968
1969 data->BeginPhaseKind("V8.TFGraphCreation");
1970
1972 RunPrintAndVerify(GraphBuilderPhase::phase_name(), true);
1973
1974 // Perform function context specialization and inlining (if enabled).
1976 RunPrintAndVerify(InliningPhase::phase_name(), true);
1977
1978 // Determine the Typer operation flags.
1979 {
1980 SharedFunctionInfoRef shared_info =
1981 MakeRef(data->broker(), info()->shared_info());
1982 if (is_sloppy(shared_info.language_mode()) &&
1983 shared_info.IsUserJavaScript()) {
1984 // Sloppy mode functions always have an Object for this.
1985 data->AddTyperFlag(Typer::kThisIsReceiver);
1986 }
1987 if (IsClassConstructor(shared_info.kind())) {
1988 // Class constructors cannot be [[Call]]ed.
1989 data->AddTyperFlag(Typer::kNewTargetIsReceiver);
1990 }
1991 }
1992
1993 data->EndPhaseKind();
1994
1995 return !info()->was_cancelled();
1996}
1997
1999 DCHECK(!v8_flags.turbolev);
2000 TFPipelineData* data = this->data_;
2001
2002 data->BeginPhaseKind("V8.TFLowering");
2003
2004 // Trim the graph before typing to ensure all nodes are typed.
2006 RunPrintAndVerify(EarlyGraphTrimmingPhase::phase_name(), true);
2007
2008 // Type the graph and keep the Typer running such that new nodes get
2009 // automatically typed when they are created.
2010 RUN_MAYBE_ABORT(TyperPhase, data->CreateTyper());
2011 RunPrintAndVerify(TyperPhase::phase_name());
2012
2014 RunPrintAndVerify(TypedLoweringPhase::phase_name());
2015
2016 if (data->info()->loop_peeling()) {
2018 RunPrintAndVerify(LoopPeelingPhase::phase_name(), true);
2019 } else {
2021 RunPrintAndVerify(LoopExitEliminationPhase::phase_name(), true);
2022 }
2023
2024 if (v8_flags.turbo_load_elimination) {
2026 RunPrintAndVerify(LoadEliminationPhase::phase_name());
2027 }
2028 data->DeleteTyper();
2029
2030 if (v8_flags.turbo_escape) {
2032 RunPrintAndVerify(EscapeAnalysisPhase::phase_name());
2033 }
2034
2035 if (v8_flags.assert_types) {
2037 RunPrintAndVerify(TypeAssertionsPhase::phase_name());
2038 }
2039
 2040 // Perform simplified lowering. This has to run without the Typer decorator,
 2041 // because we cannot compute meaningful types anyway, and the computed
 2042 // types might even conflict with the representation/truncation logic.
2044 RunPrintAndVerify(SimplifiedLoweringPhase::phase_name(), true);
2045
2046#if V8_ENABLE_WEBASSEMBLY
2047 if (data->has_js_wasm_calls()) {
2048 DCHECK(data->info()->inline_js_wasm_calls());
2049 RUN_MAYBE_ABORT(JSWasmInliningPhase);
2050 RunPrintAndVerify(JSWasmInliningPhase::phase_name(), true);
2051
2052 RUN_MAYBE_ABORT(WasmTypingPhase, -1);
2053 RunPrintAndVerify(WasmTypingPhase::phase_name(), true);
2054
2055 if (v8_flags.wasm_opt) {
2056 RUN_MAYBE_ABORT(WasmGCOptimizationPhase, data->wasm_module_for_inlining(),
2057 data->jsgraph());
2058 RunPrintAndVerify(WasmGCOptimizationPhase::phase_name(), true);
2059 }
2060 RUN_MAYBE_ABORT(JSWasmLoweringPhase);
2061 RunPrintAndVerify(JSWasmLoweringPhase::phase_name(), true);
2062
2063 if (v8_flags.turbo_optimize_inlined_js_wasm_wrappers && v8_flags.wasm_opt) {
2064 wasm::WasmDetectedFeatures detected({wasm::WasmDetectedFeature::gc});
2065 RUN_MAYBE_ABORT(WasmOptimizationPhase,
2067 RunPrintAndVerify(WasmOptimizationPhase::phase_name(), true);
2068 }
2069 }
2070#endif // V8_ENABLE_WEBASSEMBLY
2071
2072 // From now on it is invalid to look at types on the nodes, because the
2073 // types on the nodes might not make sense after representation selection
 2074 // due to the way we handle truncations; if we wanted to look at types
 2075 // afterwards, we'd essentially need to re-type (large portions of) the
2076 // graph.
2077
2078 // In order to catch bugs related to type access after this point, we now
2079 // remove the types from the nodes (currently only in Debug builds).
2080#ifdef DEBUG
2082 RunPrintAndVerify(UntyperPhase::phase_name(), true);
2083#endif
2084
2085 // Run generic lowering pass.
2087 RunPrintAndVerify(GenericLoweringPhase::phase_name(), true);
2088
2089 data->BeginPhaseKind("V8.TFBlockBuilding");
2090
2091 data->InitializeFrameData(linkage->GetIncomingDescriptor());
2092
2093 // Run early optimization pass.
2095 RunPrintAndVerify(EarlyOptimizationPhase::phase_name(), true);
2096
2097 data->source_positions()->RemoveDecorator();
2098 if (data->info()->trace_turbo_json()) {
2099 data->node_origins()->RemoveDecorator();
2100 }
2101
2102 if (!ComputeScheduledGraph()) return false;
2103
2104 return !info()->was_cancelled();
2105}
2106
2107namespace {
2108
2109int HashGraphForPGO(const turboshaft::Graph* graph) {
2110 size_t hash = 0;
2111 for (const turboshaft::Operation& op : graph->AllOperations()) {
2112 VisitOperation(op, [&hash, &graph](const auto& derived) {
2113 const auto op_hash =
2115 hash = turboshaft::fast_hash_combine(hash, op_hash);
2116 // Use for tracing while developing:
2117 constexpr bool kTraceHashing = false;
2118 if constexpr (kTraceHashing) {
2119 std::cout << "[" << std::setw(3) << graph->Index(derived)
2120 << "] Type: " << std::setw(30)
2122 turboshaft::operation_to_opcode_v<decltype(derived)>);
2123 std::cout << " + 0x" << std::setw(20) << std::left << std::hex
2124 << op_hash << " => 0x" << hash << std::dec << std::endl;
2125 }
2126 });
2127 }
2128 return Tagged<Smi>(IntToSmi(static_cast<int>(hash))).value();
2129}
2130
2131// Compute a hash of the given graph, in a way that should provide the same
2132// result in multiple runs of mksnapshot, meaning the hash cannot depend on any
2133// external pointer values or uncompressed heap constants. This hash can be used
2134// to reject profiling data if the builtin's current code doesn't match the
2135// version that was profiled. Hash collisions are not catastrophic; in the worst
2136// case, we just defer some blocks that ideally shouldn't be deferred. The
2137// result value is in the valid Smi range.
2138int HashGraphForPGO(const TFGraph* graph) {
2139 AccountingAllocator allocator;
2140 Zone local_zone(&allocator, ZONE_NAME);
2141
2142 constexpr NodeId kUnassigned = static_cast<NodeId>(-1);
2143
2144 constexpr uint8_t kUnvisited = 0;
2145 constexpr uint8_t kOnStack = 1;
2146 constexpr uint8_t kVisited = 2;
2147
2148 // Do a depth-first post-order traversal of the graph. For every node, hash:
2149 //
2150 // - the node's traversal number
2151 // - the opcode
2152 // - the number of inputs
2153 // - each input node's traversal number
2154 //
2155 // What's a traversal number? We can't use node IDs because they're not stable
2156 // build-to-build, so we assign a new number for each node as it is visited.
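 // For example, if the end node has id 42 in one build and id 57 in another,
 // it still receives traversal number 0 in both, so the resulting hash stays
 // identical as long as the graph shape and opcodes are unchanged.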
2157
2158 ZoneVector<uint8_t> state(graph->NodeCount(), kUnvisited, &local_zone);
2159 ZoneVector<NodeId> traversal_numbers(graph->NodeCount(), kUnassigned,
2160 &local_zone);
2161 ZoneStack<Node*> stack(&local_zone);
2162
2163 NodeId visited_count = 0;
2164 size_t hash = 0;
2165
2166 stack.push(graph->end());
2167 state[graph->end()->id()] = kOnStack;
2168 traversal_numbers[graph->end()->id()] = visited_count++;
2169 while (!stack.empty()) {
2170 Node* n = stack.top();
2171 bool pop = true;
2172 for (Node* const i : n->inputs()) {
2173 if (state[i->id()] == kUnvisited) {
2174 state[i->id()] = kOnStack;
2175 traversal_numbers[i->id()] = visited_count++;
2176 stack.push(i);
2177 pop = false;
2178 break;
2179 }
2180 }
2181 if (pop) {
2182 state[n->id()] = kVisited;
2183 stack.pop();
2184 hash = base::hash_combine(hash, traversal_numbers[n->id()], n->opcode(),
2185 n->InputCount());
2186 for (Node* const i : n->inputs()) {
2187 DCHECK(traversal_numbers[i->id()] != kUnassigned);
2188 hash = base::hash_combine(hash, traversal_numbers[i->id()]);
2189 }
2190 }
2191 }
2192 return Tagged<Smi>(IntToSmi(static_cast<int>(hash))).value();
2193}
2194
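// Dispatches to the matching HashGraphForPGO overload above (TFGraph for the
// TurboFan pipeline, turboshaft::Graph for Turboshaft). The hash is only
// computed when it is actually needed, i.e. for profiling, hash dumping, or
// validating previously collected profile data.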
2195template <typename Graph>
2196int ComputeInitialGraphHash(Builtin builtin,
2197 const ProfileDataFromFile* profile_data,
2198 const Graph* graph) {
2199 int initial_graph_hash = 0;
2200 if (v8_flags.turbo_profiling || v8_flags.dump_builtins_hashes_to_file ||
2201 profile_data != nullptr) {
2202 initial_graph_hash = HashGraphForPGO(graph);
2203 if (v8_flags.dump_builtins_hashes_to_file) {
2204 std::ofstream out(v8_flags.dump_builtins_hashes_to_file,
2205 std::ios_base::app);
2206 out << "Builtin: " << Builtins::name(builtin) << ", hash: 0x" << std::hex
2207 << initial_graph_hash << std::endl;
2208 }
2209 }
2210 return initial_graph_hash;
2211}
2212
2213const ProfileDataFromFile* ValidateProfileData(
2214 const ProfileDataFromFile* profile_data, int initial_graph_hash,
2215 const char* debug_name) {
2216 if (profile_data != nullptr && profile_data->hash() != initial_graph_hash) {
2217 if (v8_flags.reorder_builtins) {
2219 }
2220 if (v8_flags.abort_on_bad_builtin_profile_data ||
2221 v8_flags.warn_about_builtin_profile_data) {
2222 base::EmbeddedVector<char, 256> msg;
2223 SNPrintF(msg,
2224 "Rejected profile data for %s due to function change. "
2225 "Please use tools/builtins-pgo/generate.py to refresh it.",
2226 debug_name);
2227 if (v8_flags.abort_on_bad_builtin_profile_data) {
2228 // mksnapshot might fail here because of the following reasons:
2229 // * builtins were changed since the builtins profile generation,
2230 // * current build options affect builtins code and they don't match
2231 // the options used for building the profile (for example, it might
2232 // be because of gn argument 'dcheck_always_on=true').
2233 // To fix the issue one must either update the builtins PGO profiles
2234 // (see tools/builtins-pgo/generate.py) or disable builtins PGO by
2235 // setting gn argument v8_builtins_profiling_log_file="".
2236 // One might also need to update the tools/builtins-pgo/generate.py if
2237 // the set of default release arguments has changed.
2238 FATAL("%s", msg.begin());
2239 } else {
2240 PrintF("%s\n", msg.begin());
2241 }
2242 }
2243#ifdef LOG_BUILTIN_BLOCK_COUNT
2244 if (v8_flags.turbo_log_builtins_count_input) {
2245 PrintF("The hash came from execution count file for %s was not match!\n",
2246 debug_name);
2247 }
2248#endif
2249 return nullptr;
2250 }
2251 return profile_data;
2252}
2253
2254} // namespace
2255
2258 : data(&job->zone_stats_, &job->compilation_info_,
2259 job->raw_assembler()->isolate(),
2260 job->raw_assembler()->isolate()->allocator(),
2261 job->raw_assembler()->graph(), job->jsgraph(), nullptr,
2263 &job->node_origins_.value(), job->jump_opt_.get(),
2265 pipeline(&data) {
2266 data.set_verify_graph(v8_flags.verify_csa);
2267 }
2270};
2271
2273 Isolate* isolate, Builtin builtin, CodeAssemblerGenerator generator,
2274 CodeAssemblerInstaller installer, const AssemblerOptions& assembler_options,
2275 std::function<compiler::CallDescriptor*(Zone*)> get_call_descriptor,
2276 CodeKind code_kind, const char* name,
2277 const ProfileDataFromFile* profile_data, int finalize_order)
2278 : TurbofanCompilationJob(isolate, &compilation_info_,
2279 State::kReadyToPrepare),
2280 generator_(generator),
2281 installer_(installer),
2282 profile_data_(profile_data),
2283 zone_(isolate->allocator(), ZONE_NAME, kCompressGraphZone),
2284 zone_stats_(isolate->allocator()),
2285 code_assembler_state_(isolate, &zone_, get_call_descriptor(&zone_),
2286 code_kind, name, builtin),
2287 assembler_options_(assembler_options),
2288 compilation_info_(base::CStrVector(name), &zone_, code_kind),
2289 jump_opt_(ShouldOptimizeJumps(isolate) ? new JumpOptimizationInfo()
2290 : nullptr),
2291 finalize_order_(finalize_order) {
2292 DCHECK(code_kind == CodeKind::BUILTIN ||
2293 code_kind == CodeKind::BYTECODE_HANDLER ||
2294 code_kind == CodeKind::FOR_TESTING);
2296}
2297
2298// static
2300 return isolate->serializer_enabled() && v8_flags.turbo_rewrite_far_jumps &&
2301 !v8_flags.turbo_profiling && !v8_flags.dump_builtins_hashes_to_file;
2302}
2303
2305 Isolate* isolate) {
2306 {
 2307 // Work around the fact that the PersistentHandlesScope inside
 2308 // CompilationHandleScope requires there to be at least one handle.
2309 HandleScope handle_scope(isolate);
2310 DirectHandle<Object> dummy(ReadOnlyRoots(isolate->heap()).empty_string(),
2311 isolate);
2312 CompilationHandleScope compilation_scope(isolate, &compilation_info_);
2314 }
2315
2316 node_origins_.emplace(raw_assembler()->ExportForOptimization());
2317
2318 PipelineImpl* pipeline = EmplacePipeline(isolate);
2319 TFPipelineData* data = pipeline->data();
2320 PipelineJobScope pipeline_scope(data,
2321 isolate->counters()->runtime_call_stats());
2322 if (v8_flags.turbo_stats || v8_flags.turbo_stats_nvp) {
2324 &compilation_info_, isolate->GetTurboStatistics(), &zone_stats_));
2325 }
2326
2327 const char* debug_name = code_assembler_state_.name_;
2328 if (compilation_info_.trace_turbo_json() ||
2329 compilation_info_.trace_turbo_graph()) {
2330 CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
2331 tracing_scope.stream()
2332 << "---------------------------------------------------\n"
2333 << "Begin compiling " << debug_name << " using TurboFan" << std::endl;
2334 if (compilation_info_.trace_turbo_json()) {
2335 TurboJsonFile json_of(&compilation_info_, std::ios_base::trunc);
2336 json_of << "{\"function\" : ";
2338 DirectHandle<Script>(), isolate,
2340 json_of << ",\n\"phases\":[";
2341 }
2342 if (!pipeline->Run<PrintGraphPhase>("V8.TFMachineCode")) return FAILED;
2343 }
2344
2345 // Validate pgo profile.
2346 initial_graph_hash_ = ComputeInitialGraphHash(compilation_info_.builtin(),
2347 profile_data_, data->graph());
2348 profile_data_ = ValidateProfileData(profile_data_, initial_graph_hash_,
2350 data->set_profile_data(profile_data_);
2351
2352 return SUCCEEDED;
2353}
2354
2356 Isolate* isolate) {
2357 RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeFinalizePipelineJob);
2358
2359 HandleScope scope(isolate);
2360 Handle<Code> code = FinalizeCode(isolate);
2361
2362#ifdef ENABLE_DISASSEMBLER
2363 if (v8_flags.trace_ignition_codegen &&
2364 code->kind() == CodeKind::BYTECODE_HANDLER) {
2365 StdoutStream os;
2366 code->Disassemble(code_assembler_state_.name_, os, isolate);
2367 os << std::flush;
2368 }
2369#endif // ENABLE_DISASSEMBLER
2370
2373
2374 return SUCCEEDED;
2375}
2376
2379 public:
2381
2387
2389 LocalIsolate* local_isolate) final;
2390
2391 private:
2392 PipelineImpl* EmplacePipeline(Isolate* isolate) final;
2393 Handle<Code> FinalizeCode(Isolate* isolate) final;
2394
2395 // Initialized after CSA generates the graph in PrepareJobImpl.
2397 std::optional<turboshaft::PipelineData> turboshaft_data_;
2398};
2399
2402
2404 Isolate* isolate) {
2405 pipeline_.emplace(this);
2406 turboshaft_data_.emplace(pipeline_->data.zone_stats(),
2408 pipeline_->data.info(), assembler_options_,
2409 pipeline_->data.start_source_position());
2410 return &pipeline_->pipeline;
2411}
2412
2415 RuntimeCallStats* stats, LocalIsolate* local_isolate) {
2416 if (!pipeline_->pipeline.ComputeScheduledGraph()) return FAILED;
2417 DCHECK_NULL(pipeline_->data.frame());
2418 DCHECK_NOT_NULL(pipeline_->data.schedule());
2419
2420 turboshaft::BuiltinPipeline turboshaft_pipeline(&turboshaft_data_.value());
2421 CallDescriptor* call_descriptor = raw_assembler()->call_descriptor();
2422 Linkage linkage(call_descriptor);
2423
2424 CHECK(
2425 turboshaft_pipeline.CreateGraphFromTurbofan(&pipeline_->data, &linkage));
2426
2427 turboshaft_pipeline.OptimizeBuiltin();
2428
2429 CHECK_NULL(pipeline_->data.osr_helper_ptr());
2430 CHECK(turboshaft_pipeline.GenerateCode(
2431 &linkage, pipeline_->data.osr_helper_ptr(), jump_opt_.get(),
2433
2434 return SUCCEEDED;
2435}
2436
2438 Isolate* isolate) {
2439 PipelineJobScope scope(&pipeline_->data,
2440 isolate->counters()->runtime_call_stats());
2441 turboshaft::Pipeline turboshaft_pipeline(&turboshaft_data_.value());
2442 turboshaft::Tracing::Scope tracing_scope(pipeline_->data.info());
2443 return turboshaft_pipeline.FinalizeCode().ToHandleChecked();
2444}
2445
2446// static
2447std::unique_ptr<TurbofanCompilationJob>
2449 Isolate* isolate, Builtin builtin, CodeAssemblerGenerator generator,
2450 CodeAssemblerInstaller installer, const AssemblerOptions& assembler_options,
2451 CallDescriptors::Key interface_descriptor, const char* name,
2452 const ProfileDataFromFile* profile_data, int finalize_order) {
2453 auto get_call_descriptor =
2454 [interface_descriptor](Zone* zone) {
2455 CallInterfaceDescriptor descriptor(interface_descriptor);
2456 // Ensure descriptor is already initialized.
2457 DCHECK_LE(0, descriptor.GetRegisterParameterCount());
2459 zone, descriptor, descriptor.GetStackParameterCount(),
2461 };
2462 return std::make_unique<CodeAssemblerTurboshaftCompilationJob>(
2463 isolate, builtin, generator, installer, assembler_options,
2464 get_call_descriptor, CodeKind::BUILTIN, name, profile_data,
2465 finalize_order);
2466}
2467
2468// static
2469std::unique_ptr<TurbofanCompilationJob>
2471 Isolate* isolate, Builtin builtin, CodeAssemblerGenerator generator,
2472 CodeAssemblerInstaller installer, const AssemblerOptions& assembler_options,
2473 int argc, const char* name, const ProfileDataFromFile* profile_data,
2474 int finalize_order) {
2475 auto get_call_descriptor = [argc](Zone* zone) {
2476 return Linkage::GetJSCallDescriptor(zone, false, argc,
2478 };
2479 return std::make_unique<CodeAssemblerTurboshaftCompilationJob>(
2480 isolate, builtin, generator, installer, assembler_options,
2481 get_call_descriptor, CodeKind::BUILTIN, name, profile_data,
2482 finalize_order);
2483}
2484
2485// static
2486std::unique_ptr<TurbofanCompilationJob>
2488 Isolate* isolate, Builtin builtin, CodeAssemblerGenerator generator,
2489 CodeAssemblerInstaller installer, const AssemblerOptions& assembler_options,
2490 const char* name, const ProfileDataFromFile* profile_data,
2491 int finalize_order) {
2492 auto get_call_descriptor =
2493 [](Zone* zone) {
2496 zone, descriptor, descriptor.GetStackParameterCount(),
2498 };
2499 return std::make_unique<CodeAssemblerTurboshaftCompilationJob>(
2500 isolate, builtin, generator, installer, assembler_options,
2501 get_call_descriptor, CodeKind::BYTECODE_HANDLER, name, profile_data,
2502 finalize_order);
2503}
2504
2505// static
2506std::unique_ptr<CodeAssemblerCompilationJob>
2508 Isolate* isolate, Builtin builtin, CodeAssemblerGenerator generator,
2509 CodeAssemblerInstaller installer,
2510 std::function<compiler::CallDescriptor*(Zone*)> get_call_descriptor,
2511 CodeKind code_kind, const char* name) {
2512 AssemblerOptions assembler_options = AssemblerOptions::Default(isolate);
2513 return std::make_unique<CodeAssemblerTurboshaftCompilationJob>(
2514 isolate, builtin, generator, installer, assembler_options,
2515 get_call_descriptor, code_kind, name, nullptr, -1);
2516}
2517
2519 turboshaft::PipelineData* turboshaft_data, CallDescriptor* call_descriptor,
2520 Builtin builtin, const char* debug_name,
2521 const ProfileDataFromFile* profile_data) {
2522 DCHECK_EQ(builtin, turboshaft_data->info()->builtin());
2523 Isolate* isolate = turboshaft_data->isolate();
2524
2525#if V8_ENABLE_EXPERIMENTAL_TSA_BUILTINS
2526// TODO(nicohartmann): Use during development and remove afterwards.
2527#ifdef DEBUG
2528 std::cout << "=== Generating Builtin '" << debug_name
2529 << "' with Turboshaft ===" << std::endl;
2530#endif
2531
2532#endif
2533
2534 // Initialize JumpOptimizationInfo if required.
2535 JumpOptimizationInfo jump_opt;
2536 bool should_optimize_jumps =
2537 isolate->serializer_enabled() && v8_flags.turbo_rewrite_far_jumps &&
2538 !v8_flags.turbo_profiling && !v8_flags.dump_builtins_hashes_to_file;
2539 JumpOptimizationInfo* jump_optimization_info =
2540 should_optimize_jumps ? &jump_opt : nullptr;
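 // This condition mirrors CodeAssemblerCompilationJob::ShouldOptimizeJumps()
 // above: far jumps are only rewritten when embedding builtins into the
 // snapshot, and not when profiling or dumping builtin hashes, presumably
 // because those rely on a stable code layout.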
2541
2542 PipelineJobScope scope(turboshaft_data,
2543 isolate->counters()->runtime_call_stats());
2544 RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeCode);
2545
2546 std::unique_ptr<TurbofanPipelineStatistics> pipeline_statistics(
2547 CreatePipelineStatistics(Handle<Script>::null(), turboshaft_data->info(),
2548 isolate, turboshaft_data->zone_stats()));
2549
2550 turboshaft::BuiltinPipeline turboshaft_pipeline(turboshaft_data);
2551 OptimizedCompilationInfo* info = turboshaft_data->info();
2552 if (info->trace_turbo_graph() || info->trace_turbo_json()) {
2554 turboshaft_data->zone_stats(), turboshaft::kTempZoneName);
2555 std::vector<char> name_buffer(strlen("TSA: ") + strlen(debug_name) + 1);
2556 memcpy(name_buffer.data(), "TSA: ", 5);
2557 memcpy(name_buffer.data() + 5, debug_name, strlen(debug_name));
2558 turboshaft_pipeline.PrintGraph(print_zone, name_buffer.data());
2559 }
2560
2561 // Validate pgo profile.
2562 const int initial_graph_hash =
2563 ComputeInitialGraphHash(builtin, profile_data, &turboshaft_data->graph());
2564 profile_data =
2565 ValidateProfileData(profile_data, initial_graph_hash, debug_name);
2566
2567 turboshaft_pipeline.OptimizeBuiltin();
2568 Linkage linkage(call_descriptor);
2569 CHECK(turboshaft_pipeline.GenerateCode(&linkage, {}, jump_optimization_info,
2570 profile_data, initial_graph_hash));
2571 return turboshaft_pipeline.FinalizeCode();
2572}
2573
2574#if V8_ENABLE_WEBASSEMBLY
2575
2576namespace {
2577
2578wasm::WasmCompilationResult WrapperCompilationResult(
2579 CodeGenerator* code_generator, CallDescriptor* call_descriptor,
2580 CodeKind kind) {
2582 code_generator->masm()->GetCode(
2583 nullptr, &result.code_desc, code_generator->safepoint_table_builder(),
2584 static_cast<int>(code_generator->handler_table_offset()));
2585 result.instr_buffer = code_generator->masm()->ReleaseBuffer();
2586 result.source_positions = code_generator->GetSourcePositionTable();
2587 result.protected_instructions_data =
2588 code_generator->GetProtectedInstructionsData();
2589 result.frame_slot_count = code_generator->frame()->GetTotalFrameSlotCount();
2590 result.tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots();
2592 if (kind == CodeKind::WASM_TO_JS_FUNCTION) {
2594 }
2595 return result;
2596}
2597
2598void TraceFinishWrapperCompilation(OptimizedCompilationInfo& info,
2599 CodeTracer* code_tracer,
2600 const wasm::WasmCompilationResult& result,
2601 CodeGenerator* code_generator) {
2602 if (info.trace_turbo_json()) {
2603 TurboJsonFile json_of(&info, std::ios_base::app);
2604 json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\""
2605 << BlockStartsAsJSON{&code_generator->block_starts()}
2606 << "\"data\":\"";
2607#ifdef ENABLE_DISASSEMBLER
2608 std::stringstream disassembler_stream;
2610 nullptr, disassembler_stream, result.code_desc.buffer,
2611 result.code_desc.buffer + result.code_desc.safepoint_table_offset,
2612 CodeReference(&result.code_desc));
2613 for (auto const c : disassembler_stream.str()) {
2614 json_of << AsEscapedUC16ForJSON(c);
2615 }
2616#endif // ENABLE_DISASSEMBLER
2617 json_of << "\"}\n]";
2618 json_of << "\n}";
2619 }
2620
2621 if (info.trace_turbo_json() || info.trace_turbo_graph()) {
2622 CodeTracer::StreamScope tracing_scope(code_tracer);
2623 tracing_scope.stream()
2624 << "---------------------------------------------------\n"
2625 << "Finished compiling method " << info.GetDebugName().get()
2626 << " using TurboFan" << std::endl;
2627 }
2628}
2629
2630} // namespace
2631
2632// static
2633wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
2634 CallDescriptor* call_descriptor, MachineGraph* mcgraph, CodeKind kind,
2635 const char* debug_name, const AssemblerOptions& options,
2636 SourcePositionTable* source_positions) {
2637 TFGraph* graph = mcgraph->graph();
2638 OptimizedCompilationInfo info(base::CStrVector(debug_name), graph->zone(),
2639 kind);
2640 // Construct a pipeline for scheduling and code generation.
2641 wasm::WasmEngine* wasm_engine = wasm::GetWasmEngine();
2642 ZoneStats zone_stats(wasm_engine->allocator());
2643 NodeOriginTable* node_positions = graph->zone()->New<NodeOriginTable>(graph);
2644 TFPipelineData data(&zone_stats, wasm_engine, &info, mcgraph, nullptr,
2645 source_positions, node_positions, options);
2646 std::unique_ptr<TurbofanPipelineStatistics> pipeline_statistics;
2647 if (v8_flags.turbo_stats || v8_flags.turbo_stats_nvp) {
2648 pipeline_statistics.reset(new TurbofanPipelineStatistics(
2649 &info, wasm_engine->GetOrCreateTurboStatistics(), &zone_stats));
2650 pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen");
2651 }
2652 TraceWrapperCompilation("TurboFan", &info, &data);
2653
2654 PipelineImpl pipeline(&data);
2655 pipeline.RunPrintAndVerify("V8.WasmNativeStubMachineCode", true);
2656
2657 CHECK(pipeline.Run<MemoryOptimizationPhase>());
2658 pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
2659
2660 CHECK(pipeline.ComputeScheduledGraph());
2661
2662 Linkage linkage(call_descriptor);
2663
2664 turboshaft::PipelineData turboshaft_data(
2665 &zone_stats, turboshaft::TurboshaftPipelineKind::kWasm, nullptr, &info,
2666 options);
2667 turboshaft::Pipeline turboshaft_pipeline(&turboshaft_data);
2668
2669 CHECK(turboshaft_pipeline.CreateGraphFromTurbofan(&data, &linkage));
2670 // We need to run simplification to normalize some patterns for instruction
2671 // selection (e.g. loads and stores).
2672 CHECK(turboshaft_pipeline.RunSimplificationAndNormalizationPhase());
2673
2674 CHECK(GenerateCodeFromTurboshaftGraph(&linkage, turboshaft_pipeline,
2675 &pipeline, data.osr_helper_ptr()));
2676
2677 auto result = WrapperCompilationResult(turboshaft_data.code_generator(),
2678 call_descriptor, kind);
2679 DCHECK(result.succeeded());
2680 CodeTracer* code_tracer = nullptr;
2681 if (info.trace_turbo_json() || info.trace_turbo_graph()) {
2682 code_tracer = data.GetCodeTracer();
2683 }
2684 TraceFinishWrapperCompilation(info, code_tracer, result,
2685 pipeline.code_generator());
2686 return result;
2687}
2688
2689// static
2690wasm::WasmCompilationResult
2691Pipeline::GenerateCodeForWasmNativeStubFromTurboshaft(
2692 const wasm::CanonicalSig* sig, wasm::WrapperCompilationInfo wrapper_info,
2693 const char* debug_name, const AssemblerOptions& options,
2694 SourcePositionTable* source_positions) {
2695 wasm::WasmEngine* wasm_engine = wasm::GetWasmEngine();
2696 Zone zone(wasm_engine->allocator(), ZONE_NAME, kCompressGraphZone);
2697 WasmCallKind call_kind =
2698 wrapper_info.code_kind == CodeKind::WASM_TO_JS_FUNCTION
2701 CallDescriptor* call_descriptor =
2702 GetWasmCallDescriptor(&zone, sig, call_kind);
2703 if (!Is64()) {
2704 call_descriptor = GetI32WasmCallDescriptor(&zone, call_descriptor);
2705 }
2706 Linkage linkage(call_descriptor);
2707 OptimizedCompilationInfo info(base::CStrVector(debug_name), &zone,
2708 wrapper_info.code_kind);
2709 ZoneStats zone_stats(wasm_engine->allocator());
2710 TFPipelineData data(&zone_stats, &info, nullptr,
2711 wasm::GetWasmEngine()->allocator(), nullptr, nullptr,
2712 nullptr, nullptr, nullptr, nullptr, options, nullptr);
2713 std::unique_ptr<TurbofanPipelineStatistics> pipeline_statistics;
2714 if (v8_flags.turbo_stats || v8_flags.turbo_stats_nvp) {
2715 pipeline_statistics.reset(new TurbofanPipelineStatistics(
2716 &info, wasm_engine->GetOrCreateTurboStatistics(), &zone_stats));
2717 pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen");
2718 }
2719 TraceWrapperCompilation("Turboshaft", &info, &data);
2720
2721 PipelineImpl pipeline(&data);
2722
2723 {
2724 turboshaft::PipelineData turboshaft_data(
2725 &zone_stats, turboshaft::TurboshaftPipelineKind::kWasm, nullptr, &info,
2726 options);
2727 turboshaft_data.SetIsWasmWrapper(sig);
2728 AccountingAllocator allocator;
2729 turboshaft_data.InitializeGraphComponent(source_positions);
2730 BuildWasmWrapper(&turboshaft_data, &allocator, turboshaft_data.graph(), sig,
2731 wrapper_info);
2732 CodeTracer* code_tracer = nullptr;
2733 if (info.trace_turbo_json() || info.trace_turbo_graph()) {
2734 // NOTE: We must not call `GetCodeTracer` if tracing is not enabled,
2735 // because it may not yet be initialized then and doing so from the
2736 // background thread is not threadsafe.
2737 code_tracer = data.GetCodeTracer();
2738 }
2739 Zone printing_zone(&allocator, ZONE_NAME);
2740 turboshaft::PrintTurboshaftGraph(&turboshaft_data, &printing_zone,
2741 code_tracer, "Graph generation");
2742
2743 // Skip the LoopUnrolling, WasmGCOptimize and WasmLowering phases for
2744 // wrappers.
2745 // TODO(14108): Do we need value numbering if wasm_opt is turned off?
2746 turboshaft::Pipeline turboshaft_pipeline(&turboshaft_data);
2747 if (v8_flags.wasm_opt) {
2748 CHECK(turboshaft_pipeline.Run<turboshaft::WasmOptimizePhase>());
2749 }
2750#if DEBUG
2751 if (!v8_flags.wasm_opt) {
2752 // We still need to lower allocation operations even with optimizations
2753 // being turned off.
2754 CHECK(
2755 turboshaft_pipeline.Run<turboshaft::WasmDebugMemoryLoweringPhase>());
2756 }
2757#endif
2758
2759 if (!Is64()) {
2760 CHECK(turboshaft_pipeline.Run<turboshaft::Int64LoweringPhase>());
2761 }
2762
2763 CHECK(turboshaft_pipeline.Run<turboshaft::WasmDeadCodeEliminationPhase>());
2764
2765 if (V8_UNLIKELY(v8_flags.turboshaft_enable_debug_features)) {
2766 // This phase has to run very late to allow all previous phases to use
2767 // debug features.
2768 CHECK(turboshaft_pipeline.Run<turboshaft::DebugFeatureLoweringPhase>());
2769 }
2770
2771 data.BeginPhaseKind("V8.InstructionSelection");
2772
2773 const bool success = GenerateCodeFromTurboshaftGraph(
2774 &linkage, turboshaft_pipeline, &pipeline, data.osr_helper_ptr());
2775 CHECK(success);
2776
2777 auto result =
2778 WrapperCompilationResult(turboshaft_data.code_generator(),
2779 call_descriptor, wrapper_info.code_kind);
2780 DCHECK(result.succeeded());
2781
2782 TraceFinishWrapperCompilation(info, code_tracer, result,
2783 turboshaft_data.code_generator());
2784 return result;
2785 }
2786}
2787
2788namespace {
2789
2790base::OwnedVector<uint8_t> SerializeInliningPositions(
2791 const ZoneVector<WasmInliningPosition>& positions) {
2792 const size_t entry_size = sizeof positions[0].inlinee_func_index +
2793 sizeof positions[0].was_tail_call +
2794 sizeof positions[0].caller_pos;
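 // Each entry is written unpadded in the order (inlinee_func_index,
 // was_tail_call, caller_pos), matching the memcpy sequence below, so the
 // buffer size is simply positions.size() * entry_size.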
2795 auto result = base::OwnedVector<uint8_t>::New(positions.size() * entry_size);
2796 uint8_t* iter = result.begin();
2797 for (const auto& [func_index, was_tail_call, caller_pos] : positions) {
2798 size_t index_size = sizeof func_index;
2799 std::memcpy(iter, &func_index, index_size);
2800 iter += index_size;
2801 size_t was_tail_call_size = sizeof was_tail_call;
2802 std::memcpy(iter, &was_tail_call, was_tail_call_size);
2803 iter += was_tail_call_size;
2804 size_t pos_size = sizeof caller_pos;
2805 std::memcpy(iter, &caller_pos, pos_size);
2806 iter += pos_size;
2807 }
2808 DCHECK_EQ(iter, result.end());
2809 return result;
2810}
2811
2812} // namespace
2813
2814// static
2815wasm::WasmCompilationResult Pipeline::GenerateWasmCode(
2816 wasm::CompilationEnv* env, WasmCompilationData& compilation_data,
2817 wasm::WasmDetectedFeatures* detected, Counters* counters) {
2818 auto* wasm_engine = wasm::GetWasmEngine();
2819 const wasm::WasmModule* module = env->module;
2820 base::TimeTicks start_time;
2821 if (V8_UNLIKELY(v8_flags.trace_wasm_compilation_times)) {
2822 start_time = base::TimeTicks::Now();
2823 }
2824 ZoneStats zone_stats(wasm_engine->allocator());
2825
2826 Zone graph_zone{wasm_engine->allocator(), ZONE_NAME, kCompressGraphZone};
2827 OptimizedCompilationInfo info(
2828 GetDebugName(&graph_zone, env->module,
2829 compilation_data.wire_bytes_storage,
2830 compilation_data.func_index),
2831 &graph_zone, CodeKind::WASM_FUNCTION);
2832
2833 if (info.trace_turbo_json()) {
2834 TurboCfgFile tcf;
2835 tcf << AsC1VCompilation(&info);
2836 }
2837
2838 // TODO(nicohartmann): We should not allocate TurboFan graph(s) here but
2839 // instead use only Turboshaft.
2840 compiler::MachineGraph* mcgraph = graph_zone.New<compiler::MachineGraph>(
2841 graph_zone.New<compiler::TFGraph>(&graph_zone),
2842 graph_zone.New<CommonOperatorBuilder>(&graph_zone),
2843 graph_zone.New<MachineOperatorBuilder>(
2847 if (info.trace_turbo_json()) {
2848 compilation_data.node_origins =
2849 graph_zone.New<NodeOriginTable>(mcgraph->graph());
2850 }
2851
2852 compilation_data.source_positions =
2853 mcgraph->zone()->New<SourcePositionTable>(mcgraph->graph());
2854 auto call_descriptor =
2855 GetWasmCallDescriptor(&graph_zone, compilation_data.func_body.sig);
2856
2857 std::unique_ptr<TurbofanPipelineStatistics> pipeline_statistics(
2858 CreatePipelineStatistics(compilation_data, module, &info, &zone_stats));
2859 AssemblerOptions options = WasmAssemblerOptions();
2860 TFPipelineData data(&zone_stats, wasm_engine, &info, mcgraph,
2861 pipeline_statistics.get(),
2862 compilation_data.source_positions,
2863 compilation_data.node_origins, options);
2864
2865 PipelineImpl pipeline(&data);
2866
2867 if (data.info()->trace_turbo_json() || data.info()->trace_turbo_graph()) {
2868 CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
2869 tracing_scope.stream()
2870 << "---------------------------------------------------\n"
2871 << "Begin compiling method " << data.info()->GetDebugName().get()
2872 << " using Turboshaft" << std::endl;
2873 }
2874
2875 if (mcgraph->machine()->Is32()) {
2876 call_descriptor =
2877 GetI32WasmCallDescriptor(mcgraph->zone(), call_descriptor);
2878 }
2879 Linkage linkage(call_descriptor);
2880
2881 Zone inlining_positions_zone(wasm_engine->allocator(), ZONE_NAME);
2882 ZoneVector<WasmInliningPosition> inlining_positions(&inlining_positions_zone);
2883
2884 turboshaft::PipelineData turboshaft_data(
2885 &zone_stats, turboshaft::TurboshaftPipelineKind::kWasm, nullptr, &info,
2886 options);
2887 turboshaft_data.set_pipeline_statistics(pipeline_statistics.get());
2888 const wasm::FunctionSig* sig = compilation_data.func_body.sig;
2889 turboshaft_data.SetIsWasmFunction(env->module, sig,
2890 compilation_data.func_body.is_shared);
2891 DCHECK_NOT_NULL(turboshaft_data.wasm_module());
2892
2893 // TODO(nicohartmann): This only works here because source positions are not
2894 // actually allocated inside the graph zone of TFPipelineData. We should
2895 // properly allocate source positions inside Turboshaft's graph zone right
2896 // from the beginning.
2897 turboshaft_data.InitializeGraphComponent(data.source_positions());
2898
2899 AccountingAllocator allocator;
2900 wasm::BuildTSGraph(&turboshaft_data, &allocator, env, detected,
2901 turboshaft_data.graph(), compilation_data.func_body,
2902 compilation_data.wire_bytes_storage,
2903 &compilation_data.assumptions, &inlining_positions,
2904 compilation_data.func_index);
2905 CodeTracer* code_tracer = nullptr;
2906 if (turboshaft_data.info()->trace_turbo_graph()) {
2907 // NOTE: We must not call `GetCodeTracer` if tracing is not enabled,
2908 // because it may not yet be initialized then and doing so from the
2909 // background thread is not threadsafe.
2910 code_tracer = data.GetCodeTracer();
2911 }
2912 Zone printing_zone(&allocator, ZONE_NAME);
2913 turboshaft::PrintTurboshaftGraph(&turboshaft_data, &printing_zone,
2914 code_tracer, "Graph generation");
2915
2916 data.BeginPhaseKind("V8.WasmOptimization");
2917 turboshaft::Pipeline turboshaft_pipeline(&turboshaft_data);
2918
2919#if defined(V8_ENABLE_WASM_SIMD256_REVEC) && defined(V8_TARGET_ARCH_X64)
2920 if (v8_flags.experimental_wasm_revectorize) {
2921 bool cpu_feature_support =
2923 if (cpu_feature_support && detected->has_simd()) {
2924 if (v8_flags.trace_wasm_revectorize) {
2925 std::cout << "Begin revec function "
2926 << data.info()->GetDebugName().get() << std::endl;
2927 }
2928 CHECK(turboshaft_pipeline.Run<turboshaft::WasmRevecPhase>());
2929 if (v8_flags.trace_wasm_revectorize) {
2930 std::cout << "Finished revec function "
2931 << data.info()->GetDebugName().get() << std::endl;
2932 }
2933 }
2934 }
2935#endif // V8_ENABLE_WASM_SIMD256_REVEC
2936
2937 const bool uses_wasm_gc_features =
2938 detected->has_gc() || detected->has_typed_funcref() ||
2939 detected->has_stringref() || detected->has_imported_strings() ||
2940 detected->has_imported_strings_utf8();
2941 if (v8_flags.wasm_loop_peeling && uses_wasm_gc_features) {
2942 CHECK(turboshaft_pipeline.Run<turboshaft::LoopPeelingPhase>());
2943 }
2944
2945 if (v8_flags.wasm_loop_unrolling) {
2946 // TODO(384870251): Note that if we don't run this, subsequent analyses and
2947 // optimizations (DCE, decompression optimization) can run much slower.
2948 CHECK(turboshaft_pipeline.Run<turboshaft::LoopUnrollingPhase>());
2949 }
2950
2951 if (v8_flags.wasm_opt && uses_wasm_gc_features) {
2952 CHECK(turboshaft_pipeline.Run<turboshaft::WasmGCOptimizePhase>());
2953 }
2954
2955 // TODO(mliedtke): This phase could be merged with the WasmGCOptimizePhase
2956 // if wasm_opt is enabled to improve compile time. Consider potential code
2957 // size increase.
2958 // TODO(384870251,dmercardier): Run a CopyingPhase with LoopFinder to
2959 // improve block ordering, independent of loop unrolling.
2960 CHECK(turboshaft_pipeline.Run<turboshaft::WasmLoweringPhase>());
2961
2962 // TODO(14108): Do we need value numbering if wasm_opt is turned off?
2963 const bool is_asm_js = is_asmjs_module(module);
2964 if (v8_flags.wasm_opt || is_asm_js) {
2965 CHECK(turboshaft_pipeline.Run<turboshaft::WasmOptimizePhase>());
2966 }
2967
2968#if V8_TARGET_ARCH_ARM64
2969 if (v8_flags.experimental_wasm_simd_opt && v8_flags.wasm_opt &&
2970 detected->has_simd()) {
2971 CHECK(turboshaft_pipeline.Run<turboshaft::WasmSimdPhase>());
2972 }
2973#endif // V8_TARGET_ARCH_ARM64
2974
2975#if DEBUG
2976 if (!v8_flags.wasm_opt) {
2977 // We still need to lower allocation operations even with optimizations
2978 // being turned off.
2979 CHECK(turboshaft_pipeline.Run<turboshaft::WasmDebugMemoryLoweringPhase>());
2980 }
2981#endif
2982
2983 if (mcgraph->machine()->Is32()) {
2984 CHECK(turboshaft_pipeline.Run<turboshaft::Int64LoweringPhase>());
2985 }
2986
2987 CHECK(turboshaft_pipeline.Run<turboshaft::WasmDeadCodeEliminationPhase>());
2988
2989 if (V8_UNLIKELY(v8_flags.turboshaft_enable_debug_features)) {
2990 // This phase has to run very late to allow all previous phases to use
2991 // debug features.
2992 CHECK(turboshaft_pipeline.Run<turboshaft::DebugFeatureLoweringPhase>());
2993 }
2994
2995 data.BeginPhaseKind("V8.InstructionSelection");
2996
2997 // Instruction selection for JavaScript may fail, but for Wasm we should
2998 // always succeed (e.g., by enforcing limits in earlier phases).
2999 CHECK(GenerateCodeFromTurboshaftGraph(&linkage, turboshaft_pipeline,
3000 &pipeline, data.osr_helper_ptr()));
3001
3002 CodeGenerator* code_generator = turboshaft_data.code_generator();
3003
3004 wasm::WasmCompilationResult result;
3005 code_generator->masm()->GetCode(
3006 nullptr, &result.code_desc, code_generator->safepoint_table_builder(),
3007 static_cast<int>(code_generator->handler_table_offset()));
3008
3009 result.instr_buffer = code_generator->masm()->ReleaseBuffer();
3010 result.frame_slot_count = code_generator->frame()->GetTotalFrameSlotCount();
3011 result.tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots();
3012 result.source_positions = code_generator->GetSourcePositionTable();
3013 result.inlining_positions = SerializeInliningPositions(inlining_positions);
3014 result.protected_instructions_data =
3015 code_generator->GetProtectedInstructionsData();
3016 result.deopt_data = code_generator->GenerateWasmDeoptimizationData();
3018
3019 if (data.info()->trace_turbo_json()) {
3020 TurboJsonFile json_of(data.info(), std::ios_base::app);
3021 json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\""
3022 << BlockStartsAsJSON{&code_generator->block_starts()}
3023 << "\"data\":\"";
3024#ifdef ENABLE_DISASSEMBLER
3025 std::stringstream disassembler_stream;
3027 nullptr, disassembler_stream, result.code_desc.buffer,
3028 result.code_desc.buffer + result.code_desc.safepoint_table_offset,
3029 CodeReference(&result.code_desc));
3030 for (auto const c : disassembler_stream.str()) {
3031 json_of << AsEscapedUC16ForJSON(c);
3032 }
3033#endif // ENABLE_DISASSEMBLER
3034 json_of << "\"}\n],\n";
3035 JsonPrintAllSourceWithPositionsWasm(json_of, module,
3036 compilation_data.wire_bytes_storage,
3037 base::VectorOf(inlining_positions));
3038 json_of << "}";
3039 json_of << "\n}";
3040 }
3041
3042 if (data.info()->trace_turbo_json() || data.info()->trace_turbo_graph()) {
3043 CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
3044 tracing_scope.stream()
3045 << "---------------------------------------------------\n"
3046 << "Finished compiling method " << data.info()->GetDebugName().get()
3047 << " using Turboshaft" << std::endl;
3048 }
3049
3050 if (V8_UNLIKELY(v8_flags.trace_wasm_compilation_times)) {
3051 base::TimeDelta time = base::TimeTicks::Now() - start_time;
3052 int codesize = result.code_desc.body_size();
3053 StdoutStream{} << "Compiled function "
3054 << reinterpret_cast<const void*>(module) << "#"
3055 << compilation_data.func_index << " using TurboFan, took "
3056 << time.InMilliseconds() << " ms and "
3057 << zone_stats.GetMaxAllocatedBytes() << " / "
3058 << zone_stats.GetTotalAllocatedBytes()
3059 << " max/total bytes; bodysize "
3060 << compilation_data.body_size() << " codesize " << codesize
3061 << " name " << data.info()->GetDebugName().get()
3062 << std::endl;
3063 }
3064
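 // The histogram sample is an int, so the peak zone allocation is clamped to
 // kMaxInt before being recorded for functions with a body of at least 100 KB.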
3065 if (counters && compilation_data.body_size() >= 100 * KB) {
3066 size_t zone_bytes = zone_stats.GetMaxAllocatedBytes();
3067 counters->wasm_compile_huge_function_peak_memory_bytes()->AddSample(
3068 static_cast<int>(std::min(size_t{kMaxInt}, zone_bytes)));
3069 }
3070
3071 // Add a "0 deopts" sample for the first tier-up of a function that contains
3072 // any deopt data. This indicates a baseline of how many functions can
 3073 // potentially deopt, so that the statistic of having x functions that
 3074 // deopted at least once becomes more meaningful.
3075 if (counters && !result.deopt_data.empty()) {
3076 DCHECK(v8_flags.wasm_deopt);
3077 bool is_first_tierup = false;
3078 {
3079 const wasm::TypeFeedbackStorage& feedback = module->type_feedback;
3080 base::MutexGuard mutex_guard(&feedback.mutex);
3081 is_first_tierup = !feedback.deopt_count_for_function.contains(
3082 compilation_data.func_index);
3083 }
3084 if (is_first_tierup) {
3085 counters->wasm_deopts_per_function()->AddSample(0);
3086 }
3087 }
3088
3089 DCHECK(result.succeeded());
3090 return result;
3091}
3092#endif // V8_ENABLE_WEBASSEMBLY
3093
3094// static
3096 OptimizedCompilationInfo* info, Isolate* isolate) {
3097 ZoneStats zone_stats(isolate->allocator());
3098 std::unique_ptr<TurbofanPipelineStatistics> pipeline_statistics(
3099 CreatePipelineStatistics(Handle<Script>::null(), info, isolate,
3100 &zone_stats));
3101
3102 TFPipelineData data(&zone_stats, isolate, info, pipeline_statistics.get());
3103 turboshaft::PipelineData turboshaft_data(
3104 &zone_stats, turboshaft::TurboshaftPipelineKind::kJS, isolate, info,
3105 AssemblerOptions::Default(isolate));
3106 turboshaft_data.set_pipeline_statistics(pipeline_statistics.get());
3107 PipelineJobScope scope(&data, isolate->counters()->runtime_call_stats());
3108 PipelineImpl pipeline(&data);
3109 turboshaft::Pipeline turboshaft_pipeline(&turboshaft_data);
3110
3111 Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
3112
3113 {
3114 CompilationHandleScope compilation_scope(isolate, info);
3115 info->ReopenAndCanonicalizeHandlesInNewScope(isolate);
3116 if (!pipeline.InitializeHeapBroker()) return {};
3117 }
3118
3119 {
3120 LocalIsolateScope local_isolate_scope(data.broker(), info,
3121 isolate->main_thread_local_isolate());
3122 turboshaft_data.InitializeBrokerAndDependencies(data.broker_ptr(),
3123 data.dependencies());
3124
3125 if (V8_UNLIKELY(v8_flags.turbolev)) {
3126 if (!turboshaft_pipeline.CreateGraphWithMaglev(&linkage)) return {};
3127 } else {
3128 if (!pipeline.CreateGraph(&linkage)) return {};
3129 // We selectively Unpark inside OptimizeTurbofanGraph.
3130 if (!pipeline.OptimizeTurbofanGraph(&linkage)) return {};
3131
3132 // We convert the turbofan graph to turboshaft.
3133 if (!turboshaft_pipeline.CreateGraphFromTurbofan(&data, &linkage)) {
3134 data.EndPhaseKind();
3135 return {};
3136 }
3137 }
3138
3139 if (!turboshaft_pipeline.OptimizeTurboshaftGraph(&linkage)) {
3140 return {};
3141 }
3142
3143 const bool success = GenerateCodeFromTurboshaftGraph(
3144 &linkage, turboshaft_pipeline, &pipeline, data.osr_helper_ptr());
3145 if (!success) return {};
3146
3148 if (turboshaft_pipeline.FinalizeCode().ToHandle(&code) &&
3149 turboshaft_pipeline.CommitDependencies(code)) {
3150 return code;
3151 }
3152 return {};
3153 }
3154}
3155
3156// static
3158 OptimizedCompilationInfo* info, Isolate* isolate,
3159 CallDescriptor* call_descriptor, TFGraph* graph,
3160 const AssemblerOptions& opts, Schedule* schedule) {
3161 // TODO(nicohartmann): Callers should properly set this, but it's hard to do
3162 // this through testing logic shared between JS and Wasm.
3163 AssemblerOptions options = opts;
3164#if V8_ENABLE_WEBASSEMBLY
3165 if (info->IsWasm() || info->IsWasmBuiltin()) {
3166 options.is_wasm = true;
3167 }
3168#endif
3169 // Construct a pipeline for scheduling and code generation.
3170 ZoneStats zone_stats(isolate->allocator());
3171 NodeOriginTable* node_positions = info->zone()->New<NodeOriginTable>(graph);
3172 TFPipelineData data(&zone_stats, info, isolate, isolate->allocator(), graph,
3173 nullptr, schedule, nullptr, node_positions, nullptr,
3174 options, nullptr);
3175 turboshaft::PipelineData turboshaft_data(
3176 &zone_stats, turboshaft::TurboshaftPipelineKind::kCSA, isolate, info,
3177 options);
3178 PipelineJobScope scope(&data, isolate->counters()->runtime_call_stats());
3179 std::unique_ptr<TurbofanPipelineStatistics> pipeline_statistics;
3180 if (v8_flags.turbo_stats || v8_flags.turbo_stats_nvp) {
3181 pipeline_statistics.reset(new TurbofanPipelineStatistics(
3182 info, isolate->GetTurboStatistics(), &zone_stats));
3183 pipeline_statistics->BeginPhaseKind("V8.TFTestCodegen");
3184 }
3185
3186 PipelineImpl pipeline(&data);
3187 turboshaft::Pipeline turboshaft_pipeline(&turboshaft_data);
3188
3189 if (info->trace_turbo_json()) {
3190 TurboJsonFile json_of(info, std::ios_base::trunc);
3191 json_of << "{\"function\":\"" << info->GetDebugName().get()
3192 << "\", \"source\":\"\",\n\"phases\":[";
3193 }
3194 // TODO(rossberg): Should this really be untyped?
3195 pipeline.RunPrintAndVerify("V8.TFMachineCode", true);
3196
3197 // Ensure we have a schedule.
3198 if (data.schedule() == nullptr) {
3199 CHECK(pipeline.ComputeScheduledGraph());
3200 }
3201
3202 Linkage linkage(call_descriptor);
3203
3204 // We convert the turbofan graph to turboshaft.
3205 if (!turboshaft_pipeline.CreateGraphFromTurbofan(&data, &linkage)) {
3206 return {};
3207 }
3208
3209 // We need to run simplification to normalize some patterns for instruction
3210 // selection (e.g. loads and stores).
3211 if (!turboshaft_pipeline.RunSimplificationAndNormalizationPhase()) {
3212 return {};
3213 }
3214
3215 if (!GenerateCodeFromTurboshaftGraph(&linkage, turboshaft_pipeline, nullptr,
3216 data.osr_helper_ptr())) {
3217 return {};
3218 }
3219
3220 MaybeHandle<Code> maybe_code = turboshaft_pipeline.FinalizeCode();
3221 Handle<Code> code;
3222 if (maybe_code.ToHandle(&code)) {
3223 return code;
3224 }
3225 return {};
3226}
3227
3228// static
3229MaybeHandle<Code> Pipeline::GenerateTurboshaftCodeForTesting(
3230 CallDescriptor* call_descriptor, turboshaft::PipelineData* data) {
3231 Isolate* isolate = data->isolate();
3232 OptimizedCompilationInfo* info = data->info();
3233 PipelineJobScope scope(data, isolate->counters()->runtime_call_stats());
3234 std::unique_ptr<TurbofanPipelineStatistics> pipeline_statistics;
3235 if (v8_flags.turbo_stats || v8_flags.turbo_stats_nvp) {
3236 pipeline_statistics.reset(new TurbofanPipelineStatistics(
3237 info, isolate->GetTurboStatistics(), data->zone_stats()));
3238 pipeline_statistics->BeginPhaseKind("V8.TFTestCodegen");
3239 }
3240
3241 turboshaft::Pipeline pipeline(data);
3242
3243 if (info->trace_turbo_json()) {
3244 {
3245 TurboJsonFile json_of(info, std::ios_base::trunc);
3246 json_of << "{\"function\":\"" << info->GetDebugName().get()
3247 << "\", \"source\":\"\",\n\"phases\":[";
3248 }
3249 {
3250 UnparkedScopeIfNeeded unparked_scope(data->broker());
3251 AllowHandleDereference allow_deref;
3252
3253 TurboJsonFile json_of(data->info(), std::ios_base::app);
3254 turboshaft::PrintTurboshaftGraphForTurbolizer(
3255 json_of, data->graph(), "V8.TSMachineCode", data->node_origins(),
3256 data->graph_zone());
3257 }
3258 }
3259
3260 info->tick_counter().TickAndMaybeEnterSafepoint();
3261
3262 data->InitializeCodegenComponent(nullptr);
3263
3264 Handle<Code> code;
3265 if (pipeline.GenerateCode(call_descriptor).ToHandle(&code) &&
3266 pipeline.CommitDependencies(code)) {
3267 return code;
3268 }
3269 return {};
3270}
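// Illustrative note, not part of the original pipeline.cc: unlike the
// GenerateCodeForTesting overload above, which still builds and converts a
// Turbofan graph/schedule, this helper assumes the caller already holds a
// fully built Turboshaft graph in `data`; it only initializes the codegen
// component and runs the backend (instruction selection through code
// generation) via turboshaft::Pipeline::GenerateCode before committing the
// code dependencies.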
3271
3272// static
3273std::unique_ptr<TurbofanCompilationJob> Pipeline::NewCompilationJob(
3274 Isolate* isolate, Handle<JSFunction> function, CodeKind code_kind,
3275 bool has_script, BytecodeOffset osr_offset) {
3276 Handle<SharedFunctionInfo> shared(function->shared(), isolate);
3277 return std::make_unique<PipelineCompilationJob>(isolate, shared, function,
3278 osr_offset, code_kind);
3279}
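// Illustrative sketch, not part of the original pipeline.cc: callers drive
// the returned job through the generic OptimizedCompilationJob protocol,
// roughly as below; exact signatures and status handling vary across V8
// versions, and `stats` stands for the caller's RuntimeCallStats.
//
//   std::unique_ptr<TurbofanCompilationJob> job = Pipeline::NewCompilationJob(
//       isolate, function, code_kind, /*has_script=*/true);
//   if (job->PrepareJob(isolate) == CompilationJob::SUCCEEDED &&
//       job->ExecuteJob(stats) == CompilationJob::SUCCEEDED) {
//     // ExecuteJob may also run on a background thread with a LocalIsolate.
//     job->FinalizeJob(isolate);
//   }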
3280
3281bool PipelineImpl::ComputeScheduledGraph() {
3282 TFPipelineData* data = this->data_;
3283
3284 // We should only schedule the graph if it is not scheduled yet.
3285 DCHECK_NULL(data->schedule());
3286
3287 RUN_MAYBE_ABORT(ComputeSchedulePhase);
3288 TraceScheduleAndVerify(data->info(), data, data->schedule(), "schedule");
3289 return true;
3290}
3291
3292#if V8_ENABLE_WASM_SIMD256_REVEC
3293bool PipelineImpl::Revectorize() { return Run<RevectorizePhase>(); }
3294#endif // V8_ENABLE_WASM_SIMD256_REVEC
3295
3296OptimizedCompilationInfo* PipelineImpl::info() const { return data_->info(); }
3297
3298Isolate* PipelineImpl::isolate() const { return data_->isolate(); }
3299
3300CodeGenerator* PipelineImpl::code_generator() const {
3301 return data_->code_generator();
3302}
3303
3304ObserveNodeManager* PipelineImpl::observe_node_manager() const {
3305 return data_->observe_node_manager();
3306}
3307
3308std::ostream& operator<<(std::ostream& out, const InstructionRangesAsJSON& s) {
3309 const int max = static_cast<int>(s.sequence->LastInstructionIndex());
3310
3311 out << ", \"nodeIdToInstructionRange\": {";
3312 bool need_comma = false;
3313 for (size_t i = 0; i < s.instr_origins->size(); ++i) {
3314 std::pair<int, int> offset = (*s.instr_origins)[i];
3315 if (offset.first == -1) continue;
3316 const int first = max - offset.first + 1;
3317 const int second = max - offset.second + 1;
3318 if (need_comma) out << ", ";
3319 out << "\"" << i << "\": [" << first << ", " << second << "]";
3320 need_comma = true;
3321 }
3322 out << "}";
3323 out << ", \"blockIdToInstructionRange\": {";
3324 need_comma = false;
3325 for (auto block : s.sequence->instruction_blocks()) {
3326 if (need_comma) out << ", ";
3327 out << "\"" << block->rpo_number() << "\": [" << block->code_start() << ", "
3328 << block->code_end() << "]";
3329 need_comma = true;
3330 }
3331 out << "}";
3332 return out;
3333}
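// Illustrative note, not part of the original pipeline.cc: streamed into the
// Turbolizer JSON trace, the operator above appends fragments of the shape
//
//   , "nodeIdToInstructionRange": {"4": [10, 12], "7": [13, 13]}
//   , "blockIdToInstructionRange": {"0": [0, 9], "1": [10, 14]}
//
// keyed by node id and RPO block number respectively; the ids and instruction
// ranges above are made-up example values.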
3334
3335} // namespace compiler
3336} // namespace internal
3337} // namespace v8