implementation-visitor.cc
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/torque/implementation-visitor.h"

#include <algorithm>
#include <iomanip>
#include <optional>
#include <string>

#include "src/common/globals.h"
#include "src/numbers/integer-literal-inl.h"
#include "src/torque/cc-generator.h"
#include "src/torque/cfg.h"
#include "src/torque/constants.h"
#include "src/torque/cpp-builder.h"
#include "src/torque/csa-generator.h"
#include "src/torque/declaration-visitor.h"
#include "src/torque/global-context.h"
#include "src/torque/kythe-data.h"
#include "src/torque/parameter-difference.h"
#include "src/torque/server-data.h"
#include "src/torque/source-positions.h"
#include "src/torque/type-inference.h"
#include "src/torque/type-visitor.h"
#include "src/torque/types.h"
#include "src/torque/utils.h"

namespace v8::internal::torque {

uint64_t ImplementationVisitor::next_unique_binding_index_ = 0;

// Sadly, 'using std::string_literals::operator""s;' is bugged in MSVC (see
// https://developercommunity.visualstudio.com/t/Incorrect-warning-when-using-standard-st/673948).
// TODO(nicohartmann@): Change to 'using std::string_literals::operator""s;'
// once this is fixed.
using namespace std::string_literals;  // NOLINT(build/namespaces)

namespace {
const char* BuiltinIncludesMarker = "// __BUILTIN_INCLUDES_MARKER__\n";
}  // namespace

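// Dispatch an expression AST node to the Visit overload matching its
// dynamic kind.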
VisitResult ImplementationVisitor::Visit(Expression* expr) {
  CurrentSourcePosition::Scope scope(expr->pos);
  switch (expr->kind) {
#define ENUM_ITEM(name)        \
  case AstNode::Kind::k##name: \
    return Visit(name::cast(expr));
    AST_EXPRESSION_NODE_KIND_LIST(ENUM_ITEM)
#undef ENUM_ITEM
    default:
      UNREACHABLE();
  }
}

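// Statement dispatch mirrors the expression dispatch above. The DCHECK ties
// the returned type to the CFG: a statement yields the never type exactly
// when it has closed off the current block.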
const Type* ImplementationVisitor::Visit(Statement* stmt) {
  CurrentSourcePosition::Scope scope(stmt->pos);
  StackScope stack_scope(this);
  const Type* result;
  switch (stmt->kind) {
#define ENUM_ITEM(name)               \
  case AstNode::Kind::k##name:        \
    result = Visit(name::cast(stmt)); \
    break;
    AST_STATEMENT_NODE_KIND_LIST(ENUM_ITEM)
#undef ENUM_ITEM
    default:
      UNREACHABLE();
  }
  DCHECK_EQ(result == TypeOracle::GetNeverType(),
            assembler().CurrentBlockIsComplete());
  return result;
}

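// Write the prologue of every generated per-source file: the CSA .cc and .h
// streams plus the class-definition .cc stream.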
void ImplementationVisitor::BeginGeneratedFiles() {
  std::set<SourceId> contains_class_definitions;
  std::set<SourceId> contains_class_asserts;
  for (const ClassType* type : TypeOracle::GetClasses()) {
    if (type->ShouldGenerateCppClassDefinitions()) {
      contains_class_definitions.insert(type->AttributedToFile());
    }
    if (type->ShouldGenerateCppObjectDefinitionAsserts() ||
        type->ShouldGenerateCppObjectLayoutDefinitionAsserts()) {
      contains_class_asserts.insert(type->AttributedToFile());
    }
  }

  for (SourceId source : SourceFileMap::AllSources()) {
    auto& streams = GlobalContext::GeneratedPerFile(source);
    // Output beginning of CSA .cc file.
    {
      cpp::File& file = streams.csa_cc;

      for (const std::string& include_path : GlobalContext::CppIncludes()) {
        file << "#include " << StringLiteralQuote(include_path) << "\n";
      }
      file << "#include \"src/codegen/code-stub-assembler-inl.h\"\n";

      file << "// Required Builtins:\n";
      file << "#include \"torque-generated/" +
                  SourceFileMap::PathFromV8RootWithoutExtension(source) +
                  "-tq-csa.h\"\n";
      // Now that required include files are collected while generating the
      // file, we only know the full set at the end. Insert a marker here that
      // is replaced with the list of includes at the very end.
      // TODO(nicohartmann@): This is not the most beautiful way to do this,
      // replace once the cpp file builder is available, where this can be
      // handled easily.
      file << BuiltinIncludesMarker;
      file << "\n";

      streams.csa_cc.BeginNamespace("v8", "internal");
      streams.csa_ccfile << "\n";
    }
    // Output beginning of CSA .h file.
    {
      cpp::File& file = streams.csa_header;
      std::string header_define =
          "V8_GEN_TORQUE_GENERATED_" +
          UnderlinifyPath(SourceFileMap::PathFromV8Root(source)) + "_H_";
      streams.csa_header.BeginIncludeGuard(header_define);
      file << "#include \"src/builtins/torque-csa-header-includes.h\"\n";
      file << "\n";

      streams.csa_header.BeginNamespace("v8", "internal");
      streams.csa_headerfile << "\n";
    }
    // Output beginning of class definition .cc file.
    {
      cpp::File& file = streams.class_definition_cc;
      if (contains_class_definitions.count(source) != 0) {
        file << "#include \""
             << SourceFileMap::PathFromV8RootWithoutExtension(source)
             << "-inl.h\"\n\n";
        file << "#include \"torque-generated/class-verifiers.h\"\n";
        file << "#include \"src/objects/instance-type-inl.h\"\n\n";
      }
      if (contains_class_asserts.count(source) != 0) {
        file << "#include \""
             << SourceFileMap::PathFromV8RootWithoutExtension(source)
             << ".h\"\n\n";
      }

      streams.class_definition_cc.BeginNamespace("v8", "internal");
      streams.class_definition_ccfile << "\n";
    }
  }
}

void ImplementationVisitor::EndGeneratedFiles() {
  for (SourceId file : SourceFileMap::AllSources()) {
    auto& streams = GlobalContext::GeneratedPerFile(file);

    // Output ending of CSA .cc file.
    streams.csa_cc.EndNamespace("v8", "internal");

    // Output ending of CSA .h file.
    {
      std::string header_define =
          "V8_GEN_TORQUE_GENERATED_" +
          UnderlinifyPath(SourceFileMap::PathFromV8Root(file)) + "_H_";

      streams.csa_header.EndNamespace("v8", "internal");
      streams.csa_headerfile << "\n";
      streams.csa_header.EndIncludeGuard(header_define);
    }

    // Output ending of class definition .cc file.
    streams.class_definition_cc.EndNamespace("v8", "internal");
  }
}

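// The debug-macros files back the postmortem debugging helpers under
// tools/debug_helper; their prologue and epilogue are written by hand since
// no cpp::File builder is used for these streams yet.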
void ImplementationVisitor::BeginDebugMacrosFile() {
  // TODO(torque-builder): Can use builder for debug_macros_*_
  std::ostream& source = debug_macros_cc_;
  std::ostream& header = debug_macros_h_;

  source << "#include \"torque-generated/debug-macros.h\"\n\n";
  source << "#include \"src/objects/swiss-name-dictionary.h\"\n";
  source << "#include \"src/objects/ordered-hash-table.h\"\n";
  source << "#include \"src/torque/runtime-support.h\"\n";
  source << "#include \"tools/debug_helper/debug-macro-shims.h\"\n";
  source << "#include \"include/v8-internal.h\"\n";
  source << "\n";

  source << "namespace v8 {\n"
         << "namespace internal {\n"
         << "namespace debug_helper_internal {\n"
         << "\n";

  const char* kHeaderDefine = "V8_GEN_TORQUE_GENERATED_DEBUG_MACROS_H_";
  header << "#ifndef " << kHeaderDefine << "\n";
  header << "#define " << kHeaderDefine << "\n\n";
  header << "#include \"tools/debug_helper/debug-helper-internal.h\"\n";
  header << "#include \"src/numbers/integer-literal.h\"\n";
  header << "\n";

  header << "namespace v8 {\n"
         << "namespace internal {\n"
         << "namespace debug_helper_internal {\n"
         << "\n";
}

void ImplementationVisitor::EndDebugMacrosFile() {
  // TODO(torque-builder): Can use builder for debug_macros_*_
  std::ostream& source = debug_macros_cc_;
  std::ostream& header = debug_macros_h_;

  source << "}  // namespace debug_helper_internal\n"
         << "}  // namespace internal\n"
         << "}  // namespace v8\n"
         << "\n";

  header << "\n}  // namespace debug_helper_internal\n"
         << "}  // namespace internal\n"
         << "}  // namespace v8\n"
         << "\n";
  header << "#endif  // V8_GEN_TORQUE_GENERATED_DEBUG_MACROS_H_\n";
}

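// A namespace constant is compiled as a parameterless CSA function that
// evaluates the constant's body and returns the converted result.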
void ImplementationVisitor::Visit(NamespaceConstant* decl) {
  Signature signature{{}, std::nullopt, {{}, false}, 0,
                      decl->type(), {}, false};

  BindingsManagersScope bindings_managers_scope;

  cpp::Function f =
      GenerateFunction(nullptr, decl->external_name(), signature, {});

  f.PrintDeclaration(csa_headerfile());

  f.PrintDefinition(csa_ccfile(), [&](std::ostream& stream) {
    stream << "  compiler::CodeAssembler ca_(state_);\n";

    DCHECK(!signature.return_type->IsVoidOrNever());

    assembler_ = CfgAssembler(Stack<const Type*>{});

    VisitResult expression_result = Visit(decl->body());
    VisitResult return_result =
        GenerateImplicitConvert(signature.return_type, expression_result);

    CSAGenerator csa_generator{assembler().Result(), stream};
    Stack<std::string> values = *csa_generator.EmitGraph(Stack<std::string>{});

    assembler_ = std::nullopt;

    stream << "  return ";
    CSAGenerator::EmitCSAValue(return_result, values, stream);
    stream << ";";
  });
}

void ImplementationVisitor::Visit(TypeAlias* alias) {
  if (alias->IsRedeclaration()) return;
  if (const ClassType* class_type = ClassType::DynamicCast(alias->type())) {
    if (class_type->IsExtern() && !class_type->nspace()->IsDefaultNamespace()) {
      Error(
          "extern classes are currently only supported in the default "
          "namespace");
    }
  }
}

class ImplementationVisitor::MacroInliningScope {
 public:
  MacroInliningScope(ImplementationVisitor* visitor, const Macro* macro)
      : visitor_(visitor), macro_(macro) {
    if (!visitor_->inlining_macros_.insert(macro).second) {
      // Recursive macro expansion would just keep going until stack overflow.
      // To avoid crashes, throw an error immediately.
      ReportError("Recursive macro call to ", *macro);
    }
  }
  ~MacroInliningScope() { visitor_->inlining_macros_.erase(macro_); }

 private:
  ImplementationVisitor* visitor_;
  const Macro* macro_;
};

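// Expand a macro body directly into the current CFG: bind 'this' and the
// parameters, bind the caller-provided label blocks, then visit the body and
// route any returns through a synthetic _macro_end label.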
VisitResult ImplementationVisitor::InlineMacro(
    Macro* macro, std::optional<LocationReference> this_reference,
    const std::vector<VisitResult>& arguments,
    const std::vector<Block*> label_blocks) {
  MacroInliningScope macro_inlining_scope(this, macro);
  CurrentScope::Scope current_scope(macro);
  BindingsManagersScope bindings_managers_scope;
  CurrentCallable::Scope current_callable(macro);
  CurrentReturnValue::Scope current_return_value;
  const Signature& signature = macro->signature();
  const Type* return_type = macro->signature().return_type;
  bool can_return = return_type != TypeOracle::GetNeverType();

  BlockBindings<LocalValue> parameter_bindings(&ValueBindingsManager::Get());
  BlockBindings<LocalLabel> label_bindings(&LabelBindingsManager::Get());
  DCHECK_EQ(macro->signature().parameter_names.size(),
            arguments.size() + (this_reference ? 1 : 0));
  DCHECK_EQ(this_reference.has_value(), macro->IsMethod());

  // Bind the this for methods. Methods that modify a struct-type "this" must
  // only be called if the this is in a variable, in which case the
  // LocalValue is non-const. Otherwise, the LocalValue used for the parameter
  // binding is const, and thus read-only, which will cause errors if
  // modified, e.g. when called by a struct method that sets the struct's
  // fields. This prevents using temporary struct values for anything other
  // than read operations.
  if (this_reference) {
    DCHECK(macro->IsMethod());
    parameter_bindings.Add(kThisParameterName, LocalValue{*this_reference},
                           true);
    // TODO(v8:12261): Tracking 'this'-binding for kythe led to a few weird
    // issues. Review to fully support 'this' in methods.
  }

  size_t count = 0;
  for (const auto& arg : arguments) {
    if (this_reference && count == signature.implicit_count) count++;
    const bool mark_as_used = signature.implicit_count > count;
    const Identifier* name = macro->parameter_names()[count++];
    Binding<LocalValue>* binding =
        parameter_bindings.Add(name,
                               LocalValue{LocationReference::Temporary(
                                   arg, "parameter " + name->value)},
                               mark_as_used);
    if (GlobalContext::collect_kythe_data()) {
      KytheData::AddBindingDefinition(binding);
    }
  }

  DCHECK_EQ(label_blocks.size(), signature.labels.size());
  for (size_t i = 0; i < signature.labels.size(); ++i) {
    const LabelDeclaration& label_info = signature.labels[i];
    Binding<LocalLabel>* binding = label_bindings.Add(
        label_info.name, LocalLabel{label_blocks[i], label_info.types});
    if (GlobalContext::collect_kythe_data()) {
      KytheData::AddBindingDefinition(binding);
    }
  }

  Block* macro_end;
  std::optional<Binding<LocalLabel>> macro_end_binding;
  if (can_return) {
    Stack<const Type*> stack = assembler().CurrentStack();
    std::vector<const Type*> lowered_return_types = LowerType(return_type);
    stack.PushMany(lowered_return_types);
    if (!return_type->IsConstexpr()) {
      SetReturnValue(VisitResult(return_type,
                                 stack.TopRange(lowered_return_types.size())));
    }
    // The stack copy used to initialize the _macro_end block is only used
    // as a template for the actual gotos generated by return statements. It
    // doesn't correspond to any real return values, and thus shouldn't contain
    // top types, because these would pollute actual return value types that get
    // unioned with them for return statements, erroneously forcing them to top.
    for (auto i = stack.begin(); i != stack.end(); ++i) {
      if ((*i)->IsTopType()) {
        *i = TopType::cast(*i)->source_type();
      }
    }
    macro_end = assembler().NewBlock(std::move(stack));
    macro_end_binding.emplace(&LabelBindingsManager::Get(), kMacroEndLabelName,
                              LocalLabel{macro_end, {return_type}});
  } else {
    SetReturnValue(VisitResult::NeverResult());
  }

  const Type* result = Visit(*macro->body());

  if (result->IsNever()) {
    if (!return_type->IsNever() && !macro->HasReturns()) {
      std::stringstream s;
      s << "macro " << macro->ReadableName()
        << " that never returns must have return type never";
      ReportError(s.str());
    }
  } else {
    if (return_type->IsNever()) {
      std::stringstream s;
      s << "macro " << macro->ReadableName()
        << " has implicit return at end of its declaration but return type "
           "never";
      ReportError(s.str());
    } else if (!macro->signature().return_type->IsVoid()) {
      std::stringstream s;
      s << "macro " << macro->ReadableName()
        << " expects to return a value but doesn't on all paths";
      ReportError(s.str());
    }
  }
  if (!result->IsNever()) {
    assembler().Goto(macro_end);
  }

  if (macro->HasReturns() || !result->IsNever()) {
    assembler().Bind(macro_end);
  }

  return GetAndClearReturnValue();
}

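// Shared code generation for Torque macros and methods: lower the parameters
// onto the CFG stack, inline the macro body, materialize its labels, and emit
// the result through the CSA, CC, or CC-debug generator depending on
// output_type_.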
void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
  CurrentCallable::Scope current_callable(macro);
  const Signature& signature = macro->signature();
  const Type* return_type = macro->signature().return_type;
  bool can_return = return_type != TypeOracle::GetNeverType();
  bool has_return_value =
      can_return && return_type != TypeOracle::GetVoidType();

  cpp::Function f = GenerateMacroFunctionDeclaration(macro);
  f.PrintDeclaration(csa_headerfile());
  csa_headerfile() << "\n";

  cpp::File csa_cc(csa_ccfile());

  // Avoid multiple-definition errors since it is possible for multiple
  // generated -inl.inc files to all contain function definitions for the same
  // Torque macro.
  std::optional<cpp::IncludeGuardScope> include_guard;
  if (output_type_ == OutputType::kCC) {
    include_guard.emplace(&csa_cc, "V8_INTERNAL_DEFINED_"s + macro->CCName());
  } else if (output_type_ == OutputType::kCCDebug) {
    include_guard.emplace(&csa_cc,
                          "V8_INTERNAL_DEFINED_"s + macro->CCDebugName());
  }

  f.PrintBeginDefinition(csa_ccfile());

  if (output_type_ == OutputType::kCC) {
    // For now, generated C++ is only for field offset computations. If we ever
    // generate C++ code that can allocate, then it should be handlified.
    csa_ccfile() << "  DisallowGarbageCollection no_gc;\n";
  } else if (output_type_ == OutputType::kCSA) {
    csa_ccfile() << "  compiler::CodeAssembler ca_(state_);\n";
    csa_ccfile()
        << "  compiler::CodeAssembler::SourcePositionScope pos_scope(&ca_);\n";
  }

  Stack<std::string> lowered_parameters;
  Stack<const Type*> lowered_parameter_types;

  std::vector<VisitResult> arguments;

  std::optional<LocationReference> this_reference;
  if (Method* method = Method::DynamicCast(macro)) {
    const Type* this_type = method->aggregate_type();
    LowerParameter(this_type, ExternalParameterName(kThisParameterName),
                   &lowered_parameters);
    StackRange range = lowered_parameter_types.PushMany(LowerType(this_type));
    VisitResult this_result = VisitResult(this_type, range);
    // For classes, mark 'this' as a temporary to prevent assignment to it.
    // Note that using a VariableAccess for non-class types is technically
    // incorrect because changes to the 'this' variable do not get reflected
    // to the caller. Therefore struct methods should always be inlined and a
    // C++ version should never be generated, since it would be incorrect.
    // However, in order to be able to type- and semantics-check even unused
    // struct methods, set the this_reference to be the local variable copy of
    // the passed-in this, which allows the visitor to at least find and report
    // errors.
    this_reference =
        (this_type->IsClassType())
            ? LocationReference::Temporary(this_result, "this parameter")
            : LocationReference::VariableAccess(this_result);
  }

  for (size_t i = 0; i < macro->signature().parameter_names.size(); ++i) {
    if (this_reference && i == macro->signature().implicit_count) continue;
    const std::string& name = macro->parameter_names()[i]->value;
    std::string external_name = ExternalParameterName(name);
    const Type* type = macro->signature().types()[i];

    if (type->IsConstexpr()) {
      arguments.push_back(VisitResult(type, external_name));
    } else {
      LowerParameter(type, external_name, &lowered_parameters);
      StackRange range = lowered_parameter_types.PushMany(LowerType(type));
      arguments.push_back(VisitResult(type, range));
    }
  }

  DCHECK_EQ(lowered_parameters.Size(), lowered_parameter_types.Size());
  assembler_ = CfgAssembler(lowered_parameter_types);

  std::vector<Block*> label_blocks;
  for (const LabelDeclaration& label_info : signature.labels) {
    Stack<const Type*> label_input_stack;
    for (const Type* type : label_info.types) {
      label_input_stack.PushMany(LowerType(type));
    }
    Block* block = assembler().NewBlock(std::move(label_input_stack));
    label_blocks.push_back(block);
  }

  VisitResult return_value =
      InlineMacro(macro, this_reference, arguments, label_blocks);
  Block* end = assembler().NewBlock();
  if (return_type != TypeOracle::GetNeverType()) {
    assembler().Goto(end);
  }

  for (size_t i = 0; i < label_blocks.size(); ++i) {
    Block* label_block = label_blocks[i];
    const LabelDeclaration& label_info = signature.labels[i];
    assembler().Bind(label_block);
    std::vector<std::string> label_parameter_variables;
    for (size_t j = 0; j < label_info.types.size(); ++j) {
      LowerLabelParameter(label_info.types[j],
                          ExternalLabelParameterName(label_info.name->value, j),
                          &label_parameter_variables);
    }
    assembler().Emit(
        GotoExternalInstruction{ExternalLabelName(label_info.name->value),
                                std::move(label_parameter_variables)});
  }

  if (return_type != TypeOracle::GetNeverType()) {
    assembler().Bind(end);
  }

  std::optional<Stack<std::string>> values;
  if (output_type_ == OutputType::kCC) {
    CCGenerator cc_generator{assembler().Result(), csa_ccfile()};
    values = cc_generator.EmitGraph(lowered_parameters);
  } else if (output_type_ == OutputType::kCCDebug) {
    CCGenerator cc_generator{assembler().Result(), csa_ccfile(), true};
    values = cc_generator.EmitGraph(lowered_parameters);
  } else {
    CSAGenerator csa_generator{assembler().Result(), csa_ccfile()};
    values = csa_generator.EmitGraph(lowered_parameters);
  }

  assembler_ = std::nullopt;

  if (has_return_value) {
    csa_ccfile() << "  return ";
    if (output_type_ == OutputType::kCCDebug) {
      csa_ccfile() << "{d::MemoryAccessResult::kOk, ";
      CCGenerator::EmitCCValue(return_value, *values, csa_ccfile());
      csa_ccfile() << "}";
    } else if (output_type_ == OutputType::kCC) {
      CCGenerator::EmitCCValue(return_value, *values, csa_ccfile());
    } else {
      CSAGenerator::EmitCSAValue(return_value, *values, csa_ccfile());
    }
    csa_ccfile() << ";\n";
  }
  f.PrintEndDefinition(csa_ccfile());

  include_guard.reset();
}

void ImplementationVisitor::Visit(TorqueMacro* macro) {
  VisitMacroCommon(macro);
}

void ImplementationVisitor::Visit(Method* method) {
  DCHECK(!method->IsExternal());
  VisitMacroCommon(method);
}

namespace {

std::string AddParameter(size_t i, Builtin* builtin,
                         Stack<std::string>* parameters,
                         Stack<const Type*>* parameter_types,
                         BlockBindings<LocalValue>* parameter_bindings,
                         bool mark_as_used) {
  const Identifier* name = builtin->signature().parameter_names[i];
  const Type* type = builtin->signature().types()[i];
  std::string external_name = "parameter" + std::to_string(i);
  parameters->Push(external_name);
  StackRange range = parameter_types->PushMany(LowerType(type));
  Binding<LocalValue>* binding = parameter_bindings->Add(
      name,
      LocalValue{LocationReference::Temporary(VisitResult(type, range),
                                              "parameter " + name->value)},
      mark_as_used);
  if (GlobalContext::collect_kythe_data()) {
    KytheData::AddBindingDefinition(binding);
  }
  return external_name;
}

}  // namespace

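// Emit the TF_BUILTIN body for a Torque-defined builtin: set up the
// JavaScript or stub calling convention, bind the parameters, then generate
// CSA code for the builtin's body.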
void ImplementationVisitor::Visit(Builtin* builtin) {
  if (builtin->IsExternal()) return;
  CurrentScope::Scope current_scope(builtin);
  CurrentCallable::Scope current_callable(builtin);
  CurrentReturnValue::Scope current_return_value;

  const std::string& name = builtin->ExternalName();
  const Signature& signature = builtin->signature();
  csa_ccfile() << "TF_BUILTIN(" << name << ", CodeStubAssembler) {\n"
               << "  compiler::CodeAssemblerState* state_ = state();"
               << "  compiler::CodeAssembler ca_(state());\n";

  Stack<const Type*> parameter_types;
  Stack<std::string> parameters;

  BindingsManagersScope bindings_managers_scope;

  BlockBindings<LocalValue> parameter_bindings(&ValueBindingsManager::Get());

  if (builtin->IsVarArgsJavaScript() || builtin->IsFixedArgsJavaScript()) {
    if (builtin->IsVarArgsJavaScript()) {
      DCHECK(signature.parameter_types.var_args);
      if (signature.ExplicitCount() > 0) {
        Error("Cannot mix explicit parameters with varargs.")
            .Position(signature.parameter_names[signature.implicit_count]->pos);
      }

      csa_ccfile() << "  TNode<Word32T> argc = UncheckedParameter<Word32T>("
                   << "Descriptor::kJSActualArgumentsCount);\n";
      csa_ccfile() << "  TNode<IntPtrT> "
                      "arguments_length(ChangeInt32ToIntPtr(UncheckedCast<"
                      "Int32T>(argc)));\n";
      csa_ccfile() << "  TNode<RawPtrT> arguments_frame = "
                      "UncheckedCast<RawPtrT>(LoadFramePointer());\n";
      csa_ccfile()
          << "  TorqueStructArguments "
             "torque_arguments(GetFrameArguments(arguments_frame, "
             "arguments_length, FrameArgumentsArgcType::kCountIncludesReceiver"
          << "));\n";
      csa_ccfile()
          << "  CodeStubArguments arguments(this, torque_arguments);\n";

      parameters.Push("torque_arguments.frame");
      parameters.Push("torque_arguments.base");
      parameters.Push("torque_arguments.length");
      parameters.Push("torque_arguments.actual_count");
      const Type* arguments_type = TypeOracle::GetArgumentsType();
      StackRange range = parameter_types.PushMany(LowerType(arguments_type));
      parameter_bindings.Add(*signature.arguments_variable,
                             LocalValue{LocationReference::Temporary(
                                 VisitResult(arguments_type, range),
                                 "parameter " + *signature.arguments_variable)},
                             true);
    }

    for (size_t i = 0; i < signature.implicit_count; ++i) {
      const std::string& param_name = signature.parameter_names[i]->value;
      SourcePosition param_pos = signature.parameter_names[i]->pos;
      std::string generated_name = AddParameter(
          i, builtin, &parameters, &parameter_types, &parameter_bindings, true);
      const Type* actual_type = signature.parameter_types.types[i];
      std::vector<const Type*> expected_types;
      if (param_name == "context") {
        csa_ccfile() << "  TNode<NativeContext> " << generated_name
                     << " = UncheckedParameter<NativeContext>("
                     << "Descriptor::kContext);\n";
        csa_ccfile() << "  USE(" << generated_name << ");\n";
        expected_types = {TypeOracle::GetNativeContextType(),
                          TypeOracle::GetContextType()};
      } else if (param_name == "receiver") {
        csa_ccfile()
            << "  TNode<JSAny> " << generated_name << " = "
            << (builtin->IsVarArgsJavaScript()
                    ? "arguments.GetReceiver()"
                    : "UncheckedParameter<JSAny>(Descriptor::kReceiver)")
            << ";\n";
        csa_ccfile() << "  USE(" << generated_name << ");\n";
        expected_types = {TypeOracle::GetJSAnyType()};
      } else if (param_name == "newTarget") {
        csa_ccfile() << "  TNode<JSAny> " << generated_name
                     << " = UncheckedParameter<JSAny>("
                     << "Descriptor::kJSNewTarget);\n";
        csa_ccfile() << "  USE(" << generated_name << ");\n";
        expected_types = {TypeOracle::GetJSAnyType()};
      } else if (param_name == "target") {
        csa_ccfile() << "  TNode<JSFunction> " << generated_name
                     << " = UncheckedParameter<JSFunction>("
                     << "Descriptor::kJSTarget);\n";
        csa_ccfile() << "  USE(" << generated_name << ");\n";
        expected_types = {TypeOracle::GetJSFunctionType()};
      } else if (param_name == "dispatchHandle") {
        if (V8_JS_LINKAGE_INCLUDES_DISPATCH_HANDLE_BOOL) {
          csa_ccfile() << "  TNode<JSDispatchHandleT> " << generated_name
                       << " = "
                          "UncheckedParameter<JSDispatchHandleT>(Descriptor::"
                          "kJSDispatchHandle);\n";
        } else if (V8_ENABLE_LEAPTIERING_BOOL) {
          csa_ccfile() << "  TNode<JSDispatchHandleT> " << generated_name
                       << " = "
                          "ReinterpretCast<JSDispatchHandleT>("
                          "LoadJSFunctionDispatchHandle("
                          "UncheckedParameter<JSFunction>("
                       << "Descriptor::kJSTarget)));\n";
        } else {
          csa_ccfile() << "  TNode<JSDispatchHandleT> " << generated_name
                       << " = InvalidDispatchHandleConstant();\n";
        }
        csa_ccfile() << "  USE(" << generated_name << ");\n";
        expected_types = {TypeOracle::GetDispatchHandleType()};
      } else {
        Error(
            "Unexpected implicit parameter \"", param_name,
            "\" for JavaScript calling convention, "
            "expected \"context\", \"receiver\", \"target\", or \"newTarget\"")
            .Position(param_pos);
        expected_types = {actual_type};
      }
      if (std::find(expected_types.begin(), expected_types.end(),
                    actual_type) == expected_types.end()) {
        Error("According to JavaScript calling convention, expected parameter ",
              param_name, " to have type ", PrintList(expected_types, " or "),
              " but found type ", *actual_type)
            .Position(param_pos);
      }
    }

    for (size_t i = signature.implicit_count;
         i < signature.parameter_names.size(); ++i) {
      const std::string& parameter_name = signature.parameter_names[i]->value;
      const Type* type = signature.types()[i];
      const bool mark_as_used = signature.implicit_count > i;
      std::string var = AddParameter(i, builtin, &parameters, &parameter_types,
                                     &parameter_bindings, mark_as_used);
      csa_ccfile() << "  " << type->GetGeneratedTypeName() << " " << var
                   << " = "
                   << "UncheckedParameter<" << type->GetGeneratedTNodeTypeName()
                   << ">(Descriptor::k" << CamelifyString(parameter_name)
                   << ");\n";
      csa_ccfile() << "  USE(" << var << ");\n";
    }

  } else {
    DCHECK(builtin->IsStub());

    for (size_t i = 0; i < signature.parameter_names.size(); ++i) {
      const std::string& parameter_name = signature.parameter_names[i]->value;
      const Type* type = signature.types()[i];
      const bool mark_as_used = signature.implicit_count > i;
      std::string var = AddParameter(i, builtin, &parameters, &parameter_types,
                                     &parameter_bindings, mark_as_used);
      csa_ccfile() << "  " << type->GetGeneratedTypeName() << " " << var
                   << " = "
                   << "UncheckedParameter<" << type->GetGeneratedTNodeTypeName()
                   << ">(Descriptor::k" << CamelifyString(parameter_name)
                   << ");\n";
      csa_ccfile() << "  USE(" << var << ");\n";
    }
  }

  if (builtin->use_counter_name()) {
    DCHECK(!signature.parameter_types.types.empty());
    DCHECK(signature.parameter_types.types[0] ==
               TypeOracle::GetNativeContextType() ||
           signature.parameter_types.types[0] == TypeOracle::GetContextType());
    csa_ccfile() << "  CodeStubAssembler(state_).CallRuntime("
                 << "Runtime::kIncrementUseCounter, parameter0, "
                 << "CodeStubAssembler(state_).SmiConstant("
                 << *builtin->use_counter_name() << "));\n";
  }

  assembler_ = CfgAssembler(parameter_types);
  const Type* body_result = Visit(*builtin->body());
  if (body_result != TypeOracle::GetNeverType()) {
    ReportError("control reaches end of builtin, expected return of a value");
  }
  CSAGenerator csa_generator{assembler().Result(), csa_ccfile(),
                             builtin->kind()};
  csa_generator.EmitGraph(parameters);
  assembler_ = std::nullopt;
  csa_ccfile() << "}\n\n";
}

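// Two entry points for variable declarations: the first creates a throwaway
// binding scope for declarations appearing as standalone statements, the
// second installs the binding into the enclosing block's scope.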
const Type* ImplementationVisitor::Visit(VarDeclarationStatement* stmt) {
  BlockBindings<LocalValue> block_bindings(&ValueBindingsManager::Get());
  return Visit(stmt, &block_bindings);
}

const Type* ImplementationVisitor::Visit(
    VarDeclarationStatement* stmt, BlockBindings<LocalValue>* block_bindings) {
  // const qualified variables are required to be initialized properly.
  if (stmt->const_qualified && !stmt->initializer) {
    ReportError("local constant \"", stmt->name, "\" is not initialized.");
  }

  std::optional<const Type*> type;
  if (stmt->type) {
    type = TypeVisitor::ComputeType(*stmt->type);
  }
  std::optional<VisitResult> init_result;
  if (stmt->initializer) {
    StackScope scope(this);
    init_result = Visit(*stmt->initializer);
    if (type) {
      init_result = GenerateImplicitConvert(*type, *init_result);
    }
    type = init_result->type();
    if ((*type)->IsConstexpr() && !stmt->const_qualified) {
      Error("Use 'const' instead of 'let' for variable '", stmt->name->value,
            "' of constexpr type '", (*type)->ToString(), "'.")
          .Position(stmt->name->pos)
          .Throw();
    }
    init_result = scope.Yield(*init_result);
  } else {
    DCHECK(type.has_value());
    if ((*type)->IsConstexpr()) {
      ReportError("constexpr variables need an initializer");
    }
    TypeVector lowered_types = LowerType(*type);
    for (const Type* t : lowered_types) {
      assembler().Emit(PushUninitializedInstruction{TypeOracle::GetTopType(
          "uninitialized variable '" + stmt->name->value + "' of type " +
              t->ToString() + " originally defined at " +
              PositionAsString(stmt->pos),
          t)});
    }
    init_result =
        VisitResult(*type, assembler().TopRange(lowered_types.size()));
  }
  LocationReference ref = stmt->const_qualified
                              ? LocationReference::Temporary(
                                    *init_result, "const " + stmt->name->value)
                              : LocationReference::VariableAccess(*init_result);
  block_bindings->Add(stmt->name, LocalValue{std::move(ref)});
  return TypeOracle::GetVoidType();
}

const Type* ImplementationVisitor::Visit(TailCallStatement* stmt) {
  return Visit(stmt->call, true).type();
}

VisitResult ImplementationVisitor::Visit(ConditionalExpression* expr) {
  Block* true_block = assembler().NewBlock(assembler().CurrentStack());
  Block* false_block = assembler().NewBlock(assembler().CurrentStack());
  Block* done_block = assembler().NewBlock();
  Block* true_conversion_block = assembler().NewBlock();
  GenerateExpressionBranch(expr->condition, true_block, false_block);

  VisitResult left;
  VisitResult right;

  {
    // The code for both paths of the conditional needs to be generated first
    // before evaluating the conditional expression because the common type of
    // the result of both the true and false of the condition needs to be known
    // to convert both branches to a common type.
    assembler().Bind(true_block);
    StackScope left_scope(this);
    left = Visit(expr->if_true);
    assembler().Goto(true_conversion_block);

    const Type* common_type;
    {
      assembler().Bind(false_block);
      StackScope right_scope(this);
      right = Visit(expr->if_false);
      common_type = GetCommonType(left.type(), right.type());
      right = right_scope.Yield(GenerateImplicitConvert(common_type, right));
      assembler().Goto(done_block);
    }

    assembler().Bind(true_conversion_block);
    left = left_scope.Yield(GenerateImplicitConvert(common_type, left));
    assembler().Goto(done_block);
  }

  assembler().Bind(done_block);
  CHECK_EQ(left, right);
  return left;
}

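// The || and && visitors below constant-fold when both operands are
// constexpr bool; otherwise they emit a branch that short-circuits to a bool
// constant on one side.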
VisitResult ImplementationVisitor::Visit(LogicalOrExpression* expr) {
  StackScope outer_scope(this);
  VisitResult left_result = Visit(expr->left);

  if (left_result.type()->IsConstexprBool()) {
    VisitResult right_result = Visit(expr->right);
    if (!right_result.type()->IsConstexprBool()) {
      ReportError(
          "expected type constexpr bool on right-hand side of operator "
          "||");
    }
    return VisitResult(TypeOracle::GetConstexprBoolType(),
                       std::string("(") + left_result.constexpr_value() +
                           " || " + right_result.constexpr_value() + ")");
  }

  Block* true_block = assembler().NewBlock();
  Block* false_block = assembler().NewBlock();
  Block* done_block = assembler().NewBlock();

  left_result = GenerateImplicitConvert(TypeOracle::GetBoolType(), left_result);
  GenerateBranch(left_result, true_block, false_block);

  assembler().Bind(true_block);
  VisitResult true_result = GenerateBoolConstant(true);
  assembler().Goto(done_block);

  assembler().Bind(false_block);
  VisitResult false_result;
  {
    StackScope false_block_scope(this);
    false_result = false_block_scope.Yield(
        GenerateImplicitConvert(TypeOracle::GetBoolType(), Visit(expr->right)));
  }
  assembler().Goto(done_block);

  assembler().Bind(done_block);
  DCHECK_EQ(true_result, false_result);
  return outer_scope.Yield(true_result);
}

VisitResult ImplementationVisitor::Visit(LogicalAndExpression* expr) {
  StackScope outer_scope(this);
  VisitResult left_result = Visit(expr->left);

  if (left_result.type()->IsConstexprBool()) {
    VisitResult right_result = Visit(expr->right);
    if (!right_result.type()->IsConstexprBool()) {
      ReportError(
          "expected type constexpr bool on right-hand side of operator "
          "&&");
    }
    return VisitResult(TypeOracle::GetConstexprBoolType(),
                       std::string("(") + left_result.constexpr_value() +
                           " && " + right_result.constexpr_value() + ")");
  }

  Block* true_block = assembler().NewBlock();
  Block* false_block = assembler().NewBlock();
  Block* done_block = assembler().NewBlock();

  left_result = GenerateImplicitConvert(TypeOracle::GetBoolType(), left_result);
  GenerateBranch(left_result, true_block, false_block);

  assembler().Bind(true_block);
  VisitResult true_result;
  {
    StackScope true_block_scope(this);
    VisitResult right_result = Visit(expr->right);
    if (TryGetSourceForBitfieldExpression(expr->left) != nullptr &&
        TryGetSourceForBitfieldExpression(expr->right) != nullptr &&
        TryGetSourceForBitfieldExpression(expr->left)->value ==
            TryGetSourceForBitfieldExpression(expr->right)->value) {
      Lint(
          "Please use & rather than && when checking multiple bitfield "
          "values, to avoid complexity in generated code.");
    }
    true_result = true_block_scope.Yield(
        GenerateImplicitConvert(TypeOracle::GetBoolType(), right_result));
  }
  assembler().Goto(done_block);

  assembler().Bind(false_block);
  VisitResult false_result = GenerateBoolConstant(false);
  assembler().Goto(done_block);

  assembler().Bind(done_block);
  DCHECK_EQ(true_result, false_result);
  return outer_scope.Yield(true_result);
}

VisitResult ImplementationVisitor::Visit(IncrementDecrementExpression* expr) {
  StackScope scope(this);
  LocationReference location_ref = GetLocationReference(expr->location);
  VisitResult current_value = GenerateFetchFromLocation(location_ref);
  VisitResult one = {TypeOracle::GetConstInt31Type(), "1"};
  Arguments args;
  args.parameters = {current_value, one};
  VisitResult assignment_value = GenerateCall(
      expr->op == IncrementDecrementOperator::kIncrement ? "+" : "-", args);
  GenerateAssignToLocation(location_ref, assignment_value);
  return scope.Yield(expr->postfix ? current_value : assignment_value);
}

VisitResult ImplementationVisitor::Visit(AssignmentExpression* expr) {
  StackScope scope(this);
  LocationReference location_ref = GetLocationReference(expr->location);
  VisitResult assignment_value;
  if (expr->op) {
    VisitResult location_value = GenerateFetchFromLocation(location_ref);
    assignment_value = Visit(expr->value);
    Arguments args;
    args.parameters = {location_value, assignment_value};
    assignment_value = GenerateCall(*expr->op, args);
    GenerateAssignToLocation(location_ref, assignment_value);
  } else {
    assignment_value = Visit(expr->value);
    GenerateAssignToLocation(location_ref, assignment_value);
  }
  return scope.Yield(assignment_value);
}

VisitResult ImplementationVisitor::Visit(FloatingPointLiteralExpression* expr) {
  const Type* result_type = TypeOracle::GetConstFloat64Type();
  std::stringstream str;
  str << std::setprecision(std::numeric_limits<double>::digits10 + 1)
      << expr->value;
  return VisitResult{result_type, str.str()};
}

VisitResult ImplementationVisitor::Visit(IntegerLiteralExpression* expr) {
  const Type* result_type = TypeOracle::GetIntegerLiteralType();
  std::stringstream str;
  str << "IntegerLiteral("
      << (expr->value.is_negative() ? "true, 0x" : "false, 0x") << std::hex
      << expr->value.absolute_value() << std::dec << "ull)";
  return VisitResult{result_type, str.str()};
}

VisitResult ImplementationVisitor::Visit(AssumeTypeImpossibleExpression* expr) {
  VisitResult result = Visit(expr->expression);
  const Type* result_type = SubtractType(
      result.type(), TypeVisitor::ComputeType(expr->excluded_type));
  if (result_type->IsNever()) {
    ReportError("unreachable code");
  }
  CHECK_EQ(LowerType(result_type), TypeVector{result_type});
  assembler().Emit(UnsafeCastInstruction{result_type});
  result.SetType(result_type);
  return result;
}

VisitResult ImplementationVisitor::Visit(StringLiteralExpression* expr) {
  return VisitResult{
      TypeOracle::GetConstStringType(),
      "\"" + expr->literal.substr(1, expr->literal.size() - 2) + "\""};
}

VisitResult ImplementationVisitor::GetBuiltinCode(Builtin* builtin) {
  if (builtin->IsExternal() || builtin->kind() != Builtin::kStub) {
    ReportError(
        "creating function pointers is only allowed for internal builtins with "
        "stub linkage");
  }
  const Type* type = TypeOracle::GetBuiltinPointerType(
      builtin->signature().parameter_types.types,
      builtin->signature().return_type);
  assembler().Emit(
      PushBuiltinPointerInstruction{builtin->ExternalName(), type});
  return VisitResult(type, assembler().TopRange(1));
}

VisitResult ImplementationVisitor::Visit(LocationExpression* expr) {
  StackScope scope(this);
  return scope.Yield(GenerateFetchFromLocation(GetLocationReference(expr)));
}

VisitResult ImplementationVisitor::Visit(FieldAccessExpression* expr) {
  StackScope scope(this);
  LocationReference location = GetLocationReference(expr);
  if (location.IsBitFieldAccess()) {
    if (auto* identifier = IdentifierExpression::DynamicCast(expr->object)) {
      bitfield_expressions_[expr] = identifier->name;
    }
  }
  return scope.Yield(GenerateFetchFromLocation(location));
}

const Type* ImplementationVisitor::Visit(GotoStatement* stmt) {
  Binding<LocalLabel>* label = LookupLabel(stmt->label->value);
  size_t parameter_count = label->parameter_types.size();
  if (stmt->arguments.size() != parameter_count) {
    ReportError("goto to label has incorrect number of parameters (expected ",
                parameter_count, " found ", stmt->arguments.size(), ")");
  }

  if (GlobalContext::collect_language_server_data()) {
    LanguageServerData::AddDefinition(stmt->label->pos,
                                      label->declaration_position());
  }
  if (GlobalContext::collect_kythe_data()) {
    KytheData::AddBindingUse(stmt->label->pos, label);
  }

  size_t i = 0;
  StackRange arguments = assembler().TopRange(0);
  for (Expression* e : stmt->arguments) {
    StackScope scope(this);
    VisitResult result = Visit(e);
    const Type* parameter_type = label->parameter_types[i++];
    result = GenerateImplicitConvert(parameter_type, result);
    arguments.Extend(scope.Yield(result).stack_range());
  }

  assembler().Goto(label->block, arguments.Size());
  return TypeOracle::GetNeverType();
}

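// Constexpr conditions are resolved at Torque compile time via
// ConstexprBranchInstruction; runtime conditions lower to a regular CFG
// branch with an optional else block.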
const Type* ImplementationVisitor::Visit(IfStatement* stmt) {
  bool has_else = stmt->if_false.has_value();

  if (stmt->is_constexpr) {
    VisitResult expression_result = Visit(stmt->condition);

    if (!(expression_result.type() == TypeOracle::GetConstexprBoolType())) {
      std::stringstream stream;
      stream << "expression should return type constexpr bool "
             << "but returns type " << *expression_result.type();
      ReportError(stream.str());
    }

    Block* true_block = assembler().NewBlock();
    Block* false_block = assembler().NewBlock();
    Block* done_block = assembler().NewBlock();

    assembler().Emit(ConstexprBranchInstruction{
        expression_result.constexpr_value(), true_block, false_block});

    assembler().Bind(true_block);
    const Type* left_result = Visit(stmt->if_true);
    if (left_result == TypeOracle::GetVoidType()) {
      assembler().Goto(done_block);
    }

    assembler().Bind(false_block);
    const Type* right_result = TypeOracle::GetVoidType();
    if (has_else) {
      right_result = Visit(*stmt->if_false);
    }
    if (right_result == TypeOracle::GetVoidType()) {
      assembler().Goto(done_block);
    }

    if (left_result->IsNever() != right_result->IsNever()) {
      std::stringstream stream;
      stream << "either both or neither branches in a constexpr if statement "
                "must reach their end at"
             << PositionAsString(stmt->pos);
      ReportError(stream.str());
    }

    if (left_result != TypeOracle::GetNeverType()) {
      assembler().Bind(done_block);
    }
    return left_result;
  } else {
    Block* true_block = assembler().NewBlock(assembler().CurrentStack(),
                                             IsDeferred(stmt->if_true));
    Block* false_block =
        assembler().NewBlock(assembler().CurrentStack(),
                             stmt->if_false && IsDeferred(*stmt->if_false));
    GenerateExpressionBranch(stmt->condition, true_block, false_block);

    Block* done_block;
    bool live = false;
    if (has_else) {
      done_block = assembler().NewBlock();
    } else {
      done_block = false_block;
      live = true;
    }

    assembler().Bind(true_block);
    {
      const Type* result = Visit(stmt->if_true);
      if (result == TypeOracle::GetVoidType()) {
        live = true;
        assembler().Goto(done_block);
      }
    }

    if (has_else) {
      assembler().Bind(false_block);
      const Type* result = Visit(*stmt->if_false);
      if (result == TypeOracle::GetVoidType()) {
        live = true;
        assembler().Goto(done_block);
      }
    }

    if (live) {
      assembler().Bind(done_block);
    }
    return live ? TypeOracle::GetVoidType() : TypeOracle::GetNeverType();
  }
}

const Type* ImplementationVisitor::Visit(WhileStatement* stmt) {
  Block* body_block = assembler().NewBlock(assembler().CurrentStack());
  Block* exit_block = assembler().NewBlock(assembler().CurrentStack());

  Block* header_block = assembler().NewBlock();
  assembler().Goto(header_block);

  assembler().Bind(header_block);
  GenerateExpressionBranch(stmt->condition, body_block, exit_block);

  assembler().Bind(body_block);
  {
    BreakContinueActivator activator{exit_block, header_block};
    const Type* body_result = Visit(stmt->body);
    if (body_result != TypeOracle::GetNeverType()) {
      assembler().Goto(header_block);
    }
  }

  assembler().Bind(exit_block);
  return TypeOracle::GetVoidType();
}

const Type* ImplementationVisitor::Visit(BlockStatement* block) {
  BlockBindings<LocalValue> block_bindings(&ValueBindingsManager::Get());
  const Type* type = TypeOracle::GetVoidType();
  for (Statement* s : block->statements) {
    CurrentSourcePosition::Scope source_position(s->pos);
    if (type->IsNever()) {
      ReportError("statement after non-returning statement");
    }
    if (auto* var_declaration = VarDeclarationStatement::DynamicCast(s)) {
      type = Visit(var_declaration, &block_bindings);
    } else {
      type = Visit(s);
    }
  }
  return type;
}

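// 'unreachable' lowers to an abort of kind kUnreachable and yields the never
// type; 'debug' lowers to a debug break and lets execution continue.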
const Type* ImplementationVisitor::Visit(DebugStatement* stmt) {
  std::string reason;
  const Type* return_type;
  AbortInstruction::Kind kind;
  switch (stmt->kind) {
    case DebugStatement::Kind::kUnreachable:
      // Use the same string as in C++ to simplify fuzzer pattern-matching.
      reason = base::kUnreachableCodeMessage;
      return_type = TypeOracle::GetNeverType();
      kind = AbortInstruction::Kind::kUnreachable;
      break;
    case DebugStatement::Kind::kDebug:
      reason = "debug break";
      return_type = TypeOracle::GetVoidType();
      kind = AbortInstruction::Kind::kDebugBreak;
      break;
  }
#if defined(DEBUG)
  assembler().Emit(PrintErrorInstruction{"halting because of " + reason +
                                         " at " + PositionAsString(stmt->pos)});
#endif
  assembler().Emit(AbortInstruction{kind, reason});
  return return_type;
}

namespace {

std::string FormatAssertSource(const std::string& str) {
  // Replace all whitespace characters with a space character.
  std::string str_no_newlines = str;
  std::replace_if(
      str_no_newlines.begin(), str_no_newlines.end(),
      [](unsigned char c) { return isspace(c); }, ' ');

  // str might include indentation, squash multiple space characters into one.
  std::string result;
  std::unique_copy(str_no_newlines.begin(), str_no_newlines.end(),
                   std::back_inserter(result),
                   [](char a, char b) { return a == ' ' && b == ' '; });
  return result;
}

}  // namespace

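// static_asserts are evaluated through a generated call; dchecks are only
// emitted in debug configurations (or when assert statements are forced),
// otherwise the failing path is routed into an unreachable block.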
const Type* ImplementationVisitor::Visit(AssertStatement* stmt) {
  if (stmt->kind == AssertStatement::AssertKind::kStaticAssert) {
    std::string message =
        "static_assert(" + stmt->source + ") at " + ToString(stmt->pos);
    GenerateCall(QualifiedName({"", TORQUE_INTERNAL_NAMESPACE_STRING},
                               STATIC_ASSERT_MACRO_STRING),
                 Arguments{{Visit(stmt->expression),
                            VisitResult(TypeOracle::GetConstexprStringType(),
                                        StringLiteralQuote(message))},
                           {}});
    return TypeOracle::GetVoidType();
  }
  // When the sandbox is off, sbxchecks become dchecks.
  if (stmt->kind == AssertStatement::AssertKind::kSbxCheck &&
      !V8_ENABLE_SANDBOX_BOOL)
    stmt->kind = AssertStatement::AssertKind::kDcheck;
  bool do_check = stmt->kind != AssertStatement::AssertKind::kDcheck ||
                  GlobalContext::force_assert_statements();
#if defined(DEBUG)
  do_check = true;
#endif
  Block* resume_block;

  if (!do_check) {
    Block* unreachable_block = assembler().NewBlock(assembler().CurrentStack());
    resume_block = assembler().NewBlock(assembler().CurrentStack());
    assembler().Goto(resume_block);
    assembler().Bind(unreachable_block);
  }

  // CSA_DCHECK & co. are not used here on purpose for two reasons. First,
  // Torque allows and handles two types of expressions in the if protocol
  // automagically, ones that return TNode<BoolT> and those that use the
  // BranchIf(..., Label* true, Label* false) idiom. Because the machinery to
  // handle this is embedded in the expression handling, the decision between
  // CSA_DCHECK and CSA_DCHECK_BRANCH isn't trivial to make up-front.
  // Secondly, on failure, the assert text should be the corresponding Torque
  // code, not the -gen.cc code, which would be the case when using
  // CSA_DCHECK_XXX.
  Block* true_block = assembler().NewBlock(assembler().CurrentStack());
  Block* false_block = assembler().NewBlock(assembler().CurrentStack(), true);
  GenerateExpressionBranch(stmt->expression, true_block, false_block);

  assembler().Bind(false_block);

  assembler().Emit(AbortInstruction{
      AbortInstruction::Kind::kAssertionFailure,
      "Torque assert '" + FormatAssertSource(stmt->source) + "' failed"});

  assembler().Bind(true_block);

  if (!do_check) {
    assembler().Bind(resume_block);
  }

  return TypeOracle::GetVoidType();
}

const Type* ImplementationVisitor::Visit(ExpressionStatement* stmt) {
  const Type* type = Visit(stmt->expression).type();
  return type->IsNever() ? type : TypeOracle::GetVoidType();
}

const Type* ImplementationVisitor::Visit(ReturnStatement* stmt) {
  Callable* current_callable = CurrentCallable::Get();
  if (current_callable->signature().return_type->IsNever()) {
    std::stringstream s;
    s << "cannot return from a function with return type never";
    ReportError(s.str());
  }
  LocalLabel* end =
      current_callable->IsMacro() ? LookupLabel(kMacroEndLabelName) : nullptr;
  if (current_callable->HasReturnValue()) {
    if (!stmt->value) {
      std::stringstream s;
      s << "return expression needs to be specified for a return type of "
        << *current_callable->signature().return_type;
      ReportError(s.str());
    }
    VisitResult expression_result = Visit(*stmt->value);
    VisitResult return_result = GenerateImplicitConvert(
        current_callable->signature().return_type, expression_result);
    if (current_callable->IsMacro()) {
      if (return_result.IsOnStack()) {
        StackRange return_value_range =
            GenerateLabelGoto(end, return_result.stack_range());
        SetReturnValue(VisitResult(return_result.type(), return_value_range));
      } else {
        GenerateLabelGoto(end);
        SetReturnValue(return_result);
      }
    } else if (current_callable->IsBuiltin()) {
      assembler().Emit(ReturnInstruction{
          LoweredSlotCount(current_callable->signature().return_type)});
    } else {
      UNREACHABLE();
    }
  } else {
    if (stmt->value) {
      std::stringstream s;
      s << "return expression can't be specified for a void or never return "
           "type";
      ReportError(s.str());
    }
    GenerateLabelGoto(end);
  }
  current_callable->IncrementReturns();
  return TypeOracle::GetNeverType();
}

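// A try-label expression generates the try body first; each 'otherwise'
// label becomes a separate block whose parameters are bound as local values
// while its handler body is visited.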
VisitResult ImplementationVisitor::Visit(TryLabelExpression* expr) {
  size_t parameter_count = expr->label_block->parameters.names.size();
  std::vector<VisitResult> parameters;

  Block* label_block = nullptr;
  Block* done_block = assembler().NewBlock();
  VisitResult try_result;

  {
    CurrentSourcePosition::Scope source_position(expr->label_block->pos);
    if (expr->label_block->parameters.has_varargs) {
      ReportError("cannot use ... for label parameters");
    }
    Stack<const Type*> label_input_stack = assembler().CurrentStack();
    TypeVector parameter_types;
    for (size_t i = 0; i < parameter_count; ++i) {
      const Type* type =
          TypeVisitor::ComputeType(expr->label_block->parameters.types[i]);
      parameter_types.push_back(type);
      if (type->IsConstexpr()) {
        ReportError("no constexpr type allowed for label arguments");
      }
      StackRange range = label_input_stack.PushMany(LowerType(type));
      parameters.push_back(VisitResult(type, range));
    }
    label_block = assembler().NewBlock(label_input_stack,
                                       IsDeferred(expr->label_block->body));

    Binding<LocalLabel> label_binding{
        &LabelBindingsManager::Get(), expr->label_block->label,
        LocalLabel{label_block, std::move(parameter_types)}};

    // Visit try
    StackScope stack_scope(this);
    try_result = Visit(expr->try_expression);
    if (try_result.type() != TypeOracle::GetNeverType()) {
      try_result = stack_scope.Yield(try_result);
      assembler().Goto(done_block);
    }
  }

  // Visit and output the code for the label block. If the label block falls
  // through, then the try must not return a value. Also, if the try doesn't
  // fall through, but the label does, then overall the try-label block
  // returns type void.
  assembler().Bind(label_block);
  const Type* label_result;
  {
    BlockBindings<LocalValue> parameter_bindings(&ValueBindingsManager::Get());
    for (size_t i = 0; i < parameter_count; ++i) {
      Identifier* name = expr->label_block->parameters.names[i];
      parameter_bindings.Add(name,
                             LocalValue{LocationReference::Temporary(
                                 parameters[i], "parameter " + name->value)});
    }

    label_result = Visit(expr->label_block->body);
  }
  if (!try_result.type()->IsVoidOrNever() && label_result->IsVoid()) {
    ReportError(
        "otherwise clauses cannot fall through in a non-void expression");
  }
  if (label_result != TypeOracle::GetNeverType()) {
    assembler().Goto(done_block);
  }
  if (label_result->IsVoid() && try_result.type()->IsNever()) {
    try_result =
        VisitResult(TypeOracle::GetVoidType(), try_result.stack_range());
  }

  if (!try_result.type()->IsNever()) {
    assembler().Bind(done_block);
  }
  return try_result;
}

VisitResult ImplementationVisitor::Visit(StatementExpression* expr) {
  return VisitResult{Visit(expr->statement), assembler().TopRange(0)};
}

InitializerResults ImplementationVisitor::VisitInitializerResults(
    const ClassType* class_type,
    const std::vector<NameAndExpression>& initializers) {
  InitializerResults result;
  for (const NameAndExpression& initializer : initializers) {
    result.names.push_back(initializer.name);
    Expression* e = initializer.expression;
    const Field& field = class_type->LookupField(initializer.name->value);
    bool has_index = field.index.has_value();
    if (SpreadExpression* s = SpreadExpression::DynamicCast(e)) {
      if (!has_index) {
        ReportError(
            "spread expressions can only be used to initialize indexed class "
            "fields ('",
            initializer.name->value, "' is not)");
      }
      e = s->spreadee;
    } else if (has_index) {
      ReportError("the indexed class field '", initializer.name->value,
                  "' must be initialized with a spread operator");
    }
    result.field_value_map[field.name_and_type.name] = Visit(e);
  }
  return result;
}

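// Builds a location reference for reading a class field: indexed fields
// become heap slices (optionally collapsed to their first element), regular
// fields become heap references at the field's static offset.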
LocationReference ImplementationVisitor::GenerateFieldReference(
    VisitResult object, const Field& field, const ClassType* class_type,
    bool treat_optional_as_indexed) {
  if (field.index.has_value()) {
    LocationReference slice = LocationReference::HeapSlice(
        GenerateCall(class_type->GetSliceMacroName(field), {{object}, {}}));
    if (field.index->optional && !treat_optional_as_indexed) {
      // This field was declared using optional syntax, so any reference to it
      // is implicitly a reference to the first item.
      return GenerateReferenceToItemInHeapSlice(
          slice, {TypeOracle::GetConstInt31Type(), "0"});
    } else {
      return slice;
    }
  }
  DCHECK(field.offset.has_value());
  StackRange result_range = assembler().TopRange(0);
  result_range.Extend(GenerateCopy(object).stack_range());
  VisitResult offset =
      VisitResult(TypeOracle::GetConstInt31Type(), ToString(*field.offset));
  offset = GenerateImplicitConvert(TypeOracle::GetIntPtrType(), offset);
  result_range.Extend(offset.stack_range());
  const Type* type = TypeOracle::GetReferenceType(field.name_and_type.type,
                                                  field.const_qualified);
  return LocationReference::HeapReference(VisitResult(type, result_range),
                                          field.synchronization);
}

// This is used to generate field references during initialization, where we
// can reuse the offsets used for computing the allocation size.
LocationReference ImplementationVisitor::GenerateFieldReferenceForInit(
    VisitResult object, const Field& field,
    const LayoutForInitialization& layout) {
  StackRange result_range = assembler().TopRange(0);
  result_range.Extend(GenerateCopy(object).stack_range());
  VisitResult offset = GenerateImplicitConvert(
      TypeOracle::GetIntPtrType(), layout.offsets.at(field.name_and_type.name));
  result_range.Extend(offset.stack_range());
  if (field.index) {
    VisitResult length =
        GenerateCopy(layout.array_lengths.at(field.name_and_type.name));
    result_range.Extend(length.stack_range());
    const Type* slice_type =
        TypeOracle::GetMutableSliceType(field.name_and_type.type);
    return LocationReference::HeapSlice(VisitResult(slice_type, result_range));
  } else {
    // Const fields are writable during initialization.
    VisitResult heap_reference(
        TypeOracle::GetMutableReferenceType(field.name_and_type.type),
        result_range);
    return LocationReference::HeapReference(heap_reference);
  }
}

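// Initializes all fields of a freshly allocated object, walking the class
// hierarchy from the root superclass down; indexed fields are filled from an
// iterator, scalar fields by direct assignment.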
void ImplementationVisitor::InitializeClass(
    const ClassType* class_type, VisitResult allocate_result,
    const InitializerResults& initializer_results,
    const LayoutForInitialization& layout) {
  if (const ClassType* super = class_type->GetSuperClass()) {
    InitializeClass(super, allocate_result, initializer_results, layout);
  }

  for (const Field& f : class_type->fields()) {
    // Support optional padding fields.
    if (f.name_and_type.type->IsVoid()) continue;
    VisitResult initializer_value =
        initializer_results.field_value_map.at(f.name_and_type.name);
    LocationReference field =
        GenerateFieldReferenceForInit(allocate_result, f, layout);
    if (f.index) {
      DCHECK(field.IsHeapSlice());
      VisitResult slice = field.GetVisitResult();
      GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
                                 "InitializeFieldsFromIterator"),
                   {{slice, initializer_value}, {}});
    } else {
      GenerateAssignToLocation(field, initializer_value);
    }
  }
}

VisitResult ImplementationVisitor::GenerateArrayLength(
    Expression* array_length, Namespace* nspace,
    const std::map<std::string, LocalValue>& bindings) {
  StackScope stack_scope(this);
  CurrentSourcePosition::Scope pos_scope(array_length->pos);
  // Switch to the namespace where the class was declared.
  CurrentScope::Scope current_scope_scope(nspace);
  // Reset local bindings and install local binding for the preceding fields.
  BindingsManagersScope bindings_managers_scope;
  BlockBindings<LocalValue> field_bindings(&ValueBindingsManager::Get());
  for (auto& p : bindings) {
    field_bindings.Add(p.first, LocalValue{p.second}, true);
  }
  VisitResult length = Visit(array_length);
  VisitResult converted_length =
      GenerateCall("Convert", Arguments{{length}, {}},
                   {TypeOracle::GetIntPtrType(), length.type()}, false);
  return stack_scope.Yield(converted_length);
}

VisitResult ImplementationVisitor::GenerateArrayLength(VisitResult object,
                                                       const Field& field) {
  DCHECK(field.index);

  StackScope stack_scope(this);
  const ClassType* class_type = *object.type()->ClassSupertype();
  std::map<std::string, LocalValue> bindings;
  bool before_current = true;
  for (const Field& f : class_type->ComputeAllFields()) {
    if (field.name_and_type.name == f.name_and_type.name) {
      before_current = false;
    }
    // We can't generate field references eagerly here, because some preceding
    // fields might be optional, and attempting to get a reference to an
    // optional field can crash the program if the field isn't present.
    // Instead, we use the lazy form of LocalValue to only generate field
    // references if they are used in the length expression.
    bindings.insert(
        {f.name_and_type.name,
         f.const_qualified
             ? (before_current
                    ? LocalValue{[this, object, f, class_type]() {
                        return GenerateFieldReference(object, f, class_type);
                      }}
                    : LocalValue("Array lengths may only refer to fields "
                                 "defined earlier"))
             : LocalValue(
                   "Non-const fields cannot be used for array lengths.")});
  }
  return stack_scope.Yield(
      GenerateArrayLength(field.index->expr, class_type->nspace(), bindings));
}

VisitResult ImplementationVisitor::GenerateArrayLength(
    const ClassType* class_type, const InitializerResults& initializer_results,
    const Field& field) {
  DCHECK(field.index);

  StackScope stack_scope(this);
  std::map<std::string, LocalValue> bindings;
  for (const Field& f : class_type->ComputeAllFields()) {
    if (f.index) break;
    const std::string& fieldname = f.name_and_type.name;
    VisitResult value = initializer_results.field_value_map.at(fieldname);
    bindings.insert(
        {fieldname,
         f.const_qualified
             ? LocalValue{LocationReference::Temporary(
                   value, "initial field " + fieldname)}
             : LocalValue(
                   "Non-const fields cannot be used for array lengths.")});
  }
  return stack_scope.Yield(
      GenerateArrayLength(field.index->expr, class_type->nspace(), bindings));
}

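// Computes each field's offset and, for indexed fields, its array length;
// the running offset becomes the object size unless the class has a
// statically known size, and the result is tagged-size aligned if needed.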
LayoutForInitialization ImplementationVisitor::GenerateLayoutForInitialization(
    const ClassType* class_type,
    const InitializerResults& initializer_results) {
  LayoutForInitialization layout;
  VisitResult offset;
  for (Field f : class_type->ComputeAllFields()) {
    if (f.offset.has_value()) {
      offset =
          VisitResult(TypeOracle::GetConstInt31Type(), ToString(*f.offset));
    }
    layout.offsets[f.name_and_type.name] = offset;
    if (f.index) {
      size_t element_size;
      std::string element_size_string;
      std::tie(element_size, element_size_string) =
          *SizeOf(f.name_and_type.type);
      VisitResult array_element_size =
          VisitResult(TypeOracle::GetConstInt31Type(), element_size_string);
      VisitResult array_length =
          GenerateArrayLength(class_type, initializer_results, f);
      layout.array_lengths[f.name_and_type.name] = array_length;
      Arguments arguments;
      arguments.parameters = {offset, array_length, array_element_size};
      offset = GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
                                          "AddIndexedFieldSizeToObjectSize"),
                            arguments);
    } else {
      DCHECK(f.offset.has_value());
    }
  }
  if (class_type->size().SingleValue()) {
    layout.size = VisitResult(TypeOracle::GetConstInt31Type(),
                              ToString(*class_type->size().SingleValue()));
  } else {
    layout.size = offset;
  }
  if ((size_t{1} << class_type->size().AlignmentLog2()) <
      TargetArchitecture::TaggedSize()) {
    Arguments arguments;
    arguments.parameters = {layout.size};
    layout.size = GenerateCall(
        QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING}, "AlignTagged"),
        arguments);
  }
  return layout;
}

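// 'new' allocates via torque_internal::AllocateFromNew: the map comes from
// the initializer for extern classes or is derived from the instance type
// otherwise, and the object is %RawDownCast to the class type after its
// fields are initialized.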
1678VisitResult ImplementationVisitor::Visit(NewExpression* expr) {
1679 StackScope stack_scope(this);
1680 const Type* type = TypeVisitor::ComputeType(expr->type);
1681 const ClassType* class_type = ClassType::DynamicCast(type);
1682 if (class_type == nullptr) {
1683 ReportError("type for new expression must be a class, \"", *type,
1684 "\" is not");
1685 }
1686
1687 if (!class_type->AllowInstantiation()) {
1688 // Classes that are only used for testing should never be instantiated.
1689 ReportError(*class_type,
1690 " cannot be allocated with new (it's used for testing)");
1691 }
1692
1693 InitializerResults initializer_results =
1694 VisitInitializerResults(class_type, expr->initializers);
1695
1696 const Field& map_field = class_type->LookupField("map");
1697 if (*map_field.offset != 0) {
1698 ReportError("class initializers must have a map as first parameter");
1699 }
1700 const std::map<std::string, VisitResult>& initializer_fields =
1701 initializer_results.field_value_map;
1702 auto it_object_map = initializer_fields.find(map_field.name_and_type.name);
1703 VisitResult object_map;
1704 if (class_type->IsExtern()) {
1705 if (it_object_map == initializer_fields.end()) {
1706 ReportError("Constructor for ", class_type->name(),
1707 " needs Map argument!");
1708 }
1709 object_map = it_object_map->second;
1710 } else {
1711 if (it_object_map != initializer_fields.end()) {
1712      ReportError(
1713          "Constructor for ", class_type->name(),
1714 " must not specify Map argument; it is automatically inserted.");
1715 }
1716 Arguments get_struct_map_arguments;
1717 get_struct_map_arguments.parameters.push_back(
1718 VisitResult(TypeOracle::GetConstexprInstanceTypeType(),
1719 CapifyStringWithUnderscores(class_type->name()) + "_TYPE"));
1720 object_map = GenerateCall(
1721 QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING}, "GetInstanceTypeMap"),
1722 get_struct_map_arguments, {}, false);
1723 CurrentSourcePosition::Scope current_pos(expr->pos);
1724 initializer_results.names.insert(initializer_results.names.begin(),
1725 MakeNode<Identifier>("map"));
1726 initializer_results.field_value_map[map_field.name_and_type.name] =
1727 object_map;
1728 }
1729
1730 CheckInitializersWellformed(class_type->name(),
1731 class_type->ComputeAllFields(),
1732 expr->initializers, !class_type->IsExtern());
1733
1734  LayoutForInitialization layout =
1735      GenerateLayoutForInitialization(class_type, initializer_results);
1736
1737 Arguments allocate_arguments;
1738 allocate_arguments.parameters.push_back(layout.size);
1739 allocate_arguments.parameters.push_back(object_map);
1740 allocate_arguments.parameters.push_back(
1741 GenerateBoolConstant(expr->pretenured));
1742 allocate_arguments.parameters.push_back(
1743 GenerateBoolConstant(expr->clear_padding));
1744 VisitResult allocate_result = GenerateCall(
1745 QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING}, "AllocateFromNew"),
1746 allocate_arguments, {class_type}, false);
1747 DCHECK(allocate_result.IsOnStack());
1748
1749 InitializeClass(class_type, allocate_result, initializer_results, layout);
1750
1751 return stack_scope.Yield(GenerateCall(
1752 "%RawDownCast", Arguments{{allocate_result}, {}}, {class_type}));
1753}
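// Illustration (hypothetical Torque source): a statement such as
//
//   const foo = new Foo{length: 3, entries: ...};
//
// flows through the steps above: for non-extern classes the map is inserted
// implicitly (GetInstanceTypeMap with the FOO_TYPE instance type), the size
// comes from GenerateLayoutForInitialization, AllocateFromNew reserves the
// memory, InitializeClass writes the fields, and %RawDownCast gives the
// result the static type Foo.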
1754
1755const Type* ImplementationVisitor::Visit(BreakStatement* stmt) {
1756 std::optional<Binding<LocalLabel>*> break_label =
1757 TryLookupLabel(kBreakLabelName);
1758 if (!break_label) {
1759 ReportError("break used outside of loop");
1760 }
1761 assembler().Goto((*break_label)->block);
1762 return TypeOracle::GetNeverType();
1763}
1764
1765const Type* ImplementationVisitor::Visit(ContinueStatement* stmt) {
1766 std::optional<Binding<LocalLabel>*> continue_label =
1767 TryLookupLabel(kContinueLabelName);
1768 if (!continue_label) {
1769 ReportError("continue used outside of loop");
1770 }
1771 assembler().Goto((*continue_label)->block);
1772 return TypeOracle::GetNeverType();
1773}
1774
1775const Type* ImplementationVisitor::Visit(ForLoopStatement* stmt) {
1776 BlockBindings<LocalValue> loop_bindings(&ValueBindingsManager::Get());
1777
1778 if (stmt->var_declaration) Visit(*stmt->var_declaration, &loop_bindings);
1779
1780 Block* body_block = assembler().NewBlock(assembler().CurrentStack());
1781 Block* exit_block = assembler().NewBlock(assembler().CurrentStack());
1782
1783 Block* header_block = assembler().NewBlock();
1784 assembler().Goto(header_block);
1785 assembler().Bind(header_block);
1786
1787 // The continue label is where "continue" statements jump to. If no action
1788 // expression is provided, we jump directly to the header.
1789 Block* continue_block = header_block;
1790
1791 // The action label is only needed when an action expression was provided.
1792 Block* action_block = nullptr;
1793 if (stmt->action) {
1794 action_block = assembler().NewBlock();
1795
1796 // The action expression needs to be executed on a continue.
1797 continue_block = action_block;
1798 }
1799
1800 if (stmt->test) {
1801 GenerateExpressionBranch(*stmt->test, body_block, exit_block);
1802 } else {
1803 assembler().Goto(body_block);
1804 }
1805
1806 assembler().Bind(body_block);
1807 {
1808 BreakContinueActivator activator(exit_block, continue_block);
1809 const Type* body_result = Visit(stmt->body);
1810 if (body_result != TypeOracle::GetNeverType()) {
1811 assembler().Goto(continue_block);
1812 }
1813 }
1814
1815 if (stmt->action) {
1816 assembler().Bind(action_block);
1817 const Type* action_result = Visit(*stmt->action);
1818 if (action_result != TypeOracle::GetNeverType()) {
1819 assembler().Goto(header_block);
1820 }
1821 }
1822
1823 assembler().Bind(exit_block);
1824 return TypeOracle::GetVoidType();
1825}
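// Block structure built above, for a loop with both a test and an action,
// e.g. "for (let i: intptr = 0; i < n; i++) { ... }" (illustrative Torque):
//
//   header: Branch(test, body, exit)
//   body:   ...            // break -> exit, continue -> action
//   body ends: Goto(action)
//   action: i++; Goto(header)
//   exit:
//
// Without an action, "continue" targets header directly; without a test,
// the header unconditionally jumps to body.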
1826
1827VisitResult ImplementationVisitor::Visit(SpreadExpression* expr) {
1828  ReportError(
1829      "spread operators are only currently supported in indexed class field "
1830 "initialization expressions");
1831}
1832
1833void ImplementationVisitor::GenerateImplementation(const std::string& dir) {
1834 for (SourceId file : SourceFileMap::AllSources()) {
1835 std::string base_filename =
1836 dir + "/" + SourceFileMap::PathFromV8RootWithoutExtension(file);
1837    GlobalContext::PerFileStreams& streams =
1838        GlobalContext::GeneratedPerFile(file);
1839
1840 std::string csa_cc = streams.csa_ccfile.str();
1841 // Insert missing builtin includes where the marker is.
1842 {
1843 auto pos = csa_cc.find(BuiltinIncludesMarker);
1844 CHECK_NE(pos, std::string::npos);
1845 std::string includes;
1846 for (const SourceId& include : streams.required_builtin_includes) {
1847 std::string include_file =
1848 SourceFileMap::PathFromV8RootWithoutExtension(include);
1849 includes += "#include \"torque-generated/";
1850 includes += include_file;
1851 includes += "-tq-csa.h\"\n";
1852 }
1853 csa_cc.replace(pos, strlen(BuiltinIncludesMarker), std::move(includes));
1854 }
1855
1856 // TODO(torque-builder): Pass file directly.
1857 WriteFile(base_filename + "-tq-csa.cc", std::move(csa_cc));
1858 WriteFile(base_filename + "-tq-csa.h", streams.csa_headerfile.str());
1859 WriteFile(base_filename + "-tq.inc",
1860 streams.class_definition_headerfile.str());
1861 WriteFile(
1862        base_filename + "-tq-inl.inc",
1863        streams.class_definition_inline_headerfile_macro_declarations.str() +
1864            streams.class_definition_inline_headerfile_macro_definitions.str() +
1865            streams.class_definition_inline_headerfile.str());
1866 WriteFile(base_filename + "-tq.cc", streams.class_definition_ccfile.str());
1867 }
1868
1869 WriteFile(dir + "/debug-macros.h", debug_macros_h_.str());
1870 WriteFile(dir + "/debug-macros.cc", debug_macros_cc_.str());
1871}
1872
1873cpp::Function ImplementationVisitor::GenerateMacroFunctionDeclaration(
1874 Macro* macro) {
1875 return GenerateFunction(nullptr,
1876 output_type_ == OutputType::kCC
1877 ? macro->CCName()
1878 : output_type_ == OutputType::kCCDebug
1879 ? macro->CCDebugName()
1880 : macro->ExternalName(),
1881 macro->signature(), macro->parameter_names());
1882}
1883
1884cpp::Function ImplementationVisitor::GenerateFunction(
1885 cpp::Class* owner, const std::string& name, const Signature& signature,
1886 const NameVector& parameter_names, bool pass_code_assembler_state,
1887 std::vector<std::string>* generated_parameter_names) {
1888 cpp::Function f(owner, name);
1889 f.SetInline(output_type_ == OutputType::kCC);
1890
1891 // Set return type.
1892 // TODO(torque-builder): Consider an overload of SetReturnType that handles
1893 // this.
1894 if (signature.return_type->IsVoidOrNever()) {
1895 f.SetReturnType("void");
1896 } else if (output_type_ == OutputType::kCCDebug) {
1897 f.SetReturnType(std::string("Value<") +
1898 signature.return_type->GetDebugType() + ">");
1899 } else if (output_type_ == OutputType::kCC) {
1900 f.SetReturnType(signature.return_type->GetRuntimeType());
1901 } else {
1902 DCHECK_EQ(output_type_, OutputType::kCSA);
1903 f.SetReturnType(signature.return_type->IsConstexpr()
1904 ? signature.return_type->TagglifiedCppTypeName()
1905 : signature.return_type->GetGeneratedTypeName());
1906 }
1907
1908 bool ignore_first_parameter = true;
1909 if (output_type_ == OutputType::kCCDebug) {
1910 f.AddParameter("d::MemoryAccessor", "accessor");
1911 } else if (output_type_ == OutputType::kCSA && pass_code_assembler_state) {
1912 f.AddParameter("compiler::CodeAssemblerState*", "state_");
1913 } else {
1914 ignore_first_parameter = false;
1915 }
1916
1917 // TODO(torque-builder): Consider an overload for AddParameter that handles
1918 // this.
1919 DCHECK_GE(signature.types().size(), parameter_names.size());
1920 for (std::size_t i = 0; i < signature.types().size(); ++i) {
1921 const Type* parameter_type = signature.types()[i];
1922 std::string type;
1923 if (output_type_ == OutputType::kCC) {
1924 type = parameter_type->GetRuntimeType();
1925 } else if (output_type_ == OutputType::kCCDebug) {
1926 type = parameter_type->GetDebugType();
1927 } else {
1928 DCHECK_EQ(output_type_, OutputType::kCSA);
1929 if (parameter_type->IsConstexpr()) {
1930 type = parameter_type->TagglifiedCppTypeName();
1931 } else {
1932 type = parameter_type->GetGeneratedTypeName();
1933 }
1934 }
1935 f.AddParameter(std::move(type),
1936 ExternalParameterName(i < parameter_names.size()
1937 ? parameter_names[i]->value
1938 : std::to_string(i)));
1939 }
1940
1941 for (const LabelDeclaration& label_info : signature.labels) {
1942 if (output_type_ == OutputType::kCC ||
1943 output_type_ == OutputType::kCCDebug) {
1944 ReportError("Macros that generate runtime code can't have label exits");
1945 }
1946 f.AddParameter("compiler::CodeAssemblerLabel*",
1947 ExternalLabelName(label_info.name->value));
1948 size_t i = 0;
1949 for (const Type* type : label_info.types) {
1950 std::string generated_type_name;
1951 if (type->StructSupertype()) {
1952 generated_type_name = "\n#error no structs allowed in labels\n";
1953 } else {
1954 generated_type_name = "compiler::TypedCodeAssemblerVariable<";
1955 generated_type_name += type->GetGeneratedTNodeTypeName();
1956 generated_type_name += ">*";
1957 }
1958 f.AddParameter(generated_type_name,
1959 ExternalLabelParameterName(label_info.name->value, i));
1960 ++i;
1961 }
1962 }
1963
1964 if (generated_parameter_names) {
1965 *generated_parameter_names = f.GetParameterNames();
1966 if (ignore_first_parameter) {
1967 DCHECK(!generated_parameter_names->empty());
1968 generated_parameter_names->erase(generated_parameter_names->begin());
1969 }
1970 }
1971 return f;
1972}
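// For intuition, a CSA-mode macro "macro Bar(o: Object): Smi labels Fail"
// yields a declaration along these lines (illustrative approximation, not
// emitted verbatim by this function):
//
//   TNode<Smi> Bar(compiler::CodeAssemblerState* state_, TNode<Object> p_o,
//                  compiler::CodeAssemblerLabel* label_Fail);
//
// In kCC mode the state parameter is omitted and runtime types are used
// instead; in kCCDebug mode a d::MemoryAccessor parameter comes first and
// the return type is wrapped as Value<...>.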
1973
1974namespace {
1975
1976void FailCallableLookup(
1977 const std::string& reason, const QualifiedName& name,
1978 const TypeVector& parameter_types,
1979 const std::vector<Binding<LocalLabel>*>& labels,
1980 const std::vector<Signature>& candidates,
1981 const std::vector<std::pair<GenericCallable*, std::string>>
1982 inapplicable_generics) {
1983 std::stringstream stream;
1984 stream << "\n" << reason << ": \n " << name << "(" << parameter_types << ")";
1985 if (!labels.empty()) {
1986 stream << " labels ";
1987 for (size_t i = 0; i < labels.size(); ++i) {
1988 stream << labels[i]->name() << "(" << labels[i]->parameter_types << ")";
1989 }
1990 }
1991 stream << "\ncandidates are:";
1992 for (const Signature& signature : candidates) {
1993 stream << "\n " << name;
1994 PrintSignature(stream, signature, false);
1995 }
1996 if (!inapplicable_generics.empty()) {
1997 stream << "\nfailed to instantiate all of these generic declarations:";
1998 for (auto& failure : inapplicable_generics) {
1999 GenericCallable* generic = failure.first;
2000 const std::string& fail_reason = failure.second;
2001 stream << "\n " << generic->name() << " defined at "
2002 << PositionAsString(generic->Position()) << ":\n "
2003 << fail_reason << "\n";
2004 }
2005 }
2006 ReportError(stream.str());
2007}
2008
2009Callable* GetOrCreateSpecialization(
2010 const SpecializationKey<GenericCallable>& key) {
2011 if (std::optional<Callable*> specialization =
2012 key.generic->GetSpecialization(key.specialized_types)) {
2013 return *specialization;
2014 }
2015 return DeclarationVisitor::SpecializeImplicit(key);
2016}
2017
2018} // namespace
2019
2020std::optional<Binding<LocalValue>*> ImplementationVisitor::TryLookupLocalValue(
2021 const std::string& name) {
2022 return ValueBindingsManager::Get().TryLookup(name);
2023}
2024
2025std::optional<Binding<LocalLabel>*> ImplementationVisitor::TryLookupLabel(
2026 const std::string& name) {
2027 return LabelBindingsManager::Get().TryLookup(name);
2028}
2029
2030Binding<LocalLabel>* ImplementationVisitor::LookupLabel(
2031 const std::string& name) {
2032 std::optional<Binding<LocalLabel>*> label = TryLookupLabel(name);
2033 if (!label) ReportError("cannot find label ", name);
2034 return *label;
2035}
2036
2037Block* ImplementationVisitor::LookupSimpleLabel(const std::string& name) {
2038 LocalLabel* label = LookupLabel(name);
2039 if (!label->parameter_types.empty()) {
2040 ReportError("label ", name,
2041                " was expected to have no parameters, but has parameters (",
2042 label->parameter_types, ")");
2043 }
2044 return label->block;
2045}
2046
2047// Try to lookup a callable with the provided argument types. Do not report
2048// an error if no matching callable was found, but return false instead.
2049// This is used to test the presence of overloaded field accessors.
2050bool ImplementationVisitor::TestLookupCallable(
2051 const QualifiedName& name, const TypeVector& parameter_types) {
2052 return LookupCallable(name, Declarations::TryLookup(name), parameter_types,
2053 {}, {}, true) != nullptr;
2054}
2055
2056TypeArgumentInference ImplementationVisitor::InferSpecializationTypes(
2057 GenericCallable* generic, const TypeVector& explicit_specialization_types,
2058 const TypeVector& explicit_arguments) {
2059 std::vector<std::optional<const Type*>> all_arguments;
2060 const ParameterList& parameters = generic->declaration()->parameters;
2061 for (size_t i = 0; i < parameters.implicit_count; ++i) {
2062 std::optional<Binding<LocalValue>*> val =
2063 TryLookupLocalValue(parameters.names[i]->value);
2064 all_arguments.push_back(
2065 val ? (*val)->GetLocationReference(*val).ReferencedType()
2066 : std::nullopt);
2067 }
2068 for (const Type* explicit_argument : explicit_arguments) {
2069 all_arguments.push_back(explicit_argument);
2070 }
2071 return generic->InferSpecializationTypes(explicit_specialization_types,
2072 all_arguments);
2073}
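// Example (illustrative): for "macro Get<T: type>(x: T): T" invoked as
// Get(s) with s of type Smi, explicit_arguments is {Smi}; together with any
// implicit bindings currently in scope this is matched against the declared
// parameters to infer T = Smi without an explicit specialization list.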
2074
2075template <class Container>
2076Callable* ImplementationVisitor::LookupCallable(
2077 const QualifiedName& name, const Container& declaration_container,
2078 const TypeVector& parameter_types,
2079 const std::vector<Binding<LocalLabel>*>& labels,
2080 const TypeVector& specialization_types, bool silence_errors) {
2081 Callable* result = nullptr;
2082
2083 std::vector<Declarable*> overloads;
2084 std::vector<Signature> overload_signatures;
2085 std::vector<std::pair<GenericCallable*, std::string>> inapplicable_generics;
2086 for (auto* declarable : declaration_container) {
2087 if (GenericCallable* generic = GenericCallable::DynamicCast(declarable)) {
2088 TypeArgumentInference inference = InferSpecializationTypes(
2089 generic, specialization_types, parameter_types);
2090 if (inference.HasFailed()) {
2091 inapplicable_generics.push_back(
2092 std::make_pair(generic, inference.GetFailureReason()));
2093 continue;
2094 }
2095 overloads.push_back(generic);
2096 overload_signatures.push_back(
2097 DeclarationVisitor::MakeSpecializedSignature(
2098              SpecializationKey<GenericCallable>{generic,
2099                                                 inference.GetResult()}));
2100 } else if (Callable* callable = Callable::DynamicCast(declarable)) {
2101 overloads.push_back(callable);
2102 overload_signatures.push_back(callable->signature());
2103 }
2104 }
2105 // Indices of candidates in overloads/overload_signatures.
2106 std::vector<size_t> candidates;
2107 for (size_t i = 0; i < overloads.size(); ++i) {
2108 const Signature& signature = overload_signatures[i];
2109 if (IsCompatibleSignature(signature, parameter_types, labels.size())) {
2110 candidates.push_back(i);
2111 }
2112 }
2113
2114 if (overloads.empty() && inapplicable_generics.empty()) {
2115 if (silence_errors) return nullptr;
2116 std::stringstream stream;
2117 stream << "no matching declaration found for " << name;
2118 ReportError(stream.str());
2119 } else if (candidates.empty()) {
2120 if (silence_errors) return nullptr;
2121 FailCallableLookup("cannot find suitable callable with name", name,
2122 parameter_types, labels, overload_signatures,
2123 inapplicable_generics);
2124 }
2125
2126 auto is_better_candidate = [&](size_t a, size_t b) {
2127 return ParameterDifference(overload_signatures[a].GetExplicitTypes(),
2128 parameter_types)
2129        .StrictlyBetterThan(ParameterDifference(
2130            overload_signatures[b].GetExplicitTypes(), parameter_types));
2131 };
2132
2133 size_t best = *std::min_element(candidates.begin(), candidates.end(),
2134 is_better_candidate);
2135 // This check is contained in libstdc++'s std::min_element.
2136 DCHECK(!is_better_candidate(best, best));
2137 for (size_t candidate : candidates) {
2138 if (candidate != best && !is_better_candidate(best, candidate)) {
2139 std::vector<Signature> candidate_signatures;
2140 candidate_signatures.reserve(candidates.size());
2141 for (size_t i : candidates) {
2142 candidate_signatures.push_back(overload_signatures[i]);
2143 }
2144 FailCallableLookup("ambiguous callable ", name, parameter_types, labels,
2145 candidate_signatures, inapplicable_generics);
2146 }
2147 }
2148
2149 if (GenericCallable* generic =
2150 GenericCallable::DynamicCast(overloads[best])) {
2151 TypeArgumentInference inference = InferSpecializationTypes(
2152 generic, specialization_types, parameter_types);
2153 result = GetOrCreateSpecialization(
2154 SpecializationKey<GenericCallable>{generic, inference.GetResult()});
2155 } else {
2156 result = Callable::cast(overloads[best]);
2157 }
2158
2159 size_t caller_size = parameter_types.size();
2160 size_t callee_size =
2161 result->signature().types().size() - result->signature().implicit_count;
2162 if (caller_size != callee_size &&
2163 !result->signature().parameter_types.var_args) {
2164 std::stringstream stream;
2165 stream << "parameter count mismatch calling " << *result << " - expected "
2166 << std::to_string(callee_size) << ", found "
2167 << std::to_string(caller_size);
2168 ReportError(stream.str());
2169 }
2170
2171 return result;
2172}
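// Resolution example (illustrative): with candidates "macro F(x: Smi)" and
// "macro F(x: Number)", a call F(s) where s: Smi keeps both as compatible
// candidates; ParameterDifference ranks the Smi overload strictly better,
// so it is chosen. If no candidate strictly beats all others, the loop
// above reports an ambiguous-callable error rather than picking one.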
2173
2174template <class Container>
2175Callable* ImplementationVisitor::LookupCallable(
2176 const QualifiedName& name, const Container& declaration_container,
2177 const Arguments& arguments, const TypeVector& specialization_types) {
2178 return LookupCallable(name, declaration_container,
2179 arguments.parameters.ComputeTypeVector(),
2180 arguments.labels, specialization_types);
2181}
2182
2183Method* ImplementationVisitor::LookupMethod(
2184 const std::string& name, const AggregateType* receiver_type,
2185 const Arguments& arguments, const TypeVector& specialization_types) {
2186 TypeVector types(arguments.parameters.ComputeTypeVector());
2187 types.insert(types.begin(), receiver_type);
2188 return Method::cast(LookupCallable({{}, name}, receiver_type->Methods(name),
2189 types, arguments.labels,
2190 specialization_types));
2191}
2192
2193const Type* ImplementationVisitor::GetCommonType(const Type* left,
2194 const Type* right) {
2195 const Type* common_type;
2196 if (IsAssignableFrom(left, right)) {
2197 common_type = left;
2198 } else if (IsAssignableFrom(right, left)) {
2199 common_type = right;
2200 } else {
2201 common_type = TypeOracle::GetUnionType(left, right);
2202 }
2203 common_type = common_type->NonConstexprVersion();
2204 return common_type;
2205}
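// Examples (illustrative): GetCommonType(Number, Smi) is Number, since Smi
// is assignable to Number; GetCommonType(Smi, HeapNumber) is the union
// Smi | HeapNumber; in all cases a constexpr result is demoted to its
// non-constexpr version.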
2206
2207VisitResult ImplementationVisitor::GenerateCopy(const VisitResult& to_copy) {
2208 if (to_copy.IsOnStack()) {
2209 return VisitResult(to_copy.type(),
2210 assembler().Peek(to_copy.stack_range(), to_copy.type()));
2211 }
2212 return to_copy;
2213}
2214
2215VisitResult ImplementationVisitor::Visit(StructExpression* expr) {
2216 StackScope stack_scope(this);
2217
2218 auto& initializers = expr->initializers;
2219 std::vector<VisitResult> values;
2220 std::vector<const Type*> term_argument_types;
2221 values.reserve(initializers.size());
2222 term_argument_types.reserve(initializers.size());
2223
2224 // Compute values and types of all initializer arguments
2225 for (const NameAndExpression& initializer : initializers) {
2226 VisitResult value = Visit(initializer.expression);
2227 values.push_back(value);
2228 term_argument_types.push_back(value.type());
2229 }
2230
2231 // Compute and check struct type from given struct name and argument types
2232 const Type* type = TypeVisitor::ComputeTypeForStructExpression(
2233 expr->type, term_argument_types);
2234 if (const auto* struct_type = StructType::DynamicCast(type)) {
2235 CheckInitializersWellformed(struct_type->name(), struct_type->fields(),
2236 initializers);
2237
2238 // Implicitly convert values and thereby build the struct on the stack
2239 StackRange struct_range = assembler().TopRange(0);
2240 auto& fields = struct_type->fields();
2241 for (size_t i = 0; i < values.size(); i++) {
2242 values[i] =
2243 GenerateImplicitConvert(fields[i].name_and_type.type, values[i]);
2244 struct_range.Extend(values[i].stack_range());
2245 }
2246
2247 return stack_scope.Yield(VisitResult(struct_type, struct_range));
2248 } else {
2249 const auto* bitfield_struct_type = BitFieldStructType::cast(type);
2250 CheckInitializersWellformed(bitfield_struct_type->name(),
2251 bitfield_struct_type->fields(), initializers);
2252
2253 // Create a zero and cast it to the desired bitfield struct type.
2254 VisitResult result{TypeOracle::GetConstInt32Type(), "0"};
2255 result = GenerateImplicitConvert(TypeOracle::GetInt32Type(), result);
2256 result = GenerateCall("Unsigned", Arguments{{result}, {}}, {});
2257 result = GenerateCall("%RawDownCast", Arguments{{result}, {}},
2258 {bitfield_struct_type});
2259
2260 // Set each field in the result. If these fields are constexpr, then all of
2261 // this initialization will end up reduced to a single value during TurboFan
2262 // optimization.
2263 auto& fields = bitfield_struct_type->fields();
2264 for (size_t i = 0; i < values.size(); i++) {
2265 values[i] =
2266 GenerateImplicitConvert(fields[i].name_and_type.type, values[i]);
2267 result = GenerateSetBitField(bitfield_struct_type, fields[i], result,
2268 values[i], /*starts_as_zero=*/true);
2269 }
2270
2271 return stack_scope.Yield(result);
2272 }
2273}
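// Illustration (hypothetical Torque): "MyStruct{a: x, b: y}" visits each
// initializer, implicitly converts it to the declared field type, and
// leaves the fields as adjacent stack slots typed as MyStruct. A bitfield
// struct literal like "MyBits{f: 1, g: 2}" instead starts from int32 zero
// and applies one StoreBitFieldInstruction per field, so a fully constant
// initializer can fold to a single word.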
2274
2275VisitResult ImplementationVisitor::GenerateSetBitField(
2276 const Type* bitfield_struct_type, const BitField& bitfield,
2277 VisitResult bitfield_struct, VisitResult value, bool starts_as_zero) {
2278 GenerateCopy(bitfield_struct);
2279 GenerateCopy(value);
2280 assembler().Emit(
2281 StoreBitFieldInstruction{bitfield_struct_type, bitfield, starts_as_zero});
2282 return VisitResult(bitfield_struct_type, assembler().TopRange(1));
2283}
2284
2285LocationReference ImplementationVisitor::GetLocationReference(
2286 Expression* location) {
2287 switch (location->kind) {
2288 case AstNode::Kind::kIdentifierExpression:
2289 return GetLocationReference(static_cast<IdentifierExpression*>(location));
2290 case AstNode::Kind::kFieldAccessExpression:
2291 return GetLocationReference(
2292 static_cast<FieldAccessExpression*>(location));
2293 case AstNode::Kind::kElementAccessExpression:
2294 return GetLocationReference(
2295 static_cast<ElementAccessExpression*>(location));
2296 case AstNode::Kind::kDereferenceExpression:
2297 return GetLocationReference(
2298 static_cast<DereferenceExpression*>(location));
2299 default:
2300 return LocationReference::Temporary(Visit(location), "expression");
2301 }
2302}
2303
2304LocationReference ImplementationVisitor::GetLocationReference(
2305 FieldAccessExpression* expr) {
2306 return GenerateFieldAccess(GetLocationReference(expr->object),
2307 expr->field->value, false, expr->field->pos);
2308}
2309
2310LocationReference ImplementationVisitor::GenerateFieldAccess(
2311 LocationReference reference, const std::string& fieldname,
2312    bool ignore_struct_field_constness, std::optional<SourcePosition> pos) {
2313 if (reference.IsVariableAccess() &&
2314 reference.variable().type()->StructSupertype()) {
2315 const StructType* type = *reference.variable().type()->StructSupertype();
2316 const Field& field = type->LookupField(fieldname);
2317 if (GlobalContext::collect_language_server_data() && pos.has_value()) {
2318 LanguageServerData::AddDefinition(*pos, field.pos);
2319 }
2320 if (GlobalContext::collect_kythe_data() && pos.has_value()) {
2321 KytheData::AddClassFieldUse(*pos, &field);
2322 }
2323 if (field.const_qualified) {
2324 VisitResult t_value = ProjectStructField(reference.variable(), fieldname);
2325 return LocationReference::Temporary(
2326 t_value, "for constant field '" + field.name_and_type.name + "'");
2327 } else {
2328 return LocationReference::VariableAccess(
2329 ProjectStructField(reference.variable(), fieldname));
2330 }
2331 }
2332 if (reference.IsTemporary() &&
2333 reference.temporary().type()->StructSupertype()) {
2334 if (GlobalContext::collect_language_server_data() && pos.has_value()) {
2335 const StructType* type = *reference.temporary().type()->StructSupertype();
2336 const Field& field = type->LookupField(fieldname);
2337 LanguageServerData::AddDefinition(*pos, field.pos);
2338 }
2339 return LocationReference::Temporary(
2340 ProjectStructField(reference.temporary(), fieldname),
2341 reference.temporary_description());
2342 }
2343 if (std::optional<const Type*> referenced_type = reference.ReferencedType()) {
2344 if ((*referenced_type)->IsBitFieldStructType()) {
2345 const BitFieldStructType* bitfield_struct =
2346 BitFieldStructType::cast(*referenced_type);
2347 const BitField& field = bitfield_struct->LookupField(fieldname);
2348 return LocationReference::BitFieldAccess(reference, field);
2349 }
2350 if (const auto type_wrapped_in_smi = Type::MatchUnaryGeneric(
2351 (*referenced_type), TypeOracle::GetSmiTaggedGeneric())) {
2352 const BitFieldStructType* bitfield_struct =
2353 BitFieldStructType::DynamicCast(*type_wrapped_in_smi);
2354 if (bitfield_struct == nullptr) {
2355        ReportError(
2356            "When a value of type SmiTagged<T> is used in a field access "
2357 "expression, T is expected to be a bitfield struct type. Instead, "
2358 "T "
2359 "is ",
2360 **type_wrapped_in_smi);
2361 }
2362 const BitField& field = bitfield_struct->LookupField(fieldname);
2363 return LocationReference::BitFieldAccess(reference, field);
2364 }
2365 }
2366 if (reference.IsHeapReference()) {
2367 VisitResult ref = reference.heap_reference();
2368 bool is_const;
2369 auto generic_type =
2370 TypeOracle::MatchReferenceGeneric(ref.type(), &is_const);
2371 if (!generic_type) {
2372      ReportError(
2373          "Left-hand side of field access expression is marked as a reference "
2374 "but is not of type Reference<...>. Found type: ",
2375 ref.type()->ToString());
2376 }
2377 if (auto struct_type = (*generic_type)->StructSupertype()) {
2378 const Field& field = (*struct_type)->LookupField(fieldname);
2379 // Update the Reference's type to refer to the field type within the
2380 // struct.
2381 ref.SetType(TypeOracle::GetReferenceType(
2382 field.name_and_type.type,
2383 is_const ||
2384             (field.const_qualified && !ignore_struct_field_constness)));
2385 if (!field.offset.has_value()) {
2386 Error("accessing field with unknown offset").Throw();
2387 }
2388 if (*field.offset != 0) {
2389 // Copy the Reference struct up the stack and update the new copy's
2390 // |offset| value to point to the struct field.
2391 StackScope scope(this);
2392 ref = GenerateCopy(ref);
2393 VisitResult ref_offset = ProjectStructField(ref, "offset");
2394 VisitResult struct_offset{
2395 TypeOracle::GetIntPtrType()->ConstexprVersion(),
2396 std::to_string(*field.offset)};
2397 VisitResult updated_offset =
2398 GenerateCall("+", Arguments{{ref_offset, struct_offset}, {}});
2399 assembler().Poke(ref_offset.stack_range(), updated_offset.stack_range(),
2400 ref_offset.type());
2401 ref = scope.Yield(ref);
2402 }
2403 return LocationReference::HeapReference(ref);
2404 }
2405 }
2406 VisitResult object_result = GenerateFetchFromLocation(reference);
2407 if (std::optional<const ClassType*> class_type =
2408 object_result.type()->ClassSupertype()) {
2409 // This is a hack to distinguish the situation where we want to use
2410 // overloaded field accessors from when we want to create a reference.
2411 bool has_explicit_overloads = TestLookupCallable(
2412 QualifiedName{"." + fieldname}, {object_result.type()});
2413 if ((*class_type)->HasField(fieldname) && !has_explicit_overloads) {
2414 const Field& field = (*class_type)->LookupField(fieldname);
2415 if (GlobalContext::collect_language_server_data() && pos.has_value()) {
2416 LanguageServerData::AddDefinition(*pos, field.pos);
2417 }
2418       if (GlobalContext::collect_kythe_data() && pos.has_value()) {
2419 KytheData::AddClassFieldUse(*pos, &field);
2420 }
2421 return GenerateFieldReference(object_result, field, *class_type);
2422 }
2423 }
2424 return LocationReference::FieldAccess(object_result, fieldname);
2425}
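// Dispatch order above, in summary: struct-typed variables and temporaries
// project the field directly; bitfield structs (possibly via SmiTagged<T>)
// yield a BitFieldAccess; Reference<SomeStruct> is retyped and its offset
// advanced in place; class-typed values without an explicit ".field" macro
// overload become heap field references; everything else falls back to a
// FieldAccess resolved through accessor macros.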
2426
2427LocationReference ImplementationVisitor::GetLocationReference(
2428    ElementAccessExpression* expr) {
2429  LocationReference reference = GetLocationReference(expr->array);
2430 VisitResult index = Visit(expr->index);
2431 if (reference.IsHeapSlice()) {
2432 return GenerateReferenceToItemInHeapSlice(reference, index);
2433 } else {
2434 return LocationReference::ArrayAccess(GenerateFetchFromLocation(reference),
2435 index);
2436 }
2437}
2438
2439LocationReference ImplementationVisitor::GenerateReferenceToItemInHeapSlice(
2440 LocationReference slice, VisitResult index) {
2441 DCHECK(slice.IsHeapSlice());
2442 Arguments arguments{{index}, {}};
2443 const StructType* slice_type = *slice.heap_slice().type()->StructSupertype();
2444 Method* method = LookupMethod("AtIndex", slice_type, arguments, {});
2445 // The reference has to be treated like a normal value when calling methods
2446 // on the underlying slice implementation.
2447 LocationReference slice_value =
2448 LocationReference::Temporary(slice.GetVisitResult(), "slice as value");
2449 return LocationReference::HeapReference(
2450 GenerateCall(method, std::move(slice_value), arguments, {}, false));
2451}
2452
2453LocationReference ImplementationVisitor::GetLocationReference(
2454 IdentifierExpression* expr) {
2455 if (expr->namespace_qualification.empty()) {
2456 if (std::optional<Binding<LocalValue>*> value =
2457 TryLookupLocalValue(expr->name->value)) {
2458 if (GlobalContext::collect_language_server_data()) {
2459 LanguageServerData::AddDefinition(expr->name->pos,
2460 (*value)->declaration_position());
2461 }
2462 if (GlobalContext::collect_kythe_data()) {
2463 if (!expr->IsThis()) {
2464 DCHECK_EQ(expr->name->pos.end.column - expr->name->pos.start.column,
2465 expr->name->value.length());
2466 KytheData::AddBindingUse(expr->name->pos, *value);
2467 }
2468 }
2469 if (!expr->generic_arguments.empty()) {
2470 ReportError("cannot have generic parameters on local name ",
2471 expr->name);
2472 }
2473 return (*value)->GetLocationReference(*value);
2474 }
2475 }
2476
2477 if (expr->IsThis()) {
2478 ReportError("\"this\" cannot be qualified");
2479 }
2480 QualifiedName name =
2481      QualifiedName(expr->namespace_qualification, expr->name->value);
2482  if (std::optional<Builtin*> builtin = Declarations::TryLookupBuiltin(name)) {
2483 if (GlobalContext::collect_language_server_data()) {
2484 LanguageServerData::AddDefinition(expr->name->pos,
2485 (*builtin)->Position());
2486 }
2487 // TODO(v8:12261): Consider collecting KytheData here.
2488 return LocationReference::Temporary(GetBuiltinCode(*builtin),
2489 "builtin " + expr->name->value);
2490 }
2491 if (!expr->generic_arguments.empty()) {
2492 GenericCallable* generic = Declarations::LookupUniqueGeneric(name);
2493 Callable* specialization =
2494 GetOrCreateSpecialization(SpecializationKey<GenericCallable>{
2495 generic, TypeVisitor::ComputeTypeVector(expr->generic_arguments)});
2496 if (Builtin* builtin = Builtin::DynamicCast(specialization)) {
2497 DCHECK(!builtin->IsExternal());
2498 return LocationReference::Temporary(GetBuiltinCode(builtin),
2499 "builtin " + expr->name->value);
2500 } else {
2501 ReportError("cannot create function pointer for non-builtin ",
2502 generic->name());
2503 }
2504 }
2505 Value* value = Declarations::LookupValue(name);
2506 CHECK(value->Position().source.IsValid());
2507 if (auto stream = CurrentFileStreams::Get()) {
2508 stream->required_builtin_includes.insert(value->Position().source);
2509 }
2510 if (GlobalContext::collect_language_server_data()) {
2511 LanguageServerData::AddDefinition(expr->name->pos, value->name()->pos);
2512 }
2513 if (auto* constant = NamespaceConstant::DynamicCast(value)) {
2514 if (GlobalContext::collect_kythe_data()) {
2515 KytheData::AddConstantUse(expr->name->pos, constant);
2516 }
2517 if (constant->type()->IsConstexpr()) {
2518 return LocationReference::Temporary(
2519 VisitResult(constant->type(), constant->external_name() + "(state_)"),
2520 "namespace constant " + expr->name->value);
2521 }
2522 assembler().Emit(NamespaceConstantInstruction{constant});
2523 StackRange stack_range =
2524 assembler().TopRange(LoweredSlotCount(constant->type()));
2525 return LocationReference::Temporary(
2526 VisitResult(constant->type(), stack_range),
2527 "namespace constant " + expr->name->value);
2528 }
2529 ExternConstant* constant = ExternConstant::cast(value);
2530 if (GlobalContext::collect_kythe_data()) {
2531 KytheData::AddConstantUse(expr->name->pos, constant);
2532 }
2533 return LocationReference::Temporary(constant->value(),
2534 "extern value " + expr->name->value);
2535}
2536
2537LocationReference ImplementationVisitor::GetLocationReference(
2538 DereferenceExpression* expr) {
2539 VisitResult ref = Visit(expr->reference);
2540 if (!TypeOracle::MatchReferenceGeneric(ref.type())) {
2541 Error("Operator * expects a reference type but found a value of type ",
2542 *ref.type())
2543 .Throw();
2544 }
2545 return LocationReference::HeapReference(ref);
2546}
2547
2548VisitResult ImplementationVisitor::GenerateFetchFromLocation(
2549 const LocationReference& reference) {
2550 if (reference.IsTemporary()) {
2551 return GenerateCopy(reference.temporary());
2552 } else if (reference.IsVariableAccess()) {
2553 return GenerateCopy(reference.variable());
2554 } else if (reference.IsHeapReference()) {
2555 const Type* referenced_type = *reference.ReferencedType();
2556 if (referenced_type == TypeOracle::GetFloat64OrUndefinedOrHoleType()) {
2557 return GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
2558#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
2559 "LoadFloat64OrUndefinedOrHole"
2560#else
2561 "LoadFloat64OrHole"
2562#endif // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
2563 ),
2564 Arguments{{reference.heap_reference()}, {}});
2565 } else if (auto struct_type = referenced_type->StructSupertype()) {
2566 StackRange result_range = assembler().TopRange(0);
2567 for (const Field& field : (*struct_type)->fields()) {
2568 StackScope scope(this);
2569 const std::string& fieldname = field.name_and_type.name;
2570 VisitResult field_value = scope.Yield(GenerateFetchFromLocation(
2571 GenerateFieldAccess(reference, fieldname)));
2572 result_range.Extend(field_value.stack_range());
2573 }
2574 return VisitResult(referenced_type, result_range);
2575 } else {
2576 GenerateCopy(reference.heap_reference());
2577      FieldSynchronization sync = reference.heap_reference_synchronization();
2578      assembler().Emit(LoadReferenceInstruction{referenced_type, sync});
2579 DCHECK_EQ(1, LoweredSlotCount(referenced_type));
2580 return VisitResult(referenced_type, assembler().TopRange(1));
2581 }
2582 } else if (reference.IsBitFieldAccess()) {
2583 // First fetch the bitfield struct, then get the bits out of it.
2584 VisitResult bit_field_struct =
2585 GenerateFetchFromLocation(reference.bit_field_struct_location());
2586 assembler().Emit(LoadBitFieldInstruction{bit_field_struct.type(),
2587 reference.bit_field()});
2588 return VisitResult(*reference.ReferencedType(), assembler().TopRange(1));
2589 } else {
2590 if (reference.IsHeapSlice()) {
2591      ReportError(
2592          "fetching a value directly from an indexed field isn't allowed");
2593 }
2594 DCHECK(reference.IsCallAccess());
2595 return GenerateCall(reference.eval_function(),
2596 Arguments{reference.call_arguments(), {}});
2597 }
2598}
2599
2600void ImplementationVisitor::GenerateAssignToLocation(
2601 const LocationReference& reference, const VisitResult& assignment_value) {
2602 if (reference.IsCallAccess()) {
2603 Arguments arguments{reference.call_arguments(), {}};
2604 arguments.parameters.push_back(assignment_value);
2605 GenerateCall(reference.assign_function(), arguments);
2606 } else if (reference.IsVariableAccess()) {
2607 VisitResult variable = reference.variable();
2608 VisitResult converted_value =
2609 GenerateImplicitConvert(variable.type(), assignment_value);
2610 assembler().Poke(variable.stack_range(), converted_value.stack_range(),
2611 variable.type());
2612
2613 // Local variables are detected by the existence of a binding. Assignment
2614 // to local variables is recorded to support lint errors.
2615 if (reference.binding()) {
2616 (*reference.binding())->SetWritten();
2617 }
2618 } else if (reference.IsHeapSlice()) {
2619 ReportError("assigning a value directly to an indexed field isn't allowed");
2620 } else if (reference.IsHeapReference()) {
2621 const Type* referenced_type = *reference.ReferencedType();
2622 if (reference.IsConst()) {
2623 Error("cannot assign to const value of type ", *referenced_type).Throw();
2624 }
2625 if (referenced_type == TypeOracle::GetFloat64OrUndefinedOrHoleType()) {
2626 GenerateCall(
2627 QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
2628#ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
2629 "StoreFloat64OrUndefinedOrHole"
2630#else
2631 "StoreFloat64OrHole"
2632#endif // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
2633 ),
2634 Arguments{{reference.heap_reference(), assignment_value}, {}});
2635 } else if (auto struct_type = referenced_type->StructSupertype()) {
2636 if (!assignment_value.type()->IsSubtypeOf(referenced_type)) {
2637 ReportError("Cannot assign to ", *referenced_type,
2638 " with value of type ", *assignment_value.type());
2639 }
2640 for (const Field& field : (*struct_type)->fields()) {
2641 const std::string& fieldname = field.name_and_type.name;
2642 // Allow assignment of structs even if they contain const fields.
2643 // Const on struct fields just disallows direct writes to them.
2644         bool ignore_struct_field_constness = true;
2645 GenerateAssignToLocation(
2646 GenerateFieldAccess(reference, fieldname,
2647                                 ignore_struct_field_constness),
2648 ProjectStructField(assignment_value, fieldname));
2649 }
2650 } else {
2651 GenerateCopy(reference.heap_reference());
2652 VisitResult converted_assignment_value =
2653 GenerateImplicitConvert(referenced_type, assignment_value);
2654 if (referenced_type == TypeOracle::GetFloat64Type()) {
2655 VisitResult silenced_float_value = GenerateCall(
2656 "Float64SilenceNaN", Arguments{{assignment_value}, {}});
2657 assembler().Poke(converted_assignment_value.stack_range(),
2658 silenced_float_value.stack_range(), referenced_type);
2659 }
2660 assembler().Emit(StoreReferenceInstruction{referenced_type});
2661 }
2662 } else if (reference.IsBitFieldAccess()) {
2663 // First fetch the bitfield struct, then set the updated bits, then store
2664 // it back to where we found it.
2665 VisitResult bit_field_struct =
2666 GenerateFetchFromLocation(reference.bit_field_struct_location());
2667 VisitResult converted_value =
2668 GenerateImplicitConvert(*reference.ReferencedType(), assignment_value);
2669 VisitResult updated_bit_field_struct =
2670 GenerateSetBitField(bit_field_struct.type(), reference.bit_field(),
2671 bit_field_struct, converted_value);
2672 GenerateAssignToLocation(reference.bit_field_struct_location(),
2673 updated_bit_field_struct);
2674 } else {
2675 DCHECK(reference.IsTemporary());
2676 ReportError("cannot assign to const-bound or temporary ",
2677 reference.temporary_description());
2678 }
2679}
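// Example (illustrative): an assignment like "o.flags.ready = v" to a
// bitfield location fetches the containing bitfield struct, rewrites the
// bits via GenerateSetBitField, and recursively stores the updated word
// back through the enclosing location; plain float64 heap stores first pass
// the value through Float64SilenceNaN before the StoreReferenceInstruction.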
2680
2681VisitResult ImplementationVisitor::GeneratePointerCall(
2682 Expression* callee, const Arguments& arguments, bool is_tailcall) {
2683 StackScope scope(this);
2684 TypeVector parameter_types(arguments.parameters.ComputeTypeVector());
2685 VisitResult callee_result = Visit(callee);
2686 if (!callee_result.type()->IsBuiltinPointerType()) {
2687 std::stringstream stream;
2688 stream << "Expected a function pointer type but found "
2689 << *callee_result.type();
2690 ReportError(stream.str());
2691 }
2692 const BuiltinPointerType* type =
2693 BuiltinPointerType::cast(callee_result.type());
2694
2695 if (type->parameter_types().size() != parameter_types.size()) {
2696 std::stringstream stream;
2697 stream << "parameter count mismatch calling function pointer with Type: "
2698 << *type << " - expected "
2699 << std::to_string(type->parameter_types().size()) << ", found "
2700 << std::to_string(parameter_types.size());
2701 ReportError(stream.str());
2702 }
2703
2704 ParameterTypes types{type->parameter_types(), false};
2705 Signature sig;
2706 sig.parameter_types = types;
2707 if (!IsCompatibleSignature(sig, parameter_types, 0)) {
2708 std::stringstream stream;
2709 stream << "parameters do not match function pointer signature. Expected: ("
2710 << type->parameter_types() << ") but got: (" << parameter_types
2711 << ")";
2712 ReportError(stream.str());
2713 }
2714
2715 callee_result = GenerateCopy(callee_result);
2716 StackRange arg_range = assembler().TopRange(0);
2717 for (size_t current = 0; current < arguments.parameters.size(); ++current) {
2718 const Type* to_type = type->parameter_types()[current];
2719 arg_range.Extend(
2720 GenerateImplicitConvert(to_type, arguments.parameters[current])
2721 .stack_range());
2722 }
2723
2724 assembler().Emit(
2725 CallBuiltinPointerInstruction{is_tailcall, type, arg_range.Size()});
2726
2727 if (is_tailcall) {
2728 return VisitResult::NeverResult();
2729 }
2730 DCHECK_EQ(1, LoweredSlotCount(type->return_type()));
2731 return scope.Yield(VisitResult(type->return_type(), assembler().TopRange(1)));
2732}
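// Illustration (hypothetical Torque, syntax approximate): calling through a
// builtin pointer, e.g.
//
//   const f: builtin(Smi) => Smi = ...;
//   f(x);
//
// verifies arity and signature compatibility as above, copies the pointer
// and the converted arguments onto the stack, and emits a single
// CallBuiltinPointerInstruction; a tail call produces no value.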
2733
2734void ImplementationVisitor::AddCallParameter(
2735 Callable* callable, VisitResult parameter, const Type* parameter_type,
2736 std::vector<VisitResult>* converted_arguments, StackRange* argument_range,
2737 std::vector<std::string>* constexpr_arguments, bool inline_macro) {
2738 VisitResult converted;
2739 if ((converted_arguments->size() < callable->signature().implicit_count) &&
2740 parameter.type()->IsTopType()) {
2741 converted = GenerateCopy(parameter);
2742 } else {
2743 converted = GenerateImplicitConvert(parameter_type, parameter);
2744 }
2745 converted_arguments->push_back(converted);
2746 if (!inline_macro) {
2747 if (converted.IsOnStack()) {
2748 argument_range->Extend(converted.stack_range());
2749 } else {
2750 constexpr_arguments->push_back(converted.constexpr_value());
2751 }
2752 }
2753}
2754
2755namespace {
2756std::pair<std::string, std::string> GetClassInstanceTypeRange(
2757 const ClassType* class_type) {
2758 std::pair<std::string, std::string> result;
2759 if (class_type->InstanceTypeRange()) {
2760 auto instance_type_range = *class_type->InstanceTypeRange();
2761 std::string instance_type_string_first =
2762 "static_cast<InstanceType>(" +
2763 std::to_string(instance_type_range.first) + ")";
2764 std::string instance_type_string_second =
2765 "static_cast<InstanceType>(" +
2766 std::to_string(instance_type_range.second) + ")";
2767 result =
2768 std::make_pair(instance_type_string_first, instance_type_string_second);
2769 } else {
2770 ReportError(
2771         "%Min/MaxInstanceType must take a class type that either is a string "
2772         "or has a generated instance type range");
2773 }
2774 return result;
2775}
2776} // namespace
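// The strings produced above feed the %MinInstanceType / %MaxInstanceType
// intrinsics below; e.g. for a class whose generated range is [0x40, 0x4f]
// (hypothetical values) the results are "static_cast<InstanceType>(64)" and
// "static_cast<InstanceType>(79)".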
2777
2778VisitResult ImplementationVisitor::GenerateCall(
2779 Callable* callable, std::optional<LocationReference> this_reference,
2780 Arguments arguments, const TypeVector& specialization_types,
2781 bool is_tailcall) {
2782 CHECK(callable->Position().source.IsValid());
2783 if (auto stream = CurrentFileStreams::Get()) {
2784 stream->required_builtin_includes.insert(callable->Position().source);
2785 }
2786
2787 const Type* return_type = callable->signature().return_type;
2788
2789 if (is_tailcall) {
2790 if (Builtin* builtin = Builtin::DynamicCast(CurrentCallable::Get())) {
2791 const Type* outer_return_type = builtin->signature().return_type;
2792 if (!return_type->IsSubtypeOf(outer_return_type)) {
2793 Error("Cannot tailcall, type of result is ", *return_type,
2794 " but should be a subtype of ", *outer_return_type, ".");
2795 }
2796 } else {
2797 Error("Tail calls are only allowed from builtins");
2798 }
2799 }
2800
2801 bool inline_macro = callable->ShouldBeInlined(output_type_);
2802 std::vector<VisitResult> implicit_arguments;
2803 for (size_t i = 0; i < callable->signature().implicit_count; ++i) {
2804 std::string implicit_name = callable->signature().parameter_names[i]->value;
2805 std::optional<Binding<LocalValue>*> val =
2806 TryLookupLocalValue(implicit_name);
2807 if (val) {
2808 implicit_arguments.push_back(
2809 GenerateFetchFromLocation((*val)->GetLocationReference(*val)));
2810 } else {
2811       VisitResult uninitialized = VisitResult::TopTypeResult(
2812 "implicit parameter '" + implicit_name +
2813 "' is not defined when invoking " + callable->ReadableName() +
2814 " at " + PositionAsString(CurrentSourcePosition::Get()),
2815 callable->signature().parameter_types.types[i]);
2816       implicit_arguments.push_back(uninitialized);
2817 }
2818 const Type* type = implicit_arguments.back().type();
2819 if (const TopType* top_type = TopType::DynamicCast(type)) {
2820 if (!callable->IsMacro() || callable->IsExternal()) {
2821        ReportError(
2822            "uninitialized implicit parameters can only be passed to "
2823 "Torque-defined macros: the ",
2824 top_type->reason());
2825 }
2826 inline_macro = true;
2827 }
2828 }
2829
2830 std::vector<VisitResult> converted_arguments;
2831 StackRange argument_range = assembler().TopRange(0);
2832 std::vector<std::string> constexpr_arguments;
2833
2834 size_t current = 0;
2835 for (; current < callable->signature().implicit_count; ++current) {
2836 AddCallParameter(callable, implicit_arguments[current],
2837 callable->signature().parameter_types.types[current],
2838 &converted_arguments, &argument_range,
2839 &constexpr_arguments, inline_macro);
2840 }
2841
2842 if (this_reference) {
2843 DCHECK(callable->IsMethod());
2844 Method* method = Method::cast(callable);
2845     // By now, the this reference should be a variable, a temporary, or
2846     // a Slice. In any of these cases fetching the VisitResult should succeed.
2847 VisitResult this_value = this_reference->GetVisitResult();
2848 if (inline_macro) {
2849 if (!this_value.type()->IsSubtypeOf(method->aggregate_type())) {
2850 ReportError("this parameter must be a subtype of ",
2851 *method->aggregate_type(), " but it is of type ",
2852 *this_value.type());
2853 }
2854 } else {
2855 AddCallParameter(callable, this_value, method->aggregate_type(),
2856 &converted_arguments, &argument_range,
2857 &constexpr_arguments, inline_macro);
2858 }
2859 ++current;
2860 }
2861
2862 for (const auto& arg : arguments.parameters) {
2863 const Type* to_type = (current >= callable->signature().types().size())
2864 ? TypeOracle::GetObjectType()
2865 : callable->signature().types()[current++];
2866 AddCallParameter(callable, arg, to_type, &converted_arguments,
2867 &argument_range, &constexpr_arguments, inline_macro);
2868 }
2869
2870 size_t label_count = callable->signature().labels.size();
2871 if (label_count != arguments.labels.size()) {
2872 std::stringstream s;
2873 s << "unexpected number of otherwise labels for "
2874 << callable->ReadableName() << " (expected "
2875 << std::to_string(label_count) << " found "
2876 << std::to_string(arguments.labels.size()) << ")";
2877 ReportError(s.str());
2878 }
2879
2880 if (callable->IsTransitioning()) {
2881 if (!CurrentCallable::Get()->IsTransitioning()) {
2882 std::stringstream s;
2883 s << *CurrentCallable::Get()
2884 << " isn't marked transitioning but calls the transitioning "
2885 << *callable;
2886 ReportError(s.str());
2887 }
2888 }
2889
2890 if (auto* builtin = Builtin::DynamicCast(callable)) {
2891 std::optional<Block*> catch_block = GetCatchBlock();
2892    assembler().Emit(CallBuiltinInstruction{
2893        is_tailcall, builtin, argument_range.Size(), catch_block});
2894 GenerateCatchBlock(catch_block);
2895 if (is_tailcall) {
2896 return VisitResult::NeverResult();
2897 } else if (return_type->IsNever()) {
2898 assembler().Emit(AbortInstruction{AbortInstruction::Kind::kUnreachable});
2899 return VisitResult::NeverResult();
2900 } else {
2901 size_t slot_count = LoweredSlotCount(return_type);
2902 if (builtin->IsStub()) {
2903 if (slot_count < 1 || slot_count > 2) {
2904          ReportError(
2905              "Builtin with stub linkage is expected to return one or two "
2906 "values but returns ",
2907 slot_count);
2908 }
2909 } else {
2910 if (slot_count != 1) {
2911          ReportError(
2912              "Builtin with JS linkage is expected to return one value but "
2913 "returns ",
2914 slot_count);
2915 }
2916 }
2917 return VisitResult(return_type, assembler().TopRange(slot_count));
2918 }
2919 } else if (auto* macro = Macro::DynamicCast(callable)) {
2920 if (is_tailcall) {
2921 ReportError("can't tail call a macro");
2922 }
2923
2924 macro->SetUsed();
2925
2926 // If we're currently generating a C++ macro and it's calling another macro,
2927 // then we need to make sure that we also generate C++ code for the called
2928 // macro within the same -inl.inc file.
2929 if ((output_type_ == OutputType::kCC ||
2930 output_type_ == OutputType::kCCDebug) &&
2931 !inline_macro) {
2932 if (auto* torque_macro = TorqueMacro::DynamicCast(macro)) {
2933 auto* streams = CurrentFileStreams::Get();
2934 SourceId file = streams ? streams->file : SourceId::Invalid();
2935 GlobalContext::EnsureInCCOutputList(torque_macro, file);
2936 }
2937 }
2938
2939 // TODO(torque-builder): Consider a function builder here.
2940 if (return_type->IsConstexpr()) {
2941 DCHECK_EQ(0, arguments.labels.size());
2942 std::stringstream result;
2943 result << "(";
2944 bool first = true;
2945 switch (output_type_) {
2946 case OutputType::kCSA: {
2947 if (auto* extern_macro = ExternMacro::DynamicCast(macro)) {
2948 result << extern_macro->external_assembler_name() << "(state_)."
2949 << extern_macro->ExternalName() << "(";
2950 } else {
2951 result << macro->ExternalName() << "(state_";
2952 first = false;
2953 }
2954 break;
2955 }
2956 case OutputType::kCC: {
2957 auto* extern_macro = ExternMacro::DynamicCast(macro);
2958 CHECK_NOT_NULL(extern_macro);
2959 result << extern_macro->CCName() << "(";
2960 break;
2961 }
2962 case OutputType::kCCDebug: {
2963 auto* extern_macro = ExternMacro::DynamicCast(macro);
2964 CHECK_NOT_NULL(extern_macro);
2965 result << extern_macro->CCDebugName() << "(accessor";
2966 first = false;
2967 break;
2968 }
2969 }
2970 for (const VisitResult& arg : converted_arguments) {
2971 DCHECK(!arg.IsOnStack());
2972 if (!first) {
2973 result << ", ";
2974 }
2975 first = false;
2976 result << arg.constexpr_value();
2977 }
2978 result << "))";
2979 return VisitResult(return_type, result.str());
2980 } else if (inline_macro) {
2981 std::vector<Block*> label_blocks;
2982 label_blocks.reserve(arguments.labels.size());
2983 for (Binding<LocalLabel>* label : arguments.labels) {
2984 label_blocks.push_back(label->block);
2985 }
2986 return InlineMacro(macro, this_reference, converted_arguments,
2987 std::move(label_blocks));
2988 } else if (arguments.labels.empty() &&
2989 return_type != TypeOracle::GetNeverType()) {
2990 std::optional<Block*> catch_block = GetCatchBlock();
2991      assembler().Emit(CallCsaMacroInstruction{
2992          macro, std::move(constexpr_arguments), catch_block});
2993 GenerateCatchBlock(catch_block);
2994 size_t return_slot_count = LoweredSlotCount(return_type);
2995 return VisitResult(return_type, assembler().TopRange(return_slot_count));
2996 } else {
2997 std::optional<Block*> return_continuation;
2998 if (return_type != TypeOracle::GetNeverType()) {
2999 return_continuation = assembler().NewBlock();
3000 }
3001
3002 std::vector<Block*> label_blocks;
3003
3004 for (size_t i = 0; i < label_count; ++i) {
3005 label_blocks.push_back(assembler().NewBlock());
3006 }
3007 std::optional<Block*> catch_block = GetCatchBlock();
3008      assembler().Emit(CallCsaMacroAndBranchInstruction{
3009          macro, constexpr_arguments, return_continuation, label_blocks,
3010 catch_block});
3011 GenerateCatchBlock(catch_block);
3012
3013 for (size_t i = 0; i < label_count; ++i) {
3014 Binding<LocalLabel>* label = arguments.labels[i];
3015 size_t callee_label_parameters =
3016 callable->signature().labels[i].types.size();
3017 if (label->parameter_types.size() != callee_label_parameters) {
3018 std::stringstream s;
3019 s << "label " << label->name()
3020 << " doesn't have the right number of parameters (found "
3021 << std::to_string(label->parameter_types.size()) << " expected "
3022 << std::to_string(callee_label_parameters) << ")";
3023 ReportError(s.str());
3024 }
3025 assembler().Bind(label_blocks[i]);
3026 assembler().Goto(
3027 label->block,
3028 LowerParameterTypes(callable->signature().labels[i].types).size());
3029
3030 size_t j = 0;
3031 for (auto t : callable->signature().labels[i].types) {
3032 const Type* parameter_type = label->parameter_types[j];
3033 if (!t->IsSubtypeOf(parameter_type)) {
3034 ReportError("mismatch of label parameters (label expects ",
3035 *parameter_type, " but macro produces ", *t,
3036 " for parameter ", i + 1, ")");
3037 }
3038 j++;
3039 }
3040 }
3041
3042 if (return_continuation) {
3043 assembler().Bind(*return_continuation);
3044 size_t return_slot_count = LoweredSlotCount(return_type);
3045 return VisitResult(return_type,
3046 assembler().TopRange(return_slot_count));
3047 } else {
3048 return VisitResult::NeverResult();
3049 }
3050 }
3051 } else if (auto* runtime_function = RuntimeFunction::DynamicCast(callable)) {
3052 std::optional<Block*> catch_block = GetCatchBlock();
3053    assembler().Emit(CallRuntimeInstruction{
3054        is_tailcall, runtime_function, argument_range.Size(), catch_block});
3055 GenerateCatchBlock(catch_block);
3056 if (is_tailcall || return_type == TypeOracle::GetNeverType()) {
3057 return VisitResult::NeverResult();
3058 } else {
3059 size_t slot_count = LoweredSlotCount(return_type);
3060 DCHECK_LE(slot_count, 1);
3061 // TODO(turbofan): Actually, runtime functions have to return a value, so
3062 // we should assert slot_count == 1 here.
3063 return VisitResult(return_type, assembler().TopRange(slot_count));
3064 }
3065 } else if (auto* intrinsic = Intrinsic::DynamicCast(callable)) {
3066 if (intrinsic->ExternalName() == "%SizeOf") {
3067 if (specialization_types.size() != 1) {
3068 ReportError("%SizeOf must take a single type parameter");
3069 }
3070 const Type* type = specialization_types[0];
3071 std::string size_string;
3072 if (std::optional<std::tuple<size_t, std::string>> size = SizeOf(type)) {
3073 size_string = std::get<1>(*size);
3074 } else {
3075 Error("size of ", *type, " is not known.");
3076 }
3077 return VisitResult(return_type, size_string);
3078 } else if (intrinsic->ExternalName() == "%ClassHasMapConstant") {
3079 const Type* type = specialization_types[0];
3080 const ClassType* class_type = ClassType::DynamicCast(type);
3081 if (!class_type) {
3082 ReportError("%ClassHasMapConstant must take a class type parameter");
3083 }
3084 // If the class isn't actually used as the parameter to a TNode,
3085 // then we can't rely on the class existing in C++ or being of the same
3086 // type (e.g. it could be a template), so don't use the template CSA
3087 // machinery for accessing the class' map.
3088 if (class_type->name() != class_type->GetGeneratedTNodeTypeName()) {
3089 return VisitResult(return_type, std::string("false"));
3090 } else {
3091 return VisitResult(
3092 return_type,
3093 std::string("CodeStubAssembler(state_).ClassHasMapConstant<") +
3094 class_type->name() + ">()");
3095 }
3096 } else if (intrinsic->ExternalName() == "%MinInstanceType") {
3097 if (specialization_types.size() != 1) {
3098 ReportError("%MinInstanceType must take a single type parameter");
3099 }
3100 const Type* type = specialization_types[0];
3101 const ClassType* class_type = ClassType::DynamicCast(type);
3102 if (!class_type) {
3103 ReportError("%MinInstanceType must take a class type parameter");
3104 }
3105 std::pair<std::string, std::string> instance_types =
3106 GetClassInstanceTypeRange(class_type);
3107 return VisitResult(return_type, instance_types.first);
3108 } else if (intrinsic->ExternalName() == "%MaxInstanceType") {
3109 if (specialization_types.size() != 1) {
3110 ReportError("%MaxInstanceType must take a single type parameter");
3111 }
3112 const Type* type = specialization_types[0];
3113 const ClassType* class_type = ClassType::DynamicCast(type);
3114 if (!class_type) {
3115 ReportError("%MaxInstanceType must take a class type parameter");
3116 }
3117 std::pair<std::string, std::string> instance_types =
3118 GetClassInstanceTypeRange(class_type);
3119 return VisitResult(return_type, instance_types.second);
3120 } else if (intrinsic->ExternalName() == "%RawConstexprCast") {
3121 if (intrinsic->signature().parameter_types.types.size() != 1 ||
3122 constexpr_arguments.size() != 1) {
3123        ReportError(
3124            "%RawConstexprCast must take a single parameter with constexpr "
3125 "type");
3126 }
3127 if (!return_type->IsConstexpr()) {
3128 std::stringstream s;
3129 s << *return_type
3130 << " return type for %RawConstexprCast is not constexpr";
3131 ReportError(s.str());
3132 }
3133 std::stringstream result;
3134 result << "static_cast<" << return_type->GetGeneratedTypeName() << ">(";
3135 result << constexpr_arguments[0];
3136 result << ")";
3137 return VisitResult(return_type, result.str());
3138 } else if (intrinsic->ExternalName() == "%IndexedFieldLength") {
3139 const Type* type = specialization_types[0];
3140 const ClassType* class_type = ClassType::DynamicCast(type);
3141 if (!class_type) {
3142 ReportError("%IndexedFieldLength must take a class type parameter");
3143 }
3144 const Field& field =
3145 class_type->LookupField(StringLiteralUnquote(constexpr_arguments[0]));
3146 return GenerateArrayLength(VisitResult(type, argument_range), field);
3147 } else if (intrinsic->ExternalName() == "%MakeLazy") {
3148 if (specialization_types[0]->IsStructType()) {
3149 ReportError("%MakeLazy can't use macros that return structs");
3150 }
3151 std::string getter_name = StringLiteralUnquote(constexpr_arguments[0]);
3152
3153 // Normally the parser would split namespace names for us, but we
3154 // sidestepped it by putting the macro name in a string literal.
3155 QualifiedName qualified_getter_name = QualifiedName::Parse(getter_name);
3156
3157 // converted_arguments contains all of the arguments to %MakeLazy. We're
3158 // looking for a function that takes all but the first.
3159 Arguments arguments_to_getter;
3160 arguments_to_getter.parameters.insert(
3161 arguments_to_getter.parameters.begin(),
3162 converted_arguments.begin() + 1, converted_arguments.end());
3163
3164 Callable* callable_macro = LookupCallable(
3165 qualified_getter_name, Declarations::Lookup(qualified_getter_name),
3166 arguments_to_getter, {});
3167 Macro* getter = Macro::DynamicCast(callable_macro);
3168 if (!getter || getter->IsMethod()) {
3170 "%MakeLazy expects a macro, not builtin or other type of callable");
3171 }
3172 if (!getter->signature().labels.empty()) {
3173 ReportError("%MakeLazy requires a macro with no labels");
3174 }
3175 if (!getter->signature().return_type->IsSubtypeOf(
3176 specialization_types[0])) {
3177 ReportError("%MakeLazy expected return type ", *specialization_types[0],
3178 " but found ", *getter->signature().return_type);
3179 }
3180 if (getter->signature().implicit_count > 0) {
3181 ReportError("Implicit parameters are not yet supported in %MakeLazy");
3182 }
3183
3184 getter->SetUsed(); // Prevent warnings about unused macros.
3185
3186 // Now that we've looked up the getter macro, we have to convert the
3187 // arguments again, so that, for example, constexpr arguments can be
3188 // coerced to non-constexpr types and put on the stack.
3189
3190 std::vector<VisitResult> converted_arguments_for_getter;
3191 StackRange argument_range_for_getter = assembler().TopRange(0);
3192 std::vector<std::string> constexpr_arguments_for_getter;
3193
3194 size_t arg_count = 0;
3195 for (const auto& arg : arguments_to_getter.parameters) {
3196 DCHECK_LT(arg_count, getter->signature().types().size());
3197 const Type* to_type = getter->signature().types()[arg_count++];
3198 AddCallParameter(getter, arg, to_type, &converted_arguments_for_getter,
3199 &argument_range_for_getter,
3200 &constexpr_arguments_for_getter,
3201 /*inline_macro=*/false);
3202 }
3203
3204 // Now that the arguments are prepared, emit the instruction that consumes
3205 // them.
3206 assembler().Emit(MakeLazyNodeInstruction{
3207 getter, return_type, std::move(constexpr_arguments_for_getter)});
3208 return VisitResult(return_type, assembler().TopRange(1));
3209 } else if (intrinsic->ExternalName() == "%FieldSlice") {
3210 const Type* type = specialization_types[0];
3211 const ClassType* class_type = ClassType::DynamicCast(type);
3212 if (!class_type) {
3213 ReportError("The first type parameter to %FieldSlice must be a class");
3214 }
3215 const Field& field =
3216 class_type->LookupField(StringLiteralUnquote(constexpr_arguments[0]));
3217 const Type* expected_slice_type =
3218 field.const_qualified
3219 ? TypeOracle::GetConstSliceType(field.name_and_type.type)
3220 : TypeOracle::GetMutableSliceType(field.name_and_type.type);
3221 const Type* declared_slice_type = specialization_types[1];
3222 if (expected_slice_type != declared_slice_type) {
3223 Error(
3224 "The second type parameter to %FieldSlice must be the precise "
3225 "slice type for the named field");
3226 }
3227 LocationReference ref = GenerateFieldReference(
3228 VisitResult(type, argument_range), field, class_type,
3229 /*treat_optional_as_indexed=*/true);
3230 if (!ref.IsHeapSlice()) {
3231 ReportError("%FieldSlice expected an indexed or optional field");
3232 }
3233 return ref.heap_slice();
3234 } else {
3235 assembler().Emit(CallIntrinsicInstruction{intrinsic, specialization_types,
3236 constexpr_arguments});
3237 size_t return_slot_count =
3238 LoweredSlotCount(intrinsic->signature().return_type);
3239 return VisitResult(return_type, assembler().TopRange(return_slot_count));
3240 }
3241 } else {
3242 UNREACHABLE();
3243 }
3244}
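// Note: most of the intrinsics above compile down to plain C++ expression
// strings rather than CSA instructions. For example (illustrative),
// %RawConstexprCast with a constexpr int32 return type produces the string
// "static_cast<int32_t>(<argument>)", which is spliced verbatim into the
// generated code.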
3245
3246VisitResult ImplementationVisitor::GenerateCall(
3247 const QualifiedName& callable_name, Arguments arguments,
3248 const TypeVector& specialization_types, bool is_tailcall) {
3249 Callable* callable =
3250 LookupCallable(callable_name, Declarations::Lookup(callable_name),
3251 arguments, specialization_types);
3252 return GenerateCall(callable, std::nullopt, arguments, specialization_types,
3253 is_tailcall);
3254}
3255
3256VisitResult ImplementationVisitor::Visit(CallExpression* expr,
3257 bool is_tailcall) {
3258 StackScope scope(this);
3259
3260 if (expr->callee->name->value == "&" && expr->arguments.size() == 1) {
3261 if (auto* loc_expr = LocationExpression::DynamicCast(expr->arguments[0])) {
3262 LocationReference ref = GetLocationReference(loc_expr);
3263 if (ref.IsHeapReference()) return scope.Yield(ref.heap_reference());
3264 if (ref.IsHeapSlice()) return scope.Yield(ref.heap_slice());
3265 }
3266 ReportError("Unable to create a heap reference.");
3267 }
3268
3269 Arguments arguments;
3270 QualifiedName name = QualifiedName(expr->callee->namespace_qualification,
3271 expr->callee->name->value);
3272 TypeVector specialization_types =
3273 TypeVisitor::ComputeTypeVector(expr->callee->generic_arguments);
3274 bool has_template_arguments = !specialization_types.empty();
3275 for (Expression* arg : expr->arguments)
3276 arguments.parameters.push_back(Visit(arg));
3277 arguments.labels = LabelsFromIdentifiers(expr->labels);
3278 if (!has_template_arguments && name.namespace_qualification.empty() &&
3279 TryLookupLocalValue(name.name)) {
3280 return scope.Yield(
3281 GeneratePointerCall(expr->callee, arguments, is_tailcall));
3282 } else {
3283 if (GlobalContext::collect_language_server_data()) {
3284 Callable* callable = LookupCallable(name, Declarations::Lookup(name),
3285 arguments, specialization_types);
3286 LanguageServerData::AddDefinition(expr->callee->name->pos,
3287 callable->IdentifierPosition());
3288 }
3289 if (GlobalContext::collect_kythe_data()) {
3290 Callable* callable = LookupCallable(name, Declarations::Lookup(name),
3291 arguments, specialization_types);
3292 Callable* caller = CurrentCallable::Get();
3293 KytheData::AddCall(caller, expr->callee->name->pos, callable);
3294 }
3295 if (expr->callee->name->value == "!" && arguments.parameters.size() == 1) {
3296 PropagateBitfieldMark(expr->arguments[0], expr);
3297 }
3298 if (expr->callee->name->value == "==" && arguments.parameters.size() == 2) {
3299 if (arguments.parameters[0].type()->IsConstexpr()) {
3300 PropagateBitfieldMark(expr->arguments[1], expr);
3301 } else if (arguments.parameters[1].type()->IsConstexpr()) {
3302 PropagateBitfieldMark(expr->arguments[0], expr);
3303 }
3304 }
3305 return scope.Yield(
3306 GenerateCall(name, arguments, specialization_types, is_tailcall));
3307 }
3308}
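// Example (illustrative): for a Torque expression like &obj.elements, the "&"
// branch above yields the field's heap reference or slice directly instead of
// performing a call; every other callee goes through the regular
// LookupCallable/GenerateCall path.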
3309
3310VisitResult ImplementationVisitor::Visit(CallMethodExpression* expr) {
3311 StackScope scope(this);
3312 Arguments arguments;
3313 std::string method_name = expr->method->name->value;
3314 TypeVector specialization_types =
3315 TypeVisitor::ComputeTypeVector(expr->method->generic_arguments);
3316 LocationReference target = GetLocationReference(expr->target);
3317 if (!target.IsVariableAccess()) {
3318 VisitResult result = GenerateFetchFromLocation(target);
3319 target = LocationReference::Temporary(result, "this parameter");
3320 }
3321 const AggregateType* target_type =
3322 (*target.ReferencedType())->AggregateSupertype().value_or(nullptr);
3323 if (!target_type) {
3324 ReportError("target of method call not a struct or class type");
3325 }
3326 for (Expression* arg : expr->arguments) {
3327 arguments.parameters.push_back(Visit(arg));
3328 }
3329 arguments.labels = LabelsFromIdentifiers(expr->labels);
3330 TypeVector argument_types = arguments.parameters.ComputeTypeVector();
3331 DCHECK_EQ(expr->method->namespace_qualification.size(), 0);
3332 QualifiedName qualified_name = QualifiedName(method_name);
3333 Callable* callable = LookupMethod(method_name, target_type, arguments, {});
3334 if (GlobalContext::collect_language_server_data()) {
3335 LanguageServerData::AddDefinition(expr->method->name->pos,
3336 callable->IdentifierPosition());
3337 }
3338 if (GlobalContext::collect_kythe_data()) {
3339 Callable* caller = CurrentCallable::Get();
3340 KytheData::AddCall(caller, expr->method->name->pos, callable);
3341 }
3342 return scope.Yield(GenerateCall(callable, target, arguments, {}, false));
3343}
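// Note: the receiver is materialized as a LocationReference so that "this"
// can be a variable access or a temporary; method overload resolution then
// happens in LookupMethod against the aggregate type computed above.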
3344
3345VisitResult ImplementationVisitor::Visit(IntrinsicCallExpression* expr) {
3346 StackScope scope(this);
3347 Arguments arguments;
3348 TypeVector specialization_types =
3349 TypeVisitor::ComputeTypeVector(expr->generic_arguments);
3350 for (Expression* arg : expr->arguments)
3351 arguments.parameters.push_back(Visit(arg));
3352 return scope.Yield(
3353 GenerateCall(expr->name->value, arguments, specialization_types, false));
3354}
3355
3356void ImplementationVisitor::GenerateBranch(const VisitResult& condition,
3357 Block* true_block,
3358 Block* false_block) {
3359 DCHECK_EQ(condition,
3360 VisitResult(TypeOracle::GetBoolType(), assembler().TopRange(1)));
3361 assembler().Branch(true_block, false_block);
3362}
3363
3364VisitResult ImplementationVisitor::GenerateBoolConstant(bool constant) {
3365 return GenerateImplicitConvert(TypeOracle::GetBoolType(),
3366 VisitResult(TypeOracle::GetConstexprBoolType(),
3367 constant ? "true" : "false"));
3368}
3369
3370void ImplementationVisitor::GenerateExpressionBranch(Expression* expression,
3371 Block* true_block,
3372 Block* false_block) {
3373 StackScope stack_scope(this);
3374 VisitResult expression_result = this->Visit(expression);
3375 expression_result = stack_scope.Yield(
3376 GenerateImplicitConvert(TypeOracle::GetBoolType(), expression_result));
3377 GenerateBranch(expression_result, true_block, false_block);
3378}
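// Note: GenerateExpressionBranch coerces the condition to the CSA bool type
// through GenerateImplicitConvert, so constexpr bool conditions are
// materialized (cf. GenerateBoolConstant above) before the branch is emitted.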
3379
3380VisitResult ImplementationVisitor::GenerateImplicitConvert(
3381 const Type* destination_type, VisitResult source) {
3382 StackScope scope(this);
3383 if (source.type() == TypeOracle::GetNeverType()) {
3384 ReportError("it is not allowed to use a value of type never");
3385 }
3386
3387 if (destination_type == source.type()) {
3388 return scope.Yield(GenerateCopy(source));
3389 }
3390
3391 if (auto from = TypeOracle::ImplicitlyConvertableFrom(destination_type,
3392 source.type())) {
3393 return scope.Yield(GenerateCall(kFromConstexprMacroName,
3394 Arguments{{source}, {}},
3395 {destination_type, *from}, false));
3396 } else if (IsAssignableFrom(destination_type, source.type())) {
3397 source.SetType(destination_type);
3398 return scope.Yield(GenerateCopy(source));
3399 } else {
3400 std::stringstream s;
3401 if (const TopType* top_type = TopType::DynamicCast(source.type())) {
3402 s << "undefined expression of type " << *destination_type << ": the "
3403 << top_type->reason();
3404 } else {
3405 s << "cannot use expression of type " << *source.type()
3406 << " as a value of type " << *destination_type;
3407 }
3408 ReportError(s.str());
3409 }
3410}
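// Summary of the conversion cases above: identical types are copied as-is; a
// registered implicit conversion is compiled as a call to the
// "FromConstexpr"-style macro specialized on <destination, source>; a plain
// subtype relationship merely re-labels the value without emitting code; and
// everything else is reported as a type error.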
3411
3412StackRange ImplementationVisitor::GenerateLabelGoto(
3413 LocalLabel* label, std::optional<StackRange> arguments) {
3414 return assembler().Goto(label->block, arguments ? arguments->Size() : 0);
3415}
3416
3417std::vector<Binding<LocalLabel>*> ImplementationVisitor::LabelsFromIdentifiers(
3418 const std::vector<Identifier*>& names) {
3419 std::vector<Binding<LocalLabel>*> result;
3420 result.reserve(names.size());
3421 for (const auto& name : names) {
3422 Binding<LocalLabel>* label = LookupLabel(name->value);
3423 result.push_back(label);
3424
3425 // Link up labels in "otherwise" part of the call expression with
3426 // either the label in the signature of the calling macro or the label
3427 // block of a surrounding "try".
3428 if (GlobalContext::collect_language_server_data()) {
3429 LanguageServerData::AddDefinition(name->pos,
3430 label->declaration_position());
3431 }
3432 // TODO(v8:12261): Might have to track KytheData here.
3433 }
3434 return result;
3435}
3436
3437StackRange ImplementationVisitor::LowerParameter(
3438 const Type* type, const std::string& parameter_name,
3439 Stack<std::string>* lowered_parameters) {
3440 if (std::optional<const StructType*> struct_type = type->StructSupertype()) {
3441 StackRange range = lowered_parameters->TopRange(0);
3442 for (auto& field : (*struct_type)->fields()) {
3443 StackRange parameter_range = LowerParameter(
3444 field.name_and_type.type,
3445 parameter_name + "." + field.name_and_type.name, lowered_parameters);
3446 range.Extend(parameter_range);
3447 }
3448 return range;
3449 } else {
3450 lowered_parameters->Push(parameter_name);
3451 return lowered_parameters->TopRange(1);
3452 }
3453}
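// Example (illustrative): a parameter "p" of a struct type with fields x and
// y is lowered recursively to the stack entries "p.x" and "p.y"; a scalar
// parameter occupies a single slot under its own name.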
3454
3455void ImplementationVisitor::LowerLabelParameter(
3456 const Type* type, const std::string& parameter_name,
3457 std::vector<std::string>* lowered_parameters) {
3458 if (std::optional<const StructType*> struct_type = type->StructSupertype()) {
3459 for (auto& field : (*struct_type)->fields()) {
3460 LowerLabelParameter(
3461 field.name_and_type.type,
3462 "&((*" + parameter_name + ")." + field.name_and_type.name + ")",
3463 lowered_parameters);
3464 }
3465 } else {
3466 lowered_parameters->push_back(parameter_name);
3467 }
3468}
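// Example (illustrative): a label parameter "p" of a struct type with field x
// is lowered to the output-parameter expression "&((*p).x)", while scalar
// label parameters are passed through unchanged.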
3469
3470std::string ImplementationVisitor::ExternalLabelName(
3471 const std::string& label_name) {
3472 return "label_" + label_name;
3473}
3474
3475std::string ImplementationVisitor::ExternalLabelParameterName(
3476 const std::string& label_name, size_t i) {
3477 return "label_" + label_name + "_parameter_" + std::to_string(i);
3478}
3479
3480std::string ImplementationVisitor::ExternalParameterName(
3481 const std::string& name) {
3482 return std::string("p_") + name;
3483}
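// Naming examples (these follow directly from the helpers above):
//   ExternalLabelName("Fail")             == "label_Fail"
//   ExternalLabelParameterName("Fail", 0) == "label_Fail_parameter_0"
//   ExternalParameterName("obj")          == "p_obj"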
3484
3485 bool IsCompatibleSignature(const Signature& sig, const TypeVector& types,
3486 size_t label_count) {
3487 auto i = sig.parameter_types.types.begin() + sig.implicit_count;
3488 if ((sig.parameter_types.types.size() - sig.implicit_count) > types.size())
3489 return false;
3490 if (sig.labels.size() != label_count) return false;
3491 for (auto current : types) {
3492 if (i == sig.parameter_types.types.end()) {
3493 if (!sig.parameter_types.var_args) return false;
3494 if (!IsAssignableFrom(TypeOracle::GetObjectType(), current)) return false;
3495 } else {
3496 if (!IsAssignableFrom(*i++, current)) return false;
3497 }
3498 }
3499 return true;
3500}
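// Example (illustrative): a var-args signature accepts any extra arguments
// that are assignable to Object, but all declared explicit parameters must
// still be supplied and the label count must match exactly.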
3501
3502std::optional<Block*> ImplementationVisitor::GetCatchBlock() {
3503 std::optional<Block*> catch_block;
3504 if (TryLookupLabel(kCatchLabelName)) {
3505 catch_block = assembler().NewBlock(std::nullopt, true);
3506 }
3507 return catch_block;
3508}
3509
3510void ImplementationVisitor::GenerateCatchBlock(
3511 std::optional<Block*> catch_block) {
3512 if (catch_block) {
3513 std::optional<Binding<LocalLabel>*> catch_handler =
3514 TryLookupLabel(kCatchLabelName);
3515 // Reset the local scopes to prevent the macro calls below from using the
3516 // current catch handler.
3517 BindingsManagersScope bindings_managers_scope;
3518 if (assembler().CurrentBlockIsComplete()) {
3519 assembler().Bind(*catch_block);
3520 GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
3521 "GetAndResetPendingMessage"),
3522 Arguments{{}, {}}, {}, false);
3523 assembler().Goto((*catch_handler)->block, 2);
3524 } else {
3525 CfgAssemblerScopedTemporaryBlock temp(&assembler(), *catch_block);
3526 GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
3527 "GetAndResetPendingMessage"),
3528 Arguments{{}, {}}, {}, false);
3529 assembler().Goto((*catch_handler)->block, 2);
3530 }
3531 }
3532}
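// Note: both branches emit the same handler code; they differ only in whether
// the catch block can be bound directly (the current block already ended) or
// must be spliced in via a temporary block. The argument count of 2 in the
// Goto matches the two values handed to the catch handler: the caught
// exception and the pending message.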
3533void ImplementationVisitor::VisitAllDeclarables() {
3534 CurrentCallable::Scope current_callable(nullptr);
3535 const std::vector<std::unique_ptr<Declarable>>& all_declarables =
3536 GlobalContext::AllDeclarables();
3537
3538 // This has to be an index-based loop because all_declarables can be extended
3539 // during the loop.
3540 for (size_t i = 0; i < all_declarables.size(); ++i) {
3541 try {
3542 Visit(all_declarables[i].get());
3543 } catch (TorqueAbortCompilation&) {
3544 // Recover from compile errors here. The error is recorded already.
3545 }
3546 }
3547
3548 // Do the same for macros which generate C++ code.
3549 output_type_ = OutputType::kCC;
3550 const std::vector<std::pair<TorqueMacro*, SourceId>>& cc_macros =
3551 GlobalContext::AllMacrosForCCOutput();
3552 for (size_t i = 0; i < cc_macros.size(); ++i) {
3553 try {
3554 Visit(static_cast<Declarable*>(cc_macros[i].first), cc_macros[i].second);
3555 } catch (TorqueAbortCompilation&) {
3556 // Recover from compile errors here. The error is recorded already.
3557 }
3558 }
3559
3560 // Do the same for macros which generate C++ debug code.
3561 // The set of macros is the same as C++ macros.
3562 output_type_ = OutputType::kCCDebug;
3563 const std::vector<std::pair<TorqueMacro*, SourceId>>& cc_debug_macros =
3564 GlobalContext::AllMacrosForCCDebugOutput();
3565 for (size_t i = 0; i < cc_debug_macros.size(); ++i) {
3566 try {
3567 Visit(static_cast<Declarable*>(cc_debug_macros[i].first),
3568 cc_debug_macros[i].second);
3569 } catch (TorqueAbortCompilation&) {
3570 // Recover from compile errors here. The error is recorded already.
3571 }
3572 }
3573
3574 output_type_ = OutputType::kCSA;
3575}
3576
3577void ImplementationVisitor::Visit(Declarable* declarable,
3578 std::optional<SourceId> file) {
3579 CurrentScope::Scope current_scope(declarable->ParentScope());
3580 CurrentSourcePosition::Scope current_source_position(declarable->Position());
3581 CurrentFileStreams::Scope current_file_streams(
3582 &GlobalContext::GeneratedPerFile(file ? *file
3583 : declarable->Position().source));
3584 if (Callable* callable = Callable::DynamicCast(declarable)) {
3585 if (!callable->ShouldGenerateExternalCode(output_type_))
3586 CurrentFileStreams::Get() = nullptr;
3587 }
3588 switch (declarable->kind()) {
3589 case Declarable::kExternMacro:
3590 return Visit(ExternMacro::cast(declarable));
3591 case Declarable::kTorqueMacro:
3592 return Visit(TorqueMacro::cast(declarable));
3593 case Declarable::kMethod:
3594 return Visit(Method::cast(declarable));
3595 case Declarable::kBuiltin:
3596 return Visit(Builtin::cast(declarable));
3597 case Declarable::kTypeAlias:
3598 return Visit(TypeAlias::cast(declarable));
3599 case Declarable::kNamespaceConstant:
3600 return Visit(NamespaceConstant::cast(declarable));
3601 case Declarable::kRuntimeFunction:
3602 case Declarable::kIntrinsic:
3603 case Declarable::kExternConstant:
3604 case Declarable::kNamespace:
3605 case Declarable::kGenericCallable:
3606 case Declarable::kGenericType:
3607 return;
3608 }
3609}
3610
3611std::string MachineTypeString(const Type* type) {
3612 if (type->IsSubtypeOf(TypeOracle::GetSmiType())) {
3613 return "MachineType::TaggedSigned()";
3614 }
3615 if (type->IsSubtypeOf(TypeOracle::GetHeapObjectType())) {
3616 return "MachineType::TaggedPointer()";
3617 }
3618 if (type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
3619 return "MachineType::AnyTagged()";
3620 }
3621 return "MachineTypeOf<" + type->GetGeneratedTNodeTypeName() + ">::value";
3622}
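// Resulting mapping (follows directly from the checks above): Smi-typed
// values report MachineType::TaggedSigned(), HeapObject subtypes
// MachineType::TaggedPointer(), other tagged values MachineType::AnyTagged(),
// and untagged types defer to MachineTypeOf<T>::value for their TNode type T.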
3623
3624void ImplementationVisitor::GenerateBuiltinDefinitionsAndInterfaceDescriptors(
3625 const std::string& output_directory) {
3626 std::stringstream builtin_definitions;
3627 std::string builtin_definitions_file_name = "builtin-definitions.h";
3628
3629 // This file contains plain interface descriptor definitions and has to be
3630 // included in the middle of interface-descriptors.h. Thus it is not a normal
3631 // header file and uses the .inc suffix instead of the .h suffix.
3632 std::stringstream interface_descriptors;
3633 std::string interface_descriptors_file_name = "interface-descriptors.inc";
3634 {
3635 IncludeGuardScope builtin_definitions_include_guard(
3636 builtin_definitions, builtin_definitions_file_name);
3637
3638 builtin_definitions
3639 << "\n"
3640 "#define BUILTIN_LIST_FROM_TORQUE(CPP, TFJ, TFC, TFS, TFH, "
3641 "ASM) "
3642 "\\\n";
3643 for (auto& declarable : GlobalContext::AllDeclarables()) {
3644 Builtin* builtin = Builtin::DynamicCast(declarable.get());
3645 if (!builtin || builtin->IsExternal()) continue;
3646 if (builtin->IsStub()) {
3647 builtin_definitions << "TFC(" << builtin->ExternalName() << ", "
3648 << builtin->ExternalName();
3649 if (!builtin->HasCustomInterfaceDescriptor()) {
3650 std::string descriptor_name = builtin->ExternalName() + "Descriptor";
3651 bool has_context_parameter =
3652 builtin->signature().HasContextParameter();
3653 size_t kFirstNonContextParameter = has_context_parameter ? 1 : 0;
3654 TypeVector return_types = LowerType(builtin->signature().return_type);
3655
3656 interface_descriptors << "class " << descriptor_name
3657 << " : public StaticCallInterfaceDescriptor<"
3658 << descriptor_name << "> {\n";
3659
3660 interface_descriptors << " public:\n";
3661
3662 // Currently, no torque-defined builtins are directly exposed to
3663 // objects inside the sandbox via the code pointer table.
3664 interface_descriptors << " INTERNAL_DESCRIPTOR()\n";
3665
3666 if (has_context_parameter) {
3667 interface_descriptors << " DEFINE_RESULT_AND_PARAMETERS(";
3668 } else {
3669 interface_descriptors
3670 << " DEFINE_RESULT_AND_PARAMETERS_NO_CONTEXT(";
3671 }
3672 interface_descriptors << return_types.size();
3673 for (size_t i = kFirstNonContextParameter;
3674 i < builtin->parameter_names().size(); ++i) {
3675 Identifier* parameter = builtin->parameter_names()[i];
3676 interface_descriptors << ", k" << CamelifyString(parameter->value);
3677 }
3678 interface_descriptors << ")\n";
3679
3680 interface_descriptors << " DEFINE_RESULT_AND_PARAMETER_TYPES(";
3681 PrintCommaSeparatedList(interface_descriptors, return_types,
3682 MachineTypeString);
3683 bool is_first = return_types.empty();
3684 for (size_t i = kFirstNonContextParameter;
3685 i < builtin->parameter_names().size(); ++i) {
3686 const Type* type = builtin->signature().parameter_types.types[i];
3687 interface_descriptors << (is_first ? "" : ", ")
3688 << MachineTypeString(type);
3689 is_first = false;
3690 }
3691 interface_descriptors << ")\n";
3692
3693 interface_descriptors << " DECLARE_DEFAULT_DESCRIPTOR("
3694 << descriptor_name << ")\n";
3695 interface_descriptors << "};\n\n";
3696 }
3697 } else {
3698 builtin_definitions << "TFJ(" << builtin->ExternalName();
3699 if (builtin->IsVarArgsJavaScript()) {
3700 builtin_definitions << ", kDontAdaptArgumentsSentinel";
3701 } else {
3702 DCHECK(builtin->IsFixedArgsJavaScript());
3703 // Fixed-args JavaScript builtins need to declare the parameter
3704 // count.
3705 int parameter_count =
3706 static_cast<int>(builtin->signature().ExplicitCount());
3707 builtin_definitions << ", JSParameterCount(" << parameter_count
3708 << ")";
3709 // And the receiver is explicitly declared.
3710 builtin_definitions << ", kReceiver";
3711 for (size_t i = builtin->signature().implicit_count;
3712 i < builtin->parameter_names().size(); ++i) {
3713 Identifier* parameter = builtin->parameter_names()[i];
3714 builtin_definitions << ", k" << CamelifyString(parameter->value);
3715 }
3716 }
3717 }
3718 builtin_definitions << ") \\\n";
3719 }
3720 builtin_definitions << "\n";
3721
3722 builtin_definitions
3723 << "#define TORQUE_FUNCTION_POINTER_TYPE_TO_BUILTIN_MAP(V) \\\n";
3724 for (const BuiltinPointerType* type :
3725 TypeOracle::AllBuiltinPointerTypes()) {
3726 Builtin* example_builtin =
3727 Declarations::FindSomeInternalBuiltinWithType(type);
3728 if (!example_builtin) {
3729 CurrentSourcePosition::Scope current_source_position(
3730 SourcePosition{CurrentSourceFile::Get(), LineAndColumn::Invalid(),
3731 LineAndColumn::Invalid()});
3732 ReportError("unable to find any builtin with type \"", *type, "\"");
3733 }
3734 builtin_definitions << " V(" << type->function_pointer_type_id() << ","
3735 << example_builtin->ExternalName() << ")\\\n";
3736 }
3737 builtin_definitions << "\n";
3738 }
3739 WriteFile(output_directory + "/" + builtin_definitions_file_name,
3740 builtin_definitions.str());
3741 WriteFile(output_directory + "/" + interface_descriptors_file_name,
3742 interface_descriptors.str());
3743}
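// Illustrative output (builtin name assumed): a stub builtin "FooBar" emits
//   TFC(FooBar, FooBar) \
// plus, unless it has a custom interface descriptor, a generated
// FooBarDescriptor class in interface-descriptors.inc; a fixed-args JS
// builtin with two parameters emits
//   TFJ(FooBar, JSParameterCount(2), kReceiver, kX, kY) \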
3744
3745namespace {
3746
3747enum class FieldSectionType : uint32_t {
3748 kNoSection = 0,
3749 kWeakSection = 1 << 0,
3750 kStrongSection = 2 << 0,
3751 kScalarSection = 3 << 0
3752};
3753
3754bool IsPointerSection(FieldSectionType type) {
3755 return type == FieldSectionType::kWeakSection ||
3756 type == FieldSectionType::kStrongSection;
3757}
3758
3759using FieldSections = base::Flags<FieldSectionType>;
3760
3761std::string ToString(FieldSectionType type) {
3762 switch (type) {
3763 case FieldSectionType::kNoSection:
3764 return "NoSection";
3765 case FieldSectionType::kWeakSection:
3766 return "WeakFields";
3767 case FieldSectionType::kStrongSection:
3768 return "StrongFields";
3769 case FieldSectionType::kScalarSection:
3770 return "ScalarFields";
3771 }
3772 UNREACHABLE();
3773}
3774
3775class FieldOffsetsGenerator {
3776 public:
3777 explicit FieldOffsetsGenerator(const ClassType* type) : type_(type) {}
3778
3779 virtual void WriteField(const Field& f, const std::string& size_string) = 0;
3780 virtual void WriteFieldOffsetGetter(const Field& f) = 0;
3781 virtual void WriteMarker(const std::string& marker) = 0;
3782
3783 virtual ~FieldOffsetsGenerator() { CHECK(is_finished_); }
3784
3785 void RecordOffsetFor(const Field& f) {
3786 CHECK(!is_finished_);
3787 UpdateSection(f);
3788
3789 // Emit kHeaderSize before any indexed field.
3790 if (f.index.has_value() && !header_size_emitted_) {
3791 WriteMarker("kHeaderSize");
3792 header_size_emitted_ = true;
3793 }
3794
3795 // We don't know statically how much space an indexed field takes, so report
3796 // it as zero.
3797 std::string size_string = "0";
3798 if (!f.index.has_value()) {
3799 size_t field_size;
3800 std::tie(field_size, size_string) = f.GetFieldSizeInformation();
3801 }
3802 if (f.offset.has_value()) {
3803 WriteField(f, size_string);
3804 } else {
3805 WriteFieldOffsetGetter(f);
3806 }
3807 }
3808
3809 void Finish() {
3810 End(current_section_);
3811 if (!(completed_sections_ & FieldSectionType::kWeakSection)) {
3812 Begin(FieldSectionType::kWeakSection);
3813 End(FieldSectionType::kWeakSection);
3814 }
3815 if (!(completed_sections_ & FieldSectionType::kStrongSection)) {
3816 Begin(FieldSectionType::kStrongSection);
3817 End(FieldSectionType::kStrongSection);
3818 }
3819 is_finished_ = true;
3820
3821 // In the presence of indexed fields, we already emitted kHeaderSize before
3822 // the indexed field.
3823 if (!type_->IsShape() && !header_size_emitted_) {
3824 WriteMarker("kHeaderSize");
3825 }
3826 if (!type_->IsAbstract() && type_->HasStaticSize()) {
3827 WriteMarker("kSize");
3828 }
3829 }
3830
3831 protected:
3832 const ClassType* type_;
3833
3834 private:
3835 FieldSectionType GetSectionFor(const Field& f) {
3836 const Type* field_type = f.name_and_type.type;
3837 if (field_type == TypeOracle::GetVoidType()) {
3838 // Allow void type for marker constants of size zero.
3839 return current_section_;
3840 }
3841 StructType::Classification struct_contents =
3842 StructType::ClassificationFlag::kEmpty;
3843 if (auto field_as_struct = field_type->StructSupertype()) {
3844 struct_contents = (*field_as_struct)->ClassifyContents();
3845 }
3846 if ((struct_contents & StructType::ClassificationFlag::kStrongTagged) &&
3847 (struct_contents & StructType::ClassificationFlag::kWeakTagged)) {
3848 // It's okay for a struct to contain both strong and weak data. We'll just
3849 // treat the whole thing as weak. This is required for DescriptorEntry.
3850 struct_contents &= ~StructType::Classification(
3851 StructType::ClassificationFlag::kStrongTagged);
3852 }
3853 bool struct_contains_tagged_fields =
3854 (struct_contents & StructType::ClassificationFlag::kStrongTagged) ||
3855 (struct_contents & StructType::ClassificationFlag::kWeakTagged);
3856 if (struct_contains_tagged_fields &&
3857 (struct_contents & StructType::ClassificationFlag::kUntagged)) {
3858 // We can't declare what section a struct goes in if it has multiple
3859 // categories of data within.
3860 Error(
3861 "Classes do not support fields which are structs containing both "
3862 "tagged and untagged data.")
3863 .Position(f.pos);
3864 }
3865 if ((field_type->IsSubtypeOf(TypeOracle::GetStrongTaggedType()) ||
3866 struct_contents == StructType::ClassificationFlag::kStrongTagged) &&
3867 !f.custom_weak_marking) {
3868 return FieldSectionType::kStrongSection;
3869 } else if (field_type->IsSubtypeOf(TypeOracle::GetTaggedType()) ||
3870 struct_contains_tagged_fields) {
3871 return FieldSectionType::kWeakSection;
3872 } else {
3873 return FieldSectionType::kScalarSection;
3874 }
3875 }
3876 void UpdateSection(const Field& f) {
3877 FieldSectionType type = GetSectionFor(f);
3878 if (current_section_ == type) return;
3879 if (IsPointerSection(type)) {
3880 if (completed_sections_ & type) {
3881 std::stringstream s;
3882 s << "cannot declare field " << f.name_and_type.name << " in class "
3883 << type_->name() << ", because section " << ToString(type)
3884 << " to which it belongs has already been finished.";
3885 Error(s.str()).Position(f.pos);
3886 }
3887 }
3888 End(current_section_);
3889 current_section_ = type;
3890 Begin(current_section_);
3891 }
3892 void Begin(FieldSectionType type) {
3893 DCHECK(type != FieldSectionType::kNoSection);
3894 if (!IsPointerSection(type)) return;
3895 WriteMarker("kStartOf" + ToString(type) + "Offset");
3896 }
3897 void End(FieldSectionType type) {
3898 if (!IsPointerSection(type)) return;
3900 WriteMarker("kEndOf" + ToString(type) + "Offset");
3901 }
3902
3903 FieldSectionType current_section_ = FieldSectionType::kNoSection;
3904 FieldSections completed_sections_ = FieldSectionType::kNoSection;
3905 bool is_finished_ = false;
3906 bool header_size_emitted_ = false;
3907};
3908
3909void GenerateClassExport(const ClassType* type, std::ostream& header,
3910 std::ostream& inl_header) {
3911 const ClassType* super = type->GetSuperClass();
3912 std::string parent = "TorqueGenerated" + type->name() + "<" + type->name() +
3913 ", " + super->name() + ">";
3914 header << "class " << type->name() << " : public " << parent << " {\n";
3915 header << " public:\n";
3916 if (type->ShouldGenerateBodyDescriptor()) {
3917 header << " class BodyDescriptor;\n";
3918 }
3919 header << " TQ_OBJECT_CONSTRUCTORS(" << type->name() << ")\n";
3920 header << "};\n\n";
3921 inl_header << "TQ_OBJECT_CONSTRUCTORS_IMPL(" << type->name() << ")\n";
3922}
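// Illustrative output for a class Foo extending Bar:
//   class Foo : public TorqueGeneratedFoo<Foo, Bar> {
//    public:
//     TQ_OBJECT_CONSTRUCTORS(Foo)
//   };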
3923
3924} // namespace
3925
3926void ImplementationVisitor::GenerateVisitorLists(
3927 const std::string& output_directory) {
3928 std::stringstream header;
3929 std::string file_name = "visitor-lists.h";
3930 {
3931 IncludeGuardScope include_guard(header, file_name);
3932
3933 header << "#define TORQUE_INSTANCE_TYPE_TO_BODY_DESCRIPTOR_LIST(V)\\\n";
3934 for (const ClassType* type : TypeOracle::GetClasses()) {
3935 if (type->ShouldGenerateBodyDescriptor() && type->OwnInstanceType()) {
3936 std::string type_name =
3937 CapifyStringWithUnderscores(type->name()) + "_TYPE";
3938 header << "V(" << type_name << "," << type->name() << ")\\\n";
3939 }
3940 }
3941 header << "\n";
3942
3943 header << "#define TORQUE_DATA_ONLY_VISITOR_ID_LIST(V)\\\n";
3944 for (const ClassType* type : TypeOracle::GetClasses()) {
3945 if (type->ShouldGenerateBodyDescriptor() &&
3946 type->HasNoPointerSlotsExceptMap()) {
3947 header << "V(" << type->name() << ")\\\n";
3948 }
3949 }
3950 header << "\n";
3951
3952 header << "#define TORQUE_POINTER_VISITOR_ID_LIST(V)\\\n";
3953 for (const ClassType* type : TypeOracle::GetClasses()) {
3954 if (type->ShouldGenerateBodyDescriptor() &&
3955 !type->HasNoPointerSlotsExceptMap()) {
3956 header << "V(" << type->name() << ")\\\n";
3957 }
3958 }
3959 header << "\n";
3960 }
3961 const std::string output_header_path = output_directory + "/" + file_name;
3962 WriteFile(output_header_path, header.str());
3963}
3964
3965void ImplementationVisitor::GenerateBitFields(
3966 const std::string& output_directory) {
3967 std::stringstream header;
3968 std::string file_name = "bit-fields.h";
3969 {
3970 IncludeGuardScope include_guard(header, file_name);
3971 header << "#include \"src/base/bit-field.h\"\n\n";
3972 NamespaceScope namespaces(header, {"v8", "internal"});
3973
3974 for (const auto& type : TypeOracle::GetBitFieldStructTypes()) {
3975 bool all_single_bits = true; // Track whether every field is one bit.
3976 header << "// " << type->GetPosition() << "\n";
3977 header << "#define DEFINE_TORQUE_GENERATED_"
3978 << CapifyStringWithUnderscores(type->name()) << "() \\\n";
3979 std::string type_name = type->GetConstexprGeneratedTypeName();
3980 for (const auto& field : type->fields()) {
3981 const char* suffix = field.num_bits == 1 ? "Bit" : "Bits";
3982 all_single_bits = all_single_bits && field.num_bits == 1;
3983 std::string field_type_name =
3984 field.name_and_type.type->GetConstexprGeneratedTypeName();
3985 header << " using " << CamelifyString(field.name_and_type.name)
3986 << suffix << " = base::BitField<" << field_type_name << ", "
3987 << field.offset << ", " << field.num_bits << ", " << type_name
3988 << ">; \\\n";
3989 }
3990
3991 // If every field is one bit, we can also generate a convenient enum.
3992 if (all_single_bits) {
3993 header << " enum Flag: " << type_name << " { \\\n";
3994 header << " kNone = 0, \\\n";
3995 for (const auto& field : type->fields()) {
3996 header << " k" << CamelifyString(field.name_and_type.name) << " = "
3997 << type_name << "{1} << " << field.offset << ", \\\n";
3998 }
3999 header << " }; \\\n";
4000 header << " using Flags = base::Flags<Flag>; \\\n";
4001 header << " static constexpr int kFlagCount = "
4002 << type->fields().size() << "; \\\n";
4003 }
4004
4005 header << "\n";
4006 }
4007 }
4008 const std::string output_header_path = output_directory + "/" + file_name;
4009 WriteFile(output_header_path, header.str());
4010}
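// Illustrative output (struct and field names assumed) for a bitfield struct
// MyBits over uint32_t with fields a: bool (1 bit) and b: int32 (3 bits):
//   #define DEFINE_TORQUE_GENERATED_MY_BITS() \
//     using ABit = base::BitField<bool, 0, 1, uint32_t>; \
//     using BBits = base::BitField<int32_t, 1, 3, uint32_t>; \
// Because not every field is a single bit, no Flag enum is emitted here.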
4011
4012namespace {
4013
4014class ClassFieldOffsetGenerator : public FieldOffsetsGenerator {
4015 public:
4016 ClassFieldOffsetGenerator(std::ostream& header, std::ostream& inline_header,
4017 const ClassType* type, std::string gen_name,
4018 const ClassType* parent, bool use_templates = true)
4019 : FieldOffsetsGenerator(type),
4020 hdr_(header),
4021 inl_(inline_header),
4022 previous_field_end_(FirstFieldStart(type, parent, use_templates)),
4023 gen_name_(gen_name) {}
4024
4025 void WriteField(const Field& f, const std::string& size_string) override {
4026 hdr_ << " // " << f.pos << "\n";
4027 std::string field = "k" + CamelifyString(f.name_and_type.name) + "Offset";
4028 std::string field_end = field + "End";
4029 hdr_ << " static constexpr int " << field << " = " << previous_field_end_
4030 << ";\n";
4031 hdr_ << " static constexpr int " << field_end << " = " << field << " + "
4032 << size_string << " - 1;\n";
4033 previous_field_end_ = field_end + " + 1";
4034 }
4035
4036 void WriteFieldOffsetGetter(const Field& f) override {
4037 // A static constexpr int is more convenient than a getter if the offset is
4038 // known.
4039 DCHECK(!f.offset.has_value());
4040
4041 std::string function_name = CamelifyString(f.name_and_type.name) + "Offset";
4042
4043 std::vector<cpp::TemplateParameter> params = {cpp::TemplateParameter("D"),
4044 cpp::TemplateParameter("P")};
4045 cpp::Class owner(std::move(params), gen_name_);
4046
4047 auto getter = cpp::Function::DefaultGetter("int", &owner, function_name);
4048 getter.PrintDeclaration(hdr_);
4049 getter.PrintDefinition(inl_, [&](std::ostream& stream) {
4050 // Item 1 in a flattened slice is the offset.
4051 stream << " return static_cast<int>(std::get<1>("
4052 << Callable::PrefixNameForCCOutput(type_->GetSliceMacroName(f))
4053 << "(*static_cast<const D*>(this))));\n";
4054 });
4055 }
4056 void WriteMarker(const std::string& marker) override {
4057 hdr_ << " static constexpr int " << marker << " = " << previous_field_end_
4058 << ";\n";
4059 }
4060
4061 private:
4062 static std::string FirstFieldStart(const ClassType* type,
4063 const ClassType* parent,
4064 bool use_templates = true) {
4065 std::string parent_name = use_templates ? "P" : parent->name();
4066
4067 if (type->IsLayoutDefinedInCpp()) {
4068 // TODO(leszeks): Hacked in support for some classes (e.g.
4069 // HeapObject) being mirrored by a *Layout class. Remove once
4070 // everything is ported to layout classes.
4071 if (parent_name == "HeapObject" || parent_name == "TrustedObject" ||
4072 parent_name == "Struct") {
4073 parent_name += "Layout";
4074 }
4075
4076 return "sizeof(" + parent_name + ")";
4077 }
4078
4079 if (parent && parent->IsShape()) {
4080 return parent_name + "::kSize";
4081 }
4082 return parent_name + "::kHeaderSize";
4083 }
4084
4085 std::ostream& hdr_;
4086 std::ostream& inl_;
4087 std::string previous_field_end_;
4088 std::string gen_name_;
4089};
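// Illustrative output of WriteField for a 4-byte scalar field "bar" that is
// the first field after the parent's header:
//   static constexpr int kBarOffset = P::kHeaderSize;
//   static constexpr int kBarOffsetEnd = kBarOffset + 4 - 1;
// and the next field then starts at kBarOffsetEnd + 1.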
4090
4091class CppClassGenerator {
4092 public:
4093 CppClassGenerator(const ClassType* type, std::ostream& header,
4094 std::ostream& inl_header, std::ostream& impl)
4095 : type_(type),
4096 super_(type->GetSuperClass()),
4097 name_(type->name()),
4098 gen_name_("TorqueGenerated" + name_),
4099 gen_name_T_(gen_name_ + "<D, P>"),
4100 gen_name_I_(gen_name_ + "<" + name_ + ", " + super_->name() + ">"),
4101 hdr_(header),
4102 inl_(inl_header),
4103 impl_(impl) {}
4104 const std::string template_decl() const {
4105 return "template <class D, class P>";
4106 }
4107
4108 void GenerateClass();
4109 void GenerateCppObjectDefinitionAsserts();
4110 void GenerateCppObjectLayoutDefinitionAsserts();
4111
4112 private:
4113 SourcePosition Position();
4114
4115 void GenerateClassConstructors();
4116
4117 // Generates getter and setter runtime member functions for the given class
4118 // field. Traverses depth-first through any nested struct fields to generate
4119 // accessors for them also; struct_fields represents the stack of currently
4120 // active struct fields.
4121 void GenerateFieldAccessors(const Field& class_field,
4122 std::vector<const Field*>& struct_fields);
4123 void EmitLoadFieldStatement(std::ostream& stream, const Field& class_field,
4124 std::vector<const Field*>& struct_fields);
4125 void EmitStoreFieldStatement(std::ostream& stream, const Field& class_field,
4126 std::vector<const Field*>& struct_fields);
4127
4128 std::string GetFieldOffsetForAccessor(const Field& f);
4129
4130 // Gets the C++ type name that should be used in accessors for referring to
4131 // the value of a class field.
4132 std::string GetTypeNameForAccessor(const Field& f);
4133
4134 bool CanContainHeapObjects(const Type* t);
4135
4136 const ClassType* type_;
4137 const ClassType* super_;
4138 const std::string name_;
4139 const std::string gen_name_;
4140 const std::string gen_name_T_;
4141 const std::string gen_name_I_;
4142 std::ostream& hdr_;
4143 std::ostream& inl_;
4144 std::ostream& impl_;
4145};
4146
4147std::optional<std::vector<Field>> GetOrderedUniqueIndexFields(
4148 const ClassType& type) {
4149 std::vector<Field> result;
4150 std::set<std::string> index_names;
4151 for (const Field& field : type.ComputeAllFields()) {
4152 if (field.index) {
4153 auto name_and_type = ExtractSimpleFieldArraySize(type, field.index->expr);
4154 if (!name_and_type) {
4155 return std::nullopt;
4156 }
4157 index_names.insert(name_and_type->name);
4158 }
4159 }
4160
4161 for (const Field& field : type.ComputeAllFields()) {
4162 if (index_names.count(field.name_and_type.name) != 0) {
4163 result.push_back(field);
4164 }
4165 }
4166
4167 return result;
4168}
4169
4170void CppClassGenerator::GenerateClass() {
4171 // Is<name>_NonInline(Tagged<HeapObject>)
4172 if (!type_->IsShape()) {
4173 cpp::Function f("Is"s + name_ + "_NonInline");
4174 f.SetDescription("Alias for Is"s + name_ + "() that avoids inlining.");
4175 f.SetExport(true);
4176 f.SetReturnType("bool");
4177 f.AddParameter("Tagged<HeapObject>", "o");
4178
4179 f.PrintDeclaration(hdr_);
4180 hdr_ << "\n";
4181 f.PrintDefinition(impl_, [&](std::ostream& stream) {
4182 stream << " return Is" << name_ << "(o);\n";
4183 });
4184 }
4185 hdr_ << "// Definition " << Position() << "\n";
4186 hdr_ << template_decl() << "\n";
4187 hdr_ << "class " << gen_name_ << " : public P {\n";
4188 hdr_ << " static_assert(\n"
4189 << " std::is_same<" << name_ << ", D>::value,\n"
4190 << " \"Use this class as direct base for " << name_ << ".\");\n";
4191 hdr_ << " static_assert(\n"
4192 << " std::is_same<" << super_->name() << ", P>::value,\n"
4193 << " \"Pass in " << super_->name()
4194 << " as second template parameter for " << gen_name_ << ".\");\n\n";
4195 hdr_ << " public: \n";
4196 hdr_ << " using Super = P;\n";
4197 hdr_ << " using TorqueGeneratedClass = " << gen_name_ << "<D,P>;\n\n";
4198 if (!type_->ShouldExport() && !type_->IsExtern()) {
4199 hdr_ << " protected: // not extern or @export\n";
4200 }
4201 for (const Field& f : type_->fields()) {
4202 CurrentSourcePosition::Scope scope(f.pos);
4203 std::vector<const Field*> struct_fields;
4204 GenerateFieldAccessors(f, struct_fields);
4205 }
4206 if (!type_->ShouldExport() && !type_->IsExtern()) {
4207 hdr_ << " public:\n";
4208 }
4209
4210 std::vector<cpp::TemplateParameter> templateArgs = {
4211 cpp::TemplateParameter("D"), cpp::TemplateParameter("P")};
4212 cpp::Class c(std::move(templateArgs), gen_name_);
4213
4214 if (type_->ShouldGeneratePrint()) {
4215 hdr_ << " DECL_PRINTER(" << name_ << ")\n\n";
4216 }
4217
4218 if (type_->ShouldGenerateVerify()) {
4219 IfDefScope hdr_scope(hdr_, "VERIFY_HEAP");
4220 // V8_EXPORT_PRIVATE void Verify(Isolate*);
4221 cpp::Function f(&c, name_ + "Verify");
4222 f.SetExport();
4223 f.SetReturnType("void");
4224 f.AddParameter("Isolate*", "isolate");
4225 f.PrintDeclaration(hdr_);
4226
4227 IfDefScope impl_scope(impl_, "VERIFY_HEAP");
4228 impl_ << "\ntemplate <>\n";
4229 impl_ << "void " << gen_name_I_ << "::" << name_
4230 << "Verify(Isolate* isolate) {\n";
4231 impl_ << " TorqueGeneratedClassVerifiers::" << name_ << "Verify(Cast<"
4232 << name_
4233 << ">(*this), "
4234 "isolate);\n";
4235 impl_ << "}\n\n";
4236 impl_ << "\n";
4237 }
4238
4239 hdr_ << "\n";
4240 ClassFieldOffsetGenerator g(hdr_, inl_, type_, gen_name_,
4241 type_->GetSuperClass());
4242 for (const auto& f : type_->fields()) {
4243 CurrentSourcePosition::Scope scope(f.pos);
4244 g.RecordOffsetFor(f);
4245 }
4246 g.Finish();
4247 hdr_ << "\n";
4248
4249 auto index_fields = GetOrderedUniqueIndexFields(*type_);
4250
4251 if (!index_fields.has_value()) {
4252 hdr_ << " // SizeFor implementations not generated due to complex array "
4253 "lengths\n\n";
4254
4255 const Field& last_field = type_->LastField();
4256 std::string last_field_item_size =
4257 std::get<1>(*SizeOf(last_field.name_and_type.type));
4258
4259 // int AllocatedSize() const
4260 {
4261 cpp::Function f =
4262 cpp::Function::DefaultGetter("int", &c, "AllocatedSize");
4263 f.PrintDeclaration(hdr_);
4264
4265 f.PrintDefinition(inl_, [&](std::ostream& stream) {
4266 stream << " auto slice = "
4267 << Callable::PrefixNameForCCOutput(
4268 type_->GetSliceMacroName(last_field))
4269 << "(*static_cast<const D*>(this));\n";
4270 stream << " return static_cast<int>(std::get<1>(slice)) + "
4271 << last_field_item_size
4272 << " * static_cast<int>(std::get<2>(slice));\n";
4273 });
4274 }
4275 } else if (type_->ShouldGenerateBodyDescriptor() ||
4276 (!type_->IsAbstract() &&
4277 !type_->IsSubtypeOf(TypeOracle::GetJSObjectType()))) {
4278 cpp::Function f(&c, "SizeFor");
4279 f.SetReturnType("int32_t");
4280 f.SetFlags(cpp::Function::kStatic | cpp::Function::kConstexpr |
4281 cpp::Function::kV8Inline);
4282 for (const Field& field : *index_fields) {
4283 f.AddParameter("int", field.name_and_type.name);
4284 }
4285 f.PrintInlineDefinition(hdr_, [&](std::ostream& stream) {
4286 if (index_fields->empty()) {
4287 stream << " DCHECK(kHeaderSize == kSize && kHeaderSize == "
4288 << *type_->size().SingleValue() << ");\n";
4289 }
4290 stream << " int32_t size = kHeaderSize;\n";
4291 for (const Field& field : type_->ComputeAllFields()) {
4292 if (field.index) {
4293 auto index_name_and_type =
4294 *ExtractSimpleFieldArraySize(*type_, field.index->expr);
4295 stream << " size += " << index_name_and_type.name << " * "
4296 << std::get<0>(field.GetFieldSizeInformation()) << ";\n";
4297 }
4298 }
4299 if (type_->size().Alignment() < TargetArchitecture::TaggedSize()) {
4300 stream << " size = OBJECT_POINTER_ALIGN(size);\n";
4301 }
4302 stream << " return size;\n";
4303 });
4304
4305 // V8_INLINE int32_t AllocatedSize() const
4306 {
4307 cpp::Function allocated_size_f =
4308 cpp::Function::DefaultGetter("int32_t", &c, "AllocatedSize");
4309 allocated_size_f.SetFlag(cpp::Function::kV8Inline);
4310 allocated_size_f.PrintInlineDefinition(hdr_, [&](std::ostream& stream) {
4311 stream << " return SizeFor(";
4312 bool first = true;
4313 for (const auto& field : *index_fields) {
4314 if (!first) stream << ", ";
4315 stream << "this->" << field.name_and_type.name << "()";
4316 first = false;
4317 }
4318 stream << ");\n";
4319 });
4320 }
4321 }
4322
4323 hdr_ << " friend class Factory;\n\n";
4324
4325 GenerateClassConstructors();
4326
4327 hdr_ << "};\n\n";
4328
4329 if (type_->ShouldGenerateFullClassDefinition()) {
4330 // If this class extends from another class which is defined in the same tq
4331 // file, and that other class doesn't generate a full class definition, then
4332 // the resulting .inc file would be uncompilable due to ordering
4333 // requirements: the generated file must go before the hand-written
4334 // definition of the base class, but it must also go after that same
4335 // hand-written definition.
4336 std::optional<const ClassType*> parent = type_->parent()->ClassSupertype();
4337 while (parent) {
4338 if ((*parent)->ShouldGenerateCppClassDefinitions() &&
4339 !(*parent)->ShouldGenerateFullClassDefinition() &&
4340 (*parent)->AttributedToFile() == type_->AttributedToFile()) {
4341 Error("Exported ", *type_,
4342 " cannot be in the same file as its parent extern ", **parent);
4343 }
4344 parent = (*parent)->parent()->ClassSupertype();
4345 }
4346
4347 GenerateClassExport(type_, hdr_, inl_);
4348 }
4349}
4350
4351void CppClassGenerator::GenerateCppObjectDefinitionAsserts() {
4352 impl_ << "// Definition " << Position() << "\n"
4353 << "class " << gen_name_ << "Asserts {\n";
4354
4355 ClassFieldOffsetGenerator g(impl_, impl_, type_, gen_name_,
4356 type_->GetSuperClass(), false);
4357 for (const auto& f : type_->fields()) {
4358 CurrentSourcePosition::Scope scope(f.pos);
4359 g.RecordOffsetFor(f);
4360 }
4361 g.Finish();
4362 impl_ << "\n";
4363
4364 for (const auto& f : type_->fields()) {
4365 std::string field_offset =
4366 "k" + CamelifyString(f.name_and_type.name) + "Offset";
4367 impl_ << " static_assert(" << field_offset << " == " << name_
4368 << "::" << field_offset << ",\n"
4369 << " \"Values of " << name_ << "::" << field_offset
4370 << " defined in Torque and C++ do not match\");\n";
4371 }
4372 if (!type_->IsAbstract() && type_->HasStaticSize()) {
4373 impl_ << " static_assert(kSize == " << name_ << "::kSize);\n";
4374 }
4375
4376 impl_ << "};\n\n";
4377}
4378
4379void CppClassGenerator::GenerateCppObjectLayoutDefinitionAsserts() {
4380 impl_ << "// Definition " << Position() << "\n"
4381 << "class " << gen_name_ << "Asserts {\n";
4382
4383 ClassFieldOffsetGenerator g(impl_, impl_, type_, gen_name_,
4384 type_->GetSuperClass(), false);
4385 for (const auto& f : type_->fields()) {
4386 CurrentSourcePosition::Scope scope(f.pos);
4387 g.RecordOffsetFor(f);
4388 }
4389 g.Finish();
4390 impl_ << "\n";
4391
4392 for (const auto& f : type_->fields()) {
4393 std::string field_offset =
4394 "k" + CamelifyString(f.name_and_type.name) + "Offset";
4395 std::string cpp_field_offset =
4396 f.index.has_value()
4397 ? "OFFSET_OF_DATA_START(" + name_ + ")"
4398 : "offsetof(" + name_ + ", " + f.name_and_type.name + "_)";
4399 impl_ << " static_assert(" << field_offset << " == " << cpp_field_offset
4400 << ",\n"
4401 << " \"Value of " << name_ << "::" << field_offset
4402 << " defined in Torque and offset of field " << name_
4403 << "::" << f.name_and_type.name << " in C++ do not match\");\n";
4404 }
4405 if (!type_->IsAbstract() && type_->HasStaticSize()) {
4406 impl_ << " static_assert(kSize == sizeof(" + name_ + "));\n";
4407 }
4408
4409 impl_ << "};\n\n";
4410}
4411
4412SourcePosition CppClassGenerator::Position() { return type_->GetPosition(); }
4413
4414void CppClassGenerator::GenerateClassConstructors() {
4415 const ClassType* typecheck_type = type_;
4416 while (typecheck_type->IsShape()) {
4417 typecheck_type = typecheck_type->GetSuperClass();
4418
4419 // Shapes have already been checked earlier to inherit from JSObject, so we
4420 // should have found an appropriate type.
4421 DCHECK(typecheck_type);
4422 }
4423
4424 hdr_ << " template <class DAlias = D>\n";
4425 hdr_ << " constexpr " << gen_name_ << "() : P() {\n";
4426 hdr_ << " static_assert(\n";
4427 hdr_ << " std::is_base_of<" << gen_name_ << ", DAlias>::value,\n";
4428 hdr_ << " \"class " << gen_name_
4429 << " should be used as direct base for " << name_ << ".\");\n";
4430 hdr_ << " }\n\n";
4431
4432 hdr_ << " protected:\n";
4433 hdr_ << " inline explicit constexpr " << gen_name_
4434 << "(Address ptr, typename P::SkipTypeCheckTag\n)";
4435 hdr_ << " : P(ptr, typename P::SkipTypeCheckTag{}) {}\n";
4436 hdr_ << " inline explicit " << gen_name_ << "(Address ptr);\n";
4437
4438 inl_ << "template<class D, class P>\n";
4439 inl_ << "inline " << gen_name_T_ << "::" << gen_name_ << "(Address ptr)\n";
4440 inl_ << " : P(ptr) {\n";
4441 inl_ << " SLOW_DCHECK(Is" << typecheck_type->name()
4442 << "_NonInline(*this));\n";
4443 inl_ << "}\n";
4444}
4445
4446namespace {
4447std::string GenerateRuntimeTypeCheck(const Type* type,
4448 const std::string& value) {
4449 bool maybe_object = !type->IsSubtypeOf(TypeOracle::GetStrongTaggedType());
4450 std::stringstream type_check;
4451 bool at_start = true;
4452 // If weak pointers are allowed, then start by checking for a cleared value.
4453 if (maybe_object) {
4454 type_check << value << ".IsCleared()";
4455 at_start = false;
4456 }
4457 for (const TypeChecker& runtime_type : type->GetTypeCheckers()) {
4458 if (!at_start) type_check << " || ";
4459 at_start = false;
4460 if (maybe_object) {
4461 bool strong = runtime_type.weak_ref_to.empty();
4462 if (strong && runtime_type.type == WEAK_HEAP_OBJECT) {
4463 // Rather than a generic Weak<T>, this is the basic type WeakHeapObject.
4464 // We can't validate anything more about the type of the object pointed
4465 // to, so just check that it's weak.
4466 type_check << value << ".IsWeak()";
4467 } else {
4468 type_check << "(" << (strong ? "!" : "") << value << ".IsWeak() && Is"
4469 << (strong ? runtime_type.type : runtime_type.weak_ref_to)
4470 << "(" << value << ".GetHeapObjectOrSmi()))";
4471 }
4472 } else {
4473 type_check << "Is" << runtime_type.type << "(" << value << ")";
4474 }
4475 }
4476 return type_check.str();
4477}
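// Illustrative results: for a strong type with type checkers {String, Symbol}
// this returns "IsString(value) || IsSymbol(value)"; for a weak-capable field
// the generated check additionally accepts cleared and weak values.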
4478
4479void GenerateBoundsDCheck(std::ostream& os, const std::string& index,
4480 const ClassType* type, const Field& f) {
4481 os << " DCHECK_GE(" << index << ", 0);\n";
4482 std::string length_expression;
4483 if (std::optional<NameAndType> array_length =
4484 ExtractSimpleFieldArraySize(*type, f.index->expr)) {
4485 length_expression = "this ->" + array_length->name + "()";
4486 } else {
4487 // The length is element 2 in the flattened field slice.
4488 length_expression =
4489 "static_cast<int>(std::get<2>(" +
4490 Callable::PrefixNameForCCOutput(type->GetSliceMacroName(f)) +
4491 "(*static_cast<const D*>(this))))";
4492 }
4493 os << " DCHECK_LT(" << index << ", " << length_expression << ");\n";
4494}
4495
4496bool CanGenerateFieldAccessors(const Type* field_type) {
4497 // float64_or_undefined_or_hole should be treated like float64. For now, we
4498 // don't need it.
4499 // TODO(v8:10391) Generate accessors for external pointers.
4500 return field_type != TypeOracle::GetVoidType() &&
4501 field_type != TypeOracle::GetFloat64OrUndefinedOrHoleType() &&
4502 !field_type->IsSubtypeOf(TypeOracle::GetExternalPointerType()) &&
4503 !field_type->IsSubtypeOf(TypeOracle::GetTrustedPointerType()) &&
4504 !field_type->IsSubtypeOf(TypeOracle::GetProtectedPointerType());
4505}
4506} // namespace
4507
4508// TODO(sigurds): Keep in sync with DECL_ACCESSORS and ACCESSORS macro.
4509void CppClassGenerator::GenerateFieldAccessors(
4510 const Field& class_field, std::vector<const Field*>& struct_fields) {
4511 const Field& innermost_field =
4512 struct_fields.empty() ? class_field : *struct_fields.back();
4513 const Type* field_type = innermost_field.name_and_type.type;
4514 if (!CanGenerateFieldAccessors(field_type)) return;
4515
4516 if (const StructType* struct_type = StructType::DynamicCast(field_type)) {
4517 struct_fields.resize(struct_fields.size() + 1);
4518 for (const Field& struct_field : struct_type->fields()) {
4519 struct_fields[struct_fields.size() - 1] = &struct_field;
4520 GenerateFieldAccessors(class_field, struct_fields);
4521 }
4522 struct_fields.resize(struct_fields.size() - 1);
4523 return;
4524 }
4525
4526 bool indexed = class_field.index && !class_field.index->optional;
4527 std::string type_name = GetTypeNameForAccessor(innermost_field);
4528 bool can_contain_heap_objects = CanContainHeapObjects(field_type);
4529
4530 // Assemble an accessor name by accumulating together all of the nested field
4531 // names.
4532 std::string name = class_field.name_and_type.name;
4533 for (const Field* nested_struct_field : struct_fields) {
4534 name += "_" + nested_struct_field->name_and_type.name;
4535 }
4536
4537 // Generate declarations in header.
4538 if (can_contain_heap_objects && !field_type->IsClassType() &&
4539 !field_type->IsStructType() &&
4540 field_type != TypeOracle::GetObjectType()) {
4541 hdr_ << " // Torque type: " << field_type->ToString() << "\n";
4542 }
4543
4544 std::vector<cpp::TemplateParameter> templateParameters = {
4545 cpp::TemplateParameter("D"), cpp::TemplateParameter("P")};
4546 cpp::Class owner(std::move(templateParameters), gen_name_);
4547
4548 // getter
4549 {
4550 auto getter = cpp::Function::DefaultGetter(type_name, &owner, name);
4551 if (indexed) {
4552 getter.AddParameter("int", "i");
4553 }
4554 const char* tag_argument;
4555 switch (class_field.synchronization) {
4556 case FieldSynchronization::kNone:
4557 tag_argument = "";
4558 break;
4559 case FieldSynchronization::kRelaxed:
4560 getter.AddParameter("RelaxedLoadTag");
4561 tag_argument = ", kRelaxedLoad";
4562 break;
4563 case FieldSynchronization::kAcquireRelease:
4564 getter.AddParameter("AcquireLoadTag");
4565 tag_argument = ", kAcquireLoad";
4566 break;
4567 }
4568
4569 getter.PrintDeclaration(hdr_);
4570
4571 // For tagged data, generate the extra getter that derives a
4572 // PtrComprCageBase from the current object's pointer.
4573 if (can_contain_heap_objects) {
4574 getter.PrintDefinition(inl_, [&](auto& stream) {
4575 stream
4576 << " PtrComprCageBase cage_base = GetPtrComprCageBase(*this);\n";
4577 stream << " return " << gen_name_ << "::" << name << "(cage_base"
4578 << (indexed ? ", i" : "") << tag_argument << ");\n";
4579 });
4580
4581 getter.InsertParameter(0, "PtrComprCageBase", "cage_base");
4582 getter.PrintDeclaration(hdr_);
4583 }
4584
4585 getter.PrintDefinition(inl_, [&](auto& stream) {
4586 EmitLoadFieldStatement(stream, class_field, struct_fields);
4587 stream << " return value;\n";
4588 });
4589 }
4590
4591 // setter
4592 {
4593 auto setter = cpp::Function::DefaultSetter(
4594 &owner, std::string("set_") + name, type_name, "value");
4595 if (indexed) {
4596 setter.InsertParameter(0, "int", "i");
4597 }
4598 switch (class_field.synchronization) {
4599 case FieldSynchronization::kNone:
4600 break;
4601 case FieldSynchronization::kRelaxed:
4602 setter.AddParameter("RelaxedStoreTag");
4603 break;
4604 case FieldSynchronization::kAcquireRelease:
4605 setter.AddParameter("ReleaseStoreTag");
4606 break;
4607 }
4608 if (can_contain_heap_objects) {
4609 setter.AddParameter("WriteBarrierMode", "mode", "UPDATE_WRITE_BARRIER");
4610 }
4611 setter.PrintDeclaration(hdr_);
4612
4613 setter.PrintDefinition(inl_, [&](auto& stream) {
4614 EmitStoreFieldStatement(stream, class_field, struct_fields);
4615 });
4616 }
4617
4618 hdr_ << "\n";
4619}
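// Example (illustrative): a class field "entries" holding a struct with a
// member "key" yields accessors named entries_key() and set_entries_key(),
// gaining a leading "int i" parameter when the field is indexed.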
4620
4621std::string CppClassGenerator::GetFieldOffsetForAccessor(const Field& f) {
4622 if (f.offset.has_value()) {
4623 return "k" + CamelifyString(f.name_and_type.name) + "Offset";
4624 }
4625 return CamelifyString(f.name_and_type.name) + "Offset()";
4626}
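// For illustration: a hypothetical field "byte_length" with a statically
// known offset maps to the constant name "kByteLengthOffset", while a field
// whose offset is only known at runtime maps to the accessor call
// "ByteLengthOffset()".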
4627
4628std::string CppClassGenerator::GetTypeNameForAccessor(const Field& f) {
4629 const Type* field_type = f.name_and_type.type;
4630 if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
4631 const Type* constexpr_version = field_type->ConstexprVersion();
4632 if (!constexpr_version) {
4633 Error("Field accessor for ", type_->name(), ":: ", f.name_and_type.name,
4634 " cannot be generated because its type ", *field_type,
4635 " is neither a subclass of Object nor does the type have a "
4636 "constexpr "
4637 "version.")
4638 .Position(f.pos)
4639 .Throw();
4640 }
4641 return constexpr_version->GetGeneratedTypeName();
4642 }
4643 return field_type->TagglifiedCppTypeName();
4644}
4645
4646bool CppClassGenerator::CanContainHeapObjects(const Type* t) {
4647 return t->IsSubtypeOf(TypeOracle::GetTaggedType()) &&
4648 !t->IsSubtypeOf(TypeOracle::GetSmiType());
4649}
4650
4651void CppClassGenerator::EmitLoadFieldStatement(
4652 std::ostream& stream, const Field& class_field,
4653 std::vector<const Field*>& struct_fields) {
4654 const Field& innermost_field =
4655 struct_fields.empty() ? class_field : *struct_fields.back();
4656 const Type* field_type = innermost_field.name_and_type.type;
4657 std::string type_name = GetTypeNameForAccessor(innermost_field);
4658 const std::string class_field_size =
4659 std::get<1>(class_field.GetFieldSizeInformation());
4660
4661 // field_offset contains both the offset from the beginning of the object to
4662 // the class field and the combined offsets of any nested struct fields
4663 // within, but not the index adjustment.
4664 std::string field_offset = GetFieldOffsetForAccessor(class_field);
4665 for (const Field* nested_struct_field : struct_fields) {
4666 field_offset += " + " + std::to_string(*nested_struct_field->offset);
4667 }
4668
4669 std::string offset = field_offset;
4670 if (class_field.index) {
4671 const char* index = class_field.index->optional ? "0" : "i";
4672 GenerateBoundsDCheck(stream, index, type_, class_field);
4673 stream << " int offset = " << field_offset << " + " << index << " * "
4674 << class_field_size << ";\n";
4675 offset = "offset";
4676 }
4677
4678 stream << " " << type_name << " value = ";
4679
4680 if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
4681 const char* load;
4682 switch (class_field.synchronization) {
4683 case FieldSynchronization::kNone:
4684 load = "ReadField";
4685 break;
4686 case FieldSynchronization::kRelaxed:
4687 load = "Relaxed_ReadField";
4688 break;
4689 case FieldSynchronization::kAcquireRelease:
4690 ReportError("Torque doesn't support @cppAcquireLoad on untagged data");
4691 }
4692 stream << "this->template " << load << "<" << type_name << ">(" << offset
4693 << ");\n";
4694 } else {
4695 const char* load;
4696 switch (class_field.synchronization) {
4697 case FieldSynchronization::kNone:
4698 load = "load";
4699 break;
4700 case FieldSynchronization::kRelaxed:
4701 load = "Relaxed_Load";
4702 break;
4703 case FieldSynchronization::kAcquireRelease:
4704 load = "Acquire_Load";
4705 break;
4706 }
4707 bool is_smi = field_type->IsSubtypeOf(TypeOracle::GetSmiType());
4708 const std::string load_type = is_smi ? "Smi" : type_name;
4709 const char* postfix = is_smi ? ".value()" : "";
4710 const char* optional_cage_base = is_smi ? "" : "cage_base, ";
4711
4712 stream << "TaggedField<" << load_type << ">::" << load << "("
4713 << optional_cage_base << "*this, " << offset << ")" << postfix
4714 << ";\n";
4715 }
4716
4717 if (CanContainHeapObjects(field_type)) {
4718 stream << " DCHECK(" << GenerateRuntimeTypeCheck(field_type, "value")
4719 << ");\n";
4720 }
4721}
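// For illustration: for a hypothetical indexed Smi field "bar", the
// statements emitted above look roughly like
//
//   int offset = kBarOffset + i * kTaggedSize;
//   int value = TaggedField<Smi>::load(*this, offset).value();
//
// while a non-Smi tagged field threads the cage_base through
// TaggedField<...>::load(cage_base, *this, offset) and is followed by a
// DCHECK on the runtime type of the loaded value.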
4722
4723void CppClassGenerator::EmitStoreFieldStatement(
4724 std::ostream& stream, const Field& class_field,
4725 std::vector<const Field*>& struct_fields) {
4726 const Field& innermost_field =
4727 struct_fields.empty() ? class_field : *struct_fields.back();
4728 const Type* field_type = innermost_field.name_and_type.type;
4729 std::string type_name = GetTypeNameForAccessor(innermost_field);
4730 const std::string class_field_size =
4731 std::get<1>(class_field.GetFieldSizeInformation());
4732
4733 // field_offset contains both the offset from the beginning of the object to
4734 // the class field and the combined offsets of any nested struct fields
4735 // within, but not the index adjustment.
4736 std::string field_offset = GetFieldOffsetForAccessor(class_field);
4737 for (const Field* nested_struct_field : struct_fields) {
4738 field_offset += " + " + std::to_string(*nested_struct_field->offset);
4739 }
4740
4741 std::string offset = field_offset;
4742 if (class_field.index) {
4743 const char* index = class_field.index->optional ? "0" : "i";
4744 GenerateBoundsDCheck(stream, index, type_, class_field);
4745 stream << " int offset = " << field_offset << " + " << index << " * "
4746 << class_field_size << ";\n";
4747 offset = "offset";
4748 }
4749
4750 if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
4751 const char* store;
4752 switch (class_field.synchronization) {
4753 case FieldSynchronization::kNone:
4754 store = "WriteField";
4755 break;
4756 case FieldSynchronization::kRelaxed:
4757 store = "Relaxed_WriteField";
4758 break;
4759 case FieldSynchronization::kAcquireRelease:
4760 ReportError("Torque doesn't support @cppReleaseStore on untagged data");
4761 }
4762 stream << " this->template " << store << "<" << type_name << ">(" << offset
4763 << ", value);\n";
4764 } else {
4765 bool strong_pointer = field_type->IsSubtypeOf(TypeOracle::GetObjectType());
4766 bool is_smi = field_type->IsSubtypeOf(TypeOracle::GetSmiType());
4767 const char* write_macro;
4768 if (!strong_pointer) {
4769 if (class_field.synchronization ==
4770 FieldSynchronization::kAcquireRelease) {
4771 ReportError("Torque doesn't support @cppReleaseStore on weak fields");
4772 }
4773 write_macro = "RELAXED_WRITE_WEAK_FIELD";
4774 } else {
4775 switch (class_field.synchronization) {
4776 case FieldSynchronization::kNone:
4777 write_macro = "WRITE_FIELD";
4778 break;
4779 case FieldSynchronization::kRelaxed:
4780 write_macro = "RELAXED_WRITE_FIELD";
4781 break;
4782 case FieldSynchronization::kAcquireRelease:
4783 write_macro = "RELEASE_WRITE_FIELD";
4784 break;
4785 }
4786 }
4787 std::string value_to_write;
4788 if (const auto type_wrapped_in_smi = Type::MatchUnaryGeneric(
4789 field_type, TypeOracle::GetSmiTaggedGeneric())) {
4790 DCHECK(is_smi);
4791 stream << " // " << type_wrapped_in_smi.value()->ToString() << "\n";
4792 value_to_write = "Smi::From31BitPattern(value)";
4793 } else {
4794 value_to_write = is_smi ? "Smi::FromInt(value)" : "value";
4795 }
4796
4797 if (!is_smi) {
4798 // Don't DCHECK types if the roots aren't initialized, so that we don't
4799 // incorrectly fail these checks during initial heap setup.
4800 stream << " "
4801 "SLOW_DCHECK(!IsolateGroup::current()->shared_read_only_heap()-"
4802 ">roots_init_complete() || ("
4803 << GenerateRuntimeTypeCheck(field_type, "value") << "));\n";
4804 }
4805 stream << " " << write_macro << "(*this, " << offset << ", "
4806 << value_to_write << ");\n";
4807 if (!is_smi) {
4808 stream << " CONDITIONAL_WRITE_BARRIER(*this, " << offset
4809 << ", value, mode);\n";
4810 }
4811 }
4812}
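// For illustration: for a hypothetical strong tagged field "bar" with no
// synchronization, the emitted setter body is roughly
//
//   SLOW_DCHECK(!IsolateGroup::current()->shared_read_only_heap()
//                    ->roots_init_complete() ||
//               (IsBar(value)));
//   WRITE_FIELD(*this, kBarOffset, value);
//   CONDITIONAL_WRITE_BARRIER(*this, kBarOffset, value, mode);
//
// where IsBar stands in for whatever GenerateRuntimeTypeCheck produces; a Smi
// field instead writes Smi::FromInt(value) and emits no write barrier.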
4813
4814void GenerateStructLayoutDescription(std::ostream& header,
4815 const StructType* type) {
4816 header << "struct TorqueGenerated" << CamelifyString(type->name())
4817 << "Offsets {\n";
4818 for (const Field& field : type->fields()) {
4819 header << " static constexpr int k"
4820 << CamelifyString(field.name_and_type.name)
4821 << "Offset = " << *field.offset << ";\n";
4822 }
4823 header << " static constexpr int kSize = " << type->PackedSize() << ";\n";
4824 header << "};\n\n";
4825}
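// For illustration: a hypothetical struct Pair with two tagged fields yields,
// assuming a 4-byte tagged size,
//
//   struct TorqueGeneratedPairOffsets {
//     static constexpr int kFirstOffset = 0;
//     static constexpr int kSecondOffset = 4;
//     static constexpr int kSize = 8;
//   };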
4826
4827} // namespace
4828
4829void ImplementationVisitor::GenerateClassDefinitions(
4830 const std::string& output_directory) {
4831 std::stringstream factory_header;
4832 std::stringstream factory_impl;
4833 std::string factory_basename = "factory";
4834
4835 std::stringstream forward_declarations;
4836 std::string forward_declarations_filename = "class-forward-declarations.h";
4837
4838 {
4839 factory_impl << "#include \"src/heap/factory-base.h\"\n";
4840 factory_impl << "#include \"src/heap/factory-base-inl.h\"\n";
4841 factory_impl << "#include \"src/heap/heap.h\"\n";
4842 factory_impl << "#include \"src/heap/heap-inl.h\"\n";
4843 factory_impl << "#include \"src/execution/isolate.h\"\n";
4844 factory_impl << "#include "
4845 "\"src/objects/all-objects-inl.h\"\n\n";
4846 NamespaceScope factory_impl_namespaces(factory_impl, {"v8", "internal"});
4847 factory_impl << "\n";
4848
4849 IncludeGuardScope include_guard(forward_declarations,
4850 forward_declarations_filename);
4851 NamespaceScope forward_declarations_namespaces(forward_declarations,
4852 {"v8", "internal"});
4853
4854 std::set<const StructType*, TypeLess> structs_used_in_classes;
4855
4856 // Emit forward declarations.
4857 for (const ClassType* type : TypeOracle::GetClasses()) {
4858 CurrentSourcePosition::Scope position_activator(type->GetPosition());
4859 auto& streams = GlobalContext::GeneratedPerFile(type->AttributedToFile());
4860 std::ostream& header = streams.class_definition_headerfile;
4861 std::string name = type->ShouldGenerateCppClassDefinitions()
4862 ? type->name()
4863 : type->GetGeneratedTNodeTypeName();
4864 if (type->ShouldGenerateCppClassDefinitions()) {
4865 header << "class " << name << ";\n";
4866 }
4867 forward_declarations << "class " << name << ";\n";
4868 }
4869
4870 for (const ClassType* type : TypeOracle::GetClasses()) {
4871 CurrentSourcePosition::Scope position_activator(type->GetPosition());
4872 auto& streams = GlobalContext::GeneratedPerFile(type->AttributedToFile());
4873 std::ostream& header = streams.class_definition_headerfile;
4874 std::ostream& inline_header = streams.class_definition_inline_headerfile;
4875 std::ostream& implementation = streams.class_definition_ccfile;
4876
4877 if (type->ShouldGenerateCppClassDefinitions()) {
4878 CppClassGenerator g(type, header, inline_header, implementation);
4879 g.GenerateClass();
4880 } else if (type->ShouldGenerateCppObjectDefinitionAsserts()) {
4881 CppClassGenerator g(type, header, inline_header, implementation);
4882 g.GenerateCppObjectDefinitionAsserts();
4883 } else if (type->ShouldGenerateCppObjectLayoutDefinitionAsserts()) {
4884 CppClassGenerator g(type, header, inline_header, implementation);
4885 g.GenerateCppObjectLayoutDefinitionAsserts();
4886 }
4887 for (const Field& f : type->fields()) {
4888 const Type* field_type = f.name_and_type.type;
4889 if (auto field_as_struct = field_type->StructSupertype()) {
4890 structs_used_in_classes.insert(*field_as_struct);
4891 }
4892 }
4893 if (type->ShouldGenerateFactoryFunction()) {
4894 std::string return_type =
4895 type->HandlifiedCppTypeName(Type::HandleKind::kIndirect);
4896 std::string function_name = "New" + type->name();
4897 std::stringstream parameters;
4898 for (const Field& f : type->ComputeAllFields()) {
4899 if (f.name_and_type.name == "map") continue;
4900 if (f.name_and_type.name == "self_indirect_pointer") continue;
4901 if (!f.index) {
4902 std::string type_string =
4903 f.name_and_type.type->HandlifiedCppTypeName(
4904 Type::HandleKind::kDirect);
4905 parameters << type_string << " " << f.name_and_type.name << ", ";
4906 }
4907 }
4908 parameters << "AllocationType allocation_type";
4909
4910 factory_header << return_type << " " << function_name << "("
4911 << parameters.str() << ");\n";
4912 factory_impl << "template <typename Impl>\n";
4913 factory_impl << return_type
4914 << " TorqueGeneratedFactory<Impl>::" << function_name
4915 << "(" << parameters.str() << ") {\n";
4916
4917 factory_impl << " int size = ";
4918 const ClassType* super = type->GetSuperClass();
4919 std::string gen_name = "TorqueGenerated" + type->name();
4920 std::string gen_name_T =
4921 gen_name + "<" + type->name() + ", " + super->name() + ">";
4922 factory_impl << gen_name_T << "::SizeFor(";
4923
4924 bool first = true;
4925 auto index_fields = GetOrderedUniqueIndexFields(*type);
4926 CHECK(index_fields.has_value());
4927 for (const auto& index_field : *index_fields) {
4928 if (!first) {
4929 factory_impl << ", ";
4930 }
4931 factory_impl << index_field.name_and_type.name;
4932 first = false;
4933 }
4934
4935 factory_impl << ");\n";
4936 factory_impl << " Tagged<Map> map = factory()->read_only_roots()."
4937 << SnakeifyString(type->name()) << "_map();\n";
4938 factory_impl << " Tagged<HeapObject> raw_object =\n";
4939 factory_impl << " factory()->AllocateRawWithImmortalMap(size, "
4940 "allocation_type, map);\n";
4941 factory_impl << " " << type->TagglifiedCppTypeName()
4942 << " result = Cast<"
4943 << type->GetConstexprGeneratedTypeName()
4944 << ">(raw_object);\n";
4945 factory_impl << " DisallowGarbageCollection no_gc;\n";
4946 factory_impl << " WriteBarrierMode write_barrier_mode =\n"
4947 << " allocation_type == AllocationType::kYoung\n"
4948 << " ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;\n"
4949 << " USE(write_barrier_mode);\n";
4950
4951 for (const Field& f : type->ComputeAllFields()) {
4952 if (f.name_and_type.name == "map") continue;
4953 if (f.name_and_type.name == "self_indirect_pointer") {
4954 factory_impl << " "
4955 "result->init_self_indirect_pointer(factory()->"
4956 "isolate());\n";
4957 } else if (!f.index) {
4958 factory_impl << " result->TorqueGeneratedClass::set_"
4959 << SnakeifyString(f.name_and_type.name) << "(";
4960 if (f.name_and_type.type->IsSubtypeOf(
4961 TypeOracle::GetTaggedType()) &&
4962 !f.name_and_type.type->IsSubtypeOf(TypeOracle::GetSmiType())) {
4963 factory_impl << "*" << f.name_and_type.name
4964 << ", write_barrier_mode";
4965 } else {
4966 factory_impl << f.name_and_type.name;
4967 }
4968 factory_impl << ");\n";
4969 }
4970 }
4971
4972 factory_impl << " return handle(result, factory()->isolate());\n";
4973 factory_impl << "}\n\n";
4974
4975 factory_impl << "template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) "
4976 << return_type
4977 << " TorqueGeneratedFactory<Factory>::" << function_name
4978 << "(" << parameters.str() << ");\n";
4979 factory_impl << "template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) "
4980 << return_type << " TorqueGeneratedFactory<LocalFactory>::"
4981 << function_name << "(" << parameters.str() << ");\n";
4982
4983 factory_impl << "\n\n";
4984 }
4985 }
4986
4987 for (const StructType* type : structs_used_in_classes) {
4988 CurrentSourcePosition::Scope position_activator(type->GetPosition());
4989 std::ostream& header =
4990 GlobalContext::GeneratedPerFile(type->GetPosition().source)
4991 .class_definition_headerfile;
4992 if (type != TypeOracle::GetFloat64OrUndefinedOrHoleType()) {
4993 GenerateStructLayoutDescription(header, type);
4994 }
4995 }
4996 }
4997 WriteFile(output_directory + "/" + factory_basename + ".inc",
4998 factory_header.str());
4999 WriteFile(output_directory + "/" + factory_basename + ".cc",
5000 factory_impl.str());
5001 WriteFile(output_directory + "/" + forward_declarations_filename,
5002 forward_declarations.str());
5003}
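// For illustration: for a hypothetical class Foo (direct superclass
// HeapObject) with a single non-indexed tagged field "bar", the factory
// pieces assembled above come out roughly as
//
//   template <typename Impl>
//   Handle<Foo> TorqueGeneratedFactory<Impl>::NewFoo(
//       DirectHandle<Object> bar, AllocationType allocation_type) {
//     int size = TorqueGeneratedFoo<Foo, HeapObject>::SizeFor();
//     Tagged<Map> map = factory()->read_only_roots().foo_map();
//     ...allocate raw_object, cast it to result, pick write_barrier_mode...
//     result->TorqueGeneratedClass::set_bar(*bar, write_barrier_mode);
//     return handle(result, factory()->isolate());
//   }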
5004
5005namespace {
5006void GeneratePrintDefinitionsForClass(std::ostream& impl, const ClassType* type,
5007 const std::string& gen_name,
5008 const std::string& gen_name_T,
5009 const std::string template_params) {
5010 impl << template_params << "\n";
5011 impl << "void " << gen_name_T << "::" << type->name()
5012 << "Print(std::ostream& os) {\n";
5013 impl << " this->PrintHeader(os, \"" << type->name() << "\");\n";
5014 auto hierarchy = type->GetHierarchy();
5015 std::map<std::string, const AggregateType*> field_names;
5016 for (const AggregateType* aggregate_type : hierarchy) {
5017 for (const Field& f : aggregate_type->fields()) {
5018 if (f.name_and_type.name == "map" || f.index.has_value() ||
5019 !CanGenerateFieldAccessors(f.name_and_type.type)) {
5020 continue;
5021 }
5022 std::string getter = f.name_and_type.name;
5023 if (aggregate_type != type) {
5024 // We must call getters directly on the class that provided them,
5025 // because a subclass could have hidden them.
5026 getter = aggregate_type->name() + "::TorqueGeneratedClass::" + getter;
5027 }
5028 if (f.name_and_type.type->IsSubtypeOf(TypeOracle::GetSmiType()) ||
5029 !f.name_and_type.type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
5030 impl << " os << \"\\n - " << f.name_and_type.name << ": \" << ";
5031 if (f.name_and_type.type->StructSupertype()) {
5032 // TODO(turbofan): Print struct fields too.
5033 impl << "\" <struct field printing still unimplemented>\";\n";
5034 } else {
5035 impl << "this->" << getter;
5036 switch (f.synchronization) {
5037 case FieldSynchronization::kNone:
5038 impl << "();\n";
5039 break;
5040 case FieldSynchronization::kRelaxed:
5041 impl << "(kRelaxedLoad);\n";
5042 break;
5043 case FieldSynchronization::kAcquireRelease:
5044 impl << "(kAcquireLoad);\n";
5045 break;
5046 }
5047 }
5048 } else {
5049 impl << " os << \"\\n - " << f.name_and_type.name << ": \" << "
5050 << "Brief(this->" << getter;
5051 switch (f.synchronization) {
5052 case FieldSynchronization::kNone:
5053 impl << "());\n";
5054 break;
5055 case FieldSynchronization::kRelaxed:
5056 impl << "(kRelaxedLoad));\n";
5057 break;
5058 case FieldSynchronization::kAcquireRelease:
5059 impl << "(kAcquireLoad));\n";
5060 break;
5061 }
5062 }
5063 }
5064 }
5065 impl << " os << '\\n';\n";
5066 impl << "}\n\n";
5067}
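// For illustration: for a hypothetical class Foo with one tagged field "bar",
// the printer generated above is roughly
//
//   template <>
//   void TorqueGeneratedFoo<Foo, HeapObject>::FooPrint(std::ostream& os) {
//     this->PrintHeader(os, "Foo");
//     os << "\n - bar: " << Brief(this->bar());
//     os << '\n';
//   }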
5068} // namespace
5069
5070void ImplementationVisitor::GeneratePrintDefinitions(
5071 const std::string& output_directory) {
5072 std::stringstream impl;
5073 std::string file_name = "objects-printer.cc";
5074 {
5075 IfDefScope object_print(impl, "OBJECT_PRINT");
5076
5077 impl << "#include <iosfwd>\n\n";
5078 impl << "#include \"src/objects/all-objects-inl.h\"\n\n";
5079
5080 NamespaceScope impl_namespaces(impl, {"v8", "internal"});
5081
5082 for (const ClassType* type : TypeOracle::GetClasses()) {
5083 if (!type->ShouldGeneratePrint()) continue;
5084 DCHECK(type->ShouldGenerateCppClassDefinitions());
5085 const ClassType* super = type->GetSuperClass();
5086 std::string gen_name = "TorqueGenerated" + type->name();
5087 std::string gen_name_T =
5088 gen_name + "<" + type->name() + ", " + super->name() + ">";
5089 std::string template_decl = "template <>";
5090 GeneratePrintDefinitionsForClass(impl, type, gen_name, gen_name_T,
5091 template_decl);
5092 }
5093 }
5094
5095 std::string new_contents(impl.str());
5096 WriteFile(output_directory + "/" + file_name, new_contents);
5097}
5098
5099std::optional<std::string> MatchSimpleBodyDescriptor(const ClassType* type) {
5100 std::vector<ObjectSlotKind> slots = type->ComputeHeaderSlotKinds();
5101 if (!type->HasStaticSize()) {
5102 slots.push_back(*type->ComputeArraySlotKind());
5103 }
5104
5105 // Skip the map slot.
5106 size_t i = 1;
5107 while (i < slots.size() && slots[i] == ObjectSlotKind::kNoPointer) ++i;
5108 if (i == slots.size()) return "DataOnlyBodyDescriptor";
5109 bool has_weak_pointers = false;
5110 size_t start_index = i;
5111 for (; i < slots.size(); ++i) {
5112 if (slots[i] == ObjectSlotKind::kStrongPointer) {
5113 continue;
5114 } else if (slots[i] == ObjectSlotKind::kMaybeObjectPointer) {
5115 has_weak_pointers = true;
5116 } else if (slots[i] == ObjectSlotKind::kNoPointer) {
5117 break;
5118 } else {
5119 return std::nullopt;
5120 }
5121 }
5122 size_t end_index = i;
5123 for (; i < slots.size(); ++i) {
5124 if (slots[i] != ObjectSlotKind::kNoPointer) return std::nullopt;
5125 }
5126 size_t start_offset = start_index * TargetArchitecture::TaggedSize();
5127 size_t end_offset = end_index * TargetArchitecture::TaggedSize();
5128 // We pick a suffix-range body descriptor even in cases where the object size
5129 // is fixed, to reduce the amount of code executed for object visitation.
5130 if (end_index == slots.size()) {
5131 return ToString("SuffixRange", has_weak_pointers ? "Weak" : "",
5132 "BodyDescriptor<", start_offset, ">");
5133 }
5134 if (!has_weak_pointers) {
5135 return ToString("FixedRangeBodyDescriptor<", start_offset, ", ", end_offset,
5136 ">");
5137 }
5138 return std::nullopt;
5139}
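// For illustration, assuming an 8-byte tagged size: a hypothetical slot
// layout [map, strong, strong, ...] with pointers up to the end of the object
// matches "SuffixRangeBodyDescriptor<8>"; [map, word, strong, strong, word]
// matches "FixedRangeBodyDescriptor<16, 32>"; and a layout with two pointer
// sections separated by untagged data returns std::nullopt, falling back to
// the custom IterateBody generated below.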
5140
5141void ImplementationVisitor::GenerateBodyDescriptors(
5142 const std::string& output_directory) {
5143 std::string file_name = "objects-body-descriptors-inl.inc";
5144 std::stringstream h_contents;
5145
5146 for (const ClassType* type : TypeOracle::GetClasses()) {
5147 std::string name = type->name();
5148 if (!type->ShouldGenerateBodyDescriptor()) continue;
5149
5150 bool has_array_fields = !type->HasStaticSize();
5151 std::vector<ObjectSlotKind> header_slot_kinds =
5152 type->ComputeHeaderSlotKinds();
5153 std::optional<ObjectSlotKind> array_slot_kind =
5154 type->ComputeArraySlotKind();
5155 DCHECK_EQ(has_array_fields, array_slot_kind.has_value());
5156
5157 h_contents << "class " << name << "::BodyDescriptor final : public ";
5158 if (auto descriptor_name = MatchSimpleBodyDescriptor(type)) {
5159 h_contents << *descriptor_name << " {\n";
5160 h_contents << " public:\n";
5161 } else {
5162 h_contents << "BodyDescriptorBase {\n";
5163 h_contents << " public:\n";
5164
5165 h_contents << " template <typename ObjectVisitor>\n";
5166 h_contents
5167 << " static inline void IterateBody(Tagged<Map> map, "
5168 "Tagged<HeapObject> obj, int object_size, ObjectVisitor* v) {\n";
5169
5170 std::vector<ObjectSlotKind> slots = std::move(header_slot_kinds);
5171 if (has_array_fields) slots.push_back(*array_slot_kind);
5172
5173 // Skip the map slot.
5174 slots.erase(slots.begin());
5175 size_t start_offset = TargetArchitecture::TaggedSize();
5176
5177 size_t end_offset = start_offset;
5178 ObjectSlotKind section_kind;
5179 for (size_t i = 0; i <= slots.size(); ++i) {
5180 std::optional<ObjectSlotKind> next_section_kind;
5181 bool finished_section = false;
5182 if (i == 0) {
5183 next_section_kind = slots[i];
5184 } else if (i < slots.size()) {
5185 if (auto combined = Combine(section_kind, slots[i])) {
5186 next_section_kind = *combined;
5187 } else {
5188 next_section_kind = slots[i];
5189 finished_section = true;
5190 }
5191 } else {
5192 finished_section = true;
5193 }
5194 if (finished_section) {
5195 bool is_array_slot = i == slots.size() && has_array_fields;
5196 bool multiple_slots =
5197 is_array_slot ||
5198 (end_offset - start_offset > TargetArchitecture::TaggedSize());
5199 std::optional<std::string> iterate_command;
5200 switch (section_kind) {
5201 case ObjectSlotKind::kStrongPointer:
5202 iterate_command = "IteratePointer";
5203 break;
5204 case ObjectSlotKind::kMaybeObjectPointer:
5205 iterate_command = "IterateMaybeWeakPointer";
5206 break;
5207 case ObjectSlotKind::kCustomWeakPointer:
5208 iterate_command = "IterateCustomWeakPointer";
5209 break;
5210 case ObjectSlotKind::kNoPointer:
5211 break;
5212 }
5213 if (iterate_command) {
5214 if (multiple_slots) *iterate_command += "s";
5215 h_contents << " " << *iterate_command << "(obj, "
5216 << start_offset;
5217 if (multiple_slots) {
5218 h_contents << ", "
5219 << (i == slots.size() ? "object_size"
5220 : std::to_string(end_offset));
5221 }
5222 h_contents << ", v);\n";
5223 }
5224 start_offset = end_offset;
5225 }
5226 if (i < slots.size()) section_kind = *next_section_kind;
5227 end_offset += TargetArchitecture::TaggedSize();
5228 }
5229
5230 h_contents << " }\n\n";
5231 }
5232
5233 h_contents << " static inline int SizeOf(Tagged<Map> map, "
5234 "Tagged<HeapObject> raw_object) {\n";
5235 if (type->size().SingleValue()) {
5236 h_contents << " return " << *type->size().SingleValue() << ";\n";
5237 } else {
5238 // We use an UncheckedCast here because this is used for concurrent
5239 // marking, where we shouldn't re-read the map.
5240 h_contents << " return UncheckedCast<" << name
5241 << ">(raw_object)->AllocatedSize();\n";
5242 }
5243 h_contents << " }\n\n";
5244
5245 h_contents << "};\n";
5246 }
5247
5248 WriteFile(output_directory + "/" + file_name, h_contents.str());
5249}
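// For illustration: when no simple descriptor matches, the generated class
// looks roughly like this (offsets are emitted as literal byte values;
// 8 and 24 here assume an 8-byte tagged size and a hypothetical class Foo):
//
//   class Foo::BodyDescriptor final : public BodyDescriptorBase {
//    public:
//     template <typename ObjectVisitor>
//     static inline void IterateBody(Tagged<Map> map, Tagged<HeapObject> obj,
//                                    int object_size, ObjectVisitor* v) {
//       IteratePointers(obj, 8, 24, v);
//     }
//     static inline int SizeOf(Tagged<Map> map,
//                              Tagged<HeapObject> raw_object) {
//       return UncheckedCast<Foo>(raw_object)->AllocatedSize();
//     }
//   };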
5250
5251namespace {
5252
5253// Generate verification code for a single piece of class data, which might be
5254// nested within a struct or might be a single element in an indexed field (or
5255// both).
5256void GenerateFieldValueVerifier(const std::string& class_name, bool indexed,
5257 std::string offset, const Field& leaf_field,
5258 std::string indexed_field_size,
5259 std::ostream& cc_contents, bool is_map) {
5260 const Type* field_type = leaf_field.name_and_type.type;
5261
5262 bool maybe_object =
5263 !field_type->IsSubtypeOf(TypeOracle::GetStrongTaggedType());
5264 const char* object_type = maybe_object ? "MaybeObject" : "Object";
5265 const char* tagged_object_type =
5266 maybe_object ? "Tagged<MaybeObject>" : "Tagged<Object>";
5267 const char* verify_fn =
5268 maybe_object ? "VerifyMaybeObjectPointer" : "VerifyPointer";
5269 if (indexed) {
5270 offset += " + i * " + indexed_field_size;
5271 }
5272 // Name the local var based on the field name for nicer CHECK output.
5273 const std::string value = leaf_field.name_and_type.name + "__value";
5274
5275 // Read the field.
5276 if (is_map) {
5277 cc_contents << " " << tagged_object_type << " " << value
5278 << " = o->map();\n";
5279 } else {
5280 cc_contents << " " << tagged_object_type << " " << value
5281 << " = TaggedField<" << object_type << ">::load(o, " << offset
5282 << ");\n";
5283 }
5284
5285 // Call VerifyPointer or VerifyMaybeObjectPointer on it.
5286 cc_contents << " Object::" << verify_fn << "(isolate, " << value << ");\n";
5287
5288 // Check that the value is of an appropriate type. We can skip this part for
5289 // the Object type because it would not check anything beyond what we already
5290 // checked with VerifyPointer.
5291 if (field_type != TypeOracle::GetObjectType()) {
5292 cc_contents << " CHECK(" << GenerateRuntimeTypeCheck(field_type, value)
5293 << ");\n";
5294 }
5295}
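// For illustration: for a hypothetical strong tagged field "bar" at literal
// offset 16, this emits roughly
//
//   Tagged<Object> bar__value = TaggedField<Object>::load(o, 16);
//   Object::VerifyPointer(isolate, bar__value);
//   CHECK(IsBar(bar__value));
//
// with IsBar standing in for whatever GenerateRuntimeTypeCheck produces.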
5296
5297void GenerateClassFieldVerifier(const std::string& class_name,
5298 const ClassType& class_type, const Field& f,
5299 std::ostream& h_contents,
5300 std::ostream& cc_contents) {
5301 const Type* field_type = f.name_and_type.type;
5302
5303 // We only verify tagged types, not raw numbers or pointers. Structs
5304 // consisting of tagged types are also included.
5305 if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType()) &&
5306 !field_type->StructSupertype())
5307 return;
5308 // Protected pointer fields cannot be read or verified from torque yet.
5309 if (field_type->IsSubtypeOf(TypeOracle::GetProtectedPointerType())) return;
5310 if (field_type == TypeOracle::GetFloat64OrUndefinedOrHoleType()) return;
5311 // Do not verify if the field may be uninitialized.
5312 if (TypeOracle::GetUninitializedType()->IsSubtypeOf(field_type)) return;
5313
5314 std::string field_start_offset;
5315 if (f.index) {
5316 field_start_offset = f.name_and_type.name + "__offset";
5317 std::string length = f.name_and_type.name + "__length";
5318 cc_contents << " intptr_t " << field_start_offset << ", " << length
5319 << ";\n";
5320 cc_contents << " std::tie(std::ignore, " << field_start_offset << ", "
5321 << length << ") = "
5322 << Callable::PrefixNameForCCOutput(
5323 class_type.GetSliceMacroName(f))
5324 << "(o);\n";
5325
5326    // Slices use intptr, but TaggedField<T>::load() uses int, so verify that
5327 // such a cast is valid.
5328 cc_contents << " CHECK_EQ(" << field_start_offset << ", static_cast<int>("
5329 << field_start_offset << "));\n";
5330 cc_contents << " CHECK_EQ(" << length << ", static_cast<int>(" << length
5331 << "));\n";
5332 field_start_offset = "static_cast<int>(" + field_start_offset + ")";
5333 length = "static_cast<int>(" + length + ")";
5334
5335 cc_contents << " for (int i = 0; i < " << length << "; ++i) {\n";
5336 } else {
5337 // Non-indexed fields have known offsets.
5338 field_start_offset = std::to_string(*f.offset);
5339 cc_contents << " {\n";
5340 }
5341
5342 if (auto struct_type = field_type->StructSupertype()) {
5343 for (const Field& struct_field : (*struct_type)->fields()) {
5344 if (struct_field.name_and_type.type->IsSubtypeOf(
5345 TypeOracle::GetTaggedType())) {
5346 GenerateFieldValueVerifier(
5347 class_name, f.index.has_value(),
5348 field_start_offset + " + " + std::to_string(*struct_field.offset),
5349 struct_field, std::to_string((*struct_type)->PackedSize()),
5350 cc_contents, f.name_and_type.name == "map");
5351 }
5352 }
5353 } else {
5354 GenerateFieldValueVerifier(class_name, f.index.has_value(),
5355 field_start_offset, f, "kTaggedSize",
5356 cc_contents, f.name_and_type.name == "map");
5357 }
5358
5359 cc_contents << " }\n";
5360}
5361
5362} // namespace
5363
5364void ImplementationVisitor::GenerateClassVerifiers(
5365 const std::string& output_directory) {
5366 std::string file_name = "class-verifiers";
5367 std::stringstream h_contents;
5368 std::stringstream cc_contents;
5369 {
5370 IncludeGuardScope include_guard(h_contents, file_name + ".h");
5371 IfDefScope verify_heap_h(h_contents, "VERIFY_HEAP");
5372 IfDefScope verify_heap_cc(cc_contents, "VERIFY_HEAP");
5373
5374 h_contents << "#include \"src/base/macros.h\"\n\n";
5375
5376 cc_contents << "#include \"torque-generated/" << file_name << ".h\"\n\n";
5377 cc_contents << "#include \"src/objects/all-objects-inl.h\"\n";
5378
5379 IncludeObjectMacrosScope object_macros(cc_contents);
5380
5381 NamespaceScope h_namespaces(h_contents, {"v8", "internal"});
5382 NamespaceScope cc_namespaces(cc_contents, {"v8", "internal"});
5383
5384 cc_contents
5385 << "#include \"torque-generated/test/torque/test-torque-tq-inl.inc\"\n";
5386
5387 // Generate forward declarations to avoid including any headers.
5388 h_contents << "class Isolate;\n";
5389 h_contents << "template<typename T>\nclass Tagged;\n";
5390 for (const ClassType* type : TypeOracle::GetClasses()) {
5391 if (!type->ShouldGenerateVerify()) continue;
5392 h_contents << "class " << type->name() << ";\n";
5393 }
5394
5395 const char* verifier_class = "TorqueGeneratedClassVerifiers";
5396
5397 h_contents << "class V8_EXPORT_PRIVATE " << verifier_class << "{\n";
5398 h_contents << " public:\n";
5399
5400 for (const ClassType* type : TypeOracle::GetClasses()) {
5401 std::string name = type->name();
5402 std::string cpp_name = type->TagglifiedCppTypeName();
5403 if (!type->ShouldGenerateVerify()) continue;
5404
5405 std::string method_name = name + "Verify";
5406
5407 h_contents << " static void " << method_name << "(" << cpp_name
5408 << " o, Isolate* isolate);\n";
5409
5410 cc_contents << "void " << verifier_class << "::" << method_name << "("
5411 << cpp_name << " o, Isolate* isolate) {\n";
5412
5413 // First, do any verification for the super class. Not all classes have
5414 // verifiers, so skip to the nearest super class that has one.
5415 const ClassType* super_type = type->GetSuperClass();
5416 while (super_type && !super_type->ShouldGenerateVerify()) {
5417 super_type = super_type->GetSuperClass();
5418 }
5419 if (super_type) {
5420 std::string super_name = super_type->name();
5421 cc_contents << " o->" << super_name << "Verify(isolate);\n";
5422 }
5423
5424 // Second, verify that this object is what it claims to be.
5425 cc_contents << " CHECK(Is" << name << "(o, isolate));\n";
5426
5427 // Third, verify its properties.
5428 for (const auto& f : type->fields()) {
5429 GenerateClassFieldVerifier(name, *type, f, h_contents, cc_contents);
5430 }
5431
5432 cc_contents << "}\n";
5433 }
5434
5435 h_contents << "};\n";
5436 }
5437 WriteFile(output_directory + "/" + file_name + ".h", h_contents.str());
5438 WriteFile(output_directory + "/" + file_name + ".cc", cc_contents.str());
5439}
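// For illustration: each generated verifier chains to the nearest ancestor
// class that also verifies, re-checks the instance type, then verifies each
// field. For a hypothetical class Foo deriving from HeapObject:
//
//   void TorqueGeneratedClassVerifiers::FooVerify(Tagged<Foo> o,
//                                                 Isolate* isolate) {
//     o->HeapObjectVerify(isolate);
//     CHECK(IsFoo(o, isolate));
//     { /* per-field checks emitted by GenerateClassFieldVerifier */ }
//   }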
5440
5441void ImplementationVisitor::GenerateEnumVerifiers(
5442 const std::string& output_directory) {
5443 std::string file_name = "enum-verifiers";
5444 std::stringstream cc_contents;
5445 {
5446 cc_contents << "#include \"src/compiler/code-assembler.h\"\n";
5447 for (const std::string& include_path : GlobalContext::CppIncludes()) {
5448 cc_contents << "#include " << StringLiteralQuote(include_path) << "\n";
5449 }
5450 cc_contents << "\n";
5451
5452 NamespaceScope cc_namespaces(cc_contents, {"v8", "internal", ""});
5453
5454 cc_contents << "class EnumVerifier {\n";
5455 for (const auto& desc : GlobalContext::Get().ast()->EnumDescriptions()) {
5456 std::stringstream alias_checks;
5457 cc_contents << " // " << desc.name << " (" << desc.pos << ")\n";
5458 cc_contents << " void VerifyEnum_" << desc.name << "("
5459 << desc.constexpr_generates
5460 << " x) {\n"
5461 " switch(x) {\n";
5462 for (const auto& entry : desc.entries) {
5463 if (entry.alias_entry.empty()) {
5464 cc_contents << " case " << entry.name << ": break;\n";
5465 } else {
5466 // We don't add a case for this, because it aliases another entry, so
5467 // we would have two cases for the same value.
5468 alias_checks << " static_assert(" << entry.name
5469 << " == " << entry.alias_entry << ");\n";
5470 }
5471 }
5472 if (desc.is_open) cc_contents << " default: break;\n";
5473 cc_contents << " }\n";
5474 cc_contents << alias_checks.str();
5475 cc_contents << " }\n\n";
5476 }
5477 cc_contents << "};\n";
5478 }
5479
5480 WriteFile(output_directory + "/" + file_name + ".cc", cc_contents.str());
5481}
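// For illustration: a hypothetical closed enum with entries kRed, kGreen and
// an alias kGrey == kGray yields roughly
//
//   void VerifyEnum_Color(ColorT x) {
//     switch(x) {
//       case kRed: break;
//       case kGreen: break;
//     }
//     static_assert(kGrey == kGray);
//   }
//
// The switch is never executed; since closed enums get no default case, a
// C++ enumerator missing from the Torque definition triggers a compiler
// warning. Open enums additionally receive a "default: break;".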
5482
5483void ImplementationVisitor::GenerateExportedMacrosAssembler(
5484 const std::string& output_directory) {
5485 std::string file_name = "exported-macros-assembler";
5486 std::stringstream h_contents;
5487 std::stringstream cc_contents;
5488 {
5489 IncludeGuardScope include_guard(h_contents, file_name + ".h");
5490
5491 h_contents << "#include \"src/compiler/code-assembler.h\"\n";
5492 h_contents << "#include \"src/execution/frames.h\"\n";
5493 h_contents << "#include \"torque-generated/csa-types.h\"\n";
5494
5495 for (const std::string& include_path : GlobalContext::CppIncludes()) {
5496 cc_contents << "#include " << StringLiteralQuote(include_path) << "\n";
5497 }
5498 cc_contents << "#include \"torque-generated/" << file_name << ".h\"\n";
5499
5500 for (SourceId file : SourceFileMap::AllSources()) {
5501 cc_contents << "#include \"torque-generated/" +
5502 SourceFileMap::PathFromV8RootWithoutExtension(file) +
5503 "-tq-csa.h\"\n";
5504 }
5505
5506 NamespaceScope h_namespaces(h_contents, {"v8", "internal"});
5507 NamespaceScope cc_namespaces(cc_contents, {"v8", "internal"});
5508
5509 h_contents << "class V8_EXPORT_PRIVATE "
5510 "TorqueGeneratedExportedMacrosAssembler {\n"
5511 << " public:\n"
5512 << " explicit TorqueGeneratedExportedMacrosAssembler"
5513 "(compiler::CodeAssemblerState* state) : state_(state) {\n"
5514 << " USE(state_);\n"
5515 << " }\n";
5516
5517 for (auto& declarable : GlobalContext::AllDeclarables()) {
5518 TorqueMacro* macro = TorqueMacro::DynamicCast(declarable.get());
5519 if (!(macro && macro->IsExportedToCSA())) continue;
5520 CurrentSourcePosition::Scope position_activator(macro->Position());
5521
5522 cpp::Class assembler("TorqueGeneratedExportedMacrosAssembler");
5523 std::vector<std::string> generated_parameter_names;
5524 cpp::Function f = GenerateFunction(
5525 &assembler, macro->ReadableName(), macro->signature(),
5526 macro->parameter_names(), false, &generated_parameter_names);
5527
5528 f.PrintDeclaration(h_contents);
5529 f.PrintDefinition(cc_contents, [&](std::ostream& stream) {
5530 stream << "return " << macro->ExternalName() << "(state_";
5531 for (const auto& name : generated_parameter_names) {
5532 stream << ", " << name;
5533 }
5534 stream << ");";
5535 });
5536 }
5537
5538 h_contents << " private:\n"
5539 << " compiler::CodeAssemblerState* state_;\n"
5540 << "};\n";
5541 }
5542 WriteFile(output_directory + "/" + file_name + ".h", h_contents.str());
5543 WriteFile(output_directory + "/" + file_name + ".cc", cc_contents.str());
5544}
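// For illustration: a hypothetical exported Torque macro
// "IsJSArray(o: Object): bool" becomes roughly
//
//   TNode<BoolT> TorqueGeneratedExportedMacrosAssembler::IsJSArray(
//       TNode<Object> o) {
//     return IsJSArray_0(state_, o);
//   }
//
// where IsJSArray_0 stands in for the macro's external (mangled) name.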
5545
5546namespace {
5547
5548void CollectAllFields(const std::string& path, const Field& field,
5549 std::vector<std::string>& result) {
5550 if (field.name_and_type.type->StructSupertype()) {
5551 std::string next_path = path + field.name_and_type.name + ".";
5552 const StructType* struct_type =
5553 StructType::DynamicCast(field.name_and_type.type);
5554 for (const auto& inner_field : struct_type->fields()) {
5555 CollectAllFields(next_path, inner_field, result);
5556 }
5557 } else {
5558 result.push_back(path + field.name_and_type.name);
5559 }
5560}
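// For illustration: a hypothetical struct-typed field "pos" with members
// "line" and "col" contributes "pos.line" and "pos.col" to the result, while
// a plain field "name" contributes just "name".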
5561
5562} // namespace
5563
5564void ImplementationVisitor::GenerateCSATypes(
5565 const std::string& output_directory) {
5566 std::string file_name = "csa-types";
5567 std::stringstream h_contents;
5568 {
5569 IncludeGuardScope include_guard(h_contents, file_name + ".h");
5570 h_contents << "#include \"src/compiler/code-assembler.h\"\n\n";
5571
5572 NamespaceScope h_namespaces(h_contents, {"v8", "internal"});
5573
5574    // Generate headers for all structs in topologically-sorted order, since
5575    // TypeOracle keeps them in the order of their resolution.
5576 for (const auto& type : TypeOracle::GetAggregateTypes()) {
5577 const StructType* struct_type = StructType::DynamicCast(type.get());
5578 if (!struct_type) continue;
5579 h_contents << "struct " << struct_type->GetGeneratedTypeNameImpl()
5580 << " {\n";
5581 for (auto& field : struct_type->fields()) {
5582 h_contents << " " << field.name_and_type.type->GetGeneratedTypeName();
5583 h_contents << " " << field.name_and_type.name << ";\n";
5584 }
5585 h_contents << "\n std::tuple<";
5586 bool first = true;
5587 for (const Type* lowered_type : LowerType(struct_type)) {
5588 if (!first) {
5589 h_contents << ", ";
5590 }
5591 first = false;
5592 h_contents << lowered_type->GetGeneratedTypeName();
5593 }
5594 std::vector<std::string> all_fields;
5595 for (auto& field : struct_type->fields()) {
5596 CollectAllFields("", field, all_fields);
5597 }
5598 h_contents << "> Flatten() const {\n"
5599 " return std::make_tuple(";
5600 PrintCommaSeparatedList(h_contents, all_fields);
5601 h_contents << ");\n";
5602 h_contents << " }\n";
5603 h_contents << "};\n";
5604 }
5605 }
5606 WriteFile(output_directory + "/" + file_name + ".h", h_contents.str());
5607}
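// For illustration: a hypothetical Torque struct Pair with fields a: Smi and
// b: Object comes out roughly as
//
//   struct TorqueStructPair {
//     TNode<Smi> a;
//     TNode<Object> b;
//
//     std::tuple<TNode<Smi>, TNode<Object>> Flatten() const {
//       return std::make_tuple(a, b);
//     }
//   };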
5608
5609void ImplementationVisitor::ReportAllUnusedMacros() {
5610 for (const auto& declarable : GlobalContext::AllDeclarables()) {
5611 if (!declarable->IsMacro() || declarable->IsExternMacro()) continue;
5612
5613 Macro* macro = Macro::cast(declarable.get());
5614 if (macro->IsUsed()) continue;
5615
5616 if (macro->IsTorqueMacro() && TorqueMacro::cast(macro)->IsExportedToCSA()) {
5617 continue;
5618 }
5619 // TODO(gsps): Mark methods of generic structs used if they are used in any
5620 // instantiation
5621 if (Method* method = Method::DynamicCast(macro)) {
5622 if (StructType* struct_type =
5623 StructType::DynamicCast(method->aggregate_type())) {
5624 if (struct_type->GetSpecializedFrom().has_value()) {
5625 continue;
5626 }
5627 }
5628 }
5629
5630 std::vector<std::string> ignored_prefixes = {"Convert<", "Cast<",
5631 "FromConstexpr<"};
5632 const std::string name = macro->ReadableName();
5633    const bool ignore =
5634        StartsWithSingleUnderscore(name) ||
5635 std::any_of(ignored_prefixes.begin(), ignored_prefixes.end(),
5636 [&name](const std::string& prefix) {
5637 return StringStartsWith(name, prefix);
5638 });
5639
5640 if (!ignore) {
5641 Lint("Macro '", macro->ReadableName(), "' is never used.")
5642 .Position(macro->IdentifierPosition());
5643 }
5644 }
5645}
5646
5647} // namespace v8::internal::torque