v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
raw-machine-assembler.h
Go to the documentation of this file.
1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
6#define V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
7
8#include <initializer_list>
9#include <optional>
10#include <type_traits>
11
12#include "src/common/globals.h"
18#include "src/compiler/node.h"
24#include "src/heap/factory.h"
25#include "src/objects/string.h"
26
27namespace v8 {
28namespace internal {
29namespace compiler {
30
31class BasicBlock;
32class RawMachineLabel;
33class Schedule;
34class SourcePositionTable;
35
36// The RawMachineAssembler produces a low-level IR graph. All nodes are wired
37// into a graph and also placed into a schedule immediately, hence subsequent
38// code generation can happen without the need for scheduling.
39//
40// In order to create a schedule on-the-fly, the assembler keeps track of basic
41// blocks by having one current basic block being populated and by referencing
42// other basic blocks through the use of labels.
43//
44// Also note that the generated graph is only valid together with the generated
45// schedule, using one without the other is invalid as the graph is inherently
46// non-schedulable due to missing control and effect dependencies.
48 public:
50 Isolate* isolate, TFGraph* graph, CallDescriptor* call_descriptor,
51 MachineRepresentation word = MachineType::PointerRepresentation(),
53 MachineOperatorBuilder::Flag::kNoFlags,
56 FullUnalignedAccessSupport());
58
61
62 Isolate* isolate() const { return isolate_; }
63 TFGraph* graph() const { return graph_; }
64 Zone* zone() const { return graph()->zone(); }
65 MachineOperatorBuilder* machine() { return &machine_; }
66 CommonOperatorBuilder* common() { return &common_; }
67 SimplifiedOperatorBuilder* simplified() { return &simplified_; }
68 CallDescriptor* call_descriptor() const { return call_descriptor_; }
69
70 // Only used for tests: Finalizes the schedule and exports it to be used for
71 // code generation. Note that this RawMachineAssembler becomes invalid after
72 // export.
73 Schedule* ExportForTest();
74 // Finalizes the schedule and transforms it into a graph that's suitable for
75 // it to be used for Turbofan optimization and re-scheduling. Note that this
76 // RawMachineAssembler becomes invalid after export.
77 TFGraph* ExportForOptimization();
78
79 // ===========================================================================
80 // The following utility methods create new nodes with specific operators and
81 // place them into the current basic block. They don't perform control flow,
82 // hence will not switch the current basic block.
83
84 Node* NullConstant();
85 Node* UndefinedConstant();
86
87 // Constants.
88 Node* PointerConstant(void* value) {
89 return IntPtrConstant(reinterpret_cast<intptr_t>(value));
90 }
91 Node* IntPtrConstant(intptr_t value) {
92 // TODO(dcarney): mark generated code as unserializable if value != 0.
93 return kSystemPointerSize == 8 ? Int64Constant(value)
94 : Int32Constant(static_cast<int>(value));
95 }
96 Node* RelocatableIntPtrConstant(intptr_t value, RelocInfo::Mode rmode);
97 Node* Int32Constant(int32_t value) {
98 return AddNode(common()->Int32Constant(value));
99 }
100 Node* StackSlot(MachineRepresentation rep, int alignment = 0) {
101 return AddNode(machine()->StackSlot(rep, alignment));
102 }
103 Node* StackSlot(int size, int alignment) {
104 return AddNode(machine()->StackSlot(size, alignment));
105 }
106 Node* Int64Constant(int64_t value) {
107 return AddNode(common()->Int64Constant(value));
108 }
109 Node* NumberConstant(double value) {
110 return AddNode(common()->NumberConstant(value));
111 }
112 Node* Float32Constant(float value) {
113 return AddNode(common()->Float32Constant(value));
114 }
115 Node* Float64Constant(double value) {
116 return AddNode(common()->Float64Constant(value));
117 }
119 return AddNode(common()->HeapConstant(object));
120 }
122 return AddNode(common()->ExternalConstant(address));
123 }
125 return AddNode(common()->RelocatableInt32Constant(value, rmode));
126 }
128 return AddNode(common()->RelocatableInt64Constant(value, rmode));
129 }
130
131 Node* Projection(int index, Node* a) {
132 return AddNode(common()->Projection(index), a);
133 }
134
135 // Memory Operations.
137 return Load(type, base, IntPtrConstant(0));
138 }
139 Node* Load(MachineType type, Node* base, Node* index) {
140 const Operator* op = machine()->Load(type);
141 Node* load = AddNode(op, base, index);
142 return load;
143 }
145 return LoadImmutable(type, base, IntPtrConstant(0));
146 }
148 const Operator* op = machine()->LoadImmutable(type);
149 return AddNode(op, base, index);
150 }
152 Int64Matcher m(node);
153 if (m.Is(HeapObject::kMapOffset)) return true;
154 // Test if `node` is a `Phi(Int64Constant(0))`
155 if (node->opcode() == IrOpcode::kPhi) {
156 for (Node* input : node->inputs()) {
157 if (!Int64Matcher(input).Is(HeapObject::kMapOffset)) return false;
158 }
159 return true;
160 }
161 return false;
162 }
164 Int64Matcher m(node);
165 return m.Is(HeapObject::kMapOffset - kHeapObjectTag);
166 }
168 return offset == HeapObject::kMapOffset - kHeapObjectTag;
169 }
171 DCHECK_IMPLIES(V8_MAP_PACKING_BOOL && IsMapOffsetConstantMinusTag(offset),
172 type == MachineType::MapInHeader());
173 ObjectAccess access = {type, WriteBarrierKind::kNoWriteBarrier};
174 Node* load = AddNode(simplified()->LoadFromObject(access), base, offset);
175 return load;
176 }
177
179#if V8_ENABLE_SANDBOX
180 static_assert(COMPRESS_POINTERS_BOOL);
181 Node* tagged = LoadFromObject(MachineType::Int32(), base, offset);
182 Node* trusted_cage_base =
183 LoadImmutable(MachineType::Pointer(), LoadRootRegister(),
184 IntPtrConstant(IsolateData::trusted_cage_base_offset()));
185 return BitcastWordToTagged(
186 WordOr(trusted_cage_base, ChangeUint32ToUint64(tagged)));
187#else
188 return LoadFromObject(MachineType::AnyTagged(), base, offset);
189#endif // V8_ENABLE_SANDBOX
190 }
191
193 WriteBarrierKind write_barrier) {
194 return Store(rep, base, IntPtrConstant(0), value, write_barrier);
195 }
197 WriteBarrierKind write_barrier) {
198 return AddNode(machine()->Store(StoreRepresentation(rep, write_barrier)),
199 base, index, value);
200 }
202 Node* value, WriteBarrierKind write_barrier) {
203 ObjectAccess access = {MachineType::TypeForRepresentation(rep),
204 write_barrier};
205 DCHECK(!IsMapOffsetConstantMinusTag(offset));
206 AddNode(simplified()->StoreToObject(access), object, offset, value);
207 }
209 Node* value, WriteBarrierKind write_barrier) {
210 DCHECK(!IsMapOffsetConstantMinusTag(offset));
211 DCHECK_NE(rep, MachineRepresentation::kIndirectPointer);
212 AddNode(simplified()->StoreField(
213 FieldAccess(BaseTaggedness::kTaggedBase, offset,
214 MaybeHandle<Name>(), OptionalMapRef(), Type::Any(),
215 MachineType::TypeForRepresentation(rep),
216 write_barrier, "OptimizedStoreField")),
217 object, value);
218 }
220 IndirectPointerTag tag, Node* value,
221 WriteBarrierKind write_barrier) {
222 DCHECK(!IsMapOffsetConstantMinusTag(offset));
223 DCHECK(write_barrier == WriteBarrierKind::kNoWriteBarrier ||
224 write_barrier == WriteBarrierKind::kIndirectPointerWriteBarrier);
225 FieldAccess access(BaseTaggedness::kTaggedBase, offset, MaybeHandle<Name>(),
226 OptionalMapRef(), Type::Any(),
227 MachineType::IndirectPointer(), write_barrier,
228 "OptimizedStoreIndirectPointerField");
229 access.indirect_pointer_tag = tag;
230 AddNode(simplified()->StoreField(access), object, value);
231 }
232 void OptimizedStoreMap(Node* object, Node* value,
233 WriteBarrierKind write_barrier = kMapWriteBarrier) {
234 AddNode(simplified()->StoreField(AccessBuilder::ForMap(write_barrier)),
235 object, value);
236 }
237 Node* Retain(Node* value) { return AddNode(common()->Retain(), value); }
238
239 Node* OptimizedAllocate(Node* size, AllocationType allocation);
240
241 // Unaligned memory operations
243 return UnalignedLoad(type, base, IntPtrConstant(0));
244 }
246 MachineRepresentation rep = type.representation();
247 // Tagged or compressed should never be unaligned
248 DCHECK(!(IsAnyTagged(rep) || IsAnyCompressed(rep)));
249 if (machine()->UnalignedLoadSupported(rep)) {
250 return AddNode(machine()->Load(type), base, index);
251 } else {
252 return AddNode(machine()->UnalignedLoad(type), base, index);
253 }
254 }
256 return UnalignedStore(rep, base, IntPtrConstant(0), value);
257 }
259 Node* value) {
260 // Tagged or compressed should never be unaligned
261 DCHECK(!(IsAnyTagged(rep) || IsAnyCompressed(rep)));
262 if (machine()->UnalignedStoreSupported(rep)) {
263 return AddNode(machine()->Store(StoreRepresentation(
264 rep, WriteBarrierKind::kNoWriteBarrier)),
265 base, index, value);
266 } else {
267 return AddNode(
268 machine()->UnalignedStore(UnalignedStoreRepresentation(rep)), base,
269 index, value);
270 }
271 }
272
273 // Atomic memory operations.
276 MachineRepresentation::kWord64);
277 return AddNode(machine()->Word32AtomicLoad(rep), base, index);
278 }
279
281 if (machine()->Is64()) {
282 // This uses Uint64() intentionally: AtomicLoad is not implemented for
283 // Int64(), which is fine because the machine instruction only cares
284 // about words.
285 return AddNode(machine()->Word64AtomicLoad(rep), base, index);
286 } else {
287 return AddNode(machine()->Word32AtomicPairLoad(rep.order()), base, index);
288 }
289 }
290
291#if defined(V8_TARGET_BIG_ENDIAN)
292#define VALUE_HALVES value_high, value
293#else
294#define VALUE_HALVES value, value_high
295#endif
296
298 Node* value) {
299 DCHECK(!IsMapOffsetConstantMinusTag(index));
300 DCHECK_NE(params.representation(), MachineRepresentation::kWord64);
301 return AddNode(machine()->Word32AtomicStore(params), base, index, value);
302 }
303
305 Node* value, Node* value_high) {
306 if (machine()->Is64()) {
307 DCHECK_NULL(value_high);
308 return AddNode(machine()->Word64AtomicStore(params), base, index, value);
309 } else {
310 DCHECK(params.representation() != MachineRepresentation::kTaggedPointer &&
311 params.representation() != MachineRepresentation::kTaggedSigned &&
312 params.representation() != MachineRepresentation::kTagged);
313 return AddNode(machine()->Word32AtomicPairStore(params.order()), base,
314 index, VALUE_HALVES);
315 }
316 }
317
// Generates paired 32-bit and 64-bit atomic read-modify-write helpers.
// Atomic<name> emits a Word32 atomic op (the representation must not be
// kWord64); Atomic<name>64 emits a Word64 atomic op on 64-bit targets and
// falls back to a Word32 atomic-pair op (taking lo/hi halves via
// VALUE_HALVES, which is endianness-ordered) on 32-bit targets.
318#define ATOMIC_FUNCTION(name) \
319 Node* Atomic##name(MachineType type, Node* base, Node* index, Node* value) { \
320 DCHECK_NE(type.representation(), MachineRepresentation::kWord64); \
321 return AddNode(machine()->Word32Atomic##name(type), base, index, value); \
322 } \
323 Node* Atomic##name##64(Node * base, Node * index, Node * value, \
324 Node * value_high) { \
325 if (machine()->Is64()) { \
326 DCHECK_NULL(value_high); \
327 /* This uses Uint64() intentionally: Atomic operations are not */ \
328 /* implemented for Int64(), which is fine because the machine */ \
329 /* instruction only cares because the machine */ \
330 return AddNode(machine()->Word64Atomic##name(MachineType::Uint64()), \
331 base, index, value); \
332 } else { \
333 return AddNode(machine()->Word32AtomicPair##name(), base, index, \
334 VALUE_HALVES); \
335 } \
336 }
337 ATOMIC_FUNCTION(Exchange)
338 ATOMIC_FUNCTION(Add)
339 ATOMIC_FUNCTION(Sub)
340 ATOMIC_FUNCTION(And)
// NOTE(review): the embedded numbering jumps 340 -> 342 here; an
// ATOMIC_FUNCTION(Or) instantiation appears to have been lost in
// extraction — confirm against the upstream header.
342 ATOMIC_FUNCTION(Xor)
343#undef ATOMIC_FUNCTION
344#undef VALUE_HALVES
345
347 Node* old_value, Node* new_value) {
348 DCHECK_NE(type.representation(), MachineRepresentation::kWord64);
349 return AddNode(machine()->Word32AtomicCompareExchange(type), base, index,
350 old_value, new_value);
351 }
352
354 Node* old_value_high, Node* new_value,
355 Node* new_value_high) {
356 if (machine()->Is64()) {
357 DCHECK_NULL(old_value_high);
358 DCHECK_NULL(new_value_high);
359 // This uses Uint64() intentionally: AtomicCompareExchange is not
360 // implemented for Int64(), which is fine because the machine instruction
361 // only cares about words.
362 return AddNode(
363 machine()->Word64AtomicCompareExchange(MachineType::Uint64()), base,
364 index, old_value, new_value);
365 } else {
366 return AddNode(machine()->Word32AtomicPairCompareExchange(), base, index,
367 old_value, old_value_high, new_value, new_value_high);
368 }
369 }
370
372 return AddNode(machine()->MemoryBarrier(order));
373 }
374
375 // Arithmetic Operations.
377 return AddNode(machine()->WordAnd(), a, b);
378 }
379 Node* WordOr(Node* a, Node* b) { return AddNode(machine()->WordOr(), a, b); }
381 return AddNode(machine()->WordXor(), a, b);
382 }
384 return AddNode(machine()->WordShl(), a, b);
385 }
387 return AddNode(machine()->WordShr(), a, b);
388 }
390 return AddNode(machine()->WordSar(), a, b);
391 }
393 return AddNode(machine()->WordSarShiftOutZeros(), a, b);
394 }
396 return AddNode(machine()->WordRor(), a, b);
397 }
399 return AddNode(machine()->WordEqual(), a, b);
400 }
402 return Word32BinaryNot(WordEqual(a, b));
403 }
405 if (machine()->Is32()) {
406 return Word32BitwiseNot(a);
407 } else {
408 return Word64Not(a);
409 }
410 }
411
413 return AddNode(machine()->Word32And(), a, b);
414 }
416 return AddNode(machine()->Word32Or(), a, b);
417 }
419 return AddNode(machine()->Word32Xor(), a, b);
420 }
422 return AddNode(machine()->Word32Shl(), a, b);
423 }
425 return AddNode(machine()->Word32Shr(), a, b);
426 }
428 return AddNode(machine()->Word32Sar(), a, b);
429 }
431 return AddNode(machine()->Word32SarShiftOutZeros(), a, b);
432 }
434 return AddNode(machine()->Word32Ror(), a, b);
435 }
436 Node* Word32Clz(Node* a) { return AddNode(machine()->Word32Clz(), a); }
438 return AddNode(machine()->Word32Equal(), a, b);
439 }
441 return Word32BinaryNot(Word32Equal(a, b));
442 }
443 Node* Word32BitwiseNot(Node* a) { return Word32Xor(a, Int32Constant(-1)); }
444 Node* Word32BinaryNot(Node* a) { return Word32Equal(a, Int32Constant(0)); }
445
447 return AddNode(machine()->Word64And(), a, b);
448 }
450 return AddNode(machine()->Word64Or(), a, b);
451 }
453 return AddNode(machine()->Word64Xor(), a, b);
454 }
456 return AddNode(machine()->Word64Shl(), a, b);
457 }
459 return AddNode(machine()->Word64Shr(), a, b);
460 }
462 return AddNode(machine()->Word64Sar(), a, b);
463 }
465 return AddNode(machine()->Word64Ror(), a, b);
466 }
467 Node* Word64Clz(Node* a) { return AddNode(machine()->Word64Clz(), a); }
469 return AddNode(machine()->Word64Equal(), a, b);
470 }
472 return Word32BinaryNot(Word64Equal(a, b));
473 }
474 Node* Word64Not(Node* a) { return Word64Xor(a, Int64Constant(-1)); }
475
477 return AddNode(machine()->Int32Add(), a, b);
478 }
480 return AddNode(machine()->Int32AddWithOverflow(), a, b);
481 }
483 return AddNode(machine()->Int32Sub(), a, b);
484 }
486 return AddNode(machine()->Int32SubWithOverflow(), a, b);
487 }
489 return AddNode(machine()->Int32Mul(), a, b);
490 }
492 return AddNode(machine()->Int32MulHigh(), a, b);
493 }
495 return AddNode(machine()->Int32MulWithOverflow(), a, b);
496 }
498 return AddNode(machine()->Int32Div(), a, b);
499 }
501 return AddNode(machine()->Int32Mod(), a, b);
502 }
504 return AddNode(machine()->Int32LessThan(), a, b);
505 }
507 return AddNode(machine()->Int32LessThanOrEqual(), a, b);
508 }
510 return AddNode(machine()->Uint32Div(), a, b);
511 }
513 return AddNode(machine()->Uint32LessThan(), a, b);
514 }
516 return AddNode(machine()->Uint32LessThanOrEqual(), a, b);
517 }
519 return AddNode(machine()->Uint32Mod(), a, b);
520 }
522 return AddNode(machine()->Uint32MulHigh(), a, b);
523 }
524 Node* Int32GreaterThan(Node* a, Node* b) { return Int32LessThan(b, a); }
526 return Int32LessThanOrEqual(b, a);
527 }
528 Node* Uint32GreaterThan(Node* a, Node* b) { return Uint32LessThan(b, a); }
530 return Uint32LessThanOrEqual(b, a);
531 }
532 Node* Int32Neg(Node* a) { return Int32Sub(Int32Constant(0), a); }
533
535 return AddNode(machine()->Int64Add(), a, b);
536 }
538 return AddNode(machine()->Int64AddWithOverflow(), a, b);
539 }
541 return AddNode(machine()->Int64Sub(), a, b);
542 }
544 return AddNode(machine()->Int64SubWithOverflow(), a, b);
545 }
547 return AddNode(machine()->Int64Mul(), a, b);
548 }
550 return AddNode(machine()->Int64MulHigh(), a, b);
551 }
553 return AddNode(machine()->Uint64MulHigh(), a, b);
554 }
556 return AddNode(machine()->Int64MulWithOverflow(), a, b);
557 }
559 return AddNode(machine()->Int64Div(), a, b);
560 }
562 return AddNode(machine()->Int64Mod(), a, b);
563 }
564 Node* Int64Neg(Node* a) { return Int64Sub(Int64Constant(0), a); }
566 return AddNode(machine()->Int64LessThan(), a, b);
567 }
569 return AddNode(machine()->Int64LessThanOrEqual(), a, b);
570 }
572 return AddNode(machine()->Uint64LessThan(), a, b);
573 }
575 return AddNode(machine()->Uint64LessThanOrEqual(), a, b);
576 }
577 Node* Int64GreaterThan(Node* a, Node* b) { return Int64LessThan(b, a); }
579 return Int64LessThanOrEqual(b, a);
580 }
581 Node* Uint64GreaterThan(Node* a, Node* b) { return Uint64LessThan(b, a); }
583 return Uint64LessThanOrEqual(b, a);
584 }
586 return AddNode(machine()->Uint64Div(), a, b);
587 }
589 return AddNode(machine()->Uint64Mod(), a, b);
590 }
591 Node* Int32PairAdd(Node* a_low, Node* a_high, Node* b_low, Node* b_high) {
592 return AddNode(machine()->Int32PairAdd(), a_low, a_high, b_low, b_high);
593 }
594 Node* Int32PairSub(Node* a_low, Node* a_high, Node* b_low, Node* b_high) {
595 return AddNode(machine()->Int32PairSub(), a_low, a_high, b_low, b_high);
596 }
597 Node* Int32PairMul(Node* a_low, Node* a_high, Node* b_low, Node* b_high) {
598 return AddNode(machine()->Int32PairMul(), a_low, a_high, b_low, b_high);
599 }
600 Node* Word32PairShl(Node* low_word, Node* high_word, Node* shift) {
601 return AddNode(machine()->Word32PairShl(), low_word, high_word, shift);
602 }
603 Node* Word32PairShr(Node* low_word, Node* high_word, Node* shift) {
604 return AddNode(machine()->Word32PairShr(), low_word, high_word, shift);
605 }
606 Node* Word32PairSar(Node* low_word, Node* high_word, Node* shift) {
607 return AddNode(machine()->Word32PairSar(), low_word, high_word, shift);
608 }
610 return AddNode(machine()->Word32Popcnt().op(), a);
611 }
613 return AddNode(machine()->Word64Popcnt().op(), a);
614 }
615 Node* Word32Ctz(Node* a) { return AddNode(machine()->Word32Ctz().op(), a); }
616 Node* Word64Ctz(Node* a) { return AddNode(machine()->Word64Ctz().op(), a); }
617
619 return AddNode(machine()->Word32Select().op(), condition, b, c);
620 }
621
623 return AddNode(machine()->Word64Select().op(), condition, b, c);
624 }
625
627 return AddNode(
628 machine()->StackPointerGreaterThan(StackCheckKind::kCodeStubAssembler),
629 value);
630 }
631
// Generates pointer-width signed binary operations: IntPtr<name> forwards
// to the 64-bit variant when the system pointer size is 8 bytes, and to
// the 32-bit variant otherwise. |prefix| selects the operator family
// (Int for arithmetic/comparisons, Word for bitwise equality).
632#define INTPTR_BINOP(prefix, name) \
633 Node* IntPtr##name(Node* a, Node* b) { \
634 return kSystemPointerSize == 8 ? prefix##64##name(a, b) \
635 : prefix##32##name(a, b); \
636 }
637
638 INTPTR_BINOP(Int, Add)
639 INTPTR_BINOP(Int, AddWithOverflow)
640 INTPTR_BINOP(Int, Sub)
641 INTPTR_BINOP(Int, SubWithOverflow)
642 INTPTR_BINOP(Int, Mul)
643 INTPTR_BINOP(Int, MulHigh)
644 INTPTR_BINOP(Int, MulWithOverflow)
645 INTPTR_BINOP(Int, Div)
646 INTPTR_BINOP(Int, Mod)
647 INTPTR_BINOP(Int, LessThan)
648 INTPTR_BINOP(Int, LessThanOrEqual)
649 INTPTR_BINOP(Word, Equal)
650 INTPTR_BINOP(Word, NotEqual)
651 INTPTR_BINOP(Int, GreaterThanOrEqual)
652 INTPTR_BINOP(Int, GreaterThan)
653
654#undef INTPTR_BINOP
655
// Generates pointer-width unsigned binary operations, mirroring
// INTPTR_BINOP: UintPtr<name> dispatches to the 64- or 32-bit Uint
// variant based on the system pointer size.
656#define UINTPTR_BINOP(prefix, name) \
657 Node* UintPtr##name(Node* a, Node* b) { \
658 return kSystemPointerSize == 8 ? prefix##64##name(a, b) \
659 : prefix##32##name(a, b); \
660 }
661
662 UINTPTR_BINOP(Uint, LessThan)
663 UINTPTR_BINOP(Uint, LessThanOrEqual)
664 UINTPTR_BINOP(Uint, GreaterThanOrEqual)
665 UINTPTR_BINOP(Uint, GreaterThan)
666 UINTPTR_BINOP(Uint, MulHigh)
667
668#undef UINTPTR_BINOP
669
671 return AddNode(machine()->Int32AbsWithOverflow().op(), a);
672 }
673
675 return AddNode(machine()->Int64AbsWithOverflow().op(), a);
676 }
677
679 return kSystemPointerSize == 8 ? Int64AbsWithOverflow(a)
680 : Int32AbsWithOverflow(a);
681 }
682
684 return AddNode(machine()->Float32Add(), a, b);
685 }
687 return AddNode(machine()->Float32Sub(), a, b);
688 }
690 return AddNode(machine()->Float32Mul(), a, b);
691 }
693 return AddNode(machine()->Float32Div(), a, b);
694 }
695 Node* Float32Abs(Node* a) { return AddNode(machine()->Float32Abs(), a); }
696 Node* Float32Neg(Node* a) { return AddNode(machine()->Float32Neg(), a); }
697 Node* Float32Sqrt(Node* a) { return AddNode(machine()->Float32Sqrt(), a); }
699 return AddNode(machine()->Float32Equal(), a, b);
700 }
702 return Word32BinaryNot(Float32Equal(a, b));
703 }
705 return AddNode(machine()->Float32LessThan(), a, b);
706 }
708 return AddNode(machine()->Float32LessThanOrEqual(), a, b);
709 }
710 Node* Float32GreaterThan(Node* a, Node* b) { return Float32LessThan(b, a); }
712 return Float32LessThanOrEqual(b, a);
713 }
715 return AddNode(machine()->Float32Max(), a, b);
716 }
718 return AddNode(machine()->Float32Min(), a, b);
719 }
721 return AddNode(machine()->Float64Add(), a, b);
722 }
724 return AddNode(machine()->Float64Sub(), a, b);
725 }
727 return AddNode(machine()->Float64Mul(), a, b);
728 }
730 return AddNode(machine()->Float64Div(), a, b);
731 }
733 return AddNode(machine()->Float64Mod(), a, b);
734 }
736 return AddNode(machine()->Float64Max(), a, b);
737 }
739 return AddNode(machine()->Float64Min(), a, b);
740 }
741 Node* Float64Abs(Node* a) { return AddNode(machine()->Float64Abs(), a); }
742 Node* Float64Neg(Node* a) { return AddNode(machine()->Float64Neg(), a); }
743 Node* Float64Acos(Node* a) { return AddNode(machine()->Float64Acos(), a); }
744 Node* Float64Acosh(Node* a) { return AddNode(machine()->Float64Acosh(), a); }
745 Node* Float64Asin(Node* a) { return AddNode(machine()->Float64Asin(), a); }
746 Node* Float64Asinh(Node* a) { return AddNode(machine()->Float64Asinh(), a); }
747 Node* Float64Atan(Node* a) { return AddNode(machine()->Float64Atan(), a); }
748 Node* Float64Atanh(Node* a) { return AddNode(machine()->Float64Atanh(), a); }
750 return AddNode(machine()->Float64Atan2(), a, b);
751 }
752 Node* Float64Cbrt(Node* a) { return AddNode(machine()->Float64Cbrt(), a); }
753 Node* Float64Cos(Node* a) { return AddNode(machine()->Float64Cos(), a); }
754 Node* Float64Cosh(Node* a) { return AddNode(machine()->Float64Cosh(), a); }
755 Node* Float64Exp(Node* a) { return AddNode(machine()->Float64Exp(), a); }
756 Node* Float64Expm1(Node* a) { return AddNode(machine()->Float64Expm1(), a); }
757 Node* Float64Log(Node* a) { return AddNode(machine()->Float64Log(), a); }
758 Node* Float64Log1p(Node* a) { return AddNode(machine()->Float64Log1p(), a); }
759 Node* Float64Log10(Node* a) { return AddNode(machine()->Float64Log10(), a); }
760 Node* Float64Log2(Node* a) { return AddNode(machine()->Float64Log2(), a); }
762 return AddNode(machine()->Float64Pow(), a, b);
763 }
764 Node* Float64Sin(Node* a) { return AddNode(machine()->Float64Sin(), a); }
765 Node* Float64Sinh(Node* a) { return AddNode(machine()->Float64Sinh(), a); }
766 Node* Float64Sqrt(Node* a) { return AddNode(machine()->Float64Sqrt(), a); }
767 Node* Float64Tan(Node* a) { return AddNode(machine()->Float64Tan(), a); }
768 Node* Float64Tanh(Node* a) { return AddNode(machine()->Float64Tanh(), a); }
770 return AddNode(machine()->Float64Equal(), a, b);
771 }
773 return Word32BinaryNot(Float64Equal(a, b));
774 }
776 return AddNode(machine()->Float64LessThan(), a, b);
777 }
779 return AddNode(machine()->Float64LessThanOrEqual(), a, b);
780 }
781 Node* Float64GreaterThan(Node* a, Node* b) { return Float64LessThan(b, a); }
783 return Float64LessThanOrEqual(b, a);
784 }
786 return AddNode(machine()->Float32Select().op(), condition, b, c);
787 }
789 return AddNode(machine()->Float64Select().op(), condition, b, c);
790 }
791
792 // Conversions.
794 return AddNode(machine()->BitcastTaggedToWord(), a);
795 }
797 return AddNode(machine()->BitcastTaggedToWordForTagAndSmiBits(), a);
798 }
800 return AddNode(machine()->BitcastMaybeObjectToWord(), a);
801 }
803 return AddNode(machine()->BitcastWordToTagged(), a);
804 }
806 return AddNode(machine()->BitcastWordToTaggedSigned(), a);
807 }
809 return AddNode(machine()->TruncateFloat64ToWord32(), a);
810 }
812 return AddNode(machine()->ChangeFloat32ToFloat64(), a);
813 }
815 return AddNode(machine()->ChangeInt32ToFloat64(), a);
816 }
818 return AddNode(machine()->ChangeInt64ToFloat64(), a);
819 }
821 return AddNode(machine()->ChangeUint32ToFloat64(), a);
822 }
824 return AddNode(machine()->ChangeFloat64ToInt32(), a);
825 }
827 return AddNode(machine()->ChangeFloat64ToInt64(), a);
828 }
830 return AddNode(machine()->ChangeFloat64ToUint32(), a);
831 }
833 return AddNode(machine()->ChangeFloat64ToUint64(), a);
834 }
836 return AddNode(machine()->TruncateFloat64ToUint32(), a);
837 }
839 return AddNode(machine()->TruncateFloat32ToInt32(kind), a);
840 }
842 return AddNode(machine()->TruncateFloat32ToUint32(kind), a);
843 }
845 return AddNode(machine()->TruncateFloat64ToInt64(kind), a);
846 }
848 return AddNode(machine()->TryTruncateFloat32ToInt64(), a);
849 }
851 return AddNode(machine()->TryTruncateFloat64ToInt64(), a);
852 }
854 return AddNode(machine()->TryTruncateFloat32ToUint64(), a);
855 }
857 return AddNode(machine()->TryTruncateFloat64ToUint64(), a);
858 }
860 return AddNode(machine()->TryTruncateFloat64ToInt32(), a);
861 }
863 return AddNode(machine()->TryTruncateFloat64ToUint32(), a);
864 }
866 return AddNode(machine()->ChangeInt32ToInt64(), a);
867 }
869 if (kSystemPointerSize == 8) {
870 return ChangeInt32ToInt64(a);
871 } else {
872 return a;
873 }
874 }
876 return AddNode(machine()->ChangeUint32ToUint64(), a);
877 }
879 return AddNode(machine()->TruncateFloat64ToFloat32(), a);
880 }
882 return AddNode(machine()->TruncateFloat64ToFloat16RawBits().placeholder(),
883 a);
884 }
886 return AddNode(machine()->TruncateInt64ToInt32(), a);
887 }
889 return AddNode(machine()->RoundFloat64ToInt32(), a);
890 }
892 return AddNode(machine()->RoundInt32ToFloat32(), a);
893 }
895 return AddNode(machine()->RoundInt64ToFloat32(), a);
896 }
898 return AddNode(machine()->RoundInt64ToFloat64(), a);
899 }
901 return AddNode(machine()->RoundUint32ToFloat32(), a);
902 }
904 return AddNode(machine()->RoundUint64ToFloat32(), a);
905 }
907 return AddNode(machine()->RoundUint64ToFloat64(), a);
908 }
910 return AddNode(machine()->BitcastFloat32ToInt32(), a);
911 }
913 return AddNode(machine()->BitcastFloat64ToInt64(), a);
914 }
916 return AddNode(machine()->BitcastInt32ToFloat32(), a);
917 }
919 return AddNode(machine()->BitcastInt64ToFloat64(), a);
920 }
922 return AddNode(machine()->Float32RoundDown().op(), a);
923 }
925 return AddNode(machine()->Float64RoundDown().placeholder(), a);
926 }
928 return AddNode(machine()->Float32RoundUp().op(), a);
929 }
931 return AddNode(machine()->Float64RoundUp().placeholder(), a);
932 }
934 return AddNode(machine()->Float32RoundTruncate().op(), a);
935 }
937 return AddNode(machine()->Float64RoundTruncate().placeholder(), a);
938 }
940 return AddNode(machine()->Float64RoundTiesAway().op(), a);
941 }
943 return AddNode(machine()->Float32RoundTiesEven().op(), a);
944 }
946 return AddNode(machine()->Float64RoundTiesEven().placeholder(), a);
947 }
949 return AddNode(machine()->Word32ReverseBytes(), a);
950 }
952 return AddNode(machine()->Word64ReverseBytes(), a);
953 }
954
955 // Float64 bit operations.
957 return AddNode(machine()->Float64ExtractLowWord32(), a);
958 }
960 return AddNode(machine()->Float64ExtractHighWord32(), a);
961 }
963 return AddNode(machine()->Float64InsertLowWord32(), a, b);
964 }
966 return AddNode(machine()->Float64InsertHighWord32(), a, b);
967 }
969 return AddNode(machine()->Float64SilenceNaN(), a);
970 }
971
972 // Stack operations.
973 Node* LoadFramePointer() { return AddNode(machine()->LoadFramePointer()); }
975 return AddNode(machine()->LoadParentFramePointer());
976 }
977
978 // SIMD operations that are needed outside of Wasm (e.g. in swisstable).
979 Node* I8x16Splat(Node* a) { return AddNode(machine()->I8x16Splat(), a); }
980 Node* I8x16BitMask(Node* a) { return AddNode(machine()->I8x16BitMask(), a); }
982 return AddNode(machine()->I8x16Eq(), a, b);
983 }
984
985#if V8_ENABLE_WEBASSEMBLY
986 // SIMD operations.
987 Node* S128Const(const uint8_t value[16]) {
988 return AddNode(machine()->S128Const(value));
989 }
990 Node* I64x2Splat(Node* a) { return AddNode(machine()->I64x2Splat(), a); }
991 Node* I64x2SplatI32Pair(Node* a, Node* b) {
992 return AddNode(machine()->I64x2SplatI32Pair(), a, b);
993 }
994 Node* I32x4Splat(Node* a) { return AddNode(machine()->I32x4Splat(), a); }
995 Node* I16x8Splat(Node* a) { return AddNode(machine()->I16x8Splat(), a); }
996
997 Node* LoadStackPointer() { return AddNode(machine()->LoadStackPointer()); }
998 void SetStackPointer(Node* ptr) {
999 AddNode(machine()->SetStackPointer(), ptr);
1000 }
1001#endif
1002
1003 // Parameters.
1004 Node* TargetParameter();
1005 Node* Parameter(size_t index);
1006 Node* LoadRootRegister() { return AddNode(machine()->LoadRootRegister()); }
1007
1008 // Pointer utilities.
1009 Node* LoadFromPointer(void* address, MachineType type, int32_t offset = 0) {
1010 return Load(type, PointerConstant(address), Int32Constant(offset));
1011 }
1012 Node* StoreToPointer(void* address, MachineRepresentation rep, Node* node) {
1013 return Store(rep, PointerConstant(address), node, kNoWriteBarrier);
1014 }
1016 int32_t offset = 0) {
1017 return UnalignedLoad(type, PointerConstant(address), Int32Constant(offset));
1018 }
1020 Node* node) {
1021 return UnalignedStore(rep, PointerConstant(address), node);
1022 }
1023 Node* StringConstant(const char* string) {
1024 return HeapConstant(isolate()->factory()->InternalizeUtf8String(string));
1025 }
1026
1027 // Call a given call descriptor and the given arguments.
1028 // The call target is passed as part of the {inputs} array.
1029 Node* CallN(CallDescriptor* call_descriptor, int input_count,
1030 Node* const* inputs);
1031
1032 // Call a given call descriptor and the given arguments and frame-state.
1033 // The call target and frame state are passed as part of the {inputs} array.
1034 Node* CallNWithFrameState(CallDescriptor* call_descriptor, int input_count,
1035 Node* const* inputs);
1036
1037 // Tail call a given call descriptor and the given arguments.
1038 // The call target is passed as part of the {inputs} array.
1039 void TailCallN(CallDescriptor* call_descriptor, int input_count,
1040 Node* const* inputs);
1041
1042 // Type representing C function argument with type info.
1043 using CFunctionArg = std::pair<MachineType, Node*>;
1044
1045 // Call to a C function.
// Variadic convenience overload: each |cargs| must be convertible to
// CFunctionArg (a {MachineType, Node*} pair); the pack is collected into
// an initializer_list and forwarded to the non-template CallCFunction.
// |return_type| of nullopt indicates a call with no (used) return value.
1046 template <class... CArgs>
1047 Node* CallCFunction(Node* function, std::optional<MachineType> return_type,
1048 CArgs... cargs) {
1049 static_assert(
1050 std::conjunction_v<std::is_convertible<CArgs, CFunctionArg>...>,
1051 "invalid argument types");
1052 return CallCFunction(function, return_type, {cargs...});
1053 }
1054
1055 Node* CallCFunction(Node* function, std::optional<MachineType> return_type,
1056 std::initializer_list<CFunctionArg> args);
1057
1058 // Call to a C function without a function discriptor on AIX.
1059 template <class... CArgs>
1061 MachineType return_type,
1062 CArgs... cargs) {
1063 static_assert(
1064 std::conjunction_v<std::is_convertible<CArgs, CFunctionArg>...>,
1065 "invalid argument types");
1066 return CallCFunctionWithoutFunctionDescriptor(function, return_type,
1067 {cargs...});
1068 }
1069
1071 Node* function, MachineType return_type,
1072 std::initializer_list<CFunctionArg> args);
1073
1074 // Call to a C function, while saving/restoring caller registers.
1075 template <class... CArgs>
1077 MachineType return_type,
1078 SaveFPRegsMode mode,
1079 CArgs... cargs) {
1080 static_assert(
1081 std::conjunction_v<std::is_convertible<CArgs, CFunctionArg>...>,
1082 "invalid argument types");
1083 return CallCFunctionWithCallerSavedRegisters(function, return_type, mode,
1084 {cargs...});
1085 }
1086
1088 Node* function, MachineType return_type, SaveFPRegsMode mode,
1089 std::initializer_list<CFunctionArg> args);
1090
  // ===========================================================================
  // The following utility methods deal with control flow, hence might switch
  // the current basic block or create new basic blocks for labels.

  // Control flow.
  // Unconditionally jumps to {label}, ending the current basic block.
  void Goto(RawMachineLabel* label);
  // Ends the current block with a two-way branch on {condition}.
  void Branch(Node* condition, RawMachineLabel* true_val,
              RawMachineLabel* false_val,
              BranchHint branch_hint = BranchHint::kNone);
  // Ends the current block with a multi-way switch on {index}; {case_values}
  // and {case_labels} are parallel arrays of length {case_count}.
  void Switch(Node* index, RawMachineLabel* default_label,
              const int32_t* case_values, RawMachineLabel** case_labels,
              size_t case_count);
  // Ends the current block by returning the given value(s).
  void Return(Node* value);
  void Return(Node* v1, Node* v2);
  void Return(Node* v1, Node* v2, Node* v3);
  void Return(Node* v1, Node* v2, Node* v3, Node* v4);
  void Return(int count, Node* v[]);
  // Like Return; {pop} presumably determines how many extra stack slots are
  // popped before returning — TODO(review): confirm exact semantics of {pop}.
  void PopAndReturn(Node* pop, Node* value);
  void PopAndReturn(Node* pop, Node* v1, Node* v2);
  void PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3);
  void PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3, Node* v4);
  // Binds {label}, making its basic block the current one; subsequently added
  // nodes are placed into that block.
  void Bind(RawMachineLabel* label);
  void Deoptimize(Node* state);
  void AbortCSADcheck(Node* message);
  void DebugBreak();
  // Marks the current control path as unreachable.
  void Unreachable();
  void Comment(const std::string& msg);
  void StaticAssert(Node* value, const char* source);

#if DEBUG
  // Debug-only variants carrying assembler debug info for diagnostics.
  void Bind(RawMachineLabel* label, AssemblerDebugInfo info);
  void SetInitialDebugInformation(AssemblerDebugInfo info);
  void PrintCurrentBlock(std::ostream& os);
#endif // DEBUG
  bool InsideBlock();

  // Add success / exception successor blocks and ends the current block ending
  // in a potentially throwing call node.
  void Continuations(Node* call, RawMachineLabel* if_success,
                     RawMachineLabel* if_exception);
1131
1132 // Variables.
1134 return AddNode(common()->Phi(rep, 2), n1, n2, graph()->start());
1135 }
1137 return AddNode(common()->Phi(rep, 3), n1, n2, n3, graph()->start());
1138 }
1139 Node* Phi(MachineRepresentation rep, Node* n1, Node* n2, Node* n3, Node* n4) {
1140 return AddNode(common()->Phi(rep, 4), n1, n2, n3, n4, graph()->start());
1141 }
  // Variable-arity Phi: {inputs} holds {input_count} value inputs.
  Node* Phi(MachineRepresentation rep, int input_count, Node* const* inputs);
  // Appends {new_input} as an additional value input to an existing {phi}.
  void AppendPhiInput(Node* phi, Node* new_input);
1144
  // ===========================================================================
  // The following generic node creation methods can be used for operators that
  // are not covered by the above utility methods. There should rarely be a need
  // to do that outside of testing though.

  // Creates and schedules a node for {op} with the given value inputs.
  Node* AddNode(const Operator* op, int input_count, Node* const* inputs);
1152 Node* AddNode(const Operator* op) {
1153 return AddNode(op, 0, static_cast<Node* const*>(nullptr));
1154 }
1155
1156 template <class... TArgs>
1157 Node* AddNode(const Operator* op, Node* n1, TArgs... args) {
1158 Node* buffer[] = {n1, args...};
1159 return AddNode(op, sizeof...(args) + 1, buffer);
1160 }
1161
1162 void SetCurrentExternalSourcePosition(FileAndLine file_and_line);
1163 FileAndLine GetCurrentExternalSourcePosition() const;
1164 SourcePositionTable* source_positions() { return source_positions_; }
1165
1166 // The parameter count of the code, as specified by the call descriptor.
1167 size_t parameter_count() const { return call_descriptor_->ParameterCount(); }
1168
1169 // Most of the time, the parameter count is static and known at
1170 // code-generation time through the call descriptor. However, certain
1171 // varargs JS builtins can be used for different functions with different
1172 // JS parameter counts. In those (rare) cases, we need to obtain the actual
1173 // parameter count of the function object through which the code is invoked
1174 // to be able to determine the total argument count (including padding
1175 // arguments), which is in turn required to pop all arguments from the stack
1176 // in the function epilogue.
1177 //
1178 // If we're generating the code for one of these special builtins, this
1179 // function will return a node containing the actual JS parameter count.
1180 // Otherwise it will be nullptr.
1181 //
1182 // TODO(saelo): it would be a bit nicer if we could automatically determine
1183 // that the dynamic parameter count is required (for example from the call
1184 // descriptor) and then directly fetch it in the prologue and use it in the
1185 // epilogue without the higher-level assemblers having to get involved. It's
1186 // not clear if it's worth the effort though for the handful of builtins that
1187 // work this way though.
1188 Node* dynamic_js_parameter_count() { return dynamic_js_parameter_count_; }
1190 dynamic_js_parameter_count_ = parameter_count;
1191 }
1192
 private:
  // Creates a node without scheduling it — presumably the raw counterpart of
  // AddNode; TODO(review): confirm against the .cc file.
  Node* MakeNode(const Operator* op, int input_count, Node* const* inputs);
  // Returns the basic block for {label}, creating it on first use.
  BasicBlock* EnsureBlock(RawMachineLabel* label);
  BasicBlock* CurrentBlock();

  // A post-processing pass to add effect and control edges so that the graph
  // can be optimized and re-scheduled.
  // TODO(turbofan): Move this to a separate class.
  void MakeReschedulable();
  Node* CreateNodeFromPredecessors(const std::vector<BasicBlock*>& predecessors,
                                   const std::vector<Node*>& sidetable,
                                   const Operator* op,
                                   const std::vector<Node*>& additional_inputs);
  void MakePhiBinary(Node* phi, int split_point, Node* left_control,
                     Node* right_control);
  void MarkControlDeferred(Node* control_input);

  static void OptimizeControlFlow(Schedule* schedule, TFGraph* graph,
                                  CommonOperatorBuilder* common);

  // NOTE(review): several private data-member declarations were lost in
  // extraction here — verify against the upstream header before relying on
  // the member list below.
  // See the dynamic_js_parameter_count() getter for an explanation of this
  // field. If we're generating the code for a builtin that needs to obtain the
  // parameter count at runtime, then this field will contain a node storing
  // the actual parameter count. Otherwise it will be nullptr.
};
1234
1236 public:
1238
1239 explicit RawMachineLabel(Type type = kNonDeferred)
1240 : deferred_(type == kDeferred) {}
1244
1245 BasicBlock* block() const { return block_; }
1246
1247 private:
1248 BasicBlock* block_ = nullptr;
1249 bool used_ = false;
1250 bool bound_ = false;
1253};
1254
1255} // namespace compiler
1256} // namespace internal
1257} // namespace v8
1258
1259#endif // V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
Schedule * schedule
TFGraph * graph
SimplifiedOperatorBuilder * simplified
Isolate * isolate_
int16_t parameter_count
Definition builtins.cc:67
Builtins::Kind kind
Definition builtins.cc:40
constexpr MachineRepresentation representation() const
Inputs inputs() const
Definition node.h:478
Node * CallCFunction(Node *function, std::optional< MachineType > return_type, std::initializer_list< CFunctionArg > args)
Node * Float32Select(Node *condition, Node *b, Node *c)
Node * HeapConstant(Handle< HeapObject > object)
Node * RelocatableInt32Constant(int32_t value, RelocInfo::Mode rmode)
void StoreToObject(MachineRepresentation rep, Node *object, Node *offset, Node *value, WriteBarrierKind write_barrier)
Node * Word32Select(Node *condition, Node *b, Node *c)
Node * Store(MachineRepresentation rep, Node *base, Node *index, Node *value, WriteBarrierKind write_barrier)
Node * UnalignedStore(MachineRepresentation rep, Node *base, Node *index, Node *value)
void OptimizedStoreIndirectPointerField(Node *object, int offset, IndirectPointerTag tag, Node *value, WriteBarrierKind write_barrier)
Node * CallCFunctionWithCallerSavedRegisters(Node *function, MachineType return_type, SaveFPRegsMode mode, std::initializer_list< CFunctionArg > args)
void OptimizedStoreField(MachineRepresentation rep, Node *object, int offset, Node *value, WriteBarrierKind write_barrier)
void OptimizedStoreMap(Node *object, Node *value, WriteBarrierKind write_barrier=kMapWriteBarrier)
Node * AtomicLoad64(AtomicLoadParameters rep, Node *base, Node *index)
Node * Load(MachineType type, Node *base, Node *index)
Node * AddNode(const Operator *op, Node *n1, TArgs... args)
Node * AtomicLoad(AtomicLoadParameters rep, Node *base, Node *index)
Node * Phi(MachineRepresentation rep, Node *n1, Node *n2)
Node * AtomicStore64(AtomicStoreParameters params, Node *base, Node *index, Node *value, Node *value_high)
Node * TruncateFloat64ToInt64(Node *a, TruncateKind kind)
Node * Phi(MachineRepresentation rep, Node *n1, Node *n2, Node *n3, Node *n4)
Node * UnalignedLoadFromPointer(void *address, MachineType type, int32_t offset=0)
Node * Word32PairShl(Node *low_word, Node *high_word, Node *shift)
Node * Word64Select(Node *condition, Node *b, Node *c)
Node * Word32PairSar(Node *low_word, Node *high_word, Node *shift)
Node * LoadImmutable(MachineType type, Node *base)
Node * UnalignedStoreToPointer(void *address, MachineRepresentation rep, Node *node)
Node * Load(MachineType type, Node *base)
Node * RelocatableInt64Constant(int64_t value, RelocInfo::Mode rmode)
Node * AtomicCompareExchange64(Node *base, Node *index, Node *old_value, Node *old_value_high, Node *new_value, Node *new_value_high)
Node * Float64Select(Node *condition, Node *b, Node *c)
Node * AtomicCompareExchange(MachineType type, Node *base, Node *index, Node *old_value, Node *new_value)
Node * Store(MachineRepresentation rep, Node *base, Node *value, WriteBarrierKind write_barrier)
Node * ExternalConstant(ExternalReference address)
Node * Int32PairAdd(Node *a_low, Node *a_high, Node *b_low, Node *b_high)
RawMachineAssembler(const RawMachineAssembler &)=delete
Node * LoadImmutable(MachineType type, Node *base, Node *index)
Node * UnalignedLoad(MachineType type, Node *base)
RawMachineAssembler & operator=(const RawMachineAssembler &)=delete
Node * TruncateFloat32ToInt32(Node *a, TruncateKind kind)
Node * TruncateFloat32ToUint32(Node *a, TruncateKind kind)
Node * Phi(MachineRepresentation rep, Node *n1, Node *n2, Node *n3)
Node * Int32PairMul(Node *a_low, Node *a_high, Node *b_low, Node *b_high)
Node * AtomicStore(AtomicStoreParameters params, Node *base, Node *index, Node *value)
Node * Int32PairSub(Node *a_low, Node *a_high, Node *b_low, Node *b_high)
Node * CallCFunctionWithCallerSavedRegisters(Node *function, MachineType return_type, SaveFPRegsMode mode, CArgs... cargs)
Node * StackSlot(MachineRepresentation rep, int alignment=0)
Node * CallCFunctionWithoutFunctionDescriptor(Node *function, MachineType return_type, std::initializer_list< CFunctionArg > args)
Node * CallCFunction(Node *function, std::optional< MachineType > return_type, CArgs... cargs)
Node * UnalignedStore(MachineRepresentation rep, Node *base, Node *value)
Node * LoadFromObject(MachineType type, Node *base, Node *offset)
Node * Word32PairShr(Node *low_word, Node *high_word, Node *shift)
void set_dynamic_js_parameter_count(Node *parameter_count)
Node * LoadFromPointer(void *address, MachineType type, int32_t offset=0)
Node * StoreToPointer(void *address, MachineRepresentation rep, Node *node)
Node * LoadProtectedPointerFromObject(Node *base, Node *offset)
Node * CallCFunctionWithoutFunctionDescriptor(Node *function, MachineType return_type, CArgs... cargs)
Node * UnalignedLoad(MachineType type, Node *base, Node *index)
RawMachineLabel(const RawMachineLabel &)=delete
RawMachineLabel & operator=(const RawMachineLabel &)=delete
#define ATOMIC_FUNCTION(name)
#define V8_MAP_PACKING_BOOL
Definition globals.h:93
#define COMPRESS_POINTERS_BOOL
Definition globals.h:99
int start
base::Vector< const DirectHandle< Object > > args
Definition execution.cc:74
Label label
Isolate * isolate
int32_t offset
std::optional< TNode< JSArray > > a
Schedule const *const schedule_
int m
Definition mul-fft.cc:294
NumberConstant(std::numeric_limits< double >::quiet_NaN())) DEFINE_GETTER(EmptyStateValues
TNode< Float64T > Float64Add(TNode< Float64T > a, TNode< Float64T > b)
bool Is(IndirectHandle< U > value)
Definition handles-inl.h:51
constexpr bool IsAnyTagged(MachineRepresentation rep)
constexpr bool IsAnyCompressed(MachineRepresentation rep)
constexpr int kSystemPointerSize
Definition globals.h:410
const int kHeapObjectTag
Definition v8-internal.h:72
constexpr bool Is64()
std::pair< const char *, int > FileAndLine
Definition globals.h:2481
i::Address Load(i::Address address)
Definition unwinder.cc:19
#define VALUE_HALVES
#define UINTPTR_BINOP(prefix, name)
#define INTPTR_BINOP(prefix, name)
#define DCHECK_NULL(val)
Definition logging.h:491
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define DCHECK(condition)
Definition logging.h:482
#define V8_EXPORT_PRIVATE
Definition macros.h:460
TFGraph * graph_
wasm::ValueType type