V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.

baseline-assembler-arm64-inl.h
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_BASELINE_ARM64_BASELINE_ASSEMBLER_ARM64_INL_H_
#define V8_BASELINE_ARM64_BASELINE_ASSEMBLER_ARM64_INL_H_

#include "src/baseline/baseline-assembler.h"
#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
#include "src/codegen/interface-descriptors.h"
#include "src/objects/literal-objects-inl.h"

namespace v8 {
namespace internal {
namespace baseline {

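// Scratch scopes nest: each one wraps the MacroAssembler's
// UseScratchRegisterScope, and the outermost scope additionally donates
// x14, x15 and x19 (registers baseline code otherwise leaves unused) to
// the scratch pool.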
class BaselineAssembler::ScratchRegisterScope {
 public:
  explicit ScratchRegisterScope(BaselineAssembler* assembler)
      : assembler_(assembler),
        prev_scope_(assembler->scratch_register_scope_),
        wrapped_scope_(assembler->masm()) {
    if (!assembler_->scratch_register_scope_) {
      // If we haven't opened a scratch scope yet, for the first one add a
      // couple of extra registers.
      wrapped_scope_.Include(x14, x15);
      wrapped_scope_.Include(x19);
    }
    assembler_->scratch_register_scope_ = this;
  }
  ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }

  Register AcquireScratch() { return wrapped_scope_.AcquireX(); }

 private:
  BaselineAssembler* assembler_;
  ScratchRegisterScope* prev_scope_;
  UseScratchRegisterScope wrapped_scope_;
};

namespace detail {

#ifdef DEBUG
inline bool Clobbers(Register target, MemOperand op) {
  return op.base() == target || op.regoffset() == target;
}
#endif

}  // namespace detail

#define __ masm_->

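// Interpreter registers live at fixed slots in the baseline frame;
// ToOperand() yields the signed slot index relative to fp.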
MemOperand BaselineAssembler::RegisterFrameOperand(
    interpreter::Register interpreter_register) {
  return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
}
void BaselineAssembler::RegisterFrameAddress(
    interpreter::Register interpreter_register, Register rscratch) {
  return __ Add(rscratch, fp,
                interpreter_register.ToOperand() * kSystemPointerSize);
}
MemOperand BaselineAssembler::FeedbackVectorOperand() {
  return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
MemOperand BaselineAssembler::FeedbackCellOperand() {
  return MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp);
}

void BaselineAssembler::Bind(Label* label) { __ Bind(label); }

void BaselineAssembler::JumpTarget() { __ JumpTarget(); }

void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
  __ B(target);
}

void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
                                   Label* target, Label::Distance) {
  __ JumpIfRoot(value, index, target);
}

void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
                                      Label* target, Label::Distance) {
  __ JumpIfNotRoot(value, index, target);
}

void BaselineAssembler::JumpIfSmi(Register value, Label* target,
                                  Label::Distance distance) {
  __ JumpIfSmi(value, target);
}

void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
                                     Label::Distance distance) {
  __ JumpIfNotSmi(value, target);
}

void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
                                        Label* target,
                                        Label::Distance distance) {
  JumpIf(cc, left, Immediate(right), target, distance);
}

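// For plain zero/non-zero tests arm64 has dedicated test-and-branch macros
// (TBZ/TBNZ-style); other conditions fall back to Tst plus a conditional
// branch.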
void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
                                      Label* target, Label::Distance) {
  if (cc == kZero) {
    __ TestAndBranchIfAllClear(value, mask, target);
  } else if (cc == kNotZero) {
    __ TestAndBranchIfAnySet(value, mask, target);
  } else {
    __ Tst(value, Immediate(mask));
    __ B(cc, target);
  }
}

void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
                               Label* target, Label::Distance) {
  __ CompareAndBranch(lhs, rhs, cc, target);
}
#if V8_STATIC_ROOTS_BOOL
void BaselineAssembler::JumpIfJSAnyIsPrimitive(Register heap_object,
                                               Label* target,
                                               Label::Distance distance) {
  __ AssertNotSmi(heap_object);
  ScratchRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  __ JumpIfJSAnyIsPrimitive(heap_object, scratch, target, distance);
}
#endif  // V8_STATIC_ROOTS_BOOL
void BaselineAssembler::JumpIfObjectTypeFast(Condition cc, Register object,
                                             InstanceType instance_type,
                                             Label* target,
                                             Label::Distance distance) {
  ScratchRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  if (cc == eq || cc == ne) {
    __ IsObjectType(object, scratch, scratch, instance_type);
    __ B(cc, target);
    return;
  }
  JumpIfObjectType(cc, object, instance_type, scratch, target, distance);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
                                         InstanceType instance_type,
                                         Register map, Label* target,
                                         Label::Distance) {
  ScratchRegisterScope temps(this);
  Register type = temps.AcquireScratch();
  __ LoadMap(map, object);
  __ Ldrh(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
  JumpIf(cc, type, instance_type, target);
}
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
                                           InstanceType instance_type,
                                           Label* target, Label::Distance) {
  ScratchRegisterScope temps(this);
  Register type = temps.AcquireScratch();
  if (v8_flags.debug_code) {
    __ AssertNotSmi(map);
    __ CompareObjectType(map, type, type, MAP_TYPE);
    __ Assert(eq, AbortReason::kUnexpectedValue);
  }
  __ Ldrh(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
  JumpIf(cc, type, instance_type, target);
}
void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
                                      MemOperand operand, Label* target,
                                      Label::Distance) {
  ScratchRegisterScope temps(this);
  Register tmp = temps.AcquireScratch();
  __ Ldr(tmp, operand);
  JumpIf(cc, value, tmp, target);
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Tagged<Smi> smi,
                                  Label* target, Label::Distance distance) {
  __ AssertSmi(value);
  __ CompareTaggedAndBranch(value, smi, cc, target);
}

void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
                                  Label* target, Label::Distance) {
  __ AssertSmi(lhs);
  __ AssertSmi(rhs);
  __ CompareTaggedAndBranch(lhs, rhs, cc, target);
}
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
                                     MemOperand operand, Label* target,
                                     Label::Distance) {
  ScratchRegisterScope temps(this);
  Register tmp = temps.AcquireScratch();
  __ Ldr(tmp, operand);
  __ CompareTaggedAndBranch(value, tmp, cc, target);
}
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
                                     Register value, Label* target,
                                     Label::Distance) {
  ScratchRegisterScope temps(this);
  Register tmp = temps.AcquireScratch();
  __ Ldr(tmp, operand);
  __ CompareTaggedAndBranch(tmp, value, cc, target);
}
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
                                   Label* target, Label::Distance) {
  JumpIf(cc, value, Immediate(byte), target);
}

void BaselineAssembler::Move(interpreter::Register output, Register source) {
  Move(RegisterFrameOperand(output), source);
}
void BaselineAssembler::Move(Register output, Tagged<TaggedIndex> value) {
  __ Mov(output, Immediate(value.ptr()));
}
void BaselineAssembler::Move(MemOperand output, Register source) {
  __ Str(source, output);
}
void BaselineAssembler::Move(Register output, ExternalReference reference) {
  __ Mov(output, Operand(reference));
}
void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
  __ Mov(output, Operand(value));
}
void BaselineAssembler::Move(Register output, int32_t value) {
  __ Mov(output, Immediate(value));
}
void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
  __ Mov(output, source);
}
void BaselineAssembler::MoveSmi(Register output, Register source) {
  __ Mov(output, source);
}

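// The arm64 ABI requires sp to stay 16-byte aligned, so tagged values are
// always pushed and popped in pairs; an odd number of values is padded with
// padreg. The helpers below count the arguments first (expanding
// RegisterLists) so the padding can be placed up front.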
namespace detail {

template <typename Arg>
inline Register ToRegister(BaselineAssembler* basm,
                           BaselineAssembler::ScratchRegisterScope* scope,
                           Arg arg) {
  Register reg = scope->AcquireScratch();
  basm->Move(reg, arg);
  return reg;
}
inline Register ToRegister(BaselineAssembler* basm,
                           BaselineAssembler::ScratchRegisterScope* scope,
                           Register reg) {
  return reg;
}

template <typename... Args>
struct CountPushHelper;
template <>
struct CountPushHelper<> {
  static int Count() { return 0; }
};
template <typename Arg, typename... Args>
struct CountPushHelper<Arg, Args...> {
  static int Count(Arg arg, Args... args) {
    return 1 + CountPushHelper<Args...>::Count(args...);
  }
};
template <typename... Args>
struct CountPushHelper<interpreter::RegisterList, Args...> {
  static int Count(interpreter::RegisterList list, Args... args) {
    return list.register_count() + CountPushHelper<Args...>::Count(args...);
  }
};

template <typename... Args>
struct PushAllHelper;
template <typename... Args>
inline void PushAll(BaselineAssembler* basm, Args... args) {
  PushAllHelper<Args...>::Push(basm, args...);
}
template <typename... Args>
inline void PushAllReverse(BaselineAssembler* basm, Args... args) {
  PushAllHelper<Args...>::PushReverse(basm, args...);
}

template <>
struct PushAllHelper<> {
  static void Push(BaselineAssembler* basm) {}
  static void PushReverse(BaselineAssembler* basm) {}
};
template <typename Arg>
struct PushAllHelper<Arg> {
  static void Push(BaselineAssembler* basm, Arg) { FATAL("Unaligned push"); }
  static void PushReverse(BaselineAssembler* basm, Arg arg) {
    // Push the padding register to round up the amount of values pushed.
    return PushAllReverse(basm, arg, padreg);
  }
};
template <typename Arg1, typename Arg2, typename... Args>
struct PushAllHelper<Arg1, Arg2, Args...> {
  static void Push(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
                   Args... args) {
    {
      BaselineAssembler::ScratchRegisterScope scope(basm);
      basm->masm()->Push(ToRegister(basm, &scope, arg1),
                         ToRegister(basm, &scope, arg2));
    }
    PushAll(basm, args...);
  }
  static void PushReverse(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
                          Args... args) {
    PushAllReverse(basm, args...);
    {
      BaselineAssembler::ScratchRegisterScope scope(basm);
      basm->masm()->Push(ToRegister(basm, &scope, arg2),
                         ToRegister(basm, &scope, arg1));
    }
  }
};
// Currently RegisterLists are always the last argument, so we don't
// specialize for the case where they're not. We do still specialize for the
// aligned and unaligned cases.
template <typename Arg>
struct PushAllHelper<Arg, interpreter::RegisterList> {
  static void Push(BaselineAssembler* basm, Arg arg,
                   interpreter::RegisterList list) {
    DCHECK_EQ(list.register_count() % 2, 1);
    PushAll(basm, arg, list[0], list.PopLeft());
  }
  static void PushReverse(BaselineAssembler* basm, Arg arg,
                          interpreter::RegisterList list) {
    if (list.register_count() == 0) {
      PushAllReverse(basm, arg);
    } else {
      PushAllReverse(basm, arg, list[0], list.PopLeft());
    }
  }
};
template <>
struct PushAllHelper<interpreter::RegisterList> {
  static void Push(BaselineAssembler* basm, interpreter::RegisterList list) {
    DCHECK_EQ(list.register_count() % 2, 0);
    for (int reg_index = 0; reg_index < list.register_count(); reg_index += 2) {
      PushAll(basm, list[reg_index], list[reg_index + 1]);
    }
  }
  static void PushReverse(BaselineAssembler* basm,
                          interpreter::RegisterList list) {
    int reg_index = list.register_count() - 1;
    if (reg_index % 2 == 0) {
      // Push the padding register to round up the amount of values pushed.
      PushAllReverse(basm, list[reg_index], padreg);
      reg_index--;
    }
    for (; reg_index >= 1; reg_index -= 2) {
      PushAllReverse(basm, list[reg_index - 1], list[reg_index]);
    }
  }
};

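// Pop mirrors Push: a lone register is popped together with padreg so the
// pair discipline (and hence sp alignment) is preserved.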
template <typename... T>
struct PopAllHelper;
template <>
struct PopAllHelper<> {
  static void Pop(BaselineAssembler* basm) {}
};
template <>
struct PopAllHelper<Register> {
  static void Pop(BaselineAssembler* basm, Register reg) {
    basm->masm()->Pop(reg, padreg);
  }
};
template <typename... T>
struct PopAllHelper<Register, Register, T...> {
  static void Pop(BaselineAssembler* basm, Register reg1, Register reg2,
                  T... tail) {
    basm->masm()->Pop(reg1, reg2);
    PopAllHelper<T...>::Pop(basm, tail...);
  }
};

}  // namespace detail

template <typename... T>
int BaselineAssembler::Push(T... vals) {
  // We have to count the pushes first, to decide whether to add padding before
  // the first push.
  int push_count = detail::CountPushHelper<T...>::Count(vals...);
  if (push_count % 2 == 0) {
    detail::PushAll(this, vals...);
  } else {
    detail::PushAll(this, padreg, vals...);
  }
  return push_count;
}
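
// For example, Push(a, b, c) has an odd count, so padreg goes first and the
// resulting layout is [sp] = c, [sp+8] = b, [sp+16] = a, [sp+24] = padreg
// (MacroAssembler::Push stores its first operand at the higher address).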
template <typename... T>
void BaselineAssembler::PushReverse(T... vals) {
  detail::PushAllReverse(this, vals...);
}

template <typename... T>
void BaselineAssembler::Pop(T... registers) {
  detail::PopAllHelper<T...>::Pop(this, registers...);
}

void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                        int offset) {
  __ LoadTaggedField(output, FieldMemOperand(source, offset));
}

void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
                                              int offset) {
  __ LoadTaggedSignedField(output, FieldMemOperand(source, offset));
}

void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
                                                      Register source,
                                                      int offset) {
  LoadTaggedSignedField(output, source, offset);
  SmiUntag(output);
}

void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                  Register source, int offset) {
  __ Ldrh(output, FieldMemOperand(source, offset));
}

void BaselineAssembler::LoadWord8Field(Register output, Register source,
                                       int offset) {
  __ Ldrb(output, FieldMemOperand(source, offset));
}

void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
                                               Tagged<Smi> value) {
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope temps(this);
  Register tmp = temps.AcquireScratch();
  __ Mov(tmp, Operand(value));
  __ StoreTaggedField(tmp, FieldMemOperand(target, offset));
}

void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
                                                         int offset,
                                                         Register value) {
  ASM_CODE_COMMENT(masm_);
  __ StoreTaggedField(value, FieldMemOperand(target, offset));
  __ RecordWriteField(target, offset, value, kLRHasNotBeenSaved,
                      SaveFPRegsMode::kIgnore);
}

void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
                                                       int offset,
                                                       Register value) {
  __ StoreTaggedField(value, FieldMemOperand(target, offset));
}

void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                Register feedback_vector,
                                                FeedbackSlot slot,
                                                Label* on_result,
                                                Label::Distance distance) {
  __ TryLoadOptimizedOsrCode(scratch_and_result, CodeKind::MAGLEV,
                             feedback_vector, slot, on_result, distance);
}

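// The interrupt budget counts down towards zero (the weight is negative on
// back edges). Adds sets the condition flags, so a single B(ge) can skip the
// interrupt request while the budget is still non-negative.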
void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
    int32_t weight, Label* skip_interrupt_label) {
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFeedbackCell(feedback_cell);

  Register interrupt_budget = scratch_scope.AcquireScratch().W();
  __ Ldr(interrupt_budget,
         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  // Remember to set flags as part of the add!
  __ Adds(interrupt_budget, interrupt_budget, weight);
  __ Str(interrupt_budget,
         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  if (skip_interrupt_label) {
    // Use compare flags set by Adds
    DCHECK_LT(weight, 0);
    __ B(ge, skip_interrupt_label);
  }
}

void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
    Register weight, Label* skip_interrupt_label) {
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFeedbackCell(feedback_cell);

  Register interrupt_budget = scratch_scope.AcquireScratch().W();
  __ Ldr(interrupt_budget,
         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  // Remember to set flags as part of the add!
  __ Adds(interrupt_budget, interrupt_budget, weight.W());
  __ Str(interrupt_budget,
         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  if (skip_interrupt_label) __ B(ge, skip_interrupt_label);
}

void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                       uint32_t depth,
                                       CompressionMode compression_mode) {
  for (; depth > 0; --depth) {
    LoadTaggedField(context, context, Context::kPreviousOffset);
  }
  LoadTaggedField(kInterpreterAccumulatorRegister, context,
                  Context::OffsetOfElementAt(index));
}

void BaselineAssembler::StaContextSlot(Register context, Register value,
                                       uint32_t index, uint32_t depth) {
  for (; depth > 0; --depth) {
    LoadTaggedField(context, context, Context::kPreviousOffset);
  }
  StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                   value);
}

void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                          uint32_t depth) {
  for (; depth > 0; --depth) {
    LoadTaggedField(context, context, Context::kPreviousOffset);
  }
  LoadTaggedField(context, context, Context::kExtensionOffset);
  if (cell_index > 0) {
    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
    // The actual array index is (cell_index - 1).
    cell_index -= 1;
  } else {
    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
    // The actual array index is (-cell_index - 1).
    cell_index = -cell_index - 1;
  }
  LoadFixedArrayElement(context, context, cell_index);
  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
}

void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                          int cell_index, uint32_t depth) {
  for (; depth > 0; --depth) {
    LoadTaggedField(context, context, Context::kPreviousOffset);
  }
  LoadTaggedField(context, context, Context::kExtensionOffset);
  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);

  // The actual array index is (cell_index - 1).
  cell_index -= 1;
  LoadFixedArrayElement(context, context, cell_index);
  StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}

void BaselineAssembler::IncrementSmi(MemOperand lhs) {
  BaselineAssembler::ScratchRegisterScope temps(this);
  Register tmp = temps.AcquireScratch();
  if (SmiValuesAre31Bits()) {
    tmp = tmp.W();
  }
  __ Ldr(tmp, lhs);
  __ Add(tmp, tmp, Operand(Smi::FromInt(1)));
  __ Str(tmp, lhs);
}

void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
  __ And(output, lhs, Immediate(rhs));
}

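// Switch emits an inline jump table of branch instructions and computes the
// entry address with Adr. With control-flow integrity enabled each entry is a
// BTI landing pad plus a branch, so entries grow from 4 to 8 bytes
// (entry_size_log2 goes from 2 to 3).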
void BaselineAssembler::Switch(Register reg, int case_value_base,
                               Label** labels, int num_labels) {
  ASM_CODE_COMMENT(masm_);
  Label fallthrough;
  if (case_value_base != 0) {
    __ Sub(reg, reg, Immediate(case_value_base));
  }

  // Mostly copied from code-generator-arm64.cc
  ScratchRegisterScope scope(this);
  Register temp = scope.AcquireScratch();
  Label table;
  JumpIf(kUnsignedGreaterThanEqual, reg, num_labels, &fallthrough);
  __ Adr(temp, &table);
  int entry_size_log2 = 2;
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
  ++entry_size_log2;  // Account for BTI.
  constexpr int instructions_per_jump_target = 1;
#else
  constexpr int instructions_per_jump_target = 0;
#endif
  constexpr int instructions_per_label = 1 + instructions_per_jump_target;
  __ Add(temp, temp, Operand(reg, UXTW, entry_size_log2));
  __ Br(temp);
  {
    const int instruction_count =
        num_labels * instructions_per_label + instructions_per_jump_target;
    MacroAssembler::BlockPoolsScope block_pools(masm_,
                                                instruction_count * kInstrSize);
    __ Bind(&table);
    for (int i = 0; i < num_labels; ++i) {
      __ JumpTarget();
      __ B(labels[i]);
    }
    __ JumpTarget();
    __ Bind(&fallthrough);
  }
}

#undef __
#define __ basm.

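// EmitReturn is the shared baseline epilogue: it charges the caller-supplied
// weight (passed in the BaselineLeaveFrame descriptor's weight register)
// against the interrupt budget, then drops max(formal parameter count,
// actual argument count) stack slots before returning.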
void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
  ASM_CODE_COMMENT(masm);
  BaselineAssembler basm(masm);

  Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
  Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();

  {
    ASM_CODE_COMMENT_STRING(masm, "Update Interrupt Budget");

    Label skip_interrupt_label;
    __ AddToInterruptBudgetAndJumpIfNotExceeded(weight, &skip_interrupt_label);
    __ masm()->SmiTag(params_size);
    __ masm()->Push(params_size, kInterpreterAccumulatorRegister);

    __ LoadContext(kContextRegister);
    __ LoadFunction(kJSFunctionRegister);
    __ masm()->PushArgument(kJSFunctionRegister);
    __ CallRuntime(Runtime::kBytecodeBudgetInterrupt_Sparkplug, 1);

    __ masm()->Pop(kInterpreterAccumulatorRegister, params_size);
    __ masm()->SmiUntag(params_size);

    __ Bind(&skip_interrupt_label);
  }

  BaselineAssembler::ScratchRegisterScope temps(&basm);
  Register actual_params_size = temps.AcquireScratch();
  // Compute the size of the actual parameters + receiver.
  __ Move(actual_params_size,
          MemOperand(fp, StandardFrameConstants::kArgCOffset));

  // If actual is bigger than formal, then we should use it to free up the stack
  // arguments.
  __ masm()->Cmp(params_size, actual_params_size);
  __ masm()->Csel(params_size, actual_params_size, params_size, kLessThan);

  // Leave the frame (also dropping the register file).
  __ masm()->LeaveFrame(StackFrame::BASELINE);

  // Drop receiver + arguments.
  __ masm()->DropArguments(params_size);
  __ masm()->Ret();
}

#undef __

inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
    Register reg) {
  assembler_->masm()->CmpTagged(reg, kInterpreterAccumulatorRegister);
  assembler_->masm()->Assert(eq, AbortReason::kAccumulatorClobbered);
}

}  // namespace baseline
}  // namespace internal
}  // namespace v8

#endif  // V8_BASELINE_ARM64_BASELINE_ASSEMBLER_ARM64_INL_H_