v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
baseline-assembler-riscv-inl.h
Go to the documentation of this file.
1// Copyright 2021 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_BASELINE_RISCV_BASELINE_ASSEMBLER_RISCV_INL_H_
6#define V8_BASELINE_RISCV_BASELINE_ASSEMBLER_RISCV_INL_H_
7
12namespace v8 {
13namespace internal {
14namespace baseline {
15
// RAII scope that hands out scratch registers for baseline code
// generation by wrapping the MacroAssembler's scratch-register scope.
// The first (outermost) scope additionally makes kScratchReg and
// kScratchReg2 available.
// NOTE(review): this definition is garbled in this view — the
// constructor head, destructor, AcquireScratch() and the member
// declarations are missing. Compare against upstream before relying
// on it.
class BaselineAssembler::ScratchRegisterScope {
 public:
      : assembler_(assembler),
        wrapped_scope_(assembler->masm()) {
      // If we haven't opened a scratch scope yet, for the first one add a
      // couple of extra registers.
      wrapped_scope_.Include(kScratchReg, kScratchReg2);
    }
  }

 private:
};
38
39namespace detail {
40
41#ifdef DEBUG
42inline bool Clobbers(Register target, MemOperand op) {
43 return op.is_reg() && op.rm() == target;
44}
45#endif
46
47} // namespace detail
48
49#define __ masm_->
50
52 interpreter::Register interpreter_register) {
53 return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
54}
56 interpreter::Register interpreter_register, Register rscratch) {
57 return __ AddWord(rscratch, fp,
58 interpreter_register.ToOperand() * kSystemPointerSize);
59}
62}
65}
66
67void BaselineAssembler::Bind(Label* label) { __ bind(label); }
68
70 // Nop
71}
72
// Unconditional jump to |target|; |distance| hints near/far encoding.
void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
  __ jmp(target, distance);
}
// Jumps to |target| when |value| equals the root at |index|.
void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
                                   Label* target, Label::Distance distance) {
  __ JumpIfRoot(value, index, target, distance);
}
// Jumps to |target| when |value| does not equal the root at |index|.
void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
                                      Label* target, Label::Distance distance) {
  __ JumpIfNotRoot(value, index, target, distance);
}
// Jumps to |target| when |value| is a Smi.
void BaselineAssembler::JumpIfSmi(Register value, Label* target,
                                  Label::Distance distance) {
  __ JumpIfSmi(value, target, distance);
}
88void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
89 Label::Distance distance) {
90 __ JumpIfNotSmi(value, target);
91}
92void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
93 Label* target,
94 Label::Distance distance) {
95 JumpIf(cc, left, Operand(right), target, distance);
96}
97void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
98 Label* target, Label::Distance distance) {
99 ScratchRegisterScope temps(this);
100 Register tmp = temps.AcquireScratch();
101 __ And(tmp, value, Operand(mask));
102 __ Branch(target, cc, tmp, Operand(zero_reg), distance);
103}
104
105void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
106 Label* target, Label::Distance distance) {
107 __ Branch(target, cc, lhs, Operand(rhs), distance);
108}
109
#if V8_STATIC_ROOTS_BOOL
// Branches to |target| when |heap_object| is a JS primitive; only
// available with static roots, which enable the range-based check.
void BaselineAssembler::JumpIfJSAnyIsPrimitive(Register heap_object,
                                               Label* target,
                                               Label::Distance distance) {
  __ AssertNotSmi(heap_object);
  ScratchRegisterScope scratch_scope(this);
  Register temp = scratch_scope.AcquireScratch();
  __ JumpIfJSAnyIsPrimitive(heap_object, temp, target, distance);
}
#endif  // V8_STATIC_ROOTS_BOOL
120
122 InstanceType instance_type,
123 Label* target,
124 Label::Distance distance) {
125 ScratchRegisterScope temps(this);
126 Register scratch = temps.AcquireScratch();
127 if (cc == eq || cc == ne) {
128 __ JumpIfObjectType(target, cc, object, instance_type, scratch);
129 return;
130 }
131 JumpIfObjectType(cc, object, instance_type, scratch, target, distance);
132}
134 InstanceType instance_type,
135 Register map, Label* target,
136 Label::Distance distance) {
137 ScratchRegisterScope temps(this);
138 Register type = temps.AcquireScratch();
139 __ GetObjectType(object, map, type);
140 __ Branch(target, cc, type, Operand(instance_type), distance);
141}
143 InstanceType instance_type,
144 Label* target,
145 Label::Distance distance) {
146 ScratchRegisterScope temps(this);
147 Register type = temps.AcquireScratch();
148 if (v8_flags.debug_code) {
149 __ AssertNotSmi(map);
150 __ GetObjectType(map, type, type);
151 __ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
152 }
153 __ LoadWord(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
154 __ Branch(target, cc, type, Operand(instance_type), distance);
155}
157 MemOperand operand, Label* target,
158 Label::Distance distance) {
159 ScratchRegisterScope temps(this);
160 Register temp = temps.AcquireScratch();
161 __ LoadWord(temp, operand);
162 __ Branch(target, cc, value, Operand(temp), distance);
163}
164void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Tagged<Smi> smi,
165 Label* target, Label::Distance distance) {
166 __ CompareTaggedAndBranch(target, cc, value, Operand(smi));
167}
// Compares two tagged Smi values and branches on |cc|; both inputs are
// asserted (debug only) to actually be Smis.
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
                                  Label* target, Label::Distance distance) {
  // TODO: revisit for pointer compression.
  __ AssertSmi(lhs);
  __ AssertSmi(rhs);
  __ CompareTaggedAndBranch(target, cc, lhs, Operand(rhs), distance);
}
175void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
176 MemOperand operand, Label* target,
177 Label::Distance distance) {
178 // todo: compress pointer
179 ScratchRegisterScope temps(this);
180 Register scratch = temps.AcquireScratch();
181 __ LoadWord(scratch, operand);
182 __ CompareTaggedAndBranch(target, cc, value, Operand(scratch), distance);
183}
185 Register value, Label* target,
186 Label::Distance distance) {
187 // todo: compress pointer
188 ScratchRegisterScope temps(this);
189 Register scratch = temps.AcquireScratch();
190 __ LoadWord(scratch, operand);
191 __ CompareTaggedAndBranch(target, cc, scratch, Operand(value), distance);
192}
// Compares |value| against the byte immediate |byte| and branches on
// |cc|.
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
                                   Label* target, Label::Distance distance) {
  __ Branch(target, cc, value, Operand(byte), distance);
}
197
// Stores |source| into |output|'s interpreter-register frame slot.
void BaselineAssembler::Move(interpreter::Register output, Register source) {
  Move(RegisterFrameOperand(output), source);
}
// Loads the raw pointer bits of the TaggedIndex immediate |value|.
void BaselineAssembler::Move(Register output, Tagged<TaggedIndex> value) {
  __ li(output, Operand(value.ptr()));
}
// Stores |source| to memory at |output|.
void BaselineAssembler::Move(MemOperand output, Register source) {
  __ StoreWord(source, output);
}
// Loads the address of the external |reference| into |output|.
void BaselineAssembler::Move(Register output, ExternalReference reference) {
  __ li(output, Operand(reference));
}
// Loads the heap object referenced by |value| into |output|.
void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
  __ li(output, value);
}
// Loads the raw 32-bit immediate |value| into |output|.
void BaselineAssembler::Move(Register output, int32_t value) {
  __ li(output, Operand(value));
}
// Register-to-register move; the value may or may not be a Smi.
void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
  __ Move(output, source);
}
// Register-to-register move of a value known to be a Smi.
void BaselineAssembler::MoveSmi(Register output, Register source) {
  __ Move(output, source);
}
222
223namespace detail {
224
225template <typename Arg>
226inline Register ToRegister(BaselineAssembler* basm,
227 BaselineAssembler::ScratchRegisterScope* scope,
228 Arg arg) {
229 Register reg = scope->AcquireScratch();
230 basm->Move(reg, arg);
231 return reg;
232}
// Already a register: pass it through without consuming a scratch.
inline Register ToRegister(BaselineAssembler* basm,
                           BaselineAssembler::ScratchRegisterScope* scope,
                           Register reg) {
  return reg;
}
238
// Recursive helper that pushes a heterogeneous argument list; each Push
// returns the number of stack slots it emitted.
template <typename... Args>
struct PushAllHelper;
template <>
struct PushAllHelper<> {
  // Empty list: nothing to push.
  static int Push(BaselineAssembler* basm) { return 0; }
  static int PushReverse(BaselineAssembler* basm) { return 0; }
};
246template <typename Arg>
247struct PushAllHelper<Arg> {
248 static int Push(BaselineAssembler* basm, Arg arg) {
250 basm->masm()->Push(ToRegister(basm, &scope, arg));
251 return 1;
252 }
253 static int PushReverse(BaselineAssembler* basm, Arg arg) {
254 return Push(basm, arg);
255 }
256};
257template <typename Arg, typename... Args>
258struct PushAllHelper<Arg, Args...> {
259 static int Push(BaselineAssembler* basm, Arg arg, Args... args) {
260 PushAllHelper<Arg>::Push(basm, arg);
261 return 1 + PushAllHelper<Args...>::Push(basm, args...);
262 }
263 static int PushReverse(BaselineAssembler* basm, Arg arg, Args... args) {
264 int nargs = PushAllHelper<Args...>::PushReverse(basm, args...);
265 PushAllHelper<Arg>::Push(basm, arg);
266 return nargs + 1;
267 }
268};
269template <>
270struct PushAllHelper<interpreter::RegisterList> {
272 for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
273 PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
274 }
275 return list.register_count();
276 }
279 for (int reg_index = list.register_count() - 1; reg_index >= 0;
280 --reg_index) {
281 PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
282 }
283 return list.register_count();
284 }
285};
286
// Recursive helper that pops a list of registers pushed via Push().
template <typename... T>
struct PopAllHelper;
template <>
struct PopAllHelper<> {
  // Empty list: nothing to pop.
  static void Pop(BaselineAssembler* basm) {}
};
// Single-register case.
template <>
struct PopAllHelper<Register> {
  static void Pop(BaselineAssembler* basm, Register reg) {
    basm->masm()->Pop(reg);
  }
};
299template <typename... T>
300struct PopAllHelper<Register, T...> {
301 static void Pop(BaselineAssembler* basm, Register reg, T... tail) {
303 PopAllHelper<T...>::Pop(basm, tail...);
304 }
305};
306
307} // namespace detail
308
// Pushes |vals...| in order; returns the number of slots pushed.
template <typename... T>
int BaselineAssembler::Push(T... vals) {
  return detail::PushAllHelper<T...>::Push(this, vals...);
}
313
// Pushes |vals...| in reverse order.
template <typename... T>
void BaselineAssembler::PushReverse(T... vals) {
  detail::PushAllHelper<T...>::PushReverse(this, vals...);
}
318
319template <typename... T>
321 detail::PopAllHelper<T...>::Pop(this, registers...);
322}
323
// Loads a (possibly compressed) tagged field at |source|+|offset|.
void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                        int offset) {
  __ LoadTaggedField(output, FieldMemOperand(source, offset));
}
328void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
329 int offset) {
331}
333 Register source,
334 int offset) {
335 LoadTaggedSignedField(output, source, offset);
336 SmiUntag(output);
337}
339 Register source, int offset) {
340 __ Lhu(output, FieldMemOperand(source, offset));
341}
// Loads an 8-bit field.
// NOTE(review): Lb sign-extends the byte — confirm callers expect sign
// extension rather than zero extension here.
void BaselineAssembler::LoadWord8Field(Register output, Register source,
                                       int offset) {
  __ Lb(output, FieldMemOperand(source, offset));
}
// Stores the Smi immediate |value| into a tagged field of |target|.
// No write barrier is needed because Smis are not heap-allocated.
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
                                               Tagged<Smi> value) {
  ScratchRegisterScope temps(this);
  Register tmp = temps.AcquireScratch();
  __ li(tmp, Operand(value));
  __ StoreTaggedField(tmp, FieldMemOperand(target, offset));
}
355 int offset,
356 Register value) {
358 __ StoreTaggedField(value, FieldMemOperand(target, offset));
359 __ RecordWriteField(target, offset, value, kRAHasNotBeenSaved,
361}
363 int offset,
364 Register value) {
365 __ StoreTaggedField(value, FieldMemOperand(target, offset));
366}
367
368void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
369 Register feedback_vector,
370 FeedbackSlot slot,
371 Label* on_result,
372 Label::Distance distance) {
373 Label fallthrough, clear_slot;
374 LoadTaggedField(scratch_and_result, feedback_vector,
376 __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
377
378 // Is it marked_for_deoptimization? If yes, clear the slot.
379 {
380 ScratchRegisterScope temps(this);
381 // The entry references a CodeWrapper object. Unwrap it now.
382 __ LoadCodePointerField(
383 scratch_and_result,
384 FieldMemOperand(scratch_and_result, CodeWrapper::kCodeOffset));
385
386 __ JumpIfCodeIsMarkedForDeoptimization(scratch_and_result,
387 temps.AcquireScratch(), &clear_slot);
388 Jump(on_result, distance);
389 }
390
391 __ bind(&clear_slot);
392 __ li(scratch_and_result, __ ClearedValue());
394 feedback_vector, FeedbackVector::OffsetOfElementAt(slot.ToInt()),
395 scratch_and_result);
396
397 __ bind(&fallthrough);
398 Move(scratch_and_result, 0);
399}
400
402 int32_t weight, Label* skip_interrupt_label) {
404 ScratchRegisterScope scratch_scope(this);
405 Register feedback_cell = scratch_scope.AcquireScratch();
406 LoadFeedbackCell(feedback_cell);
407
408 Register interrupt_budget = scratch_scope.AcquireScratch();
409 __ Lw(interrupt_budget,
410 FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
411 // Remember to set flags as part of the add!
412 __ Add32(interrupt_budget, interrupt_budget, weight);
413 __ Sw(interrupt_budget,
414 FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
415 if (skip_interrupt_label) {
416 DCHECK_LT(weight, 0);
417 __ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg));
418 }
419}
420
422 Register weight, Label* skip_interrupt_label) {
424 ScratchRegisterScope scratch_scope(this);
425 Register feedback_cell = scratch_scope.AcquireScratch();
426 LoadFeedbackCell(feedback_cell);
427
428 Register interrupt_budget = scratch_scope.AcquireScratch();
429 __ Lw(interrupt_budget,
430 FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
431 // Remember to set flags as part of the add!
432 __ Add32(interrupt_budget, interrupt_budget, weight);
433 __ Sw(interrupt_budget,
434 FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
435 if (skip_interrupt_label) {
436 __ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg));
437 }
438}
439
440void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
441 uint32_t depth,
442 CompressionMode compression_mode) {
443 for (; depth > 0; --depth) {
444 LoadTaggedField(context, context, Context::kPreviousOffset);
445 }
448}
449
450void BaselineAssembler::StaContextSlot(Register context, Register value,
451 uint32_t index, uint32_t depth) {
452 for (; depth > 0; --depth) {
453 LoadTaggedField(context, context, Context::kPreviousOffset);
454 }
456 value);
457}
458
459void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
460 uint32_t depth) {
461 for (; depth > 0; --depth) {
462 LoadTaggedField(context, context, Context::kPreviousOffset);
463 }
465 if (cell_index > 0) {
466 LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
467 // The actual array index is (cell_index - 1).
468 cell_index -= 1;
469 } else {
470 LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
471 // The actual array index is (-cell_index - 1).
472 cell_index = -cell_index - 1;
473 }
474 LoadFixedArrayElement(context, context, cell_index);
475 LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
476}
477
478void BaselineAssembler::StaModuleVariable(Register context, Register value,
479 int cell_index, uint32_t depth) {
480 for (; depth > 0; --depth) {
481 LoadTaggedField(context, context, Context::kPreviousOffset);
482 }
484 LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
485
486 // The actual array index is (cell_index - 1).
487 cell_index -= 1;
488 LoadFixedArrayElement(context, context, cell_index);
489 StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
490}
491
493 BaselineAssembler::ScratchRegisterScope temps(this);
494 Register tmp = temps.AcquireScratch();
496 if (SmiValuesAre31Bits()) {
497 __ Lw(tmp, lhs);
498 __ Add32(tmp, tmp, Operand(Smi::FromInt(1)));
499 __ Sw(tmp, lhs);
500 } else {
501 __ LoadWord(tmp, lhs);
502 __ AddWord(tmp, tmp, Operand(Smi::FromInt(1)));
503 __ StoreWord(tmp, lhs);
504 }
505}
506
507void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
508 __ And(output, lhs, Operand(rhs));
509}
// Emits a jump table dispatching on (|reg| - |case_value_base|) into
// |labels|; values outside [0, num_labels) fall through.
void BaselineAssembler::Switch(Register reg, int case_value_base,
                               Label** labels, int num_labels) {
  Label fallthrough;
  if (case_value_base != 0) {
    __ SubWord(reg, reg, Operand(case_value_base));
  }

  // Mostly copied from code-generator-riscv64.cc
  ScratchRegisterScope scope(this);
  Label table;
  // Unsigned compare also sends negative (wrapped) values to fallthrough.
  __ Branch(&fallthrough, kUnsignedGreaterThanEqual, reg, Operand(num_labels));
  int64_t imm64;
  imm64 = __ branch_long_offset(&table);
  CHECK(is_int32(imm64 + 0x800));
  // Split the PC-relative offset into auipc/addi halves; the +0x800
  // rounds Hi20 so the sign-extended Lo12 composes correctly.
  int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12);
  int32_t Lo12 = (int32_t)imm64 << 20 >> 20;
  // Keep the auipc/addi pair contiguous (no trampoline in between).
  __ BlockTrampolinePoolFor(2);
  __ auipc(t6, Hi20);  // Read PC + Hi20 into t6
  __ addi(t6, t6, Lo12);  // jump PC + Hi20 + Lo12

  // Each table entry is a long branch: 2 instructions = 8 bytes.
  int entry_size_log2 = 3;
  __ BlockTrampolinePoolFor(num_labels * 2 + 5);
  __ CalcScaledAddress(t6, t6, reg, entry_size_log2);
  __ Jump(t6);
  {
    __ bind(&table);
    for (int i = 0; i < num_labels; ++i) {
      __ BranchLong(labels[i]);
    }
    // Verify every entry really occupied exactly two instructions.
    DCHECK_EQ(num_labels * 2, __ InstructionsGeneratedSince(&table));
  }
  __ bind(&fallthrough);
}
544
545#undef __
546
547#define __ basm.
548
// Emits the baseline epilogue: charges the return interrupt budget
// (calling the Sparkplug budget-interrupt runtime function when
// exhausted), leaves the BASELINE frame, and returns, dropping the
// larger of the formal parameter count and the actual argument count.
// NOTE(review): several lines are missing from this view — the |basm|
// local and |weight| setup before this body, the push/restore of the
// accumulator, context and function around the runtime call, and the
// source operand of the |actual_params_size| move. Compare against
// upstream before relying on this text.
void BaselineAssembler::EmitReturn(MacroAssembler* masm) {

  Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();

  {
    ASM_CODE_COMMENT_STRING(masm, "Update Interrupt Budget");

    Label skip_interrupt_label;
    __ AddToInterruptBudgetAndJumpIfNotExceeded(weight, &skip_interrupt_label);
    __ masm()->SmiTag(params_size);

    __ CallRuntime(Runtime::kBytecodeBudgetInterrupt_Sparkplug, 1);

    __ masm()->SmiUntag(params_size);

    __ Bind(&skip_interrupt_label);
  }

  BaselineAssembler::ScratchRegisterScope temps(&basm);
  Register actual_params_size = temps.AcquireScratch();
  // Compute the size of the actual parameters + receiver.
  __ Move(actual_params_size,

  // If actual is bigger than formal, then we should use it to free up the stack
  // arguments.
  Label corrected_args_count;
  __ masm()->Branch(&corrected_args_count, ge, params_size,
                    Operand(actual_params_size), Label::Distance::kNear);
  __ masm()->Move(params_size, actual_params_size);
  __ Bind(&corrected_args_count);

  // Leave the frame (also dropping the register file).
  __ masm()->LeaveFrame(StackFrame::BASELINE);

  // Drop receiver + arguments.
  __ masm()->DropArguments(params_size);
  __ masm()->Ret();
}
596
597#undef __
598
    // NOTE(review): the signature head and the final Assert argument are
    // missing from this view — this is the tail of
    // EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(Register),
    // which aborts if |reg| no longer holds the accumulator value.
    Register reg) {
  assembler_->masm()->Assert(eq, AbortReason::kAccumulatorClobbered, reg,
}
604} // namespace baseline
605} // namespace internal
606} // namespace v8
607
608#endif // V8_BASELINE_RISCV_BASELINE_ASSEMBLER_RISCV_INL_H_
#define Assert(condition)
static const int kExtensionOffset
Definition contexts.h:500
static V8_INLINE constexpr int OffsetOfElementAt(int index)
Definition contexts.h:512
static const int kPreviousOffset
Definition contexts.h:492
static constexpr int OffsetOfElementAt(int index)
void SmiUntag(Register reg, SBit s=LeaveCC)
void Move(Register dst, Tagged< Smi > smi)
void Assert(Condition cond, AbortReason reason) NOOP_UNLESS_DEBUG_CODE
void SmiTag(Register reg, SBit s=LeaveCC)
void Ret(Condition cond=al)
int LeaveFrame(StackFrame::Type type)
void Branch(Label *label, bool need_link=false)
bool is_reg(Register reg) const
static constexpr Tagged< Smi > FromInt(int value)
Definition smi.h:38
void JumpIfByte(Condition cc, Register value, int32_t byte, Label *target, Label::Distance distance=Label::kFar)
void CallRuntime(Runtime::FunctionId function, int nargs)
void JumpIf(Condition cc, Register lhs, const Operand &rhs, Label *target, Label::Distance distance=Label::kFar)
void LoadFixedArrayElement(Register output, Register array, int32_t index)
static MemOperand RegisterFrameOperand(interpreter::Register interpreter_register)
void JumpIfNotRoot(Register value, RootIndex index, Label *target, Label ::Distance distance=Label::kFar)
void JumpIfPointer(Condition cc, Register value, MemOperand operand, Label *target, Label::Distance distance=Label::kFar)
void Move(Register output, Register source)
void MoveSmi(Register output, Register source)
void TestAndBranch(Register value, int mask, Condition cc, Label *target, Label::Distance distance=Label::kFar)
void LoadWord8Field(Register output, Register source, int offset)
void LoadWord16FieldZeroExtend(Register output, Register source, int offset)
void AddToInterruptBudgetAndJumpIfNotExceeded(int32_t weight, Label *skip_interrupt_label)
void LdaContextSlot(Register context, uint32_t index, uint32_t depth, CompressionMode compression_mode=CompressionMode::kDefault)
void LoadTaggedField(Register output, Register source, int offset)
void Jump(Label *target, Label::Distance distance=Label::kFar)
void Switch(Register reg, int case_value_base, Label **labels, int num_labels)
void StaModuleVariable(Register context, Register value, int cell_index, uint32_t depth)
void JumpIfObjectTypeFast(Condition cc, Register object, InstanceType instance_type, Label *target, Label::Distance distance=Label::kFar)
void MoveMaybeSmi(Register output, Register source)
void StoreTaggedFieldNoWriteBarrier(Register target, int offset, Register value)
void TryLoadOptimizedOsrCode(Register scratch_and_result, Register feedback_vector, FeedbackSlot slot, Label *on_result, Label::Distance distance)
void LoadTaggedSignedFieldAndUntag(Register output, Register source, int offset)
void JumpIfInstanceType(Condition cc, Register map, InstanceType instance_type, Label *target, Label::Distance distance=Label::kFar)
void JumpIfImmediate(Condition cc, Register left, int right, Label *target, Label::Distance distance=Label::kFar)
void JumpIfRoot(Register value, RootIndex index, Label *target, Label::Distance distance=Label::kFar)
void JumpIfSmi(Register value, Label *target, Label::Distance distance=Label::kFar)
void JumpIfNotSmi(Register value, Label *target, Label::Distance distance=Label::kFar)
void JumpIfObjectType(Condition cc, Register object, InstanceType instance_type, Register map, Label *target, Label::Distance distance=Label::kFar)
void LdaModuleVariable(Register context, int cell_index, uint32_t depth)
void StoreTaggedFieldWithWriteBarrier(Register target, int offset, Register value)
void LoadTaggedSignedField(Register output, Register source, int offset)
void RegisterFrameAddress(interpreter::Register interpreter_register, Register rscratch)
void Word32And(Register output, Register lhs, int rhs)
void StoreTaggedSignedField(Register target, int offset, Tagged< Smi > value)
void StaContextSlot(Register context, Register value, uint32_t index, uint32_t depth)
void JumpIfTagged(Condition cc, Register value, MemOperand operand, Label *target, Label::Distance distance=Label::kFar)
#define ASM_CODE_COMMENT_STRING(asm,...)
Definition assembler.h:618
#define ASM_CODE_COMMENT(asm)
Definition assembler.h:617
base::Vector< const DirectHandle< Object > > args
Definition execution.cc:74
Label label
int32_t offset
TNode< Object > target
LiftoffRegister reg
uint32_t const mask
RegListBase< RegisterT > registers
int int32_t
Definition unicode.cc:40
Register ToRegister(BaselineAssembler *basm, BaselineAssembler::ScratchRegisterScope *scope, Arg arg)
void And(LiftoffAssembler *lasm, Register dst, Register lhs, Register rhs)
constexpr Register kInterpreterAccumulatorRegister
MemOperand FieldMemOperand(Register object, int offset)
constexpr int kSystemPointerSize
Definition globals.h:410
constexpr bool SmiValuesAre31Bits()
constexpr Register kContextRegister
Tagged< ClearedWeakValue > ClearedValue(PtrComprCageBase cage_base)
V8_EXPORT_PRIVATE FlagValues v8_flags
constexpr Register kJSFunctionRegister
#define CHECK(condition)
Definition logging.h:124
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
static void Pop(BaselineAssembler *basm, Register reg, T... tail)
static void Pop(BaselineAssembler *basm, Register reg)
static int Push(BaselineAssembler *basm, Arg arg, Args... args)
static int PushReverse(BaselineAssembler *basm, Arg arg, Args... args)
static int PushReverse(BaselineAssembler *basm, Arg arg)
static int Push(BaselineAssembler *basm, interpreter::RegisterList list)
static int PushReverse(BaselineAssembler *basm, interpreter::RegisterList list)