v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
baseline-assembler-arm-inl.h
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_BASELINE_ARM_BASELINE_ASSEMBLER_ARM_INL_H_
#define V8_BASELINE_ARM_BASELINE_ASSEMBLER_ARM_INL_H_

namespace v8 {
namespace internal {
namespace baseline {

class BaselineAssembler::ScratchRegisterScope {
 public:
  explicit ScratchRegisterScope(BaselineAssembler* assembler)
      : assembler_(assembler),
        prev_scope_(assembler->scratch_register_scope_),
        wrapped_scope_(assembler->masm()) {
    if (!assembler_->scratch_register_scope_) {
      // If we haven't opened a scratch scope yet, for the first one add a
      // couple of extra registers.
      DCHECK(wrapped_scope_.CanAcquire());
      wrapped_scope_.Include(r8, r9);
      wrapped_scope_.Include(kInterpreterBytecodeOffsetRegister);
    }
    assembler_->scratch_register_scope_ = this;
  }
  ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }

  Register AcquireScratch() { return wrapped_scope_.Acquire(); }

 private:
  BaselineAssembler* assembler_;
  ScratchRegisterScope* prev_scope_;
  UseScratchRegisterScope wrapped_scope_;
};
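// Typical usage inside the BaselineAssembler methods below (this is the
// pattern the rest of this file follows, not new API):
//
//   ScratchRegisterScope temps(this);
//   Register tmp = temps.AcquireScratch();
//   __ ldr(tmp, operand);
//
// Scratch registers are handed back automatically when the scope is destroyed.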

namespace detail {

#ifdef DEBUG
inline bool Clobbers(Register target, MemOperand op) {
  return op.rn() == target || op.rm() == target;
}
#endif

}  // namespace detail

#define __ masm_->

MemOperand BaselineAssembler::RegisterFrameOperand(
    interpreter::Register interpreter_register) {
  return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
}
void BaselineAssembler::RegisterFrameAddress(
    interpreter::Register interpreter_register, Register rscratch) {
  return __ add(rscratch, fp,
                Operand(interpreter_register.ToOperand() * kSystemPointerSize));
}
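// Interpreter registers live in the baseline frame itself: ToOperand() gives
// the signed slot index relative to fp, so scaling by kSystemPointerSize
// yields either a ready-made MemOperand (above) or, for callers that need an
// address, an explicit fp-relative add into a scratch register.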

void BaselineAssembler::JumpTarget() {
  // NOP on arm.
}

void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
  __ b(target);
}

void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
                                   Label* target, Label::Distance) {
  __ JumpIfRoot(value, index, target);
}

void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
                                      Label* target, Label::Distance) {
  __ JumpIfNotRoot(value, index, target);
}

void BaselineAssembler::JumpIfSmi(Register value, Label* target,
                                  Label::Distance distance) {
  __ JumpIfSmi(value, target);
}

void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
                                        Label* target,
                                        Label::Distance distance) {
  JumpIf(cc, left, Operand(right), target, distance);
}

void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
                                     Label::Distance distance) {
  __ JumpIfNotSmi(value, target);
}

void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
                                      Label* target, Label::Distance) {
  __ tst(value, Operand(mask));
  __ b(cc, target);
}

void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
                               Label* target, Label::Distance) {
  __ cmp(lhs, Operand(rhs));
  __ b(cc, target);
}
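// The typed JumpIf* helpers below all funnel into this cmp + conditional
// branch pair once their operand has been materialized (via a scratch
// register where necessary).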
void BaselineAssembler::JumpIfObjectTypeFast(Condition cc, Register object,
                                             InstanceType instance_type,
                                             Label* target,
                                             Label::Distance distance) {
  ScratchRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  JumpIfObjectType(cc, object, instance_type, scratch, target, distance);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
                                         InstanceType instance_type,
                                         Register map, Label* target,
                                         Label::Distance) {
  ScratchRegisterScope temps(this);
  Register type = temps.AcquireScratch();
  __ LoadMap(map, object);
  __ ldrh(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
  JumpIf(cc, type, Operand(instance_type), target);
}
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
                                           InstanceType instance_type,
                                           Label* target, Label::Distance) {
  ScratchRegisterScope temps(this);
  Register type = temps.AcquireScratch();
  if (v8_flags.debug_code) {
    __ AssertNotSmi(map);
    __ CompareObjectType(map, type, type, MAP_TYPE);
    __ Assert(eq, AbortReason::kUnexpectedValue);
  }
  __ ldrh(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
  JumpIf(cc, type, Operand(instance_type), target);
}
void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
                                      MemOperand operand, Label* target,
                                      Label::Distance) {
  ScratchRegisterScope temps(this);
  Register tmp = temps.AcquireScratch();
  __ ldr(tmp, operand);
  JumpIf(cc, value, Operand(tmp), target);
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register value,
                                  Tagged<Smi> smi, Label* target,
                                  Label::Distance) {
  __ AssertSmi(value);
  JumpIf(cc, value, Operand(smi), target);
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
                                  Label* target, Label::Distance) {
  __ AssertSmi(lhs);
  __ AssertSmi(rhs);
  JumpIf(cc, lhs, Operand(rhs), target);
}
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
                                     MemOperand operand, Label* target,
                                     Label::Distance) {
  ScratchRegisterScope temps(this);
  Register tmp = temps.AcquireScratch();
  __ ldr(tmp, operand);
  JumpIf(cc, value, Operand(tmp), target);
}
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
                                     Register value, Label* target,
                                     Label::Distance) {
  ScratchRegisterScope temps(this);
  Register tmp = temps.AcquireScratch();
  __ ldr(tmp, operand);
  JumpIf(cc, tmp, Operand(value), target);
}
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
                                   Label* target, Label::Distance) {
  JumpIf(cc, value, Operand(byte), target);
}

void BaselineAssembler::Move(interpreter::Register output, Register source) {
  Move(RegisterFrameOperand(output), source);
}
void BaselineAssembler::Move(Register output, Tagged<TaggedIndex> value) {
  __ mov(output, Operand(value.ptr()));
}
void BaselineAssembler::Move(MemOperand output, Register source) {
  __ str(source, output);
}
void BaselineAssembler::Move(Register output, ExternalReference reference) {
  __ Move32BitImmediate(output, Operand(reference));
}
void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
  __ Move32BitImmediate(output, Operand(value));
}
void BaselineAssembler::Move(Register output, int32_t value) {
  __ mov(output, Operand(value));
}
void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
  __ mov(output, source);
}
void BaselineAssembler::MoveSmi(Register output, Register source) {
  __ mov(output, source);
}
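// Note: ExternalReference and Handle<HeapObject> immediates go through
// Move32BitImmediate, which on ARM presumably emits a movw/movt pair (or a
// constant-pool load) and can carry relocation information, whereas plain
// int32_t values use a simple mov with an immediate operand.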

namespace detail {

template <typename Arg>
Register ToRegister(BaselineAssembler* basm,
                    BaselineAssembler::ScratchRegisterScope* scope,
                    Arg arg) {
  Register reg = scope->AcquireScratch();
  basm->Move(reg, arg);
  return reg;
}

template <typename... Args>
struct PushAllHelper;
template <>
struct PushAllHelper<> {
  static int Push(BaselineAssembler* basm) { return 0; }
  static int PushReverse(BaselineAssembler* basm) { return 0; }
};
// TODO(ishell): try to pack sequence of pushes into one instruction by
// looking at register codes. For example, Push(r1, r2, r5, r0, r3, r4)
// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
template <typename Arg>
struct PushAllHelper<Arg> {
  static int Push(BaselineAssembler* basm, Arg arg) {
    BaselineAssembler::ScratchRegisterScope scope(basm);
    basm->masm()->Push(ToRegister(basm, &scope, arg));
    return 1;
  }
  static int PushReverse(BaselineAssembler* basm, Arg arg) {
    return Push(basm, arg);
  }
};
// TODO(ishell): try to pack sequence of pushes into one instruction by
// looking at register codes. For example, Push(r1, r2, r5, r0, r3, r4)
// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
template <typename Arg, typename... Args>
struct PushAllHelper<Arg, Args...> {
  static int Push(BaselineAssembler* basm, Arg arg, Args... args) {
    PushAllHelper<Arg>::Push(basm, arg);
    return 1 + PushAllHelper<Args...>::Push(basm, args...);
  }
  static int PushReverse(BaselineAssembler* basm, Arg arg, Args... args) {
    int nargs = PushAllHelper<Args...>::PushReverse(basm, args...);
    PushAllHelper<Arg>::Push(basm, arg);
    return nargs + 1;
  }
};
template <>
struct PushAllHelper<interpreter::RegisterList> {
  static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
    for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
      PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
    }
    return list.register_count();
  }
  static int PushReverse(BaselineAssembler* basm,
                         interpreter::RegisterList list) {
    for (int reg_index = list.register_count() - 1; reg_index >= 0;
         --reg_index) {
      PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
    }
    return list.register_count();
  }
};
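// The variadic Push below expands at compile time into one machine push per
// argument; for example Push(r1, kInterpreterAccumulatorRegister, r2) emits
// three single-register pushes, left to right. PushReverse emits the same
// pushes in reverse argument order, for callers that need the values laid out
// on the stack the other way around.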

template <typename... T>
struct PopAllHelper;
template <>
struct PopAllHelper<> {
  static void Pop(BaselineAssembler* basm) {}
};
// TODO(ishell): try to pack sequence of pops into one instruction by
// looking at register codes. For example, Pop(r1, r2, r5, r0, r3, r4)
// could be generated as two pops: Pop(r1, r2, r5) and Pop(r0, r3, r4).
template <>
struct PopAllHelper<Register> {
  static void Pop(BaselineAssembler* basm, Register reg) {
    basm->masm()->Pop(reg);
  }
};
template <typename... T>
struct PopAllHelper<Register, T...> {
  static void Pop(BaselineAssembler* basm, Register reg, T... tail) {
    PopAllHelper<Register>::Pop(basm, reg);
    PopAllHelper<T...>::Pop(basm, tail...);
  }
};
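// As with Push, Pop(r0, r1, r2) peels one register at a time and emits three
// single-register pops in argument order; there is no
// interpreter::RegisterList overload on the Pop side.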

}  // namespace detail

template <typename... T>
int BaselineAssembler::Push(T... vals) {
  return detail::PushAllHelper<T...>::Push(this, vals...);
}

template <typename... T>
void BaselineAssembler::PushReverse(T... vals) {
  detail::PushAllHelper<T...>::PushReverse(this, vals...);
}

template <typename... T>
void BaselineAssembler::Pop(T... registers) {
  detail::PopAllHelper<T...>::Pop(this, registers...);
}

void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                        int offset) {
  __ ldr(output, FieldMemOperand(source, offset));
}

void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
                                              int offset) {
  __ ldr(output, FieldMemOperand(source, offset));
}

void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
                                                      Register source,
                                                      int offset) {
  LoadTaggedSignedField(output, source, offset);
  SmiUntag(output);
}

void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                  Register source,
                                                  int offset) {
  __ ldrh(output, FieldMemOperand(source, offset));
}

void BaselineAssembler::LoadWord8Field(Register output, Register source,
                                       int offset) {
  __ ldrb(output, FieldMemOperand(source, offset));
}

void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
                                               Tagged<Smi> value) {
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope temps(this);
  Register tmp = temps.AcquireScratch();
  __ mov(tmp, Operand(value));
  __ str(tmp, FieldMemOperand(target, offset));
}

void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
                                                         int offset,
                                                         Register value) {
  ASM_CODE_COMMENT(masm_);
  DCHECK(!AreAliased(target, value));
  __ str(value, FieldMemOperand(target, offset));
  __ RecordWriteField(target, offset, value, kLRHasNotBeenSaved,
                      SaveFPRegsMode::kIgnore);
}

void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
                                                       int offset,
                                                       Register value) {
  __ str(value, FieldMemOperand(target, offset));
}
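// The write-barrier variant above pairs the plain str with RecordWriteField so
// that V8's incremental/generational GC is told about the new pointer; the
// NoWriteBarrier variant is presumably reserved for stores the caller already
// knows cannot create a reference the GC must track (e.g. storing Smis or
// writing into freshly allocated objects).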

void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                Register feedback_vector,
                                                FeedbackSlot slot,
                                                Label* on_result,
                                                Label::Distance) {
  Label fallthrough;
  LoadTaggedField(scratch_and_result, feedback_vector,
                  FeedbackVector::OffsetOfElementAt(slot.ToInt()));
  __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);

  // Is it marked_for_deoptimization? If yes, clear the slot.
  {
    ScratchRegisterScope temps(this);

    // The entry references a CodeWrapper object. Unwrap it now.
    __ ldr(scratch_and_result,
           FieldMemOperand(scratch_and_result, CodeWrapper::kCodeOffset));

    Register scratch = temps.AcquireScratch();
    __ TestCodeIsMarkedForDeoptimization(scratch_and_result, scratch);
    __ b(eq, on_result);
    __ mov(scratch, __ ClearedValue());
    StoreTaggedFieldNoWriteBarrier(
        feedback_vector, FeedbackVector::OffsetOfElementAt(slot.ToInt()),
        scratch);
  }

  __ bind(&fallthrough);
  Move(scratch_and_result, 0);
}
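// In short: the feedback slot either yields a still-live, not-deoptimized Code
// object (branch to on_result with it in scratch_and_result), or the weak
// reference was cleared / the code was marked for deoptimization, in which
// case the slot is reset to the cleared value and scratch_and_result is zeroed
// on the fallthrough path.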

void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
    int32_t weight, Label* skip_interrupt_label) {
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFeedbackCell(feedback_cell);

  Register interrupt_budget = scratch_scope.AcquireScratch();
  __ ldr(interrupt_budget,
         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  // Remember to set flags as part of the add!
  __ add(interrupt_budget, interrupt_budget, Operand(weight), SetCC);
  __ str(interrupt_budget,
         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  if (skip_interrupt_label) {
    // Use the compare flags set by the add above.
    DCHECK_LT(weight, 0);
    __ b(ge, skip_interrupt_label);
  }
}
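// The budget weight is negative here, so the SetCC add both updates the budget
// and leaves the flags describing its sign: a still non-negative budget (ge)
// skips the interrupt, while a budget that has gone negative falls through to
// the caller's interrupt handling.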

void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
    Register weight, Label* skip_interrupt_label) {
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFeedbackCell(feedback_cell);

  Register interrupt_budget = scratch_scope.AcquireScratch();
  __ ldr(interrupt_budget,
         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  // Remember to set flags as part of the add!
  __ add(interrupt_budget, interrupt_budget, weight, SetCC);
  __ str(interrupt_budget,
         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  if (skip_interrupt_label) __ b(ge, skip_interrupt_label);
}

void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                       uint32_t depth,
                                       CompressionMode compression_mode) {
  for (; depth > 0; --depth) {
    LoadTaggedField(context, context, Context::kPreviousOffset);
  }
  LoadTaggedField(kInterpreterAccumulatorRegister, context,
                  Context::OffsetOfElementAt(index));
}

void BaselineAssembler::StaContextSlot(Register context, Register value,
                                       uint32_t index, uint32_t depth) {
  for (; depth > 0; --depth) {
    LoadTaggedField(context, context, Context::kPreviousOffset);
  }
  StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                   value);
}

void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                          uint32_t depth) {
  for (; depth > 0; --depth) {
    LoadTaggedField(context, context, Context::kPreviousOffset);
  }
  LoadTaggedField(context, context, Context::kExtensionOffset);
  if (cell_index > 0) {
    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
    // The actual array index is (cell_index - 1).
    cell_index -= 1;
  } else {
    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
    // The actual array index is (-cell_index - 1).
    cell_index = -cell_index - 1;
  }
  LoadFixedArrayElement(context, context, cell_index);
  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
}
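// Module variables are addressed by a signed cell_index coming from the
// bytecode: a positive index n maps to regular_exports[n - 1], a negative
// index maps to regular_imports[-n - 1], which is exactly the adjustment
// performed above before LoadFixedArrayElement.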

void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                          int cell_index, uint32_t depth) {
  for (; depth > 0; --depth) {
    LoadTaggedField(context, context, Context::kPreviousOffset);
  }
  LoadTaggedField(context, context, Context::kExtensionOffset);
  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);

  // The actual array index is (cell_index - 1).
  cell_index -= 1;
  LoadFixedArrayElement(context, context, cell_index);
  StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}

void BaselineAssembler::IncrementSmi(MemOperand lhs) {
  ScratchRegisterScope temps(this);
  Register tmp = temps.AcquireScratch();
  __ ldr(tmp, lhs);
  __ add(tmp, tmp, Operand(Smi::FromInt(1)));
  __ str(tmp, lhs);
}

void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
  __ and_(output, lhs, Operand(rhs));
}

void BaselineAssembler::Switch(Register reg, int case_value_base,
                               Label** labels, int num_labels) {
  __ MacroAssembler::Switch(Register::no_reg(), reg, case_value_base, labels,
                            num_labels);
}

#undef __

#define __ basm.

void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
  ASM_CODE_COMMENT(masm);
  BaselineAssembler basm(masm);

  Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
  Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();

  {
    ASM_CODE_COMMENT_STRING(masm, "Update Interrupt Budget");

    Label skip_interrupt_label;
    __ AddToInterruptBudgetAndJumpIfNotExceeded(weight, &skip_interrupt_label);
    {
      __ masm()->SmiTag(params_size);
      __ masm()->Push(params_size, kInterpreterAccumulatorRegister);

      __ LoadContext(kContextRegister);
      __ LoadFunction(kJSFunctionRegister);
      __ masm()->Push(kJSFunctionRegister);
      __ CallRuntime(Runtime::kBytecodeBudgetInterrupt_Sparkplug, 1);

      __ masm()->Pop(kInterpreterAccumulatorRegister, params_size);
      __ masm()->SmiUntag(params_size);
    }

    __ Bind(&skip_interrupt_label);
  }

  BaselineAssembler::ScratchRegisterScope temps(&basm);
  Register actual_params_size = temps.AcquireScratch();
  // Compute the size of the actual parameters + receiver.
  __ Move(actual_params_size,
          MemOperand(fp, StandardFrameConstants::kArgCOffset));

  // If actual is bigger than formal, use it to free up the stack arguments.
  __ masm()->cmp(params_size, actual_params_size);
  __ masm()->mov(params_size, actual_params_size, LeaveCC, kLessThan);

  // Leave the frame (also dropping the register file).
  __ masm()->LeaveFrame(StackFrame::BASELINE);

  // Drop receiver + arguments.
  __ masm()->DropArguments(params_size);
  __ masm()->Ret();
}

#undef __

}  // namespace baseline
}  // namespace internal
}  // namespace v8

#endif  // V8_BASELINE_ARM_BASELINE_ASSEMBLER_ARM_INL_H_