v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
baseline-assembler-loong64-inl.h

// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_BASELINE_LOONG64_BASELINE_ASSEMBLER_LOONG64_INL_H_
#define V8_BASELINE_LOONG64_BASELINE_ASSEMBLER_LOONG64_INL_H_

#include "src/baseline/baseline-assembler.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/loong64/assembler-loong64-inl.h"
#include "src/objects/literal-objects-inl.h"

namespace v8 {
namespace internal {
namespace baseline {

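// Scratch register scopes nest: each one wraps the MacroAssembler's
// UseScratchRegisterScope, and the outermost scope additionally donates
// t0-t3 to the pool of available scratch registers.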
class BaselineAssembler::ScratchRegisterScope {
 public:
  explicit ScratchRegisterScope(BaselineAssembler* assembler)
      : assembler_(assembler),
        prev_scope_(assembler->scratch_register_scope_),
        wrapped_scope_(assembler->masm()) {
    if (!assembler_->scratch_register_scope_) {
      // If we haven't opened a scratch scope yet, for the first one add a
      // couple of extra registers.
      wrapped_scope_.Include({t0, t1, t2, t3});
    }
    assembler_->scratch_register_scope_ = this;
  }
  ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }

  Register AcquireScratch() { return wrapped_scope_.Acquire(); }

 private:
  BaselineAssembler* assembler_;
  ScratchRegisterScope* prev_scope_;
  UseScratchRegisterScope wrapped_scope_;
};

namespace detail {

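// Debug-only helper: returns true if writing to |target| would clobber the
// base or index register of the memory operand |op|.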
#ifdef DEBUG
inline bool Clobbers(Register target, MemOperand op) {
  return op.base() == target || op.index() == target;
}
#endif

}  // namespace detail

#define __ masm_->

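// Interpreter registers live at fixed slots in the baseline frame, so they
// are addressed directly relative to fp.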
MemOperand BaselineAssembler::RegisterFrameOperand(
    interpreter::Register interpreter_register) {
  return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
}
void BaselineAssembler::RegisterFrameAddress(
    interpreter::Register interpreter_register, Register rscratch) {
  return __ Add_d(rscratch, fp,
                  interpreter_register.ToOperand() * kSystemPointerSize);
}
MemOperand BaselineAssembler::FeedbackVectorOperand() {
  return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
MemOperand BaselineAssembler::FeedbackCellOperand() {
  return MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp);
}

void BaselineAssembler::Bind(Label* label) { __ bind(label); }

void BaselineAssembler::JumpTarget() {
  // NOP.
}
void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
  __ Branch(target);
}
void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
                                   Label* target, Label::Distance) {
  __ JumpIfRoot(value, index, target);
}
void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
                                      Label* target, Label::Distance) {
  __ JumpIfNotRoot(value, index, target);
}
void BaselineAssembler::JumpIfSmi(Register value, Label* target,
                                  Label::Distance) {
  __ JumpIfSmi(value, target);
}
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
                                     Label::Distance) {
  __ JumpIfNotSmi(value, target);
}
void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
                                        Label* target,
                                        Label::Distance distance) {
  JumpIf(cc, left, Operand(right), target, distance);
}

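// Masks |value| into a scratch register and branches on the result compared
// against zero.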
void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
                                      Label* target, Label::Distance) {
  ScratchRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  __ And(scratch, value, Operand(mask));
  __ Branch(target, cc, scratch, Operand(zero_reg));
}

void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
                               Label* target, Label::Distance) {
  __ Branch(target, cc, lhs, Operand(rhs));
}

#if V8_STATIC_ROOTS_BOOL
void BaselineAssembler::JumpIfJSAnyIsPrimitive(Register heap_object,
                                               Label* target,
                                               Label::Distance distance) {
  __ AssertNotSmi(heap_object);
  ScratchRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  __ JumpIfJSAnyIsPrimitive(heap_object, scratch, target, distance);
}
#endif  // V8_STATIC_ROOTS_BOOL

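// For eq/ne the object type check can use the MacroAssembler fast path
// directly; other conditions load the instance type and compare explicitly.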
void BaselineAssembler::JumpIfObjectTypeFast(Condition cc, Register object,
                                             InstanceType instance_type,
                                             Label* target,
                                             Label::Distance distance) {
  ScratchRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  if (cc == eq || cc == ne) {
    __ JumpIfObjectType(target, cc, object, instance_type, scratch);
    return;
  }
  JumpIfObjectType(cc, object, instance_type, scratch, target, distance);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
                                         InstanceType instance_type,
                                         Register map, Label* target,
                                         Label::Distance) {
  ScratchRegisterScope temps(this);
  Register type = temps.AcquireScratch();
  __ GetObjectType(object, map, type);
  __ Branch(target, cc, type, Operand(instance_type));
}
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
                                           InstanceType instance_type,
                                           Label* target, Label::Distance) {
  ScratchRegisterScope temps(this);
  Register type = temps.AcquireScratch();
  if (v8_flags.debug_code) {
    __ AssertNotSmi(map);
    __ GetObjectType(map, type, type);
    __ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
  }
  __ Ld_hu(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
  __ Branch(target, cc, type, Operand(instance_type));
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Tagged<Smi> smi,
                                  Label* target, Label::Distance) {
  __ CompareTaggedAndBranch(target, cc, value, Operand(smi));
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
                                  Label* target, Label::Distance) {
  __ AssertSmi(lhs);
  __ AssertSmi(rhs);
  __ CompareTaggedAndBranch(target, cc, lhs, Operand(rhs));
}
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
                                     MemOperand operand, Label* target,
                                     Label::Distance) {
  ScratchRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  __ Ld_d(scratch, operand);
  __ CompareTaggedAndBranch(target, cc, value, Operand(scratch));
}
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
                                     Register value, Label* target,
                                     Label::Distance) {
  ScratchRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  __ Ld_d(scratch, operand);
  __ CompareTaggedAndBranch(target, cc, scratch, Operand(value));
}
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
                                   Label* target, Label::Distance) {
  __ Branch(target, cc, value, Operand(byte));
}
void BaselineAssembler::Move(interpreter::Register output, Register source) {
  Move(RegisterFrameOperand(output), source);
}
void BaselineAssembler::Move(Register output, Tagged<TaggedIndex> value) {
  __ li(output, Operand(value.ptr()));
}
void BaselineAssembler::Move(MemOperand output, Register source) {
  __ St_d(source, output);
}
void BaselineAssembler::Move(Register output, ExternalReference reference) {
  __ li(output, Operand(reference));
}
void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
  __ li(output, Operand(value));
}
void BaselineAssembler::Move(Register output, int32_t value) {
  __ li(output, Operand(value));
}
void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
  __ Move(output, source);
}
void BaselineAssembler::MoveSmi(Register output, Register source) {
  __ Move(output, source);
}

namespace detail {

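// Coerces a push argument into a register: immediates and other non-register
// values are materialized into a scratch register, while registers are
// passed through unchanged.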
template <typename Arg>
inline Register ToRegister(BaselineAssembler* basm,
                           BaselineAssembler::ScratchRegisterScope* scope,
                           Arg arg) {
  Register reg = scope->AcquireScratch();
  basm->Move(reg, arg);
  return reg;
}
inline Register ToRegister(BaselineAssembler* basm,
                           BaselineAssembler::ScratchRegisterScope* scope,
                           Register reg) {
  return reg;
}
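// Variadic push/pop helpers: arguments are pushed (or popped) one at a time
// via template recursion; PushReverse emits the pushes in reverse order.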
template <typename... Args>
struct PushAllHelper;
template <>
struct PushAllHelper<> {
  static int Push(BaselineAssembler* basm) { return 0; }
  static int PushReverse(BaselineAssembler* basm) { return 0; }
};
// TODO(ishell): try to pack sequence of pushes into one instruction by
// looking at register codes. For example, Push(r1, r2, r5, r0, r3, r4)
// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
template <typename Arg>
struct PushAllHelper<Arg> {
  static int Push(BaselineAssembler* basm, Arg arg) {
    BaselineAssembler::ScratchRegisterScope scope(basm);
    basm->masm()->Push(ToRegister(basm, &scope, arg));
    return 1;
  }
  static int PushReverse(BaselineAssembler* basm, Arg arg) {
    return Push(basm, arg);
  }
};
// TODO(ishell): try to pack sequence of pushes into one instruction by
// looking at register codes. For example, Push(r1, r2, r5, r0, r3, r4)
// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
template <typename Arg, typename... Args>
struct PushAllHelper<Arg, Args...> {
  static int Push(BaselineAssembler* basm, Arg arg, Args... args) {
    PushAllHelper<Arg>::Push(basm, arg);
    return 1 + PushAllHelper<Args...>::Push(basm, args...);
  }
  static int PushReverse(BaselineAssembler* basm, Arg arg, Args... args) {
    int nargs = PushAllHelper<Args...>::PushReverse(basm, args...);
    PushAllHelper<Arg>::Push(basm, arg);
    return nargs + 1;
  }
};

template <>
struct PushAllHelper<interpreter::RegisterList> {
  static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
    for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
      PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
    }
    return list.register_count();
  }
  static int PushReverse(BaselineAssembler* basm,
                         interpreter::RegisterList list) {
    for (int reg_index = list.register_count() - 1; reg_index >= 0;
         --reg_index) {
      PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
    }
    return list.register_count();
  }
};

template <typename... T>
struct PopAllHelper;
template <>
struct PopAllHelper<> {
  static void Pop(BaselineAssembler* basm) {}
};
// TODO(ishell): try to pack sequence of pops into one instruction by
// looking at register codes. For example, Pop(r1, r2, r5, r0, r3, r4)
// could be generated as two pops: Pop(r1, r2, r5) and Pop(r0, r3, r4).
template <>
struct PopAllHelper<Register> {
  static void Pop(BaselineAssembler* basm, Register reg) {
    basm->masm()->Pop(reg);
  }
};
template <typename... T>
struct PopAllHelper<Register, T...> {
  static void Pop(BaselineAssembler* basm, Register reg, T... tail) {
    PopAllHelper<Register>::Pop(basm, reg);
    PopAllHelper<T...>::Pop(basm, tail...);
  }
};

}  // namespace detail

template <typename... T>
int BaselineAssembler::Push(T... vals) {
  return detail::PushAllHelper<T...>::Push(this, vals...);
}

template <typename... T>
void BaselineAssembler::PushReverse(T... vals) {
  detail::PushAllHelper<T...>::PushReverse(this, vals...);
}

template <typename... T>
void BaselineAssembler::Pop(T... registers) {
  detail::PopAllHelper<T...>::Pop(this, registers...);
}

void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                        int offset) {
  __ LoadTaggedField(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
                                              int offset) {
  __ LoadTaggedSignedField(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
                                                      Register source,
                                                      int offset) {
  LoadTaggedSignedField(output, source, offset);
  SmiUntag(output);
}
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                  Register source, int offset) {
  __ Ld_hu(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadWord8Field(Register output, Register source,
                                       int offset) {
  __ Ld_b(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
                                               Tagged<Smi> value) {
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  __ li(scratch, Operand(value));
  __ StoreTaggedField(scratch, FieldMemOperand(target, offset));
}
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
                                                         int offset,
                                                         Register value) {
  ASM_CODE_COMMENT(masm_);
  __ StoreTaggedField(value, FieldMemOperand(target, offset));
  ScratchRegisterScope temps(this);
  __ RecordWriteField(target, offset, value, kRAHasNotBeenSaved,
                      SaveFPRegsMode::kIgnore);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
                                                       int offset,
                                                       Register value) {
  __ StoreTaggedField(value, FieldMemOperand(target, offset));
}
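// Loads the weakly-held optimized OSR code from the given feedback vector
// slot. Jumps to |on_result| if a valid Code object is found; otherwise (the
// weak reference was cleared, or the code is marked for deoptimization, in
// which case the slot is also reset) falls through with zero in
// |scratch_and_result|.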
void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                Register feedback_vector,
                                                FeedbackSlot slot,
                                                Label* on_result,
                                                Label::Distance) {
  Label fallthrough;
  LoadTaggedField(scratch_and_result, feedback_vector,
                  FeedbackVector::OffsetOfElementAt(slot.ToInt()));
  __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
  // Is it marked_for_deoptimization? If yes, clear the slot.
  {
    ScratchRegisterScope temps(this);

    // The entry references a CodeWrapper object. Unwrap it now.
    __ LoadCodePointerField(
        scratch_and_result,
        FieldMemOperand(scratch_and_result, CodeWrapper::kCodeOffset));

    Register scratch = temps.AcquireScratch();
    __ TestCodeIsMarkedForDeoptimizationAndJump(scratch_and_result, scratch, eq,
                                                on_result);
    __ li(scratch, __ ClearedValue());
    StoreTaggedFieldNoWriteBarrier(
        feedback_vector, FeedbackVector::OffsetOfElementAt(slot.ToInt()),
        scratch);
  }
  __ bind(&fallthrough);
  Move(scratch_and_result, 0);
}
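// The interrupt budget is a signed 32-bit counter in the FeedbackCell. After
// adding |weight|, execution continues at |skip_interrupt_label| while the
// budget is non-negative; a negative budget falls through so the caller can
// raise a bytecode budget interrupt.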
void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
    int32_t weight, Label* skip_interrupt_label) {
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFeedbackCell(feedback_cell);

  Register interrupt_budget = scratch_scope.AcquireScratch();
  __ Ld_w(interrupt_budget,
          FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  __ Add_w(interrupt_budget, interrupt_budget, weight);
  __ St_w(interrupt_budget,
          FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  if (skip_interrupt_label) {
    DCHECK_LT(weight, 0);
    __ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg));
  }
}
void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
    Register weight, Label* skip_interrupt_label) {
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFeedbackCell(feedback_cell);

  Register interrupt_budget = scratch_scope.AcquireScratch();
  __ Ld_w(interrupt_budget,
          FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  __ Add_w(interrupt_budget, interrupt_budget, weight);
  __ St_w(interrupt_budget,
          FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  if (skip_interrupt_label)
    __ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg));
}
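// Context slots are reached by following |depth| previous-context links and
// then indexing into the resulting context.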
void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                       uint32_t depth,
                                       CompressionMode compression_mode) {
  for (; depth > 0; --depth) {
    LoadTaggedField(context, context, Context::kPreviousOffset);
  }
  LoadTaggedField(kInterpreterAccumulatorRegister, context,
                  Context::OffsetOfElementAt(index));
}

void BaselineAssembler::StaContextSlot(Register context, Register value,
                                       uint32_t index, uint32_t depth) {
  for (; depth > 0; --depth) {
    LoadTaggedField(context, context, Context::kPreviousOffset);
  }
  StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                   value);
}
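// Module variables use a signed cell index: positive indices (offset by one)
// refer to the regular exports array, negative indices to the regular
// imports array.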
void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                          uint32_t depth) {
  for (; depth > 0; --depth) {
    LoadTaggedField(context, context, Context::kPreviousOffset);
  }
  LoadTaggedField(context, context, Context::kExtensionOffset);
  if (cell_index > 0) {
    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
    // The actual array index is (cell_index - 1).
    cell_index -= 1;
  } else {
    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
    // The actual array index is (-cell_index - 1).
    cell_index = -cell_index - 1;
  }
  LoadFixedArrayElement(context, context, cell_index);
  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
}

void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                          int cell_index, uint32_t depth) {
  for (; depth > 0; --depth) {
    LoadTaggedField(context, context, Context::kPreviousOffset);
  }
  LoadTaggedField(context, context, Context::kExtensionOffset);
  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);

  // The actual array index is (cell_index - 1).
  cell_index -= 1;
  LoadFixedArrayElement(context, context, cell_index);
  StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}
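// Increments a Smi in memory without untagging it. With 31-bit Smis the
// tagged value fits in a 32-bit word, so word-sized load/add/store suffice;
// otherwise the full 64-bit value is updated.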
void BaselineAssembler::IncrementSmi(MemOperand lhs) {
  BaselineAssembler::ScratchRegisterScope temps(this);
  Register tmp = temps.AcquireScratch();
  if (SmiValuesAre31Bits()) {
    __ Ld_w(tmp, lhs);
    __ Add_w(tmp, tmp, Operand(Smi::FromInt(1)));
    __ St_w(tmp, lhs);
  } else {
    __ Ld_d(tmp, lhs);
    __ Add_d(tmp, tmp, Operand(Smi::FromInt(1)));
    __ St_d(tmp, lhs);
  }
}

void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
  __ And(output, lhs, Operand(rhs));
}
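// Jump-table dispatch: rebases the case value against |case_value_base|,
// bounds-checks it against |num_labels|, and indexes into a generated switch
// table; out-of-range values fall through.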
void BaselineAssembler::Switch(Register reg, int case_value_base,
                               Label** labels, int num_labels) {
  ASM_CODE_COMMENT(masm_);
  Label fallthrough;
  if (case_value_base != 0) {
    __ Sub_d(reg, reg, Operand(case_value_base));
  }

  __ Branch(&fallthrough, kUnsignedGreaterThanEqual, reg, Operand(num_labels));

  __ GenerateSwitchTable(reg, num_labels,
                         [labels](size_t i) { return labels[i]; });

  __ bind(&fallthrough);
}

#undef __

#define __ basm.

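// Epilogue for baseline code: updates the interrupt budget (calling the
// Sparkplug budget interrupt runtime function if it is exhausted), then
// leaves the frame and drops the larger of the formal and actual argument
// counts before returning.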
void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
  ASM_CODE_COMMENT(masm);
  BaselineAssembler basm(masm);

  Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
  Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();

  {
    ASM_CODE_COMMENT_STRING(masm, "Update Interrupt Budget");

    Label skip_interrupt_label;
    __ AddToInterruptBudgetAndJumpIfNotExceeded(weight, &skip_interrupt_label);
    __ masm()->SmiTag(params_size);
    __ masm()->Push(params_size, kInterpreterAccumulatorRegister);

    __ LoadContext(kContextRegister);
    __ LoadFunction(kJSFunctionRegister);
    __ masm()->Push(kJSFunctionRegister);
    __ CallRuntime(Runtime::kBytecodeBudgetInterrupt_Sparkplug, 1);

    __ masm()->Pop(params_size, kInterpreterAccumulatorRegister);
    __ masm()->SmiUntag(params_size);
    __ Bind(&skip_interrupt_label);
  }

  BaselineAssembler::ScratchRegisterScope temps(&basm);
  Register actual_params_size = temps.AcquireScratch();
  // Compute the size of the actual parameters + receiver.
  __ Move(actual_params_size,
          MemOperand(fp, StandardFrameConstants::kArgCOffset));

  // If actual is bigger than formal, then we should use it to free up the
  // stack arguments.
  Label corrected_args_count;
  __ masm()->Branch(&corrected_args_count, ge, params_size,
                    Operand(actual_params_size));
  __ masm()->Move(params_size, actual_params_size);
  __ Bind(&corrected_args_count);

  // Leave the frame (also dropping the register file).
  __ masm()->LeaveFrame(StackFrame::BASELINE);

  // Drop arguments.
  __ masm()->DropArguments(params_size);
  __ masm()->Ret();
}

#undef __

inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
    Register reg) {
  assembler_->masm()->Assert(eq, AbortReason::kAccumulatorClobbered, reg,
                             Operand(kInterpreterAccumulatorRegister));
}

}  // namespace baseline
}  // namespace internal
}  // namespace v8

#endif  // V8_BASELINE_LOONG64_BASELINE_ASSEMBLER_LOONG64_INL_H_