// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_BASELINE_MIPS64_BASELINE_ASSEMBLER_MIPS64_INL_H_
#define V8_BASELINE_MIPS64_BASELINE_ASSEMBLER_MIPS64_INL_H_

#include "src/baseline/baseline-assembler.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/mips64/assembler-mips64-inl.h"
#include "src/objects/literal-objects-inl.h"

namespace v8 {
namespace internal {
namespace baseline {
class BaselineAssembler::ScratchRegisterScope {
 public:
  explicit ScratchRegisterScope(BaselineAssembler* assembler)
      : assembler_(assembler),
        prev_scope_(assembler->scratch_register_scope_),
        wrapped_scope_(assembler->masm()) {
    if (!assembler_->scratch_register_scope_) {
      // If we haven't opened a scratch scope yet, for the first one add a
      // couple of extra registers.
      wrapped_scope_.Include({t0, t1, t2, t3});
    }
    assembler_->scratch_register_scope_ = this;
  }
  ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }

  Register AcquireScratch() { return wrapped_scope_.Acquire(); }

 private:
  BaselineAssembler* assembler_;
  ScratchRegisterScope* prev_scope_;
  UseScratchRegisterScope wrapped_scope_;
};
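
// Illustrative usage sketch (editorial addition, not part of the original
// file): scopes nest through prev_scope_, so temporaries acquired in an
// inner scope are returned to the pool when that scope is destroyed.
//
//   BaselineAssembler::ScratchRegisterScope temps(&basm);
//   Register tmp = temps.AcquireScratch();  // e.g. one of t0..t3
//   ...                                     // use tmp as a temporary
//   // tmp is released when temps goes out of scope.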

namespace detail {

#ifdef DEBUG
inline bool Clobbers(Register target, MemOperand op) {
  return op.is_reg() && op.rm() == target;
}
#endif

}  // namespace detail
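
// Editorial note (hedged): this DEBUG-only predicate reports whether writing
// `target` would invalidate the address computation of `op`; it is the kind
// of check used in DCHECKs of move helpers, e.g.
//
//   DCHECK(!detail::Clobbers(destination, source_operand));  // hypothetical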

#define __ masm_->

MemOperand BaselineAssembler::RegisterFrameOperand(
    interpreter::Register interpreter_register) {
  return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
}
void BaselineAssembler::RegisterFrameAddress(
    interpreter::Register interpreter_register, Register rscratch) {
  return __ Daddu(rscratch, fp,
                  interpreter_register.ToOperand() * kSystemPointerSize);
}
MemOperand BaselineAssembler::FeedbackVectorOperand() {
  return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
MemOperand BaselineAssembler::FeedbackCellOperand() {
  return MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp);
}

void BaselineAssembler::Bind(Label* label) { __ bind(label); }

void BaselineAssembler::JumpTarget() {
  // NOP.
}
void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
  __ Branch(target);
}
void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
                                   Label* target, Label::Distance) {
  __ JumpIfRoot(value, index, target);
}
void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
                                      Label* target, Label::Distance) {
  __ JumpIfNotRoot(value, index, target);
}
void BaselineAssembler::JumpIfSmi(Register value, Label* target,
                                  Label::Distance) {
  __ JumpIfSmi(value, target);
}
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
                                     Label::Distance) {
  __ JumpIfNotSmi(value, target);
}
void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
                                        Label* target,
                                        Label::Distance distance) {
  JumpIf(cc, left, Operand(right), target, distance);
}

void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
                                      Label* target, Label::Distance) {
  ScratchRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  __ And(scratch, value, Operand(mask));
  __ Branch(target, cc, scratch, Operand(zero_reg));
}

void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
                               Label* target, Label::Distance) {
  __ Branch(target, cc, lhs, Operand(rhs));
}
void BaselineAssembler::JumpIfObjectTypeFast(Condition cc, Register object,
                                             InstanceType instance_type,
                                             Label* target,
                                             Label::Distance distance) {
  ScratchRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  JumpIfObjectType(cc, object, instance_type, scratch, target, distance);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
                                         InstanceType instance_type,
                                         Register map, Label* target,
                                         Label::Distance) {
  ScratchRegisterScope temps(this);
  Register type = temps.AcquireScratch();
  __ GetObjectType(object, map, type);
  __ Branch(target, cc, type, Operand(instance_type));
}
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
                                           InstanceType instance_type,
                                           Label* target, Label::Distance) {
  ScratchRegisterScope temps(this);
  Register type = temps.AcquireScratch();
  if (v8_flags.debug_code) {
    __ AssertNotSmi(map);
    __ GetObjectType(map, type, type);
    __ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
  }
  __ Ld(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
  __ Branch(target, cc, type, Operand(instance_type));
}
void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
                                      MemOperand operand, Label* target,
                                      Label::Distance) {
  ScratchRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  __ Ld(scratch, operand);
  __ Branch(target, cc, value, Operand(scratch));
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Tagged<Smi> smi,
                                  Label* target, Label::Distance) {
  ScratchRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  __ li(scratch, Operand(smi));
  __ SmiUntag(scratch);
  __ Branch(target, cc, value, Operand(scratch));
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
                                  Label* target, Label::Distance) {
  __ AssertSmi(lhs);
  __ AssertSmi(rhs);
  __ Branch(target, cc, lhs, Operand(rhs));
}
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
                                     MemOperand operand, Label* target,
                                     Label::Distance) {
  ScratchRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  __ Ld(scratch, operand);
  __ Branch(target, cc, value, Operand(scratch));
}
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
                                     Register value, Label* target,
                                     Label::Distance) {
  ScratchRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  __ Ld(scratch, operand);
  __ Branch(target, cc, scratch, Operand(value));
}
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
                                   Label* target, Label::Distance) {
  __ Branch(target, cc, value, Operand(byte));
}

void BaselineAssembler::Move(interpreter::Register output, Register source) {
  Move(RegisterFrameOperand(output), source);
}
void BaselineAssembler::Move(Register output, Tagged<TaggedIndex> value) {
  __ li(output, Operand(value.ptr()));
}
void BaselineAssembler::Move(MemOperand output, Register source) {
  __ Sd(source, output);
}
void BaselineAssembler::Move(Register output, ExternalReference reference) {
  __ li(output, Operand(reference));
}
void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
  __ li(output, Operand(value));
}
void BaselineAssembler::Move(Register output, int32_t value) {
  __ li(output, Operand(value));
}
void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
  __ Move(output, source);
}
void BaselineAssembler::MoveSmi(Register output, Register source) {
  __ Move(output, source);
}

namespace detail {

template <typename Arg>
inline Register ToRegister(BaselineAssembler* basm,
                           BaselineAssembler::ScratchRegisterScope* scope,
                           Arg arg) {
  Register reg = scope->AcquireScratch();
  basm->Move(reg, arg);
  return reg;
}
inline Register ToRegister(BaselineAssembler* basm,
                           BaselineAssembler::ScratchRegisterScope* scope,
                           Register reg) {
  return reg;
}

template <typename... Args>
struct PushAllHelper;
template <>
struct PushAllHelper<> {
  static int Push(BaselineAssembler* basm) { return 0; }
  static int PushReverse(BaselineAssembler* basm) { return 0; }
};
// TODO(ishell): try to pack sequence of pushes into one instruction by
// looking at register codes. For example, Push(r1, r2, r5, r0, r3, r4)
// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
template <typename Arg>
struct PushAllHelper<Arg> {
  static int Push(BaselineAssembler* basm, Arg arg) {
    BaselineAssembler::ScratchRegisterScope scope(basm);
    basm->masm()->Push(ToRegister(basm, &scope, arg));
    return 1;
  }
  static int PushReverse(BaselineAssembler* basm, Arg arg) {
    return Push(basm, arg);
  }
};
// TODO(ishell): try to pack sequence of pushes into one instruction by
// looking at register codes. For example, Push(r1, r2, r5, r0, r3, r4)
// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
template <typename Arg, typename... Args>
struct PushAllHelper<Arg, Args...> {
  static int Push(BaselineAssembler* basm, Arg arg, Args... args) {
    PushAllHelper<Arg>::Push(basm, arg);
    return 1 + PushAllHelper<Args...>::Push(basm, args...);
  }
  static int PushReverse(BaselineAssembler* basm, Arg arg, Args... args) {
    int nargs = PushAllHelper<Args...>::PushReverse(basm, args...);
    PushAllHelper<Arg>::Push(basm, arg);
    return nargs + 1;
  }
};
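
// How the recursion above expands (editorial sketch, not from the original
// source): Push emits left-to-right, PushReverse right-to-left, e.g.
//
//   basm->Push(a, b, c);         // pushes a, then b, then c
//   basm->PushReverse(a, b, c);  // pushes c, then b, then a
//
// Non-register arguments (Smis, handles, interpreter registers) are first
// materialized into a scratch register via detail::ToRegister.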
template <>
struct PushAllHelper<interpreter::RegisterList> {
  static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
    for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
      PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
    }
    return list.register_count();
  }
  static int PushReverse(BaselineAssembler* basm,
                         interpreter::RegisterList list) {
    for (int reg_index = list.register_count() - 1; reg_index >= 0;
         --reg_index) {
      PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
    }
    return list.register_count();
  }
};

template <typename... T>
struct PopAllHelper;
template <>
struct PopAllHelper<> {
  static void Pop(BaselineAssembler* basm) {}
};
// TODO(ishell): try to pack sequence of pops into one instruction by
// looking at register codes. For example, Pop(r1, r2, r5, r0, r3, r4)
// could be generated as two pops: Pop(r1, r2, r5) and Pop(r0, r3, r4).
template <>
struct PopAllHelper<Register> {
  static void Pop(BaselineAssembler* basm, Register reg) {
    basm->masm()->Pop(reg);
  }
};
template <typename... T>
struct PopAllHelper<Register, T...> {
  static void Pop(BaselineAssembler* basm, Register reg, T... tail) {
    PopAllHelper<Register>::Pop(basm, reg);
    PopAllHelper<T...>::Pop(basm, tail...);
  }
};

}  // namespace detail

template <typename... T>
int BaselineAssembler::Push(T... vals) {
  return detail::PushAllHelper<T...>::Push(this, vals...);
}

template <typename... T>
void BaselineAssembler::PushReverse(T... vals) {
  detail::PushAllHelper<T...>::PushReverse(this, vals...);
}

template <typename... T>
void BaselineAssembler::Pop(T... registers) {
  detail::PopAllHelper<T...>::Pop(this, registers...);
}

void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                        int offset) {
  __ Ld(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
                                              int offset) {
  __ Ld(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
                                                      Register source,
                                                      int offset) {
  LoadTaggedSignedField(output, source, offset);
  SmiUntag(output);
}
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                  Register source, int offset) {
  __ Lhu(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadWord8Field(Register output, Register source,
                                       int offset) {
  __ Lb(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
                                               Tagged<Smi> value) {
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  __ li(scratch, Operand(value));
  __ Sd(scratch, FieldMemOperand(target, offset));
}
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
                                                         int offset,
                                                         Register value) {
  ASM_CODE_COMMENT(masm_);
  __ Sd(value, FieldMemOperand(target, offset));
  ScratchRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  __ RecordWriteField(target, offset, value, scratch, kRAHasNotBeenSaved,
                      SaveFPRegsMode::kIgnore);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
                                                       int offset,
                                                       Register value) {
  __ Sd(value, FieldMemOperand(target, offset));
}

void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                Register feedback_vector,
                                                FeedbackSlot slot,
                                                Label* on_result,
                                                Label::Distance) {
  Label fallthrough;
  LoadTaggedField(scratch_and_result, feedback_vector,
                  FeedbackVector::OffsetOfElementAt(slot.ToInt()));
  __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
  // Is it marked_for_deoptimization? If yes, clear the slot.
  {
    ScratchRegisterScope temps(this);

    // The entry references a CodeWrapper object. Unwrap it now.
    __ Ld(scratch_and_result,
          FieldMemOperand(scratch_and_result, CodeWrapper::kCodeOffset));

    Register scratch = temps.AcquireScratch();
    __ TestCodeIsMarkedForDeoptimizationAndJump(scratch_and_result, scratch, eq,
                                                on_result);
    __ li(scratch, __ ClearedValue());
    StoreTaggedFieldNoWriteBarrier(
        feedback_vector, FeedbackVector::OffsetOfElementAt(slot.ToInt()),
        scratch);
  }
  __ bind(&fallthrough);
  Move(scratch_and_result, 0);
}
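
// Editorial summary of the slot protocol implemented above: the feedback
// slot holds a weak reference to a CodeWrapper. If the weak value has been
// cleared, we fall through and return 0; if the unwrapped Code is marked
// for deoptimization, the slot is reset to the cleared value and we fall
// through; otherwise we jump to on_result with the Code in
// scratch_and_result.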

void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
    int32_t weight, Label* skip_interrupt_label) {
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFeedbackCell(feedback_cell);

  Register interrupt_budget = scratch_scope.AcquireScratch();
  __ Lw(interrupt_budget,
        FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  __ Addu(interrupt_budget, interrupt_budget, weight);
  __ Sw(interrupt_budget,
        FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  if (skip_interrupt_label) {
    DCHECK_LT(weight, 0);
    __ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg));
  }
}
void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
    Register weight, Label* skip_interrupt_label) {
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFeedbackCell(feedback_cell);

  Register interrupt_budget = scratch_scope.AcquireScratch();
  __ Lw(interrupt_budget,
        FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  __ Addu(interrupt_budget, interrupt_budget, weight);
  __ Sw(interrupt_budget,
        FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  if (skip_interrupt_label)
    __ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg));
}
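
// Editorial note on the budget protocol, as implied by the code above: when
// a skip label is supplied the weight must be negative (DCHECK_LT), so the
// budget counts down. While the updated budget stays >= 0 we branch to
// skip_interrupt_label; once it goes negative, execution falls through to
// the caller's runtime budget interrupt (see EmitReturn below).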

void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                       uint32_t depth,
                                       CompressionMode compression_mode) {
  for (; depth > 0; --depth) {
    LoadTaggedField(context, context, Context::kPreviousOffset);
  }
  LoadTaggedField(kInterpreterAccumulatorRegister, context,
                  Context::OffsetOfElementAt(index));
}

void BaselineAssembler::StaContextSlot(Register context, Register value,
                                       uint32_t index, uint32_t depth) {
  for (; depth > 0; --depth) {
    LoadTaggedField(context, context, Context::kPreviousOffset);
  }
  StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                   value);
}

void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                          uint32_t depth) {
  for (; depth > 0; --depth) {
    LoadTaggedField(context, context, Context::kPreviousOffset);
  }
  LoadTaggedField(context, context, Context::kExtensionOffset);
  if (cell_index > 0) {
    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
    // The actual array index is (cell_index - 1).
    cell_index -= 1;
  } else {
    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
    // The actual array index is (-cell_index - 1).
    cell_index = -cell_index - 1;
  }
  LoadFixedArrayElement(context, context, cell_index);
  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
}
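
// Worked example of the cell_index convention above (editorial): the
// bytecode encodes exports as positive indices and imports as negative
// ones, so
//
//   cell_index  3  ->  regular_exports[2]   //  3 - 1
//   cell_index -2  ->  regular_imports[1]   //  -(-2) - 1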

void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                          int cell_index, uint32_t depth) {
  for (; depth > 0; --depth) {
    LoadTaggedField(context, context, Context::kPreviousOffset);
  }
  LoadTaggedField(context, context, Context::kExtensionOffset);
  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);

  // The actual array index is (cell_index - 1).
  cell_index -= 1;
  LoadFixedArrayElement(context, context, cell_index);
  StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}

void BaselineAssembler::IncrementSmi(MemOperand lhs) {
  BaselineAssembler::ScratchRegisterScope temps(this);
  Register tmp = temps.AcquireScratch();
  if (SmiValuesAre31Bits()) {
    __ Lw(tmp, lhs);
    __ Addu(tmp, tmp, Operand(Smi::FromInt(1)));
    __ Sw(tmp, lhs);
  } else {
    __ Ld(tmp, lhs);
    __ Daddu(tmp, tmp, Operand(Smi::FromInt(1)));
    __ Sd(tmp, lhs);
  }
}
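
// Worked example (editorial): a Smi stores its payload shifted left by one
// bit (31-bit Smis) or in the upper 32 bits (32-bit Smis), so adding the
// tagged constant Smi::FromInt(1) increments the payload without untagging:
//
//   31-bit: 0x8 (Smi 4) + 0x2          -> 0xa (Smi 5)
//   32-bit: Smi 4       + (1ull << 32) -> Smi 5
//
// which is why the 31-bit path can use the 32-bit Lw/Addu/Sw instructions.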

void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
  __ And(output, lhs, Operand(rhs));
}

void BaselineAssembler::Switch(Register reg, int case_value_base,
                               Label** labels, int num_labels) {
  ASM_CODE_COMMENT(masm_);
  Label fallthrough;
  if (case_value_base != 0) {
    __ Dsubu(reg, reg, Operand(case_value_base));
  }

  __ Branch(&fallthrough, kUnsignedGreaterThanEqual, reg, Operand(num_labels));

  __ GenerateSwitchTable(reg, num_labels,
                         [labels](size_t i) { return labels[i]; });

  __ bind(&fallthrough);
}
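
// Usage sketch (editorial, hypothetical labels): dispatch on reg over
// [case_value_base, case_value_base + num_labels), falling through for
// out-of-range values:
//
//   Label case0, case1, case2;
//   Label* labels[] = {&case0, &case1, &case2};
//   basm.Switch(reg, 0, labels, 3);  // jumps to labels[reg] if 0 <= reg < 3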

#undef __

#define __ basm.

void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
  ASM_CODE_COMMENT(masm);
  BaselineAssembler basm(masm);

  Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
  Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();

  {
    ASM_CODE_COMMENT_STRING(masm, "Update Interrupt Budget");

    Label skip_interrupt_label;
    __ AddToInterruptBudgetAndJumpIfNotExceeded(weight, &skip_interrupt_label);
    __ masm()->SmiTag(params_size);
    __ masm()->Push(params_size, kInterpreterAccumulatorRegister);

    __ LoadContext(kContextRegister);
    __ LoadFunction(kJSFunctionRegister);
    __ masm()->Push(kJSFunctionRegister);
    __ CallRuntime(Runtime::kBytecodeBudgetInterrupt_Sparkplug, 1);

    __ masm()->Pop(params_size, kInterpreterAccumulatorRegister);
    __ masm()->SmiUntag(params_size);

    __ Bind(&skip_interrupt_label);
  }

  BaselineAssembler::ScratchRegisterScope temps(&basm);
  Register actual_params_size = temps.AcquireScratch();
  // Compute the size of the actual parameters + receiver.
  __ Move(actual_params_size,
          MemOperand(fp, StandardFrameConstants::kArgCOffset));

  // If actual is bigger than formal, then we should use it to free up the stack
  // arguments.
  Label corrected_args_count;
  __ masm()->Branch(&corrected_args_count, ge, params_size,
                    Operand(actual_params_size));
  __ masm()->Move(params_size, actual_params_size);
  __ Bind(&corrected_args_count);

  // Leave the frame (also dropping the register file).
  __ masm()->LeaveFrame(StackFrame::BASELINE);

  // Drop arguments.
  __ masm()->DropArguments(params_size);

  __ masm()->Ret();
}

#undef __

inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
    Register reg) {
  assembler_->masm()->Assert(eq, AbortReason::kAccumulatorClobbered, reg,
                             Operand(kInterpreterAccumulatorRegister));
}

}  // namespace baseline
}  // namespace internal
}  // namespace v8

#endif  // V8_BASELINE_MIPS64_BASELINE_ASSEMBLER_MIPS64_INL_H_