v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
baseline-assembler-ppc-inl.h
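PPC-specific inline implementations of V8's baseline (Sparkplug) assembler helpers.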
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_BASELINE_PPC_BASELINE_ASSEMBLER_PPC_INL_H_
#define V8_BASELINE_PPC_BASELINE_ASSEMBLER_PPC_INL_H_

#include "src/baseline/baseline-assembler.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/ppc/assembler-ppc-inl.h"
#include "src/objects/literal-objects-inl.h"

namespace v8 {
namespace internal {
namespace baseline {

namespace detail {

static constexpr Register kScratchRegisters[] = {r9, r10, ip};
static constexpr int kNumScratchRegisters = arraysize(kScratchRegisters);

#ifdef DEBUG
inline bool Clobbers(Register target, MemOperand op) {
  return op.rb() == target || op.ra() == target;
}
#endif
}  // namespace detail

class BaselineAssembler::ScratchRegisterScope {
 public:
  explicit ScratchRegisterScope(BaselineAssembler* assembler)
      : assembler_(assembler),
        prev_scope_(assembler->scratch_register_scope_),
        registers_used_(prev_scope_ == nullptr ? 0
                                               : prev_scope_->registers_used_) {
    assembler_->scratch_register_scope_ = this;
  }
  ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }

  Register AcquireScratch() {
    DCHECK_LT(registers_used_, detail::kNumScratchRegisters);
    return detail::kScratchRegisters[registers_used_++];
  }

 private:
  BaselineAssembler* assembler_;
  ScratchRegisterScope* prev_scope_;
  int registers_used_;
};

#define __ assm->
// PPC helper: emits a signed or unsigned, 32- or 64-bit compare followed by a
// conditional branch.
template <int width = 64>
static void JumpIfHelper(MacroAssembler* assm, Condition cc, Register lhs,
                         Register rhs, Label* target) {
  static_assert(width == 64 || width == 32,
                "only support 64 and 32 bit compare");
  if (width == 64) {
    if (is_signed(cc)) {
      __ CmpS64(lhs, rhs);
    } else {
      __ CmpU64(lhs, rhs);
    }
  } else {
    if (is_signed(cc)) {
      __ CmpS32(lhs, rhs);
    } else {
      __ CmpU32(lhs, rhs);
    }
  }
  __ b(to_condition(cc), target);
}
#undef __

#define __ masm_->

MemOperand BaselineAssembler::RegisterFrameOperand(
    interpreter::Register interpreter_register) {
  return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
}
void BaselineAssembler::RegisterFrameAddress(
    interpreter::Register interpreter_register, Register rscratch) {
  return __ AddS64(
      rscratch, fp,
      Operand(interpreter_register.ToOperand() * kSystemPointerSize));
}
MemOperand BaselineAssembler::FeedbackVectorOperand() {
  return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
MemOperand BaselineAssembler::FeedbackCellOperand() {
  return MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp);
}

void BaselineAssembler::Bind(Label* label) { __ bind(label); }

void BaselineAssembler::JumpTarget() {
  // NOP on ppc.
}

void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
  ASM_CODE_COMMENT(masm_);
  __ b(target);
}

void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
                                   Label* target, Label::Distance) {
  ASM_CODE_COMMENT(masm_);
  __ JumpIfRoot(value, index, target);
}

void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
                                      Label* target, Label::Distance) {
  ASM_CODE_COMMENT(masm_);
  __ JumpIfNotRoot(value, index, target);
}

void BaselineAssembler::JumpIfSmi(Register value, Label* target,
                                  Label::Distance distance) {
  ASM_CODE_COMMENT(masm_);
  __ JumpIfSmi(value, target);
}

void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
                                        Label* target,
                                        Label::Distance distance) {
  ASM_CODE_COMMENT(masm_);
  JumpIf(cc, left, Operand(right), target, distance);
}

void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
                                     Label::Distance distance) {
  ASM_CODE_COMMENT(masm_);
  __ JumpIfNotSmi(value, target);
}

void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
                                      Label* target, Label::Distance) {
  ASM_CODE_COMMENT(masm_);
  __ AndU64(r0, value, Operand(mask), ip, SetRC);
  __ b(to_condition(cc), target, cr0);
}

void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
                               Label* target, Label::Distance) {
  ASM_CODE_COMMENT(masm_);
  if (is_signed(cc)) {
    __ CmpS64(lhs, rhs, r0);
  } else {
    __ CmpU64(lhs, rhs, r0);
  }
  __ b(to_condition(cc), target);
}

#if V8_STATIC_ROOTS_BOOL
void BaselineAssembler::JumpIfJSAnyIsPrimitive(Register heap_object,
                                               Label* target,
                                               Label::Distance distance) {
  __ AssertNotSmi(heap_object);
  ScratchRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  __ JumpIfJSAnyIsPrimitive(heap_object, scratch, target, distance);
}
#endif  // V8_STATIC_ROOTS_BOOL

void BaselineAssembler::JumpIfObjectTypeFast(Condition cc, Register object,
                                             InstanceType instance_type,
                                             Label* target,
                                             Label::Distance distance) {
  ScratchRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  if (cc == eq || cc == ne) {
    Register scratch2 = temps.AcquireScratch();
    __ IsObjectType(object, scratch, scratch2, instance_type);
    __ b(to_condition(cc), target);
    return;
  }
  JumpIfObjectType(cc, object, instance_type, scratch, target, distance);
}

void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
                                         InstanceType instance_type,
                                         Register map, Label* target,
                                         Label::Distance) {
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope temps(this);
  Register type = temps.AcquireScratch();
  __ LoadMap(map, object);
  __ LoadU16(type, FieldMemOperand(map, Map::kInstanceTypeOffset), r0);
  JumpIf(cc, type, Operand(instance_type), target);
}

void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
                                           InstanceType instance_type,
                                           Label* target, Label::Distance) {
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope temps(this);
  Register type = temps.AcquireScratch();
  if (v8_flags.debug_code) {
    __ AssertNotSmi(map);
    __ CompareObjectType(map, type, type, MAP_TYPE);
    __ Assert(eq, AbortReason::kUnexpectedValue);
  }
  __ LoadU16(type, FieldMemOperand(map, Map::kInstanceTypeOffset), r0);
  JumpIf(cc, type, Operand(instance_type), target);
}

void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
                                      MemOperand operand, Label* target,
                                      Label::Distance) {
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope temps(this);
  Register tmp = temps.AcquireScratch();
  __ LoadU64(tmp, operand, r0);
  JumpIfHelper(masm_, cc, value, tmp, target);
}

void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Tagged<Smi> smi,
                                  Label* target, Label::Distance) {
  ASM_CODE_COMMENT(masm_);
  __ AssertSmi(value);
  __ LoadSmiLiteral(r0, smi);
  JumpIfHelper(masm_, cc, value, r0, target);
}

void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
                                  Label* target, Label::Distance) {
  ASM_CODE_COMMENT(masm_);
  __ AssertSmi(lhs);
  __ AssertSmi(rhs);
  JumpIfHelper(masm_, cc, lhs, rhs, target);
}

void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
                                     MemOperand operand, Label* target,
                                     Label::Distance) {
  ASM_CODE_COMMENT(masm_);
  __ LoadTaggedField(ip, operand, r0);
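  // When pointer compression is enabled, tagged slots are only 32 bits wide,
  // so a 32-bit compare suffices; otherwise compare the full 64 bits.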
  JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(masm_, cc, value, ip, target);
}

void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
                                     Register value, Label* target,
                                     Label::Distance) {
  ASM_CODE_COMMENT(masm_);
  __ LoadTaggedField(ip, operand, r0);
  JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(masm_, cc, ip, value, target);
}

void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
                                   Label* target, Label::Distance) {
  ASM_CODE_COMMENT(masm_);
  JumpIf(cc, value, Operand(byte), target);
}

void BaselineAssembler::Move(interpreter::Register output, Register source) {
  ASM_CODE_COMMENT(masm_);
  Move(RegisterFrameOperand(output), source);
}

void BaselineAssembler::Move(Register output, Tagged<TaggedIndex> value) {
  ASM_CODE_COMMENT(masm_);
  __ mov(output, Operand(value.ptr()));
}

void BaselineAssembler::Move(MemOperand output, Register source) {
  ASM_CODE_COMMENT(masm_);
  __ StoreU64(source, output, r0);
}

void BaselineAssembler::Move(Register output, ExternalReference reference) {
  ASM_CODE_COMMENT(masm_);
  __ Move(output, reference);
}

void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
  ASM_CODE_COMMENT(masm_);
  __ Move(output, value);
}

void BaselineAssembler::Move(Register output, int32_t value) {
  ASM_CODE_COMMENT(masm_);
  __ mov(output, Operand(value));
}

void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
  ASM_CODE_COMMENT(masm_);
  __ mr(output, source);
}

void BaselineAssembler::MoveSmi(Register output, Register source) {
  ASM_CODE_COMMENT(masm_);
  __ mr(output, source);
}

namespace detail {

template <typename Arg>
inline Register ToRegister(BaselineAssembler* basm,
                           BaselineAssembler::ScratchRegisterScope* scope,
                           Arg arg) {
  Register reg = scope->AcquireScratch();
  basm->Move(reg, arg);
  return reg;
}
inline Register ToRegister(BaselineAssembler* basm,
                           BaselineAssembler::ScratchRegisterScope* scope,
                           Register reg) {
  return reg;
}

template <typename... Args>
struct PushAllHelper;
template <>
struct PushAllHelper<> {
  static int Push(BaselineAssembler* basm) { return 0; }
  static int PushReverse(BaselineAssembler* basm) { return 0; }
};
// TODO(ishell): try to pack sequence of pushes into one instruction by
// looking at register codes. For example, Push(r1, r2, r5, r0, r3, r4)
// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
template <typename Arg>
struct PushAllHelper<Arg> {
  static int Push(BaselineAssembler* basm, Arg arg) {
    BaselineAssembler::ScratchRegisterScope scope(basm);
    basm->masm()->Push(ToRegister(basm, &scope, arg));
    return 1;
  }
  static int PushReverse(BaselineAssembler* basm, Arg arg) {
    return Push(basm, arg);
  }
};
// TODO(ishell): try to pack sequence of pushes into one instruction by
// looking at register codes. For example, Push(r1, r2, r5, r0, r3, r4)
// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
template <typename Arg, typename... Args>
struct PushAllHelper<Arg, Args...> {
  static int Push(BaselineAssembler* basm, Arg arg, Args... args) {
    PushAllHelper<Arg>::Push(basm, arg);
    return 1 + PushAllHelper<Args...>::Push(basm, args...);
  }
  static int PushReverse(BaselineAssembler* basm, Arg arg, Args... args) {
    int nargs = PushAllHelper<Args...>::PushReverse(basm, args...);
    PushAllHelper<Arg>::Push(basm, arg);
    return nargs + 1;
  }
};
template <>
struct PushAllHelper<interpreter::RegisterList> {
  static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
    for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
      PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
    }
    return list.register_count();
  }
  static int PushReverse(BaselineAssembler* basm,
                         interpreter::RegisterList list) {
    for (int reg_index = list.register_count() - 1; reg_index >= 0;
         --reg_index) {
      PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
    }
    return list.register_count();
  }
};

template <typename... T>
struct PopAllHelper;
template <>
struct PopAllHelper<> {
  static void Pop(BaselineAssembler* basm) {}
};
// TODO(ishell): try to pack sequence of pops into one instruction by
// looking at register codes. For example, Pop(r1, r2, r5, r0, r3, r4)
// could be generated as two pops: Pop(r1, r2, r5) and Pop(r0, r3, r4).
template <>
struct PopAllHelper<Register> {
  static void Pop(BaselineAssembler* basm, Register reg) {
    basm->masm()->Pop(reg);
  }
};
template <typename... T>
struct PopAllHelper<Register, T...> {
  static void Pop(BaselineAssembler* basm, Register reg, T... tail) {
    PopAllHelper<Register>::Pop(basm, reg);
    PopAllHelper<T...>::Pop(basm, tail...);
  }
};

}  // namespace detail

template <typename... T>
int BaselineAssembler::Push(T... vals) {
  return detail::PushAllHelper<T...>::Push(this, vals...);
}

template <typename... T>
void BaselineAssembler::PushReverse(T... vals) {
  detail::PushAllHelper<T...>::PushReverse(this, vals...);
}

template <typename... T>
void BaselineAssembler::Pop(T... registers) {
  detail::PopAllHelper<T...>::Pop(this, registers...);
}

void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                        int offset) {
  ASM_CODE_COMMENT(masm_);
  __ LoadTaggedField(output, FieldMemOperand(source, offset), r0);
}

void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
                                              int offset) {
  ASM_CODE_COMMENT(masm_);
  __ LoadTaggedSignedField(output, FieldMemOperand(source, offset), r0);
}

void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
                                                      Register source,
                                                      int offset) {
  LoadTaggedSignedField(output, source, offset);
  SmiUntag(output);
}

void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                  Register source, int offset) {
  ASM_CODE_COMMENT(masm_);
  __ LoadU16(output, FieldMemOperand(source, offset), r0);
}

void BaselineAssembler::LoadWord8Field(Register output, Register source,
                                       int offset) {
  ASM_CODE_COMMENT(masm_);
  __ LoadU8(output, FieldMemOperand(source, offset), r0);
}

void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
                                               Tagged<Smi> value) {
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope temps(this);
  Register tmp = temps.AcquireScratch();
  __ LoadSmiLiteral(tmp, value);
  __ StoreTaggedField(tmp, FieldMemOperand(target, offset), r0);
}

void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
                                                         int offset,
                                                         Register value) {
  ASM_CODE_COMMENT(masm_);
  Register scratch = WriteBarrierDescriptor::SlotAddressRegister();
  DCHECK(!AreAliased(target, value, scratch));
  __ StoreTaggedField(value, FieldMemOperand(target, offset), r0);
  __ RecordWriteField(target, offset, value, scratch, kLRHasNotBeenSaved,
                      SaveFPRegsMode::kIgnore);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
                                                       int offset,
                                                       Register value) {
  ASM_CODE_COMMENT(masm_);
  __ StoreTaggedField(value, FieldMemOperand(target, offset), r0);
}

void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                Register feedback_vector,
                                                FeedbackSlot slot,
                                                Label* on_result,
                                                Label::Distance) {
  Label fallthrough;
  LoadTaggedField(scratch_and_result, feedback_vector,
                  FeedbackVector::OffsetOfElementAt(slot.ToInt()));
  __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);

  // Is it marked_for_deoptimization? If yes, clear the slot.
  {
    ScratchRegisterScope temps(this);

    // The entry references a CodeWrapper object. Unwrap it now.
    __ LoadCodePointerField(
        scratch_and_result,
        FieldMemOperand(scratch_and_result, CodeWrapper::kCodeOffset), r0);

    Register scratch = temps.AcquireScratch();
    __ TestCodeIsMarkedForDeoptimization(scratch_and_result, scratch, r0);
    __ beq(on_result, cr0);
    __ mov(scratch, __ ClearedValue());
    StoreTaggedFieldNoWriteBarrier(
        feedback_vector, FeedbackVector::OffsetOfElementAt(slot.ToInt()),
        scratch);
  }

  __ bind(&fallthrough);
  Move(scratch_and_result, 0);
}

void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
    int32_t weight, Label* skip_interrupt_label) {
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFeedbackCell(feedback_cell);

  Register interrupt_budget = scratch_scope.AcquireScratch();
  __ LoadU32(
      interrupt_budget,
      FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset), r0);
  // Remember to set flags as part of the add!
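  // (With SetRC, AddS32 records the result's sign in CR0, so the bge below
  // can test whether the budget stayed non-negative without an extra compare.)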
  __ AddS32(interrupt_budget, interrupt_budget, Operand(weight), r0, SetRC);
  __ StoreU32(
      interrupt_budget,
      FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset), r0);
  if (skip_interrupt_label) {
    // Use compare flags set by add
    DCHECK_LT(weight, 0);
    __ bge(skip_interrupt_label, cr0);
  }
}

void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
    Register weight, Label* skip_interrupt_label) {
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFeedbackCell(feedback_cell);

  Register interrupt_budget = scratch_scope.AcquireScratch();
  __ LoadU32(
      interrupt_budget,
      FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset), r0);
  // Remember to set flags as part of the add!
  __ AddS32(interrupt_budget, interrupt_budget, weight, SetRC);
  __ StoreU32(
      interrupt_budget,
      FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset), r0);
  if (skip_interrupt_label) __ bge(skip_interrupt_label, cr0);
}

void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                       uint32_t depth,
                                       CompressionMode compression_mode) {
  ASM_CODE_COMMENT(masm_);
  for (; depth > 0; --depth) {
    LoadTaggedField(context, context, Context::kPreviousOffset);
  }
  LoadTaggedField(kInterpreterAccumulatorRegister, context,
                  Context::OffsetOfElementAt(index));
}

void BaselineAssembler::StaContextSlot(Register context, Register value,
                                       uint32_t index, uint32_t depth) {
  ASM_CODE_COMMENT(masm_);
  for (; depth > 0; --depth) {
    LoadTaggedField(context, context, Context::kPreviousOffset);
  }
  StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                   value);
}

void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                          uint32_t depth) {
  ASM_CODE_COMMENT(masm_);
  for (; depth > 0; --depth) {
    LoadTaggedField(context, context, Context::kPreviousOffset);
  }
  LoadTaggedField(context, context, Context::kExtensionOffset);
  if (cell_index > 0) {
    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
    // The actual array index is (cell_index - 1).
    cell_index -= 1;
  } else {
    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
    // The actual array index is (-cell_index - 1).
    cell_index = -cell_index - 1;
  }
  LoadFixedArrayElement(context, context, cell_index);
  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
}

void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                          int cell_index, uint32_t depth) {
  ASM_CODE_COMMENT(masm_);
  for (; depth > 0; --depth) {
    LoadTaggedField(context, context, Context::kPreviousOffset);
  }
  LoadTaggedField(context, context, Context::kExtensionOffset);
  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);

  // The actual array index is (cell_index - 1).
  cell_index -= 1;
  LoadFixedArrayElement(context, context, cell_index);
  StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}

void BaselineAssembler::IncrementSmi(MemOperand lhs) {
  Register scratch = ip;
  if (SmiValuesAre31Bits()) {
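    // A 31-bit Smi occupies the low 32 bits of the slot: adding the tagged
    // constant Smi::FromInt(1) (raw value 2) increments the payload without
    // untagging and retagging.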
    __ LoadS32(scratch, lhs, r0);
    __ AddS64(scratch, scratch, Operand(Smi::FromInt(1)));
    __ StoreU32(scratch, lhs, r0);
  } else {
    __ SmiUntag(scratch, lhs, LeaveRC, r0);
    __ AddS64(scratch, scratch, Operand(1));
    __ SmiTag(scratch);
    __ StoreU64(scratch, lhs, r0);
  }
}

void BaselineAssembler::Switch(Register reg, int case_value_base,
                               Label** labels, int num_labels) {
  ASM_CODE_COMMENT(masm_);
  Label fallthrough, jump_table;
  if (case_value_base != 0) {
    __ AddS64(reg, reg, Operand(-case_value_base));
  }

  // Mostly copied from code-generator-arm.cc
  JumpIf(kUnsignedGreaterThanEqual, reg, Operand(num_labels), &fallthrough);
  // Ensure to emit the constant pool first if necessary.
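  // Each jump-table entry emitted below is a branch plus a nop, i.e. two
  // 4-byte instructions, so entries are 8 bytes (1 << 3) apart.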
  int entry_size_log2 = 3;
  __ ShiftLeftU32(reg, reg, Operand(entry_size_log2));
  __ mov_label_addr(ip, &jump_table);
  __ AddS64(reg, reg, ip);
  __ Jump(reg);
  __ b(&fallthrough);
  __ bind(&jump_table);
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  for (int i = 0; i < num_labels; ++i) {
    __ b(labels[i]);
    __ nop();
  }
  __ bind(&fallthrough);
}

void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
  __ AndU32(output, lhs, Operand(rhs));
}

#undef __

#define __ basm.

void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
  ASM_CODE_COMMENT(masm);
  BaselineAssembler basm(masm);

  Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
  Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();

  {
    ASM_CODE_COMMENT_STRING(masm, "Update Interrupt Budget");

    Label skip_interrupt_label;
    __ AddToInterruptBudgetAndJumpIfNotExceeded(weight, &skip_interrupt_label);
    {
      __ masm()->SmiTag(params_size);
      __ Push(params_size, kInterpreterAccumulatorRegister);

      __ LoadContext(kContextRegister);
      __ LoadFunction(kJSFunctionRegister);
      __ Push(kJSFunctionRegister);
      __ CallRuntime(Runtime::kBytecodeBudgetInterrupt_Sparkplug, 1);

      __ Pop(kInterpreterAccumulatorRegister, params_size);
      __ masm()->SmiUntag(params_size);
    }

    __ Bind(&skip_interrupt_label);
  }

  BaselineAssembler::ScratchRegisterScope temps(&basm);
  Register actual_params_size = temps.AcquireScratch();
  // Compute the size of the actual parameters + receiver.
  __ Move(actual_params_size,
          MemOperand(fp, StandardFrameConstants::kArgCOffset));

  // If actual is bigger than formal, then we should use it to free up the stack
  // arguments.
  Label corrected_args_count;
  JumpIfHelper(__ masm(), kGreaterThanEqual, params_size, actual_params_size,
               &corrected_args_count);
  __ masm()->mr(params_size, actual_params_size);
  __ Bind(&corrected_args_count);

  // Leave the frame (also dropping the register file).
  __ masm()->LeaveFrame(StackFrame::BASELINE);

  // Drop receiver + arguments.
  __ masm()->DropArguments(params_size);
  __ masm()->Ret();
}

#undef __

inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
    Register reg) {
  if (COMPRESS_POINTERS_BOOL) {
    assembler_->masm()->CmpU32(reg, kInterpreterAccumulatorRegister);
  } else {
    assembler_->masm()->CmpU64(reg, kInterpreterAccumulatorRegister);
  }
  assembler_->masm()->Assert(eq, AbortReason::kAccumulatorClobbered);
}

}  // namespace baseline
}  // namespace internal
}  // namespace v8

#endif  // V8_BASELINE_PPC_BASELINE_ASSEMBLER_PPC_INL_H_
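
A note on IncrementSmi's fast path above: with 31-bit Smis (the pointer-compression configuration), a small integer is stored shifted left by one, tag bit 0 clear, in the low 32 bits of the slot, so the increment can be performed on the tagged word directly. A minimal self-contained sketch of that arithmetic, using hypothetical SmiFromInt/SmiToInt helpers standing in for V8's real Smi class:

#include <cassert>
#include <cstdint>

// Hypothetical model of 31-bit Smi tagging: payload in bits 1..31, tag bit 0 clear.
constexpr int32_t SmiFromInt(int32_t value) { return value << 1; }
constexpr int32_t SmiToInt(int32_t smi) { return smi >> 1; }

int main() {
  int32_t slot = SmiFromInt(41);  // tagged 41, raw bits 82
  slot += SmiFromInt(1);          // add the tagged constant, as IncrementSmi does
  assert(SmiToInt(slot) == 42);   // payload incremented, tag bit still clear
  return 0;
}

This is also why the non-compressed path untags, adds 1, and retags instead: there the payload sits in the upper bits of a 64-bit word, where the same single-add trick does not apply.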