v8 — V8 is Google's open source high-performance JavaScript and WebAssembly engine, written in C++.

baseline-assembler-s390-inl.h
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_BASELINE_S390_BASELINE_ASSEMBLER_S390_INL_H_
#define V8_BASELINE_S390_BASELINE_ASSEMBLER_S390_INL_H_

#include "src/baseline/baseline-assembler.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/s390/assembler-s390-inl.h"
#include "src/objects/literal-objects-inl.h"

namespace v8 {
namespace internal {
namespace baseline {

namespace detail {

static constexpr Register kScratchRegisters[] = {r8, ip, r1};
static constexpr int kNumScratchRegisters = arraysize(kScratchRegisters);

#ifdef DEBUG
inline bool Clobbers(Register target, MemOperand op) {
  return op.rb() == target || op.rx() == target;
}
#endif
}  // namespace detail

class BaselineAssembler::ScratchRegisterScope {
 public:
  explicit ScratchRegisterScope(BaselineAssembler* assembler)
      : assembler_(assembler),
        prev_scope_(assembler->scratch_register_scope_),
        registers_used_(prev_scope_ == nullptr ? 0
                                               : prev_scope_->registers_used_) {
    assembler_->scratch_register_scope_ = this;
  }
  ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }

  Register AcquireScratch() {
    DCHECK_LT(registers_used_, detail::kNumScratchRegisters);
    return detail::kScratchRegisters[registers_used_++];
  }

 private:
  BaselineAssembler* assembler_;
  ScratchRegisterScope* prev_scope_;
  int registers_used_;
};
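
// Illustrative usage (a sketch, not part of the original file): scopes nest,
// and each AcquireScratch() hands out the next unused entry of
// detail::kScratchRegisters (r8, then ip, then r1):
//
//   BaselineAssembler::ScratchRegisterScope temps(&basm);
//   Register tmp = temps.AcquireScratch();  // r8
//   {
//     BaselineAssembler::ScratchRegisterScope inner(&basm);
//     Register tmp2 = inner.AcquireScratch();  // ip; r8 stays reserved
//   }  // inner scope releases ip; tmp is still valid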

#define __ assm->
// s390x helper
template <int width = 64>
static void JumpIfHelper(MacroAssembler* assm, Condition cc, Register lhs,
                         Register rhs, Label* target) {
  static_assert(width == 64 || width == 32,
                "only support 64 and 32 bit compare");
  if (width == 64) {
    if (is_signed(cc)) {
      __ CmpS64(lhs, rhs);
    } else {
      __ CmpU64(lhs, rhs);
    }
  } else {
    if (is_signed(cc)) {
      __ CmpS32(lhs, rhs);
    } else {
      __ CmpU32(lhs, rhs);
    }
  }
  __ b(to_condition(cc), target);
}
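
// Note (editorial): callers default to a full 64-bit compare; JumpIfTagged
// below instantiates JumpIfHelper<32> when pointer compression is enabled,
// since a compressed tagged value only occupies 32 bits of its stack slot.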

#undef __

#define __ masm_->

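// Interpreter registers live in the baseline frame at fixed fp-relative
// offsets; interpreter::Register::ToOperand() gives the signed slot index,
// which the helpers below scale by kSystemPointerSize.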
MemOperand BaselineAssembler::RegisterFrameOperand(
    interpreter::Register interpreter_register) {
  return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
}
void BaselineAssembler::RegisterFrameAddress(
    interpreter::Register interpreter_register, Register rscratch) {
  return __ AddS64(rscratch, fp,
                   interpreter_register.ToOperand() * kSystemPointerSize);
}
MemOperand BaselineAssembler::FeedbackVectorOperand() {
  return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
MemOperand BaselineAssembler::FeedbackCellOperand() {
  return MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp);
}

void BaselineAssembler::Bind(Label* label) { __ bind(label); }

void BaselineAssembler::JumpTarget() {
  // NOP on s390.
}

void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
  ASM_CODE_COMMENT(masm_);
  __ b(target);
}

void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
                                   Label* target, Label::Distance) {
  ASM_CODE_COMMENT(masm_);
  __ JumpIfRoot(value, index, target);
}

void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
                                      Label* target, Label::Distance) {
  ASM_CODE_COMMENT(masm_);
  __ JumpIfNotRoot(value, index, target);
}

void BaselineAssembler::JumpIfSmi(Register value, Label* target,
                                  Label::Distance distance) {
  ASM_CODE_COMMENT(masm_);
  __ JumpIfSmi(value, target);
}

void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
                                        Label* target,
                                        Label::Distance distance) {
  ASM_CODE_COMMENT(masm_);
  JumpIf(cc, left, Operand(right), target, distance);
}

void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
                                     Label::Distance distance) {
  ASM_CODE_COMMENT(masm_);
  __ JumpIfNotSmi(value, target);
}

void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
                                      Label* target, Label::Distance) {
  ASM_CODE_COMMENT(masm_);
  __ AndP(r0, value, Operand(mask));
  __ b(to_condition(cc), target);
}

void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
                               Label* target, Label::Distance) {
  ASM_CODE_COMMENT(masm_);
  if (is_signed(cc)) {
    __ CmpS64(lhs, rhs);
  } else {
    __ CmpU64(lhs, rhs);
  }
  __ b(to_condition(cc), target);
}

void BaselineAssembler::JumpIfObjectTypeFast(Condition cc, Register object,
                                             InstanceType instance_type,
                                             Label* target,
                                             Label::Distance distance) {
  ScratchRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  JumpIfObjectType(cc, object, instance_type, scratch, target, distance);
}

void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
                                         InstanceType instance_type,
                                         Register map, Label* target,
                                         Label::Distance) {
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope temps(this);
  Register type = temps.AcquireScratch();
  __ LoadMap(map, object);
  __ LoadU16(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
  JumpIf(cc, type, Operand(instance_type), target);
}

void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
                                           InstanceType instance_type,
                                           Label* target, Label::Distance) {
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope temps(this);
  Register type = temps.AcquireScratch();
  if (v8_flags.debug_code) {
    __ AssertNotSmi(map);
    __ CompareObjectType(map, type, type, MAP_TYPE);
    __ Assert(eq, AbortReason::kUnexpectedValue);
  }
  __ LoadU16(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
  JumpIf(cc, type, Operand(instance_type), target);
}

void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
                                      MemOperand operand, Label* target,
                                      Label::Distance) {
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope temps(this);
  Register tmp = temps.AcquireScratch();
  __ LoadU64(tmp, operand);
  JumpIfHelper(masm_, cc, value, tmp, target);
}

void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Tagged<Smi> smi,
                                  Label* target, Label::Distance) {
  ASM_CODE_COMMENT(masm_);
  __ AssertSmi(value);
  __ LoadSmiLiteral(r0, smi);
  JumpIfHelper(masm_, cc, value, r0, target);
}

void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
                                  Label* target, Label::Distance) {
  ASM_CODE_COMMENT(masm_);
  __ AssertSmi(lhs);
  __ AssertSmi(rhs);
  JumpIfHelper(masm_, cc, lhs, rhs, target);
}

#ifdef V8_TARGET_BIG_ENDIAN
constexpr static int stack_bias = 4;
#else
constexpr static int stack_bias = 0;
#endif
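
// On big-endian s390x, a 32-bit (compressed) tagged value occupies the
// high-address half of its 64-bit stack slot, so the 32-bit accesses to
// fp-relative slots below are biased by 4 bytes.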

void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
                                     MemOperand operand, Label* target,
                                     Label::Distance) {
  ASM_CODE_COMMENT(masm_);
  DCHECK(operand.rb() == fp || operand.rx() == fp);
  if (COMPRESS_POINTERS_BOOL) {
    MemOperand addr =
        MemOperand(operand.rx(), operand.rb(), operand.offset() + stack_bias);
    __ LoadTaggedField(ip, addr, r0);
  } else {
    __ LoadTaggedField(ip, operand, r0);
  }
  JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(masm_, cc, value, ip, target);
}

void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
                                     Register value, Label* target,
                                     Label::Distance) {
  ASM_CODE_COMMENT(masm_);
  DCHECK(operand.rb() == fp || operand.rx() == fp);
  if (COMPRESS_POINTERS_BOOL) {
    MemOperand addr =
        MemOperand(operand.rx(), operand.rb(), operand.offset() + stack_bias);
    __ LoadTaggedField(ip, addr, r0);
  } else {
    __ LoadTaggedField(ip, operand, r0);
  }
  JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(masm_, cc, ip, value, target);
}

void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
                                   Label* target, Label::Distance) {
  ASM_CODE_COMMENT(masm_);
  JumpIf(cc, value, Operand(byte), target);
}

void BaselineAssembler::Move(interpreter::Register output, Register source) {
  Move(RegisterFrameOperand(output), source);
}

void BaselineAssembler::Move(Register output, Tagged<TaggedIndex> value) {
  ASM_CODE_COMMENT(masm_);
  __ mov(output, Operand(value.ptr()));
}

void BaselineAssembler::Move(MemOperand output, Register source) {
  ASM_CODE_COMMENT(masm_);
  __ StoreU64(source, output);
}

void BaselineAssembler::Move(Register output, ExternalReference reference) {
  ASM_CODE_COMMENT(masm_);
  __ Move(output, reference);
}

void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
  ASM_CODE_COMMENT(masm_);
  __ Move(output, value);
}

void BaselineAssembler::Move(Register output, int32_t value) {
  ASM_CODE_COMMENT(masm_);
  __ mov(output, Operand(value));
}

void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
  ASM_CODE_COMMENT(masm_);
  __ mov(output, source);
}

void BaselineAssembler::MoveSmi(Register output, Register source) {
  ASM_CODE_COMMENT(masm_);
  __ mov(output, source);
}

namespace detail {

template <typename Arg>
inline Register ToRegister(BaselineAssembler* basm,
                           BaselineAssembler::ScratchRegisterScope* scope,
                           Arg arg) {
  Register reg = scope->AcquireScratch();
  basm->Move(reg, arg);
  return reg;
}
inline Register ToRegister(BaselineAssembler* basm,
                           BaselineAssembler::ScratchRegisterScope* scope,
                           Register reg) {
  return reg;
}

template <typename... Args>
struct PushAllHelper;
template <>
struct PushAllHelper<> {
  static int Push(BaselineAssembler* basm) { return 0; }
  static int PushReverse(BaselineAssembler* basm) { return 0; }
};
// TODO(ishell): try to pack sequence of pushes into one instruction by
// looking at register codes. For example, Push(r1, r2, r5, r0, r3, r4)
// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
template <typename Arg>
struct PushAllHelper<Arg> {
  static int Push(BaselineAssembler* basm, Arg arg) {
    BaselineAssembler::ScratchRegisterScope scope(basm);
    basm->masm()->Push(ToRegister(basm, &scope, arg));
    return 1;
  }
  static int PushReverse(BaselineAssembler* basm, Arg arg) {
    return Push(basm, arg);
  }
};
// TODO(ishell): try to pack sequence of pushes into one instruction by
// looking at register codes. For example, Push(r1, r2, r5, r0, r3, r4)
// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
template <typename Arg, typename... Args>
struct PushAllHelper<Arg, Args...> {
  static int Push(BaselineAssembler* basm, Arg arg, Args... args) {
    PushAllHelper<Arg>::Push(basm, arg);
    return 1 + PushAllHelper<Args...>::Push(basm, args...);
  }
  static int PushReverse(BaselineAssembler* basm, Arg arg, Args... args) {
    int nargs = PushAllHelper<Args...>::PushReverse(basm, args...);
    PushAllHelper<Arg>::Push(basm, arg);
    return nargs + 1;
  }
};
template <>
struct PushAllHelper<interpreter::RegisterList> {
  static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
    for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
      PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
    }
    return list.register_count();
  }
  static int PushReverse(BaselineAssembler* basm,
                         interpreter::RegisterList list) {
    for (int reg_index = list.register_count() - 1; reg_index >= 0;
         --reg_index) {
      PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
    }
    return list.register_count();
  }
};

template <typename... T>
struct PopAllHelper;
template <>
struct PopAllHelper<> {
  static void Pop(BaselineAssembler* basm) {}
};
// TODO(ishell): try to pack sequence of pops into one instruction by
// looking at register codes. For example, Pop(r1, r2, r5, r0, r3, r4)
// could be generated as two pops: Pop(r1, r2, r5) and Pop(r0, r3, r4).
template <>
struct PopAllHelper<Register> {
  static void Pop(BaselineAssembler* basm, Register reg) {
    basm->masm()->Pop(reg);
  }
};
template <typename... T>
struct PopAllHelper<Register, T...> {
  static void Pop(BaselineAssembler* basm, Register reg, T... tail) {
    PopAllHelper<Register>::Pop(basm, reg);
    PopAllHelper<T...>::Pop(basm, tail...);
  }
};

}  // namespace detail

template <typename... T>
int BaselineAssembler::Push(T... vals) {
  return detail::PushAllHelper<T...>::Push(this, vals...);
}

template <typename... T>
void BaselineAssembler::PushReverse(T... vals) {
  detail::PushAllHelper<T...>::PushReverse(this, vals...);
}

template <typename... T>
void BaselineAssembler::Pop(T... registers) {
  detail::PopAllHelper<T...>::Pop(this, registers...);
}
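
// Illustrative usage (a sketch, not part of the original file), with
// hypothetical registers: Push emits one push per value, left to right, so
// Pop must list the values in reverse to restore them:
//
//   basm.Push(r3, r4);  // r4 ends up on top of the stack
//   // ... code that clobbers r3/r4 ...
//   basm.Pop(r4, r3);   // pops r4 first, then r3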

void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                        int offset) {
  ASM_CODE_COMMENT(masm_);
  __ LoadTaggedField(output, FieldMemOperand(source, offset), r0);
}

void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
                                              int offset) {
  ASM_CODE_COMMENT(masm_);
  __ LoadTaggedSignedField(output, FieldMemOperand(source, offset));
}

void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
                                                      Register source,
                                                      int offset) {
  LoadTaggedSignedField(output, source, offset);
  SmiUntag(output);
}

void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                  Register source, int offset) {
  ASM_CODE_COMMENT(masm_);
  __ LoadU16(output, FieldMemOperand(source, offset));
}

void BaselineAssembler::LoadWord8Field(Register output, Register source,
                                       int offset) {
  ASM_CODE_COMMENT(masm_);
  __ LoadU8(output, FieldMemOperand(source, offset));
}

void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
                                               Tagged<Smi> value) {
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope temps(this);
  Register tmp = temps.AcquireScratch();
  __ LoadSmiLiteral(tmp, value);
  __ StoreTaggedField(tmp, FieldMemOperand(target, offset), r0);
}

void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
                                                         int offset,
                                                         Register value) {
  ASM_CODE_COMMENT(masm_);
  Register scratch = WriteBarrierDescriptor::SlotAddressRegister();
  DCHECK(!AreAliased(target, value, scratch));
  __ StoreTaggedField(value, FieldMemOperand(target, offset), r0);
  __ RecordWriteField(target, offset, value, scratch, kLRHasNotBeenSaved,
                      SaveFPRegsMode::kIgnore);
}

void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
                                                       int offset,
                                                       Register value) {
  __ StoreTaggedField(value, FieldMemOperand(target, offset), r0);
}

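// TryLoadOptimizedOsrCode reads the weak slot `slot` of the feedback vector.
// If the weak reference has been cleared, or the cached code is marked for
// deoptimization (in which case the slot is reset to the cleared value), it
// falls through with 0 in scratch_and_result; otherwise it jumps to on_result
// with the unwrapped Code object in scratch_and_result.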
void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                Register feedback_vector,
                                                FeedbackSlot slot,
                                                Label* on_result,
                                                Label::Distance) {
  Label fallthrough;
  LoadTaggedField(scratch_and_result, feedback_vector,
                  FeedbackVector::OffsetOfElementAt(slot.ToInt()));
  __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);

  // Is it marked_for_deoptimization? If yes, clear the slot.
  {
    ScratchRegisterScope temps(this);

    // The entry references a CodeWrapper object. Unwrap it now.
    __ LoadTaggedField(
        scratch_and_result,
        FieldMemOperand(scratch_and_result, CodeWrapper::kCodeOffset));

    Register scratch = temps.AcquireScratch();
    __ TestCodeIsMarkedForDeoptimization(scratch_and_result, scratch);
    __ beq(on_result);
    __ mov(scratch, __ ClearedValue());
    StoreTaggedFieldNoWriteBarrier(
        feedback_vector, FeedbackVector::OffsetOfElementAt(slot.ToInt()),
        scratch);
  }

  __ bind(&fallthrough);
  Move(scratch_and_result, 0);
}

void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
    int32_t weight, Label* skip_interrupt_label) {
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFeedbackCell(feedback_cell);

  Register interrupt_budget = scratch_scope.AcquireScratch();
  __ LoadU32(
      interrupt_budget,
      FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  // Remember to set flags as part of the add!
  __ AddS32(interrupt_budget, Operand(weight));
  __ StoreU32(
      interrupt_budget,
      FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset), r0);
  if (skip_interrupt_label) {
    // Use compare flags set by add
    DCHECK_LT(weight, 0);
    __ b(ge, skip_interrupt_label);
  }
}

void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
    Register weight, Label* skip_interrupt_label) {
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFeedbackCell(feedback_cell);

  Register interrupt_budget = scratch_scope.AcquireScratch();
  __ LoadU32(
      interrupt_budget,
      FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  // Remember to set flags as part of the add!
  __ AddS32(interrupt_budget, interrupt_budget, weight);
  __ StoreU32(
      interrupt_budget,
      FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  if (skip_interrupt_label) __ b(ge, skip_interrupt_label);
}
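// Note: both overloads above rely on the condition code set by AddS32: a
// non-negative budget (ge) means the budget is not yet exhausted, so the
// caller may skip raising a bytecode-budget interrupt.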

void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                       uint32_t depth,
                                       CompressionMode compression_mode) {
  for (; depth > 0; --depth) {
    LoadTaggedField(context, context, Context::kPreviousOffset);
  }
  LoadTaggedField(kInterpreterAccumulatorRegister, context,
                  Context::OffsetOfElementAt(index));
}

void BaselineAssembler::StaContextSlot(Register context, Register value,
                                       uint32_t index, uint32_t depth) {
  for (; depth > 0; --depth) {
    LoadTaggedField(context, context, Context::kPreviousOffset);
  }
  StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                   value);
}

void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                          uint32_t depth) {
  for (; depth > 0; --depth) {
    LoadTaggedField(context, context, Context::kPreviousOffset);
  }
  LoadTaggedField(context, context, Context::kExtensionOffset);
  if (cell_index > 0) {
    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
    // The actual array index is (cell_index - 1).
    cell_index -= 1;
  } else {
    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
    // The actual array index is (-cell_index - 1).
    cell_index = -cell_index - 1;
  }
  LoadFixedArrayElement(context, context, cell_index);
  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
}

void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                          int cell_index, uint32_t depth) {
  for (; depth > 0; --depth) {
    LoadTaggedField(context, context, Context::kPreviousOffset);
  }
  LoadTaggedField(context, context, Context::kExtensionOffset);
  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);

  // The actual array index is (cell_index - 1).
  cell_index -= 1;
  LoadFixedArrayElement(context, context, cell_index);
  StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}

void BaselineAssembler::IncrementSmi(MemOperand lhs) {
  Register scratch = ip;
  if (SmiValuesAre31Bits()) {
    DCHECK(lhs.rb() == fp || lhs.rx() == fp);
    MemOperand addr = MemOperand(lhs.rx(), lhs.rb(), lhs.offset() + stack_bias);
    __ LoadS32(scratch, addr);
    __ AddU32(scratch, Operand(Smi::FromInt(1)));
    __ StoreU32(scratch, addr);
  } else {
    __ SmiUntag(scratch, lhs);
    __ AddU64(scratch, Operand(1));
    __ SmiTag(scratch);
    __ StoreU64(scratch, lhs);
  }
}

void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
  __ AndP(output, lhs, Operand(rhs));
}

void BaselineAssembler::Switch(Register reg, int case_value_base,
                               Label** labels, int num_labels) {
  ASM_CODE_COMMENT(masm_);
  Label fallthrough, jump_table;
  if (case_value_base != 0) {
    __ AddS64(reg, Operand(-case_value_base));
  }

  // Mostly copied from code-generator-arm.cc
  ScratchRegisterScope scope(this);
  JumpIf(kUnsignedGreaterThanEqual, reg, Operand(num_labels), &fallthrough);
  // Ensure to emit the constant pool first if necessary.
  int entry_size_log2 = 3;
  __ ShiftLeftU32(reg, reg, Operand(entry_size_log2));
  __ larl(r1, &jump_table);
  __ lay(reg, MemOperand(reg, r1));
  __ b(reg);
  __ b(&fallthrough);
  __ bind(&jump_table);
  for (int i = 0; i < num_labels; ++i) {
    __ b(labels[i], Label::kFar);
    __ nop();
  }
  __ bind(&fallthrough);
}
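// Note on the jump table above: each entry is a far branch (a 6-byte brcl)
// padded with a 2-byte nop, i.e. 8 bytes per entry, which is why
// entry_size_log2 is 3.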

#undef __

#define __ basm.

void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
  ASM_CODE_COMMENT(masm);
  BaselineAssembler basm(masm);

  Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
  Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();

  {
    ASM_CODE_COMMENT_STRING(masm, "Update Interrupt Budget");

    Label skip_interrupt_label;
    __ AddToInterruptBudgetAndJumpIfNotExceeded(weight, &skip_interrupt_label);
    {
      __ masm()->SmiTag(params_size);
      __ masm()->Push(params_size, kInterpreterAccumulatorRegister);

      __ LoadContext(kContextRegister);
      __ LoadFunction(kJSFunctionRegister);
      __ masm()->Push(kJSFunctionRegister);
      __ CallRuntime(Runtime::kBytecodeBudgetInterrupt_Sparkplug, 1);

      __ masm()->Pop(params_size, kInterpreterAccumulatorRegister);
      __ masm()->SmiUntag(params_size);
    }

    __ Bind(&skip_interrupt_label);
  }

  BaselineAssembler::ScratchRegisterScope temps(&basm);
  Register actual_params_size = temps.AcquireScratch();
  // Compute the size of the actual parameters + receiver.
  __ Move(actual_params_size,
          MemOperand(fp, StandardFrameConstants::kArgCOffset));

  // If actual is bigger than formal, then we should use it to free up the stack
  // arguments.
  Label corrected_args_count;
  JumpIfHelper(__ masm(), kGreaterThanEqual, params_size, actual_params_size,
               &corrected_args_count);
  __ masm()->mov(params_size, actual_params_size);
  __ Bind(&corrected_args_count);

  // Leave the frame (also dropping the register file).
  __ masm()->LeaveFrame(StackFrame::BASELINE);

  // Drop receiver + arguments.
  __ masm()->DropArguments(params_size);
  __ masm()->Ret();
}

#undef __

inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
    Register reg) {
  if (COMPRESS_POINTERS_BOOL) {
    assembler_->masm()->CmpU32(reg, kInterpreterAccumulatorRegister);
  } else {
    assembler_->masm()->CmpU64(reg, kInterpreterAccumulatorRegister);
  }
  assembler_->masm()->Assert(eq, AbortReason::kAccumulatorClobbered);
}

}  // namespace baseline
}  // namespace internal
}  // namespace v8

#endif  // V8_BASELINE_S390_BASELINE_ASSEMBLER_S390_INL_H_