v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
baseline-assembler-x64-inl.h
Go to the documentation of this file.
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
4
5#ifndef V8_BASELINE_X64_BASELINE_ASSEMBLER_X64_INL_H_
6#define V8_BASELINE_X64_BASELINE_ASSEMBLER_X64_INL_H_
7
8#include "src/base/macros.h"
13
14namespace v8 {
15namespace internal {
16namespace baseline {
17
namespace detail {

// Scratch GPRs handed out by ScratchRegisterScope on x64.
// Avoid using kScratchRegister(==r10) since the macro-assembler doesn't use
// this scope and will conflict.
static constexpr Register kScratchRegisters[] = {r8, r9, r11, r12, r15};

}  // namespace detail
26
48
namespace detail {

#define __ masm_->

#ifdef DEBUG
// Debug-only helper: returns true when the addressing mode of |op| reads
// |target|, i.e. writing |target| before using |op| would corrupt the
// operand's address computation.
inline bool Clobbers(Register target, MemOperand op) {
  return op.AddressUsesRegister(target);
}
#endif

}  // namespace detail
60
62 interpreter::Register interpreter_register) {
63 return MemOperand(rbp, interpreter_register.ToOperand() * kSystemPointerSize);
64}
66 interpreter::Register interpreter_register, Register rscratch) {
67 return __ leaq(rscratch, MemOperand(rbp, interpreter_register.ToOperand() *
69}
72}
75}
76
77void BaselineAssembler::Bind(Label* label) { __ bind(label); }
78
80 __ endbr64();
81}
82
// Unconditional jump to |target|.
void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
  __ jmp(target, distance);
}
// Jumps to |target| iff |value| equals the given root constant.
void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
                                   Label* target, Label::Distance distance) {
  __ JumpIfRoot(value, index, target, distance);
}
// Jumps to |target| iff |value| differs from the given root constant.
void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
                                      Label* target, Label::Distance distance) {
  __ JumpIfNotRoot(value, index, target, distance);
}
// Jumps to |target| iff |value| is a Smi-tagged value.
void BaselineAssembler::JumpIfSmi(Register value, Label* target,
                                  Label::Distance distance) {
  __ JumpIfSmi(value, target, distance);
}
// Jumps to |target| iff |value| is not a Smi-tagged value.
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
                                     Label::Distance distance) {
  __ JumpIfNotSmi(value, target, distance);
}
102
103void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
104 Label* target, Label::Distance distance) {
105 if ((mask & 0xff) == mask) {
106 __ testb(value, Immediate(mask));
107 } else {
108 __ testl(value, Immediate(mask));
109 }
110 __ j(cc, target, distance);
111}
112
// Full-width (64-bit) compare of |lhs| against |rhs|, then branch on |cc|.
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
                               Label* target, Label::Distance distance) {
  __ cmpq(lhs, rhs);
  __ j(cc, target, distance);
}
118
#if V8_STATIC_ROOTS_BOOL
// Branches to |target| when |heap_object| is a primitive (non-JSReceiver)
// value. Requires a non-Smi input; uses one scratch register.
void BaselineAssembler::JumpIfJSAnyIsPrimitive(Register heap_object,
                                               Label* target,
                                               Label::Distance distance) {
  __ AssertNotSmi(heap_object);
  ScratchRegisterScope scratch_scope(this);
  const Register tmp = scratch_scope.AcquireScratch();
  __ JumpIfJSAnyIsPrimitive(heap_object, tmp, target, distance);
}
#endif  // V8_STATIC_ROOTS_BOOL
129
131 InstanceType instance_type,
132 Label* target,
133 Label::Distance distance) {
134 __ AssertNotSmi(object);
135 ScratchRegisterScope temps(this);
136 Register scratch = temps.AcquireScratch();
137 if (cc == Condition::kEqual || cc == Condition::kNotEqual) {
138 __ IsObjectType(object, instance_type, scratch);
139 } else {
140 __ CmpObjectType(object, instance_type, scratch);
141 }
142 __ j(cc, target, distance);
143}
144
146 InstanceType instance_type,
147 Register map, Label* target,
148 Label::Distance distance) {
149 __ AssertNotSmi(object);
150 __ CmpObjectType(object, instance_type, map);
151 __ j(cc, target, distance);
152}
154 InstanceType instance_type,
155 Label* target,
156 Label::Distance distance) {
157 if (v8_flags.debug_code) {
158 __ AssertNotSmi(map);
159 __ CmpObjectType(map, MAP_TYPE, kScratchRegister);
160 __ Assert(equal, AbortReason::kUnexpectedValue);
161 }
162 __ CmpInstanceType(map, instance_type);
163 __ j(cc, target, distance);
164}
166 MemOperand operand, Label* target,
167 Label::Distance distance) {
168 __ cmpq(value, operand);
169 __ j(cc, target, distance);
170}
172 Label* target, Label::Distance distance) {
173 __ SmiCompare(lhs, smi);
174 __ j(cc, target, distance);
175}
// Compares two Smi registers and branches on |cc|.
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
                                  Label* target, Label::Distance distance) {
  __ SmiCompare(lhs, rhs);
  __ j(cc, target, distance);
}

// Compares |left| against the immediate |right| (full width) and branches.
void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
                                        Label* target,
                                        Label::Distance distance) {
  __ cmpq(left, Immediate(right));
  __ j(cc, target, distance);
}

// Tagged-width comparison (cmp_tagged, compressed when pointer compression
// is on); branches on |cc|.
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
                                     MemOperand operand, Label* target,
                                     Label::Distance distance) {
  __ cmp_tagged(value, operand);
  __ j(cc, target, distance);
}
197 Register value, Label* target,
198 Label::Distance distance) {
199 __ cmp_tagged(operand, value);
200 __ j(cc, target, distance);
201}
// Byte-width compare of |value| against |byte|, branch on |cc|.
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
                                   Label* target, Label::Distance distance) {
  __ cmpb(value, Immediate(byte));
  __ j(cc, target, distance);
}

// Stores |source| into |output|'s baseline-frame slot.
void BaselineAssembler::Move(interpreter::Register output, Register source) {
  return __ movq(RegisterFrameOperand(output), source);
}
// Loads a TaggedIndex constant into |output|.
void BaselineAssembler::Move(Register output, Tagged<TaggedIndex> value) {
  __ Move(output, value);
}
// Stores the full register |source| to memory at |output|.
void BaselineAssembler::Move(MemOperand output, Register source) {
  __ movq(output, source);
}
// Loads an external reference into |output|.
void BaselineAssembler::Move(Register output, ExternalReference reference) {
  __ Move(output, reference);
}
// Loads a heap-object handle into |output|.
void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
  __ Move(output, value);
}
// Loads a 32-bit immediate into |output|.
void BaselineAssembler::Move(Register output, int32_t value) {
  __ Move(output, value);
}
// Tagged-width register-to-register moves (value may or may not be a Smi).
void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
  __ mov_tagged(output, source);
}
void BaselineAssembler::MoveSmi(Register output, Register source) {
  __ mov_tagged(output, source);
}
232
233namespace detail {
234inline void PushSingle(MacroAssembler* masm, RootIndex source) {
235 masm->PushRoot(source);
236}
237inline void PushSingle(MacroAssembler* masm, Register reg) { masm->Push(reg); }
238inline void PushSingle(MacroAssembler* masm, Tagged<TaggedIndex> value) {
239 masm->Push(value);
240}
241inline void PushSingle(MacroAssembler* masm, Tagged<Smi> value) {
242 masm->Push(value);
243}
244inline void PushSingle(MacroAssembler* masm, Handle<HeapObject> object) {
245 masm->Push(object);
246}
247inline void PushSingle(MacroAssembler* masm, int32_t immediate) {
248 masm->Push(Immediate(immediate));
249}
250inline void PushSingle(MacroAssembler* masm, MemOperand operand) {
251 masm->Push(operand);
252}
253inline void PushSingle(MacroAssembler* masm, interpreter::Register source) {
255}
256
257template <typename Arg>
259 static int Push(BaselineAssembler* basm, Arg arg) {
260 PushSingle(basm->masm(), arg);
261 return 1;
262 }
263 static int PushReverse(BaselineAssembler* basm, Arg arg) {
264 return Push(basm, arg);
265 }
266};
267
268template <>
269struct PushHelper<interpreter::RegisterList> {
271 for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
272 PushSingle(basm->masm(), list[reg_index]);
273 }
274 return list.register_count();
275 }
278 for (int reg_index = list.register_count() - 1; reg_index >= 0;
279 --reg_index) {
280 PushSingle(basm->masm(), list[reg_index]);
281 }
282 return list.register_count();
283 }
284};
285
286template <typename... Args>
288template <>
290 static int Push(BaselineAssembler* masm) { return 0; }
291 static int PushReverse(BaselineAssembler* masm) { return 0; }
292};
293template <typename Arg, typename... Args>
294struct PushAllHelper<Arg, Args...> {
295 static int Push(BaselineAssembler* masm, Arg arg, Args... args) {
296 int nargs = PushHelper<Arg>::Push(masm, arg);
297 return nargs + PushAllHelper<Args...>::Push(masm, args...);
298 }
299 static int PushReverse(BaselineAssembler* masm, Arg arg, Args... args) {
300 int nargs = PushAllHelper<Args...>::PushReverse(masm, args...);
301 return nargs + PushHelper<Arg>::PushReverse(masm, arg);
302 }
303};
304
305} // namespace detail
306
307template <typename... T>
308int BaselineAssembler::Push(T... vals) {
309 return detail::PushAllHelper<T...>::Push(this, vals...);
310}
311
312template <typename... T>
313void BaselineAssembler::PushReverse(T... vals) {
314 detail::PushAllHelper<T...>::PushReverse(this, vals...);
315}
316
317template <typename... T>
319 (__ Pop(registers), ...);
320}
321
// Loads the tagged field at |source|+|offset| into |output|.
void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                        int offset) {
  __ LoadTaggedField(output, FieldOperand(source, offset));
}
326void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
327 int offset) {
329}
331 Register source,
332 int offset) {
333 __ SmiUntagField(output, FieldOperand(source, offset));
334}
336 Register source, int offset) {
337 __ movzxwq(output, FieldOperand(source, offset));
338}
// Loads the 8-bit field at |source|+|offset| into |output|'s low byte.
void BaselineAssembler::LoadWord8Field(Register output, Register source,
                                       int offset) {
  __ movb(output, FieldOperand(source, offset));
}
343void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
344 Tagged<Smi> value) {
346}
348 int offset,
349 Register value) {
352 DCHECK(!AreAliased(target, value, scratch));
353 __ StoreTaggedField(FieldOperand(target, offset), value);
354 __ RecordWriteField(target, offset, value, scratch, SaveFPRegsMode::kIgnore);
355}
357 int offset,
358 Register value) {
359 __ StoreTaggedField(FieldOperand(target, offset), value);
360}
361
// TaggedRegister overloads: the loaded value stays in compressed form so it
// can feed complex addressing modes without decompression.
void BaselineAssembler::LoadTaggedField(TaggedRegister output, Register source,
                                        int offset) {
  __ LoadTaggedField(output, FieldOperand(source, offset));
}

void BaselineAssembler::LoadTaggedField(TaggedRegister output,
                                        TaggedRegister source, int offset) {
  __ LoadTaggedField(output, FieldOperand(source, offset));
}

void BaselineAssembler::LoadTaggedField(Register output, TaggedRegister source,
                                        int offset) {
  __ LoadTaggedField(output, FieldOperand(source, offset));
}
376
378 TaggedRegister array,
379 int32_t index) {
380 LoadTaggedField(output, array,
381 OFFSET_OF_DATA_START(FixedArray) + index * kTaggedSize);
382}
383
384void BaselineAssembler::LoadFixedArrayElement(TaggedRegister output,
385 TaggedRegister array,
386 int32_t index) {
387 LoadTaggedField(output, array,
388 OFFSET_OF_DATA_START(FixedArray) + index * kTaggedSize);
389}
390
391void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
392 Register feedback_vector,
393 FeedbackSlot slot,
394 Label* on_result,
395 Label::Distance distance) {
397 CodeKind::MAGLEV, feedback_vector,
398 slot, on_result, distance);
399}
400
402 int32_t weight, Label* skip_interrupt_label) {
404 ScratchRegisterScope scratch_scope(this);
405 Register feedback_cell = scratch_scope.AcquireScratch();
406 LoadFeedbackCell(feedback_cell);
407 __ addl(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
408 Immediate(weight));
409 if (skip_interrupt_label) {
410 DCHECK_LT(weight, 0);
411 __ j(greater_equal, skip_interrupt_label);
412 }
413}
414
416 Register weight, Label* skip_interrupt_label) {
418 ScratchRegisterScope scratch_scope(this);
419 Register feedback_cell = scratch_scope.AcquireScratch();
420 LoadFeedbackCell(feedback_cell);
421 __ addl(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
422 weight);
423 if (skip_interrupt_label) __ j(greater_equal, skip_interrupt_label);
424}
425
426void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
427 uint32_t depth,
428 CompressionMode compression_mode) {
429 // [context] is coming from interpreter frame so it is already decompressed
430 // when pointer compression is enabled. In order to make use of complex
431 // addressing mode, any intermediate context pointer is loaded in compressed
432 // form.
433 if (depth == 0) {
436 } else {
437 TaggedRegister tagged(context);
439 --depth;
440 for (; depth > 0; --depth) {
442 }
446 compression_mode == CompressionMode::kForceDecompression) {
447 __ addq(tagged.reg(), kPtrComprCageBaseRegister);
448 }
449 }
450}
451
452void BaselineAssembler::StaContextSlot(Register context, Register value,
453 uint32_t index, uint32_t depth) {
454 // [context] is coming from interpreter frame so it is already decompressed
455 // when pointer compression is enabled. In order to make use of complex
456 // addressing mode, any intermediate context pointer is loaded in compressed
457 // form.
458 if (depth > 0) {
459 TaggedRegister tagged(context);
461 --depth;
462 for (; depth > 0; --depth) {
464 }
466 // Decompress tagged pointer.
467 __ addq(tagged.reg(), kPtrComprCageBaseRegister);
468 }
469 }
471 value);
472}
473
474void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
475 uint32_t depth) {
476 // [context] is coming from interpreter frame so it is already decompressed.
477 // In order to make use of complex addressing mode when pointer compression is
478 // enabled, any intermediate context pointer is loaded in compressed form.
479 TaggedRegister tagged(context);
480 if (depth == 0) {
482 } else {
484 --depth;
485 for (; depth > 0; --depth) {
487 }
489 }
490 if (cell_index > 0) {
491 LoadTaggedField(tagged, tagged, SourceTextModule::kRegularExportsOffset);
492 // The actual array index is (cell_index - 1).
493 cell_index -= 1;
494 } else {
495 LoadTaggedField(tagged, tagged, SourceTextModule::kRegularImportsOffset);
496 // The actual array index is (-cell_index - 1).
497 cell_index = -cell_index - 1;
498 }
499 LoadFixedArrayElement(tagged, tagged, cell_index);
500 LoadTaggedField(kInterpreterAccumulatorRegister, tagged, Cell::kValueOffset);
501}
502
503void BaselineAssembler::StaModuleVariable(Register context, Register value,
504 int cell_index, uint32_t depth) {
505 // [context] is coming from interpreter frame so it is already decompressed.
506 // In order to make use of complex addressing mode when pointer compression is
507 // enabled, any intermediate context pointer is loaded in compressed form.
508 TaggedRegister tagged(context);
509 if (depth == 0) {
511 } else {
513 --depth;
514 for (; depth > 0; --depth) {
516 }
518 }
519 LoadTaggedField(tagged, tagged, SourceTextModule::kRegularExportsOffset);
520
521 // The actual array index is (cell_index - 1).
522 cell_index -= 1;
523 LoadFixedArrayElement(context, tagged, cell_index);
524 StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
525}
526
528 __ SmiAddConstant(lhs, Smi::FromInt(1));
529}
530
// output = lhs & rhs; |lhs| itself is not modified unless it aliases
// |output|.
void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
  Move(output, lhs);
  __ andq(output, Immediate(rhs));
}
535
536void BaselineAssembler::Switch(Register reg, int case_value_base,
537 Label** labels, int num_labels) {
539 ScratchRegisterScope scope(this);
540 __ Switch(scope.AcquireScratch(), reg, case_value_base, labels, num_labels);
541}
542
#ifdef V8_ENABLE_CET_SHADOW_STACK
// Emits padding at a potential deopt site when running CET-compatible.
// NOTE(review): the emitted instruction was dropped by extraction; restored
// as a nop sized kIntraSegmentJmpInstrSize (per the constant referenced in
// the declaration index) — confirm against upstream.
void BaselineAssembler::MaybeEmitPlaceHolderForDeopt() {
  if (v8_flags.cet_compatible) {
    __ Nop(Assembler::kIntraSegmentJmpInstrSize);
  }
}
#endif  // V8_ENABLE_CET_SHADOW_STACK
550
551#undef __
552#define __ basm.
553
// Emits the baseline epilogue: updates the interrupt budget (calling the
// Sparkplug budget-interrupt runtime if exhausted), leaves the BASELINE
// frame, and drops max(formal, actual) parameters plus the receiver.
// NOTE(review): extraction dropped several lines in this function — the
// BaselineAssembler setup, the |weight| register definition, the push/pop
// pairs protecting the accumulator and params_size around the runtime call,
// and the second operand of the movq below. Restore them from upstream
// before relying on this text; the remaining lines are untouched.
void BaselineAssembler::EmitReturn(MacroAssembler* masm) {

  Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();

  {
    ASM_CODE_COMMENT_STRING(masm, "Update Interrupt Budget");

    Label skip_interrupt_label;
    __ AddToInterruptBudgetAndJumpIfNotExceeded(weight, &skip_interrupt_label);
    {
      // params_size is Smi-tagged so it survives the runtime call as a
      // tagged value.
      __ masm()->SmiTag(params_size);

      __ CallRuntime(Runtime::kBytecodeBudgetInterrupt_Sparkplug, 1);

      __ masm()->SmiUntagUnsigned(params_size);
    }
    __ Bind(&skip_interrupt_label);
  }

  BaselineAssembler::ScratchRegisterScope scope(&basm);
  Register scratch = scope.AcquireScratch();

  Register actual_params_size = scratch;
  // Compute the size of the actual parameters + receiver.
  __ masm()->movq(actual_params_size,

  // If actual is bigger than formal, then we should use it to free up the stack
  // arguments.
  __ masm()->cmpq(params_size, actual_params_size);
  __ masm()->cmovq(kLessThan, params_size, actual_params_size);

  // Leave the frame (also dropping the register file).
  __ masm()->LeaveFrame(StackFrame::BASELINE);

  // Drop receiver + arguments.
  __ masm()->DropArguments(params_size, scratch);
  __ masm()->Ret();
}
600
601#undef __
602
604 Register reg) {
606 assembler_->masm()->Assert(equal, AbortReason::kAccumulatorClobbered);
607}
608
609} // namespace baseline
610} // namespace internal
611} // namespace v8
612
613#endif // V8_BASELINE_X64_BASELINE_ASSEMBLER_X64_INL_H_
#define Assert(condition)
void cmovq(Condition cc, Register dst, Register src)
static constexpr int kIntraSegmentJmpInstrSize
void movq(XMMRegister dst, Operand src)
static const int kExtensionOffset
Definition contexts.h:500
static V8_INLINE constexpr int OffsetOfElementAt(int index)
Definition contexts.h:512
static const int kPreviousOffset
Definition contexts.h:492
void SmiUntagUnsigned(Register reg)
void Assert(Condition cond, AbortReason reason) NOOP_UNLESS_DEBUG_CODE
void SmiTag(Register reg, SBit s=LeaveCC)
void Ret(Condition cond=al)
int LeaveFrame(StackFrame::Type type)
void TryLoadOptimizedOsrCode(Register scratch_and_result, CodeKind min_opt_level, Register feedback_vector, FeedbackSlot slot, Label *on_result, Label::Distance distance)
static constexpr Tagged< Smi > FromInt(int value)
Definition smi.h:38
void JumpIfByte(Condition cc, Register value, int32_t byte, Label *target, Label::Distance distance=Label::kFar)
void CallRuntime(Runtime::FunctionId function, int nargs)
void JumpIf(Condition cc, Register lhs, const Operand &rhs, Label *target, Label::Distance distance=Label::kFar)
void LoadFixedArrayElement(Register output, Register array, int32_t index)
static MemOperand RegisterFrameOperand(interpreter::Register interpreter_register)
void JumpIfNotRoot(Register value, RootIndex index, Label *target, Label ::Distance distance=Label::kFar)
void JumpIfPointer(Condition cc, Register value, MemOperand operand, Label *target, Label::Distance distance=Label::kFar)
void Move(Register output, Register source)
void MoveSmi(Register output, Register source)
void TestAndBranch(Register value, int mask, Condition cc, Label *target, Label::Distance distance=Label::kFar)
void LoadWord8Field(Register output, Register source, int offset)
void LoadWord16FieldZeroExtend(Register output, Register source, int offset)
void AddToInterruptBudgetAndJumpIfNotExceeded(int32_t weight, Label *skip_interrupt_label)
void LdaContextSlot(Register context, uint32_t index, uint32_t depth, CompressionMode compression_mode=CompressionMode::kDefault)
void LoadTaggedField(Register output, Register source, int offset)
void Jump(Label *target, Label::Distance distance=Label::kFar)
void Switch(Register reg, int case_value_base, Label **labels, int num_labels)
void StaModuleVariable(Register context, Register value, int cell_index, uint32_t depth)
void JumpIfObjectTypeFast(Condition cc, Register object, InstanceType instance_type, Label *target, Label::Distance distance=Label::kFar)
void MoveMaybeSmi(Register output, Register source)
void StoreTaggedFieldNoWriteBarrier(Register target, int offset, Register value)
void TryLoadOptimizedOsrCode(Register scratch_and_result, Register feedback_vector, FeedbackSlot slot, Label *on_result, Label::Distance distance)
void LoadTaggedSignedFieldAndUntag(Register output, Register source, int offset)
void JumpIfInstanceType(Condition cc, Register map, InstanceType instance_type, Label *target, Label::Distance distance=Label::kFar)
void JumpIfImmediate(Condition cc, Register left, int right, Label *target, Label::Distance distance=Label::kFar)
void JumpIfRoot(Register value, RootIndex index, Label *target, Label::Distance distance=Label::kFar)
void JumpIfSmi(Register value, Label *target, Label::Distance distance=Label::kFar)
void JumpIfNotSmi(Register value, Label *target, Label::Distance distance=Label::kFar)
void JumpIfObjectType(Condition cc, Register object, InstanceType instance_type, Register map, Label *target, Label::Distance distance=Label::kFar)
void LdaModuleVariable(Register context, int cell_index, uint32_t depth)
void StoreTaggedFieldWithWriteBarrier(Register target, int offset, Register value)
void LoadTaggedSignedField(Register output, Register source, int offset)
void RegisterFrameAddress(interpreter::Register interpreter_register, Register rscratch)
void Word32And(Register output, Register lhs, int rhs)
void StoreTaggedSignedField(Register target, int offset, Tagged< Smi > value)
void StaContextSlot(Register context, Register value, uint32_t index, uint32_t depth)
void JumpIfTagged(Condition cc, Register value, MemOperand operand, Label *target, Label::Distance distance=Label::kFar)
#define ASM_CODE_COMMENT_STRING(asm,...)
Definition assembler.h:618
#define ASM_CODE_COMMENT(asm)
Definition assembler.h:617
#define COMPRESS_POINTERS_BOOL
Definition globals.h:99
base::Vector< const DirectHandle< Object > > args
Definition execution.cc:74
Label label
int32_t offset
LiftoffRegister reg
uint32_t const mask
RegListBase< RegisterT > registers
void PushSingle(MacroAssembler *masm, RootIndex source)
static constexpr Register kScratchRegisters[]
constexpr int kTaggedSize
Definition globals.h:542
Operand FieldOperand(Register object, int offset)
constexpr Register kInterpreterAccumulatorRegister
constexpr int kSystemPointerSize
Definition globals.h:410
constexpr Register kScratchRegister
constexpr Register kContextRegister
V8_EXPORT_PRIVATE bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
V8_EXPORT_PRIVATE FlagValues v8_flags
constexpr Register r11
constexpr Register kPtrComprCageBaseRegister
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define arraysize(array)
Definition macros.h:67
static int PushReverse(BaselineAssembler *masm, Arg arg, Args... args)
static int Push(BaselineAssembler *masm, Arg arg, Args... args)
static int PushReverse(BaselineAssembler *basm, interpreter::RegisterList list)
static int Push(BaselineAssembler *basm, interpreter::RegisterList list)
static int PushReverse(BaselineAssembler *basm, Arg arg)
static int Push(BaselineAssembler *basm, Arg arg)
#define OFFSET_OF_DATA_START(Type)