v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
macro-assembler-arm.h
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_CODEGEN_ARM_MACRO_ASSEMBLER_ARM_H_
#define V8_CODEGEN_ARM_MACRO_ASSEMBLER_ARM_H_

#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
#error This header must be included via macro-assembler.h
#endif

#include <optional>

#include "src/common/globals.h"

namespace v8 {
namespace internal {

// TODO(victorgomes): Move definition to macro-assembler.h, once all other
// platforms are updated.
enum class StackLimitKind { kInterruptStackLimit, kRealStackLimit };

// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
}

enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };

Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
                                   Register reg3 = no_reg,
                                   Register reg4 = no_reg,
                                   Register reg5 = no_reg,
                                   Register reg6 = no_reg);

enum TargetAddressStorageMode {
  CAN_INLINE_TARGET_ADDRESS,
  NEVER_INLINE_TARGET_ADDRESS
};

class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
 public:
  using MacroAssemblerBase::MacroAssemblerBase;

  // Activation support.
  void EnterFrame(StackFrame::Type type,
                  bool load_constant_pool_pointer_reg = false);
  // Returns the pc offset at which the frame ends.
  int LeaveFrame(StackFrame::Type type);

  // Allocate stack space of given size (i.e. decrement {sp} by the value
  // stored in the given register, or by a constant). If you need to perform a
  // stack check, do it before calling this function because this function may
  // write into the newly allocated space. It may also overwrite the given
  // register's value, in the version that takes a register.
#ifdef V8_OS_WIN
  void AllocateStackSpace(Register bytes_scratch);
  void AllocateStackSpace(int bytes);
#else
  void AllocateStackSpace(Register bytes) { sub(sp, sp, bytes); }
  void AllocateStackSpace(int bytes) {
    DCHECK_GE(bytes, 0);
    if (bytes == 0) return;
    sub(sp, sp, Operand(bytes));
  }
#endif
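
  // Usage sketch (illustrative, not part of the original header; assumes the
  // usual `#define __ ACCESS_MASM(masm)` idiom and hypothetical registers):
  //
  //   __ AllocateStackSpace(2 * kPointerSize);  // sp -= 8
  //   __ str(r0, MemOperand(sp, 0 * kPointerSize));
  //   __ str(r1, MemOperand(sp, 1 * kPointerSize));
  //   ...
  //   __ add(sp, sp, Operand(2 * kPointerSize));  // release the space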

  // Push a fixed frame, consisting of lr, fp.
  void PushCommonFrame(Register marker_reg = no_reg);

  // Generates function and stub prologue code.
  void StubPrologue(StackFrame::Type type);
  void Prologue();

  void DropArguments(Register count);
  void DropArgumentsAndPushNewReceiver(Register argc, Register receiver);

  // Push a standard frame, consisting of lr, fp, context and JS function.
  void PushStandardFrame(Register function_reg);

  void InitializeRootRegister();

  void Push(Register src) { push(src); }

  void Push(Handle<HeapObject> handle);
  void Push(Tagged<Smi> smi);
  void Push(Tagged<TaggedIndex> index);

  // Push two registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Condition cond = al) {
    if (src1.code() > src2.code()) {
      stm(db_w, sp, {src1, src2}, cond);
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      str(src2, MemOperand(sp, 4, NegPreIndex), cond);
    }
  }

  // Push three registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Condition cond = al) {
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        stm(db_w, sp, {src1, src2, src3}, cond);
      } else {
        stm(db_w, sp, {src1, src2}, cond);
        str(src3, MemOperand(sp, 4, NegPreIndex), cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, cond);
    }
  }

  // Push four registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4,
            Condition cond = al) {
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        if (src3.code() > src4.code()) {
          stm(db_w, sp, {src1, src2, src3, src4}, cond);
        } else {
          stm(db_w, sp, {src1, src2, src3}, cond);
          str(src4, MemOperand(sp, 4, NegPreIndex), cond);
        }
      } else {
        stm(db_w, sp, {src1, src2}, cond);
        Push(src3, src4, cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, src4, cond);
    }
  }

  // Push five registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4,
            Register src5, Condition cond = al) {
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        if (src3.code() > src4.code()) {
          if (src4.code() > src5.code()) {
            stm(db_w, sp, {src1, src2, src3, src4, src5}, cond);
          } else {
            stm(db_w, sp, {src1, src2, src3, src4}, cond);
            str(src5, MemOperand(sp, 4, NegPreIndex), cond);
          }
        } else {
          stm(db_w, sp, {src1, src2, src3}, cond);
          Push(src4, src5, cond);
        }
      } else {
        stm(db_w, sp, {src1, src2}, cond);
        Push(src3, src4, src5, cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, src4, src5, cond);
    }
  }
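
  // Illustrative note (not part of the original header): every multi-register
  // Push stores the leftmost register at the highest address, so the two
  // sequences below produce the same stack layout (r0 ends up at the lowest
  // address, on top):
  //
  //   __ Push(r2, r1, r0);
  //
  //   __ push(r2);
  //   __ push(r1);
  //   __ push(r0);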

  enum class PushArrayOrder { kNormal, kReverse };
  // `array` points to the first element (the lowest address).
  // `array` and `size` are not modified.
  void PushArray(Register array, Register size, Register scratch,
                 PushArrayOrder order = PushArrayOrder::kNormal);

  void Pop(Register dst) { pop(dst); }

  // Pop two registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Condition cond = al) {
    DCHECK(src1 != src2);
    if (src1.code() > src2.code()) {
      ldm(ia_w, sp, {src1, src2}, cond);
    } else {
      ldr(src2, MemOperand(sp, 4, PostIndex), cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
    }
  }

  // Pop three registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
    DCHECK(!AreAliased(src1, src2, src3));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        ldm(ia_w, sp, {src1, src2, src3}, cond);
      } else {
        ldr(src3, MemOperand(sp, 4, PostIndex), cond);
        ldm(ia_w, sp, {src1, src2}, cond);
      }
    } else {
      Pop(src2, src3, cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
    }
  }

  // Pop four registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3, Register src4,
           Condition cond = al) {
    DCHECK(!AreAliased(src1, src2, src3, src4));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        if (src3.code() > src4.code()) {
          ldm(ia_w, sp, {src1, src2, src3, src4}, cond);
        } else {
          ldr(src4, MemOperand(sp, 4, PostIndex), cond);
          ldm(ia_w, sp, {src1, src2, src3}, cond);
        }
      } else {
        Pop(src3, src4, cond);
        ldm(ia_w, sp, {src1, src2}, cond);
      }
    } else {
      Pop(src2, src3, src4, cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
    }
  }

  // Before calling a C-function from generated code, align arguments on the
  // stack. After aligning the frame, non-register arguments must be stored in
  // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
  // are word sized. If double arguments are used, this function assumes that
  // all double arguments are stored before core registers; otherwise the
  // correct alignment of the double values is not guaranteed.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_reg_arguments, int num_double_registers = 0,
                            Register scratch = no_reg);

  // There are two ways of passing double arguments on ARM, depending on
  // whether a soft or hard floating-point ABI is used. These functions
  // abstract parameter passing for the three different ways we call
  // C functions from generated code.
  void MovToFloatParameter(DwVfpRegister src);
  void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2);
  void MovToFloatResult(DwVfpRegister src);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  int CallCFunction(
      ExternalReference function, int num_arguments,
      SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes,
      Label* return_label = nullptr);
  int CallCFunction(
      Register function, int num_arguments,
      SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes,
      Label* return_label = nullptr);
  int CallCFunction(
      ExternalReference function, int num_reg_arguments,
      int num_double_arguments,
      SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes,
      Label* return_label = nullptr);
  int CallCFunction(
      Register function, int num_reg_arguments, int num_double_arguments,
      SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes,
      Label* return_label = nullptr);
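
  // Usage sketch (illustrative; `ref` is assumed to be an ExternalReference
  // to a C function taking two int arguments, which arrive in r0/r1 per the
  // ARM AAPCS):
  //
  //   __ PrepareCallCFunction(2);
  //   __ Move(r0, Operand(1));
  //   __ Move(r1, Operand(2));
  //   __ CallCFunction(ref, 2);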

  void MovFromFloatParameter(DwVfpRegister dst);
  void MovFromFloatResult(DwVfpRegister dst);

  void Trap();
  void DebugBreak();

  // Calls Abort(msg) if the condition cond is not satisfied.
  // Use --debug-code to enable.
  void Assert(Condition cond, AbortReason reason) NOOP_UNLESS_DEBUG_CODE;

  // Like Assert(), but without condition.
  // Use --debug-code to enable.
  void AssertUnreachable(AbortReason reason) NOOP_UNLESS_DEBUG_CODE;

  // Like Assert(), but always enabled.
  void Check(Condition cond, AbortReason reason);

  // Print a message to stdout and abort execution.
  void Abort(AbortReason msg);

  void LslPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, Register shift);
  void LslPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, uint32_t shift);
  void LsrPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, Register shift);
  void LsrPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, uint32_t shift);
  void AsrPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, Register shift);
  void AsrPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, uint32_t shift);
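
  // Illustrative note (not part of the original header): the *Pair helpers
  // operate on a 64-bit value split across a (low, high) register pair.
  // With hypothetical registers:
  //
  //   __ LslPair(r2, r3, r0, r1, r4);  // r3:r2 = r1:r0 << r4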

  void LoadFromConstantsTable(Register destination, int constant_index) final;
  void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
  void LoadRootRelative(Register destination, int32_t offset) final;
  void StoreRootRelative(int32_t offset, Register value) final;

  // Operand pointing to an external reference.
  // May emit code to set up the scratch register. The operand is
  // only guaranteed to be correct as long as the scratch register
  // isn't changed.
  // If the operand is used more than once, use a scratch register
  // that is guaranteed not to be clobbered.
  MemOperand ExternalReferenceAsOperand(ExternalReference reference,
                                        Register scratch);
  MemOperand ExternalReferenceAsOperand(IsolateFieldId id) {
    return ExternalReferenceAsOperand(ExternalReference::Create(id), no_reg);
  }

  // Jump, Call, and Ret pseudo instructions implementing inter-working.
  void Call(Register target, Condition cond = al);
  void Call(Address target, RelocInfo::Mode rmode, Condition cond = al,
            TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS,
            bool check_constant_pool = true);
  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            Condition cond = al,
            TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS,
            bool check_constant_pool = true);
  void Call(Label* target);

  MemOperand EntryFromBuiltinAsOperand(Builtin builtin);
  void LoadEntryFromBuiltin(Builtin builtin, Register destination);
  // Load the builtin given by the Smi in |builtin_index| into |target|.
  void LoadEntryFromBuiltinIndex(Register builtin_index, Register target);
  void CallBuiltinByIndex(Register builtin_index, Register target);
  void CallBuiltin(Builtin builtin, Condition cond = al);
  void TailCallBuiltin(Builtin builtin, Condition cond = al);

#ifdef V8_ENABLE_LEAPTIERING
  void LoadEntrypointFromJSDispatchTable(Register destination,
                                         Register dispatch_handle,
                                         Register scratch);
#endif  // V8_ENABLE_LEAPTIERING

  // Load the code entry point from the Code object.
  void LoadCodeInstructionStart(
      Register destination, Register code_object,
      CodeEntrypointTag tag = kDefaultCodeEntrypointTag);
  void CallCodeObject(Register code_object);
  void JumpCodeObject(Register code_object,
                      JumpMode jump_mode = JumpMode::kJump);

  // Convenience functions to call/jmp to the code of a JSFunction object.
  void CallJSFunction(Register function_object, uint16_t argument_count);
  void JumpJSFunction(Register function_object,
                      JumpMode jump_mode = JumpMode::kJump);
#ifdef V8_ENABLE_LEAPTIERING
  void CallJSDispatchEntry(JSDispatchHandle dispatch_handle,
                           uint16_t argument_count);
#endif
#ifdef V8_ENABLE_WEBASSEMBLY
  void ResolveWasmCodePointer(Register target);
  void CallWasmCodePointer(Register target,
                           CallJumpMode call_jump_mode = CallJumpMode::kCall);
#endif

  // Generates an instruction sequence such that the return address points to
  // the instruction following the call.
  // The return address on the stack is used by frame iteration.
  void StoreReturnAddressAndCall(Register target);

  // Enforce platform specific stack alignment.
  void EnforceStackAlignment();

  // TODO(olivf, 42204201): Rename this to AssertNotDeoptimized once
  // non-leaptiering is removed from the codebase.
  void BailoutIfDeoptimized();
  void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
                             DeoptimizeKind kind, Label* ret,
                             Label* jump_deoptimization_entry_label);

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the sp register.
  void Drop(int count, Condition cond = al);
  void Drop(Register count, Condition cond = al);

  void Ret(Condition cond = al);

  // Compare single values and move the result to the normal condition flags.
  void VFPCompareAndSetFlags(const SwVfpRegister src1, const SwVfpRegister src2,
                             const Condition cond = al);
  void VFPCompareAndSetFlags(const SwVfpRegister src1, const float src2,
                             const Condition cond = al);

  // Compare double values and move the result to the normal condition flags.
  void VFPCompareAndSetFlags(const DwVfpRegister src1, const DwVfpRegister src2,
                             const Condition cond = al);
  void VFPCompareAndSetFlags(const DwVfpRegister src1, const double src2,
                             const Condition cond = al);

  // If the value is a NaN, canonicalize it; otherwise, do nothing.
  void VFPCanonicalizeNaN(const DwVfpRegister dst, const DwVfpRegister src,
                          const Condition cond = al);
  void VFPCanonicalizeNaN(const DwVfpRegister value,
                          const Condition cond = al) {
    VFPCanonicalizeNaN(value, value, cond);
  }

  void VmovHigh(Register dst, DwVfpRegister src);
  void VmovHigh(DwVfpRegister dst, Register src);
  void VmovLow(Register dst, DwVfpRegister src);
  void VmovLow(DwVfpRegister dst, Register src);

  void CheckPageFlag(Register object, int mask, Condition cc,
                     Label* condition_met);
  void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
                     Label* condition_met) {
    CheckPageFlag(object, mask, cc, condition_met);
  }

  // Check whether d16-d31 are available on the CPU. The result is given by the
  // Z condition flag: Z==0 if d16-d31 are available, Z==1 otherwise.
  void CheckFor32DRegs(Register scratch);

  void MaybeSaveRegisters(RegList registers);
  void MaybeRestoreRegisters(RegList registers);

  void CallEphemeronKeyBarrier(Register object, Operand offset,
                               SaveFPRegsMode fp_mode);

  void CallRecordWriteStubSaveRegisters(
      Register object, Operand offset, SaveFPRegsMode fp_mode,
      StubCallMode mode = StubCallMode::kCallBuiltinPointer);
  void CallRecordWriteStub(
      Register object, Register slot_address, SaveFPRegsMode fp_mode,
      StubCallMode mode = StubCallMode::kCallBuiltinPointer);

  // For a given |object| and |offset|:
  // - Move |object| to |dst_object|.
  // - Compute the address of the slot pointed to by |offset| in |object| and
  //   write it to |dst_slot|. |offset| can be either an immediate or a
  //   register.
  // This method makes sure |object| and |offset| are allowed to overlap with
  // the destination registers.
  void MoveObjectAndSlot(Register dst_object, Register dst_slot,
                         Register object, Operand offset);

  // Does a runtime check for 16/32 FP registers. Either way, pushes 32 double
  // values to location, saving [d0..(d15|d31)].
  void SaveFPRegs(Register location, Register scratch);

  // Does a runtime check for 16/32 FP registers. Either way, pops 32 double
  // values to location, restoring [d0..(d15|d31)].
  void RestoreFPRegs(Register location, Register scratch);

  // As above, but with heap semantics instead of stack semantics, i.e.: the
  // location starts at the lowest address and grows towards higher addresses,
  // for both saves and restores.
  void SaveFPRegsToHeap(Register location, Register scratch);
  void RestoreFPRegsFromHeap(Register location, Register scratch);

  // Calculate how much stack space (in bytes) is required to store caller
  // registers, excluding those specified in the arguments.
  int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                      Register exclusion1 = no_reg,
                                      Register exclusion2 = no_reg,
                                      Register exclusion3 = no_reg) const;

  // Push caller saved registers on the stack, and return the number of bytes
  // the stack pointer is adjusted by.
  int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
                      Register exclusion2 = no_reg,
                      Register exclusion3 = no_reg);
  // Restore caller saved registers from the stack, and return the number of
  // bytes the stack pointer is adjusted by.
  int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
                     Register exclusion2 = no_reg,
                     Register exclusion3 = no_reg);
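
  // Usage sketch (illustrative): preserve caller-saved registers around
  // clobbering code; the exclusion list must match between Push and Pop.
  // Here r0 is excluded because it carries a result out of the region.
  //
  //   __ PushCallerSaved(SaveFPRegsMode::kIgnore, r0);
  //   ... code that clobbers caller-saved registers, result in r0 ...
  //   __ PopCallerSaved(SaveFPRegsMode::kIgnore, r0);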
  void Jump(Register target, Condition cond = al);
  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(const ExternalReference& reference);

  void GetLabelAddress(Register dst, Label* target);

  // Perform a floating-point min or max operation with the
  // (IEEE-754-compatible) semantics of ARM64's fmin/fmax. Some cases, typically
  // NaNs or +/-0.0, are expected to be rare and are handled in out-of-line
  // code. The specific behaviour depends on the supported instructions.
  //
  // These functions assume (and assert) that left != right. It is permitted
  // for the result to alias either input register.
  void FloatMax(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right,
                Label* out_of_line);
  void FloatMin(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right,
                Label* out_of_line);
  void FloatMax(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right,
                Label* out_of_line);
  void FloatMin(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right,
                Label* out_of_line);

  // Generate out-of-line cases for the macros above.
  void FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left,
                         SwVfpRegister right);
  void FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left,
                         SwVfpRegister right);
  void FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left,
                         DwVfpRegister right);
  void FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left,
                         DwVfpRegister right);
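
  // Usage sketch (illustrative registers/labels): inline fast path with an
  // out-of-line slow path for the rare NaN and +/-0.0 cases.
  //
  //   Label out_of_line, done;
  //   __ FloatMax(d0, d1, d2, &out_of_line);
  //   __ b(&done);
  //   __ bind(&out_of_line);
  //   __ FloatMaxOutOfLine(d0, d1, d2);
  //   __ bind(&done);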

  void ExtractLane(Register dst, QwNeonRegister src, NeonDataType dt, int lane);
  void ExtractLane(Register dst, DwVfpRegister src, NeonDataType dt, int lane);
  void ExtractLane(SwVfpRegister dst, QwNeonRegister src, int lane);
  void ExtractLane(DwVfpRegister dst, QwNeonRegister src, int lane);
  void ReplaceLane(QwNeonRegister dst, QwNeonRegister src, Register src_lane,
                   NeonDataType dt, int lane);
  void ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
                   SwVfpRegister src_lane, int lane);
  void ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
                   DwVfpRegister src_lane, int lane);

  void LoadLane(NeonSize sz, NeonListOperand dst_list, uint8_t lane,
                NeonMemOperand src);
  void StoreLane(NeonSize sz, NeonListOperand src_list, uint8_t lane,
                 NeonMemOperand dst);

  // Register move. May do nothing if the registers are identical.
  void Move(Register dst, Tagged<Smi> smi);
  void Move(Register dst, Handle<HeapObject> value);
  void Move(Register dst, ExternalReference reference);
  void LoadIsolateField(Register dst, IsolateFieldId id);
  void Move(Register dst, Register src, Condition cond = al);
  void Move(Register dst, const MemOperand& src) { ldr(dst, src); }
  void Move(Register dst, const Operand& src, SBit sbit = LeaveCC,
            Condition cond = al) {
    if (!src.IsRegister() || src.rm() != dst || sbit != LeaveCC) {
      mov(dst, src, sbit, cond);
    }
  }
  // Move src0 to dst0 and src1 to dst1, handling possible overlaps.
  void MovePair(Register dst0, Register src0, Register dst1, Register src1);

  void Move(SwVfpRegister dst, SwVfpRegister src, Condition cond = al);
  void Move(DwVfpRegister dst, DwVfpRegister src, Condition cond = al);
  void Move(QwNeonRegister dst, QwNeonRegister src);

  // Simulate s-register moves for imaginary s32 - s63 registers.
  void VmovExtended(Register dst, int src_code);
  void VmovExtended(int dst_code, Register src);
  // Move between s-registers and imaginary s-registers.
  void VmovExtended(int dst_code, int src_code);
  void VmovExtended(int dst_code, const MemOperand& src);
  void VmovExtended(const MemOperand& dst, int src_code);

  // Register swap. Note that the register operands should be distinct.
  void Swap(Register srcdst0, Register srcdst1);
  void Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1);
  void Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1);

  // Get the actual activation frame alignment for target environment.
  static int ActivationFrameAlignment();

  void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);

  void SmiUntag(Register reg, SBit s = LeaveCC) {
    mov(reg, Operand::SmiUntag(reg), s);
  }
  void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
    mov(dst, Operand::SmiUntag(src), s);
  }

  void SmiToInt32(Register smi) { SmiUntag(smi); }
  void SmiToInt32(Register dst, Register smi) { SmiUntag(dst, smi); }
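
  // Illustrative note (not part of the original header): on 32-bit ARM a Smi
  // keeps its 31-bit payload in the upper bits with a zero tag bit at bit 0,
  // so tagging is a left shift by one and untagging an arithmetic right shift
  // by one:
  //
  //   __ SmiTag(r0);    // r0 = r0 << 1 (now a Smi)
  //   __ SmiUntag(r0);  // r0 = r0 >> 1 (arithmetic; plain int32 again)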

  // Load an object from the root table.
  void LoadTaggedRoot(Register destination, RootIndex index) {
    LoadRoot(destination, index);
  }
  void LoadRoot(Register destination, RootIndex index) final {
    LoadRoot(destination, index, al);
  }
  void LoadRoot(Register destination, RootIndex index, Condition cond);

  // Jump if the register contains a smi.
  void JumpIfSmi(Register value, Label* smi_label);

  void JumpIfEqual(Register x, int32_t y, Label* dest);

  void JumpIfLessThan(Register x, int32_t y, Label* dest);

  void LoadMap(Register destination, Register object);

  void LoadFeedbackVector(Register dst, Register closure, Register scratch,
                          Label* fbv_undef);

  void PushAll(RegList registers) {
    if (registers.is_empty()) return;
    ASM_CODE_COMMENT(this);
    // stm(db_w, sp, registers);
    // TODO(victorgomes): {stm/ldm} pushes/pops registers in the opposite order
    // as expected by Maglev frame. Consider massaging Maglev to accept this
    // order instead.
    for (Register reg : registers) {
      push(reg);
    }
  }

  void PopAll(RegList registers) {
    if (registers.is_empty()) return;
    ASM_CODE_COMMENT(this);
    // ldm(ia_w, sp, registers);
    for (Register reg : base::Reversed(registers)) {
      pop(reg);
    }
  }

  void PushAll(DoubleRegList registers, int stack_slot_size = kDoubleSize) {
    if (registers.is_empty()) return;
    ASM_CODE_COMMENT(this);
    // TODO(victorgomes): vstm only works for consecutive double registers. We
    // could check if that is the case and optimize here.
    for (DoubleRegister reg : registers) {
      vpush(reg);
    }
  }

  void PopAll(DoubleRegList registers, int stack_slot_size = kDoubleSize) {
    if (registers.is_empty()) return;
    ASM_CODE_COMMENT(this);
    // TODO(victorgomes): vldm only works for consecutive double registers. We
    // could check if that is the case and optimize here.
    for (DoubleRegister reg : base::Reversed(registers)) {
      vpop(reg);
    }
  }
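
  // Usage sketch (illustrative register lists): snapshot and restore a set of
  // registers, e.g. around a call that clobbers them. Pops must mirror pushes
  // in reverse order.
  //
  //   RegList gp = {r0, r1, r4};
  //   DoubleRegList fp = {d0, d1};
  //   __ PushAll(gp);
  //   __ PushAll(fp);
  //   ... clobbering code ...
  //   __ PopAll(fp);
  //   __ PopAll(gp);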

  inline void Cmp(const Register& rn, int imm) { cmp(rn, Operand(imm)); }

  inline void CmpTagged(const Register& r1, const Register& r2) { cmp(r1, r2); }

  // Functions performing a check on a known or potential smi. Returns
  // a condition that is satisfied if the check is successful.
  Condition CheckSmi(Register src) {
    SmiTst(src);
    return eq;
  }

  void Zero(const MemOperand& dest);
  void Zero(const MemOperand& dest1, const MemOperand& dest2);

  void DecompressTagged(const Register& destination,
                        const MemOperand& field_operand) {
    // No pointer compression on arm; this is just a plain load.
    LoadTaggedField(destination, field_operand);
  }

  void DecompressTagged(const Register& destination, const Register& source) {
    // No pointer compression on arm. Do nothing.
  }

  void LoadTaggedField(const Register& destination,
                       const MemOperand& field_operand) {
    ldr(destination, field_operand);
  }

  void LoadTaggedFieldWithoutDecompressing(const Register& destination,
                                           const MemOperand& field_operand) {
    LoadTaggedField(destination, field_operand);
  }

  void SmiUntagField(Register dst, const MemOperand& src) {
    LoadTaggedField(dst, src);
    SmiUntag(dst);
  }

  void StoreTaggedField(const Register& value,
                        const MemOperand& dst_field_operand) {
    str(value, dst_field_operand);
  }

  // For compatibility with platform-independent code.
  void StoreTaggedField(const MemOperand& dst_field_operand,
                        const Register& value) {
    StoreTaggedField(value, dst_field_operand);
  }

  void Switch(Register scratch, Register value, int case_value_base,
              Label** labels, int num_labels);

  void JumpIfCodeIsMarkedForDeoptimization(Register code, Register scratch,
                                           Label* if_marked_for_deoptimization);

  void JumpIfCodeIsTurbofanned(Register code, Register scratch,
                               Label* if_turbofanned);

  // Falls through and sets scratch_and_result to 0 on failure, jumps to
  // on_result on success.
  void TryLoadOptimizedOsrCode(Register scratch_and_result,
                               CodeKind min_opt_level, Register feedback_vector,
                               FeedbackSlot slot, Label* on_result,
                               Label::Distance distance);

  void AssertZeroExtended(Register int32_register) {
    // On arm32 there are no upper 32 bits, so this is a no-op.
  }

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Jumps to 'done' if
  // it succeeds; otherwise falls through when the result is saturated. On the
  // fall-through path 'result' is clobbered.
  void TryInlineTruncateDoubleToI(Register result, DwVfpRegister input,
                                  Label* done);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
                         DwVfpRegister double_input, StubCallMode stub_mode);

  // EABI variant for double arguments in use.
  bool use_eabi_hardfloat() {
#ifdef __arm__
    return base::OS::ArmUsingHardFloat();
#elif USE_EABI_HARDFLOAT
    return true;
#else
    return false;
#endif
  }

  // Compute the start of the generated instruction stream from the current PC.
  // This is an alternative to embedding the {CodeObject} handle as a reference.
  void ComputeCodeStartAddress(Register dst);

  // Control-flow integrity:

  // Define a function entrypoint. This doesn't emit any code for this
  // architecture, as control-flow integrity is not supported for it.
  void CodeEntry() {}
  // Define an exception handler.
  void ExceptionHandler() {}
  // Define an exception handler and bind a label.
  void BindExceptionHandler(Label* label) { bind(label); }

  // Wasm SIMD helpers. These instructions don't have a direct lowering to
  // native instructions, so these helpers define the optimal code sequence
  // once and are used by both TurboFan and Liftoff.
  void I64x2BitMask(Register dst, QwNeonRegister src);
  void I64x2Eq(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void I64x2Ne(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void I64x2GtS(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void I64x2GeS(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void I64x2AllTrue(Register dst, QwNeonRegister src);
  void I64x2Abs(QwNeonRegister dst, QwNeonRegister src);
  void F64x2ConvertLowI32x4S(QwNeonRegister dst, QwNeonRegister src);
  void F64x2ConvertLowI32x4U(QwNeonRegister dst, QwNeonRegister src);
  void F64x2PromoteLowF32x4(QwNeonRegister dst, QwNeonRegister src);

  void Mls(Register dst, Register src1, Register src2, Register srcA,
           Condition cond = al);
  void And(Register dst, Register src1, const Operand& src2,
           Condition cond = al);
  void Ubfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);
  void Sbfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);

  // ---------------------------------------------------------------------------
  // GC Support

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
  void RecordWriteField(Register object, int offset, Register value,
                        LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
                        SmiCheck smi_check = SmiCheck::kInline);

  // For a given |object| notify the garbage collector that the slot at |offset|
  // has been written. |value| is the object being stored.
  void RecordWrite(Register object, Operand offset, Register value,
                   LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
                   SmiCheck smi_check = SmiCheck::kInline);
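
  // Usage sketch (illustrative; `offset` is a byte offset from the start of
  // the object, registers hypothetical): store a tagged pointer into a field,
  // then emit the write barrier so the GC sees the new reference.
  //
  //   __ StoreTaggedField(value, FieldMemOperand(object, offset));
  //   __ RecordWriteField(object, offset, value, kLRHasBeenSaved,
  //                       SaveFPRegsMode::kIgnore);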

  // Enter exit frame.
  // stack_space - extra stack space, used for alignment before call to C.
  void EnterExitFrame(Register scratch, int stack_space,
                      StackFrame::Type frame_type);

  // Leave the current exit frame.
  void LeaveExitFrame(Register scratch);

  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst);

  void LoadNativeContextSlot(Register dst, int index);

  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeFunctionCode(Register function, Register new_target,
                          Register expected_parameter_count,
                          Register actual_parameter_count, InvokeType type);

  // On function call, call into the debugger.
  void CallDebugOnFunctionCall(Register fun, Register new_target,
                               Register expected_parameter_count,
                               Register actual_parameter_count);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunctionWithNewTarget(Register function, Register new_target,
                                   Register actual_parameter_count,
                                   InvokeType type);

  void InvokeFunction(Register function, Register expected_parameter_count,
                      Register actual_parameter_count, InvokeType type);

  // Exception handling

  // Push a new stack handler and link into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  // Must preserve the result register.
  void PopStackHandler();

  // ---------------------------------------------------------------------------
  // Support functions.

  // Compare object type for heap object. heap_object contains a non-Smi
  // whose object type should be compared with the given type. This both
  // sets the flags and leaves the object type in the type_reg register.
  // It leaves the map in the map register (unless the type_reg and map register
  // are the same register). It leaves the heap object in the heap_object
  // register unless the heap_object register is the same register as one of the
  // other registers.
  // type_reg can be no_reg. In that case a scratch register is used.
  void CompareObjectType(Register heap_object, Register map, Register type_reg,
                         InstanceType type);
  // Variant of the above, which compares against a type range rather than a
  // single type (lower_limit and higher_limit are inclusive).
  //
  // Always use unsigned comparisons: ls for a positive result.
  void CompareObjectTypeRange(Register heap_object, Register map,
                              Register type_reg, Register scratch,
                              InstanceType lower_limit,
                              InstanceType higher_limit);

  // Compare instance type in a map. map contains a valid map object whose
  // object type should be compared with the given type. This both
  // sets the flags and leaves the object type in the type_reg register.
  void CompareInstanceType(Register map, Register type_reg, InstanceType type);

  // Compare instance type ranges for a map (lower_limit and higher_limit
  // inclusive).
  //
  // Always use unsigned comparisons: ls for a positive result.
  void CompareInstanceTypeRange(Register map, Register type_reg,
                                Register scratch, InstanceType lower_limit,
                                InstanceType higher_limit);

  // Compare the object in a register to a value from the root list.
  // Acquires a scratch register.
  void CompareRoot(Register obj, RootIndex index);
  void CompareTaggedRoot(Register with, RootIndex index);

  void PushRoot(RootIndex index) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    LoadRoot(scratch, index);
    Push(scratch);
  }

  // Compare the object in a register to a value and jump if they are equal.
  void JumpIfRoot(Register with, RootIndex index, Label* if_equal) {
    CompareRoot(with, index);
    b(eq, if_equal);
  }

  // Compare the object in a register to a value and jump if they are not equal.
  void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) {
    CompareRoot(with, index);
    b(ne, if_not_equal);
  }

  // Checks if value is in range [lower_limit, higher_limit] using a single
  // comparison. Flags C=0 or Z=1 indicate the value is in the range (condition
  // ls).
  void CompareRange(Register value, Register scratch, unsigned lower_limit,
                    unsigned higher_limit);
  void JumpIfIsInRange(Register value, Register scratch, unsigned lower_limit,
                       unsigned higher_limit, Label* on_in_range);
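
  // Illustrative note (not part of the original header): CompareRange relies
  // on the classic unsigned trick that value is in [lower_limit, higher_limit]
  // iff (value - lower_limit) <= (higher_limit - lower_limit) as an unsigned
  // comparison, so one sub plus one cmp sets condition ls. With hypothetical
  // registers:
  //
  //   __ JumpIfIsInRange(r0, r1, 'a', 'z', &is_ascii_lower);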

  // It assumes that the arguments are located below the stack pointer.

  // Tiering support.
  void AssertFeedbackCell(Register object,
                          Register scratch) NOOP_UNLESS_DEBUG_CODE;
  void AssertFeedbackVector(Register object,
                            Register scratch) NOOP_UNLESS_DEBUG_CODE;
  void ReplaceClosureCodeWithOptimizedCode(Register optimized_code,
                                           Register closure);
  void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
  Condition LoadFeedbackVectorFlagsAndCheckIfNeedsProcessing(
      Register flags, Register feedback_vector, CodeKind current_code_kind);
  void LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
      Register flags, Register feedback_vector, CodeKind current_code_kind,
      Label* flags_need_processing);
  void OptimizeCodeOrTailCallOptimizedCodeSlot(Register flags,
                                               Register feedback_vector);

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f, int num_arguments);

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid, int num_arguments) {
    CallRuntime(Runtime::FunctionForId(fid), num_arguments);
  }
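
  // Illustrative note (not part of the original header): the fid-only
  // overload looks up the declared argument count, so (assuming
  // Runtime::kStackGuard, which declares zero arguments) these two calls
  // emit the same code:
  //
  //   __ CallRuntime(Runtime::kStackGuard);
  //   __ CallRuntime(Runtime::kStackGuard, 0);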

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& builtin,
                               bool builtin_exit_frame = false);

  // ---------------------------------------------------------------------------
  // In-place weak references.
  void LoadWeakValue(Register out, Register in, Label* target_if_cleared);

  // ---------------------------------------------------------------------------
  // StatsCounter support

  void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2) {
    if (!v8_flags.native_code_counters) return;
    EmitIncrementCounter(counter, value, scratch1, scratch2);
  }
  void EmitIncrementCounter(StatsCounter* counter, int value, Register scratch1,
                            Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2) {
    if (!v8_flags.native_code_counters) return;
    EmitDecrementCounter(counter, value, scratch1, scratch2);
  }
  void EmitDecrementCounter(StatsCounter* counter, int value, Register scratch1,
                            Register scratch2);

  // ---------------------------------------------------------------------------
  // Stack limit utilities
  void LoadStackLimit(Register destination, StackLimitKind kind);
  void StackOverflowCheck(Register num_args, Register scratch,
                          Label* stack_overflow);

  // ---------------------------------------------------------------------------
  // Smi utilities

  void SmiTag(Register reg, SBit s = LeaveCC);
  void SmiTag(Register dst, Register src, SBit s = LeaveCC);

  // Test if the register contains a smi (Z == 0 (eq) if true).
  void SmiTst(Register value);
  // Jump if the register contains a non-smi.
  void JumpIfNotSmi(Register value, Label* not_smi_label);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object,
                    AbortReason reason = AbortReason::kOperandIsASmi)
      NOOP_UNLESS_DEBUG_CODE;
  void AssertSmi(Register object,
                 AbortReason reason = AbortReason::kOperandIsNotASmi)
      NOOP_UNLESS_DEBUG_CODE;

  // Abort execution if argument is not a Constructor, enabled via --debug-code.
  void AssertConstructor(Register object) NOOP_UNLESS_DEBUG_CODE;

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object) NOOP_UNLESS_DEBUG_CODE;

  // Abort execution if argument is not a callable JSFunction, enabled via
  // --debug-code.
  void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE;

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE;

  // Abort execution if argument is not a JSGeneratorObject (or subclass),
  // enabled via --debug-code.
  void AssertGeneratorObject(Register object) NOOP_UNLESS_DEBUG_CODE;

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object,
                                       Register scratch) NOOP_UNLESS_DEBUG_CODE;

  void AssertJSAny(Register object, Register map_tmp, Register tmp,
                   AbortReason abort_reason) NOOP_UNLESS_DEBUG_CODE;

  template <typename Field>
  void DecodeField(Register dst, Register src) {
    Ubfx(dst, src, Field::kShift, Field::kSize);
  }

  template <typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }
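
  // Illustrative note (not part of the original header): with a hypothetical
  // Field = base::BitField<int, 3, 4> (kShift == 3, kSize == 4),
  // DecodeField<Field>(r0) reduces to Ubfx(r0, r0, 3, 4), leaving the 4-bit
  // field value in the low bits of r0.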

  Operand ClearedValue() const;

 private:
  // Helper functions for generating invokes.
  void InvokePrologue(Register expected_parameter_count,
                      Register actual_parameter_count, InvokeType type);

  // Compare single values and then load the fpscr flags to a register.
  void VFPCompareAndLoadFlags(const SwVfpRegister src1,
                              const SwVfpRegister src2,
                              const Register fpscr_flags,
                              const Condition cond = al);
  void VFPCompareAndLoadFlags(const SwVfpRegister src1, const float src2,
                              const Register fpscr_flags,
                              const Condition cond = al);

  // Compare double values and then load the fpscr flags to a register.
  void VFPCompareAndLoadFlags(const DwVfpRegister src1,
                              const DwVfpRegister src2,
                              const Register fpscr_flags,
                              const Condition cond = al);
  void VFPCompareAndLoadFlags(const DwVfpRegister src1, const double src2,
                              const Register fpscr_flags,
                              const Condition cond = al);

  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);

  // Implementation helpers for FloatMin and FloatMax.
  template <typename T>
  void FloatMaxHelper(T result, T left, T right, Label* out_of_line);
  template <typename T>
  void FloatMinHelper(T result, T left, T right, Label* out_of_line);
  template <typename T>
  void FloatMaxOutOfLineHelper(T result, T left, T right);
  template <typename T>
  void FloatMinOutOfLineHelper(T result, T left, T right);

  int CalculateStackPassedWords(int num_reg_arguments,
                                int num_double_arguments);

  DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};

struct MoveCycleState {
  // List of scratch registers reserved for pending moves in a move cycle, and
  // which should therefore not be used as a temporary location by
  // {MoveToTempLocation}. The GP scratch register is implicitly reserved.
  VfpRegList scratch_v_reglist = 0;
  // Available scratch registers during the move cycle resolution scope.
  std::optional<UseScratchRegisterScope> temps;
  // Code of the scratch register picked by {MoveToTempLocation}.
  int scratch_reg_code = -1;
};

// Provides access to exit frame stack space (not GC-ed).
inline MemOperand ExitFrameStackSlotOperand(int offset) {
  // The slot at [sp] is reserved in all ExitFrames for storing the return
  // address before doing the actual call; it's necessary for frame iteration
  // (see StoreReturnAddressAndCall for details).
  static constexpr int kSPOffset = 1 * kPointerSize;
  return MemOperand(sp, kSPOffset + offset);
}

// Provides access to exit frame parameters (GC-ed).
inline MemOperand ExitFrameCallerStackSlotOperand(int index) {
  return MemOperand(
      fp, (BuiltinExitFrameConstants::kFixedSlotCountAboveFp + index) *
              kSystemPointerSize);
}

// Calls an API function. Allocates HandleScope, extracts returned value
// from handle, and propagates exceptions. Clobbers C argument registers
// and C caller-saved registers. Restores context. On return removes
// (*argc_operand + slots_to_drop_on_return) * kSystemPointerSize
// (GCed, includes the call JS arguments space and the additional space
// allocated for the fast call).
void CallApiFunctionAndReturn(MacroAssembler* masm, bool with_profiling,
                              Register function_address,
                              ExternalReference thunk_ref, Register thunk_arg,
                              int slots_to_drop_on_return,
                              MemOperand* argc_operand,
                              MemOperand return_value_operand);

#define ACCESS_MASM(masm) masm->

}  // namespace internal
}  // namespace v8

#endif  // V8_CODEGEN_ARM_MACRO_ASSEMBLER_ARM_H_