v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
macro-assembler-s390.h
1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_CODEGEN_S390_MACRO_ASSEMBLER_S390_H_
6#define V8_CODEGEN_S390_MACRO_ASSEMBLER_S390_H_
7
8#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
9#error This header must be included via macro-assembler.h
10#endif
11
15#include "src/common/globals.h"
19
20namespace v8 {
21namespace internal {
22
24
25// ----------------------------------------------------------------------------
26// Static helper functions
27
28// Generate a MemOperand for loading a field from an object.
29inline MemOperand FieldMemOperand(Register object, int offset) {
30 return MemOperand(object, offset - kHeapObjectTag);
31}
32
33// Generate a MemOperand for loading a field from an object.
34inline MemOperand FieldMemOperand(Register object, Register index, int offset) {
35 return MemOperand(object, index, offset - kHeapObjectTag);
36}
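// Illustrative usage (a sketch, not part of this header; assumes V8's usual
// `__` MacroAssembler shorthand): tagged heap-object pointers carry
// kHeapObjectTag in their low bits, so a field at byte offset N is addressed
// with displacement N - kHeapObjectTag, e.g.
//   __ LoadU64(r3, FieldMemOperand(r2, HeapObject::kMapOffset));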
37
39
40Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
41 Register reg3 = no_reg,
42 Register reg4 = no_reg,
43 Register reg5 = no_reg,
44 Register reg6 = no_reg);
45
46class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
47 public:
48 using MacroAssemblerBase::MacroAssemblerBase;
49
50 void CallBuiltin(Builtin builtin, Condition cond = al);
51 void TailCallBuiltin(Builtin builtin, Condition cond = al);
52 void AtomicCmpExchangeHelper(Register addr, Register output,
53 Register old_value, Register new_value,
54 int start, int end, int shift_amount, int offset,
55 Register temp0, Register temp1);
56 void AtomicCmpExchangeU8(Register addr, Register output, Register old_value,
57 Register new_value, Register temp0, Register temp1);
58 void AtomicCmpExchangeU16(Register addr, Register output, Register old_value,
59 Register new_value, Register temp0, Register temp1);
60 void AtomicExchangeHelper(Register addr, Register value, Register output,
61 int start, int end, int shift_amount, int offset,
62 Register scratch);
63 void AtomicExchangeU8(Register addr, Register value, Register output,
64 Register scratch);
65 void AtomicExchangeU16(Register addr, Register value, Register output,
66 Register scratch);
67
68 void DoubleMax(DoubleRegister result_reg, DoubleRegister left_reg,
69 DoubleRegister right_reg);
70 void DoubleMin(DoubleRegister result_reg, DoubleRegister left_reg,
71 DoubleRegister right_reg);
72 void FloatMax(DoubleRegister result_reg, DoubleRegister left_reg,
73 DoubleRegister right_reg);
74 void FloatMin(DoubleRegister result_reg, DoubleRegister left_reg,
75 DoubleRegister right_reg);
84
85 void LoadFromConstantsTable(Register destination, int constant_index) final;
88 void StoreRootRelative(int32_t offset, Register value) final;
89
90 // Operand pointing to an external reference.
91 // May emit code to set up the scratch register. The operand is
92 // only guaranteed to be correct as long as the scratch register
93 // isn't changed.
94 // If the operand is used more than once, use a scratch register
95 // that is guaranteed not to be clobbered.
96 MemOperand ExternalReferenceAsOperand(ExternalReference reference,
97 Register scratch);
98 MemOperand ExternalReferenceAsOperand(IsolateFieldId id) {
99 return ExternalReferenceAsOperand(ExternalReference::Create(id), no_reg);
100 }
101
102 // Jump, Call, and Ret pseudo instructions implementing inter-working.
103 void Jump(Register target, Condition cond = al);
104 void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
105 void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
106 void Jump(const ExternalReference& reference);
107 // Jump if the register contains a smi.
108 inline void JumpIfSmi(Register value, Label* smi_label) {
109 TestIfSmi(value);
110 beq(smi_label /*, cr0*/); // branch if SMI
111 }
112 Condition CheckSmi(Register src) {
113 TestIfSmi(src);
114 return eq;
115 }
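 // Illustrative usage (a sketch, not part of this header): both helpers
 // reduce to a tmll of the smi tag bit, so a typical smi fast path is
 //   Label is_smi;
 //   __ JumpIfSmi(r2, &is_smi);  // taken when the low tag bit is 0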
116
117 void JumpIfEqual(Register x, int32_t y, Label* dest);
118 void JumpIfLessThan(Register x, int32_t y, Label* dest);
119
120 // Caution: if {reg} is a 32-bit negative int, it should be sign-extended to
121 // 64-bit before calling this function.
122 void Switch(Register scratch, Register reg, int case_base_value,
123 Label** labels, int num_labels);
124
125 void JumpIfCodeIsMarkedForDeoptimization(Register code, Register scratch,
126 Label* if_marked_for_deoptimization);
127
128 void JumpIfCodeIsTurbofanned(Register code, Register scratch,
129 Label* if_turbofanned);
132
133 void LoadFeedbackVector(Register dst, Register closure, Register scratch,
134 Label* fbv_undef);
135
136 void Call(Register target);
137 void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
138 void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
139 Condition cond = al);
140 void Ret() { b(r14); }
141 void Ret(Condition cond) { b(cond, r14); }
142
143 // TODO(olivf, 42204201) Rename this to AssertNotDeoptimized once
144 // non-leaptiering is removed from the codebase.
145 void BailoutIfDeoptimized(Register scratch);
146 void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
147 DeoptimizeKind kind, Label* ret,
148 Label* jump_deoptimization_entry_label);
149
150 // Emit code to discard a non-negative number of pointer-sized elements
151 // from the stack, clobbering only the sp register.
152 void Drop(int count);
153 void Drop(Register count, Register scratch = r0);
154
155 void Ret(int drop) {
156 Drop(drop);
157 Ret();
158 }
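 // Illustrative usage (a sketch, not part of this header): a stub that
 // pushed two scratch slots can unwind and return in one call, e.g.
 //   __ Ret(2);  // drops 2 pointer-sized slots, then branches to r14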
159
160 void Call(Label* target);
161
162 void GetLabelAddress(Register dst, Label* target);
163
164 // Load the builtin given by the Smi in |builtin_index| into |target|.
165 void LoadEntryFromBuiltinIndex(Register builtin_index, Register target);
168
169#ifdef V8_ENABLE_LEAPTIERING
170 void LoadEntrypointFromJSDispatchTable(Register destination,
171 Register dispatch_handle,
172 Register scratch);
173#endif // V8_ENABLE_LEAPTIERING
174
175 // Load the code entry point from the Code object.
176 void LoadCodeInstructionStart(
177 Register destination, Register code_object,
178 CodeEntrypointTag tag = kDefaultCodeEntrypointTag);
179 void CallCodeObject(Register code_object);
180 void JumpCodeObject(Register code_object,
181 JumpMode jump_mode = JumpMode::kJump);
182
183 void CallBuiltinByIndex(Register builtin_index, Register target);
184
185 // Register move. May do nothing if the registers are identical.
186 void Move(Register dst, Tagged<Smi> smi) { LoadSmiLiteral(dst, smi); }
187 void Move(Register dst, Handle<HeapObject> source,
188 RelocInfo::Mode rmode = RelocInfo::FULL_EMBEDDED_OBJECT);
189 void Move(Register dst, ExternalReference reference);
191 void Move(Register dst, const MemOperand& src);
192 void Move(Register dst, Register src, Condition cond = al);
194
195 void MoveChar(const MemOperand& opnd1, const MemOperand& opnd2,
196 const Operand& length);
197
198 void CompareLogicalChar(const MemOperand& opnd1, const MemOperand& opnd2,
199 const Operand& length);
200
201 void ExclusiveOrChar(const MemOperand& opnd1, const MemOperand& opnd2,
202 const Operand& length);
203
204 void RotateInsertSelectBits(Register dst, Register src,
205 const Operand& startBit, const Operand& endBit,
206 const Operand& shiftAmt, bool zeroBits);
207
209
212
213 void CallEphemeronKeyBarrier(Register object, Register slot_address,
214 SaveFPRegsMode fp_mode);
215
216 void CallRecordWriteStubSaveRegisters(
217 Register object, Register slot_address, SaveFPRegsMode fp_mode,
218 StubCallMode mode = StubCallMode::kCallBuiltinPointer);
219 void CallRecordWriteStub(
220 Register object, Register slot_address, SaveFPRegsMode fp_mode,
221 StubCallMode mode = StubCallMode::kCallBuiltinPointer);
222
223 void MultiPush(RegList regs, Register location = sp);
224 void MultiPop(RegList regs, Register location = sp);
225
226 void MultiPushDoubles(DoubleRegList dregs, Register location = sp);
227 void MultiPopDoubles(DoubleRegList dregs, Register location = sp);
228
229 void MultiPushV128(Simd128RegList dregs, Register scratch,
230 Register location = sp);
231 void MultiPopV128(Simd128RegList dregs, Register scratch,
232 Register location = sp);
233
234 void MultiPushF64OrV128(DoubleRegList dregs, Register scratch,
235 Register location = sp);
236 void MultiPopF64OrV128(DoubleRegList dregs, Register scratch,
237 Register location = sp);
240 void PushAll(DoubleRegList registers, int stack_slot_size = kDoubleSize);
241 void PopAll(DoubleRegList registers, int stack_slot_size = kDoubleSize);
242
243 // Calculate how much stack space (in bytes) is required to store caller
244 // registers, excluding those specified in the arguments.
245 int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
246 Register exclusion1 = no_reg,
247 Register exclusion2 = no_reg,
248 Register exclusion3 = no_reg) const;
249
250 // Push caller saved registers on the stack, and return the number of bytes
251 // stack pointer is adjusted.
252 int PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
253 Register exclusion1 = no_reg,
254 Register exclusion2 = no_reg,
255 Register exclusion3 = no_reg);
256 // Restore caller saved registers from the stack, and return the number of
257 // bytes stack pointer is adjusted.
258 int PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
259 Register exclusion1 = no_reg, Register exclusion2 = no_reg,
260 Register exclusion3 = no_reg);
261
262 // Load an object from the root table.
263 void LoadRoot(Register destination, RootIndex index) override {
264 LoadRoot(destination, index, al);
265 }
266 void LoadRoot(Register destination, RootIndex index, Condition cond);
267
268 //--------------------------------------------------------------------------
269 // S390 Macro Assemblers for Instructions
270 //--------------------------------------------------------------------------
271
272 // Arithmetic Operations
273
274 // Add (Register - Immediate)
275 void AddS32(Register dst, const Operand& imm);
276 void AddS64(Register dst, const Operand& imm);
277 void AddS32(Register dst, Register src, const Operand& imm);
278 void AddS64(Register dst, Register src, const Operand& imm);
279 void AddS32(Register dst, Register src, int32_t imm);
280 void AddS64(Register dst, Register src, int32_t imm);
281
282 // Add (Register - Register)
283 void AddS32(Register dst, Register src);
284 void AddS64(Register dst, Register src);
285 void AddS32(Register dst, Register src1, Register src2);
286 void AddS64(Register dst, Register src1, Register src2);
287
288 // Add (Register - Mem)
289 void AddS32(Register dst, const MemOperand& opnd);
290 void AddS64(Register dst, const MemOperand& opnd);
291
292 // Add (Mem - Immediate)
293 void AddS32(const MemOperand& opnd, const Operand& imm);
294 void AddS64(const MemOperand& opnd, const Operand& imm);
295
296 // Add Logical (Register - Register)
297 void AddU32(Register dst, Register src1, Register src2);
298
299 // Add Logical (Register - Immediate)
300 void AddU32(Register dst, const Operand& imm);
301 void AddU64(Register dst, const Operand& imm);
302 void AddU64(Register dst, int imm) { AddU64(dst, Operand(imm)); }
303 void AddU64(Register dst, Register src1, Register src2);
304 void AddU64(Register dst, Register src) { algr(dst, src); }
305
306 // Add Logical (Register - Mem)
307 void AddU32(Register dst, const MemOperand& opnd);
308 void AddU64(Register dst, const MemOperand& opnd);
309
310 // Subtract (Register - Immediate)
311 void SubS32(Register dst, const Operand& imm);
312 void SubS64(Register dst, const Operand& imm);
313 void SubS32(Register dst, Register src, const Operand& imm);
314 void SubS64(Register dst, Register src, const Operand& imm);
315 void SubS32(Register dst, Register src, int32_t imm);
316 void SubS64(Register dst, Register src, int32_t imm);
317
318 // Subtract (Register - Register)
319 void SubS32(Register dst, Register src);
320 void SubS64(Register dst, Register src);
321 void SubS32(Register dst, Register src1, Register src2);
322 void SubS64(Register dst, Register src1, Register src2);
323
324 // Subtract (Register - Mem)
325 void SubS32(Register dst, const MemOperand& opnd);
326 void SubS64(Register dst, const MemOperand& opnd);
327 void LoadAndSub32(Register dst, Register src, const MemOperand& opnd);
328 void LoadAndSub64(Register dst, Register src, const MemOperand& opnd);
329
330 // Subtract Logical (Register - Mem)
331 void SubU32(Register dst, const MemOperand& opnd);
332 void SubU64(Register dst, const MemOperand& opnd);
333 // Subtract Logical 32-bit
334 void SubU32(Register dst, Register src1, Register src2);
335
336 // Multiply
337 void MulS64(Register dst, const Operand& opnd);
338 void MulS64(Register dst, Register src);
339 void MulS64(Register dst, const MemOperand& opnd);
340 void MulS64(Register dst, Register src1, Register src2) {
341 if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
342 msgrkc(dst, src1, src2);
343 } else {
344 if (dst == src2) {
345 MulS64(dst, src1);
346 } else if (dst == src1) {
347 MulS64(dst, src2);
348 } else {
349 mov(dst, src1);
350 MulS64(dst, src2);
351 }
352 }
353 }
354
355 void MulS32(Register dst, const MemOperand& src1);
356 void MulS32(Register dst, Register src1);
357 void MulS32(Register dst, const Operand& src1);
358 void MulS32(Register dst, Register src1, Register src2) {
359 if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
360 msrkc(dst, src1, src2);
361 } else {
362 if (dst == src2) {
363 MulS32(dst, src1);
364 } else if (dst == src1) {
365 MulS32(dst, src2);
366 } else {
367 mov(dst, src1);
368 MulS32(dst, src2);
369 }
370 }
371 }
372 void MulHighS64(Register dst, Register src1, Register src2);
373 void MulHighS64(Register dst, Register src1, const MemOperand& src2);
374 void MulHighU64(Register dst, Register src1, Register src2);
375 void MulHighU64(Register dst, Register src1, const MemOperand& src2);
376
377 void MulHighS32(Register dst, Register src1, const MemOperand& src2);
378 void MulHighS32(Register dst, Register src1, Register src2);
379 void MulHighS32(Register dst, Register src1, const Operand& src2);
380 void MulHighU32(Register dst, Register src1, const MemOperand& src2);
381 void MulHighU32(Register dst, Register src1, Register src2);
382 void MulHighU32(Register dst, Register src1, const Operand& src2);
383 void Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
384 const MemOperand& src2);
385 void Mul32WithOverflowIfCCUnequal(Register dst, Register src1, Register src2);
386 void Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
387 const Operand& src2);
388 // Divide
389 void DivS32(Register dst, Register src1, const MemOperand& src2);
390 void DivS32(Register dst, Register src1, Register src2);
391 void DivU32(Register dst, Register src1, const MemOperand& src2);
392 void DivU32(Register dst, Register src1, Register src2);
393 void DivS64(Register dst, Register src1, const MemOperand& src2);
394 void DivS64(Register dst, Register src1, Register src2);
395 void DivU64(Register dst, Register src1, const MemOperand& src2);
396 void DivU64(Register dst, Register src1, Register src2);
397
398 // Mod
399 void ModS32(Register dst, Register src1, const MemOperand& src2);
400 void ModS32(Register dst, Register src1, Register src2);
401 void ModU32(Register dst, Register src1, const MemOperand& src2);
402 void ModU32(Register dst, Register src1, Register src2);
403 void ModS64(Register dst, Register src1, const MemOperand& src2);
404 void ModS64(Register dst, Register src1, Register src2);
405 void ModU64(Register dst, Register src1, const MemOperand& src2);
406 void ModU64(Register dst, Register src1, Register src2);
407
408 // Square root
411
412 // Compare
413 void CmpS32(Register src1, Register src2);
414 void CmpS64(Register src1, Register src2);
415 void CmpS32(Register dst, const Operand& opnd);
416 void CmpS64(Register dst, const Operand& opnd);
417 void CmpS32(Register dst, const MemOperand& opnd);
418 void CmpS64(Register dst, const MemOperand& opnd);
419 void CmpAndSwap(Register old_val, Register new_val, const MemOperand& opnd);
420 void CmpAndSwap64(Register old_val, Register new_val, const MemOperand& opnd);
421 // TODO(john.yan): remove this
422 template <class T>
423 void CmpP(Register src1, T src2) {
424 CmpS64(src1, src2);
425 }
426
427 // Compare Logical
428 void CmpU32(Register src1, Register src2);
429 void CmpU64(Register src1, Register src2);
430 void CmpU32(Register src1, const Operand& opnd);
431 void CmpU64(Register src1, const Operand& opnd);
432 void CmpU32(Register dst, const MemOperand& opnd);
433 void CmpU64(Register dst, const MemOperand& opnd);
434
435 // Compare Floats
436 void CmpF32(DoubleRegister src1, DoubleRegister src2);
437 void CmpF64(DoubleRegister src1, DoubleRegister src2);
438 void CmpF32(DoubleRegister src1, const MemOperand& src2);
439 void CmpF64(DoubleRegister src1, const MemOperand& src2);
440
441 // Load
442 void LoadU64(Register dst, const MemOperand& mem, Register scratch = no_reg);
443 void LoadS32(Register dst, const MemOperand& opnd, Register scratch = no_reg);
444 void LoadS32(Register dst, Register src);
445 void LoadU32(Register dst, const MemOperand& opnd, Register scratch = no_reg);
446 void LoadU32(Register dst, Register src);
447 void LoadU16(Register dst, const MemOperand& opnd);
448 void LoadU16(Register dst, Register src);
449 void LoadS16(Register dst, Register src);
450 void LoadS16(Register dst, const MemOperand& mem, Register scratch = no_reg);
451 void LoadS8(Register dst, const MemOperand& opnd);
452 void LoadS8(Register dst, Register src);
453 void LoadU8(Register dst, const MemOperand& opnd);
454 void LoadU8(Register dst, Register src);
455 void LoadV128(Simd128Register dst, const MemOperand& mem, Register scratch);
456 void LoadF64(DoubleRegister dst, const MemOperand& opnd);
457 void LoadF32(DoubleRegister dst, const MemOperand& opnd);
458 // LE Load
459 void LoadU64LE(Register dst, const MemOperand& mem,
460 Register scratch = no_reg);
461 void LoadS32LE(Register dst, const MemOperand& opnd,
462 Register scratch = no_reg);
463 void LoadU32LE(Register dst, const MemOperand& opnd,
464 Register scratch = no_reg);
465 void LoadU16LE(Register dst, const MemOperand& opnd);
466 void LoadS16LE(Register dst, const MemOperand& opnd);
467 void LoadV128LE(DoubleRegister dst, const MemOperand& mem, Register scratch0,
468 Register scratch1);
469 void LoadF64LE(DoubleRegister dst, const MemOperand& opnd, Register scratch);
470 void LoadF32LE(DoubleRegister dst, const MemOperand& opnd, Register scratch);
471 // Vector LE Load and Transform instructions.
472 void LoadAndSplat8x16LE(Simd128Register dst, const MemOperand& mem,
473 Register scratch);
474 void LoadAndSplat16x8LE(Simd128Register dst, const MemOperand& mem,
475 Register scratch);
476 void LoadAndSplat32x4LE(Simd128Register dst, const MemOperand& mem,
477 Register scratch);
478 void LoadAndSplat64x2LE(Simd128Register dst, const MemOperand& mem,
479 Register scratch);
480 void LoadAndExtend8x8ULE(Simd128Register dst, const MemOperand& mem,
481 Register scratch);
482 void LoadAndExtend8x8SLE(Simd128Register dst, const MemOperand& mem,
483 Register scratch);
484 void LoadAndExtend16x4ULE(Simd128Register dst, const MemOperand& mem,
485 Register scratch);
486 void LoadAndExtend16x4SLE(Simd128Register dst, const MemOperand& mem,
487 Register scratch);
488 void LoadAndExtend32x2ULE(Simd128Register dst, const MemOperand& mem,
489 Register scratch);
490 void LoadAndExtend32x2SLE(Simd128Register dst, const MemOperand& mem,
491 Register scratch);
492 void LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem,
493 Register scratch);
494 void LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem,
495 Register scratch);
496 void LoadLane8LE(Simd128Register dst, const MemOperand& mem, int lane,
497 Register scratch);
498 void LoadLane16LE(Simd128Register dst, const MemOperand& mem, int lane,
499 Register scratch);
500 void LoadLane32LE(Simd128Register dst, const MemOperand& mem, int lane,
501 Register scratch);
502 void LoadLane64LE(Simd128Register dst, const MemOperand& mem, int lane,
503 Register scratch);
504 void StoreLane8LE(Simd128Register src, const MemOperand& mem, int lane,
505 Register scratch);
506 void StoreLane16LE(Simd128Register src, const MemOperand& mem, int lane,
507 Register scratch);
508 void StoreLane32LE(Simd128Register src, const MemOperand& mem, int lane,
509 Register scratch);
510 void StoreLane64LE(Simd128Register src, const MemOperand& mem, int lane,
511 Register scratch);
512
513 // Load And Test
514 void LoadAndTest32(Register dst, Register src);
515 void LoadAndTestP(Register dst, Register src);
516
517 void LoadAndTest32(Register dst, const MemOperand& opnd);
518 void LoadAndTestP(Register dst, const MemOperand& opnd);
519
520 // Store
521 void StoreU64(const MemOperand& mem, const Operand& opnd,
522 Register scratch = no_reg);
523 void StoreU64(Register src, const MemOperand& mem, Register scratch = no_reg);
524 void StoreU32(Register src, const MemOperand& mem, Register scratch = no_reg);
525
526 void StoreU16(Register src, const MemOperand& mem, Register scratch = r0);
527 void StoreU8(Register src, const MemOperand& mem, Register scratch = r0);
528 void StoreF64(DoubleRegister dst, const MemOperand& opnd);
529 void StoreF32(DoubleRegister dst, const MemOperand& opnd);
530 void StoreV128(Simd128Register src, const MemOperand& mem, Register scratch);
531
532 // Store LE
533 void StoreU64LE(Register src, const MemOperand& mem,
534 Register scratch = no_reg);
535 void StoreU32LE(Register src, const MemOperand& mem,
536 Register scratch = no_reg);
537
538 void StoreU16LE(Register src, const MemOperand& mem, Register scratch = r0);
539 void StoreF64LE(DoubleRegister src, const MemOperand& opnd, Register scratch);
540 void StoreF32LE(DoubleRegister src, const MemOperand& opnd, Register scratch);
541 void StoreV128LE(Simd128Register src, const MemOperand& mem,
542 Register scratch1, Register scratch2);
543
548
553
554 void AddFloat32(DoubleRegister dst, const MemOperand& opnd,
555 DoubleRegister scratch);
556 void AddFloat64(DoubleRegister dst, const MemOperand& opnd,
557 DoubleRegister scratch);
558 void SubFloat32(DoubleRegister dst, const MemOperand& opnd,
559 DoubleRegister scratch);
560 void SubFloat64(DoubleRegister dst, const MemOperand& opnd,
561 DoubleRegister scratch);
562 void MulFloat32(DoubleRegister dst, const MemOperand& opnd,
563 DoubleRegister scratch);
564 void MulFloat64(DoubleRegister dst, const MemOperand& opnd,
565 DoubleRegister scratch);
566 void DivFloat32(DoubleRegister dst, const MemOperand& opnd,
567 DoubleRegister scratch);
568 void DivFloat64(DoubleRegister dst, const MemOperand& opnd,
569 DoubleRegister scratch);
570 void LoadF32AsF64(DoubleRegister dst, const MemOperand& opnd);
571
572 // Load On Condition
573 void LoadOnConditionP(Condition cond, Register dst, Register src);
574
577
578 void Branch(Condition c, const Operand& opnd);
580
581 // Shifts
582 void ShiftLeftU32(Register dst, Register src, Register val,
583 const Operand& val2 = Operand::Zero());
584 void ShiftLeftU32(Register dst, Register src, const Operand& val);
585 void ShiftLeftU64(Register dst, Register src, Register val,
586 const Operand& val2 = Operand::Zero());
587 void ShiftLeftU64(Register dst, Register src, const Operand& val);
588 void ShiftRightU32(Register dst, Register src, Register val,
589 const Operand& val2 = Operand::Zero());
590 void ShiftRightU32(Register dst, Register src, const Operand& val);
591 void ShiftRightU64(Register dst, Register src, Register val,
592 const Operand& val2 = Operand::Zero());
593 void ShiftRightU64(Register dst, Register src, const Operand& val);
594 void ShiftRightS32(Register dst, Register src, Register val,
595 const Operand& val2 = Operand::Zero());
596 void ShiftRightS32(Register dst, Register src, const Operand& val);
597 void ShiftRightS64(Register dst, Register src, Register val,
598 const Operand& val2 = Operand::Zero());
599 void ShiftRightS64(Register dst, Register src, const Operand& val);
600
601 void ClearRightImm(Register dst, Register src, const Operand& val);
602
603 // Bitwise operations
604 void And(Register dst, Register src);
605 void AndP(Register dst, Register src);
606 void And(Register dst, Register src1, Register src2);
607 void AndP(Register dst, Register src1, Register src2);
608 void And(Register dst, const MemOperand& opnd);
609 void AndP(Register dst, const MemOperand& opnd);
610 void And(Register dst, const Operand& opnd);
611 void AndP(Register dst, const Operand& opnd);
612 void And(Register dst, Register src, const Operand& opnd);
613 void AndP(Register dst, Register src, const Operand& opnd);
614 void Or(Register dst, Register src);
615 void OrP(Register dst, Register src);
616 void Or(Register dst, Register src1, Register src2);
617 void OrP(Register dst, Register src1, Register src2);
618 void Or(Register dst, const MemOperand& opnd);
619 void OrP(Register dst, const MemOperand& opnd);
620 void Or(Register dst, const Operand& opnd);
621 void OrP(Register dst, const Operand& opnd);
622 void Or(Register dst, Register src, const Operand& opnd);
623 void OrP(Register dst, Register src, const Operand& opnd);
624 void Xor(Register dst, Register src);
625 void XorP(Register dst, Register src);
626 void Xor(Register dst, Register src1, Register src2);
627 void XorP(Register dst, Register src1, Register src2);
628 void Xor(Register dst, const MemOperand& opnd);
629 void XorP(Register dst, const MemOperand& opnd);
630 void Xor(Register dst, const Operand& opnd);
631 void XorP(Register dst, const Operand& opnd);
632 void Xor(Register dst, Register src, const Operand& opnd);
633 void XorP(Register dst, Register src, const Operand& opnd);
634 void Popcnt32(Register dst, Register src);
635 void Not32(Register dst, Register src = no_reg);
636 void Not64(Register dst, Register src = no_reg);
637 void NotP(Register dst, Register src = no_reg);
638
639 void Popcnt64(Register dst, Register src);
640
641 void mov(Register dst, const Operand& src);
642 void mov(Register dst, Register src);
643
644 void push(DoubleRegister src) {
645 lay(sp, MemOperand(sp, -kSystemPointerSize));
646 StoreF64(src, MemOperand(sp));
647 }
648
649 void push(Register src) {
650 lay(sp, MemOperand(sp, -kSystemPointerSize));
651 StoreU64(src, MemOperand(sp));
652 }
653
654 void pop(DoubleRegister dst) {
655 LoadF64(dst, MemOperand(sp));
656 la(sp, MemOperand(sp, kSystemPointerSize));
657 }
658
659 void pop(Register dst) {
660 LoadU64(dst, MemOperand(sp));
661 la(sp, MemOperand(sp, kSystemPointerSize));
662 }
663
664 void pop() { la(sp, MemOperand(sp, kSystemPointerSize)); }
665
666 void Push(Register src) { push(src); }
667
668 // Push a handle.
669 void Push(Handle<HeapObject> handle);
670 void Push(Tagged<Smi> smi);
672
673 // Push two registers. Pushes leftmost register first (to highest address).
674 void Push(Register src1, Register src2) {
675 lay(sp, MemOperand(sp, -kSystemPointerSize * 2));
676 StoreU64(src1, MemOperand(sp, kSystemPointerSize));
677 StoreU64(src2, MemOperand(sp, 0));
678 }
679
680 // Push three registers. Pushes leftmost register first (to highest address).
681 void Push(Register src1, Register src2, Register src3) {
682 lay(sp, MemOperand(sp, -kSystemPointerSize * 3));
683 StoreU64(src1, MemOperand(sp, kSystemPointerSize * 2));
684 StoreU64(src2, MemOperand(sp, kSystemPointerSize));
685 StoreU64(src3, MemOperand(sp, 0));
686 }
687
688 // Push four registers. Pushes leftmost register first (to highest address).
689 void Push(Register src1, Register src2, Register src3, Register src4) {
690 lay(sp, MemOperand(sp, -kSystemPointerSize * 4));
691 StoreU64(src1, MemOperand(sp, kSystemPointerSize * 3));
692 StoreU64(src2, MemOperand(sp, kSystemPointerSize * 2));
693 StoreU64(src3, MemOperand(sp, kSystemPointerSize));
694 StoreU64(src4, MemOperand(sp, 0));
695 }
696
697 // Push five registers. Pushes leftmost register first (to highest address).
698 void Push(Register src1, Register src2, Register src3, Register src4,
699 Register src5) {
700 DCHECK(src1 != src2);
701 DCHECK(src1 != src3);
702 DCHECK(src2 != src3);
703 DCHECK(src1 != src4);
704 DCHECK(src2 != src4);
705 DCHECK(src3 != src4);
706 DCHECK(src1 != src5);
707 DCHECK(src2 != src5);
708 DCHECK(src3 != src5);
709 DCHECK(src4 != src5);
710
711 lay(sp, MemOperand(sp, -kSystemPointerSize * 5));
712 StoreU64(src1, MemOperand(sp, kSystemPointerSize * 4));
713 StoreU64(src2, MemOperand(sp, kSystemPointerSize * 3));
714 StoreU64(src3, MemOperand(sp, kSystemPointerSize * 2));
715 StoreU64(src4, MemOperand(sp, kSystemPointerSize));
716 StoreU64(src5, MemOperand(sp, 0));
717 }
718
719 enum PushArrayOrder { kNormal, kReverse };
720 void PushArray(Register array, Register size, Register scratch,
721 Register scratch2, PushArrayOrder order = kNormal);
722
723 void Pop(Register dst) { pop(dst); }
724
725 // Pop two registers. Pops rightmost register first (from lower address).
726 void Pop(Register src1, Register src2) {
727 LoadU64(src2, MemOperand(sp, 0));
728 LoadU64(src1, MemOperand(sp, kSystemPointerSize));
729 la(sp, MemOperand(sp, 2 * kSystemPointerSize));
730 }
731
732 // Pop three registers. Pops rightmost register first (from lower address).
733 void Pop(Register src1, Register src2, Register src3) {
734 LoadU64(src3, MemOperand(sp, 0));
735 LoadU64(src2, MemOperand(sp, kSystemPointerSize));
736 LoadU64(src1, MemOperand(sp, 2 * kSystemPointerSize));
737 la(sp, MemOperand(sp, 3 * kSystemPointerSize));
738 }
739
740 // Pop four registers. Pops rightmost register first (from lower address).
741 void Pop(Register src1, Register src2, Register src3, Register src4) {
742 LoadU64(src4, MemOperand(sp, 0));
743 LoadU64(src3, MemOperand(sp, kSystemPointerSize));
744 LoadU64(src2, MemOperand(sp, 2 * kSystemPointerSize));
745 LoadU64(src1, MemOperand(sp, 3 * kSystemPointerSize));
746 la(sp, MemOperand(sp, 4 * kSystemPointerSize));
747 }
748
749 // Pop five registers. Pops rightmost register first (from lower address).
750 void Pop(Register src1, Register src2, Register src3, Register src4,
751 Register src5) {
752 LoadU64(src5, MemOperand(sp, 0));
753 LoadU64(src4, MemOperand(sp, kSystemPointerSize));
754 LoadU64(src3, MemOperand(sp, 2 * kSystemPointerSize));
755 LoadU64(src2, MemOperand(sp, 3 * kSystemPointerSize));
756 LoadU64(src1, MemOperand(sp, 4 * kSystemPointerSize));
757 la(sp, MemOperand(sp, 5 * kSystemPointerSize));
758 }
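 // Illustrative usage (a sketch, not part of this header): Push and Pop use
 // mirrored orderings, so the same argument list round-trips, e.g.
 //   __ Push(r2, r3, r4);  // r2 lands at the highest address
 //   __ Pop(r2, r3, r4);   // restores all three registers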
759
760 // Push a fixed frame, consisting of lr, fp, constant pool.
761 void PushCommonFrame(Register marker_reg = no_reg);
762
763 // Push a standard frame, consisting of lr, fp, constant pool,
764 // context and JS function
765 void PushStandardFrame(Register function_reg);
766
767 void PopCommonFrame(Register marker_reg = no_reg);
768
769 // Restore caller's frame pointer and return address prior to being
770 // overwritten by tail call stack preparation.
771 void RestoreFrameStateForTailCall();
772
773 void InitializeRootRegister() {
774 ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
775 mov(kRootRegister, Operand(isolate_root));
776#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
777 LoadRootRelative(kPtrComprCageBaseRegister,
778 IsolateData::cage_base_offset());
779#endif
780 }
781
782 // If the value is a NaN, canonicalize the value; otherwise, do nothing.
783 void CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
784 void CanonicalizeNaN(const DoubleRegister value) {
785 CanonicalizeNaN(value, value);
786 }
787
788 // Converts the integer (untagged smi) in |src| to a double, storing
789 // the result to |dst|
790 void ConvertIntToDouble(DoubleRegister dst, Register src);
791
792 // Converts the unsigned integer (untagged smi) in |src| to
793 // a double, storing the result to |dst|
794 void ConvertUnsignedIntToDouble(DoubleRegister dst, Register src);
795
796 // Converts the integer (untagged smi) in |src| to
797 // a float, storing the result in |dst|
798 void ConvertIntToFloat(DoubleRegister dst, Register src);
799
800 // Converts the unsigned integer (untagged smi) in |src| to
801 // a float, storing the result in |dst|
802 void ConvertUnsignedIntToFloat(DoubleRegister dst, Register src);
803
808
813 // Converts the double_input to an integer. Note that, upon return,
814 // the contents of double_dst will also hold the fixed point representation.
815 void ConvertFloat32ToInt64(const Register dst,
816 const DoubleRegister double_input,
817 FPRoundingMode rounding_mode = kRoundToZero);
818
819 // Converts the double_input to an integer. Note that, upon return,
820 // the contents of double_dst will also hold the fixed point representation.
821 void ConvertDoubleToInt64(const Register dst,
822 const DoubleRegister double_input,
823 FPRoundingMode rounding_mode = kRoundToZero);
824 void ConvertDoubleToInt32(const Register dst,
825 const DoubleRegister double_input,
826 FPRoundingMode rounding_mode = kRoundToZero);
827
828 void ConvertFloat32ToInt32(const Register result,
829 const DoubleRegister double_input,
830 FPRoundingMode rounding_mode);
831 void ConvertFloat32ToUnsignedInt32(
832 const Register result, const DoubleRegister double_input,
833 FPRoundingMode rounding_mode = kRoundToZero);
834 // Converts the double_input to an unsigned integer. Note that, upon return,
835 // the contents of double_dst will also hold the fixed point representation.
836 void ConvertDoubleToUnsignedInt64(
837 const Register dst, const DoubleRegister double_input,
838 FPRoundingMode rounding_mode = kRoundToZero);
839 void ConvertDoubleToUnsignedInt32(
840 const Register dst, const DoubleRegister double_input,
841 FPRoundingMode rounding_mode = kRoundToZero);
842 void ConvertFloat32ToUnsignedInt64(
843 const Register result, const DoubleRegister double_input,
844 FPRoundingMode rounding_mode = kRoundToZero);
845
846 // Generates function and stub prologue code.
847 void StubPrologue(StackFrame::Type type, Register base = no_reg,
848 int prologue_offset = 0);
849 void Prologue(Register base, int prologue_offset = 0);
850
853
854 // Get the actual activation frame alignment for target environment.
855 static int ActivationFrameAlignment();
856 // ----------------------------------------------------------------
857 // new S390 macro-assembler interfaces that are slightly higher level
858 // than assembler-s390 and may generate variable length sequences
859
860 // load an SMI value <value> to GPR <dst>
861 void LoadSmiLiteral(Register dst, Tagged<Smi> smi);
862
863 // load a literal double value <value> to FPR <result>
864 template <class T>
865 void LoadF64(DoubleRegister result, T value, Register scratch) {
866 static_assert(sizeof(T) == kDoubleSize, "Expect input size to be 8");
867 uint64_t int_val = base::bit_cast<uint64_t, T>(value);
868 // Load the 64-bit value into a GPR, then transfer it to FPR via LDGR
869 uint32_t hi_32 = int_val >> 32;
870 uint32_t lo_32 = static_cast<uint32_t>(int_val);
871
872 if (int_val == 0) {
873 lzdr(result);
874 } else if (lo_32 == 0) {
875 llihf(scratch, Operand(hi_32));
876 ldgr(result, scratch);
877 } else {
878 iihf(scratch, Operand(hi_32));
879 iilf(scratch, Operand(lo_32));
880 ldgr(result, scratch);
881 }
882 }
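 // Illustrative usage (a sketch, not part of this header): for 1.0 the low
 // word of the bit pattern is zero, so this takes the llihf path, e.g.
 //   __ LoadF64(d1, 1.0, r0);  // llihf r0,0x3FF00000; ldgr d1,r0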
883
884 template <class T>
885 void LoadF32(DoubleRegister result, T value, Register scratch) {
886 static_assert(sizeof(T) == kFloatSize, "Expect input size to be 4");
887 uint32_t int_val = base::bit_cast<uint32_t, T>(value);
888 LoadF64(result, static_cast<uint64_t>(int_val) << 32, scratch);
889 }
890
891 void CmpSmiLiteral(Register src1, Tagged<Smi> smi, Register scratch);
892
893 // Set new rounding mode RN to FPSCR
894 void SetRoundingMode(FPRoundingMode RN);
895
896 // reset rounding mode to default (kRoundToNearest)
897 void ResetRoundingMode();
898
899 // These exist to provide portability between 32 and 64bit
900 void LoadMultipleP(Register dst1, Register dst2, const MemOperand& mem);
901 void StoreMultipleP(Register dst1, Register dst2, const MemOperand& mem);
902 void LoadMultipleW(Register dst1, Register dst2, const MemOperand& mem);
903 void StoreMultipleW(Register dst1, Register dst2, const MemOperand& mem);
904
905 void SwapP(Register src, Register dst, Register scratch);
906 void SwapP(Register src, MemOperand dst, Register scratch);
907 void SwapP(MemOperand src, MemOperand dst, Register scratch_0,
908 Register scratch_1);
909 void SwapFloat32(DoubleRegister src, DoubleRegister dst,
910 DoubleRegister scratch);
911 void SwapFloat32(DoubleRegister src, MemOperand dst,
912 DoubleRegister scratch);
913 void SwapDouble(DoubleRegister src, DoubleRegister dst,
914 DoubleRegister scratch);
915 void SwapDouble(DoubleRegister src, MemOperand dst, DoubleRegister scratch);
916 void SwapDouble(MemOperand src, MemOperand dst, DoubleRegister scratch);
917 void SwapSimd128(Simd128Register src, Simd128Register dst,
918 Simd128Register scratch);
919 void SwapSimd128(Simd128Register src, MemOperand dst,
920 Simd128Register scratch);
921 void SwapSimd128(MemOperand src, MemOperand dst, Simd128Register scratch);
922
923 // ---------------------------------------------------------------------------
924 // Runtime calls
925
926 // Before calling a C-function from generated code, align arguments on stack.
927 // After aligning the frame, non-register arguments must be stored in
928 // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
929 // are word sized. If double arguments are used, this function assumes that
930 // all double arguments are stored before core registers; otherwise the
931 // correct alignment of the double values is not guaranteed.
932 // Some compilers/platforms require the stack to be aligned when calling
933 // C++ code.
934 // Needs a scratch register to do some arithmetic. This register will be
935 // trashed.
936 void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
937 Register scratch);
938 void PrepareCallCFunction(int num_reg_arguments, Register scratch);
939
940 // There are two ways of passing double arguments, depending on whether a
941 // soft or hard floating point ABI is used. These functions
942 // abstract parameter passing for the three different ways we call
943 // C functions from generated code.
944 void MovToFloatParameter(DoubleRegister src);
945 void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
946 void MovToFloatResult(DoubleRegister src);
947
948 // Calls a C function and cleans up the space for arguments allocated
949 // by PrepareCallCFunction. The called function is not allowed to trigger a
950 // garbage collection, since that might move the code and invalidate the
951 // return address (unless this is somehow accounted for by the called
952 // function).
953 int CallCFunction(
954 ExternalReference function, int num_arguments,
955 SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes,
956 bool has_function_descriptor = ABI_USES_FUNCTION_DESCRIPTORS,
957 Label* return_label = nullptr);
958 int CallCFunction(
959 Register function, int num_arguments,
960 SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes,
961 bool has_function_descriptor = ABI_USES_FUNCTION_DESCRIPTORS,
962 Label* return_label = nullptr);
963 int CallCFunction(
964 ExternalReference function, int num_reg_arguments,
965 int num_double_arguments,
966 SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes,
967 bool has_function_descriptor = ABI_USES_FUNCTION_DESCRIPTORS,
968 Label* return_label = nullptr);
969 int CallCFunction(
970 Register function, int num_reg_arguments, int num_double_arguments,
971 SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes,
972 bool has_function_descriptor = ABI_USES_FUNCTION_DESCRIPTORS,
973 Label* return_label = nullptr);
974
977
978 void Trap();
980
981 // Emit code for a truncating division by a constant. The dividend register is
982 // unchanged and ip gets clobbered. Dividend and result must be different.
983 void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
984 DoubleRegister double_input, StubCallMode stub_mode);
985 void TryInlineTruncateDoubleToI(Register result, DoubleRegister double_input,
986 Label* done);
987
988 // ---------------------------------------------------------------------------
989 // Debugging
990
991 // Calls Abort(msg) if the condition cond is not satisfied.
992 // Use --debug-code to enable.
993 void Assert(Condition cond, AbortReason reason,
994 CRegister cr = cr7) NOOP_UNLESS_DEBUG_CODE;
995
996 // Like Assert(), but without condition.
997 // Use --debug-code to enable.
998 void AssertUnreachable(AbortReason reason) NOOP_UNLESS_DEBUG_CODE;
999

1000
1001 // Like Assert(), but always enabled.
1002 void Check(Condition cond, AbortReason reason, CRegister cr = cr7);
1003
1004 // Print a message to stdout and abort execution.
1005 void Abort(AbortReason reason);
1006
1007 // ---------------------------------------------------------------------------
1008 // Bit testing/extraction
1009 //
1010 // Bit numbering is such that the least significant bit is bit 0
1011 // (for consistency between 32/64-bit).
1012
1013 // Extract consecutive bits (defined by rangeStart - rangeEnd) from src
1014 // and place them into the least significant bits of dst.
1015 inline void ExtractBitRange(Register dst, Register src, int rangeStart,
1016 int rangeEnd) {
1017 DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerSystemPointer);
1018
1019 // Try to use RISBG if possible.
1020 if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
1021 int shiftAmount = (64 - rangeEnd) % 64; // Convert to shift left.
1022 int endBit = 63; // End is always LSB after shifting.
1023 int startBit = 63 - rangeStart + rangeEnd;
1024 RotateInsertSelectBits(dst, src, Operand(startBit), Operand(endBit),
1025 Operand(shiftAmount), true);
1026 } else {
1027 if (rangeEnd > 0) // Don't need to shift if rangeEnd is zero.
1028 ShiftRightU64(dst, src, Operand(rangeEnd));
1029 else if (dst != src) // If we didn't shift, we might need to copy
1030 mov(dst, src);
1031 int width = rangeStart - rangeEnd + 1;
1032 uint64_t mask = (static_cast<uint64_t>(1) << width) - 1;
1033 nihf(dst, Operand(mask >> 32));
1034 nilf(dst, Operand(mask & 0xFFFFFFFF));
1035 ltgr(dst, dst);
1036 }
1037 }
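 // Illustrative usage (a sketch, not part of this header): extract bits 7..4
 // of r3 into the low bits of r2, e.g.
 //   __ ExtractBitRange(r2, r3, 7, 4);  // r2 = (r3 >> 4) & 0xF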
1038
1039 inline void ExtractBit(Register dst, Register src, uint32_t bitNumber) {
1040 ExtractBitRange(dst, src, bitNumber, bitNumber);
1041 }
1042
1043 // Extract consecutive bits (defined by mask) from src and place them
1044 // into the least significant bits of dst.
1045 inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
1046 RCBit rc = LeaveRC) {
1047 int start = kBitsPerSystemPointer - 1;
1048 int end;
1049 uintptr_t bit = (1L << start);
1050
1051 while (bit && (mask & bit) == 0) {
1052 start--;
1053 bit >>= 1;
1054 }
1055 end = start;
1056 bit >>= 1;
1057
1058 while (bit && (mask & bit)) {
1059 end--;
1060 bit >>= 1;
1061 }
1062
1063 // 1-bits in mask must be contiguous
1064 DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);
1065
1066 ExtractBitRange(dst, src, start, end);
1067 }
1068
1069 // Test single bit in value.
1070 inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
1071 ExtractBitRange(scratch, value, bitNumber, bitNumber);
1072 }
1073
1074 // Test consecutive bit range in value. Range is defined by
1075 // rangeStart - rangeEnd.
1076 inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
1077 Register scratch = r0) {
1078 ExtractBitRange(scratch, value, rangeStart, rangeEnd);
1079 }
1080
1081 // Test consecutive bit range in value. Range is defined by mask.
1082 inline void TestBitMask(Register value, uintptr_t mask,
1083 Register scratch = r0) {
1084 ExtractBitMask(scratch, value, mask, SetRC);
1085 }
1086 inline void TestIfSmi(Register value) { tmll(value, Operand(1)); }
1087
1088 inline void TestIfSmi(MemOperand value) {
1089 if (is_uint12(value.offset())) {
1090 tm(value, Operand(1));
1091 } else if (is_int20(value.offset())) {
1092 tmy(value, Operand(1));
1093 } else {
1094 LoadS8(r0, value);
1095 tmll(r0, Operand(1));
1096 }
1097 }
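 // Illustrative usage (a sketch, not part of this header): TestIfSmi only
 // sets the condition code, so pair it with a branch, e.g.
 //   Label not_smi;
 //   __ TestIfSmi(r2);
 //   __ bne(&not_smi);  // tag bit set, i.e. not a smi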
1098
1099 inline void TestIfInt32(Register value) {
1100 // High bits must be identical to fit into a 32-bit integer
1101 cgfr(value, value);
1102 }
1103 void SmiUntag(Register reg) { SmiUntag(reg, reg); }
1104
1105 void SmiUntag(Register dst, const MemOperand& src);
1106 void SmiUntag(Register dst, Register src) {
1107 if (SmiValuesAre31Bits()) {
1108 ShiftRightS32(dst, src, Operand(kSmiShift));
1109 } else {
1110 ShiftRightS64(dst, src, Operand(kSmiShift));
1111 }
1112 lgfr(dst, dst);
1113 }
1114 void SmiToInt32(Register smi) {
1115 if (v8_flags.enable_slow_asserts) {
1116 AssertSmi(smi);
1117 }
1118 DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
1119 SmiUntag(smi);
1120 }
1121
1122 void SmiToInt32(Register dst, Register src) {
1123 mov(dst, src);
1124 SmiUntag(dst);
1125 }
1126
1127 // Shift left by kSmiShift
1128 void SmiTag(Register reg) { SmiTag(reg, reg); }
1129 void SmiTag(Register dst, Register src) {
1130 ShiftLeftU64(dst, src, Operand(kSmiShift));
1131 }
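 // Illustrative usage (a sketch, not part of this header): tagging and
 // untagging are inverse shifts by kSmiShift, e.g.
 //   __ SmiTag(r2, r3);    // r2 = r3 << kSmiShift
 //   __ SmiUntag(r3, r2);  // recovers the original value, sign-extended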
1132
1133 // Abort execution if argument is a smi, enabled via --debug-code.
1134 void AssertNotSmi(Register object) NOOP_UNLESS_DEBUG_CODE;
1135 void AssertSmi(Register object) NOOP_UNLESS_DEBUG_CODE;
1136
1137 // Abort execution if argument is not a Map, enabled via
1138 // --debug-code.
1139 void AssertMap(Register object) NOOP_UNLESS_DEBUG_CODE;
1140
1141 // Activation support.
1142 void EnterFrame(StackFrame::Type type,
1143 bool load_constant_pool_pointer_reg = false);
1144 // Returns the pc offset at which the frame ends.
1145 int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
1146
1147 void AllocateStackSpace(int bytes) {
1148 DCHECK_GE(bytes, 0);
1149 if (bytes == 0) return;
1150 lay(sp, MemOperand(sp, -bytes));
1151 }
1152
1153 void AllocateStackSpace(Register bytes) { SubS64(sp, sp, bytes); }
1154
1155 void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
1156 Label* condition_met);
1157
1159 void LoadPC(Register dst);
1160
1161 // Enforce platform specific stack alignment.
1163
1164 // Control-flow integrity:
1165
1166 // Define a function entrypoint. This doesn't emit any code for this
1167 // architecture, as control-flow integrity is not supported for it.
1168 void CodeEntry() {}
1169 // Define an exception handler.
1170 void ExceptionHandler() {}
1171 // Define an exception handler and bind a label.
1172 void BindExceptionHandler(Label* label) { bind(label); }
1173
1174 // Convenience functions to call/jmp to the code of a JSFunction object.
1175 void CallJSFunction(Register function_object, uint16_t argument_count);
1176 void JumpJSFunction(Register function_object,
1177 JumpMode jump_mode = JumpMode::kJump);
1178#ifdef V8_ENABLE_LEAPTIERING
1179 void CallJSDispatchEntry(JSDispatchHandle dispatch_handle,
1180 uint16_t argument_count);
1181#endif
1182#ifdef V8_ENABLE_WEBASSEMBLY
1183 void ResolveWasmCodePointer(Register target);
1184 void CallWasmCodePointer(Register target,
1185 CallJumpMode call_jump_mode = CallJumpMode::kCall);
1186 void LoadWasmCodePointer(Register dst, MemOperand src);
1187#endif
1188
1189 // Generates an instruction sequence s.t. the return address points to the
1190 // instruction following the call.
1191 // The return address on the stack is used by frame iteration.
1192 void StoreReturnAddressAndCall(Register target);
1193#if V8_OS_ZOS
1194 void zosStoreReturnAddressAndCall(Register target, Register scratch);
1195#endif
1196
1197 // ---------------------------------------------------------------------------
1198 // Simd Support.
1205 void F64x2ExtractLane(DoubleRegister dst, Simd128Register src,
1206 uint8_t imm_lane_idx, Register = r0);
1207 void F32x4ExtractLane(DoubleRegister dst, Simd128Register src,
1208 uint8_t imm_lane_idx, Register = r0);
1209 void I64x2ExtractLane(Register dst, Simd128Register src, uint8_t imm_lane_idx,
1210 Register = r0);
1211 void I32x4ExtractLane(Register dst, Simd128Register src, uint8_t imm_lane_idx,
1212 Register = r0);
1213 void I16x8ExtractLaneU(Register dst, Simd128Register src,
1214 uint8_t imm_lane_idx, Register = r0);
1215 void I16x8ExtractLaneS(Register dst, Simd128Register src,
1216 uint8_t imm_lane_idx, Register scratch);
1217 void I8x16ExtractLaneU(Register dst, Simd128Register src,
1218 uint8_t imm_lane_idx, Register = r0);
1219 void I8x16ExtractLaneS(Register dst, Simd128Register src,
1220 uint8_t imm_lane_idx, Register scratch);
1221 void F64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
1222 DoubleRegister src2, uint8_t imm_lane_idx,
1223 Register scratch);
1224 void F32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
1225 DoubleRegister src2, uint8_t imm_lane_idx,
1226 Register scratch);
1227 void I64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
1228 Register src2, uint8_t imm_lane_idx, Register = r0);
1229 void I32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
1230 Register src2, uint8_t imm_lane_idx, Register = r0);
1231 void I16x8ReplaceLane(Simd128Register dst, Simd128Register src1,
1232 Register src2, uint8_t imm_lane_idx, Register = r0);
1233 void I8x16ReplaceLane(Simd128Register dst, Simd128Register src1,
1234 Register src2, uint8_t imm_lane_idx, Register = r0);
1236 Register scratch1, Register scratch2, Register scratch3);
1238 Simd128Register scratch);
1240 Simd128Register scratch);
1242 Simd128Register scratch);
1244 Simd128Register scratch2);
1246 Simd128Register scratch2);
1248 Simd128Register scratch2);
1250 Register scratch2, Simd128Register scratch3);
1253 Simd128Register scratch1, Register scratch2);
1255 Simd128Register scratch1, Register scratch2);
1257 Simd128Register scratch1, Register scratch2);
1259 Simd128Register scratch1, Register scratch2);
1261 Simd128Register src2);
1263 Simd128Register src2);
1265 Simd128Register src2, Simd128Register scratch);
1267 Simd128Register src2, Simd128Register scratch);
1269 Simd128Register scratch1, Register scratch2,
1270 Register scratch3, Register scratch4);
1272 Simd128Register scratch1, Register scratch2,
1273 Register scratch3, Register scratch4);
1275 Simd128Register scratch);
1277 Simd128Register scratch);
1279 Simd128Register src2, Register scratch1, Register scratch2,
1280 Simd128Register scratch3);
1281 void S128Const(Simd128Register dst, uint64_t high, uint64_t low,
1282 Register scratch1, Register scratch2);
1284 Simd128Register src2, uint64_t high, uint64_t low,
1285 Register scratch1, Register scratch2,
1286 Simd128Register scratch3);
1288 Simd128Register src2, Simd128Register scratch);
1290 Simd128Register src2, Simd128Register scratch);
1293 Simd128Register scratch1, Simd128Register scratch2);
1295 Simd128Register src2, Simd128Register scratch1,
1296 Simd128Register scratch2, Simd128Register scratch3);
1299
1300#define SIMD_SHIFT_LIST(V) \
1301 V(I64x2Shl) \
1302 V(I64x2ShrS) \
1303 V(I64x2ShrU) \
1304 V(I32x4Shl) \
1305 V(I32x4ShrS) \
1306 V(I32x4ShrU) \
1307 V(I16x8Shl) \
1308 V(I16x8ShrS) \
1309 V(I16x8ShrU) \
1310 V(I8x16Shl) \
1311 V(I8x16ShrS) \
1312 V(I8x16ShrU)
1313
1314#define PROTOTYPE_SIMD_SHIFT(name) \
1315 void name(Simd128Register dst, Simd128Register src1, Register src2, \
1316 Simd128Register scratch); \
1317 void name(Simd128Register dst, Simd128Register src1, const Operand& src2, \
1318 Register scratch1, Simd128Register scratch2);
1319 SIMD_SHIFT_LIST(PROTOTYPE_SIMD_SHIFT)
1320#undef PROTOTYPE_SIMD_SHIFT
1321#undef SIMD_SHIFT_LIST
1322
1323#define SIMD_UNOP_LIST(V) \
1324 V(F64x2Abs) \
1325 V(F64x2Neg) \
1326 V(F64x2Sqrt) \
1327 V(F64x2Ceil) \
1328 V(F64x2Floor) \
1329 V(F64x2Trunc) \
1330 V(F64x2NearestInt) \
1331 V(F64x2ConvertLowI32x4S) \
1332 V(F64x2ConvertLowI32x4U) \
1333 V(F32x4Abs) \
1334 V(F32x4Neg) \
1335 V(F32x4Sqrt) \
1336 V(F32x4Ceil) \
1337 V(F32x4Floor) \
1338 V(F32x4Trunc) \
1339 V(F32x4NearestInt) \
1340 V(I64x2Abs) \
1341 V(I64x2SConvertI32x4Low) \
1342 V(I64x2SConvertI32x4High) \
1343 V(I64x2UConvertI32x4Low) \
1344 V(I64x2UConvertI32x4High) \
1345 V(I64x2Neg) \
1346 V(I32x4Abs) \
1347 V(I32x4Neg) \
1348 V(I32x4SConvertI16x8Low) \
1349 V(I32x4SConvertI16x8High) \
1350 V(I32x4UConvertI16x8Low) \
1351 V(I32x4UConvertI16x8High) \
1352 V(I16x8Abs) \
1353 V(I16x8Neg) \
1354 V(I16x8SConvertI8x16Low) \
1355 V(I16x8SConvertI8x16High) \
1356 V(I16x8UConvertI8x16Low) \
1357 V(I16x8UConvertI8x16High) \
1358 V(I8x16Abs) \
1359 V(I8x16Neg) \
1360 V(I8x16Popcnt) \
1361 V(S128Not) \
1362 V(S128Zero) \
1363 V(S128AllOnes)
1364
1365#define PROTOTYPE_SIMD_UNOP(name) \
1366 void name(Simd128Register dst, Simd128Register src);
1367 SIMD_UNOP_LIST(PROTOTYPE_SIMD_UNOP)
1368#undef PROTOTYPE_SIMD_UNOP
1369#undef SIMD_UNOP_LIST
1370
1371#define SIMD_BINOP_LIST(V) \
1372 V(F64x2Add) \
1373 V(F64x2Sub) \
1374 V(F64x2Mul) \
1375 V(F64x2Div) \
1376 V(F64x2Min) \
1377 V(F64x2Max) \
1378 V(F64x2Eq) \
1379 V(F64x2Ne) \
1380 V(F64x2Lt) \
1381 V(F64x2Le) \
1382 V(F64x2Pmin) \
1383 V(F64x2Pmax) \
1384 V(F32x4Add) \
1385 V(F32x4Sub) \
1386 V(F32x4Mul) \
1387 V(F32x4Div) \
1388 V(F32x4Min) \
1389 V(F32x4Max) \
1390 V(F32x4Eq) \
1391 V(F32x4Ne) \
1392 V(F32x4Lt) \
1393 V(F32x4Le) \
1394 V(F32x4Pmin) \
1395 V(F32x4Pmax) \
1396 V(I64x2Add) \
1397 V(I64x2Sub) \
1398 V(I64x2Eq) \
1399 V(I64x2Ne) \
1400 V(I64x2GtS) \
1401 V(I64x2GeS) \
1402 V(I32x4Add) \
1403 V(I32x4Sub) \
1404 V(I32x4Mul) \
1405 V(I32x4Eq) \
1406 V(I32x4Ne) \
1407 V(I32x4GtS) \
1408 V(I32x4GeS) \
1409 V(I32x4GtU) \
1410 V(I32x4MinS) \
1411 V(I32x4MinU) \
1412 V(I32x4MaxS) \
1413 V(I32x4MaxU) \
1414 V(I16x8Add) \
1415 V(I16x8Sub) \
1416 V(I16x8Mul) \
1417 V(I16x8Eq) \
1418 V(I16x8Ne) \
1419 V(I16x8GtS) \
1420 V(I16x8GeS) \
1421 V(I16x8GtU) \
1422 V(I16x8MinS) \
1423 V(I16x8MinU) \
1424 V(I16x8MaxS) \
1425 V(I16x8MaxU) \
1426 V(I16x8RoundingAverageU) \
1427 V(I8x16Add) \
1428 V(I8x16Sub) \
1429 V(I8x16Eq) \
1430 V(I8x16Ne) \
1431 V(I8x16GtS) \
1432 V(I8x16GeS) \
1433 V(I8x16GtU) \
1434 V(I8x16MinS) \
1435 V(I8x16MinU) \
1436 V(I8x16MaxS) \
1437 V(I8x16MaxU) \
1438 V(I8x16RoundingAverageU) \
1439 V(S128And) \
1440 V(S128Or) \
1441 V(S128Xor) \
1442 V(S128AndNot)
1443
1444#define PROTOTYPE_SIMD_BINOP(name) \
1445 void name(Simd128Register dst, Simd128Register src1, Simd128Register src2);
1446 SIMD_BINOP_LIST(PROTOTYPE_SIMD_BINOP)
1447#undef PROTOTYPE_SIMD_BINOP
1448#undef SIMD_BINOP_LIST
1449
1450#define SIMD_EXT_MUL_LIST(V) \
1451 V(I64x2ExtMulLowI32x4S) \
1452 V(I64x2ExtMulHighI32x4S) \
1453 V(I64x2ExtMulLowI32x4U) \
1454 V(I64x2ExtMulHighI32x4U) \
1455 V(I32x4ExtMulLowI16x8S) \
1456 V(I32x4ExtMulHighI16x8S) \
1457 V(I32x4ExtMulLowI16x8U) \
1458 V(I32x4ExtMulHighI16x8U) \
1459 V(I16x8ExtMulLowI8x16S) \
1460 V(I16x8ExtMulHighI8x16S) \
1461 V(I16x8ExtMulLowI8x16U) \
1462 V(I16x8ExtMulHighI8x16U)
1463
1464#define PROTOTYPE_SIMD_EXT_MUL(name) \
1465 void name(Simd128Register dst, Simd128Register src1, Simd128Register src2, \
1466 Simd128Register scratch);
1467 SIMD_EXT_MUL_LIST(PROTOTYPE_SIMD_EXT_MUL)
1468#undef PROTOTYPE_SIMD_EXT_MUL
1469#undef SIMD_EXT_MUL_LIST
1470
1471#define SIMD_ALL_TRUE_LIST(V) \
1472 V(I64x2AllTrue) \
1473 V(I32x4AllTrue) \
1474 V(I16x8AllTrue) \
1475 V(I8x16AllTrue)
1476
1477#define PROTOTYPE_SIMD_ALL_TRUE(name) \
1478 void name(Register dst, Simd128Register src, Register scratch1, \
1479 Simd128Register scratch2);
1480 SIMD_ALL_TRUE_LIST(PROTOTYPE_SIMD_ALL_TRUE)
1481#undef PROTOTYPE_SIMD_ALL_TRUE
1482#undef SIMD_ALL_TRUE_LIST
1483
1484#define SIMD_QFM_LIST(V) \
1485 V(F64x2Qfma) \
1486 V(F64x2Qfms) \
1487 V(F32x4Qfma) \
1488 V(F32x4Qfms)
1489
1490#define PROTOTYPE_SIMD_QFM(name) \
1491 void name(Simd128Register dst, Simd128Register src1, Simd128Register src2, \
1492 Simd128Register src3);
1493 SIMD_QFM_LIST(PROTOTYPE_SIMD_QFM)
1494#undef PROTOTYPE_SIMD_QFM
1495#undef SIMD_QFM_LIST
1496
1497#define SIMD_ADD_SUB_SAT_LIST(V) \
1498 V(I16x8AddSatS) \
1499 V(I16x8SubSatS) \
1500 V(I16x8AddSatU) \
1501 V(I16x8SubSatU) \
1502 V(I8x16AddSatS) \
1503 V(I8x16SubSatS) \
1504 V(I8x16AddSatU) \
1505 V(I8x16SubSatU)
1506
1507#define PROTOTYPE_SIMD_ADD_SUB_SAT(name) \
1508 void name(Simd128Register dst, Simd128Register src1, Simd128Register src2, \
1509 Simd128Register scratch1, Simd128Register scratch2);
1510 SIMD_ADD_SUB_SAT_LIST(PROTOTYPE_SIMD_ADD_SUB_SAT)
1511#undef PROTOTYPE_SIMD_ADD_SUB_SAT
1512#undef SIMD_ADD_SUB_SAT_LIST
1513
1514#define SIMD_EXT_ADD_PAIRWISE_LIST(V) \
1515 V(I32x4ExtAddPairwiseI16x8S) \
1516 V(I32x4ExtAddPairwiseI16x8U) \
1517 V(I16x8ExtAddPairwiseI8x16S) \
1518 V(I16x8ExtAddPairwiseI8x16U)
1519
1520#define PROTOTYPE_SIMD_EXT_ADD_PAIRWISE(name) \
1521 void name(Simd128Register dst, Simd128Register src, \
1522 Simd128Register scratch1, Simd128Register scratch2);
1523 SIMD_EXT_ADD_PAIRWISE_LIST(PROTOTYPE_SIMD_EXT_ADD_PAIRWISE)
1524#undef PROTOTYPE_SIMD_EXT_ADD_PAIRWISE
1525#undef SIMD_EXT_ADD_PAIRWISE_LIST
1526
1527 // ---------------------------------------------------------------------------
1528 // Pointer compression Support
1529
1530 void SmiToPtrArrayOffset(Register dst, Register src) {
1531#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
1532 static_assert(kSmiTag == 0 && kSmiShift < kSystemPointerSizeLog2);
1533 ShiftLeftU64(dst, src, Operand(kSystemPointerSizeLog2 - kSmiShift));
1534#else
1535 static_assert(kSmiTag == 0 && kSmiShift > kSystemPointerSizeLog2);
1536 ShiftRightS64(dst, src, Operand(kSmiShift - kSystemPointerSizeLog2));
1537#endif
1538 }
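 // Illustrative usage (a sketch, not part of this header): scale a smi index
 // into a byte offset for a pointer-sized array slot, e.g.
 //   __ SmiToPtrArrayOffset(r3, r3);      // index -> byte offset
 //   __ LoadU64(r2, MemOperand(r4, r3));  // r4 holds the array base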
1539
1540 // Loads a field containing any tagged value and decompresses it if necessary.
1541 void LoadTaggedField(const Register& destination,
1542 const MemOperand& field_operand,
1543 const Register& scratch = no_reg);
1544 // Loads a field containing any tagged value but never decompresses it.
1545 void LoadTaggedFieldWithoutDecompressing(const Register& destination,
1546 const MemOperand& field_operand,
1547 const Register& scratch = no_reg);
1548
1549 // Loads a field containing smi value and untags it.
1550 void SmiUntagField(Register dst, const MemOperand& src);
1551
1552 // Compresses and stores tagged value to given on-heap location.
1553 void StoreTaggedField(const Register& value,
1554 const MemOperand& dst_field_operand,
1555 const Register& scratch = no_reg);
1556
1557 void Zero(const MemOperand& dest);
1558 void Zero(const MemOperand& dest1, const MemOperand& dest2);
1559
1565
1566 // CountLeadingZeros will corrupt the scratch register pair (e.g. r0:r1)
1567 void CountLeadingZerosU32(Register dst, Register src,
1568 Register scratch_pair = r0);
1569 void CountLeadingZerosU64(Register dst, Register src,
1570 Register scratch_pair = r0);
1571 void CountTrailingZerosU32(Register dst, Register src,
1572 Register scratch_pair = r0);
1573 void CountTrailingZerosU64(Register dst, Register src,
1574 Register scratch_pair = r0);
1575
1577
1578 // These helpers assume that the arguments are located below the stack pointer.
1579 void LoadReceiver(Register dest) { LoadU64(dest, MemOperand(sp, 0)); }
1580 void StoreReceiver(Register rec) { StoreU64(rec, MemOperand(sp, 0)); }
1581
1582 void CallRuntime(const Runtime::Function* f, int num_arguments);
1583
1584 // Convenience function: Same as above, but takes the fid instead.
1585 void CallRuntime(Runtime::FunctionId fid) {
1586 const Runtime::Function* function = Runtime::FunctionForId(fid);
1587 CallRuntime(function, function->nargs);
1588 }
1589
1590 // Convenience function: Same as above, but takes the fid instead.
1591 void CallRuntime(Runtime::FunctionId fid, int num_arguments) {
1592 CallRuntime(Runtime::FunctionForId(fid), num_arguments);
1593 }
1594
1595 // Convenience function: tail call a runtime routine (jump).
1596 void TailCallRuntime(Runtime::FunctionId fid);
1597
1598 // ---------------------------------------------------------------------------
1599 // Support functions.
1600
1601 void IsObjectType(Register object, Register scratch1, Register scratch2,
1602 InstanceType type);
1603
1604 // Compare object type for heap object. heap_object contains a non-Smi
1605 // whose object type should be compared with the given type. This both
1606 // sets the flags and leaves the object type in the type_reg register.
1607 // It leaves the map in the map register (unless the type_reg and map register
1608 // are the same register). It leaves the heap object in the heap_object
1609 // register unless the heap_object register is the same register as one of the
1610 // other registers.
1611 // Type_reg can be no_reg. In that case r0 is used.
1612 template <bool use_unsigned_cmp = false>
1613 void CompareObjectType(Register heap_object, Register map, Register type_reg,
1614 InstanceType type) {
1615 const Register temp = type_reg == no_reg ? r0 : type_reg;
1616
1617 LoadMap(map, heap_object);
1618 CompareInstanceType<use_unsigned_cmp>(map, temp, type);
1619 }
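 // Illustrative usage (a sketch, not part of this header): classify a heap
 // object and branch on its instance type, e.g.
 //   Label is_function;
 //   __ CompareObjectType(r2, r3, r4, JS_FUNCTION_TYPE);
 //   __ beq(&is_function);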
1620 // Variant of the above, which compares against a type range rather than a
1621 // single type (lower_limit and higher_limit are inclusive).
1622 //
1623 // Always use unsigned comparisons: ls for a positive result.
1624 void CompareObjectTypeRange(Register heap_object, Register map,
1625 Register type_reg, Register scratch,
1626 InstanceType lower_limit,
1627 InstanceType higher_limit);
1628
1629 // Compare instance type in a map. map contains a valid map object whose
1630 // object type should be compared with the given type. This both
1631 // sets the flags and leaves the object type in the type_reg register.
1632 template <bool use_unsigned_cmp = false>
1633 void CompareInstanceType(Register map, Register type_reg, InstanceType type) {
1634 static_assert(Map::kInstanceTypeOffset < 4096);
1635 static_assert(LAST_TYPE <= 0xFFFF);
1636 if (use_unsigned_cmp) {
1637 LoadU16(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1638 CmpU64(type_reg, Operand(type));
1639 } else {
1640 LoadS16(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1641 CmpS64(type_reg, Operand(type));
1642 }
1643 }
1644
1645 // Compare instance type ranges for a map (lower_limit and higher_limit
1646 // inclusive).
1647 //
1648 // Always use unsigned comparisons: ls for a positive result.
1649 void CompareInstanceTypeRange(Register map, Register type_reg,
1650 Register scratch, InstanceType lower_limit,
1651 InstanceType higher_limit);
1652
1653 // Compare the object in a register to a value from the root list.
1654 // Uses the ip register as scratch.
1655 void CompareRoot(Register obj, RootIndex index);
1657 void PushRoot(RootIndex index) {
1658 LoadRoot(r0, index);
1659 Push(r0);
1660 }
1661
1662 template <class T>
1663 void CompareTagged(Register src1, T src2) {
1664 if (COMPRESS_POINTERS_BOOL) {
1665 CmpS32(src1, src2);
1666 } else {
1667 CmpS64(src1, src2);
1668 }
1669 }
1670
1671 void Cmp(Register dst, int32_t src) { CmpS32(dst, Operand(src)); }
1672
1673 void CmpTagged(const Register& src1, const Register& src2) {
1674 CompareTagged(src1, src2);
1675 }
1676
1677 // Jump to a runtime routine.
1678 void JumpToExternalReference(const ExternalReference& builtin,
1679 bool builtin_exit_frame = false);
1680
1681 // Compare the object in a register to a value and jump if they are equal.
1682 void JumpIfRoot(Register with, RootIndex index, Label* if_equal) {
1683 CompareRoot(with, index);
1684 beq(if_equal);
1685 }
1686
1687 // Compare the object in a register to a value and jump if they are not equal.
1688 void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) {
1689 CompareRoot(with, index);
1690 bne(if_not_equal);
1691 }
1692
1693 // Checks if value is in range [lower_limit, higher_limit] using a single
1694 // comparison.
1695 void CompareRange(Register value, Register scratch, unsigned lower_limit,
1696 unsigned higher_limit);
1697 void JumpIfIsInRange(Register value, Register scratch, unsigned lower_limit,
1698 unsigned higher_limit, Label* on_in_range);
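 // Illustrative usage (a sketch, not part of this header): the range check
 // compiles to one unsigned compare against (higher_limit - lower_limit), e.g.
 //   Label is_receiver;
 //   __ JumpIfIsInRange(r2, r0, FIRST_JS_RECEIVER_TYPE, LAST_JS_RECEIVER_TYPE,
 //                      &is_receiver);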
1699
1700 // ---------------------------------------------------------------------------
1701 // In-place weak references.
1702 void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
1703
1704 // ---------------------------------------------------------------------------
1705 // StatsCounter support
1706
1707 void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
1708 Register scratch2) {
1709 if (!v8_flags.native_code_counters) return;
1710 EmitIncrementCounter(counter, value, scratch1, scratch2);
1711 }
1712 void EmitIncrementCounter(StatsCounter* counter, int value, Register scratch1,
1713 Register scratch2);
1714 void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
1715 Register scratch2) {
1716 if (!v8_flags.native_code_counters) return;
1717 EmitDecrementCounter(counter, value, scratch1, scratch2);
1718 }
1719 void EmitDecrementCounter(StatsCounter* counter, int value, Register scratch1,
1720 Register scratch2);
1721
1722 // ---------------------------------------------------------------------------
1723 // Stack limit utilities
1724
1725 MemOperand StackLimitAsMemOperand(StackLimitKind kind);
1726 void StackOverflowCheck(Register num_args, Register scratch,
1727 Label* stack_overflow);
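 // Sketch (illustrative): guard a push of |argc| arguments against stack
 // overflow before touching the stack.
 //
 //   Label stack_overflow, done;
 //   StackOverflowCheck(argc, r1, &stack_overflow);
 //   // ... push the arguments ...
 //   b(&done);
 //   bind(&stack_overflow);
 //   CallRuntime(Runtime::kThrowStackOverflow);
 //   bind(&done);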
1728
1729 // ---------------------------------------------------------------------------
1730 // JavaScript invokes
1731
1732 // Set up call kind marking in ecx. The method takes ecx as an
1733 // explicit first parameter to make the code more readable at the
1734 // call sites.
1735 // void SetCallKind(Register dst, CallKind kind);
1736
1737 // Removes current frame and its arguments from the stack preserving
1738 // the arguments and a return address pushed to the stack for the next call.
1739 // Both |callee_args_count| and |caller_args_count| do not include
1740 // receiver. |callee_args_count| is not modified. |caller_args_count|
1741 // is trashed.
1742
1743 // Invoke the JavaScript function code by either calling or jumping.
1744 void InvokeFunctionCode(Register function, Register new_target,
1745 Register expected_parameter_count,
1746 Register actual_parameter_count, InvokeType type);
1747
1748 // On function call, call into the debugger if necessary.
1749 void CheckDebugHook(Register fun, Register new_target,
1750 Register expected_parameter_count,
1751 Register actual_parameter_count);
1752
1753 // Invoke the JavaScript function in the given register. Changes the
1754 // current context to the context in the function before invoking.
1755 void InvokeFunctionWithNewTarget(Register function, Register new_target,
1756 Register actual_parameter_count,
1757 InvokeType type);
1758 void InvokeFunction(Register function, Register expected_parameter_count,
1759 Register actual_parameter_count, InvokeType type);
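 // Sketch (illustrative; the register assignments are placeholders, not the
 // platform calling convention): call the JSFunction in r3, passing the
 // actual argument count in r2 and new.target in r5.
 //
 //   InvokeFunctionWithNewTarget(r3, r5, r2, InvokeType::kCall);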
1760
1761 // Exception handling
1762
1763 // Push a new stack handler and link into stack handler chain.
1764 void PushStackHandler();
1765
1766 // Unlink the stack handler on top of the stack from the stack handler chain.
1767 // Must preserve the result register.
1768 void PopStackHandler();
1769
1770 // Enter exit frame.
1771 // stack_space - extra stack space, used for parameters before call to C.
1772 void EnterExitFrame(Register scratch, int stack_space,
1773 StackFrame::Type frame_type);
1774
1775 // Leave the current exit frame.
1776 void LeaveExitFrame(Register scratch);
1777
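 // Sketch (illustrative): bracket a C call with an exit frame, using r1 as
 // scratch and reserving no extra parameter slots.
 //
 //   EnterExitFrame(r1, 0, StackFrame::EXIT);
 //   // ... set up arguments and CallCFunction(...) ...
 //   LeaveExitFrame(r1);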
1778 // Load the global proxy from the current context.
1779 void LoadGlobalProxy(Register dst) {
1780 LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX);
1781 }
1782
1783 void LoadNativeContextSlot(Register dst, int index);
1784
1785 // Falls through and sets scratch_and_result to 0 on failure, jumps to
1786 // on_result on success.
1787 void TryLoadOptimizedOsrCode(Register scratch_and_result,
1788 CodeKind min_opt_level, Register feedback_vector,
1789 FeedbackSlot slot, Label* on_result,
1790 Label::Distance distance);
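 // Sketch (illustrative; |slot| stands for a FeedbackSlot known to the
 // caller): probe the feedback vector for cached OSR code. On success the
 // code object is in r2 and control jumps to the label; on failure r2 is
 // zeroed and execution falls through.
 //
 //   Label found_osr_code;
 //   TryLoadOptimizedOsrCode(r2, CodeKind::MAGLEV, feedback_vector, slot,
 //                           &found_osr_code, Label::kFar);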
1791 // ---------------------------------------------------------------------------
1792 // Smi utilities
1793
1794 // Jump if either of the registers contain a non-smi.
1795 inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
1796 TestIfSmi(value);
1797 bne(not_smi_label /*, cr0*/);
1798 }
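 // Sketch (illustrative): take a slow path for non-smis, then untag.
 //
 //   Label slow;
 //   JumpIfNotSmi(r2, &slow);
 //   SmiUntag(r2, r2);  // r2 now holds the plain integer value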
1799
1800#if !defined(V8_COMPRESS_POINTERS) && !defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
1801 // Ensure it is permissible to read/write int value directly from
1802 // upper half of the smi.
1803 static_assert(kSmiTag == 0);
1804 static_assert(kSmiTagSize + kSmiShiftSize == 32);
1805#endif
1806#if V8_TARGET_LITTLE_ENDIAN
1807#define SmiWordOffset(offset) (offset + kSystemPointerSize / 2)
1808#else
1809#define SmiWordOffset(offset) offset
1810#endif
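 // Example (illustrative; kValueOffset is a hypothetical field offset): with
 // 32-bit smis stored in the upper half of a 64-bit word, the integer value
 // of an on-heap smi field can be read with a single 32-bit load, skipping
 // the untagging shift:
 //
 //   LoadS32(r1, FieldMemOperand(obj, SmiWordOffset(kValueOffset)));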
1811
1812 // Abort execution if argument is not a Constructor, enabled via --debug-code.
1813 void AssertConstructor(Register object,
1814 Register scratch) NOOP_UNLESS_DEBUG_CODE;
1815
1816 // Abort execution if argument is not a JSFunction, enabled via --debug-code.
1817 void AssertFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
1818
1819 // Abort execution if argument is not a callable JSFunction, enabled via
1820 // --debug-code.
1821 void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
1822
1823 // Abort execution if argument is not a JSBoundFunction,
1824 // enabled via --debug-code.
1825 void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
1826
1827 // Abort execution if argument is not a JSGeneratorObject (or subclass),
1828 // enabled via --debug-code.
1829 void AssertGeneratorObject(Register object) NOOP_UNLESS_DEBUG_CODE;
1830
1831 // Abort execution if argument is not undefined or an AllocationSite, enabled
1832 // via --debug-code.
1833 void AssertUndefinedOrAllocationSite(Register object,
1834 Register scratch) NOOP_UNLESS_DEBUG_CODE;
1835
1836 void AssertJSAny(Register object, Register map_tmp, Register tmp,
1837 AbortReason abort_reason) NOOP_UNLESS_DEBUG_CODE;
1838
1839 template <typename Field>
1840 void DecodeField(Register dst, Register src) {
1841 int shift = Field::kShift;
1842 int mask = Field::kMask >> Field::kShift;
1843 if (base::bits::IsPowerOfTwo(mask + 1)) {
1844 ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1,
1845 Field::kShift);
1846 } else if (shift != 0) {
1847 ShiftRightU64(dst, src, Operand(shift));
1848 AndP(dst, Operand(mask));
1849 } else {
1850 AndP(dst, src, Operand(mask));
1851 }
1852 }
1853
1854 template <typename Field>
1855 void DecodeField(Register reg) {
1856 DecodeField<Field>(reg, reg);
1857 }
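 // Sketch (illustrative): for a bitfield type Field with kShift = 3 and
 // kSize = 4, DecodeField leaves (src >> 3) & 0xF in dst, e.g.:
 //
 //   DecodeField<Map::Bits1::HasNamedInterceptorBit>(r1, r2);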
1858
1859 // Tiering support.
1860 void AssertFeedbackCell(Register object,
1861 Register scratch) NOOP_UNLESS_DEBUG_CODE;
1862 void AssertFeedbackVector(Register object) NOOP_UNLESS_DEBUG_CODE;
1863 void AssertFeedbackVector(Register object,
1864 Register scratch) NOOP_UNLESS_DEBUG_CODE;
1865 void ReplaceClosureCodeWithOptimizedCode(Register optimized_code,
1866 Register closure, Register scratch1,
1867 Register slot_address);
1868 void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
1869 Condition LoadFeedbackVectorFlagsAndCheckIfNeedsProcessing(
1870 Register flags, Register feedback_vector, CodeKind current_code_kind);
1871 void LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
1872 Register flags, Register feedback_vector, CodeKind current_code_kind,
1873 Label* flags_need_processing);
1874 void OptimizeCodeOrTailCallOptimizedCodeSlot(Register flags,
1875 Register feedback_vector);
1876
1877 // ---------------------------------------------------------------------------
1878 // GC Support
1879
1881 Register address);
1882
1883 void CallJSEntry(Register target);
1884 static int CallSizeNotPredictableCodeSize(Address target,
1885 RelocInfo::Mode rmode,
1886 Condition cond = al);
1887 // Notify the garbage collector that we wrote a pointer into an object.
1888 // |object| is the object being stored into, |value| is the object being
1889 // stored. value and scratch registers are clobbered by the operation.
1890 // The offset is the offset from the start of the object, not the offset from
1891 // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
1892 void RecordWriteField(Register object, int offset, Register value,
1893 Register slot_address, LinkRegisterStatus lr_status,
1894 SaveFPRegsMode save_fp,
1895 SmiCheck smi_check = SmiCheck::kInline);
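 // Typical pattern (sketch; |offset| stands for a concrete field offset):
 // store a tagged value into the object, then emit the write barrier for
 // that field.
 //
 //   StoreTaggedField(value, FieldMemOperand(object, offset));
 //   RecordWriteField(object, offset, value, slot_address,
 //                    kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore);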
1896
1897 // For a given |object| notify the garbage collector that the slot |address|
1898 // has been written. |value| is the object being stored. The value and
1899 // address registers are clobbered by the operation.
1900 void RecordWrite(Register object, Register slot_address, Register value,
1901 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
1902 SmiCheck smi_check = SmiCheck::kInline);
1903
1904 void TestCodeIsMarkedForDeoptimization(Register code, Register scratch);
1905 Operand ClearedValue() const;
1906
1907 private:
1908 static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
1909
1910 void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
1911 int CalculateStackPassedWords(int num_reg_arguments,
1912 int num_double_arguments);
1913
1914 // Helper functions for generating invokes.
1915 void InvokePrologue(Register expected_parameter_count,
1916 Register actual_parameter_count, InvokeType type);
1917
1918 DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
1919};
1920
1921struct MoveCycleState {
1922 // Whether a move in the cycle needs a double scratch register.
1923 bool pending_double_scratch_register_use = false;
1924};
1925
1926 // Provides access to exit frame parameters (GC-ed).
1927 inline MemOperand ExitFrameStackSlotOperand(int offset) {
1928 // The slot at [sp] is reserved in all ExitFrames for storing the return
1929 // address before doing the actual call, it's necessary for frame iteration
1930 // (see StoreReturnAddressAndCall for details).
1931 static constexpr int kSPOffset = 1 * kSystemPointerSize;
1932 return MemOperand(sp, (kStackFrameExtraParamSlot * kSystemPointerSize) +
1933 offset + kSPOffset);
1934}
1935
1936 // Provides access to exit frame stack space (not GC-ed).
1937 inline MemOperand ExitFrameCallerStackSlotOperand(int index) {
1938 return MemOperand(
1939 fp, (BuiltinExitFrameConstants::kFixedSlotCountAboveFp + index) *
1940 kSystemPointerSize);
1941}
1942
1943// Calls an API function. Allocates HandleScope, extracts returned value
1944// from handle and propagates exceptions. Clobbers C argument registers
1945// and C caller-saved registers. Restores context. On return removes
1946// (*argc_operand + slots_to_drop_on_return) * kSystemPointerSize
1947// (GCed, includes the call JS arguments space and the additional space
1948// allocated for the fast call).
1949void CallApiFunctionAndReturn(MacroAssembler* masm, bool with_profiling,
1950 Register function_address,
1951 ExternalReference thunk_ref, Register thunk_arg,
1952 int slots_to_drop_on_return,
1953 MemOperand* argc_operand,
1954 MemOperand return_value_operand);
1955
1956#define ACCESS_MASM(masm) masm->
1957
1958} // namespace internal
1959} // namespace v8
1960
1961#endif // V8_CODEGEN_S390_MACRO_ASSEMBLER_S390_H_