v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
assembler-arm.h
Go to the documentation of this file.
1// Copyright (c) 1994-2006 Sun Microsystems Inc.
2// All Rights Reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions
6// are met:
7//
8// - Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// - Redistribution in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the
14// distribution.
15//
16// - Neither the name of Sun Microsystems or the names of contributors may
17// be used to endorse or promote products derived from this software without
18// specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31// OF THE POSSIBILITY OF SUCH DAMAGE.
32
33// The original source code covered by the above license above has been
34// modified significantly by Google Inc.
35// Copyright 2012 the V8 project authors. All rights reserved.
36
37// A light-weight ARM Assembler
38// Generates user mode instructions for the ARM architecture up to version 5
39
40#ifndef V8_CODEGEN_ARM_ASSEMBLER_ARM_H_
41#define V8_CODEGEN_ARM_ASSEMBLER_ARM_H_
42
43#include <stdio.h>
44
45#include <memory>
46
55namespace v8 {
56namespace internal {
57
58class SafepointTableBuilder;
59
// Coprocessor number. Values p0..p15 map directly to the 4-bit coprocessor
// field of ARM coprocessor instructions (cdp/mcr/mrc/ldc and friends).
enum Coprocessor {
  p0 = 0,
  p1 = 1,
  p2 = 2,
  p3 = 3,
  p4 = 4,
  p5 = 5,
  p6 = 6,
  p7 = 7,
  p8 = 8,
  p9 = 9,
  p10 = 10,
  p11 = 11,
  p12 = 12,
  p13 = 13,
  p14 = 14,
  p15 = 15
};
79
80// -----------------------------------------------------------------------------
81// Machine instruction Operands
82
83// Class Operand represents a shifter operand in data processing instructions
84class V8_EXPORT_PRIVATE Operand {
85 public:
86 // immediate
87 V8_INLINE explicit Operand(int32_t immediate,
88 RelocInfo::Mode rmode = RelocInfo::NO_INFO)
89 : rmode_(rmode) {
90 value_.immediate = immediate;
91 }
92 V8_INLINE static Operand Zero();
93 V8_INLINE explicit Operand(const ExternalReference& f);
94 explicit Operand(Handle<HeapObject> handle);
95 V8_INLINE explicit Operand(Tagged<Smi> value);
96
97 // rm
98 V8_INLINE explicit Operand(Register rm);
99
100 // rm <shift_op> shift_imm
101 explicit Operand(Register rm, ShiftOp shift_op, int shift_imm);
103 return Operand(rm, ASR, kSmiTagSize);
104 }
113
114 // rm <shift_op> rs
115 explicit Operand(Register rm, ShiftOp shift_op, Register rs);
116
117 static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
118
119 // Return true if this is a register operand.
120 bool IsRegister() const {
121 return rm_.is_valid() && rs_ == no_reg && shift_op_ == LSL &&
122 shift_imm_ == 0;
123 }
124 // Return true if this is a register operand shifted with an immediate.
126 return rm_.is_valid() && !rs_.is_valid();
127 }
128 // Return true if this is a register operand shifted with a register.
130 return rm_.is_valid() && rs_.is_valid();
131 }
132
133 // Return the number of actual instructions required to implement the given
134 // instruction for this particular operand. This can be a single instruction,
135 // if no load into a scratch register is necessary, or anything between 2 and
136 // 4 instructions when we need to load from the constant pool (depending upon
137 // whether the constant pool entry is in the small or extended section). If
138 // the instruction this operand is used for is a MOV or MVN instruction the
139 // actual instruction to use is required for this calculation. For other
140 // instructions instr is ignored.
141 //
142 // The value returned is only valid as long as no entries are added to the
143 // constant pool between this call and the actual instruction being emitted.
144 int InstructionsRequired(const Assembler* assembler, Instr instr = 0) const;
145 bool MustOutputRelocInfo(const Assembler* assembler) const;
146
147 inline int32_t immediate() const {
148 DCHECK(IsImmediate());
149 DCHECK(!IsHeapNumberRequest());
150 return value_.immediate;
151 }
152 bool IsImmediate() const { return !rm_.is_valid(); }
153
155 DCHECK(IsHeapNumberRequest());
156 return value_.heap_number_request;
157 }
158 bool IsHeapNumberRequest() const {
159 DCHECK_IMPLIES(is_heap_number_request_, IsImmediate());
160 DCHECK_IMPLIES(is_heap_number_request_,
161 rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT ||
162 rmode_ == RelocInfo::CODE_TARGET);
163 return is_heap_number_request_;
164 }
165
166 Register rm() const { return rm_; }
167 Register rs() const { return rs_; }
168 ShiftOp shift_op() const { return shift_op_; }
169
170 private:
174 int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
175 union Value {
176 Value() {}
177 HeapNumberRequest heap_number_request; // if is_heap_number_request_
178 int32_t immediate; // otherwise
179 } value_; // valid if rm_ == no_reg
180 bool is_heap_number_request_ = false;
182
183 friend class Assembler;
184};
185
186// Class MemOperand represents a memory operand in load and store instructions
188 public:
189 // [rn +/- offset] Offset/NegOffset
190 // [rn +/- offset]! PreIndex/NegPreIndex
191 // [rn], +/- offset PostIndex/NegPostIndex
192 // offset is any signed 32-bit value; offset is first loaded to a scratch
193 // register if it does not fit the addressing mode (12-bit unsigned and sign
194 // bit)
195 explicit MemOperand(Register rn, int32_t offset = 0, AddrMode am = Offset);
196
197 // [rn +/- rm] Offset/NegOffset
198 // [rn +/- rm]! PreIndex/NegPreIndex
199 // [rn], +/- rm PostIndex/NegPostIndex
200 explicit MemOperand(Register rn, Register rm, AddrMode am = Offset);
201
202 // [rn +/- rm <shift_op> shift_imm] Offset/NegOffset
203 // [rn +/- rm <shift_op> shift_imm]! PreIndex/NegPreIndex
204 // [rn], +/- rm <shift_op> shift_imm PostIndex/NegPostIndex
205 explicit MemOperand(Register rn, Register rm, ShiftOp shift_op, int shift_imm,
206 AddrMode am = Offset);
209 AddrMode am = Offset) {
210 static_assert(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
211 return MemOperand(array, key, LSL, kPointerSizeLog2 - kSmiTagSize, am);
212 }
213
214 bool IsImmediateOffset() const { return rm_ == no_reg; }
215
216 void set_offset(int32_t offset) {
217 DCHECK(IsImmediateOffset());
218 offset_ = offset;
219 }
220
221 int32_t offset() const {
222 DCHECK(IsImmediateOffset());
223 return offset_;
224 }
225
226 Register rn() const { return rn_; }
227 Register rm() const { return rm_; }
228 AddrMode am() const { return am_; }
229
231 return offset_ >= 0 ? is_uint12(offset_) : is_uint12(-offset_);
232 }
233
234 private:
235 Register rn_; // base
236 Register rm_; // register offset
237 int32_t offset_; // valid if rm_ == no_reg
239 int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
240 AddrMode am_; // bits P, U, and W
241
242 friend class Assembler;
243};
244
245// Class NeonMemOperand represents a memory operand in load and
246// store NEON instructions
248 public:
249 // [rn {:align}] Offset
250 // [rn {:align}]! PostIndex
251 explicit NeonMemOperand(Register rn, AddrMode am = Offset, int align = 0);
252
253 // [rn {:align}], rm PostIndex
254 explicit NeonMemOperand(Register rn, Register rm, int align = 0);
255
256 Register rn() const { return rn_; }
257 Register rm() const { return rm_; }
258 int align() const { return align_; }
259
260 private:
261 void SetAlignment(int align);
262
263 Register rn_; // base
264 Register rm_; // register increment
266};
267
268// Class NeonListOperand represents a list of NEON registers
270 public:
274 : base_(q_reg.low()), register_count_(2) {}
275 DoubleRegister base() const { return base_; }
277 int length() const { return register_count_ - 1; }
279 switch (register_count_) {
280 default:
281 UNREACHABLE();
282 // Fall through.
283 case 1:
284 return nlt_1;
285 case 2:
286 return nlt_2;
287 case 3:
288 return nlt_3;
289 case 4:
290 return nlt_4;
291 }
292 }
293
294 private:
297};
298
300 public:
301 // Create an assembler. Instructions and relocation information are emitted
302 // into a buffer, with the instructions starting from the beginning and the
303 // relocation information starting from the end of the buffer. See CodeDesc
304 // for a detailed comment on the layout (globals.h).
305 //
306 // If the provided buffer is nullptr, the assembler allocates and grows its
307 // own buffer. Otherwise it takes ownership of the provided buffer.
308 explicit Assembler(const AssemblerOptions&,
309 std::unique_ptr<AssemblerBuffer> = {});
310 // For compatibility with assemblers that require a zone.
312 std::unique_ptr<AssemblerBuffer> buffer = {})
313 : Assembler(options, std::move(buffer)) {}
314
315 ~Assembler() override;
316
319
320 void AbortedCodeGeneration() override {
321 pending_32_bit_constants_.clear();
322 first_const_pool_32_use_ = -1;
323 constant_pool_deadline_ = kMaxInt;
324 }
325
326 // GetCode emits any pending (non-emitted) code and fills the descriptor desc.
327 static constexpr int kNoHandlerTable = 0;
328 static constexpr SafepointTableBuilderBase* kNoSafepointTable = nullptr;
329 void GetCode(LocalIsolate* isolate, CodeDesc* desc,
330 SafepointTableBuilderBase* safepoint_table_builder,
331 int handler_table_offset);
332
333 // Convenience wrapper for allocating with an Isolate.
334 void GetCode(Isolate* isolate, CodeDesc* desc);
335 // Convenience wrapper for code without safepoint or handler tables.
336 void GetCode(LocalIsolate* isolate, CodeDesc* desc) {
337 GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
338 }
339
340 // Label operations & relative jumps (PPUM Appendix D)
341 //
342 // Takes a branch opcode (cc) and a label (L) and generates
343 // either a backward branch or a forward branch and links it
344 // to the label fixup chain. Usage:
345 //
346 // Label L; // unbound label
347 // j(cc, &L); // forward branch to unbound label
348 // bind(&L); // bind label to the current pc
349 // j(cc, &L); // backward branch to bound label
350 // bind(&L); // illegal: a label may be bound only once
351 //
352 // Note: The same Label can be used for forward and backward branches
353 // but it may be bound only once.
354
355 void bind(Label* L); // binds an unbound label L to the current code position
356
357 // Returns the branch offset to the given label from the current code position
358 // Links the label to the current position if it is still unbound
359 // Manages the jump elimination optimization if the second parameter is true.
361
362 // Returns true if the given pc address is the start of a constant pool load
363 // instruction sequence.
364 V8_INLINE static bool is_constant_pool_load(Address pc);
365
366 // Return the address in the constant pool of the code target address used by
367 // the branch/call instruction at pc, or the object in a mov.
368 V8_INLINE static Address constant_pool_entry_address(Address pc,
369 Address constant_pool);
370
371 // Read/Modify the code target address in the branch/call instruction at pc.
372 // The isolate argument is unused (and may be nullptr) when skipping flushing.
373 V8_INLINE static Address target_address_at(Address pc, Address constant_pool);
374 V8_INLINE static void set_target_address_at(
375 Address pc, Address constant_pool, Address target,
376 WritableJitAllocation* jit_allocation,
377 ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
378
379 // Get the size of the special target encoded at 'location'.
380 inline static int deserialization_special_target_size(Address location);
381
382 // This sets the internal reference at the pc.
383 inline static void deserialization_set_target_internal_reference_at(
384 Address pc, Address target, WritableJitAllocation& jit_allocation,
385 RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
386
387 // Read/modify the uint32 constant used at pc.
388 static inline uint32_t uint32_constant_at(Address pc, Address constant_pool);
389 static inline void set_uint32_constant_at(
390 Address pc, Address constant_pool, uint32_t new_constant,
391 WritableJitAllocation* jit_allocation,
392 ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
393
394 // Here we are patching the address in the constant pool, not the actual call
395 // instruction. The address in the constant pool is the same size as a
396 // pointer.
397 static constexpr int kSpecialTargetSize = kPointerSize;
398
399 RegList* GetScratchRegisterList() { return &scratch_register_list_; }
401 return &scratch_vfp_register_list_;
402 }
403
404 // ---------------------------------------------------------------------------
405 // InstructionStream generation
406
407 // Insert the smallest number of nop instructions
408 // possible to align the pc offset to a multiple
409 // of m. m must be a power of 2 (>= 4).
410 void Align(int m);
411 // Insert the smallest number of zero bytes possible to align the pc offset
412 // to a mulitple of m. m must be a power of 2 (>= 2).
413 void DataAlign(int m);
414 // Aligns code to something that's optimal for a jump target for the platform.
415 void CodeTargetAlign();
416 void LoopHeaderAlign() { CodeTargetAlign(); }
417
418 // Branch instructions
419 void b(int branch_offset, Condition cond = al,
420 RelocInfo::Mode rmode = RelocInfo::NO_INFO);
421 void bl(int branch_offset, Condition cond = al,
422 RelocInfo::Mode rmode = RelocInfo::NO_INFO);
423 void blx(int branch_offset); // v5 and above
424 void blx(Register target, Condition cond = al); // v5 and above
425 void bx(Register target, Condition cond = al); // v5 and above, plus v4t
426
427 // Convenience branch instructions using labels
428 void b(Label* L, Condition cond = al);
429 void b(Condition cond, Label* L) { b(L, cond); }
430 void bl(Label* L, Condition cond = al);
431 void bl(Condition cond, Label* L) { bl(L, cond); }
432 void blx(Label* L); // v5 and above
433
434 // Data-processing instructions
435
436 void and_(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
437 Condition cond = al);
438 void and_(Register dst, Register src1, Register src2, SBit s = LeaveCC,
439 Condition cond = al);
440
441 void eor(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
442 Condition cond = al);
443 void eor(Register dst, Register src1, Register src2, SBit s = LeaveCC,
444 Condition cond = al);
445
446 void sub(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
447 Condition cond = al);
448 void sub(Register dst, Register src1, Register src2, SBit s = LeaveCC,
449 Condition cond = al);
450
451 void rsb(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
452 Condition cond = al);
453
454 void add(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
455 Condition cond = al);
456 void add(Register dst, Register src1, Register src2, SBit s = LeaveCC,
457 Condition cond = al);
458
459 void adc(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
460 Condition cond = al);
461
462 void sbc(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
463 Condition cond = al);
464
465 void rsc(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
466 Condition cond = al);
467
468 void tst(Register src1, const Operand& src2, Condition cond = al);
469 void tst(Register src1, Register src2, Condition cond = al);
470
471 void teq(Register src1, const Operand& src2, Condition cond = al);
472
473 void cmp(Register src1, const Operand& src2, Condition cond = al);
474 void cmp(Register src1, Register src2, Condition cond = al);
475
476 void cmp_raw_immediate(Register src1, int raw_immediate, Condition cond = al);
477
478 void cmn(Register src1, const Operand& src2, Condition cond = al);
479
480 void orr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
481 Condition cond = al);
482 void orr(Register dst, Register src1, Register src2, SBit s = LeaveCC,
483 Condition cond = al);
484
485 void mov(Register dst, const Operand& src, SBit s = LeaveCC,
486 Condition cond = al);
487 void mov(Register dst, Register src, SBit s = LeaveCC, Condition cond = al);
488
489 // Load the position of the label relative to the generated code object
490 // pointer in a register.
492
493 // ARMv7 instructions for loading a 32 bit immediate in two instructions.
494 // The constant for movw and movt should be in the range 0-0xffff.
495 void movw(Register reg, uint32_t immediate, Condition cond = al);
496 void movt(Register reg, uint32_t immediate, Condition cond = al);
497
498 void bic(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
499 Condition cond = al);
500
501 void mvn(Register dst, const Operand& src, SBit s = LeaveCC,
502 Condition cond = al);
503
504 // Shift instructions
505
506 void asr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
507 Condition cond = al);
508
509 void lsl(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
510 Condition cond = al);
511
512 void lsr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
513 Condition cond = al);
514
515 // Multiply instructions
516
517 void mla(Register dst, Register src1, Register src2, Register srcA,
518 SBit s = LeaveCC, Condition cond = al);
519
520 void mls(Register dst, Register src1, Register src2, Register srcA,
521 Condition cond = al);
522
523 void sdiv(Register dst, Register src1, Register src2, Condition cond = al);
524
525 void udiv(Register dst, Register src1, Register src2, Condition cond = al);
526
527 void mul(Register dst, Register src1, Register src2, SBit s = LeaveCC,
528 Condition cond = al);
529
530 void smmla(Register dst, Register src1, Register src2, Register srcA,
531 Condition cond = al);
532
533 void smmul(Register dst, Register src1, Register src2, Condition cond = al);
534
535 void smlal(Register dstL, Register dstH, Register src1, Register src2,
536 SBit s = LeaveCC, Condition cond = al);
537
538 void smull(Register dstL, Register dstH, Register src1, Register src2,
539 SBit s = LeaveCC, Condition cond = al);
540
541 void umlal(Register dstL, Register dstH, Register src1, Register src2,
542 SBit s = LeaveCC, Condition cond = al);
543
544 void umull(Register dstL, Register dstH, Register src1, Register src2,
545 SBit s = LeaveCC, Condition cond = al);
546
547 // Miscellaneous arithmetic instructions
548
549 void clz(Register dst, Register src, Condition cond = al); // v5 and above
550
551 // Saturating instructions. v6 and above.
552
553 // Unsigned saturate.
554 //
555 // Saturate an optionally shifted signed value to an unsigned range.
556 //
557 // usat dst, #satpos, src
558 // usat dst, #satpos, src, lsl #sh
559 // usat dst, #satpos, src, asr #sh
560 //
561 // Register dst will contain:
562 //
563 // 0, if s < 0
564 // (1 << satpos) - 1, if s > ((1 << satpos) - 1)
565 // s, otherwise
566 //
567 // where s is the contents of src after shifting (if used.)
568 void usat(Register dst, int satpos, const Operand& src, Condition cond = al);
569
570 // Bitfield manipulation instructions. v7 and above.
571
572 void ubfx(Register dst, Register src, int lsb, int width,
573 Condition cond = al);
574
575 void sbfx(Register dst, Register src, int lsb, int width,
576 Condition cond = al);
577
578 void bfc(Register dst, int lsb, int width, Condition cond = al);
579
580 void bfi(Register dst, Register src, int lsb, int width, Condition cond = al);
581
582 void pkhbt(Register dst, Register src1, const Operand& src2,
583 Condition cond = al);
584
585 void pkhtb(Register dst, Register src1, const Operand& src2,
586 Condition cond = al);
587
588 void sxtb(Register dst, Register src, int rotate = 0, Condition cond = al);
589 void sxtab(Register dst, Register src1, Register src2, int rotate = 0,
590 Condition cond = al);
591 void sxth(Register dst, Register src, int rotate = 0, Condition cond = al);
592 void sxtah(Register dst, Register src1, Register src2, int rotate = 0,
593 Condition cond = al);
594
595 void uxtb(Register dst, Register src, int rotate = 0, Condition cond = al);
596 void uxtab(Register dst, Register src1, Register src2, int rotate = 0,
597 Condition cond = al);
598 void uxtb16(Register dst, Register src, int rotate = 0, Condition cond = al);
599 void uxth(Register dst, Register src, int rotate = 0, Condition cond = al);
600 void uxtah(Register dst, Register src1, Register src2, int rotate = 0,
601 Condition cond = al);
602
603 // Reverse the bits in a register.
604 void rbit(Register dst, Register src, Condition cond = al);
605 void rev(Register dst, Register src, Condition cond = al);
606
607 // Status register access instructions
608
609 void mrs(Register dst, SRegister s, Condition cond = al);
610 void msr(SRegisterFieldMask fields, const Operand& src, Condition cond = al);
611
612 // Load/Store instructions
613 void ldr(Register dst, const MemOperand& src, Condition cond = al);
614 void str(Register src, const MemOperand& dst, Condition cond = al);
615 void ldrb(Register dst, const MemOperand& src, Condition cond = al);
616 void strb(Register src, const MemOperand& dst, Condition cond = al);
617 void ldrh(Register dst, const MemOperand& src, Condition cond = al);
618 void strh(Register src, const MemOperand& dst, Condition cond = al);
619 void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
620 void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
621 void ldrd(Register dst1, Register dst2, const MemOperand& src,
622 Condition cond = al);
623 void strd(Register src1, Register src2, const MemOperand& dst,
624 Condition cond = al);
625
626 // Load literal from a pc relative address.
627 void ldr_pcrel(Register dst, int imm12, Condition cond = al);
628
629 // Load/Store exclusive instructions
630 void ldrex(Register dst, Register src, Condition cond = al);
631 void strex(Register src1, Register src2, Register dst, Condition cond = al);
632 void ldrexb(Register dst, Register src, Condition cond = al);
633 void strexb(Register src1, Register src2, Register dst, Condition cond = al);
634 void ldrexh(Register dst, Register src, Condition cond = al);
635 void strexh(Register src1, Register src2, Register dst, Condition cond = al);
636 void ldrexd(Register dst1, Register dst2, Register src, Condition cond = al);
637 void strexd(Register res, Register src1, Register src2, Register dst,
638 Condition cond = al);
639
640 // Preload instructions
641 void pld(const MemOperand& address);
642
643 // Load/Store multiple instructions
644 void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
645 void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
646
647 // Exception-generating instructions and debugging support
648 void stop(Condition cond = al, int32_t code = kDefaultStopCode);
649
650 void bkpt(uint32_t imm16); // v5 and above
651 void svc(uint32_t imm24, Condition cond = al);
652
653 // Synchronization instructions.
654 // On ARMv6, an equivalent CP15 operation will be used.
655 void dmb(BarrierOption option);
656 void dsb(BarrierOption option);
657 void isb(BarrierOption option);
658
659 // Conditional speculation barrier.
660 void csdb();
661
662 // Coprocessor instructions
663
664 void cdp(Coprocessor coproc, int opcode_1, CRegister crd, CRegister crn,
665 CRegister crm, int opcode_2, Condition cond = al);
666
667 void cdp2(Coprocessor coproc, int opcode_1, CRegister crd, CRegister crn,
668 CRegister crm,
669 int opcode_2); // v5 and above
670
671 void mcr(Coprocessor coproc, int opcode_1, Register rd, CRegister crn,
672 CRegister crm, int opcode_2 = 0, Condition cond = al);
673
674 void mcr2(Coprocessor coproc, int opcode_1, Register rd, CRegister crn,
675 CRegister crm,
676 int opcode_2 = 0); // v5 and above
677
678 void mrc(Coprocessor coproc, int opcode_1, Register rd, CRegister crn,
679 CRegister crm, int opcode_2 = 0, Condition cond = al);
680
681 void mrc2(Coprocessor coproc, int opcode_1, Register rd, CRegister crn,
682 CRegister crm,
683 int opcode_2 = 0); // v5 and above
684
685 void ldc(Coprocessor coproc, CRegister crd, const MemOperand& src,
686 LFlag l = Short, Condition cond = al);
687 void ldc(Coprocessor coproc, CRegister crd, Register base, int option,
688 LFlag l = Short, Condition cond = al);
689
690 void ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src,
691 LFlag l = Short); // v5 and above
692 void ldc2(Coprocessor coproc, CRegister crd, Register base, int option,
693 LFlag l = Short); // v5 and above
694
695 // Support for VFP.
696 // All these APIs support S0 to S31 and D0 to D31.
697
698 void vldr(const DwVfpRegister dst, const Register base, int offset,
699 const Condition cond = al);
700 void vldr(const DwVfpRegister dst, const MemOperand& src,
701 const Condition cond = al);
702
703 void vldr(const SwVfpRegister dst, const Register base, int offset,
704 const Condition cond = al);
705 void vldr(const SwVfpRegister dst, const MemOperand& src,
706 const Condition cond = al);
707
708 void vstr(const DwVfpRegister src, const Register base, int offset,
709 const Condition cond = al);
710 void vstr(const DwVfpRegister src, const MemOperand& dst,
711 const Condition cond = al);
712
713 void vstr(const SwVfpRegister src, const Register base, int offset,
714 const Condition cond = al);
715 void vstr(const SwVfpRegister src, const MemOperand& dst,
716 const Condition cond = al);
717
719 DwVfpRegister last, Condition cond = al);
720
722 DwVfpRegister last, Condition cond = al);
723
725 SwVfpRegister last, Condition cond = al);
726
728 SwVfpRegister last, Condition cond = al);
729
730 void vmov(const SwVfpRegister dst, Float32 imm);
731 void vmov(const DwVfpRegister dst, base::Double imm,
732 const Register extra_scratch = no_reg);
733 void vmov(const SwVfpRegister dst, const SwVfpRegister src,
734 const Condition cond = al);
735 void vmov(const DwVfpRegister dst, const DwVfpRegister src,
736 const Condition cond = al);
737 void vmov(const DwVfpRegister dst, const Register src1, const Register src2,
738 const Condition cond = al);
739 void vmov(const Register dst1, const Register dst2, const DwVfpRegister src,
740 const Condition cond = al);
741 void vmov(const SwVfpRegister dst, const Register src,
742 const Condition cond = al);
743 void vmov(const Register dst, const SwVfpRegister src,
744 const Condition cond = al);
745 void vcvt_f64_s32(const DwVfpRegister dst, const SwVfpRegister src,
746 VFPConversionMode mode = kDefaultRoundToZero,
747 const Condition cond = al);
748 void vcvt_f32_s32(const SwVfpRegister dst, const SwVfpRegister src,
749 VFPConversionMode mode = kDefaultRoundToZero,
750 const Condition cond = al);
751 void vcvt_f64_u32(const DwVfpRegister dst, const SwVfpRegister src,
752 VFPConversionMode mode = kDefaultRoundToZero,
753 const Condition cond = al);
754 void vcvt_f32_u32(const SwVfpRegister dst, const SwVfpRegister src,
755 VFPConversionMode mode = kDefaultRoundToZero,
756 const Condition cond = al);
757 void vcvt_s32_f32(const SwVfpRegister dst, const SwVfpRegister src,
758 VFPConversionMode mode = kDefaultRoundToZero,
759 const Condition cond = al);
760 void vcvt_u32_f32(const SwVfpRegister dst, const SwVfpRegister src,
761 VFPConversionMode mode = kDefaultRoundToZero,
762 const Condition cond = al);
763 void vcvt_s32_f64(const SwVfpRegister dst, const DwVfpRegister src,
764 VFPConversionMode mode = kDefaultRoundToZero,
765 const Condition cond = al);
766 void vcvt_u32_f64(const SwVfpRegister dst, const DwVfpRegister src,
767 VFPConversionMode mode = kDefaultRoundToZero,
768 const Condition cond = al);
769 void vcvt_f64_f32(const DwVfpRegister dst, const SwVfpRegister src,
770 VFPConversionMode mode = kDefaultRoundToZero,
771 const Condition cond = al);
772 void vcvt_f32_f64(const SwVfpRegister dst, const DwVfpRegister src,
773 VFPConversionMode mode = kDefaultRoundToZero,
774 const Condition cond = al);
775 void vcvt_f64_s32(const DwVfpRegister dst, int fraction_bits,
776 const Condition cond = al);
777
778 void vmrs(const Register dst, const Condition cond = al);
779 void vmsr(const Register dst, const Condition cond = al);
780
781 void vneg(const DwVfpRegister dst, const DwVfpRegister src,
782 const Condition cond = al);
783 void vneg(const SwVfpRegister dst, const SwVfpRegister src,
784 const Condition cond = al);
785 void vabs(const DwVfpRegister dst, const DwVfpRegister src,
786 const Condition cond = al);
787 void vabs(const SwVfpRegister dst, const SwVfpRegister src,
788 const Condition cond = al);
789 void vadd(const DwVfpRegister dst, const DwVfpRegister src1,
790 const DwVfpRegister src2, const Condition cond = al);
791 void vadd(const SwVfpRegister dst, const SwVfpRegister src1,
792 const SwVfpRegister src2, const Condition cond = al);
793 void vsub(const DwVfpRegister dst, const DwVfpRegister src1,
794 const DwVfpRegister src2, const Condition cond = al);
795 void vsub(const SwVfpRegister dst, const SwVfpRegister src1,
796 const SwVfpRegister src2, const Condition cond = al);
797 void vmul(const DwVfpRegister dst, const DwVfpRegister src1,
798 const DwVfpRegister src2, const Condition cond = al);
799 void vmul(const SwVfpRegister dst, const SwVfpRegister src1,
800 const SwVfpRegister src2, const Condition cond = al);
801 void vmla(const DwVfpRegister dst, const DwVfpRegister src1,
802 const DwVfpRegister src2, const Condition cond = al);
803 void vmla(const SwVfpRegister dst, const SwVfpRegister src1,
804 const SwVfpRegister src2, const Condition cond = al);
805 void vmls(const DwVfpRegister dst, const DwVfpRegister src1,
806 const DwVfpRegister src2, const Condition cond = al);
807 void vmls(const SwVfpRegister dst, const SwVfpRegister src1,
808 const SwVfpRegister src2, const Condition cond = al);
809 void vdiv(const DwVfpRegister dst, const DwVfpRegister src1,
810 const DwVfpRegister src2, const Condition cond = al);
811 void vdiv(const SwVfpRegister dst, const SwVfpRegister src1,
812 const SwVfpRegister src2, const Condition cond = al);
813 void vcmp(const DwVfpRegister src1, const DwVfpRegister src2,
814 const Condition cond = al);
815 void vcmp(const SwVfpRegister src1, const SwVfpRegister src2,
816 const Condition cond = al);
817 void vcmp(const DwVfpRegister src1, const double src2,
818 const Condition cond = al);
819 void vcmp(const SwVfpRegister src1, const float src2,
820 const Condition cond = al);
821
822 void vmaxnm(const DwVfpRegister dst, const DwVfpRegister src1,
823 const DwVfpRegister src2);
824 void vmaxnm(const SwVfpRegister dst, const SwVfpRegister src1,
825 const SwVfpRegister src2);
826 void vminnm(const DwVfpRegister dst, const DwVfpRegister src1,
827 const DwVfpRegister src2);
828 void vminnm(const SwVfpRegister dst, const SwVfpRegister src1,
829 const SwVfpRegister src2);
830
831 // VSEL supports cond in {eq, ne, ge, lt, gt, le, vs, vc}.
832 void vsel(const Condition cond, const DwVfpRegister dst,
833 const DwVfpRegister src1, const DwVfpRegister src2);
834 void vsel(const Condition cond, const SwVfpRegister dst,
835 const SwVfpRegister src1, const SwVfpRegister src2);
836
837 void vsqrt(const DwVfpRegister dst, const DwVfpRegister src,
838 const Condition cond = al);
839 void vsqrt(const SwVfpRegister dst, const SwVfpRegister src,
840 const Condition cond = al);
841
842 // ARMv8 rounding instructions (Scalar).
843 void vrinta(const SwVfpRegister dst, const SwVfpRegister src);
844 void vrinta(const DwVfpRegister dst, const DwVfpRegister src);
845 void vrintn(const SwVfpRegister dst, const SwVfpRegister src);
846 void vrintn(const DwVfpRegister dst, const DwVfpRegister src);
847 void vrintm(const SwVfpRegister dst, const SwVfpRegister src);
848 void vrintm(const DwVfpRegister dst, const DwVfpRegister src);
849 void vrintp(const SwVfpRegister dst, const SwVfpRegister src);
850 void vrintp(const DwVfpRegister dst, const DwVfpRegister src);
851 void vrintz(const SwVfpRegister dst, const SwVfpRegister src,
852 const Condition cond = al);
853 void vrintz(const DwVfpRegister dst, const DwVfpRegister src,
854 const Condition cond = al);
855
856 // Support for NEON.
857
858 // All these APIs support D0 to D31 and Q0 to Q15.
859 void vld1(NeonSize size, const NeonListOperand& dst,
860 const NeonMemOperand& src);
861 // vld1s(ingle element to one lane).
862 void vld1s(NeonSize size, const NeonListOperand& dst, uint8_t index,
863 const NeonMemOperand& src);
864 void vld1r(NeonSize size, const NeonListOperand& dst,
865 const NeonMemOperand& src);
866 void vst1(NeonSize size, const NeonListOperand& src,
867 const NeonMemOperand& dst);
868 // vst1s(single element from one lane).
869 void vst1s(NeonSize size, const NeonListOperand& src, uint8_t index,
870 const NeonMemOperand& dst);
871 // dt represents the narrower type
873 // dst_dt represents the narrower type, src_dt represents the src type.
875 QwNeonRegister src);
876
877 // Only unconditional core <-> scalar moves are currently supported.
878 void vmov(NeonDataType dt, DwVfpRegister dst, int index, Register src);
879 void vmov(NeonDataType dt, Register dst, DwVfpRegister src, int index);
880
881 void vmov(DwVfpRegister dst, uint64_t imm);
882 void vmov(QwNeonRegister dst, uint64_t imm);
884 void vdup(NeonSize size, QwNeonRegister dst, Register src);
885 void vdup(NeonSize size, QwNeonRegister dst, DwVfpRegister src, int index);
886 void vdup(NeonSize size, DwVfpRegister dst, DwVfpRegister src, int index);
887
892
900
910 QwNeonRegister src2);
912 QwNeonRegister src2);
915 QwNeonRegister src2);
917 QwNeonRegister src2);
919 DwVfpRegister src2);
922 QwNeonRegister src2);
924 DwVfpRegister src2);
927 QwNeonRegister src2);
930 QwNeonRegister src2);
933 DwVfpRegister src2);
935 DwVfpRegister src2);
937 DwVfpRegister src2);
938
942 QwNeonRegister src2);
943
944 // ARMv8 rounding instructions (NEON).
946 const QwNeonRegister src);
948 const QwNeonRegister src);
950 const QwNeonRegister src);
952 const QwNeonRegister src);
953
954 void vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift);
956 QwNeonRegister shift);
957 void vshr(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src, int shift);
958 void vshr(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift);
959 void vsli(NeonSize size, DwVfpRegister dst, DwVfpRegister src, int shift);
960 void vsri(NeonSize size, DwVfpRegister dst, DwVfpRegister src, int shift);
961 void vsra(NeonDataType size, DwVfpRegister dst, DwVfpRegister src, int imm);
962
963 // vrecpe and vrsqrte only support floating point lanes.
969 QwNeonRegister src2);
972 QwNeonRegister src2);
973 void vceq(NeonSize size, QwNeonRegister dst, QwNeonRegister src, int value);
976 QwNeonRegister src2);
979 QwNeonRegister src2);
980 void vclt(NeonSize size, QwNeonRegister dst, QwNeonRegister src, int value);
982 QwNeonRegister src2);
984 int bytes);
985 void vzip(NeonSize size, DwVfpRegister src1, DwVfpRegister src2);
987 void vuzp(NeonSize size, DwVfpRegister src1, DwVfpRegister src2);
992 void vtrn(NeonSize size, DwVfpRegister src1, DwVfpRegister src2);
994 void vtbl(DwVfpRegister dst, const NeonListOperand& list,
995 DwVfpRegister index);
996 void vtbx(DwVfpRegister dst, const NeonListOperand& list,
997 DwVfpRegister index);
998
1000
1001 // Pseudo instructions
1002
1003 // Different nop operations are used by the code generator to detect certain
1004 // states of the generated code.
1006 NON_MARKING_NOP = 0,
1008 // IC markers.
1012 // Helper values.
1014 FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
1016
1017 void nop(int type = 0); // 0 is the default non-marking type.
1018
  // Push a single core register on the stack: str with NegPreIndex first
  // decrements sp by 4, then stores `src` at the new sp.
  void push(Register src, Condition cond = al) {
    str(src, MemOperand(sp, 4, NegPreIndex), cond);
  }
1022
  // Pop a single core register off the stack: ldr with PostIndex loads
  // `dst` from [sp], then increments sp by 4 (writeback).
  void pop(Register dst, Condition cond = al) {
    ldr(dst, MemOperand(sp, 4, PostIndex), cond);
  }
1026
1027 void pop();
1028
  // Push a NEON Q register as its two D-register halves, using a
  // decrement-before store-multiple with sp writeback (vstm db_w).
  void vpush(QwNeonRegister src, Condition cond = al) {
    vstm(db_w, sp, src.low(), src.high(), cond);
  }
1032
  // Push one double-precision VFP register (one-register vstm range,
  // decrement-before with sp writeback).
  void vpush(DwVfpRegister src, Condition cond = al) {
    vstm(db_w, sp, src, src, cond);
  }
1036
  // Push one single-precision VFP register (one-register vstm range,
  // decrement-before with sp writeback).
  void vpush(SwVfpRegister src, Condition cond = al) {
    vstm(db_w, sp, src, src, cond);
  }
1040
  // Pop one double-precision VFP register off the stack (one-register
  // vldm range, increment-after with sp writeback).
  void vpop(DwVfpRegister dst, Condition cond = al) {
    vldm(ia_w, sp, dst, dst, cond);
  }
1044
  // Jump unconditionally to the given label (a plain b with cond = al).
  void jmp(Label* L) { b(L, al); }
1047
1048 // Check the code size generated from label to here.
1050 return pc_offset() - label->pos();
1051 }
1052
1053 // Check the number of instructions generated from label to here.
1055 return SizeOfCodeGeneratedSince(label) / kInstrSize;
1056 }
1057
1058 // Check whether an immediate fits an addressing mode 1 instruction.
1059 static bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
1060
1061 // Check whether an immediate fits an addressing mode 2 instruction.
1063
1064 // Class for scoping postponing the constant pool generation.
1066 public:
1067 explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
1068 assem_->StartBlockConstPool();
1069 }
1070 ~BlockConstPoolScope() { assem_->EndBlockConstPool(); }
1071
1072 private:
1074
1076 };
1077
1078 // Unused on this architecture.
1080
1081 // Record a deoptimization reason that can be used by a log or cpu profiler.
1082 // Use --trace-deopt to enable.
1083 void RecordDeoptReason(DeoptimizeReason reason, uint32_t node_id,
1084 SourcePosition position, int id);
1085
1086 // Record the emission of a constant pool.
1087 //
1088 // The emission of constant pool depends on the size of the code generated and
1089 // the number of RelocInfo recorded.
1090 // The Debug mechanism needs to map code offsets between two versions of a
1091 // function, compiled with and without debugger support (see for example
1092 // Debug::PrepareForBreakPoints()).
1093 // Compiling functions with debugger support generates additional code
1094 // (DebugCodegen::GenerateSlot()). This may affect the emission of the
1095 // constant pools and cause the version of the code with debugger support to
1096 // have constant pools generated in different places.
1097 // Recording the position and size of emitted constant pools allows to
1098 // correctly compute the offset mappings between the different versions of a
1099 // function in all situations.
1100 //
1101 // The parameter indicates the size of the constant pool (in bytes), including
1102 // the marker and branch over the data.
1103 void RecordConstPool(int size);
1104
1105 // Writes a single byte or word of data in the code stream. Used
1106 // for inline tables, e.g., jump-tables. CheckConstantPool() should be
1107 // called before any use of db/dd/dq/dp to ensure that constant pools
1108 // are not emitted as part of the tables generated.
1109 void db(uint8_t data);
1110 void dd(uint32_t data);
1111 void dq(uint64_t data);
1112 void dp(uintptr_t data) { dd(data); }
1113
1114 // Read/patch instructions
1116 return *reinterpret_cast<Instr*>(buffer_start_ + pos);
1117 }
1119 *reinterpret_cast<Instr*>(buffer_start_ + pos) = instr;
1120 }
1121 static Instr instr_at(Address pc) { return *reinterpret_cast<Instr*>(pc); }
  // Overwrite the instruction word at an absolute address.
  static void instr_at_put(Address pc, Instr instr) {
    *reinterpret_cast<Instr*>(pc) = instr;
  }
1139 static bool IsPush(Instr instr);
1140 static bool IsPop(Instr instr);
1148 static bool IsBlxReg(Instr instr);
1149 static bool IsBlxIp(Instr instr);
1155 static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
1156 static bool IsMovImmed(Instr instr);
1157 static bool IsOrrImmed(Instr instr);
1158 static bool IsMovT(Instr instr);
1160 static bool IsMovW(Instr instr);
1162 static Instr EncodeMovwImmediate(uint32_t immediate);
1163 static Instr PatchMovwImmediate(Instr instruction, uint32_t immediate);
1165 static Instr PatchShiftImm(Instr instr, int immed);
1166
1167 // Constants are accessed via pc relative addressing, which can reach -4095 to
1168 // 4095 for integer PC-relative loads, and -1020 to 1020 for floating-point
1169 // PC-relative loads, thereby defining a maximum distance between the
1170 // instruction and the accessed constant. Additionally, PC-relative loads
1171 // start at a delta from the actual load instruction's PC, so we can add this
1172 // on to the (positive) distance.
1173 static constexpr int kMaxDistToPcRelativeConstant =
1174 4095 + Instruction::kPcLoadDelta;
1175 // The constant pool needs to be jumped over, and has a marker, so the actual
1176 // distance from the instruction and start of the constant pool has to include
1177 // space for these two instructions.
1178 static constexpr int kMaxDistToIntPool =
1179 kMaxDistToPcRelativeConstant - 2 * kInstrSize;
1180 // Experimentally derived as sufficient for ~95% of compiles.
1181 static constexpr int kTypicalNumPending32Constants = 32;
1182 // The maximum number of pending constants is reached by a sequence of only
1183 // constant loads, which limits it to the number of constant loads that can
1184 // fit between the first constant load and the distance to the constant pool.
1185 static constexpr int kMaxNumPending32Constants =
1186 kMaxDistToIntPool / kInstrSize;
1187
1188 // Postpone the generation of the constant pool for the specified number of
1189 // instructions.
1190 void BlockConstPoolFor(int instructions);
1191
1192 // Check if is time to emit a constant pool.
1193 void CheckConstPool(bool force_emit, bool require_jump);
1194
1196 if (V8_UNLIKELY(pc_offset() >= constant_pool_deadline_)) {
1197 CheckConstPool(false, true);
1198 }
1199 }
1200
1201 // Move a 32-bit immediate into a register, potentially via the constant pool.
1202 void Move32BitImmediate(Register rd, const Operand& x, Condition cond = al);
1203
1204 // Get the code target object for a pc-relative call or jump.
1205 V8_INLINE Handle<Code> relative_code_target_object_handle_at(
1206 Address pc_) const;
1207
1208 protected:
1209 int buffer_space() const { return reloc_info_writer.pos() - pc_; }
1210
1211 // Decode branch instruction at pos and return branch target pos
1212 int target_at(int pos);
1213
1214 // Patch branch instruction at pos to branch to given branch target pos
1215 void target_at_put(int pos, int target_pos);
1216
1217 // Prevent constant pool emission until EndBlockConstPool is called.
1218 // Calls to this function can be nested but must be followed by an equal
1219 // number of calls to EndBlockConstPool.
1221 if (const_pool_blocked_nesting_++ == 0) {
1222 // Prevent constant pool checks happening by resetting the deadline.
1223 constant_pool_deadline_ = kMaxInt;
1224 }
1225 }
1226
1227 // Resume constant pool emission. Needs to be called as many times as
1228 // StartBlockConstPool to have an effect.
1230 if (--const_pool_blocked_nesting_ == 0) {
1231 if (first_const_pool_32_use_ >= 0) {
1232#ifdef DEBUG
1233 // Check the constant pool hasn't been blocked for too long.
1234 DCHECK_LE(pc_offset(), first_const_pool_32_use_ + kMaxDistToIntPool);
1235#endif
1236 // Reset the constant pool check back to the deadline.
1237 constant_pool_deadline_ = first_const_pool_32_use_ + kCheckPoolDeadline;
1238 }
1239 }
1240 }
1241
1243 return (const_pool_blocked_nesting_ > 0) ||
1244 (pc_offset() < no_const_pool_before_);
1245 }
1246
1248 bool result = !pending_32_bit_constants_.empty();
1249 DCHECK_EQ(result, first_const_pool_32_use_ != -1);
1250 return result;
1251 }
1252
1254 DCHECK(reg.is_valid());
1255 return IsEnabled(VFP32DREGS) ||
1256 (reg.code() < LowDwVfpRegister::kNumRegisters);
1257 }
1258
1260 DCHECK(reg.is_valid());
1261 return IsEnabled(VFP32DREGS) ||
1262 (reg.code() < LowDwVfpRegister::kNumRegisters / 2);
1263 }
1264
1265 inline void emit(Instr x);
1266
1267 // InstructionStream generation
1268 // The relocation writer's position is at least kGap bytes below the end of
1269 // the generated instructions. This is so that multi-instruction sequences do
1270 // not have to check for overflow. The same is true for writes of large
1271 // relocation info entries.
1272 static constexpr int kGap = 32;
1273 static_assert(AssemblerBase::kMinimalBufferSize >= 2 * kGap);
1274
1275 // Relocation info generation
1276 // Each relocation is encoded as a variable size value
1277 static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
1279
1280 // ConstantPoolEntry records are used during code generation as temporary
1281 // containers for constants and code target addresses until they are emitted
1282 // to the constant pool. These records are temporarily stored in a separate
1283 // buffer until a constant pool is emitted.
1284 // If every instruction in a long sequence is accessing the pool, we need one
1285 // pending relocation entry per instruction.
1286
1287 // The buffers of pending constant pool entries.
1290
1291 // Scratch registers available for use by the Assembler.
1294
1295 private:
1296 // Avoid overflows for displacements etc.
1297 static const int kMaximalBufferSize = 512 * MB;
1298
1299 // Constant pool generation
1300 // Pools are emitted in the instruction stream, preferably after unconditional
1301 // jumps or after returns from functions (in dead code locations).
1302 // If a long code sequence does not contain unconditional jumps, it is
1303 // necessary to emit the constant pool before the pool gets too far from the
1304 // location it is accessed from. In this case, we emit a jump over the emitted
1305 // constant pool.
1306 // Constants in the pool may be addresses of functions that gets relocated;
1307 // if so, a relocation info entry is associated to the constant pool entry.
1308
1309 // Repeated checking whether the constant pool should be emitted is rather
1310 // expensive. Instead, we check once a deadline is hit; the deadline being
1311 // when there is a possibility that MaybeCheckConstPool won't be called before
1312 // kMaxDistToIntPoolWithHeader is exceeded. Since MaybeCheckConstPool is
1313 // called in CheckBuffer, this means that kGap is an upper bound on this
1314 // check. Use 2 * kGap just to give it some slack around BlockConstPoolScopes.
1315 static constexpr int kCheckPoolDeadline = kMaxDistToIntPool - 2 * kGap;
1316
1317 // pc offset of the upcoming constant pool deadline. Equivalent to
1318 // first_const_pool_32_use_ + kCheckPoolDeadline.
1320
1321 // Emission of the constant pool may be blocked in some code sequences.
1322 int const_pool_blocked_nesting_; // Block emission if this is not zero.
1323 int no_const_pool_before_; // Block emission before this pc offset.
1324
1325 // Keep track of the first instruction requiring a constant pool entry
1326 // since the previous constant pool was emitted.
1328
1329 // The bound position, before this we cannot do instruction elimination.
1331
1332 V8_INLINE void CheckBuffer();
1333 void GrowBuffer();
1334
1335 // Instruction generation
1337 // Attempt to encode operand |x| for instruction |instr| and return true on
1338 // success. The result will be encoded in |instr| directly. This method may
1339 // change the opcode if deemed beneficial, for instance, MOV may be turned
1340 // into MVN, ADD into SUB, AND into BIC, ...etc. The only reason this method
1341 // may fail is that the operand is an immediate that cannot be encoded.
1343
1348
1349 // Labels
1350 void print(const Label* L);
1351 void bind_to(Label* L, int pos);
1352 void next(Label* L);
1353
1354 // Record reloc info for current pc_
1355 void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
1357 intptr_t value);
1358 void AllocateAndInstallRequestedHeapNumbers(LocalIsolate* isolate);
1359
1360 int WriteCodeComments();
1361
1362 friend class RelocInfo;
1364 friend class EnsureSpace;
1366};
1367
// Helper whose constructor is expected to make sure the assembler buffer
// has room for more code (see Assembler::CheckBuffer/GrowBuffer); the
// V8_INLINE constructor is defined out of line.
class EnsureSpace {
 public:
  V8_INLINE explicit EnsureSpace(Assembler* assembler);
};
1372
1373class PatchingAssembler : public Assembler {
1374 public:
1375 PatchingAssembler(const AssemblerOptions& options, uint8_t* address,
1376 int instructions);
1378
1379 void Emit(Address addr);
1381};
1382
1383// This scope utility allows scratch registers to be managed safely. The
1384// Assembler's GetScratchRegisterList() is used as a pool of scratch
1385// registers. These registers can be allocated on demand, and will be returned
1386// at the end of the scope.
1387//
1388// When the scope ends, the Assembler's list will be restored to its original
1389// state, even if the list is modified by some other means. Note that this scope
1390// can be nested but the destructors need to run in the opposite order as the
1391// constructors. We do not have assertions for this.
1393 public:
1395 : assembler_(assembler),
1396 old_available_(*assembler->GetScratchRegisterList()),
1397 old_available_vfp_(*assembler->GetScratchVfpRegisterList()) {}
1398
1400 *assembler_->GetScratchRegisterList() = old_available_;
1401 *assembler_->GetScratchVfpRegisterList() = old_available_vfp_;
1402 }
1403
1404 // Take a register from the list and return it.
1406 return assembler_->GetScratchRegisterList()->PopFirst();
1407 }
1408 SwVfpRegister AcquireS() { return AcquireVfp<SwVfpRegister>(); }
1409 LowDwVfpRegister AcquireLowD() { return AcquireVfp<LowDwVfpRegister>(); }
1411 DwVfpRegister reg = AcquireVfp<DwVfpRegister>();
1412 DCHECK(assembler_->VfpRegisterIsAvailable(reg));
1413 return reg;
1414 }
1416 QwNeonRegister reg = AcquireVfp<QwNeonRegister>();
1417 DCHECK(assembler_->VfpRegisterIsAvailable(reg));
1418 return reg;
1419 }
1420
  // Check whether at least one core scratch register remains available.
  bool CanAcquire() const {
    return !assembler_->GetScratchRegisterList()->is_empty();
  }
1425 bool CanAcquireS() const { return CanAcquireVfp<SwVfpRegister>(); }
1426 bool CanAcquireD() const { return CanAcquireVfp<DwVfpRegister>(); }
1427 bool CanAcquireQ() const { return CanAcquireVfp<QwNeonRegister>(); }
1428
1429 RegList Available() { return *assembler_->GetScratchRegisterList(); }
  // Replace the assembler's core scratch register list wholesale.
  void SetAvailable(RegList available) {
    *assembler_->GetScratchRegisterList() = available;
  }
1433
1434 VfpRegList AvailableVfp() { return *assembler_->GetScratchVfpRegisterList(); }
  // Replace the assembler's VFP scratch register list wholesale.
  void SetAvailableVfp(VfpRegList available) {
    *assembler_->GetScratchVfpRegisterList() = available;
  }
1438
1439 void Include(const Register& reg1, const Register& reg2 = no_reg) {
1440 RegList* available = assembler_->GetScratchRegisterList();
1441 DCHECK_NOT_NULL(available);
1442 DCHECK(!available->has(reg1));
1443 DCHECK(!available->has(reg2));
1444 available->set(reg1);
1445 available->set(reg2);
1446 }
1447 void Include(RegList list) {
1448 RegList* available = assembler_->GetScratchRegisterList();
1449 DCHECK_NOT_NULL(available);
1450 *available = *available | list;
1451 }
1452 void Include(VfpRegList list) {
1453 VfpRegList* available = assembler_->GetScratchVfpRegisterList();
1454 DCHECK_NOT_NULL(available);
1455 DCHECK_EQ((*available & list), 0x0);
1456 *available = *available | list;
1457 }
1458 void Exclude(const Register& reg1, const Register& reg2 = no_reg) {
1459 RegList* available = assembler_->GetScratchRegisterList();
1460 DCHECK_NOT_NULL(available);
1461 DCHECK(available->has(reg1));
1462 DCHECK_IMPLIES(reg2.is_valid(), available->has(reg2));
1463 available->clear(RegList{reg1, reg2});
1464 }
1465 void Exclude(VfpRegList list) {
1466 VfpRegList* available = assembler_->GetScratchVfpRegisterList();
1467 DCHECK_NOT_NULL(available);
1468 DCHECK_EQ((*available | list), *available);
1469 *available = *available & ~list;
1470 }
1471
1472 private:
1473 friend class Assembler;
1474 friend class MacroAssembler;
1475
1476 template <typename T>
1477 bool CanAcquireVfp() const;
1478
1479 template <typename T>
1480 T AcquireVfp();
1481
1483 // Available scratch registers at the start of this scope.
1486};
1487
1488// Helper struct for load lane and store lane to indicate which opcode to use
1489// and what memory size to be encoded in the opcode, and the new lane index.
1490class LoadStoreLaneParams {
1491 public:
1494 uint8_t laneidx;
1495 // The register mapping on ARM (1 Q to 2 D), means that loading/storing high
1496 // lanes of a Q register is equivalent to loading/storing the high D reg,
1497 // modulo number of lanes in a D reg. This constructor decides, based on the
1498 // laneidx and load/store size, whether the low or high D reg is accessed, and
1499 // what the new lane index is.
1501
1502 private:
1504 : low_op(laneidx < lanes), sz(sz), laneidx(laneidx % lanes) {}
1505};
1506
1507} // namespace internal
1508} // namespace v8
1509
1510#endif // V8_CODEGEN_ARM_ASSEMBLER_ARM_H_
SourcePosition pos
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope)
void vcmp(const DwVfpRegister src1, const double src2, const Condition cond=al)
void AddrMode1(Instr instr, Register rd, Register rn, const Operand &x)
void vsub(NeonSize size, QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void vmov(NeonDataType dt, Register dst, DwVfpRegister src, int index)
void vdup(NeonSize size, DwVfpRegister dst, DwVfpRegister src, int index)
void uxtb(Register dst, Register src, int rotate=0, Condition cond=al)
void vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift)
void vmin(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void rsb(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void vceq(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void vstr(const SwVfpRegister src, const Register base, int offset, const Condition cond=al)
void GetCode(LocalIsolate *isolate, CodeDesc *desc)
RelocInfoWriter reloc_info_writer
void pkhtb(Register dst, Register src1, const Operand &src2, Condition cond=al)
void vrintp(const DwVfpRegister dst, const DwVfpRegister src)
void cmp(Register src1, Register src2, Condition cond=al)
void vmrs(const Register dst, const Condition cond=al)
void vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src)
void umlal(Register dstL, Register dstH, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
void vcvt_u32_f64(const SwVfpRegister dst, const DwVfpRegister src, VFPConversionMode mode=kDefaultRoundToZero, const Condition cond=al)
void Move32BitImmediate(Register rd, const Operand &x, Condition cond=al)
void ldrd(Register dst1, Register dst2, const MemOperand &src, Condition cond=al)
void b(Label *L, Condition cond=al)
bool VfpRegisterIsAvailable(QwNeonRegister reg)
void vqsub(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
static bool IsPush(Instr instr)
void vmov(QwNeonRegister dst, QwNeonRegister src)
void vcvt_f32_s32(QwNeonRegister dst, QwNeonRegister src)
void usat(Register dst, int satpos, const Operand &src, Condition cond=al)
static bool IsLdrRegFpOffset(Instr instr)
void vswp(QwNeonRegister dst, QwNeonRegister src)
void vshr(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src, int shift)
void and_(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void vldr(const SwVfpRegister dst, const MemOperand &src, const Condition cond=al)
void vneg(const DwVfpRegister dst, const DwVfpRegister src, const Condition cond=al)
void vmov(DwVfpRegister dst, uint64_t imm)
void vrecps(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
static bool IsCmpRegister(Instr instr)
void vldm(BlockAddrMode am, Register base, DwVfpRegister first, DwVfpRegister last, Condition cond=al)
void AddrMode5(Instr instr, CRegister crd, const MemOperand &x)
void RecordConstPool(int size)
bool has_pending_constants() const
static int GetCmpImmediateRawImmediate(Instr instr)
void vuzp(NeonSize size, QwNeonRegister src1, QwNeonRegister src2)
void vzip(NeonSize size, DwVfpRegister src1, DwVfpRegister src2)
VfpRegList scratch_vfp_register_list_
void bfi(Register dst, Register src, int lsb, int width, Condition cond=al)
void vadd(const SwVfpRegister dst, const SwVfpRegister src1, const SwVfpRegister src2, const Condition cond=al)
void mov_label_offset(Register dst, Label *label)
void cdp(Coprocessor coproc, int opcode_1, CRegister crd, CRegister crn, CRegister crm, int opcode_2, Condition cond=al)
void cmp_raw_immediate(Register src1, int raw_immediate, Condition cond=al)
void ldc2(Coprocessor coproc, CRegister crd, const MemOperand &src, LFlag l=Short)
void strex(Register src1, Register src2, Register dst, Condition cond=al)
void mls(Register dst, Register src1, Register src2, Register srcA, Condition cond=al)
void vcge(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void smull(Register dstL, Register dstH, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
void mcr(Coprocessor coproc, int opcode_1, Register rd, CRegister crn, CRegister crm, int opcode_2=0, Condition cond=al)
void vrinta(const SwVfpRegister dst, const SwVfpRegister src)
static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset)
void vrsqrts(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void b(int branch_offset, Condition cond=al, RelocInfo::Mode rmode=RelocInfo::NO_INFO)
Assembler(const MaybeAssemblerZone &, const AssemblerOptions &options, std::unique_ptr< AssemblerBuffer > buffer={})
int branch_offset(Label *L)
static bool IsPop(Instr instr)
void mrc(Coprocessor coproc, int opcode_1, Register rd, CRegister crn, CRegister crm, int opcode_2=0, Condition cond=al)
void sbc(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
static VfpRegList DefaultFPTmpList()
static bool IsStrRegisterImmediate(Instr instr)
void veor(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void vmaxnm(const SwVfpRegister dst, const SwVfpRegister src1, const SwVfpRegister src2)
void stm(BlockAddrMode am, Register base, RegList src, Condition cond=al)
void vmls(const SwVfpRegister dst, const SwVfpRegister src1, const SwVfpRegister src2, const Condition cond=al)
void vmsr(const Register dst, const Condition cond=al)
void vdiv(const SwVfpRegister dst, const SwVfpRegister src1, const SwVfpRegister src2, const Condition cond=al)
void vrintz(const DwVfpRegister dst, const DwVfpRegister src, const Condition cond=al)
void vcvt_f32_u32(const SwVfpRegister dst, const SwVfpRegister src, VFPConversionMode mode=kDefaultRoundToZero, const Condition cond=al)
void sxtb(Register dst, Register src, int rotate=0, Condition cond=al)
void smmla(Register dst, Register src1, Register src2, Register srcA, Condition cond=al)
void vpadd(DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2)
static bool IsCmpImmediate(Instr instr)
void vstr(const SwVfpRegister src, const MemOperand &dst, const Condition cond=al)
void ConstantPoolAddEntry(int position, RelocInfo::Mode rmode, intptr_t value)
void vmul(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void mul(Register dst, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
void adc(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void vminnm(const DwVfpRegister dst, const DwVfpRegister src1, const DwVfpRegister src2)
void push(Register src, Condition cond=al)
void vadd(NeonSize size, QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void vcvt_u32_f32(QwNeonRegister dst, QwNeonRegister src)
static bool IsTstImmediate(Instr instr)
static bool IsNop(Instr instr, int type=NON_MARKING_NOP)
void vrintm(const DwVfpRegister dst, const DwVfpRegister src)
int InstructionsGeneratedSince(Label *label)
bool ImmediateFitsAddrMode2Instruction(int32_t imm32)
void vceq(NeonSize size, QwNeonRegister dst, QwNeonRegister src, int value)
void bl(Condition cond, Label *L)
static Instr SetVldrDRegisterImmediateOffset(Instr instr, int offset)
void vldr(const DwVfpRegister dst, const MemOperand &src, const Condition cond=al)
void add(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void CheckConstPool(bool force_emit, bool require_jump)
void vmin(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void vdup(NeonSize size, QwNeonRegister dst, DwVfpRegister src, int index)
void uxtab(Register dst, Register src1, Register src2, int rotate=0, Condition cond=al)
void instr_at_put(int pos, Instr instr)
void teq(Register src1, const Operand &src2, Condition cond=al)
void sub(Register dst, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
void sbfx(Register dst, Register src, int lsb, int width, Condition cond=al)
void vmvn(QwNeonRegister dst, QwNeonRegister src)
void vtst(NeonSize size, QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void vldm(BlockAddrMode am, Register base, SwVfpRegister first, SwVfpRegister last, Condition cond=al)
void bl(int branch_offset, Condition cond=al, RelocInfo::Mode rmode=RelocInfo::NO_INFO)
void vmov(const SwVfpRegister dst, Float32 imm)
void vsqrt(const DwVfpRegister dst, const DwVfpRegister src, const Condition cond=al)
void vld1r(NeonSize size, const NeonListOperand &dst, const NeonMemOperand &src)
void msr(SRegisterFieldMask fields, const Operand &src, Condition cond=al)
void vrintz(NeonDataType dt, const QwNeonRegister dst, const QwNeonRegister src)
void vcvt_f64_s32(const DwVfpRegister dst, const SwVfpRegister src, VFPConversionMode mode=kDefaultRoundToZero, const Condition cond=al)
void vneg(QwNeonRegister dst, QwNeonRegister src)
void vpadal(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src)
void vmul(NeonSize size, QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void vmls(const DwVfpRegister dst, const DwVfpRegister src1, const DwVfpRegister src2, const Condition cond=al)
static Instr SetAddRegisterImmediateOffset(Instr instr, int offset)
void bl(Label *L, Condition cond=al)
void cmp(Register src1, const Operand &src2, Condition cond=al)
void vcvt_f32_u32(QwNeonRegister dst, QwNeonRegister src)
void ldrexb(Register dst, Register src, Condition cond=al)
void BlockConstPoolFor(int instructions)
void rsc(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void vmov(const DwVfpRegister dst, base::Double imm, const Register extra_scratch=no_reg)
void mov(Register dst, Register src, SBit s=LeaveCC, Condition cond=al)
void sxth(Register dst, Register src, int rotate=0, Condition cond=al)
void vsli(NeonSize size, DwVfpRegister dst, DwVfpRegister src, int shift)
bool VfpRegisterIsAvailable(DwVfpRegister reg)
void vqmovn(NeonDataType dst_dt, NeonDataType src_dt, DwVfpRegister dst, QwNeonRegister src)
void vshr(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift)
void lsr(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void vmla(const DwVfpRegister dst, const DwVfpRegister src1, const DwVfpRegister src2, const Condition cond=al)
void eor(Register dst, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
void vtbl(DwVfpRegister dst, const NeonListOperand &list, DwVfpRegister index)
void vpush(DwVfpRegister src, Condition cond=al)
void sxtah(Register dst, Register src1, Register src2, int rotate=0, Condition cond=al)
void vpmin(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2)
void ldr_pcrel(Register dst, int imm12, Condition cond=al)
void vrecpe(QwNeonRegister dst, QwNeonRegister src)
VfpRegList * GetScratchVfpRegisterList()
static bool IsMovImmed(Instr instr)
void vzip(NeonSize size, QwNeonRegister src1, QwNeonRegister src2)
static Instr SetStrRegisterImmediateOffset(Instr instr, int offset)
static bool IsVldrDPcImmediateOffset(Instr instr)
static Register GetRn(Instr instr)
void blx(Register target, Condition cond=al)
static bool IsStrRegFpOffset(Instr instr)
void vabs(QwNeonRegister dst, QwNeonRegister src)
void vsra(NeonDataType size, DwVfpRegister dst, DwVfpRegister src, int imm)
void ldrsh(Register dst, const MemOperand &src, Condition cond=al)
void vcvt_f64_s32(const DwVfpRegister dst, int fraction_bits, const Condition cond=al)
void vneg(const SwVfpRegister dst, const SwVfpRegister src, const Condition cond=al)
void vpaddl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src)
RegList * GetScratchRegisterList()
void AddrMode3(Instr instr, Register rd, const MemOperand &x)
void vsel(const Condition cond, const SwVfpRegister dst, const SwVfpRegister src1, const SwVfpRegister src2)
V8_INLINE void MaybeCheckConstPool()
void ldrsb(Register dst, const MemOperand &src, Condition cond=al)
void vsqrt(const SwVfpRegister dst, const SwVfpRegister src, const Condition cond=al)
void smlal(Register dstL, Register dstH, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
void pop(Register dst, Condition cond=al)
void vpop(DwVfpRegister dst, Condition cond=al)
void vcvt_f64_f32(const DwVfpRegister dst, const SwVfpRegister src, VFPConversionMode mode=kDefaultRoundToZero, const Condition cond=al)
static Instr EncodeMovwImmediate(uint32_t immediate)
void str(Register src, const MemOperand &dst, Condition cond=al)
void vstm(BlockAddrMode am, Register base, DwVfpRegister first, DwVfpRegister last, Condition cond=al)
void ldrexd(Register dst1, Register dst2, Register src, Condition cond=al)
void and_(Register dst, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
void b(Condition cond, Label *L)
void ldrh(Register dst, const MemOperand &src, Condition cond=al)
void rbit(Register dst, Register src, Condition cond=al)
void vmlal(NeonDataType size, QwNeonRegister dst, DwVfpRegister src1, DwVfpRegister src2)
void vcge(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void vmul(const SwVfpRegister dst, const SwVfpRegister src1, const SwVfpRegister src2, const Condition cond=al)
void strd(Register src1, Register src2, const MemOperand &dst, Condition cond=al)
void clz(Register dst, Register src, Condition cond=al)
static Register GetRm(Instr instr)
void vbsl(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void vadd(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void mrc2(Coprocessor coproc, int opcode_1, Register rd, CRegister crn, CRegister crm, int opcode_2=0)
static Instr GetMovWPattern()
void vrev16(NeonSize size, QwNeonRegister dst, QwNeonRegister src)
void vmla(const SwVfpRegister dst, const SwVfpRegister src1, const SwVfpRegister src2, const Condition cond=al)
void ldc2(Coprocessor coproc, CRegister crd, Register base, int option, LFlag l=Short)
void vcvt_u32_f32(const SwVfpRegister dst, const SwVfpRegister src, VFPConversionMode mode=kDefaultRoundToZero, const Condition cond=al)
void dmb(BarrierOption option)
void sub(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void vmov(const DwVfpRegister dst, const DwVfpRegister src, const Condition cond=al)
static Instr PatchShiftImm(Instr instr, int immed)
void movw(Register reg, uint32_t immediate, Condition cond=al)
void vrintp(NeonDataType dt, const QwNeonRegister dst, const QwNeonRegister src)
void vrinta(const DwVfpRegister dst, const DwVfpRegister src)
void vorr(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void vrintn(const SwVfpRegister dst, const SwVfpRegister src)
void udiv(Register dst, Register src1, Register src2, Condition cond=al)
void vrhadd(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void uxtb16(Register dst, Register src, int rotate=0, Condition cond=al)
void vclt(NeonSize size, QwNeonRegister dst, QwNeonRegister src, int value)
void vmax(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
static Register GetRd(Instr instr)
void vminnm(const SwVfpRegister dst, const SwVfpRegister src1, const SwVfpRegister src2)
void vmov(const DwVfpRegister dst, const Register src1, const Register src2, const Condition cond=al)
void vorn(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void AddrMode2(Instr instr, Register rd, const MemOperand &x)
void vtrn(NeonSize size, QwNeonRegister src1, QwNeonRegister src2)
void vcgt(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void vld1(NeonSize size, const NeonListOperand &dst, const NeonMemOperand &src)
static bool IsMovW(Instr instr)
void pkhbt(Register dst, Register src1, const Operand &src2, Condition cond=al)
void vsri(NeonSize size, DwVfpRegister dst, DwVfpRegister src, int shift)
void vrintz(const SwVfpRegister dst, const SwVfpRegister src, const Condition cond=al)
static void instr_at_put(Address pc, Instr instr)
void vstr(const DwVfpRegister src, const MemOperand &dst, const Condition cond=al)
static Register GetCmpImmediateRegister(Instr instr)
void strb(Register src, const MemOperand &dst, Condition cond=al)
bool AddrMode1TryEncodeOperand(Instr *instr, const Operand &x)
void blx(int branch_offset)
static bool IsMovT(Instr instr)
void veor(DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2)
void lsl(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void vqrdmulh(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void ldrb(Register dst, const MemOperand &src, Condition cond=al)
void vcvt_f32_f64(const SwVfpRegister dst, const DwVfpRegister src, VFPConversionMode mode=kDefaultRoundToZero, const Condition cond=al)
void vand(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
bool is_const_pool_blocked() const
void strexd(Register res, Register src1, Register src2, Register dst, Condition cond=al)
void vpmax(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2)
void mla(Register dst, Register src1, Register src2, Register srcA, SBit s=LeaveCC, Condition cond=al)
void eor(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
static bool IsVldrDRegisterImmediate(Instr instr)
void uxth(Register dst, Register src, int rotate=0, Condition cond=al)
void vabs(const DwVfpRegister dst, const DwVfpRegister src, const Condition cond=al)
void mvn(Register dst, const Operand &src, SBit s=LeaveCC, Condition cond=al)
void mov(Register dst, const Operand &src, SBit s=LeaveCC, Condition cond=al)
void vswp(DwVfpRegister dst, DwVfpRegister src)
void vdiv(const DwVfpRegister dst, const DwVfpRegister src1, const DwVfpRegister src2, const Condition cond=al)
void svc(uint32_t imm24, Condition cond=al)
void isb(BarrierOption option)
void vrintm(NeonDataType dt, const QwNeonRegister dst, const QwNeonRegister src)
void strh(Register src, const MemOperand &dst, Condition cond=al)
void ubfx(Register dst, Register src, int lsb, int width, Condition cond=al)
void vst1s(NeonSize size, const NeonListOperand &src, uint8_t index, const NeonMemOperand &dst)
void vstm(BlockAddrMode am, Register base, SwVfpRegister first, SwVfpRegister last, Condition cond=al)
void vrintp(const SwVfpRegister dst, const SwVfpRegister src)
void dsb(BarrierOption option)
void mrs(Register dst, SRegister s, Condition cond=al)
void rev(Register dst, Register src, Condition cond=al)
void ldc(Coprocessor coproc, CRegister crd, const MemOperand &src, LFlag l=Short, Condition cond=al)
void vcmp(const DwVfpRegister src1, const DwVfpRegister src2, const Condition cond=al)
void vcvt_f64_u32(const DwVfpRegister dst, const SwVfpRegister src, VFPConversionMode mode=kDefaultRoundToZero, const Condition cond=al)
void orr(Register dst, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
static bool IsOrrImmed(Instr instr)
static Instr instr_at(Address pc)
static int GetVldrDRegisterImmediateOffset(Instr instr)
void vsub(const SwVfpRegister dst, const SwVfpRegister src1, const SwVfpRegister src2, const Condition cond=al)
void vrintm(const SwVfpRegister dst, const SwVfpRegister src)
void vsel(const Condition cond, const DwVfpRegister dst, const DwVfpRegister src1, const DwVfpRegister src2)
void target_at_put(int pos, int target_pos)
void uxtah(Register dst, Register src1, Register src2, int rotate=0, Condition cond=al)
void sdiv(Register dst, Register src1, Register src2, Condition cond=al)
void vabs(const SwVfpRegister dst, const SwVfpRegister src, const Condition cond=al)
void vcvt_s32_f64(const SwVfpRegister dst, const DwVfpRegister src, VFPConversionMode mode=kDefaultRoundToZero, const Condition cond=al)
static RegList DefaultTmpList()
static int DecodeShiftImm(Instr instr)
void umull(Register dstL, Register dstH, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
void vqadd(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void vdup(NeonSize size, QwNeonRegister dst, Register src)
void vcvt_s32_f32(const SwVfpRegister dst, const SwVfpRegister src, VFPConversionMode mode=kDefaultRoundToZero, const Condition cond=al)
void strexh(Register src1, Register src2, Register dst, Condition cond=al)
void vld1s(NeonSize size, const NeonListOperand &dst, uint8_t index, const NeonMemOperand &src)
void vpadd(NeonSize size, DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2)
void vabs(NeonSize size, QwNeonRegister dst, QwNeonRegister src)
void vrsqrte(QwNeonRegister dst, QwNeonRegister src)
void movt(Register reg, uint32_t immediate, Condition cond=al)
void orr(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond=al)
void vmov(QwNeonRegister dst, uint64_t imm)
void vmov(const Register dst, const SwVfpRegister src, const Condition cond=al)
void smmul(Register dst, Register src1, Register src2, Condition cond=al)
void AddrMode4(Instr instr, Register rn, RegList rl)
static bool IsLdrPcImmediateOffset(Instr instr)
void vstr(const DwVfpRegister src, const Register base, int offset, const Condition cond=al)
void vmov(const SwVfpRegister dst, const Register src, const Condition cond=al)
void vsub(const DwVfpRegister dst, const DwVfpRegister src1, const DwVfpRegister src2, const Condition cond=al)
void vmov(NeonDataType dt, DwVfpRegister dst, int index, Register src)
void dp(uintptr_t data)
void tst(Register src1, const Operand &src2, Condition cond=al)
void vcvt_f32_s32(const SwVfpRegister dst, const SwVfpRegister src, VFPConversionMode mode=kDefaultRoundToZero, const Condition cond=al)
void vpush(SwVfpRegister src, Condition cond=al)
void pld(const MemOperand &address)
static bool ImmediateFitsAddrMode1Instruction(int32_t imm32)
void vext(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2, int bytes)
void ldr(Register dst, const MemOperand &src, Condition cond=al)
void bic(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, QwNeonRegister shift)
void vrev32(NeonSize size, QwNeonRegister dst, QwNeonRegister src)
void vmul(const DwVfpRegister dst, const DwVfpRegister src1, const DwVfpRegister src2, const Condition cond=al)
static Condition GetCondition(Instr instr)
static bool IsBlxIp(Instr instr)
void mcr2(Coprocessor coproc, int opcode_1, Register rd, CRegister crn, CRegister crm, int opcode_2=0)
void tst(Register src1, Register src2, Condition cond=al)
void vcnt(QwNeonRegister dst, QwNeonRegister src)
void nop(int type=0)
static bool IsBlxReg(Instr instr)
void vtrn(NeonSize size, DwVfpRegister src1, DwVfpRegister src2)
void vmov(const Register dst1, const Register dst2, const DwVfpRegister src, const Condition cond=al)
void ldrexh(Register dst, Register src, Condition cond=al)
void vadd(const DwVfpRegister dst, const DwVfpRegister src1, const DwVfpRegister src2, const Condition cond=al)
void bkpt(uint32_t imm16)
void vbic(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void cdp2(Coprocessor coproc, int opcode_1, CRegister crd, CRegister crn, CRegister crm, int opcode_2)
static bool IsBOrBlPcImmediateOffset(Instr instr)
void vtbx(DwVfpRegister dst, const NeonListOperand &list, DwVfpRegister index)
void vcmp(const SwVfpRegister src1, const float src2, const Condition cond=al)
void vmov(const SwVfpRegister dst, const SwVfpRegister src, const Condition cond=al)
void ldrex(Register dst, Register src, Condition cond=al)
base::SmallVector< ConstantPoolEntry, kTypicalNumPending32Constants > pending_32_bit_constants_
void bx(Register target, Condition cond=al)
void vst1(NeonSize size, const NeonListOperand &src, const NeonMemOperand &dst)
static Instr PatchMovwImmediate(Instr instruction, uint32_t immediate)
static bool IsAddRegisterImmediate(Instr instr)
void strexb(Register src1, Register src2, Register dst, Condition cond=al)
void vcgt(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void vmaxnm(const DwVfpRegister dst, const DwVfpRegister src1, const DwVfpRegister src2)
void vcmp(const SwVfpRegister src1, const SwVfpRegister src2, const Condition cond=al)
void vrintn(const DwVfpRegister dst, const DwVfpRegister src)
void ldc(Coprocessor coproc, CRegister crd, Register base, int option, LFlag l=Short, Condition cond=al)
void vneg(NeonSize size, QwNeonRegister dst, QwNeonRegister src)
void sxtab(Register dst, Register src1, Register src2, int rotate=0, Condition cond=al)
static bool IsStrRegFpNegOffset(Instr instr)
static bool IsLdrRegisterImmediate(Instr instr)
void AbortedCodeGeneration() override
void vsub(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void stop(Condition cond=al, int32_t code=kDefaultStopCode)
static int GetLdrRegisterImmediateOffset(Instr instr)
int SizeOfCodeGeneratedSince(Label *label)
void vldr(const DwVfpRegister dst, const Register base, int offset, const Condition cond=al)
void vuzp(NeonSize size, DwVfpRegister src1, DwVfpRegister src2)
void vmull(NeonDataType size, QwNeonRegister dst, DwVfpRegister src1, DwVfpRegister src2)
void cmn(Register src1, const Operand &src2, Condition cond=al)
static bool IsLdrRegFpNegOffset(Instr instr)
static Instr GetMovTPattern()
void vpush(QwNeonRegister src, Condition cond=al)
void bfc(Register dst, int lsb, int width, Condition cond=al)
void add(Register dst, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
void vmax(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void vcvt_s32_f32(QwNeonRegister dst, QwNeonRegister src)
void vrintn(NeonDataType dt, const QwNeonRegister dst, const QwNeonRegister src)
void asr(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void vrev64(NeonSize size, QwNeonRegister dst, QwNeonRegister src)
void vldr(const SwVfpRegister dst, const Register base, int offset, const Condition cond=al)
void vceq(NeonSize size, QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
V8_INLINE EnsureSpace(Assembler *assembler)
LoadStoreLaneParams(MachineRepresentation rep, uint8_t laneidx)
LoadStoreLaneParams(uint8_t laneidx, NeonSize sz, int lanes)
MemOperand(Register rn, Register rm, AddrMode am=Offset)
MemOperand(Register rn, int32_t offset=0, AddrMode am=Offset)
void set_offset(int32_t offset)
bool OffsetIsUint12Encodable() const
MemOperand(Register rn, Register rm, ShiftOp shift_op, int shift_imm, AddrMode am=Offset)
static V8_INLINE MemOperand PointerAddressFromSmiKey(Register array, Register key, AddrMode am=Offset)
DoubleRegister base() const
NeonListOperand(DoubleRegister base, int register_count=1)
NeonListOperand(QwNeonRegister q_reg)
NeonListType type() const
void SetAlignment(int align)
NeonMemOperand(Register rn, Register rm, int align=0)
NeonMemOperand(Register rn, AddrMode am=Offset, int align=0)
int InstructionsRequired(const Assembler *assembler, Instr instr=0) const
Operand(Register rm, ShiftOp shift_op, int shift_imm)
static V8_INLINE Operand SmiUntag(Register rm)
Register rs() const
Operand(Register rm, ShiftOp shift_op, Register rs)
bool MustOutputRelocInfo(const Assembler *assembler) const
static V8_INLINE Operand PointerOffsetFromSmiKey(Register key)
Operand(Handle< HeapObject > handle)
HeapNumberRequest heap_number_request() const
RelocInfo::Mode rmode_
static V8_INLINE Operand DoubleOffsetFromSmiKey(Register key)
ShiftOp shift_op() const
bool IsHeapNumberRequest() const
bool IsImmediateShiftedRegister() const
Register rm() const
bool IsRegisterShiftedRegister() const
V8_INLINE Operand(int32_t immediate, RelocInfo::Mode rmode=RelocInfo::NO_INFO)
int32_t immediate() const
PatchingAssembler(const AssemblerOptions &options, uint8_t *address, int instructions)
UseScratchRegisterScope(Assembler *assembler)
void SetAvailableVfp(VfpRegList available)
void Exclude(const Register &reg1, const Register &reg2=no_reg)
void Include(const Register &reg1, const Register &reg2=no_reg)
Operand const offset_
Register const value_
Label label
BytecodeAssembler & assembler_
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage * MB
int32_t offset
Instruction * instr
ZoneVector< RpoNumber > & result
LiftoffRegister reg
int pc_offset
int x
int position
Definition liveedit.cc:290
int m
Definition mul-fft.cc:294
STL namespace.
constexpr Register no_reg
constexpr int kPointerSizeLog2
Definition globals.h:600
constexpr BlockAddrMode ia_w
std::variant< Zone *, AccountingAllocator * > MaybeAssemblerZone
Definition assembler.h:262
constexpr AddrMode NegPreIndex
const int kSmiTagSize
Definition v8-internal.h:87
constexpr int kPointerSize
Definition globals.h:599
constexpr BlockAddrMode db_w
uint64_t VfpRegList
constexpr ShiftOp ASR
constexpr NeonListType nlt_3
constexpr ShiftOp LSL
uint32_t SRegisterFieldMask
constexpr NeonListType nlt_2
constexpr int L
constexpr BlockAddrMode db
constexpr AddrMode PostIndex
constexpr uint8_t kInstrSize
const int kSmiTag
Definition v8-internal.h:86
constexpr int kDoubleSizeLog2
Definition globals.h:421
constexpr int kMaxInt
Definition globals.h:374
constexpr NeonListType nlt_1
constexpr NeonListType nlt_4
#define DCHECK_LE(v1, v2)
Definition logging.h:490
#define DCHECK_NOT_NULL(val)
Definition logging.h:492
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define V8_EXPORT_PRIVATE
Definition macros.h:460
HeapNumberRequest heap_number_request
#define V8_INLINE
Definition v8config.h:500
#define V8_UNLIKELY(condition)
Definition v8config.h:660
#define V8_NODISCARD
Definition v8config.h:693
std::unique_ptr< ValueMirror > key