v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
assembler-ia32.h
Go to the documentation of this file.
1// Copyright (c) 1994-2006 Sun Microsystems Inc.
2// All Rights Reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions are
6// met:
7//
8// - Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// - Redistribution in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the distribution.
14//
15// - Neither the name of Sun Microsystems or the names of contributors may
16// be used to endorse or promote products derived from this software without
17// specific prior written permission.
18//
19// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31// The original source code covered by the above license has been
32// modified significantly by Google Inc.
33// Copyright 2011 the V8 project authors. All rights reserved.
34
35// A light-weight IA32 Assembler.
36
37#ifndef V8_CODEGEN_IA32_ASSEMBLER_IA32_H_
38#define V8_CODEGEN_IA32_ASSEMBLER_IA32_H_
39
40#include <deque>
41#include <memory>
42
48#include "src/codegen/label.h"
50#include "src/objects/smi.h"
51#include "src/utils/utils.h"
52
53namespace v8 {
54namespace internal {
55
56class SafepointTableBuilder;
57
100
101// Returns the equivalent of !cc.
103 return static_cast<Condition>(cc ^ 1);
104}
105
107 kRoundToNearest = 0x0,
109 kRoundUp = 0x2,
110 kRoundToZero = 0x3
111};
112
113// -----------------------------------------------------------------------------
114// Machine instruction Immediates
115
116class Immediate {
117 public:
118 // Calls where x is an Address (uintptr_t) resolve to this overload.
120 value_.immediate = x;
121 rmode_ = rmode;
122 }
123 inline explicit Immediate(const ExternalReference& ext)
124 : Immediate(ext.raw(), RelocInfo::EXTERNAL_REFERENCE) {}
126 : Immediate(handle.address(), RelocInfo::FULL_EMBEDDED_OBJECT) {}
127 inline explicit Immediate(Tagged<Smi> value)
128 : Immediate(static_cast<intptr_t>(value.ptr())) {}
129
130 static Immediate EmbeddedNumber(double number); // Smi or HeapNumber.
131
133
140
143 return value_.heap_number_request;
144 }
145
146 int immediate() const {
148 return value_.immediate;
149 }
150
155
157 return Handle<HeapObject>(reinterpret_cast<Address*>(immediate()));
158 }
159
162 }
163
168
169 bool is_zero() const {
170 return RelocInfo::IsNoInfo(rmode_) && immediate() == 0;
171 }
172 bool is_int8() const {
173 return RelocInfo::IsNoInfo(rmode_) && i::is_int8(immediate());
174 }
175 bool is_uint8() const {
176 return RelocInfo::IsNoInfo(rmode_) && i::is_uint8(immediate());
177 }
178 bool is_int16() const {
179 return RelocInfo::IsNoInfo(rmode_) && i::is_int16(immediate());
180 }
181
182 bool is_uint16() const {
183 return RelocInfo::IsNoInfo(rmode_) && i::is_uint16(immediate());
184 }
185
186 RelocInfo::Mode rmode() const { return rmode_; }
187
188 private:
189 inline explicit Immediate(Label* value) {
190 value_.immediate = reinterpret_cast<int32_t>(value);
192 }
193
201
202 friend class Operand;
203 friend class Assembler;
204 friend class MacroAssembler;
205};
206
207// -----------------------------------------------------------------------------
208// Machine instruction Operands
209
222
223class V8_EXPORT_PRIVATE Operand {
224 public:
225 // reg
226 V8_INLINE explicit Operand(Register reg) { set_modrm(3, reg); }
227
228 // XMM reg
229 V8_INLINE explicit Operand(XMMRegister xmm_reg) {
230 Register reg = Register::from_code(xmm_reg.code());
231 set_modrm(3, reg);
232 }
233
234 // [disp/r]
235 V8_INLINE explicit Operand(int32_t disp, RelocInfo::Mode rmode) {
236 set_modrm(0, ebp);
237 set_dispr(disp, rmode);
238 }
239
240 // [disp/r]
242 set_modrm(0, ebp);
243 set_dispr(imm.immediate(), imm.rmode_);
244 }
245
246 // [base + disp/r]
247 explicit Operand(Register base, int32_t disp,
248 RelocInfo::Mode rmode = RelocInfo::NO_INFO);
249
250 // [disp/r]
251 explicit Operand(Label* label) {
252 set_modrm(0, ebp);
253 set_dispr(reinterpret_cast<intptr_t>(label), RelocInfo::INTERNAL_REFERENCE);
254 }
255
256 // [base + index*scale + disp/r]
258 int32_t disp, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
259
260 // [index*scale + disp/r]
261 explicit Operand(Register index, ScaleFactor scale, int32_t disp,
262 RelocInfo::Mode rmode = RelocInfo::NO_INFO);
263
265 return Operand(index, scale, reinterpret_cast<int32_t>(table),
266 RelocInfo::INTERNAL_REFERENCE);
267 }
268
270 return Operand(base, imm.value_.immediate, imm.rmode_);
271 }
272
273 // Returns true if this Operand is a wrapper for the specified register.
274 bool is_reg(Register reg) const { return is_reg(reg.code()); }
275 bool is_reg(XMMRegister reg) const { return is_reg(reg.code()); }
276
277 // Returns true if this Operand is a wrapper for one register.
278 bool is_reg_only() const;
279
280 // Asserts that this Operand is a wrapper for one register and returns the
281 // register.
282 Register reg() const;
283
285 RelocInfo::Mode rmode() { return rmode_; }
286
287 private:
288 // Set the ModRM byte without an encoded 'reg' register. The
289 // register is encoded later as part of the emit_operand operation.
290 inline void set_modrm(int mod, Register rm) {
291 DCHECK_EQ(mod & -4, 0);
292 buf_[0] = mod << 6 | rm.code();
293 len_ = 1;
294 }
295
296 inline void set_sib(ScaleFactor scale, Register index, Register base);
297 inline void set_disp8(int8_t disp);
298 inline void set_dispr(int32_t disp, RelocInfo::Mode rmode) {
299 DCHECK(len_ == 1 || len_ == 2);
300 Address p = reinterpret_cast<Address>(&buf_[len_]);
301 WriteUnalignedValue(p, disp);
302 len_ += sizeof(int32_t);
303 rmode_ = rmode;
304 }
305
306 inline bool is_reg(int reg_code) const {
307 return ((buf_[0] & 0xF8) == 0xC0) // addressing mode is register only.
308 && ((buf_[0] & 0x07) == reg_code); // register codes match.
309 }
310
311 uint8_t buf_[6];
312 // The number of bytes in buf_.
313 uint8_t len_ = 0;
314 // Only valid if len_ > 4.
315 RelocInfo::Mode rmode_ = RelocInfo::NO_INFO;
316};
318static_assert(sizeof(Operand) <= 2 * kSystemPointerSize,
319 "Operand must be small enough to pass it by value");
320
322
323// -----------------------------------------------------------------------------
324// A Displacement describes the 32bit immediate field of an instruction which
325// may be used together with a Label in order to refer to a yet unknown code
326// position. Displacements stored in the instruction stream are used to describe
327// the instruction and to chain a list of instructions using the same Label.
328// A Displacement contains 2 different fields:
329//
330// next field: position of next displacement in the chain (0 = end of list)
331// type field: instruction type
332//
333// A next value of null (0) indicates the end of a chain (note that there can
334// be no displacement at position zero, because there is always at least one
335// instruction byte before the displacement).
336//
337// Displacement _data field layout
338//
339// |31.....2|1......0|
340// | next | type |
341
343 public:
345
346 int data() const { return data_; }
347 Type type() const { return TypeField::decode(data_); }
348 void next(Label* L) const {
349 int n = NextField::decode(data_);
350 n > 0 ? L->link_to(n) : L->Unuse();
351 }
352 void link_to(Label* L) { init(L, type()); }
353
354 explicit Displacement(int data) { data_ = data; }
355
356 Displacement(Label* L, Type type) { init(L, type); }
357
358 void print() {
359 PrintF("%s (%x) ", (type() == UNCONDITIONAL_JUMP ? "jmp" : "[other]"),
361 }
362
363 private:
364 int data_;
365
367 using NextField = base::BitField<int, 2, 32 - 2>;
368
369 void init(Label* L, Type type);
370};
371
373 private:
374 // We check before assembling an instruction that there is sufficient
375 // space to write an instruction and its relocation information.
376 // The relocation writer's position must be kGap bytes above the end of
377 // the generated instructions. This leaves enough space for the
378 // longest possible ia32 instruction, 15 bytes, and the longest possible
379 // relocation information encoding, RelocInfoWriter::kMaxLength == 16.
380 // (There is a 15 byte limit on ia32 instruction length that rules out some
381 // otherwise valid instructions.)
382 // This allows for a single, fast space check per instruction.
383 static constexpr int kGap = 32;
384 static_assert(AssemblerBase::kMinimalBufferSize >= 2 * kGap);
385
386 public:
387 // Create an assembler. Instructions and relocation information are emitted
388 // into a buffer, with the instructions starting from the beginning and the
389 // relocation information starting from the end of the buffer. See CodeDesc
390 // for a detailed comment on the layout (globals.h).
391 //
392 // If the provided buffer is nullptr, the assembler allocates and grows its
393 // own buffer. Otherwise it takes ownership of the provided buffer.
395 std::unique_ptr<AssemblerBuffer> = {});
396 // For compatibility with assemblers that require a zone.
398 std::unique_ptr<AssemblerBuffer> buffer = {})
399 : Assembler(options, std::move(buffer)) {}
400
401 // GetCode emits any pending (non-emitted) code and fills the descriptor desc.
402 static constexpr int kNoHandlerTable = 0;
403 static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr;
404 void GetCode(LocalIsolate* isolate, CodeDesc* desc,
405 SafepointTableBuilder* safepoint_table_builder,
406 int handler_table_offset);
407
408 // Convenience wrapper for allocating with an Isolate.
409 void GetCode(Isolate* isolate, CodeDesc* desc);
410 // Convenience wrapper for code without safepoint or handler tables.
411 void GetCode(LocalIsolate* isolate, CodeDesc* desc) {
412 GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
413 }
414
416
417 // Unused on this architecture.
419
420 // Read/Modify the code target in the branch/call instruction at pc.
421 // The isolate argument is unused (and may be nullptr) when skipping flushing.
422 inline static Address target_address_at(Address pc, Address constant_pool);
423 inline static void set_target_address_at(
424 Address pc, Address constant_pool, Address target,
425 WritableJitAllocation* jit_allocation,
426 ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
427
428 // Get the size of the special target encoded at 'instruction_payload'.
430 Address instruction_payload);
431
432 // This sets the internal reference at the pc.
434 Address pc, Address target, WritableJitAllocation& jit_allocation,
435 RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
436
437 // Read/modify the uint32 constant used at pc.
438 static inline uint32_t uint32_constant_at(Address pc, Address constant_pool);
439 static inline void set_uint32_constant_at(
440 Address pc, Address constant_pool, uint32_t new_constant,
441 WritableJitAllocation* jit_allocation,
442 ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
443
444 static constexpr int kSpecialTargetSize = kSystemPointerSize;
445
446 // One byte opcode for test al, 0xXX.
447 static constexpr uint8_t kTestAlByte = 0xA8;
448 // One byte opcode for nop.
449 static constexpr uint8_t kNopByte = 0x90;
450
451 // One byte opcode for a short unconditional jump.
452 static constexpr uint8_t kJmpShortOpcode = 0xEB;
453 // One byte prefix for a short conditional jump.
454 static constexpr uint8_t kJccShortPrefix = 0x70;
455 static constexpr uint8_t kJncShortOpcode = kJccShortPrefix | not_carry;
456 static constexpr uint8_t kJcShortOpcode = kJccShortPrefix | carry;
457 static constexpr uint8_t kJnzShortOpcode = kJccShortPrefix | not_zero;
458 static constexpr uint8_t kJzShortOpcode = kJccShortPrefix | zero;
459
460 // ---------------------------------------------------------------------------
461 // InstructionStream generation
462 //
463 // - function names correspond one-to-one to ia32 instruction mnemonics
464 // - unless specified otherwise, instructions operate on 32bit operands
465 // - instructions on 8bit (byte) operands/registers have a trailing '_b'
466 // - instructions on 16bit (word) operands/registers have a trailing '_w'
467 // - naming conflicts with C++ keywords are resolved via a trailing '_'
468
469 // NOTE ON INTERFACE: Currently, the interface is not very consistent
470 // in the sense that some operations (e.g. mov()) can be called in more
471 // than one way to generate the same instruction: The Register argument
472 // can in some cases be replaced with an Operand(Register) argument.
473 // This should be cleaned up and made more orthogonal. The question
474 // is: should we always use Operands instead of Registers where an
475 // Operand is possible, or should we have a Register (overloaded) form
476 // instead? We must be careful to make sure that the selected instruction
477 // is obvious from the parameters to avoid hard-to-find code generation
478 // bugs.
479
480 // Insert the smallest number of nop instructions
481 // possible to align the pc offset to a multiple
482 // of m. m must be a power of 2.
483 void Align(int m);
484 // Insert the smallest number of zero bytes possible to align the pc offset
485 // to a multiple of m. m must be a power of 2 (>= 2).
486 void DataAlign(int m);
487 void Nop(int bytes = 1);
488 // Aligns code to something that's optimal for a jump target for the platform.
490 void LoopHeaderAlign() { CodeTargetAlign(); }
491
492 // Stack
493 void pushad();
494 void popad();
495
496 void pushfd();
497 void popfd();
498
499 void push(const Immediate& x);
500 void push_imm32(int32_t imm32);
501 void push(Register src);
502 void push(Operand src);
503
504 void pop(Register dst);
505 void pop(Operand dst);
506
507 void leave();
508
509 // Moves
510 void mov_b(Register dst, Register src) { mov_b(dst, Operand(src)); }
511 void mov_b(Register dst, Operand src);
512 void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); }
513 void mov_b(Operand dst, int8_t src) { mov_b(dst, Immediate(src)); }
514 void mov_b(Operand dst, const Immediate& src);
515 void mov_b(Operand dst, Register src);
516
517 void mov_w(Register dst, Operand src);
518 void mov_w(Operand dst, int16_t src) { mov_w(dst, Immediate(src)); }
519 void mov_w(Operand dst, const Immediate& src);
520 void mov_w(Operand dst, Register src);
521
522 void mov(Register dst, int32_t imm32);
523 void mov(Register dst, const Immediate& x);
524 void mov(Register dst, Handle<HeapObject> handle);
525 void mov(Register dst, Operand src);
526 void mov(Register dst, Register src);
527 void mov(Operand dst, const Immediate& x);
528 void mov(Operand dst, Handle<HeapObject> handle);
529 void mov(Operand dst, Register src);
530 void mov(Operand dst, Address src, RelocInfo::Mode);
531
532 void movsx_b(Register dst, Register src) { movsx_b(dst, Operand(src)); }
533 void movsx_b(Register dst, Operand src);
534
535 void movsx_w(Register dst, Register src) { movsx_w(dst, Operand(src)); }
536 void movsx_w(Register dst, Operand src);
537
538 void movzx_b(Register dst, Register src) { movzx_b(dst, Operand(src)); }
539 void movzx_b(Register dst, Operand src);
540
541 void movzx_w(Register dst, Register src) { movzx_w(dst, Operand(src)); }
542 void movzx_w(Register dst, Operand src);
543
544 void movq(XMMRegister dst, Operand src);
545 void movq(Operand dst, XMMRegister src);
546
547 // Conditional moves
548 void cmov(Condition cc, Register dst, Register src) {
549 cmov(cc, dst, Operand(src));
550 }
551 void cmov(Condition cc, Register dst, Operand src);
552
553 // Flag management.
554 void cld();
555
556 // Repetitive string instructions.
557 void rep_movs();
558 void rep_stos();
559 void stos();
560
561 void xadd(Operand dst, Register src);
562 void xadd_b(Operand dst, Register src);
563 void xadd_w(Operand dst, Register src);
564
565 // Exchange
566 void xchg(Register dst, Register src);
567 void xchg(Register dst, Operand src);
570
571 // Lock prefix
572 void lock();
573
574 // CompareExchange
575 void cmpxchg(Operand dst, Register src);
576 void cmpxchg_b(Operand dst, Register src);
577 void cmpxchg_w(Operand dst, Register src);
579
580 // Memory Fence
581 void mfence();
582 void lfence();
583
584 void pause();
585
586 // Arithmetics
587 void adc(Register dst, int32_t imm32);
588 void adc(Register dst, Register src) { adc(dst, Operand(src)); }
589 void adc(Register dst, Operand src);
590
591 void add(Register dst, Register src) { add(dst, Operand(src)); }
592 void add(Register dst, Operand src);
593 void add(Operand dst, Register src);
594 void add(Register dst, const Immediate& imm) { add(Operand(dst), imm); }
595 void add(Operand dst, const Immediate& x);
596
597 void and_(Register dst, int32_t imm32);
598 void and_(Register dst, const Immediate& x);
599 void and_(Register dst, Register src) { and_(dst, Operand(src)); }
600 void and_(Register dst, Operand src);
601 void and_(Operand dst, Register src);
602 void and_(Operand dst, const Immediate& x);
603
605 DCHECK(reg.is_byte_register());
606 cmpb(Operand(reg), imm8);
607 }
608 void cmpb(Operand op, Immediate imm8);
611 void cmpb(Register dst, Register src) { cmpb(Operand(dst), src); }
612 void cmpb_al(Operand op);
613 void cmpw_ax(Operand op);
614 void cmpw(Operand dst, Immediate src);
615 void cmpw(Register dst, Immediate src) { cmpw(Operand(dst), src); }
616 void cmpw(Register dst, Operand src);
617 void cmpw(Register dst, Register src) { cmpw(Operand(dst), src); }
618 void cmpw(Operand dst, Register src);
619 void cmp(Register reg, int32_t imm32);
621 void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
623 void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); }
625 void cmp(Operand op, const Immediate& imm);
627
628 void dec_b(Register dst);
629 void dec_b(Operand dst);
630
631 void dec(Register dst);
632 void dec(Operand dst);
633
634 void cdq();
635
636 void idiv(Register src) { idiv(Operand(src)); }
637 void idiv(Operand src);
638 void div(Register src) { div(Operand(src)); }
639 void div(Operand src);
640
641 // Signed multiply instructions.
642 void imul(Register src); // edx:eax = eax * src.
643 void imul(Register dst, Register src) { imul(dst, Operand(src)); }
644 void imul(Register dst, Operand src); // dst = dst * src.
645 void imul(Register dst, Register src, int32_t imm32); // dst = src * imm32.
646 void imul(Register dst, Operand src, int32_t imm32);
647
648 void inc(Register dst);
649 void inc(Operand dst);
650
651 void lea(Register dst, Operand src);
652 void lea(Register dst, Register src, Label* lbl);
653
654 // Unsigned multiply instruction.
655 void mul(Register src); // edx:eax = eax * reg.
656
657 void neg(Register dst);
658 void neg(Operand dst);
659
660 void not_(Register dst);
661 void not_(Operand dst);
662
663 void or_(Register dst, int32_t imm32);
664 void or_(Register dst, Register src) { or_(dst, Operand(src)); }
665 void or_(Register dst, Operand src);
666 void or_(Operand dst, Register src);
667 void or_(Register dst, const Immediate& imm) { or_(Operand(dst), imm); }
668 void or_(Operand dst, const Immediate& x);
669
670 void rcl(Register dst, uint8_t imm8);
671 void rcr(Register dst, uint8_t imm8);
672
673 void rol(Register dst, uint8_t imm8) { rol(Operand(dst), imm8); }
674 void rol(Operand dst, uint8_t imm8);
675 void rol_cl(Register dst) { rol_cl(Operand(dst)); }
676 void rol_cl(Operand dst);
677
678 void ror(Register dst, uint8_t imm8) { ror(Operand(dst), imm8); }
679 void ror(Operand dst, uint8_t imm8);
680 void ror_cl(Register dst) { ror_cl(Operand(dst)); }
681 void ror_cl(Operand dst);
682
683 void sar(Register dst, uint8_t imm8) { sar(Operand(dst), imm8); }
684 void sar(Operand dst, uint8_t imm8);
685 void sar_cl(Register dst) { sar_cl(Operand(dst)); }
686 void sar_cl(Operand dst);
687
688 void sbb(Register dst, Register src) { sbb(dst, Operand(src)); }
689 void sbb(Register dst, Operand src);
690
691 void shl(Register dst, uint8_t imm8) { shl(Operand(dst), imm8); }
692 void shl(Operand dst, uint8_t imm8);
693 void shl_cl(Register dst) { shl_cl(Operand(dst)); }
694 void shl_cl(Operand dst);
695 void shld(Register dst, Register src, uint8_t shift);
696 void shld_cl(Register dst, Register src);
697
698 void shr(Register dst, uint8_t imm8) { shr(Operand(dst), imm8); }
699 void shr(Operand dst, uint8_t imm8);
700 void shr_cl(Register dst) { shr_cl(Operand(dst)); }
701 void shr_cl(Operand dst);
702 void shrd(Register dst, Register src, uint8_t shift);
703 void shrd_cl(Register dst, Register src) { shrd_cl(Operand(dst), src); }
704 void shrd_cl(Operand dst, Register src);
705
706 void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
707 void sub(Operand dst, const Immediate& x);
708 void sub(Register dst, Register src) { sub(dst, Operand(src)); }
709 void sub(Register dst, Operand src);
710 void sub(Operand dst, Register src);
711 void sub_sp_32(uint32_t imm);
712
713 void test(Register reg, const Immediate& imm);
714 void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); }
716 void test(Operand op, const Immediate& imm);
717 void test(Operand op, Register reg) { test(reg, op); }
720 void test_b(Operand op, Immediate imm8);
721 void test_b(Operand op, Register reg) { test_b(reg, op); }
722 void test_b(Register dst, Register src) { test_b(dst, Operand(src)); }
725 void test_w(Operand op, Immediate imm16);
726 void test_w(Operand op, Register reg) { test_w(reg, op); }
727 void test_w(Register dst, Register src) { test_w(dst, Operand(src)); }
728
729 void xor_(Register dst, int32_t imm32);
730 void xor_(Register dst, Register src) { xor_(dst, Operand(src)); }
731 void xor_(Register dst, Operand src);
732 void xor_(Operand dst, Register src);
733 void xor_(Register dst, const Immediate& imm) { xor_(Operand(dst), imm); }
734 void xor_(Operand dst, const Immediate& x);
735
736 // Bit operations.
737 void bswap(Register dst);
738 void bt(Operand dst, Register src);
739 void bts(Register dst, Register src) { bts(Operand(dst), src); }
740 void bts(Operand dst, Register src);
741 void bsr(Register dst, Register src) { bsr(dst, Operand(src)); }
742 void bsr(Register dst, Operand src);
743 void bsf(Register dst, Register src) { bsf(dst, Operand(src)); }
744 void bsf(Register dst, Operand src);
745
746 // Miscellaneous
747 void hlt();
748 void int3();
749 void nop();
750 void ret(int imm16);
751 void ud2();
752
753 // Label operations & relative jumps (PPUM Appendix D)
754 //
755 // Takes a branch opcode (cc) and a label (L) and generates
756 // either a backward branch or a forward branch and links it
757 // to the label fixup chain. Usage:
758 //
759 // Label L; // unbound label
760 // j(cc, &L); // forward branch to unbound label
761 // bind(&L); // bind label to the current pc
762 // j(cc, &L); // backward branch to bound label
763 // bind(&L); // illegal: a label may be bound only once
764 //
765 // Note: The same Label can be used for forward and backward branches
766 // but it may be bound only once.
767
768 void bind(Label* L); // binds an unbound label L to the current code position
769
770 // Calls
771 void call(Label* L);
772 void call(Address entry, RelocInfo::Mode rmode);
774 void call(Operand adr);
776 void wasm_call(Address address, RelocInfo::Mode rmode);
777
778 // Jumps
779 // unconditional jump to L
780 void jmp(Label* L, Label::Distance distance = Label::kFar);
781 void jmp(Address entry, RelocInfo::Mode rmode);
783 void jmp(Operand adr);
785 // Unconditional jump relative to the current address. Low-level routine,
786 // use with caution!
787 void jmp_rel(int offset);
788
789 // Conditional jumps
790 void j(Condition cc, Label* L, Label::Distance distance = Label::kFar);
791 void j(Condition cc, uint8_t* entry, RelocInfo::Mode rmode);
792 void j(Condition cc, Handle<Code> code,
793 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
794
795 // Floating-point operations
796 void fld(int i);
797 void fstp(int i);
798
799 void fld1();
800 void fldz();
801 void fldpi();
802 void fldln2();
803
804 void fld_s(Operand adr);
805 void fld_d(Operand adr);
806
807 void fstp_s(Operand adr);
808 void fst_s(Operand adr);
809 void fstp_d(Operand adr);
810 void fst_d(Operand adr);
811
812 void fild_s(Operand adr);
813 void fild_d(Operand adr);
814
815 void fist_s(Operand adr);
816
817 void fistp_s(Operand adr);
818 void fistp_d(Operand adr);
819
820 // The fisttp instructions require SSE3.
821 void fisttp_s(Operand adr);
822 void fisttp_d(Operand adr);
823
824 void fabs();
825 void fchs();
826 void fcos();
827 void fsin();
828 void fptan();
829 void fyl2x();
830 void f2xm1();
831 void fscale();
832 void fninit();
833
834 void fadd(int i);
835 void fadd_i(int i);
836 void fsub(int i);
837 void fsub_i(int i);
838 void fmul(int i);
839 void fmul_i(int i);
840 void fdiv(int i);
841 void fdiv_i(int i);
842
843 void fisub_s(Operand adr);
844
845 void faddp(int i = 1);
846 void fsubp(int i = 1);
847 void fsubrp(int i = 1);
848 void fmulp(int i = 1);
849 void fdivp(int i = 1);
850 void fprem();
851 void fprem1();
852
853 void fxch(int i = 1);
854 void fincstp();
855 void ffree(int i = 0);
856
857 void ftst();
858 void fucomp(int i);
859 void fucompp();
860 void fucomi(int i);
861 void fucomip();
862 void fcompp();
863 void fnstsw_ax();
864 void fwait();
865 void fnclex();
866
867 void frndint();
868
869 void sahf();
871
872 void cpuid();
873
874 // SSE instructions
875 void addss(XMMRegister dst, XMMRegister src) { addss(dst, Operand(src)); }
876 void addss(XMMRegister dst, Operand src);
877 void subss(XMMRegister dst, XMMRegister src) { subss(dst, Operand(src)); }
878 void subss(XMMRegister dst, Operand src);
879 void mulss(XMMRegister dst, XMMRegister src) { mulss(dst, Operand(src)); }
880 void mulss(XMMRegister dst, Operand src);
881 void divss(XMMRegister dst, XMMRegister src) { divss(dst, Operand(src)); }
882 void divss(XMMRegister dst, Operand src);
883 void sqrtss(XMMRegister dst, XMMRegister src) { sqrtss(dst, Operand(src)); }
884 void sqrtss(XMMRegister dst, Operand src);
885
886 void ucomiss(XMMRegister dst, XMMRegister src) { ucomiss(dst, Operand(src)); }
888 void movaps(XMMRegister dst, XMMRegister src) { movaps(dst, Operand(src)); }
889 void movaps(XMMRegister dst, Operand src);
890 void movups(XMMRegister dst, XMMRegister src) { movups(dst, Operand(src)); }
891 void movups(XMMRegister dst, Operand src);
892 void movups(Operand dst, XMMRegister src);
893 void shufps(XMMRegister dst, XMMRegister src, uint8_t imm8);
894 void shufpd(XMMRegister dst, XMMRegister src, uint8_t imm8);
895
898 void movlps(XMMRegister dst, Operand src);
899 void movlps(Operand dst, XMMRegister src);
900 void movhps(XMMRegister dst, Operand src);
901 void movhps(Operand dst, XMMRegister src);
902
903 void maxss(XMMRegister dst, XMMRegister src) { maxss(dst, Operand(src)); }
904 void maxss(XMMRegister dst, Operand src);
905 void minss(XMMRegister dst, XMMRegister src) { minss(dst, Operand(src)); }
906 void minss(XMMRegister dst, Operand src);
907
908 void haddps(XMMRegister dst, Operand src);
909 void haddps(XMMRegister dst, XMMRegister src) { haddps(dst, Operand(src)); }
910 void sqrtpd(XMMRegister dst, Operand src) {
911 sse2_instr(dst, src, 0x66, 0x0F, 0x51);
912 }
913 void sqrtpd(XMMRegister dst, XMMRegister src) { sqrtpd(dst, Operand(src)); }
914
915 void cmpps(XMMRegister dst, Operand src, uint8_t cmp);
916 void cmpps(XMMRegister dst, XMMRegister src, uint8_t cmp) {
917 cmpps(dst, Operand(src), cmp);
918 }
919 void cmppd(XMMRegister dst, Operand src, uint8_t cmp);
920 void cmppd(XMMRegister dst, XMMRegister src, uint8_t cmp) {
921 cmppd(dst, Operand(src), cmp);
922 }
923
924// Packed floating-point comparison operations.
925#define PACKED_CMP_LIST(V) \
926 V(cmpeq, 0x0) \
927 V(cmplt, 0x1) \
928 V(cmple, 0x2) \
929 V(cmpunord, 0x3) \
930 V(cmpneq, 0x4)
931
932#define SSE_CMP_P(instr, imm8) \
933 void instr##ps(XMMRegister dst, XMMRegister src) { \
934 cmpps(dst, Operand(src), imm8); \
935 } \
936 void instr##ps(XMMRegister dst, Operand src) { cmpps(dst, src, imm8); } \
937 void instr##pd(XMMRegister dst, XMMRegister src) { \
938 cmppd(dst, Operand(src), imm8); \
939 } \
940 void instr##pd(XMMRegister dst, Operand src) { cmppd(dst, src, imm8); }
941
943#undef SSE_CMP_P
944
945 // SSE2 instructions
946 void cvttss2si(Register dst, Operand src);
948 cvttss2si(dst, Operand(src));
949 }
950 void cvttsd2si(Register dst, Operand src);
952 cvttsd2si(dst, Operand(src));
953 }
955
956 void cvtsi2ss(XMMRegister dst, Register src) { cvtsi2ss(dst, Operand(src)); }
958 void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); }
962 cvtss2sd(dst, Operand(src));
963 }
967 cvttps2dq(dst, Operand(src));
968 }
971
972 void ucomisd(XMMRegister dst, XMMRegister src) { ucomisd(dst, Operand(src)); }
974
977
978 void movapd(XMMRegister dst, XMMRegister src) { movapd(dst, Operand(src)); }
979 void movapd(XMMRegister dst, Operand src) {
980 sse2_instr(dst, src, 0x66, 0x0F, 0x28);
981 }
982 void movupd(XMMRegister dst, Operand src) {
983 sse2_instr(dst, src, 0x66, 0x0F, 0x10);
984 }
985
988
990
992
993 void movdqa(XMMRegister dst, Operand src);
994 void movdqa(Operand dst, XMMRegister src);
996 void movdqu(XMMRegister dst, Operand src);
997 void movdqu(Operand dst, XMMRegister src);
999 void movdq(bool aligned, XMMRegister dst, Operand src) {
1000 if (aligned) {
1001 movdqa(dst, src);
1002 } else {
1003 movdqu(dst, src);
1004 }
1005 }
1006
1007 void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); }
1008 void movd(XMMRegister dst, Operand src);
1009 void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
1010 void movd(Operand dst, XMMRegister src);
1011 void movsd(XMMRegister dst, XMMRegister src) { movsd(dst, Operand(src)); }
1012 void movsd(XMMRegister dst, Operand src);
1013 void movsd(Operand dst, XMMRegister src);
1014
1015 void movss(XMMRegister dst, Operand src);
1016 void movss(Operand dst, XMMRegister src);
1017 void movss(XMMRegister dst, XMMRegister src) { movss(dst, Operand(src)); }
1018
1019 void extractps(Operand dst, XMMRegister src, uint8_t imm8);
1020 void extractps(Register dst, XMMRegister src, uint8_t imm8);
1021
1023
1024 void psllw(XMMRegister reg, uint8_t shift);
1025 void pslld(XMMRegister reg, uint8_t shift);
1026 void psrlw(XMMRegister reg, uint8_t shift);
1027 void psrld(XMMRegister reg, uint8_t shift);
1028 void psraw(XMMRegister reg, uint8_t shift);
1029 void psrad(XMMRegister reg, uint8_t shift);
1030 void psllq(XMMRegister reg, uint8_t shift);
1031 void psrlq(XMMRegister reg, uint8_t shift);
1032
// Shuffles of 16-bit lanes (high/low half) and 32-bit lanes, selected by the
// imm8 `shuffle` control. Register forms forward to the Operand forms.
1033 void pshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
1034 pshufhw(dst, Operand(src), shuffle);
1035 }
1036 void pshufhw(XMMRegister dst, Operand src, uint8_t shuffle);
1037 void pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
1038 pshuflw(dst, Operand(src), shuffle);
1039 }
1040 void pshuflw(XMMRegister dst, Operand src, uint8_t shuffle);
1041 void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
1042 pshufd(dst, Operand(src), shuffle);
1043 }
1044 void pshufd(XMMRegister dst, Operand src, uint8_t shuffle);
1045
// Word blend under an imm8 mask.
1046 void pblendw(XMMRegister dst, XMMRegister src, uint8_t mask) {
1047 pblendw(dst, Operand(src), mask);
1048 }
1049 void pblendw(XMMRegister dst, Operand src, uint8_t mask);
1050
// Byte-wise concatenate-and-shift; imm8 gives the byte offset.
1051 void palignr(XMMRegister dst, XMMRegister src, uint8_t mask) {
1052 palignr(dst, Operand(src), mask);
1053 }
1054 void palignr(XMMRegister dst, Operand src, uint8_t mask);
1055
// Extract the byte at lane `offset` into a GP register or memory.
1056 void pextrb(Register dst, XMMRegister src, uint8_t offset) {
1057 pextrb(Operand(dst), src, offset);
1058 }
1059 void pextrb(Operand dst, XMMRegister src, uint8_t offset);
1060 // SSE3 instructions
1062 void movddup(XMMRegister dst, XMMRegister src) { movddup(dst, Operand(src)); }
1064
1065 // Use SSE4_1 encoding for pextrw reg, xmm, imm8 for consistency
// Lane extract (word/dword) to GP register or memory; imm8 selects the lane.
1066 void pextrw(Register dst, XMMRegister src, uint8_t offset) {
1067 pextrw(Operand(dst), src, offset);
1068 }
1069 void pextrw(Operand dst, XMMRegister src, uint8_t offset);
1070 void pextrd(Register dst, XMMRegister src, uint8_t offset) {
1071 pextrd(Operand(dst), src, offset);
1072 }
1073 void pextrd(Operand dst, XMMRegister src, uint8_t offset);
1074
// Lane insert: insertps for floats, pinsrb/w/d for byte/word/dword integers;
// imm8 selects the destination lane (and, for insertps, the source selector).
1075 void insertps(XMMRegister dst, XMMRegister src, uint8_t offset) {
1076 insertps(dst, Operand(src), offset);
1077 }
1078 void insertps(XMMRegister dst, Operand src, uint8_t offset);
1079 void pinsrb(XMMRegister dst, Register src, uint8_t offset) {
1080 pinsrb(dst, Operand(src), offset);
1081 }
1082 void pinsrb(XMMRegister dst, Operand src, uint8_t offset);
1083 void pinsrw(XMMRegister dst, Register src, uint8_t offset) {
1084 pinsrw(dst, Operand(src), offset);
1085 }
1086 void pinsrw(XMMRegister dst, Operand src, uint8_t offset);
1087 void pinsrd(XMMRegister dst, Register src, uint8_t offset) {
1088 pinsrd(dst, Operand(src), offset);
1089 }
1090 void pinsrd(XMMRegister dst, Operand src, uint8_t offset);
1091
1094
1095 // AVX instructions
1097 vaddss(dst, src1, Operand(src2));
1098 }
1099 void vaddss(XMMRegister dst, XMMRegister src1, Operand src2) {
1100 vss(0x58, dst, src1, src2);
1101 }
1103 vsubss(dst, src1, Operand(src2));
1104 }
1105 void vsubss(XMMRegister dst, XMMRegister src1, Operand src2) {
1106 vss(0x5c, dst, src1, src2);
1107 }
1109 vmulss(dst, src1, Operand(src2));
1110 }
1111 void vmulss(XMMRegister dst, XMMRegister src1, Operand src2) {
1112 vss(0x59, dst, src1, src2);
1113 }
1115 vdivss(dst, src1, Operand(src2));
1116 }
1117 void vdivss(XMMRegister dst, XMMRegister src1, Operand src2) {
1118 vss(0x5e, dst, src1, src2);
1119 }
1121 vmaxss(dst, src1, Operand(src2));
1122 }
1123 void vmaxss(XMMRegister dst, XMMRegister src1, Operand src2) {
1124 vss(0x5f, dst, src1, src2);
1125 }
1127 vminss(dst, src1, Operand(src2));
1128 }
1129 void vminss(XMMRegister dst, XMMRegister src1, Operand src2) {
1130 vss(0x5d, dst, src1, src2);
1131 }
1133 vsqrtss(dst, src1, Operand(src2));
1134 }
1135 void vsqrtss(XMMRegister dst, XMMRegister src1, Operand src2) {
1136 vss(0x51, dst, src1, src2);
1137 }
1138 void vss(uint8_t op, XMMRegister dst, XMMRegister src1, Operand src2);
1139
1141 vhaddps(dst, src1, Operand(src2));
1142 }
1143 void vhaddps(XMMRegister dst, XMMRegister src1, Operand src2) {
1144 vinstr(0x7C, dst, src1, src2, kF2, k0F, kWIG);
1145 }
1146 void vsqrtpd(XMMRegister dst, XMMRegister src) { vsqrtpd(dst, Operand(src)); }
1147 void vsqrtpd(XMMRegister dst, Operand src) {
1148 vinstr(0x51, dst, xmm0, src, k66, k0F, kWIG);
1149 }
1150 void vmovss(Operand dst, XMMRegister src) {
1151 vinstr(0x11, src, xmm0, dst, kF3, k0F, kWIG);
1152 }
1154 vinstr(0x10, dst, src1, src2, kF3, k0F, kWIG);
1155 }
1156 void vmovss(XMMRegister dst, Operand src) {
1157 vinstr(0x10, dst, xmm0, src, kF3, k0F, kWIG);
1158 }
1159 void vmovsd(Operand dst, XMMRegister src) {
1160 vinstr(0x11, src, xmm0, dst, kF2, k0F, kWIG);
1161 }
1163 vinstr(0x10, dst, src1, src2, kF2, k0F, kWIG);
1164 }
1165 void vmovsd(XMMRegister dst, Operand src) {
1166 vinstr(0x10, dst, xmm0, src, kF2, k0F, kWIG);
1167 }
1168
1169 void vextractps(Operand dst, XMMRegister src, uint8_t imm8);
1170
1172
1173 void vmovaps(XMMRegister dst, XMMRegister src) { vmovaps(dst, Operand(src)); }
1174 void vmovaps(XMMRegister dst, Operand src) { vps(0x28, dst, xmm0, src); }
1175 void vmovapd(XMMRegister dst, XMMRegister src) { vmovapd(dst, Operand(src)); }
1176 void vmovapd(XMMRegister dst, Operand src) { vpd(0x28, dst, xmm0, src); }
1177 void vmovups(Operand dst, XMMRegister src) { vps(0x11, src, xmm0, dst); }
1178 void vmovups(XMMRegister dst, XMMRegister src) { vmovups(dst, Operand(src)); }
1179 void vmovups(XMMRegister dst, Operand src) { vps(0x10, dst, xmm0, src); }
1180 void vmovupd(XMMRegister dst, Operand src) { vpd(0x10, dst, xmm0, src); }
1182 uint8_t imm8) {
1183 vshufps(dst, src1, Operand(src2), imm8);
1184 }
1185 void vshufps(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8);
1187 uint8_t imm8) {
1188 vshufpd(dst, src1, Operand(src2), imm8);
1189 }
1190 void vshufpd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8);
1191
1198
1199 void vpsllw(XMMRegister dst, XMMRegister src, uint8_t imm8);
1200 void vpslld(XMMRegister dst, XMMRegister src, uint8_t imm8);
1201 void vpsllq(XMMRegister dst, XMMRegister src, uint8_t imm8);
1202 void vpsrlw(XMMRegister dst, XMMRegister src, uint8_t imm8);
1203 void vpsrld(XMMRegister dst, XMMRegister src, uint8_t imm8);
1204 void vpsraw(XMMRegister dst, XMMRegister src, uint8_t imm8);
1205 void vpsrad(XMMRegister dst, XMMRegister src, uint8_t imm8);
1206 void vpsrlq(XMMRegister dst, XMMRegister src, uint8_t imm8);
1207
1208 void vpshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
1209 vpshufhw(dst, Operand(src), shuffle);
1210 }
1211 void vpshufhw(XMMRegister dst, Operand src, uint8_t shuffle);
1212 void vpshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
1213 vpshuflw(dst, Operand(src), shuffle);
1214 }
1215 void vpshuflw(XMMRegister dst, Operand src, uint8_t shuffle);
1216 void vpshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
1217 vpshufd(dst, Operand(src), shuffle);
1218 }
1219 void vpshufd(XMMRegister dst, Operand src, uint8_t shuffle);
1220
1227
1229 uint8_t mask) {
1230 vpblendw(dst, src1, Operand(src2), mask);
1231 }
1232 void vpblendw(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t mask);
1233
1235 uint8_t mask) {
1236 vpalignr(dst, src1, Operand(src2), mask);
1237 }
1238 void vpalignr(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t mask);
1239
1240 void vpextrb(Register dst, XMMRegister src, uint8_t offset) {
1241 vpextrb(Operand(dst), src, offset);
1242 }
1243 void vpextrb(Operand dst, XMMRegister src, uint8_t offset);
1244 void vpextrw(Register dst, XMMRegister src, uint8_t offset) {
1245 vpextrw(Operand(dst), src, offset);
1246 }
1247 void vpextrw(Operand dst, XMMRegister src, uint8_t offset);
1248 void vpextrd(Register dst, XMMRegister src, uint8_t offset) {
1249 vpextrd(Operand(dst), src, offset);
1250 }
1251 void vpextrd(Operand dst, XMMRegister src, uint8_t offset);
1252
1254 uint8_t offset) {
1255 vinsertps(dst, src1, Operand(src2), offset);
1256 }
1258 uint8_t offset);
1260 uint8_t offset) {
1261 vpinsrb(dst, src1, Operand(src2), offset);
1262 }
1263 void vpinsrb(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t offset);
1265 uint8_t offset) {
1266 vpinsrw(dst, src1, Operand(src2), offset);
1267 }
1268 void vpinsrw(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t offset);
1270 uint8_t offset) {
1271 vpinsrd(dst, src1, Operand(src2), offset);
1272 }
1273 void vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t offset);
1274
1276 RoundingMode mode);
1278 RoundingMode mode);
1281
1283 vinstr(0xE6, dst, xmm0, src, kF3, k0F, kWIG);
1284 }
1286 vinstr(0x5A, dst, xmm0, src, k66, k0F, kWIG);
1287 }
1289 vcvttps2dq(dst, Operand(src));
1290 }
1292 vinstr(0x5B, dst, xmm0, src, kF3, k0F, kWIG);
1293 }
1295 vinstr(0xE6, dst, xmm0, src, k66, k0F, kWIG);
1296 }
1298 XMMRegister idst = XMMRegister::from_code(dst.code());
1299 vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW0);
1300 }
1301 void vcvttsd2si(Register dst, Operand src) {
1302 XMMRegister idst = XMMRegister::from_code(dst.code());
1303 vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW0);
1304 }
1306 vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
1307 }
1309 vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
1310 }
1312 XMMRegister idst = XMMRegister::from_code(dst.code());
1313 vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW0);
1314 }
1315 void vcvttss2si(Register dst, Operand src) {
1316 XMMRegister idst = XMMRegister::from_code(dst.code());
1317 vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW0);
1318 }
1319
1321 vinstr(0x12, dst, xmm0, src, kF2, k0F, kWIG);
1322 }
1324 vmovddup(dst, Operand(src));
1325 }
1327 vinstr(0x16, dst, xmm0, src, kF3, k0F, kWIG);
1328 }
1330 vinstr(0x18, dst, xmm0, src, k66, k0F38, kW0, AVX2);
1331 }
1333 vinstr(0x18, dst, xmm0, src, k66, k0F38, kW0);
1334 }
1335 void vmovdqa(XMMRegister dst, Operand src) {
1336 vinstr(0x6F, dst, xmm0, src, k66, k0F, kWIG);
1337 }
1339 vinstr(0x6F, dst, xmm0, src, k66, k0F, kWIG);
1340 }
1341 void vmovdqu(XMMRegister dst, Operand src) {
1342 vinstr(0x6F, dst, xmm0, src, kF3, k0F, kWIG);
1343 }
1344 void vmovdqu(Operand dst, XMMRegister src) {
1345 vinstr(0x7F, src, xmm0, dst, kF3, k0F, kWIG);
1346 }
1347 void vmovd(XMMRegister dst, Register src) { vmovd(dst, Operand(src)); }
1348 void vmovd(XMMRegister dst, Operand src) {
1349 vinstr(0x6E, dst, xmm0, src, k66, k0F, kWIG);
1350 }
1351 void vmovd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
1352 void vmovd(Operand dst, XMMRegister src) {
1353 vinstr(0x7E, src, xmm0, dst, k66, k0F, kWIG);
1354 }
1355
1358
1360
1362 vinstr(0x2E, dst, xmm0, src, k66, k0F, kWIG);
1363 }
1365 vinstr(0x2E, dst, xmm0, src, k66, k0F, kWIG);
1366 }
1368 vinstr(0x2E, dst, xmm0, src, kNoPrefix, k0F, kWIG);
1369 }
1371 vinstr(0x2E, dst, xmm0, src, kNoPrefix, k0F, kWIG);
1372 }
1373
1374 // BMI instruction
// BMI1: andn computes dst = ~src1 & src2.
1375 void andn(Register dst, Register src1, Register src2) {
1376 andn(dst, src1, Operand(src2));
1377 }
1378 void andn(Register dst, Register src1, Operand src2) {
1379 bmi1(0xf2, dst, src1, src2);
1380 }
// bextr: bit-field extract. Note the argument swap into bmi1(): the control
// register (src2) goes in the vvvv slot, the source (src1) is the r/m operand.
1381 void bextr(Register dst, Register src1, Register src2) {
1382 bextr(dst, Operand(src1), src2);
1383 }
1384 void bextr(Register dst, Operand src1, Register src2) {
1385 bmi1(0xf7, dst, src2, src1);
1386 }
// blsi/blsmsk/blsr share opcode 0xf3; the fixed first register (ebx=3, edx=2,
// ecx=1) appears to supply the ModRM.reg opcode extension (/3, /2, /1) —
// NOTE(review): confirm against bmi1()'s encoding.
1387 void blsi(Register dst, Register src) { blsi(dst, Operand(src)); }
1388 void blsi(Register dst, Operand src) { bmi1(0xf3, ebx, dst, src); }
1389 void blsmsk(Register dst, Register src) { blsmsk(dst, Operand(src)); }
1390 void blsmsk(Register dst, Operand src) { bmi1(0xf3, edx, dst, src); }
1391 void blsr(Register dst, Register src) { blsr(dst, Operand(src)); }
1392 void blsr(Register dst, Operand src) { bmi1(0xf3, ecx, dst, src); }
// Bit-count instructions; register forms forward to the Operand forms.
1393 void tzcnt(Register dst, Register src) { tzcnt(dst, Operand(src)); }
1394 void tzcnt(Register dst, Operand src);
1395
1396 void lzcnt(Register dst, Register src) { lzcnt(dst, Operand(src)); }
1397 void lzcnt(Register dst, Operand src);
1398
1399 void popcnt(Register dst, Register src) { popcnt(dst, Operand(src)); }
1400 void popcnt(Register dst, Operand src);
1401
// BMI2 instructions. For bzhi/sarx/shlx/shrx the second register operand
// (src2: the index/shift count) is passed in bmi2()'s vvvv slot, while src1
// is the r/m operand — hence the swapped argument order below.
1402 void bzhi(Register dst, Register src1, Register src2) {
1403 bzhi(dst, Operand(src1), src2);
1404 }
1405 void bzhi(Register dst, Operand src1, Register src2) {
1406 bmi2(kNoPrefix, 0xf5, dst, src2, src1);
1407 }
// mulx: unsigned multiply producing two destination registers, no flags.
1408 void mulx(Register dst1, Register dst2, Register src) {
1409 mulx(dst1, dst2, Operand(src));
1410 }
1411 void mulx(Register dst1, Register dst2, Operand src) {
1412 bmi2(kF2, 0xf6, dst1, dst2, src);
1413 }
// pdep/pext: parallel bit deposit/extract under the mask in src2.
1414 void pdep(Register dst, Register src1, Register src2) {
1415 pdep(dst, src1, Operand(src2));
1416 }
1417 void pdep(Register dst, Register src1, Operand src2) {
1418 bmi2(kF2, 0xf5, dst, src1, src2);
1419 }
1420 void pext(Register dst, Register src1, Register src2) {
1421 pext(dst, src1, Operand(src2));
1422 }
1423 void pext(Register dst, Register src1, Operand src2) {
1424 bmi2(kF3, 0xf5, dst, src1, src2);
1425 }
// Non-destructive shifts: same opcode 0xf7, distinguished by SIMD prefix
// (kF3 = sarx, k66 = shlx, kF2 = shrx).
1426 void sarx(Register dst, Register src1, Register src2) {
1427 sarx(dst, Operand(src1), src2);
1428 }
1429 void sarx(Register dst, Operand src1, Register src2) {
1430 bmi2(kF3, 0xf7, dst, src2, src1);
1431 }
1432 void shlx(Register dst, Register src1, Register src2) {
1433 shlx(dst, Operand(src1), src2);
1434 }
1435 void shlx(Register dst, Operand src1, Register src2) {
1436 bmi2(k66, 0xf7, dst, src2, src1);
1437 }
1438 void shrx(Register dst, Register src1, Register src2) {
1439 shrx(dst, Operand(src1), src2);
1440 }
1441 void shrx(Register dst, Operand src1, Register src2) {
1442 bmi2(kF2, 0xf7, dst, src2, src1);
1443 }
// rorx: rotate right by immediate, flags untouched.
1444 void rorx(Register dst, Register src, uint8_t imm8) {
1445 rorx(dst, Operand(src), imm8);
1446 }
1447 void rorx(Register dst, Operand src, uint8_t imm8);
1448
1449 // Implementation of packed single-precision floating-point SSE instructions.
1450 void ps(uint8_t op, XMMRegister dst, Operand src);
1451 // Implementation of packed double-precision floating-point SSE instructions.
1452 void pd(uint8_t op, XMMRegister dst, Operand src);
1453
// X-macro list of packed-float binary ops, mapping op name -> SSE opcode
// byte. Instantiated via SSE_PACKED_OP_DECLARE and AVX_PACKED_OP_DECLARE
// below to generate the {name}ps/{name}pd (and v-prefixed) members.
1454#define PACKED_OP_LIST(V) \
1455 V(unpckl, 0x14) \
1456 V(and, 0x54) \
1457 V(andn, 0x55) \
1458 V(or, 0x56) \
1459 V(xor, 0x57) \
1460 V(add, 0x58) \
1461 V(mul, 0x59) \
1462 V(sub, 0x5c) \
1463 V(min, 0x5d) \
1464 V(div, 0x5e) \
1465 V(max, 0x5f)
1466
// Generates the SSE {name}ps and {name}pd members (XMM,XMM and XMM,Operand
// forms), all forwarding to the ps()/pd() emitters with the given opcode.
1467#define SSE_PACKED_OP_DECLARE(name, opcode) \
1468 void name##ps(XMMRegister dst, XMMRegister src) { \
1469 ps(opcode, dst, Operand(src)); \
1470 } \
1471 void name##ps(XMMRegister dst, Operand src) { ps(opcode, dst, src); } \
1472 void name##pd(XMMRegister dst, XMMRegister src) { \
1473 pd(opcode, dst, Operand(src)); \
1474 } \
1475 void name##pd(XMMRegister dst, Operand src) { pd(opcode, dst, src); }
1476
1478#undef SSE_PACKED_OP_DECLARE
1479
// Generates the three-operand AVX counterparts v{name}ps / v{name}pd,
// forwarding to the vps()/vpd() emitters with the same opcode byte as the
// SSE forms.
1480#define AVX_PACKED_OP_DECLARE(name, opcode) \
1481 void v##name##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
1482 vps(opcode, dst, src1, Operand(src2)); \
1483 } \
1484 void v##name##ps(XMMRegister dst, XMMRegister src1, Operand src2) { \
1485 vps(opcode, dst, src1, src2); \
1486 } \
1487 void v##name##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
1488 vpd(opcode, dst, src1, Operand(src2)); \
1489 } \
1490 void v##name##pd(XMMRegister dst, XMMRegister src1, Operand src2) { \
1491 vpd(opcode, dst, src1, src2); \
1492 }
1493
1495#undef AVX_PACKED_OP_DECLARE
1496#undef PACKED_OP_LIST
1497
1498 void vps(uint8_t op, XMMRegister dst, XMMRegister src1, Operand src2);
1499 void vpd(uint8_t op, XMMRegister dst, XMMRegister src1, Operand src2);
1500
1501 void vcmpps(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t cmp);
1502 void vcmppd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t cmp);
1503
// Generates AVX packed-compare members v{instr}ps / v{instr}pd that forward
// to vcmpps/vcmppd with a fixed comparison-predicate immediate.
1504#define AVX_CMP_P(instr, imm8) \
1505 void v##instr##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
1506 vcmpps(dst, src1, Operand(src2), imm8); \
1507 } \
1508 void v##instr##ps(XMMRegister dst, XMMRegister src1, Operand src2) { \
1509 vcmpps(dst, src1, src2, imm8); \
1510 } \
1511 void v##instr##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
1512 vcmppd(dst, src1, Operand(src2), imm8); \
1513 } \
1514 void v##instr##pd(XMMRegister dst, XMMRegister src1, Operand src2) { \
1515 vcmppd(dst, src1, src2, imm8); \
1516 }
1517
1519 // vcmpgeps/vcmpgepd only in AVX.
1520 AVX_CMP_P(cmpge, 0xd)
1521#undef AVX_CMP_P
1522#undef PACKED_CMP_LIST
1523
1524// Other SSE and AVX instructions
// Generates an unprefixed SSE unary op plus its VEX (v-prefixed) form.
// `escape`/`opcode` are spliced as hex digits (0x##escape, 0x##opcode).
1525#define DECLARE_SSE_UNOP_AND_AVX(instruction, escape, opcode) \
1526 void instruction(XMMRegister dst, XMMRegister src) { \
1527 instruction(dst, Operand(src)); \
1528 } \
1529 void instruction(XMMRegister dst, Operand src) { \
1530 sse_instr(dst, src, 0x##escape, 0x##opcode); \
1531 } \
1532 void v##instruction(XMMRegister dst, XMMRegister src) { \
1533 v##instruction(dst, Operand(src)); \
1534 } \
1535 void v##instruction(XMMRegister dst, Operand src) { \
1536 vinstr(0x##opcode, dst, xmm0, src, kNoPrefix, k##escape, kWIG); \
1537 }
1538
1540#undef DECLARE_SSE_UNOP_AND_AVX
1541
// Generates a two-operand SSE2 op (XMM,XMM and XMM,Operand forms) forwarding
// to sse2_instr() with the prefix/escape/opcode bytes.
1542#define DECLARE_SSE2_INSTRUCTION(instruction, prefix, escape, opcode) \
1543 void instruction(XMMRegister dst, XMMRegister src) { \
1544 instruction(dst, Operand(src)); \
1545 } \
1546 void instruction(XMMRegister dst, Operand src) { \
1547 sse2_instr(dst, src, 0x##prefix, 0x##escape, 0x##opcode); \
1548 }
1549
1552#undef DECLARE_SSE2_INSTRUCTION
1553
// Generates the three-operand VEX form (v-prefixed) of an SSE2 op via
// vinstr() with a single escape byte and W0.
1554#define DECLARE_SSE2_AVX_INSTRUCTION(instruction, prefix, escape, opcode) \
1555 void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
1556 v##instruction(dst, src1, Operand(src2)); \
1557 } \
1558 void v##instruction(XMMRegister dst, XMMRegister src1, Operand src2) { \
1559 vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0); \
1560 }
1561
1564#undef DECLARE_SSE2_AVX_INSTRUCTION
1565
// Generates a two-operand SSSE3 op (two escape bytes) via ssse3_instr().
1566#define DECLARE_SSSE3_INSTRUCTION(instruction, prefix, escape1, escape2, \
1567 opcode) \
1568 void instruction(XMMRegister dst, XMMRegister src) { \
1569 instruction(dst, Operand(src)); \
1570 } \
1571 void instruction(XMMRegister dst, Operand src) { \
1572 ssse3_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
1573 }
1574
1577#undef DECLARE_SSSE3_INSTRUCTION
1578
// Generates a two-operand SSE4 op (two escape bytes) via sse4_instr().
1579#define DECLARE_SSE4_INSTRUCTION(instruction, prefix, escape1, escape2, \
1580 opcode) \
1581 void instruction(XMMRegister dst, XMMRegister src) { \
1582 instruction(dst, Operand(src)); \
1583 } \
1584 void instruction(XMMRegister dst, Operand src) { \
1585 sse4_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
1586 }
1587
1590 DECLARE_SSE4_INSTRUCTION(blendvps, 66, 0F, 38, 14)
1591 DECLARE_SSE4_INSTRUCTION(blendvpd, 66, 0F, 38, 15)
1592 DECLARE_SSE4_INSTRUCTION(pblendvb, 66, 0F, 38, 10)
1593#undef DECLARE_SSE4_INSTRUCTION
1594
// Generates the three-operand VEX form of an SSSE3/SSE4 op; the two escape
// tokens are pasted into one LeadingOpcode enumerator (e.g. k0F38).
1595#define DECLARE_SSE34_AVX_INSTRUCTION(instruction, prefix, escape1, escape2, \
1596 opcode) \
1597 void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
1598 v##instruction(dst, src1, Operand(src2)); \
1599 } \
1600 void v##instruction(XMMRegister dst, XMMRegister src1, Operand src2) { \
1601 vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape1##escape2, kW0); \
1602 }
1603
1606#undef DECLARE_SSE34_AVX_INSTRUCTION
1607
// Generates a two-operand (reg, reg/mem) VEX form; xmm0 fills the unused
// vvvv slot of the encoding.
1608#define DECLARE_SSE4_AVX_RM_INSTRUCTION(instruction, prefix, escape1, escape2, \
1609 opcode) \
1610 void v##instruction(XMMRegister dst, XMMRegister src) { \
1611 v##instruction(dst, Operand(src)); \
1612 } \
1613 void v##instruction(XMMRegister dst, Operand src) { \
1614 vinstr(0x##opcode, dst, xmm0, src, k##prefix, k##escape1##escape2, kW0); \
1615 }
1616
1619#undef DECLARE_SSE4_AVX_RM_INSTRUCTION
1620
1621 // AVX2 instructions
// Generates two-operand forms gated on the AVX2 CPU feature (passed as the
// required feature to vinstr()).
1622#define AVX2_INSTRUCTION(instr, prefix, escape1, escape2, opcode) \
1623 void instr(XMMRegister dst, XMMRegister src) { \
1624 vinstr(0x##opcode, dst, xmm0, src, k##prefix, k##escape1##escape2, kW0, \
1625 AVX2); \
1626 } \
1627 void instr(XMMRegister dst, Operand src) { \
1628 vinstr(0x##opcode, dst, xmm0, src, k##prefix, k##escape1##escape2, kW0, \
1629 AVX2); \
1630 }
1632#undef AVX2_INSTRUCTION
1633
// Generates three-operand fused-multiply-add forms, gated on the FMA3 CPU
// feature; `length` selects the VEX vector-length enumerator.
1634#define FMA(instr, length, prefix, escape1, escape2, extension, opcode) \
1635 void instr(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
1636 vinstr(0x##opcode, dst, src1, src2, k##length, k##prefix, \
1637 k##escape1##escape2, k##extension, FMA3); \
1638 } \
1639 void instr(XMMRegister dst, XMMRegister src1, Operand src2) { \
1640 vinstr(0x##opcode, dst, src1, src2, k##length, k##prefix, \
1641 k##escape1##escape2, k##extension, FMA3); \
1642 }
1644#undef FMA
1645
1646 // Prefetch src position into cache level.
1647 // Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
1648 // non-temporal
1649 void prefetch(Operand src, int level);
1650 // TODO(lrn): Need SFENCE for movnt?
1651
1652 // Check the code size generated from label to here.
1654 return pc_offset() - label->pos();
1655 }
1656
1657 // Record a deoptimization reason that can be used by a log or cpu profiler.
1658 // Use --trace-deopt to enable.
1659 void RecordDeoptReason(DeoptimizeReason reason, uint32_t node_id,
1660 SourcePosition position, int id);
1661
1662 // Writes a single byte or word of data in the code stream. Used for
1663 // inline tables, e.g., jump-tables.
1664 void db(uint8_t data);
1665 void dd(uint32_t data);
1666 void dq(uint64_t data);
1667 void dp(uintptr_t data) { dd(data); }
1668 void dd(Label* label);
1669
1670 // Check if there is less than kGap bytes available in the buffer.
1671 // If this is the case, we need to grow the buffer before emitting
1672 // an instruction or relocation information.
1673 inline bool buffer_overflow() const {
1674 return pc_ >= reloc_info_writer.pos() - kGap;
1675 }
1676
1677 // Get the number of bytes available in the buffer.
1678 inline int available_space() const { return reloc_info_writer.pos() - pc_; }
1679
1680 static bool IsNop(Address addr);
1681
1683 return (buffer_start_ + buffer_->size()) - reloc_info_writer.pos();
1684 }
1685
1686 // Avoid overflows for displacements etc.
1687 static constexpr int kMaximalBufferSize = 512 * MB;
1688
1689 uint8_t byte_at(int pos) { return buffer_start_[pos]; }
1690 void set_byte_at(int pos, uint8_t value) { buffer_start_[pos] = value; }
1691
1692 protected:
1697
1698 Address addr_at(int pos) {
1699 DCHECK_GE(pos, 0);
1701 return reinterpret_cast<Address>(buffer_start_ + pos);
1702 }
1703
1704 private:
// Unaligned 32-bit read/write of the code buffer at byte offset `pos`.
1705 uint32_t long_at(int pos) {
1706 return ReadUnalignedValue<uint32_t>(addr_at(pos));
1707 }
1708 void long_at_put(int pos, uint32_t x) {
1709 WriteUnalignedValue(addr_at(pos), x);
1710 }
1711
1712 // code emission
1714 inline void emit(uint32_t x);
1715 inline void emit(Handle<HeapObject> handle);
1716 inline void emit(uint32_t x, RelocInfo::Mode rmode);
1717 inline void emit(Handle<Code> code, RelocInfo::Mode rmode);
1718 inline void emit(const Immediate& x);
1719 inline void emit_b(Immediate x);
1720 inline void emit_w(const Immediate& x);
1721 inline void emit_q(uint64_t x);
1722
1723 // Emit the code-object-relative offset of the label's position
1724 inline void emit_code_relative_offset(Label* label);
1725
1726 // instruction generation
1727 void emit_arith_b(int op1, int op2, Register dst, int imm8);
1728
1729 // Emit a basic arithmetic instruction (i.e. first byte of the family is 0x81)
1730 // with a given destination expression and an immediate operand. It attempts
1731 // to use the shortest encoding possible.
1732 // sel specifies the /n in the modrm byte (see the Intel PRM).
1733 void emit_arith(int sel, Operand dst, const Immediate& x);
1734
1735 void emit_operand(int code, Operand adr);
1738
1740
1741 void emit_farith(int b1, int b2, int i);
1742
1743 // Emit vex prefix
1744 enum SIMDPrefix { kNoPrefix = 0x0, k66 = 0x1, kF3 = 0x2, kF2 = 0x3 };
1745 enum VectorLength { kL128 = 0x0, kL256 = 0x4, kLIG = kL128, kLZ = kL128 };
1746 enum VexW { kW0 = 0x0, kW1 = 0x80, kWIG = kW0 };
1747 enum LeadingOpcode { k0F = 0x1, k0F38 = 0x2, k0F3A = 0x3 };
1749 LeadingOpcode m, VexW w);
1751 LeadingOpcode m, VexW w);
1752
1753 // labels
1754 void print(const Label* L);
1755 void bind_to(Label* L, int pos);
1756
1757 // displacements
1758 inline Displacement disp_at(Label* L);
1759 inline void disp_at_put(Label* L, Displacement disp);
1760 inline void emit_disp(Label* L, Displacement::Type type);
1761 inline void emit_near_disp(Label* L);
1762
1763 void sse_instr(XMMRegister dst, Operand src, uint8_t prefix, uint8_t opcode);
1764 void sse2_instr(XMMRegister dst, Operand src, uint8_t prefix, uint8_t escape,
1765 uint8_t opcode);
1766 void ssse3_instr(XMMRegister dst, Operand src, uint8_t prefix,
1767 uint8_t escape1, uint8_t escape2, uint8_t opcode);
1768 void sse4_instr(XMMRegister dst, Operand src, uint8_t prefix, uint8_t escape1,
1769 uint8_t escape2, uint8_t opcode);
1770 void vinstr(uint8_t op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
1771 SIMDPrefix pp, LeadingOpcode m, VexW w, CpuFeature = AVX);
1772 void vinstr(uint8_t op, XMMRegister dst, XMMRegister src1, Operand src2,
1773 SIMDPrefix pp, LeadingOpcode m, VexW w, CpuFeature = AVX);
1774 void vinstr(uint8_t op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
1776 CpuFeature = AVX);
1777 void vinstr(uint8_t op, XMMRegister dst, XMMRegister src1, Operand src2,
1779 CpuFeature = AVX);
1780 // Most BMI instructions are similar.
1781 void bmi1(uint8_t op, Register reg, Register vreg, Operand rm);
1782 void bmi2(SIMDPrefix pp, uint8_t op, Register reg, Register vreg, Operand rm);
1783 void fma_instr(uint8_t op, XMMRegister dst, XMMRegister src1,
1785 LeadingOpcode m, VexW w);
1786 void fma_instr(uint8_t op, XMMRegister dst, XMMRegister src1, Operand src2,
1788
1789 // record reloc info for current pc_
1790 void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
1791
1792 // record the position of jmp/jcc instruction
1794
1796
1798
1800
1801 friend class EnsureSpace;
1802
1803 // Internal reference positions, required for (potential) patching in
1804 // GrowBuffer(); contains only those internal references whose labels
1805 // are already bound.
1806 std::deque<int> internal_reference_positions_;
1807
1808 // code generation
1809 RelocInfoWriter reloc_info_writer;
1810};
1811
1812// Helper class that ensures that there is enough space for generating
1813// instructions and relocation information. The constructor makes
1814// sure that there is enough space and (in debug mode) the destructor
1815// checks that we did not generate too much.
1816 class EnsureSpace {
1817 public:
// Records the assembler's free space (debug builds only) so the destructor
// can verify how much was emitted. NOTE(review): this listing elides part of
// the constructor body — presumably the buffer-growth step; confirm in the
// original source.
1818 explicit V8_INLINE EnsureSpace(Assembler* assembler) : assembler_(assembler) {
1820#ifdef DEBUG
1821 space_before_ = assembler->available_space();
1822#endif
1823 }
1824
1825#ifdef DEBUG
// Debug check: the code emitted while this guard was live must stay under
// the assembler's kGap reserve.
1826 ~EnsureSpace() {
1827 int bytes_generated = space_before_ - assembler_->available_space();
1828 DCHECK(bytes_generated < assembler_->kGap);
1829 }
1830#endif
1831
1832 private:
1834#ifdef DEBUG
// Free space measured at construction, compared against in the destructor.
1835 int space_before_;
1836#endif
1837};
1838
1839} // namespace internal
1840} // namespace v8
1841
1842#endif // V8_CODEGEN_IA32_ASSEMBLER_IA32_H_
#define AVX2_INSTRUCTION(instr, prefix, escape1, escape2, opcode)
#define SSE_CMP_P(instr, imm8)
#define PACKED_CMP_LIST(V)
#define DECLARE_SSE4_AVX_RM_INSTRUCTION(instruction, prefix, escape1, escape2, opcode)
#define AVX_CMP_P(instr, imm8)
#define FMA(instr, length, prefix, escape1, escape2, extension, opcode)
#define DECLARE_SSE34_AVX_INSTRUCTION(instruction, prefix, escape1, escape2, opcode)
#define AVX_PACKED_OP_DECLARE(name, opcode)
#define DECLARE_SSE_UNOP_AND_AVX(instruction, escape, opcode)
#define PACKED_OP_LIST(V)
#define SSE_PACKED_OP_DECLARE(name, opcode)
#define DECLARE_SSE2_INSTRUCTION(instruction, prefix, escape, opcode)
#define DECLARE_SSE2_AVX_INSTRUCTION(instruction, prefix, escape, opcode)
#define DECLARE_SSSE3_INSTRUCTION(instruction, prefix, escape1, escape2, opcode)
#define DECLARE_SSE4_INSTRUCTION(instruction, prefix, escape1, escape2, opcode)
#define F(name, str)
uint8_t * buf_
interpreter::OperandScale scale
Definition builtins.cc:44
SourcePosition pos
static constexpr T decode(U value)
Definition bit-field.h:66
void bind_to(Label *L, int pos)
void xor_(Register dst, Operand src)
void shrx(Register dst, Register src1, Register src2)
void neg(Register dst)
void movdqu(XMMRegister dst, XMMRegister src)
void test_w(Register reg, Immediate imm16)
void mov_w(Operand dst, Register src)
void palignr(XMMRegister dst, Operand src, uint8_t mask)
void vpblendvb(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask)
void sse_instr(XMMRegister dst, Operand src, uint8_t prefix, uint8_t opcode)
void cmov(Condition cc, Register dst, Register src)
void xor_(Operand dst, const Immediate &x)
void vmovhps(Operand dst, XMMRegister src)
void GetCode(LocalIsolate *isolate, CodeDesc *desc)
void adc(Register dst, Register src)
void divss(XMMRegister dst, XMMRegister src)
void shrx(Register dst, Operand src1, Register src2)
void emit_sse_operand(XMMRegister dst, XMMRegister src)
void mov(Register dst, Handle< HeapObject > handle)
void test(Operand op, Register reg)
void vaddss(XMMRegister dst, XMMRegister src1, Operand src2)
void movsd(XMMRegister dst, Operand src)
void call(Address entry, RelocInfo::Mode rmode)
void Nop(int bytes=1)
void mulx(Register dst1, Register dst2, Register src)
void cmppd(XMMRegister dst, XMMRegister src, uint8_t cmp)
void cmpw(Register dst, Immediate src)
void sqrtss(XMMRegister dst, Operand src)
void cmpb(Register dst, Register src)
void test_w(Register reg, Operand op)
void movapd(XMMRegister dst, XMMRegister src)
void adc(Register dst, int32_t imm32)
void cvttss2si(Register dst, XMMRegister src)
void popcnt(Register dst, Register src)
void or_(Register dst, Operand src)
void neg(Operand dst)
void vinstr(uint8_t op, XMMRegister dst, XMMRegister src1, Operand src2, VectorLength l, SIMDPrefix pp, LeadingOpcode m, VexW w, CpuFeature=AVX)
void vextractps(Operand dst, XMMRegister src, uint8_t imm8)
void vmovd(XMMRegister dst, Operand src)
void or_(Register dst, Register src)
void sub(Register dst, Register src)
void cvtsi2sd(XMMRegister dst, Operand src)
void vpsrad(XMMRegister dst, XMMRegister src, uint8_t imm8)
void vsqrtpd(XMMRegister dst, Operand src)
void insertps(XMMRegister dst, Operand src, uint8_t offset)
void maxss(XMMRegister dst, Operand src)
void vhaddps(XMMRegister dst, XMMRegister src1, Operand src2)
void vpextrd(Operand dst, XMMRegister src, uint8_t offset)
void vcvtpd2ps(XMMRegister dst, XMMRegister src)
void vpalignr(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t mask)
void psllw(XMMRegister reg, uint8_t shift)
void ror_cl(Register dst)
void rcr(Register dst, uint8_t imm8)
void vmovlps(Operand dst, XMMRegister src)
void dec_b(Operand dst)
void pext(Register dst, Register src1, Register src2)
void movlhps(XMMRegister dst, XMMRegister src)
void and_(Register dst, const Immediate &x)
void sub(Register dst, const Immediate &imm)
void sub(Register dst, Operand src)
void vmaxss(XMMRegister dst, XMMRegister src1, XMMRegister src2)
void ucomiss(XMMRegister dst, XMMRegister src)
void record_farjmp_position(Label *L, int pos)
void bmi1(uint8_t op, Register reg, Register vreg, Operand rm)
void div(Operand src)
void mov_b(Operand dst, int8_t src)
void movss(XMMRegister dst, Operand src)
void movupd(XMMRegister dst, Operand src)
void vmovss(XMMRegister dst, XMMRegister src1, XMMRegister src2)
void subss(XMMRegister dst, XMMRegister src)
void vroundsd(XMMRegister dst, XMMRegister src1, XMMRegister src2, RoundingMode mode)
void cmp(Operand op, const Immediate &imm)
void vmovshdup(XMMRegister dst, XMMRegister src)
void vblendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask)
void blsi(Register dst, Operand src)
void sbb(Register dst, Operand src)
void vpextrd(Register dst, XMMRegister src, uint8_t offset)
void lzcnt(Register dst, Register src)
void ucomisd(XMMRegister dst, Operand src)
void vmovd(XMMRegister dst, Register src)
void mov(Register dst, Operand src)
void test_b(Register dst, Register src)
void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode)
void sar_cl(Operand dst)
void mov(Operand dst, Address src, RelocInfo::Mode)
void emit_sse_operand(XMMRegister dst, Register src)
void vps(uint8_t op, XMMRegister dst, XMMRegister src1, Operand src2)
void shrd_cl(Operand dst, Register src)
void fld_d(Operand adr)
void vpshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle)
void cmpb(Operand op, Immediate imm8)
void movss(Operand dst, XMMRegister src)
void vroundps(XMMRegister dst, XMMRegister src, RoundingMode mode)
void sarx(Register dst, Operand src1, Register src2)
void not_(Register dst)
void jmp_rel(int offset)
void sar(Register dst, uint8_t imm8)
void vpinsrb(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t offset)
void jmp(Address entry, RelocInfo::Mode rmode)
void psllq(XMMRegister reg, uint8_t shift)
void emit_label(Label *label)
void fild_s(Operand adr)
void fstp_d(Operand adr)
Assembler(const MaybeAssemblerZone &, const AssemblerOptions &options, std::unique_ptr< AssemblerBuffer > buffer={})
void vpd(uint8_t op, XMMRegister dst, XMMRegister src1, Operand src2)
void palignr(XMMRegister dst, XMMRegister src, uint8_t mask)
void vpinsrw(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t offset)
void test(Operand op, const Immediate &imm)
void vmovups(XMMRegister dst, Operand src)
void ucomisd(XMMRegister dst, XMMRegister src)
void maxss(XMMRegister dst, XMMRegister src)
void imul(Register dst, Register src, int32_t imm32)
void blsmsk(Register dst, Register src)
void psraw(XMMRegister reg, uint8_t shift)
void add(Operand dst, Register src)
static int deserialization_special_target_size(Address instruction_payload)
void vshufpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, uint8_t imm8)
void emit_operand(int code, Operand adr)
void mulss(XMMRegister dst, Operand src)
void ror(Operand dst, uint8_t imm8)
void movmskps(Register dst, XMMRegister src)
void AllocateAndInstallRequestedHeapNumbers(LocalIsolate *isolate)
void xor_(Operand dst, Register src)
void cmp(Register reg, Handle< HeapObject > handle)
void vminss(XMMRegister dst, XMMRegister src1, Operand src2)
void vmovdqa(XMMRegister dst, XMMRegister src)
void vpsraw(XMMRegister dst, XMMRegister src, uint8_t imm8)
void vcvtss2sd(XMMRegister dst, XMMRegister src1, XMMRegister src2)
void vmovlps(XMMRegister dst, XMMRegister src1, Operand src2)
void vmovsd(Operand dst, XMMRegister src)
void xadd_w(Operand dst, Register src)
void pd(uint8_t op, XMMRegister dst, Operand src)
void vaddss(XMMRegister dst, XMMRegister src1, XMMRegister src2)
void divss(XMMRegister dst, Operand src)
void lzcnt(Register dst, Operand src)
void psrlq(XMMRegister reg, uint8_t shift)
void div(Register src)
void mov_b(Register dst, int8_t imm8)
void set_byte_at(int pos, uint8_t value)
void vpsrld(XMMRegister dst, XMMRegister src, uint8_t imm8)
static void deserialization_set_target_internal_reference_at(Address pc, Address target, WritableJitAllocation &jit_allocation, RelocInfo::Mode mode=RelocInfo::INTERNAL_REFERENCE)
void vpextrw(Register dst, XMMRegister src, uint8_t offset)
void mov_b(Operand dst, const Immediate &src)
void vmovss(XMMRegister dst, Operand src)
void mulss(XMMRegister dst, XMMRegister src)
void vsubss(XMMRegister dst, XMMRegister src1, Operand src2)
void mov_w(Register dst, Operand src)
void sar(Operand dst, uint8_t imm8)
void vpshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle)
void j(Condition cc, Label *L, Label::Distance distance=Label::kFar)
void mul(Register src)
void cmpb(Operand op, Register reg)
void movd(XMMRegister dst, Register src)
void vinstr(uint8_t op, XMMRegister dst, XMMRegister src1, XMMRegister src2, SIMDPrefix pp, LeadingOpcode m, VexW w, CpuFeature=AVX)
void vpsrlq(XMMRegister dst, XMMRegister src, uint8_t imm8)
void sbb(Register dst, Register src)
void cmpxchg(Operand dst, Register src)
void fild_d(Operand adr)
void imul(Register dst, Operand src, int32_t imm32)
void vbroadcastss(XMMRegister dst, XMMRegister src)
void vmulss(XMMRegister dst, XMMRegister src1, XMMRegister src2)
void idiv(Register src)
void vmovups(Operand dst, XMMRegister src)
void movups(XMMRegister dst, XMMRegister src)
void ror(Register dst, uint8_t imm8)
void pop(Operand dst)
void jmp(Handle< Code > code, RelocInfo::Mode rmode)
void db(uint8_t data)
void vmovhps(XMMRegister dst, XMMRegister src1, Operand src2)
void shrd_cl(Register dst, Register src)
void vpinsrb(XMMRegister dst, XMMRegister src1, Register src2, uint8_t offset)
void subss(XMMRegister dst, Operand src)
void vblendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask)
void rol(Operand dst, uint8_t imm8)
void and_(Register dst, int32_t imm32)
void vmovddup(XMMRegister dst, Operand src)
void jmp(Register reg)
void add(Register dst, Operand src)
void cvtsi2ss(XMMRegister dst, Operand src)
void mov_w(Operand dst, const Immediate &src)
void dec(Register dst)
void vcvttsd2si(Register dst, XMMRegister src)
void long_at_put(int pos, uint32_t x)
void ps(uint8_t op, XMMRegister dst, Operand src)
void vmulss(XMMRegister dst, XMMRegister src1, Operand src2)
void vmovsd(XMMRegister dst, Operand src)
void vmovmskps(Register dst, XMMRegister src)
void shufps(XMMRegister dst, XMMRegister src, uint8_t imm8)
void cmpw(Operand dst, Immediate src)
void roundpd(XMMRegister dst, XMMRegister src, RoundingMode mode)
void xchg_w(Register reg, Operand op)
void vpinsrw(XMMRegister dst, XMMRegister src1, Register src2, uint8_t offset)
void shr(Register dst, uint8_t imm8)
void vpmovmskb(Register dst, XMMRegister src)
void vucomiss(XMMRegister dst, Operand src)
void pextrd(Operand dst, XMMRegister src, uint8_t offset)
void add(Register dst, const Immediate &imm)
void movdqa(XMMRegister dst, Operand src)
void cmp(Operand op, Register reg)
void vcmppd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t cmp)
void shl_cl(Register dst)
void ucomiss(XMMRegister dst, Operand src)
void extractps(Register dst, XMMRegister src, uint8_t imm8)
void pinsrw(XMMRegister dst, Register src, uint8_t offset)
static void set_uint32_constant_at(Address pc, Address constant_pool, uint32_t new_constant, WritableJitAllocation *jit_allocation, ICacheFlushMode icache_flush_mode=FLUSH_ICACHE_IF_NEEDED)
void not_(Operand dst)
void vmovupd(XMMRegister dst, Operand src)
void psrad(XMMRegister reg, uint8_t shift)
void vmovapd(XMMRegister dst, XMMRegister src)
void cvttsd2si(Register dst, Operand src)
void pshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle)
void vsubss(XMMRegister dst, XMMRegister src1, XMMRegister src2)
void minss(XMMRegister dst, Operand src)
void vpblendw(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t mask)
void mov(Operand dst, Handle< HeapObject > handle)
void pop(Register dst)
void pcmpgtq(XMMRegister dst, XMMRegister src)
void vcvttss2si(Register dst, XMMRegister src)
void sub(Operand dst, Register src)
void bextr(Register dst, Register src1, Register src2)
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data=0)
void vpshufhw(XMMRegister dst, Operand src, uint8_t shuffle)
void tzcnt(Register dst, Operand src)
void fist_s(Operand adr)
void popcnt(Register dst, Operand src)
void rol_cl(Register dst)
void or_(Operand dst, const Immediate &x)
void shl(Register dst, uint8_t imm8)
void cmpxchg_b(Operand dst, Register src)
void test_b(Register reg, Operand op)
void movddup(XMMRegister dst, XMMRegister src)
void vdivss(XMMRegister dst, XMMRegister src1, XMMRegister src2)
void setcc(Condition cc, Register reg)
void vmovss(Operand dst, XMMRegister src)
void movapd(XMMRegister dst, Operand src)
void pinsrd(XMMRegister dst, Operand src, uint8_t offset)
static uint32_t uint32_constant_at(Address pc, Address constant_pool)
void bzhi(Register dst, Register src1, Register src2)
void shrd(Register dst, Register src, uint8_t shift)
void movddup(XMMRegister dst, Operand src)
void psrld(XMMRegister reg, uint8_t shift)
void rol(Register dst, uint8_t imm8)
void jmp(Operand adr)
void print(const Label *L)
void sse4_instr(XMMRegister dst, Operand src, uint8_t prefix, uint8_t escape1, uint8_t escape2, uint8_t opcode)
void GetCode(LocalIsolate *isolate, CodeDesc *desc, SafepointTableBuilder *safepoint_table_builder, int handler_table_offset)
void vucomisd(XMMRegister dst, XMMRegister src)
void pblendw(XMMRegister dst, XMMRegister src, uint8_t mask)
void cmpps(XMMRegister dst, Operand src, uint8_t cmp)
void add(Register dst, Register src)
void cmp(Register reg, Operand op)
void pshufd(XMMRegister dst, Operand src, uint8_t shuffle)
void vinstr(uint8_t op, XMMRegister dst, XMMRegister src1, XMMRegister src2, VectorLength l, SIMDPrefix pp, LeadingOpcode m, VexW w, CpuFeature=AVX)
void mov_b(Register dst, Register src)
void pinsrb(XMMRegister dst, Register src, uint8_t offset)
void cmpxchg_w(Operand dst, Register src)
void movss(XMMRegister dst, XMMRegister src)
void shl_cl(Operand dst)
void cmpw(Register dst, Register src)
void and_(Operand dst, Register src)
void cmpb(Register reg, Operand op)
void and_(Register dst, Operand src)
void test_w(Operand op, Immediate imm16)
void inc(Register dst)
static Address target_address_at(Address pc, Address constant_pool)
void test_b(Operand op, Register reg)
void pinsrd(XMMRegister dst, Register src, uint8_t offset)
uint32_t long_at(int pos)
void vcvttss2si(Register dst, Operand src)
void rorx(Register dst, Register src, uint8_t imm8)
void push(Register src)
void vmovd(Operand dst, XMMRegister src)
void addss(XMMRegister dst, Operand src)
void vroundss(XMMRegister dst, XMMRegister src1, XMMRegister src2, RoundingMode mode)
void fstp_s(Operand adr)
void rol_cl(Operand dst)
void mov_b(Operand dst, Register src)
void call(Register reg)
void or_(Register dst, int32_t imm32)
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode)
void test(Register reg, const Immediate &imm)
void movzx_w(Register dst, Register src)
void sqrtpd(XMMRegister dst, XMMRegister src)
void sse2_instr(XMMRegister dst, Operand src, uint8_t prefix, uint8_t escape, uint8_t opcode)
void pdep(Register dst, Register src1, Register src2)
void vsqrtss(XMMRegister dst, XMMRegister src1, Operand src2)
void vucomiss(XMMRegister dst, XMMRegister src)
void cmp(Register reg, int32_t imm32)
bool is_optimizable_farjmp(int idx)
void pinsrw(XMMRegister dst, Operand src, uint8_t offset)
void movzx_b(Register dst, Register src)
void mov(Register dst, const Immediate &x)
void vmovd(Register dst, XMMRegister src)
void xor_(Register dst, int32_t imm32)
void andn(Register dst, Register src1, Operand src2)
void or_(Operand dst, Register src)
void rorx(Register dst, Operand src, uint8_t imm8)
void shld_cl(Register dst, Register src)
void vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t offset)
void movdqa(Operand dst, XMMRegister src)
void cvttpd2dq(XMMRegister dst, XMMRegister src)
void cmppd(XMMRegister dst, Operand src, uint8_t cmp)
void vinsertps(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t offset)
void vpslld(XMMRegister dst, XMMRegister src, uint8_t imm8)
void vshufpd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8)
void xchg(Register dst, Operand src)
void cmpltsd(XMMRegister dst, XMMRegister src)
void sub(Operand dst, const Immediate &x)
void sarx(Register dst, Register src1, Register src2)
void vmovdqu(Operand dst, XMMRegister src)
void movdqu(XMMRegister dst, Operand src)
void vcvtss2sd(XMMRegister dst, XMMRegister src1, Operand src2)
void bzhi(Register dst, Operand src1, Register src2)
void movsd(Operand dst, XMMRegister src)
void fma_instr(uint8_t op, XMMRegister dst, XMMRegister src1, Operand src2, VectorLength l, SIMDPrefix pp, LeadingOpcode m, VexW w)
void cmp(Register reg, const Immediate &imm)
void cmov(Condition cc, Register dst, Operand src)
void bsf(Register dst, Register src)
void fisttp_d(Operand adr)
void vpsllq(XMMRegister dst, XMMRegister src, uint8_t imm8)
void imul(Register dst, Register src)
void movsd(XMMRegister dst, XMMRegister src)
void movzx_b(Register dst, Operand src)
void vhaddps(XMMRegister dst, XMMRegister src1, XMMRegister src2)
void pextrw(Operand dst, XMMRegister src, uint8_t offset)
void and_(Register dst, Register src)
void vpextrw(Operand dst, XMMRegister src, uint8_t offset)
void movaps(XMMRegister dst, XMMRegister src)
void cvtsi2sd(XMMRegister dst, Register src)
void dec(Operand dst)
void jmp(Label *L, Label::Distance distance=Label::kFar)
void cvtss2sd(XMMRegister dst, Operand src)
void cmpw_ax(Operand op)
void haddps(XMMRegister dst, XMMRegister src)
void mov(Operand dst, Register src)
void GetCode(Isolate *isolate, CodeDesc *desc)
void adc(Register dst, Operand src)
void vpshuflw(XMMRegister dst, Operand src, uint8_t shuffle)
void vmovdqu(XMMRegister dst, Operand src)
void fisttp_s(Operand adr)
void emit_sse_operand(Register dst, XMMRegister src)
void movsx_w(Register dst, Register src)
void xchg(Register dst, Register src)
void cvttsd2si(Register dst, XMMRegister src)
void movsx_w(Register dst, Operand src)
void vpcmpgtq(XMMRegister dst, XMMRegister src1, XMMRegister src2)
void movdq(bool aligned, XMMRegister dst, Operand src)
void mov(Operand dst, const Immediate &x)
void vucomisd(XMMRegister dst, Operand src)
void blsr(Register dst, Register src)
void ret(int imm16)
void pslld(XMMRegister reg, uint8_t shift)
void fistp_d(Operand adr)
void vmovaps(XMMRegister dst, Operand src)
void emit_sse_operand(XMMRegister reg, Operand adr)
void sqrtss(XMMRegister dst, XMMRegister src)
void cmpb(Register reg, Immediate imm8)
void vcvttpd2dq(XMMRegister dst, XMMRegister src)
void test(Register reg0, Register reg1)
void pextrb(Register dst, XMMRegister src, uint8_t offset)
void bsf(Register dst, Operand src)
void pshufhw(XMMRegister dst, Operand src, uint8_t shuffle)
void andn(Register dst, Register src1, Register src2)
void vmovdqa(XMMRegister dst, Operand src)
void emit_vex_prefix(XMMRegister v, VectorLength l, SIMDPrefix pp, LeadingOpcode m, VexW w)
void bmi2(SIMDPrefix pp, uint8_t op, Register reg, Register vreg, Operand rm)
void bextr(Register dst, Operand src1, Register src2)
void pextrd(Register dst, XMMRegister src, uint8_t offset)
void movshdup(XMMRegister dst, XMMRegister src)
void dq(uint64_t data)
static bool IsNop(Address addr)
void pshuflw(XMMRegister dst, Operand src, uint8_t shuffle)
void vmaxss(XMMRegister dst, XMMRegister src1, Operand src2)
void movd(Operand dst, XMMRegister src)
void cvtsd2si(Register dst, XMMRegister src)
void blsr(Register dst, Operand src)
void haddps(XMMRegister dst, Operand src)
void lea(Register dst, Register src, Label *lbl)
void vmovsd(XMMRegister dst, XMMRegister src1, XMMRegister src2)
void cvttps2dq(XMMRegister dst, XMMRegister src)
void vshufps(XMMRegister dst, XMMRegister src1, XMMRegister src2, uint8_t imm8)
void psrlw(XMMRegister reg, uint8_t shift)
void imul(Register dst, Operand src)
void pextrw(Register dst, XMMRegister src, uint8_t offset)
void test_b(Operand op, Immediate imm8)
void movsx_b(Register dst, Operand src)
void mov_w(Operand dst, int16_t src)
void vdivss(XMMRegister dst, XMMRegister src1, Operand src2)
void vpextrb(Register dst, XMMRegister src, uint8_t offset)
void j(Condition cc, Handle< Code > code, RelocInfo::Mode rmode=RelocInfo::CODE_TARGET)
void vpblendw(XMMRegister dst, XMMRegister src1, XMMRegister src2, uint8_t mask)
void emit_vex_prefix(Register v, VectorLength l, SIMDPrefix pp, LeadingOpcode m, VexW w)
void pextrb(Operand dst, XMMRegister src, uint8_t offset)
void vmovapd(XMMRegister dst, Operand src)
void fst_s(Operand adr)
void pinsrb(XMMRegister dst, Operand src, uint8_t offset)
void cmpw(Operand dst, Register src)
void call(Handle< Code > code, RelocInfo::Mode rmode)
void bswap(Register dst)
void test_b(Register reg, Immediate imm8)
void emit_farith(int b1, int b2, int i)
void shr_cl(Register dst)
void pdep(Register dst, Register src1, Operand src2)
void pblendw(XMMRegister dst, Operand src, uint8_t mask)
void vpextrb(Operand dst, XMMRegister src, uint8_t offset)
void wasm_call(Address address, RelocInfo::Mode rmode)
void or_(Register dst, const Immediate &imm)
void shufpd(XMMRegister dst, XMMRegister src, uint8_t imm8)
void vmovaps(XMMRegister dst, XMMRegister src)
void fst_d(Operand adr)
void vpshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle)
void cmpb_al(Operand op)
void mov_b(Register dst, Operand src)
void push(const Immediate &x)
void fisub_s(Operand adr)
void rcl(Register dst, uint8_t imm8)
void vminss(XMMRegister dst, XMMRegister src1, XMMRegister src2)
void lea(Register dst, Operand src)
void emit_arith_b(int op1, int op2, Register dst, int imm8)
void vpsrlw(XMMRegister dst, XMMRegister src, uint8_t imm8)
void sqrtpd(XMMRegister dst, Operand src)
void dp(uintptr_t data)
void bt(Operand dst, Register src)
void shlx(Register dst, Register src1, Register src2)
void test(Register reg, Operand op)
void vroundpd(XMMRegister dst, XMMRegister src, RoundingMode mode)
void emit_operand(Register reg, Operand adr)
void movdqa(XMMRegister dst, XMMRegister src)
void shr_cl(Operand dst)
void xadd_b(Operand dst, Register src)
void fma_instr(uint8_t op, XMMRegister dst, XMMRegister src1, XMMRegister src2, VectorLength l, SIMDPrefix pp, LeadingOpcode m, VexW w)
void vmovmskpd(Register dst, XMMRegister src)
void add(Operand dst, const Immediate &x)
void bsr(Register dst, Register src)
void xor_(Register dst, const Immediate &imm)
void movsx_b(Register dst, Register src)
void cmp(Register reg0, Register reg1)
void shlx(Register dst, Operand src1, Register src2)
void movmskpd(Register dst, XMMRegister src)
void bts(Register dst, Register src)
void fistp_s(Operand adr)
void bsr(Register dst, Operand src)
void call(Operand adr)
static void set_target_address_at(Address pc, Address constant_pool, Address target, WritableJitAllocation *jit_allocation, ICacheFlushMode icache_flush_mode=FLUSH_ICACHE_IF_NEEDED)
void movd(Register dst, XMMRegister src)
void xchg_b(Register reg, Operand op)
void sar_cl(Register dst)
void idiv(Operand src)
void shl(Operand dst, uint8_t imm8)
void movhps(XMMRegister dst, Operand src)
void vinsertps(XMMRegister dst, XMMRegister src1, XMMRegister src2, uint8_t offset)
void pmovmskb(Register dst, XMMRegister src)
void vpalignr(XMMRegister dst, XMMRegister src1, XMMRegister src2, uint8_t mask)
void movd(XMMRegister dst, Operand src)
void movups(XMMRegister dst, Operand src)
void push_imm32(int32_t imm32)
void dd(uint32_t data)
void bts(Operand dst, Register src)
void vpsllw(XMMRegister dst, XMMRegister src, uint8_t imm8)
void cvtpd2ps(XMMRegister dst, XMMRegister src)
void prefetch(Operand src, int level)
void xadd(Operand dst, Register src)
void blsi(Register dst, Register src)
void roundps(XMMRegister dst, XMMRegister src, RoundingMode mode)
void imul(Register src)
void mulx(Register dst1, Register dst2, Operand src)
void cvttps2dq(XMMRegister dst, Operand src)
void sub_sp_32(uint32_t imm)
void inc(Operand dst)
void vcvtdq2pd(XMMRegister dst, XMMRegister src)
void movhlps(XMMRegister dst, XMMRegister src)
Assembler(const AssemblerOptions &, std::unique_ptr< AssemblerBuffer >={})
void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle)
void xor_(Register dst, Register src)
void blsmsk(Register dst, Operand src)
void cvtdq2pd(XMMRegister dst, XMMRegister src)
void movdqu(Operand dst, XMMRegister src)
void vcmpps(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t cmp)
void j(Condition cc, uint8_t *entry, RelocInfo::Mode rmode)
void cvtsi2ss(XMMRegister dst, Register src)
void vpinsrd(XMMRegister dst, XMMRegister src1, Register src2, uint8_t offset)
void movlps(Operand dst, XMMRegister src)
void vsqrtss(XMMRegister dst, XMMRegister src1, XMMRegister src2)
void extractps(Operand dst, XMMRegister src, uint8_t imm8)
void movups(Operand dst, XMMRegister src)
void cmpxchg8b(Operand dst)
void minss(XMMRegister dst, XMMRegister src)
void vss(uint8_t op, XMMRegister dst, XMMRegister src1, Operand src2)
void movq(Operand dst, XMMRegister src)
void mov(Register dst, int32_t imm32)
void mov(Register dst, Register src)
void vmovups(XMMRegister dst, XMMRegister src)
void cmpw(Register dst, Operand src)
void test_w(Register dst, Register src)
void cvttss2si(Register dst, Operand src)
void vmovhlps(XMMRegister dst, XMMRegister src1, XMMRegister src2)
void pext(Register dst, Register src1, Operand src2)
void tzcnt(Register dst, Register src)
void vmovlhps(XMMRegister dst, XMMRegister src1, XMMRegister src2)
void shld(Register dst, Register src, uint8_t shift)
void movaps(XMMRegister dst, Operand src)
int SizeOfCodeGeneratedSince(Label *label)
void ror_cl(Operand dst)
void vsqrtpd(XMMRegister dst, XMMRegister src)
void fld_s(Operand adr)
void push(Operand src)
void vcvttps2dq(XMMRegister dst, XMMRegister src)
void cmp(Operand op, Handle< HeapObject > handle)
void insertps(XMMRegister dst, XMMRegister src, uint8_t offset)
void RecordDeoptReason(DeoptimizeReason reason, uint32_t node_id, SourcePosition position, int id)
void addss(XMMRegister dst, XMMRegister src)
void movlps(XMMRegister dst, Operand src)
void vshufps(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8)
void ssse3_instr(XMMRegister dst, Operand src, uint8_t prefix, uint8_t escape1, uint8_t escape2, uint8_t opcode)
void vpshufd(XMMRegister dst, Operand src, uint8_t shuffle)
void and_(Operand dst, const Immediate &x)
void test_w(Operand op, Register reg)
void emit_operand(XMMRegister reg, Operand adr)
void emit_arith(int sel, Operand dst, const Immediate &x)
void movzx_w(Register dst, Operand src)
void vbroadcastss(XMMRegister dst, Operand src)
void vcvttsd2si(Register dst, Operand src)
void cmpps(XMMRegister dst, XMMRegister src, uint8_t cmp)
void movhps(Operand dst, XMMRegister src)
void cvtss2sd(XMMRegister dst, XMMRegister src)
void dec_b(Register dst)
void vmovddup(XMMRegister dst, XMMRegister src)
void vinstr(uint8_t op, XMMRegister dst, XMMRegister src1, Operand src2, SIMDPrefix pp, LeadingOpcode m, VexW w, CpuFeature=AVX)
void movq(XMMRegister dst, Operand src)
void shr(Operand dst, uint8_t imm8)
void pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle)
void vcvttps2dq(XMMRegister dst, Operand src)
void next(Label *L) const
Displacement(Label *L, Type type)
void init(Label *L, Type type)
V8_INLINE EnsureSpace(Assembler *assembler)
bool is_heap_number_request() const
Handle< HeapObject > embedded_object() const
bool is_external_reference() const
ExternalReference external_reference() const
Immediate(Handle< HeapObject > handle)
Immediate(Handle< T > handle, RelocInfo::Mode mode=RelocInfo::FULL_EMBEDDED_OBJECT)
Immediate(int x, RelocInfo::Mode rmode=RelocInfo::NO_INFO)
static Immediate EmbeddedNumber(double number)
Immediate(const ExternalReference &ext)
bool is_embedded_object() const
Immediate(Tagged< Smi > value)
HeapNumberRequest heap_number_request() const
RelocInfo::Mode rmode() const
static Immediate CodeRelativeOffset(Label *label)
bool is_reg(Register reg) const
Operand(Register base, Register index, ScaleFactor scale, int32_t disp, RelocInfo::Mode rmode=RelocInfo::NO_INFO)
V8_INLINE Operand(Immediate imm)
Operand(Register base, int32_t disp, RelocInfo::Mode rmode=RelocInfo::NO_INFO)
Register reg() const
static Operand ForRegisterPlusImmediate(Register base, Immediate imm)
bool is_reg(int reg_code) const
bool is_reg(XMMRegister reg) const
V8_INLINE Operand(XMMRegister xmm_reg)
static Operand JumpTable(Register index, ScaleFactor scale, Label *table)
Operand(Register index, ScaleFactor scale, int32_t disp, RelocInfo::Mode rmode=RelocInfo::NO_INFO)
RelocInfo::Mode rmode()
void set_dispr(int32_t disp, RelocInfo::Mode rmode)
V8_INLINE Operand(int32_t disp, RelocInfo::Mode rmode)
base::Vector< const uint8_t > encoded_bytes() const
void set_modrm(int mod, Register rm)
bool is_reg_only() const
V8_INLINE Operand(Register reg)
constexpr int8_t code() const
static constexpr bool IsNoInfo(Mode mode)
Definition reloc-info.h:257
base::OwnedVector< uint8_t > buffer_
Definition assembler.cc:111
Label label
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage * MB
#define FMA_INSTRUCTION_LIST(V)
Definition fma-instr.h:52
#define SSE_UNOP_INSTRUCTION_LIST(V)
Definition sse-instr.h:9
#define SSSE3_UNOP_INSTRUCTION_LIST(V)
Definition sse-instr.h:96
#define SSE4_INSTRUCTION_LIST(V)
Definition sse-instr.h:101
#define SSE4_RM_INSTRUCTION_LIST(V)
Definition sse-instr.h:115
#define SSE2_INSTRUCTION_LIST(V)
Definition sse-instr.h:16
#define SSE2_INSTRUCTION_LIST_SD(V)
Definition sse-instr.h:75
#define SSSE3_INSTRUCTION_LIST(V)
Definition sse-instr.h:85
#define AVX2_BROADCAST_LIST(V)
Definition sse-instr.h:126
int32_t offset
LiftoffRegister reg
int pc_offset
int x
int position
Definition liveedit.cc:290
uint32_t const mask
int m
Definition mul-fft.cc:294
int r
Definition mul-fft.cc:298
STL namespace.
V8_INLINE Dest bit_cast(Source const &source)
Definition macros.h:95
V8_INLINE IndirectHandle< T > handle(Tagged< T > object, Isolate *isolate)
Definition handles-inl.h:72
constexpr VFPRoundingMode kRoundToNearest
bool operator!=(ExternalReference lhs, ExternalReference rhs)
void PrintF(const char *format,...)
Definition utils.cc:39
std::variant< Zone *, AccountingAllocator * > MaybeAssemblerZone
Definition assembler.h:262
constexpr int L
constexpr int kSystemPointerSize
Definition globals.h:410
Condition NegateCondition(Condition cond)
constexpr VFPRoundingMode kRoundToZero
@ times_half_system_pointer_size
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define ASSERT_TRIVIALLY_COPYABLE(T)
Definition macros.h:267
#define V8_EXPORT_PRIVATE
Definition macros.h:460
HeapNumberRequest heap_number_request
#define V8_INLINE
Definition v8config.h:500
#define V8_UNLIKELY(condition)
Definition v8config.h:660
std::unique_ptr< ValueMirror > value