v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
liftoff-assembler-arm64-inl.h File Reference
Include dependency graph for liftoff-assembler-arm64-inl.h:

Go to the source code of this file.

Classes

class  v8::internal::wasm::GetProtectedInstruction< kLoadOrStore, kExtraEmittedInstructions >
 

Namespaces

namespace  v8
 
namespace  v8::internal
 
namespace  v8::internal::wasm
 
namespace  v8::internal::wasm::liftoff
 

Macros

#define __   lasm->
 
#define ATOMIC_BINOP_CASE(op, instr)
 
#define EMIT_QFMOP(instr, format)
 

Enumerations

enum class  v8::internal::wasm::liftoff::ShiftDirection : bool { v8::internal::wasm::liftoff::kLeft , v8::internal::wasm::liftoff::kRight }
 
enum class  v8::internal::wasm::liftoff::ShiftSign : bool { v8::internal::wasm::liftoff::kSigned , v8::internal::wasm::liftoff::kUnsigned }
 
enum class  v8::internal::wasm::LoadOrStore : bool { v8::internal::wasm::kLoad , v8::internal::wasm::kStore }
 
enum class  v8::internal::wasm::liftoff::Binop {
  v8::internal::wasm::liftoff::kAdd , v8::internal::wasm::liftoff::kSub , v8::internal::wasm::liftoff::kAnd , v8::internal::wasm::liftoff::kOr ,
  v8::internal::wasm::liftoff::kXor , v8::internal::wasm::liftoff::kExchange
}
 

Functions

MemOperand v8::internal::wasm::liftoff::GetStackSlot (int offset)
 
MemOperand v8::internal::wasm::liftoff::GetInstanceDataOperand ()
 
CPURegister v8::internal::wasm::liftoff::GetRegFromType (const LiftoffRegister &reg, ValueKind kind)
 
CPURegList v8::internal::wasm::liftoff::PadRegList (RegList list)
 
CPURegList v8::internal::wasm::liftoff::PadVRegList (DoubleRegList list)
 
CPURegister v8::internal::wasm::liftoff::AcquireByType (UseScratchRegisterScope *temps, ValueKind kind)
 
template<typename T >
MemOperand v8::internal::wasm::liftoff::GetMemOp (LiftoffAssembler *assm, UseScratchRegisterScope *temps, Register addr, Register offset, T offset_imm, bool i64_offset=false, unsigned shift_amount=0)
 
Register v8::internal::wasm::liftoff::GetEffectiveAddress (LiftoffAssembler *assm, UseScratchRegisterScope *temps, Register addr, Register offset, uintptr_t offset_imm, bool i64_offset=false)
 
template<ShiftDirection dir, ShiftSign sign = ShiftSign::kSigned>
void v8::internal::wasm::liftoff::EmitSimdShift (LiftoffAssembler *assm, VRegister dst, VRegister lhs, Register rhs, VectorFormat format)
 
template<VectorFormat format, ShiftSign sign>
void v8::internal::wasm::liftoff::EmitSimdShiftRightImmediate (LiftoffAssembler *assm, VRegister dst, VRegister lhs, int32_t rhs)
 
void v8::internal::wasm::liftoff::EmitAnyTrue (LiftoffAssembler *assm, LiftoffRegister dst, LiftoffRegister src)
 
void v8::internal::wasm::liftoff::EmitAllTrue (LiftoffAssembler *assm, LiftoffRegister dst, LiftoffRegister src, VectorFormat format)
 
CPURegister v8::internal::wasm::liftoff::LoadToRegister (LiftoffAssembler *assm, UseScratchRegisterScope *temps, const LiftoffAssembler::VarState &src)
 
void v8::internal::wasm::liftoff::StoreToMemory (LiftoffAssembler *assm, MemOperand dst, const LiftoffAssembler::VarState &src)
 
Register v8::internal::wasm::liftoff::CalculateActualAddress (LiftoffAssembler *lasm, UseScratchRegisterScope &temps, Register addr_reg, Register offset_reg, uintptr_t offset_imm)
 
void v8::internal::wasm::liftoff::AtomicBinop (LiftoffAssembler *lasm, Register dst_addr, Register offset_reg, uintptr_t offset_imm, LiftoffRegister value, LiftoffRegister result, StoreType type, Binop op)
 

Macro Definition Documentation

◆ __

#define __   lasm->

Definition at line 853 of file liftoff-assembler-arm64-inl.h.

◆ ATOMIC_BINOP_CASE

#define ATOMIC_BINOP_CASE ( op,
instr )
Value:
case Binop::op: \
switch (type.value()) { \
case StoreType::kI64Store8: \
case StoreType::kI32Store8: \
__ instr##b(value.gp().W(), result.gp().W(), MemOperand(actual_addr)); \
break; \
case StoreType::kI64Store16: \
case StoreType::kI32Store16: \
__ instr##h(value.gp().W(), result.gp().W(), MemOperand(actual_addr)); \
break; \
case StoreType::kI64Store32: \
case StoreType::kI32Store: \
__ instr(value.gp().W(), result.gp().W(), MemOperand(actual_addr)); \
break; \
case StoreType::kI64Store: \
__ instr(value.gp(), result.gp(), MemOperand(actual_addr)); \
break; \
default: \
UNREACHABLE(); \
} \
break;
Referenced symbols: Instruction * instr; ZoneVector< RpoNumber > & result

◆ EMIT_QFMOP

#define EMIT_QFMOP ( instr,
format )
Value:
if (dst == src3) { \
instr(dst.fp().V##format(), src1.fp().V##format(), src2.fp().V##format()); \
} else if (dst != src1 && dst != src2) { \
Mov(dst.fp().V##format(), src3.fp().V##format()); \
instr(dst.fp().V##format(), src1.fp().V##format(), src2.fp().V##format()); \
} else { \
DCHECK(dst == src1 || dst == src2); \
UseScratchRegisterScope temps(this); \
VRegister tmp = temps.AcquireV(kFormat##format); \
Mov(tmp, src3.fp().V##format()); \
instr(tmp, src1.fp().V##format(), src2.fp().V##format()); \
Mov(dst.fp().V##format(), tmp); \
}

Definition at line 3760 of file liftoff-assembler-arm64-inl.h.