#ifndef V8_CODEGEN_RISCV_ASSEMBLER_RISCV_H_
#define V8_CODEGEN_RISCV_ASSEMBLER_RISCV_H_
#define DEBUG_PRINTF(...)     \
  if (v8_flags.riscv_debug) { \
    printf(__VA_ARGS__);      \
  }
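// Usage sketch (illustrative, not part of the original source): the macro
// only prints when the runtime flag --riscv-debug is enabled, e.g.
//
//   DEBUG_PRINTF("bind: pos %d -> target %d\n", pos, target_pos);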
 
class SafepointTableBuilder;
 
  V8_INLINE Operand(Tagged<Smi> value)
      : Operand(static_cast<intptr_t>(value.ptr())) {}
 
 
class V8_EXPORT_PRIVATE Assembler : public AssemblerBase,
                                    public AssemblerRISCVI,
                                    public AssemblerRISCVA,
                                    public AssemblerRISCVB,
                                    public AssemblerRISCVF,
                                    public AssemblerRISCVD,
                                    public AssemblerRISCVM,
                                    public AssemblerRISCVC,
                                    public AssemblerRISCVZifencei,
                                    public AssemblerRISCVZicsr,
                                    public AssemblerRISCVZicond,
                                    public AssemblerRISCVV {
 public:
 
  explicit Assembler(const AssemblerOptions&,
                     std::unique_ptr<AssemblerBuffer> = {});
 
  Assembler(const MaybeAssemblerZone&, const AssemblerOptions& options,
            std::unique_ptr<AssemblerBuffer> buffer = {})
      : Assembler(options, std::move(buffer)) {}
 
 
  static constexpr int kNoHandlerTable = 0;
 
  void GetCode(LocalIsolate* isolate, CodeDesc* desc,
               SafepointTableBuilderBase* safepoint_table_builder,
               int handler_table_offset);
 
  // Convenience wrapper for code without a safepoint or handler table.
  void GetCode(LocalIsolate* isolate, CodeDesc* desc) {
    GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
  }
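  // Usage sketch (illustrative, not part of the original source):
  // finalizing generated code. Assumes an Isolate* `isolate` and a
  // populated Assembler `assm`.
  //
  //   CodeDesc desc;
  //   assm.GetCode(isolate, &desc);  // Isolate* overload; no safepoint or
  //                                  // handler table
  //   // `desc` now describes the buffer, reloc info, and pool offsets.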
 
 
  static int BrachlongOffset(Instr auipc, Instr jalr);
 
  static int PatchBranchlongOffset(
      Address pc, Instr auipc, Instr instr_I, int32_t offset,
      WritableJitAllocation* jit_allocation = nullptr);
 
  // Read/modify the code target address in the branch/call instruction at pc.
  static void set_target_address_at(
      Address pc, Address constant_pool, Address target,
      WritableJitAllocation* jit_allocation = nullptr,
      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
  static Address target_address_at(Address pc, Address constant_pool);
 
  static void set_target_compressed_address_at(
      Address pc, Address constant_pool, Tagged_t target,
      WritableJitAllocation* jit_allocation = nullptr,
      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
  static Tagged_t target_compressed_address_at(Address pc,
                                               Address constant_pool);
 
  inline Handle<HeapObject> compressed_embedded_object_handle_at(
      Address pc, Address constant_pool);
 
#ifdef V8_TARGET_ARCH_RISCV64
  inline void set_embedded_object_index_referenced_from(
      Address pc, EmbeddedObjectIndex index);
#endif
 
  void EmitPoolGuard();
 
#if defined(V8_TARGET_ARCH_RISCV64)
  static void set_target_value_at(
      Address pc, uint64_t target,
      WritableJitAllocation* jit_allocation = nullptr,
      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
 
#elif defined(V8_TARGET_ARCH_RISCV32)
  static void set_target_value_at(
      Address pc, uint32_t target,
      WritableJitAllocation* jit_allocation = nullptr,
      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
#endif
 
  static inline int32_t target_constant32_at(Address pc);
 
  static inline void set_target_constant32_at(
      Address pc, uint32_t target, WritableJitAllocation* jit_allocation,
      ICacheFlushMode icache_flush_mode);
 
  inline static void deserialization_set_special_target_at(Address location,
                                                           Tagged<Code> code,
                                                           Address target);
 
  // Get the size of the special target encoded at 'instruction_payload'.
  static int deserialization_special_target_size(Address instruction_payload);
 
  static void set_uint32_constant_at(
      Address pc, Address constant_pool, uint32_t new_constant,
      WritableJitAllocation* jit_allocation,
      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
 
  static constexpr int kSpecialTargetSize = 0;
 
  static constexpr int kInstructionsFor32BitConstant = 2;
  static constexpr int kInstructionsFor64BitConstant = 8;
 
  // B-type branch instructions: 13-bit signed immediate.
  static constexpr int kBranchOffsetBits = 13;

  // JAL: 21-bit signed immediate.
  static constexpr int kJumpOffsetBits = 21;

  // C.JAL (RVC): 12-bit signed immediate.
  static constexpr int kCJalOffsetBits = 12;

  // C.BEQZ/C.BNEZ (RVC): 9-bit signed immediate.
  static constexpr int kCBranchOffsetBits = 9;

  // Maximum forward branch offset in bytes: (1 << 12) - 1 = 4095.
  static constexpr int kMaxBranchOffset = (1 << (13 - 1)) - 1;

  // Maximum forward jump offset in bytes: (1 << 20) - 1.
  static constexpr int kMaxJumpOffset = (1 << (21 - 1)) - 1;

  static constexpr int kTrampolineSlotsSize = 2 * kInstrSize;
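  // Illustrative sketch (not part of the original source): choosing between
  // a short branch and a trampoline-backed jump. `assm` and `target` are
  // assumed; OffsetSize and the label overload of beq come from the RISC-V
  // base assembler.
  //
  //   if (assm.is_near(&target, OffsetSize::kOffset13)) {
  //     assm.beq(a0, a1, &target);  // 13-bit offset: within +/-4 KiB
  //   } else {
  //     // Out of range: route through a trampoline slot
  //     // (kTrampolineSlotsSize bytes each).
  //   }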
 
  DoubleRegList* GetScratchDoubleRegisterList() {
    return &scratch_double_register_list_;
  }
 
 
  // Different nop operations are used by the code generator to detect certain
  // states of the generated code.
  enum NopMarkerTypes {
    NON_MARKING_NOP = 0,
    DEBUG_BREAK_NOP,
    // IC markers.
    PROPERTY_ACCESS_INLINED,
    PROPERTY_ACCESS_INLINED_CONTEXT,
    PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
    // Helper values.
    LAST_CODE_MARKER,
    FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED,
  };
 
 
#if defined(V8_TARGET_ARCH_RISCV64)
  void RecursiveLiImpl(Register rd, int64_t imm);
  void RecursiveLi(Register rd, int64_t imm);
  static int RecursiveLiCount(int64_t imm);
  static int RecursiveLiImplCount(int64_t imm);
  void RV_li(Register rd, int64_t imm);
  // Returns the number of instructions RV_li will emit for this immediate.
  static int RV_li_count(int64_t imm, bool is_get_temp_reg = false);
  void GeneralLi(Register rd, int64_t imm);
  static int GeneralLiCount(int64_t imm, bool is_get_temp_reg = false);
  // Fixed-length sequences, used where the loaded value may be patched later.
  void li_constant(Register rd, int64_t imm);
  void li_constant32(Register rd, int32_t imm);
  void li_ptr(Register rd, int64_t imm);
#endif
 
#if defined(V8_TARGET_ARCH_RISCV32)
  void RV_li(Register rd, int32_t imm);
  static int RV_li_count(int32_t imm, bool is_get_temp_reg = false);
  void li_constant(Register rd, int32_t imm);
  void li_ptr(Register rd, int32_t imm);
#endif
 
  void break_(uint32_t code, bool break_as_stop = false);
  void stop(uint32_t code = kMaxStopCode);
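  // Illustrative sketch (not part of the original source): materializing
  // immediates. Assumes an Assembler `assm`; t0 is a caller-chosen register.
  //
  //   assm.RV_li(t0, 0x12345678);  // shortest sequence for this value
  //   int count = Assembler::RV_li_count(0x12345678);  // its length, in
  //                                                    // instructions
  //   assm.li_ptr(t0, 0x12345678);  // fixed-length form, patchable later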
 
  using BlockConstPoolScope = ConstantPool::BlockScope;
 
  // While in scope, blocks emission of the trampoline pool.
  class BlockTrampolinePoolScope {
   public:
    explicit BlockTrampolinePoolScope(Assembler* assem, int margin = 0)
        : assem_(assem) {
      if (margin > 0) {
        assem_->CheckTrampolinePoolQuick(margin / kInstrSize);
      }
      assem_->StartBlockTrampolinePool();
    }
    explicit BlockTrampolinePoolScope(Assembler* assem, PoolEmissionCheck check)
        : assem_(assem) {
      assem_->StartBlockTrampolinePool();
    }
    ~BlockTrampolinePoolScope() { assem_->EndBlockTrampolinePool(); }

   private:
    Assembler* assem_;
    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
  };

  // Blocks both the constant pool and the trampoline pool while in scope.
  class BlockPoolsScope {
   public:
    explicit BlockPoolsScope(Assembler* assem, int margin = 0)
        : block_const_pool_(assem, margin),
          block_trampoline_pool_(assem, margin) {}
    explicit BlockPoolsScope(Assembler* assem, PoolEmissionCheck check,
                             int margin = 0)
        : block_const_pool_(assem, check),
          block_trampoline_pool_(assem, margin) {}

   private:
    BlockConstPoolScope block_const_pool_;
    BlockTrampolinePoolScope block_trampoline_pool_;
    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
  };
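  // Illustrative sketch (not part of the original source): keeping a
  // patchable sequence contiguous. Assumes an Assembler `assm`; the emitted
  // body is hypothetical.
  //
  //   {
  //     Assembler::BlockPoolsScope scope(&assm);
  //     // Neither pool can be emitted here, so these stay adjacent:
  //     assm.auipc(t6, 0);
  //     assm.jalr(ra, t6, 0);
  //   }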
 
 
 
  // Postpones growth of the assembler buffer while in scope.
  class BlockGrowBufferScope {
   public:
    explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockGrowBuffer();
    }
    ~BlockGrowBufferScope() { assem_->EndBlockGrowBuffer(); }

   private:
    Assembler* assem_;
    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
  };
 
 
 
  // Emit raw data directly into the instruction stream.
  void db(uint8_t data);
  void dd(uint32_t data);
  void dq(uint64_t data);
  void dp(uintptr_t data) { dq(data); }
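  // Illustrative sketch (not part of the original source): emitting an
  // inline data block; `some_global` is a hypothetical variable.
  //
  //   assm.dd(0x00000001);                  // 32-bit entry
  //   assm.dq(0x0123456789abcdef);          // 64-bit entry
  //   assm.dp(reinterpret_cast<uintptr_t>(&some_global));  // pointer-sized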
 
  inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
 
  // Space remaining between the instruction stream and the reloc info.
  inline intptr_t available_space() const {
    return reloc_info_writer.pos() - pc_;
  }
 
 
  // Read an instruction from the buffer at the given byte offset.
  Instr instr_at(int pos) {
    return *reinterpret_cast<Instr*>(buffer_start_ + pos);
  }
 
 
  Address toAddress(int pos) {
    return reinterpret_cast<Address>(buffer_start_ + pos);
  }
 
 
  void RecordConstPool(int size);
 
  void ForceConstantPoolEmissionWithoutJump() {
    constpool_.Check(Emission::kForced, Jump::kOmitted);
  }
  void ForceConstantPoolEmissionWithJump() {
    constpool_.Check(Emission::kForced, Jump::kRequired);
  }

  // Check whether the constant pool needs to be emitted, pretending that
  // {margin} more bytes of instructions have already been emitted.
  void EmitConstPoolWithJumpIfNeeded(size_t margin = 0) {
    constpool_.Check(Emission::kIfNeeded, Jump::kRequired, margin);
  }
  void EmitConstPoolWithoutJumpIfNeeded(size_t margin = 0) {
    constpool_.Check(Emission::kIfNeeded, Jump::kOmitted, margin);
  }

  RelocInfoStatus RecordEntry(uint32_t data, RelocInfo::Mode rmode) {
    return constpool_.RecordEntry(data, rmode);
  }
  RelocInfoStatus RecordEntry(uint64_t data, RelocInfo::Mode rmode) {
    return constpool_.RecordEntry(data, rmode);
  }
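  // Illustrative sketch (not part of the original source): flushing the
  // constant pool before a region that must not be split. The 64-byte
  // margin is an arbitrary example value.
  //
  //   assm.EmitConstPoolWithJumpIfNeeded(64);  // emit now if it would soon
  //                                            // become due
  //   assm.RecordEntry(uint64_t{0xdeadbeef}, RelocInfo::NO_INFO);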
 
 
  void CheckTrampolinePoolQuick(int extra_instructions = 0) {
    DEBUG_PRINTF("\tCheckTrampolinePoolQuick %d %d\n", pc_offset(),
                 next_buffer_check_ - extra_instructions * kInstrSize);
    if (pc_offset() >= next_buffer_check_ - extra_instructions * kInstrSize) {
      CheckTrampolinePool();
    }
  }
 
 
  // Tracks the current RVV configuration (SEW/LMUL/vl) so redundant
  // vsetvl* instructions can be skipped.
  class VectorUnit {
   public:
    // SEW in bits: 8, 16, 32, or 64 (i.e. 8 << sew_).
    inline int32_t sew() const { return 1 << (sew_ + 3); }

    inline int32_t vlmax() const {
      if ((lmul_ & 0b100) != 0) {
        // Fractional LMUL (mf8/mf4/mf2): shrink VLEN/SEW further.
        return (kRvvVLEN / sew()) >> (lmul_ & 0b11);
      } else {
        // Integral LMUL (m1/m2/m4/m8): scale VLEN up before dividing by SEW.
        return ((kRvvVLEN << lmul_) / sew());
      }
    }

    explicit VectorUnit(Assembler* assm) : assm_(assm) {}

    void set(Register rd, VSew sew, Vlmul lmul) {
      if (sew != sew_ || lmul != lmul_ || vl != vlmax()) {
        sew_ = sew;
        lmul_ = lmul;
        vl = vlmax();
        assm_->vsetvlmax(rd, sew_, lmul_);
      }
    }

    void set(Register rd, Register rs1, VSew sew, Vlmul lmul) {
      if (sew != sew_ || lmul != lmul_) {
        sew_ = sew;
        lmul_ = lmul;
        vl = 0;
        assm_->vsetvli(rd, rs1, sew_, lmul_);
      }
    }

    void set(VSew sew, Vlmul lmul) {
      if (sew != sew_ || lmul != lmul_) {
        sew_ = sew;
        lmul_ = lmul;
        assm_->vsetvl(sew_, lmul_);
      }
    }

   private:
    VSew sew_ = E8;
    Vlmul lmul_ = m1;
    int32_t vl = 0;
    Assembler* assm_;
  };
  VectorUnit VU;
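  // Illustrative sketch (not part of the original source): configuring the
  // vector unit before RVV ops. Assumes an Assembler `assm` and scratch
  // register t0; vadd_vv comes from AssemblerRISCVV.
  //
  //   assm.VU.set(t0, E32, m2);   // vsetvlmax: SEW=32, LMUL=2, vl = vlmax
  //   assm.vadd_vv(v1, v2, v3);   // subsequent RVV ops use that config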
 
 
 
  enum OffsetAccessType : bool {
    SINGLE_ACCESS = false,
    TWO_ACCESSES = true,
  };
 
 
  // Determine whether {src}'s offset must be rewritten before a load/store
  // whose immediate field only holds 12 signed bits.
  bool NeedAdjustBaseAndOffset(
      const MemOperand& src, OffsetAccessType = OffsetAccessType::SINGLE_ACCESS,
      int second_access_add_to_offset = 4);

  // Helper function for memory load/store using a base register and offset.
  void AdjustBaseAndOffset(
      MemOperand* src, Register scratch,
      OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS,
      int second_access_add_to_offset = 4);
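  // Illustrative sketch (not part of the original source): handling offsets
  // that do not fit the 12-bit immediate of a load. `assm` and scratch
  // register t0 are assumed.
  //
  //   MemOperand src(s1, 0x12345);           // offset too big for imm12
  //   if (assm.NeedAdjustBaseAndOffset(src)) {
  //     assm.AdjustBaseAndOffset(&src, t0);  // t0 absorbs the high bits
  //   }
  //   // src now has an int12-encodable offset.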
 
  void BlockTrampolinePoolBefore(int pc_offset) {
    if (no_trampoline_pool_before_ < pc_offset)
      no_trampoline_pool_before_ = pc_offset;
  }
 
 
  void StartBlockTrampolinePool() {
    trampoline_pool_blocked_nesting_++;
  }
 
 
  void EndBlockTrampolinePool() {
    trampoline_pool_blocked_nesting_--;
    DEBUG_PRINTF("\ttrampoline_pool_blocked_nesting:%d\n",
                 trampoline_pool_blocked_nesting_);
    if (trampoline_pool_blocked_nesting_ == 0) {
      CheckTrampolinePoolQuick(1);
    }
  }
 
 
  bool is_trampoline_pool_blocked() const {
    return trampoline_pool_blocked_nesting_ > 0;
  }
 
 
  void StartBlockGrowBuffer() {
    DCHECK(!block_buffer_growth_);
    block_buffer_growth_ = true;
  }
 
 
  void EndBlockGrowBuffer() {
    DCHECK(block_buffer_growth_);
    block_buffer_growth_ = false;
  }

  bool is_buffer_growth_blocked() const { return block_buffer_growth_; }
 
 
  static const int kMaximalBufferSize = 512 * MB;

  static constexpr int kBufferCheckInterval = 1 * KB / 2;
 
  static constexpr int kGap = 64;
  static_assert(AssemblerBase::kMinimalBufferSize >= 2 * kGap);

  // Check the constant pool at regular instruction intervals.
  static constexpr int kCheckConstIntervalInst = 32;
  static constexpr int kCheckConstInterval =
      kCheckConstIntervalInst * kInstrSize;
 
  int next_buffer_check_;  // pc offset of next buffer check.

  int trampoline_pool_blocked_nesting_;  // Block emission if this is not zero.
  int no_trampoline_pool_before_;  // Block emission before this pc offset.

  int last_trampoline_pool_end_;  // pc offset of the end of the last pool.

  bool block_buffer_growth_;  // Block growth when true.
 
  static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
 
  template <typename T>
  inline void EmitHelper(T x);
 
  static void disassembleInstr(uint8_t* pc);
 
  class Trampoline {
   public:
    Trampoline() {
      start_ = 0;
      next_slot_ = 0;
      free_slot_count_ = 0;
      end_ = 0;
    }
    Trampoline(int start, int slot_count) {
      start_ = start;
      next_slot_ = start;
      free_slot_count_ = slot_count;
      end_ = start + slot_count * kTrampolineSlotsSize;
    }
    int start() { return start_; }
    int end() { return end_; }
    int take_slot() {
      int trampoline_slot = kInvalidSlotPos;
      if (free_slot_count_ <= 0) {
        // We have run out of space on trampolines. Make sure we fail in
        // debug mode, so we become aware of each case when this happens.
        DCHECK(0);
        // Internal exception will be caught.
      } else {
        trampoline_slot = next_slot_;
        free_slot_count_--;
        next_slot_ += kTrampolineSlotsSize;
        DEBUG_PRINTF("\ttrampoline  slot %d next %d free %d\n", trampoline_slot,
                     next_slot_, free_slot_count_);
      }
      return trampoline_slot;
    }

   private:
    int start_;
    int end_;
    int next_slot_;
    int free_slot_count_;
  };
 
 
  int unbound_labels_count_;
 
  bool trampoline_emitted_ = false;
 
  static constexpr int kInvalidSlotPos = -1;
 
  bool is_internal_reference(Label* L) {
    return internal_reference_positions_.find(L->pos()) !=
           internal_reference_positions_.end();
  }
 
 
  Trampoline trampoline_;
  bool internal_trampoline_exception_;

  RegList scratch_register_list_;
  DoubleRegList scratch_double_register_list_;

  ConstantPool constpool_;

  friend class ConstantPool;
};
 
class V8_EXPORT_PRIVATE UseScratchRegisterScope {
 public:
  explicit UseScratchRegisterScope(Assembler* assembler)
      : assembler_(assembler),
        old_available_(*assembler->GetScratchRegisterList()),
        old_available_double_(*assembler->GetScratchDoubleRegisterList()) {}
 
 
  ~UseScratchRegisterScope() {
    RegList* available = assembler_->GetScratchRegisterList();
    DoubleRegList* available_double =
        assembler_->GetScratchDoubleRegisterList();
    *available = old_available_;
    *available_double = old_available_double_;
  }
 
 
  DoubleRegister AcquireDouble() {
    DoubleRegList* available_double =
        assembler_->GetScratchDoubleRegisterList();
    return available_double->PopFirst();
  }
 
 
  void Include(RegList list) {
    RegList* available = assembler_->GetScratchRegisterList();
    DCHECK_EQ((*available & list).bits(), 0x0);
    *available = *available | list;
  }
 
 
  void Exclude(const RegList& list) {
    RegList* available = assembler_->GetScratchRegisterList();
    available->clear(list);
  }
 
 
  void Include(DoubleRegList list) {
    DoubleRegList* available_double =
        assembler_->GetScratchDoubleRegisterList();
    DCHECK_EQ((*available_double & list).bits(), 0x0);
    *available_double = *available_double | list;
  }
 
 
  void SetAvailable(RegList available) {
    *assembler_->GetScratchRegisterList() = available;
  }
 
 
  DoubleRegList AvailableDouble() {
    return *assembler_->GetScratchDoubleRegisterList();
  }
 
 
  void SetAvailableDouble(DoubleRegList available_double) {
    *assembler_->GetScratchDoubleRegisterList() = available_double;
  }

 private:
  Assembler* assembler_;
  RegList old_available_;
  DoubleRegList old_available_double_;
};
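// Illustrative sketch (not part of the original source): borrowing a scratch
// register for a short sequence. Assumes an Assembler `assm`; Acquire()
// comes from the shared UseScratchRegisterScope interface.
//
//   {
//     UseScratchRegisterScope temps(&assm);
//     Register tmp = temps.Acquire();          // taken from the scratch list
//     DoubleRegister ftmp = temps.AcquireDouble();
//     // ... use tmp/ftmp ...
//   }  // destructor restores the original scratch lists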
 
 