|
| enum class | FastIterateResult { kException = static_cast<int>(v8::Array::CallbackResult::kException)
, kBreak = static_cast<int>(v8::Array::CallbackResult::kBreak)
, kSlowPath
, kFinished
} |
| |
| enum class | SourceRangeKind {
kBody
, kCatch
, kContinuation
, kElse
,
kFinally
, kRight
, kThen
} |
| |
| enum class | IteratorType { kNormal
, kAsync
} |
| |
| enum class | HoleCheckMode { kRequired
, kElided
} |
| |
| enum | AssignType {
NON_PROPERTY
, NAMED_PROPERTY
, KEYED_PROPERTY
, NAMED_SUPER_PROPERTY
,
KEYED_SUPER_PROPERTY
, PRIVATE_METHOD
, PRIVATE_GETTER_ONLY
, PRIVATE_SETTER_ONLY
,
PRIVATE_GETTER_AND_SETTER
, PRIVATE_DEBUG_DYNAMIC
} |
| |
| enum class | GroupByCoercionMode { kZero
, kProperty
} |
| |
| enum class | Builtin : int32_t { kNoBuiltinId = -1
, DEF_ENUM
, BUILTIN_LIST
} |
| |
| enum class | TieringBuiltin : int32_t { DEF_ENUM
} |
| |
| enum | Coprocessor {
p0 = 0
, p1 = 1
, p2 = 2
, p3 = 3
,
p4 = 4
, p5 = 5
, p6 = 6
, p7 = 7
,
p8 = 8
, p9 = 9
, p10 = 10
, p11 = 11
,
p12 = 12
, p13 = 13
, p14 = 14
, p15 = 15
} |
| |
| enum | Condition : int {
kNoCondition = -1
, eq = 0 << 28
, ne = 1 << 28
, cs = 2 << 28
,
cc = 3 << 28
, mi = 4 << 28
, pl = 5 << 28
, vs = 6 << 28
,
vc = 7 << 28
, hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, al = 14 << 28
,
kSpecialCondition = 15 << 28
, kNumberOfConditions = 16
, hs = cs
, lo = cc
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, eq = 0 << 28
, ne = 1 << 28
,
hs = cs
, cs = 2 << 28
, lo = cc
, cc = 3 << 28
,
mi = 4 << 28
, pl = 5 << 28
, vs = 6 << 28
, vc = 7 << 28
,
hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
, lt = 11 << 28
,
gt = 12 << 28
, le = 13 << 28
, al = 14 << 28
, nv = 15
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, overflow = 0
, no_overflow = 1
,
below = 2
, above_equal = 3
, equal = 4
, not_equal = 5
,
below_equal = 6
, above = 7
, negative = 8
, positive = 9
,
parity_even = 10
, parity_odd = 11
, less = 12
, greater_equal = 13
,
less_equal = 14
, greater = 15
, carry = below
, not_carry = above_equal
,
zero = equal
, not_zero = not_equal
, sign = negative
, not_sign = positive
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, overflow = 0
, no_overflow = 1
,
Uless = 2
, Ugreater_equal = 3
, Uless_equal = 4
, Ugreater = 5
,
equal = 4
, not_equal = 5
, negative = 8
, positive = 9
,
parity_even = 10
, parity_odd = 11
, less = 12
, greater_equal = 13
,
less_equal = 14
, greater = 15
, ueq = 16
, ogl = 17
,
cc_always = 18
, carry = below
, not_carry = above_equal
, zero = equal
,
eq = 0 << 28
, not_zero = not_equal
, ne = 1 << 28
, nz = not_equal
,
sign = negative
, not_sign = positive
, mi = 4 << 28
, pl = 5 << 28
,
hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
, lt = 11 << 28
,
gt = 12 << 28
, le = 13 << 28
, hs = cs
, lo = cc
,
al = 14 << 28
, ult = Uless
, uge = Ugreater_equal
, ule = Uless_equal
,
ugt = Ugreater
, kEqual = eq
, kNotEqual = ne
, kLessThan = lt
,
kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
,
kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
,
kNoOverflow = vc
, kZero = eq
, kNotZero = ne
, overflow = 0
,
no_overflow = 1
, Uless = 2
, Ugreater_equal = 3
, Uless_equal = 4
,
Ugreater = 5
, equal = 4
, not_equal = 5
, negative = 8
,
positive = 9
, parity_even = 10
, parity_odd = 11
, less = 12
,
greater_equal = 13
, less_equal = 14
, greater = 15
, ueq = 16
,
ogl = 17
, cc_always = 18
, carry = below
, not_carry = above_equal
,
zero = equal
, eq = 0 << 28
, not_zero = not_equal
, ne = 1 << 28
,
nz = not_equal
, sign = negative
, not_sign = positive
, mi = 4 << 28
,
pl = 5 << 28
, hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, hs = cs
,
lo = cc
, al = 14 << 28
, ult = Uless
, uge = Ugreater_equal
,
ule = Uless_equal
, ugt = Ugreater
, kEqual = eq
, kNotEqual = ne
,
kLessThan = lt
, kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
,
kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
,
kOverflow = vs
, kNoOverflow = vc
, kZero = eq
, kNotZero = ne
,
kNoCondition = -1
, eq = 0 << 28
, ne = 1 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, unordered = 6
,
ordered = 7
, overflow = 0
, nooverflow = 9
, al = 14 << 28
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, overflow = 0
, no_overflow = 1
,
Uless = 2
, Ugreater_equal = 3
, Uless_equal = 4
, Ugreater = 5
,
equal = 4
, not_equal = 5
, less = 12
, greater_equal = 13
,
less_equal = 14
, greater = 15
, cc_always = 18
, eq = 0 << 28
,
ne = 1 << 28
, ge = 10 << 28
, lt = 11 << 28
, gt = 12 << 28
,
le = 13 << 28
, al = 14 << 28
, ult = Uless
, uge = Ugreater_equal
,
ule = Uless_equal
, ugt = Ugreater
, kEqual = eq
, kNotEqual = ne
,
kLessThan = lt
, kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
,
kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
,
kOverflow = vs
, kNoOverflow = vc
, kZero = eq
, kNotZero = ne
,
kNoCondition = -1
, eq = 0 << 28
, ne = 1 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, al = 14 << 28
,
CC_NOP = 0x0
, CC_EQ = 0x08
, CC_LT = 0x04
, CC_LE = CC_EQ | CC_LT
,
CC_GT = 0x02
, CC_GE = CC_EQ | CC_GT
, CC_OF = 0x01
, CC_NOF = 0x0E
,
CC_ALWAYS = 0x0F
, unordered = 6
, ordered = 7
, overflow = 0
,
nooverflow = 9
, mask0x0 = 0
, mask0x1 = 1
, mask0x2 = 2
,
mask0x3 = 3
, mask0x4 = 4
, mask0x5 = 5
, mask0x6 = 6
,
mask0x7 = 7
, mask0x8 = 8
, mask0x9 = 9
, mask0xA = 10
,
mask0xB = 11
, mask0xC = 12
, mask0xD = 13
, mask0xE = 14
,
mask0xF = 15
, kEqual = eq
, kNotEqual = ne
, kLessThan = lt
,
kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
,
kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
,
kNoOverflow = vc
, kZero = eq
, kNotZero = ne
, overflow = 0
,
no_overflow = 1
, below = 2
, above_equal = 3
, equal = 4
,
not_equal = 5
, below_equal = 6
, above = 7
, negative = 8
,
positive = 9
, parity_even = 10
, parity_odd = 11
, less = 12
,
greater_equal = 13
, less_equal = 14
, greater = 15
, carry = below
,
not_carry = above_equal
, zero = equal
, not_zero = not_equal
, sign = negative
,
not_sign = positive
, kEqual = eq
, kNotEqual = ne
, kLessThan = lt
,
kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
,
kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
,
kNoOverflow = vc
, kZero = eq
, kNotZero = ne
} |
| |
| enum | CheckForInexactConversion {
kCheckForInexactConversion
, kDontCheckForInexactConversion
, kCheckForInexactConversion
, kDontCheckForInexactConversion
,
kCheckForInexactConversion
, kDontCheckForInexactConversion
, kCheckForInexactConversion
, kDontCheckForInexactConversion
,
kCheckForInexactConversion
, kDontCheckForInexactConversion
, kCheckForInexactConversion
, kDontCheckForInexactConversion
} |
| |
| enum | Hint { no_hint = 0
, no_hint = 0
, no_hint = 0
, no_hint = 0
} |
| |
| enum class | StackLimitKind {
kInterruptStackLimit
, kRealStackLimit
, kInterruptStackLimit
, kRealStackLimit
,
kInterruptStackLimit
, kRealStackLimit
, kInterruptStackLimit
, kRealStackLimit
,
kInterruptStackLimit
, kInterruptStackLimit
, kInterruptStackLimit
, kInterruptStackLimit
,
kInterruptStackLimit
, kRealStackLimit
, kRealStackLimit
, kRealStackLimit
,
kRealStackLimit
, kRealStackLimit
, kInterruptStackLimit
, kRealStackLimit
,
kInterruptStackLimit
, kRealStackLimit
} |
| |
| enum | LinkRegisterStatus {
kLRHasNotBeenSaved
, kLRHasBeenSaved
, kLRHasNotBeenSaved
, kLRHasBeenSaved
,
kLRHasNotBeenSaved
, kLRHasBeenSaved
, kLRHasNotBeenSaved
, kLRHasBeenSaved
} |
| |
| enum | TargetAddressStorageMode { CAN_INLINE_TARGET_ADDRESS
, NEVER_INLINE_TARGET_ADDRESS
} |
| |
| enum | RegisterCode {
kRegAfterLast
, kRegAfterLast
, kRegAfterLast
, kRegAfterLast
,
kRegAfterLast
, kRegAfterLast
, kRegAfterLast
, kRegAfterLast
,
kRegAfterLast
} |
| |
| enum | SwVfpRegisterCode { kSwVfpAfterLast
} |
| |
| enum | DoubleRegisterCode {
kDoubleAfterLast
, kDoubleAfterLast
, kDoubleAfterLast
, kDoubleAfterLast
,
kDoubleAfterLast
, kDoubleAfterLast
, kDoubleAfterLast
, kDoubleAfterLast
} |
| |
| enum | Simd128RegisterCode { kSimd128AfterLast
, kSimd128AfterLast
} |
| |
| enum | CRegisterCode { kCAfterLast
, kCAfterLast
, kCAfterLast
} |
| |
| enum | Condition : int {
kNoCondition = -1
, eq = 0 << 28
, ne = 1 << 28
, cs = 2 << 28
,
cc = 3 << 28
, mi = 4 << 28
, pl = 5 << 28
, vs = 6 << 28
,
vc = 7 << 28
, hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, al = 14 << 28
,
kSpecialCondition = 15 << 28
, kNumberOfConditions = 16
, hs = cs
, lo = cc
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, eq = 0 << 28
, ne = 1 << 28
,
hs = cs
, cs = 2 << 28
, lo = cc
, cc = 3 << 28
,
mi = 4 << 28
, pl = 5 << 28
, vs = 6 << 28
, vc = 7 << 28
,
hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
, lt = 11 << 28
,
gt = 12 << 28
, le = 13 << 28
, al = 14 << 28
, nv = 15
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, overflow = 0
, no_overflow = 1
,
below = 2
, above_equal = 3
, equal = 4
, not_equal = 5
,
below_equal = 6
, above = 7
, negative = 8
, positive = 9
,
parity_even = 10
, parity_odd = 11
, less = 12
, greater_equal = 13
,
less_equal = 14
, greater = 15
, carry = below
, not_carry = above_equal
,
zero = equal
, not_zero = not_equal
, sign = negative
, not_sign = positive
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, overflow = 0
, no_overflow = 1
,
Uless = 2
, Ugreater_equal = 3
, Uless_equal = 4
, Ugreater = 5
,
equal = 4
, not_equal = 5
, negative = 8
, positive = 9
,
parity_even = 10
, parity_odd = 11
, less = 12
, greater_equal = 13
,
less_equal = 14
, greater = 15
, ueq = 16
, ogl = 17
,
cc_always = 18
, carry = below
, not_carry = above_equal
, zero = equal
,
eq = 0 << 28
, not_zero = not_equal
, ne = 1 << 28
, nz = not_equal
,
sign = negative
, not_sign = positive
, mi = 4 << 28
, pl = 5 << 28
,
hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
, lt = 11 << 28
,
gt = 12 << 28
, le = 13 << 28
, hs = cs
, lo = cc
,
al = 14 << 28
, ult = Uless
, uge = Ugreater_equal
, ule = Uless_equal
,
ugt = Ugreater
, kEqual = eq
, kNotEqual = ne
, kLessThan = lt
,
kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
,
kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
,
kNoOverflow = vc
, kZero = eq
, kNotZero = ne
, overflow = 0
,
no_overflow = 1
, Uless = 2
, Ugreater_equal = 3
, Uless_equal = 4
,
Ugreater = 5
, equal = 4
, not_equal = 5
, negative = 8
,
positive = 9
, parity_even = 10
, parity_odd = 11
, less = 12
,
greater_equal = 13
, less_equal = 14
, greater = 15
, ueq = 16
,
ogl = 17
, cc_always = 18
, carry = below
, not_carry = above_equal
,
zero = equal
, eq = 0 << 28
, not_zero = not_equal
, ne = 1 << 28
,
nz = not_equal
, sign = negative
, not_sign = positive
, mi = 4 << 28
,
pl = 5 << 28
, hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, hs = cs
,
lo = cc
, al = 14 << 28
, ult = Uless
, uge = Ugreater_equal
,
ule = Uless_equal
, ugt = Ugreater
, kEqual = eq
, kNotEqual = ne
,
kLessThan = lt
, kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
,
kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
,
kOverflow = vs
, kNoOverflow = vc
, kZero = eq
, kNotZero = ne
,
kNoCondition = -1
, eq = 0 << 28
, ne = 1 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, unordered = 6
,
ordered = 7
, overflow = 0
, nooverflow = 9
, al = 14 << 28
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, overflow = 0
, no_overflow = 1
,
Uless = 2
, Ugreater_equal = 3
, Uless_equal = 4
, Ugreater = 5
,
equal = 4
, not_equal = 5
, less = 12
, greater_equal = 13
,
less_equal = 14
, greater = 15
, cc_always = 18
, eq = 0 << 28
,
ne = 1 << 28
, ge = 10 << 28
, lt = 11 << 28
, gt = 12 << 28
,
le = 13 << 28
, al = 14 << 28
, ult = Uless
, uge = Ugreater_equal
,
ule = Uless_equal
, ugt = Ugreater
, kEqual = eq
, kNotEqual = ne
,
kLessThan = lt
, kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
,
kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
,
kOverflow = vs
, kNoOverflow = vc
, kZero = eq
, kNotZero = ne
,
kNoCondition = -1
, eq = 0 << 28
, ne = 1 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, al = 14 << 28
,
CC_NOP = 0x0
, CC_EQ = 0x08
, CC_LT = 0x04
, CC_LE = CC_EQ | CC_LT
,
CC_GT = 0x02
, CC_GE = CC_EQ | CC_GT
, CC_OF = 0x01
, CC_NOF = 0x0E
,
CC_ALWAYS = 0x0F
, unordered = 6
, ordered = 7
, overflow = 0
,
nooverflow = 9
, mask0x0 = 0
, mask0x1 = 1
, mask0x2 = 2
,
mask0x3 = 3
, mask0x4 = 4
, mask0x5 = 5
, mask0x6 = 6
,
mask0x7 = 7
, mask0x8 = 8
, mask0x9 = 9
, mask0xA = 10
,
mask0xB = 11
, mask0xC = 12
, mask0xD = 13
, mask0xE = 14
,
mask0xF = 15
, kEqual = eq
, kNotEqual = ne
, kLessThan = lt
,
kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
,
kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
,
kNoOverflow = vc
, kZero = eq
, kNotZero = ne
, overflow = 0
,
no_overflow = 1
, below = 2
, above_equal = 3
, equal = 4
,
not_equal = 5
, below_equal = 6
, above = 7
, negative = 8
,
positive = 9
, parity_even = 10
, parity_odd = 11
, less = 12
,
greater_equal = 13
, less_equal = 14
, greater = 15
, carry = below
,
not_carry = above_equal
, zero = equal
, not_zero = not_equal
, sign = negative
,
not_sign = positive
, kEqual = eq
, kNotEqual = ne
, kLessThan = lt
,
kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
,
kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
,
kNoOverflow = vc
, kZero = eq
, kNotZero = ne
} |
| |
| enum | FlagsUpdate { SetFlags = 1
, LeaveFlags = 0
} |
| |
| enum | StatusFlags {
NoFlag = 0
, NFlag = N_mask
, ZFlag = Z_mask
, CFlag = C_mask
,
VFlag = V_mask
, NZFlag = NFlag | ZFlag
, NCFlag = NFlag | CFlag
, NVFlag = NFlag | VFlag
,
ZCFlag = ZFlag | CFlag
, ZVFlag = ZFlag | VFlag
, CVFlag = CFlag | VFlag
, NZCFlag = NFlag | ZFlag | CFlag
,
NZVFlag = NFlag | ZFlag | VFlag
, NCVFlag = NFlag | CFlag | VFlag
, ZCVFlag = ZFlag | CFlag | VFlag
, NZCVFlag = NFlag | ZFlag | CFlag | VFlag
,
FPEqualFlag = ZCFlag
, FPLessThanFlag = NFlag
, FPGreaterThanFlag = CFlag
, FPUnorderedFlag = CVFlag
} |
| |
| enum | Shift { NO_SHIFT = -1
, MSL = 0x4
} |
| |
| enum | Extend {
NO_EXTEND = -1
, UXTB = 0
, UXTH = 1
, UXTW = 2
,
UXTX = 3
, SXTB = 4
, SXTH = 5
, SXTW = 6
,
SXTX = 7
} |
| |
| enum | SystemHint {
NOP = 0
, YIELD = 1
, WFE = 2
, WFI = 3
,
SEV = 4
, SEVL = 5
, CSDB = 20
, BTI = 32
,
BTI_c = 34
, BTI_j = 36
, BTI_jc = 38
} |
| |
| enum class | BranchTargetIdentifier {
kNone
, kBti
, kBtiCall
, kBtiJump
,
kBtiJumpCall
, kPacibsp
} |
| |
| enum | BarrierDomain { OuterShareable = 0
, NonShareable = 1
, InnerShareable = 2
, FullSystem = 3
} |
| |
| enum | BarrierType { BarrierOther = 0
, BarrierReads = 1
, BarrierWrites = 2
, BarrierAll = 3
} |
| |
| enum | SystemRegister { NZCV
, FPCR
} |
| |
| enum | ImmBranchType {
UnknownBranchType = 0
, CondBranchType = 1
, UncondBranchType = 2
, CompareBranchType = 3
,
TestBranchType = 4
} |
| |
| enum | AddrMode |
| |
| enum | FPRounding {
FPTieEven = 0x0
, FPPositiveInfinity = 0x1
, FPNegativeInfinity = 0x2
, FPZero = 0x3
,
FPTieAway
, FPRoundOdd
} |
| |
| enum | Reg31Mode { Reg31IsStackPointer
, Reg31IsZeroRegister
} |
| |
| enum | PrintfArgPattern { kPrintfArgW = 1
, kPrintfArgX = 2
, kPrintfArgD = 3
} |
| |
| enum | DebugParameters {
NO_PARAM = 0
, BREAK = 1 << 0
, LOG_DISASM = 1 << 1
, LOG_REGS = 1 << 2
,
LOG_VREGS = 1 << 3
, LOG_SYS_REGS = 1 << 4
, LOG_WRITE = 1 << 5
, LOG_NONE = 0
,
LOG_STATE = LOG_REGS | LOG_VREGS | LOG_SYS_REGS
, LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE
, TRACE_ENABLE = 1 << 6
, TRACE_DISABLE = 2 << 6
,
TRACE_OVERRIDE = 3 << 6
, NO_PARAM = 0
, BREAK = 1 << 0
, LOG_TRACE = 1 << 1
,
LOG_REGS = 1 << 2
, LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE
, TRACE_ENABLE = 1 << 6
, TRACE_DISABLE = 2 << 6
} |
| |
| enum | NEONFormat {
NF_UNDEF = 0
, NF_8B = 1
, NF_16B = 2
, NF_4H = 3
,
NF_8H = 4
, NF_2S = 5
, NF_4S = 6
, NF_1D = 7
,
NF_2D = 8
, NF_B = 9
, NF_H = 10
, NF_S = 11
,
NF_D = 12
} |
| |
| enum | BranchType {
integer_eq = eq
, integer_ne = ne
, integer_hs = hs
, integer_lo = lo
,
integer_mi = mi
, integer_pl = pl
, integer_vs = vs
, integer_vc = vc
,
integer_hi = hi
, integer_ls = ls
, integer_ge = ge
, integer_lt = lt
,
integer_gt = gt
, integer_le = le
, integer_al = al
, integer_nv = nv
,
always
, never
, reg_zero
, reg_not_zero
,
reg_bit_clear
, reg_bit_set
, kBranchTypeFirstCondition = eq
, kBranchTypeLastCondition = nv
,
kBranchTypeFirstUsingReg = reg_zero
, kBranchTypeFirstUsingBit = reg_bit_clear
} |
| |
| enum | LinkRegisterStatus {
kLRHasNotBeenSaved
, kLRHasBeenSaved
, kLRHasNotBeenSaved
, kLRHasBeenSaved
,
kLRHasNotBeenSaved
, kLRHasBeenSaved
, kLRHasNotBeenSaved
, kLRHasBeenSaved
} |
| |
| enum | DiscardMoveMode { kDontDiscardForSameWReg
, kDiscardForSameWReg
} |
| |
| enum | PreShiftImmMode { kNoShift
, kLimitShiftForSP
, kAnyShift
} |
| |
| enum class | StackLimitKind {
kInterruptStackLimit
, kRealStackLimit
, kInterruptStackLimit
, kRealStackLimit
,
kInterruptStackLimit
, kRealStackLimit
, kInterruptStackLimit
, kRealStackLimit
,
kInterruptStackLimit
, kRealStackLimit
, kInterruptStackLimit
, kRealStackLimit
} |
| |
| enum | RegisterCode {
kRegAfterLast
, kRegAfterLast
, kRegAfterLast
, kRegAfterLast
,
kRegAfterLast
, kRegAfterLast
, kRegAfterLast
, kRegAfterLast
,
kRegAfterLast
} |
| |
| enum | DoubleRegisterCode {
kDoubleAfterLast
, kDoubleAfterLast
, kDoubleAfterLast
, kDoubleAfterLast
,
kDoubleAfterLast
, kDoubleAfterLast
, kDoubleAfterLast
, kDoubleAfterLast
} |
| |
| enum | VectorFormat {
kFormatUndefined = 0xffffffff
, kFormat8B = NEON_8B
, kFormat16B = NEON_16B
, kFormat4H = NEON_4H
,
kFormat8H = NEON_8H
, kFormat2S = NEON_2S
, kFormat4S = NEON_4S
, kFormat1D = NEON_1D
,
kFormat2D = NEON_2D
, kFormatB = NEON_B | NEONScalar
, kFormatH = NEON_H | NEONScalar
, kFormatS = NEON_S | NEONScalar
,
kFormatD = NEON_D | NEONScalar
, kFormat1Q = 0xfffffffd
} |
| |
| enum class | CodeObjectRequired { kNo
, kYes
} |
| |
| enum class | BuiltinCallJumpMode { kAbsolute
, kPCRelative
, kIndirect
, kForMksnapshot
} |
| |
| enum class | AtomicMemoryOrder : uint8_t { kAcqRel
, kSeqCst
} |
| |
| enum class | BailoutReason : uint8_t { BAILOUT_MESSAGES_LIST =(ERROR_MESSAGES_CONSTANTS) kLastErrorMessage
} |
| |
| enum class | AbortReason : uint8_t { ABORT_MESSAGES_LIST =(ERROR_MESSAGES_CONSTANTS) kLastErrorMessage
} |
| |
| enum | AllocationSiteOverrideMode { DONT_OVERRIDE
, DISABLE_ALLOCATION_SITES
} |
| |
| enum class | PrimitiveType { kBoolean
, kNumber
, kString
, kSymbol
} |
| |
| enum | CpuFeature { NUMBER_OF_CPU_FEATURES
} |
| |
| enum | LookupMode { kFindExisting
, kFindInsertionEntry
} |
| |
| enum | Condition : int {
kNoCondition = -1
, eq = 0 << 28
, ne = 1 << 28
, cs = 2 << 28
,
cc = 3 << 28
, mi = 4 << 28
, pl = 5 << 28
, vs = 6 << 28
,
vc = 7 << 28
, hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, al = 14 << 28
,
kSpecialCondition = 15 << 28
, kNumberOfConditions = 16
, hs = cs
, lo = cc
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, eq = 0 << 28
, ne = 1 << 28
,
hs = cs
, cs = 2 << 28
, lo = cc
, cc = 3 << 28
,
mi = 4 << 28
, pl = 5 << 28
, vs = 6 << 28
, vc = 7 << 28
,
hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
, lt = 11 << 28
,
gt = 12 << 28
, le = 13 << 28
, al = 14 << 28
, nv = 15
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, overflow = 0
, no_overflow = 1
,
below = 2
, above_equal = 3
, equal = 4
, not_equal = 5
,
below_equal = 6
, above = 7
, negative = 8
, positive = 9
,
parity_even = 10
, parity_odd = 11
, less = 12
, greater_equal = 13
,
less_equal = 14
, greater = 15
, carry = below
, not_carry = above_equal
,
zero = equal
, not_zero = not_equal
, sign = negative
, not_sign = positive
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, overflow = 0
, no_overflow = 1
,
Uless = 2
, Ugreater_equal = 3
, Uless_equal = 4
, Ugreater = 5
,
equal = 4
, not_equal = 5
, negative = 8
, positive = 9
,
parity_even = 10
, parity_odd = 11
, less = 12
, greater_equal = 13
,
less_equal = 14
, greater = 15
, ueq = 16
, ogl = 17
,
cc_always = 18
, carry = below
, not_carry = above_equal
, zero = equal
,
eq = 0 << 28
, not_zero = not_equal
, ne = 1 << 28
, nz = not_equal
,
sign = negative
, not_sign = positive
, mi = 4 << 28
, pl = 5 << 28
,
hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
, lt = 11 << 28
,
gt = 12 << 28
, le = 13 << 28
, hs = cs
, lo = cc
,
al = 14 << 28
, ult = Uless
, uge = Ugreater_equal
, ule = Uless_equal
,
ugt = Ugreater
, kEqual = eq
, kNotEqual = ne
, kLessThan = lt
,
kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
,
kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
,
kNoOverflow = vc
, kZero = eq
, kNotZero = ne
, overflow = 0
,
no_overflow = 1
, Uless = 2
, Ugreater_equal = 3
, Uless_equal = 4
,
Ugreater = 5
, equal = 4
, not_equal = 5
, negative = 8
,
positive = 9
, parity_even = 10
, parity_odd = 11
, less = 12
,
greater_equal = 13
, less_equal = 14
, greater = 15
, ueq = 16
,
ogl = 17
, cc_always = 18
, carry = below
, not_carry = above_equal
,
zero = equal
, eq = 0 << 28
, not_zero = not_equal
, ne = 1 << 28
,
nz = not_equal
, sign = negative
, not_sign = positive
, mi = 4 << 28
,
pl = 5 << 28
, hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, hs = cs
,
lo = cc
, al = 14 << 28
, ult = Uless
, uge = Ugreater_equal
,
ule = Uless_equal
, ugt = Ugreater
, kEqual = eq
, kNotEqual = ne
,
kLessThan = lt
, kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
,
kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
,
kOverflow = vs
, kNoOverflow = vc
, kZero = eq
, kNotZero = ne
,
kNoCondition = -1
, eq = 0 << 28
, ne = 1 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, unordered = 6
,
ordered = 7
, overflow = 0
, nooverflow = 9
, al = 14 << 28
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, overflow = 0
, no_overflow = 1
,
Uless = 2
, Ugreater_equal = 3
, Uless_equal = 4
, Ugreater = 5
,
equal = 4
, not_equal = 5
, less = 12
, greater_equal = 13
,
less_equal = 14
, greater = 15
, cc_always = 18
, eq = 0 << 28
,
ne = 1 << 28
, ge = 10 << 28
, lt = 11 << 28
, gt = 12 << 28
,
le = 13 << 28
, al = 14 << 28
, ult = Uless
, uge = Ugreater_equal
,
ule = Uless_equal
, ugt = Ugreater
, kEqual = eq
, kNotEqual = ne
,
kLessThan = lt
, kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
,
kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
,
kOverflow = vs
, kNoOverflow = vc
, kZero = eq
, kNotZero = ne
,
kNoCondition = -1
, eq = 0 << 28
, ne = 1 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, al = 14 << 28
,
CC_NOP = 0x0
, CC_EQ = 0x08
, CC_LT = 0x04
, CC_LE = CC_EQ | CC_LT
,
CC_GT = 0x02
, CC_GE = CC_EQ | CC_GT
, CC_OF = 0x01
, CC_NOF = 0x0E
,
CC_ALWAYS = 0x0F
, unordered = 6
, ordered = 7
, overflow = 0
,
nooverflow = 9
, mask0x0 = 0
, mask0x1 = 1
, mask0x2 = 2
,
mask0x3 = 3
, mask0x4 = 4
, mask0x5 = 5
, mask0x6 = 6
,
mask0x7 = 7
, mask0x8 = 8
, mask0x9 = 9
, mask0xA = 10
,
mask0xB = 11
, mask0xC = 12
, mask0xD = 13
, mask0xE = 14
,
mask0xF = 15
, kEqual = eq
, kNotEqual = ne
, kLessThan = lt
,
kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
,
kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
,
kNoOverflow = vc
, kZero = eq
, kNotZero = ne
, overflow = 0
,
no_overflow = 1
, below = 2
, above_equal = 3
, equal = 4
,
not_equal = 5
, below_equal = 6
, above = 7
, negative = 8
,
positive = 9
, parity_even = 10
, parity_odd = 11
, less = 12
,
greater_equal = 13
, less_equal = 14
, greater = 15
, carry = below
,
not_carry = above_equal
, zero = equal
, not_zero = not_equal
, sign = negative
,
not_sign = positive
, kEqual = eq
, kNotEqual = ne
, kLessThan = lt
,
kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
,
kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
,
kNoOverflow = vc
, kZero = eq
, kNotZero = ne
} |
| |
| enum | RoundingMode { kRoundDown = 0x1
, kRoundUp = 0x2
, kRoundDown = 0x1
, kRoundUp = 0x2
} |
| |
| enum | ScaleFactor {
times_1 = 0
, times_2 = 1
, times_4 = 2
, times_8 = 3
,
times_int_size = times_4
, times_half_system_pointer_size = times_2
, times_system_pointer_size = times_4
, times_tagged_size = times_4
,
times_1 = 0
, times_2 = 1
, times_4 = 2
, times_8 = 3
,
times_int_size = times_4
, times_half_system_pointer_size = times_2
, times_system_pointer_size = times_4
, times_tagged_size = times_4
,
times_external_pointer_size = V8_ENABLE_SANDBOX_BOOL ? times_4 : times_8
} |
| |
| enum class | StackLimitKind {
kInterruptStackLimit
, kRealStackLimit
, kInterruptStackLimit
, kRealStackLimit
,
kInterruptStackLimit
, kRealStackLimit
, kInterruptStackLimit
, kRealStackLimit
,
kInterruptStackLimit
, kRealStackLimit
, kInterruptStackLimit
, kRealStackLimit
} |
| |
| enum | RegisterCode {
kRegAfterLast
, kRegAfterLast
, kRegAfterLast
, kRegAfterLast
,
kRegAfterLast
, kRegAfterLast
, kRegAfterLast
, kRegAfterLast
,
kRegAfterLast
} |
| |
| enum | DoubleCode { kDoubleAfterLast
} |
| |
| enum class | StackArgumentOrder { kDefault
, kJS
} |
| |
| enum | SoftwareInterruptCodes { call_rt_redirected = 0x7fff
, call_rt_redirected = 0x7fff
, call_rt_redirected = 0x7fff
} |
| |
| enum | Opcode : uint32_t {
BEQZ = 0x10U << 26
, BNEZ = 0x11U << 26
, BCZ = 0x12U << 26
, JIRL = 0x13U << 26
,
BEQ = 0x16U << 26
, BNE = 0x17U << 26
, BLT = 0x18U << 26
, BGE = 0x19U << 26
,
BLTU = 0x1aU << 26
, BGEU = 0x1bU << 26
, ADDU16I_D = 0x4U << 26
, LU12I_W = 0xaU << 25
,
LU32I_D = 0xbU << 25
, PCADDI = 0xcU << 25
, PCALAU12I = 0xdU << 25
, PCADDU12I = 0xeU << 25
,
PCADDU18I = 0xfU << 25
, LL_W = 0x20U << 24
, SC_W = 0x21U << 24
, LL_D = 0x22U << 24
,
SC_D = 0x23U << 24
, LDPTR_W = 0x24U << 24
, STPTR_W = 0x25U << 24
, LDPTR_D = 0x26U << 24
,
STPTR_D = 0x27U << 24
, BSTR_W = 0x1U << 22
, BSTRINS_W = BSTR_W
, BSTRPICK_W = BSTR_W
,
BSTRINS_D = 0x2U << 22
, BSTRPICK_D = 0x3U << 22
, SLTI = 0x8U << 22
, SLTUI = 0x9U << 22
,
ADDI_W = 0xaU << 22
, ADDI_D = 0xbU << 22
, LU52I_D = 0xcU << 22
, ANDI = 0xdU << 22
,
ORI = 0xeU << 22
, XORI = 0xfU << 22
, LD_B = 0xa0U << 22
, LD_H = 0xa1U << 22
,
LD_W = 0xa2U << 22
, LD_D = 0xa3U << 22
, ST_B = 0xa4U << 22
, ST_H = 0xa5U << 22
,
ST_W = 0xa6U << 22
, ST_D = 0xa7U << 22
, LD_BU = 0xa8U << 22
, LD_HU = 0xa9U << 22
,
LD_WU = 0xaaU << 22
, FLD_S = 0xacU << 22
, FST_S = 0xadU << 22
, FLD_D = 0xaeU << 22
,
FST_D = 0xafU << 22
, FMADD_S = 0x81U << 20
, FMADD_D = 0x82U << 20
, FMSUB_S = 0x85U << 20
,
FMSUB_D = 0x86U << 20
, FNMADD_S = 0x89U << 20
, FNMADD_D = 0x8aU << 20
, FNMSUB_S = 0x8dU << 20
,
FNMSUB_D = 0x8eU << 20
, FCMP_COND_S = 0xc1U << 20
, FCMP_COND_D = 0xc2U << 20
, BYTEPICK_D = 0x3U << 18
,
BYTEPICK_W = 0x2U << 18
, FSEL = 0x340U << 18
, ALSL = 0x1U << 18
, ALSL_W = ALSL
,
ALSL_WU = ALSL
, ALSL_D = 0xbU << 18
, SLLI_W = 0x40U << 16
, SRLI_W = 0x44U << 16
,
SRAI_W = 0x48U << 16
, ROTRI_W = 0x4cU << 16
, SLLI_D = 0x41U << 16
, SRLI_D = 0x45U << 16
,
SRAI_D = 0x49U << 16
, ROTRI_D = 0x4dU << 16
, SLLI = 0x10U << 18
, SRLI = 0x11U << 18
,
SRAI = 0x12U << 18
, ROTRI = 0x13U << 18
, ADD_W = 0x20U << 15
, ADD_D = 0x21U << 15
,
SUB_W = 0x22U << 15
, SUB_D = 0x23U << 15
, SLT = 0x24U << 15
, SLTU = 0x25U << 15
,
MASKEQZ = 0x26U << 15
, MASKNEZ = 0x27U << 15
, NOR = 0x28U << 15
, OR = 0x2aU << 15
,
XOR = 0x2bU << 15
, ANDN = 0x2dU << 15
, SLL_W = 0x2eU << 15
, SRL_W = 0x2fU << 15
,
SRA_W = 0x30U << 15
, SLL_D = 0x31U << 15
, SRL_D = 0x32U << 15
, SRA_D = 0x33U << 15
,
ROTR_W = 0x36U << 15
, ROTR_D = 0x37U << 15
, MUL_W = 0x38U << 15
, MULH_W = 0x39U << 15
,
MULH_WU = 0x3aU << 15
, MUL_D = 0x3bU << 15
, MULH_D = 0x3cU << 15
, MULH_DU = 0x3dU << 15
,
MULW_D_W = 0x3eU << 15
, MULW_D_WU = 0x3fU << 15
, DIV_W = 0x40U << 15
, MOD_W = 0x41U << 15
,
DIV_WU = 0x42U << 15
, MOD_WU = 0x43U << 15
, DIV_D = 0x44U << 15
, MOD_D = 0x45U << 15
,
DIV_DU = 0x46U << 15
, MOD_DU = 0x47U << 15
, BREAK = 1 << 0
, FADD_S = 0x201U << 15
,
FADD_D = 0x202U << 15
, FSUB_S = 0x205U << 15
, FSUB_D = 0x206U << 15
, FMUL_S = 0x209U << 15
,
FMUL_D = 0x20aU << 15
, FDIV_S = 0x20dU << 15
, FDIV_D = 0x20eU << 15
, FMAX_S = 0x211U << 15
,
FMAX_D = 0x212U << 15
, FMIN_S = 0x215U << 15
, FMIN_D = 0x216U << 15
, FMAXA_S = 0x219U << 15
,
FMAXA_D = 0x21aU << 15
, FMINA_S = 0x21dU << 15
, FMINA_D = 0x21eU << 15
, FSCALEB_S = 0x221U << 15
,
FSCALEB_D = 0x222U << 15
, FCOPYSIGN_S = 0x225U << 15
, FCOPYSIGN_D = 0x226U << 15
, LDX_B = 0x7000U << 15
,
LDX_H = 0x7008U << 15
, LDX_W = 0x7010U << 15
, LDX_D = 0x7018U << 15
, STX_B = 0x7020U << 15
,
STX_H = 0x7028U << 15
, STX_W = 0x7030U << 15
, STX_D = 0x7038U << 15
, LDX_BU = 0x7040U << 15
,
LDX_HU = 0x7048U << 15
, LDX_WU = 0x7050U << 15
, FLDX_S = 0x7060U << 15
, FLDX_D = 0x7068U << 15
,
FSTX_S = 0x7070U << 15
, FSTX_D = 0x7078U << 15
, AMSWAP_W = 0x70c0U << 15
, AMSWAP_D = 0x70c1U << 15
,
AMADD_W = 0x70c2U << 15
, AMADD_D = 0x70c3U << 15
, AMAND_W = 0x70c4U << 15
, AMAND_D = 0x70c5U << 15
,
AMOR_W = 0x70c6U << 15
, AMOR_D = 0x70c7U << 15
, AMXOR_W = 0x70c8U << 15
, AMXOR_D = 0x70c9U << 15
,
AMMAX_W = 0x70caU << 15
, AMMAX_D = 0x70cbU << 15
, AMMIN_W = 0x70ccU << 15
, AMMIN_D = 0x70cdU << 15
,
AMMAX_WU = 0x70ceU << 15
, AMMAX_DU = 0x70cfU << 15
, AMMIN_WU = 0x70d0U << 15
, AMMIN_DU = 0x70d1U << 15
,
AMSWAP_DB_W = 0x70d2U << 15
, AMSWAP_DB_D = 0x70d3U << 15
, AMADD_DB_W = 0x70d4U << 15
, AMADD_DB_D = 0x70d5U << 15
,
AMAND_DB_W = 0x70d6U << 15
, AMAND_DB_D = 0x70d7U << 15
, AMOR_DB_W = 0x70d8U << 15
, AMOR_DB_D = 0x70d9U << 15
,
AMXOR_DB_W = 0x70daU << 15
, AMXOR_DB_D = 0x70dbU << 15
, AMMAX_DB_W = 0x70dcU << 15
, AMMAX_DB_D = 0x70ddU << 15
,
AMMIN_DB_W = 0x70deU << 15
, AMMIN_DB_D = 0x70dfU << 15
, AMMAX_DB_WU = 0x70e0U << 15
, AMMAX_DB_DU = 0x70e1U << 15
,
AMMIN_DB_WU = 0x70e2U << 15
, AMMIN_DB_DU = 0x70e3U << 15
, DBAR = 0x70e4U << 15
, IBAR = 0x70e5U << 15
,
CLO_W = 0X4U << 10
, CLZ_W = 0X5U << 10
, CTO_W = 0X6U << 10
, CTZ_W = 0X7U << 10
,
CLO_D = 0X8U << 10
, CLZ_D = 0X9U << 10
, CTO_D = 0XaU << 10
, CTZ_D = 0XbU << 10
,
REVB_2H = 0XcU << 10
, REVB_4H = 0XdU << 10
, REVB_2W = 0XeU << 10
, REVB_D = 0XfU << 10
,
REVH_2W = 0X10U << 10
, REVH_D = 0X11U << 10
, BITREV_4B = 0X12U << 10
, BITREV_8B = 0X13U << 10
,
BITREV_W = 0X14U << 10
, BITREV_D = 0X15U << 10
, EXT_W_H = 0X16U << 10
, EXT_W_B = 0X17U << 10
,
FABS_S = 0X4501U << 10
, FABS_D = 0X4502U << 10
, FNEG_S = 0X4505U << 10
, FNEG_D = 0X4506U << 10
,
FLOGB_S = 0X4509U << 10
, FLOGB_D = 0X450aU << 10
, FCLASS_S = 0X450dU << 10
, FCLASS_D = 0X450eU << 10
,
FSQRT_S = 0X4511U << 10
, FSQRT_D = 0X4512U << 10
, FRECIP_S = 0X4515U << 10
, FRECIP_D = 0X4516U << 10
,
FRSQRT_S = 0X4519U << 10
, FRSQRT_D = 0X451aU << 10
, FMOV_S = 0X4525U << 10
, FMOV_D = 0X4526U << 10
,
MOVGR2FR_W = 0X4529U << 10
, MOVGR2FR_D = 0X452aU << 10
, MOVGR2FRH_W = 0X452bU << 10
, MOVFR2GR_S = 0X452dU << 10
,
MOVFR2GR_D = 0X452eU << 10
, MOVFRH2GR_S = 0X452fU << 10
, MOVGR2FCSR = 0X4530U << 10
, MOVFCSR2GR = 0X4532U << 10
,
MOVFR2CF = 0X4534U << 10
, MOVGR2CF = 0X4536U << 10
, FCVT_S_D = 0x4646U << 10
, FCVT_D_S = 0x4649U << 10
,
FTINTRM_W_S = 0x4681U << 10
, FTINTRM_W_D = 0x4682U << 10
, FTINTRM_L_S = 0x4689U << 10
, FTINTRM_L_D = 0x468aU << 10
,
FTINTRP_W_S = 0x4691U << 10
, FTINTRP_W_D = 0x4692U << 10
, FTINTRP_L_S = 0x4699U << 10
, FTINTRP_L_D = 0x469aU << 10
,
FTINTRZ_W_S = 0x46a1U << 10
, FTINTRZ_W_D = 0x46a2U << 10
, FTINTRZ_L_S = 0x46a9U << 10
, FTINTRZ_L_D = 0x46aaU << 10
,
FTINTRNE_W_S = 0x46b1U << 10
, FTINTRNE_W_D = 0x46b2U << 10
, FTINTRNE_L_S = 0x46b9U << 10
, FTINTRNE_L_D = 0x46baU << 10
,
FTINT_W_S = 0x46c1U << 10
, FTINT_W_D = 0x46c2U << 10
, FTINT_L_S = 0x46c9U << 10
, FTINT_L_D = 0x46caU << 10
,
FFINT_S_W = 0x4744U << 10
, FFINT_S_L = 0x4746U << 10
, FFINT_D_W = 0x4748U << 10
, FFINT_D_L = 0x474aU << 10
,
FRINT_S = 0x4791U << 10
, FRINT_D = 0x4792U << 10
, MOVCF2FR = 0x4535U << 10
, MOVCF2GR = 0x4537U << 10
,
EXTP = 0x4000000
, EXT0 = 0x10000000
, EXT1 = 0x4C000000
, EXT2 = 0x7C000000
,
EXT3 = 0xEC000000
, EXT4 = 0xFC000000
, EXT5 = 0x78000000
, EXT6 = 0xF0000000
,
DUMY = 0xE352
} |
| |
| enum | Condition : int {
kNoCondition = -1
, eq = 0 << 28
, ne = 1 << 28
, cs = 2 << 28
,
cc = 3 << 28
, mi = 4 << 28
, pl = 5 << 28
, vs = 6 << 28
,
vc = 7 << 28
, hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, al = 14 << 28
,
kSpecialCondition = 15 << 28
, kNumberOfConditions = 16
, hs = cs
, lo = cc
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, eq = 0 << 28
, ne = 1 << 28
,
hs = cs
, cs = 2 << 28
, lo = cc
, cc = 3 << 28
,
mi = 4 << 28
, pl = 5 << 28
, vs = 6 << 28
, vc = 7 << 28
,
hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
, lt = 11 << 28
,
gt = 12 << 28
, le = 13 << 28
, al = 14 << 28
, nv = 15
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, overflow = 0
, no_overflow = 1
,
below = 2
, above_equal = 3
, equal = 4
, not_equal = 5
,
below_equal = 6
, above = 7
, negative = 8
, positive = 9
,
parity_even = 10
, parity_odd = 11
, less = 12
, greater_equal = 13
,
less_equal = 14
, greater = 15
, carry = below
, not_carry = above_equal
,
zero = equal
, not_zero = not_equal
, sign = negative
, not_sign = positive
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, overflow = 0
, no_overflow = 1
,
Uless = 2
, Ugreater_equal = 3
, Uless_equal = 4
, Ugreater = 5
,
equal = 4
, not_equal = 5
, negative = 8
, positive = 9
,
parity_even = 10
, parity_odd = 11
, less = 12
, greater_equal = 13
,
less_equal = 14
, greater = 15
, ueq = 16
, ogl = 17
,
cc_always = 18
, carry = below
, not_carry = above_equal
, zero = equal
,
eq = 0 << 28
, not_zero = not_equal
, ne = 1 << 28
, nz = not_equal
,
sign = negative
, not_sign = positive
, mi = 4 << 28
, pl = 5 << 28
,
hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
, lt = 11 << 28
,
gt = 12 << 28
, le = 13 << 28
, hs = cs
, lo = cc
,
al = 14 << 28
, ult = Uless
, uge = Ugreater_equal
, ule = Uless_equal
,
ugt = Ugreater
, kEqual = eq
, kNotEqual = ne
, kLessThan = lt
,
kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
,
kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
,
kNoOverflow = vc
, kZero = eq
, kNotZero = ne
, overflow = 0
,
no_overflow = 1
, Uless = 2
, Ugreater_equal = 3
, Uless_equal = 4
,
Ugreater = 5
, equal = 4
, not_equal = 5
, negative = 8
,
positive = 9
, parity_even = 10
, parity_odd = 11
, less = 12
,
greater_equal = 13
, less_equal = 14
, greater = 15
, ueq = 16
,
ogl = 17
, cc_always = 18
, carry = below
, not_carry = above_equal
,
zero = equal
, eq = 0 << 28
, not_zero = not_equal
, ne = 1 << 28
,
nz = not_equal
, sign = negative
, not_sign = positive
, mi = 4 << 28
,
pl = 5 << 28
, hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, hs = cs
,
lo = cc
, al = 14 << 28
, ult = Uless
, uge = Ugreater_equal
,
ule = Uless_equal
, ugt = Ugreater
, kEqual = eq
, kNotEqual = ne
,
kLessThan = lt
, kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
,
kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
,
kOverflow = vs
, kNoOverflow = vc
, kZero = eq
, kNotZero = ne
,
kNoCondition = -1
, eq = 0 << 28
, ne = 1 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, unordered = 6
,
ordered = 7
, overflow = 0
, nooverflow = 9
, al = 14 << 28
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, overflow = 0
, no_overflow = 1
,
Uless = 2
, Ugreater_equal = 3
, Uless_equal = 4
, Ugreater = 5
,
equal = 4
, not_equal = 5
, less = 12
, greater_equal = 13
,
less_equal = 14
, greater = 15
, cc_always = 18
, eq = 0 << 28
,
ne = 1 << 28
, ge = 10 << 28
, lt = 11 << 28
, gt = 12 << 28
,
le = 13 << 28
, al = 14 << 28
, ult = Uless
, uge = Ugreater_equal
,
ule = Uless_equal
, ugt = Ugreater
, kEqual = eq
, kNotEqual = ne
,
kLessThan = lt
, kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
,
kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
,
kOverflow = vs
, kNoOverflow = vc
, kZero = eq
, kNotZero = ne
,
kNoCondition = -1
, eq = 0 << 28
, ne = 1 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, al = 14 << 28
,
CC_NOP = 0x0
, CC_EQ = 0x08
, CC_LT = 0x04
, CC_LE = CC_EQ | CC_LT
,
CC_GT = 0x02
, CC_GE = CC_EQ | CC_GT
, CC_OF = 0x01
, CC_NOF = 0x0E
,
CC_ALWAYS = 0x0F
, unordered = 6
, ordered = 7
, overflow = 0
,
nooverflow = 9
, mask0x0 = 0
, mask0x1 = 1
, mask0x2 = 2
,
mask0x3 = 3
, mask0x4 = 4
, mask0x5 = 5
, mask0x6 = 6
,
mask0x7 = 7
, mask0x8 = 8
, mask0x9 = 9
, mask0xA = 10
,
mask0xB = 11
, mask0xC = 12
, mask0xD = 13
, mask0xE = 14
,
mask0xF = 15
, kEqual = eq
, kNotEqual = ne
, kLessThan = lt
,
kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
,
kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
,
kNoOverflow = vc
, kZero = eq
, kNotZero = ne
, overflow = 0
,
no_overflow = 1
, below = 2
, above_equal = 3
, equal = 4
,
not_equal = 5
, below_equal = 6
, above = 7
, negative = 8
,
positive = 9
, parity_even = 10
, parity_odd = 11
, less = 12
,
greater_equal = 13
, less_equal = 14
, greater = 15
, carry = below
,
not_carry = above_equal
, zero = equal
, not_zero = not_equal
, sign = negative
,
not_sign = positive
, kEqual = eq
, kNotEqual = ne
, kLessThan = lt
,
kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
,
kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
,
kNoOverflow = vc
, kZero = eq
, kNotZero = ne
} |
| |
| enum | FPUCondition {
kNoFPUCondition = -1
, CAF = 0x00
, SAF = 0x01
, CLT = 0x02
,
CEQ = 0x04
, SEQ = 0x05
, CLE = 0x06
, SLE = 0x07
,
CUN = 0x08
, SUN = 0x09
, CULT = 0x0a
, SULT = 0x0b
,
CUEQ = 0x0c
, SUEQ = 0x0d
, CULE = 0x0e
, SULE = 0x0f
,
CNE = 0x10
, SNE = 0x11
, COR = 0x14
, SOR = 0x15
,
CUNE = 0x18
, SUNE = 0x19
, kNoFPUCondition = -1
, F = 0x00
,
UN = 0x01
, EQ = 0x02
, UEQ = 0x03
, OLT = 0x04
,
LT = 0x04
, ULT = 0x05
, OLE = 0x06
, LE = 0x06
,
ULE = 0x07
, ORD = 0x11
, UNE = 0x12
, NE = 0x13
,
kNoFPUCondition = -1
, EQ = 0x02
, NE = 0x13
, LT = 0x04
,
GE = 0x05
, LE = 0x06
, GT = 0x07
} |
| |
| enum | FPURoundingMode {
mode_round = RN
, mode_ceil = RP
, mode_floor = RM
, mode_trunc = RZ
,
mode_round = RN
, mode_ceil = RP
, mode_floor = RM
, mode_trunc = RZ
,
RNE = 0b000
, RTZ = 0b001
, RDN = 0b010
, RUP = 0b011
,
RMM = 0b100
, DYN = 0b111
} |
| |
| enum | CheckForInexactConversion {
kCheckForInexactConversion
, kDontCheckForInexactConversion
, kCheckForInexactConversion
, kDontCheckForInexactConversion
,
kCheckForInexactConversion
, kDontCheckForInexactConversion
, kCheckForInexactConversion
, kDontCheckForInexactConversion
,
kCheckForInexactConversion
, kDontCheckForInexactConversion
, kCheckForInexactConversion
, kDontCheckForInexactConversion
} |
| |
| enum class | MaxMinKind : int {
kMin = 0
, kMax = 1
, kMin = 0
, kMax = 1
,
kMin = 0
, kMax = 1
} |
| |
| enum | Hint { no_hint = 0
, no_hint = 0
, no_hint = 0
, no_hint = 0
} |
| |
| enum | LiFlags {
OPTIMIZE_SIZE = 0
, CONSTANT_SIZE = 1
, ADDRESS_LOAD = 2
, OPTIMIZE_SIZE = 0
,
CONSTANT_SIZE = 1
, ADDRESS_LOAD = 2
, OPTIMIZE_SIZE = 0
, CONSTANT_SIZE = 1
,
ADDRESS_LOAD = 2
} |
| |
| enum | RAStatus {
kRAHasNotBeenSaved
, kRAHasBeenSaved
, kRAHasNotBeenSaved
, kRAHasBeenSaved
,
kRAHasNotBeenSaved
, kRAHasBeenSaved
} |
| |
| enum | RegisterCode {
kRegAfterLast
, kRegAfterLast
, kRegAfterLast
, kRegAfterLast
,
kRegAfterLast
, kRegAfterLast
, kRegAfterLast
, kRegAfterLast
,
kRegAfterLast
} |
| |
| enum | DoubleRegisterCode {
kDoubleAfterLast
, kDoubleAfterLast
, kDoubleAfterLast
, kDoubleAfterLast
,
kDoubleAfterLast
, kDoubleAfterLast
, kDoubleAfterLast
, kDoubleAfterLast
} |
| |
| enum | CFRegister {
FCC0
, FCC1
, FCC2
, FCC3
,
FCC4
, FCC5
, FCC6
, FCC7
} |
| |
| enum class | MachineRepresentation : uint8_t {
kNone
, kBit
, kWord8
, kWord16
,
kWord32
, kWord64
, kMapWord
, kTaggedSigned
,
kTaggedPointer
, kTagged
, kCompressedPointer
, kCompressed
,
kProtectedPointer
, kIndirectPointer
, kSandboxedPointer
, kFloat16RawBits
,
kFloat16
, kFloat32
, kFloat64
, kSimd128
,
kSimd256
, kFirstFPRepresentation = kFloat16
, kLastRepresentation = kSimd256
} |
| |
| enum class | MachineSemantic : uint8_t {
kNone
, kBool
, kInt32
, kUint32
,
kInt64
, kUint64
, kSignedBigInt64
, kUnsignedBigInt64
,
kNumber
, kHoleyFloat64
, kAny
} |
| |
| enum | MSASize { MSA_B = 0x0
, MSA_H = 0x1
, MSA_W = 0x2
, MSA_D = 0x3
} |
| |
| enum | MSADataType {
MSAS8 = 0
, MSAS16 = 1
, MSAS32 = 2
, MSAS64 = 3
,
MSAU8 = 4
, MSAU16 = 5
, MSAU32 = 6
, MSAU64 = 7
} |
| |
| enum | SoftwareInterruptCodes { call_rt_redirected = 0x7fff
, call_rt_redirected = 0x7fff
, call_rt_redirected = 0x7fff
} |
| |
| enum | SecondaryField : uint32_t {
SLL = ((0U << 3) + 0)
, MOVCI = ((0U << 3) + 1)
, SRL = ((0U << 3) + 2)
, SRA = ((0U << 3) + 3)
,
SLLV = ((0U << 3) + 4)
, LSA = ((0U << 3) + 5)
, SRLV = ((0U << 3) + 6)
, SRAV = ((0U << 3) + 7)
,
JR = ((1U << 3) + 0)
, JALR = ((1U << 3) + 1)
, BREAK = 1 << 0
, SYNC = ((1U << 3) + 7)
,
MFHI = ((2U << 3) + 0)
, CLZ_R6 = ((2U << 3) + 0)
, CLO_R6 = ((2U << 3) + 1)
, MFLO = ((2U << 3) + 2)
,
DCLZ_R6 = ((2U << 3) + 2)
, DCLO_R6 = ((2U << 3) + 3)
, DSLLV = ((2U << 3) + 4)
, DLSA = ((2U << 3) + 5)
,
DSRLV = ((2U << 3) + 6)
, DSRAV = ((2U << 3) + 7)
, MULT = ((3U << 3) + 0)
, MULTU = ((3U << 3) + 1)
,
DIV = ((3U << 3) + 2)
, DIVU = ((3U << 3) + 3)
, DMULT = ((3U << 3) + 4)
, DMULTU = ((3U << 3) + 5)
,
DDIV = ((3U << 3) + 6)
, DDIVU = ((3U << 3) + 7)
, ADDU = ((4U << 3) + 1)
, SUBU = ((4U << 3) + 3)
,
OR = 0x2aU << 15
, XOR = 0x2bU << 15
, NOR = 0x28U << 15
, SLT = 0x24U << 15
,
SLTU = 0x25U << 15
, DADD = ((5U << 3) + 4)
, DADDU = ((5U << 3) + 5)
, DSUB = ((5U << 3) + 6)
,
DSUBU = ((5U << 3) + 7)
, TGE = ((6U << 3) + 0)
, TGEU = ((6U << 3) + 1)
, TLT = ((6U << 3) + 2)
,
TLTU = ((6U << 3) + 3)
, SELEQZ_S = ((6U << 3) + 5)
, TNE = ((6U << 3) + 6)
, SELNEZ_S = ((6U << 3) + 7)
,
DSLL = ((7U << 3) + 0)
, DSRL = ((7U << 3) + 2)
, DSRA = ((7U << 3) + 3)
, DSLL32 = ((7U << 3) + 4)
,
DSRL32 = ((7U << 3) + 6)
, DSRA32 = ((7U << 3) + 7)
, MUL_MUH = ((3U << 3) + 0)
, MUL_MUH_U = ((3U << 3) + 1)
,
D_MUL_MUH = ((7U << 2) + 0)
, D_MUL_MUH_U = ((7U << 2) + 1)
, RINT = ((3U << 3) + 2)
, MUL_OP = ((0U << 3) + 2)
,
MUH_OP = ((0U << 3) + 3)
, DIV_OP = ((0U << 3) + 2)
, MOD_OP = ((0U << 3) + 3)
, DIV_MOD = ((3U << 3) + 2)
,
DIV_MOD_U = ((3U << 3) + 3)
, D_DIV_MOD = ((3U << 3) + 6)
, D_DIV_MOD_U = ((3U << 3) + 7)
, MUL = ((0U << 3) + 2)
,
CLO = ((4U << 3) + 1)
, DCLZ = ((4U << 3) + 4)
, DCLO = ((4U << 3) + 5)
, EXT = ((0U << 3) + 0)
,
DEXTM = ((0U << 3) + 1)
, DEXTU = ((0U << 3) + 2)
, DEXT = ((0U << 3) + 3)
, INS = ((0U << 3) + 4)
,
DINSM = ((0U << 3) + 5)
, DINSU = ((0U << 3) + 6)
, DINS = ((0U << 3) + 7)
, BSHFL = ((4U << 3) + 0)
,
DBSHFL = ((4U << 3) + 4)
, SC_R6 = ((4U << 3) + 6)
, SCD_R6 = ((4U << 3) + 7)
, LL_R6 = ((6U << 3) + 6)
,
LLD_R6 = ((6U << 3) + 7)
, BITSWAP = ((0U << 3) + 0)
, ALIGN = ((0U << 3) + 2)
, WSBH = ((0U << 3) + 2)
,
SEB = ((2U << 3) + 0)
, SEH = ((3U << 3) + 0)
, DBITSWAP = ((0U << 3) + 0)
, DALIGN = ((0U << 3) + 1)
,
DBITSWAP_SA = ((0U << 3) + 0) << kSaShift
, DSBH = ((0U << 3) + 2)
, DSHD = ((0U << 3) + 5)
, BLTZ = ((0U << 3) + 0) << 16
,
BGEZ = ((0U << 3) + 1) << 16
, BLTZAL = ((2U << 3) + 0) << 16
, BGEZAL = ((2U << 3) + 1) << 16
, BGEZALL = ((2U << 3) + 3) << 16
,
DAHI = ((0U << 3) + 6) << 16
, DATI = ((3U << 3) + 6) << 16
, MFC1 = ((0U << 3) + 0) << 21
, DMFC1 = ((0U << 3) + 1) << 21
,
CFC1 = ((0U << 3) + 2) << 21
, MFHC1 = ((0U << 3) + 3) << 21
, MTC1 = ((0U << 3) + 4) << 21
, DMTC1 = ((0U << 3) + 5) << 21
,
CTC1 = ((0U << 3) + 6) << 21
, MTHC1 = ((0U << 3) + 7) << 21
, BC1 = ((1U << 3) + 0) << 21
, D = ((2U << 3) + 1) << 21
,
PS = ((2U << 3) + 6) << 21
, ADD_S = ((0U << 3) + 0)
, SUB_S = ((0U << 3) + 1)
, MUL_S = ((0U << 3) + 2)
,
DIV_S = ((0U << 3) + 3)
, ABS_S = ((0U << 3) + 5)
, SQRT_S = ((0U << 3) + 4)
, MOV_S = ((0U << 3) + 6)
,
NEG_S = ((0U << 3) + 7)
, ROUND_L_S = ((1U << 3) + 0)
, TRUNC_L_S = ((1U << 3) + 1)
, CEIL_L_S = ((1U << 3) + 2)
,
FLOOR_L_S = ((1U << 3) + 3)
, ROUND_W_S = ((1U << 3) + 4)
, TRUNC_W_S = ((1U << 3) + 5)
, CEIL_W_S = ((1U << 3) + 6)
,
FLOOR_W_S = ((1U << 3) + 7)
, RECIP_S = ((2U << 3) + 5)
, RSQRT_S = ((2U << 3) + 6)
, MADDF_S = ((3U << 3) + 0)
,
MSUBF_S = ((3U << 3) + 1)
, CLASS_S = ((3U << 3) + 3)
, CVT_D_S = ((4U << 3) + 1)
, CVT_W_S = ((4U << 3) + 4)
,
CVT_L_S = ((4U << 3) + 5)
, CVT_PS_S = ((4U << 3) + 6)
, ADD_D = 0x21U << 15
, SUB_D = 0x23U << 15
,
MUL_D = 0x3bU << 15
, DIV_D = 0x44U << 15
, SQRT_D = ((0U << 3) + 4)
, ABS_D = ((0U << 3) + 5)
,
MOV_D = ((0U << 3) + 6)
, NEG_D = ((0U << 3) + 7)
, ROUND_L_D = ((1U << 3) + 0)
, TRUNC_L_D = ((1U << 3) + 1)
,
CEIL_L_D = ((1U << 3) + 2)
, FLOOR_L_D = ((1U << 3) + 3)
, ROUND_W_D = ((1U << 3) + 4)
, TRUNC_W_D = ((1U << 3) + 5)
,
CEIL_W_D = ((1U << 3) + 6)
, FLOOR_W_D = ((1U << 3) + 7)
, RECIP_D = ((2U << 3) + 5)
, RSQRT_D = ((2U << 3) + 6)
,
MADDF_D = ((3U << 3) + 0)
, MSUBF_D = ((3U << 3) + 1)
, CLASS_D = ((3U << 3) + 3)
, MIN = ((3U << 3) + 4)
,
MINA = ((3U << 3) + 5)
, MAX = ((3U << 3) + 6)
, MAXA = ((3U << 3) + 7)
, CVT_S_D = ((4U << 3) + 0)
,
CVT_W_D = ((4U << 3) + 4)
, CVT_L_D = ((4U << 3) + 5)
, C_F_D = ((6U << 3) + 0)
, C_UN_D = ((6U << 3) + 1)
,
C_EQ_D = ((6U << 3) + 2)
, C_UEQ_D = ((6U << 3) + 3)
, C_OLT_D = ((6U << 3) + 4)
, C_ULT_D = ((6U << 3) + 5)
,
C_OLE_D = ((6U << 3) + 6)
, C_ULE_D = ((6U << 3) + 7)
, CVT_S_W = ((4U << 3) + 0)
, CVT_D_W = ((4U << 3) + 1)
,
CVT_S_L = ((4U << 3) + 0)
, CVT_D_L = ((4U << 3) + 1)
, BC1EQZ = ((2U << 2) + 1) << 21
, BC1NEZ = ((3U << 2) + 1) << 21
,
CMP_AF = ((0U << 3) + 0)
, CMP_UN = ((0U << 3) + 1)
, CMP_EQ = ((0U << 3) + 2)
, CMP_UEQ = ((0U << 3) + 3)
,
CMP_LT = ((0U << 3) + 4)
, CMP_ULT = ((0U << 3) + 5)
, CMP_LE = ((0U << 3) + 6)
, CMP_ULE = ((0U << 3) + 7)
,
CMP_SAF = ((1U << 3) + 0)
, CMP_SUN = ((1U << 3) + 1)
, CMP_SEQ = ((1U << 3) + 2)
, CMP_SUEQ = ((1U << 3) + 3)
,
CMP_SSLT = ((1U << 3) + 4)
, CMP_SSULT = ((1U << 3) + 5)
, CMP_SLE = ((1U << 3) + 6)
, CMP_SULE = ((1U << 3) + 7)
,
CMP_AT = ((2U << 3) + 0)
, CMP_OR = ((2U << 3) + 1)
, CMP_UNE = ((2U << 3) + 2)
, CMP_NE = ((2U << 3) + 3)
,
CMP_UGE = ((2U << 3) + 4)
, CMP_OGE = ((2U << 3) + 5)
, CMP_UGT = ((2U << 3) + 6)
, CMP_OGT = ((2U << 3) + 7)
,
CMP_SAT = ((3U << 3) + 0)
, CMP_SOR = ((3U << 3) + 1)
, CMP_SUNE = ((3U << 3) + 2)
, CMP_SNE = ((3U << 3) + 3)
,
CMP_SUGE = ((3U << 3) + 4)
, CMP_SOGE = ((3U << 3) + 5)
, CMP_SUGT = ((3U << 3) + 6)
, CMP_SOGT = ((3U << 3) + 7)
,
SEL = ((2U << 3) + 0)
, MOVF = ((2U << 3) + 1)
, MOVZ_C = ((2U << 3) + 2)
, MOVN_C = ((2U << 3) + 3)
,
SELEQZ_C = ((2U << 3) + 4)
, SELNEZ_C = ((2U << 3) + 7)
, MADD_S = ((4U << 3) + 0)
, MADD_D = ((4U << 3) + 1)
,
MSUB_S = ((5U << 3) + 0)
, MSUB_D = ((5U << 3) + 1)
, ADDIUPC = ((0U << 2) + 0)
, LWPC = ((0U << 2) + 1)
,
LWUPC = ((0U << 2) + 2)
, LDPC = ((0U << 3) + 6)
, AUIPC = ((3U << 3) + 6)
, ALUIPC = ((3U << 3) + 7)
,
JIC = ((0U << 5) + 0)
, JIALC = ((0U << 5) + 0)
, BZ_V = (((1U << 3) + 3) << kRsShift)
, BNZ_V = (((1U << 3) + 7) << kRsShift)
,
BZ_B = (((3U << 3) + 0) << kRsShift)
, BZ_H = (((3U << 3) + 1) << kRsShift)
, BZ_W = (((3U << 3) + 2) << kRsShift)
, BZ_D = (((3U << 3) + 3) << kRsShift)
,
BNZ_B = (((3U << 3) + 4) << kRsShift)
, BNZ_H = (((3U << 3) + 5) << kRsShift)
, BNZ_W = (((3U << 3) + 6) << kRsShift)
, BNZ_D = (((3U << 3) + 7) << kRsShift)
,
MSA_LD = (8U << 2)
, MSA_ST = (9U << 2)
, LD_B = 0xa0U << 22
, LD_H = 0xa1U << 22
,
LD_W = 0xa2U << 22
, LD_D = 0xa3U << 22
, ST_B = 0xa4U << 22
, ST_H = 0xa5U << 22
,
ST_W = 0xa6U << 22
, ST_D = 0xa7U << 22
, ADDVI = ((0U << 23) + 6)
, SUBVI = ((1U << 23) + 6)
,
MAXI_S = ((2U << 23) + 6)
, MAXI_U = ((3U << 23) + 6)
, MINI_S = ((4U << 23) + 6)
, MINI_U = ((5U << 23) + 6)
,
CEQI = ((0U << 23) + 7)
, CLTI_S = ((2U << 23) + 7)
, CLTI_U = ((3U << 23) + 7)
, CLEI_S = ((4U << 23) + 7)
,
CLEI_U = ((5U << 23) + 7)
, LDI = ((6U << 23) + 7)
, I5_DF_b = (0U << 21)
, I5_DF_h = (1U << 21)
,
I5_DF_w = (2U << 21)
, I5_DF_d = (3U << 21)
, ANDI_B = ((0U << 24) + 0)
, ORI_B = ((1U << 24) + 0)
,
NORI_B = ((2U << 24) + 0)
, XORI_B = ((3U << 24) + 0)
, BMNZI_B = ((0U << 24) + 1)
, BMZI_B = ((1U << 24) + 1)
,
BSELI_B = ((2U << 24) + 1)
, SHF_B = ((0U << 24) + 2)
, SHF_H = ((1U << 24) + 2)
, SHF_W = ((2U << 24) + 2)
,
MSA_VEC_2R_2RF_MINOR = ((3U << 3) + 6)
, AND_V = (((0U << 2) + 0) << 21)
, OR_V = (((0U << 2) + 1) << 21)
, NOR_V = (((0U << 2) + 2) << 21)
,
XOR_V = (((0U << 2) + 3) << 21)
, BMNZ_V = (((1U << 2) + 0) << 21)
, BMZ_V = (((1U << 2) + 1) << 21)
, BSEL_V = (((1U << 2) + 2) << 21)
,
MSA_2R_FORMAT = (((6U << 2) + 0) << 21)
, FILL = (0U << 18)
, PCNT = (1U << 18)
, NLOC = (2U << 18)
,
NLZC = (3U << 18)
, MSA_2R_DF_b = (0U << 16)
, MSA_2R_DF_h = (1U << 16)
, MSA_2R_DF_w = (2U << 16)
,
MSA_2R_DF_d = (3U << 16)
, MSA_2RF_FORMAT = (((6U << 2) + 1) << 21)
, FCLASS = (0U << 17)
, FTRUNC_S = (1U << 17)
,
FTRUNC_U = (2U << 17)
, FRSQRT = (4U << 17)
, FRCP = (5U << 17)
, FRINT = (6U << 17)
,
FLOG2 = (7U << 17)
, FEXUPL = (8U << 17)
, FEXUPR = (9U << 17)
, FFQL = (10U << 17)
,
FFQR = (11U << 17)
, FTINT_S = (12U << 17)
, FTINT_U = (13U << 17)
, FFINT_S = (14U << 17)
,
FFINT_U = (15U << 17)
, MSA_2RF_DF_w = (0U << 16)
, MSA_2RF_DF_d = (1U << 16)
, SLL_MSA = ((0U << 23) + 13)
,
SRA_MSA = ((1U << 23) + 13)
, SRL_MSA = ((2U << 23) + 13)
, BCLR = ((3U << 23) + 13)
, BSET = ((4U << 23) + 13)
,
BNEG = ((5U << 23) + 13)
, BINSL = ((6U << 23) + 13)
, BINSR = ((7U << 23) + 13)
, ADDV = ((0U << 23) + 14)
,
SUBV = ((1U << 23) + 14)
, MAX_S = ((2U << 23) + 14)
, MAX_U = ((3U << 23) + 14)
, MIN_S = ((4U << 23) + 14)
,
MIN_U = ((5U << 23) + 14)
, MAX_A = ((6U << 23) + 14)
, MIN_A = ((7U << 23) + 14)
, CEQ = 0x04
,
CLT_S = ((2U << 23) + 15)
, CLT_U = ((3U << 23) + 15)
, CLE_S = ((4U << 23) + 15)
, CLE_U = ((5U << 23) + 15)
,
ADD_A = ((0U << 23) + 16)
, ADDS_A = ((1U << 23) + 16)
, ADDS_S = ((2U << 23) + 16)
, ADDS_U = ((3U << 23) + 16)
,
AVE_S = ((4U << 23) + 16)
, AVE_U = ((5U << 23) + 16)
, AVER_S = ((6U << 23) + 16)
, AVER_U = ((7U << 23) + 16)
,
SUBS_S = ((0U << 23) + 17)
, SUBS_U = ((1U << 23) + 17)
, SUBSUS_U = ((2U << 23) + 17)
, SUBSUU_S = ((3U << 23) + 17)
,
ASUB_S = ((4U << 23) + 17)
, ASUB_U = ((5U << 23) + 17)
, MULV = ((0U << 23) + 18)
, MADDV = ((1U << 23) + 18)
,
MSUBV = ((2U << 23) + 18)
, DIV_S_MSA = ((4U << 23) + 18)
, DIV_U = ((5U << 23) + 18)
, MOD_S = ((6U << 23) + 18)
,
MOD_U = ((7U << 23) + 18)
, DOTP_S = ((0U << 23) + 19)
, DOTP_U = ((1U << 23) + 19)
, DPADD_S = ((2U << 23) + 19)
,
DPADD_U = ((3U << 23) + 19)
, DPSUB_S = ((4U << 23) + 19)
, DPSUB_U = ((5U << 23) + 19)
, SLD = ((0U << 23) + 20)
,
SPLAT = ((1U << 23) + 20)
, PCKEV = ((2U << 23) + 20)
, PCKOD = ((3U << 23) + 20)
, ILVL = ((4U << 23) + 20)
,
ILVR = ((5U << 23) + 20)
, ILVEV = ((6U << 23) + 20)
, ILVOD = ((7U << 23) + 20)
, VSHF = ((0U << 23) + 21)
,
SRAR = ((1U << 23) + 21)
, SRLR = ((2U << 23) + 21)
, HADD_S = ((4U << 23) + 21)
, HADD_U = ((5U << 23) + 21)
,
HSUB_S = ((6U << 23) + 21)
, HSUB_U = ((7U << 23) + 21)
, MSA_3R_DF_b = (0U << 21)
, MSA_3R_DF_h = (1U << 21)
,
MSA_3R_DF_w = (2U << 21)
, MSA_3R_DF_d = (3U << 21)
, FCAF = ((0U << 22) + 26)
, FCUN = ((1U << 22) + 26)
,
FCEQ = ((2U << 22) + 26)
, FCUEQ = ((3U << 22) + 26)
, FCLT = ((4U << 22) + 26)
, FCULT = ((5U << 22) + 26)
,
FCLE = ((6U << 22) + 26)
, FCULE = ((7U << 22) + 26)
, FSAF = ((8U << 22) + 26)
, FSUN = ((9U << 22) + 26)
,
FSEQ = ((10U << 22) + 26)
, FSUEQ = ((11U << 22) + 26)
, FSLT = ((12U << 22) + 26)
, FSULT = ((13U << 22) + 26)
,
FSLE = ((14U << 22) + 26)
, FSULE = ((15U << 22) + 26)
, FMADD = ((4U << 22) + 27)
, FMSUB = ((5U << 22) + 27)
,
FEXP2 = ((7U << 22) + 27)
, FEXDO = ((8U << 22) + 27)
, FTQ = ((10U << 22) + 27)
, FMIN_A = ((13U << 22) + 27)
,
FMAX_A = ((15U << 22) + 27)
, FCOR = ((1U << 22) + 28)
, FCUNE = ((2U << 22) + 28)
, FCNE = ((3U << 22) + 28)
,
MUL_Q = ((4U << 22) + 28)
, MADD_Q = ((5U << 22) + 28)
, MSUB_Q = ((6U << 22) + 28)
, FSOR = ((9U << 22) + 28)
,
FSUNE = ((10U << 22) + 28)
, FSNE = ((11U << 22) + 28)
, MULR_Q = ((12U << 22) + 28)
, MADDR_Q = ((13U << 22) + 28)
,
MSUBR_Q = ((14U << 22) + 28)
, MSA_ELM_MINOR = ((3U << 3) + 1)
, SLDI = (0U << 22)
, CTCMSA = ((0U << 22) | (62U << 16))
,
SPLATI = (1U << 22)
, CFCMSA = ((1U << 22) | (62U << 16))
, COPY_S = (2U << 22)
, MOVE_V = ((2U << 22) | (62U << 16))
,
COPY_U = (3U << 22)
, INSERT = (4U << 22)
, INSVE = (5U << 22)
, ELM_DF_B = ((0U << 4) << 16)
,
ELM_DF_H = ((4U << 3) << 16)
, ELM_DF_W = ((12U << 2) << 16)
, ELM_DF_D = ((28U << 1) << 16)
, SLLI = 0x10U << 18
,
SRAI = 0x12U << 18
, SRLI = 0x11U << 18
, BCLRI = ((3U << 23) + 9)
, BSETI = ((4U << 23) + 9)
,
BNEGI = ((5U << 23) + 9)
, BINSLI = ((6U << 23) + 9)
, BINSRI = ((7U << 23) + 9)
, SAT_S = ((0U << 23) + 10)
,
SAT_U = ((1U << 23) + 10)
, SRARI = ((2U << 23) + 10)
, SRLRI = ((3U << 23) + 10)
, BIT_DF_b = ((14U << 3) << 16)
,
BIT_DF_h = ((6U << 4) << 16)
, BIT_DF_w = ((2U << 5) << 16)
, BIT_DF_d = ((0U << 6) << 16)
, nullptrSF = 0U
} |
| |
| enum | MSAMinorOpcode : uint32_t {
kMsaMinorUndefined = 0
, kMsaMinorI8
, kMsaMinorI5
, kMsaMinorI10
,
kMsaMinorBIT
, kMsaMinor3R
, kMsaMinor3RF
, kMsaMinorELM
,
kMsaMinorVEC
, kMsaMinor2R
, kMsaMinor2RF
, kMsaMinorMI10
} |
| |
| enum | Condition : int {
kNoCondition = -1
, eq = 0 << 28
, ne = 1 << 28
, cs = 2 << 28
,
cc = 3 << 28
, mi = 4 << 28
, pl = 5 << 28
, vs = 6 << 28
,
vc = 7 << 28
, hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, al = 14 << 28
,
kSpecialCondition = 15 << 28
, kNumberOfConditions = 16
, hs = cs
, lo = cc
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, eq = 0 << 28
, ne = 1 << 28
,
hs = cs
, cs = 2 << 28
, lo = cc
, cc = 3 << 28
,
mi = 4 << 28
, pl = 5 << 28
, vs = 6 << 28
, vc = 7 << 28
,
hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
, lt = 11 << 28
,
gt = 12 << 28
, le = 13 << 28
, al = 14 << 28
, nv = 15
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, overflow = 0
, no_overflow = 1
,
below = 2
, above_equal = 3
, equal = 4
, not_equal = 5
,
below_equal = 6
, above = 7
, negative = 8
, positive = 9
,
parity_even = 10
, parity_odd = 11
, less = 12
, greater_equal = 13
,
less_equal = 14
, greater = 15
, carry = below
, not_carry = above_equal
,
zero = equal
, not_zero = not_equal
, sign = negative
, not_sign = positive
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, overflow = 0
, no_overflow = 1
,
Uless = 2
, Ugreater_equal = 3
, Uless_equal = 4
, Ugreater = 5
,
equal = 4
, not_equal = 5
, negative = 8
, positive = 9
,
parity_even = 10
, parity_odd = 11
, less = 12
, greater_equal = 13
,
less_equal = 14
, greater = 15
, ueq = 16
, ogl = 17
,
cc_always = 18
, carry = below
, not_carry = above_equal
, zero = equal
,
eq = 0 << 28
, not_zero = not_equal
, ne = 1 << 28
, nz = not_equal
,
sign = negative
, not_sign = positive
, mi = 4 << 28
, pl = 5 << 28
,
hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
, lt = 11 << 28
,
gt = 12 << 28
, le = 13 << 28
, hs = cs
, lo = cc
,
al = 14 << 28
, ult = Uless
, uge = Ugreater_equal
, ule = Uless_equal
,
ugt = Ugreater
, kEqual = eq
, kNotEqual = ne
, kLessThan = lt
,
kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
,
kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
,
kNoOverflow = vc
, kZero = eq
, kNotZero = ne
, overflow = 0
,
no_overflow = 1
, Uless = 2
, Ugreater_equal = 3
, Uless_equal = 4
,
Ugreater = 5
, equal = 4
, not_equal = 5
, negative = 8
,
positive = 9
, parity_even = 10
, parity_odd = 11
, less = 12
,
greater_equal = 13
, less_equal = 14
, greater = 15
, ueq = 16
,
ogl = 17
, cc_always = 18
, carry = below
, not_carry = above_equal
,
zero = equal
, eq = 0 << 28
, not_zero = not_equal
, ne = 1 << 28
,
nz = not_equal
, sign = negative
, not_sign = positive
, mi = 4 << 28
,
pl = 5 << 28
, hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, hs = cs
,
lo = cc
, al = 14 << 28
, ult = Uless
, uge = Ugreater_equal
,
ule = Uless_equal
, ugt = Ugreater
, kEqual = eq
, kNotEqual = ne
,
kLessThan = lt
, kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
,
kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
,
kOverflow = vs
, kNoOverflow = vc
, kZero = eq
, kNotZero = ne
,
kNoCondition = -1
, eq = 0 << 28
, ne = 1 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, unordered = 6
,
ordered = 7
, overflow = 0
, nooverflow = 9
, al = 14 << 28
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, overflow = 0
, no_overflow = 1
,
Uless = 2
, Ugreater_equal = 3
, Uless_equal = 4
, Ugreater = 5
,
equal = 4
, not_equal = 5
, less = 12
, greater_equal = 13
,
less_equal = 14
, greater = 15
, cc_always = 18
, eq = 0 << 28
,
ne = 1 << 28
, ge = 10 << 28
, lt = 11 << 28
, gt = 12 << 28
,
le = 13 << 28
, al = 14 << 28
, ult = Uless
, uge = Ugreater_equal
,
ule = Uless_equal
, ugt = Ugreater
, kEqual = eq
, kNotEqual = ne
,
kLessThan = lt
, kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
,
kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
,
kOverflow = vs
, kNoOverflow = vc
, kZero = eq
, kNotZero = ne
,
kNoCondition = -1
, eq = 0 << 28
, ne = 1 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, al = 14 << 28
,
CC_NOP = 0x0
, CC_EQ = 0x08
, CC_LT = 0x04
, CC_LE = CC_EQ | CC_LT
,
CC_GT = 0x02
, CC_GE = CC_EQ | CC_GT
, CC_OF = 0x01
, CC_NOF = 0x0E
,
CC_ALWAYS = 0x0F
, unordered = 6
, ordered = 7
, overflow = 0
,
nooverflow = 9
, mask0x0 = 0
, mask0x1 = 1
, mask0x2 = 2
,
mask0x3 = 3
, mask0x4 = 4
, mask0x5 = 5
, mask0x6 = 6
,
mask0x7 = 7
, mask0x8 = 8
, mask0x9 = 9
, mask0xA = 10
,
mask0xB = 11
, mask0xC = 12
, mask0xD = 13
, mask0xE = 14
,
mask0xF = 15
, kEqual = eq
, kNotEqual = ne
, kLessThan = lt
,
kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
,
kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
,
kNoOverflow = vc
, kZero = eq
, kNotZero = ne
, overflow = 0
,
no_overflow = 1
, below = 2
, above_equal = 3
, equal = 4
,
not_equal = 5
, below_equal = 6
, above = 7
, negative = 8
,
positive = 9
, parity_even = 10
, parity_odd = 11
, less = 12
,
greater_equal = 13
, less_equal = 14
, greater = 15
, carry = below
,
not_carry = above_equal
, zero = equal
, not_zero = not_equal
, sign = negative
,
not_sign = positive
, kEqual = eq
, kNotEqual = ne
, kLessThan = lt
,
kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
,
kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
,
kNoOverflow = vc
, kZero = eq
, kNotZero = ne
} |
| |
| enum | MSABranchCondition { all_not_zero = 0
, one_elem_not_zero
, one_elem_zero
, all_zero
} |
| |
| enum | MSABranchDF {
MSA_BRANCH_B = 0
, MSA_BRANCH_H
, MSA_BRANCH_W
, MSA_BRANCH_D
,
MSA_BRANCH_V
} |
| |
| enum | FPUCondition {
kNoFPUCondition = -1
, CAF = 0x00
, SAF = 0x01
, CLT = 0x02
,
CEQ = 0x04
, SEQ = 0x05
, CLE = 0x06
, SLE = 0x07
,
CUN = 0x08
, SUN = 0x09
, CULT = 0x0a
, SULT = 0x0b
,
CUEQ = 0x0c
, SUEQ = 0x0d
, CULE = 0x0e
, SULE = 0x0f
,
CNE = 0x10
, SNE = 0x11
, COR = 0x14
, SOR = 0x15
,
CUNE = 0x18
, SUNE = 0x19
, kNoFPUCondition = -1
, F = 0x00
,
UN = 0x01
, EQ = 0x02
, UEQ = 0x03
, OLT = 0x04
,
LT = 0x04
, ULT = 0x05
, OLE = 0x06
, LE = 0x06
,
ULE = 0x07
, ORD = 0x11
, UNE = 0x12
, NE = 0x13
,
kNoFPUCondition = -1
, EQ = 0x02
, NE = 0x13
, LT = 0x04
,
GE = 0x05
, LE = 0x06
, GT = 0x07
} |
| |
| enum | FPURoundingMode {
mode_round = RN
, mode_ceil = RP
, mode_floor = RM
, mode_trunc = RZ
,
mode_round = RN
, mode_ceil = RP
, mode_floor = RM
, mode_trunc = RZ
,
RNE = 0b000
, RTZ = 0b001
, RDN = 0b010
, RUP = 0b011
,
RMM = 0b100
, DYN = 0b111
} |
| |
| enum | CheckForInexactConversion {
kCheckForInexactConversion
, kDontCheckForInexactConversion
, kCheckForInexactConversion
, kDontCheckForInexactConversion
,
kCheckForInexactConversion
, kDontCheckForInexactConversion
, kCheckForInexactConversion
, kDontCheckForInexactConversion
,
kCheckForInexactConversion
, kDontCheckForInexactConversion
, kCheckForInexactConversion
, kDontCheckForInexactConversion
} |
| |
| enum class | MaxMinKind : int {
kMin = 0
, kMax = 1
, kMin = 0
, kMax = 1
,
kMin = 0
, kMax = 1
} |
| |
| enum | Hint { no_hint = 0
, no_hint = 0
, no_hint = 0
, no_hint = 0
} |
| |
| enum | BranchDelaySlot { USE_DELAY_SLOT
, PROTECT
} |
| |
| enum | LiFlags {
OPTIMIZE_SIZE = 0
, CONSTANT_SIZE = 1
, ADDRESS_LOAD = 2
, OPTIMIZE_SIZE = 0
,
CONSTANT_SIZE = 1
, ADDRESS_LOAD = 2
, OPTIMIZE_SIZE = 0
, CONSTANT_SIZE = 1
,
ADDRESS_LOAD = 2
} |
| |
| enum | RAStatus {
kRAHasNotBeenSaved
, kRAHasBeenSaved
, kRAHasNotBeenSaved
, kRAHasBeenSaved
,
kRAHasNotBeenSaved
, kRAHasBeenSaved
} |
| |
| enum | RegisterCode {
kRegAfterLast
, kRegAfterLast
, kRegAfterLast
, kRegAfterLast
,
kRegAfterLast
, kRegAfterLast
, kRegAfterLast
, kRegAfterLast
,
kRegAfterLast
} |
| |
| enum | MSARegisterCode { kMsaAfterLast
} |
| |
| enum | DoubleRegisterCode {
kDoubleAfterLast
, kDoubleAfterLast
, kDoubleAfterLast
, kDoubleAfterLast
,
kDoubleAfterLast
, kDoubleAfterLast
, kDoubleAfterLast
, kDoubleAfterLast
} |
| |
| enum | Condition : int {
kNoCondition = -1
, eq = 0 << 28
, ne = 1 << 28
, cs = 2 << 28
,
cc = 3 << 28
, mi = 4 << 28
, pl = 5 << 28
, vs = 6 << 28
,
vc = 7 << 28
, hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, al = 14 << 28
,
kSpecialCondition = 15 << 28
, kNumberOfConditions = 16
, hs = cs
, lo = cc
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, eq = 0 << 28
, ne = 1 << 28
,
hs = cs
, cs = 2 << 28
, lo = cc
, cc = 3 << 28
,
mi = 4 << 28
, pl = 5 << 28
, vs = 6 << 28
, vc = 7 << 28
,
hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
, lt = 11 << 28
,
gt = 12 << 28
, le = 13 << 28
, al = 14 << 28
, nv = 15
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, overflow = 0
, no_overflow = 1
,
below = 2
, above_equal = 3
, equal = 4
, not_equal = 5
,
below_equal = 6
, above = 7
, negative = 8
, positive = 9
,
parity_even = 10
, parity_odd = 11
, less = 12
, greater_equal = 13
,
less_equal = 14
, greater = 15
, carry = below
, not_carry = above_equal
,
zero = equal
, not_zero = not_equal
, sign = negative
, not_sign = positive
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, overflow = 0
, no_overflow = 1
,
Uless = 2
, Ugreater_equal = 3
, Uless_equal = 4
, Ugreater = 5
,
equal = 4
, not_equal = 5
, negative = 8
, positive = 9
,
parity_even = 10
, parity_odd = 11
, less = 12
, greater_equal = 13
,
less_equal = 14
, greater = 15
, ueq = 16
, ogl = 17
,
cc_always = 18
, carry = below
, not_carry = above_equal
, zero = equal
,
eq = 0 << 28
, not_zero = not_equal
, ne = 1 << 28
, nz = not_equal
,
sign = negative
, not_sign = positive
, mi = 4 << 28
, pl = 5 << 28
,
hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
, lt = 11 << 28
,
gt = 12 << 28
, le = 13 << 28
, hs = cs
, lo = cc
,
al = 14 << 28
, ult = Uless
, uge = Ugreater_equal
, ule = Uless_equal
,
ugt = Ugreater
, kEqual = eq
, kNotEqual = ne
, kLessThan = lt
,
kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
,
kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
,
kNoOverflow = vc
, kZero = eq
, kNotZero = ne
, overflow = 0
,
no_overflow = 1
, Uless = 2
, Ugreater_equal = 3
, Uless_equal = 4
,
Ugreater = 5
, equal = 4
, not_equal = 5
, negative = 8
,
positive = 9
, parity_even = 10
, parity_odd = 11
, less = 12
,
greater_equal = 13
, less_equal = 14
, greater = 15
, ueq = 16
,
ogl = 17
, cc_always = 18
, carry = below
, not_carry = above_equal
,
zero = equal
, eq = 0 << 28
, not_zero = not_equal
, ne = 1 << 28
,
nz = not_equal
, sign = negative
, not_sign = positive
, mi = 4 << 28
,
pl = 5 << 28
, hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, hs = cs
,
lo = cc
, al = 14 << 28
, ult = Uless
, uge = Ugreater_equal
,
ule = Uless_equal
, ugt = Ugreater
, kEqual = eq
, kNotEqual = ne
,
kLessThan = lt
, kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
,
kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
,
kOverflow = vs
, kNoOverflow = vc
, kZero = eq
, kNotZero = ne
,
kNoCondition = -1
, eq = 0 << 28
, ne = 1 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, unordered = 6
,
ordered = 7
, overflow = 0
, nooverflow = 9
, al = 14 << 28
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, overflow = 0
, no_overflow = 1
,
Uless = 2
, Ugreater_equal = 3
, Uless_equal = 4
, Ugreater = 5
,
equal = 4
, not_equal = 5
, less = 12
, greater_equal = 13
,
less_equal = 14
, greater = 15
, cc_always = 18
, eq = 0 << 28
,
ne = 1 << 28
, ge = 10 << 28
, lt = 11 << 28
, gt = 12 << 28
,
le = 13 << 28
, al = 14 << 28
, ult = Uless
, uge = Ugreater_equal
,
ule = Uless_equal
, ugt = Ugreater
, kEqual = eq
, kNotEqual = ne
,
kLessThan = lt
, kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
,
kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
,
kOverflow = vs
, kNoOverflow = vc
, kZero = eq
, kNotZero = ne
,
kNoCondition = -1
, eq = 0 << 28
, ne = 1 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, al = 14 << 28
,
CC_NOP = 0x0
, CC_EQ = 0x08
, CC_LT = 0x04
, CC_LE = CC_EQ | CC_LT
,
CC_GT = 0x02
, CC_GE = CC_EQ | CC_GT
, CC_OF = 0x01
, CC_NOF = 0x0E
,
CC_ALWAYS = 0x0F
, unordered = 6
, ordered = 7
, overflow = 0
,
nooverflow = 9
, mask0x0 = 0
, mask0x1 = 1
, mask0x2 = 2
,
mask0x3 = 3
, mask0x4 = 4
, mask0x5 = 5
, mask0x6 = 6
,
mask0x7 = 7
, mask0x8 = 8
, mask0x9 = 9
, mask0xA = 10
,
mask0xB = 11
, mask0xC = 12
, mask0xD = 13
, mask0xE = 14
,
mask0xF = 15
, kEqual = eq
, kNotEqual = ne
, kLessThan = lt
,
kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
,
kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
,
kNoOverflow = vc
, kZero = eq
, kNotZero = ne
, overflow = 0
,
no_overflow = 1
, below = 2
, above_equal = 3
, equal = 4
,
not_equal = 5
, below_equal = 6
, above = 7
, negative = 8
,
positive = 9
, parity_even = 10
, parity_odd = 11
, less = 12
,
greater_equal = 13
, less_equal = 14
, greater = 15
, carry = below
,
not_carry = above_equal
, zero = equal
, not_zero = not_equal
, sign = negative
,
not_sign = positive
, kEqual = eq
, kNotEqual = ne
, kLessThan = lt
,
kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
,
kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
,
kNoOverflow = vc
, kZero = eq
, kNotZero = ne
} |
| |
| enum | Opcode : uint32_t {
BEQZ = 0x10U << 26
, BNEZ = 0x11U << 26
, BCZ = 0x12U << 26
, JIRL = 0x13U << 26
,
BEQ = 0x16U << 26
, BNE = 0x17U << 26
, BLT = 0x18U << 26
, BGE = 0x19U << 26
,
BLTU = 0x1aU << 26
, BGEU = 0x1bU << 26
, ADDU16I_D = 0x4U << 26
, LU12I_W = 0xaU << 25
,
LU32I_D = 0xbU << 25
, PCADDI = 0xcU << 25
, PCALAU12I = 0xdU << 25
, PCADDU12I = 0xeU << 25
,
PCADDU18I = 0xfU << 25
, LL_W = 0x20U << 24
, SC_W = 0x21U << 24
, LL_D = 0x22U << 24
,
SC_D = 0x23U << 24
, LDPTR_W = 0x24U << 24
, STPTR_W = 0x25U << 24
, LDPTR_D = 0x26U << 24
,
STPTR_D = 0x27U << 24
, BSTR_W = 0x1U << 22
, BSTRINS_W = BSTR_W
, BSTRPICK_W = BSTR_W
,
BSTRINS_D = 0x2U << 22
, BSTRPICK_D = 0x3U << 22
, SLTI = 0x8U << 22
, SLTUI = 0x9U << 22
,
ADDI_W = 0xaU << 22
, ADDI_D = 0xbU << 22
, LU52I_D = 0xcU << 22
, ANDI = 0xdU << 22
,
ORI = 0xeU << 22
, XORI = 0xfU << 22
, LD_B = 0xa0U << 22
, LD_H = 0xa1U << 22
,
LD_W = 0xa2U << 22
, LD_D = 0xa3U << 22
, ST_B = 0xa4U << 22
, ST_H = 0xa5U << 22
,
ST_W = 0xa6U << 22
, ST_D = 0xa7U << 22
, LD_BU = 0xa8U << 22
, LD_HU = 0xa9U << 22
,
LD_WU = 0xaaU << 22
, FLD_S = 0xacU << 22
, FST_S = 0xadU << 22
, FLD_D = 0xaeU << 22
,
FST_D = 0xafU << 22
, FMADD_S = 0x81U << 20
, FMADD_D = 0x82U << 20
, FMSUB_S = 0x85U << 20
,
FMSUB_D = 0x86U << 20
, FNMADD_S = 0x89U << 20
, FNMADD_D = 0x8aU << 20
, FNMSUB_S = 0x8dU << 20
,
FNMSUB_D = 0x8eU << 20
, FCMP_COND_S = 0xc1U << 20
, FCMP_COND_D = 0xc2U << 20
, BYTEPICK_D = 0x3U << 18
,
BYTEPICK_W = 0x2U << 18
, FSEL = 0x340U << 18
, ALSL = 0x1U << 18
, ALSL_W = ALSL
,
ALSL_WU = ALSL
, ALSL_D = 0xbU << 18
, SLLI_W = 0x40U << 16
, SRLI_W = 0x44U << 16
,
SRAI_W = 0x48U << 16
, ROTRI_W = 0x4cU << 16
, SLLI_D = 0x41U << 16
, SRLI_D = 0x45U << 16
,
SRAI_D = 0x49U << 16
, ROTRI_D = 0x4dU << 16
, SLLI = 0x10U << 18
, SRLI = 0x11U << 18
,
SRAI = 0x12U << 18
, ROTRI = 0x13U << 18
, ADD_W = 0x20U << 15
, ADD_D = 0x21U << 15
,
SUB_W = 0x22U << 15
, SUB_D = 0x23U << 15
, SLT = 0x24U << 15
, SLTU = 0x25U << 15
,
MASKEQZ = 0x26U << 15
, MASKNEZ = 0x27U << 15
, NOR = 0x28U << 15
, OR = 0x2aU << 15
,
XOR = 0x2bU << 15
, ANDN = 0x2dU << 15
, SLL_W = 0x2eU << 15
, SRL_W = 0x2fU << 15
,
SRA_W = 0x30U << 15
, SLL_D = 0x31U << 15
, SRL_D = 0x32U << 15
, SRA_D = 0x33U << 15
,
ROTR_W = 0x36U << 15
, ROTR_D = 0x37U << 15
, MUL_W = 0x38U << 15
, MULH_W = 0x39U << 15
,
MULH_WU = 0x3aU << 15
, MUL_D = 0x3bU << 15
, MULH_D = 0x3cU << 15
, MULH_DU = 0x3dU << 15
,
MULW_D_W = 0x3eU << 15
, MULW_D_WU = 0x3fU << 15
, DIV_W = 0x40U << 15
, MOD_W = 0x41U << 15
,
DIV_WU = 0x42U << 15
, MOD_WU = 0x43U << 15
, DIV_D = 0x44U << 15
, MOD_D = 0x45U << 15
,
DIV_DU = 0x46U << 15
, MOD_DU = 0x47U << 15
, BREAK = 1 << 0
, FADD_S = 0x201U << 15
,
FADD_D = 0x202U << 15
, FSUB_S = 0x205U << 15
, FSUB_D = 0x206U << 15
, FMUL_S = 0x209U << 15
,
FMUL_D = 0x20aU << 15
, FDIV_S = 0x20dU << 15
, FDIV_D = 0x20eU << 15
, FMAX_S = 0x211U << 15
,
FMAX_D = 0x212U << 15
, FMIN_S = 0x215U << 15
, FMIN_D = 0x216U << 15
, FMAXA_S = 0x219U << 15
,
FMAXA_D = 0x21aU << 15
, FMINA_S = 0x21dU << 15
, FMINA_D = 0x21eU << 15
, FSCALEB_S = 0x221U << 15
,
FSCALEB_D = 0x222U << 15
, FCOPYSIGN_S = 0x225U << 15
, FCOPYSIGN_D = 0x226U << 15
, LDX_B = 0x7000U << 15
,
LDX_H = 0x7008U << 15
, LDX_W = 0x7010U << 15
, LDX_D = 0x7018U << 15
, STX_B = 0x7020U << 15
,
STX_H = 0x7028U << 15
, STX_W = 0x7030U << 15
, STX_D = 0x7038U << 15
, LDX_BU = 0x7040U << 15
,
LDX_HU = 0x7048U << 15
, LDX_WU = 0x7050U << 15
, FLDX_S = 0x7060U << 15
, FLDX_D = 0x7068U << 15
,
FSTX_S = 0x7070U << 15
, FSTX_D = 0x7078U << 15
, AMSWAP_W = 0x70c0U << 15
, AMSWAP_D = 0x70c1U << 15
,
AMADD_W = 0x70c2U << 15
, AMADD_D = 0x70c3U << 15
, AMAND_W = 0x70c4U << 15
, AMAND_D = 0x70c5U << 15
,
AMOR_W = 0x70c6U << 15
, AMOR_D = 0x70c7U << 15
, AMXOR_W = 0x70c8U << 15
, AMXOR_D = 0x70c9U << 15
,
AMMAX_W = 0x70caU << 15
, AMMAX_D = 0x70cbU << 15
, AMMIN_W = 0x70ccU << 15
, AMMIN_D = 0x70cdU << 15
,
AMMAX_WU = 0x70ceU << 15
, AMMAX_DU = 0x70cfU << 15
, AMMIN_WU = 0x70d0U << 15
, AMMIN_DU = 0x70d1U << 15
,
AMSWAP_DB_W = 0x70d2U << 15
, AMSWAP_DB_D = 0x70d3U << 15
, AMADD_DB_W = 0x70d4U << 15
, AMADD_DB_D = 0x70d5U << 15
,
AMAND_DB_W = 0x70d6U << 15
, AMAND_DB_D = 0x70d7U << 15
, AMOR_DB_W = 0x70d8U << 15
, AMOR_DB_D = 0x70d9U << 15
,
AMXOR_DB_W = 0x70daU << 15
, AMXOR_DB_D = 0x70dbU << 15
, AMMAX_DB_W = 0x70dcU << 15
, AMMAX_DB_D = 0x70ddU << 15
,
AMMIN_DB_W = 0x70deU << 15
, AMMIN_DB_D = 0x70dfU << 15
, AMMAX_DB_WU = 0x70e0U << 15
, AMMAX_DB_DU = 0x70e1U << 15
,
AMMIN_DB_WU = 0x70e2U << 15
, AMMIN_DB_DU = 0x70e3U << 15
, DBAR = 0x70e4U << 15
, IBAR = 0x70e5U << 15
,
CLO_W = 0X4U << 10
, CLZ_W = 0X5U << 10
, CTO_W = 0X6U << 10
, CTZ_W = 0X7U << 10
,
CLO_D = 0X8U << 10
, CLZ_D = 0X9U << 10
, CTO_D = 0XaU << 10
, CTZ_D = 0XbU << 10
,
REVB_2H = 0XcU << 10
, REVB_4H = 0XdU << 10
, REVB_2W = 0XeU << 10
, REVB_D = 0XfU << 10
,
REVH_2W = 0X10U << 10
, REVH_D = 0X11U << 10
, BITREV_4B = 0X12U << 10
, BITREV_8B = 0X13U << 10
,
BITREV_W = 0X14U << 10
, BITREV_D = 0X15U << 10
, EXT_W_H = 0X16U << 10
, EXT_W_B = 0X17U << 10
,
FABS_S = 0X4501U << 10
, FABS_D = 0X4502U << 10
, FNEG_S = 0X4505U << 10
, FNEG_D = 0X4506U << 10
,
FLOGB_S = 0X4509U << 10
, FLOGB_D = 0X450aU << 10
, FCLASS_S = 0X450dU << 10
, FCLASS_D = 0X450eU << 10
,
FSQRT_S = 0X4511U << 10
, FSQRT_D = 0X4512U << 10
, FRECIP_S = 0X4515U << 10
, FRECIP_D = 0X4516U << 10
,
FRSQRT_S = 0X4519U << 10
, FRSQRT_D = 0X451aU << 10
, FMOV_S = 0X4525U << 10
, FMOV_D = 0X4526U << 10
,
MOVGR2FR_W = 0X4529U << 10
, MOVGR2FR_D = 0X452aU << 10
, MOVGR2FRH_W = 0X452bU << 10
, MOVFR2GR_S = 0X452dU << 10
,
MOVFR2GR_D = 0X452eU << 10
, MOVFRH2GR_S = 0X452fU << 10
, MOVGR2FCSR = 0X4530U << 10
, MOVFCSR2GR = 0X4532U << 10
,
MOVFR2CF = 0X4534U << 10
, MOVGR2CF = 0X4536U << 10
, FCVT_S_D = 0x4646U << 10
, FCVT_D_S = 0x4649U << 10
,
FTINTRM_W_S = 0x4681U << 10
, FTINTRM_W_D = 0x4682U << 10
, FTINTRM_L_S = 0x4689U << 10
, FTINTRM_L_D = 0x468aU << 10
,
FTINTRP_W_S = 0x4691U << 10
, FTINTRP_W_D = 0x4692U << 10
, FTINTRP_L_S = 0x4699U << 10
, FTINTRP_L_D = 0x469aU << 10
,
FTINTRZ_W_S = 0x46a1U << 10
, FTINTRZ_W_D = 0x46a2U << 10
, FTINTRZ_L_S = 0x46a9U << 10
, FTINTRZ_L_D = 0x46aaU << 10
,
FTINTRNE_W_S = 0x46b1U << 10
, FTINTRNE_W_D = 0x46b2U << 10
, FTINTRNE_L_S = 0x46b9U << 10
, FTINTRNE_L_D = 0x46baU << 10
,
FTINT_W_S = 0x46c1U << 10
, FTINT_W_D = 0x46c2U << 10
, FTINT_L_S = 0x46c9U << 10
, FTINT_L_D = 0x46caU << 10
,
FFINT_S_W = 0x4744U << 10
, FFINT_S_L = 0x4746U << 10
, FFINT_D_W = 0x4748U << 10
, FFINT_D_L = 0x474aU << 10
,
FRINT_S = 0x4791U << 10
, FRINT_D = 0x4792U << 10
, MOVCF2FR = 0x4535U << 10
, MOVCF2GR = 0x4537U << 10
,
EXTP = 0x4000000
, EXT0 = 0x10000000
, EXT1 = 0x4C000000
, EXT2 = 0x7C000000
,
EXT3 = 0xEC000000
, EXT4 = 0xFC000000
, EXT5 = 0x78000000
, EXT6 = 0xF0000000
,
DUMY = 0xE352
} |
| |
| enum | {
B1 = 1 << 1
, B2 = 1 << 2
, B3 = 1 << 3
, B11 = 1 << 11
,
kOff16Mask = (1 << 16) - 1
, kImm22Mask = (1 << 22) - 1
, kBOfieldMask = 0x1f << 21
, kExt1OpcodeMask = 0x3ff << 1
,
kExt2OpcodeMask = 0x3ff << 1
, kExt2OpcodeVariant2Mask = 0x1ff << 2
, kExt5OpcodeMask = 0x3 << 2
, kBOMask = 0x1f << 21
,
kBIMask = 0x1F << 16
, kBDMask = 0x14 << 2
, kAAMask = 0x01 << 1
, kLKMask = 0x01
,
kRCMask = 0x01
, kTOMask = 0x1f << 21
} |
| |
| enum | OEBit { SetOE = 1 << 10
, LeaveOE = 0 << 10
, SetOE = 1 << 10
, LeaveOE = 0 << 10
} |
| |
| enum | RCBit { SetRC = 1
, LeaveRC = 0
, SetRC = 1
, LeaveRC = 0
} |
| |
| enum | EHBit { SetEH = 1
, LeaveEH = 0
} |
| |
| enum | LKBit { SetLK = 1
, LeaveLK = 0
, SetLK = 1
, LeaveLK = 0
} |
| |
| enum | PRBit { SetPR = 1
, LeavePR = 0
} |
| |
| enum | BOfield {
DCBNZF = 0 << 21
, DCBEZF = 2 << 21
, BF = 4 << 21
, DCBNZT = 8 << 21
,
DCBEZT = 10 << 21
, BT = 12 << 21
, DCBNZ = 16 << 21
, DCBEZ = 18 << 21
,
BA = 20 << 21
, DCBNZF = 0 << 21
, DCBEZF = 2 << 21
, BF = 4 << 21
,
DCBNZT = 8 << 21
, DCBEZT = 10 << 21
, BT = 12 << 21
, DCBNZ = 16 << 21
,
DCBEZ = 18 << 21
, BA = 20 << 21
} |
| |
| enum | CRBit {
CR_LT = 0
, CR_GT = 1
, CR_EQ = 2
, CR_SO = 3
,
CR_FU = 3
, CR_LT = 0
, CR_GT = 1
, CR_EQ = 2
,
CR_SO = 3
, CR_FU = 3
} |
| |
| enum | FPSCRBit { VXSOFT = 21
, VXSQRT = 22
, VXCVI = 23
} |
| |
| enum | SoftwareInterruptCodes { call_rt_redirected = 0x7fff
, call_rt_redirected = 0x7fff
, call_rt_redirected = 0x7fff
} |
| |
| enum | FPRoundingMode {
CURRENT_ROUNDING_MODE = 0
, ROUND_TO_NEAREST_AWAY_FROM_0 = 1
, ROUND_TO_NEAREST_TO_EVEN = 4
, ROUND_TOWARD_0 = 5
,
ROUND_TOWARD_POS_INF = 6
, ROUND_TOWARD_NEG_INF = 7
} |
| |
| enum | CheckForInexactConversion {
kCheckForInexactConversion
, kDontCheckForInexactConversion
, kCheckForInexactConversion
, kDontCheckForInexactConversion
,
kCheckForInexactConversion
, kDontCheckForInexactConversion
, kCheckForInexactConversion
, kDontCheckForInexactConversion
,
kCheckForInexactConversion
, kDontCheckForInexactConversion
, kCheckForInexactConversion
, kDontCheckForInexactConversion
} |
| |
| enum class | StackLimitKind {
kInterruptStackLimit
, kRealStackLimit
, kInterruptStackLimit
, kRealStackLimit
,
kInterruptStackLimit
, kRealStackLimit
, kInterruptStackLimit
, kRealStackLimit
,
kInterruptStackLimit
, kRealStackLimit
, kInterruptStackLimit
, kRealStackLimit
} |
| |
| enum | LinkRegisterStatus {
kLRHasNotBeenSaved
, kLRHasBeenSaved
, kLRHasNotBeenSaved
, kLRHasBeenSaved
,
kLRHasNotBeenSaved
, kLRHasBeenSaved
, kLRHasNotBeenSaved
, kLRHasBeenSaved
} |
| |
| enum | RegisterCode {
kRegAfterLast
, kRegAfterLast
, kRegAfterLast
, kRegAfterLast
,
kRegAfterLast
, kRegAfterLast
, kRegAfterLast
, kRegAfterLast
,
kRegAfterLast
} |
| |
| enum | Simd128RegisterCode { kSimd128AfterLast
, kSimd128AfterLast
} |
| |
| enum | DoubleRegisterCode {
kDoubleAfterLast
, kDoubleAfterLast
, kDoubleAfterLast
, kDoubleAfterLast
,
kDoubleAfterLast
, kDoubleAfterLast
, kDoubleAfterLast
, kDoubleAfterLast
} |
| |
| enum | CRegisterCode { kCAfterLast
, kCAfterLast
, kCAfterLast
} |
| |
| enum | ICacheFlushMode { FLUSH_ICACHE_IF_NEEDED
, SKIP_ICACHE_FLUSH
} |
| |
| enum | Vlmul { kVlInvalid
} |
| |
| enum | VSew { kVsInvalid
} |
| |
| enum | SoftwareInterruptCodes { call_rt_redirected = 0x7fff
, call_rt_redirected = 0x7fff
, call_rt_redirected = 0x7fff
} |
| |
| enum | DebugParameters : uint32_t {
NO_PARAM = 0
, BREAK = 1 << 0
, LOG_DISASM = 1 << 1
, LOG_REGS = 1 << 2
,
LOG_VREGS = 1 << 3
, LOG_SYS_REGS = 1 << 4
, LOG_WRITE = 1 << 5
, LOG_NONE = 0
,
LOG_STATE = LOG_REGS | LOG_VREGS | LOG_SYS_REGS
, LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE
, TRACE_ENABLE = 1 << 6
, TRACE_DISABLE = 2 << 6
,
TRACE_OVERRIDE = 3 << 6
, NO_PARAM = 0
, BREAK = 1 << 0
, LOG_TRACE = 1 << 1
,
LOG_REGS = 1 << 2
, LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE
, TRACE_ENABLE = 1 << 6
, TRACE_DISABLE = 2 << 6
} |
| |
| enum | Condition : int {
kNoCondition = -1
, eq = 0 << 28
, ne = 1 << 28
, cs = 2 << 28
,
cc = 3 << 28
, mi = 4 << 28
, pl = 5 << 28
, vs = 6 << 28
,
vc = 7 << 28
, hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, al = 14 << 28
,
kSpecialCondition = 15 << 28
, kNumberOfConditions = 16
, hs = cs
, lo = cc
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, eq = 0 << 28
, ne = 1 << 28
,
hs = cs
, cs = 2 << 28
, lo = cc
, cc = 3 << 28
,
mi = 4 << 28
, pl = 5 << 28
, vs = 6 << 28
, vc = 7 << 28
,
hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
, lt = 11 << 28
,
gt = 12 << 28
, le = 13 << 28
, al = 14 << 28
, nv = 15
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, overflow = 0
, no_overflow = 1
,
below = 2
, above_equal = 3
, equal = 4
, not_equal = 5
,
below_equal = 6
, above = 7
, negative = 8
, positive = 9
,
parity_even = 10
, parity_odd = 11
, less = 12
, greater_equal = 13
,
less_equal = 14
, greater = 15
, carry = below
, not_carry = above_equal
,
zero = equal
, not_zero = not_equal
, sign = negative
, not_sign = positive
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, overflow = 0
, no_overflow = 1
,
Uless = 2
, Ugreater_equal = 3
, Uless_equal = 4
, Ugreater = 5
,
equal = 4
, not_equal = 5
, negative = 8
, positive = 9
,
parity_even = 10
, parity_odd = 11
, less = 12
, greater_equal = 13
,
less_equal = 14
, greater = 15
, ueq = 16
, ogl = 17
,
cc_always = 18
, carry = below
, not_carry = above_equal
, zero = equal
,
eq = 0 << 28
, not_zero = not_equal
, ne = 1 << 28
, nz = not_equal
,
sign = negative
, not_sign = positive
, mi = 4 << 28
, pl = 5 << 28
,
hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
, lt = 11 << 28
,
gt = 12 << 28
, le = 13 << 28
, hs = cs
, lo = cc
,
al = 14 << 28
, ult = Uless
, uge = Ugreater_equal
, ule = Uless_equal
,
ugt = Ugreater
, kEqual = eq
, kNotEqual = ne
, kLessThan = lt
,
kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
,
kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
,
kNoOverflow = vc
, kZero = eq
, kNotZero = ne
, overflow = 0
,
no_overflow = 1
, Uless = 2
, Ugreater_equal = 3
, Uless_equal = 4
,
Ugreater = 5
, equal = 4
, not_equal = 5
, negative = 8
,
positive = 9
, parity_even = 10
, parity_odd = 11
, less = 12
,
greater_equal = 13
, less_equal = 14
, greater = 15
, ueq = 16
,
ogl = 17
, cc_always = 18
, carry = below
, not_carry = above_equal
,
zero = equal
, eq = 0 << 28
, not_zero = not_equal
, ne = 1 << 28
,
nz = not_equal
, sign = negative
, not_sign = positive
, mi = 4 << 28
,
pl = 5 << 28
, hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, hs = cs
,
lo = cc
, al = 14 << 28
, ult = Uless
, uge = Ugreater_equal
,
ule = Uless_equal
, ugt = Ugreater
, kEqual = eq
, kNotEqual = ne
,
kLessThan = lt
, kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
,
kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
,
kOverflow = vs
, kNoOverflow = vc
, kZero = eq
, kNotZero = ne
,
kNoCondition = -1
, eq = 0 << 28
, ne = 1 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, unordered = 6
,
ordered = 7
, overflow = 0
, nooverflow = 9
, al = 14 << 28
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, overflow = 0
, no_overflow = 1
,
Uless = 2
, Ugreater_equal = 3
, Uless_equal = 4
, Ugreater = 5
,
equal = 4
, not_equal = 5
, less = 12
, greater_equal = 13
,
less_equal = 14
, greater = 15
, cc_always = 18
, eq = 0 << 28
,
ne = 1 << 28
, ge = 10 << 28
, lt = 11 << 28
, gt = 12 << 28
,
le = 13 << 28
, al = 14 << 28
, ult = Uless
, uge = Ugreater_equal
,
ule = Uless_equal
, ugt = Ugreater
, kEqual = eq
, kNotEqual = ne
,
kLessThan = lt
, kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
,
kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
,
kOverflow = vs
, kNoOverflow = vc
, kZero = eq
, kNotZero = ne
,
kNoCondition = -1
, eq = 0 << 28
, ne = 1 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, al = 14 << 28
,
CC_NOP = 0x0
, CC_EQ = 0x08
, CC_LT = 0x04
, CC_LE = CC_EQ | CC_LT
,
CC_GT = 0x02
, CC_GE = CC_EQ | CC_GT
, CC_OF = 0x01
, CC_NOF = 0x0E
,
CC_ALWAYS = 0x0F
, unordered = 6
, ordered = 7
, overflow = 0
,
nooverflow = 9
, mask0x0 = 0
, mask0x1 = 1
, mask0x2 = 2
,
mask0x3 = 3
, mask0x4 = 4
, mask0x5 = 5
, mask0x6 = 6
,
mask0x7 = 7
, mask0x8 = 8
, mask0x9 = 9
, mask0xA = 10
,
mask0xB = 11
, mask0xC = 12
, mask0xD = 13
, mask0xE = 14
,
mask0xF = 15
, kEqual = eq
, kNotEqual = ne
, kLessThan = lt
,
kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
,
kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
,
kNoOverflow = vc
, kZero = eq
, kNotZero = ne
, overflow = 0
,
no_overflow = 1
, below = 2
, above_equal = 3
, equal = 4
,
not_equal = 5
, below_equal = 6
, above = 7
, negative = 8
,
positive = 9
, parity_even = 10
, parity_odd = 11
, less = 12
,
greater_equal = 13
, less_equal = 14
, greater = 15
, carry = below
,
not_carry = above_equal
, zero = equal
, not_zero = not_equal
, sign = negative
,
not_sign = positive
, kEqual = eq
, kNotEqual = ne
, kLessThan = lt
,
kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
,
kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
,
kNoOverflow = vc
, kZero = eq
, kNotZero = ne
} |
| |
| enum | FPUCondition {
kNoFPUCondition = -1
, CAF = 0x00
, SAF = 0x01
, CLT = 0x02
,
CEQ = 0x04
, SEQ = 0x05
, CLE = 0x06
, SLE = 0x07
,
CUN = 0x08
, SUN = 0x09
, CULT = 0x0a
, SULT = 0x0b
,
CUEQ = 0x0c
, SUEQ = 0x0d
, CULE = 0x0e
, SULE = 0x0f
,
CNE = 0x10
, SNE = 0x11
, COR = 0x14
, SOR = 0x15
,
CUNE = 0x18
, SUNE = 0x19
, kNoFPUCondition = -1
, F = 0x00
,
UN = 0x01
, EQ = 0x02
, UEQ = 0x03
, OLT = 0x04
,
LT = 0x04
, ULT = 0x05
, OLE = 0x06
, LE = 0x06
,
ULE = 0x07
, ORD = 0x11
, UNE = 0x12
, NE = 0x13
,
kNoFPUCondition = -1
, EQ = 0x02
, NE = 0x13
, LT = 0x04
,
GE = 0x05
, LE = 0x06
, GT = 0x07
} |
| |
| enum | CheckForInexactConversion {
kCheckForInexactConversion
, kDontCheckForInexactConversion
, kCheckForInexactConversion
, kDontCheckForInexactConversion
,
kCheckForInexactConversion
, kDontCheckForInexactConversion
, kCheckForInexactConversion
, kDontCheckForInexactConversion
,
kCheckForInexactConversion
, kDontCheckForInexactConversion
, kCheckForInexactConversion
, kDontCheckForInexactConversion
} |
| |
| enum class | MaxMinKind : int {
kMin = 0
, kMax = 1
, kMin = 0
, kMax = 1
,
kMin = 0
, kMax = 1
} |
| |
| enum | ControlStatusReg {
csr_fflags = 0x001
, csr_frm = 0x002
, csr_fcsr = 0x003
, csr_cycle = 0xc00
,
csr_time = 0xc01
, csr_instret = 0xc02
, csr_cycleh = 0xc80
, csr_timeh = 0xc81
,
csr_instreth = 0xc82
} |
| |
| enum | FFlagsMask {
kInvalidOperation = 0b10000
, kDivideByZero = 0b1000
, kFPUOverflow = 0b100
, kUnderflow = 0b10
,
kInexact = 0b1
} |
| |
| enum | FPURoundingMode {
mode_round = RN
, mode_ceil = RP
, mode_floor = RM
, mode_trunc = RZ
,
mode_round = RN
, mode_ceil = RP
, mode_floor = RM
, mode_trunc = RZ
,
RNE = 0b000
, RTZ = 0b001
, RDN = 0b010
, RUP = 0b011
,
RMM = 0b100
, DYN = 0b111
} |
| |
| enum | MemoryOdering {
PSI = 0b1000
, PSO = 0b0100
, PSR = 0b0010
, PSW = 0b0001
,
PSIORW = PSI | PSO | PSR | PSW
} |
| |
| enum | FClassFlag {
kNegativeInfinity = 1
, kNegativeNormalNumber = 1 << 1
, kNegativeSubnormalNumber = 1 << 2
, kNegativeZero = 1 << 3
,
kPositiveZero = 1 << 4
, kPositiveSubnormalNumber = 1 << 5
, kPositiveNormalNumber = 1 << 6
, kPositiveInfinity = 1 << 7
,
kSignalingNaN = 1 << 8
, kQuietNaN = 1 << 9
} |
| |
| enum | TailAgnosticType { ta = 0x1
, tu = 0x0
} |
| |
| enum | MaskAgnosticType { ma = 0x1
, mu = 0x0
} |
| |
| enum | MaskType { Mask = 0x0
, NoMask = 0x1
} |
| |
| enum | Hint { no_hint = 0
, no_hint = 0
, no_hint = 0
, no_hint = 0
} |
| |
| enum | BaseOpcode : uint32_t {
LOAD = 0b0000011
, LOAD_FP = 0b0000111
, MISC_MEM = 0b0001111
, OP_IMM = 0b0010011
,
AUIPC = ((3U << 3) + 6)
, OP_IMM_32 = 0b0011011
, STORE = 0b0100011
, STORE_FP = 0b0100111
,
AMO = 0b0101111
, OP = 0b0110011
, OP_32 = 0b0111011
, NMSUB = 0b1001011
,
NMADD = 0b1001111
, OP_FP = 0b1010011
, BRANCH = 0b1100011
, JALR = ((1U << 3) + 1)
,
SYSTEM = 0b1110011
, OP_V = 0b1010111
, C0 = 0b00
, C1 = 0b01
,
C2 = 0b10
, FUNCT2_0 = 0b00
, FUNCT2_1 = 0b01
, FUNCT2_2 = 0b10
,
FUNCT2_3 = 0b11
} |
| |
| enum | LiFlags {
OPTIMIZE_SIZE = 0
, CONSTANT_SIZE = 1
, ADDRESS_LOAD = 2
, OPTIMIZE_SIZE = 0
,
CONSTANT_SIZE = 1
, ADDRESS_LOAD = 2
, OPTIMIZE_SIZE = 0
, CONSTANT_SIZE = 1
,
ADDRESS_LOAD = 2
} |
| |
| enum | RAStatus {
kRAHasNotBeenSaved
, kRAHasBeenSaved
, kRAHasNotBeenSaved
, kRAHasBeenSaved
,
kRAHasNotBeenSaved
, kRAHasBeenSaved
} |
| |
| enum | StackLimitKind {
kInterruptStackLimit
, kRealStackLimit
, kInterruptStackLimit
, kRealStackLimit
,
kInterruptStackLimit
, kRealStackLimit
, kInterruptStackLimit
, kRealStackLimit
,
kInterruptStackLimit
, kRealStackLimit
, kInterruptStackLimit
, kRealStackLimit
} |
| |
| enum | RegisterCode {
kRegAfterLast
, kRegAfterLast
, kRegAfterLast
, kRegAfterLast
,
kRegAfterLast
, kRegAfterLast
, kRegAfterLast
, kRegAfterLast
,
kRegAfterLast
} |
| |
| enum | DoubleRegisterCode {
kDoubleAfterLast
, kDoubleAfterLast
, kDoubleAfterLast
, kDoubleAfterLast
,
kDoubleAfterLast
, kDoubleAfterLast
, kDoubleAfterLast
, kDoubleAfterLast
} |
| |
| enum | VRegisterCode { kVRAfterLast
} |
| |
| enum | Condition : int {
kNoCondition = -1
, eq = 0 << 28
, ne = 1 << 28
, cs = 2 << 28
,
cc = 3 << 28
, mi = 4 << 28
, pl = 5 << 28
, vs = 6 << 28
,
vc = 7 << 28
, hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, al = 14 << 28
,
kSpecialCondition = 15 << 28
, kNumberOfConditions = 16
, hs = cs
, lo = cc
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, eq = 0 << 28
, ne = 1 << 28
,
hs = cs
, cs = 2 << 28
, lo = cc
, cc = 3 << 28
,
mi = 4 << 28
, pl = 5 << 28
, vs = 6 << 28
, vc = 7 << 28
,
hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
, lt = 11 << 28
,
gt = 12 << 28
, le = 13 << 28
, al = 14 << 28
, nv = 15
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, overflow = 0
, no_overflow = 1
,
below = 2
, above_equal = 3
, equal = 4
, not_equal = 5
,
below_equal = 6
, above = 7
, negative = 8
, positive = 9
,
parity_even = 10
, parity_odd = 11
, less = 12
, greater_equal = 13
,
less_equal = 14
, greater = 15
, carry = below
, not_carry = above_equal
,
zero = equal
, not_zero = not_equal
, sign = negative
, not_sign = positive
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, overflow = 0
, no_overflow = 1
,
Uless = 2
, Ugreater_equal = 3
, Uless_equal = 4
, Ugreater = 5
,
equal = 4
, not_equal = 5
, negative = 8
, positive = 9
,
parity_even = 10
, parity_odd = 11
, less = 12
, greater_equal = 13
,
less_equal = 14
, greater = 15
, ueq = 16
, ogl = 17
,
cc_always = 18
, carry = below
, not_carry = above_equal
, zero = equal
,
eq = 0 << 28
, not_zero = not_equal
, ne = 1 << 28
, nz = not_equal
,
sign = negative
, not_sign = positive
, mi = 4 << 28
, pl = 5 << 28
,
hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
, lt = 11 << 28
,
gt = 12 << 28
, le = 13 << 28
, hs = cs
, lo = cc
,
al = 14 << 28
, ult = Uless
, uge = Ugreater_equal
, ule = Uless_equal
,
ugt = Ugreater
, kEqual = eq
, kNotEqual = ne
, kLessThan = lt
,
kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
,
kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
,
kNoOverflow = vc
, kZero = eq
, kNotZero = ne
, overflow = 0
,
no_overflow = 1
, Uless = 2
, Ugreater_equal = 3
, Uless_equal = 4
,
Ugreater = 5
, equal = 4
, not_equal = 5
, negative = 8
,
positive = 9
, parity_even = 10
, parity_odd = 11
, less = 12
,
greater_equal = 13
, less_equal = 14
, greater = 15
, ueq = 16
,
ogl = 17
, cc_always = 18
, carry = below
, not_carry = above_equal
,
zero = equal
, eq = 0 << 28
, not_zero = not_equal
, ne = 1 << 28
,
nz = not_equal
, sign = negative
, not_sign = positive
, mi = 4 << 28
,
pl = 5 << 28
, hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, hs = cs
,
lo = cc
, al = 14 << 28
, ult = Uless
, uge = Ugreater_equal
,
ule = Uless_equal
, ugt = Ugreater
, kEqual = eq
, kNotEqual = ne
,
kLessThan = lt
, kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
,
kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
,
kOverflow = vs
, kNoOverflow = vc
, kZero = eq
, kNotZero = ne
,
kNoCondition = -1
, eq = 0 << 28
, ne = 1 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, unordered = 6
,
ordered = 7
, overflow = 0
, nooverflow = 9
, al = 14 << 28
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, overflow = 0
, no_overflow = 1
,
Uless = 2
, Ugreater_equal = 3
, Uless_equal = 4
, Ugreater = 5
,
equal = 4
, not_equal = 5
, less = 12
, greater_equal = 13
,
less_equal = 14
, greater = 15
, cc_always = 18
, eq = 0 << 28
,
ne = 1 << 28
, ge = 10 << 28
, lt = 11 << 28
, gt = 12 << 28
,
le = 13 << 28
, al = 14 << 28
, ult = Uless
, uge = Ugreater_equal
,
ule = Uless_equal
, ugt = Ugreater
, kEqual = eq
, kNotEqual = ne
,
kLessThan = lt
, kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
,
kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
,
kOverflow = vs
, kNoOverflow = vc
, kZero = eq
, kNotZero = ne
,
kNoCondition = -1
, eq = 0 << 28
, ne = 1 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, al = 14 << 28
,
CC_NOP = 0x0
, CC_EQ = 0x08
, CC_LT = 0x04
, CC_LE = CC_EQ | CC_LT
,
CC_GT = 0x02
, CC_GE = CC_EQ | CC_GT
, CC_OF = 0x01
, CC_NOF = 0x0E
,
CC_ALWAYS = 0x0F
, unordered = 6
, ordered = 7
, overflow = 0
,
nooverflow = 9
, mask0x0 = 0
, mask0x1 = 1
, mask0x2 = 2
,
mask0x3 = 3
, mask0x4 = 4
, mask0x5 = 5
, mask0x6 = 6
,
mask0x7 = 7
, mask0x8 = 8
, mask0x9 = 9
, mask0xA = 10
,
mask0xB = 11
, mask0xC = 12
, mask0xD = 13
, mask0xE = 14
,
mask0xF = 15
, kEqual = eq
, kNotEqual = ne
, kLessThan = lt
,
kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
,
kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
,
kNoOverflow = vc
, kZero = eq
, kNotZero = ne
, overflow = 0
,
no_overflow = 1
, below = 2
, above_equal = 3
, equal = 4
,
not_equal = 5
, below_equal = 6
, above = 7
, negative = 8
,
positive = 9
, parity_even = 10
, parity_odd = 11
, less = 12
,
greater_equal = 13
, less_equal = 14
, greater = 15
, carry = below
,
not_carry = above_equal
, zero = equal
, not_zero = not_equal
, sign = negative
,
not_sign = positive
, kEqual = eq
, kNotEqual = ne
, kLessThan = lt
,
kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
,
kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
,
kNoOverflow = vc
, kZero = eq
, kNotZero = ne
} |
| |
| enum | Opcode {
BEQZ = 0x10U << 26
, BNEZ = 0x11U << 26
, BCZ = 0x12U << 26
, JIRL = 0x13U << 26
,
BEQ = 0x16U << 26
, BNE = 0x17U << 26
, BLT = 0x18U << 26
, BGE = 0x19U << 26
,
BLTU = 0x1aU << 26
, BGEU = 0x1bU << 26
, ADDU16I_D = 0x4U << 26
, LU12I_W = 0xaU << 25
,
LU32I_D = 0xbU << 25
, PCADDI = 0xcU << 25
, PCALAU12I = 0xdU << 25
, PCADDU12I = 0xeU << 25
,
PCADDU18I = 0xfU << 25
, LL_W = 0x20U << 24
, SC_W = 0x21U << 24
, LL_D = 0x22U << 24
,
SC_D = 0x23U << 24
, LDPTR_W = 0x24U << 24
, STPTR_W = 0x25U << 24
, LDPTR_D = 0x26U << 24
,
STPTR_D = 0x27U << 24
, BSTR_W = 0x1U << 22
, BSTRINS_W = BSTR_W
, BSTRPICK_W = BSTR_W
,
BSTRINS_D = 0x2U << 22
, BSTRPICK_D = 0x3U << 22
, SLTI = 0x8U << 22
, SLTUI = 0x9U << 22
,
ADDI_W = 0xaU << 22
, ADDI_D = 0xbU << 22
, LU52I_D = 0xcU << 22
, ANDI = 0xdU << 22
,
ORI = 0xeU << 22
, XORI = 0xfU << 22
, LD_B = 0xa0U << 22
, LD_H = 0xa1U << 22
,
LD_W = 0xa2U << 22
, LD_D = 0xa3U << 22
, ST_B = 0xa4U << 22
, ST_H = 0xa5U << 22
,
ST_W = 0xa6U << 22
, ST_D = 0xa7U << 22
, LD_BU = 0xa8U << 22
, LD_HU = 0xa9U << 22
,
LD_WU = 0xaaU << 22
, FLD_S = 0xacU << 22
, FST_S = 0xadU << 22
, FLD_D = 0xaeU << 22
,
FST_D = 0xafU << 22
, FMADD_S = 0x81U << 20
, FMADD_D = 0x82U << 20
, FMSUB_S = 0x85U << 20
,
FMSUB_D = 0x86U << 20
, FNMADD_S = 0x89U << 20
, FNMADD_D = 0x8aU << 20
, FNMSUB_S = 0x8dU << 20
,
FNMSUB_D = 0x8eU << 20
, FCMP_COND_S = 0xc1U << 20
, FCMP_COND_D = 0xc2U << 20
, BYTEPICK_D = 0x3U << 18
,
BYTEPICK_W = 0x2U << 18
, FSEL = 0x340U << 18
, ALSL = 0x1U << 18
, ALSL_W = ALSL
,
ALSL_WU = ALSL
, ALSL_D = 0xbU << 18
, SLLI_W = 0x40U << 16
, SRLI_W = 0x44U << 16
,
SRAI_W = 0x48U << 16
, ROTRI_W = 0x4cU << 16
, SLLI_D = 0x41U << 16
, SRLI_D = 0x45U << 16
,
SRAI_D = 0x49U << 16
, ROTRI_D = 0x4dU << 16
, SLLI = 0x10U << 18
, SRLI = 0x11U << 18
,
SRAI = 0x12U << 18
, ROTRI = 0x13U << 18
, ADD_W = 0x20U << 15
, ADD_D = 0x21U << 15
,
SUB_W = 0x22U << 15
, SUB_D = 0x23U << 15
, SLT = 0x24U << 15
, SLTU = 0x25U << 15
,
MASKEQZ = 0x26U << 15
, MASKNEZ = 0x27U << 15
, NOR = 0x28U << 15
, OR = 0x2aU << 15
,
XOR = 0x2bU << 15
, ANDN = 0x2dU << 15
, SLL_W = 0x2eU << 15
, SRL_W = 0x2fU << 15
,
SRA_W = 0x30U << 15
, SLL_D = 0x31U << 15
, SRL_D = 0x32U << 15
, SRA_D = 0x33U << 15
,
ROTR_W = 0x36U << 15
, ROTR_D = 0x37U << 15
, MUL_W = 0x38U << 15
, MULH_W = 0x39U << 15
,
MULH_WU = 0x3aU << 15
, MUL_D = 0x3bU << 15
, MULH_D = 0x3cU << 15
, MULH_DU = 0x3dU << 15
,
MULW_D_W = 0x3eU << 15
, MULW_D_WU = 0x3fU << 15
, DIV_W = 0x40U << 15
, MOD_W = 0x41U << 15
,
DIV_WU = 0x42U << 15
, MOD_WU = 0x43U << 15
, DIV_D = 0x44U << 15
, MOD_D = 0x45U << 15
,
DIV_DU = 0x46U << 15
, MOD_DU = 0x47U << 15
, BREAK = 1 << 0
, FADD_S = 0x201U << 15
,
FADD_D = 0x202U << 15
, FSUB_S = 0x205U << 15
, FSUB_D = 0x206U << 15
, FMUL_S = 0x209U << 15
,
FMUL_D = 0x20aU << 15
, FDIV_S = 0x20dU << 15
, FDIV_D = 0x20eU << 15
, FMAX_S = 0x211U << 15
,
FMAX_D = 0x212U << 15
, FMIN_S = 0x215U << 15
, FMIN_D = 0x216U << 15
, FMAXA_S = 0x219U << 15
,
FMAXA_D = 0x21aU << 15
, FMINA_S = 0x21dU << 15
, FMINA_D = 0x21eU << 15
, FSCALEB_S = 0x221U << 15
,
FSCALEB_D = 0x222U << 15
, FCOPYSIGN_S = 0x225U << 15
, FCOPYSIGN_D = 0x226U << 15
, LDX_B = 0x7000U << 15
,
LDX_H = 0x7008U << 15
, LDX_W = 0x7010U << 15
, LDX_D = 0x7018U << 15
, STX_B = 0x7020U << 15
,
STX_H = 0x7028U << 15
, STX_W = 0x7030U << 15
, STX_D = 0x7038U << 15
, LDX_BU = 0x7040U << 15
,
LDX_HU = 0x7048U << 15
, LDX_WU = 0x7050U << 15
, FLDX_S = 0x7060U << 15
, FLDX_D = 0x7068U << 15
,
FSTX_S = 0x7070U << 15
, FSTX_D = 0x7078U << 15
, AMSWAP_W = 0x70c0U << 15
, AMSWAP_D = 0x70c1U << 15
,
AMADD_W = 0x70c2U << 15
, AMADD_D = 0x70c3U << 15
, AMAND_W = 0x70c4U << 15
, AMAND_D = 0x70c5U << 15
,
AMOR_W = 0x70c6U << 15
, AMOR_D = 0x70c7U << 15
, AMXOR_W = 0x70c8U << 15
, AMXOR_D = 0x70c9U << 15
,
AMMAX_W = 0x70caU << 15
, AMMAX_D = 0x70cbU << 15
, AMMIN_W = 0x70ccU << 15
, AMMIN_D = 0x70cdU << 15
,
AMMAX_WU = 0x70ceU << 15
, AMMAX_DU = 0x70cfU << 15
, AMMIN_WU = 0x70d0U << 15
, AMMIN_DU = 0x70d1U << 15
,
AMSWAP_DB_W = 0x70d2U << 15
, AMSWAP_DB_D = 0x70d3U << 15
, AMADD_DB_W = 0x70d4U << 15
, AMADD_DB_D = 0x70d5U << 15
,
AMAND_DB_W = 0x70d6U << 15
, AMAND_DB_D = 0x70d7U << 15
, AMOR_DB_W = 0x70d8U << 15
, AMOR_DB_D = 0x70d9U << 15
,
AMXOR_DB_W = 0x70daU << 15
, AMXOR_DB_D = 0x70dbU << 15
, AMMAX_DB_W = 0x70dcU << 15
, AMMAX_DB_D = 0x70ddU << 15
,
AMMIN_DB_W = 0x70deU << 15
, AMMIN_DB_D = 0x70dfU << 15
, AMMAX_DB_WU = 0x70e0U << 15
, AMMAX_DB_DU = 0x70e1U << 15
,
AMMIN_DB_WU = 0x70e2U << 15
, AMMIN_DB_DU = 0x70e3U << 15
, DBAR = 0x70e4U << 15
, IBAR = 0x70e5U << 15
,
CLO_W = 0X4U << 10
, CLZ_W = 0X5U << 10
, CTO_W = 0X6U << 10
, CTZ_W = 0X7U << 10
,
CLO_D = 0X8U << 10
, CLZ_D = 0X9U << 10
, CTO_D = 0XaU << 10
, CTZ_D = 0XbU << 10
,
REVB_2H = 0XcU << 10
, REVB_4H = 0XdU << 10
, REVB_2W = 0XeU << 10
, REVB_D = 0XfU << 10
,
REVH_2W = 0X10U << 10
, REVH_D = 0X11U << 10
, BITREV_4B = 0X12U << 10
, BITREV_8B = 0X13U << 10
,
BITREV_W = 0X14U << 10
, BITREV_D = 0X15U << 10
, EXT_W_H = 0X16U << 10
, EXT_W_B = 0X17U << 10
,
FABS_S = 0X4501U << 10
, FABS_D = 0X4502U << 10
, FNEG_S = 0X4505U << 10
, FNEG_D = 0X4506U << 10
,
FLOGB_S = 0X4509U << 10
, FLOGB_D = 0X450aU << 10
, FCLASS_S = 0X450dU << 10
, FCLASS_D = 0X450eU << 10
,
FSQRT_S = 0X4511U << 10
, FSQRT_D = 0X4512U << 10
, FRECIP_S = 0X4515U << 10
, FRECIP_D = 0X4516U << 10
,
FRSQRT_S = 0X4519U << 10
, FRSQRT_D = 0X451aU << 10
, FMOV_S = 0X4525U << 10
, FMOV_D = 0X4526U << 10
,
MOVGR2FR_W = 0X4529U << 10
, MOVGR2FR_D = 0X452aU << 10
, MOVGR2FRH_W = 0X452bU << 10
, MOVFR2GR_S = 0X452dU << 10
,
MOVFR2GR_D = 0X452eU << 10
, MOVFRH2GR_S = 0X452fU << 10
, MOVGR2FCSR = 0X4530U << 10
, MOVFCSR2GR = 0X4532U << 10
,
MOVFR2CF = 0X4534U << 10
, MOVGR2CF = 0X4536U << 10
, FCVT_S_D = 0x4646U << 10
, FCVT_D_S = 0x4649U << 10
,
FTINTRM_W_S = 0x4681U << 10
, FTINTRM_W_D = 0x4682U << 10
, FTINTRM_L_S = 0x4689U << 10
, FTINTRM_L_D = 0x468aU << 10
,
FTINTRP_W_S = 0x4691U << 10
, FTINTRP_W_D = 0x4692U << 10
, FTINTRP_L_S = 0x4699U << 10
, FTINTRP_L_D = 0x469aU << 10
,
FTINTRZ_W_S = 0x46a1U << 10
, FTINTRZ_W_D = 0x46a2U << 10
, FTINTRZ_L_S = 0x46a9U << 10
, FTINTRZ_L_D = 0x46aaU << 10
,
FTINTRNE_W_S = 0x46b1U << 10
, FTINTRNE_W_D = 0x46b2U << 10
, FTINTRNE_L_S = 0x46b9U << 10
, FTINTRNE_L_D = 0x46baU << 10
,
FTINT_W_S = 0x46c1U << 10
, FTINT_W_D = 0x46c2U << 10
, FTINT_L_S = 0x46c9U << 10
, FTINT_L_D = 0x46caU << 10
,
FFINT_S_W = 0x4744U << 10
, FFINT_S_L = 0x4746U << 10
, FFINT_D_W = 0x4748U << 10
, FFINT_D_L = 0x474aU << 10
,
FRINT_S = 0x4791U << 10
, FRINT_D = 0x4792U << 10
, MOVCF2FR = 0x4535U << 10
, MOVCF2GR = 0x4537U << 10
,
EXTP = 0x4000000
, EXT0 = 0x10000000
, EXT1 = 0x4C000000
, EXT2 = 0x7C000000
,
EXT3 = 0xEC000000
, EXT4 = 0xFC000000
, EXT5 = 0x78000000
, EXT6 = 0xF0000000
,
DUMY = 0xE352
} |
| |
| enum | {
B1 = 1 << 1
, B11 = 1 << 11
, kOff16Mask = (1 << 16) - 1
, kBOfieldMask = 0x1f << 21
,
kExt2OpcodeMask = 0x3ff << 1
, kExt5OpcodeMask = 0x3 << 2
, kBIMask = 0x1F << 16
, kBDMask = 0x14 << 2
,
kAAMask = 0x01 << 1
, kLKMask = 0x01
, kRCMask = 0x01
, kTOMask = 0x1f << 21
} |
| |
| enum | OEBit { SetOE = 1 << 10
, LeaveOE = 0 << 10
} |
| |
| enum | RCBit { SetRC = 1
, LeaveRC = 0
} |
| |
| enum | LKBit { SetLK = 1
, LeaveLK = 0
} |
| |
| enum | BOfield {
DCBNZF = 0 << 21
, DCBEZF = 2 << 21
, BF = 4 << 21
, DCBNZT = 8 << 21
,
DCBEZT = 10 << 21
, BT = 12 << 21
, DCBNZ = 16 << 21
, DCBEZ = 18 << 21
,
BA = 20 << 21
} |
| |
| enum | CRBit {
CR_LT = 0
, CR_GT = 1
, CR_EQ = 2
, CR_SO = 3
,
CR_FU = 3
} |
| |
| enum | SoftwareInterruptCodes { call_rt_redirected = 0x7fff
} |
| |
| enum | FPRoundingMode {
CURRENT_ROUNDING_MODE = 0
, ROUND_TO_NEAREST_AWAY_FROM_0 = 1
, ROUND_TO_NEAREST_TO_EVEN = 4
, ROUND_TOWARD_0 = 5
,
ROUND_TOWARD_POS_INF = 6
, ROUND_TOWARD_NEG_INF = 7
} |
| |
| enum | CheckForInexactConversion {
kCheckForInexactConversion
, kDontCheckForInexactConversion
} |
| |
| enum class | StackLimitKind {
kInterruptStackLimit
, kRealStackLimit
} |
| |
| enum | LinkRegisterStatus {
kLRHasNotBeenSaved
, kLRHasBeenSaved
} |
| |
| enum | RegisterCode {
kRegAfterLast
} |
| |
| enum | DoubleRegisterCode {
kDoubleAfterLast
} |
| |
| enum | CRegisterCode { kCAfterLast
} |
| |
| enum | IsKnownTaggedPointer { kNo
, kYes
} |
| |
| enum | Condition : int {
kNoCondition = -1
, eq = 0 << 28
, ne = 1 << 28
, cs = 2 << 28
,
cc = 3 << 28
, mi = 4 << 28
, pl = 5 << 28
, vs = 6 << 28
,
vc = 7 << 28
, hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, al = 14 << 28
,
kSpecialCondition = 15 << 28
, kNumberOfConditions = 16
, hs = cs
, lo = cc
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, eq = 0 << 28
, ne = 1 << 28
,
hs = cs
, cs = 2 << 28
, lo = cc
, cc = 3 << 28
,
mi = 4 << 28
, pl = 5 << 28
, vs = 6 << 28
, vc = 7 << 28
,
hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
, lt = 11 << 28
,
gt = 12 << 28
, le = 13 << 28
, al = 14 << 28
, nv = 15
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, overflow = 0
, no_overflow = 1
,
below = 2
, above_equal = 3
, equal = 4
, not_equal = 5
,
below_equal = 6
, above = 7
, negative = 8
, positive = 9
,
parity_even = 10
, parity_odd = 11
, less = 12
, greater_equal = 13
,
less_equal = 14
, greater = 15
, carry = below
, not_carry = above_equal
,
zero = equal
, not_zero = not_equal
, sign = negative
, not_sign = positive
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, overflow = 0
, no_overflow = 1
,
Uless = 2
, Ugreater_equal = 3
, Uless_equal = 4
, Ugreater = 5
,
equal = 4
, not_equal = 5
, negative = 8
, positive = 9
,
parity_even = 10
, parity_odd = 11
, less = 12
, greater_equal = 13
,
less_equal = 14
, greater = 15
, ueq = 16
, ogl = 17
,
cc_always = 18
, carry = below
, not_carry = above_equal
, zero = equal
,
eq = 0 << 28
, not_zero = not_equal
, ne = 1 << 28
, nz = not_equal
,
sign = negative
, not_sign = positive
, mi = 4 << 28
, pl = 5 << 28
,
hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
, lt = 11 << 28
,
gt = 12 << 28
, le = 13 << 28
, hs = cs
, lo = cc
,
al = 14 << 28
, ult = Uless
, uge = Ugreater_equal
, ule = Uless_equal
,
ugt = Ugreater
, kEqual = eq
, kNotEqual = ne
, kLessThan = lt
,
kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
,
kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
,
kNoOverflow = vc
, kZero = eq
, kNotZero = ne
, overflow = 0
,
no_overflow = 1
, Uless = 2
, Ugreater_equal = 3
, Uless_equal = 4
,
Ugreater = 5
, equal = 4
, not_equal = 5
, negative = 8
,
positive = 9
, parity_even = 10
, parity_odd = 11
, less = 12
,
greater_equal = 13
, less_equal = 14
, greater = 15
, ueq = 16
,
ogl = 17
, cc_always = 18
, carry = below
, not_carry = above_equal
,
zero = equal
, eq = 0 << 28
, not_zero = not_equal
, ne = 1 << 28
,
nz = not_equal
, sign = negative
, not_sign = positive
, mi = 4 << 28
,
pl = 5 << 28
, hi = 8 << 28
, ls = 9 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, hs = cs
,
lo = cc
, al = 14 << 28
, ult = Uless
, uge = Ugreater_equal
,
ule = Uless_equal
, ugt = Ugreater
, kEqual = eq
, kNotEqual = ne
,
kLessThan = lt
, kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
,
kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
,
kOverflow = vs
, kNoOverflow = vc
, kZero = eq
, kNotZero = ne
,
kNoCondition = -1
, eq = 0 << 28
, ne = 1 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, unordered = 6
,
ordered = 7
, overflow = 0
, nooverflow = 9
, al = 14 << 28
,
kEqual = eq
, kNotEqual = ne
, kLessThan = lt
, kGreaterThan = gt
,
kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
,
kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
, kNoOverflow = vc
,
kZero = eq
, kNotZero = ne
, overflow = 0
, no_overflow = 1
,
Uless = 2
, Ugreater_equal = 3
, Uless_equal = 4
, Ugreater = 5
,
equal = 4
, not_equal = 5
, less = 12
, greater_equal = 13
,
less_equal = 14
, greater = 15
, cc_always = 18
, eq = 0 << 28
,
ne = 1 << 28
, ge = 10 << 28
, lt = 11 << 28
, gt = 12 << 28
,
le = 13 << 28
, al = 14 << 28
, ult = Uless
, uge = Ugreater_equal
,
ule = Uless_equal
, ugt = Ugreater
, kEqual = eq
, kNotEqual = ne
,
kLessThan = lt
, kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
,
kUnsignedLessThan = lo
, kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
,
kOverflow = vs
, kNoOverflow = vc
, kZero = eq
, kNotZero = ne
,
kNoCondition = -1
, eq = 0 << 28
, ne = 1 << 28
, ge = 10 << 28
,
lt = 11 << 28
, gt = 12 << 28
, le = 13 << 28
, al = 14 << 28
,
CC_NOP = 0x0
, CC_EQ = 0x08
, CC_LT = 0x04
, CC_LE = CC_EQ | CC_LT
,
CC_GT = 0x02
, CC_GE = CC_EQ | CC_GT
, CC_OF = 0x01
, CC_NOF = 0x0E
,
CC_ALWAYS = 0x0F
, unordered = 6
, ordered = 7
, overflow = 0
,
nooverflow = 9
, mask0x0 = 0
, mask0x1 = 1
, mask0x2 = 2
,
mask0x3 = 3
, mask0x4 = 4
, mask0x5 = 5
, mask0x6 = 6
,
mask0x7 = 7
, mask0x8 = 8
, mask0x9 = 9
, mask0xA = 10
,
mask0xB = 11
, mask0xC = 12
, mask0xD = 13
, mask0xE = 14
,
mask0xF = 15
, kEqual = eq
, kNotEqual = ne
, kLessThan = lt
,
kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
,
kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
,
kNoOverflow = vc
, kZero = eq
, kNotZero = ne
, overflow = 0
,
no_overflow = 1
, below = 2
, above_equal = 3
, equal = 4
,
not_equal = 5
, below_equal = 6
, above = 7
, negative = 8
,
positive = 9
, parity_even = 10
, parity_odd = 11
, less = 12
,
greater_equal = 13
, less_equal = 14
, greater = 15
, carry = below
,
not_carry = above_equal
, zero = equal
, not_zero = not_equal
, sign = negative
,
not_sign = positive
, kEqual = eq
, kNotEqual = ne
, kLessThan = lt
,
kGreaterThan = gt
, kLessThanEqual = le
, kGreaterThanEqual = ge
, kUnsignedLessThan = lo
,
kUnsignedGreaterThan = hi
, kUnsignedLessThanEqual = ls
, kUnsignedGreaterThanEqual = hs
, kOverflow = vs
,
kNoOverflow = vc
, kZero = eq
, kNotZero = ne
} |
| |
| enum | RoundingMode { kRoundDown = 0x1
, kRoundUp = 0x2
} |
| |
| enum | ScaleFactor : int8_t {
times_1 = 0
, times_2 = 1
, times_4 = 2
, times_8 = 3
,
times_int_size = times_4
, times_half_system_pointer_size = times_2
, times_system_pointer_size = times_4
, times_tagged_size = times_4
,
times_external_pointer_size = V8_ENABLE_SANDBOX_BOOL ? times_4 : times_8
} |
| |
| enum class | StackLimitKind {
kInterruptStackLimit
, kRealStackLimit
} |
| |
| enum | RegisterCode {
kRegAfterLast
} |
| |
| enum | DoubleRegisterCode {
kDoubleAfterLast
} |
| |
| enum | YMMRegisterCode { kYMMAfterLast
} |
| |
| enum | PerThreadAssertType {
ASSERT_TYPE_IS_VALID_MARKER
, SAFEPOINTS_ASSERT
, HEAP_ALLOCATION_ASSERT
, HANDLE_ALLOCATION_ASSERT
,
HANDLE_DEREFERENCE_ASSERT
, HANDLE_USAGE_ON_ALL_THREADS_ASSERT
, CODE_DEPENDENCY_CHANGE_ASSERT
, CODE_ALLOCATION_ASSERT
,
GC_MOLE
, POSITION_INFO_SLOW_ASSERT
} |
| |
| enum class | LanguageMode : bool { kSloppy
, kStrict
} |
| |
| enum class | StoreOrigin { kMaybeKeyed
, kNamed
} |
| |
| enum class | TypeofMode { kInside
, kNotInside
} |
| |
| enum class | ContextKind { kDefault
, kScriptContext
} |
| |
| enum class | SaveFPRegsMode { kIgnore
, kSave
} |
| |
| enum class | IndirectPointerMode { kStrong
, kCustom
} |
| |
| enum class | ArgvMode { kStack
, kRegister
} |
| |
| enum class | CallApiCallbackMode { kGeneric
, kOptimizedNoProfiling
, kOptimized
} |
| |
| enum class | DeoptimizeKind : uint8_t { kEager
, kLazy
} |
| |
| enum class | LookupHoistingMode { kNormal
, kLegacySloppy
} |
| |
| enum class | ArgumentsType { kRuntime
, kJS
} |
| |
| enum | AllocationSpace {
RO_SPACE
, NEW_SPACE
, OLD_SPACE
, CODE_SPACE
,
SHARED_SPACE
, TRUSTED_SPACE
, SHARED_TRUSTED_SPACE
, NEW_LO_SPACE
,
LO_SPACE
, CODE_LO_SPACE
, SHARED_LO_SPACE
, SHARED_TRUSTED_LO_SPACE
,
TRUSTED_LO_SPACE
, FIRST_SPACE = RO_SPACE
, LAST_SPACE = TRUSTED_LO_SPACE
, FIRST_MUTABLE_SPACE = NEW_SPACE
,
LAST_MUTABLE_SPACE = TRUSTED_LO_SPACE
, FIRST_GROWABLE_PAGED_SPACE = OLD_SPACE
, LAST_GROWABLE_PAGED_SPACE = TRUSTED_SPACE
, FIRST_SWEEPABLE_SPACE = NEW_SPACE
,
LAST_SWEEPABLE_SPACE = SHARED_TRUSTED_SPACE
} |
| |
| enum class | AllocationType : uint8_t {
kYoung
, kOld
, kCode
, kMap
,
kReadOnly
, kSharedOld
, kSharedMap
, kSharedTrusted
,
kTrusted
} |
| |
| enum class | GarbageCollectionReason : int {
kUnknown = 0
, kAllocationFailure = 1
, kAllocationLimit = 2
, kContextDisposal = 3
,
kCountersExtension = 4
, kDebugger = 5
, kDeserializer = 6
, kExternalMemoryPressure = 7
,
kFinalizeMarkingViaStackGuard = 8
, kFinalizeMarkingViaTask = 9
, kFullHashtable = 10
, kHeapProfiler = 11
,
kTask = 12
, kLastResort = 13
, kLowMemoryNotification = 14
, kMakeHeapIterable = 15
,
kMemoryPressure = 16
, kMemoryReducer = 17
, kRuntime = 18
, kSamplingProfiler = 19
,
kSnapshotCreator = 20
, kTesting = 21
, kExternalFinalize = 22
, kGlobalAllocationLimit = 23
,
kMeasureMemory = 24
, kBackgroundAllocationFailure = 25
, kFinalizeConcurrentMinorMS = 26
, kCppHeapAllocationFailure = 27
,
kFrozen = 28
, kIdleContextDisposal = 29
, NUM_REASONS
} |
| |
| enum | AllocationAlignment { kTaggedAligned
, kDoubleAligned
, kDoubleUnaligned
} |
| |
| enum class | AccessMode { ATOMIC
, NON_ATOMIC
} |
| |
| enum | MinimumCapacity { USE_DEFAULT_MINIMUM_CAPACITY
, USE_CUSTOM_MINIMUM_CAPACITY
} |
| |
| enum class | GarbageCollector { SCAVENGER
, MARK_COMPACTOR
, MINOR_MARK_SWEEPER
} |
| |
| enum class | CompactionSpaceKind { kNone
, kCompactionSpaceForScavenge
, kCompactionSpaceForMarkCompact
, kCompactionSpaceForMinorMarkSweep
} |
| |
| enum | Executability { NOT_EXECUTABLE
, EXECUTABLE
} |
| |
| enum class | PageSize { kRegular
, kLarge
} |
| |
| enum class | CodeFlushMode { kFlushBytecode
, kFlushBaselineCode
, kForceFlush
} |
| |
| enum class | ExternalBackingStoreType { kArrayBuffer
, kExternalString
, kNumValues
} |
| |
| enum class | NewJSObjectType : uint8_t { kNoAPIWrapper
, kAPIWrapper
} |
| |
| enum class | REPLMode { kYes
, kNo
} |
| |
| enum class | ParsingWhileDebugging { kYes
, kNo
} |
| |
| enum | NativesFlag { NOT_NATIVES_CODE
, EXTENSION_CODE
, INSPECTOR_CODE
} |
| |
| enum | ParseRestriction : bool { NO_PARSE_RESTRICTION
, ONLY_SINGLE_FUNCTION_LITERAL
} |
| |
| enum class | ScriptEventType {
kReserveId
, kCreate
, kDeserialize
, kBackgroundCompile
,
kStreamingCompileBackground
, kStreamingCompileForeground
} |
| |
| enum class | InlineCacheState {
NO_FEEDBACK
, UNINITIALIZED
, MONOMORPHIC
, RECOMPUTE_HANDLER
,
POLYMORPHIC
, MEGADOM
, MEGAMORPHIC
, GENERIC
} |
| |
| enum | WhereToStart { kStartAtReceiver
, kStartAtPrototype
} |
| |
| enum | ResultSentinel { kNotFound = -1
, kUnsupported = -2
} |
| |
| enum | ShouldThrow { kDontThrow = Internals::kDontThrow
, kThrowOnError = Internals::kThrowOnError
} |
| |
| enum class | InterceptorResult { kFalse = 0
, kTrue = 1
, kNotIntercepted = 2
} |
| |
| enum class | ThreadKind { kMain
, kBackground
} |
| |
| enum class | BranchHint : uint8_t { kNone
, kTrue
, kFalse
} |
| |
| enum class | GotoHint : uint8_t { kNone
, kLabel
, kFallthrough
} |
| |
| enum class | ConvertReceiverMode : unsigned { kNullOrUndefined
, kNotNullOrUndefined
, kAny
, kLast = kAny
} |
| |
| enum class | OrdinaryToPrimitiveHint { kNumber
, kString
} |
| |
| enum class | ToPrimitiveHint { kDefault
, kNumber
, kString
} |
| |
| enum class | CreateArgumentsType : uint8_t { kMappedArguments
, kUnmappedArguments
, kRestParameter
} |
| |
| enum | ScopeType : uint8_t {
SCRIPT_SCOPE
, REPL_MODE_SCOPE
, CLASS_SCOPE
, EVAL_SCOPE
,
FUNCTION_SCOPE
, MODULE_SCOPE
, CATCH_SCOPE
, BLOCK_SCOPE
,
WITH_SCOPE
, SHADOW_REALM_SCOPE
} |
| |
| enum | AllocationSiteMode { DONT_TRACK_ALLOCATION_SITE
, TRACK_ALLOCATION_SITE
, LAST_ALLOCATION_SITE_MODE = TRACK_ALLOCATION_SITE
} |
| |
| enum class | AllocationSiteUpdateMode { kUpdate
, kCheckOnly
} |
| |
| enum class | VariableMode : uint8_t {
kLet
, kConst
, kUsing
, kAwaitUsing
,
kVar
, kTemporary
, kDynamic
, kDynamicGlobal
,
kDynamicLocal
, kPrivateMethod
, kPrivateSetterOnly
, kPrivateGetterOnly
,
kPrivateGetterAndSetter
, kFirstImmutableLexicalVariableMode = kConst
, kLastLexicalVariableMode = kAwaitUsing
} |
| |
| enum | VariableKind : uint8_t {
NORMAL_VARIABLE
, PARAMETER_VARIABLE
, THIS_VARIABLE
, SLOPPY_BLOCK_FUNCTION_VARIABLE
,
SLOPPY_FUNCTION_NAME_VARIABLE
} |
| |
| enum | VariableLocation : uint8_t {
UNALLOCATED
, PARAMETER
, LOCAL
, CONTEXT
,
LOOKUP
, MODULE
, REPL_GLOBAL
, kLastVariableLocation = REPL_GLOBAL
} |
| |
| enum | InitializationFlag : uint8_t { kNeedsInitialization
, kCreatedInitialized
} |
| |
| enum class | IsStaticFlag : uint8_t { kNotStatic
, kStatic
} |
| |
| enum | MaybeAssignedFlag : uint8_t { kNotAssigned
, kMaybeAssigned
} |
| |
| enum class | InterpreterPushArgsMode : unsigned { kArrayFunction
, kWithFinalSpread
, kOther
} |
| |
| enum class | ForInFeedback : uint8_t { kNone = 0x0
, kEnumCacheKeysAndIndices = 0x1
, kEnumCacheKeys = 0x3
, kAny = 0x7
} |
| |
| enum class | UnicodeEncoding : uint8_t { UTF16
, UTF32
} |
| |
| enum class | IterationKind { kKeys
, kValues
, kEntries
} |
| |
| enum class | CollectionKind { kMap
, kSet
} |
| |
| enum class | IsolateExecutionModeFlag : uint8_t { kNoFlags = 0
, kIsProfiling = 1 << 0
, kCheckSideEffects = 1 << 1
} |
| |
| enum class | DefineKeyedOwnPropertyInLiteralFlag { kNoFlags = 0
, kSetFunctionName = 1 << 0
} |
| |
| enum class | DefineKeyedOwnPropertyFlag { kNoFlags = 0
, kSetFunctionName = 1 << 0
} |
| |
| enum | ExternalArrayType {
kExternalInt8Array = 1
, kExternalUint8Array
, kExternalInt16Array
, kExternalUint16Array
,
kExternalInt32Array
, kExternalUint32Array
, kExternalFloat16Array
, kExternalFloat32Array
,
kExternalFloat64Array
, kExternalUint8ClampedArray
, kExternalBigInt64Array
, kExternalBigUint64Array
} |
| |
| enum class | TieringState : int32_t { V
, kLastTieringState = kRequestTurbofan_Concurrent
} |
| |
| enum class | CachedTieringDecision : int32_t {
kPending
, kEarlySparkplug
, kDelayMaglev
, kEarlyMaglev
,
kEarlyTurbofan
, kNormal
} |
| |
| enum class | SpeculationMode { kAllowSpeculation
, kDisallowSpeculation
} |
| |
| enum class | CallFeedbackContent { kTarget
, kReceiver
} |
| |
| enum class | BlockingBehavior { kBlock
, kDontBlock
} |
| |
| enum class | ConcurrencyMode : uint8_t { kSynchronous
, kConcurrent
} |
| |
| enum class | AliasingKind { kOverlap
, kCombine
, kIndependent
} |
| |
| enum | IsolateAddressId { kIsolateAddressCount
} |
| |
| enum class | KeyedAccessLoadMode { kInBounds = 0b00
, kHandleOOB = 0b01
, kHandleHoles = 0b10
, kHandleOOBAndHoles = 0b11
} |
| |
| enum class | KeyedAccessStoreMode { kInBounds
, kGrowAndHandleCOW
, kIgnoreTypedArrayOOB
, kHandleCOW
} |
| |
| enum | MutableMode { MUTABLE
, IMMUTABLE
} |
| |
| enum class | IcCheckType { kElement
, kProperty
} |
| |
| enum class | StubCallMode { kCallCodeObject
, kCallBuiltinPointer
} |
| |
| enum class | NeedsContext { kYes
, kNo
} |
| |
| enum class | AdaptArguments { kYes
, kNo
} |
| |
| enum | StackFrameId { ID_MIN_VALUE = kMinInt
, ID_MAX_VALUE = kMaxInt
, NO_ID = 0
} |
| |
| enum class | ExceptionStatus : bool { kException = false
, kSuccess = true
} |
| |
| enum class | VariableAllocationInfo { NONE
, STACK
, CONTEXT
, UNUSED
} |
| |
| enum | PropertiesEnumerationMode { kEnumerationOrder
, kPropertyAdditionOrder
} |
| |
| enum class | StringTransitionStrategy { kCopy
, kInPlace
, kAlreadyTransitioned
} |
| |
| enum class | CallJumpMode { kCall
, kTailCall
} |
| |
| enum class | MessageTemplate { TEMPLATE
, kMessageCount
} |
| |
| enum class | CheckBounds { kAlways
, kDebugOnly
} |
| |
| enum class | StoreToObjectWriteBarrier { kNone
, kMap
, kFull
} |
| |
| enum class | ToDateStringMode {
kLocalDate
, kLocalTime
, kLocalDateAndTime
, kUTCDateAndTime
,
kISODateAndTime
} |
| |
| enum | StepAction : int8_t {
StepNone = -1
, StepOut = 0
, StepOver = 1
, StepInto = 2
,
LastStepAction = StepInto
} |
| |
| enum | ExceptionBreakType { BreakCaughtException = 0
, BreakUncaughtException = 1
} |
| |
| enum | DebugBreakType {
NOT_DEBUG_BREAK
, DEBUG_BREAK_AT_ENTRY
, DEBUGGER_STATEMENT
, DEBUG_BREAK_SLOT
,
DEBUG_BREAK_SLOT_AT_CALL
, DEBUG_BREAK_SLOT_AT_RETURN
, DEBUG_BREAK_SLOT_AT_SUSPEND
} |
| |
| enum | IgnoreBreakMode { kIgnoreIfAllFramesBlackboxed
, kIgnoreIfTopFrameBlackboxed
} |
| |
| enum class | DeoptimizeReason : uint8_t { DEOPTIMIZE_REASON
} |
| |
| enum class | LazyDeoptimizeReason : uint8_t { LAZY_DEOPTIMIZE_REASON
} |
| |
| enum class | TranslationOpcode { CASE
} |
| |
| enum class | FrameInfoKind { kPrecise
, kConservative
} |
| |
| enum class | BuiltinContinuationMode { STUB
, JAVASCRIPT
, JAVASCRIPT_WITH_CATCH
, JAVASCRIPT_HANDLE_EXCEPTION
} |
| |
| enum | WaitReturnValue : int { kOk = 0
, kNotEqualValue = 1
, kTimedOut = 2
} |
| |
| enum class | IsolateFieldId : uint8_t { kUnknown = 0
, FIELD
, FIELD
} |
| |
| enum | FrameSkipMode { SKIP_FIRST
, SKIP_UNTIL_SEEN
, SKIP_NONE
} |
| |
| enum class | OptimizationReason : uint8_t { OPTIMIZATION_REASON_CONSTANTS
} |
| |
| enum class | WeaknessType { kCallback
, kCallbackWithTwoEmbedderFields
, kNoCallback
} |
| |
| enum class | AllocationOrigin {
kGeneratedCode = 0
, kRuntime = 1
, kGC = 2
, kFirstAllocationOrigin = kGeneratedCode
,
kLastAllocationOrigin = kGC
, kNumberOfAllocationOrigins = kLastAllocationOrigin + 1
} |
| |
| enum class | NumberCacheMode { kIgnore
, kSetOnly
, kBoth
} |
| |
| enum | FunctionMode {
kWithNameBit = 1 << 0
, kWithWritablePrototypeBit = 1 << 1
, kWithReadonlyPrototypeBit = 1 << 2
, kWithPrototypeBits = kWithWritablePrototypeBit | kWithReadonlyPrototypeBit
,
FUNCTION_WITHOUT_PROTOTYPE = 0
, METHOD_WITH_NAME = kWithNameBit
, FUNCTION_WITH_WRITEABLE_PROTOTYPE = kWithWritablePrototypeBit
, FUNCTION_WITH_NAME_AND_WRITEABLE_PROTOTYPE
,
FUNCTION_WITH_READONLY_PROTOTYPE = kWithReadonlyPrototypeBit
, FUNCTION_WITH_NAME_AND_READONLY_PROTOTYPE
} |
| |
| enum class | ArrayStorageAllocationMode { DONT_INITIALIZE_ARRAY_ELEMENTS
, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
} |
| |
| enum | FreeMode { kLinkCategory
, kDoNotLinkCategory
} |
| |
| enum | YoungGenerationSpeedMode { kUpToAndIncludingAtomicPause
, kOnlyAtomicPause
} |
| |
| enum class | ClearRecordedSlots { kYes
, kNo
} |
| |
| enum class | InvalidateRecordedSlots { kYes
, kNo
} |
| |
| enum class | InvalidateExternalPointerSlots { kYes
, kNo
} |
| |
| enum class | ClearFreedMemoryMode { kClearFreedMemory
, kDontClearFreedMemory
} |
| |
| enum class | SkipRoot {
kExternalStringTable
, kGlobalHandles
, kTracedHandles
, kOldGeneration
,
kStack
, kMainThreadHandles
, kUnserializable
, kWeak
,
kConservativeStack
, kReadOnlyBuiltins
} |
| |
| enum class | EmbedderStackStateOrigin { kImplicitThroughTask
, kExplicitInvocation
} |
| |
| enum class | GCFlag : uint8_t { kNoFlags = 0
, kReduceMemoryFootprint = 1 << 0
, kForced = 1 << 1
, kLastResort = 1 << 2
} |
| |
| enum class | StepOrigin { kV8
, kTask
} |
| |
| enum class | ExternalStringTableCleaningMode { kAll
, kYoungOnly
} |
| |
| enum class | MarkingMode { kNoMarking
, kMinorMarking
, kMajorMarking
} |
| |
| enum | RememberedSetType {
OLD_TO_NEW
, OLD_TO_NEW_BACKGROUND
, OLD_TO_OLD
, OLD_TO_SHARED
,
TRUSTED_TO_CODE
, TRUSTED_TO_TRUSTED
, TRUSTED_TO_SHARED_TRUSTED
, SURVIVOR_TO_EXTERNAL_POINTER
,
NUMBER_OF_REMEMBERED_SET_TYPES
} |
| |
| enum | SemiSpaceId { kFromSpace = 0
, kToSpace = 1
} |
| |
| enum class | SkipFreeSpaceOrFiller { kYes
, kNo
} |
| |
| enum class | SafepointKind { kIsolate
, kGlobal
} |
| |
| enum class | CopyAndForwardResult { SUCCESS_YOUNG_GENERATION
, SUCCESS_OLD_GENERATION
, FAILURE
} |
| |
| enum class | SlotType : uint8_t {
kEmbeddedObjectFull
, kEmbeddedObjectCompressed
, kCodeEntry
, kConstPoolEmbeddedObjectFull
,
kConstPoolEmbeddedObjectCompressed
, kConstPoolCodeEntry
, kCleared
, kLast = kCleared
} |
| |
| enum class | FreeSpaceTreatmentMode { kIgnoreFreeSpace
, kZapFreeSpace
} |
| |
| enum class | YoungGenerationMarkingVisitationMode { kParallel
, kConcurrent
} |
| |
| enum class | WasmValueType {
kI8
, kI16
, kI32
, kU32
,
kI64
, kF32
, kF64
, kS128
,
kRef
, kRefNull
, kNumTypes
} |
| |
| enum class | NamedPropertyType : bool { kNotOwn
, kOwn
} |
| |
| enum | KeyedStoreCheckMap { kDontCheckMap
, kCheckMap
} |
| |
| enum | KeyedStoreIncrementLength { kDontIncrementLength
, kIncrementLength
} |
| |
| enum class | TransitionMode { kNoTransition
, kTransitionToDouble
, kTransitionToObject
} |
| |
| enum class | StoreMode { kSet
, kDefineKeyedOwnInLiteral
, kDefineNamedOwn
, kDefineKeyedOwn
} |
| |
| enum class | PrivateNameSemantics { kUpdate
, kDefine
} |
| |
| enum | ParseElementResult { kElementFound
, kElementNotFound
} |
| |
| enum class | JsonToken : uint8_t {
NUMBER
, STRING
, LBRACE
, RBRACE
,
LBRACK
, RBRACK
, TRUE_LITERAL
, FALSE_LITERAL
,
NULL_LITERAL
, WHITESPACE
, COLON
, COMMA
,
ILLEGAL
, EOS
} |
| |
| enum | FastJsonStringifierResult {
SUCCESS
, JS_OBJECT
, JS_ARRAY
, UNDEFINED
,
CHANGE_ENCODING
, SLOW_PATH
, EXCEPTION
} |
| |
| enum class | FastJsonStringifierObjectKeyResult : uint8_t { kSuccess
, kNeedsEscaping
, kChangeEncoding
} |
| |
| enum class | ResumeJSObjectMode : uint8_t { kWithMapCache
, kWithoutMapCache
, kBuildingMapCache
} |
| |
| enum class | OptionalTimedHistogramScopeMode { TAKE_TIME
, DONT_TAKE_TIME
} |
| |
| enum class | TimedHistogramResolution { MILLISECOND
, MICROSECOND
} |
| |
| enum class | LogSeparator { kSeparator
} |
| |
| enum | ConversionFlag { NO_CONVERSION_FLAG
, ALLOW_NON_DECIMAL_PREFIX
, ALLOW_TRAILING_JUNK
} |
| |
| enum class | WasmMemoryFlag : uint8_t { kNotWasm
, kWasmMemory32
, kWasmMemory64
} |
| |
| enum class | SharedFlag : uint8_t { kNotShared
, kShared
} |
| |
| enum class | ResizableFlag : uint8_t { kNotResizable
, kResizable
} |
| |
| enum class | InitializedFlag : uint8_t { kUninitialized
, kZeroInitialized
} |
| |
| enum class | CodeKind : uint8_t { DEFINE_CODE_KIND_ENUM
} |
| |
| enum class | CodeKindFlag { V
} |
| |
| enum | ContextLookupFlags { FOLLOW_CONTEXT_CHAIN = 1 << 0
, FOLLOW_PROTOTYPE_CHAIN = 1 << 1
, DONT_FOLLOW_CHAINS = 0
, FOLLOW_CHAINS = FOLLOW_CONTEXT_CHAIN | FOLLOW_PROTOTYPE_CHAIN
} |
| |
| enum class | DeoptimizationLiteralKind {
kObject
, kNumber
, kSignedBigInt64
, kUnsignedBigInt64
,
kHoleNaN
, kInvalid
, kWasmI31Ref
, kWasmInt32
,
kWasmFloat32
, kWasmFloat64
, kWasmInt64 = kSignedBigInt64
} |
| |
| enum | ElementsKind : uint8_t {
PACKED_SMI_ELEMENTS
, PACKED_ELEMENTS
, PACKED_DOUBLE_ELEMENTS
, PACKED_NONEXTENSIBLE_ELEMENTS
,
HOLEY_NONEXTENSIBLE_ELEMENTS
, PACKED_SEALED_ELEMENTS
, HOLEY_SEALED_ELEMENTS
, PACKED_FROZEN_ELEMENTS
,
HOLEY_FROZEN_ELEMENTS
, SHARED_ARRAY_ELEMENTS
, DICTIONARY_ELEMENTS
, FAST_SLOPPY_ARGUMENTS_ELEMENTS
,
SLOW_SLOPPY_ARGUMENTS_ELEMENTS
, FAST_STRING_WRAPPER_ELEMENTS
, SLOW_STRING_WRAPPER_ELEMENTS
, WASM_ARRAY_ELEMENTS
,
NO_ELEMENTS
, FIRST_ELEMENTS_KIND = PACKED_SMI_ELEMENTS
, LAST_ELEMENTS_KIND = RAB_GSAB_FLOAT16_ELEMENTS
, FIRST_FAST_ELEMENTS_KIND = PACKED_SMI_ELEMENTS
,
LAST_FAST_ELEMENTS_KIND = HOLEY_DOUBLE_ELEMENTS
, FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = UINT8_ELEMENTS
, LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = FLOAT16_ELEMENTS
, FIRST_RAB_GSAB_FIXED_TYPED_ARRAY_ELEMENTS_KIND = RAB_GSAB_UINT8_ELEMENTS
,
LAST_RAB_GSAB_FIXED_TYPED_ARRAY_ELEMENTS_KIND = RAB_GSAB_FLOAT16_ELEMENTS
, TERMINAL_FAST_ELEMENTS_KIND = HOLEY_ELEMENTS
, FIRST_ANY_NONEXTENSIBLE_ELEMENTS_KIND = PACKED_NONEXTENSIBLE_ELEMENTS
, LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND = SHARED_ARRAY_ELEMENTS
,
FIRST_VALID_ATOMICS_TYPED_ARRAY_ELEMENTS_KIND = UINT8_ELEMENTS
, LAST_VALID_ATOMICS_TYPED_ARRAY_ELEMENTS_KIND = BIGINT64_ELEMENTS
, SYSTEM_POINTER_ELEMENTS = PACKED_ELEMENTS
} |
| |
| enum class | UpdateFeedbackMode { kOptionalFeedback
, kGuaranteedFeedback
, kNoFeedback
} |
| |
| enum class | ClearBehavior { kDefault
, kClearAll
} |
| |
| enum class | FeedbackSlotKind : uint8_t {
kInvalid
, kStoreGlobalSloppy
, kSetNamedSloppy
, kSetKeyedSloppy
,
kLastSloppyKind = kSetKeyedSloppy
, kCall
, kLoadProperty
, kLoadGlobalNotInsideTypeof
,
kLoadGlobalInsideTypeof
, kLoadKeyed
, kHasKeyed
, kStoreGlobalStrict
,
kSetNamedStrict
, kDefineNamedOwn
, kDefineKeyedOwn
, kSetKeyedStrict
,
kStoreInArrayLiteral
, kBinaryOp
, kCompareOp
, kDefineKeyedOwnPropertyInLiteral
,
kLiteral
, kForIn
, kInstanceOf
, kTypeOf
,
kCloneObject
, kJumpLoop
, kLast = kJumpLoop
} |
| |
| enum class | FunctionKind : uint8_t {
kNormalFunction
, kModule
, kModuleWithTopLevelAwait
, kBaseConstructor
,
kDefaultBaseConstructor
, kDefaultDerivedConstructor
, kDerivedConstructor
, kGetterFunction
,
kStaticGetterFunction
, kSetterFunction
, kStaticSetterFunction
, kArrowFunction
,
kAsyncArrowFunction
, kAsyncFunction
, kAsyncConciseMethod
, kStaticAsyncConciseMethod
,
kAsyncConciseGeneratorMethod
, kStaticAsyncConciseGeneratorMethod
, kAsyncGeneratorFunction
, kGeneratorFunction
,
kConciseGeneratorMethod
, kStaticConciseGeneratorMethod
, kConciseMethod
, kStaticConciseMethod
,
kClassMembersInitializerFunction
, kClassStaticInitializerFunction
, kInvalid
, kLastFunctionKind = kClassStaticInitializerFunction
} |
| |
| enum class | FunctionSyntaxKind : uint8_t {
kAnonymousExpression
, kNamedExpression
, kDeclaration
, kAccessorOrMethod
,
kWrapped
, kLastFunctionSyntaxKind = kWrapped
} |
| |
| enum | StringRepresentationTag {
kSeqStringTag = 0x0
, kConsStringTag = 0x1
, kExternalStringTag = 0x2
, kSlicedStringTag = 0x3
,
kThinStringTag = 0x5
} |
| |
| enum | InstanceType : uint16_t {
INTERNALIZED_TWO_BYTE_STRING_TYPE
, INTERNALIZED_ONE_BYTE_STRING_TYPE
, EXTERNAL_INTERNALIZED_TWO_BYTE_STRING_TYPE
, EXTERNAL_INTERNALIZED_ONE_BYTE_STRING_TYPE
,
UNCACHED_EXTERNAL_INTERNALIZED_TWO_BYTE_STRING_TYPE
, UNCACHED_EXTERNAL_INTERNALIZED_ONE_BYTE_STRING_TYPE
, SEQ_TWO_BYTE_STRING_TYPE
, SEQ_ONE_BYTE_STRING_TYPE
,
CONS_TWO_BYTE_STRING_TYPE
, CONS_ONE_BYTE_STRING_TYPE
, SLICED_TWO_BYTE_STRING_TYPE
, SLICED_ONE_BYTE_STRING_TYPE
,
EXTERNAL_TWO_BYTE_STRING_TYPE
, EXTERNAL_ONE_BYTE_STRING_TYPE
, UNCACHED_EXTERNAL_TWO_BYTE_STRING_TYPE
, UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE
,
THIN_TWO_BYTE_STRING_TYPE
, THIN_ONE_BYTE_STRING_TYPE
, SHARED_SEQ_TWO_BYTE_STRING_TYPE = SEQ_TWO_BYTE_STRING_TYPE | kSharedStringTag
, SHARED_SEQ_ONE_BYTE_STRING_TYPE = SEQ_ONE_BYTE_STRING_TYPE | kSharedStringTag
,
SHARED_EXTERNAL_TWO_BYTE_STRING_TYPE
, SHARED_EXTERNAL_ONE_BYTE_STRING_TYPE
, SHARED_UNCACHED_EXTERNAL_TWO_BYTE_STRING_TYPE
, SHARED_UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE
,
FIRST_UNIQUE_NAME_TYPE = INTERNALIZED_TWO_BYTE_STRING_TYPE
, LAST_UNIQUE_NAME_TYPE = SYMBOL_TYPE
, FIRST_NONSTRING_TYPE = SYMBOL_TYPE
, FIRST_CALLABLE_JS_FUNCTION_TYPE = FIRST_JS_FUNCTION_TYPE
,
LAST_CALLABLE_JS_FUNCTION_TYPE = JS_CLASS_CONSTRUCTOR_TYPE - 1
, LAST_SPECIAL_RECEIVER_TYPE = LAST_JS_SPECIAL_OBJECT_TYPE
, LAST_CUSTOM_ELEMENTS_RECEIVER = LAST_JS_CUSTOM_ELEMENTS_OBJECT_TYPE
, FIRST_TYPE = FIRST_HEAP_OBJECT_TYPE
,
LAST_TYPE = LAST_HEAP_OBJECT_TYPE
, BIGINT_TYPE = BIG_INT_BASE_TYPE
, DEPENDENT_CODE_TYPE = WEAK_ARRAY_LIST_TYPE
} |
| |
| enum | FormatMatcherOption { kBestFit
, kBasic
} |
| |
| enum class | DisposableStackState { kDisposed
, kPending
} |
| |
| enum class | DisposeMethodCallType { kValueIsReceiver = 0
, kValueIsArgument = 1
} |
| |
| enum class | DisposeMethodHint { kSyncDispose = 0
, kAsyncDispose = 1
} |
| |
| enum class | DisposableStackResourcesType { kAllSync
, kAtLeastOneAsync
} |
| |
| enum class | BudgetModification { kReduce
, kRaise
, kReset
} |
| |
| enum class | AllocationPolicy { kAllocationAllowed
, kAllocationDisallowed
} |
| |
| enum | AddKeyConversion { DO_NOT_CONVERT
, CONVERT_TO_ARRAY_INDEX
} |
| |
| enum class | GetKeysConversion { kKeepNumbers = static_cast<int>(v8::KeyConversionMode::kKeepNumbers)
, kConvertToString = static_cast<int>(v8::KeyConversionMode::kConvertToString)
, kNoNumbers = static_cast<int>(v8::KeyConversionMode::kNoNumbers)
} |
| |
| enum class | KeyCollectionMode { kOwnOnly = static_cast<int>(v8::KeyCollectionMode::kOwnOnly)
, kIncludePrototypes
} |
| |
| enum | VisitorId { kDataOnlyVisitorIdCount
, kVisitorIdCount
} |
| |
| enum class | ObjectFields { kDataOnly
, kMaybePointers
} |
| |
| enum class | ObjectType |
| |
| enum | WriteBarrierMode { SKIP_WRITE_BARRIER
, UNSAFE_SKIP_WRITE_BARRIER
, UPDATE_EPHEMERON_KEY_WRITE_BARRIER
, UPDATE_WRITE_BARRIER
} |
| |
| enum | PropertyNormalizationMode { CLEAR_INOBJECT_PROPERTIES
, KEEP_INOBJECT_PROPERTIES
} |
| |
| enum | TransitionFlag { INSERT_TRANSITION
, OMIT_TRANSITION
} |
| |
| enum | TransitionKindFlag { SIMPLE_PROPERTY_TRANSITION
, PROPERTY_TRANSITION
, PROTOTYPE_TRANSITION
, SPECIAL_TRANSITION
} |
| |
| enum | DescriptorFlag { ALL_DESCRIPTORS
, OWN_DESCRIPTORS
} |
| |
| enum class | ComparisonResult { kLessThan = -1
, kEqual = 0
, kGreaterThan = 1
, kUndefined = 2
} |
| |
| enum class | OnNonExistent { kThrowReferenceError
, kReturnUndefined
} |
| |
| enum class | ElementTypes { kAll
, kStringAndSymbol
} |
| |
| enum class | EnforceDefineSemantics { kSet
, kDefine
} |
| |
| enum | EnsureElementsMode { DONT_ALLOW_DOUBLE_ELEMENTS
, ALLOW_COPIED_DOUBLE_ELEMENTS
, ALLOW_CONVERTED_DOUBLE_ELEMENTS
} |
| |
| enum | AccessorComponent { ACCESSOR_GETTER
, ACCESSOR_SETTER
} |
| |
| enum | PropertyAttributes {
NONE = ::v8::None
, READ_ONLY = ::v8::ReadOnly
, DONT_ENUM = ::v8::DontEnum
, DONT_DELETE = ::v8::DontDelete
,
ALL_ATTRIBUTES_MASK = READ_ONLY | DONT_ENUM | DONT_DELETE
, SEALED = DONT_DELETE
, FROZEN = SEALED | READ_ONLY
, ABSENT = 64
} |
| |
| enum | PropertyFilter {
ALL_PROPERTIES = 0
, ONLY_WRITABLE = 1
, ONLY_ENUMERABLE = 2
, ONLY_CONFIGURABLE = 4
,
SKIP_STRINGS = 8
, SKIP_SYMBOLS = 16
, PRIVATE_NAMES_ONLY = 32
, ENUMERABLE_STRINGS = ONLY_ENUMERABLE | SKIP_SYMBOLS
} |
| |
| enum class | PropertyKind { kData = 0
, kAccessor = 1
} |
| |
| enum class | PropertyLocation { kField = 0
, kDescriptor = 1
} |
| |
| enum class | PropertyConstness { kMutable = 0
, kConst = 1
} |
| |
| enum class | PropertyCellType {
kMutable
, kUndefined
, kConstant
, kConstantType
,
kInTransition
, kNoCell = kMutable
} |
| |
| enum class | CreateSourcePositions { kNo
, kYes
} |
| |
| enum class | BinaryOperationHint : uint8_t {
kNone
, kSignedSmall
, kSignedSmallInputs
, kAdditiveSafeInteger
,
kNumber
, kNumberOrOddball
, kString
, kStringOrStringWrapper
,
kBigInt
, kBigInt64
, kAny
} |
| |
| enum class | CompareOperationHint : uint8_t {
kNone
, kSignedSmall
, kNumber
, kNumberOrBoolean
,
kNumberOrOddball
, kInternalizedString
, kString
, kSymbol
,
kBigInt
, kBigInt64
, kReceiver
, kReceiverOrNullOrUndefined
,
kAny
} |
| |
| enum class | ForInHint : uint8_t { kNone
, kEnumCacheKeysAndIndices
, kEnumCacheKeys
, kAny
} |
| |
| enum | StringAddFlags { STRING_ADD_CHECK_NONE
, STRING_ADD_CONVERT_LEFT
, STRING_ADD_CONVERT_RIGHT
} |
| |
| enum class | SerializationTag : uint8_t {
kVersion = 0xFF
, kPadding = '\0'
, kVerifyObjectCount = '?'
, kTheHole = '-'
,
kUndefined = '_'
, kNull = '0'
, kTrue = 'T'
, kFalse = 'F'
,
kInt32 = 'I'
, kUint32 = 'U'
, kDouble = 'N'
, kBigInt = 'Z'
,
kUtf8String = 'S'
, kOneByteString = '"'
, kTwoByteString = 'c'
, kObjectReference = '^'
,
kBeginJSObject = 'o'
, kEndJSObject = '{'
, kBeginSparseJSArray = 'a'
, kEndSparseJSArray = '@'
,
kBeginDenseJSArray = 'A'
, kEndDenseJSArray = '$'
, kDate = 'D'
, kTrueObject = 'y'
,
kFalseObject = 'x'
, kNumberObject = 'n'
, kBigIntObject = 'z'
, kStringObject = 's'
,
kRegExp = 'R'
, kBeginJSMap = ';'
, kEndJSMap = ':'
, kBeginJSSet = '\''
,
kEndJSSet = ','
, kArrayBuffer = 'B'
, kResizableArrayBuffer = '~'
, kArrayBufferTransfer = 't'
,
kArrayBufferView = 'V'
, kSharedArrayBuffer = 'u'
, kSharedObject = 'p'
, kWasmModuleTransfer = 'w'
,
kHostObject = '\\'
, kWasmMemoryTransfer = 'm'
, kError = 'r'
, kLegacyReservedMessagePort = 'M'
,
kLegacyReservedBlob = 'b'
, kLegacyReservedBlobIndex = 'i'
, kLegacyReservedFile = 'f'
, kLegacyReservedFileIndex = 'e'
,
kLegacyReservedDOMFileSystem = 'd'
, kLegacyReservedFileList = 'l'
, kLegacyReservedFileListIndex = 'L'
, kLegacyReservedImageData = '#'
,
kLegacyReservedImageBitmap = 'g'
, kLegacyReservedImageBitmapTransfer = 'G'
, kLegacyReservedOffscreenCanvas = 'H'
, kLegacyReservedCryptoKey = 'K'
,
kLegacyReservedRTCCertificate = 'k'
} |
| |
| enum class | Root { DECLARE_ENUM
, kNumberOfRoots
} |
| |
| enum class | InferName { kYes
, kNo
} |
| |
| enum | {
TOTAL_KEYWORDS = 52
, MIN_WORD_LENGTH = 2
, MAX_WORD_LENGTH = 10
, MIN_HASH_VALUE = 3
,
MAX_HASH_VALUE = 64
} |
| |
| enum | FunctionNameValidity { kFunctionNameIsStrictReserved
, kSkipFunctionNameCheck
, kFunctionNameValidityUnknown
} |
| |
| enum | AllowLabelledFunctionStatement { kAllowLabelledFunctionStatement
, kDisallowLabelledFunctionStatement
} |
| |
| enum | ParsingArrowHeadFlag { kCertainlyNotArrowHead
, kMaybeArrowHead
} |
| |
| enum class | ParseFunctionFlag : uint8_t { kIsNormal = 0
, kIsGenerator = 1 << 0
, kIsAsync = 1 << 1
} |
| |
| enum class | ParsePropertyKind : uint8_t {
kAutoAccessorClassField
, kAccessorGetter
, kAccessorSetter
, kValue
,
kShorthand
, kAssign
, kMethod
, kClassField
,
kShorthandOrClassField
, kSpread
, kNotSet
} |
| |
| enum class | ScanFlags : uint8_t {
kTerminatesLiteral = 1 << 0
, kCannotBeKeyword = 1 << 1
, kCannotBeKeywordStart = 1 << 2
, kStringTerminator = 1 << 3
,
kIdentifierNeedsSlowPath = 1 << 4
, kMultilineCommentCharacterNeedsSlowPath = 1 << 5
} |
| |
| enum class | StandardCharacterSet : char {
kWhitespace = 's'
, kNotWhitespace = 'S'
, kWord = 'w'
, kNotWord = 'W'
,
kDigit = 'd'
, kNotDigit = 'D'
, kLineTerminator = 'n'
, kNotLineTerminator = '.'
,
kEverything = '*'
} |
| |
| enum | ContainedInLattice { kNotYet = 0
, kLatticeIn = 1
, kLatticeOut = 2
, kLatticeUnknown = 3
} |
| |
| enum class | RegExpError : uint32_t { TEMPLATE
, NumErrors
} |
| |
| enum class | RegExpFlag { REGEXP_FLAG_LIST =(V)
} |
| |
| enum class | RegExpCompilationTarget : int { kBytecode
, kNative
} |
| |
| enum class | RootIndex : uint16_t {
COUNT_ROOT
, DECL
, kRootListLength
, kFirstRoot = 0
,
kLastRoot = kRootListLength - 1
, kReadOnlyRootsCount = 0 READ_ONLY_ROOT_LIST(COUNT_ROOT)
, kImmortalImmovableRootsCount
, kFirstReadOnlyRoot = kFirstRoot
,
kLastReadOnlyRoot = kFirstReadOnlyRoot + kReadOnlyRootsCount - 1
, kFirstHeapNumberRoot = kNanValue
, kLastHeapNumberRoot = kSmiMaxValuePlusOne
, kFirstJSReceiverMapRoot = kJSSharedArrayMap
,
kFirstNameForProtector = kconstructor_string
, kNameForProtectorCount = 0 NAME_FOR_PROTECTOR_ROOT_LIST(COUNT_ROOT)
, kLastNameForProtector = kFirstNameForProtector + kNameForProtectorCount - 1
, kMutableRootsCount
,
kFirstStrongRoot = kLastReadOnlyRoot + 1
, kLastStrongRoot = kFirstStrongRoot + kMutableRootsCount - 1
, kFirstStrongOrReadOnlyRoot = kFirstRoot
, kLastStrongOrReadOnlyRoot = kLastStrongRoot
,
kFirstImmortalImmovableRoot = kFirstReadOnlyRoot
, kLastImmortalImmovableRoot
, kFirstSmiRoot = kLastStrongRoot + 1
, kLastSmiRoot = kLastRoot
,
kFirstBuiltinWithSfiRoot = kProxyRevokeSharedFun
, kLastBuiltinWithSfiRoot = kFirstBuiltinWithSfiRoot + BUILTINS_WITH_SFI_ROOTS_LIST(COUNT_ROOT) - 1
} |
| |
| enum | AsmJsInstantiateResult { kAsmJsInstantiateSuccess = 0
, kAsmJsInstantiateFail = 1
} |
| |
| enum class | PrivateMemberType { kPrivateField
, kPrivateAccessor
, kPrivateMethod
} |
| |
| enum class | OptimizationStatus {
kIsFunction = 1 << 0
, kNeverOptimize = 1 << 1
, kAlwaysOptimize = 1 << 2
, kMaybeDeopted = 1 << 3
,
kOptimized = 1 << 4
, kMaglevved = 1 << 5
, kTurboFanned = 1 << 6
, kInterpreted = 1 << 7
,
kMarkedForOptimization = 1 << 8
, kMarkedForConcurrentOptimization = 1 << 9
, kOptimizingConcurrently = 1 << 10
, kIsExecuting = 1 << 11
,
kTopmostFrameIsTurboFanned = 1 << 12
, kLiteMode = 1 << 13
, kMarkedForDeoptimization = 1 << 14
, kBaseline = 1 << 15
,
kTopmostFrameIsInterpreted = 1 << 16
, kTopmostFrameIsBaseline = 1 << 17
, kIsLazy = 1 << 18
, kTopmostFrameIsMaglev = 1 << 19
,
kOptimizeOnNextCallOptimizesToMaglev = 1 << 20
, kOptimizeMaglevOptimizesToTurbofan = 1 << 21
, kMarkedForMaglevOptimization = 1 << 22
, kMarkedForConcurrentMaglevOptimization = 1 << 23
} |
| |
| enum | CodeEntrypointTag : uint64_t {
kDefaultCodeEntrypointTag = 0
, kJSEntrypointTag = kDefaultCodeEntrypointTag
, kWasmEntrypointTag = uint64_t{1} << kCodeEntrypointTagShift
, kBytecodeHandlerEntrypointTag = uint64_t{2} << kCodeEntrypointTagShift
,
kLoadWithVectorICHandlerEntrypointTag
, kStoreWithVectorICHandlerEntrypointTag
, kStoreTransitionICHandlerEntrypointTag
, kRegExpEntrypointTag = uint64_t{6} << kCodeEntrypointTagShift
,
kInvalidEntrypointTag = uint64_t{0xff} << kCodeEntrypointTagShift
, kFreeCodePointerTableEntryTag = uint64_t{0xffff} << kCodeEntrypointTagShift
} |
| |
| enum | IndirectPointerTag : uint64_t { kIndirectPointerNullTag = 0
, kUnknownIndirectPointerTag = kIndirectPointerTagMaskWithoutFreeEntryBit
, kFreeTrustedPointerTableEntryTag = kTrustedPointerTableFreeEntryBit
} |
| |
| enum class | DeserializingUserCodeOption { kNotDeserializingUserCode
, kIsDeserializingUserCode
} |
| |
| enum | DataDirective { kByte
, kLong
, kQuad
, kOcta
} |
| |
| enum class | EmbeddedTargetOs {
kAIX
, kChromeOS
, kFuchsia
, kMac
,
kWin
, kStarboard
, kZOS
, kGeneric
} |
| |
| enum class | EmbeddedTargetArch {
kArm
, kArm64
, kIA32
, kX64
,
kGeneric
} |
| |
| enum class | SnapshotSpace : uint8_t { kReadOnlyHeap = 0
, kOld = 1
, kCode = 2
, kTrusted = 3
} |
| |
| enum | OneByteCharFlags {
kIsIdentifierStart = 1 << 0
, kIsIdentifierPart = 1 << 1
, kIsWhiteSpace = 1 << 2
, kIsWhiteSpaceOrLineTerminator = 1 << 3
,
kMaybeLineEnd = 1 << 4
} |
| |
| enum class | TryAbortResult { kTaskRemoved
, kTaskRunning
, kTaskAborted
} |
| |
| enum class | JitPermission { kNoJit
, kMapAsJittable
} |
| |
| enum | ToIndexMode { kToArrayIndex
, kToIntegerIndex
} |
| |
| enum | InternalizeString : bool { kInternalize = true
, kNoInternalize = false
} |
| |
| enum | ExternalPointerTag : uint16_t {
kFirstExternalPointerTag = 0
, kExternalPointerNullTag = 0
, kFirstSharedExternalPointerTag
, kWaiterQueueNodeTag = kFirstSharedExternalPointerTag
,
kExternalStringResourceTag
, kExternalStringResourceDataTag
, kLastSharedExternalPointerTag = kExternalStringResourceDataTag
, kNativeContextMicrotaskQueueTag
,
kEmbedderDataSlotPayloadTag
, kExternalObjectValueTag
, kFirstMaybeReadOnlyExternalPointerTag
, kFunctionTemplateInfoCallbackTag = kFirstMaybeReadOnlyExternalPointerTag
,
kAccessorInfoGetterTag
, kAccessorInfoSetterTag
, kFirstInterceptorInfoExternalPointerTag
, kApiNamedPropertyQueryCallbackTag = kFirstInterceptorInfoExternalPointerTag
,
kApiNamedPropertyGetterCallbackTag
, kApiNamedPropertyDescriptorCallbackTag
, kApiNamedPropertyDefinerCallbackTag
, kApiNamedPropertyDeleterCallbackTag
,
kApiNamedPropertyEnumeratorCallbackTag
, kApiIndexedPropertyQueryCallbackTag
, kApiIndexedPropertyGetterCallbackTag
, kApiIndexedPropertySetterCallbackTag
,
kApiIndexedPropertyDescriptorCallbackTag
, kApiIndexedPropertyDefinerCallbackTag
, kApiIndexedPropertyDeleterCallbackTag
, kApiIndexedPropertyEnumeratorCallbackTag
,
kLastInterceptorInfoExternalPointerTag
, kLastMaybeReadOnlyExternalPointerTag = kLastInterceptorInfoExternalPointerTag
, kWasmInternalFunctionCallTargetTag
, kWasmTypeInfoNativeTypeTag
,
kWasmExportedFunctionDataSignatureTag
, kWasmStackMemoryTag
, kWasmIndirectFunctionTargetTag
, kFirstForeignExternalPointerTag
,
kGenericForeignTag = kFirstForeignExternalPointerTag
, kApiAccessCheckCallbackTag
, kApiAbortScriptExecutionCallbackTag
, kSyntheticModuleTag
,
kMicrotaskCallbackTag
, kMicrotaskCallbackDataTag
, kCFunctionTag
, kCFunctionInfoTag
,
kMessageListenerTag
, kWaiterQueueForeignTag
, kFirstManagedResourceTag
, kFirstManagedExternalPointerTag = kFirstManagedResourceTag
,
kGenericManagedTag = kFirstManagedExternalPointerTag
, kWasmWasmStreamingTag
, kWasmFuncDataTag
, kWasmManagedDataTag
,
kWasmNativeModuleTag
, kIcuBreakIteratorTag
, kIcuUnicodeStringTag
, kIcuListFormatterTag
,
kIcuLocaleTag
, kIcuSimpleDateFormatTag
, kIcuDateIntervalFormatTag
, kIcuRelativeDateTimeFormatterTag
,
kIcuLocalizedNumberFormatterTag
, kIcuPluralRulesTag
, kIcuCollatorTag
, kDisplayNamesInternalTag
,
kD8WorkerTag
, kD8ModuleEmbedderDataTag
, kLastForeignExternalPointerTag = kD8ModuleEmbedderDataTag
, kLastManagedExternalPointerTag = kLastForeignExternalPointerTag
,
kArrayBufferExtensionTag
, kLastManagedResourceTag = kArrayBufferExtensionTag
, kExternalPointerZappedEntryTag = 0x7d
, kExternalPointerEvacuationEntryTag = 0x7e
,
kExternalPointerFreeEntryTag = 0x7f
, kLastExternalPointerTag = 0x7f
} |
| |
| enum class | TracedReferenceStoreMode { kInitializingStore
, kAssigningStore
} |
| |
| enum class | TracedReferenceHandling { kDefault
, kDroppable
} |
| |
|
| void | VerifyHandleIsNonEmpty (bool is_empty) |
| |
| i::Address * | GlobalizeTracedReference (i::Isolate *i_isolate, i::Address value, internal::Address *slot, TracedReferenceStoreMode store_mode, TracedReferenceHandling reference_handling) |
| |
| void | MoveTracedReference (internal::Address **from, internal::Address **to) |
| |
| void | CopyTracedReference (const internal::Address *const *from, internal::Address **to) |
| |
| void | DisposeTracedReference (internal::Address *location) |
| |
| uint32_t | GetLength (Tagged< JSArray > array) |
| |
| bool | CanUseFastIteration (Isolate *isolate, DirectHandle< JSArray > array) |
| |
| FastIterateResult | FastIterateArray (DirectHandle< JSArray > array, Isolate *isolate, v8::Array::IterationCallback callback, void *callback_data) |
| |
| void | InvokeAccessorGetterCallback (v8::Local< v8::Name > property, const v8::PropertyCallbackInfo< v8::Value > &info) |
| |
| void | InvokeFunctionCallbackGeneric (const v8::FunctionCallbackInfo< v8::Value > &info) |
| |
| void | InvokeFunctionCallbackOptimized (const v8::FunctionCallbackInfo< v8::Value > &info) |
| |
| void | InvokeFinalizationRegistryCleanupFromTask (DirectHandle< NativeContext > native_context, DirectHandle< JSFinalizationRegistry > finalization_registry) |
| |
| template<> |
| int32_t | ConvertDouble (double d) |
| |
| template<> |
| uint32_t | ConvertDouble (double d) |
| |
| template<> |
| float | ConvertDouble (double d) |
| |
| template<> |
| double | ConvertDouble (double d) |
| |
| template<> |
| int64_t | ConvertDouble (double d) |
| |
| template<> |
| uint64_t | ConvertDouble (double d) |
| |
| template<> |
| bool | ConvertDouble (double d) |
| |
| template<typename T > |
| bool | ValidateFunctionCallbackInfo (const FunctionCallbackInfo< T > &info) |
| |
| template<typename T > |
| bool | ValidatePropertyCallbackInfo (const PropertyCallbackInfo< T > &info) |
| |
| template<> |
| bool V8_EXPORT | ValidateCallbackInfo (const FunctionCallbackInfo< void > &info) |
| |
| template<> |
| bool V8_EXPORT | ValidateCallbackInfo (const FunctionCallbackInfo< v8::Value > &info) |
| |
| template<> |
| bool V8_EXPORT | ValidateCallbackInfo (const PropertyCallbackInfo< v8::Value > &info) |
| |
| template<> |
| bool V8_EXPORT | ValidateCallbackInfo (const PropertyCallbackInfo< v8::Array > &info) |
| |
| template<> |
| bool V8_EXPORT | ValidateCallbackInfo (const PropertyCallbackInfo< v8::Boolean > &info) |
| |
| template<> |
| bool V8_EXPORT | ValidateCallbackInfo (const PropertyCallbackInfo< v8::Integer > &info) |
| |
| template<> |
| bool V8_EXPORT | ValidateCallbackInfo (const PropertyCallbackInfo< void > &info) |
| |
| template<typename T > |
| T | ConvertDouble (double d) |
| |
| template<typename T > |
| bool | ValidateCallbackInfo (const FunctionCallbackInfo< T > &info) |
| |
| template<typename T > |
| bool | ValidateCallbackInfo (const PropertyCallbackInfo< T > &info) |
| |
| template EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT) Handle< TemplateObjectDescription > GetTemplateObject template static EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT) Handle< TemplateObjectDescription > GetTemplateObject bool | IsCommutativeOperationWithSmiLiteral (Token::Value op) |
| |
| static bool | MatchSmiLiteralOperation (Expression *left, Expression *right, Expression **expr, Tagged< Smi > *literal) |
| |
| static bool | IsVoidOfLiteral (Expression *expr) |
| |
| static bool | MatchLiteralStrictCompareBoolean (Expression *left, Token::Value op, Expression *right, Expression **expr, Literal **literal) |
| |
| static bool | MatchLiteralCompareUndefined (Expression *left, Token::Value op, Expression *right, Expression **expr) |
| |
| static bool | MatchLiteralCompareNull (Expression *left, Token::Value op, Expression *right, Expression **expr) |
| |
| static bool | MatchLiteralCompareEqualVariable (Expression *left, Token::Value op, Expression *right, Expression **expr, Literal **literal) |
| |
| template Variable * | Scope::Lookup< Scope::kParsedScope > (VariableProxy *proxy, Scope *scope, Scope *outer_scope_end, Scope *cache_scope, bool force_context_allocation) |
| |
| template Variable * | Scope::Lookup< Scope::kDeserializedScope > (VariableProxy *proxy, Scope *scope, Scope *outer_scope_end, Scope *cache_scope, bool force_context_allocation) |
| |
| bool | IsComplementaryAccessorPair (VariableMode a, VariableMode b) |
| |
| bool | CanCompileWithBaseline (Isolate *isolate, Tagged< SharedFunctionInfo > shared) |
| |
| MaybeDirectHandle< Code > | GenerateBaselineCode (Isolate *isolate, Handle< SharedFunctionInfo > shared) |
| |
| void | EmitReturnBaseline (MacroAssembler *masm) |
| |
| static V8_INLINE bool | CheckForName (Isolate *isolate, DirectHandle< Name > name, DirectHandle< String > property_name, int offset, FieldIndex::Encoding encoding, FieldIndex *index) |
| |
| static DirectHandle< Object > | GetFunctionPrototype (Isolate *isolate, DirectHandle< JSFunction > function) |
| |
| static bool | AllowAccessToFunction (Tagged< Context > current_context, Tagged< JSFunction > function) |
| |
| MaybeDirectHandle< JSFunction > | FindCaller (Isolate *isolate, DirectHandle< JSFunction > function) |
| |
| | BUILTIN (AbstractModuleSourceToStringTag) |
| |
| | BUILTIN (HandleApiConstruct) |
| |
| static V8_WARN_UNUSED_RESULT Tagged< Object > | HandleApiCallAsFunctionOrConstructorDelegate (Isolate *isolate, bool is_construct_call, BuiltinArguments args) |
| |
| | BUILTIN (HandleApiCallAsFunctionDelegate) |
| |
| | BUILTIN (HandleApiCallAsConstructorDelegate) |
| |
| | TF_BUILTIN (ArrayPrototypePop, CodeStubAssembler) |
| |
| | TF_BUILTIN (ArrayPrototypePush, CodeStubAssembler) |
| |
| | TF_BUILTIN (ExtractFastJSArray, ArrayBuiltinsAssembler) |
| |
| | TF_BUILTIN (CloneFastJSArray, ArrayBuiltinsAssembler) |
| |
| | TF_BUILTIN (CloneFastJSArrayFillingHoles, ArrayBuiltinsAssembler) |
| |
| | TF_BUILTIN (TypedArrayPrototypeMap, ArrayBuiltinsAssembler) |
| |
| | TF_BUILTIN (ArrayIncludes, ArrayIncludesIndexofAssembler) |
| |
| | TF_BUILTIN (ArrayIncludesSmi, ArrayIncludesIndexofAssembler) |
| |
| | TF_BUILTIN (ArrayIncludesSmiOrObject, ArrayIncludesIndexofAssembler) |
| |
| | TF_BUILTIN (ArrayIncludesPackedDoubles, ArrayIncludesIndexofAssembler) |
| |
| | TF_BUILTIN (ArrayIncludesHoleyDoubles, ArrayIncludesIndexofAssembler) |
| |
| | TF_BUILTIN (ArrayIndexOf, ArrayIncludesIndexofAssembler) |
| |
| | TF_BUILTIN (ArrayIndexOfSmi, ArrayIncludesIndexofAssembler) |
| |
| | TF_BUILTIN (ArrayIndexOfSmiOrObject, ArrayIncludesIndexofAssembler) |
| |
| | TF_BUILTIN (ArrayIndexOfPackedDoubles, ArrayIncludesIndexofAssembler) |
| |
| | TF_BUILTIN (ArrayIndexOfHoleyDoubles, ArrayIncludesIndexofAssembler) |
| |
| | TF_BUILTIN (ArrayPrototypeValues, CodeStubAssembler) |
| |
| | TF_BUILTIN (ArrayPrototypeEntries, CodeStubAssembler) |
| |
| | TF_BUILTIN (ArrayPrototypeKeys, CodeStubAssembler) |
| |
| | TF_BUILTIN (ArrayIteratorPrototypeNext, CodeStubAssembler) |
| |
| | TF_BUILTIN (ArrayConstructor, ArrayBuiltinsAssembler) |
| |
| | TF_BUILTIN (ArrayConstructorImpl, ArrayBuiltinsAssembler) |
| |
| | TF_BUILTIN (ArrayNArgumentsConstructor, ArrayBuiltinsAssembler) |
| |
| | GENERATE_ARRAY_CTOR (NoArgument, PackedSmi, PACKED_SMI_ELEMENTS, DontOverride, DONT_OVERRIDE) |
| |
| | GENERATE_ARRAY_CTOR (NoArgument, HoleySmi, HOLEY_SMI_ELEMENTS, DontOverride, DONT_OVERRIDE) |
| |
| | GENERATE_ARRAY_CTOR (NoArgument, PackedSmi, PACKED_SMI_ELEMENTS, DisableAllocationSites, DISABLE_ALLOCATION_SITES) |
| |
| | GENERATE_ARRAY_CTOR (NoArgument, HoleySmi, HOLEY_SMI_ELEMENTS, DisableAllocationSites, DISABLE_ALLOCATION_SITES) |
| |
| | GENERATE_ARRAY_CTOR (NoArgument, Packed, PACKED_ELEMENTS, DisableAllocationSites, DISABLE_ALLOCATION_SITES) |
| |
| | GENERATE_ARRAY_CTOR (NoArgument, Holey, HOLEY_ELEMENTS, DisableAllocationSites, DISABLE_ALLOCATION_SITES) |
| |
| | GENERATE_ARRAY_CTOR (NoArgument, PackedDouble, PACKED_DOUBLE_ELEMENTS, DisableAllocationSites, DISABLE_ALLOCATION_SITES) |
| |
| | GENERATE_ARRAY_CTOR (NoArgument, HoleyDouble, HOLEY_DOUBLE_ELEMENTS, DisableAllocationSites, DISABLE_ALLOCATION_SITES) |
| |
| | GENERATE_ARRAY_CTOR (SingleArgument, PackedSmi, PACKED_SMI_ELEMENTS, DontOverride, DONT_OVERRIDE) |
| |
| | GENERATE_ARRAY_CTOR (SingleArgument, HoleySmi, HOLEY_SMI_ELEMENTS, DontOverride, DONT_OVERRIDE) |
| |
| | GENERATE_ARRAY_CTOR (SingleArgument, PackedSmi, PACKED_SMI_ELEMENTS, DisableAllocationSites, DISABLE_ALLOCATION_SITES) |
| |
| | GENERATE_ARRAY_CTOR (SingleArgument, HoleySmi, HOLEY_SMI_ELEMENTS, DisableAllocationSites, DISABLE_ALLOCATION_SITES) |
| |
| | GENERATE_ARRAY_CTOR (SingleArgument, Packed, PACKED_ELEMENTS, DisableAllocationSites, DISABLE_ALLOCATION_SITES) |
| |
| | GENERATE_ARRAY_CTOR (SingleArgument, Holey, HOLEY_ELEMENTS, DisableAllocationSites, DISABLE_ALLOCATION_SITES) |
| |
| | GENERATE_ARRAY_CTOR (SingleArgument, PackedDouble, PACKED_DOUBLE_ELEMENTS, DisableAllocationSites, DISABLE_ALLOCATION_SITES) |
| |
| | GENERATE_ARRAY_CTOR (SingleArgument, HoleyDouble, HOLEY_DOUBLE_ELEMENTS, DisableAllocationSites, DISABLE_ALLOCATION_SITES) |
| |
| | TF_BUILTIN (CreateArrayFromSlowBoilerplate, SlowBoilerplateCloneAssembler) |
| |
| | TF_BUILTIN (CreateObjectFromSlowBoilerplate, SlowBoilerplateCloneAssembler) |
| |
| | TF_BUILTIN (CreateArrayFromSlowBoilerplateHelper, SlowBoilerplateCloneAssembler) |
| |
| | TF_BUILTIN (CreateObjectFromSlowBoilerplateHelper, SlowBoilerplateCloneAssembler) |
| |
| | BUILTIN (ArrayPrototypeFill) |
| |
| | BUILTIN (ArrayPush) |
| |
| | BUILTIN (ArrayPop) |
| |
| | BUILTIN (ArrayShift) |
| |
| | BUILTIN (ArrayUnshift) |
| |
| | BUILTIN (ArrayConcat) |
| |
| | BUILTIN (ArrayBufferConstructor) |
| |
| | BUILTIN (ArrayBufferConstructor_DoNotInitialize) |
| |
| static Tagged< Object > | SliceHelper (BuiltinArguments args, Isolate *isolate, const char *kMethodName, bool is_shared) |
| |
| | BUILTIN (SharedArrayBufferPrototypeSlice) |
| |
| | BUILTIN (ArrayBufferPrototypeSlice) |
| |
| static Tagged< Object > | ResizeHelper (BuiltinArguments args, Isolate *isolate, const char *kMethodName, bool is_shared) |
| |
| | BUILTIN (SharedArrayBufferPrototypeGetByteLength) |
| |
| | BUILTIN (ArrayBufferPrototypeResize) |
| |
| | BUILTIN (ArrayBufferPrototypeTransfer) |
| |
| | BUILTIN (ArrayBufferPrototypeTransferToFixedLength) |
| |
| | BUILTIN (SharedArrayBufferPrototypeGrow) |
| |
| | BUILTIN (AsyncDisposableStackOnFulfilled) |
| |
| | BUILTIN (AsyncDisposableStackOnRejected) |
| |
| | BUILTIN (AsyncDisposeFromSyncDispose) |
| |
| | BUILTIN (AsyncDisposableStackConstructor) |
| |
| | BUILTIN (AsyncDisposableStackPrototypeUse) |
| |
| | BUILTIN (AsyncDisposableStackPrototypeDisposeAsync) |
| |
| | BUILTIN (AsyncDisposableStackPrototypeGetDisposed) |
| |
| | BUILTIN (AsyncDisposableStackPrototypeAdopt) |
| |
| | BUILTIN (AsyncDisposableStackPrototypeDefer) |
| |
| | BUILTIN (AsyncDisposableStackPrototypeMove) |
| |
| | TF_BUILTIN (AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) |
| |
| | TF_BUILTIN (AsyncFunctionReject, AsyncFunctionBuiltinsAssembler) |
| |
| | TF_BUILTIN (AsyncFunctionResolve, AsyncFunctionBuiltinsAssembler) |
| |
| | TF_BUILTIN (AsyncFunctionLazyDeoptContinuation, AsyncFunctionBuiltinsAssembler) |
| |
| | TF_BUILTIN (AsyncFunctionAwaitRejectClosure, AsyncFunctionBuiltinsAssembler) |
| |
| | TF_BUILTIN (AsyncFunctionAwaitResolveClosure, AsyncFunctionBuiltinsAssembler) |
| |
| | TF_BUILTIN (AsyncFunctionAwait, AsyncFunctionBuiltinsAssembler) |
| |
| | TF_BUILTIN (AsyncIteratorValueUnwrap, AsyncBuiltinsAssembler) |
| |
| | TF_BUILTIN (AsyncGeneratorPrototypeNext, AsyncGeneratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (AsyncGeneratorPrototypeReturn, AsyncGeneratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (AsyncGeneratorPrototypeThrow, AsyncGeneratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (AsyncGeneratorAwaitResolveClosure, AsyncGeneratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (AsyncGeneratorAwaitRejectClosure, AsyncGeneratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (AsyncGeneratorAwait, AsyncGeneratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (AsyncGeneratorReject, AsyncGeneratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (AsyncGeneratorYieldWithAwait, AsyncGeneratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (AsyncGeneratorYieldWithAwaitResolveClosure, AsyncGeneratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (AsyncGeneratorReturn, AsyncGeneratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (AsyncGeneratorReturnResolveClosure, AsyncGeneratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (AsyncGeneratorReturnClosedResolveClosure, AsyncGeneratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (AsyncGeneratorReturnClosedRejectClosure, AsyncGeneratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (AsyncFromSyncIteratorPrototypeNext, AsyncFromSyncBuiltinsAssembler) |
| |
| | TF_BUILTIN (AsyncFromSyncIteratorPrototypeReturn, AsyncFromSyncBuiltinsAssembler) |
| |
| | TF_BUILTIN (AsyncFromSyncIteratorPrototypeThrow, AsyncFromSyncBuiltinsAssembler) |
| |
| | TF_BUILTIN (AsyncFromSyncIteratorCloseSyncAndRethrow, AsyncFromSyncBuiltinsAssembler) |
| |
| | BUILTIN (CallAsyncModuleFulfilled) |
| |
| | BUILTIN (CallAsyncModuleRejected) |
| |
| | BUILTIN (AtomicsMutexConstructor) |
| |
| | BUILTIN (AtomicsMutexLock) |
| |
| | BUILTIN (AtomicsMutexTryLock) |
| |
| | BUILTIN (AtomicsMutexLockWithTimeout) |
| |
| | BUILTIN (AtomicsMutexLockAsync) |
| |
| | BUILTIN (AtomicsMutexAsyncUnlockResolveHandler) |
| |
| | BUILTIN (AtomicsMutexAsyncUnlockRejectHandler) |
| |
| | BUILTIN (AtomicsConditionConstructor) |
| |
| | BUILTIN (AtomicsConditionWait) |
| |
| | BUILTIN (AtomicsConditionNotify) |
| |
| | BUILTIN (AtomicsConditionWaitAsync) |
| |
| | BUILTIN (AtomicsConditionAcquireLock) |
| |
| | TF_BUILTIN (BigIntToI64, CodeStubAssembler) |
| |
| | TF_BUILTIN (BigIntToI32Pair, CodeStubAssembler) |
| |
| | TF_BUILTIN (I64ToBigInt, CodeStubAssembler) |
| |
| | TF_BUILTIN (I32PairToBigInt, CodeStubAssembler) |
| |
| | BUILTIN (BigIntConstructor) |
| |
| | BUILTIN (BigIntAsUintN) |
| |
| | BUILTIN (BigIntAsIntN) |
| |
| | BUILTIN (BigIntPrototypeToLocaleString) |
| |
| | BUILTIN (BigIntPrototypeToString) |
| |
| | BUILTIN (BigIntPrototypeValueOf) |
| |
| | TF_BUILTIN (Call_ReceiverIsNullOrUndefined_Baseline_Compact, CallOrConstructBuiltinsAssembler) |
| |
| | TF_BUILTIN (Call_ReceiverIsNullOrUndefined_Baseline, CallOrConstructBuiltinsAssembler) |
| |
| | TF_BUILTIN (Call_ReceiverIsNotNullOrUndefined_Baseline_Compact, CallOrConstructBuiltinsAssembler) |
| |
| | TF_BUILTIN (Call_ReceiverIsNotNullOrUndefined_Baseline, CallOrConstructBuiltinsAssembler) |
| |
| | TF_BUILTIN (Call_ReceiverIsAny_Baseline_Compact, CallOrConstructBuiltinsAssembler) |
| |
| | TF_BUILTIN (Call_ReceiverIsAny_Baseline, CallOrConstructBuiltinsAssembler) |
| |
| | TF_BUILTIN (Call_ReceiverIsNullOrUndefined_WithFeedback, CallOrConstructBuiltinsAssembler) |
| |
| | TF_BUILTIN (Call_ReceiverIsNotNullOrUndefined_WithFeedback, CallOrConstructBuiltinsAssembler) |
| |
| | TF_BUILTIN (Call_ReceiverIsAny_WithFeedback, CallOrConstructBuiltinsAssembler) |
| |
| | TF_BUILTIN (CallWithArrayLike, CallOrConstructBuiltinsAssembler) |
| |
| | TF_BUILTIN (CallWithArrayLike_WithFeedback, CallOrConstructBuiltinsAssembler) |
| |
| | TF_BUILTIN (CallWithSpread, CallOrConstructBuiltinsAssembler) |
| |
| | TF_BUILTIN (CallWithSpread_Baseline, CallOrConstructBuiltinsAssembler) |
| |
| | TF_BUILTIN (CallWithSpread_WithFeedback, CallOrConstructBuiltinsAssembler) |
| |
| | TF_BUILTIN (CallFunctionTemplate_Generic, CallOrConstructBuiltinsAssembler) |
| |
| | TF_BUILTIN (CallFunctionTemplate_CheckAccess, CallOrConstructBuiltinsAssembler) |
| |
| | TF_BUILTIN (CallFunctionTemplate_CheckCompatibleReceiver, CallOrConstructBuiltinsAssembler) |
| |
| | TF_BUILTIN (CallFunctionTemplate_CheckAccessAndCompatibleReceiver, CallOrConstructBuiltinsAssembler) |
| |
| | TF_BUILTIN (HandleApiCallOrConstruct, CallOrConstructBuiltinsAssembler) |
| |
| | BUILTIN (CallSitePrototypeGetColumnNumber) |
| |
| | BUILTIN (CallSitePrototypeGetEnclosingColumnNumber) |
| |
| | BUILTIN (CallSitePrototypeGetEnclosingLineNumber) |
| |
| | BUILTIN (CallSitePrototypeGetEvalOrigin) |
| |
| | BUILTIN (CallSitePrototypeGetFileName) |
| |
| | BUILTIN (CallSitePrototypeGetFunction) |
| |
| | BUILTIN (CallSitePrototypeGetFunctionName) |
| |
| | BUILTIN (CallSitePrototypeGetLineNumber) |
| |
| | BUILTIN (CallSitePrototypeGetMethodName) |
| |
| | BUILTIN (CallSitePrototypeGetPosition) |
| |
| | BUILTIN (CallSitePrototypeGetPromiseIndex) |
| |
| | BUILTIN (CallSitePrototypeGetScriptHash) |
| |
| | BUILTIN (CallSitePrototypeGetScriptNameOrSourceURL) |
| |
| | BUILTIN (CallSitePrototypeGetThis) |
| |
| | BUILTIN (CallSitePrototypeGetTypeName) |
| |
| | BUILTIN (CallSitePrototypeIsAsync) |
| |
| | BUILTIN (CallSitePrototypeIsConstructor) |
| |
| | BUILTIN (CallSitePrototypeIsEval) |
| |
| | BUILTIN (CallSitePrototypeIsNative) |
| |
| | BUILTIN (CallSitePrototypeIsPromiseAll) |
| |
| | BUILTIN (CallSitePrototypeIsToplevel) |
| |
| | BUILTIN (CallSitePrototypeToString) |
| |
| | TF_BUILTIN (MapConstructor, CollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (SetConstructor, CollectionsBuiltinsAssembler) |
| |
| void | BranchIfIterableWithOriginalKeyOrValueMapIterator (compiler::CodeAssemblerState *state, TNode< Object > iterable, TNode< Context > context, compiler::CodeAssemblerLabel *if_true, compiler::CodeAssemblerLabel *if_false) |
| |
| void | BranchIfIterableWithOriginalValueSetIterator (compiler::CodeAssemblerState *state, TNode< Object > iterable, TNode< Context > context, compiler::CodeAssemblerLabel *if_true, compiler::CodeAssemblerLabel *if_false) |
| |
| | TF_BUILTIN (MapIteratorToList, CollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (SetOrSetIteratorToList, CollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (OrderedHashTableHealIndex, CollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (MapPrototypeGet, CollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (MapPrototypeHas, CollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (MapPrototypeSet, CollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (MapPrototypeDelete, CollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (SetPrototypeAdd, CollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (SetPrototypeDelete, CollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (MapPrototypeEntries, CollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (MapPrototypeGetSize, CollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (MapPrototypeForEach, CollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (MapPrototypeKeys, CollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (MapPrototypeValues, CollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (SetPrototypeHas, CollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (SetPrototypeEntries, CollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (SetPrototypeGetSize, CollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (SetPrototypeForEach, CollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (SetPrototypeValues, CollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (FindOrderedHashMapEntry, CollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (FindOrderedHashSetEntry, CollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (WeakMapConstructor, WeakCollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (WeakSetConstructor, WeakCollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (WeakMapLookupHashIndex, WeakCollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (WeakMapGet, WeakCollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (WeakMapPrototypeHas, WeakCollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (WeakCollectionDelete, WeakCollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (WeakCollectionSet, WeakCollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (WeakMapPrototypeDelete, CodeStubAssembler) |
| |
| | TF_BUILTIN (WeakMapPrototypeSet, WeakCollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (WeakSetPrototypeAdd, WeakCollectionsBuiltinsAssembler) |
| |
| | TF_BUILTIN (WeakSetPrototypeDelete, CodeStubAssembler) |
| |
| | TF_BUILTIN (WeakSetPrototypeHas, WeakCollectionsBuiltinsAssembler) |
| |
| | BUILTIN (MapPrototypeClear) |
| |
| | BUILTIN (SetPrototypeClear) |
| |
| | BUILTIN (ConsoleTime) |
| |
| | BUILTIN (ConsoleTimeEnd) |
| |
| | BUILTIN (ConsoleTimeLog) |
| |
| | BUILTIN (ConsoleTimeStamp) |
| |
| | BUILTIN (ConsoleContext) |
| |
| | TF_BUILTIN (Construct_Baseline, CallOrConstructBuiltinsAssembler) |
| |
| | TF_BUILTIN (Construct_WithFeedback, CallOrConstructBuiltinsAssembler) |
| |
| | TF_BUILTIN (ConstructWithArrayLike, CallOrConstructBuiltinsAssembler) |
| |
| | TF_BUILTIN (ConstructWithSpread, CallOrConstructBuiltinsAssembler) |
| |
| | TF_BUILTIN (ConstructWithSpread_Baseline, CallOrConstructBuiltinsAssembler) |
| |
| | TF_BUILTIN (ConstructWithSpread_WithFeedback, CallOrConstructBuiltinsAssembler) |
| |
| | TF_BUILTIN (ConstructForwardAllArgs_Baseline, CallOrConstructBuiltinsAssembler) |
| |
| | TF_BUILTIN (ConstructForwardAllArgs_WithFeedback, CallOrConstructBuiltinsAssembler) |
| |
| | TF_BUILTIN (FastNewClosure, ConstructorBuiltinsAssembler) |
| |
| | TF_BUILTIN (FastNewObject, ConstructorBuiltinsAssembler) |
| |
| | TF_BUILTIN (ToNumber, CodeStubAssembler) |
| |
| | TF_BUILTIN (ToBigInt, CodeStubAssembler) |
| |
| | TF_BUILTIN (ToNumber_Baseline, CodeStubAssembler) |
| |
| | TF_BUILTIN (ToNumeric_Baseline, CodeStubAssembler) |
| |
| | TF_BUILTIN (PlainPrimitiveToNumber, CodeStubAssembler) |
| |
| | TF_BUILTIN (ToNumberConvertBigInt, CodeStubAssembler) |
| |
| | TF_BUILTIN (ToBigIntConvertNumber, CodeStubAssembler) |
| |
| | TF_BUILTIN (ToBooleanLazyDeoptContinuation, CodeStubAssembler) |
| |
| | TF_BUILTIN (MathRoundContinuation, CodeStubAssembler) |
| |
| | TF_BUILTIN (MathFloorContinuation, CodeStubAssembler) |
| |
| | TF_BUILTIN (MathCeilContinuation, CodeStubAssembler) |
| |
| | TF_BUILTIN (Typeof, CodeStubAssembler) |
| |
| | TF_BUILTIN (Typeof_Baseline, CodeStubAssembler) |
| |
| | BUILTIN (DataViewConstructor) |
| |
| | TF_BUILTIN (DatePrototypeGetDate, DateBuiltinsAssembler) |
| |
| | TF_BUILTIN (DatePrototypeGetDay, DateBuiltinsAssembler) |
| |
| | TF_BUILTIN (DatePrototypeGetFullYear, DateBuiltinsAssembler) |
| |
| | TF_BUILTIN (DatePrototypeGetHours, DateBuiltinsAssembler) |
| |
| | TF_BUILTIN (DatePrototypeGetMilliseconds, DateBuiltinsAssembler) |
| |
| | TF_BUILTIN (DatePrototypeGetMinutes, DateBuiltinsAssembler) |
| |
| | TF_BUILTIN (DatePrototypeGetMonth, DateBuiltinsAssembler) |
| |
| | TF_BUILTIN (DatePrototypeGetSeconds, DateBuiltinsAssembler) |
| |
| | TF_BUILTIN (DatePrototypeGetTime, DateBuiltinsAssembler) |
| |
| | TF_BUILTIN (DatePrototypeGetTimezoneOffset, DateBuiltinsAssembler) |
| |
| | TF_BUILTIN (DatePrototypeGetUTCDate, DateBuiltinsAssembler) |
| |
| | TF_BUILTIN (DatePrototypeGetUTCDay, DateBuiltinsAssembler) |
| |
| | TF_BUILTIN (DatePrototypeGetUTCFullYear, DateBuiltinsAssembler) |
| |
| | TF_BUILTIN (DatePrototypeGetUTCHours, DateBuiltinsAssembler) |
| |
| | TF_BUILTIN (DatePrototypeGetUTCMilliseconds, DateBuiltinsAssembler) |
| |
| | TF_BUILTIN (DatePrototypeGetUTCMinutes, DateBuiltinsAssembler) |
| |
| | TF_BUILTIN (DatePrototypeGetUTCMonth, DateBuiltinsAssembler) |
| |
| | TF_BUILTIN (DatePrototypeGetUTCSeconds, DateBuiltinsAssembler) |
| |
| | TF_BUILTIN (DatePrototypeValueOf, DateBuiltinsAssembler) |
| |
| | TF_BUILTIN (DatePrototypeToPrimitive, CodeStubAssembler) |
| |
| | BUILTIN (DateConstructor) |
| |
| | BUILTIN (DateNow) |
| |
| | BUILTIN (DateParse) |
| |
| | BUILTIN (DateUTC) |
| |
| | BUILTIN (DatePrototypeSetDate) |
| |
| | BUILTIN (DatePrototypeSetFullYear) |
| |
| | BUILTIN (DatePrototypeSetHours) |
| |
| | BUILTIN (DatePrototypeSetMilliseconds) |
| |
| | BUILTIN (DatePrototypeSetMinutes) |
| |
| | BUILTIN (DatePrototypeSetMonth) |
| |
| | BUILTIN (DatePrototypeSetSeconds) |
| |
| | BUILTIN (DatePrototypeSetTime) |
| |
| | BUILTIN (DatePrototypeSetUTCDate) |
| |
| | BUILTIN (DatePrototypeSetUTCFullYear) |
| |
| | BUILTIN (DatePrototypeSetUTCHours) |
| |
| | BUILTIN (DatePrototypeSetUTCMilliseconds) |
| |
| | BUILTIN (DatePrototypeSetUTCMinutes) |
| |
| | BUILTIN (DatePrototypeSetUTCMonth) |
| |
| | BUILTIN (DatePrototypeSetUTCSeconds) |
| |
| | BUILTIN (DatePrototypeToDateString) |
| |
| | BUILTIN (DatePrototypeToISOString) |
| |
| | BUILTIN (DatePrototypeToString) |
| |
| | BUILTIN (DatePrototypeToTimeString) |
| |
| | BUILTIN (DatePrototypeToUTCString) |
| |
| | BUILTIN (DatePrototypeGetYear) |
| |
| | BUILTIN (DatePrototypeSetYear) |
| |
| | BUILTIN (DatePrototypeToJson) |
| |
| | BUILTIN (DatePrototypeToTemporalInstant) |
| |
| | BUILTIN (DisposableStackConstructor) |
| |
| | BUILTIN (DisposableStackPrototypeUse) |
| |
| | BUILTIN (DisposableStackPrototypeDispose) |
| |
| | BUILTIN (DisposableStackPrototypeGetDisposed) |
| |
| | BUILTIN (DisposableStackPrototypeAdopt) |
| |
| | BUILTIN (DisposableStackPrototypeDefer) |
| |
| | BUILTIN (DisposableStackPrototypeMove) |
| |
| | BUILTIN (ErrorConstructor) |
| |
| | BUILTIN (ErrorCaptureStackTrace) |
| |
| | BUILTIN (ErrorPrototypeToString) |
| |
| | BUILTIN (ErrorIsError) |
| |
| | BUILTIN (FunctionConstructor) |
| |
| | BUILTIN (GeneratorFunctionConstructor) |
| |
| | BUILTIN (AsyncFunctionConstructor) |
| |
| | BUILTIN (AsyncGeneratorFunctionConstructor) |
| |
| | BUILTIN (FunctionPrototypeBind) |
| |
| | BUILTIN (FunctionPrototypeToString) |
| |
| | TF_BUILTIN (AsyncModuleEvaluate, GeneratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (GeneratorPrototypeNext, GeneratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (GeneratorPrototypeReturn, GeneratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (GeneratorPrototypeThrow, GeneratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (SuspendGeneratorBaseline, GeneratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (ResumeGeneratorBaseline, GeneratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (GlobalIsFinite, CodeStubAssembler) |
| |
| | TF_BUILTIN (GlobalIsNaN, CodeStubAssembler) |
| |
| | BUILTIN (GlobalDecodeURI) |
| |
| | BUILTIN (GlobalDecodeURIComponent) |
| |
| | BUILTIN (GlobalEncodeURI) |
| |
| | BUILTIN (GlobalEncodeURIComponent) |
| |
| | BUILTIN (GlobalEscape) |
| |
| | BUILTIN (GlobalUnescape) |
| |
| | BUILTIN (GlobalEval) |
| |
| | TF_BUILTIN (LoadIC_StringLength, CodeStubAssembler) |
| |
| | TF_BUILTIN (LoadIC_StringWrapperLength, CodeStubAssembler) |
| |
| | TF_BUILTIN (ElementsTransitionAndStore_InBounds, HandlerBuiltinsAssembler) |
| |
| | TF_BUILTIN (ElementsTransitionAndStore_NoTransitionGrowAndHandleCOW, HandlerBuiltinsAssembler) |
| |
| | TF_BUILTIN (ElementsTransitionAndStore_NoTransitionIgnoreTypedArrayOOB, HandlerBuiltinsAssembler) |
| |
| | TF_BUILTIN (ElementsTransitionAndStore_NoTransitionHandleCOW, HandlerBuiltinsAssembler) |
| |
| | TF_BUILTIN (StoreFastElementIC_InBounds, HandlerBuiltinsAssembler) |
| |
| | TF_BUILTIN (StoreFastElementIC_NoTransitionGrowAndHandleCOW, HandlerBuiltinsAssembler) |
| |
| | TF_BUILTIN (StoreFastElementIC_NoTransitionIgnoreTypedArrayOOB, HandlerBuiltinsAssembler) |
| |
| | TF_BUILTIN (StoreFastElementIC_NoTransitionHandleCOW, HandlerBuiltinsAssembler) |
| |
| | TF_BUILTIN (LoadIC_FunctionPrototype, CodeStubAssembler) |
| |
| | TF_BUILTIN (StoreGlobalIC_Slow, CodeStubAssembler) |
| |
| | TF_BUILTIN (KeyedLoadIC_SloppyArguments, HandlerBuiltinsAssembler) |
| |
| | TF_BUILTIN (KeyedStoreIC_SloppyArguments_InBounds, HandlerBuiltinsAssembler) |
| |
| | TF_BUILTIN (KeyedStoreIC_SloppyArguments_NoTransitionGrowAndHandleCOW, HandlerBuiltinsAssembler) |
| |
| | TF_BUILTIN (KeyedStoreIC_SloppyArguments_NoTransitionIgnoreTypedArrayOOB, HandlerBuiltinsAssembler) |
| |
| | TF_BUILTIN (KeyedStoreIC_SloppyArguments_NoTransitionHandleCOW, HandlerBuiltinsAssembler) |
| |
| | TF_BUILTIN (LoadIndexedInterceptorIC, CodeStubAssembler) |
| |
| | TF_BUILTIN (KeyedHasIC_SloppyArguments, HandlerBuiltinsAssembler) |
| |
| | TF_BUILTIN (HasIndexedInterceptorIC, CodeStubAssembler) |
| |
| | TF_BUILTIN (CopyFastSmiOrObjectElements, CodeStubAssembler) |
| |
| | TF_BUILTIN (GrowFastDoubleElements, CodeStubAssembler) |
| |
| | TF_BUILTIN (GrowFastSmiOrObjectElements, CodeStubAssembler) |
| |
| | TF_BUILTIN (ReturnReceiver, CodeStubAssembler) |
| |
| | TF_BUILTIN (DebugBreakTrampoline, CodeStubAssembler) |
| |
| | TF_BUILTIN (RecordWriteSaveFP, WriteBarrierCodeStubAssembler) |
| |
| | TF_BUILTIN (RecordWriteIgnoreFP, WriteBarrierCodeStubAssembler) |
| |
| | TF_BUILTIN (IndirectPointerBarrierSaveFP, WriteBarrierCodeStubAssembler) |
| |
| | TF_BUILTIN (IndirectPointerBarrierIgnoreFP, WriteBarrierCodeStubAssembler) |
| |
| | TF_BUILTIN (EphemeronKeyBarrierSaveFP, WriteBarrierCodeStubAssembler) |
| |
| | TF_BUILTIN (EphemeronKeyBarrierIgnoreFP, WriteBarrierCodeStubAssembler) |
| |
| | TF_BUILTIN (DeleteProperty, DeletePropertyBaseAssembler) |
| |
| | TF_BUILTIN (CopyDataPropertiesWithExcludedPropertiesOnStack, SetOrCopyDataPropertiesAssembler) |
| |
| | TF_BUILTIN (CopyDataPropertiesWithExcludedProperties, SetOrCopyDataPropertiesAssembler) |
| |
| | TF_BUILTIN (CopyDataProperties, SetOrCopyDataPropertiesAssembler) |
| |
| | TF_BUILTIN (SetDataProperties, SetOrCopyDataPropertiesAssembler) |
| |
| | TF_BUILTIN (ForInEnumerate, CodeStubAssembler) |
| |
| | TF_BUILTIN (ForInPrepare, CodeStubAssembler) |
| |
| | TF_BUILTIN (ForInFilter, CodeStubAssembler) |
| |
| | TF_BUILTIN (SameValue, CodeStubAssembler) |
| |
| | TF_BUILTIN (SameValueNumbersOnly, CodeStubAssembler) |
| |
| | TF_BUILTIN (AdaptorWithBuiltinExitFrame0, CppBuiltinsAdaptorAssembler) |
| |
| | TF_BUILTIN (AdaptorWithBuiltinExitFrame1, CppBuiltinsAdaptorAssembler) |
| |
| | TF_BUILTIN (AdaptorWithBuiltinExitFrame2, CppBuiltinsAdaptorAssembler) |
| |
| | TF_BUILTIN (AdaptorWithBuiltinExitFrame3, CppBuiltinsAdaptorAssembler) |
| |
| | TF_BUILTIN (AdaptorWithBuiltinExitFrame4, CppBuiltinsAdaptorAssembler) |
| |
| | TF_BUILTIN (AdaptorWithBuiltinExitFrame5, CppBuiltinsAdaptorAssembler) |
| |
| | TF_BUILTIN (NewHeapNumber, CodeStubAssembler) |
| |
| | TF_BUILTIN (AllocateInYoungGeneration, CodeStubAssembler) |
| |
| | TF_BUILTIN (AllocateInOldGeneration, CodeStubAssembler) |
| |
| | TF_BUILTIN (Abort, CodeStubAssembler) |
| |
| | TF_BUILTIN (AbortCSADcheck, CodeStubAssembler) |
| |
| | TF_BUILTIN (GetProperty, CodeStubAssembler) |
| |
| | TF_BUILTIN (GetPropertyWithReceiver, CodeStubAssembler) |
| |
| | TF_BUILTIN (SetProperty, CodeStubAssembler) |
| |
| | TF_BUILTIN (CreateDataProperty, CodeStubAssembler) |
| |
| | TF_BUILTIN (InstantiateAsmJs, CodeStubAssembler) |
| |
| | TF_BUILTIN (FindNonDefaultConstructorOrConstruct, CodeStubAssembler) |
| |
| | TF_BUILTIN (GetOwnPropertyDescriptor, CodeStubAssembler) |
| |
| | BUILTIN (Illegal) |
| |
| | BUILTIN (DummyBuiltin) |
| |
| | BUILTIN (IllegalInvocationThrower) |
| |
| | BUILTIN (EmptyFunction) |
| |
| | BUILTIN (EmptyFunction1) |
| |
| | BUILTIN (UnsupportedThrower) |
| |
| | BUILTIN (StrictPoisonPillThrower) |
| |
| | TF_BUILTIN (StringToLowerCaseIntl, IntlBuiltinsAssembler) |
| |
| | TF_BUILTIN (StringPrototypeToLowerCaseIntl, IntlBuiltinsAssembler) |
| |
| | TF_BUILTIN (StringPrototypeToLocaleLowerCase, IntlBuiltinsAssembler) |
| |
| | TF_BUILTIN (ListFormatPrototypeFormat, IntlBuiltinsAssembler) |
| |
| | TF_BUILTIN (ListFormatPrototypeFormatToParts, IntlBuiltinsAssembler) |
| |
| | BUILTIN (StringPrototypeToUpperCaseIntl) |
| |
| | BUILTIN (StringPrototypeNormalizeIntl) |
| |
| | BUILTIN (StringPrototypeLocaleCompareIntl) |
| |
| | BUILTIN (V8BreakIteratorSupportedLocalesOf) |
| |
| | BUILTIN (NumberFormatSupportedLocalesOf) |
| |
| | BUILTIN (NumberFormatPrototypeFormatToParts) |
| |
| | BUILTIN (DateTimeFormatPrototypeResolvedOptions) |
| |
| | BUILTIN (DateTimeFormatSupportedLocalesOf) |
| |
| | BUILTIN (DateTimeFormatPrototypeFormatToParts) |
| |
template<class T , MaybeDirectHandle< T >(*F)(Isolate *, DirectHandle< JSDateTimeFormat >, DirectHandle< Object >, DirectHandle< Object >, const char *const)>
V8_WARN_UNUSED_RESULT Tagged< Object > | DateTimeFormatRange (BuiltinArguments args, Isolate *isolate, const char *const method_name) |
| |
| | BUILTIN (DateTimeFormatPrototypeFormatRange) |
| |
| | BUILTIN (DateTimeFormatPrototypeFormatRangeToParts) |
| |
| | BUILTIN (DisplayNamesConstructor) |
| |
| | BUILTIN (DisplayNamesPrototypeResolvedOptions) |
| |
| | BUILTIN (DisplayNamesSupportedLocalesOf) |
| |
| | BUILTIN (DisplayNamesPrototypeOf) |
| |
| | BUILTIN (DurationFormatConstructor) |
| |
| | BUILTIN (DurationFormatPrototypeResolvedOptions) |
| |
| | BUILTIN (DurationFormatSupportedLocalesOf) |
| |
| | BUILTIN (DurationFormatPrototypeFormat) |
| |
| | BUILTIN (DurationFormatPrototypeFormatToParts) |
| |
| | BUILTIN (NumberFormatConstructor) |
| |
| | BUILTIN (NumberFormatPrototypeResolvedOptions) |
| |
| | BUILTIN (NumberFormatPrototypeFormatNumber) |
| |
| | BUILTIN (NumberFormatInternalFormatNumber) |
| |
template<class T , MaybeDirectHandle< T >(*F)(Isolate *, DirectHandle< JSNumberFormat >, Handle< Object >, Handle< Object >)>
V8_WARN_UNUSED_RESULT Tagged< Object > | NumberFormatRange (BuiltinArguments args, Isolate *isolate, const char *const method_name) |
| |
| | BUILTIN (NumberFormatPrototypeFormatRange) |
| |
| | BUILTIN (NumberFormatPrototypeFormatRangeToParts) |
| |
| | BUILTIN (DateTimeFormatConstructor) |
| |
| | BUILTIN (DateTimeFormatPrototypeFormat) |
| |
| | BUILTIN (DateTimeFormatInternalFormat) |
| |
| | BUILTIN (IntlGetCanonicalLocales) |
| |
| | BUILTIN (IntlSupportedValuesOf) |
| |
| | BUILTIN (ListFormatConstructor) |
| |
| | BUILTIN (ListFormatPrototypeResolvedOptions) |
| |
| | BUILTIN (ListFormatSupportedLocalesOf) |
| |
| | BUILTIN (LocaleConstructor) |
| |
| | BUILTIN (LocalePrototypeMaximize) |
| |
| | BUILTIN (LocalePrototypeMinimize) |
| |
| | BUILTIN (LocalePrototypeGetCalendars) |
| |
| | BUILTIN (LocalePrototypeGetCollations) |
| |
| | BUILTIN (LocalePrototypeGetHourCycles) |
| |
| | BUILTIN (LocalePrototypeGetNumberingSystems) |
| |
| | BUILTIN (LocalePrototypeGetTextInfo) |
| |
| | BUILTIN (LocalePrototypeGetTimeZones) |
| |
| | BUILTIN (LocalePrototypeGetWeekInfo) |
| |
| | BUILTIN (LocalePrototypeCalendars) |
| |
| | BUILTIN (LocalePrototypeCollations) |
| |
| | BUILTIN (LocalePrototypeHourCycles) |
| |
| | BUILTIN (LocalePrototypeNumberingSystems) |
| |
| | BUILTIN (LocalePrototypeTextInfo) |
| |
| | BUILTIN (LocalePrototypeTimeZones) |
| |
| | BUILTIN (LocalePrototypeWeekInfo) |
| |
| | BUILTIN (RelativeTimeFormatSupportedLocalesOf) |
| |
| | BUILTIN (RelativeTimeFormatPrototypeFormat) |
| |
| | BUILTIN (RelativeTimeFormatPrototypeFormatToParts) |
| |
| | BUILTIN (LocalePrototypeLanguage) |
| |
| | BUILTIN (LocalePrototypeScript) |
| |
| | BUILTIN (LocalePrototypeRegion) |
| |
| | BUILTIN (LocalePrototypeBaseName) |
| |
| | BUILTIN (LocalePrototypeCalendar) |
| |
| | BUILTIN (LocalePrototypeCaseFirst) |
| |
| | BUILTIN (LocalePrototypeCollation) |
| |
| | BUILTIN (LocalePrototypeFirstDayOfWeek) |
| |
| | BUILTIN (LocalePrototypeHourCycle) |
| |
| | BUILTIN (LocalePrototypeNumeric) |
| |
| | BUILTIN (LocalePrototypeNumberingSystem) |
| |
| | BUILTIN (LocalePrototypeToString) |
| |
| | BUILTIN (RelativeTimeFormatConstructor) |
| |
| | BUILTIN (RelativeTimeFormatPrototypeResolvedOptions) |
| |
| bool | IsFastLocale (Tagged< Object > maybe_locale) |
| |
| | BUILTIN (StringPrototypeToLocaleUpperCase) |
| |
| | BUILTIN (PluralRulesConstructor) |
| |
| | BUILTIN (PluralRulesPrototypeResolvedOptions) |
| |
| | BUILTIN (PluralRulesPrototypeSelect) |
| |
| | BUILTIN (PluralRulesPrototypeSelectRange) |
| |
| | BUILTIN (PluralRulesSupportedLocalesOf) |
| |
| | BUILTIN (CollatorConstructor) |
| |
| | BUILTIN (CollatorPrototypeResolvedOptions) |
| |
| | BUILTIN (CollatorSupportedLocalesOf) |
| |
| | BUILTIN (CollatorPrototypeCompare) |
| |
| | BUILTIN (CollatorInternalCompare) |
| |
| | BUILTIN (SegmentIteratorPrototypeNext) |
| |
| | BUILTIN (SegmenterConstructor) |
| |
| | BUILTIN (SegmenterSupportedLocalesOf) |
| |
| | BUILTIN (SegmenterPrototypeResolvedOptions) |
| |
| | BUILTIN (SegmenterPrototypeSegment) |
| |
| | BUILTIN (SegmentsPrototypeContaining) |
| |
| | BUILTIN (SegmentsPrototypeIterator) |
| |
| | BUILTIN (V8BreakIteratorConstructor) |
| |
| | BUILTIN (V8BreakIteratorPrototypeResolvedOptions) |
| |
| | BUILTIN (V8BreakIteratorPrototypeAdoptText) |
| |
| | BUILTIN (V8BreakIteratorInternalAdoptText) |
| |
| | BUILTIN (V8BreakIteratorPrototypeFirst) |
| |
| | BUILTIN (V8BreakIteratorInternalFirst) |
| |
| | BUILTIN (V8BreakIteratorPrototypeNext) |
| |
| | BUILTIN (V8BreakIteratorInternalNext) |
| |
| | BUILTIN (V8BreakIteratorPrototypeCurrent) |
| |
| | BUILTIN (V8BreakIteratorInternalCurrent) |
| |
| | BUILTIN (V8BreakIteratorPrototypeBreakType) |
| |
| | BUILTIN (V8BreakIteratorInternalBreakType) |
| |
| | TF_BUILTIN (IterableToList, IteratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (IterableToFixedArray, IteratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (StringListFromIterable, IteratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (StringFixedArrayFromIterable, IteratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (IterableToListMayPreserveHoles, IteratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (IterableToListConvertHoles, IteratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (IterableToListWithSymbolLookup, IteratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (GetIteratorWithFeedbackLazyDeoptContinuation, IteratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (CallIteratorWithFeedbackLazyDeoptContinuation, IteratorBuiltinsAssembler) |
| |
| | TF_BUILTIN (IterableToFixedArrayWithSymbolLookupSlow, IteratorBuiltinsAssembler) |
| |
| | BUILTIN (JsonParse) |
| |
| | BUILTIN (JsonStringify) |
| |
| | BUILTIN (JsonRawJson) |
| |
| | BUILTIN (JsonIsRawJson) |
| |
| | TF_BUILTIN (CompileLazy, LazyBuiltinsAssembler) |
| |
| | TF_BUILTIN (CompileLazyDeoptimizedCode, LazyBuiltinsAssembler) |
| |
| | TF_BUILTIN (EnqueueMicrotask, MicrotaskQueueBuiltinsAssembler) |
| |
| | TF_BUILTIN (RunMicrotasks, MicrotaskQueueBuiltinsAssembler) |
| |
| | DEF_BINOP (ShiftRightLogical_WithFeedback, Generate_ShiftRightLogicalWithFeedback) |
| |
| | DEF_BINOP_RHS_SMI (ShiftRightLogicalSmi_Baseline, Generate_ShiftRightLogicalWithFeedback) |
| |
| | TF_BUILTIN (Equal_WithFeedback, CodeStubAssembler) |
| |
| | TF_BUILTIN (StrictEqual_WithFeedback, CodeStubAssembler) |
| |
| | TF_BUILTIN (Equal_Baseline, CodeStubAssembler) |
| |
| | TF_BUILTIN (StrictEqual_Baseline, CodeStubAssembler) |
| |
| | BUILTIN (NumberPrototypeToExponential) |
| |
| | BUILTIN (NumberPrototypeToFixed) |
| |
| | BUILTIN (NumberPrototypeToLocaleString) |
| |
| | BUILTIN (NumberPrototypeToPrecision) |
| |
| | TF_BUILTIN (ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) |
| |
| | TF_BUILTIN (ObjectAssign, ObjectBuiltinsAssembler) |
| |
| | TF_BUILTIN (ObjectKeys, ObjectBuiltinsAssembler) |
| |
| | TF_BUILTIN (ObjectHasOwn, ObjectBuiltinsAssembler) |
| |
| | TF_BUILTIN (ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) |
| |
| | TF_BUILTIN (ObjectValues, ObjectEntriesValuesBuiltinsAssembler) |
| |
| | TF_BUILTIN (ObjectEntries, ObjectEntriesValuesBuiltinsAssembler) |
| |
| | TF_BUILTIN (ObjectPrototypeIsPrototypeOf, ObjectBuiltinsAssembler) |
| |
| | TF_BUILTIN (ObjectToString, ObjectBuiltinsAssembler) |
| |
| | TF_BUILTIN (ObjectCreate, ObjectBuiltinsAssembler) |
| |
| | TF_BUILTIN (ObjectIs, ObjectBuiltinsAssembler) |
| |
| | TF_BUILTIN (CreateIterResultObject, ObjectBuiltinsAssembler) |
| |
| | TF_BUILTIN (HasProperty, ObjectBuiltinsAssembler) |
| |
| | TF_BUILTIN (InstanceOf, ObjectBuiltinsAssembler) |
| |
| | TF_BUILTIN (InstanceOf_WithFeedback, ObjectBuiltinsAssembler) |
| |
| | TF_BUILTIN (InstanceOf_Baseline, ObjectBuiltinsAssembler) |
| |
| | TF_BUILTIN (OrdinaryHasInstance, ObjectBuiltinsAssembler) |
| |
| | TF_BUILTIN (CreateGeneratorObject, ObjectBuiltinsAssembler) |
| |
| | TF_BUILTIN (OrdinaryGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) |
| |
| | TF_BUILTIN (ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) |
| |
| | BUILTIN (ObjectPrototypePropertyIsEnumerable) |
| |
| | BUILTIN (ObjectDefineProperties) |
| |
| | BUILTIN (ObjectDefineProperty) |
| |
| | BUILTIN (ObjectDefineGetter) |
| |
| | BUILTIN (ObjectDefineSetter) |
| |
| | BUILTIN (ObjectLookupGetter) |
| |
| | BUILTIN (ObjectLookupSetter) |
| |
| | BUILTIN (ObjectFreeze) |
| |
| | BUILTIN (ObjectPrototypeGetProto) |
| |
| | BUILTIN (ObjectPrototypeSetProto) |
| |
| | BUILTIN (ObjectGetOwnPropertySymbols) |
| |
| | BUILTIN (ObjectIsFrozen) |
| |
| | BUILTIN (ObjectIsSealed) |
| |
| | BUILTIN (ObjectGetOwnPropertyDescriptors) |
| |
| | BUILTIN (ObjectSeal) |
| |
| | TF_BUILTIN (CallProxy, ProxiesCodeStubAssembler) |
| |
| | TF_BUILTIN (ConstructProxy, ProxiesCodeStubAssembler) |
| |
| | BUILTIN (ReflectDefineProperty) |
| |
| | BUILTIN (ReflectOwnKeys) |
| |
| | BUILTIN (ReflectSet) |
| |
| | TF_BUILTIN (RegExpExecAtom, RegExpBuiltinsAssembler) |
| |
| | TF_BUILTIN (RegExpConstructor, RegExpBuiltinsAssembler) |
| |
| | TF_BUILTIN (RegExpPrototypeCompile, RegExpBuiltinsAssembler) |
| |
| | BUILTIN (RegExpPrototypeToString) |
| |
| | BUILTIN (RegExpInputGetter) |
| |
| | BUILTIN (RegExpInputSetter) |
| |
| | BUILTIN (RegExpLastMatchGetter) |
| |
| | BUILTIN (RegExpLastParenGetter) |
| |
| | BUILTIN (RegExpLeftContextGetter) |
| |
| | BUILTIN (RegExpRightContextGetter) |
| |
| | BUILTIN (RegExpEscape) |
| |
| | TF_BUILTIN (ShadowRealmGetWrappedValue, ShadowRealmBuiltinsAssembler) |
| |
| | TF_BUILTIN (CallWrappedFunction, ShadowRealmBuiltinsAssembler) |
| |
| | TF_BUILTIN (ShadowRealmPrototypeImportValue, ShadowRealmBuiltinsAssembler) |
| |
| | TF_BUILTIN (ShadowRealmImportValueFulfilled, ShadowRealmBuiltinsAssembler) |
| |
| | TF_BUILTIN (ShadowRealmImportValueRejected, ShadowRealmBuiltinsAssembler) |
| |
| | BUILTIN (ShadowRealmConstructor) |
| |
| | BUILTIN (ShadowRealmPrototypeEvaluate) |
| |
| | BUILTIN (SharedArrayConstructor) |
| |
| | BUILTIN (SharedArrayIsSharedArray) |
| |
| | TF_BUILTIN (AtomicsLoad, SharedArrayBufferBuiltinsAssembler) |
| |
| | TF_BUILTIN (AtomicsStore, SharedArrayBufferBuiltinsAssembler) |
| |
| | TF_BUILTIN (AtomicsExchange, SharedArrayBufferBuiltinsAssembler) |
| |
| | TF_BUILTIN (AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) |
| |
| bool | AtomicIsLockFree (double size) |
| |
| | BUILTIN (AtomicsIsLockFree) |
| |
| V8_WARN_UNUSED_RESULT MaybeDirectHandle< JSTypedArray > | ValidateIntegerTypedArray (Isolate *isolate, Handle< Object > object, const char *method_name, bool only_int32_and_big_int64=false) |
| |
| V8_WARN_UNUSED_RESULT Maybe< size_t > | ValidateAtomicAccess (Isolate *isolate, DirectHandle< JSTypedArray > typed_array, Handle< Object > request_index) |
| |
| | BUILTIN (AtomicsNotify) |
| |
| Tagged< Object > | DoWait (Isolate *isolate, FutexEmulation::WaitMode mode, Handle< Object > array, Handle< Object > index, Handle< Object > value, Handle< Object > timeout) |
| |
| | BUILTIN (AtomicsWait) |
| |
| | BUILTIN (AtomicsWaitAsync) |
| |
| | BUILTIN (AtomicsPause) |
| |
| | TF_BUILTIN (StringAdd_CheckNone, StringBuiltinsAssembler) |
| |
| | TF_BUILTIN (SubString, StringBuiltinsAssembler) |
| |
| | TF_BUILTIN (StringEqual, StringBuiltinsAssembler) |
| |
| | TF_BUILTIN (StringLessThan, StringBuiltinsAssembler) |
| |
| | TF_BUILTIN (StringLessThanOrEqual, StringBuiltinsAssembler) |
| |
| | TF_BUILTIN (StringGreaterThan, StringBuiltinsAssembler) |
| |
| | TF_BUILTIN (StringCompare, StringBuiltinsAssembler) |
| |
| | TF_BUILTIN (StringGreaterThanOrEqual, StringBuiltinsAssembler) |
| |
| | TF_BUILTIN (StringFromCodePointAt, StringBuiltinsAssembler) |
| |
| | TF_BUILTIN (StringFromCharCode, StringBuiltinsAssembler) |
| |
| | TF_BUILTIN (StringPrototypeReplace, StringBuiltinsAssembler) |
| |
| | TF_BUILTIN (StringPrototypeMatchAll, StringBuiltinsAssembler) |
| |
| | TF_BUILTIN (StringPrototypeSplit, StringBuiltinsAssembler) |
| |
| | TF_BUILTIN (StringSubstring, StringBuiltinsAssembler) |
| |
| | BUILTIN (StringFromCodePoint) |
| |
| | BUILTIN (StringPrototypeLastIndexOf) |
| |
| | BUILTIN (StringPrototypeLocaleCompare) |
| |
| | BUILTIN (StringPrototypeNormalize) |
| |
| | BUILTIN (StringPrototypeToLocaleLowerCase) |
| |
| | BUILTIN (StringPrototypeToLowerCase) |
| |
| | BUILTIN (StringPrototypeToUpperCase) |
| |
| | BUILTIN (StringRaw) |
| |
| | BUILTIN (SharedSpaceJSObjectHasInstance) |
| |
| | BUILTIN (SharedStructTypeConstructor) |
| |
| | BUILTIN (SharedStructConstructor) |
| |
| | BUILTIN (SharedStructTypeIsSharedStruct) |
| |
| | BUILTIN (AtomicsMutexIsMutex) |
| |
| | BUILTIN (AtomicsConditionIsCondition) |
| |
| | BUILTIN (SymbolConstructor) |
| |
| | BUILTIN (SymbolFor) |
| |
| | BUILTIN (SymbolKeyFor) |
| |
| | TF_BUILTIN (TemporalInstantFixedArrayFromIterable, TemporalBuiltinsAssembler) |
| |
| | TF_BUILTIN (TemporalCalendarPrototypeFields, TemporalBuiltinsAssembler) |
| |
| | BUILTIN (TemporalPlainDateConstructor) |
| |
| | BUILTIN (TemporalPlainTimeConstructor) |
| |
| | BUILTIN (TemporalPlainDateTimeConstructor) |
| |
| | TEMPORAL_GET_BY_INVOKE_CALENDAR_METHOD (PlainDateTime, MonthsInYear, monthsInYear) BUILTIN(TemporalPlainYearMonthConstructor) |
| |
| | TEMPORAL_GET_BY_INVOKE_CALENDAR_METHOD (PlainYearMonth, MonthsInYear, monthsInYear) BUILTIN(TemporalPlainMonthDayConstructor) |
| |
| | BUILTIN (TemporalZonedDateTimeConstructor) |
| |
| | TEMPORAL_GET_NUMBER_AFTER_DIVID (ZonedDateTime, EpochSeconds, nanoseconds, 1000000000, epochSeconds) TEMPORAL_GET_NUMBER_AFTER_DIVID(ZonedDateTime |
| |
| epochMilliseconds | TEMPORAL_GET_BIGINT_AFTER_DIVID (ZonedDateTime, EpochMicroseconds, nanoseconds, 1000, epochMicroseconds) TEMPORAL_ZONED_DATE_TIME_GET_INT_BY_FORWARD_TIME_ZONE(Millisecond |
| |
| epochMilliseconds iso_millisecond | TEMPORAL_ZONED_DATE_TIME_GET_INT_BY_FORWARD_TIME_ZONE (Microsecond, iso_microsecond) TEMPORAL_ZONED_DATE_TIME_GET_INT_BY_FORWARD_TIME_ZONE(Nanosecond |
| |
| epochMilliseconds iso_millisecond iso_nanosecond | BUILTIN (TemporalDurationConstructor) |
| |
| | BUILTIN (TemporalDurationCompare) |
| |
| | TEMPORAL_GET_NUMBER_AFTER_DIVID (Instant, EpochSeconds, nanoseconds, 1000000000, epochSeconds) TEMPORAL_GET_NUMBER_AFTER_DIVID(Instant |
| |
| epochMilliseconds | TEMPORAL_GET_BIGINT_AFTER_DIVID (Instant, EpochMicroseconds, nanoseconds, 1000, epochMicroseconds) BUILTIN(TemporalCalendarPrototypeId) |
| |
| | BUILTIN (TemporalCalendarPrototypeToJSON) |
| |
| | BUILTIN (TemporalCalendarPrototypeToString) |
| |
| | BUILTIN (TemporalCalendarFrom) |
| |
| | TEMPORAL_PROTOTYPE_METHOD1 (TimeZone, GetOffsetNanosecondsFor, getOffsetNanosecondsFor) TEMPORAL_PROTOTYPE_METHOD1(TimeZone |
| |
| getPossibleInstantFor | TEMPORAL_PROTOTYPE_METHOD1 (TimeZone, GetPreviousTransition, getPreviousTransition) BUILTIN(TemporalTimeZonePrototypeId) |
| |
| | BUILTIN (TemporalTimeZonePrototypeToJSON) |
| |
| | BUILTIN (TemporalTimeZonePrototypeToString) |
| |
| | BUILTIN (TemporalTimeZoneFrom) |
| |
| | BUILTIN (IsTraceCategoryEnabled) |
| |
| | BUILTIN (Trace) |
| |
| | TF_BUILTIN (TypedArrayBaseConstructor, TypedArrayBuiltinsAssembler) |
| |
| | TF_BUILTIN (TypedArrayConstructor, TypedArrayBuiltinsAssembler) |
| |
| | TF_BUILTIN (TypedArrayPrototypeByteLength, TypedArrayBuiltinsAssembler) |
| |
| | TF_BUILTIN (TypedArrayPrototypeByteOffset, TypedArrayBuiltinsAssembler) |
| |
| | TF_BUILTIN (TypedArrayPrototypeLength, TypedArrayBuiltinsAssembler) |
| |
| | TF_BUILTIN (TypedArrayPrototypeToStringTag, TypedArrayBuiltinsAssembler) |
| |
| | BUILTIN (TypedArrayPrototypeBuffer) |
| |
| | BUILTIN (TypedArrayPrototypeCopyWithin) |
| |
| | BUILTIN (TypedArrayPrototypeFill) |
| |
| | BUILTIN (TypedArrayPrototypeIncludes) |
| |
| | BUILTIN (TypedArrayPrototypeIndexOf) |
| |
| | BUILTIN (TypedArrayPrototypeLastIndexOf) |
| |
| | BUILTIN (TypedArrayPrototypeReverse) |
| |
| | BUILTIN (Uint8ArrayFromBase64) |
| |
| | BUILTIN (Uint8ArrayPrototypeToBase64) |
| |
| | BUILTIN (Uint8ArrayFromHex) |
| |
| | BUILTIN (Uint8ArrayPrototypeToHex) |
| |
| | TF_BUILTIN (WasmFloat32ToNumber, WasmBuiltinsAssembler) |
| |
| | TF_BUILTIN (WasmFloat64ToNumber, WasmBuiltinsAssembler) |
| |
| | TF_BUILTIN (WasmFloat64ToString, WasmBuiltinsAssembler) |
| |
| | TF_BUILTIN (JSToWasmLazyDeoptContinuation, WasmBuiltinsAssembler) |
| |
| | TF_BUILTIN (WasmToJsWrapperCSA, WasmBuiltinsAssembler) |
| |
| | TF_BUILTIN (WasmToJsWrapperInvalidSig, WasmBuiltinsAssembler) |
| |
| | BUILTIN (FinalizationRegistryUnregister) |
| |
| Builtin | ExampleBuiltinForTorqueFunctionPointerType (size_t function_pointer_type_id) |
| |
| template<typename T > |
| static constexpr T | FirstFromVarArgs (T x,...) noexcept |
| |
| V8_INLINE bool | IsValidTieringBuiltin (TieringBuiltin builtin) |
| |
| V8_INLINE constexpr bool | operator< (Builtin a, Builtin b) |
| |
| V8_INLINE Builtin | operator++ (Builtin &builtin) |
| |
| V8_INLINE constexpr bool | IsInterpreterTrampolineBuiltin (Builtin builtin_id) |
| |
| V8_INLINE constexpr bool | IsBaselineTrampolineBuiltin (Builtin builtin_id) |
| |
| static void | AssertCodeIsBaseline (MacroAssembler *masm, Register code, Register scratch) |
| |
| static void | GetSharedFunctionInfoBytecodeOrBaseline (MacroAssembler *masm, Register sfi, Register bytecode, Register scratch1, Label *is_baseline, Label *is_unavailable) |
| |
| static void | Generate_CheckStackOverflow (MacroAssembler *masm, Register argc, Register scratch1, Register scratch2) |
| |
| static void | Generate_JSEntryTrampolineHelper (MacroAssembler *masm, bool is_construct) |
| |
| static void | LeaveInterpreterFrame (MacroAssembler *masm, Register scratch1, Register scratch2) |
| |
| static void | AdvanceBytecodeOffsetOrReturn (MacroAssembler *masm, Register bytecode_array, Register bytecode_offset, Register bytecode, Register scratch1, Register scratch2, Register scratch3, Label *if_return) |
| |
| static void | GenerateInterpreterPushArgs (MacroAssembler *masm, Register num_args, Register start_address, Register scratch) |
| |
| static void | Generate_InterpreterEnterBytecode (MacroAssembler *masm) |
| |
| int | EncodeConstantPoolLength (int length) |
| |
| int | DecodeConstantPoolLength (int instr) |
| |
| Condition | NegateCondition (Condition cond) |
| |
| int | NeonU (NeonDataType dt) |
| |
| int | NeonSz (NeonDataType dt) |
| |
| NeonDataType | NeonSizeToDataType (NeonSize size) |
| |
| NeonSize | NeonDataTypeToSize (NeonDataType dt) |
| |
| Hint | NegateHint (Hint ignored) |
| |
| MemOperand | FieldMemOperand (Register object, int offset) |
| |
| Register | GetRegisterThatIsNotOneOf (Register reg1, Register reg2=no_reg, Register reg3=no_reg, Register reg4=no_reg, Register reg5=no_reg, Register reg6=no_reg) |
| |
| MemOperand | ExitFrameStackSlotOperand (int offset) |
| |
| MemOperand | ExitFrameCallerStackSlotOperand (int index) |
| |
| void | CallApiFunctionAndReturn (MacroAssembler *masm, bool with_profiling, Register function_address, ExternalReference thunk_ref, Register thunk_arg, int slots_to_drop_on_return, MemOperand *argc_operand, MemOperand return_value_operand) |
| |
| | ASSERT_TRIVIALLY_COPYABLE (Register) |
| |
| Register | ReassignRegister (Register &source) |
| |
| constexpr int | ArgumentPaddingSlots (int argument_count) |
| |
| | ASSERT_TRIVIALLY_COPYABLE (SwVfpRegister) |
| |
| | ASSERT_TRIVIALLY_COPYABLE (DwVfpRegister) |
| |
| | ASSERT_TRIVIALLY_COPYABLE (RegList) |
| |
| | ASSERT_TRIVIALLY_COPYABLE (DoubleRegList) |
| |
| | ADD_SUB_OP_LIST (ADD_SUB_IMMEDIATE) |
| |
| | ADD_SUB_OP_LIST (ADD_SUB_SHIFTED) |
| |
| | ADD_SUB_OP_LIST (ADD_SUB_EXTENDED) |
| |
| | LOAD_STORE_PAIR_OP_LIST (LOAD_STORE_PAIR) |
| |
| | LOAD_STORE_PAIR_OP_LIST (LOAD_STORE_PAIR_POST_INDEX) |
| |
| | LOAD_STORE_PAIR_OP_LIST (LOAD_STORE_PAIR_PRE_INDEX) |
| |
| | LOAD_STORE_PAIR_OP_LIST (LOAD_STORE_PAIR_OFFSET) |
| |
| | LOAD_STORE_OP_LIST (LOAD_STORE_UNSCALED) |
| |
| | LOAD_STORE_OP_LIST (LOAD_STORE) |
| |
| | LOAD_STORE_OP_LIST (LOAD_STORE_POST_INDEX) |
| |
| | LOAD_STORE_OP_LIST (LOAD_STORE_PRE_INDEX) |
| |
| | LOAD_STORE_OP_LIST (LOAD_STORE_UNSIGNED_OFFSET) |
| |
| | LOAD_STORE_OP_LIST (LOAD_STORE_REGISTER_OFFSET) |
| |
| | ATOMIC_MEMORY_SIMPLE_OPC_LIST (ATOMIC_MEMORY_SIMPLE) |
| |
| unsigned | CalcLSDataSizeLog2 (LoadStoreOp op) |
| |
| unsigned | CalcLSPairDataSize (LoadStorePairOp op) |
| |
| | ATOMIC_MEMORY_SIMPLE_MACRO_LIST (ATOMIC_MEMORY_LOAD_MACRO_MODES, DEFINE_LOAD_FUNCTION, Ld, ld) ATOMIC_MEMORY_SIMPLE_MACRO_LIST(ATOMIC_MEMORY_STORE_MACRO_MODES |
| |
| BranchType | InvertBranchType (BranchType type) |
| |
| | ASSERT_TRIVIALLY_COPYABLE (CPURegister) |
| |
| template<typename RegT > |
| RegT | ReassignRegister (RegT &source) |
| |
| VectorFormat | VectorFormatHalfWidth (VectorFormat vform) |
| |
| VectorFormat | VectorFormatDoubleWidth (VectorFormat vform) |
| |
| VectorFormat | VectorFormatDoubleLanes (VectorFormat vform) |
| |
| VectorFormat | VectorFormatHalfLanes (VectorFormat vform) |
| |
| VectorFormat | ScalarFormatFromLaneSize (int lanesize) |
| |
| VectorFormat | VectorFormatHalfWidthDoubleLanes (VectorFormat vform) |
| |
| VectorFormat | VectorFormatFillQ (int laneSize) |
| |
| VectorFormat | VectorFormatFillQ (VectorFormat vform) |
| |
| VectorFormat | ScalarFormatFromFormat (VectorFormat vform) |
| |
| V8_EXPORT_PRIVATE unsigned | RegisterSizeInBitsFromFormat (VectorFormat vform) |
| |
| unsigned | RegisterSizeInBytesFromFormat (VectorFormat vform) |
| |
| int | LaneSizeInBytesFromFormat (VectorFormat vform) |
| |
| unsigned | LaneSizeInBitsFromFormat (VectorFormat vform) |
| |
| int | LaneSizeInBytesLog2FromFormat (VectorFormat vform) |
| |
| V8_EXPORT_PRIVATE int | LaneCountFromFormat (VectorFormat vform) |
| |
| int | MaxLaneCountFromFormat (VectorFormat vform) |
| |
| V8_EXPORT_PRIVATE bool | IsVectorFormat (VectorFormat vform) |
| |
| int64_t | MaxIntFromFormat (VectorFormat vform) |
| |
| int64_t | MinIntFromFormat (VectorFormat vform) |
| |
| uint64_t | MaxUintFromFormat (VectorFormat vform) |
| |
| | ASSERT_TRIVIALLY_COPYABLE (VRegister) |
| |
| | DEFINE_REGISTER (Register, wsp, kSPRegInternalCode, kWRegSizeInBits) |
| |
| | DEFINE_REGISTER (Register, sp, kSPRegInternalCode, kXRegSizeInBits) |
| |
| | ALIAS_REGISTER (Register, kStackPointerRegister, sp) |
| |
| | ALIAS_REGISTER (VRegister, v8_, v8) |
| |
| | ALIAS_REGISTER (Register, ip0, x16) |
| |
| | ALIAS_REGISTER (Register, ip1, x17) |
| |
| | ALIAS_REGISTER (Register, wip0, w16) |
| |
| | ALIAS_REGISTER (Register, wip1, w17) |
| |
| | ALIAS_REGISTER (Register, kRootRegister, x26) |
| |
| | ALIAS_REGISTER (Register, rr, x26) |
| |
| | ALIAS_REGISTER (Register, kPtrComprCageBaseRegister, no_reg) |
| |
| | ALIAS_REGISTER (Register, cp, x27) |
| |
| | ALIAS_REGISTER (Register, fp, x29) |
| |
| | ALIAS_REGISTER (Register, lr, x30) |
| |
| | ALIAS_REGISTER (Register, xzr, x31) |
| |
| | ALIAS_REGISTER (Register, wzr, w31) |
| |
| | ALIAS_REGISTER (Register, padreg, x31) |
| |
| | ALIAS_REGISTER (VRegister, fp_zero, d15) |
| |
| | ALIAS_REGISTER (VRegister, fp_fixed1, d27) |
| |
| | ALIAS_REGISTER (VRegister, fp_fixed2, d28) |
| |
| | ALIAS_REGISTER (VRegister, fp_scratch, d30) |
| |
| | ALIAS_REGISTER (VRegister, fp_scratch1, d30) |
| |
| | ALIAS_REGISTER (VRegister, fp_scratch2, d31) |
| |
| V8_EXPORT_PRIVATE bool | AreAliased (const CPURegister ®1, const CPURegister ®2, const CPURegister ®3=NoReg, const CPURegister ®4=NoReg, const CPURegister ®5=NoReg, const CPURegister ®6=NoReg, const CPURegister ®7=NoReg, const CPURegister ®8=NoReg) |
| |
| V8_EXPORT_PRIVATE bool | AreSameSizeAndType (const CPURegister ®1, const CPURegister ®2=NoCPUReg, const CPURegister ®3=NoCPUReg, const CPURegister ®4=NoCPUReg, const CPURegister ®5=NoCPUReg, const CPURegister ®6=NoCPUReg, const CPURegister ®7=NoCPUReg, const CPURegister ®8=NoCPUReg) |
| |
| bool | AreSameFormat (const Register ®1, const Register ®2, const Register ®3=NoReg, const Register ®4=NoReg) |
| |
| bool | AreSameFormat (const VRegister ®1, const VRegister ®2, const VRegister ®3=NoVReg, const VRegister ®4=NoVReg) |
| |
| V8_EXPORT_PRIVATE bool | AreConsecutive (const CPURegister ®1, const CPURegister ®2, const CPURegister ®3=NoReg, const CPURegister ®4=NoReg) |
| |
| bool | AreEven (const CPURegister ®1, const CPURegister ®2, const CPURegister ®3=NoReg, const CPURegister ®4=NoReg, const CPURegister ®5=NoReg, const CPURegister ®6=NoReg, const CPURegister ®7=NoReg, const CPURegister ®8=NoReg) |
| |
| uint32_t | float_sign (float val) |
| |
| uint32_t | float_exp (float val) |
| |
| uint32_t | float_mantissa (float val) |
| |
| uint32_t | double_sign (double val) |
| |
| uint32_t | double_exp (double val) |
| |
| uint64_t | double_mantissa (double val) |
| |
| float | float_pack (uint32_t sign, uint32_t exp, uint32_t mantissa) |
| |
| double | double_pack (uint64_t sign, uint64_t exp, uint64_t mantissa) |
| |
| int | float16classify (float16 value) |
| |
| static int | CountLeadingZeros (uint64_t value, int width) |
| |
| int | CountLeadingSignBits (int64_t value, int width) |
| |
| V8_EXPORT_PRIVATE int | CountSetBits (uint64_t value, int width) |
| |
| int | LowestSetBitPosition (uint64_t value) |
| |
| int | HighestSetBitPosition (uint64_t value) |
| |
| static uint64_t | LargestPowerOf2Divisor (uint64_t value) |
| |
| int | MaskToBit (uint64_t mask) |
| |
| template<typename T > |
| T | ReverseBytes (T value, int block_bytes_log2) |
| |
| bool | IsSignallingNaN (double num) |
| |
| bool | IsSignallingNaN (float num) |
| |
| bool | IsSignallingNaN (float16 num) |
| |
| template<typename T > |
| bool | IsQuietNaN (T num) |
| |
| double | ToQuietNaN (double num) |
| |
| float | ToQuietNaN (float num) |
| |
| double | FusedMultiplyAdd (double op1, double op2, double a) |
| |
| float | FusedMultiplyAdd (float op1, float op2, float a) |
| |
| std::unique_ptr< AssemblerBuffer > | ExternalAssemblerBuffer (void *start, int size) |
| |
| std::unique_ptr< AssemblerBuffer > | NewAssemblerBuffer (int size) |
| |
| size_t | hash_value (AtomicMemoryOrder order) |
| |
| std::ostream & | operator<< (std::ostream &os, AtomicMemoryOrder order) |
| |
| const char * | GetBailoutReason (BailoutReason reason) |
| |
| const char * | GetAbortReason (AbortReason reason) |
| |
| bool | IsValidAbortReason (int reason_id) |
| |
| | ASSERT_TRIVIALLY_COPYABLE (CodeReference) |
| |
| void | SetScriptFieldsFromDetails (Isolate *isolate, Tagged< Script > script, const ScriptDetails &script_details, DisallowGarbageCollection *no_gc) |
| |
| bool | ModifyCodeGenerationFromStrings (Isolate *isolate, DirectHandle< NativeContext > context, Handle< i::Object > *source, bool is_code_like) |
| |
| static ExternalReference::Type | BuiltinCallTypeForResultSize (int result_size) |
| |
| uint32_t | fp64_to_fp16_raw_bits (double input) |
| |
| uint32_t | fp64_raw_bits_to_fp16_raw_bits_for_32bit_arch (uint32_t hi, uint32_t lo) |
| |
| uint32_t | fp16_raw_bits_ieee_to_fp32_raw_bits (uint32_t input) |
| |
| | FUNCTION_REFERENCE (ieee754_fp64_raw_bits_to_fp16_raw_bits_for_32bit_arch, fp64_raw_bits_to_fp16_raw_bits_for_32bit_arch) FUNCTION_REFERENCE_WITH_TYPE(ieee754_fp64_to_fp16_raw_bits |
| |
| BUILTIN_INT_FP_CALL | FUNCTION_REFERENCE (ieee754_fp16_raw_bits_to_fp32_raw_bits, fp16_raw_bits_ieee_to_fp32_raw_bits) FUNCTION_REFERENCE(write_barrier_marking_from_code_function |
| |
| BUILTIN_INT_FP_CALL WriteBarrier::MarkingFromCode | FUNCTION_REFERENCE (write_barrier_indirect_pointer_marking_from_code_function, WriteBarrier::IndirectPointerMarkingFromCode) FUNCTION_REFERENCE(write_barrier_shared_marking_from_code_function |
| |
| BUILTIN_INT_FP_CALL WriteBarrier::MarkingFromCode WriteBarrier::SharedMarkingFromCode | FUNCTION_REFERENCE (shared_barrier_from_code_function, WriteBarrier::SharedFromCode) FUNCTION_REFERENCE(insert_remembered_set_function |
| |
| | FUNCTION_REFERENCE (delete_handle_scope_extensions, HandleScope::DeleteExtensions) FUNCTION_REFERENCE(ephemeron_key_write_barrier_function |
| |
| WriteBarrier::EphemeronKeyWriteBarrierFromCode ExternalPointerHandle | AllocateAndInitializeYoungExternalPointerTableEntry (Isolate *isolate, Address pointer) |
| |
| | FUNCTION_REFERENCE (allocate_and_initialize_young_external_pointer_table_entry, AllocateAndInitializeYoungExternalPointerTableEntry) ExternalReference ExternalReference |
| |
| | FUNCTION_REFERENCE (compute_output_frames_function, Deoptimizer::ComputeOutputFrames) namespace |
| |
| static void | f64_acos_wrapper (Address data) |
| |
| static void | f64_asin_wrapper (Address data) |
| |
| static void | f64_mod_wrapper (Address data) |
| |
| | FUNCTION_REFERENCE (baseline_pc_for_next_executed_bytecode, BaselinePCForNextExecutedBytecode) ExternalReference ExternalReference |
| |
| | UNREACHABLE () |
| |
| | FUNCTION_REFERENCE (re_match_for_call_from_js, IrregexpInterpreter::MatchForCallFromJs) FUNCTION_REFERENCE(re_experimental_match_for_call_from_js |
| |
| ExperimentalRegExp::MatchForCallFromJs | FUNCTION_REFERENCE (re_case_insensitive_compare_unicode, NativeRegExpMacroAssembler::CaseInsensitiveCompareUnicode) FUNCTION_REFERENCE(re_case_insensitive_compare_non_unicode |
| |
| ExperimentalRegExp::MatchForCallFromJs NativeRegExpMacroAssembler::CaseInsensitiveCompareNonUnicode | FUNCTION_REFERENCE (re_is_character_in_range_array, RegExpMacroAssembler::IsCharacterInRangeArray) ExternalReference ExternalReference |
| |
| | FUNCTION_REFERENCE_WITH_TYPE (ieee754_acos_function, base::ieee754::acos, BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_acosh_function |
| |
| BUILTIN_FP_CALL | FUNCTION_REFERENCE_WITH_TYPE (ieee754_asin_function, base::ieee754::asin, BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_asinh_function |
| |
| BUILTIN_FP_CALL BUILTIN_FP_CALL | FUNCTION_REFERENCE_WITH_TYPE (ieee754_atan_function, base::ieee754::atan, BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_atanh_function |
| |
| BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL | FUNCTION_REFERENCE_WITH_TYPE (ieee754_atan2_function, base::ieee754::atan2, BUILTIN_FP_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_cbrt_function |
| |
| BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL | FUNCTION_REFERENCE_WITH_TYPE (ieee754_cosh_function, base::ieee754::cosh, BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_exp_function |
| |
| BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL | FUNCTION_REFERENCE_WITH_TYPE (ieee754_expm1_function, base::ieee754::expm1, BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_log_function |
| |
| BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL | FUNCTION_REFERENCE_WITH_TYPE (ieee754_log1p_function, base::ieee754::log1p, BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_log10_function |
| |
| BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL | FUNCTION_REFERENCE_WITH_TYPE (ieee754_log2_function, base::ieee754::log2, BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_sinh_function |
| |
| BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL | FUNCTION_REFERENCE_WITH_TYPE (ieee754_tan_function, base::ieee754::tan, BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_tanh_function |
| |
| BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL | FUNCTION_REFERENCE_WITH_TYPE (ieee754_pow_function, math::pow, BUILTIN_FP_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_sin_function |
| |
| BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL BUILTIN_FP_CALL | FUNCTION_REFERENCE_WITH_TYPE (ieee754_cos_function, base::ieee754::cos, BUILTIN_FP_CALL) void *libc_memchr(void *string |
| |
| void * | libc_memcpy (void *dest, const void *src, size_t n) |
| |
| void * | libc_memmove (void *dest, const void *src, size_t n) |
| |
| void * | libc_memset (void *dest, int value, size_t n) |
| |
| void | relaxed_memcpy (volatile base::Atomic8 *dest, volatile const base::Atomic8 *src, size_t n) |
| |
| void | relaxed_memmove (volatile base::Atomic8 *dest, volatile const base::Atomic8 *src, size_t n) |
| |
| | FUNCTION_REFERENCE (jsarray_array_join_concat_to_sequential_string, JSArray::ArrayJoinConcatToSequentialString) ExternalReference ExternalReference |
| |
| | FUNCTION_REFERENCE (external_one_byte_string_get_chars, ExternalOneByteStringGetChars) FUNCTION_REFERENCE(external_two_byte_string_get_chars |
| |
| Address | GetOrCreateHash (Isolate *isolate, Address raw_key) |
| |
| static Address | JSReceiverCreateIdentityHash (Isolate *isolate, Address raw_key) |
| |
| | FUNCTION_REFERENCE (jsreceiver_create_identity_hash, JSReceiverCreateIdentityHash) static uint32_t ComputeSeededIntegerHash(Isolate *isolate |
| |
| return | ComputeSeededHash (static_cast< uint32_t >(key), HashSeed(isolate)) |
| |
| template<typename Dictionary , LookupMode mode> |
| static size_t | NameDictionaryLookupForwardedString (Isolate *isolate, Address raw_dict, Address raw_key) |
| |
| | FUNCTION_REFERENCE (name_dictionary_lookup_forwarded_string,(NameDictionaryLookupForwardedString< NameDictionary, kFindExisting >)) FUNCTION_REFERENCE(name_dictionary_find_insertion_entry_forwarded_string |
| |
| NameDictionaryLookupForwardedString< NameDictionary, kFindInsertionEntry > | FUNCTION_REFERENCE (global_dictionary_lookup_forwarded_string,(NameDictionaryLookupForwardedString< GlobalDictionary, kFindExisting >)) FUNCTION_REFERENCE(global_dictionary_find_insertion_entry_forwarded_string |
| |
| NameDictionaryLookupForwardedString< NameDictionary, kFindInsertionEntry > NameDictionaryLookupForwardedString< GlobalDictionary, kFindInsertionEntry > | FUNCTION_REFERENCE (name_to_index_hashtable_lookup_forwarded_string,(NameDictionaryLookupForwardedString< NameToIndexHashTable, kFindExisting >)) FUNCTION_REFERENCE(name_to_index_hashtable_find_insertion_entry_forwarded_string |
| |
| NameDictionaryLookupForwardedString< NameDictionary, kFindInsertionEntry > NameDictionaryLookupForwardedString< GlobalDictionary, kFindInsertionEntry > NameDictionaryLookupForwardedString< NameToIndexHashTable, kFindInsertionEntry > | FUNCTION_REFERENCE (copy_fast_number_jsarray_elements_to_typed_array, CopyFastNumberJSArrayElementsToTypedArray) FUNCTION_REFERENCE(copy_typed_array_elements_to_typed_array |
| |
| NameDictionaryLookupForwardedString< NameDictionary, kFindInsertionEntry > NameDictionaryLookupForwardedString< GlobalDictionary, kFindInsertionEntry > NameDictionaryLookupForwardedString< NameToIndexHashTable, kFindInsertionEntry > CopyTypedArrayElementsToTypedArray | FUNCTION_REFERENCE (try_string_to_index_or_lookup_existing, StringTable::TryStringToIndexOrLookupExisting) FUNCTION_REFERENCE(string_from_forward_table |
| |
| NameDictionaryLookupForwardedString< NameDictionary, kFindInsertionEntry > NameDictionaryLookupForwardedString< GlobalDictionary, kFindInsertionEntry > NameDictionaryLookupForwardedString< NameToIndexHashTable, kFindInsertionEntry > CopyTypedArrayElementsToTypedArray StringForwardingTable::GetForwardStringAddress | FUNCTION_REFERENCE (raw_hash_from_forward_table, StringForwardingTable::GetRawHashStatic) FUNCTION_REFERENCE(array_indexof_includes_smi_or_object |
| |
| NameDictionaryLookupForwardedString< NameDictionary, kFindInsertionEntry > NameDictionaryLookupForwardedString< GlobalDictionary, kFindInsertionEntry > NameDictionaryLookupForwardedString< NameToIndexHashTable, kFindInsertionEntry > CopyTypedArrayElementsToTypedArray StringForwardingTable::GetForwardStringAddress static ArrayIndexOfIncludesSmiOrObject Address | LexicographicCompareWrapper (Isolate *isolate, Address smi_x, Address smi_y) |
| |
| | FUNCTION_REFERENCE (smi_lexicographic_compare_function, LexicographicCompareWrapper) uint32_t HasUnpairedSurrogate(const uint16_t *code_units |
| |
| return unibrow::Utf16::HasUnpairedSurrogate(code_units, length) ? kTrue void | ReplaceUnpairedSurrogates (const uint16_t *source_code_units, uint16_t *dest_code_units, size_t length) |
| |
| | FUNCTION_REFERENCE (mutable_big_int_absolute_add_and_canonicalize_function, MutableBigInt_AbsoluteAddAndCanonicalize) FUNCTION_REFERENCE(mutable_big_int_absolute_compare_function |
| |
| MutableBigInt_AbsoluteCompare | FUNCTION_REFERENCE (mutable_big_int_absolute_sub_and_canonicalize_function, MutableBigInt_AbsoluteSubAndCanonicalize) FUNCTION_REFERENCE(mutable_big_int_absolute_mul_and_canonicalize_function |
| |
| MutableBigInt_AbsoluteCompare MutableBigInt_AbsoluteMulAndCanonicalize | FUNCTION_REFERENCE (mutable_big_int_absolute_div_and_canonicalize_function, MutableBigInt_AbsoluteDivAndCanonicalize) FUNCTION_REFERENCE(mutable_big_int_absolute_mod_and_canonicalize_function |
| |
| MutableBigInt_AbsoluteCompare MutableBigInt_AbsoluteMulAndCanonicalize MutableBigInt_AbsoluteModAndCanonicalize | FUNCTION_REFERENCE (mutable_big_int_bitwise_and_pp_and_canonicalize_function, MutableBigInt_BitwiseAndPosPosAndCanonicalize) FUNCTION_REFERENCE(mutable_big_int_bitwise_and_nn_and_canonicalize_function |
| |
| MutableBigInt_AbsoluteCompare MutableBigInt_AbsoluteMulAndCanonicalize MutableBigInt_AbsoluteModAndCanonicalize MutableBigInt_BitwiseAndNegNegAndCanonicalize | FUNCTION_REFERENCE (mutable_big_int_bitwise_and_pn_and_canonicalize_function, MutableBigInt_BitwiseAndPosNegAndCanonicalize) FUNCTION_REFERENCE(mutable_big_int_bitwise_or_pp_and_canonicalize_function |
| |
| MutableBigInt_AbsoluteCompare MutableBigInt_AbsoluteMulAndCanonicalize MutableBigInt_AbsoluteModAndCanonicalize MutableBigInt_BitwiseAndNegNegAndCanonicalize MutableBigInt_BitwiseOrPosPosAndCanonicalize | FUNCTION_REFERENCE (mutable_big_int_bitwise_or_nn_and_canonicalize_function, MutableBigInt_BitwiseOrNegNegAndCanonicalize) FUNCTION_REFERENCE(mutable_big_int_bitwise_or_pn_and_canonicalize_function |
| |
| MutableBigInt_AbsoluteCompare MutableBigInt_AbsoluteMulAndCanonicalize MutableBigInt_AbsoluteModAndCanonicalize MutableBigInt_BitwiseAndNegNegAndCanonicalize MutableBigInt_BitwiseOrPosPosAndCanonicalize MutableBigInt_BitwiseOrPosNegAndCanonicalize | FUNCTION_REFERENCE (mutable_big_int_bitwise_xor_pp_and_canonicalize_function, MutableBigInt_BitwiseXorPosPosAndCanonicalize) FUNCTION_REFERENCE(mutable_big_int_bitwise_xor_nn_and_canonicalize_function |
| |
| MutableBigInt_AbsoluteCompare MutableBigInt_AbsoluteMulAndCanonicalize MutableBigInt_AbsoluteModAndCanonicalize MutableBigInt_BitwiseAndNegNegAndCanonicalize MutableBigInt_BitwiseOrPosPosAndCanonicalize MutableBigInt_BitwiseOrPosNegAndCanonicalize MutableBigInt_BitwiseXorNegNegAndCanonicalize | FUNCTION_REFERENCE (mutable_big_int_bitwise_xor_pn_and_canonicalize_function, MutableBigInt_BitwiseXorPosNegAndCanonicalize) FUNCTION_REFERENCE(mutable_big_int_left_shift_and_canonicalize_function |
| |
| MutableBigInt_AbsoluteCompare MutableBigInt_AbsoluteMulAndCanonicalize MutableBigInt_AbsoluteModAndCanonicalize MutableBigInt_BitwiseAndNegNegAndCanonicalize MutableBigInt_BitwiseOrPosPosAndCanonicalize MutableBigInt_BitwiseOrPosNegAndCanonicalize MutableBigInt_BitwiseXorNegNegAndCanonicalize MutableBigInt_LeftShiftAndCanonicalize | FUNCTION_REFERENCE (big_int_right_shift_result_length_function, RightShiftResultLength) FUNCTION_REFERENCE(mutable_big_int_right_shift_and_canonicalize_function |
| |
| template ExternalReference | ExternalReference::search_string_raw< const uint8_t, const base::uc16 > () |
| |
| template ExternalReference | ExternalReference::search_string_raw< const base::uc16, const uint8_t > () |
| |
| template ExternalReference | ExternalReference::search_string_raw< const base::uc16, const base::uc16 > () |
| |
| static Address | InvalidatePrototypeChainsWrapper (Address raw_map) |
| |
| | FUNCTION_REFERENCE (invalidate_prototype_chains_function, InvalidatePrototypeChainsWrapper) double modulo_double_double(double x |
| |
| | FUNCTION_REFERENCE_WITH_TYPE (mod_two_doubles_operation, modulo_double_double, BUILTIN_FP_FP_CALL) ExternalReference ExternalReference |
| |
| | FUNCTION_REFERENCE (call_enqueue_microtask_function, MicrotaskQueue::CallEnqueueMicrotask) ExternalReference ExternalReference |
| |
| static int64_t | atomic_pair_load (intptr_t address) |
| |
| static void | atomic_pair_store (intptr_t address, int value_low, int value_high) |
| |
| static int64_t | atomic_pair_add (intptr_t address, int value_low, int value_high) |
| |
| static int64_t | atomic_pair_sub (intptr_t address, int value_low, int value_high) |
| |
| static int64_t | atomic_pair_and (intptr_t address, int value_low, int value_high) |
| |
| static int64_t | atomic_pair_or (intptr_t address, int value_low, int value_high) |
| |
| static int64_t | atomic_pair_xor (intptr_t address, int value_low, int value_high) |
| |
| static int64_t | atomic_pair_exchange (intptr_t address, int value_low, int value_high) |
| |
| static uint64_t | atomic_pair_compare_exchange (intptr_t address, int old_value_low, int old_value_high, int new_value_low, int new_value_high) |
| |
| | FUNCTION_REFERENCE (atomic_pair_compare_exchange_function, atomic_pair_compare_exchange) IF_TSAN(FUNCTION_REFERENCE |
| |
| tsan_relaxed_store_8_bits | IF_TSAN (FUNCTION_REFERENCE, tsan_relaxed_store_function_16_bits, tsan_relaxed_store_16_bits) IF_TSAN(FUNCTION_REFERENCE |
| |
| tsan_relaxed_store_8_bits tsan_relaxed_store_32_bits | IF_TSAN (FUNCTION_REFERENCE, tsan_relaxed_store_function_64_bits, tsan_relaxed_store_64_bits) IF_TSAN(FUNCTION_REFERENCE |
| |
| tsan_relaxed_store_8_bits tsan_relaxed_store_32_bits tsan_seq_cst_store_8_bits | IF_TSAN (FUNCTION_REFERENCE, tsan_seq_cst_store_function_16_bits, tsan_seq_cst_store_16_bits) IF_TSAN(FUNCTION_REFERENCE |
| |
| tsan_relaxed_store_8_bits tsan_relaxed_store_32_bits tsan_seq_cst_store_8_bits tsan_seq_cst_store_32_bits | IF_TSAN (FUNCTION_REFERENCE, tsan_seq_cst_store_function_64_bits, tsan_seq_cst_store_64_bits) IF_TSAN(FUNCTION_REFERENCE |
| |
| tsan_relaxed_store_8_bits tsan_relaxed_store_32_bits tsan_seq_cst_store_8_bits tsan_seq_cst_store_32_bits tsan_relaxed_load_32_bits | IF_TSAN (FUNCTION_REFERENCE, tsan_relaxed_load_function_64_bits, tsan_relaxed_load_64_bits) static int EnterContextWrapper(HandleScopeImplementer *hsi |
| |
| hsi | EnterContext (context) |
| |
| bool | operator== (ExternalReference lhs, ExternalReference rhs) |
| |
| bool | operator!= (ExternalReference lhs, ExternalReference rhs) |
| |
| size_t | hash_value (ExternalReference reference) |
| |
| std::ostream & | operator<< (std::ostream &os, ExternalReference reference) |
| |
| void | abort_with_reason (int reason) |
| |
| | ASSERT_TRIVIALLY_COPYABLE (ExternalReference) |
| |
| void | FlushInstructionCache (void *start, size_t size) |
| |
| V8_EXPORT_PRIVATE V8_INLINE void | FlushInstructionCache (Address start, size_t size) |
| |
| | ASSERT_TRIVIALLY_COPYABLE (Operand) |
| |
| bool | operator!= (Operand op, XMMRegister r) |
| |
| Operand | FieldOperand (Register object, int offset) |
| |
| Operand | FieldOperand (Register object, Register index, ScaleFactor scale, int offset) |
| |
| template<typename... Registers> |
| constexpr std::array< Register, 1+sizeof...(Registers)> | RegisterArray (Register first_reg, Registers... regs) |
| |
| constexpr EmptyRegisterArray | RegisterArray () |
| |
| template<typename... Registers> |
| constexpr std::array< DoubleRegister, 1+sizeof...(Registers)> | DoubleRegisterArray (DoubleRegister first_reg, Registers... regs) |
| |
| constexpr EmptyDoubleRegisterArray | DoubleRegisterArray () |
| |
| Condition | NegateFpuCondition (Condition cc) |
| |
| int | ToNumber (Register reg) |
| |
| Register | ToRegister (int num) |
| |
| bool | IsSubtype (MachineRepresentation rep1, MachineRepresentation rep2) |
| |
| std::ostream & | operator<< (std::ostream &os, MachineRepresentation rep) |
| |
| const char * | MachineReprToString (MachineRepresentation rep) |
| |
| std::ostream & | operator<< (std::ostream &os, MachineSemantic type) |
| |
| std::ostream & | operator<< (std::ostream &os, MachineType type) |
| |
| V8_EXPORT_PRIVATE constexpr int | ElementSizeLog2Of (MachineRepresentation) |
| |
| V8_EXPORT_PRIVATE constexpr int | ElementSizeInBytes (MachineRepresentation) |
| |
| V8_INLINE size_t | hash_value (MachineRepresentation rep) |
| |
| V8_INLINE size_t | hash_value (MachineType type) |
| |
| constexpr bool | IsIntegral (MachineRepresentation rep) |
| |
| constexpr bool | IsFloatingPoint (MachineRepresentation rep) |
| |
| constexpr bool | IsSimd128 (MachineRepresentation rep) |
| |
| constexpr bool | CanBeTaggedPointer (MachineRepresentation rep) |
| |
| constexpr bool | CanBeTaggedSigned (MachineRepresentation rep) |
| |
| constexpr bool | IsAnyTagged (MachineRepresentation rep) |
| |
| constexpr bool | CanBeCompressedPointer (MachineRepresentation rep) |
| |
| constexpr bool | CanBeIndirectPointer (MachineRepresentation rep) |
| |
| constexpr bool | CanBeTaggedOrCompressedPointer (MachineRepresentation rep) |
| |
| constexpr bool | CanBeTaggedOrCompressedOrIndirectPointer (MachineRepresentation rep) |
| |
| constexpr bool | IsAnyCompressed (MachineRepresentation rep) |
| |
| constexpr int | ElementSizeInBits (MachineRepresentation rep) |
| |
| constexpr uint64_t | MaxUnsignedValue (MachineRepresentation rep) |
| |
| V8_EXPORT_PRIVATE constexpr int | ElementSizeInPointers (MachineRepresentation rep) |
| |
| V8_EXPORT_PRIVATE constexpr int | RepresentationBit (MachineRepresentation rep) |
| |
| MSABranchCondition | NegateMSABranchCondition (MSABranchCondition cond) |
| |
| static constexpr uint64_t | OpcodeToBitNumber (Opcode opcode) |
| |
| MemOperand | CFunctionArgumentOperand (int index) |
| |
| Condition | to_condition (Condition cond) |
| |
| bool | is_signed (Condition cond) |
| |
| | ASSERT_TRIVIALLY_COPYABLE (Simd128Register) |
| |
| | ASSERT_TRIVIALLY_COPYABLE (DoubleRegister) |
| |
| int | JSCallerSavedCode (int n) |
| |
| template<typename RegType , typename = decltype(RegisterName(std::declval<RegType>()))> |
| std::ostream & | operator<< (std::ostream &os, RegType reg) |
| |
| constexpr int | AddArgumentPaddingSlots (int argument_count) |
| |
| constexpr bool | ShouldPadArguments (int argument_count) |
| |
template<typename... RegTypes>
requires (std::conjunction_v<std::is_same<Register, RegTypes>...> || std::conjunction_v<std::is_same<DoubleRegister, RegTypes>...> // V8_TARGET_ARCH_X64 ) |
| constexpr bool | AreAliased (RegTypes... regs) |
| |
| template<typename RegisterT > |
| std::ostream & | operator<< (std::ostream &os, RegListBase< RegisterT > reglist) |
| |
| template class | EXPORT_TEMPLATE_DEFINE (V8_EXPORT_PRIVATE) RelocIteratorBase< RelocInfo > |
| |
| template class | EXPORT_TEMPLATE_DECLARE (V8_EXPORT_PRIVATE) RelocIteratorBase< RelocInfo > |
| |
| static Instr | SetHi20Offset (int32_t hi20, Instr instr) |
| |
| static Instr | SetLo12Offset (int32_t lo12, Instr instr) |
| |
| static unsigned | CpuFeaturesImpliedByCompiler () |
| |
| static Instr | SetBranchOffset (int32_t pos, int32_t target_pos, Instr instr) |
| |
| static Instr | SetLoadOffset (int32_t offset, Instr instr) |
| |
| static Instr | SetJalOffset (int32_t pos, int32_t target_pos, Instr instr) |
| |
| static ShortInstr | SetCJalOffset (int32_t pos, int32_t target_pos, Instr instr) |
| |
| static Instr | SetCBranchOffset (int32_t pos, int32_t target_pos, Instr instr) |
| |
| int64_t | signExtend (uint64_t V, int N) |
| |
| static Instr | SetHi20Offset (int32_t hi29, Instr instr) |
| |
| static Instr | SetLo12Offset (int32_t lo12, Instr instr) |
| |
| uint8_t | vsew_switch (VSew vsew) |
| |
| static bool | IsZero (const Operand &rt) |
| |
| static int | InstrCountForLiLower32Bit (int64_t value) |
| |
| MemOperand | FieldMemOperand (Register object, Register index, int offset) |
| |
| template<typename T > |
| size_t | hash_value (const Signature< T > &sig) |
| |
| std::ostream & | operator<< (std::ostream &out, const SourcePositionInfo &pos) |
| |
| std::ostream & | operator<< (std::ostream &out, const std::vector< SourcePositionInfo > &stack) |
| |
| std::ostream & | operator<< (std::ostream &out, const SourcePosition &pos) |
| |
| bool | operator== (const SourcePosition &lhs, const SourcePosition &rhs) |
| |
| bool | operator!= (const SourcePosition &lhs, const SourcePosition &rhs) |
| |
| constexpr MachineType | CommonMachineType (MachineType type1, MachineType type2) |
| |
| template<typename T > |
| constexpr bool | IsMachineRepresentationOf (MachineRepresentation r) |
| |
| | ASSERT_TRIVIALLY_COPYABLE (Immediate) |
| |
| Operand | FieldOperand (TaggedRegister object, int offset) |
| |
| | ASSERT_TRIVIALLY_COPYABLE (XMMRegister) |
| |
| | ASSERT_TRIVIALLY_COPYABLE (YMMRegister) |
| |
| template<class T > |
| bool | operator== (const ThreadIsolation::StlAllocator< T > &, const ThreadIsolation::StlAllocator< T > &) |
| |
| template<class T > |
| bool | operator!= (const ThreadIsolation::StlAllocator< T > &, const ThreadIsolation::StlAllocator< T > &) |
| |
| constexpr JSDispatchHandle | kNullJSDispatchHandle (0) |
| |
| constexpr JSDispatchHandle | kPlaceholderDispatchHandle (0x0) |
| |
| constexpr JSDispatchHandle | kInvalidDispatchHandle (0xffffffff<< kJSDispatchHandleShift) |
| |
| template<typename F > |
| F | FUNCTION_CAST (uint8_t *addr) |
| |
| template<typename F > |
| F | FUNCTION_CAST (Address addr) |
| |
| constexpr bool | StaticStringsEqual (const char *s1, const char *s2) |
| |
| size_t | hash_value (LanguageMode mode) |
| |
| const char * | LanguageMode2String (LanguageMode mode) |
| |
| std::ostream & | operator<< (std::ostream &os, LanguageMode mode) |
| |
| bool | is_sloppy (LanguageMode language_mode) |
| |
| bool | is_strict (LanguageMode language_mode) |
| |
| bool | is_valid_language_mode (int language_mode) |
| |
| LanguageMode | construct_language_mode (bool strict_bit) |
| |
| LanguageMode | stricter_language_mode (LanguageMode mode1, LanguageMode mode2) |
| |
| size_t | hash_value (DeoptimizeKind kind) |
| |
| constexpr const char * | ToString (DeoptimizeKind kind) |
| |
| std::ostream & | operator<< (std::ostream &os, DeoptimizeKind kind) |
| |
| std::ostream & | operator<< (std::ostream &os, const LookupHoistingMode &mode) |
| |
| constexpr bool | IsAnyCodeSpace (AllocationSpace space) |
| |
| constexpr bool | IsAnyTrustedSpace (AllocationSpace space) |
| |
| constexpr bool | IsAnySharedSpace (AllocationSpace space) |
| |
| constexpr bool | IsAnyNewSpace (AllocationSpace space) |
| |
| constexpr const char * | ToString (AllocationSpace space) |
| |
| std::ostream & | operator<< (std::ostream &os, AllocationSpace space) |
| |
| constexpr const char * | ToString (AllocationType kind) |
| |
| std::ostream & | operator<< (std::ostream &os, AllocationType type) |
| |
| constexpr const char * | ToString (GarbageCollectionReason reason) |
| |
| std::ostream & | operator<< (std::ostream &os, GarbageCollectionReason reason) |
| |
| size_t | hash_value (AllocationType kind) |
| |
| constexpr bool | IsSharedAllocationType (AllocationType kind) |
| |
| constexpr const char * | ToString (GarbageCollector collector) |
| |
| std::ostream & | operator<< (std::ostream &os, GarbageCollector collector) |
| |
| bool | IsBaselineCodeFlushingEnabled (base::EnumSet< CodeFlushMode > mode) |
| |
| bool | IsByteCodeFlushingEnabled (base::EnumSet< CodeFlushMode > mode) |
| |
| bool | IsForceFlushingEnabled (base::EnumSet< CodeFlushMode > mode) |
| |
| bool | IsFlushingDisabled (base::EnumSet< CodeFlushMode > mode) |
| |
| REPLMode | construct_repl_mode (bool is_repl_mode) |
| |
| size_t | hash_value (InlineCacheState mode) |
| |
| const char * | InlineCacheState2String (InlineCacheState state) |
| |
| size_t | hash_value (ConvertReceiverMode mode) |
| |
| std::ostream & | operator<< (std::ostream &os, ConvertReceiverMode mode) |
| |
| size_t | hash_value (CreateArgumentsType type) |
| |
| std::ostream & | operator<< (std::ostream &os, CreateArgumentsType type) |
| |
| std::ostream & | operator<< (std::ostream &os, ScopeType type) |
| |
| const char * | ImmutableLexicalVariableModeToString (VariableMode mode) |
| |
| bool | IsDynamicVariableMode (VariableMode mode) |
| |
| bool | IsDeclaredVariableMode (VariableMode mode) |
| |
| bool | IsPrivateAccessorVariableMode (VariableMode mode) |
| |
| bool | IsPrivateMethodVariableMode (VariableMode mode) |
| |
| bool | IsPrivateMethodOrAccessorVariableMode (VariableMode mode) |
| |
| bool | IsSerializableVariableMode (VariableMode mode) |
| |
| bool | IsImmutableLexicalVariableMode (VariableMode mode) |
| |
| bool | IsImmutableLexicalOrPrivateVariableMode (VariableMode mode) |
| |
| bool | IsLexicalVariableMode (VariableMode mode) |
| |
| size_t | hash_value (InterpreterPushArgsMode mode) |
| |
| std::ostream & | operator<< (std::ostream &os, InterpreterPushArgsMode mode) |
| |
| uint32_t | ObjectHash (Address address) |
| |
| size_t | hash_value (UnicodeEncoding encoding) |
| |
| std::ostream & | operator<< (std::ostream &os, UnicodeEncoding encoding) |
| |
| std::ostream & | operator<< (std::ostream &os, IterationKind kind) |
| |
| std::ostream & | operator<< (std::ostream &os, CollectionKind kind) |
| |
| std::ostream & | operator<< (std::ostream &os, const AssemblerDebugInfo &info) |
| |
| constexpr bool | IsRequestMaglev (TieringState state) |
| |
| constexpr bool | IsRequestTurbofan (TieringState state) |
| |
| constexpr const char * | ToString (TieringState marker) |
| |
| std::ostream & | operator<< (std::ostream &os, TieringState marker) |
| |
| std::ostream & | operator<< (std::ostream &os, SpeculationMode speculation_mode) |
| |
| constexpr bool | IsSynchronous (ConcurrencyMode mode) |
| |
| constexpr bool | IsConcurrent (ConcurrencyMode mode) |
| |
| constexpr const char * | ToString (ConcurrencyMode mode) |
| |
| std::ostream & | operator<< (std::ostream &os, ConcurrencyMode mode) |
| |
| KeyedAccessLoadMode | CreateKeyedAccessLoadMode (bool handle_oob, bool handle_holes) |
| |
| KeyedAccessLoadMode | GeneralizeKeyedAccessLoadMode (KeyedAccessLoadMode mode1, KeyedAccessLoadMode mode2) |
| |
| bool | LoadModeHandlesOOB (KeyedAccessLoadMode load_mode) |
| |
| bool | LoadModeHandlesHoles (KeyedAccessLoadMode load_mode) |
| |
| std::ostream & | operator<< (std::ostream &os, KeyedAccessStoreMode mode) |
| |
| bool | StoreModeIsInBounds (KeyedAccessStoreMode store_mode) |
| |
| bool | StoreModeHandlesCOW (KeyedAccessStoreMode store_mode) |
| |
| bool | StoreModeSupportsTypeArray (KeyedAccessStoreMode store_mode) |
| |
| bool | StoreModeIgnoresTypeArrayOOB (KeyedAccessStoreMode store_mode) |
| |
| bool | StoreModeCanGrow (KeyedAccessStoreMode store_mode) |
| |
| constexpr int | JSParameterCount (int param_count_without_receiver) |
| |
| V8_INLINE bool | operator! (ExceptionStatus status) |
| |
| MessageTemplate | MessageTemplateFromInt (int message_id) |
| |
| V8_INLINE constexpr PtrComprCageBase | GetPtrComprCageBaseFromOnHeapAddress (Address address) |
| |
| V8_INLINE PtrComprCageBase | GetPtrComprCageBase () |
| |
| V8_INLINE PtrComprCageBase | GetPtrComprCageBase (Tagged< HeapObject > object) |
| |
| template<typename V > |
| static V | ReadMaybeUnalignedValue (Address p) |
| |
| template<typename V > |
| static void | WriteMaybeUnalignedValue (Address p, V value) |
| |
| else | if (instr->arch_opcode()==kRiscvCmpZero) |
| |
| | TRACE ("UNIMPLEMENTED code_generator_riscv64: %s at line %d\n", __FUNCTION__, __LINE__) |
| |
| | UNIMPLEMENTED () |
| |
| void | EmitWordCompareZero (InstructionSelectorT *selector, OpIndex value, FlagsContinuationT *cont) |
| |
| bool | NeedsBoundsCheck (CheckBounds check_bounds) |
| |
| std::ostream & | operator<< (std::ostream &os, BranchHint hint) |
| |
| std::ostream & | operator<< (std::ostream &os, AbortReason reason) |
| |
| double | MakeDate (double day, double time) |
| |
| double | MakeDay (double year, double month, double date) |
| |
| double | MakeTime (double hour, double min, double sec, double ms) |
| |
| DateBuffer | ToDateString (double time_val, DateCache *date_cache, ToDateStringMode mode) |
| |
| double | ParseDateTimeString (Isolate *isolate, DirectHandle< String > str) |
| |
| DirectHandle< JSObject > | GetWasmDebugProxy (WasmFrame *frame) |
| |
| std::unique_ptr< debug::ScopeIterator > | GetWasmScopeIterator (WasmFrame *frame) |
| |
| DirectHandle< String > | GetWasmFunctionDebugName (Isolate *isolate, DirectHandle< WasmTrustedInstanceData > instance_data, uint32_t func_index) |
| |
| DirectHandle< ArrayList > | AddWasmInstanceObjectInternalProperties (Isolate *isolate, DirectHandle< ArrayList > result, DirectHandle< WasmInstanceObject > instance) |
| |
| DirectHandle< ArrayList > | AddWasmModuleObjectInternalProperties (Isolate *isolate, DirectHandle< ArrayList > result, DirectHandle< WasmModuleObject > module_object) |
| |
| DirectHandle< ArrayList > | AddWasmTableObjectInternalProperties (Isolate *isolate, DirectHandle< ArrayList > result, DirectHandle< WasmTableObject > table) |
| |
| | ASSERT_OFFSET (Builtin::kDeoptimizationEntry_Eager) |
| |
| | ASSERT_OFFSET (Builtin::kDeoptimizationEntry_Lazy) |
| |
| std::ostream & | operator<< (std::ostream &os, DeoptimizeReason reason) |
| |
| size_t | hash_value (DeoptimizeReason reason) |
| |
| char const * | DeoptimizeReasonToString (DeoptimizeReason reason) |
| |
| char const * | DeoptimizeReasonToString (LazyDeoptimizeReason reason) |
| |
| constexpr bool | IsDeoptimizationWithoutCodeInvalidation (DeoptimizeReason reason) |
| |
| void | DeoptimizationFrameTranslationPrintSingleOpcode (std::ostream &os, TranslationOpcode opcode, DeoptimizationFrameTranslation::Iterator &iterator, Tagged< ProtectedDeoptimizationLiteralArray > protected_literal_array, Tagged< DeoptimizationLiteralArray > literal_array) |
| |
| int | TranslationOpcodeOperandCount (TranslationOpcode o) |
| |
| bool | TranslationOpcodeIsBegin (TranslationOpcode o) |
| |
| bool | IsTranslationFrameOpcode (TranslationOpcode o) |
| |
| bool | IsTranslationJsFrameOpcode (TranslationOpcode o) |
| |
| bool | IsTranslationInterpreterFrameOpcode (TranslationOpcode o) |
| |
| std::ostream & | operator<< (std::ostream &out, TranslationOpcode opcode) |
| |
| std::ostream & | operator<< (std::ostream &os, const BasicBlockProfilerData &d) |
| |
| static void | WriteLine (std::ostream &os, bool machine_format, const char *name, const char *compiler, const CompilationStatistics::BasicStats &stats, const CompilationStatistics::BasicStats &total_stats) |
| |
| static void | WriteFullLine (std::ostream &os) |
| |
| static void | WriteHeader (std::ostream &os, const char *compiler) |
| |
| static void | WritePhaseKindBreak (std::ostream &os) |
| |
| std::ostream & | operator<< (std::ostream &os, const AsPrintableStatistics &ps) |
| |
| std::ostream & | operator<< (std::ostream &os, const EtwTraceDebug &) |
| |
| double | ClobberDoubleRegisters (double x1, double x2, double x3, double x4) |
| |
| static int | FPOffsetToFrameSlot (int frame_offset) |
| |
| static int | FrameSlotToFPOffset (int slot) |
| |
| V8_INLINE Heap * | GetHeapFromWritableObject (Tagged< HeapObject > object) |
| |
| V8_INLINE Isolate * | GetIsolateFromWritableObject (Tagged< HeapObject > object) |
| |
| V8_INLINE Heap * | GetHeapFromWritableObject (const HeapObjectLayout &object) |
| |
| V8_INLINE Isolate * | GetIsolateFromWritableObject (const HeapObjectLayout &object) |
| |
| V8_INLINE bool | GetIsolateFromHeapObject (Tagged< HeapObject > object, Isolate **isolate) |
| |
| const uint8_t * | DefaultEmbeddedBlobCode () |
| |
| uint32_t | DefaultEmbeddedBlobCodeSize () |
| |
| const uint8_t * | DefaultEmbeddedBlobData () |
| |
| uint32_t | DefaultEmbeddedBlobDataSize () |
| |
| void | DisableEmbeddedBlobRefcounting () |
| |
| void | FreeCurrentEmbeddedBlob () |
| |
| bool | NoExtension (const v8::FunctionCallbackInfo< v8::Value > &) |
| |
| static void | PrintFrames (Isolate *isolate, StringStream *accumulator, StackFrame::PrintMode mode) |
| |
| static base::RandomNumberGenerator * | ensure_rng_exists (base::RandomNumberGenerator **rng, int seed) |
| |
| void | DefaultWasmAsyncResolvePromiseCallback (v8::Isolate *isolate, v8::Local< v8::Context > context, v8::Local< v8::Promise::Resolver > resolver, v8::Local< v8::Value > result, WasmAsyncSuccess success) |
| |
| | __attribute__ ((tls_model(V8_TLS_MODEL))) extern thread_local Isolate *g_current_isolate_ V8_CONSTINIT |
| |
| char const * | OptimizationReasonToString (OptimizationReason reason) |
| |
| std::ostream & | operator<< (std::ostream &os, OptimizationReason reason) |
| |
| void | TraceManualRecompile (Tagged< JSFunction > function, CodeKind code_kind, ConcurrencyMode concurrency_mode) |
| |
| static int | ArchiveSpacePerThread () |
| |
| const char * | StateToString (StateTag state) |
| |
| static void | AddCounter (v8::Isolate *isolate, v8::Local< v8::Object > object, StatsCounter *counter, const char *name) |
| |
| static void | AddNumber (v8::Isolate *isolate, v8::Local< v8::Object > object, double value, const char *name) |
| |
| static void | AddNumber64 (v8::Isolate *isolate, v8::Local< v8::Object > object, int64_t value, const char *name) |
| |
| Flag * | FindFlagByPointer (const void *ptr) |
| |
| V8_EXPORT_PRIVATE Flag * | FindFlagByName (const char *name) |
| |
| V8_EXPORT_PRIVATE Flag * | FindImplicationFlagByName (const char *name) |
| |
| V8_EXPORT_PRIVATE base::Vector< Flag > | Flags () |
| |
| std::ostream & | operator<< (std::ostream &os, FlagName flag_name) |
| |
| | DEFINE_BOOL (experimental, false, "Indicates that V8 is running with experimental features enabled. " "This flag is typically not set explicitly but instead enabled as " "an implication of other flags which enable experimental features.") DEFINE_BOOL(abort_on_contradictory_flags |
| |
| Disallow flags or implications overriding each other | DEFINE_BOOL (exit_on_contradictory_flags, false, "Exit with return code 0 on contradictory flags.") DEFINE_WEAK_IMPLICATION(exit_on_contradictory_flags |
| |
| Disallow flags or implications overriding each other abort_on_contradictory_flags | DEFINE_BOOL (allow_overwriting_for_next_flag, false, "temporary disable flag contradiction to allow overwriting just " "the next flag") DEFINE_BOOL(builtin_subclassing |
| |
| Disallow flags or implications overriding each other abort_on_contradictory_flags subclassing support in built in methods | DEFINE_BOOL (enable_sharedarraybuffer_per_context, false, "enable the SharedArrayBuffer constructor per context") DEFINE_BOOL(stress_snapshot |
| |
| | DEFINE_BOOL (lite_mode, V8_LITE_MODE_BOOL, "enables trade-off of performance for memory savings") DEFINE_BOOL_READONLY(enable_allocation_folding |
| |
| | DEFINE_BOOL_READONLY (disable_write_barriers, V8_DISABLE_WRITE_BARRIERS_BOOL, "disable write barriers when GC is non-incremental " "and heap contains single generation.") DEFINE_BOOL_READONLY(enable_unconditional_write_barriers |
| |
| | DEFINE_BOOL_READONLY (single_generation, V8_SINGLE_GENERATION_BOOL, "allocate all objects from young generation to old generation") DEFINE_BOOL_READONLY(conservative_stack_scanning |
| |
| use conservative stack scanning | DEFINE_IMPLICATION (conservative_stack_scanning, scavenger_conservative_object_pinning) DEFINE_BOOL_READONLY(direct_handle |
| |
| use conservative stack scanning use direct handles with conservative stack scanning | DEFINE_EXPERIMENTAL_FEATURE (scavenger_conservative_object_pinning, "Objects reachable from the native stack during " "scavenge will be pinned and " "won't move.") DEFINE_BOOL(stress_scavenger_conservative_object_pinning |
| |
| use conservative stack scanning use direct handles with conservative stack scanning Treat some precise references as conservative references to stress test object pinning in Scavenger | DEFINE_IMPLICATION (stress_scavenger_conservative_object_pinning, scavenger_conservative_object_pinning) DEFINE_NEG_IMPLICATION(stress_scavenger_conservative_object_pinning |
| |
| use conservative stack scanning use direct handles with conservative stack scanning Treat some precise references as conservative references to stress test object pinning in Scavenger minor_gc_task | DEFINE_VALUE_IMPLICATION (stress_scavenger_conservative_object_pinning, scavenger_max_new_space_capacity_mb, 1u) DEFINE_BOOL(stress_scavenger_conservative_object_pinning_random |
| |
| use conservative stack scanning use direct handles with conservative stack scanning Treat some precise references as conservative references to stress test object pinning in Scavenger minor_gc_task Enables random stressing of object pinning in such that each GC would randomly pick a subset of the precise references to treat conservatively | DEFINE_IMPLICATION (stress_scavenger_conservative_object_pinning_random, stress_scavenger_conservative_object_pinning) DEFINE_BOOL(scavenger_precise_object_pinning |
| |
| use conservative stack scanning use direct handles with conservative stack scanning Treat some precise references as conservative references to stress test object pinning in Scavenger minor_gc_task Enables random stressing of object pinning in such that each GC would randomly pick a subset of the precise references to treat conservatively Objects reachable from handles during scavenge will be pinned and won t move | DEFINE_BOOL (precise_object_pinning, false, "Objects reachable from handles during GC will be pinned and won't move.") DEFINE_BOOL(scavenger_promote_quarantined_pages |
| |
| | DEFINE_BOOL_READONLY (local_off_stack_check, V8_ENABLE_LOCAL_OFF_STACK_CHECK_BOOL, "check for off-stack allocation of v8::Local") DEFINE_BOOL(future |
| |
| Implies all staged features that we want to ship in the not too far future | DEFINE_BOOL (force_emit_interrupt_budget_checks, false, "force emit tier-up logic from all non-turbofan code, even if it " "is the top enabled tier") DEFINE_BOOL_READONLY(maglev_future |
| |
| Implies all staged features that we want to ship in the not too far future enable maglev features that we want to ship in the not too far future | DEFINE_BOOL_READONLY (optimize_on_next_call_optimizes_to_maglev, false, "make OptimizeFunctionOnNextCall optimize to maglev instead of turbofan") DEFINE_BOOL(maglev_inlining |
| |
| Implies all staged features that we want to ship in the not too far future enable maglev features that we want to ship in the not too far future enable inlining in the maglev optimizing compiler | DEFINE_BOOL (maglev_loop_peeling, true, "enable loop peeling in the maglev optimizing compiler") DEFINE_BOOL(maglev_optimistic_peeled_loops |
| |
| Implies all staged features that we want to ship in the not too far future enable maglev features that we want to ship in the not too far future enable inlining in the maglev optimizing compiler enable aggressive optimizations for | loops (loop SPeeling) in the " "maglev optimizing compiler") DEFINE_INT(maglev_loop_peeling_max_size |
| |
| Implies all staged features that we want to ship in the not too far future enable maglev features that we want to ship in the not too far future enable inlining in the maglev optimizing compiler enable aggressive optimizations for max loop size for loop peeling in the maglev optimizing compiler | DEFINE_INT (maglev_loop_peeling_max_size_cumulative, 900, "max cumulative size for loop peeling in the maglev optimizing compiler") DEFINE_BOOL(maglev_deopt_data_on_background |
| |
| Implies all staged features that we want to ship in the not too far future enable maglev features that we want to ship in the not too far future enable inlining in the maglev optimizing compiler enable aggressive optimizations for max loop size for loop peeling in the maglev optimizing compiler Generate deopt data on background thread | DEFINE_BOOL (maglev_build_code_on_background, true, "Generate code on background thread") DEFINE_WEAK_IMPLICATION(maglev_build_code_on_background |
| |
| Implies all staged features that we want to ship in the not too far future enable maglev features that we want to ship in the not too far future enable inlining in the maglev optimizing compiler enable aggressive optimizations for max loop size for loop peeling in the maglev optimizing compiler Generate deopt data on background thread maglev_deopt_data_on_background | DEFINE_BOOL (maglev_destroy_on_background, true, "Destroy compilation jobs on background thread") DEFINE_BOOL(maglev_inline_api_calls |
| |
| Implies all staged features that we want to ship in the not too far future enable maglev features that we want to ship in the not too far future enable inlining in the maglev optimizing compiler enable aggressive optimizations for max loop size for loop peeling in the maglev optimizing compiler Generate deopt data on background thread maglev_deopt_data_on_background Inline CallApiCallback builtin into generated code | DEFINE_BOOL (maglev_cons_string_elision, false, "Native support for cons strings and their elision in maglev.") DEFINE_BOOL(maglev_pretenure_store_values |
| |
| Implies all staged features that we want to ship in the not too far future enable maglev features that we want to ship in the not too far future enable inlining in the maglev optimizing compiler enable aggressive optimizations for max loop size for loop peeling in the maglev optimizing compiler Generate deopt data on background thread maglev_deopt_data_on_background Inline CallApiCallback builtin into generated code Recursively pretenure values which are stored into pretenured allocation sites | DEFINE_UINT (concurrent_maglev_max_threads, 2, "max number of threads that concurrent Maglev can use (0 for unbounded)") DEFINE_BOOL(concurrent_maglev_high_priority_threads |
| |
| Implies all staged features that we want to ship in the not too far future enable maglev features that we want to ship in the not too far future enable inlining in the maglev optimizing compiler enable aggressive optimizations for max loop size for loop peeling in the maglev optimizing compiler Generate deopt data on background thread maglev_deopt_data_on_background Inline CallApiCallback builtin into generated code Recursively pretenure values which are stored into pretenured allocation sites use high priority compiler threads for concurrent Maglev | DEFINE_INT (max_maglev_inline_depth, 1, "max depth of functions that Maglev will inline excl. small functions") DEFINE_INT(max_maglev_hard_inline_depth |
| |
| Implies all staged features that we want to ship in the not too far future enable maglev features that we want to ship in the not too far future enable inlining in the maglev optimizing compiler enable aggressive optimizations for max loop size for loop peeling in the maglev optimizing compiler Generate deopt data on background thread maglev_deopt_data_on_background Inline CallApiCallback builtin into generated code Recursively pretenure values which are stored into pretenured allocation sites use high priority compiler threads for concurrent Maglev max depth of functions that Maglev will incl small functions | DEFINE_INT (max_maglev_inlined_bytecode_size, 460, "maximum size of bytecode for a single inlining") DEFINE_INT(max_maglev_inlined_bytecode_size_cumulative |
| |
| Implies all staged features that we want to ship in the not too far future enable maglev features that we want to ship in the not too far future enable inlining in the maglev optimizing compiler enable aggressive optimizations for max loop size for loop peeling in the maglev optimizing compiler Generate deopt data on background thread maglev_deopt_data_on_background Inline CallApiCallback builtin into generated code Recursively pretenure values which are stored into pretenured allocation sites use high priority compiler threads for concurrent Maglev max depth of functions that Maglev will incl small functions maximum cumulative size of bytecode considered for inlining excl small functions | DEFINE_INT (max_maglev_inlined_bytecode_size_small, 27, "maximum size of bytecode considered for small function inlining") DEFINE_FLOAT(min_maglev_inlining_frequency |
| |
| Implies all staged features that we want to ship in the not too far future enable maglev features that we want to ship in the not too far future enable inlining in the maglev optimizing compiler enable aggressive optimizations for max loop size for loop peeling in the maglev optimizing compiler Generate deopt data on background thread maglev_deopt_data_on_background Inline CallApiCallback builtin into generated code Recursively pretenure values which are stored into pretenured allocation sites use high priority compiler threads for concurrent Maglev max depth of functions that Maglev will incl small functions maximum cumulative size of bytecode considered for inlining excl small functions minimum frequency for inlining | DEFINE_WEAK_VALUE_IMPLICATION (turbofan, max_maglev_inlined_bytecode_size_cumulative, 920) DEFINE_BOOL(maglev_reuse_stack_slots |
| |
| Implies all staged features that we want to ship in the not too far future enable maglev features that we want to ship in the not too far future enable inlining in the maglev optimizing compiler enable aggressive optimizations for max loop size for loop peeling in the maglev optimizing compiler Generate deopt data on background thread maglev_deopt_data_on_background Inline CallApiCallback builtin into generated code Recursively pretenure values which are stored into pretenured allocation sites use high priority compiler threads for concurrent Maglev max depth of functions that Maglev will incl small functions maximum cumulative size of bytecode considered for inlining excl small functions minimum frequency for inlining reuse stack slots in the maglev optimizing compiler | DEFINE_BOOL (maglev_untagged_phis, true, "enable phi untagging in the maglev optimizing compiler") DEFINE_BOOL(maglev_hoist_osr_value_phi_untagging |
| |
| Implies all staged features that we want to ship in the not too far future enable maglev features that we want to ship in the not too far future enable inlining in the maglev optimizing compiler enable aggressive optimizations for max loop size for loop peeling in the maglev optimizing compiler Generate deopt data on background thread maglev_deopt_data_on_background Inline CallApiCallback builtin into generated code Recursively pretenure values which are stored into pretenured allocation sites use high priority compiler threads for concurrent Maglev max depth of functions that Maglev will incl small functions maximum cumulative size of bytecode considered for inlining excl small functions minimum frequency for inlining reuse stack slots in the maglev optimizing compiler enable phi untagging to hoist untagging of osr values | DEFINE_EXPERIMENTAL_FEATURE (maglev_speculative_hoist_phi_untagging, "enable phi untagging to hoist untagging of loop phi inputs (could " "still cause deopt loops)") DEFINE_EXPERIMENTAL_FEATURE(maglev_non_eager_inlining |
| |
| Implies all staged features that we want to ship in the not too far future enable maglev features that we want to ship in the not too far future enable inlining in the maglev optimizing compiler enable aggressive optimizations for max loop size for loop peeling in the maglev optimizing compiler Generate deopt data on background thread maglev_deopt_data_on_background Inline CallApiCallback builtin into generated code Recursively pretenure values which are stored into pretenured allocation sites use high priority compiler threads for concurrent Maglev max depth of functions that Maglev will incl small functions maximum cumulative size of bytecode considered for inlining excl small functions minimum frequency for inlining reuse stack slots in the maglev optimizing compiler enable phi untagging to hoist untagging of osr values enable Maglev non eager inlining | DEFINE_EXPERIMENTAL_FEATURE (turbolev_non_eager_inlining, "enable Turbolev non-eager inlining") DEFINE_BOOL(maglev_inlining_following_eager_order |
| |
| other heap size | flags (e.g. initial_heap_size) take precedence") DEFINE_SIZE_T( max_shared_heap_size |
| |
| other heap size max size of the shared | heap (in Mbytes) |
| |
| other heap size generate builtins concurrently on separate threads in mksnapshot | DEFINE_BOOL (concurrent_recompilation, true, "optimizing hot functions asynchronously on a separate thread") DEFINE_BOOL(trace_concurrent_recompilation |
| |
| other heap size generate builtins concurrently on separate threads in mksnapshot track concurrent recompilation | DEFINE_INT (concurrent_recompilation_queue_length, 8, "the length of the concurrent compilation queue") DEFINE_INT(concurrent_recompilation_delay |
| |
| other heap size generate builtins concurrently on separate threads in mksnapshot track concurrent recompilation artificial compilation delay in ms | DEFINE_BOOL (concurrent_recompilation_front_running, true, "move compile jobs to the front if recompilation is requested " "multiple times") DEFINE_UINT(concurrent_turbofan_max_threads |
| |
| other heap size generate builtins concurrently on separate threads in mksnapshot track concurrent recompilation artificial compilation delay in ms max number of threads that concurrent Turbofan can | use (0 for unbounded)") DEFINE_BOOL( stress_concurrent_inlining |
| |
| other heap size generate builtins concurrently on separate threads in mksnapshot track concurrent recompilation artificial compilation delay in ms max number of threads that concurrent Turbofan can create additional concurrent optimization jobs but throw away result | DEFINE_WEAK_VALUE_IMPLICATION (stress_concurrent_inlining, invocation_count_for_turbofan, 150) DEFINE_BOOL(maglev_overwrite_budget |
| |
| other heap size generate builtins concurrently on separate threads in mksnapshot track concurrent recompilation artificial compilation delay in ms max number of threads that concurrent Turbofan can create additional concurrent optimization jobs but throw away result whether maglev resets the interrupt budget | DEFINE_WEAK_VALUE_IMPLICATION (maglev_overwrite_budget, invocation_count_for_turbofan, 10000) DEFINE_BOOL(maglev_overwrite_osr_budget |
| |
| other heap size generate builtins concurrently on separate threads in mksnapshot track concurrent recompilation artificial compilation delay in ms max number of threads that concurrent Turbofan can create additional concurrent optimization jobs but throw away result whether maglev resets the interrupt budget whether maglev resets the OSR interrupt budget | DEFINE_WEAK_VALUE_IMPLICATION (maglev_overwrite_osr_budget, invocation_count_for_osr, 800) DEFINE_BOOL(stress_concurrent_inlining_attach_code |
| |
| other heap size generate builtins concurrently on separate threads in mksnapshot track concurrent recompilation artificial compilation delay in ms max number of threads that concurrent Turbofan can create additional concurrent optimization jobs but throw away result whether maglev resets the interrupt budget whether maglev resets the OSR interrupt budget create additional concurrent optimization jobs | DEFINE_IMPLICATION (stress_concurrent_inlining_attach_code, stress_concurrent_inlining) DEFINE_INT(max_serializer_nesting |
| |
| other heap size generate builtins concurrently on separate threads in mksnapshot track concurrent recompilation artificial compilation delay in ms max number of threads that concurrent Turbofan can create additional concurrent optimization jobs but throw away result whether maglev resets the interrupt budget whether maglev resets the OSR interrupt budget create additional concurrent optimization jobs maximum levels for nesting child serializers | DEFINE_BOOL (trace_heap_broker_verbose, false, "trace the heap broker verbosely (all reports)") DEFINE_BOOL(trace_heap_broker |
| |
| other heap size generate builtins concurrently on separate threads in mksnapshot track concurrent recompilation artificial compilation delay in ms max number of threads that concurrent Turbofan can create additional concurrent optimization jobs but throw away result whether maglev resets the interrupt budget whether maglev resets the OSR interrupt budget create additional concurrent optimization jobs maximum levels for nesting child serializers trace the heap | broker (reports on missing data only)") DEFINE_INT(deopt_every_n_times |
| |
| | DEFINE_BOOL (stress_turbo_late_spilling, false, "optimize placement of all spill instructions, not just loop-top phis") DEFINE_BOOL(turbo_wasm_address_reassociation |
| |
| refactor address components for immediate indexing | DEFINE_BOOL (concurrent_turbo_tracing, false, "allow concurrent compilation to happen in combination with " "trace-turbo* flags") DEFINE_BOOL(optimize_maglev_optimizes_to_turbofan |
| |
| refactor address components for immediate indexing make OptimizeMaglevOnNextCall optimize to turbofan instead of maglev | DEFINE_STRING (trace_turbo_path, nullptr, "directory to dump generated TurboFan IR to") DEFINE_STRING(trace_turbo_filter |
| |
| refactor address components for immediate indexing make OptimizeMaglevOnNextCall optimize to turbofan instead of maglev filter for tracing turbofan compilation | DEFINE_STRING (trace_turbo_file_prefix, "turbo", "trace turbo graph to a file with given prefix") DEFINE_STRING(trace_turbo_cfg_file |
| |
| refactor address components for immediate indexing make OptimizeMaglevOnNextCall optimize to turbofan instead of maglev filter for tracing turbofan compilation trace turbo cfg | graph (for C1 visualizer) to a given file name") DEFINE_SLOW_TRACING_BOOL(trace_turbo_trimming |
| |
| refactor address components for immediate indexing make OptimizeMaglevOnNextCall optimize to turbofan instead of maglev filter for tracing turbofan compilation trace turbo cfg trace TurboFan s graph trimmer | DEFINE_SLOW_TRACING_BOOL (trace_turbo_jt, false, "trace TurboFan's jump threading") DEFINE_SLOW_TRACING_BOOL(trace_turbo_ceq |
| |
| refactor address components for immediate indexing make OptimizeMaglevOnNextCall optimize to turbofan instead of maglev filter for tracing turbofan compilation trace turbo cfg trace TurboFan s graph trimmer trace TurboFan s control equivalence | DEFINE_SLOW_TRACING_BOOL (trace_turbo_loop, false, "trace TurboFan's loop optimizations") DEFINE_SLOW_TRACING_BOOL(trace_turbo_alloc |
| |
| refactor address components for immediate indexing make OptimizeMaglevOnNextCall optimize to turbofan instead of maglev filter for tracing turbofan compilation trace turbo cfg trace TurboFan s graph trimmer trace TurboFan s control equivalence trace TurboFan s register allocator | DEFINE_SLOW_TRACING_BOOL (trace_representation, false, "trace representation types") DEFINE_BOOL(trace_turbo_stack_accesses |
| |
| refactor address components for immediate indexing make OptimizeMaglevOnNextCall optimize to turbofan instead of maglev filter for tracing turbofan compilation trace turbo cfg trace TurboFan s graph trimmer trace TurboFan s control equivalence trace TurboFan s register allocator trace stack load store counters for optimized code in run | time (x64 only)") DEFINE_BOOL(fuzzing_and_concurrent_recompilation |
| |
| refactor address components for immediate indexing make OptimizeMaglevOnNextCall optimize to turbofan instead of maglev filter for tracing turbofan compilation trace turbo cfg trace TurboFan s graph trimmer trace TurboFan s control equivalence trace TurboFan s register allocator trace stack load store counters for optimized code in run fuzzing &&concurrent_recompilation | DEFINE_NEG_NEG_IMPLICATION (concurrent_recompilation, fuzzing_and_concurrent_recompilation) DEFINE_DISABLE_FLAG_IMPLICATION(fuzzing_and_concurrent_recompilation |
| |
| refactor address components for immediate indexing make OptimizeMaglevOnNextCall optimize to turbofan instead of maglev filter for tracing turbofan compilation trace turbo cfg trace TurboFan s graph trimmer trace TurboFan s control equivalence trace TurboFan s register allocator trace stack load store counters for optimized code in run fuzzing &&concurrent_recompilation trace_turbo | DEFINE_DISABLE_FLAG_IMPLICATION (fuzzing_and_concurrent_recompilation, trace_turbo_graph) DEFINE_DISABLE_FLAG_IMPLICATION(fuzzing_and_concurrent_recompilation |
| |
| refactor address components for immediate indexing make OptimizeMaglevOnNextCall optimize to turbofan instead of maglev filter for tracing turbofan compilation trace turbo cfg trace TurboFan s graph trimmer trace TurboFan s control equivalence trace TurboFan s register allocator trace stack load store counters for optimized code in run fuzzing &&concurrent_recompilation trace_turbo trace_turbo_scheduled | DEFINE_DISABLE_FLAG_IMPLICATION (fuzzing_and_concurrent_recompilation, trace_turbo_reduction) DEFINE_DISABLE_FLAG_IMPLICATION(fuzzing_and_concurrent_recompilation |
| |
| refactor address components for immediate indexing make OptimizeMaglevOnNextCall optimize to turbofan instead of maglev filter for tracing turbofan compilation trace turbo cfg trace TurboFan s graph trimmer trace TurboFan s control equivalence trace TurboFan s register allocator trace stack load store counters for optimized code in run fuzzing &&concurrent_recompilation trace_turbo trace_turbo_scheduled trace_turbo_stack_accesses | DEFINE_STRING (turbo_verify_machine_graph, nullptr, "verify TurboFan machine graph before instruction selection") DEFINE_BOOL_READONLY(verify_csa |
| |
| refactor address components for immediate indexing make OptimizeMaglevOnNextCall optimize to turbofan instead of maglev filter for tracing turbofan compilation trace turbo cfg trace TurboFan s graph trimmer trace TurboFan s control equivalence trace TurboFan s register allocator trace stack load store counters for optimized code in run fuzzing &&concurrent_recompilation trace_turbo trace_turbo_scheduled trace_turbo_stack_accesses verify TurboFan machine graph of code stubs | DEFINE_STRING (csa_trap_on_node, nullptr, "trigger break point when a node with given id is created in " "given stub. The format is: StubName,NodeId") DEFINE_BOOL_READONLY(fixed_array_bounds_checks |
| |
| refactor address components for immediate indexing make OptimizeMaglevOnNextCall optimize to turbofan instead of maglev filter for tracing turbofan compilation trace turbo cfg trace TurboFan s graph trimmer trace TurboFan s control equivalence trace TurboFan s register allocator trace stack load store counters for optimized code in run fuzzing &&concurrent_recompilation trace_turbo trace_turbo_scheduled trace_turbo_stack_accesses verify TurboFan machine graph of code stubs enable FixedArray bounds checks | DEFINE_BOOL (turbo_stats_nvp, false, "print TurboFan statistics in machine-readable format") DEFINE_BOOL(turbo_stats_wasm |
| |
| refactor address components for immediate indexing make OptimizeMaglevOnNextCall optimize to turbofan instead of maglev filter for tracing turbofan compilation trace turbo cfg trace TurboFan s graph trimmer trace TurboFan s control equivalence trace TurboFan s register allocator trace stack load store counters for optimized code in run fuzzing &&concurrent_recompilation trace_turbo trace_turbo_scheduled trace_turbo_stack_accesses verify TurboFan machine graph of code stubs enable FixedArray bounds checks print TurboFan statistics of wasm compilations | DEFINE_INT (max_inlined_bytecode_size, 460, "maximum size of bytecode for a single inlining") DEFINE_INT(max_inlined_bytecode_size_cumulative |
| |
| refactor address components for immediate indexing make OptimizeMaglevOnNextCall optimize to turbofan instead of maglev filter for tracing turbofan compilation trace turbo cfg trace TurboFan s graph trimmer trace TurboFan s control equivalence trace TurboFan s register allocator trace stack load store counters for optimized code in run fuzzing &&concurrent_recompilation trace_turbo trace_turbo_scheduled trace_turbo_stack_accesses verify TurboFan machine graph of code stubs enable FixedArray bounds checks print TurboFan statistics of wasm compilations maximum cumulative size of bytecode considered for inlining | DEFINE_INT (max_inlined_bytecode_size_absolute, 4600, "maximum absolute size of bytecode considered for inlining") DEFINE_FLOAT(reserve_inline_budget_scale_factor |
| |
| refactor address components for immediate indexing make OptimizeMaglevOnNextCall optimize to turbofan instead of maglev filter for tracing turbofan compilation trace turbo cfg trace TurboFan s graph trimmer trace TurboFan s control equivalence trace TurboFan s register allocator trace stack load store counters for optimized code in run fuzzing &&concurrent_recompilation trace_turbo trace_turbo_scheduled trace_turbo_stack_accesses verify TurboFan machine graph of code stubs enable FixedArray bounds checks print TurboFan statistics of wasm compilations maximum cumulative size of bytecode considered for inlining scale factor of bytecode size used to calculate the inlining budget | DEFINE_INT (max_inlined_bytecode_size_small, 27, "maximum size of bytecode considered for small function inlining") DEFINE_INT(max_optimized_bytecode_size |
| |
| too high values may cause the compiler to | hit (release) assertions") DEFINE_BOOL(stress_inline |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible | DEFINE_VALUE_IMPLICATION (stress_inline, max_inlined_bytecode_size_cumulative, 999999) DEFINE_VALUE_IMPLICATION(stress_inline |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible | DEFINE_BOOL (turbo_inline_array_builtins, true, "inline array builtins in TurboFan code") DEFINE_BOOL(maglev_escape_analysis |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape | DEFINE_EXPERIMENTAL_FEATURE (maglev_object_tracking, "track object changes to avoid escaping them") DEFINE_BOOL(trace_maglev_object_tracking |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects | DEFINE_WEAK_IMPLICATION (trace_maglev_graph_building, trace_maglev_object_tracking) DEFINE_BOOL_READONLY(turbo_string_builder |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder | DEFINE_BOOL (log_or_trace_osr, false, "internal helper flag, please use --trace-osr instead.") DEFINE_BOOL(analyze_environment_liveness |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values | DEFINE_BOOL (trace_environment_liveness, false, "trace liveness of local variable slots") DEFINE_BOOL(trace_turbo_load_elimination |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination | DEFINE_BOOL (turbo_profiling_verbose, false, "enable basic block profiling in TurboFan, and include each " "function's schedule and disassembly in the output") DEFINE_STRING(turbo_profiling_output |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this | file (requires that V8 was built with v8_enable_builtins_profiling=true)") DEFINE_BOOL(reorder_builtins |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot | DEFINE_BOOL (abort_on_bad_builtin_profile_data, false, "flag for mksnapshot, abort if builtins profile can't be applied") DEFINE_BOOL(warn_about_builtin_profile_data |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data | DEFINE_STRING (dump_builtins_hashes_to_file, nullptr, "flag for mksnapshot, dump CSA builtins graph hashes to this file") DEFINE_BOOL(turbo_verify_allocation |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan | DEFINE_BOOL (turbo_instruction_scheduling, false, "enable instruction scheduling in TurboFan") DEFINE_BOOL(turbo_stress_instruction_scheduling |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking | DEFINE_IMPLICATION (turbo_stress_instruction_scheduling, turbo_instruction_scheduling) DEFINE_BOOL(turbo_store_elimination |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan | DEFINE_BOOL_READONLY (turbo_typer_hardening, true, "extra bounds checks to protect against some known typer " "mismatch exploit techniques (best effort)") DEFINE_BOOL_READONLY(turbo_rewrite_far_jumps |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near | jumps (ia32, x64)") DEFINE_BOOL( stress_gc_during_compilation |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode | DEFINE_BOOL_READONLY (turbo_compress_frame_translations, false, "compress deoptimization frame translations (experimental)") DEFINE_BOOL(turbo_inline_js_wasm_calls |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm | calls (specifically:inline JS-to-Wasm wrappers and then "
"the body of the Wasm function, if applicable)") DEFINE_BOOL(turbo_optimize_inlined_js_wasm_wrappers |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional | optimizations (especially load-elimination) on " "inlined JS-to-Wasm wrappers") DEFINE_NEG_NEG_IMPLICATION(turbo_inline_js_wasm_calls |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers | DEFINE_BOOL (turbo_optimize_math_minmax, true, "optimize call math.min/max with double array") DEFINE_BOOL(turbo_collect_feedback_in_generic_lowering |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering | DEFINE_BOOL (turboshaft_enable_debug_features, false, "enables Turboshaft's DebugPrint, StaticAssert and " "CheckTurboshaftTypeOf operations") DEFINE_BOOL(turboshaft_wasm_load_elimination |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination | DEFINE_EXPERIMENTAL_FEATURE (turboshaft_wasm_in_js_inlining, "inline Wasm code into JS functions via Turboshaft (instead of via " "TurboFan). Only the Wasm code is inlined in Turboshaft, the JS-to-Wasm " "wrappers are still inlined in TurboFan. For controlling whether to inline " "at all, see --turbo-inline-js-wasm-calls.") DEFINE_BOOL(turboshaft_load_elimination |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS | DEFINE_BOOL (turboshaft_loop_unrolling, true, "enable Turboshaft's loop unrolling") DEFINE_BOOL(turboshaft_string_concat_escape_analysis |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation | DEFINE_EXPERIMENTAL_FEATURE (turboshaft_typed_optimizations, "enable an additional Turboshaft phase that " "performs optimizations based on type information") DEFINE_BOOL(turbolev |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use | Turbolev (≈ Maglev+Turboshaft combined) as the 4th tier " "compiler instead of Turbofan") DEFINE_EXPERIMENTAL_FEATURE( turbolev_future |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future | DEFINE_BOOL (typed_array_length_loading, true, "Enable specializing loading the TypedArray length in Maglev / Turbofan") DEFINE_BOOL_READONLY(turboshaft_trace_reduction |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps | DEFINE_BOOL_READONLY (turboshaft_trace_emitted, false, "trace emitted Turboshaft instructions") DEFINE_BOOL_READONLY(turboshaft_trace_intermediate_reductions |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps | DEFINE_BOOL (profile_guided_optimization_for_empty_feedback_vector, true, "profile guided optimization for empty feedback vector") DEFINE_INT(invocation_count_for_early_optimization |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization | DEFINE_INT (invocation_count_for_maglev_with_delay, 600, "invocation count for maglev for functions which according to " "profile_guided_optimization are likely to deoptimize before " "reaching this invocation count") DEFINE_BOOL(optimize_for_size |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed | DEFINE_BOOL (reopt_after_lazy_deopts, true, "Immediately re-optimize code after some lazy deopts") DEFINE_INT(stress_sampling_allocation_profiler |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval | DEFINE_BOOL (lazy_new_space_shrinking, false, "Enables the lazy new space shrinking strategy") DEFINE_SIZE_T(min_semi_space_size |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi | space (in MBytes) |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces | DEFINE_SIZE_T (max_semi_space_size, 0, "max size of a semi-space (in MBytes), the new space consists of " "two semi-spaces") DEFINE_SIZE_T(max_heap_size |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after | random (0, X) V8 allocations. It override s " "gc_interval.") DEFINE_INT(cppgc_random_gc_interval |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections | DEFINE_BOOL (trace_gc, false, "print one trace line following each garbage collection") DEFINE_BOOL(trace_gc_nvp |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed | DEFINE_BOOL (incremental_marking_start_user_visible, true, "Starts incremental marking with kUserVisible priority.") DEFINE_INT(incremental_marking_soft_trigger |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects | DEFINE_UINT (minor_gc_task_trigger, 80, "minor GC task trigger in percent of the current heap limit") DEFINE_BOOL(minor_gc_task_with_lower_priority |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority | DEFINE_EXPERIMENTAL_FEATURE (cppgc_young_generation, "run young generation garbage collections in 
Oilpan") DEFINE_INT(concurrent_marking_max_worker_num |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads | DEFINE_BOOL (concurrent_array_buffer_sweeping, 
true, "concurrently sweep array buffers") DEFINE_BOOL(stress_concurrent_allocation |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory | 
DEFINE_INT (ephemeron_fixpoint_iterations, 10, "number of fixpoint iterations it takes to switch to linear " "ephemeron algorithm") DEFINE_NEG_NEG_IMPLICATION(concurrent_sweeping |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping | DEFINE_BOOL (parallel_pointer_update, true, "use parallel pointer update during compaction") DEFINE_BOOL(parallel_weak_ref_clearing |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause | DEFINE_BOOL (detect_ineffective_gcs_near_heap_limit, true, "trigger out-of-memory failure to avoid GC storm near heap limit") DEFINE_BOOL(trace_incremental_marking |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking | DEFINE_BOOL (track_gc_object_stats, false, "track object counts and memory usage") DEFINE_BOOL(trace_gc_object_stats |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage | DEFINE_GENERIC_IMPLICATION (trace_zone_stats, TracingFlags::zone_stats.store(v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE)) DEFINE_SIZE_T(zone_stats_tolerance |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount | DEFINE_GENERIC_IMPLICATION (trace_zone_type_stats, TracingFlags::zone_stats.store(v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE)) DEFINE_GENERIC_IMPLICATION(track_gc_object_stats |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats | store (v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE)) DEFINE_GENERIC_IMPLICATION(trace_gc_object_stats |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected | DEFINE_BOOL (trace_detached_contexts, false, "trace native contexts that are expected to be garbage collected") DEFINE_BOOL_READONLY(verify_heap |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC | DEFINE_BOOL (memory_reducer_respects_frozen_state, false, "don't schedule another GC when we are frozen") DEFINE_BOOL(memory_reducer_favors_memory |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag | DEFINE_BOOL (memory_reducer_for_small_heaps, true, "use memory reducer for small heaps") DEFINE_INT(memory_reducer_gc_count |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled | DEFINE_BOOL (external_memory_accounted_in_global_limit, false, "External memory limits are computed as part of global limits in v8 Heap.") DEFINE_BOOL(gc_speed_uses_counters |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters | DEFINE_INT (heap_growing_percent, 0, "specifies heap growing factor as (1 + heap_growing_percent/100)") DEFINE_BOOL(compact |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics | DEFINE_BOOL (compact_code_space, true, "Perform code space compaction on full collections.") DEFINE_BOOL(compact_on_every_full_gc |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC | DEFINE_BOOL (compact_with_stack, true, "Perform compaction when finalizing a full GC with stack") DEFINE_BOOL(compact_code_space_with_stack |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack | DEFINE_BOOL (shortcut_strings_with_stack, true, "Shortcut Strings during GC with stack") DEFINE_BOOL(stress_compaction |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects | DEFINE_BOOL (stress_compaction_random, false, "Stress GC compaction by selecting random percent of pages as " "evacuation candidates. Overrides stress_compaction.") DEFINE_BOOL(flush_baseline_code |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently | DEFINE_BOOL (flush_bytecode, true, "flush of bytecode when it has not been executed recently") DEFINE_BOOL(flush_code_based_on_time |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age | DEFINE_BOOL (flush_code_based_on_tab_visibility, false, "Flush code when tab goes into the background.") DEFINE_BOOL(use_marking_progress_bar |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active | DEFINE_BOOL (stress_per_context_marking_worklist, false, "Use per-context worklist for marking") DEFINE_BOOL(stress_incremental_marking |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often | DEFINE_BOOL (fuzzer_gc_analysis, false, "prints number of allocations and enables analysis mode for gc " "fuzz testing, e.g. --stress-marking, --stress-scavenge") DEFINE_INT(stress_marking |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and | X (inclusive) percent " "of the regular marking start limit") DEFINE_INT(stress_scavenge |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible | DEFINE_BOOL (parallel_reclaim_unmodified_wrappers, true, "reclaim wrapper objects in parallel") DEFINE_BOOL(gc_experiment_less_compaction |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode | DEFINE_INT (gc_memory_reducer_start_delay_ms, 8000, "Delay before memory reducer start") DEFINE_BOOL(concurrent_marking_high_priority_threads |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking | DEFINE_BOOL (randomize_all_allocations, false, "randomize virtual memory reservations by ignoring any hints " "passed when allocating pages") DEFINE_BOOL(manual_evacuation_candidates_selection |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates | pages (requires --stress_compaction).") DEFINE_BOOL(cppheap_incremental_marking |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap | DEFINE_BOOL (cppheap_concurrent_marking, false, "use concurrent marking for CppHeap") DEFINE_NEG_NEG_IMPLICATION(cppheap_incremental_marking |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking | DEFINE_BOOL (memory_balancer, false, "use membalancer, " "a new heap limit balancing algorithm") DEFINE_FLOAT(memory_balancer_c_value |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses | DEFINE_BOOL (enable_sse4_1, true, "enable use of SSE4.1 instructions if available") DEFINE_BOOL(enable_sse4_2 |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses enable use of SSE4 instructions if available | DEFINE_BOOL (enable_sahf, true, "enable use of SAHF instruction if available (X64 only)") DEFINE_BOOL(enable_avx_vnni |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available | DEFINE_BOOL (enable_avx_vnni_int8, true, "enable use of AVX-VNNI-INT8 instructions if available") DEFINE_BOOL(enable_popcnt |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available | DEFINE_STRING (arm_arch, ARM_ARCH_DEFAULT, "generate instructions for the selected ARM architecture if " "available: armv6, armv7, armv7+sudiv or armv8") DEFINE_BOOL(force_long_branches |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long | mode (MIPS/PPC only)") DEFINE_BOOL(partial_constant_pool |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long enable use of partial constant | pools (x64 only)") DEFINE_STRING(sim_arm64_optional_features |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long enable use of partial constant enable optional features on the simulator for enable mitigation for Intel JCC erratum on affected CPUs | DEFINE_BOOL (enable_source_at_csa_bind, false, "Include source information in the binary at CSA bind locations.") 
DEFINE_BOOL(enable_regexp_unaligned_accesses |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long enable use of partial constant enable optional features on the simulator for enable mitigation for Intel JCC erratum on affected CPUs enable unaligned accesses for the regexp engine | DEFINE_BOOL (stress_background_compile, false, "stress test parsing on 
background") DEFINE_BOOL(concurrent_cache_deserialization |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long enable use of partial constant enable optional features on the simulator for enable mitigation for Intel JCC erratum on affected CPUs enable unaligned accesses for the regexp engine enable deserializing code caches on background | DEFINE_BOOL 
(merge_background_deserialized_script_with_compilation_cache, true, "After deserializing code cache data on a background thread, merge it into " "an existing Script if one is found in the Isolate compilation cache") DEFINE_BOOL(experimental_embedder_instance_types |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long enable use of partial constant enable optional features on the simulator for enable mitigation for Intel JCC erratum on affected CPUs enable unaligned accesses for the regexp engine enable deserializing code caches on background enable type checks based on 
instance types provided by the embedder | DEFINE_STRING (expose_gc_as, nullptr, "expose gc extension under the specified name") DEFINE_BOOL(expose_externalize_string |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long enable use of partial constant enable optional features on the simulator for enable mitigation for Intel JCC erratum on affected CPUs enable unaligned accesses for the regexp engine enable deserializing code caches on background enable type checks based on 
instance types provided by the embedder expose externalize string extension | DEFINE_BOOL (expose_ignition_statistics, false, "expose ignition-statistics extension (requires building with " "v8_enable_ignition_dispatch_counting)") DEFINE_BOOL(builtins_in_stack_traces |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long enable use of partial constant enable optional features on the simulator for enable mitigation for Intel JCC erratum on affected CPUs enable unaligned accesses for the regexp engine enable deserializing code caches on background enable type checks based on 
instance types provided by the embedder expose externalize string extension show built in functions in stack traces | DEFINE_BOOL (experimental_stack_trace_frames, false, "enable experimental frames (API/Builtins) and stack trace layout") DEFINE_BOOL(disallow_code_generation_from_strings |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long enable use of partial constant enable optional features on the simulator for enable mitigation for Intel JCC erratum on affected CPUs enable unaligned accesses for the regexp engine enable deserializing code caches on background enable type checks based on 
instance types provided by the embedder expose externalize string extension show built in functions in stack traces disallow eval and friends | DEFINE_STRING (expose_cputracemark_as, nullptr, "expose cputracemark extension under the specified name") DEFINE_BOOL(experimental_report_exceptions_from_callbacks |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long enable use of partial constant enable optional features on the simulator for enable mitigation for Intel JCC erratum on affected CPUs enable unaligned accesses for the regexp engine enable deserializing code caches on background enable type checks based on 
instance types provided by the embedder expose externalize string extension show built in functions in stack traces disallow eval and friends Notify Api callback about exceptions thrown in Api callbacks | DEFINE_BOOL (allow_unsafe_function_constructor, false, "allow invoking the function constructor without security checks") DEFINE_BOOL(test_small_max_function_context_stub_size |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long enable use of partial constant enable optional features on the simulator for enable mitigation for Intel JCC erratum on affected CPUs enable unaligned accesses for the regexp engine enable deserializing code caches on background enable type checks based on 
instance types provided by the embedder expose externalize string extension show built in functions in stack traces disallow eval and friends Notify Api callback about exceptions thrown in Api callbacks enable testing the function context size overflow path by making the maximum size smaller | DEFINE_INT (switch_table_spread_threshold, 3, "allow the jump table used for switch statements to span a range " "of integers roughly equal to this number times the number of " "clauses in the switch") DEFINE_INT(switch_table_min_cases |
| |
| | if (change_flag &&IsReadOnly()) |
| |
| | switch (set_by_) |
| |
| | if (IsAnyImplication(new_set_by)) |
| |
| static const char * | Type2String (Flag::FlagType type) |
| |
| std::ostream & | operator<< (std::ostream &os, PrintFlagValue flag_value) |
| |
| std::ostream & | operator<< (std::ostream &os, const Flag &flag) |
| |
| uint32_t | ComputeFlagListHash () |
| |
| static void | SplitArgument (const char *arg, char *buffer, int buffer_size, const char **name, const char **value, bool *negated) |
| |
| template<typename T > |
| bool | TryParseUnsigned (Flag *flag, const char *arg, const char *value, char **endp, T *out_val) |
| |
| static char * | SkipWhiteSpace (char *p) |
| |
| static char * | SkipBlackSpace (char *p) |
| |
| template<typename T , typename U > |
| bool | Is (IndirectHandle< U > value) |
| |
| template<typename To , typename From > |
| Handle< To > | UncheckedCast (Handle< From > value) |
| |
| template<typename T > |
| V8_INLINE IndirectHandle< T > | handle (Tagged< T > object, Isolate *isolate) |
| |
| template<typename T > |
| V8_INLINE IndirectHandle< T > | handle (Tagged< T > object, LocalIsolate *isolate) |
| |
| template<typename T > |
| V8_INLINE IndirectHandle< T > | handle (Tagged< T > object, LocalHeap *local_heap) |
| |
| template<typename T > |
| V8_INLINE IndirectHandle< T > | handle (T object, Isolate *isolate) |
| |
| template<typename T > |
| V8_INLINE IndirectHandle< T > | handle (T object, LocalIsolate *isolate) |
| |
| template<typename T > |
| V8_INLINE IndirectHandle< T > | handle (T object, LocalHeap *local_heap) |
| |
| template<typename T > |
| std::ostream & | operator<< (std::ostream &os, IndirectHandle< T > handle) |
| |
| template<typename T , typename U > |
| bool | Is (DirectHandle< U > value) |
| |
| template<typename To , typename From > |
| DirectHandle< To > | UncheckedCast (DirectHandle< From > value) |
| |
| template<typename T > |
| std::ostream & | operator<< (std::ostream &os, DirectHandle< T > handle) |
| |
| template<typename T > |
| V8_INLINE DirectHandle< T > | direct_handle (Tagged< T > object, Isolate *isolate) |
| |
| template<typename T > |
| V8_INLINE DirectHandle< T > | direct_handle (Tagged< T > object, LocalIsolate *isolate) |
| |
| template<typename T > |
| V8_INLINE DirectHandle< T > | direct_handle (Tagged< T > object, LocalHeap *local_heap) |
| |
| template<typename T > |
| V8_INLINE DirectHandle< T > | direct_handle (T object, Isolate *isolate) |
| |
| template<typename T > |
| V8_INLINE DirectHandle< T > | direct_handle (T object, LocalIsolate *isolate) |
| |
| template<typename T > |
| V8_INLINE DirectHandle< T > | direct_handle (T object, LocalHeap *local_heap) |
| |
| | ASSERT_TRIVIALLY_COPYABLE (HandleBase) |
| |
| | ASSERT_TRIVIALLY_COPYABLE (Handle< Object >) |
| |
| | ASSERT_TRIVIALLY_COPYABLE (MaybeHandle< Object >) |
| |
| template<typename T > |
| V8_INLINE IndirectHandle< T > | indirect_handle (DirectHandle< T > handle) |
| |
| template<typename T > |
| V8_INLINE IndirectHandle< T > | indirect_handle (DirectHandle< T > handle, Isolate *isolate) |
| |
| template<typename T > |
| V8_INLINE IndirectHandle< T > | indirect_handle (DirectHandle< T > handle, LocalIsolate *isolate) |
| |
| template<typename T > |
| V8_INLINE IndirectHandle< T > | indirect_handle (DirectHandle< T > handle, LocalHeap *local_heap) |
| |
template<typename T , template< typename > typename HandleType>
requires (std::is_convertible_v<HandleType<T>, DirectHandle<T>>) |
| V8_INLINE DirectHandle< T > | direct_handle (HandleType< T > handle) |
| |
| template<typename T , typename U > |
| bool | Is (MaybeIndirectHandle< U > value) |
| |
| template<typename To , typename From > |
| MaybeIndirectHandle< To > | UncheckedCast (MaybeIndirectHandle< From > value) |
| |
| MaybeObjectHandle | handle (Tagged< MaybeObject > object, Isolate *isolate) |
| |
| MaybeObjectHandle | handle (Tagged< MaybeObject > object, LocalHeap *local_heap) |
| |
| template<typename T > |
| std::ostream & | operator<< (std::ostream &os, MaybeIndirectHandle< T > handle) |
| |
| template<typename T , typename U > |
| bool | Is (MaybeDirectHandle< U > value) |
| |
| template<typename To , typename From > |
| MaybeDirectHandle< To > | UncheckedCast (MaybeDirectHandle< From > value) |
| |
| template<typename T > |
| std::ostream & | operator<< (std::ostream &os, MaybeDirectHandle< T > handle) |
| |
| template<typename T > |
| V8_INLINE MaybeIndirectHandle< T > | indirect_handle (MaybeDirectHandle< T > maybe_handle, Isolate *isolate) |
| |
| template<typename T > |
| V8_INLINE MaybeIndirectHandle< T > | indirect_handle (MaybeDirectHandle< T > maybe_handle, LocalIsolate *isolate) |
| |
| void | SetSlotThreadSafe (Address **slot, Address *val) |
| |
| V8_WARN_UNUSED_RESULT bool | IsValidHeapObject (Heap *heap, Tagged< HeapObject > object) |
| |
| V8_WARN_UNUSED_RESULT bool | IsValidCodeObject (Heap *heap, Tagged< HeapObject > object) |
| |
| void * | ExtractEmbedderDataBackref (Isolate *isolate, CppHeap &cpp_heap, v8::Local< v8::Data > v8_value) |
| |
| template<typename Impl > |
| class | EXPORT_TEMPLATE_DECLARE (V8_EXPORT_PRIVATE) TorqueGeneratedFactory |
| |
| static size_t | CountTotalHolesSize (Heap *heap) |
| |
| const char * | ToString (GCTracer::Event::Type type, bool short_name) |
| |
| template<typename T > |
| Tagged< T > | ForwardingAddress (Tagged< T > heap_obj) |
| |
| template<VisitorId visitor_id> |
| constexpr bool | SupportsRightTrim () |
| |
| template<VisitorId visitor_id> |
| bool | ContainsReadOnlyMap (PtrComprCageBase, Tagged< HeapObject >) |
| |
| static bool | MustRecordSlots (Heap *heap) |
| |
| template<class T > |
| Tagged< Object > | VisitWeakList (Heap *heap, Tagged< Object > list, WeakObjectRetainer *retainer) |
| |
| template<class T > |
| static void | ClearWeakList (Heap *heap, Tagged< Object > list) |
| |
| template Tagged< Object > | VisitWeakList< Context > (Heap *heap, Tagged< Object > list, WeakObjectRetainer *retainer) |
| |
| template Tagged< Object > | VisitWeakList< AllocationSiteWithWeakNext > (Heap *heap, Tagged< Object > list, WeakObjectRetainer *retainer) |
| |
| template Tagged< Object > | VisitWeakList< JSFinalizationRegistry > (Heap *heap, Tagged< Object > list, WeakObjectRetainer *retainer) |
| |
| static GCType | GetGCTypeFromGarbageCollector (GarbageCollector collector) |
| |
| constexpr const char * | ToString (StepOrigin step_origin) |
| |
| static void | TraceFragmentation (PagedSpace *space) |
| |
| static Tagged< String > | UpdateReferenceInExternalStringTableEntry (Heap *heap, FullObjectSlot p) |
| |
| bool | IsCppHeapMarkingFinished (Heap *heap, MarkingWorklists::Local *local_marking_worklists) |
| |
| template<> |
| void | MarkingBitmap::SetBitsInCell< AccessMode::NON_ATOMIC > (uint32_t cell_index, MarkBit::CellType mask) |
| |
| template<> |
| void | MarkingBitmap::SetBitsInCell< AccessMode::ATOMIC > (uint32_t cell_index, MarkBit::CellType mask) |
| |
| template<> |
| void | MarkingBitmap::ClearBitsInCell< AccessMode::NON_ATOMIC > (uint32_t cell_index, MarkBit::CellType mask) |
| |
| template<> |
| void | MarkingBitmap::ClearBitsInCell< AccessMode::ATOMIC > (uint32_t cell_index, MarkBit::CellType mask) |
| |
| template<> |
| void | MarkingBitmap::ClearCellRangeRelaxed< AccessMode::ATOMIC > (uint32_t start_cell_index, uint32_t end_cell_index) |
| |
| template<> |
| void | MarkingBitmap::ClearCellRangeRelaxed< AccessMode::NON_ATOMIC > (uint32_t start_cell_index, uint32_t end_cell_index) |
| |
| template<> |
| void | MarkingBitmap::SetCellRangeRelaxed< AccessMode::ATOMIC > (uint32_t start_cell_index, uint32_t end_cell_index) |
| |
| template<> |
| void | MarkingBitmap::SetCellRangeRelaxed< AccessMode::NON_ATOMIC > (uint32_t start_cell_index, uint32_t end_cell_index) |
| |
| template<> |
| bool | MarkBit::Set< AccessMode::NON_ATOMIC > () |
| |
| template<> |
| bool | MarkBit::Set< AccessMode::ATOMIC > () |
| |
| template<> |
| bool | MarkBit::Get< AccessMode::NON_ATOMIC > () const |
| |
| template<> |
| bool | MarkBit::Get< AccessMode::ATOMIC > () const |
| |
| static V8_NOINLINE void | PrintJSONArray (size_t *array, const int len) |
| |
| static V8_NOINLINE void | DumpJSONArray (std::stringstream &stream, size_t *array, const int len) |
| |
| static ObjectStats::VirtualInstanceType | GetFeedbackSlotType (Tagged< MaybeObject > maybe_obj, FeedbackSlotKind kind, Isolate *isolate) |
| |
| template<typename Enum , typename Callback > |
| void | ForAll (Callback callback) |
| |
| void | VisitObject (Isolate *isolate, Tagged< HeapObject > object, ObjectVisitor *visitor) |
| |
| void | VisitObject (LocalIsolate *isolate, Tagged< HeapObject > object, ObjectVisitor *visitor) |
| |
| void | VisitObjectBody (Isolate *isolate, Tagged< HeapObject > object, ObjectVisitor *visitor) |
| |
| void | VisitObjectBody (Isolate *isolate, Tagged< Map > map, Tagged< HeapObject > object, ObjectVisitor *visitor) |
| |
| void | VisitObjectBody (LocalIsolate *isolate, Tagged< HeapObject > object, ObjectVisitor *visitor) |
| |
| const char * | WasmValueType2String (WasmValueType type) |
| |
| std::ostream & | operator<< (std::ostream &os, WasmValueType type) |
| |
| static void | LookupForRead (LookupIterator *it, bool is_has_property) |
| |
| bool | AllowedHandlerChange (KeyedAccessLoadMode old_mode, KeyedAccessLoadMode new_mode) |
| |
| | RUNTIME_FUNCTION (Runtime_LoadIC_Miss) |
| |
| | RUNTIME_FUNCTION (Runtime_LoadNoFeedbackIC_Miss) |
| |
| | RUNTIME_FUNCTION (Runtime_LoadWithReceiverNoFeedbackIC_Miss) |
| |
| | RUNTIME_FUNCTION (Runtime_LoadGlobalIC_Miss) |
| |
| | RUNTIME_FUNCTION (Runtime_LoadGlobalIC_Slow) |
| |
| | RUNTIME_FUNCTION (Runtime_LoadWithReceiverIC_Miss) |
| |
| | RUNTIME_FUNCTION (Runtime_KeyedLoadIC_Miss) |
| |
| | RUNTIME_FUNCTION (Runtime_StoreIC_Miss) |
| |
| | RUNTIME_FUNCTION (Runtime_DefineNamedOwnIC_Miss) |
| |
| | RUNTIME_FUNCTION (Runtime_DefineNamedOwnIC_Slow) |
| |
| | RUNTIME_FUNCTION (Runtime_StoreGlobalIC_Miss) |
| |
| | RUNTIME_FUNCTION (Runtime_StoreGlobalICNoFeedback_Miss) |
| |
| | RUNTIME_FUNCTION (Runtime_StoreGlobalIC_Slow) |
| |
| | RUNTIME_FUNCTION (Runtime_KeyedStoreIC_Miss) |
| |
| | RUNTIME_FUNCTION (Runtime_DefineKeyedOwnIC_Miss) |
| |
| | RUNTIME_FUNCTION (Runtime_StoreInArrayLiteralIC_Miss) |
| |
| | RUNTIME_FUNCTION (Runtime_KeyedStoreIC_Slow) |
| |
| | RUNTIME_FUNCTION (Runtime_DefineKeyedOwnIC_Slow) |
| |
| | RUNTIME_FUNCTION (Runtime_StoreInArrayLiteralIC_Slow) |
| |
| | RUNTIME_FUNCTION (Runtime_ElementsTransitionAndStoreIC_Miss) |
| |
| static MaybeDirectHandle< JSObject > | CloneObjectSlowPath (Isolate *isolate, DirectHandle< Object > source, int flags) |
| |
| | RUNTIME_FUNCTION (Runtime_CloneObjectIC_Slow) |
| |
| | RUNTIME_FUNCTION (Runtime_CloneObjectIC_Miss) |
| |
| | RUNTIME_FUNCTION (Runtime_StoreCallbackProperty) |
| |
| | RUNTIME_FUNCTION (Runtime_ObjectAssignTryFastcase) |
| |
| | RUNTIME_FUNCTION (Runtime_LoadPropertyWithInterceptor) |
| |
| | RUNTIME_FUNCTION (Runtime_StorePropertyWithInterceptor) |
| |
| | RUNTIME_FUNCTION (Runtime_LoadElementWithInterceptor) |
| |
| | RUNTIME_FUNCTION (Runtime_KeyedHasIC_Miss) |
| |
| | RUNTIME_FUNCTION (Runtime_HasElementWithInterceptor) |
| |
| static const char * | GCFunctionName () |
| |
| static bool | isValidCpuTraceMarkFunctionName () |
| |
| Handle< JSFunction > | SimpleInstallFunction (Isolate *isolate, DirectHandle< JSObject > base, const char *name, Builtin call, int len, AdaptArguments adapt, PropertyAttributes attrs) |
| |
| static void | AddToWeakNativeContextList (Isolate *isolate, Tagged< Context > context) |
| |
| static void | InstallWithIntrinsicDefaultProto (Isolate *isolate, DirectHandle< JSFunction > function, int context_index) |
| |
| void | InstallError (Isolate *isolate, DirectHandle< JSObject > global, DirectHandle< String > name, int context_index, Builtin error_constructor, int error_function_length) |
| |
| static uint32_t | Hash (RegisteredExtension *extension) |
| |
| static bool | PropertyAlreadyExists (Isolate *isolate, DirectHandle< JSObject > to, DirectHandle< Name > key) |
| |
| bool | InitializeICUDefaultLocation (const char *exec_path, const char *icu_data_file) |
| |
| bool | InitializeICU (const char *icu_data_file) |
| |
| void | InitializeExternalStartupData (const char *directory_path) |
| |
| void | InitializeExternalStartupDataFromFile (const char *snapshot_blob) |
| |
| MaybeDirectHandle< Object > | InternalizeJsonProperty (Handle< JSObject > holder, Handle< String > key) |
| |
| bool | MayHaveInterestingProperties (Isolate *isolate, Tagged< JSReceiver > object) |
| |
| MaybeDirectHandle< Object > | JsonStringify (Isolate *isolate, Handle< JSAny > object, Handle< JSAny > replacer, Handle< Object > gap) |
| |
| std::ostream & | operator<< (std::ostream &os, LogEventListener::CodeTag tag) |
| |
| std::ostream & | operator<< (std::ostream &os, LogEventListener::Event event) |
| |
| static std::vector< std::pair< Handle< SharedFunctionInfo >, Handle< AbstractCode > > > | EnumerateCompiledFunctions (Heap *heap) |
| |
| static void | AddIsolateIdIfNeeded (std::ostream &os, Isolate *isolate) |
| |
| static void | PrepareLogFileName (std::ostream &os, Isolate *isolate, const char *file_name) |
| |
| unsigned int | FastD2UI (double x) |
| |
| uint16_t | DoubleToFloat16 (double value) |
| |
| float | DoubleToFloat32 (double x) |
| |
| double | DoubleToInteger (double x) |
| |
| int32_t | DoubleToInt32 (double x) |
| |
| int64_t | DoubleToWebIDLInt64 (double x) |
| |
| uint64_t | DoubleToWebIDLUint64 (double x) |
| |
| bool | DoubleToSmiInteger (double value, int *smi_int_value) |
| |
| bool | IsSmiDouble (double value) |
| |
| bool | IsInt32Double (double value) |
| |
| bool | IsUint32Double (double value) |
| |
| bool | DoubleToUint32IfEqualToSelf (double value, uint32_t *uint32_value) |
| |
| int32_t | NumberToInt32 (Tagged< Object > number) |
| |
| uint32_t | NumberToUint32 (Tagged< Object > number) |
| |
| uint32_t | PositiveNumberToUint32 (Tagged< Object > number) |
| |
| int64_t | NumberToInt64 (Tagged< Object > number) |
| |
| uint64_t | PositiveNumberToUint64 (Tagged< Object > number) |
| |
| bool | TryNumberToSize (Tagged< Object > number, size_t *result) |
| |
| size_t | NumberToSize (Tagged< Object > number) |
| |
| uint32_t | DoubleToUint32 (double x) |
| |
| double | JunkStringValue () |
| |
| double | SignedZero (bool negative) |
| |
| bool | isDigit (int x, int radix) |
| |
| bool | isBinaryDigit (int x) |
| |
| template<class Char > |
| bool | SubStringEquals (const Char **current, const Char *end, const char *substring) |
| |
| template<class Char > |
| bool | AdvanceToNonspace (const Char **current, const Char *end) |
| |
| template<int radix_log_2, class Char > |
| double | InternalStringToIntDouble (const Char *start, const Char *end, bool negative, bool allow_trailing_junk) |
| |
| template<class Char > |
| double | InternalStringToDouble (const Char *current, const Char *end, ConversionFlag flag, double empty_string_val) |
| |
| double | StringToDouble (const char *str, ConversionFlag flags, double empty_string_val) |
| |
| double | StringToDouble (base::Vector< const uint8_t > str, ConversionFlag flags, double empty_string_val) |
| |
| double | StringToDouble (base::Vector< const base::uc16 > str, ConversionFlag flags, double empty_string_val) |
| |
| double | BinaryStringToDouble (base::Vector< const uint8_t > str) |
| |
| double | OctalStringToDouble (base::Vector< const uint8_t > str) |
| |
| double | HexStringToDouble (base::Vector< const uint8_t > str) |
| |
| double | ImplicitOctalStringToDouble (base::Vector< const uint8_t > str) |
| |
| double | StringToInt (Isolate *isolate, DirectHandle< String > string, int radix) |
| |
| MaybeHandle< BigInt > | StringToBigInt (Isolate *isolate, DirectHandle< String > string) |
| |
| template<typename IsolateT > |
| MaybeHandle< BigInt > | BigIntLiteral (IsolateT *isolate, const char *string) |
| |
| std::unique_ptr< char[]> | BigIntLiteralToDecimal (LocalIsolate *isolate, base::Vector< const uint8_t > literal) |
| |
| std::string_view | DoubleToStringView (double v, base::Vector< char > buffer) |
| |
| std::string_view | IntToStringView (int n, base::Vector< char > buffer) |
| |
| std::string_view | DoubleToFixedStringView (double value, int f, base::Vector< char > buffer) |
| |
| static std::string_view | CreateExponentialRepresentation (char *decimal_rep, int rep_length, int exponent, bool negative, int significant_digits, base::Vector< char > buffer) |
| |
| std::string_view | DoubleToExponentialStringView (double value, int f, base::Vector< char > buffer) |
| |
| std::string_view | DoubleToPrecisionStringView (double value, int p, base::Vector< char > buffer) |
| |
| std::string_view | DoubleToRadixStringView (double value, int radix, base::Vector< char > buffer) |
| |
| double | StringToDouble (Isolate *isolate, DirectHandle< String > string, ConversionFlag flag, double empty_string_val) |
| |
| double | FlatStringToDouble (Tagged< String > string, ConversionFlag flag, double empty_string_val) |
| |
| std::optional< double > | TryStringToDouble (LocalIsolate *isolate, DirectHandle< String > object, uint32_t max_length_for_conversion) |
| |
| std::optional< double > | TryStringToInt (LocalIsolate *isolate, DirectHandle< String > object, int radix) |
| |
| bool | IsSpecialIndex (Tagged< String > string) |
| |
| bool | IsSpecialIndex (Tagged< String > string, SharedStringAccessGuardIfNeeded &access_guard) |
| |
| float | DoubleToFloat32_NoInline (double x) |
| |
| int32_t | DoubleToInt32_NoInline (double x) |
| |
| int | FastD2IChecked (double x) |
| |
| int | FastD2I (double x) |
| |
| double | FastI2D (int x) |
| |
| double | FastUI2D (unsigned x) |
| |
| int64_t | DoubleToInt64 (double x) |
| |
| uint64_t | DoubleToUint64 (double x) |
| |
| static bool | IsMinusZero (double value) |
| |
| uint64_t | HashSeed (Isolate *isolate) |
| |
| uint64_t | HashSeed (LocalIsolate *isolate) |
| |
| uint64_t | HashSeed (ReadOnlyRoots roots) |
| |
| IntegerLiteral | operator<< (const IntegerLiteral &x, const IntegerLiteral &y) |
| |
| IntegerLiteral | operator+ (const IntegerLiteral &x, const IntegerLiteral &y) |
| |
| bool | operator== (const IntegerLiteral &x, const IntegerLiteral &y) |
| |
| bool | operator!= (const IntegerLiteral &x, const IntegerLiteral &y) |
| |
| std::ostream & | operator<< (std::ostream &stream, const IntegerLiteral &literal) |
| |
| IntegerLiteral | operator| (const IntegerLiteral &x, const IntegerLiteral &y) |
| |
| constexpr bool | operator== (const Tagged< AbstractCode > lhs, const Tagged< AbstractCode > rhs) |
| |
| | EXTERNAL_POINTER_ACCESSORS_MAYBE_READ_ONLY_HOST (AccessorInfo, maybe_redirected_getter, Address, kMaybeRedirectedGetterOffset, kAccessorInfoGetterTag) EXTERNAL_POINTER_ACCESSORS_MAYBE_READ_ONLY_HOST(AccessorInfo |
| |
| | BIT_FIELD_ACCESSORS (AccessorInfo, flags, replace_on_access, AccessorInfo::ReplaceOnAccessBit) BIT_FIELD_ACCESSORS(AccessorInfo |
| |
| | BIT_FIELD_ACCESSORS (AccessorInfo, flags, initial_property_attributes, AccessorInfo::InitialAttributesBits) void AccessorInfo |
| |
| | LAZY_EXTERNAL_POINTER_ACCESSORS_MAYBE_READ_ONLY_HOST_CHECKED2 (InterceptorInfo, named_getter, Address, kGetterOffset, kApiNamedPropertyGetterCallbackTag, is_named(), is_named() &&(value !=kNullAddress)) LAZY_EXTERNAL_POINTER_ACCESSORS_MAYBE_READ_ONLY_HOST_CHECKED2(InterceptorInfo |
| |
| | is_named () |
| |
| template<typename T , typename Isolate > |
| MaybeHandle< T > | ThrowBigIntTooBig (Isolate *isolate) |
| |
| void | Terminate (Isolate *isolate) |
| |
| void | Terminate (LocalIsolate *isolate) |
| |
| void | MutableBigInt_AbsoluteAddAndCanonicalize (Address result_addr, Address x_addr, Address y_addr) |
| |
| int32_t | MutableBigInt_AbsoluteCompare (Address x_addr, Address y_addr) |
| |
| void | MutableBigInt_AbsoluteSubAndCanonicalize (Address result_addr, Address x_addr, Address y_addr) |
| |
| int32_t | MutableBigInt_AbsoluteMulAndCanonicalize (Address result_addr, Address x_addr, Address y_addr) |
| |
| int32_t | MutableBigInt_AbsoluteDivAndCanonicalize (Address result_addr, Address x_addr, Address y_addr) |
| |
| int32_t | MutableBigInt_AbsoluteModAndCanonicalize (Address result_addr, Address x_addr, Address y_addr) |
| |
| void | MutableBigInt_BitwiseAndPosPosAndCanonicalize (Address result_addr, Address x_addr, Address y_addr) |
| |
| void | MutableBigInt_BitwiseAndNegNegAndCanonicalize (Address result_addr, Address x_addr, Address y_addr) |
| |
| void | MutableBigInt_BitwiseAndPosNegAndCanonicalize (Address result_addr, Address x_addr, Address y_addr) |
| |
| void | MutableBigInt_BitwiseOrPosPosAndCanonicalize (Address result_addr, Address x_addr, Address y_addr) |
| |
| void | MutableBigInt_BitwiseOrNegNegAndCanonicalize (Address result_addr, Address x_addr, Address y_addr) |
| |
| void | MutableBigInt_BitwiseOrPosNegAndCanonicalize (Address result_addr, Address x_addr, Address y_addr) |
| |
| void | MutableBigInt_BitwiseXorPosPosAndCanonicalize (Address result_addr, Address x_addr, Address y_addr) |
| |
| void | MutableBigInt_BitwiseXorNegNegAndCanonicalize (Address result_addr, Address x_addr, Address y_addr) |
| |
| void | MutableBigInt_BitwiseXorPosNegAndCanonicalize (Address result_addr, Address x_addr, Address y_addr) |
| |
| void | MutableBigInt_LeftShiftAndCanonicalize (Address result_addr, Address x_addr, intptr_t shift) |
| |
| uint32_t | RightShiftResultLength (Address x_addr, uint32_t x_sign, intptr_t shift) |
| |
| void | MutableBigInt_RightShiftAndCanonicalize (Address result_addr, Address x_addr, intptr_t shift, uint32_t must_round_down) |
| |
| | PROTECTED_POINTER_ACCESSORS (BytecodeArray, handler_table, TrustedByteArray, kHandlerTableOffset) PROTECTED_POINTER_ACCESSORS(BytecodeArray |
| |
| kConstantPoolOffset | RELEASE_ACQUIRE_PROTECTED_POINTER_ACCESSORS (BytecodeArray, source_position_table, TrustedByteArray, kSourcePositionTableOffset) uint8_t BytecodeArray |
| |
| | DEF_GETTER (BytecodeArray, SourcePositionTable, Tagged< TrustedByteArray >) |
| |
| | DEF_GETTER (BytecodeArray, raw_constant_pool, Tagged< Object >) |
| |
| | DEF_GETTER (BytecodeArray, raw_handler_table, Tagged< Object >) |
| |
| | DEF_ACQUIRE_GETTER (BytecodeArray, raw_source_position_table, Tagged< Object >) |
| |
| | DEF_GETTER (BytecodeArray, SizeIncludingMetadata, int) |
| |
| void | SerializeCallSiteInfo (Isolate *isolate, DirectHandle< CallSiteInfo > frame, IncrementalStringBuilder *builder) |
| |
| MaybeDirectHandle< String > | SerializeCallSiteInfo (Isolate *isolate, DirectHandle< CallSiteInfo > frame) |
| |
| template<typename T , typename U > |
| bool | Is (Tagged< U > value) |
| |
| template<typename To , typename From > |
| Tagged< To > | UncheckedCast (Tagged< From > value) |
| |
| template<typename To , typename From > |
| IndirectHandle< To > | UncheckedCast (IndirectHandle< From > value) |
| |
| template<typename To , typename From > |
| bool | TryCast (Tagged< From > value, Tagged< To > *out) |
| |
| template<typename To , typename From > |
| bool | TryCast (IndirectHandle< From > value, IndirectHandle< To > *out) |
| |
| template<typename To , typename From > |
| bool | TryCast (IndirectHandle< From > value, DirectHandle< To > *out) |
| |
| template<typename To , typename From > |
| bool | TryCast (DirectHandle< From > value, DirectHandle< To > *out) |
| |
| template<typename To , typename From > |
| bool | TryCast (MaybeIndirectHandle< From > value, MaybeIndirectHandle< To > *out) |
| |
| template<typename To , typename From > |
| bool | TryCast (MaybeIndirectHandle< From > value, MaybeDirectHandle< To > *out) |
| |
| template<typename To , typename From > |
| bool | TryCast (MaybeDirectHandle< From > value, MaybeDirectHandle< To > *out) |
| |
| template<typename T > |
| Tagged< T > | GCSafeCast (Tagged< Object > object, const Heap *heap) |
| |
| template<typename To , typename From > |
| Tagged< To > | Cast (Tagged< From > value, const v8::SourceLocation &loc=INIT_SOURCE_LOCATION_IN_DEBUG) |
| |
| template<typename To , typename From > |
| IndirectHandle< To > | Cast (IndirectHandle< From > value, const v8::SourceLocation &loc=INIT_SOURCE_LOCATION_IN_DEBUG) |
| |
| template<typename To , typename From > |
| DirectHandle< To > | Cast (DirectHandle< From > value, const v8::SourceLocation &loc=INIT_SOURCE_LOCATION_IN_DEBUG) |
| |
| template<typename To , typename From > |
| MaybeIndirectHandle< To > | Cast (MaybeIndirectHandle< From > value, const v8::SourceLocation &loc=INIT_SOURCE_LOCATION_IN_DEBUG) |
| |
| template<typename To , typename From > |
| MaybeDirectHandle< To > | Cast (MaybeDirectHandle< From > value, const v8::SourceLocation &loc=INIT_SOURCE_LOCATION_IN_DEBUG) |
| |
| template<typename To , typename From > |
| Tagged< To > | UncheckedCast (const From *value) |
| |
| template<typename To , typename From > |
| Tagged< To > | Cast (const From *value, const v8::SourceLocation &loc=INIT_SOURCE_LOCATION_IN_DEBUG) |
| |
| template<typename To , typename From > |
| Tagged< To > | UncheckedCast (From value) |
| |
| template<typename To , typename From > |
| Tagged< To > | Cast (From value, const v8::SourceLocation &loc=INIT_SOURCE_LOCATION_IN_DEBUG) |
| |
| template<typename T , typename U > |
| bool | Is (Tagged< MaybeWeak< U > > value) |
| |
| template<typename T , typename... U> |
| constexpr bool | Is (Tagged< Union< U... > > value) |
| |
| | DEF_RELAXED_GETTER (Cell, value, Tagged< Object >) |
| |
| constexpr bool | CodeKindHasTaggedOutgoingParams (CodeKind kind) |
| |
| | DEF_GETTER (Code, instruction_start, Address) |
| |
| const char * | CodeKindToString (CodeKind kind) |
| |
| const char * | CodeKindToMarker (CodeKind kind, bool context_specialized) |
| |
| constexpr bool | CodeKindIsInterpretedJSFunction (CodeKind kind) |
| |
| constexpr bool | CodeKindIsBaselinedJSFunction (CodeKind kind) |
| |
| constexpr bool | CodeKindIsUnoptimizedJSFunction (CodeKind kind) |
| |
| constexpr bool | CodeKindIsOptimizedJSFunction (CodeKind kind) |
| |
| constexpr bool | CodeKindIsJSFunction (CodeKind kind) |
| |
| constexpr bool | CodeKindIsBuiltinOrJSFunction (CodeKind kind) |
| |
| constexpr bool | CodeKindCanDeoptimize (CodeKind kind) |
| |
| constexpr bool | CodeKindCanOSR (CodeKind kind) |
| |
| constexpr bool | CodeKindCanTierUp (CodeKind kind) |
| |
| constexpr bool | CodeKindIsStoredInOptimizedCodeCache (CodeKind kind) |
| |
| constexpr bool | CodeKindUsesBytecodeOrInterpreterData (CodeKind kind) |
| |
| constexpr bool | CodeKindUsesDeoptimizationData (CodeKind kind) |
| |
| constexpr bool | CodeKindUsesBytecodeOffsetTable (CodeKind kind) |
| |
| constexpr bool | CodeKindMayLackSourcePositionTable (CodeKind kind) |
| |
| CodeKind | CodeKindForTopTier () |
| |
| constexpr CodeKindFlag | CodeKindToCodeKindFlag (CodeKind kind) |
| |
| | EXTERNAL_POINTER_ACCESSORS (NativeContext, microtask_queue, MicrotaskQueue *, kMicrotaskQueueOffset, kNativeContextMicrotaskQueueTag) void NativeContext |
| |
| static Maybe< bool > | UnscopableLookup (LookupIterator *it, bool is_with_context) |
| |
| static PropertyAttributes | GetAttributesForMode (VariableMode mode) |
| |
| | BIT_FIELD_ACCESSORS (DebugInfo, debugger_hints, side_effect_state, DebugInfo::SideEffectStateBits) BIT_FIELD_ACCESSORS(DebugInfo |
| |
| DebugInfo::DebugIsBlackboxedBit | BIT_FIELD_ACCESSORS (DebugInfo, debugger_hints, computed_debug_is_blackboxed, DebugInfo::ComputedDebugIsBlackboxedBit) BIT_FIELD_ACCESSORS(DebugInfo |
| |
| | TRUSTED_POINTER_ACCESSORS (DebugInfo, debug_bytecode_array, BytecodeArray, kDebugBytecodeArrayOffset, kBytecodeArrayIndirectPointerTag) TRUSTED_POINTER_ACCESSORS(DebugInfo |
| |
| | BIT_FIELD_ACCESSORS (StackFrameInfo, flags, bytecode_offset_or_source_position, StackFrameInfo::BytecodeOffsetOrSourcePositionBits) BIT_FIELD_ACCESSORS(StackFrameInfo |
| |
| | ACCESSORS_RELAXED_CHECKED2 (ErrorStackData, formatted_stack, Tagged< Object >, kCallSiteInfosOrFormattedStackOffset, HasFormattedStack(), true) bool ErrorStackData |
| |
| | DEFINE_DEOPT_ELEMENT_ACCESSORS (ProtectedLiteralArray, ProtectedDeoptimizationLiteralArray) DEFINE_DEOPT_ELEMENT_ACCESSORS(WrappedSharedFunctionInfo |
| |
| SharedFunctionInfoWrapperOrSmi | DEFINE_DEOPT_ELEMENT_ACCESSORS (InliningPositions, TrustedPodArray< InliningPosition >) Tagged< SharedFunctionInfo > DeoptimizationData |
| |
| | RELAXED_INT16_ACCESSORS (DescriptorArray, number_of_all_descriptors, kNumberOfAllDescriptorsOffset) RELAXED_INT16_ACCESSORS(DescriptorArray |
| |
| | BIT_FIELD_ACCESSORS (NameDictionary, flags, may_have_interesting_properties, NameDictionary::MayHaveInterestingPropertiesBit) Tagged< PropertyCell > GlobalDictionary |
| |
| const uint8_t * | TypedArrayAndRabGsabTypedArrayElementsKindShifts () |
| |
| const uint8_t * | TypedArrayAndRabGsabTypedArrayElementsKindSizes () |
| |
| int | GetDefaultHeaderSizeForElementsKind (ElementsKind elements_kind) |
| |
| const char * | ElementsKindToString (ElementsKind kind) |
| |
| ElementsKind | GetFastElementsKindFromSequenceIndex (int sequence_number) |
| |
| int | GetSequenceIndexFromFastElementsKind (ElementsKind elements_kind) |
| |
| ElementsKind | GetNextTransitionElementsKind (ElementsKind kind) |
| |
| static bool | IsFastTransitionTarget (ElementsKind elements_kind) |
| |
| bool | IsMoreGeneralElementsKindTransition (ElementsKind from_kind, ElementsKind to_kind) |
| |
| bool | UnionElementsKindUptoSize (ElementsKind *a_out, ElementsKind b) |
| |
| std::ostream & | operator<< (std::ostream &os, ElementsKind kind) |
| |
| constexpr int | ElementsKindToShiftSize (ElementsKind elements_kind) |
| |
| constexpr int | ElementsKindToByteSize (ElementsKind elements_kind) |
| |
| ElementsKind | GetInitialFastElementsKind () |
| |
| bool | IsDictionaryElementsKind (ElementsKind kind) |
| |
| bool | IsFastArgumentsElementsKind (ElementsKind kind) |
| |
| bool | IsSlowArgumentsElementsKind (ElementsKind kind) |
| |
| bool | IsSloppyArgumentsElementsKind (ElementsKind kind) |
| |
| bool | IsStringWrapperElementsKind (ElementsKind kind) |
| |
| bool | IsTypedArrayElementsKind (ElementsKind kind) |
| |
| bool | IsRabGsabTypedArrayElementsKind (ElementsKind kind) |
| |
| bool | IsTypedArrayOrRabGsabTypedArrayElementsKind (ElementsKind kind) |
| |
| bool | IsBigIntTypedArrayElementsKind (ElementsKind kind) |
| |
| constexpr bool | IsFloat16TypedArrayElementsKind (ElementsKind kind) |
| |
| bool | IsFloatTypedArrayElementsKind (ElementsKind kind) |
| |
| bool | IsSignedIntTypedArrayElementsKind (ElementsKind kind) |
| |
| bool | IsUnsignedIntTypedArrayElementsKind (ElementsKind kind) |
| |
| bool | IsWasmArrayElementsKind (ElementsKind kind) |
| |
| bool | IsSharedArrayElementsKind (ElementsKind kind) |
| |
| bool | IsTerminalElementsKind (ElementsKind kind) |
| |
| bool | IsFastElementsKind (ElementsKind kind) |
| |
| bool | IsTransitionElementsKind (ElementsKind kind) |
| |
| constexpr bool | IsDoubleElementsKind (ElementsKind kind) |
| |
| bool | IsAnyNonextensibleElementsKindUnchecked (ElementsKind kind) |
| |
| bool | IsAnyNonextensibleElementsKind (ElementsKind kind) |
| |
| bool | IsNonextensibleElementsKind (ElementsKind kind) |
| |
| bool | IsSealedElementsKind (ElementsKind kind) |
| |
| bool | IsFrozenElementsKind (ElementsKind kind) |
| |
| bool | IsFastOrNonextensibleOrSealedElementsKind (ElementsKind kind) |
| |
| bool | IsSmiOrObjectElementsKind (ElementsKind kind) |
| |
| constexpr bool | IsSmiElementsKind (ElementsKind kind) |
| |
| bool | IsFastNumberElementsKind (ElementsKind kind) |
| |
| constexpr bool | IsObjectElementsKind (ElementsKind kind) |
| |
| bool | IsAnyHoleyNonextensibleElementsKind (ElementsKind kind) |
| |
| constexpr bool | IsHoleyElementsKind (ElementsKind kind) |
| |
| bool | IsHoleyElementsKindForRead (ElementsKind kind) |
| |
| bool | IsHoleyOrDictionaryElementsKind (ElementsKind kind) |
| |
| bool | IsFastPackedElementsKind (ElementsKind kind) |
| |
| ElementsKind | GetPackedElementsKind (ElementsKind holey_kind) |
| |
| ElementsKind | GetHoleyElementsKind (ElementsKind packed_kind) |
| |
| ElementsKind | GetCorrespondingRabGsabElementsKind (ElementsKind typed_array_kind) |
| |
| ElementsKind | GetCorrespondingNonRabGsabElementsKind (ElementsKind typed_array_kind) |
| |
| bool | UnionElementsKindUptoPackedness (ElementsKind *a_out, ElementsKind b) |
| |
| ElementsKind | FastSmiToObjectElementsKind (ElementsKind from_kind) |
| |
| bool | IsSimpleMapChangeTransition (ElementsKind from_kind, ElementsKind to_kind) |
| |
| ElementsKind | GetMoreGeneralElementsKind (ElementsKind from_kind, ElementsKind to_kind) |
| |
| bool | IsTransitionableFastElementsKind (ElementsKind from_kind) |
| |
| bool | ElementsKindEqual (ElementsKind a, ElementsKind b) |
| |
| MaybeDirectHandle< Object > | ArrayConstructInitializeElements (DirectHandle< JSArray > array, JavaScriptArguments *args) |
| |
| void | CopyFastNumberJSArrayElementsToTypedArray (Address raw_context, Address raw_source, Address raw_destination, uintptr_t length, uintptr_t offset) |
| |
| void | CopyTypedArrayElementsToTypedArray (Address raw_source, Address raw_destination, uintptr_t length, uintptr_t offset) |
| |
| void | CopyTypedArrayElementsSlice (Address raw_source, Address raw_destination, uintptr_t start, uintptr_t end) |
| |
| template<typename Mapping > |
| constexpr bool | IsIdentityMapping (const Mapping &mapping, size_t index) |
| |
| | INT32_ACCESSORS (FeedbackMetadata, create_closure_slot_count, kCreateClosureSlotCountOffset) int32_t FeedbackMetadata |
| |
| | DEF_GETTER (FeedbackVector, metadata, Tagged< FeedbackMetadata >) |
| |
| | DEF_ACQUIRE_GETTER (FeedbackVector, metadata, Tagged< FeedbackMetadata >) |
| |
| | RELAXED_INT32_ACCESSORS (FeedbackVector, invocation_count, kInvocationCountOffset) void FeedbackVector |
| |
| | RELAXED_UINT8_ACCESSORS (FeedbackVector, invocation_count_before_stable, kInvocationCountBeforeStableOffset) int FeedbackVector |
| |
| BinaryOperationHint | BinaryOperationHintFromFeedback (int type_feedback) |
| |
| template<CompareOperationFeedback::Type Feedback> |
| bool | Is (int type_feedback) |
| |
| CompareOperationHint | CompareOperationHintFromFeedback (int type_feedback) |
| |
| ForInHint | ForInHintFromFeedback (ForInFeedback type_feedback) |
| |
| static bool | IsPropertyNameFeedback (Tagged< MaybeObject > feedback) |
| |
| std::ostream & | operator<< (std::ostream &os, FeedbackSlotKind kind) |
| |
| bool | IsCallICKind (FeedbackSlotKind kind) |
| |
| bool | IsLoadICKind (FeedbackSlotKind kind) |
| |
| bool | IsLoadGlobalICKind (FeedbackSlotKind kind) |
| |
| bool | IsKeyedLoadICKind (FeedbackSlotKind kind) |
| |
| bool | IsKeyedHasICKind (FeedbackSlotKind kind) |
| |
| bool | IsStoreGlobalICKind (FeedbackSlotKind kind) |
| |
| bool | IsSetNamedICKind (FeedbackSlotKind kind) |
| |
| bool | IsDefineNamedOwnICKind (FeedbackSlotKind kind) |
| |
| bool | IsDefineKeyedOwnICKind (FeedbackSlotKind kind) |
| |
| bool | IsDefineKeyedOwnPropertyInLiteralKind (FeedbackSlotKind kind) |
| |
| bool | IsKeyedStoreICKind (FeedbackSlotKind kind) |
| |
| bool | IsStoreInArrayLiteralICKind (FeedbackSlotKind kind) |
| |
| bool | IsGlobalICKind (FeedbackSlotKind kind) |
| |
| bool | IsCloneObjectKind (FeedbackSlotKind kind) |
| |
| TypeofMode | GetTypeofModeFromSlotKind (FeedbackSlotKind kind) |
| |
| LanguageMode | GetLanguageModeFromSlotKind (FeedbackSlotKind kind) |
| |
| bool | IsClass (Tagged< FieldType > obj) |
| |
| bool | IsNone (Tagged< FieldType > obj) |
| |
| bool | IsAny (Tagged< FieldType > obj) |
| |
| bool | IsArrowFunction (FunctionKind kind) |
| |
| bool | IsModule (FunctionKind kind) |
| |
| bool | IsModuleWithTopLevelAwait (FunctionKind kind) |
| |
| bool | IsAsyncGeneratorFunction (FunctionKind kind) |
| |
| bool | IsGeneratorFunction (FunctionKind kind) |
| |
| bool | IsAsyncFunction (FunctionKind kind) |
| |
| bool | IsResumableFunction (FunctionKind kind) |
| |
| bool | IsConciseMethod (FunctionKind kind) |
| |
| bool | IsStrictFunctionWithoutPrototype (FunctionKind kind) |
| |
| bool | IsGetterFunction (FunctionKind kind) |
| |
| bool | IsSetterFunction (FunctionKind kind) |
| |
| bool | IsAccessorFunction (FunctionKind kind) |
| |
| bool | IsDefaultConstructor (FunctionKind kind) |
| |
| bool | IsBaseConstructor (FunctionKind kind) |
| |
| bool | IsDerivedConstructor (FunctionKind kind) |
| |
| bool | IsClassConstructor (FunctionKind kind) |
| |
| bool | IsClassMembersInitializerFunction (FunctionKind kind) |
| |
| bool | IsConstructable (FunctionKind kind) |
| |
| bool | IsStatic (FunctionKind kind) |
| |
| bool | BindsSuper (FunctionKind kind) |
| |
| const char * | FunctionKind2String (FunctionKind kind) |
| |
| std::ostream & | operator<< (std::ostream &os, FunctionKind kind) |
| |
| const char * | FunctionSyntaxKind2String (FunctionSyntaxKind kind) |
| |
| std::ostream & | operator<< (std::ostream &os, FunctionSyntaxKind kind) |
| |
| bool | operator== (const HeapObjectLayout *obj, StrongTaggedBase ptr) |
| |
| bool | operator== (StrongTaggedBase ptr, const HeapObjectLayout *obj) |
| |
| bool | operator!= (const HeapObjectLayout *obj, StrongTaggedBase ptr) |
| |
| bool | operator!= (StrongTaggedBase ptr, const HeapObjectLayout *obj) |
| |
| V8_INLINE bool | OutsideSandboxOrInReadonlySpace (Tagged< HeapObject > obj) |
| |
| V8_INLINE constexpr bool | FastInReadOnlySpaceOrSmallSmi (Tagged_t obj) |
| |
| V8_INLINE constexpr bool | FastInReadOnlySpaceOrSmallSmi (Tagged< MaybeObject > obj) |
| |
| static bool | IsShortcutCandidate (int type) |
| |
| V8_EXPORT_PRIVATE std::ostream & | operator<< (std::ostream &os, InstanceType instance_type) |
| |
| V8_EXPORT_PRIVATE std::string | ToString (InstanceType instance_type) |
| |
| V8_EXPORT_PRIVATE std::vector< NumberFormatSpan > | FlattenRegionsToParts (std::vector< NumberFormatSpan > *regions) |
| |
| | RELEASE_ACQUIRE_ACCESSORS (JSTypedArray, base_pointer, Tagged< Object >, kBasePointerOffset) size_t JSArrayBuffer |
| |
| | DEF_GETTER (JSArrayBuffer, backing_store, void *) |
| |
| | BIT_FIELD_ACCESSORS (JSArrayBuffer, bit_field, is_external, JSArrayBuffer::IsExternalBit) BIT_FIELD_ACCESSORS(JSArrayBuffer |
| |
| JSArrayBuffer::IsDetachableBit | BIT_FIELD_ACCESSORS (JSArrayBuffer, bit_field, was_detached, JSArrayBuffer::WasDetachedBit) BIT_FIELD_ACCESSORS(JSArrayBuffer |
| |
| JSArrayBuffer::IsDetachableBit JSArrayBuffer::IsSharedBit | BIT_FIELD_ACCESSORS (JSArrayBuffer, bit_field, is_resizable_by_js, JSArrayBuffer::IsResizableByJsBit) bool JSArrayBuffer |
| |
| | BIT_FIELD_ACCESSORS (JSArrayBufferView, bit_field, is_length_tracking, JSArrayBufferView::IsLengthTrackingBit) BIT_FIELD_ACCESSORS(JSArrayBufferView |
| |
| | DEF_GETTER (JSTypedArray, external_pointer, Address) |
| |
| | DEF_GETTER (JSDataViewOrRabGsabDataView, data_pointer, void *) |
| |
| | DEF_GETTER (JSArray, length, Tagged< Number >) |
| |
| | ACCESSORS (JSV8BreakIterator, break_iterator, Tagged< Managed< icu::BreakIterator > >, kBreakIteratorOffset) ACCESSORS(JSV8BreakIterator |
| |
| | ACCESSORS (JSDateTimeFormat, icu_locale, Tagged< Managed< icu::Locale > >, kIcuLocaleOffset) ACCESSORS(JSDateTimeFormat |
| |
| kIcuSimpleDateFormatOffset | ACCESSORS (JSDateTimeFormat, icu_date_interval_format, Tagged< Managed< icu::DateIntervalFormat > >, kIcuDateIntervalFormatOffset) inline void JSDateTimeFormat |
| |
| std::optional< std::string > | GetOffsetTimeZone (Isolate *isolate, DirectHandle< String > time_zone) |
| |
| | ACCESSORS (JSDisplayNames, internal, Tagged< Managed< DisplayNamesInternal > >, kInternalOffset) inline void JSDisplayNames |
| |
| | BIT_FIELD_ACCESSORS (JSDisposableStackBase, status, state, JSDisposableStackBase::StateBit) BIT_FIELD_ACCESSORS(JSDisposableStackBase |
| |
| JSDisposableStackBase::NeedsAwaitBit | BIT_FIELD_ACCESSORS (JSDisposableStackBase, status, has_awaited, JSDisposableStackBase::HasAwaitedBit) BIT_FIELD_ACCESSORS(JSDisposableStackBase |
| |
| JSDisposableStackBase::NeedsAwaitBit JSDisposableStackBase::SuppressedErrorCreatedBit | BIT_FIELD_ACCESSORS (JSDisposableStackBase, status, length, JSDisposableStackBase::LengthBits) inline void JSDisposableStackBase |
| |
| | ACCESSORS (JSDurationFormat, icu_locale, Tagged< Managed< icu::Locale > >, kIcuLocaleOffset) IMPL_INLINE_SETTER_GETTER(Separator |
| |
| MaybeDirectHandle< String > | FormattedToString (Isolate *isolate, const icu::FormattedValue &formatted, const std::vector< std::vector< Part > > *parts, JSDurationFormat::Separator) |
| |
| MaybeDirectHandle< JSArray > | FormattedListToJSArray (Isolate *isolate, const icu::FormattedValue &formatted, const std::vector< std::vector< Part > > *parts, JSDurationFormat::Separator separator) |
| |
| | ACCESSORS (JSFunction, raw_feedback_cell, Tagged< FeedbackCell >, kFeedbackCellOffset) RELEASE_ACQUIRE_ACCESSORS(JSFunction |
| |
| kFeedbackCellOffset | DEF_GETTER (JSFunction, feedback_vector, Tagged< FeedbackVector >) |
| |
| | DEF_GETTER (JSFunction, shared, Tagged< SharedFunctionInfo >) |
| |
| | DEF_RELAXED_GETTER (JSFunction, shared, Tagged< SharedFunctionInfo >) |
| |
| | DEF_GETTER (JSFunction, has_feedback_vector, bool) |
| |
| | DEF_RELAXED_GETTER (JSFunction, context, Tagged< Context >) |
| |
| | RELEASE_ACQUIRE_ACCESSORS_CHECKED (JSFunction, prototype_or_initial_map,(Tagged< UnionOf< JSPrototype, Map, Hole > >), kPrototypeOrInitialMapOffset, map() ->has_prototype_slot()) DEF_GETTER(JSFunction |
| |
| | DEF_GETTER (JSFunction, initial_map, Tagged< Map >) |
| |
| | DEF_GETTER (JSFunction, has_initial_map, bool) |
| |
| | DEF_GETTER (JSFunction, has_instance_prototype, bool) |
| |
| | DEF_GETTER (JSFunction, has_prototype, bool) |
| |
| | DEF_GETTER (JSFunction, has_prototype_property, bool) |
| |
| | DEF_GETTER (JSFunction, PrototypeRequiresRuntimeLookup, bool) |
| |
| | DEF_GETTER (JSFunction, instance_prototype, Tagged< JSPrototype >) |
| |
| | DEF_GETTER (JSFunction, prototype, Tagged< Object >) |
| |
| | ACCESSORS (JSListFormat, icu_formatter, Tagged< Managed< icu::ListFormatter > >, kIcuFormatterOffset) inline void JSListFormat |
| |
| template<typename T > |
| MaybeDirectHandle< JSArray > | GetKeywordValuesFromLocale (Isolate *isolate, const char *key, const char *unicode_key, const icu::Locale &locale, bool(*removes)(const char *), bool commonly_used, bool sort) |
| |
| | DEF_GETTER (JSObject, elements, Tagged< FixedArrayBase >) |
| |
| | ACCESSORS (JSReceiver, raw_properties_or_hash, Tagged< Object >, kPropertiesOrHashOffset) RELAXED_ACCESSORS(JSReceiver |
| |
| | DEF_GETTER (JSObject, GetIndexedInterceptor, Tagged< InterceptorInfo >) |
| |
| | DEF_GETTER (JSObject, GetNamedInterceptor, Tagged< InterceptorInfo >) |
| |
| | EXTERNAL_POINTER_ACCESSORS (JSExternalObject, value, void *, kValueOffset, kExternalObjectValueTag) JSApiWrapper |
| |
| | DEF_GETTER (JSObject, GetElementsKind, ElementsKind) |
| |
| | DEF_GETTER (JSObject, GetElementsAccessor, ElementsAccessor *) |
| |
| | DEF_GETTER (JSObject, HasObjectElements, bool) |
| |
| | DEF_GETTER (JSObject, HasSmiElements, bool) |
| |
| | DEF_GETTER (JSObject, HasSmiOrObjectElements, bool) |
| |
| | DEF_GETTER (JSObject, HasDoubleElements, bool) |
| |
| | DEF_GETTER (JSObject, HasHoleyElements, bool) |
| |
| | DEF_GETTER (JSObject, HasFastElements, bool) |
| |
| | DEF_GETTER (JSObject, HasFastPackedElements, bool) |
| |
| | DEF_GETTER (JSObject, HasDictionaryElements, bool) |
| |
| | DEF_GETTER (JSObject, HasPackedElements, bool) |
| |
| | DEF_GETTER (JSObject, HasAnyNonextensibleElements, bool) |
| |
| | DEF_GETTER (JSObject, HasSealedElements, bool) |
| |
| | DEF_GETTER (JSObject, HasSharedArrayElements, bool) |
| |
| | DEF_GETTER (JSObject, HasNonextensibleElements, bool) |
| |
| | DEF_GETTER (JSObject, HasFastArgumentsElements, bool) |
| |
| | DEF_GETTER (JSObject, HasSlowArgumentsElements, bool) |
| |
| | DEF_GETTER (JSObject, HasSloppyArgumentsElements, bool) |
| |
| | DEF_GETTER (JSObject, HasStringWrapperElements, bool) |
| |
| | DEF_GETTER (JSObject, HasFastStringWrapperElements, bool) |
| |
| | DEF_GETTER (JSObject, HasSlowStringWrapperElements, bool) |
| |
| | DEF_GETTER (JSObject, HasTypedArrayOrRabGsabTypedArrayElements, bool) |
| |
| | DEF_GETTER (JSObject, HasNamedInterceptor, bool) |
| |
| | DEF_GETTER (JSObject, HasIndexedInterceptor, bool) |
| |
| | RELEASE_ACQUIRE_ACCESSORS_CHECKED2 (JSGlobalObject, global_dictionary, Tagged< GlobalDictionary >, kPropertiesOrHashOffset, !HasFastProperties(cage_base), true) DEF_GETTER(JSObject |
| |
| return | Cast< NumberDictionary > (elements(cage_base)) |
| |
| | DEF_GETTER (JSReceiver, HasFastProperties, bool) |
| |
| | DEF_GETTER (JSReceiver, property_dictionary, Tagged< NameDictionary >) |
| |
| | DEF_GETTER (JSReceiver, property_dictionary_swiss, Tagged< SwissNameDictionary >) |
| |
| | DEF_GETTER (JSReceiver, property_array, Tagged< PropertyArray >) |
| |
| static bool | ShouldConvertToSlowElements (uint32_t used_elements, uint32_t new_capacity) |
| |
| static bool | ShouldConvertToSlowElements (Tagged< JSObject > object, uint32_t capacity, uint32_t index, uint32_t *new_capacity) |
| |
| V8_WARN_UNUSED_RESULT Maybe< bool > | FastGetOwnValuesOrEntries (Isolate *isolate, DirectHandle< JSReceiver > receiver, bool get_entries, Handle< FixedArray > *result) |
| |
| MaybeDirectHandle< FixedArray > | GetOwnValuesOrEntries (Isolate *isolate, DirectHandle< JSReceiver > object, PropertyFilter filter, bool try_fast_path, bool get_entries) |
| |
| static const char * | NonAPIInstanceTypeToString (InstanceType instance_type) |
| |
| DirectHandle< NumberDictionary > | CreateElementDictionary (Isolate *isolate, DirectHandle< JSObject > object) |
| |
| static bool | PrototypeBenefitsFromNormalization (Tagged< JSObject > object) |
| |
| static bool | ShouldConvertToFastElements (Tagged< JSObject > object, Tagged< NumberDictionary > dictionary, uint32_t index, uint32_t *new_capacity) |
| |
| static ElementsKind | BestFittingFastElementsKind (Tagged< JSObject > object) |
| |
| template bool | JSObject::UpdateAllocationSite< AllocationSiteUpdateMode::kCheckOnly > (DirectHandle< JSObject > object, ElementsKind to_kind) |
| |
| template bool | JSObject::UpdateAllocationSite< AllocationSiteUpdateMode::kUpdate > (DirectHandle< JSObject > object, ElementsKind to_kind) |
| |
| template<typename BackingStore > |
| static int | HoleyElementsUsage (Tagged< JSObject > object, Tagged< BackingStore > store) |
| |
| | ACCESSORS (JSPluralRules, icu_plural_rules, Tagged< Managed< icu::PluralRules > >, kIcuPluralRulesOffset) ACCESSORS(JSPluralRules |
| |
| | TRUSTED_POINTER_ACCESSORS (JSRegExp, data, RegExpData, kDataOffset, kRegExpDataIndirectPointerTag) const char *JSRegExp |
| |
| | TRUSTED_POINTER_ACCESSORS (RegExpDataWrapper, data, RegExpData, kDataOffset, kRegExpDataIndirectPointerTag) bool IrRegExpData |
| |
| | PROTECTED_POINTER_ACCESSORS (IrRegExpData, latin1_bytecode, TrustedByteArray, kLatin1BytecodeOffset) PROTECTED_POINTER_ACCESSORS(IrRegExpData |
| |
| | ACCESSORS (JSRelativeTimeFormat, icu_formatter, Tagged< Managed< icu::RelativeDateTimeFormatter > >, kIcuFormatterOffset) inline void JSRelativeTimeFormat |
| |
| | ACCESSORS (JSSegmentIterator, icu_break_iterator, Tagged< Managed< icu::BreakIterator > >, kIcuBreakIteratorOffset) ACCESSORS(JSSegmentIterator |
| |
| | ACCESSORS (JSSegmenter, icu_break_iterator, Tagged< Managed< icu::BreakIterator > >, kIcuBreakIteratorOffset) inline void JSSegmenter |
| |
| | ACCESSORS (JSSegments, icu_break_iterator, Tagged< Managed< icu::BreakIterator > >, kIcuBreakIteratorOffset) ACCESSORS(JSSegments |
| |
| | TEMPORAL_TIME_INLINE_GETTER_SETTER (JSTemporalPlainDateTime, hour_minute_second, second_parts) TEMPORAL_TIME_INLINE_GETTER_SETTER(JSTemporalPlainTime |
| |
| second_parts | BIT_FIELD_ACCESSORS (JSTemporalCalendar, flags, calendar_index, JSTemporalCalendar::CalendarIndexBits) TEMPORAL_INLINE_SIGNED_GETTER_SETTER(JSTemporalTimeZone |
| |
| second_parts OffsetMillisecondsOrTimeZoneIndex | TEMPORAL_INLINE_SIGNED_GETTER_SETTER (JSTemporalTimeZone, details, offset_sub_milliseconds, -1000000, 1000000, OffsetSubMilliseconds) BIT_FIELD_ACCESSORS(JSTemporalTimeZone |
| |
| template<DirectHandle< Object >(*)(Isolate *, DirectHandle< BigInt >, int32_t) iana_func> |
| MaybeDirectHandle< Object > | GetTransition (Isolate *isolate, DirectHandle< JSTemporalTimeZone > time_zone, DirectHandle< Object > starting_point_obj, const char *method_name) |
| |
| MaybeDirectHandle< JSArray > | GetIANATimeZoneEpochValueAsArrayOfInstantForUTC (Isolate *isolate, const DateTimeRecord &date_time) |
| |
| MaybeDirectHandle< JSArray > | GetIANATimeZoneEpochValueAsArrayOfInstant (Isolate *isolate, int32_t time_zone_index, const DateTimeRecord &date_time) |
| |
| template<typename T , typename R , MaybeDirectHandle< R >(*)(Isolate *, DirectHandle< JSReceiver >, DirectHandle< JSReceiver >, DirectHandle< Object >) from_fields> |
| MaybeDirectHandle< R > | ToPlain (Isolate *isolate, DirectHandle< T > t, DirectHandle< String > f1, DirectHandle< String > f2) |
| |
| template<typename T , MaybeDirectHandle< T >(*)(Isolate *, DirectHandle< JSReceiver >, DirectHandle< JSReceiver >, DirectHandle< Object >) from_fields_func> |
| MaybeDirectHandle< T > | ZonedDateTimeToPlainYearMonthOrMonthDay (Isolate *isolate, DirectHandle< JSTemporalZonedDateTime > zoned_date_time, DirectHandle< String > field_name_1, DirectHandle< String > field_name_2, const char *method_name) |
| |
| | BIT_FIELD_ACCESSORS (JSFinalizationRegistry, flags, scheduled_for_cleanup, JSFinalizationRegistry::ScheduledForCleanupBit) void JSFinalizationRegistry |
| |
| MaybeDirectHandle< FixedArray > | FilterProxyKeys (KeyAccumulator *accumulator, DirectHandle< JSProxy > owner, DirectHandle< FixedArray > keys, PropertyFilter filter, bool skip_indices) |
| |
| | ACCESSORS (ClassBoilerplate, static_properties_template, Tagged< Object >, kStaticPropertiesTemplateOffset) ACCESSORS(ClassBoilerplate |
| |
| kStaticElementsTemplateOffset | ACCESSORS (ClassBoilerplate, static_computed_properties, Tagged< FixedArray >, kStaticComputedPropertiesOffset) ACCESSORS(ClassBoilerplate |
| |
| kStaticElementsTemplateOffset kInstancePropertiesTemplateOffset | ACCESSORS (ClassBoilerplate, instance_elements_template, Tagged< Object >, kInstanceElementsTemplateOffset) ACCESSORS(ClassBoilerplate |
| |
| void | ManagedObjectFinalizer (const v8::WeakCallbackInfo< void > &data) |
| |
| | ACCESSORS (Map, instance_descriptors, Tagged< DescriptorArray >, kInstanceDescriptorsOffset) RELAXED_ACCESSORS(Map |
| |
| kInstanceDescriptorsOffset | RELEASE_ACQUIRE_ACCESSORS (Map, instance_descriptors, Tagged< DescriptorArray >, kInstanceDescriptorsOffset) ACCESSORS(Map |
| |
| kInstanceDescriptorsOffset kTransitionsOrPrototypeInfoOffset | RELEASE_ACQUIRE_ACCESSORS (Map, raw_transitions,(Tagged< UnionOf< Smi, MaybeWeak< Map >, TransitionArray > >), kTransitionsOrPrototypeInfoOffset) ACCESSORS_CHECKED2(Map |
| |
| kInstanceDescriptorsOffset kTransitionsOrPrototypeInfoOffset | IsNull (value)||IsJSProxy(value)||IsWasmObject(value)||(IsJSObject(value) &&(HeapLayout |
| |
| | RELEASE_ACQUIRE_ACCESSORS (Map, prototype_info,(Tagged< UnionOf< Smi, PrototypeInfo > >), kTransitionsOrPrototypeInfoOffset) void Map |
| |
| | BIT_FIELD_ACCESSORS (Map, relaxed_bit_field, has_non_instance_prototype, Map::Bits1::HasNonInstancePrototypeBit) BIT_FIELD_ACCESSORS(Map |
| |
| Map::Bits1::HasPrototypeSlotBit | BIT_FIELD_ACCESSORS2 (Map, relaxed_bit_field, bit_field, is_callable, Map::Bits1::IsCallableBit) BIT_FIELD_ACCESSORS2(Map |
| |
| Map::Bits1::HasPrototypeSlotBit Map::Bits1::HasNamedInterceptorBit | BIT_FIELD_ACCESSORS2 (Map, relaxed_bit_field, bit_field, has_indexed_interceptor, Map::Bits1::HasIndexedInterceptorBit) BIT_FIELD_ACCESSORS2(Map |
| |
| Map::Bits1::HasPrototypeSlotBit Map::Bits1::HasNamedInterceptorBit Map::Bits1::IsUndetectableBit | BIT_FIELD_ACCESSORS2 (Map, relaxed_bit_field, bit_field, is_access_check_needed, Map::Bits1::IsAccessCheckNeededBit) BIT_FIELD_ACCESSORS2(Map |
| |
| Map::Bits1::HasPrototypeSlotBit Map::Bits1::HasNamedInterceptorBit Map::Bits1::IsUndetectableBit Map::Bits1::IsConstructorBit | BIT_FIELD_ACCESSORS (Map, bit_field2, new_target_is_base, Map::Bits2::NewTargetIsBaseBit) BIT_FIELD_ACCESSORS(Map |
| |
| Map::Bits1::HasPrototypeSlotBit Map::Bits1::HasNamedInterceptorBit Map::Bits1::IsUndetectableBit Map::Bits1::IsConstructorBit Map::Bits2::IsImmutablePrototypeBit | BIT_FIELD_ACCESSORS (Map, relaxed_bit_field3, owns_descriptors, Map::Bits3::OwnsDescriptorsBit) BIT_FIELD_ACCESSORS(Map |
| |
| Map::Bits1::HasPrototypeSlotBit Map::Bits1::HasNamedInterceptorBit Map::Bits1::IsUndetectableBit Map::Bits1::IsConstructorBit Map::Bits2::IsImmutablePrototypeBit Map::Bits3::IsDeprecatedBit | BIT_FIELD_ACCESSORS (Map, relaxed_bit_field3, is_in_retained_map_list, Map::Bits3::IsInRetainedMapListBit) BIT_FIELD_ACCESSORS(Map |
| |
| Map::Bits1::HasPrototypeSlotBit Map::Bits1::HasNamedInterceptorBit Map::Bits1::IsUndetectableBit Map::Bits1::IsConstructorBit Map::Bits2::IsImmutablePrototypeBit Map::Bits3::IsDeprecatedBit Map::Bits3::IsPrototypeMapBit | BIT_FIELD_ACCESSORS (Map, relaxed_bit_field3, is_migration_target, Map::Bits3::IsMigrationTargetBit) BIT_FIELD_ACCESSORS2(Map |
| |
| Map::Bits1::HasPrototypeSlotBit Map::Bits1::HasNamedInterceptorBit Map::Bits1::IsUndetectableBit Map::Bits1::IsConstructorBit Map::Bits2::IsImmutablePrototypeBit Map::Bits3::IsDeprecatedBit Map::Bits3::IsPrototypeMapBit Map::Bits3::IsExtensibleBit | BIT_FIELD_ACCESSORS (Map, bit_field3, may_have_interesting_properties, Map::Bits3::MayHaveInterestingPropertiesBit) BIT_FIELD_ACCESSORS(Map |
| |
| Map::Bits1::HasPrototypeSlotBit Map::Bits1::HasNamedInterceptorBit Map::Bits1::IsUndetectableBit Map::Bits1::IsConstructorBit Map::Bits2::IsImmutablePrototypeBit Map::Bits3::IsDeprecatedBit Map::Bits3::IsPrototypeMapBit Map::Bits3::IsExtensibleBit Map::Bits3::ConstructionCounterBits | DEF_GETTER (Map, GetNamedInterceptor, Tagged< InterceptorInfo >) |
| |
| | DEF_GETTER (Map, GetIndexedInterceptor, Tagged< InterceptorInfo >) |
| |
| bool | IsBooleanMap (Tagged< Map > map) |
| |
| bool | IsNullOrUndefinedMap (Tagged< Map > map) |
| |
| bool | IsPrimitiveMap (Tagged< Map > map) |
| |
| | DEF_GETTER (Map, GetBackPointer, Tagged< HeapObject >) |
| |
| | RELAXED_ACCESSORS (Map, prototype_validity_cell,(Tagged< UnionOf< Smi, Cell > >), kPrototypeValidityCellOffset) ACCESSORS_CHECKED2(Map |
| |
| !IsContextMap | IsNull (value)||!IsContextMap(*this)) RELAXED_ACCESSORS_CHECKED2(Map |
| |
| !IsContextMap !IsContextMap IsContextMap this IsMapMap this | ACCESSORS_CHECKED (Map, native_context_or_null, Tagged< Object >, kConstructorOrBackPointerOrNativeContextOffset,(IsNull(value)||IsNativeContext(value)) &&(IsContextMap(*this)||IsMapMap(*this))) DEF_GETTER(Map |
| |
| | DCHECK (IsNull(value)||IsNativeContext(value)||value==Smi::uninitialized_deserialization_value()) |
| |
| | DCHECK (IsContextMap(*this)||IsMapMap(*this)) |
| |
| | DEF_GETTER (Map, GetConstructorRaw, Tagged< Object >) |
| |
| | DEF_GETTER (Map, GetNonInstancePrototype, Tagged< Object >) |
| |
| | DEF_GETTER (Map, GetConstructor, Tagged< Object >) |
| |
| | DEF_GETTER (Map, GetFunctionTemplateInfo, Tagged< FunctionTemplateInfo >) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsNormalizedMapCache) |
| |
| static bool | ContainsMap (MapHandlesSpan maps, Tagged< Map > map) |
| |
| static bool | HasElementsKind (MapHandlesSpan maps, ElementsKind elements_kind) |
| |
| static Tagged< Map > | FindClosestElementsTransition (Isolate *isolate, Tagged< Map > map, ElementsKind to_kind, ConcurrencyMode cmode) |
| |
| static Handle< Map > | AddMissingElementsTransitions (Isolate *isolate, Handle< Map > map, ElementsKind to_kind) |
| |
| bool | IsSpecialReceiverMap (Tagged< Map > map) |
| |
| bool | IsCustomElementsReceiverMap (Tagged< Map > map) |
| |
| Tagged< ClearedWeakValue > | ClearedValue (PtrComprCageBase cage_base) |
| |
| Tagged< ClearedWeakValue > | ClearedTrustedValue () |
| |
| template<typename THeapObjectSlot > |
| void | UpdateHeapObjectReferenceSlot (THeapObjectSlot slot, Tagged< HeapObject > value) |
| |
| | BOOL_ACCESSORS (SourceTextModule, flags, has_toplevel_await, HasToplevelAwaitBit::kShift) BIT_FIELD_ACCESSORS(SourceTextModule |
| |
| SourceTextModule::AsyncEvaluationOrdinalBits | ACCESSORS (SourceTextModule, async_parent_modules, Tagged< ArrayList >, kAsyncParentModulesOffset) inline void ModuleRequest |
| |
| | BIT_FIELD_ACCESSORS (Symbol, flags, is_well_known_symbol, Symbol::IsWellKnownSymbolBit) BIT_FIELD_ACCESSORS(Symbol |
| |
| Symbol::IsInPublicSymbolTableBit | BIT_FIELD_ACCESSORS (Symbol, flags, is_interesting_symbol, Symbol::IsInterestingSymbolBit) bool Symbol |
| |
| | DEF_HEAP_OBJECT_PREDICATE (Name, IsUniqueName) |
| |
| bool | IsUniqueName (Tagged< Name > obj) |
| |
| bool | IsUniqueName (Tagged< Name > obj, PtrComprCageBase cage_base) |
| |
| Address | CheckObjectType (Address raw_value, Address raw_type, Address raw_location) |
| |
| bool | IsTaggedIndex (Tagged< Object > obj) |
| |
| bool | IsJSObjectThatCanBeTrackedAsPrototype (Tagged< Object > obj) |
| |
| bool | IsAnyHole (Tagged< Object > obj, PtrComprCageBase cage_base) |
| |
| bool | IsAnyHole (Tagged< Object > obj) |
| |
| bool | IsNullOrUndefined (Tagged< Object > obj, Isolate *isolate) |
| |
| bool | IsNullOrUndefined (Tagged< Object > obj, LocalIsolate *local_isolate) |
| |
| bool | IsNullOrUndefined (Tagged< Object > obj, ReadOnlyRoots roots) |
| |
| bool | IsNullOrUndefined (Tagged< Object > obj) |
| |
| bool | IsNullOrUndefined (Tagged< HeapObject > obj) |
| |
| bool | IsZero (Tagged< Object > obj) |
| |
| bool | IsPublicSymbol (Tagged< Object > obj) |
| |
| bool | IsPrivateSymbol (Tagged< Object > obj) |
| |
| bool | IsNoSharedNameSentinel (Tagged< Object > obj) |
| |
| bool | IsJSObjectThatCanBeTrackedAsPrototype (Tagged< HeapObject > obj) |
| |
| bool | IsJSApiWrapperObject (Tagged< Map > map) |
| |
| bool | IsJSApiWrapperObject (Tagged< HeapObject > js_obj) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsUniqueName) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsCallable) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsCallableJSProxy) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsCallableApiObject) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsNonNullForeign) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsConstructor) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsSourceTextModuleInfo) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsConsString) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsThinString) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsSlicedString) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsSeqString) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsSeqOneByteString) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsSeqTwoByteString) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsExternalOneByteString) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsExternalTwoByteString) |
| |
| bool | IsNumber (Tagged< Object > obj) |
| |
| bool | IsNumber (Tagged< Object > obj, PtrComprCageBase cage_base) |
| |
| bool | IsNumeric (Tagged< Object > obj) |
| |
| bool | IsNumeric (Tagged< Object > obj, PtrComprCageBase cage_base) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsTemplateLiteralObject) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsDeoptimizationData) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsHandlerTable) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsDependentCode) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsOSROptimizedCodeCache) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsStringWrapper) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsBooleanWrapper) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsScriptWrapper) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsNumberWrapper) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsBigIntWrapper) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsSymbolWrapper) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsStringSet) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsObjectHashSet) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsCompilationCacheTable) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsMapCache) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsObjectHashTable) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsObjectTwoHashTable) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsHashTableBase) |
| |
| bool | IsPrimitive (Tagged< Object > obj) |
| |
| bool | IsPrimitive (Tagged< Object > obj, PtrComprCageBase cage_base) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsUndetectable) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsAccessCheckNeeded) |
| |
| bool | IsNaN (Tagged< Object > obj) |
| |
| bool | IsMinusZero (Tagged< Object > obj) |
| |
| | DEF_ACQUIRE_GETTER (HeapObject, map, Tagged< Map >) |
| |
| bool | IsSpecialReceiverInstanceType (InstanceType instance_type) |
| |
| bool | IsCustomElementsReceiverInstanceType (InstanceType instance_type) |
| |
| bool | IsShared (Tagged< Object > obj) |
| |
| static uint32_t | ObjectAddressForHashing (Address object) |
| |
| static DirectHandle< Object > | MakeEntryPair (Isolate *isolate, size_t index, DirectHandle< Object > value) |
| |
| static DirectHandle< Object > | MakeEntryPair (Isolate *isolate, DirectHandle< Object > key, DirectHandle< Object > value) |
| |
| ShouldThrow | GetShouldThrow (Isolate *isolate, Maybe< ShouldThrow > should_throw) |
| |
| bool | ComparisonResultToBool (Operation op, ComparisonResult result) |
| |
| std::ostream & | operator<< (std::ostream &os, PropertyCellType type) |
| |
| template Handle< JSAny > | Object::WrapForRead< AllocationType::kYoung > (Isolate *isolate, Handle< JSAny > object, Representation representation) |
| |
| template Handle< JSAny > | Object::WrapForRead< AllocationType::kOld > (LocalIsolate *isolate, Handle< JSAny > object, Representation representation) |
| |
| void | ShortPrint (Tagged< Object > obj, FILE *out) |
| |
| void | ShortPrint (Tagged< Object > obj, StringStream *accumulator) |
| |
| void | ShortPrint (Tagged< Object > obj, std::ostream &os) |
| |
| std::ostream & | operator<< (std::ostream &os, Tagged< Object > obj) |
| |
| std::ostream & | operator<< (std::ostream &os, Object::Conversion kind) |
| |
| std::ostream & | operator<< (std::ostream &os, const Brief &v) |
| |
| bool | PropertyKeyToArrayLength (DirectHandle< Object > value, uint32_t *length) |
| |
| bool | PropertyKeyToArrayIndex (DirectHandle< Object > index_obj, uint32_t *output) |
| |
| template bool | Script::GetPositionInfoInternal< String::LineEndsVector > (const String::LineEndsVector &ends, int position, Script::PositionInfo *info, const DisallowGarbageCollection &no_gc) const |
| |
| static void | MoveMessageToPromise (Isolate *isolate, DirectHandle< JSPromise > promise) |
| |
| static bool | RemainsConstantType (Tagged< PropertyCell > cell, Tagged< Object > value) |
| |
| | EXTERN_DEFINE_OBJECT_BASE_HASH_TABLE (EphemeronHashTable, EphemeronHashTableShape) template V8_EXPORT_PRIVATE void Dictionary< NumberDictionary |
| |
| NumberDictionaryShape | ::UncheckedAdd< Isolate, Handle, AllocationType::kSharedOld > (Isolate *, Handle< NumberDictionary >, uint32_t, DirectHandle< Object >, PropertyDetails) |
| |
| static V8_INLINE bool | HasWeakHeapObjectTag (const Tagged< Object > value) |
| |
| template<HeapObjectReferenceType kRefType, typename StorageType > |
| V8_INLINE constexpr bool | IsObject (TaggedImpl< kRefType, StorageType > obj) |
| |
| template<HeapObjectReferenceType kRefType, typename StorageType > |
| V8_INLINE constexpr bool | IsSmi (TaggedImpl< kRefType, StorageType > obj) |
| |
| template<HeapObjectReferenceType kRefType, typename StorageType > |
| V8_INLINE constexpr bool | IsHeapObject (TaggedImpl< kRefType, StorageType > obj) |
| |
| template<typename StorageType > |
| V8_INLINE constexpr bool | IsWeak (TaggedImpl< HeapObjectReferenceType::WEAK, StorageType > obj) |
| |
| V8_INLINE bool | IsSmi (Tagged< Object > obj) |
| |
| V8_INLINE bool | IsSmi (Tagged< HeapObject > obj) |
| |
| V8_INLINE bool | IsSmi (Tagged< Smi > obj) |
| |
| V8_INLINE bool | IsHeapObject (Tagged< Object > obj) |
| |
| V8_INLINE bool | IsHeapObject (Tagged< HeapObject > obj) |
| |
| V8_INLINE bool | IsHeapObject (Tagged< Smi > obj) |
| |
| V8_INLINE bool | IsNumber (Tagged< Object > obj, ReadOnlyRoots roots) |
| |
| template<typename T > |
| V8_INLINE bool | IsWasmObject (T obj, Isolate *=nullptr) |
| |
| void | Print (Tagged< Object > obj) |
| |
| void | Print (Tagged< Object > obj, std::ostream &os) |
| |
| | DEF_HEAP_OBJECT_PREDICATE (HeapObject, IsBoolean) |
| |
| MaybeDirectHandle< JSReceiver > | GetOptionsObject (Isolate *isolate, DirectHandle< Object > options, const char *method_name) |
| |
| MaybeDirectHandle< JSReceiver > | CoerceOptionsToObject (Isolate *isolate, DirectHandle< Object > options, const char *method_name) |
| |
| Maybe< bool > | GetStringOption (Isolate *isolate, DirectHandle< JSReceiver > options, const char *property, const std::vector< const char * > &values, const char *method_name, std::unique_ptr< char[]> *result) |
| |
| V8_WARN_UNUSED_RESULT Maybe< bool > | GetBoolOption (Isolate *isolate, DirectHandle< JSReceiver > options, const char *property, const char *method_name, bool *result) |
| |
| Maybe< int > | DefaultNumberOption (Isolate *isolate, DirectHandle< Object > value, int min, int max, int fallback, DirectHandle< String > property) |
| |
| Maybe< int > | GetNumberOption (Isolate *isolate, DirectHandle< JSReceiver > options, DirectHandle< String > property, int min, int max, int fallback) |
| |
| Maybe< double > | GetNumberOptionAsDouble (Isolate *isolate, DirectHandle< JSReceiver > options, DirectHandle< String > property, double default_value) |
| |
| template<typename T > |
| static V8_WARN_UNUSED_RESULT Maybe< T > | GetStringOption (Isolate *isolate, DirectHandle< JSReceiver > options, const char *name, const char *method_name, const std::vector< const char * > &str_values, const std::vector< T > &enum_values, T default_value) |
| |
| template<typename T > |
| static V8_WARN_UNUSED_RESULT Maybe< T > | GetStringOrBooleanOption (Isolate *isolate, DirectHandle< JSReceiver > options, const char *property, const char *method, const std::vector< const char * > &str_values, const std::vector< T > &enum_values, T true_value, T false_value, T fallback_value) |
| |
| | OBJECT_CONSTRUCTORS_IMPL (SmallOrderedHashSet, SmallOrderedHashTable< SmallOrderedHashSet >) OBJECT_CONSTRUCTORS_IMPL(SmallOrderedHashMap |
| |
| SmallOrderedHashTable< SmallOrderedHashMap > | OBJECT_CONSTRUCTORS_IMPL (SmallOrderedNameDictionary, SmallOrderedHashTable< SmallOrderedNameDictionary >) Handle< Map > OrderedHashSet |
| |
| template<class SmallTable , class LargeTable > |
| class | EXPORT_TEMPLATE_DECLARE (V8_EXPORT_PRIVATE) OrderedHashTableHandler |
| |
| | RELEASE_ACQUIRE_SMI_ACCESSORS (PropertyArray, length_and_hash, kLengthAndHashOffset) Tagged< JSAny > PropertyArray |
| |
| | ACCESSORS (PropertyCell, dependent_code, Tagged< DependentCode >, kDependentCodeOffset) ACCESSORS(PropertyCell |
| |
| kPropertyDetailsRawOffset | RELEASE_ACQUIRE_ACCESSORS (PropertyCell, property_details_raw, Tagged< Smi >, kPropertyDetailsRawOffset) PropertyDetails PropertyCell |
| |
| | RELEASE_ACQUIRE_ACCESSORS (ContextSidePropertyCell, context_side_property_raw, Tagged< Smi >, kPropertyDetailsRawOffset) ACCESSORS(ContextSidePropertyCell |
| |
| V8_INLINE PropertyAttributes | PropertyAttributesFromInt (int value) |
| |
| bool | IsGeneralizableTo (PropertyLocation a, PropertyLocation b) |
| |
| bool | IsGeneralizableTo (PropertyConstness a, PropertyConstness b) |
| |
| PropertyConstness | GeneralizeConstness (PropertyConstness a, PropertyConstness b) |
| |
| V8_EXPORT_PRIVATE std::ostream & | operator<< (std::ostream &os, const Representation &representation) |
| |
| V8_EXPORT_PRIVATE std::ostream & | operator<< (std::ostream &os, const PropertyAttributes &attributes) |
| |
| V8_EXPORT_PRIVATE std::ostream & | operator<< (std::ostream &os, PropertyConstness constness) |
| |
| | DEF_GETTER (PrototypeInfo, derived_maps, Tagged< HeapObject >) |
| |
| | RELEASE_ACQUIRE_ACCESSORS (PrototypeInfo, derived_maps, Tagged< HeapObject >, kDerivedMapsOffset) Tagged< MaybeObject > PrototypeInfo |
| |
| | BOOL_ACCESSORS (PrototypeInfo, bit_field, should_be_fast_map, ShouldBeFastBit::kShift) void PrototypeUsers |
| |
| std::ostream & | operator<< (std::ostream &os, VariableAllocationInfo var_info) |
| |
| | ACCESSORS_CHECKED (Script, eval_from_shared_or_wrapped_arguments, Tagged< Object >, kEvalFromSharedOrWrappedArgumentsOffset, CHECK_SCRIPT_NOT_WASM) SMI_ACCESSORS_CHECKED(Script |
| |
| CHECK_SCRIPT_NOT_WASM | ACCESSORS (Script, compiled_lazy_function_positions, Tagged< Object >, kCompiledLazyFunctionPositionsOffset) bool Script |
| |
| | DEF_GETTER (Script, infos, Tagged< WeakFixedArray >) |
| |
| | PROTECTED_POINTER_ACCESSORS (InterpreterData, bytecode_array, BytecodeArray, kBytecodeArrayOffset) PROTECTED_POINTER_ACCESSORS(InterpreterData |
| |
| kInterpreterTrampolineOffset | RELEASE_ACQUIRE_ACCESSORS (SharedFunctionInfo, name_or_scope_info, Tagged< NameOrScopeInfoT >, kNameOrScopeInfoOffset) RELEASE_ACQUIRE_ACCESSORS(SharedFunctionInfo |
| |
| kInterpreterTrampolineOffset kScriptOffset | RELEASE_ACQUIRE_ACCESSORS (SharedFunctionInfo, raw_script, Tagged< Object >, kScriptOffset) void SharedFunctionInfo |
| |
| | DEF_GETTER (SharedFunctionInfo, script, Tagged< HeapObject >) |
| |
| | RENAME_TORQUE_ACCESSORS (SharedFunctionInfo, raw_outer_scope_info_or_feedback_metadata, outer_scope_info_or_feedback_metadata, Tagged< HeapObject >) DEF_ACQUIRE_GETTER(SharedFunctionInfo |
| |
| | RENAME_PRIMITIVE_TORQUE_ACCESSORS (SharedFunctionInfo, raw_function_token_offset, function_token_offset, uint16_t) int32_t SharedFunctionInfo |
| |
| | BIT_FIELD_ACCESSORS (SharedFunctionInfo, flags2, class_scope_has_private_brand, SharedFunctionInfo::ClassScopeHasPrivateBrandBit) BIT_FIELD_ACCESSORS(SharedFunctionInfo |
| |
| SharedFunctionInfo::HasStaticPrivateMethodsOrAccessorsBit | BIT_FIELD_ACCESSORS (SharedFunctionInfo, flags2, is_sparkplug_compiling, SharedFunctionInfo::IsSparkplugCompilingBit) BIT_FIELD_ACCESSORS(SharedFunctionInfo |
| |
| SharedFunctionInfo::HasStaticPrivateMethodsOrAccessorsBit SharedFunctionInfo::MaglevCompilationFailedBit | BIT_FIELD_ACCESSORS (SharedFunctionInfo, flags2, function_context_independent_compiled, SharedFunctionInfo::FunctionContextIndependentCompiledBit) BIT_FIELD_ACCESSORS(SharedFunctionInfo |
| |
| SharedFunctionInfo::HasStaticPrivateMethodsOrAccessorsBit SharedFunctionInfo::MaglevCompilationFailedBit SharedFunctionInfo::FunctionSyntaxKindBits | BIT_FIELD_ACCESSORS (SharedFunctionInfo, relaxed_flags, allows_lazy_compilation, SharedFunctionInfo::AllowLazyCompilationBit) BIT_FIELD_ACCESSORS(SharedFunctionInfo |
| |
| SharedFunctionInfo::HasStaticPrivateMethodsOrAccessorsBit SharedFunctionInfo::MaglevCompilationFailedBit SharedFunctionInfo::FunctionSyntaxKindBits SharedFunctionInfo::HasDuplicateParametersBit | BIT_FIELD_ACCESSORS (SharedFunctionInfo, relaxed_flags, native, SharedFunctionInfo::IsNativeBit) BIT_FIELD_ACCESSORS(SharedFunctionInfo |
| |
| SharedFunctionInfo::HasStaticPrivateMethodsOrAccessorsBit SharedFunctionInfo::MaglevCompilationFailedBit SharedFunctionInfo::FunctionSyntaxKindBits SharedFunctionInfo::HasDuplicateParametersBit SharedFunctionInfo::RequiresInstanceMembersInitializerBit | BIT_FIELD_ACCESSORS (SharedFunctionInfo, relaxed_flags, name_should_print_as_anonymous, SharedFunctionInfo::NameShouldPrintAsAnonymousBit) BIT_FIELD_ACCESSORS(SharedFunctionInfo |
| |
| SharedFunctionInfo::HasStaticPrivateMethodsOrAccessorsBit SharedFunctionInfo::MaglevCompilationFailedBit SharedFunctionInfo::FunctionSyntaxKindBits SharedFunctionInfo::HasDuplicateParametersBit SharedFunctionInfo::RequiresInstanceMembersInitializerBit SharedFunctionInfo::HasReportedBinaryCoverageBit | BIT_FIELD_ACCESSORS (SharedFunctionInfo, relaxed_flags, is_toplevel, SharedFunctionInfo::IsTopLevelBit) BIT_FIELD_ACCESSORS(SharedFunctionInfo |
| |
| SharedFunctionInfo::HasStaticPrivateMethodsOrAccessorsBit SharedFunctionInfo::MaglevCompilationFailedBit SharedFunctionInfo::FunctionSyntaxKindBits SharedFunctionInfo::HasDuplicateParametersBit SharedFunctionInfo::RequiresInstanceMembersInitializerBit SharedFunctionInfo::HasReportedBinaryCoverageBit SharedFunctionInfo::PropertiesAreFinalBit | BIT_FIELD_ACCESSORS (SharedFunctionInfo, relaxed_flags, private_name_lookup_skips_outer_class, SharedFunctionInfo::PrivateNameLookupSkipsOuterClassBit) BIT_FIELD_ACCESSORS(SharedFunctionInfo |
| |
| | DEF_ACQUIRE_GETTER (SharedFunctionInfo, scope_info, Tagged< ScopeInfo >) |
| |
| | DEF_GETTER (SharedFunctionInfo, scope_info, Tagged< ScopeInfo >) |
| |
| | DEF_GETTER (SharedFunctionInfo, outer_scope_info, Tagged< HeapObject >) |
| |
| | DEF_GETTER (SharedFunctionInfo, feedback_metadata, Tagged< FeedbackMetadata >) |
| |
| | RELEASE_ACQUIRE_ACCESSORS_CHECKED2 (SharedFunctionInfo, feedback_metadata, Tagged< FeedbackMetadata >, kOuterScopeInfoOrFeedbackMetadataOffset, HasFeedbackMetadata(kAcquireLoad), !HasFeedbackMetadata(kAcquireLoad) &&IsFeedbackMetadata(value)) bool SharedFunctionInfo |
| |
| | DEF_GETTER (SharedFunctionInfo, api_func_data, Tagged< FunctionTemplateInfo >) |
| |
| | DEF_GETTER (SharedFunctionInfo, HasBytecodeArray, bool) |
| |
| | DEF_GETTER (SharedFunctionInfo, HasBaselineCode, bool) |
| |
| | DEF_ACQUIRE_GETTER (SharedFunctionInfo, baseline_code, Tagged< Code >) |
| |
| | DEF_GETTER (SharedFunctionInfo, inferred_name, Tagged< String >) |
| |
| std::ostream & | operator<< (std::ostream &os, const SourceCodeOf &v) |
| |
| std::ostream & | operator<< (std::ostream &os, SharedFunctionInfo::Inlineability i) |
| |
| uintptr_t | ArrayIndexOfIncludesSmiOrObject (Address array_start, uintptr_t array_len, uintptr_t from_index, Address search_element) |
| |
| uintptr_t | ArrayIndexOfIncludesDouble (Address array_start, uintptr_t array_len, uintptr_t from_index, Address search_element) |
| |
| Tagged< Object > | Uint8ArrayToHex (const char *bytes, size_t length, DirectHandle< SeqOneByteString > string_output) |
| |
| template<typename T > |
| bool | ArrayBufferFromHex (base::Vector< T > &input_vector, DirectHandle< JSArrayBuffer > buffer, size_t output_length) |
| |
| template bool | ArrayBufferFromHex (base::Vector< const uint8_t > &input_vector, DirectHandle< JSArrayBuffer > buffer, size_t output_length) |
| |
| template bool | ArrayBufferFromHex (base::Vector< const base::uc16 > &input_vector, DirectHandle< JSArrayBuffer > buffer, size_t output_length) |
| |
| void | CopyTagged (Address dst, const Address src, size_t num_tagged) |
| |
| void | MemsetTagged (Tagged_t *start, Tagged< MaybeObject > value, size_t counter) |
| |
| template<typename T > |
| void | MemsetTagged (SlotBase< T, Tagged_t > start, Tagged< MaybeObject > value, size_t counter) |
| |
| void | MemsetPointer (FullObjectSlot start, Tagged< Object > value, size_t counter) |
| |
| template<typename SourceChar > |
| static void | CalculateLineEndsImpl (String::LineEndsVector *line_ends, base::Vector< const SourceChar > src, bool include_ending_line) |
| |
| template<HeapObjectReferenceType kRefType, typename StorageType > |
| void | ShortPrint (TaggedImpl< kRefType, StorageType > ptr, FILE *out) |
| |
| template<HeapObjectReferenceType kRefType, typename StorageType > |
| template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void ShortPrint(TaggedImpl< HeapObjectReferenceType template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void ShortPrint(TaggedImpl< HeapObjectReferenceType void | ShortPrint (TaggedImpl< kRefType, StorageType > ptr, StringStream *accumulator) |
| |
| template<HeapObjectReferenceType kRefType, typename StorageType > |
| template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void ShortPrint(TaggedImpl< HeapObjectReferenceType template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void ShortPrint(TaggedImpl< HeapObjectReferenceType void | ShortPrint (TaggedImpl< kRefType, StorageType > ptr, std::ostream &os) |
| |
| template<HeapObjectReferenceType kRefType, typename StorageType > |
| void | ShortPrint (TaggedImpl< kRefType, StorageType > ptr, StringStream *accumulator) |
| |
| template<HeapObjectReferenceType kRefType, typename StorageType > |
| void | ShortPrint (TaggedImpl< kRefType, StorageType > ptr, std::ostream &os) |
| |
| template<HeapObjectReferenceType kRefType, typename StorageType > |
| void | Print (TaggedImpl< kRefType, StorageType > ptr) |
| |
| template<HeapObjectReferenceType kRefType, typename StorageType > |
| void | Print (TaggedImpl< kRefType, StorageType > ptr, std::ostream &os) |
| |
| template<typename T > |
| Tagged< MaybeWeak< T > > | MakeWeak (Tagged< T > value) |
| |
| template<typename T > |
| Tagged< MaybeWeak< T > > | MakeWeak (Tagged< MaybeWeak< T > > value) |
| |
| template<typename T > |
| Tagged< T > | MakeStrong (Tagged< T > value) |
| |
| template<typename T > |
| Tagged< T > | MakeStrong (Tagged< MaybeWeak< T > > value) |
| |
| template<class T > |
| | Tagged (T object) -> Tagged< T > |
| |
| | Tagged (const HeapObjectLayout *object) -> Tagged< HeapObject > |
| |
| template<class T > |
| | Tagged (const T *object) -> Tagged< T > |
| |
| template<class T > |
| | Tagged (T *object) -> Tagged< T > |
| |
| | BOOL_ACCESSORS (FunctionTemplateInfo, relaxed_flag, is_object_template_call_handler, IsObjectTemplateCallHandlerBit::kShift) BOOL_ACCESSORS(FunctionTemplateInfo |
| |
| HasSideEffectsBit::kShift | BOOL_ACCESSORS (FunctionTemplateInfo, relaxed_flag, undetectable, UndetectableBit::kShift) BOOL_ACCESSORS(FunctionTemplateInfo |
| |
| HasSideEffectsBit::kShift NeedsAccessCheckBit::kShift | BOOL_ACCESSORS (FunctionTemplateInfo, relaxed_flag, read_only_prototype, ReadOnlyPrototypeBit::kShift) BOOL_ACCESSORS(FunctionTemplateInfo |
| |
| HasSideEffectsBit::kShift NeedsAccessCheckBit::kShift RemovePrototypeBit::kShift | BOOL_ACCESSORS (FunctionTemplateInfo, relaxed_flag, accept_any_receiver, AcceptAnyReceiverBit::kShift) bool FunctionTemplateInfo |
| |
| | BIT_FIELD_ACCESSORS (FunctionTemplateInfo, relaxed_flag, allowed_receiver_instance_type_range_start, FunctionTemplateInfo::AllowedReceiverInstanceTypeRangeStartBits) BIT_FIELD_ACCESSORS(FunctionTemplateInfo |
| |
| FunctionTemplateInfo::AllowedReceiverInstanceTypeRangeEndBits | RELAXED_UINT32_ACCESSORS (FunctionTemplateInfo, flag, FunctionTemplateInfo::kFlagOffset) int32_t FunctionTemplateInfo |
| |
| | EXTERNAL_POINTER_ACCESSORS_MAYBE_READ_ONLY_HOST (FunctionTemplateInfo, maybe_redirected_callback, Address, kMaybeRedirectedCallbackOffset, kFunctionTemplateInfoCallbackTag) template< class IsolateT > bool FunctionTemplateInfo |
| |
| | RARE_ACCESSORS (prototype_template, PrototypeTemplate, undefined, UnionOf< Undefined, ObjectTemplateInfo >) RARE_ACCESSORS(prototype_provider_template |
| |
| UnionOf< Undefined, FunctionTemplateInfo > | RARE_ACCESSORS (parent_template, ParentTemplate, undefined, UnionOf< Undefined, FunctionTemplateInfo >) RARE_ACCESSORS(named_property_handler |
| |
| UnionOf< Undefined, FunctionTemplateInfo > UnionOf< Undefined, InterceptorInfo > | RARE_ACCESSORS (indexed_property_handler, IndexedPropertyHandler, undefined, UnionOf< Undefined, InterceptorInfo >) RARE_ACCESSORS(instance_template |
| |
| UnionOf< Undefined, FunctionTemplateInfo > UnionOf< Undefined, InterceptorInfo > UnionOf< Undefined, ObjectTemplateInfo > | RARE_ACCESSORS (instance_call_handler, InstanceCallHandler, undefined, UnionOf< Undefined, FunctionTemplateInfo >) RARE_ACCESSORS(access_check_info |
| |
| UnionOf< Undefined, FunctionTemplateInfo > UnionOf< Undefined, InterceptorInfo > UnionOf< Undefined, ObjectTemplateInfo > UnionOf< Undefined, AccessCheckInfo > | RARE_ACCESSORS (c_function_overloads, CFunctionOverloads, GetReadOnlyRoots().empty_fixed_array(), FixedArray) InstanceType FunctionTemplateInfo |
| |
| std::ostream & | operator<< (std::ostream &os, SideStepTransition::Kind sidestep) |
| |
| std::ostream & | operator<< (std::ostream &os, BinaryOperationHint hint) |
| |
| std::ostream & | operator<< (std::ostream &os, CompareOperationHint hint) |
| |
| std::ostream & | operator<< (std::ostream &os, ForInHint hint) |
| |
| std::ostream & | operator<< (std::ostream &os, const StringAddFlags &flags) |
| |
| size_t | hash_value (BinaryOperationHint hint) |
| |
| size_t | hash_value (CompareOperationHint hint) |
| |
| template<typename T > |
| static size_t | BytesNeededForVarint (T value) |
| |
| static void | CommitProperties (DirectHandle< JSObject > object, DirectHandle< Map > map, base::Vector< const DirectHandle< Object > > properties) |
| |
| static bool | IsValidObjectKey (Tagged< Object > value, Isolate *isolate) |
| |
| static Maybe< bool > | SetPropertiesFromKeyValuePairs (Isolate *isolate, DirectHandle< JSObject > object, DirectHandle< Object > *data, uint32_t num_properties) |
| |
| constexpr bool | IsKeywordStart (char c) |
| |
| V8_INLINE Token::Value | KeywordOrIdentifierToken (const uint8_t *input, int input_length) |
| |
| template<int N> |
| constexpr bool | IsInString (const char(&s)[N], char c, size_t i=0) |
| |
| constexpr bool | CanBeKeywordCharacter (char c) |
| |
| constexpr Token::Value | GetOneCharToken (char c) |
| |
| constexpr uint8_t | GetScanFlags (char c) |
| |
| bool | TerminatesLiteral (uint8_t scan_flags) |
| |
| bool | CanBeKeyword (uint8_t scan_flags) |
| |
| bool | IdentifierNeedsSlowPath (uint8_t scan_flags) |
| |
| bool | MultilineCommentCharacterNeedsSlowPath (uint8_t scan_flags) |
| |
| bool | MayTerminateString (uint8_t scan_flags) |
| |
| bool | CharCanBeKeyword (base::uc32 c) |
| |
| constexpr uint8_t | length (const char *str) |
| |
| static uint32_t | SnapshotObjectIdHash (SnapshotObjectId id) |
| |
| static void | WriteUChar (OutputStreamWriter *w, unibrow::uchar u) |
| |
| std::ostream & | operator<< (std::ostream &os, const RegExpInstruction &inst) |
| |
| std::ostream & | operator<< (std::ostream &os, base::Vector< const RegExpInstruction > insts) |
| |
| std::ostream & | operator<< (std::ostream &os, const RegExpInstruction::LookaroundPayload &payload) |
| |
| template<class T > |
| DirectHandle< TrustedByteArray > | VectorToByteArray (Isolate *isolate, base::Vector< T > data) |
| |
| base::Vector< RegExpInstruction > | AsInstructionSequence (Tagged< TrustedByteArray > raw_bytes) |
| |
| void | PrintSet (std::ofstream &out, const char *name, const icu::UnicodeSet &set) |
| |
| void | PrintSpecial (std::ofstream &out) |
| |
| void | WriteHeader (const char *header_filename) |
| |
| bool | operator== (const CharacterRange &lhs, const CharacterRange &rhs) |
| |
| bool | operator!= (const CharacterRange &lhs, const CharacterRange &rhs) |
| |
| void | RegExpBytecodeDisassembleSingle (const uint8_t *code_base, const uint8_t *pc) |
| |
| void | RegExpBytecodeDisassemble (const uint8_t *code_base, int length, const char *pattern) |
| |
| constexpr int | RegExpBytecodeLength (int bytecode) |
| |
| constexpr const char * | RegExpBytecodeName (int bytecode) |
| |
| bool | RangeContainsLatin1Equivalents (CharacterRange range) |
| |
| RegExpError | AnalyzeRegExp (Isolate *isolate, bool is_one_byte, RegExpFlags flags, RegExpNode *node) |
| |
| bool | NeedsUnicodeCaseEquivalents (RegExpFlags flags) |
| |
| ContainedInLattice | Combine (ContainedInLattice a, ContainedInLattice b) |
| |
| const char * | RegExpErrorString (RegExpError error) |
| |
| constexpr bool | RegExpErrorIsStackOverflow (RegExpError error) |
| |
| constexpr bool | IsEitherUnicode (RegExpFlags f) |
| |
| constexpr bool | ShouldOptionallyStepBackToLeadSurrogate (RegExpFlags f) |
| |
| constexpr std::optional< RegExpFlag > | TryRegExpFlagFromChar (char c) |
| |
| std::ostream & | operator<< (std::ostream &os, RegExpFlags flags) |
| |
| static int | LabelToInt (Label *label) |
| |
| template bool | RegExpParser::VerifyRegExpSyntax< base::uc16 > (Zone *, uintptr_t, const base::uc16 *, int, RegExpFlags, RegExpCompileData *, const DisallowGarbageCollection &) |
| |
| template bool | RegExp::VerifySyntax< base::uc16 > (Zone *, uintptr_t, const base::uc16 *, int, RegExpFlags, RegExpError *regexp_error_out, const DisallowGarbageCollection &) |
| |
| template<typename T > |
| static T & | frame_entry (Address re_frame, int frame_offset) |
| |
| template<typename T > |
| static T * | frame_entry_address (Address re_frame, int frame_offset) |
| |
| V8_INLINE constexpr bool | operator< (RootIndex lhs, RootIndex rhs) |
| |
| V8_INLINE RootIndex | operator++ (RootIndex &index) |
| |
| ReadOnlyRoots | GetReadOnlyRoots () |
| |
| | RUNTIME_FUNCTION (Runtime_TransitionElementsKind) |
| |
| | RUNTIME_FUNCTION (Runtime_TransitionElementsKindWithKind) |
| |
| | RUNTIME_FUNCTION (Runtime_NewArray) |
| |
| | RUNTIME_FUNCTION (Runtime_NormalizeElements) |
| |
| | RUNTIME_FUNCTION (Runtime_GrowArrayElements) |
| |
| | RUNTIME_FUNCTION (Runtime_ArrayIsArray) |
| |
| | RUNTIME_FUNCTION (Runtime_IsArray) |
| |
| | RUNTIME_FUNCTION (Runtime_ArraySpeciesConstructor) |
| |
| | RUNTIME_FUNCTION (Runtime_ArrayIncludes_Slow) |
| |
| | RUNTIME_FUNCTION (Runtime_ArrayIndexOf) |
| |
| | RUNTIME_FUNCTION (Runtime_AtomicsLoad64) |
| |
| | RUNTIME_FUNCTION (Runtime_AtomicsStore64) |
| |
| | RUNTIME_FUNCTION (Runtime_AtomicsExchange) |
| |
| | RUNTIME_FUNCTION (Runtime_AtomicsCompareExchange) |
| |
| | RUNTIME_FUNCTION (Runtime_AtomicsAdd) |
| |
| | RUNTIME_FUNCTION (Runtime_AtomicsSub) |
| |
| | RUNTIME_FUNCTION (Runtime_AtomicsAnd) |
| |
| | RUNTIME_FUNCTION (Runtime_AtomicsOr) |
| |
| | RUNTIME_FUNCTION (Runtime_AtomicsXor) |
| |
| | RUNTIME_FUNCTION (Runtime_AtomicsLoadSharedStructOrArray) |
| |
| | RUNTIME_FUNCTION (Runtime_AtomicsStoreSharedStructOrArray) |
| |
| | RUNTIME_FUNCTION (Runtime_AtomicsExchangeSharedStructOrArray) |
| |
| | RUNTIME_FUNCTION (Runtime_AtomicsCompareExchangeSharedStructOrArray) |
| |
| | RUNTIME_FUNCTION (Runtime_BigIntCompareToNumber) |
| |
| | RUNTIME_FUNCTION (Runtime_BigIntCompareToString) |
| |
| | RUNTIME_FUNCTION (Runtime_BigIntEqualToBigInt) |
| |
| | RUNTIME_FUNCTION (Runtime_BigIntEqualToNumber) |
| |
| | RUNTIME_FUNCTION (Runtime_BigIntEqualToString) |
| |
| | RUNTIME_FUNCTION (Runtime_BigIntToNumber) |
| |
| | RUNTIME_FUNCTION (Runtime_ToBigInt) |
| |
| | RUNTIME_FUNCTION (Runtime_ToBigIntConvertNumber) |
| |
| | RUNTIME_FUNCTION (Runtime_BigIntExponentiate) |
| |
| | RUNTIME_FUNCTION (Runtime_BigIntUnaryOp) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowUnsupportedSuperError) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowConstructorNonCallableError) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowStaticPrototypeError) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowSuperAlreadyCalledError) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowSuperNotCalled) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowNotSuperConstructor) |
| |
| | RUNTIME_FUNCTION (Runtime_DefineClass) |
| |
| | RUNTIME_FUNCTION (Runtime_LoadFromSuper) |
| |
| | RUNTIME_FUNCTION (Runtime_LoadKeyedFromSuper) |
| |
| | RUNTIME_FUNCTION (Runtime_StoreToSuper) |
| |
| | RUNTIME_FUNCTION (Runtime_StoreKeyedToSuper) |
| |
| | RUNTIME_FUNCTION (Runtime_TheHole) |
| |
| | RUNTIME_FUNCTION (Runtime_OrderedHashSetGrow) |
| |
| | RUNTIME_FUNCTION (Runtime_SetGrow) |
| |
| | RUNTIME_FUNCTION (Runtime_SetShrink) |
| |
| | RUNTIME_FUNCTION (Runtime_OrderedHashSetShrink) |
| |
| | RUNTIME_FUNCTION (Runtime_MapShrink) |
| |
| | RUNTIME_FUNCTION (Runtime_MapGrow) |
| |
| | RUNTIME_FUNCTION (Runtime_OrderedHashMapGrow) |
| |
| | RUNTIME_FUNCTION (Runtime_WeakCollectionDelete) |
| |
| | RUNTIME_FUNCTION (Runtime_WeakCollectionSet) |
| |
| | RUNTIME_FUNCTION (Runtime_CompileLazy) |
| |
| | RUNTIME_FUNCTION (Runtime_InstallBaselineCode) |
| |
| | RUNTIME_FUNCTION (Runtime_InstallSFICode) |
| |
| | RUNTIME_FUNCTION (Runtime_CompileOptimized) |
| |
| | RUNTIME_FUNCTION (Runtime_HealOptimizedCodeSlot) |
| |
| | RUNTIME_FUNCTION (Runtime_FunctionLogNextExecution) |
| |
| | RUNTIME_FUNCTION (Runtime_InstantiateAsmJs) |
| |
| | RUNTIME_FUNCTION (Runtime_NotifyDeoptimized) |
| |
| | RUNTIME_FUNCTION (Runtime_ObserveNode) |
| |
| | RUNTIME_FUNCTION (Runtime_VerifyType) |
| |
| | RUNTIME_FUNCTION (Runtime_CheckTurboshaftTypeOf) |
| |
| | RUNTIME_FUNCTION (Runtime_CompileOptimizedOSR) |
| |
| | RUNTIME_FUNCTION (Runtime_CompileOptimizedOSRFromMaglev) |
| |
| | RUNTIME_FUNCTION (Runtime_CompileOptimizedOSRFromMaglevInlined) |
| |
| | RUNTIME_FUNCTION (Runtime_LogOrTraceOptimizedOSREntry) |
| |
| static Tagged< Object > | CompileGlobalEval (Isolate *isolate, Handle< i::Object > source_object, DirectHandle< SharedFunctionInfo > outer_info, LanguageMode language_mode, int eval_scope_info_index, int eval_position) |
| |
| | RUNTIME_FUNCTION (Runtime_ResolvePossiblyDirectEval) |
| |
| | RUNTIME_FUNCTION (Runtime_DateCurrentTime) |
| |
| | RUNTIME_FUNCTION_RETURN_PAIR (Runtime_DebugBreakOnBytecode) |
| |
| | RUNTIME_FUNCTION (Runtime_DebugBreakAtEntry) |
| |
| | RUNTIME_FUNCTION (Runtime_HandleDebuggerStatement) |
| |
| | RUNTIME_FUNCTION (Runtime_ScheduleBreak) |
| |
| | RUNTIME_FUNCTION (Runtime_GetGeneratorScopeCount) |
| |
| | RUNTIME_FUNCTION (Runtime_GetGeneratorScopeDetails) |
| |
| static bool | SetScopeVariableValue (ScopeIterator *it, int index, Handle< String > variable_name, DirectHandle< Object > new_value) |
| |
| | RUNTIME_FUNCTION (Runtime_SetGeneratorScopeVariableValue) |
| |
| | RUNTIME_FUNCTION (Runtime_GetBreakLocations) |
| |
| | RUNTIME_FUNCTION (Runtime_IsBreakOnException) |
| |
| | RUNTIME_FUNCTION (Runtime_ClearStepping) |
| |
| | RUNTIME_FUNCTION (Runtime_DebugGetLoadedScriptIds) |
| |
| | RUNTIME_FUNCTION (Runtime_FunctionGetInferredName) |
| |
| | RUNTIME_FUNCTION (Runtime_CollectGarbage) |
| |
| | RUNTIME_FUNCTION (Runtime_ScriptLocationFromLine2) |
| |
| | RUNTIME_FUNCTION (Runtime_DebugOnFunctionCall) |
| |
| | RUNTIME_FUNCTION (Runtime_DebugPrepareStepInSuspendedGenerator) |
| |
| | RUNTIME_FUNCTION (Runtime_DebugCollectCoverage) |
| |
| | RUNTIME_FUNCTION (Runtime_DebugTogglePreciseCoverage) |
| |
| | RUNTIME_FUNCTION (Runtime_DebugToggleBlockCoverage) |
| |
| | RUNTIME_FUNCTION (Runtime_IncBlockCounter) |
| |
| | RUNTIME_FUNCTION (Runtime_DebugAsyncFunctionSuspended) |
| |
| | RUNTIME_FUNCTION (Runtime_DebugPromiseThen) |
| |
| | RUNTIME_FUNCTION (Runtime_LiveEditPatchScript) |
| |
| | RUNTIME_FUNCTION (Runtime_ProfileCreateSnapshotDataBlob) |
| |
| | RUNTIME_FUNCTION (Runtime_ForInEnumerate) |
| |
| | RUNTIME_FUNCTION (Runtime_ForInHasProperty) |
| |
| | RUNTIME_FUNCTION (Runtime_FunctionGetScriptSource) |
| |
| | RUNTIME_FUNCTION (Runtime_FunctionGetScriptId) |
| |
| | RUNTIME_FUNCTION (Runtime_FunctionGetSourceCode) |
| |
| | RUNTIME_FUNCTION (Runtime_FunctionGetScriptSourcePosition) |
| |
| | RUNTIME_FUNCTION (Runtime_FunctionIsAPIFunction) |
| |
| | RUNTIME_FUNCTION (Runtime_Call) |
| |
| | RUNTIME_FUNCTION (Runtime_AtomicsNumWaitersForTesting) |
| |
| | RUNTIME_FUNCTION (Runtime_AtomicsNumUnresolvedAsyncPromisesForTesting) |
| |
| | RUNTIME_FUNCTION (Runtime_SetAllowAtomicsWait) |
| |
| | RUNTIME_FUNCTION (Runtime_AsyncFunctionAwait) |
| |
| | RUNTIME_FUNCTION (Runtime_AsyncFunctionEnter) |
| |
| | RUNTIME_FUNCTION (Runtime_AsyncFunctionReject) |
| |
| | RUNTIME_FUNCTION (Runtime_AsyncFunctionResolve) |
| |
| | RUNTIME_FUNCTION (Runtime_CreateJSGeneratorObject) |
| |
| | RUNTIME_FUNCTION (Runtime_GeneratorClose) |
| |
| | RUNTIME_FUNCTION (Runtime_GeneratorGetFunction) |
| |
| | RUNTIME_FUNCTION (Runtime_AsyncGeneratorAwait) |
| |
| | RUNTIME_FUNCTION (Runtime_AsyncGeneratorResolve) |
| |
| | RUNTIME_FUNCTION (Runtime_AsyncGeneratorReject) |
| |
| | RUNTIME_FUNCTION (Runtime_AsyncGeneratorYieldWithAwait) |
| |
| | RUNTIME_FUNCTION (Runtime_GeneratorGetResumeMode) |
| |
| | RUNTIME_FUNCTION (Runtime_AccessCheck) |
| |
| | RUNTIME_FUNCTION (Runtime_FatalProcessOutOfMemoryInAllocateRaw) |
| |
| | RUNTIME_FUNCTION (Runtime_FatalProcessOutOfMemoryInvalidArrayLength) |
| |
| | RUNTIME_FUNCTION (Runtime_FatalInvalidSize) |
| |
| | RUNTIME_FUNCTION (Runtime_Throw) |
| |
| | RUNTIME_FUNCTION (Runtime_ReThrow) |
| |
| | RUNTIME_FUNCTION (Runtime_ReThrowWithMessage) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowStackOverflow) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowSymbolAsyncIteratorInvalid) |
| |
| | RUNTIME_FUNCTION (Runtime_TerminateExecution) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowRangeError) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowTypeError) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowTypeErrorIfStrict) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowInvalidTypedArrayAlignment) |
| |
| | RUNTIME_FUNCTION (Runtime_UnwindAndFindExceptionHandler) |
| |
| | RUNTIME_FUNCTION (Runtime_PropagateException) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowReferenceError) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowAccessedUninitializedVariable) |
| |
| | RUNTIME_FUNCTION (Runtime_NewError) |
| |
| | RUNTIME_FUNCTION (Runtime_NewTypeError) |
| |
| | RUNTIME_FUNCTION (Runtime_NewReferenceError) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowInvalidStringLength) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowIteratorResultNotAnObject) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowThrowMethodMissing) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowSymbolIteratorInvalid) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowNoAccess) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowNotConstructor) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowApplyNonFunction) |
| |
| | RUNTIME_FUNCTION (Runtime_StackGuard) |
| |
| | RUNTIME_FUNCTION (Runtime_HandleNoHeapWritesInterrupts) |
| |
| | RUNTIME_FUNCTION (Runtime_StackGuardWithGap) |
| |
| | RUNTIME_FUNCTION (Runtime_BytecodeBudgetInterruptWithStackCheck_Ignition) |
| |
| | RUNTIME_FUNCTION (Runtime_BytecodeBudgetInterrupt_Ignition) |
| |
| | RUNTIME_FUNCTION (Runtime_BytecodeBudgetInterruptWithStackCheck_Sparkplug) |
| |
| | RUNTIME_FUNCTION (Runtime_BytecodeBudgetInterrupt_Sparkplug) |
| |
| | RUNTIME_FUNCTION (Runtime_BytecodeBudgetInterrupt_Maglev) |
| |
| | RUNTIME_FUNCTION (Runtime_BytecodeBudgetInterruptWithStackCheck_Maglev) |
| |
| | RUNTIME_FUNCTION (Runtime_AllocateInYoungGeneration) |
| |
| | RUNTIME_FUNCTION (Runtime_AllocateInOldGeneration) |
| |
| | RUNTIME_FUNCTION (Runtime_AllocateByteArray) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowIteratorError) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowSpreadArgError) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowCalledNonCallable) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowConstructedNonConstructable) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowPatternAssignmentNonCoercible) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowConstructorReturnedNonObject) |
| |
| | RUNTIME_FUNCTION (Runtime_CreateListFromArrayLike) |
| |
| | RUNTIME_FUNCTION (Runtime_IncrementUseCounter) |
| |
| | RUNTIME_FUNCTION (Runtime_GetAndResetTurboProfilingData) |
| |
| | RUNTIME_FUNCTION (Runtime_GetAndResetRuntimeCallStats) |
| |
| | RUNTIME_FUNCTION (Runtime_OrdinaryHasInstance) |
| |
| | RUNTIME_FUNCTION (Runtime_Typeof) |
| |
| | RUNTIME_FUNCTION (Runtime_AllowDynamicFunction) |
| |
| | RUNTIME_FUNCTION (Runtime_CreateAsyncFromSyncIterator) |
| |
| | RUNTIME_FUNCTION (Runtime_GetTemplateObject) |
| |
| | RUNTIME_FUNCTION (Runtime_ReportMessageFromMicrotask) |
| |
| | RUNTIME_FUNCTION (Runtime_GetInitializerFunction) |
| |
| | RUNTIME_FUNCTION (Runtime_DoubleToStringWithRadix) |
| |
| | RUNTIME_FUNCTION (Runtime_SharedValueBarrierSlow) |
| |
| | RUNTIME_FUNCTION (Runtime_InvalidateDependentCodeForScriptContextSlot) |
| |
| | RUNTIME_FUNCTION (Runtime_FormatList) |
| |
| | RUNTIME_FUNCTION (Runtime_FormatListToParts) |
| |
| | RUNTIME_FUNCTION (Runtime_StringToLowerCaseIntl) |
| |
| | RUNTIME_FUNCTION (Runtime_StringToUpperCaseIntl) |
| |
| | RUNTIME_FUNCTION (Runtime_StringToLocaleLowerCase) |
| |
| | RUNTIME_FUNCTION (Runtime_CreateObjectLiteral) |
| |
| | RUNTIME_FUNCTION (Runtime_CreateArrayLiteral) |
| |
| | RUNTIME_FUNCTION (Runtime_CreateRegExpLiteral) |
| |
| | RUNTIME_FUNCTION (Runtime_DynamicImportCall) |
| |
| | RUNTIME_FUNCTION (Runtime_GetModuleNamespace) |
| |
| | RUNTIME_FUNCTION (Runtime_GetImportMetaObject) |
| |
| | RUNTIME_FUNCTION (Runtime_GetModuleNamespaceExport) |
| |
| | RUNTIME_FUNCTION (Runtime_StringToNumber) |
| |
| | RUNTIME_FUNCTION (Runtime_StringParseInt) |
| |
| | RUNTIME_FUNCTION (Runtime_StringParseFloat) |
| |
| | RUNTIME_FUNCTION (Runtime_NumberToStringSlow) |
| |
| | RUNTIME_FUNCTION (Runtime_MaxSmi) |
| |
| | RUNTIME_FUNCTION (Runtime_IsSmi) |
| |
| | RUNTIME_FUNCTION (Runtime_GetHoleNaNUpper) |
| |
| | RUNTIME_FUNCTION (Runtime_GetHoleNaNLower) |
| |
| | RUNTIME_FUNCTION (Runtime_ObjectKeys) |
| |
| | RUNTIME_FUNCTION (Runtime_ObjectGetOwnPropertyNames) |
| |
| | RUNTIME_FUNCTION (Runtime_ObjectGetOwnPropertyNamesTryFast) |
| |
| | RUNTIME_FUNCTION (Runtime_ObjectHasOwnProperty) |
| |
| | RUNTIME_FUNCTION (Runtime_HasOwnConstDataProperty) |
| |
| | RUNTIME_FUNCTION (Runtime_IsDictPropertyConstTrackingEnabled) |
| |
| | RUNTIME_FUNCTION (Runtime_AddDictionaryProperty) |
| |
| | RUNTIME_FUNCTION (Runtime_AddPrivateBrand) |
| |
| | RUNTIME_FUNCTION (Runtime_ObjectCreate) |
| |
| | RUNTIME_FUNCTION (Runtime_InternalSetPrototype) |
| |
| | RUNTIME_FUNCTION (Runtime_OptimizeObjectForAddingMultipleProperties) |
| |
| | RUNTIME_FUNCTION (Runtime_ObjectValues) |
| |
| | RUNTIME_FUNCTION (Runtime_ObjectValuesSkipFastPath) |
| |
| | RUNTIME_FUNCTION (Runtime_ObjectEntries) |
| |
| | RUNTIME_FUNCTION (Runtime_ObjectEntriesSkipFastPath) |
| |
| | RUNTIME_FUNCTION (Runtime_ObjectIsExtensible) |
| |
| | RUNTIME_FUNCTION (Runtime_JSReceiverPreventExtensionsThrow) |
| |
| | RUNTIME_FUNCTION (Runtime_JSReceiverPreventExtensionsDontThrow) |
| |
| | RUNTIME_FUNCTION (Runtime_JSReceiverGetPrototypeOf) |
| |
| | RUNTIME_FUNCTION (Runtime_JSReceiverSetPrototypeOfThrow) |
| |
| | RUNTIME_FUNCTION (Runtime_JSReceiverSetPrototypeOfDontThrow) |
| |
| | RUNTIME_FUNCTION (Runtime_GetProperty) |
| |
| | RUNTIME_FUNCTION (Runtime_SetKeyedProperty) |
| |
| | RUNTIME_FUNCTION (Runtime_DefineObjectOwnProperty) |
| |
| | RUNTIME_FUNCTION (Runtime_SetNamedProperty) |
| |
| | RUNTIME_FUNCTION (Runtime_DeleteProperty) |
| |
| | RUNTIME_FUNCTION (Runtime_ShrinkNameDictionary) |
| |
| | RUNTIME_FUNCTION (Runtime_ShrinkSwissNameDictionary) |
| |
| | RUNTIME_FUNCTION (Runtime_HasProperty) |
| |
| | RUNTIME_FUNCTION (Runtime_GetOwnPropertyKeys) |
| |
| | RUNTIME_FUNCTION (Runtime_ToFastProperties) |
| |
| | RUNTIME_FUNCTION (Runtime_AllocateHeapNumber) |
| |
| | RUNTIME_FUNCTION (Runtime_NewObject) |
| |
| | RUNTIME_FUNCTION (Runtime_GetDerivedMap) |
| |
| | RUNTIME_FUNCTION (Runtime_CompleteInobjectSlackTrackingForMap) |
| |
| | RUNTIME_FUNCTION (Runtime_TryMigrateInstance) |
| |
| | RUNTIME_FUNCTION (Runtime_TryMigrateInstanceAndMarkMapAsMigrationTarget) |
| |
| static bool | IsValidAccessor (Isolate *isolate, DirectHandle< Object > obj) |
| |
| | RUNTIME_FUNCTION (Runtime_DefineAccessorPropertyUnchecked) |
| |
| | RUNTIME_FUNCTION (Runtime_SetFunctionName) |
| |
| | RUNTIME_FUNCTION (Runtime_DefineKeyedOwnPropertyInLiteral) |
| |
| | RUNTIME_FUNCTION (Runtime_HasFastPackedElements) |
| |
| | RUNTIME_FUNCTION (Runtime_IsJSReceiver) |
| |
| | RUNTIME_FUNCTION (Runtime_GetFunctionName) |
| |
| | RUNTIME_FUNCTION (Runtime_DefineGetterPropertyUnchecked) |
| |
| | RUNTIME_FUNCTION (Runtime_SetDataProperties) |
| |
| | RUNTIME_FUNCTION (Runtime_CopyDataProperties) |
| |
| | RUNTIME_FUNCTION (Runtime_CopyDataPropertiesWithExcludedPropertiesOnStack) |
| |
| | RUNTIME_FUNCTION (Runtime_DefineSetterPropertyUnchecked) |
| |
| | RUNTIME_FUNCTION (Runtime_ToObject) |
| |
| | RUNTIME_FUNCTION (Runtime_ToNumber) |
| |
| | RUNTIME_FUNCTION (Runtime_ToNumeric) |
| |
| | RUNTIME_FUNCTION (Runtime_ToLength) |
| |
| | RUNTIME_FUNCTION (Runtime_ToString) |
| |
| | RUNTIME_FUNCTION (Runtime_ToName) |
| |
| | RUNTIME_FUNCTION (Runtime_HasInPrototypeChain) |
| |
| | RUNTIME_FUNCTION (Runtime_CreateIterResultObject) |
| |
| | RUNTIME_FUNCTION (Runtime_CreateDataProperty) |
| |
| | RUNTIME_FUNCTION (Runtime_SetOwnPropertyIgnoreAttributes) |
| |
| | RUNTIME_FUNCTION (Runtime_GetOwnPropertyDescriptorObject) |
| |
| | RUNTIME_FUNCTION (Runtime_GetPrivateMember) |
| |
| | RUNTIME_FUNCTION (Runtime_SetPrivateMember) |
| |
| | RUNTIME_FUNCTION (Runtime_LoadPrivateSetter) |
| |
| | RUNTIME_FUNCTION (Runtime_LoadPrivateGetter) |
| |
| | RUNTIME_FUNCTION (Runtime_CreatePrivateAccessors) |
| |
| | RUNTIME_FUNCTION (Runtime_SwissTableAllocate) |
| |
| | RUNTIME_FUNCTION (Runtime_SwissTableAdd) |
| |
| | RUNTIME_FUNCTION (Runtime_SwissTableFindEntry) |
| |
| | RUNTIME_FUNCTION (Runtime_SwissTableUpdate) |
| |
| | RUNTIME_FUNCTION (Runtime_SwissTableDelete) |
| |
| | RUNTIME_FUNCTION (Runtime_SwissTableEquals) |
| |
| | RUNTIME_FUNCTION (Runtime_SwissTableElementsCount) |
| |
| | RUNTIME_FUNCTION (Runtime_SwissTableKeyAt) |
| |
| | RUNTIME_FUNCTION (Runtime_SwissTableValueAt) |
| |
| | RUNTIME_FUNCTION (Runtime_SwissTableDetailsAt) |
| |
| | RUNTIME_FUNCTION (Runtime_Add) |
| |
| | RUNTIME_FUNCTION (Runtime_Equal) |
| |
| | RUNTIME_FUNCTION (Runtime_NotEqual) |
| |
| | RUNTIME_FUNCTION (Runtime_StrictEqual) |
| |
| | RUNTIME_FUNCTION (Runtime_StrictNotEqual) |
| |
| | RUNTIME_FUNCTION (Runtime_ReferenceEqual) |
| |
| | RUNTIME_FUNCTION (Runtime_LessThan) |
| |
| | RUNTIME_FUNCTION (Runtime_GreaterThan) |
| |
| | RUNTIME_FUNCTION (Runtime_LessThanOrEqual) |
| |
| | RUNTIME_FUNCTION (Runtime_GreaterThanOrEqual) |
| |
| | RUNTIME_FUNCTION (Runtime_PromiseRejectEventFromStack) |
| |
| | RUNTIME_FUNCTION (Runtime_PromiseRejectAfterResolved) |
| |
| | RUNTIME_FUNCTION (Runtime_PromiseResolveAfterResolved) |
| |
| | RUNTIME_FUNCTION (Runtime_PromiseRevokeReject) |
| |
| | RUNTIME_FUNCTION (Runtime_EnqueueMicrotask) |
| |
| | RUNTIME_FUNCTION (Runtime_PerformMicrotaskCheckpoint) |
| |
| | RUNTIME_FUNCTION (Runtime_RunMicrotaskCallback) |
| |
| | RUNTIME_FUNCTION (Runtime_PromiseHookInit) |
| |
| | RUNTIME_FUNCTION (Runtime_PromiseHookBefore) |
| |
| | RUNTIME_FUNCTION (Runtime_PromiseHookAfter) |
| |
| | RUNTIME_FUNCTION (Runtime_RejectPromise) |
| |
| | RUNTIME_FUNCTION (Runtime_ResolvePromise) |
| |
| | RUNTIME_FUNCTION (Runtime_ConstructAggregateErrorHelper) |
| |
| | RUNTIME_FUNCTION (Runtime_ConstructInternalAggregateErrorHelper) |
| |
| | RUNTIME_FUNCTION (Runtime_ConstructSuppressedError) |
| |
| | RUNTIME_FUNCTION (Runtime_IsJSProxy) |
| |
| | RUNTIME_FUNCTION (Runtime_JSProxyGetHandler) |
| |
| | RUNTIME_FUNCTION (Runtime_JSProxyGetTarget) |
| |
| | RUNTIME_FUNCTION (Runtime_GetPropertyWithReceiver) |
| |
| | RUNTIME_FUNCTION (Runtime_SetPropertyWithReceiver) |
| |
| | RUNTIME_FUNCTION (Runtime_CheckProxyGetSetTrapResult) |
| |
| | RUNTIME_FUNCTION (Runtime_CheckProxyHasTrapResult) |
| |
| | RUNTIME_FUNCTION (Runtime_CheckProxyDeleteTrapResult) |
| |
| void | FindOneByteStringIndices (base::Vector< const uint8_t > subject, uint8_t pattern, std::vector< int > *indices, unsigned int limit) |
| |
| void | FindTwoByteStringIndices (const base::Vector< const base::uc16 > subject, base::uc16 pattern, std::vector< int > *indices, unsigned int limit) |
| |
| template<typename SubjectChar , typename PatternChar > |
| void | FindStringIndices (Isolate *isolate, base::Vector< const SubjectChar > subject, base::Vector< const PatternChar > pattern, std::vector< int > *indices, unsigned int limit) |
| |
| void | FindStringIndicesDispatch (Isolate *isolate, Tagged< String > subject, Tagged< String > pattern, std::vector< int > *indices, unsigned int limit) |
| |
| template<typename ResultSeqString > |
| static V8_WARN_UNUSED_RESULT Tagged< Object > | StringReplaceGlobalAtomRegExpWithString (Isolate *isolate, DirectHandle< String > subject, DirectHandle< JSRegExp > pattern_regexp, DirectHandle< String > replacement, DirectHandle< RegExpMatchInfo > last_match_info, DirectHandle< AtomRegExpData > regexp_data) |
| |
| static V8_WARN_UNUSED_RESULT Tagged< Object > | StringReplaceGlobalRegExpWithString (Isolate *isolate, DirectHandle< String > subject, DirectHandle< JSRegExp > regexp, DirectHandle< RegExpData > regexp_data, DirectHandle< String > replacement, DirectHandle< RegExpMatchInfo > last_match_info) |
| |
| template<typename ResultSeqString > |
| static V8_WARN_UNUSED_RESULT Tagged< Object > | StringReplaceGlobalRegExpWithEmptyString (Isolate *isolate, DirectHandle< String > subject, DirectHandle< JSRegExp > regexp, DirectHandle< RegExpData > regexp_data, DirectHandle< RegExpMatchInfo > last_match_info) |
| |
| | RUNTIME_FUNCTION (Runtime_StringSplit) |
| |
| | RUNTIME_FUNCTION (Runtime_RegExpExec) |
| |
| | RUNTIME_FUNCTION (Runtime_RegExpGrowRegExpMatchInfo) |
| |
| | RUNTIME_FUNCTION (Runtime_RegExpExperimentalOneshotExec) |
| |
| | RUNTIME_FUNCTION (Runtime_RegExpBuildIndices) |
| |
| | RUNTIME_FUNCTION (Runtime_RegExpExecMultiple) |
| |
| | RUNTIME_FUNCTION (Runtime_StringReplaceNonGlobalRegExpWithFunction) |
| |
| | RUNTIME_FUNCTION (Runtime_RegExpSplit) |
| |
| | RUNTIME_FUNCTION (Runtime_RegExpReplaceRT) |
| |
| | RUNTIME_FUNCTION (Runtime_RegExpInitializeAndCompile) |
| |
| | RUNTIME_FUNCTION (Runtime_RegExpStringFromFlags) |
| |
| | RUNTIME_FUNCTION (Runtime_RegExpMatchGlobalAtom) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowConstAssignError) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowUsingAssignError) |
| |
| | RUNTIME_FUNCTION (Runtime_DeclareModuleExports) |
| |
| | RUNTIME_FUNCTION (Runtime_DeclareGlobals) |
| |
| | RUNTIME_FUNCTION (Runtime_InitializeDisposableStack) |
| |
| | RUNTIME_FUNCTION (Runtime_AddDisposableValue) |
| |
| | RUNTIME_FUNCTION (Runtime_AddAsyncDisposableValue) |
| |
| | RUNTIME_FUNCTION (Runtime_DisposeDisposableStack) |
| |
| | RUNTIME_FUNCTION (Runtime_HandleExceptionsInDisposeDisposableStack) |
| |
| | RUNTIME_FUNCTION (Runtime_DeclareEvalFunction) |
| |
| | RUNTIME_FUNCTION (Runtime_DeclareEvalVar) |
| |
| | RUNTIME_FUNCTION (Runtime_NewSloppyArguments) |
| |
| | RUNTIME_FUNCTION (Runtime_NewStrictArguments) |
| |
| | RUNTIME_FUNCTION (Runtime_NewRestParameter) |
| |
| | RUNTIME_FUNCTION (Runtime_NewClosure) |
| |
| | RUNTIME_FUNCTION (Runtime_NewClosure_Tenured) |
| |
| | RUNTIME_FUNCTION (Runtime_NewFunctionContext) |
| |
| | RUNTIME_FUNCTION (Runtime_PushWithContext) |
| |
| | RUNTIME_FUNCTION (Runtime_PushCatchContext) |
| |
| | RUNTIME_FUNCTION (Runtime_PushBlockContext) |
| |
| | RUNTIME_FUNCTION (Runtime_DeleteLookupSlot) |
| |
| | RUNTIME_FUNCTION (Runtime_LoadLookupSlot) |
| |
| | RUNTIME_FUNCTION (Runtime_LoadLookupSlotInsideTypeof) |
| |
| | RUNTIME_FUNCTION_RETURN_PAIR (Runtime_LoadLookupSlotForCall) |
| |
| | RUNTIME_FUNCTION (Runtime_LoadLookupSlotForCall_Baseline) |
| |
| | RUNTIME_FUNCTION (Runtime_StoreLookupSlot_Sloppy) |
| |
| | RUNTIME_FUNCTION (Runtime_StoreLookupSlot_Strict) |
| |
| | RUNTIME_FUNCTION (Runtime_StoreLookupSlot_SloppyHoisting) |
| |
| | RUNTIME_FUNCTION (Runtime_StoreGlobalNoHoleCheckForReplLetOrConst) |
| |
| | RUNTIME_FUNCTION (Runtime_ShadowRealmWrappedFunctionCreate) |
| |
| | RUNTIME_FUNCTION (Runtime_ShadowRealmImportValue) |
| |
| | RUNTIME_FUNCTION (Runtime_ShadowRealmThrow) |
| |
| | RUNTIME_FUNCTION (Runtime_GetSubstitution) |
| |
| MaybeDirectHandle< String > | StringReplaceOneCharWithString (Isolate *isolate, DirectHandle< String > subject, DirectHandle< String > search, DirectHandle< String > replace, bool *found, int recursion_limit) |
| |
| | RUNTIME_FUNCTION (Runtime_StringReplaceOneCharWithString) |
| |
| | RUNTIME_FUNCTION (Runtime_StringLastIndexOf) |
| |
| | RUNTIME_FUNCTION (Runtime_StringSubstring) |
| |
| | RUNTIME_FUNCTION (Runtime_StringAdd) |
| |
| | RUNTIME_FUNCTION (Runtime_InternalizeString) |
| |
| | RUNTIME_FUNCTION (Runtime_StringCharCodeAt) |
| |
| | RUNTIME_FUNCTION (Runtime_StringCodePointAt) |
| |
| | RUNTIME_FUNCTION (Runtime_StringBuilderConcat) |
| |
| | RUNTIME_FUNCTION (Runtime_StringToArray) |
| |
| | RUNTIME_FUNCTION (Runtime_StringLessThan) |
| |
| | RUNTIME_FUNCTION (Runtime_StringLessThanOrEqual) |
| |
| | RUNTIME_FUNCTION (Runtime_StringGreaterThan) |
| |
| | RUNTIME_FUNCTION (Runtime_StringGreaterThanOrEqual) |
| |
| | RUNTIME_FUNCTION (Runtime_StringEqual) |
| |
| | RUNTIME_FUNCTION (Runtime_StringCompare) |
| |
| | RUNTIME_FUNCTION (Runtime_FlattenString) |
| |
| | RUNTIME_FUNCTION (Runtime_StringMaxLength) |
| |
| | RUNTIME_FUNCTION (Runtime_StringEscapeQuotes) |
| |
| | RUNTIME_FUNCTION (Runtime_StringIsWellFormed) |
| |
| | RUNTIME_FUNCTION (Runtime_StringToWellFormed) |
| |
| | RUNTIME_FUNCTION (Runtime_CreatePrivateSymbol) |
| |
| | RUNTIME_FUNCTION (Runtime_CreatePrivateBrandSymbol) |
| |
| | RUNTIME_FUNCTION (Runtime_CreatePrivateNameSymbol) |
| |
| | RUNTIME_FUNCTION (Runtime_SymbolDescriptiveString) |
| |
| | RUNTIME_FUNCTION (Runtime_SymbolIsPrivate) |
| |
| | RUNTIME_FUNCTION (Runtime_IsInvalidTemporalCalendarField) |
| |
| | RUNTIME_FUNCTION (Runtime_SetWasmCompileControls) |
| |
| | RUNTIME_FUNCTION (Runtime_SetWasmInstantiateControls) |
| |
| | RUNTIME_FUNCTION (Runtime_CountUnoptimizedWasmToJSWrapper) |
| |
| | RUNTIME_FUNCTION (Runtime_HasUnoptimizedWasmToJSWrapper) |
| |
| | RUNTIME_FUNCTION (Runtime_HasUnoptimizedJSToJSWrapper) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmTraceEnter) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmTraceExit) |
| |
| | RUNTIME_FUNCTION (Runtime_IsAsmWasmCode) |
| |
| | RUNTIME_FUNCTION (Runtime_DisallowWasmCodegen) |
| |
| | RUNTIME_FUNCTION (Runtime_IsWasmCode) |
| |
| | RUNTIME_FUNCTION (Runtime_IsWasmTrapHandlerEnabled) |
| |
| | RUNTIME_FUNCTION (Runtime_IsWasmPartialOOBWriteNoop) |
| |
| | RUNTIME_FUNCTION (Runtime_IsThreadInWasm) |
| |
| | RUNTIME_FUNCTION (Runtime_GetWasmRecoveredTrapCount) |
| |
| | RUNTIME_FUNCTION (Runtime_GetWasmExceptionTagId) |
| |
| | RUNTIME_FUNCTION (Runtime_GetWasmExceptionValues) |
| |
| | RUNTIME_FUNCTION (Runtime_SerializeWasmModule) |
| |
| | RUNTIME_FUNCTION (Runtime_DeserializeWasmModule) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmGetNumberOfInstances) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmNumCodeSpaces) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmTraceMemory) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmTierUpFunction) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmTriggerTierUpForTesting) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmNull) |
| |
| static Tagged< Object > | CreateWasmObject (Isolate *isolate, base::Vector< const uint8_t > module_bytes, bool is_struct) |
| |
| static Tagged< Object > | CreateDummyWasmLookAlikeForFuzzing (Isolate *isolate) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmStruct) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmArray) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmEnterDebugging) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmLeaveDebugging) |
| |
| | RUNTIME_FUNCTION (Runtime_IsWasmDebugFunction) |
| |
| | RUNTIME_FUNCTION (Runtime_IsLiftoffFunction) |
| |
| | RUNTIME_FUNCTION (Runtime_IsTurboFanFunction) |
| |
| | RUNTIME_FUNCTION (Runtime_IsUncompiledWasmFunction) |
| |
| | RUNTIME_FUNCTION (Runtime_FreezeWasmLazyCompilation) |
| |
| | RUNTIME_FUNCTION (Runtime_SetWasmImportedStringsEnabled) |
| |
| | RUNTIME_FUNCTION (Runtime_FlushLiftoffCode) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmTriggerCodeGC) |
| |
| | RUNTIME_FUNCTION (Runtime_EstimateCurrentMemoryConsumption) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmCompiledExportWrappersCount) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmDeoptsExecutedCount) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmDeoptsExecutedForFunction) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmSwitchToTheCentralStackCount) |
| |
| | RUNTIME_FUNCTION (Runtime_CheckIsOnCentralStack) |
| |
| | RUNTIME_FUNCTION (Runtime_BuildRefTypeBitfield) |
| |
| | RUNTIME_FUNCTION (Runtime_ClearMegamorphicStubCache) |
| |
| | RUNTIME_FUNCTION (Runtime_ConstructDouble) |
| |
| | RUNTIME_FUNCTION (Runtime_StringIsFlat) |
| |
| | RUNTIME_FUNCTION (Runtime_ConstructConsString) |
| |
| | RUNTIME_FUNCTION (Runtime_ConstructSlicedString) |
| |
| | RUNTIME_FUNCTION (Runtime_ConstructInternalizedString) |
| |
| | RUNTIME_FUNCTION (Runtime_ConstructThinString) |
| |
| | RUNTIME_FUNCTION (Runtime_DeoptimizeFunction) |
| |
| | RUNTIME_FUNCTION (Runtime_DeoptimizeNow) |
| |
| | RUNTIME_FUNCTION (Runtime_LeakHole) |
| |
| | RUNTIME_FUNCTION (Runtime_RunningInSimulator) |
| |
| | RUNTIME_FUNCTION (Runtime_RuntimeEvaluateREPL) |
| |
| | RUNTIME_FUNCTION (Runtime_ICsAreEnabled) |
| |
| | RUNTIME_FUNCTION (Runtime_IsConcurrentRecompilationSupported) |
| |
| | RUNTIME_FUNCTION (Runtime_IsAtomicsWaitAllowed) |
| |
| | RUNTIME_FUNCTION (Runtime_CompileBaseline) |
| |
| | RUNTIME_FUNCTION (Runtime_BenchMaglev) |
| |
| | RUNTIME_FUNCTION (Runtime_BenchTurbofan) |
| |
| | RUNTIME_FUNCTION (Runtime_ActiveTierIsIgnition) |
| |
| | RUNTIME_FUNCTION (Runtime_ActiveTierIsSparkplug) |
| |
| | RUNTIME_FUNCTION (Runtime_ActiveTierIsMaglev) |
| |
| | RUNTIME_FUNCTION (Runtime_ActiveTierIsTurbofan) |
| |
| | RUNTIME_FUNCTION (Runtime_IsSparkplugEnabled) |
| |
| | RUNTIME_FUNCTION (Runtime_IsMaglevEnabled) |
| |
| | RUNTIME_FUNCTION (Runtime_IsTurbofanEnabled) |
| |
| | RUNTIME_FUNCTION (Runtime_CurrentFrameIsTurbofan) |
| |
| | RUNTIME_FUNCTION (Runtime_OptimizeMaglevOnNextCall) |
| |
| | RUNTIME_FUNCTION (Runtime_OptimizeFunctionOnNextCall) |
| |
| | RUNTIME_FUNCTION (Runtime_EnsureFeedbackVectorForFunction) |
| |
| | RUNTIME_FUNCTION (Runtime_PrepareFunctionForOptimization) |
| |
| | RUNTIME_FUNCTION (Runtime_OptimizeOsr) |
| |
| | RUNTIME_FUNCTION (Runtime_BaselineOsr) |
| |
| | RUNTIME_FUNCTION (Runtime_NeverOptimizeFunction) |
| |
| | RUNTIME_FUNCTION (Runtime_GetOptimizationStatus) |
| |
| | RUNTIME_FUNCTION (Runtime_GetFunctionForCurrentFrame) |
| |
| | RUNTIME_FUNCTION (Runtime_DisableOptimizationFinalization) |
| |
| | RUNTIME_FUNCTION (Runtime_WaitForBackgroundOptimization) |
| |
| | RUNTIME_FUNCTION (Runtime_FinalizeOptimization) |
| |
| | RUNTIME_FUNCTION (Runtime_ForceFlush) |
| |
| static void | ReturnNull (const v8::FunctionCallbackInfo< v8::Value > &info) |
| |
| | RUNTIME_FUNCTION (Runtime_GetUndetectable) |
| |
| | RUNTIME_FUNCTION (Runtime_GetAbstractModuleSource) |
| |
| | RUNTIME_FUNCTION (Runtime_GetCallable) |
| |
| | RUNTIME_FUNCTION (Runtime_ClearFunctionFeedback) |
| |
| | RUNTIME_FUNCTION (Runtime_NotifyContextDisposed) |
| |
| | RUNTIME_FUNCTION (Runtime_SetAllocationTimeout) |
| |
| | RUNTIME_FUNCTION (Runtime_SimulateNewspaceFull) |
| |
| | RUNTIME_FUNCTION (Runtime_ScheduleGCInStackCheck) |
| |
| | RUNTIME_FUNCTION (Runtime_TakeHeapSnapshot) |
| |
| static void | DebugPrintImpl (Tagged< MaybeObject > maybe_object, std::ostream &os) |
| |
| | RUNTIME_FUNCTION (Runtime_DebugPrint) |
| |
| | RUNTIME_FUNCTION (Runtime_DebugPrintPtr) |
| |
| | RUNTIME_FUNCTION (Runtime_DebugPrintWord) |
| |
| | RUNTIME_FUNCTION (Runtime_DebugPrintFloat) |
| |
| | RUNTIME_FUNCTION (Runtime_PrintWithNameForAssert) |
| |
| | RUNTIME_FUNCTION (Runtime_DebugTrace) |
| |
| | RUNTIME_FUNCTION (Runtime_GlobalPrint) |
| |
| | RUNTIME_FUNCTION (Runtime_SystemBreak) |
| |
| | RUNTIME_FUNCTION (Runtime_SetForceSlowPath) |
| |
| | RUNTIME_FUNCTION (Runtime_Abort) |
| |
| | RUNTIME_FUNCTION (Runtime_AbortJS) |
| |
| | RUNTIME_FUNCTION (Runtime_AbortCSADcheck) |
| |
| | RUNTIME_FUNCTION (Runtime_DisassembleFunction) |
| |
| | RUNTIME_FUNCTION (Runtime_TraceEnter) |
| |
| | RUNTIME_FUNCTION (Runtime_TraceExit) |
| |
| | RUNTIME_FUNCTION (Runtime_HaveSameMap) |
| |
| | RUNTIME_FUNCTION (Runtime_InLargeObjectSpace) |
| |
| | RUNTIME_FUNCTION (Runtime_HasElementsInALargeObjectSpace) |
| |
| | RUNTIME_FUNCTION (Runtime_HasCowElements) |
| |
| | RUNTIME_FUNCTION (Runtime_InYoungGeneration) |
| |
| | RUNTIME_FUNCTION (Runtime_PretenureAllocationSite) |
| |
| | RUNTIME_FUNCTION (Runtime_DisallowCodegenFromStrings) |
| |
| | RUNTIME_FUNCTION (Runtime_RegexpHasBytecode) |
| |
| | RUNTIME_FUNCTION (Runtime_RegexpHasNativeCode) |
| |
| | RUNTIME_FUNCTION (Runtime_RegexpTypeTag) |
| |
| | RUNTIME_FUNCTION (Runtime_RegexpIsUnmodified) |
| |
| | RUNTIME_FUNCTION (Runtime_IsConcatSpreadableProtector) |
| |
| | RUNTIME_FUNCTION (Runtime_TypedArrayLengthProtector) |
| |
| | RUNTIME_FUNCTION (Runtime_TypedArraySpeciesProtector) |
| |
| | RUNTIME_FUNCTION (Runtime_RegExpSpeciesProtector) |
| |
| | RUNTIME_FUNCTION (Runtime_PromiseSpeciesProtector) |
| |
| | RUNTIME_FUNCTION (Runtime_ArraySpeciesProtector) |
| |
| | RUNTIME_FUNCTION (Runtime_MapIteratorProtector) |
| |
| | RUNTIME_FUNCTION (Runtime_SetIteratorProtector) |
| |
| | RUNTIME_FUNCTION (Runtime_StringIteratorProtector) |
| |
| | RUNTIME_FUNCTION (Runtime_ArrayIteratorProtector) |
| |
| | RUNTIME_FUNCTION (Runtime_NoElementsProtector) |
| |
| | RUNTIME_FUNCTION (Runtime_StringWrapperToPrimitiveProtector) |
| |
| | RUNTIME_FUNCTION (Runtime_SerializeDeserializeNow) |
| |
| | RUNTIME_FUNCTION (Runtime_HeapObjectVerify) |
| |
| | RUNTIME_FUNCTION (Runtime_CompleteInobjectSlackTracking) |
| |
| | RUNTIME_FUNCTION (Runtime_TurbofanStaticAssert) |
| |
| | RUNTIME_FUNCTION (Runtime_IsBeingInterpreted) |
| |
| | RUNTIME_FUNCTION (Runtime_EnableCodeLoggingForTesting) |
| |
| | RUNTIME_FUNCTION (Runtime_NewRegExpWithBacktrackLimit) |
| |
| | RUNTIME_FUNCTION (Runtime_Is64Bit) |
| |
| | RUNTIME_FUNCTION (Runtime_BigIntMaxLengthBits) |
| |
| | RUNTIME_FUNCTION (Runtime_IsSameHeapObject) |
| |
| | RUNTIME_FUNCTION (Runtime_IsSharedString) |
| |
| | RUNTIME_FUNCTION (Runtime_ShareObject) |
| |
| | RUNTIME_FUNCTION (Runtime_IsInPlaceInternalizableString) |
| |
| | RUNTIME_FUNCTION (Runtime_IsInternalizedString) |
| |
| | RUNTIME_FUNCTION (Runtime_StringToCString) |
| |
| | RUNTIME_FUNCTION (Runtime_StringUtf8Value) |
| |
| | RUNTIME_FUNCTION (Runtime_SharedGC) |
| |
| | RUNTIME_FUNCTION (Runtime_AtomicsSynchronizationPrimitiveNumWaitersForTesting) |
| |
| | RUNTIME_FUNCTION (Runtime_AtomicsSychronizationNumAsyncWaitersInIsolateForTesting) |
| |
| | RUNTIME_FUNCTION (Runtime_GetWeakCollectionSize) |
| |
| | RUNTIME_FUNCTION (Runtime_SetPriorityBestEffort) |
| |
| | RUNTIME_FUNCTION (Runtime_SetPriorityUserVisible) |
| |
| | RUNTIME_FUNCTION (Runtime_SetPriorityUserBlocking) |
| |
| | RUNTIME_FUNCTION (Runtime_IsEfficiencyModeEnabled) |
| |
| | RUNTIME_FUNCTION (Runtime_SetBatterySaverMode) |
| |
| | RUNTIME_FUNCTION (Runtime_IsWasmTieringPredictable) |
| |
| | RUNTIME_FUNCTION (Runtime_GetFeedback) |
| |
| | RUNTIME_FUNCTION (Runtime_CheckNoWriteBarrierNeeded) |
| |
| | RUNTIME_FUNCTION (Runtime_ArrayBufferDetachForceWasm) |
| |
| | RUNTIME_FUNCTION (Runtime_ArrayBufferDetach) |
| |
| | RUNTIME_FUNCTION (Runtime_ArrayBufferSetDetachKey) |
| |
| | RUNTIME_FUNCTION (Runtime_TypedArrayCopyElements) |
| |
| | RUNTIME_FUNCTION (Runtime_TypedArrayGetBuffer) |
| |
| | RUNTIME_FUNCTION (Runtime_GrowableSharedArrayBufferByteLength) |
| |
| | RUNTIME_FUNCTION (Runtime_TypedArraySortFast) |
| |
| | RUNTIME_FUNCTION (Runtime_TypedArraySet) |
| |
| | RUNTIME_FUNCTION (Runtime_ArrayBufferMaxByteLength) |
| |
| static ObjectPair | MakePair (Tagged< Object > x, Tagged< Object > y) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmGenericWasmToJSObject) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmGenericJSToWasmObject) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmJSToWasmObject) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmMemoryGrow) |
| |
| | RUNTIME_FUNCTION (Runtime_TrapHandlerThrowWasmError) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowWasmError) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowWasmStackOverflow) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmThrowJSTypeError) |
| |
| | RUNTIME_FUNCTION (Runtime_ThrowWasmSuspendError) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmThrowRangeError) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmThrowDataViewTypeError) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmThrowDataViewDetachedError) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmThrowTypeError) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmThrow) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmReThrow) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmStackGuard) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmCompileLazy) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmAllocateFeedbackVector) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmLiftoffDeoptFinish) |
| |
| | RUNTIME_FUNCTION (Runtime_TierUpJSToWasmWrapper) |
| |
| | RUNTIME_FUNCTION (Runtime_IsWasmExternalFunction) |
| |
| | RUNTIME_FUNCTION (Runtime_TierUpWasmToJSWrapper) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmTriggerTierUp) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmI32AtomicWait) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmI64AtomicWait) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmRefFunc) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmInternalFunctionCreateExternal) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmFunctionTableGet) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmFunctionTableSet) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmTableInit) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmTableCopy) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmTableGrow) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmTableFill) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmDebugBreak) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmArrayCopy) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmAllocateDescriptorStruct) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmArrayNewSegment) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmArrayInitSegment) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmAllocateSuspender) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmCastToSpecialPrimitiveArray) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmStringNewWtf8) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmStringNewWtf8Array) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmStringNewWtf16) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmStringNewWtf16Array) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmSubstring) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmStringConst) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmStringNewSegmentWtf8) |
| |
| void | ToUtf8Lossy (Isolate *isolate, DirectHandle< String > string, std::string &out) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmStringMeasureUtf8) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmStringMeasureWtf8) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmStringEncodeWtf8) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmStringEncodeWtf8Array) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmStringToUtf8Array) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmStringEncodeWtf16) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmStringAsWtf8) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmStringViewWtf8Encode) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmStringViewWtf8Slice) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmStringFromCodePoint) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmStringHash) |
| |
| | RUNTIME_FUNCTION (Runtime_JSFinalizationRegistryRegisterWeakCellWithUnregisterToken) |
| |
| | RUNTIME_FUNCTION (Runtime_JSWeakRefAddToKeptObjects) |
| |
| std::ostream & | operator<< (std::ostream &os, Runtime::FunctionId id) |
| |
| V8_INLINE size_t | ReadBoundedSizeField (Address field_address) |
| |
| V8_INLINE void | WriteBoundedSizeField (Address field_address, size_t value) |
| |
| V8_INLINE Address | ReadCodeEntrypointViaCodePointerField (Address field_address, CodeEntrypointTag tag) |
| |
| V8_INLINE void | WriteCodeEntrypointViaCodePointerField (Address field_address, Address value, CodeEntrypointTag tag) |
| |
| template<CppHeapPointerTag lower_bound, CppHeapPointerTag upper_bound> |
| V8_INLINE Address | ReadCppHeapPointerField (Address field_address, IsolateForPointerCompression isolate) |
| |
| V8_INLINE Address | ReadCppHeapPointerField (Address field_address, IsolateForPointerCompression isolate, CppHeapPointerTagRange tag_range) |
| |
| template<CppHeapPointerTag tag> |
| V8_INLINE void | WriteLazilyInitializedCppHeapPointerField (Address field_address, IsolateForPointerCompression isolate, Address value) |
| |
| V8_INLINE void | WriteLazilyInitializedCppHeapPointerField (Address field_address, IsolateForPointerCompression isolate, Address value, CppHeapPointerTag tag) |
| |
| template<ExternalPointerTag tag> |
| V8_INLINE void | InitExternalPointerField (Address host_address, Address field_address, IsolateForSandbox isolate, Address value) |
| |
| template<ExternalPointerTagRange tag_range> |
| V8_INLINE Address | ReadExternalPointerField (Address field_address, IsolateForSandbox isolate) |
| |
| template<ExternalPointerTag tag> |
| V8_INLINE void | WriteExternalPointerField (Address field_address, IsolateForSandbox isolate, Address value) |
| |
| V8_INLINE void | SetupLazilyInitializedExternalPointerField (Address field_address) |
| |
| V8_INLINE void | InitLazyExternalPointerField (Address field_address) |
| |
| V8_INLINE void | InitSelfIndirectPointerField (Address field_address, IsolateForSandbox isolate, Tagged< HeapObject > host, IndirectPointerTag tag, TrustedPointerPublishingScope *opt_publishing_scope) |
| |
| template<IndirectPointerTag tag> |
| V8_INLINE Tagged< Object > | ReadIndirectPointerField (Address field_address, IsolateForSandbox isolate, AcquireLoadTag) |
| |
| template<IndirectPointerTag tag> |
| V8_INLINE void | WriteIndirectPointerField (Address field_address, Tagged< ExposedTrustedObject > value, ReleaseStoreTag) |
| |
| static V8_INLINE constexpr bool | IsSharedTrustedPointerType (IndirectPointerTag tag) |
| |
| static V8_INLINE constexpr bool | IsPerIsolateTrustedPointerType (IndirectPointerTag tag) |
| |
| V8_INLINE constexpr bool | IsValidIndirectPointerTag (IndirectPointerTag tag) |
| |
| V8_INLINE constexpr bool | IsTrustedSpaceMigrationInProgressForObjectsWithTag (IndirectPointerTag tag) |
| |
| V8_INLINE IndirectPointerTag | IndirectPointerTagFromInstanceType (InstanceType instance_type) |
| |
| V8_INLINE InstanceType | InstanceTypeFromIndirectPointerTag (IndirectPointerTag tag) |
| |
| V8_INLINE IsolateForSandbox | GetIsolateForSandbox (Tagged< HeapObject >) |
| |
| V8_INLINE IsolateForSandbox | GetCurrentIsolateForSandbox () |
| |
| V8_INLINE bool | InsideSandbox (uintptr_t address) |
| |
| V8_INLINE void * | EmptyBackingStoreBuffer () |
| |
| V8_INLINE Address | ReadSandboxedPointerField (Address field_address, PtrComprCageBase cage_base) |
| |
| V8_INLINE void | WriteSandboxedPointerField (Address field_address, PtrComprCageBase cage_base, Address pointer) |
| |
| v8::StartupData | InternalFieldSerializeWrapper (int index, bool field_is_nullptr, v8::SerializeInternalFieldsCallback user_callback, v8::Local< v8::Object > api_obj) |
| |
| v8::StartupData | ContextDataSerializeWrapper (int index, bool field_is_nullptr, v8::SerializeContextDataCallback user_callback, v8::Local< v8::Context > api_obj) |
| |
| DataDirective | PointerSizeDirective () |
| |
| int | DataDirectiveSize (DataDirective directive) |
| |
| std::unique_ptr< PlatformEmbeddedFileWriterBase > | NewPlatformEmbeddedFileWriter (const char *target_arch, const char *target_os) |
| |
| void | NoExternalReferencesCallback () |
| |
| uint32_t | GetUncompressedSize (const Bytef *compressed_data) |
| |
| void | SetSnapshotFromFile (StartupData *snapshot_blob) |
| |
| uint32_t | Checksum (base::Vector< const uint8_t > payload) |
| |
| SnapshotData | MaybeDecompress (Isolate *isolate, base::Vector< const uint8_t > snapshot_data) |
| |
| v8::StartupData | CreateSnapshotDataBlobInternal (v8::SnapshotCreator::FunctionCodeHandling function_code_handling, const char *embedded_source, SnapshotCreator &snapshot_creator, Snapshot::SerializerFlags serializer_flags) |
| |
| v8::StartupData | CreateSnapshotDataBlobInternal (v8::SnapshotCreator::FunctionCodeHandling function_code_handling, const char *embedded_source, Snapshot::SerializerFlags serializer_flags) |
| |
| v8::StartupData | CreateSnapshotDataBlobInternalForInspectorTest (v8::SnapshotCreator::FunctionCodeHandling function_code_handling, const char *embedded_source) |
| |
| v8::StartupData | WarmUpSnapshotDataBlobInternal (v8::StartupData cold_snapshot_blob, const char *warmup_source) |
| |
| bool | AddBuiltinIfNotProcessed (Builtin builtin, std::vector< Builtin > &order, std::unordered_set< Builtin > &processed_builtins) |
| |
| constexpr int | AsciiAlphaToLower (base::uc32 c) |
| |
| constexpr bool | IsCarriageReturn (base::uc32 c) |
| |
| constexpr bool | IsLineFeed (base::uc32 c) |
| |
| constexpr bool | IsAsciiIdentifier (base::uc32 c) |
| |
| constexpr bool | IsAlphaNumeric (base::uc32 c) |
| |
| constexpr bool | IsDecimalDigit (base::uc32 c) |
| |
| constexpr bool | IsHexDigit (base::uc32 c) |
| |
| constexpr bool | IsOctalDigit (base::uc32 c) |
| |
| constexpr bool | IsNonOctalDecimalDigit (base::uc32 c) |
| |
| constexpr bool | IsBinaryDigit (base::uc32 c) |
| |
| constexpr bool | IsAscii (base::uc32 c) |
| |
| constexpr bool | IsAsciiLower (base::uc32 c) |
| |
| constexpr bool | IsAsciiUpper (base::uc32 c) |
| |
| constexpr base::uc32 | ToAsciiUpper (base::uc32 c) |
| |
| constexpr base::uc32 | ToAsciiLower (base::uc32 c) |
| |
| constexpr bool | IsRegExpWord (base::uc32 c) |
| |
| constexpr bool | IsOneByteIDStart (base::uc32 c) |
| |
| constexpr bool | IsOneByteIDContinue (base::uc32 c) |
| |
| constexpr bool | IsOneByteWhitespace (base::uc32 c) |
| |
| constexpr uint8_t | BuildOneByteCharFlags (base::uc32 c) |
| |
| bool | IsIdentifierStart (base::uc32 c) |
| |
| bool | IsIdentifierPart (base::uc32 c) |
| |
| bool | IsWhiteSpace (base::uc32 c) |
| |
| bool | IsWhiteSpaceOrLineTerminator (base::uc32 c) |
| |
| bool | IsLineTerminatorSequence (base::uc32 c, base::uc32 next) |
| |
| bool | IsIdentifierStartSlow (base::uc32 c) |
| |
| bool | IsIdentifierPartSlow (base::uc32 c) |
| |
| bool | IsWhiteSpaceSlow (base::uc32 c) |
| |
| bool | IsWhiteSpaceOrLineTerminatorSlow (base::uc32 c) |
| |
| template<typename sinkchar > |
| void | StringBuilderConcatHelper (Tagged< String > special, sinkchar *sink, Tagged< FixedArray > fixed_array, int array_length) |
| |
| int | StringBuilderConcatLength (int special_length, Tagged< FixedArray > fixed_array, int array_length, bool *one_byte) |
| |
| template void | StringBuilderConcatHelper< uint8_t > (Tagged< String > special, uint8_t *sink, Tagged< FixedArray > fixed_array, int array_length) |
| |
| template void | StringBuilderConcatHelper< base::uc16 > (Tagged< String > special, base::uc16 *sink, Tagged< FixedArray > fixed_array, int array_length) |
| |
| template<char low, char high> |
| static word_t | AsciiRangeMask (word_t w) |
| |
| template<class CaseMapping > |
| uint32_t | FastAsciiCasePrefixLength (const char *src, uint32_t length) |
| |
| template uint32_t | FastAsciiCasePrefixLength< unibrow::ToLowercase > (const char *src, uint32_t length) |
| |
| template uint32_t | FastAsciiCasePrefixLength< unibrow::ToUppercase > (const char *src, uint32_t length) |
| |
| template<class CaseMapping > |
| uint32_t | FastAsciiConvert (char *dst, const char *src, uint32_t length) |
| |
| template uint32_t | FastAsciiConvert< unibrow::ToLowercase > (char *dst, const char *src, uint32_t length) |
| |
| template uint32_t | FastAsciiConvert< unibrow::ToUppercase > (char *dst, const char *src, uint32_t length) |
| |
| template<typename T , typename U > |
| T | AlignDown (T value, U alignment) |
| |
| uint8_t | GetHighestValueByte (base::uc16 character) |
| |
| uint8_t | GetHighestValueByte (uint8_t character) |
| |
| template<typename PatternChar , typename SubjectChar > |
| int | FindFirstCharacter (base::Vector< const PatternChar > pattern, base::Vector< const SubjectChar > subject, int index) |
| |
| template<typename PatternChar , typename SubjectChar > |
| bool | CharCompare (const PatternChar *pattern, const SubjectChar *subject, int length) |
| |
| template<typename SubjectChar , typename PatternChar > |
| int | SearchString (Isolate *isolate, base::Vector< const SubjectChar > subject, base::Vector< const PatternChar > pattern, int start_index) |
| |
| template<typename SubjectChar , typename PatternChar > |
| intptr_t | SearchStringRaw (Isolate *isolate, const SubjectChar *subject_ptr, int subject_length, const PatternChar *pattern_ptr, int pattern_length, int start_index) |
| |
| static bool | IsControlChar (char c) |
| |
| | DEFINE_UNICODE_DECODER (Utf8Decoder) |
| |
| uint32_t | NonAsciiStart (const uint8_t *chars, uint32_t length) |
| |
| std::unique_ptr< CancelableTask > | MakeCancelableTask (Isolate *isolate, std::function< void()> func) |
| |
| std::unique_ptr< CancelableTask > | MakeCancelableTask (CancelableTaskManager *manager, std::function< void()> func) |
| |
| std::unique_ptr< CancelableIdleTask > | MakeCancelableIdleTask (Isolate *isolate, std::function< void(double)> func) |
| |
| std::unique_ptr< CancelableIdleTask > | MakeCancelableIdleTask (CancelableTaskManager *manager, std::function< void(double)> func) |
| |
| v8::PageAllocator * | GetPlatformPageAllocator () |
| |
| v8::VirtualAddressSpace * | GetPlatformVirtualAddressSpace () |
| |
| v8::PageAllocator * | SetPlatformPageAllocatorForTesting (v8::PageAllocator *new_page_allocator) |
| |
| char * | StrDup (const char *str) |
| |
| char * | StrNDup (const char *str, size_t n) |
| |
| void * | AllocWithRetry (size_t size, MallocFn malloc_fn) |
| |
| base::AllocationResult< void * > | AllocAtLeastWithRetry (size_t size) |
| |
| void * | AlignedAllocWithRetry (size_t size, size_t alignment) |
| |
| void | AlignedFree (void *ptr) |
| |
| size_t | AllocatePageSize () |
| |
| size_t | CommitPageSize () |
| |
| void * | GetRandomMmapAddr () |
| |
| void * | AllocatePages (v8::PageAllocator *page_allocator, void *hint, size_t size, size_t alignment, PageAllocator::Permission access) |
| |
| void | FreePages (v8::PageAllocator *page_allocator, void *address, const size_t size) |
| |
| void | ReleasePages (v8::PageAllocator *page_allocator, void *address, size_t size, size_t new_size) |
| |
| bool | SetPermissions (v8::PageAllocator *page_allocator, void *address, size_t size, PageAllocator::Permission access) |
| |
| void | OnCriticalMemoryPressure () |
| |
| template<typename T > |
| T * | NewArray (size_t size) |
| |
template<typename T >
requires base::is_trivially_copyable<T>::value |
| T * | NewArray (size_t size, T default_val) |
| |
| template<typename T > |
| void | DeleteArray (T *array) |
| |
| char * | StrNDup (const char *str, int n) |
| |
| v8::PageAllocator * | GetArrayBufferPageAllocator () |
| |
| bool | SetPermissions (v8::PageAllocator *page_allocator, Address address, size_t size, PageAllocator::Permission access) |
| |
| | ASSERT_TRIVIALLY_COPYABLE (Float32) |
| |
| | ASSERT_TRIVIALLY_COPYABLE (Float64) |
| |
| void | FormatBytesToHex (char *formatted, size_t size_of_formatted, const uint8_t *val, size_t size_of_val) |
| |
| void | init_memcopy_functions () |
| |
| void | MemCopy (void *dest, const void *src, size_t size) |
| |
| V8_EXPORT_PRIVATE void | MemMove (void *dest, const void *src, size_t size) |
| |
| template<size_t kBlockCopyLimit, typename T > |
| void | CopyImpl (T *dst_ptr, const T *src_ptr, size_t count) |
| |
| void | CopyWords (Address dst, const Address src, size_t num_words) |
| |
| template<typename T > |
| void | CopyBytes (T *dst, const T *src, size_t num_bytes) |
| |
| void | MemsetUint32 (uint32_t *dest, uint32_t value, size_t counter) |
| |
| void | MemsetPointer (Address *dest, Address value, size_t counter) |
| |
| template<typename T , typename U > |
| void | MemsetPointer (T **dest, U *value, size_t counter) |
| |
| template<typename T > |
| void | MemsetPointer (T **dest, std::nullptr_t, size_t counter) |
| |
| template<typename SrcType , typename DstType > |
| void | CopyChars (DstType *dst, const SrcType *src, size_t count) V8_NONNULL(1 |
| |
| template<typename SrcType , typename DstType > |
| void void | CopyChars (DstType *dst, const SrcType *src, size_t count) |
| |
| | DEFINE_LAZY_LEAKY_OBJECT_GETTER (base::RecursiveMutex, StdoutStream::GetStdoutMutex) namespace |
| |
| std::ostream & | operator<< (std::ostream &os, const AsReversiblyEscapedUC16 &c) |
| |
| std::ostream & | operator<< (std::ostream &os, const AsEscapedUC16ForJSON &c) |
| |
| std::ostream & | operator<< (std::ostream &os, const AsUC16 &c) |
| |
| std::ostream & | operator<< (std::ostream &os, const AsUC32 &c) |
| |
| std::ostream & | operator<< (std::ostream &os, const AsHex &hex) |
| |
| std::ostream & | operator<< (std::ostream &os, const AsHexBytes &hex) |
| |
| template<typename T > |
| auto | PrintCollection (const T &collection) -> PrintIteratorRange< typename std::common_type< decltype(std::begin(collection)), decltype(std::end(collection))>::type > |
| |
| template<typename T > |
| std::ostream & | operator<< (std::ostream &os, const PrintIteratorRange< T > &range) |
| |
| static void | SHA256_Transform (LITE_SHA256_CTX *ctx) |
| |
| void | SHA256_init (LITE_SHA256_CTX *ctx) |
| |
| void | SHA256_update (LITE_SHA256_CTX *ctx, const void *data, size_t len) |
| |
| const uint8_t * | SHA256_final (LITE_SHA256_CTX *ctx) |
| |
| const uint8_t * | SHA256_hash (const void *data, size_t len, uint8_t *digest) |
| |
| template<typename Char > |
| bool | TryAddArrayIndexChar (uint32_t *index, Char c) |
| |
| template<typename Char > |
| bool | TryAddIntegerIndexChar (uint64_t *index, Char c) |
| |
| template<typename Stream , typename index_t , enum ToIndexMode mode> |
| bool | StringToIndex (Stream *stream, index_t *index) |
| |
| std::ostream & | operator<< (std::ostream &os, FeedbackSlot slot) |
| |
| size_t | hash_value (BytecodeOffset id) |
| |
| std::ostream & | operator<< (std::ostream &os, BytecodeOffset id) |
| |
| void | PrintF (const char *format,...) |
| |
| void | PrintF (FILE *out, const char *format,...) |
| |
| void | PrintPID (const char *format,...) |
| |
| void | PrintIsolate (void *isolate, const char *format,...) |
| |
| char * | ReadLine (const char *prompt) |
| |
| std::string | ReadFile (const char *filename, bool *exists, bool verbose) |
| |
| std::string | ReadFile (FILE *file, bool *exists, bool verbose) |
| |
| int | WriteChars (const char *filename, const char *str, int size, bool verbose) |
| |
| int | WriteBytes (const char *filename, const uint8_t *bytes, int size, bool verbose) |
| |
| bool | DoubleToBoolean (double d) |
| |
| uintptr_t | GetCurrentStackPosition () |
| |
| bool | PassesFilter (base::Vector< const char > name, base::Vector< const char > filter) |
| |
| template<typename T > |
| static T | ArithmeticShiftRight (T x, int shift) |
| |
| template<typename T > |
| T | JSMax (T x, T y) |
| |
| template<typename T > |
| T | JSMin (T x, T y) |
| |
template<typename T >
requires std::is_signed<T>::value |
| std::make_unsigned< T >::type | Abs (T a) |
| |
| double | Modulo (double x, double y) |
| |
| template<typename T > |
| T | SaturateAdd (T a, T b) |
| |
| template<typename T > |
| T | SaturateSub (T a, T b) |
| |
| template<typename T > |
| T | SaturateRoundingQMul (T a, T b) |
| |
| template<typename Wide , typename Narrow > |
| Wide | MultiplyLong (Narrow a, Narrow b) |
| |
| template<typename Wide , typename Narrow > |
| Wide | AddLong (Narrow a, Narrow b) |
| |
| template<typename T > |
| T | RoundingAverageUnsigned (T a, T b) |
| |
| uint32_t | ComputeUnseededHash (uint32_t key) |
| |
| uint32_t | ComputeLongHash (uint64_t key) |
| |
| uint32_t | ComputeSeededHash (uint32_t key, uint64_t seed) |
| |
| uint32_t | ComputePointerHash (void *ptr) |
| |
| uint32_t | ComputeAddressHash (Address address) |
| |
| template<typename IntType , typename Char > |
| V8_INLINE bool | OverlappingCompare (const Char *lhs, const Char *rhs, size_t count) |
| |
| template<typename Char > |
| V8_INLINE bool | SimdMemEqual (const Char *lhs, const Char *rhs, size_t count) |
| |
| template<typename lchar , typename rchar > |
| bool | CompareCharsEqualUnsigned (const lchar *lhs, const rchar *rhs, size_t chars) |
| |
| template<typename lchar , typename rchar > |
| bool | CompareCharsEqual (const lchar *lhs, const rchar *rhs, size_t chars) |
| |
| template<typename lchar , typename rchar > |
| int | CompareCharsUnsigned (const lchar *lhs, const rchar *rhs, size_t chars) |
| |
| template<typename lchar , typename rchar > |
| int | CompareChars (const lchar *lhs, const rchar *rhs, size_t chars) |
| |
| constexpr uint64_t | TenToThe (uint32_t exponent) |
| |
| uint32_t | unsigned_bitextract_32 (int msb, int lsb, uint32_t x) |
| |
| uint64_t | unsigned_bitextract_64 (int msb, int lsb, uint64_t x) |
| |
| int32_t | signed_bitextract_32 (int msb, int lsb, uint32_t x) |
| |
| constexpr bool | is_intn (int64_t x, unsigned n) |
| |
| constexpr bool | is_uintn (int64_t x, unsigned n) |
| |
| template<class T > |
| constexpr T | truncate_to_intn (T x, unsigned n) |
| |
| V8_EXPORT_PRIVATE void | PRINTF_FORMAT (1, 2) PrintF(const char *format |
| |
| V8_EXPORT_PRIVATE void V8_EXPORT_PRIVATE void | PRINTF_FORMAT (2, 3) PrintF(FILE *out |
| |
| template<typename Char > |
| bool | TryAddIndexChar (uint32_t *index, Char c) |
| |
| static uint16_t | ByteReverse16 (uint16_t value) |
| |
| static uint32_t | ByteReverse32 (uint32_t value) |
| |
| static uint64_t | ByteReverse64 (uint64_t value) |
| |
| template<typename V > |
| static V | ByteReverse (V value) |
| |
| V8_INLINE void | ZapCode (Address addr, size_t size_in_bytes) |
| |
| bool | RoundUpToPageSize (size_t byte_length, size_t page_size, size_t max_allowed_byte_length, size_t *pages) |
| |
| | RUNTIME_FUNCTION (Runtime_WasmRunInterpreter) |
| |
| | ACCESSORS (WasmGlobalObject, untagged_buffer, Tagged< JSArrayBuffer >, kUntaggedBufferOffset) ACCESSORS(WasmGlobalObject |
| |
| kTaggedBufferOffset | TRUSTED_POINTER_ACCESSORS (WasmGlobalObject, trusted_data, WasmTrustedInstanceData, kTrustedDataOffset, kWasmTrustedInstanceDataIndirectPointerTag) wasm |
| |
| | PRIMITIVE_ACCESSORS (WasmTrustedInstanceData, memory0_start, uint8_t *, kMemory0StartOffset) PRIMITIVE_ACCESSORS(WasmTrustedInstanceData |
| |
| kMemory0SizeOffset | PROTECTED_POINTER_ACCESSORS (WasmTrustedInstanceData, managed_native_module, TrustedManaged< wasm::NativeModule >, kProtectedManagedNativeModuleOffset) PRIMITIVE_ACCESSORS(WasmTrustedInstanceData |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset | PRIMITIVE_ACCESSORS (WasmTrustedInstanceData, new_allocation_top_address, Address *, kNewAllocationTopAddressOffset) PRIMITIVE_ACCESSORS(WasmTrustedInstanceData |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset | PRIMITIVE_ACCESSORS (WasmTrustedInstanceData, old_allocation_top_address, Address *, kOldAllocationTopAddressOffset) PRIMITIVE_ACCESSORS(WasmTrustedInstanceData |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset | ACCESSORS (WasmTrustedInstanceData, imported_mutable_globals, Tagged< FixedAddressArray >, kImportedMutableGlobalsOffset) PRIMITIVE_ACCESSORS(WasmTrustedInstanceData |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset kJumpTableStartOffset | PRIMITIVE_ACCESSORS (WasmTrustedInstanceData, hook_on_function_call_address, Address, kHookOnFunctionCallAddressOffset) PRIMITIVE_ACCESSORS(WasmTrustedInstanceData |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset kJumpTableStartOffset std::atomic< uint32_t > kTieringBudgetArrayOffset | PROTECTED_POINTER_ACCESSORS (WasmTrustedInstanceData, memory_bases_and_sizes, TrustedFixedAddressArray, kProtectedMemoryBasesAndSizesOffset) ACCESSORS(WasmTrustedInstanceData |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset kJumpTableStartOffset std::atomic< uint32_t > kTieringBudgetArrayOffset kDataSegmentStartsOffset | ACCESSORS (WasmTrustedInstanceData, data_segment_sizes, Tagged< FixedUInt32Array >, kDataSegmentSizesOffset) ACCESSORS(WasmTrustedInstanceData |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset kJumpTableStartOffset std::atomic< uint32_t > kTieringBudgetArrayOffset kDataSegmentStartsOffset kElementSegmentsOffset | PRIMITIVE_ACCESSORS (WasmTrustedInstanceData, break_on_entry, uint8_t, kBreakOnEntryOffset) OPTIONAL_ACCESSORS(WasmTrustedInstanceData |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset kJumpTableStartOffset std::atomic< uint32_t > kTieringBudgetArrayOffset kDataSegmentStartsOffset kElementSegmentsOffset kInstanceObjectOffset | ACCESSORS (WasmTrustedInstanceData, native_context, Tagged< Context >, kNativeContextOffset) ACCESSORS(WasmTrustedInstanceData |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset kJumpTableStartOffset std::atomic< uint32_t > kTieringBudgetArrayOffset kDataSegmentStartsOffset kElementSegmentsOffset kInstanceObjectOffset kMemoryObjectsOffset | OPTIONAL_ACCESSORS (WasmTrustedInstanceData, untagged_globals_buffer, Tagged< JSArrayBuffer >, kUntaggedGlobalsBufferOffset) OPTIONAL_ACCESSORS(WasmTrustedInstanceData |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset kJumpTableStartOffset std::atomic< uint32_t > kTieringBudgetArrayOffset kDataSegmentStartsOffset kElementSegmentsOffset kInstanceObjectOffset kMemoryObjectsOffset kTaggedGlobalsBufferOffset | OPTIONAL_ACCESSORS (WasmTrustedInstanceData, imported_mutable_globals_buffers, Tagged< FixedArray >, kImportedMutableGlobalsBuffersOffset) OPTIONAL_ACCESSORS(WasmTrustedInstanceData |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset kJumpTableStartOffset std::atomic< uint32_t > kTieringBudgetArrayOffset kDataSegmentStartsOffset kElementSegmentsOffset kInstanceObjectOffset kMemoryObjectsOffset kTaggedGlobalsBufferOffset kTablesOffset | PROTECTED_POINTER_ACCESSORS (WasmTrustedInstanceData, shared_part, WasmTrustedInstanceData, kProtectedSharedPartOffset) PROTECTED_POINTER_ACCESSORS(WasmTrustedInstanceData |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset kJumpTableStartOffset std::atomic< uint32_t > kTieringBudgetArrayOffset kDataSegmentStartsOffset kElementSegmentsOffset kInstanceObjectOffset kMemoryObjectsOffset kTaggedGlobalsBufferOffset kTablesOffset kProtectedDispatchTable0Offset | PROTECTED_POINTER_ACCESSORS (WasmTrustedInstanceData, dispatch_tables, ProtectedFixedArray, kProtectedDispatchTablesOffset) PROTECTED_POINTER_ACCESSORS(WasmTrustedInstanceData |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset kJumpTableStartOffset std::atomic< uint32_t > kTieringBudgetArrayOffset kDataSegmentStartsOffset kElementSegmentsOffset kInstanceObjectOffset kMemoryObjectsOffset kTaggedGlobalsBufferOffset kTablesOffset kProtectedDispatchTable0Offset kProtectedDispatchTableForImportsOffset | OPTIONAL_ACCESSORS (WasmTrustedInstanceData, tags_table, Tagged< FixedArray >, kTagsTableOffset) ACCESSORS(WasmTrustedInstanceData |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset kJumpTableStartOffset std::atomic< uint32_t > kTieringBudgetArrayOffset kDataSegmentStartsOffset kElementSegmentsOffset kInstanceObjectOffset kMemoryObjectsOffset kTaggedGlobalsBufferOffset kTablesOffset kProtectedDispatchTable0Offset kProtectedDispatchTableForImportsOffset kFuncRefsOffset | ACCESSORS (WasmTrustedInstanceData, managed_object_maps, Tagged< FixedArray >, kManagedObjectMapsOffset) ACCESSORS(WasmTrustedInstanceData |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset kJumpTableStartOffset std::atomic< uint32_t > kTieringBudgetArrayOffset kDataSegmentStartsOffset kElementSegmentsOffset kInstanceObjectOffset kMemoryObjectsOffset kTaggedGlobalsBufferOffset kTablesOffset kProtectedDispatchTable0Offset kProtectedDispatchTableForImportsOffset kFuncRefsOffset kFeedbackVectorsOffset | ACCESSORS (WasmTrustedInstanceData, well_known_imports, Tagged< FixedArray >, kWellKnownImportsOffset) PRIMITIVE_ACCESSORS(WasmTrustedInstanceData |
| |
| | TRUSTED_POINTER_ACCESSORS (WasmInstanceObject, trusted_data, WasmTrustedInstanceData, kTrustedDataOffset, kWasmTrustedInstanceDataIndirectPointerTag) const wasm |
| |
| | PROTECTED_POINTER_ACCESSORS (WasmDispatchTable, protected_offheap_data, TrustedManaged< WasmDispatchTableData >, kProtectedOffheapDataOffset) WasmDispatchTableData *WasmDispatchTable |
| |
| | PROTECTED_POINTER_ACCESSORS (WasmImportData, instance_data, WasmTrustedInstanceData, kProtectedInstanceDataOffset) PROTECTED_POINTER_ACCESSORS(WasmImportData |
| |
| | PROTECTED_POINTER_ACCESSORS (WasmInternalFunction, implicit_arg, TrustedObject, kProtectedImplicitArgOffset) TRUSTED_POINTER_ACCESSORS(WasmFuncRef |
| |
| kWasmInternalFunctionIndirectPointerTag | PROTECTED_POINTER_ACCESSORS (WasmFunctionData, internal, WasmInternalFunction, kProtectedInternalOffset) PROTECTED_POINTER_ACCESSORS(WasmExportedFunctionData |
| |
| kWasmInternalFunctionIndirectPointerTag kProtectedInstanceDataOffset | CODE_POINTER_ACCESSORS (WasmExportedFunctionData, c_wrapper_code, kCWrapperCodeOffset) PRIMITIVE_ACCESSORS(WasmExportedFunctionData |
| |
| | PROTECTED_POINTER_ACCESSORS (WasmJSFunctionData, protected_offheap_data, TrustedManaged< WasmJSFunctionData::OffheapData >, kProtectedOffheapDataOffset) WasmJSFunctionData |
| |
| | TRUSTED_POINTER_ACCESSORS (WasmTableObject, trusted_data, WasmTrustedInstanceData, kTrustedDataOffset, kWasmTrustedInstanceDataIndirectPointerTag) TRUSTED_POINTER_ACCESSORS(WasmTableObject |
| |
| | EXTERNAL_POINTER_ACCESSORS (WasmSuspenderObject, stack, wasm::StackMemory *, kStackOffset, kWasmStackMemoryTag) TRUSTED_POINTER_ACCESSORS(WasmTagObject |
| |
| void | EncodeI32ExceptionValue (DirectHandle< FixedArray > encoded_values, uint32_t *encoded_index, uint32_t value) |
| |
| void | EncodeI64ExceptionValue (DirectHandle< FixedArray > encoded_values, uint32_t *encoded_index, uint64_t value) |
| |
| void | DecodeI32ExceptionValue (DirectHandle< FixedArray > encoded_values, uint32_t *encoded_index, uint32_t *value) |
| |
| void | DecodeI64ExceptionValue (DirectHandle< FixedArray > encoded_values, uint32_t *encoded_index, uint64_t *value) |
| |
| bool | UseGenericWasmToJSWrapper (wasm::ImportCallKind kind, const wasm::CanonicalSig *sig, wasm::Suspend suspend) |
| |
| DirectHandle< Map > | CreateStructMap (Isolate *isolate, wasm::CanonicalTypeIndex struct_index, DirectHandle< Map > opt_rtt_parent, DirectHandle< NativeContext > opt_native_context) |
| |
| DirectHandle< Map > | CreateArrayMap (Isolate *isolate, wasm::CanonicalTypeIndex array_index, DirectHandle< Map > opt_rtt_parent) |
| |
| DirectHandle< Map > | CreateFuncRefMap (Isolate *isolate, wasm::CanonicalTypeIndex type, DirectHandle< Map > opt_rtt_parent) |
| |
| template<typename T > |
| std::ostream & | operator<< (std::ostream &os, ZoneCompactSet< T > set) |
| |
| template<class T > |
| bool | operator== (const ZoneVector< T > &lhs, const ZoneVector< T > &rhs) |
| |
| template<class T > |
| bool | operator!= (const ZoneVector< T > &lhs, const ZoneVector< T > &rhs) |
| |
| template<class T > |
| bool | operator< (const ZoneVector< T > &lhs, const ZoneVector< T > &rhs) |
| |
| template<typename T > |
| base::Vector< T > | CloneVector (Zone *zone, base::Vector< const T > other) |
| |
| constexpr bool | PointerCompressionIsEnabled () |
| |
| constexpr bool | SmiValuesAre31Bits () |
| |
| constexpr bool | SmiValuesAre32Bits () |
| |
| constexpr bool | Is64 () |
| |
| static V8_INLINE constexpr Address | IntToSmi (int value) |
| |
| constexpr bool | SandboxIsEnabled () |
| |
| constexpr ExternalPointerTagRange | kAnyExternalPointerTagRange (kFirstExternalPointerTag, kLastExternalPointerTag) |
| |
| constexpr ExternalPointerTagRange | kAnySharedExternalPointerTagRange (kFirstSharedExternalPointerTag, kLastSharedExternalPointerTag) |
| |
| constexpr ExternalPointerTagRange | kAnyForeignExternalPointerTagRange (kFirstForeignExternalPointerTag, kLastForeignExternalPointerTag) |
| |
| constexpr ExternalPointerTagRange | kAnyInterceptorInfoExternalPointerTagRange (kFirstInterceptorInfoExternalPointerTag, kLastInterceptorInfoExternalPointerTag) |
| |
| constexpr ExternalPointerTagRange | kAnyManagedExternalPointerTagRange (kFirstManagedExternalPointerTag, kLastManagedExternalPointerTag) |
| |
| constexpr ExternalPointerTagRange | kAnyMaybeReadOnlyExternalPointerTagRange (kFirstMaybeReadOnlyExternalPointerTag, kLastMaybeReadOnlyExternalPointerTag) |
| |
| constexpr ExternalPointerTagRange | kAnyManagedResourceExternalPointerTag (kFirstManagedResourceTag, kLastManagedResourceTag) |
| |
| static V8_INLINE constexpr bool | IsSharedExternalPointerType (ExternalPointerTagRange tag_range) |
| |
| static V8_INLINE constexpr bool | IsMaybeReadOnlyExternalPointerType (ExternalPointerTagRange tag_range) |
| |
| static V8_INLINE constexpr bool | IsManagedExternalPointerType (ExternalPointerTagRange tag_range) |
| |
| static V8_INLINE constexpr bool | ExternalPointerCanBeEmpty (ExternalPointerTagRange tag_range) |
| |
| V8_EXPORT internal::Isolate * | IsolateFromNeverReadOnlySpaceObject (Address obj) |
| |
| V8_EXPORT bool | ShouldThrowOnError (internal::Isolate *isolate) |
| |
| template<class T > |
| V8_INLINE void | PerformCastCheck (T *data) |
| |
| void | PrintFunctionCallbackInfo (void *function_callback_info) |
| |
| void | PrintPropertyCallbackInfo (void *property_callback_info) |
| |
| template<typename T > |
| static V8_INLINE T * | ReadCppHeapPointerField (v8::Isolate *isolate, Address heap_object_ptr, int offset, CppHeapPointerTagRange tag_range) |
| |
| V8_EXPORT Address * | GlobalizeTracedReference (Isolate *isolate, Address value, Address *slot, TracedReferenceStoreMode store_mode, TracedReferenceHandling reference_handling) |
| |
| V8_EXPORT void | MoveTracedReference (Address **from, Address **to) |
| |
| V8_EXPORT void | CopyTracedReference (const Address *const *from, Address **to) |
| |
| V8_EXPORT void | DisposeTracedReference (Address *global_handle) |
| |
|
| const int | kHandleBlockSize = v8::internal::KB - 2 |
| |
| | HoleySmi |
| |
| | HOLEY_SMI_ELEMENTS |
| |
| | DontOverride |
| |
| DONT_OVERRIDE | DisableAllocationSites |
| |
| DONT_OVERRIDE DISABLE_ALLOCATION_SITES | Holey |
| |
| DONT_OVERRIDE DISABLE_ALLOCATION_SITES | HOLEY_ELEMENTS |
| |
| DONT_OVERRIDE DISABLE_ALLOCATION_SITES DISABLE_ALLOCATION_SITES | HoleyDouble |
| |
| DONT_OVERRIDE DISABLE_ALLOCATION_SITES DISABLE_ALLOCATION_SITES | HOLEY_DOUBLE_ELEMENTS |
| |
| constexpr size_t | kJSBuiltinBaseParameterCount = 3 |
| |
| constexpr int | kMaxJSStructFields = 999 |
| |
| | EpochMilliseconds |
| |
| | nanoseconds |
| |
| | GetPossibleInstantsFor |
| |
| const int | kConstantPoolMarkerMask = 0xfff000f0 |
| |
| const int | kConstantPoolMarker = 0xe7f000f0 |
| |
| const int | kConstantPoolLengthMaxMask = 0xffff |
| |
| constexpr int | kNumRegisters = 16 |
| |
| constexpr int | kRegSizeInBitsLog2 = 5 |
| |
| constexpr int | kNumVFPSingleRegisters = 32 |
| |
| constexpr int | kNumVFPDoubleRegisters = 32 |
| |
| constexpr int | kNumVFPRegisters |
| |
| constexpr int | kPCRegister = 15 |
| |
| constexpr int | kNoRegister = -1 |
| |
| constexpr int | kLdrMaxReachBits = 12 |
| |
| constexpr int | kVldrMaxReachBits = 10 |
| |
| constexpr int | kRootRegisterBias = 4095 |
| |
| constexpr Opcode | AND = 0 << 21 |
| |
| constexpr Opcode | EOR = 1 << 21 |
| |
| constexpr Opcode | SUB = 2 << 21 |
| |
| constexpr Opcode | RSB = 3 << 21 |
| |
| constexpr Opcode | ADD = 4 << 21 |
| |
| constexpr Opcode | ADC = 5 << 21 |
| |
| constexpr Opcode | SBC = 6 << 21 |
| |
| constexpr Opcode | RSC = 7 << 21 |
| |
| constexpr Opcode | TST = 8 << 21 |
| |
| constexpr Opcode | TEQ = 9 << 21 |
| |
| constexpr Opcode | CMP = 10 << 21 |
| |
| constexpr Opcode | CMN = 11 << 21 |
| |
| constexpr Opcode | ORR = 12 << 21 |
| |
| constexpr Opcode | MOV = 13 << 21 |
| |
| constexpr Opcode | BIC = 14 << 21 |
| |
| constexpr Opcode | MVN = 15 << 21 |
| |
| constexpr MiscInstructionsBits74 | BX = 1 << 4 |
| |
| constexpr MiscInstructionsBits74 | BXJ = 2 << 4 |
| |
| constexpr MiscInstructionsBits74 | BLX = 3 << 4 |
| |
| constexpr MiscInstructionsBits74 | BKPT = 7 << 4 |
| |
| constexpr MiscInstructionsBits74 | CLZ = 1 << 4 |
| |
| constexpr int | H = 1 << 5 |
| |
| constexpr int | S6 = 1 << 6 |
| |
| constexpr int | L = 1 << 20 |
| |
| constexpr int | S = 1 << 20 |
| |
| constexpr int | W = 1 << 21 |
| |
| constexpr int | A = 1 << 21 |
| |
| constexpr int | B = 1 << 22 |
| |
| constexpr int | N = 1 << 22 |
| |
| constexpr int | U = 1 << 23 |
| |
| constexpr int | P |
| |
| constexpr int | I = 1 << 25 |
| |
| constexpr int | B0 = 1 << 0 |
| |
| constexpr int | B4 = 1 << 4 |
| |
| constexpr int | B5 = 1 << 5 |
| |
| constexpr int | B6 = 1 << 6 |
| |
| constexpr int | B7 = 1 << 7 |
| |
| constexpr int | B8 = 1 << 8 |
| |
| constexpr int | B9 = 1 << 9 |
| |
| constexpr int | B10 = 1 << 10 |
| |
| constexpr int | B12 = 1 << 12 |
| |
| constexpr int | B16 = 1 << 16 |
| |
| constexpr int | B17 = 1 << 17 |
| |
| constexpr int | B18 = 1 << 18 |
| |
| constexpr int | B19 = 1 << 19 |
| |
| constexpr int | B20 = 1 << 20 |
| |
| constexpr int | B21 = 1 << 21 |
| |
| constexpr int | B22 = 1 << 22 |
| |
| constexpr int | B23 = 1 << 23 |
| |
| constexpr int | B24 = 1 << 24 |
| |
| constexpr int | B25 = 1 << 25 |
| |
| constexpr int | B26 = 1 << 26 |
| |
| constexpr int | B27 = 1 << 27 |
| |
| constexpr int | B28 = 1 << 28 |
| |
| constexpr int | kCondMask = 15 << 28 |
| |
| constexpr int | kALUMask = 0x6f << 21 |
| |
| constexpr int | kRdMask = 15 << 12 |
| |
| constexpr int | kCoprocessorMask = 15 << 8 |
| |
| constexpr int | kOpCodeMask = 15 << 21 |
| |
| constexpr int | kImm24Mask = (1 << 24) - 1 |
| |
| constexpr int | kImm16Mask = (1 << 16) - 1 |
| |
| constexpr int | kImm8Mask = (1 << 8) - 1 |
| |
| constexpr int | kOff12Mask = (1 << 12) - 1 |
| |
| constexpr int | kOff8Mask = (1 << 8) - 1 |
| |
| constexpr BarrierOption | OSHLD = 0x1 |
| |
| constexpr BarrierOption | OSHST = 0x2 |
| |
| constexpr BarrierOption | OSH = 0x3 |
| |
| constexpr BarrierOption | NSHLD = 0x5 |
| |
| constexpr BarrierOption | NSHST = 0x6 |
| |
| constexpr BarrierOption | NSH = 0x7 |
| |
| constexpr BarrierOption | ISHLD = 0x9 |
| |
| constexpr BarrierOption | ISHST = 0xa |
| |
| constexpr BarrierOption | ISH = 0xb |
| |
| constexpr BarrierOption | LD = 0xd |
| |
| constexpr BarrierOption | ST = 0xe |
| |
| constexpr BarrierOption | SY = 0xf |
| |
| constexpr SBit | SetCC = 1 << 20 |
| |
| constexpr SBit | LeaveCC = 0 << 20 |
| |
| constexpr SRegister | CPSR = 0 << 22 |
| |
| constexpr SRegister | SPSR = 1 << 22 |
| |
| constexpr ShiftOp | LSL = 0 << 5 |
| |
| constexpr ShiftOp | LSR = 1 << 5 |
| |
| constexpr ShiftOp | ASR = 2 << 5 |
| |
| constexpr ShiftOp | ROR = 3 << 5 |
| |
| constexpr ShiftOp | RRX = -1 |
| |
| constexpr ShiftOp | kNumberOfShifts = 4 |
| |
| constexpr SRegisterField | CPSR_c = CPSR | 1 << 16 |
| |
| constexpr SRegisterField | CPSR_x = CPSR | 1 << 17 |
| |
| constexpr SRegisterField | CPSR_s = CPSR | 1 << 18 |
| |
| constexpr SRegisterField | CPSR_f = CPSR | 1 << 19 |
| |
| constexpr SRegisterField | SPSR_c = SPSR | 1 << 16 |
| |
| constexpr SRegisterField | SPSR_x = SPSR | 1 << 17 |
| |
| constexpr SRegisterField | SPSR_s = SPSR | 1 << 18 |
| |
| constexpr SRegisterField | SPSR_f = SPSR | 1 << 19 |
| |
| constexpr AddrMode | Offset |
| |
| constexpr AddrMode | PreIndex |
| |
| constexpr AddrMode | PostIndex |
| |
| constexpr AddrMode | NegOffset |
| |
| constexpr AddrMode | NegPreIndex |
| |
| constexpr AddrMode | NegPostIndex |
| |
| constexpr BlockAddrMode | da = (0 | 0 | 0) << 21 |
| |
| constexpr BlockAddrMode | ia = (0 | 4 | 0) << 21 |
| |
| constexpr BlockAddrMode | db = (8 | 0 | 0) << 21 |
| |
| constexpr BlockAddrMode | ib = (8 | 4 | 0) << 21 |
| |
| constexpr BlockAddrMode | da_w |
| |
| constexpr BlockAddrMode | ia_w |
| |
| constexpr BlockAddrMode | db_w |
| |
| constexpr BlockAddrMode | ib_w |
| |
| constexpr BlockAddrMode | da_x = (0 | 0 | 0) << 21 |
| |
| constexpr BlockAddrMode | ia_x = (0 | 4 | 0) << 21 |
| |
| constexpr BlockAddrMode | db_x = (8 | 0 | 0) << 21 |
| |
| constexpr BlockAddrMode | ib_x = (8 | 4 | 0) << 21 |
| |
| constexpr BlockAddrMode | kBlockAddrModeMask = (8 | 4 | 1) << 21 |
| |
| constexpr LFlag | Long = 1 << 22 |
| |
| constexpr LFlag | Short = 0 << 22 |
| |
| constexpr NeonSize | Neon8 = 0x0 |
| |
| constexpr NeonSize | Neon16 = 0x1 |
| |
| constexpr NeonSize | Neon32 = 0x2 |
| |
| constexpr NeonSize | Neon64 = 0x3 |
| |
| constexpr NeonDataType | NeonS8 = 0 |
| |
| constexpr NeonDataType | NeonS16 = 1 |
| |
| constexpr NeonDataType | NeonS32 = 2 |
| |
| constexpr NeonDataType | NeonS64 = 3 |
| |
| constexpr NeonDataType | NeonU8 = 4 |
| |
| constexpr NeonDataType | NeonU16 = 5 |
| |
| constexpr NeonDataType | NeonU32 = 6 |
| |
| constexpr NeonDataType | NeonU64 = 7 |
| |
| constexpr NeonListType | nlt_1 = 0x7 |
| |
| constexpr NeonListType | nlt_2 = 0xA |
| |
| constexpr NeonListType | nlt_3 = 0x6 |
| |
| constexpr NeonListType | nlt_4 = 0x2 |
| |
| constexpr SoftwareInterruptCodes | kCallRtRedirected = 0x10 |
| |
| constexpr SoftwareInterruptCodes | kBreakpoint = 0x20 |
| |
| constexpr SoftwareInterruptCodes | kStopCode = 1 << 23 |
| |
| constexpr uint32_t | kStopCodeMask = kStopCode - 1 |
| |
| constexpr uint32_t | kMaxStopCode = kStopCode - 1 |
| |
| constexpr int32_t | kDefaultStopCode = -1 |
| |
| constexpr VFPRegPrecision | kSinglePrecision = 0 |
| |
| constexpr VFPRegPrecision | kDoublePrecision = 1 |
| |
| constexpr VFPRegPrecision | kSimd128Precision = 2 |
| |
| constexpr VFPConversionMode | kFPSCRRounding = 0 |
| |
| constexpr VFPConversionMode | kDefaultRoundToZero = 1 |
| |
| constexpr uint32_t | kVFPExceptionMask = 0xf |
| |
| constexpr uint32_t | kVFPInvalidOpExceptionBit = 1 << 0 |
| |
| constexpr uint32_t | kVFPOverflowExceptionBit = 1 << 2 |
| |
| constexpr uint32_t | kVFPUnderflowExceptionBit = 1 << 3 |
| |
| constexpr uint32_t | kVFPInexactExceptionBit = 1 << 4 |
| |
| constexpr uint32_t | kVFPFlushToZeroMask = 1 << 24 |
| |
| constexpr uint32_t | kVFPDefaultNaNModeControlBit = 1 << 25 |
| |
| constexpr uint32_t | kVFPNConditionFlagBit = 1 << 31 |
| |
| constexpr uint32_t | kVFPZConditionFlagBit = 1 << 30 |
| |
| constexpr uint32_t | kVFPCConditionFlagBit = 1 << 29 |
| |
| constexpr uint32_t | kVFPVConditionFlagBit = 1 << 28 |
| |
| constexpr VFPRoundingMode | RN = 0 << 22 |
| |
| constexpr VFPRoundingMode | RP = 1 << 22 |
| |
| constexpr VFPRoundingMode | RM = 2 << 22 |
| |
| constexpr VFPRoundingMode | RZ = 3 << 22 |
| |
| constexpr VFPRoundingMode | kRoundToNearest = RN |
| |
| constexpr VFPRoundingMode | kRoundToPlusInf = RP |
| |
| constexpr VFPRoundingMode | kRoundToMinusInf = RM |
| |
| constexpr VFPRoundingMode | kRoundToZero = RZ |
| |
| const uint32_t | kVFPRoundingModeMask = 3 << 22 |
| |
| constexpr uint8_t | kInstrSize = 4 |
| |
| constexpr uint8_t | kInstrSizeLog2 = 2 |
| |
| constexpr size_t | kMaxPCRelativeCodeRangeInMB = 32 |
| |
| constexpr Register | no_reg = Register::no_reg() |
| |
| constexpr Register | kCArgRegs [] = {r0, r1, r2, r3} |
| |
| static const int | kRegisterPassedArguments = arraysize(kCArgRegs) |
| |
| static const int | kDoubleRegisterPassedArguments = 8 |
| |
| constexpr AliasingKind | kFPAliasing = AliasingKind::kCombine |
| |
| constexpr bool | kSimdMaskRegisters = false |
| |
| constexpr DwVfpRegister | no_dreg = DwVfpRegister::no_reg() |
| |
| constexpr LowDwVfpRegister | kFirstCalleeSavedDoubleReg = d8 |
| |
| constexpr LowDwVfpRegister | kLastCalleeSavedDoubleReg = d15 |
| |
| constexpr LowDwVfpRegister | kDoubleRegZero = d13 |
| |
| constexpr CRegister | no_creg = CRegister::no_reg() |
| |
| constexpr Register | kStackPointerRegister = sp |
| |
| constexpr Register | kReturnRegister0 = r0 |
| |
| constexpr Register | kReturnRegister1 = r1 |
| |
| constexpr Register | kReturnRegister2 = r2 |
| |
| constexpr Register | kJSFunctionRegister = r1 |
| |
| constexpr Register | kContextRegister = r7 |
| |
| constexpr Register | kAllocateSizeRegister = r1 |
| |
| constexpr Register | kInterpreterAccumulatorRegister = r0 |
| |
| constexpr Register | kInterpreterBytecodeOffsetRegister = r5 |
| |
| constexpr Register | kInterpreterBytecodeArrayRegister = r6 |
| |
| constexpr Register | kInterpreterDispatchTableRegister = r8 |
| |
| constexpr Register | kJavaScriptCallArgCountRegister = r0 |
| |
| constexpr Register | kJavaScriptCallCodeStartRegister = r2 |
| |
| constexpr Register | kJavaScriptCallTargetRegister = kJSFunctionRegister |
| |
| constexpr Register | kJavaScriptCallNewTargetRegister = r3 |
| |
| constexpr Register | kJavaScriptCallExtraArg1Register = r2 |
| |
| constexpr Register | kJavaScriptCallDispatchHandleRegister = no_reg |
| |
| constexpr Register | kRuntimeCallFunctionRegister = r1 |
| |
| constexpr Register | kRuntimeCallArgCountRegister = r0 |
| |
| constexpr Register | kRuntimeCallArgvRegister = r2 |
| |
| constexpr Register | kWasmImplicitArgRegister = r3 |
| |
| constexpr Register | kWasmCompileLazyFuncIndexRegister = r4 |
| |
| constexpr Register | cp = r7 |
| |
| constexpr Register | r11 = fp |
| |
| constexpr Register | kRootRegister = r10 |
| |
| constexpr DoubleRegister | kFPReturnRegister0 = d0 |
| |
| constexpr Register | kMaglevExtraScratchRegister = r9 |
| |
| const RegList | kJSCallerSaved |
| |
| const int | kNumJSCallerSaved = 4 |
| |
| const RegList | kCalleeSaved |
| |
| const RegList | kCallerSaved |
| |
| const int | kNumCalleeSaved = 8 |
| |
| const int | kNumDoubleCalleeSaved = 8 |
| |
| constexpr int | kSmiShift = kSmiTagSize + kSmiShiftSize |
| |
| constexpr uint64_t | kSmiShiftMask = (1ULL << kSmiShift) - 1 |
| |
| constexpr uint8_t | kLoadLiteralScaleLog2 = 2 |
| |
| constexpr uint8_t | kLoadLiteralScale = 1 << kLoadLiteralScaleLog2 |
| |
| constexpr int | kMaxLoadLiteralRange = 1 * MB |
| |
| constexpr int | kNumberOfRegisters = 32 |
| |
| constexpr int | kNumberOfVRegisters = 32 |
| |
| constexpr int | kNumberOfCalleeSavedRegisters = 10 |
| |
| constexpr int | kNumberOfCalleeSavedVRegisters = 8 |
| |
| constexpr int | kWRegSizeInBits = 32 |
| |
| constexpr int | kWRegSizeInBitsLog2 = 5 |
| |
| constexpr int | kWRegSize = kWRegSizeInBits >> 3 |
| |
| constexpr int | kWRegSizeLog2 = kWRegSizeInBitsLog2 - 3 |
| |
| constexpr int | kXRegSizeInBits = 64 |
| |
| constexpr int | kXRegSizeInBitsLog2 = 6 |
| |
| constexpr int | kXRegSize = kXRegSizeInBits >> 3 |
| |
| constexpr int | kXRegSizeLog2 = kXRegSizeInBitsLog2 - 3 |
| |
| constexpr int | kSRegSizeInBits = 32 |
| |
| constexpr int | kSRegSizeInBitsLog2 = 5 |
| |
| constexpr int | kSRegSize = kSRegSizeInBits >> 3 |
| |
| constexpr int | kSRegSizeLog2 = kSRegSizeInBitsLog2 - 3 |
| |
| constexpr int | kDRegSizeInBits = 64 |
| |
| constexpr int | kDRegSizeInBitsLog2 = 6 |
| |
| constexpr int | kDRegSize = kDRegSizeInBits >> 3 |
| |
| constexpr int | kDRegSizeLog2 = kDRegSizeInBitsLog2 - 3 |
| |
| constexpr int | kDRegSizeInBytesLog2 = kDRegSizeInBitsLog2 - 3 |
| |
| constexpr int | kBRegSizeInBits = 8 |
| |
| constexpr int | kBRegSize = kBRegSizeInBits >> 3 |
| |
| constexpr int | kHRegSizeInBits = 16 |
| |
| constexpr int | kHRegSize = kHRegSizeInBits >> 3 |
| |
| constexpr int | kQRegSizeInBits = 128 |
| |
| constexpr int | kQRegSizeInBitsLog2 = 7 |
| |
| constexpr int | kQRegSize = kQRegSizeInBits >> 3 |
| |
| constexpr int | kQRegSizeLog2 = kQRegSizeInBitsLog2 - 3 |
| |
| constexpr int | kVRegSizeInBits = kQRegSizeInBits |
| |
| constexpr int | kVRegSize = kVRegSizeInBits >> 3 |
| |
| constexpr int64_t | kWRegMask = 0x00000000ffffffffL |
| |
| constexpr int64_t | kXRegMask = 0xffffffffffffffffL |
| |
| constexpr int64_t | kSRegMask = 0x00000000ffffffffL |
| |
| constexpr int64_t | kDRegMask = 0xffffffffffffffffL |
| |
| constexpr int64_t | kDSignBit = 63 |
| |
| constexpr int64_t | kDSignMask = 0x1LL << kDSignBit |
| |
| constexpr int64_t | kSSignBit = 31 |
| |
| constexpr int64_t | kSSignMask = 0x1LL << kSSignBit |
| |
| constexpr int64_t | kXSignBit = 63 |
| |
| constexpr int64_t | kXSignMask = 0x1LL << kXSignBit |
| |
| constexpr int64_t | kWSignBit = 31 |
| |
| constexpr int64_t | kWSignMask = 0x1LL << kWSignBit |
| |
| constexpr int64_t | kDQuietNanBit = 51 |
| |
| constexpr int64_t | kDQuietNanMask = 0x1LL << kDQuietNanBit |
| |
| constexpr int64_t | kSQuietNanBit = 22 |
| |
| constexpr int64_t | kSQuietNanMask = 0x1LL << kSQuietNanBit |
| |
| constexpr int64_t | kHQuietNanBit = 9 |
| |
| constexpr int64_t | kHQuietNanMask = 0x1LL << kHQuietNanBit |
| |
| constexpr int64_t | kByteMask = 0xffL |
| |
| constexpr int64_t | kHalfWordMask = 0xffffL |
| |
| constexpr int64_t | kWordMask = 0xffffffffL |
| |
| constexpr uint64_t | kXMaxUInt = 0xffffffffffffffffUL |
| |
| constexpr uint64_t | kWMaxUInt = 0xffffffffUL |
| |
| constexpr int64_t | kXMaxInt = 0x7fffffffffffffffL |
| |
| constexpr int64_t | kXMinInt = 0x8000000000000000L |
| |
| constexpr int32_t | kWMaxInt = 0x7fffffff |
| |
| constexpr int32_t | kWMinInt = 0x80000000 |
| |
| constexpr int | kIp0Code = 16 |
| |
| constexpr int | kIp1Code = 17 |
| |
| constexpr int | kFramePointerRegCode = 29 |
| |
| constexpr int | kLinkRegCode = 30 |
| |
| constexpr int | kZeroRegCode = 31 |
| |
| constexpr int | kSPRegInternalCode = 63 |
| |
| constexpr unsigned | kRegCodeMask = 0x1f |
| |
| constexpr unsigned | kShiftAmountWRegMask = 0x1f |
| |
| constexpr unsigned | kShiftAmountXRegMask = 0x3f |
| |
| constexpr unsigned | kHalfWordSize = 16 |
| |
| constexpr unsigned | kHalfWordSizeLog2 = 4 |
| |
| constexpr unsigned | kHalfWordSizeInBytes = kHalfWordSize >> 3 |
| |
| constexpr unsigned | kHalfWordSizeInBytesLog2 = kHalfWordSizeLog2 - 3 |
| |
| constexpr unsigned | kWordSize = 32 |
| |
| constexpr unsigned | kWordSizeLog2 = 5 |
| |
| constexpr unsigned | kWordSizeInBytes = kWordSize >> 3 |
| |
| constexpr unsigned | kWordSizeInBytesLog2 = kWordSizeLog2 - 3 |
| |
| constexpr unsigned | kDoubleWordSize = 64 |
| |
| constexpr unsigned | kDoubleWordSizeInBytes = kDoubleWordSize >> 3 |
| |
| constexpr unsigned | kQuadWordSize = 128 |
| |
| constexpr unsigned | kQuadWordSizeInBytes = kQuadWordSize >> 3 |
| |
| constexpr int | kMaxLanesPerVector = 16 |
| |
| constexpr unsigned | kAddressTagOffset = 56 |
| |
| constexpr unsigned | kAddressTagWidth = 8 |
| |
| constexpr uint64_t | kAddressTagMask |
| |
| constexpr uint64_t | kTTBRMask = UINT64_C(1) << 55 |
| |
| constexpr unsigned | kDoubleMantissaBits = 52 |
| |
| constexpr unsigned | kDoubleExponentBits = 11 |
| |
| constexpr unsigned | kDoubleExponentBias = 1023 |
| |
| constexpr unsigned | kFloatMantissaBits = 23 |
| |
| constexpr unsigned | kFloatExponentBits = 8 |
| |
| constexpr unsigned | kFloatExponentBias = 127 |
| |
| constexpr unsigned | kFloat16MantissaBits = 10 |
| |
| constexpr unsigned | kFloat16ExponentBits = 5 |
| |
| constexpr unsigned | kFloat16ExponentBias = 15 |
| |
| constexpr int | ImmPCRel_mask = ImmPCRelLo_mask | ImmPCRelHi_mask |
| |
| constexpr uint32_t | kUnallocatedInstruction = 0xffffffff |
| |
| constexpr GenericInstrField | SixtyFourBits = 0x80000000 |
| |
| constexpr GenericInstrField | ThirtyTwoBits = 0x00000000 |
| |
| constexpr GenericInstrField | FP32 = 0x00000000 |
| |
| constexpr GenericInstrField | FP64 = 0x00400000 |
| |
| constexpr NEONFormatField | NEONFormatFieldMask = 0x40C00000 |
| |
| constexpr NEONFormatField | NEON_Q = 0x40000000 |
| |
| constexpr NEONFormatField | NEON_sz = 0x00400000 |
| |
| constexpr NEONFormatField | NEON_8B = 0x00000000 |
| |
| constexpr NEONFormatField | NEON_16B = NEON_8B | NEON_Q |
| |
| constexpr NEONFormatField | NEON_4H = 0x00400000 |
| |
| constexpr NEONFormatField | NEON_8H = NEON_4H | NEON_Q |
| |
| constexpr NEONFormatField | NEON_2S = 0x00800000 |
| |
| constexpr NEONFormatField | NEON_4S = NEON_2S | NEON_Q |
| |
| constexpr NEONFormatField | NEON_1D = 0x00C00000 |
| |
| constexpr NEONFormatField | NEON_2D = 0x00C00000 | NEON_Q |
| |
| constexpr NEONFPFormatField | NEONFPFormatFieldMask = 0x40400000 |
| |
| constexpr NEONFPFormatField | NEON_FP_4H = 0x00000000 |
| |
| constexpr NEONFPFormatField | NEON_FP_8H = NEON_Q |
| |
| constexpr NEONFPFormatField | NEON_FP_2S = FP32 |
| |
| constexpr NEONFPFormatField | NEON_FP_4S = FP32 | NEON_Q |
| |
| constexpr NEONFPFormatField | NEON_FP_2D = FP64 | NEON_Q |
| |
| constexpr NEONLSFormatField | NEONLSFormatFieldMask = 0x40000C00 |
| |
| constexpr NEONLSFormatField | LS_NEON_8B = 0x00000000 |
| |
| constexpr NEONLSFormatField | LS_NEON_16B = LS_NEON_8B | NEON_Q |
| |
| constexpr NEONLSFormatField | LS_NEON_4H = 0x00000400 |
| |
| constexpr NEONLSFormatField | LS_NEON_8H = LS_NEON_4H | NEON_Q |
| |
| constexpr NEONLSFormatField | LS_NEON_2S = 0x00000800 |
| |
| constexpr NEONLSFormatField | LS_NEON_4S = LS_NEON_2S | NEON_Q |
| |
| constexpr NEONLSFormatField | LS_NEON_1D = 0x00000C00 |
| |
| constexpr NEONLSFormatField | LS_NEON_2D = LS_NEON_1D | NEON_Q |
| |
| constexpr NEONScalarFormatField | NEONScalarFormatFieldMask = 0x00C00000 |
| |
| constexpr NEONScalarFormatField | NEONScalar = 0x10000000 |
| |
| constexpr NEONScalarFormatField | NEON_B = 0x00000000 |
| |
| constexpr NEONScalarFormatField | NEON_H = 0x00400000 |
| |
| constexpr NEONScalarFormatField | NEON_S = 0x00800000 |
| |
| constexpr NEONScalarFormatField | NEON_D = 0x00C00000 |
| |
| constexpr PCRelAddressingOp | PCRelAddressingFixed = 0x10000000 |
| |
| constexpr PCRelAddressingOp | PCRelAddressingFMask = 0x1F000000 |
| |
| constexpr PCRelAddressingOp | PCRelAddressingMask = 0x9F000000 |
| |
| constexpr PCRelAddressingOp | ADR = PCRelAddressingFixed | 0x00000000 |
| |
| constexpr PCRelAddressingOp | ADRP = PCRelAddressingFixed | 0x80000000 |
| |
| constexpr int | kSFOffset = 31 |
| |
| constexpr AddSubOp | AddSubOpMask = 0x60000000 |
| |
| constexpr AddSubOp | AddSubSetFlagsBit = 0x20000000 |
| |
| constexpr AddSubOp | ADDS = ADD | AddSubSetFlagsBit |
| |
| constexpr AddSubOp | SUBS = SUB | AddSubSetFlagsBit |
| |
| constexpr AddSubImmediateOp | AddSubImmediateFixed = 0x11000000 |
| |
| constexpr AddSubImmediateOp | AddSubImmediateFMask = 0x1F000000 |
| |
| constexpr AddSubImmediateOp | AddSubImmediateMask = 0xFF000000 |
| |
| constexpr AddSubShiftedOp | AddSubShiftedFixed = 0x0B000000 |
| |
| constexpr AddSubShiftedOp | AddSubShiftedFMask = 0x1F200000 |
| |
| constexpr AddSubShiftedOp | AddSubShiftedMask = 0xFF200000 |
| |
| constexpr AddSubExtendedOp | AddSubExtendedFixed = 0x0B200000 |
| |
| constexpr AddSubExtendedOp | AddSubExtendedFMask = 0x1F200000 |
| |
| constexpr AddSubExtendedOp | AddSubExtendedMask = 0xFFE00000 |
| |
| constexpr AddSubWithCarryOp | AddSubWithCarryFixed = 0x1A000000 |
| |
| constexpr AddSubWithCarryOp | AddSubWithCarryFMask = 0x1FE00000 |
| |
| constexpr AddSubWithCarryOp | AddSubWithCarryMask = 0xFFE0FC00 |
| |
| constexpr AddSubWithCarryOp | ADC_w = AddSubWithCarryFixed | ADD |
| |
| constexpr AddSubWithCarryOp | ADC_x = AddSubWithCarryFixed | ADD | SixtyFourBits |
| |
| constexpr AddSubWithCarryOp | ADCS_w = AddSubWithCarryFixed | ADDS |
| |
| constexpr AddSubWithCarryOp | ADCS_x |
| |
| constexpr AddSubWithCarryOp | SBC_w = AddSubWithCarryFixed | SUB |
| |
| constexpr AddSubWithCarryOp | SBC_x = AddSubWithCarryFixed | SUB | SixtyFourBits |
| |
| constexpr AddSubWithCarryOp | SBCS_w = AddSubWithCarryFixed | SUBS |
| |
| constexpr AddSubWithCarryOp | SBCS_x |
| |
| constexpr LogicalOp | LogicalOpMask = 0x60200000 |
| |
| constexpr LogicalOp | NOT = 0x00200000 |
| |
| constexpr LogicalOp | ORN = ORR | NOT |
| |
| constexpr LogicalOp | EON = EOR | NOT |
| |
| constexpr LogicalOp | ANDS = 0x60000000 |
| |
| constexpr LogicalOp | BICS = ANDS | NOT |
| |
| constexpr LogicalImmediateOp | LogicalImmediateFixed = 0x12000000 |
| |
| constexpr LogicalImmediateOp | LogicalImmediateFMask = 0x1F800000 |
| |
| constexpr LogicalImmediateOp | LogicalImmediateMask = 0xFF800000 |
| |
| constexpr LogicalImmediateOp | AND_w_imm = LogicalImmediateFixed | AND |
| |
| constexpr LogicalImmediateOp | AND_x_imm |
| |
| constexpr LogicalImmediateOp | ORR_w_imm = LogicalImmediateFixed | ORR |
| |
| constexpr LogicalImmediateOp | ORR_x_imm |
| |
| constexpr LogicalImmediateOp | EOR_w_imm = LogicalImmediateFixed | EOR |
| |
| constexpr LogicalImmediateOp | EOR_x_imm |
| |
| constexpr LogicalImmediateOp | ANDS_w_imm = LogicalImmediateFixed | ANDS |
| |
| constexpr LogicalImmediateOp | ANDS_x_imm |
| |
| constexpr LogicalShiftedOp | LogicalShiftedFixed = 0x0A000000 |
| |
| constexpr LogicalShiftedOp | LogicalShiftedFMask = 0x1F000000 |
| |
| constexpr LogicalShiftedOp | LogicalShiftedMask = 0xFF200000 |
| |
| constexpr LogicalShiftedOp | AND_w = LogicalShiftedFixed | AND |
| |
| constexpr LogicalShiftedOp | AND_x = LogicalShiftedFixed | AND | SixtyFourBits |
| |
| constexpr LogicalShiftedOp | AND_shift = AND_w |
| |
| constexpr LogicalShiftedOp | BIC_w = LogicalShiftedFixed | BIC |
| |
| constexpr LogicalShiftedOp | BIC_x = LogicalShiftedFixed | BIC | SixtyFourBits |
| |
| constexpr LogicalShiftedOp | BIC_shift = BIC_w |
| |
| constexpr LogicalShiftedOp | ORR_w = LogicalShiftedFixed | ORR |
| |
| constexpr LogicalShiftedOp | ORR_x = LogicalShiftedFixed | ORR | SixtyFourBits |
| |
| constexpr LogicalShiftedOp | ORR_shift = ORR_w |
| |
| constexpr LogicalShiftedOp | ORN_w = LogicalShiftedFixed | ORN |
| |
| constexpr LogicalShiftedOp | ORN_x = LogicalShiftedFixed | ORN | SixtyFourBits |
| |
| constexpr LogicalShiftedOp | ORN_shift = ORN_w |
| |
| constexpr LogicalShiftedOp | EOR_w = LogicalShiftedFixed | EOR |
| |
| constexpr LogicalShiftedOp | EOR_x = LogicalShiftedFixed | EOR | SixtyFourBits |
| |
| constexpr LogicalShiftedOp | EOR_shift = EOR_w |
| |
| constexpr LogicalShiftedOp | EON_w = LogicalShiftedFixed | EON |
| |
| constexpr LogicalShiftedOp | EON_x = LogicalShiftedFixed | EON | SixtyFourBits |
| |
| constexpr LogicalShiftedOp | EON_shift = EON_w |
| |
| constexpr LogicalShiftedOp | ANDS_w = LogicalShiftedFixed | ANDS |
| |
| constexpr LogicalShiftedOp | ANDS_x = LogicalShiftedFixed | ANDS | SixtyFourBits |
| |
| constexpr LogicalShiftedOp | ANDS_shift = ANDS_w |
| |
| constexpr LogicalShiftedOp | BICS_w = LogicalShiftedFixed | BICS |
| |
| constexpr LogicalShiftedOp | BICS_x = LogicalShiftedFixed | BICS | SixtyFourBits |
| |
| constexpr LogicalShiftedOp | BICS_shift = BICS_w |
| |
| constexpr MoveWideImmediateOp | MoveWideImmediateFixed = 0x12800000 |
| |
| constexpr MoveWideImmediateOp | MoveWideImmediateFMask = 0x1F800000 |
| |
| constexpr MoveWideImmediateOp | MoveWideImmediateMask = 0xFF800000 |
| |
| constexpr MoveWideImmediateOp | MOVN = 0x00000000 |
| |
| constexpr MoveWideImmediateOp | MOVZ = 0x40000000 |
| |
| constexpr MoveWideImmediateOp | MOVK = 0x60000000 |
| |
| constexpr MoveWideImmediateOp | MOVN_w = MoveWideImmediateFixed | MOVN |
| |
| constexpr MoveWideImmediateOp | MOVN_x |
| |
| constexpr MoveWideImmediateOp | MOVZ_w = MoveWideImmediateFixed | MOVZ |
| |
| constexpr MoveWideImmediateOp | MOVZ_x |
| |
| constexpr MoveWideImmediateOp | MOVK_w = MoveWideImmediateFixed | MOVK |
| |
| constexpr MoveWideImmediateOp | MOVK_x |
| |
| constexpr int | kBitfieldNOffset = 22 |
| |
| constexpr BitfieldOp | BitfieldFixed = 0x13000000 |
| |
| constexpr BitfieldOp | BitfieldFMask = 0x1F800000 |
| |
| constexpr BitfieldOp | BitfieldMask = 0xFF800000 |
| |
| constexpr BitfieldOp | SBFM_w = BitfieldFixed | 0x00000000 |
| |
| constexpr BitfieldOp | SBFM_x = BitfieldFixed | 0x80000000 |
| |
| constexpr BitfieldOp | SBFM = SBFM_w |
| |
| constexpr BitfieldOp | BFM_w = BitfieldFixed | 0x20000000 |
| |
| constexpr BitfieldOp | BFM_x = BitfieldFixed | 0xA0000000 |
| |
| constexpr BitfieldOp | BFM = BFM_w |
| |
| constexpr BitfieldOp | UBFM_w = BitfieldFixed | 0x40000000 |
| |
| constexpr BitfieldOp | UBFM_x = BitfieldFixed | 0xC0000000 |
| |
| constexpr BitfieldOp | UBFM = UBFM_w |
| |
| constexpr ExtractOp | ExtractFixed = 0x13800000 |
| |
| constexpr ExtractOp | ExtractFMask = 0x1F800000 |
| |
| constexpr ExtractOp | ExtractMask = 0xFFA00000 |
| |
| constexpr ExtractOp | EXTR_w = ExtractFixed | 0x00000000 |
| |
| constexpr ExtractOp | EXTR_x = ExtractFixed | 0x80000000 |
| |
| constexpr ExtractOp | EXTR = EXTR_w |
| |
| constexpr UnconditionalBranchOp | UnconditionalBranchFixed = 0x14000000 |
| |
| constexpr UnconditionalBranchOp | UnconditionalBranchFMask = 0x7C000000 |
| |
| constexpr UnconditionalBranchOp | UnconditionalBranchMask = 0xFC000000 |
| |
| constexpr UnconditionalBranchOp | BL = UnconditionalBranchFixed | 0x80000000 |
| |
| constexpr UnconditionalBranchToRegisterOp | UnconditionalBranchToRegisterFixed |
| |
| constexpr UnconditionalBranchToRegisterOp | UnconditionalBranchToRegisterFMask |
| |
| constexpr UnconditionalBranchToRegisterOp | UnconditionalBranchToRegisterMask |
| |
| constexpr UnconditionalBranchToRegisterOp | BR |
| |
| constexpr UnconditionalBranchToRegisterOp | BLR |
| |
| constexpr UnconditionalBranchToRegisterOp | RET |
| |
| constexpr CompareBranchOp | CompareBranchFixed = 0x34000000 |
| |
| constexpr CompareBranchOp | CompareBranchFMask = 0x7E000000 |
| |
| constexpr CompareBranchOp | CompareBranchMask = 0xFF000000 |
| |
| constexpr CompareBranchOp | CBZ_w = CompareBranchFixed | 0x00000000 |
| |
| constexpr CompareBranchOp | CBZ_x = CompareBranchFixed | 0x80000000 |
| |
| constexpr CompareBranchOp | CBZ = CBZ_w |
| |
| constexpr CompareBranchOp | CBNZ_w = CompareBranchFixed | 0x01000000 |
| |
| constexpr CompareBranchOp | CBNZ_x = CompareBranchFixed | 0x81000000 |
| |
| constexpr CompareBranchOp | CBNZ = CBNZ_w |
| |
| constexpr TestBranchOp | TestBranchFixed = 0x36000000 |
| |
| constexpr TestBranchOp | TestBranchFMask = 0x7E000000 |
| |
| constexpr TestBranchOp | TestBranchMask = 0x7F000000 |
| |
| constexpr TestBranchOp | TBZ = TestBranchFixed | 0x00000000 |
| |
| constexpr TestBranchOp | TBNZ = TestBranchFixed | 0x01000000 |
| |
| constexpr ConditionalBranchOp | ConditionalBranchFixed = 0x54000000 |
| |
| constexpr ConditionalBranchOp | ConditionalBranchFMask = 0xFE000000 |
| |
| constexpr ConditionalBranchOp | ConditionalBranchMask = 0xFF000010 |
| |
| constexpr ConditionalBranchOp | B_cond = ConditionalBranchFixed | 0x00000000 |
| |
| constexpr SystemOp | SystemFixed = 0xD5000000 |
| |
| constexpr SystemOp | SystemFMask = 0xFFC00000 |
| |
| constexpr SystemSysRegOp | SystemSysRegFixed = 0xD5100000 |
| |
| constexpr SystemSysRegOp | SystemSysRegFMask = 0xFFD00000 |
| |
| constexpr SystemSysRegOp | SystemSysRegMask = 0xFFF00000 |
| |
| constexpr SystemSysRegOp | MRS = SystemSysRegFixed | 0x00200000 |
| |
| constexpr SystemSysRegOp | MSR = SystemSysRegFixed | 0x00000000 |
| |
| constexpr SystemHintOp | SystemHintFixed = 0xD503201F |
| |
| constexpr SystemHintOp | SystemHintFMask = 0xFFFFF01F |
| |
| constexpr SystemHintOp | SystemHintMask = 0xFFFFF01F |
| |
| constexpr SystemHintOp | HINT = SystemHintFixed | 0x00000000 |
| |
| constexpr ExceptionOp | ExceptionFixed = 0xD4000000 |
| |
| constexpr ExceptionOp | ExceptionFMask = 0xFF000000 |
| |
| constexpr ExceptionOp | ExceptionMask = 0xFFE0001F |
| |
| constexpr ExceptionOp | HLT = ExceptionFixed | 0x00400000 |
| |
| constexpr ExceptionOp | BRK = ExceptionFixed | 0x00200000 |
| |
| constexpr ExceptionOp | SVC = ExceptionFixed | 0x00000001 |
| |
| constexpr ExceptionOp | HVC = ExceptionFixed | 0x00000002 |
| |
| constexpr ExceptionOp | SMC = ExceptionFixed | 0x00000003 |
| |
| constexpr ExceptionOp | DCPS1 = ExceptionFixed | 0x00A00001 |
| |
| constexpr ExceptionOp | DCPS2 = ExceptionFixed | 0x00A00002 |
| |
| constexpr ExceptionOp | DCPS3 = ExceptionFixed | 0x00A00003 |
| |
| constexpr int | kHltBadCode = 0xbad |
| |
| constexpr MemBarrierOp | MemBarrierFixed = 0xD503309F |
| |
| constexpr MemBarrierOp | MemBarrierFMask = 0xFFFFF09F |
| |
| constexpr MemBarrierOp | MemBarrierMask = 0xFFFFF0FF |
| |
| constexpr MemBarrierOp | DSB = MemBarrierFixed | 0x00000000 |
| |
| constexpr MemBarrierOp | DMB = MemBarrierFixed | 0x00000020 |
| |
| constexpr MemBarrierOp | ISB = MemBarrierFixed | 0x00000040 |
| |
| constexpr SystemPAuthOp | SystemPAuthFixed = 0xD503211F |
| |
| constexpr SystemPAuthOp | SystemPAuthFMask = 0xFFFFFD1F |
| |
| constexpr SystemPAuthOp | SystemPAuthMask = 0xFFFFFFFF |
| |
| constexpr SystemPAuthOp | PACIB1716 = SystemPAuthFixed | 0x00000140 |
| |
| constexpr SystemPAuthOp | AUTIB1716 = SystemPAuthFixed | 0x000001C0 |
| |
| constexpr SystemPAuthOp | PACIBSP = SystemPAuthFixed | 0x00000360 |
| |
| constexpr SystemPAuthOp | AUTIBSP = SystemPAuthFixed | 0x000003E0 |
| |
| constexpr LoadStoreAnyOp | LoadStoreAnyFMask = 0x0A000000 |
| |
| constexpr LoadStoreAnyOp | LoadStoreAnyFixed = 0x08000000 |
| |
| constexpr LoadStorePairAnyOp | LoadStorePairAnyFMask = 0x3A000000 |
| |
| constexpr LoadStorePairAnyOp | LoadStorePairAnyFixed = 0x28000000 |
| |
| constexpr LoadStorePairOp | LoadStorePairMask = 0xC4400000 |
| |
| constexpr LoadStorePairOp | LoadStorePairLBit = 1 << 22 |
| |
| constexpr LoadStorePairPostIndexOp | LoadStorePairPostIndexFixed = 0x28800000 |
| |
| constexpr LoadStorePairPostIndexOp | LoadStorePairPostIndexFMask = 0x3B800000 |
| |
| constexpr LoadStorePairPostIndexOp | LoadStorePairPostIndexMask = 0xFFC00000 |
| |
| constexpr LoadStorePairPreIndexOp | LoadStorePairPreIndexFixed = 0x29800000 |
| |
| constexpr LoadStorePairPreIndexOp | LoadStorePairPreIndexFMask = 0x3B800000 |
| |
| constexpr LoadStorePairPreIndexOp | LoadStorePairPreIndexMask = 0xFFC00000 |
| |
| constexpr LoadStorePairOffsetOp | LoadStorePairOffsetFixed = 0x29000000 |
| |
| constexpr LoadStorePairOffsetOp | LoadStorePairOffsetFMask = 0x3B800000 |
| |
| constexpr LoadStorePairOffsetOp | LoadStorePairOffsetMask = 0xFFC00000 |
| |
| constexpr LoadLiteralOp | LoadLiteralFixed = 0x18000000 |
| |
| constexpr LoadLiteralOp | LoadLiteralFMask = 0x3B000000 |
| |
| constexpr LoadLiteralOp | LoadLiteralMask = 0xFF000000 |
| |
| constexpr LoadLiteralOp | LDR_w_lit = LoadLiteralFixed | 0x00000000 |
| |
| constexpr LoadLiteralOp | LDR_x_lit = LoadLiteralFixed | 0x40000000 |
| |
| constexpr LoadLiteralOp | LDRSW_x_lit = LoadLiteralFixed | 0x80000000 |
| |
| constexpr LoadLiteralOp | PRFM_lit = LoadLiteralFixed | 0xC0000000 |
| |
| constexpr LoadLiteralOp | LDR_s_lit = LoadLiteralFixed | 0x04000000 |
| |
| constexpr LoadLiteralOp | LDR_d_lit = LoadLiteralFixed | 0x44000000 |
| |
| constexpr LoadStoreUnscaledOffsetOp | LoadStoreUnscaledOffsetFixed = 0x38000000 |
| |
| constexpr LoadStoreUnscaledOffsetOp | LoadStoreUnscaledOffsetFMask = 0x3B200C00 |
| |
| constexpr LoadStoreUnscaledOffsetOp | LoadStoreUnscaledOffsetMask = 0xFFE00C00 |
| |
| constexpr LoadStoreOp | LoadStoreMask = 0xC4C00000 |
| |
| constexpr LoadStoreOp | PRFM = 0xC0800000 |
| |
| constexpr LoadStorePostIndex | LoadStorePostIndexFixed = 0x38000400 |
| |
| constexpr LoadStorePostIndex | LoadStorePostIndexFMask = 0x3B200C00 |
| |
| constexpr LoadStorePostIndex | LoadStorePostIndexMask = 0xFFE00C00 |
| |
| constexpr LoadStorePreIndex | LoadStorePreIndexFixed = 0x38000C00 |
| |
| constexpr LoadStorePreIndex | LoadStorePreIndexFMask = 0x3B200C00 |
| |
| constexpr LoadStorePreIndex | LoadStorePreIndexMask = 0xFFE00C00 |
| |
| constexpr LoadStoreUnsignedOffset | LoadStoreUnsignedOffsetFixed = 0x39000000 |
| |
| constexpr LoadStoreUnsignedOffset | LoadStoreUnsignedOffsetFMask = 0x3B000000 |
| |
| constexpr LoadStoreUnsignedOffset | LoadStoreUnsignedOffsetMask = 0xFFC00000 |
| |
| constexpr LoadStoreUnsignedOffset | PRFM_unsigned |
| |
| constexpr LoadStoreRegisterOffset | LoadStoreRegisterOffsetFixed = 0x38200800 |
| |
| constexpr LoadStoreRegisterOffset | LoadStoreRegisterOffsetFMask = 0x3B200C00 |
| |
| constexpr LoadStoreRegisterOffset | LoadStoreRegisterOffsetMask = 0xFFE00C00 |
| |
| constexpr LoadStoreRegisterOffset | PRFM_reg |
| |
| constexpr LoadStoreAcquireReleaseOp | LoadStoreAcquireReleaseFixed = 0x08000000 |
| |
| constexpr LoadStoreAcquireReleaseOp | LoadStoreAcquireReleaseFMask = 0x3F000000 |
| |
| constexpr LoadStoreAcquireReleaseOp | LoadStoreAcquireReleaseMask = 0xCFE08000 |
| |
| constexpr LoadStoreAcquireReleaseOp | STLXR_b |
| |
| constexpr LoadStoreAcquireReleaseOp | LDAXR_b |
| |
| constexpr LoadStoreAcquireReleaseOp | STLR_b |
| |
| constexpr LoadStoreAcquireReleaseOp | LDAR_b |
| |
| constexpr LoadStoreAcquireReleaseOp | STLXR_h |
| |
| constexpr LoadStoreAcquireReleaseOp | LDAXR_h |
| |
| constexpr LoadStoreAcquireReleaseOp | STLR_h |
| |
| constexpr LoadStoreAcquireReleaseOp | LDAR_h |
| |
| constexpr LoadStoreAcquireReleaseOp | STLXR_w |
| |
| constexpr LoadStoreAcquireReleaseOp | LDAXR_w |
| |
| constexpr LoadStoreAcquireReleaseOp | STLR_w |
| |
| constexpr LoadStoreAcquireReleaseOp | LDAR_w |
| |
| constexpr LoadStoreAcquireReleaseOp | STLXR_x |
| |
| constexpr LoadStoreAcquireReleaseOp | LDAXR_x |
| |
| constexpr LoadStoreAcquireReleaseOp | STLR_x |
| |
| constexpr LoadStoreAcquireReleaseOp | LDAR_x |
| |
| constexpr LoadStoreAcquireReleaseOp | LSEBit_l = 0x00400000 |
| |
| constexpr LoadStoreAcquireReleaseOp | LSEBit_o0 = 0x00008000 |
| |
| constexpr LoadStoreAcquireReleaseOp | LSEBit_sz = 0x40000000 |
| |
| constexpr LoadStoreAcquireReleaseOp | CASFixed |
| |
| constexpr LoadStoreAcquireReleaseOp | CASBFixed |
| |
| constexpr LoadStoreAcquireReleaseOp | CASHFixed |
| |
| constexpr LoadStoreAcquireReleaseOp | CASPFixed |
| |
| constexpr LoadStoreAcquireReleaseOp | CAS_w = CASFixed |
| |
| constexpr LoadStoreAcquireReleaseOp | CAS_x = CASFixed | LSEBit_sz |
| |
| constexpr LoadStoreAcquireReleaseOp | CASA_w = CASFixed | LSEBit_l |
| |
| constexpr LoadStoreAcquireReleaseOp | CASA_x = CASFixed | LSEBit_l | LSEBit_sz |
| |
| constexpr LoadStoreAcquireReleaseOp | CASL_w = CASFixed | LSEBit_o0 |
| |
| constexpr LoadStoreAcquireReleaseOp | CASL_x = CASFixed | LSEBit_o0 | LSEBit_sz |
| |
| constexpr LoadStoreAcquireReleaseOp | CASAL_w = CASFixed | LSEBit_l | LSEBit_o0 |
| |
| constexpr LoadStoreAcquireReleaseOp | CASAL_x |
| |
| constexpr LoadStoreAcquireReleaseOp | CASB = CASBFixed |
| |
| constexpr LoadStoreAcquireReleaseOp | CASAB = CASBFixed | LSEBit_l |
| |
| constexpr LoadStoreAcquireReleaseOp | CASLB = CASBFixed | LSEBit_o0 |
| |
| constexpr LoadStoreAcquireReleaseOp | CASALB = CASBFixed | LSEBit_l | LSEBit_o0 |
| |
| constexpr LoadStoreAcquireReleaseOp | CASH = CASHFixed |
| |
| constexpr LoadStoreAcquireReleaseOp | CASAH = CASHFixed | LSEBit_l |
| |
| constexpr LoadStoreAcquireReleaseOp | CASLH = CASHFixed | LSEBit_o0 |
| |
| constexpr LoadStoreAcquireReleaseOp | CASALH = CASHFixed | LSEBit_l | LSEBit_o0 |
| |
| constexpr LoadStoreAcquireReleaseOp | CASP_w = CASPFixed |
| |
| constexpr LoadStoreAcquireReleaseOp | CASP_x = CASPFixed | LSEBit_sz |
| |
| constexpr LoadStoreAcquireReleaseOp | CASPA_w = CASPFixed | LSEBit_l |
| |
| constexpr LoadStoreAcquireReleaseOp | CASPA_x = CASPFixed | LSEBit_l | LSEBit_sz |
| |
| constexpr LoadStoreAcquireReleaseOp | CASPL_w = CASPFixed | LSEBit_o0 |
| |
| constexpr LoadStoreAcquireReleaseOp | CASPL_x = CASPFixed | LSEBit_o0 | LSEBit_sz |
| |
| constexpr LoadStoreAcquireReleaseOp | CASPAL_w = CASPFixed | LSEBit_l | LSEBit_o0 |
| |
| constexpr LoadStoreAcquireReleaseOp | CASPAL_x |
| |
| constexpr AtomicMemoryOp | AtomicMemoryFixed = 0x38200000 |
| |
| constexpr AtomicMemoryOp | AtomicMemoryFMask = 0x3B200C00 |
| |
| constexpr AtomicMemoryOp | AtomicMemoryMask = 0xFFE0FC00 |
| |
| constexpr AtomicMemoryOp | SWPB = AtomicMemoryFixed | 0x00008000 |
| |
| constexpr AtomicMemoryOp | SWPAB = AtomicMemoryFixed | 0x00808000 |
| |
| constexpr AtomicMemoryOp | SWPLB = AtomicMemoryFixed | 0x00408000 |
| |
| constexpr AtomicMemoryOp | SWPALB = AtomicMemoryFixed | 0x00C08000 |
| |
| constexpr AtomicMemoryOp | SWPH = AtomicMemoryFixed | 0x40008000 |
| |
| constexpr AtomicMemoryOp | SWPAH = AtomicMemoryFixed | 0x40808000 |
| |
| constexpr AtomicMemoryOp | SWPLH = AtomicMemoryFixed | 0x40408000 |
| |
| constexpr AtomicMemoryOp | SWPALH = AtomicMemoryFixed | 0x40C08000 |
| |
| constexpr AtomicMemoryOp | SWP_w = AtomicMemoryFixed | 0x80008000 |
| |
| constexpr AtomicMemoryOp | SWPA_w = AtomicMemoryFixed | 0x80808000 |
| |
| constexpr AtomicMemoryOp | SWPL_w = AtomicMemoryFixed | 0x80408000 |
| |
| constexpr AtomicMemoryOp | SWPAL_w = AtomicMemoryFixed | 0x80C08000 |
| |
| constexpr AtomicMemoryOp | SWP_x = AtomicMemoryFixed | 0xC0008000 |
| |
| constexpr AtomicMemoryOp | SWPA_x = AtomicMemoryFixed | 0xC0808000 |
| |
| constexpr AtomicMemoryOp | SWPL_x = AtomicMemoryFixed | 0xC0408000 |
| |
| constexpr AtomicMemoryOp | SWPAL_x = AtomicMemoryFixed | 0xC0C08000 |
| |
| constexpr AtomicMemoryOp | AtomicMemorySimpleFMask = 0x3B208C00 |
| |
| constexpr AtomicMemoryOp | AtomicMemorySimpleOpMask = 0x00007000 |
| |
| constexpr ConditionalCompareOp | ConditionalCompareMask = 0x60000000 |
| |
| constexpr ConditionalCompareOp | CCMN = 0x20000000 |
| |
| constexpr ConditionalCompareOp | CCMP = 0x60000000 |
| |
| constexpr ConditionalCompareRegisterOp | ConditionalCompareRegisterFixed |
| |
| constexpr ConditionalCompareRegisterOp | ConditionalCompareRegisterFMask |
| |
| constexpr ConditionalCompareRegisterOp | ConditionalCompareRegisterMask |
| |
| constexpr ConditionalCompareRegisterOp | CCMN_w |
| |
| constexpr ConditionalCompareRegisterOp | CCMN_x |
| |
| constexpr ConditionalCompareRegisterOp | CCMP_w |
| |
| constexpr ConditionalCompareRegisterOp | CCMP_x |
| |
| constexpr ConditionalCompareImmediateOp | ConditionalCompareImmediateFixed |
| |
| constexpr ConditionalCompareImmediateOp | ConditionalCompareImmediateFMask |
| |
| constexpr ConditionalCompareImmediateOp | ConditionalCompareImmediateMask |
| |
| constexpr ConditionalCompareImmediateOp | CCMN_w_imm |
| |
| constexpr ConditionalCompareImmediateOp | CCMN_x_imm |
| |
| constexpr ConditionalCompareImmediateOp | CCMP_w_imm |
| |
| constexpr ConditionalCompareImmediateOp | CCMP_x_imm |
| |
| constexpr ConditionalSelectOp | ConditionalSelectFixed = 0x1A800000 |
| |
| constexpr ConditionalSelectOp | ConditionalSelectFMask = 0x1FE00000 |
| |
| constexpr ConditionalSelectOp | ConditionalSelectMask = 0xFFE00C00 |
| |
| constexpr ConditionalSelectOp | CSEL_w = ConditionalSelectFixed | 0x00000000 |
| |
| constexpr ConditionalSelectOp | CSEL_x = ConditionalSelectFixed | 0x80000000 |
| |
| constexpr ConditionalSelectOp | CSEL = CSEL_w |
| |
| constexpr ConditionalSelectOp | CSINC_w = ConditionalSelectFixed | 0x00000400 |
| |
| constexpr ConditionalSelectOp | CSINC_x = ConditionalSelectFixed | 0x80000400 |
| |
| constexpr ConditionalSelectOp | CSINC = CSINC_w |
| |
| constexpr ConditionalSelectOp | CSINV_w = ConditionalSelectFixed | 0x40000000 |
| |
| constexpr ConditionalSelectOp | CSINV_x = ConditionalSelectFixed | 0xC0000000 |
| |
| constexpr ConditionalSelectOp | CSINV = CSINV_w |
| |
| constexpr ConditionalSelectOp | CSNEG_w = ConditionalSelectFixed | 0x40000400 |
| |
| constexpr ConditionalSelectOp | CSNEG_x = ConditionalSelectFixed | 0xC0000400 |
| |
| constexpr ConditionalSelectOp | CSNEG = CSNEG_w |
| |
| constexpr DataProcessing1SourceOp | DataProcessing1SourceFixed = 0x5AC00000 |
| |
| constexpr DataProcessing1SourceOp | DataProcessing1SourceFMask = 0x5FE00000 |
| |
| constexpr DataProcessing1SourceOp | DataProcessing1SourceMask = 0xFFFFFC00 |
| |
| constexpr DataProcessing1SourceOp | RBIT |
| |
| constexpr DataProcessing1SourceOp | RBIT_w = RBIT |
| |
| constexpr DataProcessing1SourceOp | RBIT_x = RBIT | SixtyFourBits |
| |
| constexpr DataProcessing1SourceOp | REV16 |
| |
| constexpr DataProcessing1SourceOp | REV16_w = REV16 |
| |
| constexpr DataProcessing1SourceOp | REV16_x = REV16 | SixtyFourBits |
| |
| constexpr DataProcessing1SourceOp | REV = DataProcessing1SourceFixed | 0x00000800 |
| |
| constexpr DataProcessing1SourceOp | REV_w = REV |
| |
| constexpr DataProcessing1SourceOp | REV32_x = REV | SixtyFourBits |
| |
| constexpr DataProcessing1SourceOp | REV_x |
| |
| constexpr DataProcessing1SourceOp | CLZ_w = CLZ |
| |
| constexpr DataProcessing1SourceOp | CLZ_x = CLZ | SixtyFourBits |
| |
| constexpr DataProcessing1SourceOp | CLS = DataProcessing1SourceFixed | 0x00001400 |
| |
| constexpr DataProcessing1SourceOp | CLS_w = CLS |
| |
| constexpr DataProcessing1SourceOp | CLS_x = CLS | SixtyFourBits |
| |
| constexpr DataProcessing2SourceOp | DataProcessing2SourceFixed = 0x1AC00000 |
| |
| constexpr DataProcessing2SourceOp | DataProcessing2SourceFMask = 0x5FE00000 |
| |
| constexpr DataProcessing2SourceOp | DataProcessing2SourceMask = 0xFFE0FC00 |
| |
| constexpr DataProcessing2SourceOp | UDIV_w |
| |
| constexpr DataProcessing2SourceOp | UDIV_x |
| |
| constexpr DataProcessing2SourceOp | UDIV = UDIV_w |
| |
| constexpr DataProcessing2SourceOp | SDIV_w |
| |
| constexpr DataProcessing2SourceOp | SDIV_x |
| |
| constexpr DataProcessing2SourceOp | SDIV = SDIV_w |
| |
| constexpr DataProcessing2SourceOp | LSLV_w |
| |
| constexpr DataProcessing2SourceOp | LSLV_x |
| |
| constexpr DataProcessing2SourceOp | LSLV = LSLV_w |
| |
| constexpr DataProcessing2SourceOp | LSRV_w |
| |
| constexpr DataProcessing2SourceOp | LSRV_x |
| |
| constexpr DataProcessing2SourceOp | LSRV = LSRV_w |
| |
| constexpr DataProcessing2SourceOp | ASRV_w |
| |
| constexpr DataProcessing2SourceOp | ASRV_x |
| |
| constexpr DataProcessing2SourceOp | ASRV = ASRV_w |
| |
| constexpr DataProcessing2SourceOp | RORV_w |
| |
| constexpr DataProcessing2SourceOp | RORV_x |
| |
| constexpr DataProcessing2SourceOp | RORV = RORV_w |
| |
| constexpr DataProcessing2SourceOp | CRC32B |
| |
| constexpr DataProcessing2SourceOp | CRC32H |
| |
| constexpr DataProcessing2SourceOp | CRC32W |
| |
| constexpr DataProcessing2SourceOp | CRC32X |
| |
| constexpr DataProcessing2SourceOp | CRC32CB |
| |
| constexpr DataProcessing2SourceOp | CRC32CH |
| |
| constexpr DataProcessing2SourceOp | CRC32CW |
| |
| constexpr DataProcessing2SourceOp | CRC32CX |
| |
| constexpr DataProcessing3SourceOp | DataProcessing3SourceFixed = 0x1B000000 |
| |
| constexpr DataProcessing3SourceOp | DataProcessing3SourceFMask = 0x1F000000 |
| |
| constexpr DataProcessing3SourceOp | DataProcessing3SourceMask = 0xFFE08000 |
| |
| constexpr DataProcessing3SourceOp | MADD_w |
| |
| constexpr DataProcessing3SourceOp | MADD_x |
| |
| constexpr DataProcessing3SourceOp | MADD = MADD_w |
| |
| constexpr DataProcessing3SourceOp | MSUB_w |
| |
| constexpr DataProcessing3SourceOp | MSUB_x |
| |
| constexpr DataProcessing3SourceOp | MSUB = MSUB_w |
| |
| constexpr DataProcessing3SourceOp | SMADDL_x |
| |
| constexpr DataProcessing3SourceOp | SMSUBL_x |
| |
| constexpr DataProcessing3SourceOp | SMULH_x |
| |
| constexpr DataProcessing3SourceOp | UMADDL_x |
| |
| constexpr DataProcessing3SourceOp | UMSUBL_x |
| |
| constexpr DataProcessing3SourceOp | UMULH_x |
| |
| constexpr FPCompareOp | FPCompareFixed = 0x1E202000 |
| |
| constexpr FPCompareOp | FPCompareFMask = 0x5F203C00 |
| |
| constexpr FPCompareOp | FPCompareMask = 0xFFE0FC1F |
| |
| constexpr FPCompareOp | FCMP_s = FPCompareFixed | 0x00000000 |
| |
| constexpr FPCompareOp | FCMP_d = FPCompareFixed | FP64 | 0x00000000 |
| |
| constexpr FPCompareOp | FCMP = FCMP_s |
| |
| constexpr FPCompareOp | FCMP_s_zero = FPCompareFixed | 0x00000008 |
| |
| constexpr FPCompareOp | FCMP_d_zero = FPCompareFixed | FP64 | 0x00000008 |
| |
| constexpr FPCompareOp | FCMP_zero = FCMP_s_zero |
| |
| constexpr FPCompareOp | FCMPE_s = FPCompareFixed | 0x00000010 |
| |
| constexpr FPCompareOp | FCMPE_d = FPCompareFixed | FP64 | 0x00000010 |
| |
| constexpr FPCompareOp | FCMPE_s_zero = FPCompareFixed | 0x00000018 |
| |
| constexpr FPCompareOp | FCMPE_d_zero = FPCompareFixed | FP64 | 0x00000018 |
| |
| constexpr FPConditionalCompareOp | FPConditionalCompareFixed = 0x1E200400 |
| |
| constexpr FPConditionalCompareOp | FPConditionalCompareFMask = 0x5F200C00 |
| |
| constexpr FPConditionalCompareOp | FPConditionalCompareMask = 0xFFE00C10 |
| |
| constexpr FPConditionalCompareOp | FCCMP_s |
| |
| constexpr FPConditionalCompareOp | FCCMP_d |
| |
| constexpr FPConditionalCompareOp | FCCMP = FCCMP_s |
| |
| constexpr FPConditionalCompareOp | FCCMPE_s |
| |
| constexpr FPConditionalCompareOp | FCCMPE_d |
| |
| constexpr FPConditionalCompareOp | FCCMPE = FCCMPE_s |
| |
| constexpr FPConditionalSelectOp | FPConditionalSelectFixed = 0x1E200C00 |
| |
| constexpr FPConditionalSelectOp | FPConditionalSelectFMask = 0x5F200C00 |
| |
| constexpr FPConditionalSelectOp | FPConditionalSelectMask = 0xFFE00C00 |
| |
| constexpr FPConditionalSelectOp | FCSEL_s = FPConditionalSelectFixed | 0x00000000 |
| |
| constexpr FPConditionalSelectOp | FCSEL_d |
| |
| constexpr FPConditionalSelectOp | FCSEL = FCSEL_s |
| |
| constexpr FPImmediateOp | FPImmediateFixed = 0x1E201000 |
| |
| constexpr FPImmediateOp | FPImmediateFMask = 0x5F201C00 |
| |
| constexpr FPImmediateOp | FPImmediateMask = 0xFFE01C00 |
| |
| constexpr FPImmediateOp | FMOV_s_imm = FPImmediateFixed | 0x00000000 |
| |
| constexpr FPImmediateOp | FMOV_d_imm = FPImmediateFixed | FP64 | 0x00000000 |
| |
| constexpr FPDataProcessing1SourceOp | FPDataProcessing1SourceFixed = 0x1E204000 |
| |
| constexpr FPDataProcessing1SourceOp | FPDataProcessing1SourceFMask = 0x5F207C00 |
| |
| constexpr FPDataProcessing1SourceOp | FPDataProcessing1SourceMask = 0xFFFFFC00 |
| |
| constexpr FPDataProcessing1SourceOp | FMOV_s |
| |
| constexpr FPDataProcessing1SourceOp | FMOV_d |
| |
| constexpr FPDataProcessing1SourceOp | FMOV = FMOV_s |
| |
| constexpr FPDataProcessing1SourceOp | FABS_s |
| |
| constexpr FPDataProcessing1SourceOp | FABS_d |
| |
| constexpr FPDataProcessing1SourceOp | FABS = FABS_s |
| |
| constexpr FPDataProcessing1SourceOp | FNEG_s |
| |
| constexpr FPDataProcessing1SourceOp | FNEG_d |
| |
| constexpr FPDataProcessing1SourceOp | FNEG = FNEG_s |
| |
| constexpr FPDataProcessing1SourceOp | FSQRT_s |
| |
| constexpr FPDataProcessing1SourceOp | FSQRT_d |
| |
| constexpr FPDataProcessing1SourceOp | FSQRT = FSQRT_s |
| |
| constexpr FPDataProcessing1SourceOp | FCVT_ds |
| |
| constexpr FPDataProcessing1SourceOp | FCVT_sd |
| |
| constexpr FPDataProcessing1SourceOp | FCVT_hs |
| |
| constexpr FPDataProcessing1SourceOp | FCVT_hd |
| |
| constexpr FPDataProcessing1SourceOp | FCVT_sh |
| |
| constexpr FPDataProcessing1SourceOp | FCVT_dh |
| |
| constexpr FPDataProcessing1SourceOp | FRINTN_s |
| |
| constexpr FPDataProcessing1SourceOp | FRINTN_d |
| |
| constexpr FPDataProcessing1SourceOp | FRINTN = FRINTN_s |
| |
| constexpr FPDataProcessing1SourceOp | FRINTP_s |
| |
| constexpr FPDataProcessing1SourceOp | FRINTP_d |
| |
| constexpr FPDataProcessing1SourceOp | FRINTP = FRINTP_s |
| |
| constexpr FPDataProcessing1SourceOp | FRINTM_s |
| |
| constexpr FPDataProcessing1SourceOp | FRINTM_d |
| |
| constexpr FPDataProcessing1SourceOp | FRINTM = FRINTM_s |
| |
| constexpr FPDataProcessing1SourceOp | FRINTZ_s |
| |
| constexpr FPDataProcessing1SourceOp | FRINTZ_d |
| |
| constexpr FPDataProcessing1SourceOp | FRINTZ = FRINTZ_s |
| |
| constexpr FPDataProcessing1SourceOp | FRINTA_s |
| |
| constexpr FPDataProcessing1SourceOp | FRINTA_d |
| |
| constexpr FPDataProcessing1SourceOp | FRINTA = FRINTA_s |
| |
| constexpr FPDataProcessing1SourceOp | FRINTX_s |
| |
| constexpr FPDataProcessing1SourceOp | FRINTX_d |
| |
| constexpr FPDataProcessing1SourceOp | FRINTX = FRINTX_s |
| |
| constexpr FPDataProcessing1SourceOp | FRINTI_s |
| |
| constexpr FPDataProcessing1SourceOp | FRINTI_d |
| |
| constexpr FPDataProcessing1SourceOp | FRINTI = FRINTI_s |
| |
| constexpr FPDataProcessing2SourceOp | FPDataProcessing2SourceFixed = 0x1E200800 |
| |
| constexpr FPDataProcessing2SourceOp | FPDataProcessing2SourceFMask = 0x5F200C00 |
| |
| constexpr FPDataProcessing2SourceOp | FPDataProcessing2SourceMask = 0xFFE0FC00 |
| |
| constexpr FPDataProcessing2SourceOp | FMUL |
| |
| constexpr FPDataProcessing2SourceOp | FMUL_s = FMUL |
| |
| constexpr FPDataProcessing2SourceOp | FMUL_d = FMUL | FP64 |
| |
| constexpr FPDataProcessing2SourceOp | FDIV |
| |
| constexpr FPDataProcessing2SourceOp | FDIV_s = FDIV |
| |
| constexpr FPDataProcessing2SourceOp | FDIV_d = FDIV | FP64 |
| |
| constexpr FPDataProcessing2SourceOp | FADD |
| |
| constexpr FPDataProcessing2SourceOp | FADD_s = FADD |
| |
| constexpr FPDataProcessing2SourceOp | FADD_d = FADD | FP64 |
| |
| constexpr FPDataProcessing2SourceOp | FSUB |
| |
| constexpr FPDataProcessing2SourceOp | FSUB_s = FSUB |
| |
| constexpr FPDataProcessing2SourceOp | FSUB_d = FSUB | FP64 |
| |
| constexpr FPDataProcessing2SourceOp | FMAX |
| |
| constexpr FPDataProcessing2SourceOp | FMAX_s = FMAX |
| |
| constexpr FPDataProcessing2SourceOp | FMAX_d = FMAX | FP64 |
| |
| constexpr FPDataProcessing2SourceOp | FMIN |
| |
| constexpr FPDataProcessing2SourceOp | FMIN_s = FMIN |
| |
| constexpr FPDataProcessing2SourceOp | FMIN_d = FMIN | FP64 |
| |
| constexpr FPDataProcessing2SourceOp | FMAXNM |
| |
| constexpr FPDataProcessing2SourceOp | FMAXNM_s = FMAXNM |
| |
| constexpr FPDataProcessing2SourceOp | FMAXNM_d = FMAXNM | FP64 |
| |
| constexpr FPDataProcessing2SourceOp | FMINNM |
| |
| constexpr FPDataProcessing2SourceOp | FMINNM_s = FMINNM |
| |
| constexpr FPDataProcessing2SourceOp | FMINNM_d = FMINNM | FP64 |
| |
| constexpr FPDataProcessing2SourceOp | FNMUL |
| |
| constexpr FPDataProcessing2SourceOp | FNMUL_s = FNMUL |
| |
| constexpr FPDataProcessing2SourceOp | FNMUL_d = FNMUL | FP64 |
| |
| constexpr FPDataProcessing3SourceOp | FPDataProcessing3SourceFixed = 0x1F000000 |
| |
| constexpr FPDataProcessing3SourceOp | FPDataProcessing3SourceFMask = 0x5F000000 |
| |
| constexpr FPDataProcessing3SourceOp | FPDataProcessing3SourceMask = 0xFFE08000 |
| |
| constexpr FPDataProcessing3SourceOp | FMADD_s |
| |
| constexpr FPDataProcessing3SourceOp | FMSUB_s |
| |
| constexpr FPDataProcessing3SourceOp | FNMADD_s |
| |
| constexpr FPDataProcessing3SourceOp | FNMSUB_s |
| |
| constexpr FPDataProcessing3SourceOp | FMADD_d |
| |
| constexpr FPDataProcessing3SourceOp | FMSUB_d |
| |
| constexpr FPDataProcessing3SourceOp | FNMADD_d |
| |
| constexpr FPDataProcessing3SourceOp | FNMSUB_d |
| |
| constexpr FPIntegerConvertOp | FPIntegerConvertFixed = 0x1E200000 |
| |
| constexpr FPIntegerConvertOp | FPIntegerConvertFMask = 0x5F20FC00 |
| |
| constexpr FPIntegerConvertOp | FPIntegerConvertMask = 0xFFFFFC00 |
| |
| constexpr FPIntegerConvertOp | FCVTNS = FPIntegerConvertFixed | 0x00000000 |
| |
| constexpr FPIntegerConvertOp | FCVTNS_ws = FCVTNS |
| |
| constexpr FPIntegerConvertOp | FCVTNS_xs = FCVTNS | SixtyFourBits |
| |
| constexpr FPIntegerConvertOp | FCVTNS_wd = FCVTNS | FP64 |
| |
| constexpr FPIntegerConvertOp | FCVTNS_xd = FCVTNS | SixtyFourBits | FP64 |
| |
| constexpr FPIntegerConvertOp | FCVTNU = FPIntegerConvertFixed | 0x00010000 |
| |
| constexpr FPIntegerConvertOp | FCVTNU_ws = FCVTNU |
| |
| constexpr FPIntegerConvertOp | FCVTNU_xs = FCVTNU | SixtyFourBits |
| |
| constexpr FPIntegerConvertOp | FCVTNU_wd = FCVTNU | FP64 |
| |
| constexpr FPIntegerConvertOp | FCVTNU_xd = FCVTNU | SixtyFourBits | FP64 |
| |
| constexpr FPIntegerConvertOp | FCVTPS = FPIntegerConvertFixed | 0x00080000 |
| |
| constexpr FPIntegerConvertOp | FCVTPS_ws = FCVTPS |
| |
| constexpr FPIntegerConvertOp | FCVTPS_xs = FCVTPS | SixtyFourBits |
| |
| constexpr FPIntegerConvertOp | FCVTPS_wd = FCVTPS | FP64 |
| |
| constexpr FPIntegerConvertOp | FCVTPS_xd = FCVTPS | SixtyFourBits | FP64 |
| |
| constexpr FPIntegerConvertOp | FCVTPU = FPIntegerConvertFixed | 0x00090000 |
| |
| constexpr FPIntegerConvertOp | FCVTPU_ws = FCVTPU |
| |
| constexpr FPIntegerConvertOp | FCVTPU_xs = FCVTPU | SixtyFourBits |
| |
| constexpr FPIntegerConvertOp | FCVTPU_wd = FCVTPU | FP64 |
| |
| constexpr FPIntegerConvertOp | FCVTPU_xd = FCVTPU | SixtyFourBits | FP64 |
| |
| constexpr FPIntegerConvertOp | FCVTMS = FPIntegerConvertFixed | 0x00100000 |
| |
| constexpr FPIntegerConvertOp | FCVTMS_ws = FCVTMS |
| |
| constexpr FPIntegerConvertOp | FCVTMS_xs = FCVTMS | SixtyFourBits |
| |
| constexpr FPIntegerConvertOp | FCVTMS_wd = FCVTMS | FP64 |
| |
| constexpr FPIntegerConvertOp | FCVTMS_xd = FCVTMS | SixtyFourBits | FP64 |
| |
| constexpr FPIntegerConvertOp | FCVTMU = FPIntegerConvertFixed | 0x00110000 |
| |
| constexpr FPIntegerConvertOp | FCVTMU_ws = FCVTMU |
| |
| constexpr FPIntegerConvertOp | FCVTMU_xs = FCVTMU | SixtyFourBits |
| |
| constexpr FPIntegerConvertOp | FCVTMU_wd = FCVTMU | FP64 |
| |
| constexpr FPIntegerConvertOp | FCVTMU_xd = FCVTMU | SixtyFourBits | FP64 |
| |
| constexpr FPIntegerConvertOp | FCVTZS = FPIntegerConvertFixed | 0x00180000 |
| |
| constexpr FPIntegerConvertOp | FCVTZS_ws = FCVTZS |
| |
| constexpr FPIntegerConvertOp | FCVTZS_xs = FCVTZS | SixtyFourBits |
| |
| constexpr FPIntegerConvertOp | FCVTZS_wd = FCVTZS | FP64 |
| |
| constexpr FPIntegerConvertOp | FCVTZS_xd = FCVTZS | SixtyFourBits | FP64 |
| |
| constexpr FPIntegerConvertOp | FCVTZU = FPIntegerConvertFixed | 0x00190000 |
| |
| constexpr FPIntegerConvertOp | FCVTZU_ws = FCVTZU |
| |
| constexpr FPIntegerConvertOp | FCVTZU_xs = FCVTZU | SixtyFourBits |
| |
| constexpr FPIntegerConvertOp | FCVTZU_wd = FCVTZU | FP64 |
| |
| constexpr FPIntegerConvertOp | FCVTZU_xd = FCVTZU | SixtyFourBits | FP64 |
| |
| constexpr FPIntegerConvertOp | SCVTF = FPIntegerConvertFixed | 0x00020000 |
| |
| constexpr FPIntegerConvertOp | SCVTF_sw = SCVTF |
| |
| constexpr FPIntegerConvertOp | SCVTF_sx = SCVTF | SixtyFourBits |
| |
| constexpr FPIntegerConvertOp | SCVTF_dw = SCVTF | FP64 |
| |
| constexpr FPIntegerConvertOp | SCVTF_dx = SCVTF | SixtyFourBits | FP64 |
| |
| constexpr FPIntegerConvertOp | UCVTF = FPIntegerConvertFixed | 0x00030000 |
| |
| constexpr FPIntegerConvertOp | UCVTF_sw = UCVTF |
| |
| constexpr FPIntegerConvertOp | UCVTF_sx = UCVTF | SixtyFourBits |
| |
| constexpr FPIntegerConvertOp | UCVTF_dw = UCVTF | FP64 |
| |
| constexpr FPIntegerConvertOp | UCVTF_dx = UCVTF | SixtyFourBits | FP64 |
| |
| constexpr FPIntegerConvertOp | FCVTAS = FPIntegerConvertFixed | 0x00040000 |
| |
| constexpr FPIntegerConvertOp | FCVTAS_ws = FCVTAS |
| |
| constexpr FPIntegerConvertOp | FCVTAS_xs = FCVTAS | SixtyFourBits |
| |
| constexpr FPIntegerConvertOp | FCVTAS_wd = FCVTAS | FP64 |
| |
| constexpr FPIntegerConvertOp | FCVTAS_xd = FCVTAS | SixtyFourBits | FP64 |
| |
| constexpr FPIntegerConvertOp | FCVTAU = FPIntegerConvertFixed | 0x00050000 |
| |
| constexpr FPIntegerConvertOp | FCVTAU_ws = FCVTAU |
| |
| constexpr FPIntegerConvertOp | FCVTAU_xs = FCVTAU | SixtyFourBits |
| |
| constexpr FPIntegerConvertOp | FCVTAU_wd = FCVTAU | FP64 |
| |
| constexpr FPIntegerConvertOp | FCVTAU_xd = FCVTAU | SixtyFourBits | FP64 |
| |
| constexpr FPIntegerConvertOp | FMOV_ws = FPIntegerConvertFixed | 0x00060000 |
| |
| constexpr FPIntegerConvertOp | FMOV_sw = FPIntegerConvertFixed | 0x00070000 |
| |
| constexpr FPIntegerConvertOp | FMOV_xd = FMOV_ws | SixtyFourBits | FP64 |
| |
| constexpr FPIntegerConvertOp | FMOV_dx = FMOV_sw | SixtyFourBits | FP64 |
| |
| constexpr FPIntegerConvertOp | FMOV_d1_x |
| |
| constexpr FPIntegerConvertOp | FMOV_x_d1 |
| |
| constexpr FPIntegerConvertOp | FJCVTZS |
| |
| constexpr FPFixedPointConvertOp | FPFixedPointConvertFixed = 0x1E000000 |
| |
| constexpr FPFixedPointConvertOp | FPFixedPointConvertFMask = 0x5F200000 |
| |
| constexpr FPFixedPointConvertOp | FPFixedPointConvertMask = 0xFFFF0000 |
| |
| constexpr FPFixedPointConvertOp | FCVTZS_fixed |
| |
| constexpr FPFixedPointConvertOp | FCVTZS_ws_fixed = FCVTZS_fixed |
| |
| constexpr FPFixedPointConvertOp | FCVTZS_xs_fixed = FCVTZS_fixed | SixtyFourBits |
| |
| constexpr FPFixedPointConvertOp | FCVTZS_wd_fixed = FCVTZS_fixed | FP64 |
| |
| constexpr FPFixedPointConvertOp | FCVTZS_xd_fixed |
| |
| constexpr FPFixedPointConvertOp | FCVTZU_fixed |
| |
| constexpr FPFixedPointConvertOp | FCVTZU_ws_fixed = FCVTZU_fixed |
| |
| constexpr FPFixedPointConvertOp | FCVTZU_xs_fixed = FCVTZU_fixed | SixtyFourBits |
| |
| constexpr FPFixedPointConvertOp | FCVTZU_wd_fixed = FCVTZU_fixed | FP64 |
| |
| constexpr FPFixedPointConvertOp | FCVTZU_xd_fixed |
| |
| constexpr FPFixedPointConvertOp | SCVTF_fixed |
| |
| constexpr FPFixedPointConvertOp | SCVTF_sw_fixed = SCVTF_fixed |
| |
| constexpr FPFixedPointConvertOp | SCVTF_sx_fixed = SCVTF_fixed | SixtyFourBits |
| |
| constexpr FPFixedPointConvertOp | SCVTF_dw_fixed = SCVTF_fixed | FP64 |
| |
| constexpr FPFixedPointConvertOp | SCVTF_dx_fixed |
| |
| constexpr FPFixedPointConvertOp | UCVTF_fixed |
| |
| constexpr FPFixedPointConvertOp | UCVTF_sw_fixed = UCVTF_fixed |
| |
| constexpr FPFixedPointConvertOp | UCVTF_sx_fixed = UCVTF_fixed | SixtyFourBits |
| |
| constexpr FPFixedPointConvertOp | UCVTF_dw_fixed = UCVTF_fixed | FP64 |
| |
| constexpr FPFixedPointConvertOp | UCVTF_dx_fixed |
| |
| constexpr NEON2RegMiscOp | NEON2RegMiscFixed = 0x0E200800 |
| |
| constexpr NEON2RegMiscOp | NEON2RegMiscFMask = 0x9F260C00 |
| |
| constexpr NEON2RegMiscOp | NEON2RegMiscHPFixed = 0x00180000 |
| |
| constexpr NEON2RegMiscOp | NEON2RegMiscMask = 0xBF3FFC00 |
| |
| constexpr NEON2RegMiscOp | NEON2RegMiscUBit = 0x20000000 |
| |
| constexpr NEON2RegMiscOp | NEON_REV64 = NEON2RegMiscFixed | 0x00000000 |
| |
| constexpr NEON2RegMiscOp | NEON_REV32 = NEON2RegMiscFixed | 0x20000000 |
| |
| constexpr NEON2RegMiscOp | NEON_REV16 = NEON2RegMiscFixed | 0x00001000 |
| |
| constexpr NEON2RegMiscOp | NEON_SADDLP = NEON2RegMiscFixed | 0x00002000 |
| |
| constexpr NEON2RegMiscOp | NEON_UADDLP = NEON_SADDLP | NEON2RegMiscUBit |
| |
| constexpr NEON2RegMiscOp | NEON_SUQADD = NEON2RegMiscFixed | 0x00003000 |
| |
| constexpr NEON2RegMiscOp | NEON_USQADD = NEON_SUQADD | NEON2RegMiscUBit |
| |
| constexpr NEON2RegMiscOp | NEON_CLS = NEON2RegMiscFixed | 0x00004000 |
| |
| constexpr NEON2RegMiscOp | NEON_CLZ = NEON2RegMiscFixed | 0x20004000 |
| |
| constexpr NEON2RegMiscOp | NEON_CNT = NEON2RegMiscFixed | 0x00005000 |
| |
| constexpr NEON2RegMiscOp | NEON_RBIT_NOT = NEON2RegMiscFixed | 0x20005000 |
| |
| constexpr NEON2RegMiscOp | NEON_SADALP = NEON2RegMiscFixed | 0x00006000 |
| |
| constexpr NEON2RegMiscOp | NEON_UADALP = NEON_SADALP | NEON2RegMiscUBit |
| |
| constexpr NEON2RegMiscOp | NEON_SQABS = NEON2RegMiscFixed | 0x00007000 |
| |
| constexpr NEON2RegMiscOp | NEON_SQNEG = NEON2RegMiscFixed | 0x20007000 |
| |
| constexpr NEON2RegMiscOp | NEON_CMGT_zero = NEON2RegMiscFixed | 0x00008000 |
| |
| constexpr NEON2RegMiscOp | NEON_CMGE_zero = NEON2RegMiscFixed | 0x20008000 |
| |
| constexpr NEON2RegMiscOp | NEON_CMEQ_zero = NEON2RegMiscFixed | 0x00009000 |
| |
| constexpr NEON2RegMiscOp | NEON_CMLE_zero = NEON2RegMiscFixed | 0x20009000 |
| |
| constexpr NEON2RegMiscOp | NEON_CMLT_zero = NEON2RegMiscFixed | 0x0000A000 |
| |
| constexpr NEON2RegMiscOp | NEON_ABS = NEON2RegMiscFixed | 0x0000B000 |
| |
| constexpr NEON2RegMiscOp | NEON_NEG = NEON2RegMiscFixed | 0x2000B000 |
| |
| constexpr NEON2RegMiscOp | NEON_XTN = NEON2RegMiscFixed | 0x00012000 |
| |
| constexpr NEON2RegMiscOp | NEON_SQXTUN = NEON2RegMiscFixed | 0x20012000 |
| |
| constexpr NEON2RegMiscOp | NEON_SHLL = NEON2RegMiscFixed | 0x20013000 |
| |
| constexpr NEON2RegMiscOp | NEON_SQXTN = NEON2RegMiscFixed | 0x00014000 |
| |
| constexpr NEON2RegMiscOp | NEON_UQXTN = NEON_SQXTN | NEON2RegMiscUBit |
| |
| constexpr NEON2RegMiscOp | NEON2RegMiscOpcode = 0x0001F000 |
| |
| constexpr NEON2RegMiscOp | NEON_RBIT_NOT_opcode |
| |
| constexpr NEON2RegMiscOp | NEON_NEG_opcode = NEON_NEG & NEON2RegMiscOpcode |
| |
| constexpr NEON2RegMiscOp | NEON_XTN_opcode = NEON_XTN & NEON2RegMiscOpcode |
| |
| constexpr NEON2RegMiscOp | NEON_UQXTN_opcode = NEON_UQXTN & NEON2RegMiscOpcode |
| |
| constexpr NEON2RegMiscOp | NEON2RegMiscFPMask = NEON2RegMiscMask | 0x00800000 |
| |
| constexpr NEON2RegMiscOp | NEON_FABS = NEON2RegMiscFixed | 0x0080F000 |
| |
| constexpr NEON2RegMiscOp | NEON_FNEG = NEON2RegMiscFixed | 0x2080F000 |
| |
| constexpr NEON2RegMiscOp | NEON_FCVTN = NEON2RegMiscFixed | 0x00016000 |
| |
| constexpr NEON2RegMiscOp | NEON_FCVTXN = NEON2RegMiscFixed | 0x20016000 |
| |
| constexpr NEON2RegMiscOp | NEON_FCVTL = NEON2RegMiscFixed | 0x00017000 |
| |
| constexpr NEON2RegMiscOp | NEON_FRINTN = NEON2RegMiscFixed | 0x00018000 |
| |
| constexpr NEON2RegMiscOp | NEON_FRINTA = NEON2RegMiscFixed | 0x20018000 |
| |
| constexpr NEON2RegMiscOp | NEON_FRINTP = NEON2RegMiscFixed | 0x00818000 |
| |
| constexpr NEON2RegMiscOp | NEON_FRINTM = NEON2RegMiscFixed | 0x00019000 |
| |
| constexpr NEON2RegMiscOp | NEON_FRINTX = NEON2RegMiscFixed | 0x20019000 |
| |
| constexpr NEON2RegMiscOp | NEON_FRINTZ = NEON2RegMiscFixed | 0x00819000 |
| |
| constexpr NEON2RegMiscOp | NEON_FRINTI = NEON2RegMiscFixed | 0x20819000 |
| |
| constexpr NEON2RegMiscOp | NEON_FCVTNS = NEON2RegMiscFixed | 0x0001A000 |
| |
| constexpr NEON2RegMiscOp | NEON_FCVTNU = NEON_FCVTNS | NEON2RegMiscUBit |
| |
| constexpr NEON2RegMiscOp | NEON_FCVTPS = NEON2RegMiscFixed | 0x0081A000 |
| |
| constexpr NEON2RegMiscOp | NEON_FCVTPU = NEON_FCVTPS | NEON2RegMiscUBit |
| |
| constexpr NEON2RegMiscOp | NEON_FCVTMS = NEON2RegMiscFixed | 0x0001B000 |
| |
| constexpr NEON2RegMiscOp | NEON_FCVTMU = NEON_FCVTMS | NEON2RegMiscUBit |
| |
| constexpr NEON2RegMiscOp | NEON_FCVTZS = NEON2RegMiscFixed | 0x0081B000 |
| |
| constexpr NEON2RegMiscOp | NEON_FCVTZU = NEON_FCVTZS | NEON2RegMiscUBit |
| |
| constexpr NEON2RegMiscOp | NEON_FCVTAS = NEON2RegMiscFixed | 0x0001C000 |
| |
| constexpr NEON2RegMiscOp | NEON_FCVTAU = NEON_FCVTAS | NEON2RegMiscUBit |
| |
| constexpr NEON2RegMiscOp | NEON_FSQRT = NEON2RegMiscFixed | 0x2081F000 |
| |
| constexpr NEON2RegMiscOp | NEON_SCVTF = NEON2RegMiscFixed | 0x0001D000 |
| |
| constexpr NEON2RegMiscOp | NEON_UCVTF = NEON_SCVTF | NEON2RegMiscUBit |
| |
| constexpr NEON2RegMiscOp | NEON_URSQRTE = NEON2RegMiscFixed | 0x2081C000 |
| |
| constexpr NEON2RegMiscOp | NEON_URECPE = NEON2RegMiscFixed | 0x0081C000 |
| |
| constexpr NEON2RegMiscOp | NEON_FRSQRTE = NEON2RegMiscFixed | 0x2081D000 |
| |
| constexpr NEON2RegMiscOp | NEON_FRECPE = NEON2RegMiscFixed | 0x0081D000 |
| |
| constexpr NEON2RegMiscOp | NEON_FCMGT_zero = NEON2RegMiscFixed | 0x0080C000 |
| |
| constexpr NEON2RegMiscOp | NEON_FCMGE_zero = NEON2RegMiscFixed | 0x2080C000 |
| |
| constexpr NEON2RegMiscOp | NEON_FCMEQ_zero = NEON2RegMiscFixed | 0x0080D000 |
| |
| constexpr NEON2RegMiscOp | NEON_FCMLE_zero = NEON2RegMiscFixed | 0x2080D000 |
| |
| constexpr NEON2RegMiscOp | NEON_FCMLT_zero = NEON2RegMiscFixed | 0x0080E000 |
| |
| constexpr NEON2RegMiscOp | NEON_FCVTL_opcode = NEON_FCVTL & NEON2RegMiscOpcode |
| |
| constexpr NEON2RegMiscOp | NEON_FCVTN_opcode = NEON_FCVTN & NEON2RegMiscOpcode |
| |
| constexpr NEON3SameOp | NEON3SameFixed = 0x0E200400 |
| |
| constexpr NEON3SameOp | NEON3SameFMask = 0x9F200400 |
| |
| constexpr NEON3SameOp | NEON3SameMask = 0xBF20FC00 |
| |
| constexpr NEON3SameOp | NEON3SameUBit = 0x20000000 |
| |
| constexpr NEON3SameOp | NEON_ADD = NEON3SameFixed | 0x00008000 |
| |
| constexpr NEON3SameOp | NEON_ADDP = NEON3SameFixed | 0x0000B800 |
| |
| constexpr NEON3SameOp | NEON_SHADD = NEON3SameFixed | 0x00000000 |
| |
| constexpr NEON3SameOp | NEON_SHSUB = NEON3SameFixed | 0x00002000 |
| |
| constexpr NEON3SameOp | NEON_SRHADD = NEON3SameFixed | 0x00001000 |
| |
| constexpr NEON3SameOp | NEON_CMEQ = NEON3SameFixed | NEON3SameUBit | 0x00008800 |
| |
| constexpr NEON3SameOp | NEON_CMGE = NEON3SameFixed | 0x00003800 |
| |
| constexpr NEON3SameOp | NEON_CMGT = NEON3SameFixed | 0x00003000 |
| |
| constexpr NEON3SameOp | NEON_CMHI = NEON3SameFixed | NEON3SameUBit | NEON_CMGT |
| |
| constexpr NEON3SameOp | NEON_CMHS = NEON3SameFixed | NEON3SameUBit | NEON_CMGE |
| |
| constexpr NEON3SameOp | NEON_CMTST = NEON3SameFixed | 0x00008800 |
| |
| constexpr NEON3SameOp | NEON_MLA = NEON3SameFixed | 0x00009000 |
| |
| constexpr NEON3SameOp | NEON_MLS = NEON3SameFixed | 0x20009000 |
| |
| constexpr NEON3SameOp | NEON_MUL = NEON3SameFixed | 0x00009800 |
| |
| constexpr NEON3SameOp | NEON_PMUL = NEON3SameFixed | 0x20009800 |
| |
| constexpr NEON3SameOp | NEON_SRSHL = NEON3SameFixed | 0x00005000 |
| |
| constexpr NEON3SameOp | NEON_SQSHL = NEON3SameFixed | 0x00004800 |
| |
| constexpr NEON3SameOp | NEON_SQRSHL = NEON3SameFixed | 0x00005800 |
| |
| constexpr NEON3SameOp | NEON_SSHL = NEON3SameFixed | 0x00004000 |
| |
| constexpr NEON3SameOp | NEON_SMAX = NEON3SameFixed | 0x00006000 |
| |
| constexpr NEON3SameOp | NEON_SMAXP = NEON3SameFixed | 0x0000A000 |
| |
| constexpr NEON3SameOp | NEON_SMIN = NEON3SameFixed | 0x00006800 |
| |
| constexpr NEON3SameOp | NEON_SMINP = NEON3SameFixed | 0x0000A800 |
| |
| constexpr NEON3SameOp | NEON_SABD = NEON3SameFixed | 0x00007000 |
| |
| constexpr NEON3SameOp | NEON_SABA = NEON3SameFixed | 0x00007800 |
| |
| constexpr NEON3SameOp | NEON_UABD = NEON3SameFixed | NEON3SameUBit | NEON_SABD |
| |
| constexpr NEON3SameOp | NEON_UABA = NEON3SameFixed | NEON3SameUBit | NEON_SABA |
| |
| constexpr NEON3SameOp | NEON_SQADD = NEON3SameFixed | 0x00000800 |
| |
| constexpr NEON3SameOp | NEON_SQSUB = NEON3SameFixed | 0x00002800 |
| |
| constexpr NEON3SameOp | NEON_SUB = NEON3SameFixed | NEON3SameUBit | 0x00008000 |
| |
| constexpr NEON3SameOp | NEON_UHADD = NEON3SameFixed | NEON3SameUBit | NEON_SHADD |
| |
| constexpr NEON3SameOp | NEON_UHSUB = NEON3SameFixed | NEON3SameUBit | NEON_SHSUB |
| |
| constexpr NEON3SameOp | NEON_URHADD |
| |
| constexpr NEON3SameOp | NEON_UMAX = NEON3SameFixed | NEON3SameUBit | NEON_SMAX |
| |
| constexpr NEON3SameOp | NEON_UMAXP = NEON3SameFixed | NEON3SameUBit | NEON_SMAXP |
| |
| constexpr NEON3SameOp | NEON_UMIN = NEON3SameFixed | NEON3SameUBit | NEON_SMIN |
| |
| constexpr NEON3SameOp | NEON_UMINP = NEON3SameFixed | NEON3SameUBit | NEON_SMINP |
| |
| constexpr NEON3SameOp | NEON_URSHL = NEON3SameFixed | NEON3SameUBit | NEON_SRSHL |
| |
| constexpr NEON3SameOp | NEON_UQADD = NEON3SameFixed | NEON3SameUBit | NEON_SQADD |
| |
| constexpr NEON3SameOp | NEON_UQRSHL |
| |
| constexpr NEON3SameOp | NEON_UQSHL = NEON3SameFixed | NEON3SameUBit | NEON_SQSHL |
| |
| constexpr NEON3SameOp | NEON_UQSUB = NEON3SameFixed | NEON3SameUBit | NEON_SQSUB |
| |
| constexpr NEON3SameOp | NEON_USHL = NEON3SameFixed | NEON3SameUBit | NEON_SSHL |
| |
| constexpr NEON3SameOp | NEON_SQDMULH = NEON3SameFixed | 0x0000B000 |
| |
| constexpr NEON3SameOp | NEON_SQRDMULH = NEON3SameFixed | 0x2000B000 |
| |
| constexpr NEON3SameOp | NEON3SameFPFixed = NEON3SameFixed | 0x0000C000 |
| |
| constexpr NEON3SameOp | NEON3SameFPFMask = NEON3SameFMask | 0x0000C000 |
| |
| constexpr NEON3SameOp | NEON3SameFPMask = NEON3SameMask | 0x00800000 |
| |
| constexpr NEON3SameOp | NEON_FADD = NEON3SameFixed | 0x0000D000 |
| |
| constexpr NEON3SameOp | NEON_FSUB = NEON3SameFixed | 0x0080D000 |
| |
| constexpr NEON3SameOp | NEON_FMUL = NEON3SameFixed | 0x2000D800 |
| |
| constexpr NEON3SameOp | NEON_FDIV = NEON3SameFixed | 0x2000F800 |
| |
| constexpr NEON3SameOp | NEON_FMAX = NEON3SameFixed | 0x0000F000 |
| |
| constexpr NEON3SameOp | NEON_FMAXNM = NEON3SameFixed | 0x0000C000 |
| |
| constexpr NEON3SameOp | NEON_FMAXP = NEON3SameFixed | 0x2000F000 |
| |
| constexpr NEON3SameOp | NEON_FMAXNMP = NEON3SameFixed | 0x2000C000 |
| |
| constexpr NEON3SameOp | NEON_FMIN = NEON3SameFixed | 0x0080F000 |
| |
| constexpr NEON3SameOp | NEON_FMINNM = NEON3SameFixed | 0x0080C000 |
| |
| constexpr NEON3SameOp | NEON_FMINP = NEON3SameFixed | 0x2080F000 |
| |
| constexpr NEON3SameOp | NEON_FMINNMP = NEON3SameFixed | 0x2080C000 |
| |
| constexpr NEON3SameOp | NEON_FMLA = NEON3SameFixed | 0x0000C800 |
| |
| constexpr NEON3SameOp | NEON_FMLS = NEON3SameFixed | 0x0080C800 |
| |
| constexpr NEON3SameOp | NEON_FMULX = NEON3SameFixed | 0x0000D800 |
| |
| constexpr NEON3SameOp | NEON_FRECPS = NEON3SameFixed | 0x0000F800 |
| |
| constexpr NEON3SameOp | NEON_FRSQRTS = NEON3SameFixed | 0x0080F800 |
| |
| constexpr NEON3SameOp | NEON_FABD = NEON3SameFixed | 0x2080D000 |
| |
| constexpr NEON3SameOp | NEON_FADDP = NEON3SameFixed | 0x2000D000 |
| |
| constexpr NEON3SameOp | NEON_FCMEQ = NEON3SameFixed | 0x0000E000 |
| |
| constexpr NEON3SameOp | NEON_FCMGE = NEON3SameFixed | 0x2000E000 |
| |
| constexpr NEON3SameOp | NEON_FCMGT = NEON3SameFixed | 0x2080E000 |
| |
| constexpr NEON3SameOp | NEON_FACGE = NEON3SameFixed | 0x2000E800 |
| |
| constexpr NEON3SameOp | NEON_FACGT = NEON3SameFixed | 0x2080E800 |
| |
| constexpr NEON3SameOp | NEON3SameHPMask = 0x0020C000 |
| |
| constexpr NEON3SameOp | NEON3SameHPFixed = 0x0E400400 |
| |
| constexpr NEON3SameOp | NEON3SameHPFMask = 0x9F400400 |
| |
| constexpr NEON3SameOp | NEON3SameLogicalFixed = NEON3SameFixed | 0x00001800 |
| |
| constexpr NEON3SameOp | NEON3SameLogicalFMask = NEON3SameFMask | 0x0000F800 |
| |
| constexpr NEON3SameOp | NEON3SameLogicalMask = 0xBFE0FC00 |
| |
| constexpr NEON3SameOp | NEON3SameLogicalFormatMask = NEON_Q |
| |
| constexpr NEON3SameOp | NEON_AND = NEON3SameLogicalFixed | 0x00000000 |
| |
| constexpr NEON3SameOp | NEON_ORR = NEON3SameLogicalFixed | 0x00A00000 |
| |
| constexpr NEON3SameOp | NEON_ORN = NEON3SameLogicalFixed | 0x00C00000 |
| |
| constexpr NEON3SameOp | NEON_EOR = NEON3SameLogicalFixed | 0x20000000 |
| |
| constexpr NEON3SameOp | NEON_BIC = NEON3SameLogicalFixed | 0x00400000 |
| |
| constexpr NEON3SameOp | NEON_BIF = NEON3SameLogicalFixed | 0x20C00000 |
| |
| constexpr NEON3SameOp | NEON_BIT = NEON3SameLogicalFixed | 0x20800000 |
| |
| constexpr NEON3SameOp | NEON_BSL = NEON3SameLogicalFixed | 0x20400000 |
| |
| constexpr NEON3DifferentOp | NEON3DifferentFixed = 0x0E200000 |
| |
| constexpr NEON3DifferentOp | NEON3DifferentDot = 0x0E800000 |
| |
| constexpr NEON3DifferentOp | NEON3DifferentFMask = 0x9F200C00 |
| |
| constexpr NEON3DifferentOp | NEON3DifferentMask = 0xFF20FC00 |
| |
| constexpr NEON3DifferentOp | NEON_ADDHN = NEON3DifferentFixed | 0x00004000 |
| |
| constexpr NEON3DifferentOp | NEON_ADDHN2 = NEON_ADDHN | NEON_Q |
| |
| constexpr NEON3DifferentOp | NEON_PMULL = NEON3DifferentFixed | 0x0000E000 |
| |
| constexpr NEON3DifferentOp | NEON_PMULL2 = NEON_PMULL | NEON_Q |
| |
| constexpr NEON3DifferentOp | NEON_RADDHN = NEON3DifferentFixed | 0x20004000 |
| |
| constexpr NEON3DifferentOp | NEON_RADDHN2 = NEON_RADDHN | NEON_Q |
| |
| constexpr NEON3DifferentOp | NEON_RSUBHN = NEON3DifferentFixed | 0x20006000 |
| |
| constexpr NEON3DifferentOp | NEON_RSUBHN2 = NEON_RSUBHN | NEON_Q |
| |
| constexpr NEON3DifferentOp | NEON_SABAL = NEON3DifferentFixed | 0x00005000 |
| |
| constexpr NEON3DifferentOp | NEON_SABAL2 = NEON_SABAL | NEON_Q |
| |
| constexpr NEON3DifferentOp | NEON_SABDL = NEON3DifferentFixed | 0x00007000 |
| |
| constexpr NEON3DifferentOp | NEON_SABDL2 = NEON_SABDL | NEON_Q |
| |
| constexpr NEON3DifferentOp | NEON_SADDL = NEON3DifferentFixed | 0x00000000 |
| |
| constexpr NEON3DifferentOp | NEON_SADDL2 = NEON_SADDL | NEON_Q |
| |
| constexpr NEON3DifferentOp | NEON_SADDW = NEON3DifferentFixed | 0x00001000 |
| |
| constexpr NEON3DifferentOp | NEON_SADDW2 = NEON_SADDW | NEON_Q |
| |
| constexpr NEON3DifferentOp | NEON_SMLAL = NEON3DifferentFixed | 0x00008000 |
| |
| constexpr NEON3DifferentOp | NEON_SMLAL2 = NEON_SMLAL | NEON_Q |
| |
| constexpr NEON3DifferentOp | NEON_SMLSL = NEON3DifferentFixed | 0x0000A000 |
| |
| constexpr NEON3DifferentOp | NEON_SMLSL2 = NEON_SMLSL | NEON_Q |
| |
| constexpr NEON3DifferentOp | NEON_SMULL = NEON3DifferentFixed | 0x0000C000 |
| |
| constexpr NEON3DifferentOp | NEON_SMULL2 = NEON_SMULL | NEON_Q |
| |
| constexpr NEON3DifferentOp | NEON_SSUBL = NEON3DifferentFixed | 0x00002000 |
| |
| constexpr NEON3DifferentOp | NEON_SSUBL2 = NEON_SSUBL | NEON_Q |
| |
| constexpr NEON3DifferentOp | NEON_SSUBW = NEON3DifferentFixed | 0x00003000 |
| |
| constexpr NEON3DifferentOp | NEON_SSUBW2 = NEON_SSUBW | NEON_Q |
| |
| constexpr NEON3DifferentOp | NEON_SQDMLAL = NEON3DifferentFixed | 0x00009000 |
| |
| constexpr NEON3DifferentOp | NEON_SQDMLAL2 = NEON_SQDMLAL | NEON_Q |
| |
| constexpr NEON3DifferentOp | NEON_SQDMLSL = NEON3DifferentFixed | 0x0000B000 |
| |
| constexpr NEON3DifferentOp | NEON_SQDMLSL2 = NEON_SQDMLSL | NEON_Q |
| |
| constexpr NEON3DifferentOp | NEON_SQDMULL = NEON3DifferentFixed | 0x0000D000 |
| |
| constexpr NEON3DifferentOp | NEON_SQDMULL2 = NEON_SQDMULL | NEON_Q |
| |
| constexpr NEON3DifferentOp | NEON_SUBHN = NEON3DifferentFixed | 0x00006000 |
| |
| constexpr NEON3DifferentOp | NEON_SUBHN2 = NEON_SUBHN | NEON_Q |
| |
| constexpr NEON3DifferentOp | NEON_UABAL = NEON_SABAL | NEON3SameUBit |
| |
| constexpr NEON3DifferentOp | NEON_UABAL2 = NEON_UABAL | NEON_Q |
| |
| constexpr NEON3DifferentOp | NEON_UABDL = NEON_SABDL | NEON3SameUBit |
| |
| constexpr NEON3DifferentOp | NEON_UABDL2 = NEON_UABDL | NEON_Q |
| |
| constexpr NEON3DifferentOp | NEON_UADDL = NEON_SADDL | NEON3SameUBit |
| |
| constexpr NEON3DifferentOp | NEON_UADDL2 = NEON_UADDL | NEON_Q |
| |
| constexpr NEON3DifferentOp | NEON_UADDW = NEON_SADDW | NEON3SameUBit |
| |
| constexpr NEON3DifferentOp | NEON_UADDW2 = NEON_UADDW | NEON_Q |
| |
| constexpr NEON3DifferentOp | NEON_UMLAL = NEON_SMLAL | NEON3SameUBit |
| |
| constexpr NEON3DifferentOp | NEON_UMLAL2 = NEON_UMLAL | NEON_Q |
| |
| constexpr NEON3DifferentOp | NEON_UMLSL = NEON_SMLSL | NEON3SameUBit |
| |
| constexpr NEON3DifferentOp | NEON_UMLSL2 = NEON_UMLSL | NEON_Q |
| |
| constexpr NEON3DifferentOp | NEON_UMULL = NEON_SMULL | NEON3SameUBit |
| |
| constexpr NEON3DifferentOp | NEON_UMULL2 = NEON_UMULL | NEON_Q |
| |
| constexpr NEON3DifferentOp | NEON_USUBL = NEON_SSUBL | NEON3SameUBit |
| |
| constexpr NEON3DifferentOp | NEON_USUBL2 = NEON_USUBL | NEON_Q |
| |
| constexpr NEON3DifferentOp | NEON_USUBW = NEON_SSUBW | NEON3SameUBit |
| |
| constexpr NEON3DifferentOp | NEON_USUBW2 = NEON_USUBW | NEON_Q |
| |
| constexpr NEON3ExtensionOp | NEON3ExtensionFixed = 0x0E008400 |
| |
| constexpr NEON3ExtensionOp | NEON3ExtensionFMask = 0x9F208400 |
| |
| constexpr NEON3ExtensionOp | NEON3ExtensionMask = 0xBF20FC00 |
| |
| constexpr NEON3ExtensionOp | NEON_SDOT = NEON3ExtensionFixed | 0x00001000 |
| |
| constexpr NEONAcrossLanesOp | NEONAcrossLanesFixed = 0x0E300800 |
| |
| constexpr NEONAcrossLanesOp | NEONAcrossLanesFMask = 0x9F3E0C00 |
| |
| constexpr NEONAcrossLanesOp | NEONAcrossLanesMask = 0xBF3FFC00 |
| |
| constexpr NEONAcrossLanesOp | NEON_ADDV = NEONAcrossLanesFixed | 0x0001B000 |
| |
| constexpr NEONAcrossLanesOp | NEON_SADDLV = NEONAcrossLanesFixed | 0x00003000 |
| |
| constexpr NEONAcrossLanesOp | NEON_UADDLV = NEONAcrossLanesFixed | 0x20003000 |
| |
| constexpr NEONAcrossLanesOp | NEON_SMAXV = NEONAcrossLanesFixed | 0x0000A000 |
| |
| constexpr NEONAcrossLanesOp | NEON_SMINV = NEONAcrossLanesFixed | 0x0001A000 |
| |
| constexpr NEONAcrossLanesOp | NEON_UMAXV = NEONAcrossLanesFixed | 0x2000A000 |
| |
| constexpr NEONAcrossLanesOp | NEON_UMINV = NEONAcrossLanesFixed | 0x2001A000 |
| |
| constexpr NEONAcrossLanesOp | NEONAcrossLanesFPFixed |
| |
| constexpr NEONAcrossLanesOp | NEONAcrossLanesFPFMask |
| |
| constexpr NEONAcrossLanesOp | NEONAcrossLanesFPMask |
| |
| constexpr NEONAcrossLanesOp | NEON_FMAXV = NEONAcrossLanesFPFixed | 0x2000F000 |
| |
| constexpr NEONAcrossLanesOp | NEON_FMINV = NEONAcrossLanesFPFixed | 0x2080F000 |
| |
| constexpr NEONAcrossLanesOp | NEON_FMAXNMV = NEONAcrossLanesFPFixed | 0x2000C000 |
| |
| constexpr NEONAcrossLanesOp | NEON_FMINNMV = NEONAcrossLanesFPFixed | 0x2080C000 |
| |
| constexpr NEONByIndexedElementOp | NEONByIndexedElementFixed = 0x0F000000 |
| |
| constexpr NEONByIndexedElementOp | NEONByIndexedElementFMask = 0x9F000400 |
| |
| constexpr NEONByIndexedElementOp | NEONByIndexedElementMask = 0xBF00F400 |
| |
| constexpr NEONByIndexedElementOp | NEON_MUL_byelement |
| |
| constexpr NEONByIndexedElementOp | NEON_MLA_byelement |
| |
| constexpr NEONByIndexedElementOp | NEON_MLS_byelement |
| |
| constexpr NEONByIndexedElementOp | NEON_SMULL_byelement |
| |
| constexpr NEONByIndexedElementOp | NEON_SMLAL_byelement |
| |
| constexpr NEONByIndexedElementOp | NEON_SMLSL_byelement |
| |
| constexpr NEONByIndexedElementOp | NEON_UMULL_byelement |
| |
| constexpr NEONByIndexedElementOp | NEON_UMLAL_byelement |
| |
| constexpr NEONByIndexedElementOp | NEON_UMLSL_byelement |
| |
| constexpr NEONByIndexedElementOp | NEON_SQDMULL_byelement |
| |
| constexpr NEONByIndexedElementOp | NEON_SQDMLAL_byelement |
| |
| constexpr NEONByIndexedElementOp | NEON_SQDMLSL_byelement |
| |
| constexpr NEONByIndexedElementOp | NEON_SQDMULH_byelement |
| |
| constexpr NEONByIndexedElementOp | NEON_SQRDMULH_byelement |
| |
| constexpr NEONByIndexedElementOp | NEONByIndexedElementFPFixed |
| |
| constexpr NEONByIndexedElementOp | NEONByIndexedElementFPMask |
| |
| constexpr NEONByIndexedElementOp | NEON_FMLA_byelement |
| |
| constexpr NEONByIndexedElementOp | NEON_FMLS_byelement |
| |
| constexpr NEONByIndexedElementOp | NEON_FMUL_byelement |
| |
| constexpr NEONByIndexedElementOp | NEON_FMULX_byelement |
| |
| constexpr NEONModifiedImmediateOp | NEONModifiedImmediateFixed = 0x0F000400 |
| |
| constexpr NEONModifiedImmediateOp | NEONModifiedImmediateFMask = 0x9FF80400 |
| |
| constexpr NEONModifiedImmediateOp | NEONModifiedImmediateOpBit = 0x20000000 |
| |
| constexpr NEONModifiedImmediateOp | NEONModifiedImmediate_MOVI |
| |
| constexpr NEONModifiedImmediateOp | NEONModifiedImmediate_MVNI |
| |
| constexpr NEONModifiedImmediateOp | NEONModifiedImmediate_ORR |
| |
| constexpr NEONModifiedImmediateOp | NEONModifiedImmediate_BIC |
| |
| constexpr NEONExtractOp | NEONExtractFixed = 0x2E000000 |
| |
| constexpr NEONExtractOp | NEONExtractFMask = 0xBF208400 |
| |
| constexpr NEONExtractOp | NEONExtractMask = 0xBFE08400 |
| |
| constexpr NEONExtractOp | NEON_EXT = NEONExtractFixed | 0x00000000 |
| |
| constexpr NEONLoadStoreMultiOp | NEONLoadStoreMultiL = 0x00400000 |
| |
| constexpr NEONLoadStoreMultiOp | NEONLoadStoreMulti1_1v = 0x00007000 |
| |
| constexpr NEONLoadStoreMultiOp | NEONLoadStoreMulti1_2v = 0x0000A000 |
| |
| constexpr NEONLoadStoreMultiOp | NEONLoadStoreMulti1_3v = 0x00006000 |
| |
| constexpr NEONLoadStoreMultiOp | NEONLoadStoreMulti1_4v = 0x00002000 |
| |
| constexpr NEONLoadStoreMultiOp | NEONLoadStoreMulti2 = 0x00008000 |
| |
| constexpr NEONLoadStoreMultiOp | NEONLoadStoreMulti3 = 0x00004000 |
| |
| constexpr NEONLoadStoreMultiOp | NEONLoadStoreMulti4 = 0x00000000 |
| |
| constexpr NEONLoadStoreMultiStructOp | NEONLoadStoreMultiStructFixed = 0x0C000000 |
| |
| constexpr NEONLoadStoreMultiStructOp | NEONLoadStoreMultiStructFMask = 0xBFBF0000 |
| |
| constexpr NEONLoadStoreMultiStructOp | NEONLoadStoreMultiStructMask = 0xBFFFF000 |
| |
| constexpr NEONLoadStoreMultiStructOp | NEONLoadStoreMultiStructStore |
| |
| constexpr NEONLoadStoreMultiStructOp | NEONLoadStoreMultiStructLoad |
| |
| constexpr NEONLoadStoreMultiStructOp | NEON_LD1_1v |
| |
| constexpr NEONLoadStoreMultiStructOp | NEON_LD1_2v |
| |
| constexpr NEONLoadStoreMultiStructOp | NEON_LD1_3v |
| |
| constexpr NEONLoadStoreMultiStructOp | NEON_LD1_4v |
| |
| constexpr NEONLoadStoreMultiStructOp | NEON_LD2 |
| |
| constexpr NEONLoadStoreMultiStructOp | NEON_LD3 |
| |
| constexpr NEONLoadStoreMultiStructOp | NEON_LD4 |
| |
| constexpr NEONLoadStoreMultiStructOp | NEON_ST1_1v |
| |
| constexpr NEONLoadStoreMultiStructOp | NEON_ST1_2v |
| |
| constexpr NEONLoadStoreMultiStructOp | NEON_ST1_3v |
| |
| constexpr NEONLoadStoreMultiStructOp | NEON_ST1_4v |
| |
| constexpr NEONLoadStoreMultiStructOp | NEON_ST2 |
| |
| constexpr NEONLoadStoreMultiStructOp | NEON_ST3 |
| |
| constexpr NEONLoadStoreMultiStructOp | NEON_ST4 |
| |
| constexpr NEONLoadStoreMultiStructPostIndexOp | NEONLoadStoreMultiStructPostIndexFixed = 0x0C800000 |
| |
| constexpr NEONLoadStoreMultiStructPostIndexOp | NEONLoadStoreMultiStructPostIndexFMask = 0xBFA00000 |
| |
| constexpr NEONLoadStoreMultiStructPostIndexOp | NEONLoadStoreMultiStructPostIndexMask = 0xBFE0F000 |
| |
| constexpr NEONLoadStoreMultiStructPostIndexOp | NEONLoadStoreMultiStructPostIndex = 0x00800000 |
| |
| constexpr NEONLoadStoreMultiStructPostIndexOp | NEON_LD1_1v_post |
| |
| constexpr NEONLoadStoreMultiStructPostIndexOp | NEON_LD1_2v_post |
| |
| constexpr NEONLoadStoreMultiStructPostIndexOp | NEON_LD1_3v_post |
| |
| constexpr NEONLoadStoreMultiStructPostIndexOp | NEON_LD1_4v_post |
| |
| constexpr NEONLoadStoreMultiStructPostIndexOp | NEON_LD2_post |
| |
| constexpr NEONLoadStoreMultiStructPostIndexOp | NEON_LD3_post |
| |
| constexpr NEONLoadStoreMultiStructPostIndexOp | NEON_LD4_post |
| |
| constexpr NEONLoadStoreMultiStructPostIndexOp | NEON_ST1_1v_post |
| |
| constexpr NEONLoadStoreMultiStructPostIndexOp | NEON_ST1_2v_post |
| |
| constexpr NEONLoadStoreMultiStructPostIndexOp | NEON_ST1_3v_post |
| |
| constexpr NEONLoadStoreMultiStructPostIndexOp | NEON_ST1_4v_post |
| |
| constexpr NEONLoadStoreMultiStructPostIndexOp | NEON_ST2_post |
| |
| constexpr NEONLoadStoreMultiStructPostIndexOp | NEON_ST3_post |
| |
| constexpr NEONLoadStoreMultiStructPostIndexOp | NEON_ST4_post |
| |
| constexpr NEONLoadStoreSingleOp | NEONLoadStoreSingle1 = 0x00000000 |
| |
| constexpr NEONLoadStoreSingleOp | NEONLoadStoreSingle2 = 0x00200000 |
| |
| constexpr NEONLoadStoreSingleOp | NEONLoadStoreSingle3 = 0x00002000 |
| |
| constexpr NEONLoadStoreSingleOp | NEONLoadStoreSingle4 = 0x00202000 |
| |
| constexpr NEONLoadStoreSingleOp | NEONLoadStoreSingleL = 0x00400000 |
| |
| constexpr NEONLoadStoreSingleOp | NEONLoadStoreSingle_b = 0x00000000 |
| |
| constexpr NEONLoadStoreSingleOp | NEONLoadStoreSingle_h = 0x00004000 |
| |
| constexpr NEONLoadStoreSingleOp | NEONLoadStoreSingle_s = 0x00008000 |
| |
| constexpr NEONLoadStoreSingleOp | NEONLoadStoreSingle_d = 0x00008400 |
| |
| constexpr NEONLoadStoreSingleOp | NEONLoadStoreSingleAllLanes = 0x0000C000 |
| |
| constexpr NEONLoadStoreSingleOp | NEONLoadStoreSingleLenMask = 0x00202000 |
| |
| constexpr NEONLoadStoreSingleStructOp | NEONLoadStoreSingleStructFixed |
| |
| constexpr NEONLoadStoreSingleStructOp | NEONLoadStoreSingleStructFMask |
| |
| constexpr NEONLoadStoreSingleStructOp | NEONLoadStoreSingleStructMask |
| |
| constexpr NEONLoadStoreSingleStructOp | NEONLoadStoreSingleStructStore |
| |
| constexpr NEONLoadStoreSingleStructOp | NEONLoadStoreSingleStructLoad |
| |
| constexpr NEONLoadStoreSingleStructOp | NEONLoadStoreSingleStructLoad1 |
| |
| constexpr NEONLoadStoreSingleStructOp | NEONLoadStoreSingleStructLoad2 |
| |
| constexpr NEONLoadStoreSingleStructOp | NEONLoadStoreSingleStructLoad3 |
| |
| constexpr NEONLoadStoreSingleStructOp | NEONLoadStoreSingleStructLoad4 |
| |
| constexpr NEONLoadStoreSingleStructOp | NEONLoadStoreSingleStructStore1 |
| |
| constexpr NEONLoadStoreSingleStructOp | NEONLoadStoreSingleStructStore2 |
| |
| constexpr NEONLoadStoreSingleStructOp | NEONLoadStoreSingleStructStore3 |
| |
| constexpr NEONLoadStoreSingleStructOp | NEONLoadStoreSingleStructStore4 |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_LD1_b |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_LD1_h |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_LD1_s |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_LD1_d |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_LD1R |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_ST1_b |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_ST1_h |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_ST1_s |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_ST1_d |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_LD2_b |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_LD2_h |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_LD2_s |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_LD2_d |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_LD2R |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_ST2_b |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_ST2_h |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_ST2_s |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_ST2_d |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_LD3_b |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_LD3_h |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_LD3_s |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_LD3_d |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_LD3R |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_ST3_b |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_ST3_h |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_ST3_s |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_ST3_d |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_LD4_b |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_LD4_h |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_LD4_s |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_LD4_d |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_LD4R |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_ST4_b |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_ST4_h |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_ST4_s |
| |
| constexpr NEONLoadStoreSingleStructOp | NEON_ST4_d |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEONLoadStoreSingleStructPostIndexFixed = 0x0D800000 |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEONLoadStoreSingleStructPostIndexFMask = 0xBF800000 |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEONLoadStoreSingleStructPostIndexMask = 0xBFE0E000 |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEONLoadStoreSingleStructPostIndex = 0x00800000 |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_LD1_b_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_LD1_h_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_LD1_s_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_LD1_d_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_LD1R_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_ST1_b_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_ST1_h_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_ST1_s_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_ST1_d_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_LD2_b_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_LD2_h_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_LD2_s_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_LD2_d_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_LD2R_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_ST2_b_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_ST2_h_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_ST2_s_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_ST2_d_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_LD3_b_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_LD3_h_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_LD3_s_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_LD3_d_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_LD3R_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_ST3_b_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_ST3_h_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_ST3_s_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_ST3_d_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_LD4_b_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_LD4_h_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_LD4_s_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_LD4_d_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_LD4R_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_ST4_b_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_ST4_h_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_ST4_s_post |
| |
| constexpr NEONLoadStoreSingleStructPostIndexOp | NEON_ST4_d_post |
| |
| constexpr NEONCopyOp | NEONCopyFixed = 0x0E000400 |
| |
| constexpr NEONCopyOp | NEONCopyFMask = 0x9FE08400 |
| |
| constexpr NEONCopyOp | NEONCopyMask = 0x3FE08400 |
| |
| constexpr NEONCopyOp | NEONCopyInsElementMask = NEONCopyMask | 0x40000000 |
| |
| constexpr NEONCopyOp | NEONCopyInsGeneralMask = NEONCopyMask | 0x40007800 |
| |
| constexpr NEONCopyOp | NEONCopyDupElementMask = NEONCopyMask | 0x20007800 |
| |
| constexpr NEONCopyOp | NEONCopyDupGeneralMask = NEONCopyDupElementMask |
| |
| constexpr NEONCopyOp | NEONCopyUmovMask = NEONCopyMask | 0x20007800 |
| |
| constexpr NEONCopyOp | NEONCopySmovMask = NEONCopyMask | 0x20007800 |
| |
| constexpr NEONCopyOp | NEON_INS_ELEMENT = NEONCopyFixed | 0x60000000 |
| |
| constexpr NEONCopyOp | NEON_INS_GENERAL = NEONCopyFixed | 0x40001800 |
| |
| constexpr NEONCopyOp | NEON_DUP_ELEMENT = NEONCopyFixed | 0x00000000 |
| |
| constexpr NEONCopyOp | NEON_DUP_GENERAL = NEONCopyFixed | 0x00000800 |
| |
| constexpr NEONCopyOp | NEON_SMOV = NEONCopyFixed | 0x00002800 |
| |
| constexpr NEONCopyOp | NEON_UMOV = NEONCopyFixed | 0x00003800 |
| |
| constexpr NEONScalarByIndexedElementOp | NEONScalarByIndexedElementFixed |
| |
| constexpr NEONScalarByIndexedElementOp | NEONScalarByIndexedElementFMask |
| |
| constexpr NEONScalarByIndexedElementOp | NEONScalarByIndexedElementMask |
| |
| constexpr NEONScalarByIndexedElementOp | NEON_SQDMLAL_byelement_scalar |
| |
| constexpr NEONScalarByIndexedElementOp | NEON_SQDMLSL_byelement_scalar |
| |
| constexpr NEONScalarByIndexedElementOp | NEON_SQDMULL_byelement_scalar |
| |
| constexpr NEONScalarByIndexedElementOp | NEON_SQDMULH_byelement_scalar |
| |
| constexpr NEONScalarByIndexedElementOp | NEON_SQRDMULH_byelement_scalar |
| |
| constexpr NEONScalarByIndexedElementOp | NEONScalarByIndexedElementFPFixed |
| |
| constexpr NEONScalarByIndexedElementOp | NEONScalarByIndexedElementFPMask |
| |
| constexpr NEONScalarByIndexedElementOp | NEON_FMLA_byelement_scalar |
| |
| constexpr NEONScalarByIndexedElementOp | NEON_FMLS_byelement_scalar |
| |
| constexpr NEONScalarByIndexedElementOp | NEON_FMUL_byelement_scalar |
| |
| constexpr NEONScalarByIndexedElementOp | NEON_FMULX_byelement_scalar |
| |
| constexpr NEONShiftImmediateOp | NEONShiftImmediateFixed = 0x0F000400 |
| |
| constexpr NEONShiftImmediateOp | NEONShiftImmediateFMask = 0x9F800400 |
| |
| constexpr NEONShiftImmediateOp | NEONShiftImmediateMask = 0xBF80FC00 |
| |
| constexpr NEONShiftImmediateOp | NEONShiftImmediateUBit = 0x20000000 |
| |
| constexpr NEONShiftImmediateOp | NEON_SHL = NEONShiftImmediateFixed | 0x00005000 |
| |
| constexpr NEONShiftImmediateOp | NEON_SSHLL |
| |
| constexpr NEONShiftImmediateOp | NEON_USHLL |
| |
| constexpr NEONShiftImmediateOp | NEON_SLI = NEONShiftImmediateFixed | 0x20005000 |
| |
| constexpr NEONShiftImmediateOp | NEON_SRI = NEONShiftImmediateFixed | 0x20004000 |
| |
| constexpr NEONShiftImmediateOp | NEON_SHRN = NEONShiftImmediateFixed | 0x00008000 |
| |
| constexpr NEONShiftImmediateOp | NEON_RSHRN |
| |
| constexpr NEONShiftImmediateOp | NEON_UQSHRN |
| |
| constexpr NEONShiftImmediateOp | NEON_UQRSHRN |
| |
| constexpr NEONShiftImmediateOp | NEON_SQSHRN |
| |
| constexpr NEONShiftImmediateOp | NEON_SQRSHRN |
| |
| constexpr NEONShiftImmediateOp | NEON_SQSHRUN |
| |
| constexpr NEONShiftImmediateOp | NEON_SQRSHRUN |
| |
| constexpr NEONShiftImmediateOp | NEON_SSHR = NEONShiftImmediateFixed | 0x00000000 |
| |
| constexpr NEONShiftImmediateOp | NEON_SRSHR |
| |
| constexpr NEONShiftImmediateOp | NEON_USHR = NEONShiftImmediateFixed | 0x20000000 |
| |
| constexpr NEONShiftImmediateOp | NEON_URSHR |
| |
| constexpr NEONShiftImmediateOp | NEON_SSRA = NEONShiftImmediateFixed | 0x00001000 |
| |
| constexpr NEONShiftImmediateOp | NEON_SRSRA |
| |
| constexpr NEONShiftImmediateOp | NEON_USRA = NEONShiftImmediateFixed | 0x20001000 |
| |
| constexpr NEONShiftImmediateOp | NEON_URSRA |
| |
| constexpr NEONShiftImmediateOp | NEON_SQSHLU |
| |
| constexpr NEONShiftImmediateOp | NEON_SCVTF_imm |
| |
| constexpr NEONShiftImmediateOp | NEON_UCVTF_imm |
| |
| constexpr NEONShiftImmediateOp | NEON_FCVTZS_imm |
| |
| constexpr NEONShiftImmediateOp | NEON_FCVTZU_imm |
| |
| constexpr NEONShiftImmediateOp | NEON_SQSHL_imm |
| |
| constexpr NEONShiftImmediateOp | NEON_UQSHL_imm |
| |
| constexpr NEONScalarCopyOp | NEONScalarCopyFixed = 0x5E000400 |
| |
| constexpr NEONScalarCopyOp | NEONScalarCopyFMask = 0xDFE08400 |
| |
| constexpr NEONScalarCopyOp | NEONScalarCopyMask = 0xFFE0FC00 |
| |
| constexpr NEONScalarCopyOp | NEON_DUP_ELEMENT_scalar |
| |
| constexpr NEONScalarPairwiseOp | NEONScalarPairwiseFixed = 0x5E300800 |
| |
| constexpr NEONScalarPairwiseOp | NEONScalarPairwiseFMask = 0xDF3E0C00 |
| |
| constexpr NEONScalarPairwiseOp | NEONScalarPairwiseMask = 0xFFB1F800 |
| |
| constexpr NEONScalarPairwiseOp | NEON_ADDP_scalar |
| |
| constexpr NEONScalarPairwiseOp | NEON_FMAXNMP_scalar |
| |
| constexpr NEONScalarPairwiseOp | NEON_FMINNMP_scalar |
| |
| constexpr NEONScalarPairwiseOp | NEON_FADDP_scalar |
| |
| constexpr NEONScalarPairwiseOp | NEON_FMAXP_scalar |
| |
| constexpr NEONScalarPairwiseOp | NEON_FMINP_scalar |
| |
| constexpr NEONScalarShiftImmediateOp | NEONScalarShiftImmediateFixed = 0x5F000400 |
| |
| constexpr NEONScalarShiftImmediateOp | NEONScalarShiftImmediateFMask = 0xDF800400 |
| |
| constexpr NEONScalarShiftImmediateOp | NEONScalarShiftImmediateMask = 0xFF80FC00 |
| |
| constexpr NEONScalarShiftImmediateOp | NEON_SHL_scalar |
| |
| constexpr NEONScalarShiftImmediateOp | NEON_SLI_scalar |
| |
| constexpr NEONScalarShiftImmediateOp | NEON_SRI_scalar |
| |
| constexpr NEONScalarShiftImmediateOp | NEON_SSHR_scalar |
| |
| constexpr NEONScalarShiftImmediateOp | NEON_USHR_scalar |
| |
| constexpr NEONScalarShiftImmediateOp | NEON_SRSHR_scalar |
| |
| constexpr NEONScalarShiftImmediateOp | NEON_URSHR_scalar |
| |
| constexpr NEONScalarShiftImmediateOp | NEON_SSRA_scalar |
| |
| constexpr NEONScalarShiftImmediateOp | NEON_USRA_scalar |
| |
| constexpr NEONScalarShiftImmediateOp | NEON_SRSRA_scalar |
| |
| constexpr NEONScalarShiftImmediateOp | NEON_URSRA_scalar |
| |
| constexpr NEONScalarShiftImmediateOp | NEON_UQSHRN_scalar |
| |
| constexpr NEONScalarShiftImmediateOp | NEON_UQRSHRN_scalar |
| |
| constexpr NEONScalarShiftImmediateOp | NEON_SQSHRN_scalar |
| |
| constexpr NEONScalarShiftImmediateOp | NEON_SQRSHRN_scalar |
| |
| constexpr NEONScalarShiftImmediateOp | NEON_SQSHRUN_scalar |
| |
| constexpr NEONScalarShiftImmediateOp | NEON_SQRSHRUN_scalar |
| |
| constexpr NEONScalarShiftImmediateOp | NEON_SQSHLU_scalar |
| |
| constexpr NEONScalarShiftImmediateOp | NEON_SQSHL_imm_scalar |
| |
| constexpr NEONScalarShiftImmediateOp | NEON_UQSHL_imm_scalar |
| |
| constexpr NEONScalarShiftImmediateOp | NEON_SCVTF_imm_scalar |
| |
| constexpr NEONScalarShiftImmediateOp | NEON_UCVTF_imm_scalar |
| |
| constexpr NEONScalarShiftImmediateOp | NEON_FCVTZS_imm_scalar |
| |
| constexpr NEONScalarShiftImmediateOp | NEON_FCVTZU_imm_scalar |
| |
| constexpr NEONTableOp | NEONTableFixed = 0x0E000000 |
| |
| constexpr NEONTableOp | NEONTableFMask = 0xBF208C00 |
| |
| constexpr NEONTableOp | NEONTableExt = 0x00001000 |
| |
| constexpr NEONTableOp | NEONTableMask = 0xBF20FC00 |
| |
| constexpr NEONTableOp | NEON_TBL_1v = NEONTableFixed | 0x00000000 |
| |
| constexpr NEONTableOp | NEON_TBL_2v = NEONTableFixed | 0x00002000 |
| |
| constexpr NEONTableOp | NEON_TBL_3v = NEONTableFixed | 0x00004000 |
| |
| constexpr NEONTableOp | NEON_TBL_4v = NEONTableFixed | 0x00006000 |
| |
| constexpr NEONTableOp | NEON_TBX_1v = NEON_TBL_1v | NEONTableExt |
| |
| constexpr NEONTableOp | NEON_TBX_2v = NEON_TBL_2v | NEONTableExt |
| |
| constexpr NEONTableOp | NEON_TBX_3v = NEON_TBL_3v | NEONTableExt |
| |
| constexpr NEONTableOp | NEON_TBX_4v = NEON_TBL_4v | NEONTableExt |
| |
| constexpr NEONSHA3Op | NEONSHA3Fixed = 0xce000000 |
| |
| constexpr NEONSHA3Op | NEONSHA3FMask = 0xce000000 |
| |
| constexpr NEONSHA3Op | NEONSHA3Mask = 0xcee00000 |
| |
| constexpr NEONSHA3Op | NEON_BCAX = NEONSHA3Fixed | 0x00200000 |
| |
| constexpr NEONSHA3Op | NEON_EOR3 = NEONSHA3Fixed |
| |
| constexpr NEONPermOp | NEONPermFixed = 0x0E000800 |
| |
| constexpr NEONPermOp | NEONPermFMask = 0xBF208C00 |
| |
| constexpr NEONPermOp | NEONPermMask = 0x3F20FC00 |
| |
| constexpr NEONPermOp | NEON_UZP1 = NEONPermFixed | 0x00001000 |
| |
| constexpr NEONPermOp | NEON_TRN1 = NEONPermFixed | 0x00002000 |
| |
| constexpr NEONPermOp | NEON_ZIP1 = NEONPermFixed | 0x00003000 |
| |
| constexpr NEONPermOp | NEON_UZP2 = NEONPermFixed | 0x00005000 |
| |
| constexpr NEONPermOp | NEON_TRN2 = NEONPermFixed | 0x00006000 |
| |
| constexpr NEONPermOp | NEON_ZIP2 = NEONPermFixed | 0x00007000 |
| |
| constexpr NEONScalar2RegMiscOp | NEONScalar2RegMiscFixed = 0x5E200800 |
| |
| constexpr NEONScalar2RegMiscOp | NEONScalar2RegMiscFMask = 0xDF3E0C00 |
| |
| constexpr NEONScalar2RegMiscOp | NEONScalar2RegMiscMask |
| |
| constexpr NEONScalar2RegMiscOp | NEON_CMGT_zero_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_CMEQ_zero_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_CMLT_zero_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_CMGE_zero_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_CMLE_zero_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_ABS_scalar = NEON_Q | NEONScalar | NEON_ABS |
| |
| constexpr NEONScalar2RegMiscOp | NEON_SQABS_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_NEG_scalar = NEON_Q | NEONScalar | NEON_NEG |
| |
| constexpr NEONScalar2RegMiscOp | NEON_SQNEG_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_SQXTN_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_UQXTN_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_SQXTUN_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_SUQADD_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_USQADD_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEONScalar2RegMiscOpcode = NEON2RegMiscOpcode |
| |
| constexpr NEONScalar2RegMiscOp | NEON_NEG_scalar_opcode |
| |
| constexpr NEONScalar2RegMiscOp | NEONScalar2RegMiscFPMask |
| |
| constexpr NEONScalar2RegMiscOp | NEON_FRSQRTE_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_FRECPE_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_SCVTF_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_UCVTF_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_FCMGT_zero_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_FCMEQ_zero_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_FCMLT_zero_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_FCMGE_zero_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_FCMLE_zero_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_FRECPX_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_FCVTNS_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_FCVTNU_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_FCVTPS_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_FCVTPU_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_FCVTMS_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_FCVTMU_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_FCVTZS_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_FCVTZU_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_FCVTAS_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_FCVTAU_scalar |
| |
| constexpr NEONScalar2RegMiscOp | NEON_FCVTXN_scalar |
| |
| constexpr NEONScalar3SameOp | NEONScalar3SameFixed = 0x5E200400 |
| |
| constexpr NEONScalar3SameOp | NEONScalar3SameFMask = 0xDF200400 |
| |
| constexpr NEONScalar3SameOp | NEONScalar3SameMask = 0xFF20FC00 |
| |
| constexpr NEONScalar3SameOp | NEON_ADD_scalar = NEON_Q | NEONScalar | NEON_ADD |
| |
| constexpr NEONScalar3SameOp | NEON_CMEQ_scalar = NEON_Q | NEONScalar | NEON_CMEQ |
| |
| constexpr NEONScalar3SameOp | NEON_CMGE_scalar = NEON_Q | NEONScalar | NEON_CMGE |
| |
| constexpr NEONScalar3SameOp | NEON_CMGT_scalar = NEON_Q | NEONScalar | NEON_CMGT |
| |
| constexpr NEONScalar3SameOp | NEON_CMHI_scalar = NEON_Q | NEONScalar | NEON_CMHI |
| |
| constexpr NEONScalar3SameOp | NEON_CMHS_scalar = NEON_Q | NEONScalar | NEON_CMHS |
| |
| constexpr NEONScalar3SameOp | NEON_CMTST_scalar |
| |
| constexpr NEONScalar3SameOp | NEON_SUB_scalar = NEON_Q | NEONScalar | NEON_SUB |
| |
| constexpr NEONScalar3SameOp | NEON_UQADD_scalar |
| |
| constexpr NEONScalar3SameOp | NEON_SQADD_scalar |
| |
| constexpr NEONScalar3SameOp | NEON_UQSUB_scalar |
| |
| constexpr NEONScalar3SameOp | NEON_SQSUB_scalar |
| |
| constexpr NEONScalar3SameOp | NEON_USHL_scalar = NEON_Q | NEONScalar | NEON_USHL |
| |
| constexpr NEONScalar3SameOp | NEON_SSHL_scalar = NEON_Q | NEONScalar | NEON_SSHL |
| |
| constexpr NEONScalar3SameOp | NEON_UQSHL_scalar |
| |
| constexpr NEONScalar3SameOp | NEON_SQSHL_scalar |
| |
| constexpr NEONScalar3SameOp | NEON_URSHL_scalar |
| |
| constexpr NEONScalar3SameOp | NEON_SRSHL_scalar |
| |
| constexpr NEONScalar3SameOp | NEON_UQRSHL_scalar |
| |
| constexpr NEONScalar3SameOp | NEON_SQRSHL_scalar |
| |
| constexpr NEONScalar3SameOp | NEON_SQDMULH_scalar |
| |
| constexpr NEONScalar3SameOp | NEON_SQRDMULH_scalar |
| |
| constexpr NEONScalar3SameOp | NEONScalar3SameFPFixed |
| |
| constexpr NEONScalar3SameOp | NEONScalar3SameFPFMask |
| |
| constexpr NEONScalar3SameOp | NEONScalar3SameFPMask |
| |
| constexpr NEONScalar3SameOp | NEON_FACGE_scalar |
| |
| constexpr NEONScalar3SameOp | NEON_FACGT_scalar |
| |
| constexpr NEONScalar3SameOp | NEON_FCMEQ_scalar |
| |
| constexpr NEONScalar3SameOp | NEON_FCMGE_scalar |
| |
| constexpr NEONScalar3SameOp | NEON_FCMGT_scalar |
| |
| constexpr NEONScalar3SameOp | NEON_FMULX_scalar |
| |
| constexpr NEONScalar3SameOp | NEON_FRECPS_scalar |
| |
| constexpr NEONScalar3SameOp | NEON_FRSQRTS_scalar |
| |
| constexpr NEONScalar3SameOp | NEON_FABD_scalar = NEON_Q | NEONScalar | NEON_FABD |
| |
| constexpr NEONScalar3DiffOp | NEONScalar3DiffFixed = 0x5E200000 |
| |
| constexpr NEONScalar3DiffOp | NEONScalar3DiffFMask = 0xDF200C00 |
| |
| constexpr NEONScalar3DiffOp | NEONScalar3DiffMask |
| |
| constexpr NEONScalar3DiffOp | NEON_SQDMLAL_scalar |
| |
| constexpr NEONScalar3DiffOp | NEON_SQDMLSL_scalar |
| |
| constexpr NEONScalar3DiffOp | NEON_SQDMULL_scalar |
| |
| constexpr UnimplementedOp | UnimplementedFixed = 0x00000000 |
| |
| constexpr UnimplementedOp | UnimplementedFMask = 0x00000000 |
| |
| constexpr UnallocatedOp | UnallocatedFixed = 0x00000000 |
| |
| constexpr UnallocatedOp | UnallocatedFMask = 0x00000000 |
| |
| const float16 | kFP16PositiveInfinity |
| |
| const float16 | kFP16NegativeInfinity |
| |
| V8_EXPORT_PRIVATE const float | kFP32PositiveInfinity |
| |
| V8_EXPORT_PRIVATE const float | kFP32NegativeInfinity |
| |
| V8_EXPORT_PRIVATE const double | kFP64PositiveInfinity |
| |
| V8_EXPORT_PRIVATE const double | kFP64NegativeInfinity |
| |
| V8_EXPORT_PRIVATE const double | kFP64SignallingNaN |
| |
| V8_EXPORT_PRIVATE const float | kFP32SignallingNaN |
| |
| V8_EXPORT_PRIVATE const double | kFP64QuietNaN |
| |
| V8_EXPORT_PRIVATE const float | kFP32QuietNaN |
| |
| V8_EXPORT_PRIVATE const double | kFP64DefaultNaN |
| |
| V8_EXPORT_PRIVATE const float | kFP32DefaultNaN |
| |
| const float16 | kFP16DefaultNaN |
| |
| const Instr | kImmExceptionIsRedirectedCall = 0xca11 |
| |
| const Instr | kImmExceptionIsUnreachable = 0xdebf |
| |
| const Instr | kImmExceptionIsSwitchStackLimit = 0x5915 |
| |
| const Instr | kImmExceptionIsPrintf = 0xdeb1 |
| |
| const unsigned | kPrintfArgCountOffset = 1 * kInstrSize |
| |
| const unsigned | kPrintfArgPatternListOffset = 2 * kInstrSize |
| |
| const unsigned | kPrintfLength = 3 * kInstrSize |
| |
| const unsigned | kPrintfMaxArgCount = 4 |
| |
| static const unsigned | kPrintfArgPatternBits = 2 |
| |
| const Instr | kImmExceptionIsDebug = 0xdeb0 |
| |
| const unsigned | kDebugCodeOffset = 1 * kInstrSize |
| |
| const unsigned | kDebugParamsOffset = 2 * kInstrSize |
| |
| const unsigned | kDebugMessageOffset = 3 * kInstrSize |
| |
| const unsigned | kDebuggerTracingDirectivesMask = 3 << 6 |
| |
| static const unsigned | kNEONFormatMaxBits = 6 |
| |
| | DEFINE_STORE_FUNCTION |
| |
| | St |
| |
| constexpr Register | NoReg = Register::no_reg() |
| |
| constexpr VRegister | NoVReg = VRegister::no_reg() |
| |
| constexpr CPURegister | NoCPUReg = CPURegister::no_reg() |
| |
| constexpr int | kFPRegisterPassedArguments = 8 |
| |
| constexpr Register | kWasmTrapHandlerFaultAddressRegister = x16 |
| |
| constexpr Register | kSimulatorHltArgument = x16 |
| |
| constexpr int | kRegListSizeInBits = sizeof(RegList) * kBitsPerByte |
| |
| static const int | kInitialCacheSize = 64 |
| |
| static bool | force_gc_during_next_merge_for_testing_ = false |
| |
| constexpr double | double_min_int_constant = kMinInt |
| |
| constexpr double | double_one_half_constant = 0.5 |
| |
| constexpr uint64_t | double_the_hole_nan_constant = kHoleNanInt64 |
| |
| constexpr double | double_uint32_bias_constant |
| |
| constexpr struct alignas(16) | fp16_absolute_constant |
| |
| constexpr struct alignas(16) | fp16_negate_constant |
| |
| constexpr struct alignas(16) | float_absolute_constant = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF} |
| |
| constexpr struct alignas(16) | float_negate_constant = {0x80000000, 0x80000000, 0x80000000, 0x80000000} |
| |
| constexpr struct alignas(16) | double_absolute_constant |
| |
| constexpr struct alignas(16) | double_negate_constant |
| |
| constexpr struct alignas(16) | wasm_i8x16_swizzle_mask |
| |
| constexpr struct alignas(16) | wasm_i8x16_popcnt_mask |
| |
| constexpr struct alignas(16) | wasm_i8x16_splat_0x01 |
| |
| constexpr struct alignas(16) | wasm_i8x16_splat_0x0f |
| |
| constexpr struct alignas(16) | wasm_i8x16_splat_0x33 |
| |
| constexpr struct alignas(16) | wasm_i8x16_splat_0x55 |
| |
| constexpr struct alignas(16) | wasm_i16x8_splat_0x0001 |
| |
| constexpr struct alignas(16) | wasm_f64x2_convert_low_i32x4_u_int_mask |
| |
| constexpr struct alignas(16) | wasm_double_2_power_52 |
| |
| constexpr struct alignas(16) | wasm_int32_max_as_double |
| |
| constexpr struct alignas(16) | wasm_uint32_max_as_double |
| |
| constexpr struct alignas(16) | wasm_int32_overflow_as_float |
| |
| constexpr struct alignas(32) | wasm_i32x8_int32_overflow_as_float |
| |
| | fp64_to_fp16_raw_bits |
| |
| int | character |
| |
| size_t | search_length |
| |
| static constexpr uint64_t | kLog10OffsetTable [] |
| |
| int32_t | key |
| |
| size_t | length |
| |
| static constexpr uint32_t | kFalse = 0 |
| |
| double | y { return Modulo(x, y) |
| |
| | tsan_relaxed_store_function_8_bits |
| |
| | tsan_relaxed_store_function_32_bits |
| |
| | tsan_seq_cst_store_function_8_bits |
| |
| | tsan_seq_cst_store_function_32_bits |
| |
| | tsan_relaxed_load_function_32_bits |
| |
| Address | raw_context |
| |
| | return |
| |
| constexpr int | kNumRegs = 8 |
| |
| constexpr DoubleRegister | kScratchDoubleReg = xmm7 |
| |
| constexpr int | kMaxBuiltinRegisterParams = 5 |
| |
| constexpr int | kMaxTFSBuiltinRegisterParams = kMaxBuiltinRegisterParams |
| |
| constexpr int | kJSBuiltinRegisterParams = 4 |
| |
| const int | kInvalidRegister = -1 |
| |
| const int | kNumSimuRegisters = 33 |
| |
| const int | kNumFPURegisters = 32 |
| |
| const int | kInvalidFPURegister = -1 |
| |
| const int | kFCSRRegister = 0 |
| |
| const int | kInvalidFPUControlRegister = -1 |
| |
| const uint32_t | kFPUInvalidResult = static_cast<uint32_t>(1u << 31) - 1 |
| |
| const int32_t | kFPUInvalidResultNegative = static_cast<int32_t>(1u << 31) |
| |
| const uint64_t | kFPU64InvalidResult |
| |
| const int64_t | kFPU64InvalidResultNegative |
| |
| const uint32_t | kFCSRInexactCauseBit = 24 |
| |
| const uint32_t | kFCSRUnderflowCauseBit = 25 |
| |
| const uint32_t | kFCSROverflowCauseBit = 26 |
| |
| const uint32_t | kFCSRDivideByZeroCauseBit = 27 |
| |
| const uint32_t | kFCSRInvalidOpCauseBit = 28 |
| |
| const uint32_t | kFCSRInexactCauseMask = 1 << kFCSRInexactCauseBit |
| |
| const uint32_t | kFCSRUnderflowCauseMask = 1 << kFCSRUnderflowCauseBit |
| |
| const uint32_t | kFCSROverflowCauseMask = 1 << kFCSROverflowCauseBit |
| |
| const uint32_t | kFCSRDivideByZeroCauseMask = 1 << kFCSRDivideByZeroCauseBit |
| |
| const uint32_t | kFCSRInvalidOpCauseMask = 1 << kFCSRInvalidOpCauseBit |
| |
| const uint32_t | kFCSRCauseMask |
| |
| const uint32_t | kFCSRExceptionCauseMask = kFCSRCauseMask ^ kFCSRInexactCauseMask |
| |
| const uint32_t | kMaxWatchpointCode = 31 |
| |
| const int | kRjShift = 5 |
| |
| const int | kRjBits = 5 |
| |
| const int | kRkShift = 10 |
| |
| const int | kRkBits = 5 |
| |
| const int | kRdShift = 0 |
| |
| const int | kRdBits = 5 |
| |
| const int | kSaShift = 15 |
| |
| const int | kSa2Bits = 2 |
| |
| const int | kSa3Bits = 3 |
| |
| const int | kCdShift = 0 |
| |
| const int | kCdBits = 3 |
| |
| const int | kCjShift = 5 |
| |
| const int | kCjBits = 3 |
| |
| const int | kCodeShift = 0 |
| |
| const int | kCodeBits = 15 |
| |
| const int | kCondShift = 15 |
| |
| const int | kCondBits = 5 |
| |
| const int | kUi5Shift = 10 |
| |
| const int | kUi5Bits = 5 |
| |
| const int | kUi6Shift = 10 |
| |
| const int | kUi6Bits = 6 |
| |
| const int | kUi12Shift = 10 |
| |
| const int | kUi12Bits = 12 |
| |
| const int | kSi12Shift = 10 |
| |
| const int | kSi12Bits = 12 |
| |
| const int | kSi14Shift = 10 |
| |
| const int | kSi14Bits = 14 |
| |
| const int | kSi16Shift = 10 |
| |
| const int | kSi16Bits = 16 |
| |
| const int | kSi20Shift = 5 |
| |
| const int | kSi20Bits = 20 |
| |
| const int | kMsbwShift = 16 |
| |
| const int | kMsbwBits = 5 |
| |
| const int | kLsbwShift = 10 |
| |
| const int | kLsbwBits = 5 |
| |
| const int | kMsbdShift = 16 |
| |
| const int | kMsbdBits = 6 |
| |
| const int | kLsbdShift = 10 |
| |
| const int | kLsbdBits = 6 |
| |
| const int | kFdShift = 0 |
| |
| const int | kFdBits = 5 |
| |
| const int | kFjShift = 5 |
| |
| const int | kFjBits = 5 |
| |
| const int | kFkShift = 10 |
| |
| const int | kFkBits = 5 |
| |
| const int | kFaShift = 15 |
| |
| const int | kFaBits = 5 |
| |
| const int | kCaShift = 15 |
| |
| const int | kCaBits = 3 |
| |
| const int | kHint15Shift = 0 |
| |
| const int | kHint15Bits = 15 |
| |
| const int | kHint5Shift = 0 |
| |
| const int | kHint5Bits = 5 |
| |
| const int | kOffsLowShift = 10 |
| |
| const int | kOffsLowBits = 16 |
| |
| const int | kOffs26HighShift = 0 |
| |
| const int | kOffs26HighBits = 10 |
| |
| const int | kOffs21HighShift = 0 |
| |
| const int | kOffs21HighBits = 5 |
| |
| const int | kImm12Shift = 0 |
| |
| const int | kImm12Bits = 12 |
| |
| const int | kImm16Shift = 0 |
| |
| const int | kImm16Bits = 16 |
| |
| const int | kImm26Shift = 0 |
| |
| const int | kImm26Bits = 26 |
| |
| const int | kImm28Shift = 0 |
| |
| const int | kImm28Bits = 28 |
| |
| const int | kImm32Shift = 0 |
| |
| const int | kImm32Bits = 32 |
| |
| const int | kRjFieldMask = ((1 << kRjBits) - 1) << kRjShift |
| |
| const int | kRkFieldMask = ((1 << kRkBits) - 1) << kRkShift |
| |
| const int | kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift |
| |
| const int | kSa2FieldMask = ((1 << kSa2Bits) - 1) << kSaShift |
| |
| const int | kSa3FieldMask = ((1 << kSa3Bits) - 1) << kSaShift |
| |
| const int | kHiMaskOf32 = 0xffff << 16 |
| |
| const int | kLoMaskOf32 = 0xffff |
| |
| const int | kSignMaskOf32 = 0x80000000 |
| |
| const int64_t | kTop16MaskOf64 = (int64_t)0xffff << 48 |
| |
| const int64_t | kHigher16MaskOf64 = (int64_t)0xffff << 32 |
| |
| const int64_t | kUpper16MaskOf64 = (int64_t)0xffff << 16 |
| |
| const int | kImm12Mask = ((1 << kImm12Bits) - 1) << kImm12Shift |
| |
| const int | kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift |
| |
| const int | kImm28Mask = ((1 << kImm28Bits) - 1) << kImm28Shift |
| |
| const uint32_t | kFPURoundingModeShift = 8 |
| |
| const uint32_t | kFPURoundingModeMask = 0b11 << kFPURoundingModeShift |
| |
| const Instr | rtCallRedirInstr |
| |
| const Instr | nopInstr = ADDI_W |
| |
| const int | kInvalidStackOffset = -1 |
| |
| static const int | kNegOffset = 0x00008000 |
| |
| constexpr Register | kScratchReg = s3 |
| |
| constexpr Register | kScratchReg2 = s4 |
| |
| constexpr DoubleRegister | kScratchDoubleReg2 = f31 |
| |
| constexpr FPUControlRegister | no_fpucreg = {kInvalidFPUControlRegister} |
| |
| constexpr FPUControlRegister | FCSR = {kFCSRRegister} |
| |
| constexpr FPUControlRegister | FCSR0 = {kFCSRRegister} |
| |
| constexpr FPUControlRegister | FCSR1 = {kFCSRRegister + 1} |
| |
| constexpr FPUControlRegister | FCSR2 = {kFCSRRegister + 2} |
| |
| constexpr FPUControlRegister | FCSR3 = {kFCSRRegister + 3} |
| |
| constexpr Register | kPtrComprCageBaseRegister = no_reg |
| |
| const DoubleRegList | kCalleeSavedFPU = {f24, f25, f26, f27, f28, f29, f30, f31} |
| |
| const int | kNumCalleeSavedFPU = 8 |
| |
| const DoubleRegList | kCallerSavedFPU |
| |
| constexpr int | kMaximumReprSizeLog2 |
| |
| constexpr int | kMaximumReprSizeInBytes = 1 << kMaximumReprSizeLog2 |
| |
| static constexpr int | kMaxCParameters = 256 |
| |
| const int | kNumMSARegisters = 32 |
| |
| const int | kInvalidMSARegister = -1 |
| |
| const int | kInvalidMSAControlRegister = -1 |
| |
| const int | kMSAIRRegister = 0 |
| |
| const int | kMSACSRRegister = 1 |
| |
| const int | kMSARegSize = 128 |
| |
| const int | kMSALanesByte = kMSARegSize / 8 |
| |
| const int | kMSALanesHalf = kMSARegSize / 16 |
| |
| const int | kMSALanesWord = kMSARegSize / 32 |
| |
| const int | kMSALanesDword = kMSARegSize / 64 |
| |
| const uint32_t | kFCSRInexactFlagBit = 2 |
| |
| const uint32_t | kFCSRUnderflowFlagBit = 3 |
| |
| const uint32_t | kFCSROverflowFlagBit = 4 |
| |
| const uint32_t | kFCSRDivideByZeroFlagBit = 5 |
| |
| const uint32_t | kFCSRInvalidOpFlagBit = 6 |
| |
| const uint32_t | kFCSRNaN2008FlagBit = 18 |
| |
| const uint32_t | kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit |
| |
| const uint32_t | kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit |
| |
| const uint32_t | kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit |
| |
| const uint32_t | kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit |
| |
| const uint32_t | kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit |
| |
| const uint32_t | kFCSRNaN2008FlagMask = 1 << kFCSRNaN2008FlagBit |
| |
| const uint32_t | kFCSRFlagMask |
| |
| const uint32_t | kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask |
| |
| const uint32_t | kFCSRUnimplementedOpCauseBit = 17 |
| |
| const uint32_t | kFCSRUnimplementedOpCauseMask |
| |
| const int32_t | kPrefHintLoad = 0 |
| |
| const int32_t | kPrefHintStore = 1 |
| |
| const int32_t | kPrefHintLoadStreamed = 4 |
| |
| const int32_t | kPrefHintStoreStreamed = 5 |
| |
| const int32_t | kPrefHintLoadRetained = 6 |
| |
| const int32_t | kPrefHintStoreRetained = 7 |
| |
| const int32_t | kPrefHintWritebackInvalidate = 25 |
| |
| const int32_t | kPrefHintPrepareForStore = 30 |
| |
| const int | kOpcodeShift = 26 |
| |
| const int | kOpcodeBits = 6 |
| |
| const int | kRsShift = 21 |
| |
| const int | kRsBits = 5 |
| |
| const int | kRtShift = 16 |
| |
| const int | kRtBits = 5 |
| |
| const int | kSaBits = 5 |
| |
| const int | kLsaSaBits = 2 |
| |
| const int | kFunctionShift = 0 |
| |
| const int | kFunctionBits = 6 |
| |
| const int | kLuiShift = 16 |
| |
| const int | kBp2Shift = 6 |
| |
| const int | kBp2Bits = 2 |
| |
| const int | kBp3Shift = 6 |
| |
| const int | kBp3Bits = 3 |
| |
| const int | kBaseShift = 21 |
| |
| const int | kBaseBits = 5 |
| |
| const int | kBit6Shift = 6 |
| |
| const int | kBit6Bits = 1 |
| |
| const int | kImm9Shift = 7 |
| |
| const int | kImm9Bits = 9 |
| |
| const int | kImm18Shift = 0 |
| |
| const int | kImm18Bits = 18 |
| |
| const int | kImm19Shift = 0 |
| |
| const int | kImm19Bits = 19 |
| |
| const int | kImm21Shift = 0 |
| |
| const int | kImm21Bits = 21 |
| |
| const int | kMsaImm8Shift = 16 |
| |
| const int | kMsaImm8Bits = 8 |
| |
| const int | kMsaImm5Shift = 16 |
| |
| const int | kMsaImm5Bits = 5 |
| |
| const int | kMsaImm10Shift = 11 |
| |
| const int | kMsaImm10Bits = 10 |
| |
| const int | kMsaImmMI10Shift = 16 |
| |
| const int | kMsaImmMI10Bits = 10 |
| |
| const int | kImmFieldShift = 2 |
| |
| const int | kFrBits = 5 |
| |
| const int | kFrShift = 21 |
| |
| const int | kFsShift = 11 |
| |
| const int | kFsBits = 5 |
| |
| const int | kFtShift = 16 |
| |
| const int | kFtBits = 5 |
| |
| const int | kFCccShift = 8 |
| |
| const int | kFCccBits = 3 |
| |
| const int | kFBccShift = 18 |
| |
| const int | kFBccBits = 3 |
| |
| const int | kFBtrueShift = 16 |
| |
| const int | kFBtrueBits = 1 |
| |
| const int | kWtBits = 5 |
| |
| const int | kWtShift = 16 |
| |
| const int | kWsBits = 5 |
| |
| const int | kWsShift = 11 |
| |
| const int | kWdBits = 5 |
| |
| const int | kWdShift = 6 |
| |
| const int | kOpcodeMask = ((1 << kOpcodeBits) - 1) << kOpcodeShift |
| |
| const int | kImm9Mask = ((1 << kImm9Bits) - 1) << kImm9Shift |
| |
| const int | kImm18Mask = ((1 << kImm18Bits) - 1) << kImm18Shift |
| |
| const int | kImm19Mask = ((1 << kImm19Bits) - 1) << kImm19Shift |
| |
| const int | kImm21Mask = ((1 << kImm21Bits) - 1) << kImm21Shift |
| |
| const int | kImm5Mask = ((1 << 5) - 1) |
| |
| const int | kImm10Mask = ((1 << 10) - 1) |
| |
| const int | kMsaI5I10Mask = ((7U << 23) | ((1 << 6) - 1)) |
| |
| const int | kMsaI8Mask = ((3U << 24) | ((1 << 6) - 1)) |
| |
| const int | kMsaI5Mask = ((7U << 23) | ((1 << 6) - 1)) |
| |
| const int | kMsaMI10Mask = (15U << 2) |
| |
| const int | kMsaBITMask = ((7U << 23) | ((1 << 6) - 1)) |
| |
| const int | kMsaELMMask = (15U << 22) |
| |
| const int | kMsaLongerELMMask = kMsaELMMask | (63U << 16) |
| |
| const int | kMsa3RMask = ((7U << 23) | ((1 << 6) - 1)) |
| |
| const int | kMsa3RFMask = ((15U << 22) | ((1 << 6) - 1)) |
| |
| const int | kMsaVECMask = (23U << 21) |
| |
| const int | kMsa2RMask = (7U << 18) |
| |
| const int | kMsa2RFMask = (15U << 17) |
| |
| const int | kRsFieldMask = ((1 << kRsBits) - 1) << kRsShift |
| |
| const int | kRtFieldMask = ((1 << kRtBits) - 1) << kRtShift |
| |
| const int | kSaFieldMask = ((1 << kSaBits) - 1) << kSaShift |
| |
| const int | kFunctionFieldMask = ((1 << kFunctionBits) - 1) << kFunctionShift |
| |
| const int | kJumpAddrMask = (1 << (kImm26Bits + kImmFieldShift)) - 1 |
| |
| const int32_t | kJalRawMark = 0x00000000 |
| |
| const int32_t | kJRawMark = 0xf0000000 |
| |
| const int32_t | kJumpRawMask = 0xf0000000 |
| |
| constexpr Opcode | SPECIAL = 0U << kOpcodeShift |
| |
| constexpr Opcode | REGIMM = 1U << kOpcodeShift |
| |
| constexpr Opcode | J = ((0U << 3) + 2) << kOpcodeShift |
| |
| constexpr Opcode | JAL = ((0U << 3) + 3) << kOpcodeShift |
| |
| constexpr Opcode | BLEZ = ((0U << 3) + 6) << kOpcodeShift |
| |
| constexpr Opcode | BGTZ = ((0U << 3) + 7) << kOpcodeShift |
| |
| constexpr Opcode | ADDI = ((1U << 3) + 0) << kOpcodeShift |
| |
| constexpr Opcode | ADDIU = ((1U << 3) + 1) << kOpcodeShift |
| |
| constexpr Opcode | SLTIU = ((1U << 3) + 3) << kOpcodeShift |
| |
| constexpr Opcode | LUI = ((1U << 3) + 7) << kOpcodeShift |
| |
| constexpr Opcode | DAUI = ((3U << 3) + 5) << kOpcodeShift |
| |
| constexpr Opcode | BEQC = ((2U << 3) + 0) << kOpcodeShift |
| |
| constexpr Opcode | COP1 |
| |
| constexpr Opcode | BEQL = ((2U << 3) + 4) << kOpcodeShift |
| |
| constexpr Opcode | BNEL = ((2U << 3) + 5) << kOpcodeShift |
| |
| constexpr Opcode | BLEZL = ((2U << 3) + 6) << kOpcodeShift |
| |
| constexpr Opcode | BGTZL = ((2U << 3) + 7) << kOpcodeShift |
| |
| constexpr Opcode | DADDI = ((3U << 3) + 0) << kOpcodeShift |
| |
| constexpr Opcode | DADDIU = ((3U << 3) + 1) << kOpcodeShift |
| |
| constexpr Opcode | LDL = ((3U << 3) + 2) << kOpcodeShift |
| |
| constexpr Opcode | LDR = ((3U << 3) + 3) << kOpcodeShift |
| |
| constexpr Opcode | SPECIAL2 = ((3U << 3) + 4) << kOpcodeShift |
| |
| constexpr Opcode | MSA = ((3U << 3) + 6) << kOpcodeShift |
| |
| constexpr Opcode | SPECIAL3 = ((3U << 3) + 7) << kOpcodeShift |
| |
| constexpr Opcode | LB = ((4U << 3) + 0) << kOpcodeShift |
| |
| constexpr Opcode | LH = ((4U << 3) + 1) << kOpcodeShift |
| |
| constexpr Opcode | LWL = ((4U << 3) + 2) << kOpcodeShift |
| |
| constexpr Opcode | LW = ((4U << 3) + 3) << kOpcodeShift |
| |
| constexpr Opcode | LBU = ((4U << 3) + 4) << kOpcodeShift |
| |
| constexpr Opcode | LHU = ((4U << 3) + 5) << kOpcodeShift |
| |
| constexpr Opcode | LWR = ((4U << 3) + 6) << kOpcodeShift |
| |
| constexpr Opcode | LWU = ((4U << 3) + 7) << kOpcodeShift |
| |
| constexpr Opcode | SB = ((5U << 3) + 0) << kOpcodeShift |
| |
| constexpr Opcode | SH = ((5U << 3) + 1) << kOpcodeShift |
| |
| constexpr Opcode | SWL = ((5U << 3) + 2) << kOpcodeShift |
| |
| constexpr Opcode | SW = ((5U << 3) + 3) << kOpcodeShift |
| |
| constexpr Opcode | SDL = ((5U << 3) + 4) << kOpcodeShift |
| |
| constexpr Opcode | SDR = ((5U << 3) + 5) << kOpcodeShift |
| |
| constexpr Opcode | SWR = ((5U << 3) + 6) << kOpcodeShift |
| |
| constexpr Opcode | LL = ((6U << 3) + 0) << kOpcodeShift |
| |
| constexpr Opcode | LWC1 = ((6U << 3) + 1) << kOpcodeShift |
| |
| constexpr Opcode | BC = ((6U << 3) + 2) << kOpcodeShift |
| |
| constexpr Opcode | LLD = ((6U << 3) + 4) << kOpcodeShift |
| |
| constexpr Opcode | LDC1 = ((6U << 3) + 5) << kOpcodeShift |
| |
| constexpr Opcode | POP66 = ((6U << 3) + 6) << kOpcodeShift |
| |
| constexpr Opcode | PREF = ((6U << 3) + 3) << kOpcodeShift |
| |
| constexpr Opcode | SC = ((7U << 3) + 0) << kOpcodeShift |
| |
| constexpr Opcode | SWC1 = ((7U << 3) + 1) << kOpcodeShift |
| |
| constexpr Opcode | BALC = ((7U << 3) + 2) << kOpcodeShift |
| |
| constexpr Opcode | PCREL = ((7U << 3) + 3) << kOpcodeShift |
| |
| constexpr Opcode | SCD = ((7U << 3) + 4) << kOpcodeShift |
| |
| constexpr Opcode | SDC1 = ((7U << 3) + 5) << kOpcodeShift |
| |
| constexpr Opcode | POP76 = ((7U << 3) + 6) << kOpcodeShift |
| |
| constexpr Opcode | SD = ((7U << 3) + 7) << kOpcodeShift |
| |
| constexpr Opcode | COP1X = ((1U << 4) + 3) << kOpcodeShift |
| |
| constexpr Opcode | POP06 = BLEZ |
| |
| constexpr Opcode | POP07 = BGTZ |
| |
| constexpr Opcode | POP10 = ADDI |
| |
| constexpr Opcode | POP26 = BLEZL |
| |
| constexpr Opcode | POP27 = BGTZL |
| |
| constexpr Opcode | POP30 = DADDI |
| |
| const Instr | kPopInstruction |
| |
| const Instr | kPushInstruction |
| |
| const Instr | kPushRegPattern |
| |
| const Instr | kPopRegPattern |
| |
| const Instr | kLwRegFpOffsetPattern |
| |
| const Instr | kSwRegFpOffsetPattern |
| |
| const Instr | kLwRegFpNegOffsetPattern |
| |
| const Instr | kSwRegFpNegOffsetPattern |
| |
| const Instr | kRtMask |
| |
| const Instr | kLwSwInstrTypeMask |
| |
| const Instr | kLwSwInstrArgumentMask |
| |
| const Instr | kLwSwOffsetMask |
| |
| const int | kCArgSlotCount = 0 |
| |
| const int | kCArgsSlotsSize = kCArgSlotCount * kInstrSize * 2 |
| |
| const int | kBranchReturnOffset = 2 * kInstrSize |
| |
| static const int | kNegOffset = 0x00008000 |
| |
| const Simd128Register | no_msareg = Simd128Register::no_reg() |
| |
| constexpr DoubleRegister | kDoubleCompareReg = f23 |
| |
| constexpr Simd128Register | kSimd128RegZero = w28 |
| |
| constexpr Simd128Register | kSimd128ScratchReg = w30 |
| |
| constexpr MSAControlRegister | no_msacreg = {kInvalidMSAControlRegister} |
| |
| constexpr MSAControlRegister | MSAIR = {kMSAIRRegister} |
| |
| constexpr MSAControlRegister | MSACSR = {kMSACSRRegister} |
| |
| const uint32_t | kLoadIntptrOpcode = LD |
| |
| constexpr int | kHasFunctionDescriptorBitShift = 4 |
| |
| constexpr int | kHasFunctionDescriptorBitMask |
| |
| const int | kNumDoubleRegisters = 32 |
| |
| const int | kLoadPtrMaxReachBits = 15 |
| |
| const int | kLoadDoubleMaxReachBits = 15 |
| |
| const uint32_t | kFPRoundingModeMask = 3 |
| |
| constexpr uint8_t | kPcLoadDelta = 8 |
| |
| const int | kNumRequiredStackFrameSlots = 14 |
| |
| const int | kStackFrameLRSlot = 2 |
| |
| const int | kStackFrameExtraParamSlot = 14 |
| |
| constexpr Register | kConstantPoolRegister = r28 |
| |
| static const int | kRegisterPassedArguments = arraysize(kCArgRegs) |
| |
| const Simd128Register | no_simdreg = Simd128Register::no_reg() |
| |
| constexpr Simd128Register | kScratchSimd128Reg = v13 |
| |
| constexpr Simd128Register | kScratchSimd128Reg2 = v15 |
| |
| const DoubleRegList | kCallerSavedDoubles |
| |
| const Simd128RegList | kCallerSavedSimd128s |
| |
| const int | kNumCallerSavedDoubles = 14 |
| |
| const DoubleRegList | kCalleeSavedDoubles |
| |
| const int | kNumCalleeSavedDoubles = 18 |
| |
| static constexpr RegList | kEmptyRegList = {} |
| |
| static constexpr RegList | kAllocatableGeneralRegisters |
| |
| static constexpr DoubleRegList | kEmptyDoubleRegList = {} |
| |
| static constexpr DoubleRegList | kAllocatableDoubleRegisters |
| |
| const int | kEndOfChain = -1 |
| |
| const int | kEndOfJumpChain = 0 |
| |
| | else |
| |
| const int | kNumVRegisters = 32 |
| |
| const int | kInvalidVRegister = -1 |
| |
| const uint32_t | kMaxTracepointCode = 63 |
| |
| const uint32_t | kExceptionIsSwitchStackLimit = 128 |
| |
| const int | kBaseOpcodeShift = 0 |
| |
| const int | kBaseOpcodeBits = 7 |
| |
| const int | kFunct6Shift = 26 |
| |
| const int | kFunct6Bits = 6 |
| |
| const int | kFunct7Shift = 25 |
| |
| const int | kFunct7Bits = 7 |
| |
| const int | kFunct5Shift = 27 |
| |
| const int | kFunct5Bits = 5 |
| |
| const int | kFunct3Shift = 12 |
| |
| const int | kFunct3Bits = 3 |
| |
| const int | kFunct2Shift = 25 |
| |
| const int | kFunct2Bits = 2 |
| |
| const int | kRs1Shift = 15 |
| |
| const int | kRs1Bits = 5 |
| |
| const int | kVs1Shift = 15 |
| |
| const int | kVs1Bits = 5 |
| |
| const int | kVs2Shift = 20 |
| |
| const int | kVs2Bits = 5 |
| |
| const int | kVdShift = 7 |
| |
| const int | kVdBits = 5 |
| |
| const int | kRs2Shift = 20 |
| |
| const int | kRs2Bits = 5 |
| |
| const int | kRs3Shift = 27 |
| |
| const int | kRs3Bits = 5 |
| |
| const int | kRlShift = 25 |
| |
| const int | kAqShift = 26 |
| |
| const int | kImm11Shift = 2 |
| |
| const int | kImm11Bits = 11 |
| |
| const int | kShamtShift = 20 |
| |
| const int | kShamtBits = 5 |
| |
| const uint32_t | kShamtMask = (((1 << kShamtBits) - 1) << kShamtShift) |
| |
| const int | kShamtWShift = 20 |
| |
| const int | kShamtWBits = 6 |
| |
| const int | kArithShiftShift = 30 |
| |
| const int | kImm20Shift = 12 |
| |
| const int | kImm20Bits = 20 |
| |
| const int | kCsrShift = 20 |
| |
| const int | kCsrBits = 12 |
| |
| const int | kMemOrderBits = 4 |
| |
| const int | kPredOrderShift = 24 |
| |
| const int | kSuccOrderShift = 20 |
| |
| const int | kRvcFunct4Shift = 12 |
| |
| const int | kRvcFunct4Bits = 4 |
| |
| const int | kRvcFunct3Shift = 13 |
| |
| const int | kRvcFunct3Bits = 3 |
| |
| const int | kRvcRs1Shift = 7 |
| |
| const int | kRvcRs1Bits = 5 |
| |
| const int | kRvcRs2Shift = 2 |
| |
| const int | kRvcRs2Bits = 5 |
| |
| const int | kRvcRdShift = 7 |
| |
| const int | kRvcRdBits = 5 |
| |
| const int | kRvcRs1sShift = 7 |
| |
| const int | kRvcRs1sBits = 3 |
| |
| const int | kRvcRs2sShift = 2 |
| |
| const int | kRvcRs2sBits = 3 |
| |
| const int | kRvcFunct2Shift = 5 |
| |
| const int | kRvcFunct2BShift = 10 |
| |
| const int | kRvcFunct2Bits = 2 |
| |
| const int | kRvcFunct6Shift = 10 |
| |
| const int | kRvcFunct6Bits = 6 |
| |
| const uint32_t | kRvcOpcodeMask |
| |
| const uint32_t | kRvcFunct3Mask |
| |
| const uint32_t | kRvcFunct4Mask |
| |
| const uint32_t | kRvcFunct6Mask |
| |
| const uint32_t | kRvcFunct2Mask |
| |
| const uint32_t | kRvcFunct2BMask |
| |
| const uint32_t | kCRTypeMask = kRvcOpcodeMask | kRvcFunct4Mask |
| |
| const uint32_t | kCSTypeMask = kRvcOpcodeMask | kRvcFunct6Mask |
| |
| const uint32_t | kCATypeMask = kRvcOpcodeMask | kRvcFunct6Mask | kRvcFunct2Mask |
| |
| const uint32_t | kRvcBImm8Mask = (((1 << 5) - 1) << 2) | (((1 << 3) - 1) << 10) |
| |
| constexpr int | kRvvELEN = 64 |
| |
| constexpr int | kRvvVLEN = 128 |
| |
| constexpr int | kRvvSLEN = kRvvVLEN |
| |
| const int | kRvvFunct6Shift = 26 |
| |
| const int | kRvvFunct6Bits = 6 |
| |
| const uint32_t | kRvvFunct6Mask |
| |
| const int | kRvvVmBits = 1 |
| |
| const int | kRvvVmShift = 25 |
| |
| const uint32_t | kRvvVmMask = (((1 << kRvvVmBits) - 1) << kRvvVmShift) |
| |
| const int | kRvvVs2Bits = 5 |
| |
| const int | kRvvVs2Shift = 20 |
| |
| const uint32_t | kRvvVs2Mask = (((1 << kRvvVs2Bits) - 1) << kRvvVs2Shift) |
| |
| const int | kRvvVs1Bits = 5 |
| |
| const int | kRvvVs1Shift = 15 |
| |
| const uint32_t | kRvvVs1Mask = (((1 << kRvvVs1Bits) - 1) << kRvvVs1Shift) |
| |
| const int | kRvvRs1Bits = kRvvVs1Bits |
| |
| const int | kRvvRs1Shift = kRvvVs1Shift |
| |
| const uint32_t | kRvvRs1Mask = (((1 << kRvvRs1Bits) - 1) << kRvvRs1Shift) |
| |
| const int | kRvvRs2Bits = 5 |
| |
| const int | kRvvRs2Shift = 20 |
| |
| const uint32_t | kRvvRs2Mask = (((1 << kRvvRs2Bits) - 1) << kRvvRs2Shift) |
| |
| const int | kRvvImm5Bits = kRvvVs1Bits |
| |
| const int | kRvvImm5Shift = kRvvVs1Shift |
| |
| const uint32_t | kRvvImm5Mask = (((1 << kRvvImm5Bits) - 1) << kRvvImm5Shift) |
| |
| const int | kRvvVdBits = 5 |
| |
| const int | kRvvVdShift = 7 |
| |
| const uint32_t | kRvvVdMask = (((1 << kRvvVdBits) - 1) << kRvvVdShift) |
| |
| const int | kRvvRdBits = kRvvVdBits |
| |
| const int | kRvvRdShift = kRvvVdShift |
| |
| const uint32_t | kRvvRdMask = (((1 << kRvvRdBits) - 1) << kRvvRdShift) |
| |
| const int | kRvvZimmBits = 11 |
| |
| const int | kRvvZimmShift = 20 |
| |
| const uint32_t | kRvvZimmMask = (((1 << kRvvZimmBits) - 1) << kRvvZimmShift) |
| |
| const int | kRvvUimmShift = kRvvRs1Shift |
| |
| const int | kRvvUimmBits = kRvvRs1Bits |
| |
| const uint32_t | kRvvUimmMask = (((1 << kRvvUimmBits) - 1) << kRvvUimmShift) |
| |
| const int | kRvvWidthBits = 3 |
| |
| const int | kRvvWidthShift = 12 |
| |
| const uint32_t | kRvvWidthMask = (((1 << kRvvWidthBits) - 1) << kRvvWidthShift) |
| |
| const int | kRvvMopBits = 2 |
| |
| const int | kRvvMopShift = 26 |
| |
| const uint32_t | kRvvMopMask = (((1 << kRvvMopBits) - 1) << kRvvMopShift) |
| |
| const int | kRvvMewBits = 1 |
| |
| const int | kRvvMewShift = 28 |
| |
| const uint32_t | kRvvMewMask = (((1 << kRvvMewBits) - 1) << kRvvMewShift) |
| |
| const int | kRvvNfBits = 3 |
| |
| const int | kRvvNfShift = 29 |
| |
| const uint32_t | kRvvNfMask = (((1 << kRvvNfBits) - 1) << kRvvNfShift) |
| |
| const uint32_t | kBaseOpcodeMask |
| |
| const uint32_t | kFunct3Mask = ((1 << kFunct3Bits) - 1) << kFunct3Shift |
| |
| const uint32_t | kFunct5Mask = ((1 << kFunct5Bits) - 1) << kFunct5Shift |
| |
| const uint32_t | kFunct6Mask = ((1 << kFunct6Bits) - 1) << kFunct6Shift |
| |
| const uint32_t | kFunct7Mask = ((1 << kFunct7Bits) - 1) << kFunct7Shift |
| |
| const uint32_t | kFunct2Mask = 0b11 << kFunct7Shift |
| |
| const uint32_t | kRTypeMask = kBaseOpcodeMask | kFunct3Mask | kFunct7Mask |
| |
| const uint32_t | kRATypeMask = kBaseOpcodeMask | kFunct3Mask | kFunct5Mask |
| |
| const uint32_t | kRFPTypeMask = kBaseOpcodeMask | kFunct7Mask |
| |
| const uint32_t | kR4TypeMask = kBaseOpcodeMask | kFunct3Mask | kFunct2Mask |
| |
| const uint32_t | kITypeMask = kBaseOpcodeMask | kFunct3Mask |
| |
| const uint32_t | kSTypeMask = kBaseOpcodeMask | kFunct3Mask |
| |
| const uint32_t | kBTypeMask = kBaseOpcodeMask | kFunct3Mask |
| |
| const uint32_t | kUTypeMask = kBaseOpcodeMask |
| |
| const uint32_t | kJTypeMask = kBaseOpcodeMask |
| |
| const uint32_t | kVTypeMask = kRvvFunct6Mask | kFunct3Mask | kBaseOpcodeMask |
| |
| const uint32_t | kRs1FieldMask = ((1 << kRs1Bits) - 1) << kRs1Shift |
| |
| const uint32_t | kRs2FieldMask = ((1 << kRs2Bits) - 1) << kRs2Shift |
| |
| const uint32_t | kRs3FieldMask = ((1 << kRs3Bits) - 1) << kRs3Shift |
| |
| const uint32_t | kBImm12Mask = kFunct7Mask | kRdFieldMask |
| |
| const uint32_t | kImm20Mask = ((1 << kImm20Bits) - 1) << kImm20Shift |
| |
| const uint32_t | kImm11Mask = ((1 << kImm11Bits) - 1) << kImm11Shift |
| |
| const uint32_t | kImm31_12Mask = ((1 << 20) - 1) << 12 |
| |
| const uint32_t | kImm19_0Mask = ((1 << 20) - 1) |
| |
| const int | kNopByte = 0x00000013 |
| |
| const int | kFloat32ExponentBias = 127 |
| |
| const int | kFloat32MantissaBits = 23 |
| |
| const int | kFloat32ExponentBits = 8 |
| |
| const int | kFloat64ExponentBias = 1023 |
| |
| const int | kFloat64MantissaBits = 52 |
| |
| const int | kFloat64ExponentBits = 11 |
| |
| const Instr | kIllegalInstr = 0 |
| |
| const Instr | kBreakInstr = SYSTEM | 1 << kImm12Shift |
| |
| constexpr uint8_t | kShortInstrSize = 2 |
| |
| static const int | kNegOffset = 0x00008000 |
| |
| constexpr Opcode | RO_LR_W |
| |
| constexpr Opcode | RO_SC_W |
| |
| constexpr Opcode | RO_AMOSWAP_W |
| |
| constexpr Opcode | RO_AMOADD_W |
| |
| constexpr Opcode | RO_AMOXOR_W |
| |
| constexpr Opcode | RO_AMOAND_W |
| |
| constexpr Opcode | RO_AMOOR_W |
| |
| constexpr Opcode | RO_AMOMIN_W |
| |
| constexpr Opcode | RO_AMOMAX_W |
| |
| constexpr Opcode | RO_AMOMINU_W |
| |
| constexpr Opcode | RO_AMOMAXU_W |
| |
| constexpr Opcode | RO_SH1ADD |
| |
| constexpr Opcode | RO_SH2ADD |
| |
| constexpr Opcode | RO_SH3ADD |
| |
| constexpr Opcode | RO_ANDN |
| |
| constexpr Opcode | RO_ORN |
| |
| constexpr Opcode | RO_XNOR |
| |
| constexpr Opcode | OP_COUNT |
| |
| constexpr Opcode | RO_CLZ = OP_COUNT | (0b00000 << kShamtShift) |
| |
| constexpr Opcode | RO_CTZ = OP_COUNT | (0b00001 << kShamtShift) |
| |
| constexpr Opcode | RO_CPOP = OP_COUNT | (0b00010 << kShamtShift) |
| |
| constexpr Opcode | RO_MAX |
| |
| constexpr Opcode | RO_MAXU |
| |
| constexpr Opcode | RO_MIN |
| |
| constexpr Opcode | RO_MINU |
| |
| constexpr Opcode | RO_SEXTB |
| |
| constexpr Opcode | RO_SEXTH |
| |
| constexpr Opcode | RO_ROL |
| |
| constexpr Opcode | RO_ROR |
| |
| constexpr Opcode | RO_ORCB |
| |
| constexpr Opcode | RO_REV8 |
| |
| constexpr Opcode | RO_BCLR |
| |
| constexpr Opcode | RO_BCLRI |
| |
| constexpr Opcode | RO_BEXT |
| |
| constexpr Opcode | RO_BEXTI |
| |
| constexpr Opcode | RO_BINV |
| |
| constexpr Opcode | RO_BINVI |
| |
| constexpr Opcode | RO_BSET |
| |
| constexpr Opcode | RO_BSETI |
| |
| constexpr Opcode | RO_C_ADDI4SPN = C0 | (0b000 << kRvcFunct3Shift) |
| |
| constexpr Opcode | RO_C_ADDI16SP = C1 | (0b011 << kRvcFunct3Shift) |
| |
| constexpr Opcode | RO_C_LW = C0 | (0b010 << kRvcFunct3Shift) |
| |
| constexpr Opcode | RO_C_SW = C0 | (0b110 << kRvcFunct3Shift) |
| |
| constexpr Opcode | RO_C_NOP_ADDI = C1 | (0b000 << kRvcFunct3Shift) |
| |
| constexpr Opcode | RO_C_LI = C1 | (0b010 << kRvcFunct3Shift) |
| |
| constexpr Opcode | RO_C_SUB |
| |
| constexpr Opcode | RO_C_XOR |
| |
| constexpr Opcode | RO_C_OR |
| |
| constexpr Opcode | RO_C_AND |
| |
| constexpr Opcode | RO_C_LUI_ADD = C1 | (0b011 << kRvcFunct3Shift) |
| |
| constexpr Opcode | RO_C_MISC_ALU = C1 | (0b100 << kRvcFunct3Shift) |
| |
| constexpr Opcode | RO_C_J = C1 | (0b101 << kRvcFunct3Shift) |
| |
| constexpr Opcode | RO_C_BEQZ = C1 | (0b110 << kRvcFunct3Shift) |
| |
| constexpr Opcode | RO_C_BNEZ = C1 | (0b111 << kRvcFunct3Shift) |
| |
| constexpr Opcode | RO_C_SLLI = C2 | (0b000 << kRvcFunct3Shift) |
| |
| constexpr Opcode | RO_C_LWSP = C2 | (0b010 << kRvcFunct3Shift) |
| |
| constexpr Opcode | RO_C_JR_MV_ADD = C2 | (0b100 << kRvcFunct3Shift) |
| |
| constexpr Opcode | RO_C_JR = C2 | (0b1000 << kRvcFunct4Shift) |
| |
| constexpr Opcode | RO_C_MV = C2 | (0b1000 << kRvcFunct4Shift) |
| |
| constexpr Opcode | RO_C_EBREAK = C2 | (0b1001 << kRvcFunct4Shift) |
| |
| constexpr Opcode | RO_C_JALR = C2 | (0b1001 << kRvcFunct4Shift) |
| |
| constexpr Opcode | RO_C_ADD = C2 | (0b1001 << kRvcFunct4Shift) |
| |
| constexpr Opcode | RO_C_SWSP = C2 | (0b110 << kRvcFunct3Shift) |
| |
| constexpr Opcode | RO_C_FSD = C0 | (0b101 << kRvcFunct3Shift) |
| |
| constexpr Opcode | RO_C_FLD = C0 | (0b001 << kRvcFunct3Shift) |
| |
| constexpr Opcode | RO_C_FLDSP = C2 | (0b001 << kRvcFunct3Shift) |
| |
| constexpr Opcode | RO_C_FSDSP = C2 | (0b101 << kRvcFunct3Shift) |
| |
| constexpr Opcode | RO_FLD = LOAD_FP | (0b011 << kFunct3Shift) |
| |
| constexpr Opcode | RO_FSD = STORE_FP | (0b011 << kFunct3Shift) |
| |
| constexpr Opcode | RO_FMADD_D = MADD | (0b01 << kFunct2Shift) |
| |
| constexpr Opcode | RO_FMSUB_D = MSUB | (0b01 << kFunct2Shift) |
| |
| constexpr Opcode | RO_FNMSUB_D = NMSUB | (0b01 << kFunct2Shift) |
| |
| constexpr Opcode | RO_FNMADD_D = NMADD | (0b01 << kFunct2Shift) |
| |
| constexpr Opcode | RO_FADD_D = OP_FP | (0b0000001 << kFunct7Shift) |
| |
| constexpr Opcode | RO_FSUB_D = OP_FP | (0b0000101 << kFunct7Shift) |
| |
| constexpr Opcode | RO_FMUL_D = OP_FP | (0b0001001 << kFunct7Shift) |
| |
| constexpr Opcode | RO_FDIV_D = OP_FP | (0b0001101 << kFunct7Shift) |
| |
| constexpr Opcode | RO_FSQRT_D |
| |
| constexpr Opcode | RO_FSGNJ_D |
| |
| constexpr Opcode | RO_FSGNJN_D |
| |
| constexpr Opcode | RO_FSQNJX_D |
| |
| constexpr Opcode | RO_FMIN_D |
| |
| constexpr Opcode | RO_FMAX_D |
| |
| constexpr Opcode | RO_FCVT_S_D |
| |
| constexpr Opcode | RO_FCVT_D_S |
| |
| constexpr Opcode | RO_FEQ_D |
| |
| constexpr Opcode | RO_FLT_D |
| |
| constexpr Opcode | RO_FLE_D |
| |
| constexpr Opcode | RO_FCLASS_D |
| |
| constexpr Opcode | RO_FCVT_W_D |
| |
| constexpr Opcode | RO_FCVT_WU_D |
| |
| constexpr Opcode | RO_FCVT_D_W |
| |
| constexpr Opcode | RO_FCVT_D_WU |
| |
| constexpr Opcode | RO_FLW = LOAD_FP | (0b010 << kFunct3Shift) |
| |
| constexpr Opcode | RO_FSW = STORE_FP | (0b010 << kFunct3Shift) |
| |
| constexpr Opcode | RO_FMADD_S = MADD | (0b00 << kFunct2Shift) |
| |
| constexpr Opcode | RO_FMSUB_S = MSUB | (0b00 << kFunct2Shift) |
| |
| constexpr Opcode | RO_FNMSUB_S = NMSUB | (0b00 << kFunct2Shift) |
| |
| constexpr Opcode | RO_FNMADD_S = NMADD | (0b00 << kFunct2Shift) |
| |
| constexpr Opcode | RO_FADD_S = OP_FP | (0b0000000 << kFunct7Shift) |
| |
| constexpr Opcode | RO_FSUB_S = OP_FP | (0b0000100 << kFunct7Shift) |
| |
| constexpr Opcode | RO_FMUL_S = OP_FP | (0b0001000 << kFunct7Shift) |
| |
| constexpr Opcode | RO_FDIV_S = OP_FP | (0b0001100 << kFunct7Shift) |
| |
| constexpr Opcode | RO_FSQRT_S |
| |
| constexpr Opcode | RO_FSGNJ_S |
| |
| constexpr Opcode | RO_FSGNJN_S |
| |
| constexpr Opcode | RO_FSQNJX_S |
| |
| constexpr Opcode | RO_FMIN_S |
| |
| constexpr Opcode | RO_FMAX_S |
| |
| constexpr Opcode | RO_FCVT_W_S |
| |
| constexpr Opcode | RO_FCVT_WU_S |
| |
| constexpr Opcode | RO_FMV |
| |
| constexpr Opcode | RO_FEQ_S |
| |
| constexpr Opcode | RO_FLT_S |
| |
| constexpr Opcode | RO_FLE_S |
| |
| constexpr Opcode | RO_FCLASS_S |
| |
| constexpr Opcode | RO_FCVT_S_W |
| |
| constexpr Opcode | RO_FCVT_S_WU |
| |
| constexpr Opcode | RO_FMV_W_X |
| |
| constexpr Opcode | RO_LUI = LUI |
| |
| constexpr Opcode | RO_AUIPC = AUIPC |
| |
| constexpr Opcode | RO_JAL = JAL |
| |
| constexpr Opcode | RO_JALR = JALR | (0b000 << kFunct3Shift) |
| |
| constexpr Opcode | RO_BEQ = BRANCH | (0b000 << kFunct3Shift) |
| |
| constexpr Opcode | RO_BNE = BRANCH | (0b001 << kFunct3Shift) |
| |
| constexpr Opcode | RO_BLT = BRANCH | (0b100 << kFunct3Shift) |
| |
| constexpr Opcode | RO_BGE = BRANCH | (0b101 << kFunct3Shift) |
| |
| constexpr Opcode | RO_BLTU = BRANCH | (0b110 << kFunct3Shift) |
| |
| constexpr Opcode | RO_BGEU = BRANCH | (0b111 << kFunct3Shift) |
| |
| constexpr Opcode | RO_LB = LOAD | (0b000 << kFunct3Shift) |
| |
| constexpr Opcode | RO_LH = LOAD | (0b001 << kFunct3Shift) |
| |
| constexpr Opcode | RO_LW = LOAD | (0b010 << kFunct3Shift) |
| |
| constexpr Opcode | RO_LBU = LOAD | (0b100 << kFunct3Shift) |
| |
| constexpr Opcode | RO_LHU = LOAD | (0b101 << kFunct3Shift) |
| |
| constexpr Opcode | RO_SB = STORE | (0b000 << kFunct3Shift) |
| |
| constexpr Opcode | RO_SH = STORE | (0b001 << kFunct3Shift) |
| |
| constexpr Opcode | RO_SW = STORE | (0b010 << kFunct3Shift) |
| |
| constexpr Opcode | RO_ADDI = OP_IMM | (0b000 << kFunct3Shift) |
| |
| constexpr Opcode | RO_SLTI = OP_IMM | (0b010 << kFunct3Shift) |
| |
| constexpr Opcode | RO_SLTIU = OP_IMM | (0b011 << kFunct3Shift) |
| |
| constexpr Opcode | RO_XORI = OP_IMM | (0b100 << kFunct3Shift) |
| |
| constexpr Opcode | RO_ORI = OP_IMM | (0b110 << kFunct3Shift) |
| |
| constexpr Opcode | RO_ANDI = OP_IMM | (0b111 << kFunct3Shift) |
| |
| constexpr Opcode | OP_SHL = OP_IMM | (0b001 << kFunct3Shift) |
| |
| constexpr Opcode | RO_SLLI = OP_SHL | (0b000000 << kFunct6Shift) |
| |
| constexpr Opcode | OP_SHR = OP_IMM | (0b101 << kFunct3Shift) |
| |
| constexpr Opcode | RO_SRLI = OP_SHR | (0b000000 << kFunct6Shift) |
| |
| constexpr Opcode | RO_SRAI = OP_SHR | (0b010000 << kFunct6Shift) |
| |
| constexpr Opcode | RO_ADD |
| |
| constexpr Opcode | RO_SUB |
| |
| constexpr Opcode | RO_SLL |
| |
| constexpr Opcode | RO_SLT |
| |
| constexpr Opcode | RO_SLTU |
| |
| constexpr Opcode | RO_XOR |
| |
| constexpr Opcode | RO_SRL |
| |
| constexpr Opcode | RO_SRA |
| |
| constexpr Opcode | RO_OR |
| |
| constexpr Opcode | RO_AND |
| |
| constexpr Opcode | RO_FENCE = MISC_MEM | (0b000 << kFunct3Shift) |
| |
| constexpr Opcode | RO_ECALL = SYSTEM | (0b000 << kFunct3Shift) |
| |
| constexpr Opcode | RO_MUL |
| |
| constexpr Opcode | RO_MULH |
| |
| constexpr Opcode | RO_MULHSU |
| |
| constexpr Opcode | RO_MULHU |
| |
| constexpr Opcode | RO_DIV |
| |
| constexpr Opcode | RO_DIVU |
| |
| constexpr Opcode | RO_REM |
| |
| constexpr Opcode | RO_REMU |
| |
| constexpr Opcode | OP_IVV = OP_V | (0b000 << kFunct3Shift) |
| |
| constexpr Opcode | OP_FVV = OP_V | (0b001 << kFunct3Shift) |
| |
| constexpr Opcode | OP_MVV = OP_V | (0b010 << kFunct3Shift) |
| |
| constexpr Opcode | OP_IVI = OP_V | (0b011 << kFunct3Shift) |
| |
| constexpr Opcode | OP_IVX = OP_V | (0b100 << kFunct3Shift) |
| |
| constexpr Opcode | OP_FVF = OP_V | (0b101 << kFunct3Shift) |
| |
| constexpr Opcode | OP_MVX = OP_V | (0b110 << kFunct3Shift) |
| |
| constexpr Opcode | RO_V_VSETVLI = OP_V | (0b111 << kFunct3Shift) | 0b0 << 31 |
| |
| constexpr Opcode | RO_V_VSETIVLI = OP_V | (0b111 << kFunct3Shift) | 0b11 << 30 |
| |
| constexpr Opcode | RO_V_VSETVL = OP_V | (0b111 << kFunct3Shift) | 0b1 << 31 |
| |
| constexpr Opcode | RO_V_VL |
| |
| constexpr Opcode | RO_V_VLS |
| |
| constexpr Opcode | RO_V_VLX |
| |
| constexpr Opcode | RO_V_VS |
| |
| constexpr Opcode | RO_V_VSS |
| |
| constexpr Opcode | RO_V_VSX |
| |
| constexpr Opcode | RO_V_VSU |
| |
| constexpr Opcode | RO_V_VLSEG2 |
| |
| constexpr Opcode | RO_V_VLSEG3 |
| |
| constexpr Opcode | RO_V_VLSEG4 |
| |
| constexpr Opcode | RO_V_VLSEG5 |
| |
| constexpr Opcode | RO_V_VLSEG6 |
| |
| constexpr Opcode | RO_V_VLSEG7 |
| |
| constexpr Opcode | RO_V_VLSEG8 |
| |
| constexpr Opcode | RO_V_VSSEG2 |
| |
| constexpr Opcode | RO_V_VSSEG3 |
| |
| constexpr Opcode | RO_V_VSSEG4 |
| |
| constexpr Opcode | RO_V_VSSEG5 |
| |
| constexpr Opcode | RO_V_VSSEG6 |
| |
| constexpr Opcode | RO_V_VSSEG7 |
| |
| constexpr Opcode | RO_V_VSSEG8 |
| |
| constexpr Opcode | RO_V_VLSSEG2 |
| |
| constexpr Opcode | RO_V_VLSSEG3 |
| |
| constexpr Opcode | RO_V_VLSSEG4 |
| |
| constexpr Opcode | RO_V_VLSSEG5 |
| |
| constexpr Opcode | RO_V_VLSSEG6 |
| |
| constexpr Opcode | RO_V_VLSSEG7 |
| |
| constexpr Opcode | RO_V_VLSSEG8 |
| |
| constexpr Opcode | RO_V_VSSSEG2 |
| |
| constexpr Opcode | RO_V_VSSSEG3 |
| |
| constexpr Opcode | RO_V_VSSSEG4 |
| |
| constexpr Opcode | RO_V_VSSSEG5 |
| |
| constexpr Opcode | RO_V_VSSSEG6 |
| |
| constexpr Opcode | RO_V_VSSSEG7 |
| |
| constexpr Opcode | RO_V_VSSSEG8 |
| |
| constexpr Opcode | RO_V_VLXSEG2 |
| |
| constexpr Opcode | RO_V_VLXSEG3 |
| |
| constexpr Opcode | RO_V_VLXSEG4 |
| |
| constexpr Opcode | RO_V_VLXSEG5 |
| |
| constexpr Opcode | RO_V_VLXSEG6 |
| |
| constexpr Opcode | RO_V_VLXSEG7 |
| |
| constexpr Opcode | RO_V_VLXSEG8 |
| |
| constexpr Opcode | RO_V_VSXSEG2 |
| |
| constexpr Opcode | RO_V_VSXSEG3 |
| |
| constexpr Opcode | RO_V_VSXSEG4 |
| |
| constexpr Opcode | RO_V_VSXSEG5 |
| |
| constexpr Opcode | RO_V_VSXSEG6 |
| |
| constexpr Opcode | RO_V_VSXSEG7 |
| |
| constexpr Opcode | RO_V_VSXSEG8 |
| |
| constexpr Opcode | VADD_FUNCT6 = 0b000000 |
| |
| constexpr Opcode | RO_V_VADD_VI = OP_IVI | (VADD_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VADD_VV = OP_IVV | (VADD_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VADD_VX = OP_IVX | (VADD_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VSUB_FUNCT6 = 0b000010 |
| |
| constexpr Opcode | RO_V_VSUB_VX = OP_IVX | (VSUB_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VSUB_VV = OP_IVV | (VSUB_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VDIVU_FUNCT6 = 0b100000 |
| |
| constexpr Opcode | RO_V_VDIVU_VX = OP_MVX | (VDIVU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VDIVU_VV = OP_MVV | (VDIVU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VDIV_FUNCT6 = 0b100001 |
| |
| constexpr Opcode | RO_V_VDIV_VX = OP_MVX | (VDIV_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VDIV_VV = OP_MVV | (VDIV_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VREMU_FUNCT6 = 0b100010 |
| |
| constexpr Opcode | RO_V_VREMU_VX = OP_MVX | (VREMU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VREMU_VV = OP_MVV | (VREMU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VREM_FUNCT6 = 0b100011 |
| |
| constexpr Opcode | RO_V_VREM_VX = OP_MVX | (VREM_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VREM_VV = OP_MVV | (VREM_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VMULHU_FUNCT6 = 0b100100 |
| |
| constexpr Opcode | RO_V_VMULHU_VX = OP_MVX | (VMULHU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMULHU_VV = OP_MVV | (VMULHU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VMUL_FUNCT6 = 0b100101 |
| |
| constexpr Opcode | RO_V_VMUL_VX = OP_MVX | (VMUL_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMUL_VV = OP_MVV | (VMUL_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VWMUL_FUNCT6 = 0b111011 |
| |
| constexpr Opcode | RO_V_VWMUL_VX = OP_MVX | (VWMUL_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VWMUL_VV = OP_MVV | (VWMUL_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VWMULU_FUNCT6 = 0b111000 |
| |
| constexpr Opcode | RO_V_VWMULU_VX = OP_MVX | (VWMULU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VWMULU_VV = OP_MVV | (VWMULU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VMULHSU_FUNCT6 = 0b100110 |
| |
| constexpr Opcode | RO_V_VMULHSU_VX = OP_MVX | (VMULHSU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMULHSU_VV = OP_MVV | (VMULHSU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VMULH_FUNCT6 = 0b100111 |
| |
| constexpr Opcode | RO_V_VMULH_VX = OP_MVX | (VMULH_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMULH_VV = OP_MVV | (VMULH_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VWADD_FUNCT6 = 0b110001 |
| |
| constexpr Opcode | RO_V_VWADD_VV = OP_MVV | (VWADD_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VWADD_VX = OP_MVX | (VWADD_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VWADDU_FUNCT6 = 0b110000 |
| |
| constexpr Opcode | RO_V_VWADDU_VV = OP_MVV | (VWADDU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VWADDU_VX = OP_MVX | (VWADDU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VWADDUW_FUNCT6 = 0b110101 |
| |
| constexpr Opcode | RO_V_VWADDUW_VX = OP_MVX | (VWADDUW_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VWADDUW_VV = OP_MVV | (VWADDUW_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VCOMPRESS_FUNCT6 = 0b010111 |
| |
| constexpr Opcode | RO_V_VCOMPRESS_VV |
| |
| constexpr Opcode | VSADDU_FUNCT6 = 0b100000 |
| |
| constexpr Opcode | RO_V_VSADDU_VI = OP_IVI | (VSADDU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VSADDU_VV = OP_IVV | (VSADDU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VSADDU_VX = OP_IVX | (VSADDU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VSADD_FUNCT6 = 0b100001 |
| |
| constexpr Opcode | RO_V_VSADD_VI = OP_IVI | (VSADD_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VSADD_VV = OP_IVV | (VSADD_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VSADD_VX = OP_IVX | (VSADD_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VSSUB_FUNCT6 = 0b100011 |
| |
| constexpr Opcode | RO_V_VSSUB_VV = OP_IVV | (VSSUB_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VSSUB_VX = OP_IVX | (VSSUB_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VSSUBU_FUNCT6 = 0b100010 |
| |
| constexpr Opcode | RO_V_VSSUBU_VV = OP_IVV | (VSSUBU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VSSUBU_VX = OP_IVX | (VSSUBU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VRSUB_FUNCT6 = 0b000011 |
| |
| constexpr Opcode | RO_V_VRSUB_VX = OP_IVX | (VRSUB_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VRSUB_VI = OP_IVI | (VRSUB_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VMINU_FUNCT6 = 0b000100 |
| |
| constexpr Opcode | RO_V_VMINU_VX = OP_IVX | (VMINU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMINU_VV = OP_IVV | (VMINU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VMIN_FUNCT6 = 0b000101 |
| |
| constexpr Opcode | RO_V_VMIN_VX = OP_IVX | (VMIN_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMIN_VV = OP_IVV | (VMIN_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VMAXU_FUNCT6 = 0b000110 |
| |
| constexpr Opcode | RO_V_VMAXU_VX = OP_IVX | (VMAXU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMAXU_VV = OP_IVV | (VMAXU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VMAX_FUNCT6 = 0b000111 |
| |
| constexpr Opcode | RO_V_VMAX_VX = OP_IVX | (VMAX_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMAX_VV = OP_IVV | (VMAX_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VAND_FUNCT6 = 0b001001 |
| |
| constexpr Opcode | RO_V_VAND_VI = OP_IVI | (VAND_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VAND_VV = OP_IVV | (VAND_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VAND_VX = OP_IVX | (VAND_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VOR_FUNCT6 = 0b001010 |
| |
| constexpr Opcode | RO_V_VOR_VI = OP_IVI | (VOR_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VOR_VV = OP_IVV | (VOR_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VOR_VX = OP_IVX | (VOR_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VXOR_FUNCT6 = 0b001011 |
| |
| constexpr Opcode | RO_V_VXOR_VI = OP_IVI | (VXOR_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VXOR_VV = OP_IVV | (VXOR_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VXOR_VX = OP_IVX | (VXOR_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VRGATHER_FUNCT6 = 0b001100 |
| |
| constexpr Opcode | RO_V_VRGATHER_VI |
| |
| constexpr Opcode | RO_V_VRGATHER_VV |
| |
| constexpr Opcode | RO_V_VRGATHER_VX |
| |
| constexpr Opcode | VMV_FUNCT6 = 0b010111 |
| |
| constexpr Opcode | RO_V_VMV_VI = OP_IVI | (VMV_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMV_VV = OP_IVV | (VMV_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMV_VX = OP_IVX | (VMV_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VFMV_VF = OP_FVF | (VMV_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMERGE_VI = RO_V_VMV_VI |
| |
| constexpr Opcode | RO_V_VMERGE_VV = RO_V_VMV_VV |
| |
| constexpr Opcode | RO_V_VMERGE_VX = RO_V_VMV_VX |
| |
| constexpr Opcode | RO_V_VFMERGE_VF = RO_V_VFMV_VF |
| |
| constexpr Opcode | VMSEQ_FUNCT6 = 0b011000 |
| |
| constexpr Opcode | RO_V_VMSEQ_VI = OP_IVI | (VMSEQ_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMSEQ_VV = OP_IVV | (VMSEQ_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMSEQ_VX = OP_IVX | (VMSEQ_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VMSNE_FUNCT6 = 0b011001 |
| |
| constexpr Opcode | RO_V_VMSNE_VI = OP_IVI | (VMSNE_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMSNE_VV = OP_IVV | (VMSNE_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMSNE_VX = OP_IVX | (VMSNE_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VMSLTU_FUNCT6 = 0b011010 |
| |
| constexpr Opcode | RO_V_VMSLTU_VV = OP_IVV | (VMSLTU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMSLTU_VX = OP_IVX | (VMSLTU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VMSLT_FUNCT6 = 0b011011 |
| |
| constexpr Opcode | RO_V_VMSLT_VV = OP_IVV | (VMSLT_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMSLT_VX = OP_IVX | (VMSLT_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VMSLE_FUNCT6 = 0b011101 |
| |
| constexpr Opcode | RO_V_VMSLE_VI = OP_IVI | (VMSLE_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMSLE_VV = OP_IVV | (VMSLE_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMSLE_VX = OP_IVX | (VMSLE_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VMSLEU_FUNCT6 = 0b011100 |
| |
| constexpr Opcode | RO_V_VMSLEU_VI = OP_IVI | (VMSLEU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMSLEU_VV = OP_IVV | (VMSLEU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMSLEU_VX = OP_IVX | (VMSLEU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VMSGTU_FUNCT6 = 0b011110 |
| |
| constexpr Opcode | RO_V_VMSGTU_VI = OP_IVI | (VMSGTU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMSGTU_VX = OP_IVX | (VMSGTU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VMSGT_FUNCT6 = 0b011111 |
| |
| constexpr Opcode | RO_V_VMSGT_VI = OP_IVI | (VMSGT_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMSGT_VX = OP_IVX | (VMSGT_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VSLIDEUP_FUNCT6 = 0b001110 |
| |
| constexpr Opcode | RO_V_VSLIDEUP_VI |
| |
| constexpr Opcode | RO_V_VSLIDEUP_VX |
| |
| constexpr Opcode | RO_V_VSLIDE1UP_VX |
| |
| constexpr Opcode | RO_V_VFSLIDE1UP_VF |
| |
| constexpr Opcode | VSLIDEDOWN_FUNCT6 = 0b001111 |
| |
| constexpr Opcode | RO_V_VSLIDEDOWN_VI |
| |
| constexpr Opcode | RO_V_VSLIDEDOWN_VX |
| |
| constexpr Opcode | RO_V_VSLIDE1DOWN_VX |
| |
| constexpr Opcode | RO_V_VFSLIDE1DOWN_VF |
| |
| constexpr Opcode | VSRL_FUNCT6 = 0b101000 |
| |
| constexpr Opcode | RO_V_VSRL_VI = OP_IVI | (VSRL_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VSRL_VV = OP_IVV | (VSRL_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VSRL_VX = OP_IVX | (VSRL_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VSRA_FUNCT6 = 0b101001 |
| |
| constexpr Opcode | RO_V_VSRA_VI = OP_IVI | (VSRA_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VSRA_VV = OP_IVV | (VSRA_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VSRA_VX = OP_IVX | (VSRA_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VSLL_FUNCT6 = 0b100101 |
| |
| constexpr Opcode | RO_V_VSLL_VI = OP_IVI | (VSLL_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VSLL_VV = OP_IVV | (VSLL_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VSLL_VX = OP_IVX | (VSLL_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VSMUL_FUNCT6 = 0b100111 |
| |
| constexpr Opcode | RO_V_VSMUL_VV = OP_IVV | (VSMUL_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VSMUL_VX = OP_IVX | (VSMUL_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VADC_FUNCT6 = 0b010000 |
| |
| constexpr Opcode | RO_V_VADC_VI = OP_IVI | (VADC_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VADC_VV = OP_IVV | (VADC_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VADC_VX = OP_IVX | (VADC_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VMADC_FUNCT6 = 0b010001 |
| |
| constexpr Opcode | RO_V_VMADC_VI = OP_IVI | (VMADC_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMADC_VV = OP_IVV | (VMADC_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMADC_VX = OP_IVX | (VMADC_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VWXUNARY0_FUNCT6 = 0b010000 |
| |
| constexpr Opcode | VRXUNARY0_FUNCT6 = 0b010000 |
| |
| constexpr Opcode | VMUNARY0_FUNCT6 = 0b010100 |
| |
| constexpr Opcode | RO_V_VWXUNARY0 |
| |
| constexpr Opcode | RO_V_VRXUNARY0 |
| |
| constexpr Opcode | RO_V_VMUNARY0 = OP_MVV | (VMUNARY0_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VID_V = 0b10001 |
| |
| constexpr Opcode | VXUNARY0_FUNCT6 = 0b010010 |
| |
| constexpr Opcode | RO_V_VXUNARY0 = OP_MVV | (VXUNARY0_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VWFUNARY0_FUNCT6 = 0b010000 |
| |
| constexpr Opcode | RO_V_VFMV_FS = OP_FVV | (VWFUNARY0_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VRFUNARY0_FUNCT6 = 0b010000 |
| |
| constexpr Opcode | RO_V_VFMV_SF = OP_FVF | (VRFUNARY0_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VREDMAXU_FUNCT6 = 0b000110 |
| |
| constexpr Opcode | RO_V_VREDMAXU = OP_MVV | (VREDMAXU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VREDMAX_FUNCT6 = 0b000111 |
| |
| constexpr Opcode | RO_V_VREDMAX = OP_MVV | (VREDMAX_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VREDMINU_FUNCT6 = 0b000100 |
| |
| constexpr Opcode | RO_V_VREDMINU = OP_MVV | (VREDMINU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VREDMIN_FUNCT6 = 0b000101 |
| |
| constexpr Opcode | RO_V_VREDMIN = OP_MVV | (VREDMIN_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VFUNARY0_FUNCT6 = 0b010010 |
| |
| constexpr Opcode | RO_V_VFUNARY0 = OP_FVV | (VFUNARY0_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VFUNARY1_FUNCT6 = 0b010011 |
| |
| constexpr Opcode | RO_V_VFUNARY1 = OP_FVV | (VFUNARY1_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VFCVT_XU_F_V = 0b00000 |
| |
| constexpr Opcode | VFCVT_X_F_V = 0b00001 |
| |
| constexpr Opcode | VFCVT_F_XU_V = 0b00010 |
| |
| constexpr Opcode | VFCVT_F_X_V = 0b00011 |
| |
| constexpr Opcode | VFWCVT_XU_F_V = 0b01000 |
| |
| constexpr Opcode | VFWCVT_X_F_V = 0b01001 |
| |
| constexpr Opcode | VFWCVT_F_XU_V = 0b01010 |
| |
| constexpr Opcode | VFWCVT_F_X_V = 0b01011 |
| |
| constexpr Opcode | VFWCVT_F_F_V = 0b01100 |
| |
| constexpr Opcode | VFNCVT_F_F_W = 0b10100 |
| |
| constexpr Opcode | VFNCVT_X_F_W = 0b10001 |
| |
| constexpr Opcode | VFNCVT_XU_F_W = 0b10000 |
| |
| constexpr Opcode | VFCLASS_V = 0b10000 |
| |
| constexpr Opcode | VFSQRT_V = 0b00000 |
| |
| constexpr Opcode | VFRSQRT7_V = 0b00100 |
| |
| constexpr Opcode | VFREC7_V = 0b00101 |
| |
| constexpr Opcode | VFADD_FUNCT6 = 0b000000 |
| |
| constexpr Opcode | RO_V_VFADD_VV = OP_FVV | (VFADD_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VFADD_VF = OP_FVF | (VFADD_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VFSUB_FUNCT6 = 0b000010 |
| |
| constexpr Opcode | RO_V_VFSUB_VV = OP_FVV | (VFSUB_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VFSUB_VF = OP_FVF | (VFSUB_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VFDIV_FUNCT6 = 0b100000 |
| |
| constexpr Opcode | RO_V_VFDIV_VV = OP_FVV | (VFDIV_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VFDIV_VF = OP_FVF | (VFDIV_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VFMUL_FUNCT6 = 0b100100 |
| |
| constexpr Opcode | RO_V_VFMUL_VV = OP_FVV | (VFMUL_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VFMUL_VF = OP_FVF | (VFMUL_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VFWADD_FUNCT6 = 0b110000 |
| |
| constexpr Opcode | RO_V_VFWADD_VV = OP_FVV | (VFWADD_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VFWADD_VF = OP_FVF | (VFWADD_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VFWSUB_FUNCT6 = 0b110010 |
| |
| constexpr Opcode | RO_V_VFWSUB_VV = OP_FVV | (VFWSUB_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VFWSUB_VF = OP_FVF | (VFWSUB_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VFWADD_W_FUNCT6 = 0b110100 |
| |
| constexpr Opcode | RO_V_VFWADD_W_VV |
| |
| constexpr Opcode | RO_V_VFWADD_W_VF |
| |
| constexpr Opcode | VFWSUB_W_FUNCT6 = 0b110110 |
| |
| constexpr Opcode | RO_V_VFWSUB_W_VV |
| |
| constexpr Opcode | RO_V_VFWSUB_W_VF |
| |
| constexpr Opcode | VFWREDUSUM_FUNCT6 = 0b110001 |
| |
| constexpr Opcode | RO_V_VFWREDUSUM_VS |
| |
| constexpr Opcode | VFWREDOSUM_FUNCT6 = 0b110011 |
| |
| constexpr Opcode | RO_V_VFWREDOSUM_VS |
| |
| constexpr Opcode | VFWMUL_FUNCT6 = 0b111000 |
| |
| constexpr Opcode | RO_V_VFWMUL_VV = OP_FVV | (VFWMUL_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VFWMUL_VF = OP_FVF | (VFWMUL_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VMFEQ_FUNCT6 = 0b011000 |
| |
| constexpr Opcode | RO_V_VMFEQ_VV = OP_FVV | (VMFEQ_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMFEQ_VF = OP_FVF | (VMFEQ_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VMFNE_FUNCT6 = 0b011100 |
| |
| constexpr Opcode | RO_V_VMFNE_VV = OP_FVV | (VMFNE_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMFNE_VF = OP_FVF | (VMFNE_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VMFLT_FUNCT6 = 0b011011 |
| |
| constexpr Opcode | RO_V_VMFLT_VV = OP_FVV | (VMFLT_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMFLT_VF = OP_FVF | (VMFLT_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VMFLE_FUNCT6 = 0b011001 |
| |
| constexpr Opcode | RO_V_VMFLE_VV = OP_FVV | (VMFLE_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VMFLE_VF = OP_FVF | (VMFLE_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VMFGE_FUNCT6 = 0b011111 |
| |
| constexpr Opcode | RO_V_VMFGE_VF = OP_FVF | (VMFGE_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VMFGT_FUNCT6 = 0b011101 |
| |
| constexpr Opcode | RO_V_VMFGT_VF = OP_FVF | (VMFGT_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VFMAX_FUNCT6 = 0b000110 |
| |
| constexpr Opcode | RO_V_VFMAX_VV = OP_FVV | (VFMAX_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VFMAX_VF = OP_FVF | (VFMAX_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VFREDMAX_FUNCT6 = 0b0001111 |
| |
| constexpr Opcode | RO_V_VFREDMAX_VV |
| |
| constexpr Opcode | VFMIN_FUNCT6 = 0b000100 |
| |
| constexpr Opcode | RO_V_VFMIN_VV = OP_FVV | (VFMIN_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VFMIN_VF = OP_FVF | (VFMIN_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VFSGNJ_FUNCT6 = 0b001000 |
| |
| constexpr Opcode | RO_V_VFSGNJ_VV = OP_FVV | (VFSGNJ_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VFSGNJ_VF = OP_FVF | (VFSGNJ_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VFSGNJN_FUNCT6 = 0b001001 |
| |
| constexpr Opcode | RO_V_VFSGNJN_VV = OP_FVV | (VFSGNJN_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VFSGNJN_VF = OP_FVF | (VFSGNJN_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VFSGNJX_FUNCT6 = 0b001010 |
| |
| constexpr Opcode | RO_V_VFSGNJX_VV = OP_FVV | (VFSGNJX_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VFSGNJX_VF = OP_FVF | (VFSGNJX_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VFMADD_FUNCT6 = 0b101000 |
| |
| constexpr Opcode | RO_V_VFMADD_VV = OP_FVV | (VFMADD_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VFMADD_VF = OP_FVF | (VFMADD_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VFNMADD_FUNCT6 = 0b101001 |
| |
| constexpr Opcode | RO_V_VFNMADD_VV = OP_FVV | (VFNMADD_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VFNMADD_VF = OP_FVF | (VFNMADD_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VFMSUB_FUNCT6 = 0b101010 |
| |
| constexpr Opcode | RO_V_VFMSUB_VV = OP_FVV | (VFMSUB_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VFMSUB_VF = OP_FVF | (VFMSUB_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VFNMSUB_FUNCT6 = 0b101011 |
| |
| constexpr Opcode | RO_V_VFNMSUB_VV = OP_FVV | (VFNMSUB_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VFNMSUB_VF = OP_FVF | (VFNMSUB_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VFMACC_FUNCT6 = 0b101100 |
| |
| constexpr Opcode | RO_V_VFMACC_VV = OP_FVV | (VFMACC_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VFMACC_VF = OP_FVF | (VFMACC_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VFNMACC_FUNCT6 = 0b101101 |
| |
| constexpr Opcode | RO_V_VFNMACC_VV = OP_FVV | (VFNMACC_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VFNMACC_VF = OP_FVF | (VFNMACC_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VFMSAC_FUNCT6 = 0b101110 |
| |
| constexpr Opcode | RO_V_VFMSAC_VV = OP_FVV | (VFMSAC_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VFMSAC_VF = OP_FVF | (VFMSAC_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VFNMSAC_FUNCT6 = 0b101111 |
| |
| constexpr Opcode | RO_V_VFNMSAC_VV = OP_FVV | (VFNMSAC_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VFNMSAC_VF = OP_FVF | (VFNMSAC_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VFWMACC_FUNCT6 = 0b111100 |
| |
| constexpr Opcode | RO_V_VFWMACC_VV = OP_FVV | (VFWMACC_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VFWMACC_VF = OP_FVF | (VFWMACC_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VFWNMACC_FUNCT6 = 0b111101 |
| |
| constexpr Opcode | RO_V_VFWNMACC_VV |
| |
| constexpr Opcode | RO_V_VFWNMACC_VF |
| |
| constexpr Opcode | VFWMSAC_FUNCT6 = 0b111110 |
| |
| constexpr Opcode | RO_V_VFWMSAC_VV = OP_FVV | (VFWMSAC_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VFWMSAC_VF = OP_FVF | (VFWMSAC_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VFWNMSAC_FUNCT6 = 0b111111 |
| |
| constexpr Opcode | RO_V_VFWNMSAC_VV |
| |
| constexpr Opcode | RO_V_VFWNMSAC_VF |
| |
| constexpr Opcode | VNCLIP_FUNCT6 = 0b101111 |
| |
| constexpr Opcode | RO_V_VNCLIP_WV = OP_IVV | (VNCLIP_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VNCLIP_WX = OP_IVX | (VNCLIP_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VNCLIP_WI = OP_IVI | (VNCLIP_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | VNCLIPU_FUNCT6 = 0b101110 |
| |
| constexpr Opcode | RO_V_VNCLIPU_WV = OP_IVV | (VNCLIPU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VNCLIPU_WX = OP_IVX | (VNCLIPU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_V_VNCLIPU_WI = OP_IVI | (VNCLIPU_FUNCT6 << kRvvFunct6Shift) |
| |
| constexpr Opcode | RO_CZERO_EQZ |
| |
| constexpr Opcode | RO_CZERO_NEZ |
| |
| const int | kFcsrFlagsBits = 5 |
| |
| const uint32_t | kFcsrFlagsMask = (1 << kFcsrFlagsBits) - 1 |
| |
| const int | kFcsrFrmBits = 3 |
| |
| const int | kFcsrFrmShift = kFcsrFlagsBits |
| |
| const uint32_t | kFcsrFrmMask = ((1 << kFcsrFrmBits) - 1) << kFcsrFrmShift |
| |
| const int | kFcsrBits = kFcsrFlagsBits + kFcsrFrmBits |
| |
| const uint32_t | kFcsrMask = kFcsrFlagsMask | kFcsrFrmMask |
| |
| constexpr Opcode | RO_CSRRW = SYSTEM | (0b001 << kFunct3Shift) |
| |
| constexpr Opcode | RO_CSRRS = SYSTEM | (0b010 << kFunct3Shift) |
| |
| constexpr Opcode | RO_CSRRC = SYSTEM | (0b011 << kFunct3Shift) |
| |
| constexpr Opcode | RO_CSRRWI = SYSTEM | (0b101 << kFunct3Shift) |
| |
| constexpr Opcode | RO_CSRRSI = SYSTEM | (0b110 << kFunct3Shift) |
| |
| constexpr Opcode | RO_CSRRCI = SYSTEM | (0b111 << kFunct3Shift) |
| |
| constexpr Opcode | RO_FENCE_I = MISC_MEM | (0b001 << kFunct3Shift) |
| |
| static RegList | t_regs = {t0, t1, t2, t3, t4, t5, t6} |
| |
| static RegList | a_regs = {a0, a1, a2, a3, a4, a5, a6, a7} |
| |
| static RegList | s_regs = {s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11} |
| |
| const int | kUndefIndex = -1 |
| |
| const int | kSafepointRegisterStackIndexMap [kNumRegs] |
| |
| constexpr bool | kPadArguments = false |
| |
| constexpr Register | padreg = t6 |
| |
| constexpr DoubleRegister | kSingleRegZero = fs10 |
| |
| constexpr Register | kSimulatorBreakArgument = t6 |
| |
| constexpr Register | kMaglevFlagsRegister = t6 |
| |
| constexpr VRegister | kSimd128ScratchReg2 = v23 |
| |
| constexpr VRegister | kSimd128ScratchReg3 = v8 |
| |
| const int | kNumCallerSavedFPU = kCallerSavedFPU.Count() |
| |
| const int | kNumSafepointRegisters = 32 |
| |
| const RegList | kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved |
| |
| const int | kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved |
| |
| const FourByteInstr | kFourByteBrCondMask = 0xF << 20 |
| |
| const SixByteInstr | kSixByteBrCondMask = static_cast<SixByteInstr>(0xF) << 36 |
| |
| const int | kStackFrameRASlot = 14 |
| |
| const int | kStackFrameSPSlot = 15 |
| |
| const int | kCalleeRegisterSaveAreaSize = 160 |
| |
| static const int | kRegisterPassedArguments = arraysize(kCArgRegs) |
| |
| constexpr int | kSafepointTableStackSlotsOffset = 0 |
| |
| constexpr int | kStackSavedSavedFPSize = kDoubleSize |
| |
| template<class T > |
| constexpr MachineRepresentation | PhiMachineRepresentationOf |
| |
| constexpr Register | kScratchRegister = r10 |
| |
| constexpr YMMRegister | kScratchSimd256Reg = ymm15 |
| |
| constexpr int | kStackSpaceRequiredForCompilation = 40 |
| |
| constexpr int | kStackLimitSlackForDeoptimizationInBytes = 256 |
| |
| const size_t | kShortBuiltinCallsOldSpaceSizeThreshold = size_t{2} * GB |
| |
| constexpr int | kMaxInt = 0x7FFFFFFF |
| |
| constexpr int | kMinInt = -kMaxInt - 1 |
| |
| constexpr int | kMaxInt8 = (1 << 7) - 1 |
| |
| constexpr int | kMinInt8 = -(1 << 7) |
| |
| constexpr int | kMaxUInt8 = (1 << 8) - 1 |
| |
| constexpr int | kMinUInt8 = 0 |
| |
| constexpr int | kMaxInt16 = (1 << 15) - 1 |
| |
| constexpr int | kMinInt16 = -(1 << 15) |
| |
| constexpr int | kMaxUInt16 = (1 << 16) - 1 |
| |
| constexpr int | kMinUInt16 = 0 |
| |
| constexpr int | kMaxInt31 = kMaxInt / 2 |
| |
| constexpr int | kMinInt31 = kMinInt / 2 |
| |
| constexpr uint32_t | kMaxUInt32 = 0xFFFF'FFFFu |
| |
| constexpr uint32_t | kMinUInt32 = 0 |
| |
| constexpr uint64_t | kMaxUInt64 = 0xFFFF'FFFF'FFFF'FFFFu |
| |
| constexpr uint64_t | kMinUInt64 = 0 |
| |
| constexpr int | kInt8Size = sizeof(int8_t) |
| |
| constexpr int | kUInt8Size = sizeof(uint8_t) |
| |
| constexpr int | kByteSize = 1 |
| |
| constexpr int | kCharSize = sizeof(char) |
| |
| constexpr int | kShortSize = sizeof(short) |
| |
| constexpr int | kInt16Size = sizeof(int16_t) |
| |
| constexpr int | kUInt16Size = sizeof(uint16_t) |
| |
| constexpr int | kIntSize = sizeof(int) |
| |
| constexpr int | kInt32Size = sizeof(int32_t) |
| |
| constexpr int | kInt64Size = sizeof(int64_t) |
| |
| constexpr int | kUInt32Size = sizeof(uint32_t) |
| |
| constexpr int | kSizetSize = sizeof(size_t) |
| |
| constexpr int | kFloat16Size = sizeof(uint16_t) |
| |
| constexpr int | kFloatSize = sizeof(float) |
| |
| constexpr int | kDoubleSize = sizeof(double) |
| |
| constexpr int | kIntptrSize = sizeof(intptr_t) |
| |
| constexpr int | kUIntptrSize = sizeof(uintptr_t) |
| |
| constexpr int | kSystemPointerSize = sizeof(void*) |
| |
| constexpr int | kSystemPointerHexDigits = kSystemPointerSize == 4 ? 8 : 12 |
| |
| constexpr int | kPCOnStackSize = kSystemPointerSize |
| |
| constexpr int | kFPOnStackSize = kSystemPointerSize |
| |
| constexpr int | kElidedFrameSlots = 0 |
| |
| constexpr int | kDoubleSizeLog2 = 3 |
| |
| constexpr int | kMaxDoubleStringLength = 24 |
| |
| constexpr uint32_t | kMaxCommittedWasmCodeMB = 4095 |
| |
| constexpr uint32_t | kDefaultMaxWasmCodeSpaceSizeMb = 1024 |
| |
| constexpr size_t | kIsolateDataAlignment = 64 |
| |
| constexpr int | kSystemPointerSizeLog2 = 2 |
| |
| constexpr intptr_t | kIntptrSignBit = 0x80000000 |
| |
| constexpr bool | kPlatformRequiresCodeRange = false |
| |
| constexpr size_t | kMaximalCodeRangeSize = 0 * MB |
| |
| constexpr size_t | kMinimumCodeRangeSize = 0 * MB |
| |
| constexpr size_t | kMinExpectedOSPageSize = 4 * KB |
| |
| constexpr size_t | kReservedCodeRangePages = 0 |
| |
| static constexpr bool | kCompressGraphZone = COMPRESS_ZONES_BOOL |
| |
| constexpr int | kTaggedSize = kSystemPointerSize |
| |
| constexpr int | kTaggedSizeLog2 = kSystemPointerSizeLog2 |
| |
| constexpr int | kJSDispatchTableEntrySize = 16 |
| |
| constexpr int | kJSDispatchTableEntrySizeLog2 = 4 |
| |
| constexpr size_t | kJSDispatchTableReservationSize |
| |
| constexpr size_t | kMaxJSDispatchEntries |
| |
| constexpr int | kPointerSize = kSystemPointerSize |
| |
| constexpr int | kPointerSizeLog2 = kSystemPointerSizeLog2 |
| |
| constexpr int | kExternalPointerSlotSize = sizeof(ExternalPointer_t) |
| |
| constexpr int | kCppHeapPointerSlotSize = sizeof(CppHeapPointer_t) |
| |
| constexpr int | kIndirectPointerSize = sizeof(IndirectPointerHandle) |
| |
| constexpr int | kTrustedPointerSize = kTaggedSize |
| |
| constexpr int | kCodePointerSize = kTrustedPointerSize |
| |
| constexpr int | kProtectedPointerSize = kTaggedSize |
| |
| constexpr int | kJSDispatchHandleSize = sizeof(JSDispatchHandle) |
| |
| constexpr int | kEmbedderDataSlotSize = kSystemPointerSize |
| |
| constexpr int | kEmbedderDataSlotSizeInTaggedSlots |
| |
| constexpr size_t | kExternalAllocationSoftLimit |
| |
| constexpr int | kMaxRegularHeapObjectSize = (1 << (kPageSizeBits - 1)) |
| |
| constexpr int | kBitsPerByte = 8 |
| |
| constexpr int | kBitsPerByteLog2 = 3 |
| |
| constexpr int | kBitsPerSystemPointer = kSystemPointerSize * kBitsPerByte |
| |
| constexpr int | kBitsPerSystemPointerLog2 |
| |
| constexpr int | kBitsPerInt = kIntSize * kBitsPerByte |
| |
| constexpr uint32_t | kBinary32SignMask = 0x80000000u |
| |
| constexpr uint32_t | kBinary32ExponentMask = 0x7f800000u |
| |
| constexpr uint32_t | kBinary32MantissaMask = 0x007fffffu |
| |
| constexpr int | kBinary32ExponentBias = 127 |
| |
| constexpr int | kBinary32MaxExponent = 0xFE |
| |
| constexpr int | kBinary32MinExponent = 0x01 |
| |
| constexpr int | kBinary32MantissaBits = 23 |
| |
| constexpr int | kBinary32ExponentShift = 23 |
| |
| constexpr uint64_t | kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51 |
| |
| constexpr int | kOneByteSize = kCharSize |
| |
| constexpr int | kSimd128Size = 16 |
| |
| constexpr int | kSimd256Size = 32 |
| |
| static const size_t | LanguageModeSize = 2 |
| |
| constexpr int | kNoSourcePosition = -1 |
| |
| constexpr int | kFunctionEntryBytecodeOffset = -1 |
| |
| constexpr int | kFunctionExitBytecodeOffset = -1 |
| |
| constexpr int | kNoDeoptimizationId = -1 |
| |
| constexpr DeoptimizeKind | kFirstDeoptimizeKind = DeoptimizeKind::kEager |
| |
| constexpr DeoptimizeKind | kLastDeoptimizeKind = DeoptimizeKind::kLazy |
| |
| constexpr int | kDeoptimizeKindCount = static_cast<int>(kLastDeoptimizeKind) + 1 |
| |
| constexpr bool | kIsSmiValueInUpper32Bits |
| |
| constexpr bool | kIsSmiValueInLower32Bits |
| |
| constexpr intptr_t | kSmiSignMask |
| |
| constexpr int | kObjectAlignmentBits = kTaggedSizeLog2 |
| |
| constexpr intptr_t | kObjectAlignment = 1 << kObjectAlignmentBits |
| |
| constexpr intptr_t | kObjectAlignmentMask = kObjectAlignment - 1 |
| |
| constexpr intptr_t | kObjectAlignment8GbHeap = 8 |
| |
| constexpr intptr_t | kObjectAlignment8GbHeapMask = kObjectAlignment8GbHeap - 1 |
| |
| constexpr intptr_t | kPointerAlignment = (1 << kSystemPointerSizeLog2) |
| |
| constexpr intptr_t | kPointerAlignmentMask = kPointerAlignment - 1 |
| |
| constexpr intptr_t | kDoubleAlignment = 8 |
| |
| constexpr intptr_t | kDoubleAlignmentMask = kDoubleAlignment - 1 |
| |
| constexpr int | kCodeAlignmentBits = 5 |
| |
| constexpr intptr_t | kCodeAlignment = 1 << kCodeAlignmentBits |
| |
| constexpr intptr_t | kCodeAlignmentMask = kCodeAlignment - 1 |
| |
| const Address | kWeakHeapObjectMask = 1 << 1 |
| |
| const uint32_t | kClearedWeakHeapObjectLower32 = 3 |
| |
| constexpr uint32_t | kClearedFreeMemoryValue = 0 |
| |
| constexpr uint32_t | kZapValue = 0xdeadbeef |
| |
| constexpr uint32_t | kHandleZapValue = 0xbaddeaf |
| |
| constexpr uint32_t | kGlobalHandleZapValue = 0xbaffedf |
| |
| constexpr uint32_t | kPersistentHandleZapValue = 0xbaff6df |
| |
| constexpr uint32_t | kTracedHandleEagerResetZapValue = 0xbeffedf |
| |
| constexpr uint32_t | kTracedHandleMinorGCResetZapValue = 0xbeffadf |
| |
| constexpr uint32_t | kTracedHandleMinorGCWeakResetZapValue = 0xbe11adf |
| |
| constexpr uint32_t | kTracedHandleFullGCResetZapValue = 0xbe77adf |
| |
| constexpr uint32_t | kFromSpaceZapValue = 0xbeefdaf |
| |
| constexpr uint32_t | kSlotsZapValue = 0xbeefdeef |
| |
| constexpr uint32_t | kDebugZapValue = 0xbadbaddb |
| |
| constexpr uint32_t | kFreeListZapValue = 0xfeed1eaf |
| |
| constexpr int | kCodeZapValue = 0xbadc0de |
| |
| constexpr uint32_t | kPhantomReferenceZap = 0xca11bac |
| |
| constexpr uint32_t | kQuietNaNHighBitsMask = 0xfff << (51 - 32) |
| |
| template<typename TSlot > |
| static constexpr bool | SlotHoldsTrustedPointerV |
| |
| constexpr int | kSpaceTagSize = 4 |
| |
| constexpr int | kIeeeDoubleMantissaWordOffset = 4 |
| |
| constexpr int | kIeeeDoubleExponentWordOffset = 0 |
| |
| constexpr int | kScopeInfoMaxInlinedLocalNamesSize = 75 |
| |
| constexpr uint32_t | kHoleNanUpper32 = 0xFFF7FFFF |
| |
| constexpr uint32_t | kHoleNanLower32 = 0xFFF7FFFF |
| |
| constexpr uint64_t | kHoleNanInt64 |
| |
| constexpr uint64_t | kMaxSafeIntegerUint64 = 9007199254740991 |
| |
| constexpr double | kMaxSafeInteger = static_cast<double>(kMaxSafeIntegerUint64) |
| |
| constexpr double | kMinSafeInteger = -kMaxSafeInteger |
| |
| constexpr double | kMaxUInt32Double = double{kMaxUInt32} |
| |
| constexpr int64_t | kMaxAdditiveSafeInteger = 4503599627370495 |
| |
| constexpr int64_t | kMinAdditiveSafeInteger = -4503599627370496 |
| |
| constexpr int | kAdditiveSafeIntegerBitLength = 53 |
| |
| constexpr int | kAdditiveSafeIntegerShift = 64 - kAdditiveSafeIntegerBitLength |
| |
| static constexpr bool | kTieringStateInProgressBlocksTierup = true |
| |
| static constexpr uint32_t | kNoneOrInProgressMask = 0b110 |
| |
| constexpr int | kInvalidInfoId = -1 |
| |
| constexpr int | kFunctionLiteralIdTopLevel = 0 |
| |
| constexpr int | kSwissNameDictionaryInitialCapacity = 4 |
| |
| constexpr int | kSmallOrderedHashSetMinCapacity = 4 |
| |
| constexpr int | kSmallOrderedHashMapMinCapacity = 4 |
| |
| constexpr AdaptArguments | kAdapt = AdaptArguments::kYes |
| |
| constexpr AdaptArguments | kDontAdapt = AdaptArguments::kNo |
| |
| constexpr int | kJSArgcReceiverSlots = 1 |
| |
| constexpr uint16_t | kDontAdaptArgumentsSentinel = 0 |
| |
| constexpr int | kJSCallClosureParameterIndex = -1 |
| |
| constexpr int | kMinParameterIndex = kJSCallClosureParameterIndex |
| |
| constexpr uint64_t | kInvalidWasmSignatureHash = ~uint64_t{0} |
| |
| static constexpr int | kMaxOpportunisticFinalizeTimeMs = 1 |
| |
| static const int | kDaysIn4Years = 4 * 365 + 1 |
| |
| static const int | kDaysIn100Years = 25 * kDaysIn4Years - 1 |
| |
| static const int | kDaysIn400Years = 4 * kDaysIn100Years + 1 |
| |
| static const int | kDays1970to2000 = 30 * 365 + 7 |
| |
| static const int | kDaysOffset |
| |
| static const int | kYearsOffset = 400000 |
| |
| static const char | kDaysInMonths [] |
| |
| constexpr DeoptimizeReason | kFirstDeoptimizeReason |
| |
| constexpr int | kDeoptimizeReasonCount = 0 DEOPTIMIZE_REASON_LIST(SUM) |
| |
| constexpr DeoptimizeReason | kLastDeoptimizeReason |
| |
| const int | kNoWasmReturnKind = -1 |
| |
| static constexpr int | kNumTranslationOpcodes |
| |
| static constexpr int | kNumTranslationJsFrameOpcodes |
| |
| static constexpr int | kNumTranslationFrameOpcodes |
| |
| constexpr int | kMaxTranslationOperandCount = 6 |
| |
| static const int | kR0DwarfCode = 0 |
| |
| static const int | kFpDwarfCode = 11 |
| |
| static const int | kSpDwarfCode = 13 |
| |
| static const int | kLrDwarfCode = 14 |
| |
| static const int | kX0DwarfCode = 0 |
| |
| static const int | kFpDwarfCode = 29 |
| |
| static const int | kLrDwarfCode = 30 |
| |
| static const int | kSpDwarfCode = 31 |
| |
| static const int | kR0DwarfCode = 0 |
| |
| static const int | kFpDwarfCode = 11 |
| |
| static const int | kR14DwarfCode = 14 |
| |
| static const int | kSpDwarfCode = 15 |
| |
| static const int | kRaxDwarfCode = 0 |
| |
| static const int | kRbpDwarfCode = 6 |
| |
| static const int | kRspDwarfCode = 7 |
| |
| static const int | kRipDwarfCode = 16 |
| |
| static constexpr int | kFastCCallAlignmentPaddingCount = 1 |
| |
| constexpr uint8_t | kNumIsolateFieldIds |
| |
| thread_local Isolate::PerIsolateThreadData *g_current_per_isolate_thread_data_ | V8_CONSTINIT = nullptr |
| |
| base::LazyMutex | read_only_dispatch_entries_mutex_ = LAZY_MUTEX_INITIALIZER |
| |
| static constexpr int | kMinOneByteLength |
| |
| static constexpr int | kMinTwoByteLength |
| |
| static constexpr int | kMinOneByteCachedLength |
| |
| static constexpr int | kMinTwoByteCachedLength |
| |
| FlagValues v8_flags | PERMISSION_MUTABLE_SECTION |
| |
| | false |
| |
| Disallow flags or implications overriding each other abort_on_contradictory_flags | true |
| |
| | V8_ALLOCATION_FOLDING_BOOL |
| |
| | V8_ENABLE_UNCONDITIONAL_WRITE_BARRIERS_BOOL |
| |
| | V8_ENABLE_CONSERVATIVE_STACK_SCANNING_BOOL |
| |
| use conservative stack scanning | V8_ENABLE_DIRECT_HANDLE_BOOL |
| |
| use conservative stack scanning use direct handles with conservative stack scanning Treat some precise references as conservative references to stress test object pinning in Scavenger minor_gc_task Enables random stressing of object pinning in | Scavenger |
| |
| | FUTURE_BOOL |
| |
| refactor address components for immediate indexing make OptimizeMaglevOnNextCall optimize to turbofan instead of maglev filter for tracing turbofan compilation | nullptr |
| |
| refactor address components for immediate indexing make OptimizeMaglevOnNextCall optimize to turbofan instead of maglev filter for tracing turbofan compilation trace turbo cfg trace TurboFan s graph trimmer trace TurboFan s control equivalence trace TurboFan s register allocator trace stack load store counters for optimized code in run fuzzing &&concurrent_recompilation trace_turbo trace_turbo_scheduled trace_turbo_stack_accesses verify TurboFan machine graph of code stubs enable FixedArray bounds checks print TurboFan statistics of wasm compilations maximum cumulative size of bytecode considered for inlining scale factor of bytecode size used to calculate the inlining budget * | KB = 1024 |
| |
| refactor address components for immediate indexing make OptimizeMaglevOnNextCall optimize to turbofan instead of maglev filter for tracing turbofan compilation trace turbo cfg trace TurboFan s graph trimmer trace TurboFan s control equivalence trace TurboFan s register allocator trace stack load store counters for optimized code in run fuzzing &&concurrent_recompilation trace_turbo trace_turbo_scheduled trace_turbo_stack_accesses verify TurboFan machine graph of code stubs enable FixedArray bounds checks print TurboFan statistics of wasm compilations maximum cumulative size of bytecode considered for inlining scale factor of bytecode size used to calculate the inlining budget maximum bytecode size to be considered for turbofan | optimization |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible | max_inlined_bytecode_size_absolute |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for | mksnapshot |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data | DEBUG_BOOL |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to | https |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in | name |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation | speed |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available | space |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent | marking |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage * | MB = KB * 1024 |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long enable use of partial constant | none |
| |
| too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long enable use of partial constant enable optional features on the simulator for | testing |
| |
| bool | is_bool_flag = type_ == TYPE_MAYBE_BOOL || type_ == TYPE_BOOL |
| |
| bool | check_implications = change_flag |
| |
| | set_by_ = new_set_by |
| |
| return | change_flag |
| |
| Flag | flags [] |
| |
| constexpr size_t | kNumFlags = arraysize(flags) |
| |
| static std::atomic< uint32_t > | flag_hash {0} |
| |
| static std::atomic< bool > | flags_frozen {false} |
| |
| V8_EXPORT_PRIVATE FlagValues | v8_flags |
| |
| constexpr Address | kTaggedNullAddress = 0x1 |
| |
| template<typename T > |
| static constexpr bool | is_direct_handle_v = is_direct_handle<T>::value |
| |
| constexpr NullMaybeHandleType | kNullMaybeHandle |
| |
| constexpr HeapObjectHeader * | kNoNativeAddress = nullptr |
| |
| static constexpr FreeListCategoryType | kFirstCategory = 0 |
| |
| static constexpr FreeListCategoryType | kInvalidCategory = -1 |
| |
| template class V8_EXPORT_PRIVATE | MemoryController< V8HeapTrait > |
| |
| template class V8_EXPORT_PRIVATE | MemoryController< GlobalMemoryTrait > |
| |
| const int | kMainThreadTask = 0 |
| |
| static base::LazyMutex | object_stats_mutex = LAZY_MUTEX_INITIALIZER |
| |
| static constexpr GlobalSafepointForSharedSpaceIsolateTag | kGlobalSafepointForSharedSpaceIsolate |
| |
| static constexpr auto | kNewSpace |
| |
| v8::internal::LoadHandler | V8_OBJECT_END |
| |
| static constexpr char | kJsonStringifierZoneName [] = "json-stringifier-zone" |
| |
| static const char * | kLogEventsNames [] |
| |
| static const char * | kCodeTagNames [] |
| |
| template const char * | string |
| |
| constexpr int | kFP64ExponentBits = 11 |
| |
| constexpr int | kFP64MantissaBits = 52 |
| |
| constexpr uint64_t | kFP64ExponentBias = 1023 |
| |
| constexpr uint64_t | kFP64SignMask |
| |
| constexpr uint64_t | kFP64Infinity = uint64_t{2047} << kFP64MantissaBits |
| |
| constexpr uint64_t | kFP16InfinityAndNaNInfimum |
| |
| constexpr uint64_t | kFP16MinExponent = kFP64ExponentBias - 14 |
| |
| constexpr uint64_t | kFP16DenormalThreshold |
| |
| constexpr int | kFP16MantissaBits = 10 |
| |
| constexpr uint16_t | kFP16qNaN = 0x7e00 |
| |
| constexpr uint16_t | kFP16Infinity = 0x7c00 |
| |
| constexpr uint64_t | kFP64To16RoundingAddend |
| |
| constexpr uint64_t | kFP64To16RebiasExponentAndRound |
| |
| constexpr uint64_t | kFP64To16DenormalMagic |
| |
| constexpr uint32_t | kFP32WithoutSignMask = 0x7fffffff |
| |
| constexpr uint32_t | kFP32MinFP16ZeroRepresentable = 0x33000000 |
| |
| constexpr uint32_t | kFP32MaxFP16Representable = 0x47800000 |
| |
| constexpr uint32_t | kFP32SubnormalThresholdOfFP16 = 0x38800000 |
| |
| constexpr int | kMaxFractionDigits = 100 |
| |
| constexpr int | kDoubleToFixedMaxDigitsBeforePoint = 21 |
| |
| constexpr int | kDoubleToFixedMaxChars |
| |
| constexpr int | kDoubleToPrecisionMaxChars = kMaxFractionDigits + 8 |
| |
| constexpr int | kDoubleToExponentialMaxChars = kMaxFractionDigits + 8 |
| |
| constexpr int | kDoubleToRadixMaxChars = 2200 |
| |
| constexpr int | kDoubleToStringMinBufferSize = 100 |
| |
| | setter |
| |
| using | Address = uintptr_t |
| |
| | kSetterOffset |
| |
| | getter_side_effect_type |
| |
| | named_setter |
| |
| | kApiNamedPropertySetterCallbackTag |
| |
| | constant_pool |
| |
| | TrustedFixedArray |
| |
| static constexpr int | kCodeKindCount = CODE_KIND_LIST(V) |
| |
| static constexpr CodeKinds | kJSFunctionCodeKindsMask |
| |
| static constexpr CodeKinds | kOptimizedJSFunctionCodeKindsMask |
| |
| | debugger_hints |
| |
| | debug_is_blackboxed |
| |
| DebugInfo::DebugIsBlackboxedBit | debugging_id |
| |
| | original_bytecode_array |
| |
| | BytecodeArray |
| |
| | kOriginalBytecodeArrayOffset |
| |
| | is_constructor |
| |
| | number_of_descriptors |
| |
| constexpr uint8_t | kTypedArrayAndRabGsabTypedArrayElementsKindShifts [] |
| |
| constexpr uint8_t | kTypedArrayAndRabGsabTypedArrayElementsKindSizes [] |
| |
| const ElementsKind | kFastElementsKindSequence [kFastElementsKindCount] |
| |
| constexpr int | kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1 |
| |
| constexpr int | kFastElementsKindCount |
| |
| constexpr int | kFastElementsKindPackedToHoley |
| |
| constexpr int | kElementsKindBits = 6 |
| |
| constexpr int | kFastElementsKindBits = 3 |
| |
| static constexpr int | kFeedbackSlotKindCount |
| |
| static constexpr int | kMaxFixedArrayCapacity |
| |
| constexpr int | kFunctionKindBitSize = 5 |
| |
| const uint32_t | kIsNotStringMask = ~((1 << 7) - 1) |
| |
| const uint32_t | kStringTag = 0x0 |
| |
| const uint32_t | kStringRepresentationMask = (1 << 3) - 1 |
| |
| const uint32_t | kIsIndirectStringMask = 1 << 0 |
| |
| const uint32_t | kIsIndirectStringTag = 1 << 0 |
| |
| const uint32_t | kThinStringTagBit = 1 << 2 |
| |
| const uint32_t | kStringEncodingMask = 1 << 3 |
| |
| const uint32_t | kTwoByteStringTag = 0 |
| |
| const uint32_t | kOneByteStringTag = 1 << 3 |
| |
| constexpr uint32_t | kStringRepresentationAndEncodingMask |
| |
| constexpr uint32_t | kSeqOneByteStringTag = kSeqStringTag | kOneByteStringTag |
| |
| constexpr uint32_t | kSeqTwoByteStringTag = kSeqStringTag | kTwoByteStringTag |
| |
| constexpr uint32_t | kExternalOneByteStringTag |
| |
| constexpr uint32_t | kExternalTwoByteStringTag |
| |
| const uint32_t | kUncachedExternalStringMask = 1 << 4 |
| |
| const uint32_t | kUncachedExternalStringTag = 1 << 4 |
| |
| const uint32_t | kIsNotInternalizedMask = 1 << 5 |
| |
| const uint32_t | kNotInternalizedTag = 1 << 5 |
| |
| const uint32_t | kInternalizedTag = 0 |
| |
| const uint32_t | kSharedStringMask = 1 << 6 |
| |
| const uint32_t | kSharedStringTag = 1 << 6 |
| |
| constexpr uint32_t | kStringRepresentationEncodingAndSharedMask |
| |
| const uint32_t | kShortcutTypeMask |
| |
| const uint32_t | kShortcutTypeTag = kConsStringTag | kNotInternalizedTag |
| |
| constexpr InstanceType | LAST_STRING_TYPE |
| |
| static constexpr InstanceType | PROPERTY_DICTIONARY_TYPE = NAME_DICTIONARY_TYPE |
| |
| constexpr uint16_t | kInfinityChar = 0x221e |
| |
| | bit_field |
| |
| | is_detachable |
| |
| JSArrayBuffer::IsDetachableBit | is_shared |
| |
| | is_backed_by_rab |
| |
| | unicode_string |
| |
| | icu_simple_date_format |
| |
| | status |
| |
| | needs_await |
| |
| JSDisposableStackBase::NeedsAwaitBit | suppressed_error_created |
| |
| | separator |
| |
| | SeparatorBits |
| |
| | style_flags |
| |
| | raw_feedback_cell |
| |
| | Tagged< FeedbackCell > |
| |
| | has_prototype_slot |
| |
| | bool |
| |
| | raw_properties_or_hash |
| |
| | Tagged< Object > |
| |
| | element_dictionary |
| |
| | Tagged< NumberDictionary > |
| |
| | icu_number_formatter |
| |
| | uc16_bytecode |
| |
| | TrustedByteArray |
| |
| | hour_minute_second |
| |
| second_parts | offset_milliseconds |
| |
| second_parts OffsetMillisecondsOrTimeZoneIndex | offset_milliseconds_or_time_zone_index |
| |
| | static_elements_template |
| |
| kStaticElementsTemplateOffset | instance_properties_template |
| |
| kStaticElementsTemplateOffset kInstancePropertiesTemplateOffset | instance_computed_properties |
| |
| kStaticElementsTemplateOffset kInstancePropertiesTemplateOffset | Tagged< FixedArray > |
| |
| | instance_descriptors |
| |
| | Tagged< DescriptorArray > |
| |
| kInstanceDescriptorsOffset | raw_transitions |
| |
| kInstanceDescriptorsOffset | Tagged< UnionOf< Smi, MaybeWeak< Map >, TransitionArray > > |
| |
| kInstanceDescriptorsOffset kTransitionsOrPrototypeInfoOffset | prototype |
| |
| kInstanceDescriptorsOffset kTransitionsOrPrototypeInfoOffset | Tagged< JSPrototype > |
| |
| kInstanceDescriptorsOffset kTransitionsOrPrototypeInfoOffset | kPrototypeOffset |
| |
| | relaxed_bit_field |
| |
| Map::Bits1::HasPrototypeSlotBit | has_named_interceptor |
| |
| Map::Bits1::HasPrototypeSlotBit Map::Bits1::HasNamedInterceptorBit | is_undetectable |
| |
| Map::Bits1::HasPrototypeSlotBit Map::Bits1::HasNamedInterceptorBit Map::Bits1::IsUndetectableBit Map::Bits1::IsConstructorBit | bit_field2 |
| |
| Map::Bits1::HasPrototypeSlotBit Map::Bits1::HasNamedInterceptorBit Map::Bits1::IsUndetectableBit Map::Bits1::IsConstructorBit | is_immutable_proto |
| |
| Map::Bits1::HasPrototypeSlotBit Map::Bits1::HasNamedInterceptorBit Map::Bits1::IsUndetectableBit Map::Bits1::IsConstructorBit Map::Bits2::IsImmutablePrototypeBit | release_acquire_bit_field3 |
| |
| Map::Bits1::HasPrototypeSlotBit Map::Bits1::HasNamedInterceptorBit Map::Bits1::IsUndetectableBit Map::Bits1::IsConstructorBit Map::Bits2::IsImmutablePrototypeBit | is_deprecated |
| |
| Map::Bits1::HasPrototypeSlotBit Map::Bits1::HasNamedInterceptorBit Map::Bits1::IsUndetectableBit Map::Bits1::IsConstructorBit Map::Bits2::IsImmutablePrototypeBit Map::Bits3::IsDeprecatedBit | is_prototype_map |
| |
| Map::Bits1::HasPrototypeSlotBit Map::Bits1::HasNamedInterceptorBit Map::Bits1::IsUndetectableBit Map::Bits1::IsConstructorBit Map::Bits2::IsImmutablePrototypeBit Map::Bits3::IsDeprecatedBit Map::Bits3::IsPrototypeMapBit | relaxed_bit_field3 |
| |
| Map::Bits1::HasPrototypeSlotBit Map::Bits1::HasNamedInterceptorBit Map::Bits1::IsUndetectableBit Map::Bits1::IsConstructorBit Map::Bits2::IsImmutablePrototypeBit Map::Bits3::IsDeprecatedBit Map::Bits3::IsPrototypeMapBit | bit_field3 |
| |
| Map::Bits1::HasPrototypeSlotBit Map::Bits1::HasNamedInterceptorBit Map::Bits1::IsUndetectableBit Map::Bits1::IsConstructorBit Map::Bits2::IsImmutablePrototypeBit Map::Bits3::IsDeprecatedBit Map::Bits3::IsPrototypeMapBit | is_extensible |
| |
| Map::Bits1::HasPrototypeSlotBit Map::Bits1::HasNamedInterceptorBit Map::Bits1::IsUndetectableBit Map::Bits1::IsConstructorBit Map::Bits2::IsImmutablePrototypeBit Map::Bits3::IsDeprecatedBit Map::Bits3::IsPrototypeMapBit Map::Bits3::IsExtensibleBit | construction_counter |
| |
| | constructor_or_back_pointer |
| |
| | kConstructorOrBackPointerOrNativeContextOffset |
| |
| !IsContextMap | this |
| |
| !IsContextMap !IsContextMap | native_context |
| |
| !IsContextMap !IsContextMap | Tagged< NativeContext > |
| |
| !IsContextMap !IsContextMap IsContextMap this IsMapMap this | raw_native_context_or_null |
| |
| return | value |
| |
| | async_evaluation_ordinal |
| |
| | is_in_public_symbol_table |
| |
| const int | kVariableSizeSentinel = 0 |
| |
| const int | kStubMajorKeyBits = 8 |
| |
| const int | kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1 |
| |
| | property_details_raw |
| |
| | Tagged< Smi > |
| |
| | dependent_code |
| |
| | Tagged< DependentCode > |
| |
| static const int | kPropertyAttributesBitsCount = 3 |
| |
| static const int | kPropertyAttributesCombinationsCount |
| |
| static const int | kDescriptorIndexBitCount = 10 |
| |
| static const int | kFirstInobjectPropertyOffsetBitCount = 7 |
| |
| static const int | kMaxNumberOfDescriptors = (1 << kDescriptorIndexBitCount) - 4 |
| |
| static const int | kInvalidEnumCacheSentinel |
| |
| | eval_from_position |
| |
| | kEvalFromPositionOffset |
| |
| | interpreter_trampoline |
| |
| | Code |
| |
| kInterpreterTrampolineOffset | script |
| |
| kInterpreterTrampolineOffset | Tagged< HeapObject > |
| |
| | raw_outer_scope_info_or_feedback_metadata |
| |
| | flags2 |
| |
| | has_static_private_methods_or_accessors |
| |
| SharedFunctionInfo::HasStaticPrivateMethodsOrAccessorsBit | maglev_compilation_failed |
| |
| SharedFunctionInfo::HasStaticPrivateMethodsOrAccessorsBit SharedFunctionInfo::MaglevCompilationFailedBit | relaxed_flags |
| |
| SharedFunctionInfo::HasStaticPrivateMethodsOrAccessorsBit SharedFunctionInfo::MaglevCompilationFailedBit | syntax_kind |
| |
| SharedFunctionInfo::HasStaticPrivateMethodsOrAccessorsBit SharedFunctionInfo::MaglevCompilationFailedBit SharedFunctionInfo::FunctionSyntaxKindBits | has_duplicate_parameters |
| |
| SharedFunctionInfo::HasStaticPrivateMethodsOrAccessorsBit SharedFunctionInfo::MaglevCompilationFailedBit SharedFunctionInfo::FunctionSyntaxKindBits SharedFunctionInfo::HasDuplicateParametersBit | requires_instance_members_initializer |
| |
| SharedFunctionInfo::HasStaticPrivateMethodsOrAccessorsBit SharedFunctionInfo::MaglevCompilationFailedBit SharedFunctionInfo::FunctionSyntaxKindBits SharedFunctionInfo::HasDuplicateParametersBit SharedFunctionInfo::RequiresInstanceMembersInitializerBit | has_reported_binary_coverage |
| |
| SharedFunctionInfo::HasStaticPrivateMethodsOrAccessorsBit SharedFunctionInfo::MaglevCompilationFailedBit SharedFunctionInfo::FunctionSyntaxKindBits SharedFunctionInfo::HasDuplicateParametersBit SharedFunctionInfo::RequiresInstanceMembersInitializerBit SharedFunctionInfo::HasReportedBinaryCoverageBit | properties_are_final |
| |
| SharedFunctionInfo::HasStaticPrivateMethodsOrAccessorsBit SharedFunctionInfo::MaglevCompilationFailedBit SharedFunctionInfo::FunctionSyntaxKindBits SharedFunctionInfo::HasDuplicateParametersBit SharedFunctionInfo::RequiresInstanceMembersInitializerBit SharedFunctionInfo::HasReportedBinaryCoverageBit SharedFunctionInfo::PropertiesAreFinalBit | live_edited |
| |
| static constexpr int | kStaticRootsSFISize = 48 |
| |
| template<typename T > |
| static constexpr bool | is_maybe_weak_v = is_maybe_weak<T>::value |
| |
| template<typename Derived , typename Base > |
| static constexpr bool | is_subtype_v = is_subtype<Derived, Base>::value |
| |
| template<typename T > |
| static constexpr bool | is_taggable_v = is_taggable<T>::value |
| |
| template<typename From , typename To > |
| static constexpr bool | is_castable_v = is_castable<From, To>::value |
| |
| static constexpr bool | kTaggedCanConvertToRawObjects = true |
| |
| | relaxed_flag |
| |
| | has_side_effects |
| |
| HasSideEffectsBit::kShift | needs_access_check |
| |
| HasSideEffectsBit::kShift NeedsAccessCheckBit::kShift | remove_prototype |
| |
| | allowed_receiver_instance_type_range_end |
| |
| | PrototypeProviderTemplate |
| |
| | undefined |
| |
| UnionOf< Undefined, FunctionTemplateInfo > | NamedPropertyHandler |
| |
| UnionOf< Undefined, FunctionTemplateInfo > UnionOf< Undefined, InterceptorInfo > | InstanceTemplate |
| |
| UnionOf< Undefined, FunctionTemplateInfo > UnionOf< Undefined, InterceptorInfo > UnionOf< Undefined, ObjectTemplateInfo > | AccessCheckInfo |
| |
| template<typename... Ts> |
| static constexpr bool | is_union_v = is_union<Ts...>::value |
| |
| static const uint32_t | kLatestVersion = 15 |
| |
| static const unsigned char | kPerfectKeywordLengthTable [128] |
| |
| static const struct PerfectKeywordHashTableEntry | kPerfectKeywordHashTable [128] |
| |
| static const constexpr Token::Value | one_char_tokens [128] |
| |
| static constexpr const uint8_t | character_scan_flags [128] |
| |
| static const int | kProfilerStackSize = 256 * KB |
| |
| struct { | |
| |
| int v8::internal::index | |
| |
| const char * v8::internal::name | |
| |
| } | native_context_names [] | |
| |
| static const base::uc32 | kSurrogateStart = 0xd800 |
| |
| static const base::uc32 | kSurrogateEnd = 0xdfff |
| |
| static const base::uc32 | kNonBmpStart = 0x10000 |
| |
| const RegList | kRegExpCalleeSaved = {r25, r26, r27, r28, r29, r30, fp} |
| |
| constexpr int | kRegExpPaddedBytecodeCount = 1 << 6 |
| |
| constexpr int | BYTECODE_MASK = kRegExpPaddedBytecodeCount - 1 |
| |
| const unsigned int | MAX_FIRST_ARG = 0x7fffffu |
| |
| const int | BYTECODE_SHIFT = 8 |
| |
| static constexpr int | kRegExpBytecodeCount = BYTECODE_ITERATOR(COUNT) |
| |
| static constexpr int | kRegExpBytecodeLengths [] |
| |
| static constexpr const char *const | kRegExpBytecodeNames [] |
| |
| constexpr base::uc32 | kMaxCodePoint = 0x10ffff |
| |
| constexpr int | kMaxUtf16CodeUnit = 0xffff |
| |
| constexpr uint32_t | kMaxUtf16CodeUnitU = 0xffff |
| |
| const char *const | kRegExpErrorStrings [] |
| |
| constexpr int | kRegExpFlagCount = REGEXP_FLAG_LIST(V) |
| |
| static const base::uc32 | kLeadSurrogateStart = 0xd800 |
| |
| static const base::uc32 | kLeadSurrogateEnd = 0xdbff |
| |
| static const base::uc32 | kTrailSurrogateStart = 0xdc00 |
| |
| static const base::uc32 | kTrailSurrogateEnd = 0xdfff |
| |
| static const base::uc32 | kNonBmpStart = 0x10000 |
| |
| static const base::uc32 | kNonBmpEnd = 0x10ffff |
| |
| static const Runtime::Function | kIntrinsicFunctions [] |
| |
| int | g_num_isolates_for_testing = 1 |
| |
| constexpr int | kCodeEntrypointTagShift = 48 |
| |
| constexpr int | kIndirectPointerTagShift = 48 |
| |
| constexpr uint64_t | kIndirectPointerTagMask = 0x7fff'0000'0000'0000 |
| |
| constexpr uint64_t | kTrustedPointerTableMarkBit = 0x8000'0000'0000'0000 |
| |
| constexpr uint64_t | kTrustedPointerTableFreeEntryBit = 0x0080'0000'0000'0000 |
| |
| constexpr uint64_t | kIndirectPointerTagMaskWithoutFreeEntryBit |
| |
| constexpr uint64_t | kAllTagsForAndBasedTypeChecking [] |
| |
| static constexpr char | kDefaultEmbeddedVariant [] = "Default" |
| |
| static constexpr int | kAsmMaxLineLen = 71 |
| |
| static constexpr int | kAsmContIndentLen = 15 |
| |
| static constexpr int | kAsmContMaxLen = kAsmMaxLineLen - kAsmContIndentLen |
| |
| static constexpr int | kNumberOfSnapshotSpaces = 4 |
| |
| static base::LazyMutex | external_startup_data_mutex = LAZY_MUTEX_INITIALIZER |
| |
| static v8::StartupData | external_startup_blob = {nullptr, 0} |
| |
| const constexpr uint8_t | kOneByteCharFlags [256] |
| |
| const int | kStringBuilderConcatHelperLengthBits = 11 |
| |
| const int | kStringBuilderConcatHelperPositionBits = 19 |
| |
| constexpr word_t | kWordTAllBitsSet = std::numeric_limits<word_t>::max() |
| |
| constexpr word_t | kOneInEveryByte = kWordTAllBitsSet / 0xFF |
| |
| constexpr word_t | kAsciiMask = kOneInEveryByte << 7 |
| |
| static const int | kMentionedObjectCacheMaxSize = 256 |
| |
| static const int | kInitialIdentityMapSize = 4 |
| |
| static const int | kResizeFactor = 2 |
| |
| const size_t | kMinComplexMemCopy = 8 |
| |
| static const uint32_t | K [64] |
| |
| static const HASH_VTAB | SHA256_VTAB |
| |
| static const uint64_t | kZeroHashSeed = 0 |
| |
| V8_EXPORT_PRIVATE void V8_EXPORT_PRIVATE void const char * | format |
| |
| | tagged_buffer |
| |
| | memory0_size |
| |
| | size_t |
| |
| kMemory0SizeOffset | new_allocation_limit_address |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset | old_allocation_limit_address |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset | globals_start |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset | jump_table_start |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset kJumpTableStartOffset | tiering_budget_array |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset kJumpTableStartOffset std::atomic< uint32_t > kTieringBudgetArrayOffset | data_segment_starts |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset kJumpTableStartOffset std::atomic< uint32_t > kTieringBudgetArrayOffset | Tagged< FixedAddressArray > |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset kJumpTableStartOffset std::atomic< uint32_t > kTieringBudgetArrayOffset kDataSegmentStartsOffset | element_segments |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset kJumpTableStartOffset std::atomic< uint32_t > kTieringBudgetArrayOffset kDataSegmentStartsOffset kElementSegmentsOffset | instance_object |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset kJumpTableStartOffset std::atomic< uint32_t > kTieringBudgetArrayOffset kDataSegmentStartsOffset kElementSegmentsOffset | Tagged< WasmInstanceObject > |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset kJumpTableStartOffset std::atomic< uint32_t > kTieringBudgetArrayOffset kDataSegmentStartsOffset kElementSegmentsOffset kInstanceObjectOffset | memory_objects |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset kJumpTableStartOffset std::atomic< uint32_t > kTieringBudgetArrayOffset kDataSegmentStartsOffset kElementSegmentsOffset kInstanceObjectOffset kMemoryObjectsOffset | tagged_globals_buffer |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset kJumpTableStartOffset std::atomic< uint32_t > kTieringBudgetArrayOffset kDataSegmentStartsOffset kElementSegmentsOffset kInstanceObjectOffset kMemoryObjectsOffset kTaggedGlobalsBufferOffset | tables |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset kJumpTableStartOffset std::atomic< uint32_t > kTieringBudgetArrayOffset kDataSegmentStartsOffset kElementSegmentsOffset kInstanceObjectOffset kMemoryObjectsOffset kTaggedGlobalsBufferOffset kTablesOffset | dispatch_table0 |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset kJumpTableStartOffset std::atomic< uint32_t > kTieringBudgetArrayOffset kDataSegmentStartsOffset kElementSegmentsOffset kInstanceObjectOffset kMemoryObjectsOffset kTaggedGlobalsBufferOffset kTablesOffset | WasmDispatchTable |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset kJumpTableStartOffset std::atomic< uint32_t > kTieringBudgetArrayOffset kDataSegmentStartsOffset kElementSegmentsOffset kInstanceObjectOffset kMemoryObjectsOffset kTaggedGlobalsBufferOffset kTablesOffset kProtectedDispatchTable0Offset | dispatch_table_for_imports |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset kJumpTableStartOffset std::atomic< uint32_t > kTieringBudgetArrayOffset kDataSegmentStartsOffset kElementSegmentsOffset kInstanceObjectOffset kMemoryObjectsOffset kTaggedGlobalsBufferOffset kTablesOffset kProtectedDispatchTable0Offset kProtectedDispatchTableForImportsOffset | func_refs |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset kJumpTableStartOffset std::atomic< uint32_t > kTieringBudgetArrayOffset kDataSegmentStartsOffset kElementSegmentsOffset kInstanceObjectOffset kMemoryObjectsOffset kTaggedGlobalsBufferOffset kTablesOffset kProtectedDispatchTable0Offset kProtectedDispatchTableForImportsOffset kFuncRefsOffset | feedback_vectors |
| |
| kMemory0SizeOffset Address kNewAllocationLimitAddressOffset Address kOldAllocationLimitAddressOffset uint8_t kGlobalsStartOffset kJumpTableStartOffset std::atomic< uint32_t > kTieringBudgetArrayOffset kDataSegmentStartsOffset kElementSegmentsOffset kInstanceObjectOffset kMemoryObjectsOffset kTaggedGlobalsBufferOffset kTablesOffset kProtectedDispatchTable0Offset kProtectedDispatchTableForImportsOffset kFuncRefsOffset kFeedbackVectorsOffset | stress_deopt_counter_address |
| |
| | call_origin |
| |
| | TrustedObject |
| |
| | internal |
| |
| | WasmInternalFunction |
| |
| | kTrustedInternalOffset |
| |
| kWasmInternalFunctionIndirectPointerTag | instance_data |
| |
| kWasmInternalFunctionIndirectPointerTag | WasmTrustedInstanceData |
| |
| kWasmInternalFunctionIndirectPointerTag kProtectedInstanceDataOffset | sig |
| |
| | trusted_dispatch_table |
| |
| | kTrustedDispatchTableOffset |
| |
| | trusted_data |
| |
| | kTrustedDataOffset |
| |
| static constexpr Address | kNullAddress = 0 |
| |
| constexpr int | GB = MB * 1024 |
| |
| const int | kApiSystemPointerSize = sizeof(void*) |
| |
| const int | kApiDoubleSize = sizeof(double) |
| |
| const int | kApiInt32Size = sizeof(int32_t) |
| |
| const int | kApiInt64Size = sizeof(int64_t) |
| |
| const int | kApiSizetSize = sizeof(size_t) |
| |
| const int | kHeapObjectTag = 1 |
| |
| const int | kWeakHeapObjectTag = 3 |
| |
| const int | kHeapObjectTagSize = 2 |
| |
| const intptr_t | kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1 |
| |
| const intptr_t | kHeapObjectReferenceTagMask = 1 << (kHeapObjectTagSize - 1) |
| |
| const int | kForwardingTag = 0 |
| |
| const int | kForwardingTagSize = 2 |
| |
| const intptr_t | kForwardingTagMask = (1 << kForwardingTagSize) - 1 |
| |
| const int | kSmiTag = 0 |
| |
| const int | kSmiTagSize = 1 |
| |
| const intptr_t | kSmiTagMask = (1 << kSmiTagSize) - 1 |
| |
| constexpr intptr_t | kIntptrAllBitsSet = intptr_t{-1} |
| |
| constexpr uintptr_t | kUintptrAllBitsSet |
| |
| const int | kApiTaggedSize = kApiSystemPointerSize |
| |
| const int | kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize |
| |
| const int | kSmiValueSize = PlatformSmiTagging::kSmiValueSize |
| |
| const int | kSmiMinValue = static_cast<int>(PlatformSmiTagging::kSmiMinValue) |
| |
| const int | kSmiMaxValue = static_cast<int>(PlatformSmiTagging::kSmiMaxValue) |
| |
| constexpr size_t | kMaxExternalPointers = 0 |
| |
| constexpr uint64_t | kExternalPointerMarkBit = 1ULL << 48 |
| |
| constexpr uint64_t | kExternalPointerTagShift = 49 |
| |
| constexpr uint64_t | kExternalPointerTagMask = 0x00fe000000000000ULL |
| |
| constexpr uint64_t | kExternalPointerShiftedTagMask |
| |
| constexpr uint64_t | kExternalPointerTagAndMarkbitMask = 0x00ff000000000000ULL |
| |
| constexpr uint64_t | kExternalPointerPayloadMask = 0xff00ffffffffffffULL |
| |
| constexpr ExternalPointer_t | kNullExternalPointer = 0 |
| |
| constexpr ExternalPointerHandle | kNullExternalPointerHandle = 0 |
| |
| constexpr CppHeapPointer_t | kNullCppHeapPointer = 0 |
| |
| constexpr CppHeapPointerHandle | kNullCppHeapPointerHandle = 0 |
| |
| constexpr uint64_t | kCppHeapPointerMarkBit = 1ULL |
| |
| constexpr uint64_t | kCppHeapPointerTagShift = 1 |
| |
| constexpr uint64_t | kCppHeapPointerPayloadShift = 16 |
| |
| constexpr size_t | kMaxCppHeapPointers = 0 |
| |
| constexpr IndirectPointerHandle | kNullIndirectPointerHandle = 0 |
| |
| constexpr size_t | kTrustedPointerTableReservationSize = 64 * MB |
| |
| constexpr uint32_t | kTrustedPointerHandleShift = 9 |
| |
| constexpr TrustedPointerHandle | kNullTrustedPointerHandle |
| |
| constexpr int | kTrustedPointerTableEntrySize = 8 |
| |
| constexpr int | kTrustedPointerTableEntrySizeLog2 = 3 |
| |
| constexpr size_t | kMaxTrustedPointers |
| |
| constexpr size_t | kCodePointerTableReservationSize = 128 * MB |
| |
| constexpr uint32_t | kCodePointerHandleShift = 9 |
| |
| constexpr CodePointerHandle | kNullCodePointerHandle = kNullIndirectPointerHandle |
| |
| constexpr uint32_t | kCodePointerHandleMarker = 0x1 |
| |
| constexpr int | kCodePointerTableEntrySize = 16 |
| |
| constexpr int | kCodePointerTableEntrySizeLog2 = 4 |
| |
| constexpr size_t | kMaxCodePointers |
| |
| constexpr int | kCodePointerTableEntryEntrypointOffset = 0 |
| |
| constexpr int | kCodePointerTableEntryCodeObjectOffset = 8 |
| |
| constexpr bool | kRuntimeGeneratedCodeObjectsLiveInTrustedSpace = true |
| |
| constexpr bool | kBuiltinCodeObjectsLiveInTrustedSpace = false |
| |
| constexpr bool | kAllCodeObjectsLiveInTrustedSpace |
| |
| constexpr int | kGarbageCollectionReasonMaxValue = 29 |
| |
| template<typename Iterator , typename = void> |
| constexpr bool | kHaveIteratorConcept = false |
| |
| template<typename Iterator , typename = void> |
| constexpr bool | kHaveIteratorCategory = false |
| |