#ifndef V8_WASM_WASM_OPCODES_H_
#define V8_WASM_WASM_OPCODES_H_

#if !V8_ENABLE_WEBASSEMBLY
#error This header should only be included if WebAssembly is enabled.
#endif  // !V8_ENABLE_WEBASSEMBLY

#define FOREACH_CONTROL_OPCODE(V) \
  V(Unreachable, 0x00, _, "unreachable") \
  V(Nop, 0x01, _, "nop") \
  V(Block, 0x02, _, "block") \
  V(Loop, 0x03, _, "loop") \
  V(If, 0x04, _, "if") \
  V(Else, 0x05, _, "else") \
  V(Try, 0x06, _, "try") \
  V(Catch, 0x07, _, "catch") \
  V(Throw, 0x08, _, "throw") \
  V(Rethrow, 0x09, _, "rethrow") \
  V(TryTable, 0x1f, _, "try_table") \
  V(ThrowRef, 0x0a, _, "throw_ref") \
  V(End, 0x0b, _, "end") \
  V(Br, 0x0c, _, "br") \
  V(BrIf, 0x0d, _, "br_if") \
  V(BrTable, 0x0e, _, "br_table") \
  V(Return, 0x0f, _, "return") \
  V(Delegate, 0x18, _, "delegate") \
  V(CatchAll, 0x19, _, "catch_all") \
  V(BrOnNull, 0xd5, _, "br_on_null") \
  V(BrOnNonNull, 0xd6, _, "br_on_non_null") \
  V(NopForTestingUnsupportedInLiftoff, 0x16, _, "nop_for_testing")

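// Usage sketch (illustrative, not part of the original header): each
// FOREACH_*_OPCODE list is an X-macro whose entries are
// (enumerator name, opcode value, signature shorthand, text-format name), so
// a caller-supplied macro can derive tables or constants from it.
// COUNT_OPCODE is a hypothetical name.
//
//   #define COUNT_OPCODE(name, opcode, sig, wat_name) +1
//   constexpr int kNumControlOpcodes = 0 FOREACH_CONTROL_OPCODE(COUNT_OPCODE);
//   #undef COUNT_OPCODE
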
#define FOREACH_MISC_OPCODE(V) \
  V(CallFunction, 0x10, _, "call") \
  V(CallIndirect, 0x11, _, "call_indirect") \
  V(ReturnCall, 0x12, _, "return_call") \
  V(ReturnCallIndirect, 0x13, _, "return_call_indirect") \
  V(CallRef, 0x14, _, "call_ref") \
  V(ReturnCallRef, 0x15, _, "return_call_ref") \
  V(Drop, 0x1a, _, "drop") \
  V(Select, 0x1b, _, "select") \
  V(SelectWithType, 0x1c, _, "select") \
  V(LocalGet, 0x20, _, "local.get") \
  V(LocalSet, 0x21, _, "local.set") \
  V(LocalTee, 0x22, _, "local.tee") \
  V(GlobalGet, 0x23, _, "global.get") \
  V(GlobalSet, 0x24, _, "global.set") \
  V(TableGet, 0x25, _, "table.get") \
  V(TableSet, 0x26, _, "table.set") \
  V(I32Const, 0x41, _, "i32.const") \
  V(I64Const, 0x42, _, "i64.const") \
  V(F32Const, 0x43, _, "f32.const") \
  V(F64Const, 0x44, _, "f64.const") \
  V(RefNull, 0xd0, _, "ref.null") \
  V(RefIsNull, 0xd1, _, "ref.is_null") \
  V(RefFunc, 0xd2, _, "ref.func") \
  V(RefAsNonNull, 0xd4, _, "ref.as_non_null") \
  V(RefEq, 0xd3, _, "ref.eq")

#define FOREACH_LOAD_MEM_OPCODE(V) \
  V(I32LoadMem, 0x28, i_i, "i32.load") \
  V(I64LoadMem, 0x29, l_i, "i64.load") \
  V(F32LoadMem, 0x2a, f_i, "f32.load") \
  V(F64LoadMem, 0x2b, d_i, "f64.load") \
  V(I32LoadMem8S, 0x2c, i_i, "i32.load8_s") \
  V(I32LoadMem8U, 0x2d, i_i, "i32.load8_u") \
  V(I32LoadMem16S, 0x2e, i_i, "i32.load16_s") \
  V(I32LoadMem16U, 0x2f, i_i, "i32.load16_u") \
  V(I64LoadMem8S, 0x30, l_i, "i64.load8_s") \
  V(I64LoadMem8U, 0x31, l_i, "i64.load8_u") \
  V(I64LoadMem16S, 0x32, l_i, "i64.load16_s") \
  V(I64LoadMem16U, 0x33, l_i, "i64.load16_u") \
  V(I64LoadMem32S, 0x34, l_i, "i64.load32_s") \
  V(I64LoadMem32U, 0x35, l_i, "i64.load32_u") \
  V(F32LoadMemF16, 0xfc30, f_i, "f32.load_f16")

#define FOREACH_STORE_MEM_OPCODE(V) \
  V(I32StoreMem, 0x36, v_ii, "i32.store") \
  V(I64StoreMem, 0x37, v_il, "i64.store") \
  V(F32StoreMem, 0x38, v_if, "f32.store") \
  V(F64StoreMem, 0x39, v_id, "f64.store") \
  V(I32StoreMem8, 0x3a, v_ii, "i32.store8") \
  V(I32StoreMem16, 0x3b, v_ii, "i32.store16") \
  V(I64StoreMem8, 0x3c, v_il, "i64.store8") \
  V(I64StoreMem16, 0x3d, v_il, "i64.store16") \
  V(I64StoreMem32, 0x3e, v_il, "i64.store32") \
  V(F32StoreMemF16, 0xfc31, v_if, "f32.store_f16")

#define FOREACH_MISC_MEM_OPCODE(V) \
  V(MemorySize, 0x3f, i_v, "memory.size") \
  V(MemoryGrow, 0x40, i_i, "memory.grow")

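// Sketch of a consumer of the load/store lists above (illustrative only;
// LoadStoreName and CASE_NAME are hypothetical, not V8 API). The third
// column names the value signature (see FOREACH_SIGNATURE below), the
// fourth the text-format mnemonic.
//
//   #define CASE_NAME(name, opcode, sig, wat_name) \
//     case opcode:                                 \
//       return wat_name;
//   constexpr const char* LoadStoreName(int opcode) {
//     switch (opcode) {
//       FOREACH_LOAD_MEM_OPCODE(CASE_NAME)
//       FOREACH_STORE_MEM_OPCODE(CASE_NAME)
//       default:
//         return nullptr;
//     }
//   }
//   #undef CASE_NAME
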
#define FOREACH_SIMPLE_EXTENDED_CONST_OPCODE(V) \
  V(I32Add, 0x6a, i_ii, "i32.add") \
  V(I32Sub, 0x6b, i_ii, "i32.sub") \
  V(I32Mul, 0x6c, i_ii, "i32.mul") \
  V(I64Add, 0x7c, l_ll, "i64.add") \
  V(I64Sub, 0x7d, l_ll, "i64.sub") \
  V(I64Mul, 0x7e, l_ll, "i64.mul")

#define FOREACH_SIMPLE_NON_CONST_OPCODE(V) \
  V(I32Eqz, 0x45, i_i, "i32.eqz") \
  V(I32Eq, 0x46, i_ii, "i32.eq") \
  V(I32Ne, 0x47, i_ii, "i32.ne") \
  V(I32LtS, 0x48, i_ii, "i32.lt_s") \
  V(I32LtU, 0x49, i_ii, "i32.lt_u") \
  V(I32GtS, 0x4a, i_ii, "i32.gt_s") \
  V(I32GtU, 0x4b, i_ii, "i32.gt_u") \
  V(I32LeS, 0x4c, i_ii, "i32.le_s") \
  V(I32LeU, 0x4d, i_ii, "i32.le_u") \
  V(I32GeS, 0x4e, i_ii, "i32.ge_s") \
  V(I32GeU, 0x4f, i_ii, "i32.ge_u") \
  V(I64Eqz, 0x50, i_l, "i64.eqz") \
  V(I64Eq, 0x51, i_ll, "i64.eq") \
  V(I64Ne, 0x52, i_ll, "i64.ne") \
  V(I64LtS, 0x53, i_ll, "i64.lt_s") \
  V(I64LtU, 0x54, i_ll, "i64.lt_u") \
  V(I64GtS, 0x55, i_ll, "i64.gt_s") \
  V(I64GtU, 0x56, i_ll, "i64.gt_u") \
  V(I64LeS, 0x57, i_ll, "i64.le_s") \
  V(I64LeU, 0x58, i_ll, "i64.le_u") \
  V(I64GeS, 0x59, i_ll, "i64.ge_s") \
  V(I64GeU, 0x5a, i_ll, "i64.ge_u") \
  V(F32Eq, 0x5b, i_ff, "f32.eq") \
  V(F32Ne, 0x5c, i_ff, "f32.ne") \
  V(F32Lt, 0x5d, i_ff, "f32.lt") \
  V(F32Gt, 0x5e, i_ff, "f32.gt") \
  V(F32Le, 0x5f, i_ff, "f32.le") \
  V(F32Ge, 0x60, i_ff, "f32.ge") \
  V(F64Eq, 0x61, i_dd, "f64.eq") \
  V(F64Ne, 0x62, i_dd, "f64.ne") \
  V(F64Lt, 0x63, i_dd, "f64.lt") \
  V(F64Gt, 0x64, i_dd, "f64.gt") \
  V(F64Le, 0x65, i_dd, "f64.le") \
  V(F64Ge, 0x66, i_dd, "f64.ge") \
  V(I32Clz, 0x67, i_i, "i32.clz") \
  V(I32Ctz, 0x68, i_i, "i32.ctz") \
  V(I32Popcnt, 0x69, i_i, "i32.popcnt") \
  V(I32DivS, 0x6d, i_ii, "i32.div_s") \
  V(I32DivU, 0x6e, i_ii, "i32.div_u") \
  V(I32RemS, 0x6f, i_ii, "i32.rem_s") \
  V(I32RemU, 0x70, i_ii, "i32.rem_u") \
  V(I32And, 0x71, i_ii, "i32.and") \
  V(I32Ior, 0x72, i_ii, "i32.or") \
  V(I32Xor, 0x73, i_ii, "i32.xor") \
  V(I32Shl, 0x74, i_ii, "i32.shl") \
  V(I32ShrS, 0x75, i_ii, "i32.shr_s") \
  V(I32ShrU, 0x76, i_ii, "i32.shr_u") \
  V(I32Rol, 0x77, i_ii, "i32.rotl") \
  V(I32Ror, 0x78, i_ii, "i32.rotr") \
  V(I64Clz, 0x79, l_l, "i64.clz") \
  V(I64Ctz, 0x7a, l_l, "i64.ctz") \
  V(I64Popcnt, 0x7b, l_l, "i64.popcnt") \
  V(I64DivS, 0x7f, l_ll, "i64.div_s") \
  V(I64DivU, 0x80, l_ll, "i64.div_u") \
  V(I64RemS, 0x81, l_ll, "i64.rem_s") \
  V(I64RemU, 0x82, l_ll, "i64.rem_u") \
  V(I64And, 0x83, l_ll, "i64.and") \
  V(I64Ior, 0x84, l_ll, "i64.or") \
  V(I64Xor, 0x85, l_ll, "i64.xor") \
  V(I64Shl, 0x86, l_ll, "i64.shl") \
  V(I64ShrS, 0x87, l_ll, "i64.shr_s") \
  V(I64ShrU, 0x88, l_ll, "i64.shr_u") \
  V(I64Rol, 0x89, l_ll, "i64.rotl") \
  V(I64Ror, 0x8a, l_ll, "i64.rotr") \
  V(F32Abs, 0x8b, f_f, "f32.abs") \
  V(F32Neg, 0x8c, f_f, "f32.neg") \
  V(F32Ceil, 0x8d, f_f, "f32.ceil") \
  V(F32Floor, 0x8e, f_f, "f32.floor") \
  V(F32Trunc, 0x8f, f_f, "f32.trunc") \
  V(F32NearestInt, 0x90, f_f, "f32.nearest") \
  V(F32Sqrt, 0x91, f_f, "f32.sqrt") \
  V(F32Add, 0x92, f_ff, "f32.add") \
  V(F32Sub, 0x93, f_ff, "f32.sub") \
  V(F32Mul, 0x94, f_ff, "f32.mul") \
  V(F32Div, 0x95, f_ff, "f32.div") \
  V(F32Min, 0x96, f_ff, "f32.min") \
  V(F32Max, 0x97, f_ff, "f32.max") \
  V(F32CopySign, 0x98, f_ff, "f32.copysign") \
  V(F64Abs, 0x99, d_d, "f64.abs") \
  V(F64Neg, 0x9a, d_d, "f64.neg") \
  V(F64Ceil, 0x9b, d_d, "f64.ceil") \
  V(F64Floor, 0x9c, d_d, "f64.floor") \
  V(F64Trunc, 0x9d, d_d, "f64.trunc") \
  V(F64NearestInt, 0x9e, d_d, "f64.nearest") \
  V(F64Sqrt, 0x9f, d_d, "f64.sqrt") \
  V(F64Add, 0xa0, d_dd, "f64.add") \
  V(F64Sub, 0xa1, d_dd, "f64.sub") \
  V(F64Mul, 0xa2, d_dd, "f64.mul") \
  V(F64Div, 0xa3, d_dd, "f64.div") \
  V(F64Min, 0xa4, d_dd, "f64.min") \
  V(F64Max, 0xa5, d_dd, "f64.max") \
  V(F64CopySign, 0xa6, d_dd, "f64.copysign") \
  V(I32ConvertI64, 0xa7, i_l, "i32.wrap_i64") \
  V(I32SConvertF32, 0xa8, i_f, "i32.trunc_f32_s") \
  V(I32UConvertF32, 0xa9, i_f, "i32.trunc_f32_u") \
  V(I32SConvertF64, 0xaa, i_d, "i32.trunc_f64_s") \
  V(I32UConvertF64, 0xab, i_d, "i32.trunc_f64_u") \
  V(I64SConvertI32, 0xac, l_i, "i64.extend_i32_s") \
  V(I64UConvertI32, 0xad, l_i, "i64.extend_i32_u") \
  V(I64SConvertF32, 0xae, l_f, "i64.trunc_f32_s") \
  V(I64UConvertF32, 0xaf, l_f, "i64.trunc_f32_u") \
  V(I64SConvertF64, 0xb0, l_d, "i64.trunc_f64_s") \
  V(I64UConvertF64, 0xb1, l_d, "i64.trunc_f64_u") \
  V(F32SConvertI32, 0xb2, f_i, "f32.convert_i32_s") \
  V(F32UConvertI32, 0xb3, f_i, "f32.convert_i32_u") \
  V(F32SConvertI64, 0xb4, f_l, "f32.convert_i64_s") \
  V(F32UConvertI64, 0xb5, f_l, "f32.convert_i64_u") \
  V(F32ConvertF64, 0xb6, f_d, "f32.demote_f64") \
  V(F64SConvertI32, 0xb7, d_i, "f64.convert_i32_s") \
  V(F64UConvertI32, 0xb8, d_i, "f64.convert_i32_u") \
  V(F64SConvertI64, 0xb9, d_l, "f64.convert_i64_s") \
  V(F64UConvertI64, 0xba, d_l, "f64.convert_i64_u") \
  V(F64ConvertF32, 0xbb, d_f, "f64.promote_f32") \
  V(I32ReinterpretF32, 0xbc, i_f, "i32.reinterpret_f32") \
  V(I64ReinterpretF64, 0xbd, l_d, "i64.reinterpret_f64") \
  V(F32ReinterpretI32, 0xbe, f_i, "f32.reinterpret_i32") \
  V(F64ReinterpretI64, 0xbf, d_l, "f64.reinterpret_i64") \
  V(I32SExtendI8, 0xc0, i_i, "i32.extend8_s") \
  V(I32SExtendI16, 0xc1, i_i, "i32.extend16_s") \
  V(I64SExtendI8, 0xc2, l_l, "i64.extend8_s") \
  V(I64SExtendI16, 0xc3, l_l, "i64.extend16_s") \
  V(I64SExtendI32, 0xc4, l_l, "i64.extend32_s")

#define FOREACH_WASMFX_OPCODE(V) \
  V(ContNew, 0xe0, _, "cont.new") \
  V(ContBind, 0xe1, _, "cont.bind") \
  V(Suspend, 0xe2, _, "suspend") \
  V(Resume, 0xe3, _, "resume") \
  V(ResumeThrow, 0xe4, _, "resume_throw") \
  V(Switch, 0xe5, _, "switch")

#define FOREACH_SIMPLE_OPCODE(V) \
  FOREACH_SIMPLE_EXTENDED_CONST_OPCODE(V) \
  FOREACH_SIMPLE_NON_CONST_OPCODE(V)

#define FOREACH_SIMPLE_PROTOTYPE_OPCODE(V)

#define FOREACH_ASMJS_COMPAT_OPCODE(V) \
  V(F64Acos, 0xfa3c, d_d, "f64.acos") \
  V(F64Asin, 0xfa3d, d_d, "f64.asin") \
  V(F64Atan, 0xfa3e, d_d, "f64.atan") \
  V(F64Cos, 0xfa3f, d_d, "f64.cos") \
  V(F64Sin, 0xfa40, d_d, "f64.sin") \
  V(F64Tan, 0xfa41, d_d, "f64.tan") \
  V(F64Exp, 0xfa42, d_d, "f64.exp") \
  V(F64Log, 0xfa43, d_d, "f64.log") \
  V(F64Atan2, 0xfa44, d_dd, "f64.atan2") \
  V(F64Pow, 0xfa45, d_dd, "f64.pow") \
  V(F64Mod, 0xfa46, d_dd, "f64.mod") \
  V(I32AsmjsDivS, 0xfa47, i_ii, "i32.asmjs_div_s") \
  V(I32AsmjsDivU, 0xfa48, i_ii, "i32.asmjs_div_u") \
  V(I32AsmjsRemS, 0xfa49, i_ii, "i32.asmjs_rem_s") \
  V(I32AsmjsRemU, 0xfa4a, i_ii, "i32.asmjs_rem_u") \
  V(I32AsmjsLoadMem8S, 0xfa4b, i_i, "i32.asmjs_load8_s") \
  V(I32AsmjsLoadMem8U, 0xfa4c, i_i, "i32.asmjs_load8_u") \
  V(I32AsmjsLoadMem16S, 0xfa4d, i_i, "i32.asmjs_load16_s") \
  V(I32AsmjsLoadMem16U, 0xfa4e, i_i, "i32.asmjs_load16_u") \
  V(I32AsmjsLoadMem, 0xfa4f, i_i, "i32.asmjs_load32") \
  V(F32AsmjsLoadMem, 0xfa50, f_i, "f32.asmjs_load") \
  V(F64AsmjsLoadMem, 0xfa51, d_i, "f64.asmjs_load") \
  V(I32AsmjsStoreMem8, 0xfa52, i_ii, "i32.asmjs_store8") \
  V(I32AsmjsStoreMem16, 0xfa53, i_ii, "i32.asmjs_store16") \
  V(I32AsmjsStoreMem, 0xfa54, i_ii, "i32.asmjs_store") \
  V(F32AsmjsStoreMem, 0xfa55, f_if, "f32.asmjs_store") \
  V(F64AsmjsStoreMem, 0xfa56, d_id, "f64.asmjs_store") \
  V(I32AsmjsSConvertF32, 0xfa57, i_f, "i32.asmjs_convert_f32_s") \
  V(I32AsmjsUConvertF32, 0xfa58, i_f, "i32.asmjs_convert_f32_u") \
  V(I32AsmjsSConvertF64, 0xfa59, i_d, "i32.asmjs_convert_f64_s") \
  V(I32AsmjsUConvertF64, 0xfa5a, i_d, "i32.asmjs_convert_f64_u")

#define FOREACH_SIMD_MEM_OPCODE(V) \
  V(S128LoadMem, 0xfd00, s_i, "v128.load") \
  V(S128Load8x8S, 0xfd01, s_i, "v128.load8x8_s") \
  V(S128Load8x8U, 0xfd02, s_i, "v128.load8x8_u") \
  V(S128Load16x4S, 0xfd03, s_i, "v128.load16x4_s") \
  V(S128Load16x4U, 0xfd04, s_i, "v128.load16x4_u") \
  V(S128Load32x2S, 0xfd05, s_i, "v128.load32x2_s") \
  V(S128Load32x2U, 0xfd06, s_i, "v128.load32x2_u") \
  V(S128Load8Splat, 0xfd07, s_i, "v128.load8_splat") \
  V(S128Load16Splat, 0xfd08, s_i, "v128.load16_splat") \
  V(S128Load32Splat, 0xfd09, s_i, "v128.load32_splat") \
  V(S128Load64Splat, 0xfd0a, s_i, "v128.load64_splat") \
  V(S128StoreMem, 0xfd0b, v_is, "v128.store") \
  V(S128Load32Zero, 0xfd5c, s_i, "v128.load32_zero") \
  V(S128Load64Zero, 0xfd5d, s_i, "v128.load64_zero")

#define FOREACH_SIMD_MEM_1_OPERAND_OPCODE(V) \
  V(S128Load8Lane, 0xfd54, s_is, "v128.load8_lane") \
  V(S128Load16Lane, 0xfd55, s_is, "v128.load16_lane") \
  V(S128Load32Lane, 0xfd56, s_is, "v128.load32_lane") \
  V(S128Load64Lane, 0xfd57, s_is, "v128.load64_lane") \
  V(S128Store8Lane, 0xfd58, v_is, "v128.store8_lane") \
  V(S128Store16Lane, 0xfd59, v_is, "v128.store16_lane") \
  V(S128Store32Lane, 0xfd5a, v_is, "v128.store32_lane") \
  V(S128Store64Lane, 0xfd5b, v_is, "v128.store64_lane")

#define FOREACH_SIMD_CONST_OPCODE(V) V(S128Const, 0xfd0c, _, "v128.const")

#define FOREACH_SIMD_MASK_OPERAND_OPCODE(V) \
  V(I8x16Shuffle, 0xfd0d, s_ss, "i8x16.shuffle")

#define FOREACH_SIMD_MVP_0_OPERAND_OPCODE(V) \
  V(I8x16Swizzle, 0xfd0e, s_ss, "i8x16.swizzle") \
  V(I8x16Splat, 0xfd0f, s_i, "i8x16.splat") \
  V(I16x8Splat, 0xfd10, s_i, "i16x8.splat") \
  V(I32x4Splat, 0xfd11, s_i, "i32x4.splat") \
  V(I64x2Splat, 0xfd12, s_l, "i64x2.splat") \
  V(F32x4Splat, 0xfd13, s_f, "f32x4.splat") \
  V(F64x2Splat, 0xfd14, s_d, "f64x2.splat") \
  V(I8x16Eq, 0xfd23, s_ss, "i8x16.eq") \
  V(I8x16Ne, 0xfd24, s_ss, "i8x16.ne") \
  V(I8x16LtS, 0xfd25, s_ss, "i8x16.lt_s") \
  V(I8x16LtU, 0xfd26, s_ss, "i8x16.lt_u") \
  V(I8x16GtS, 0xfd27, s_ss, "i8x16.gt_s") \
  V(I8x16GtU, 0xfd28, s_ss, "i8x16.gt_u") \
  V(I8x16LeS, 0xfd29, s_ss, "i8x16.le_s") \
  V(I8x16LeU, 0xfd2a, s_ss, "i8x16.le_u") \
  V(I8x16GeS, 0xfd2b, s_ss, "i8x16.ge_s") \
  V(I8x16GeU, 0xfd2c, s_ss, "i8x16.ge_u") \
  V(I16x8Eq, 0xfd2d, s_ss, "i16x8.eq") \
  V(I16x8Ne, 0xfd2e, s_ss, "i16x8.ne") \
  V(I16x8LtS, 0xfd2f, s_ss, "i16x8.lt_s") \
  V(I16x8LtU, 0xfd30, s_ss, "i16x8.lt_u") \
  V(I16x8GtS, 0xfd31, s_ss, "i16x8.gt_s") \
  V(I16x8GtU, 0xfd32, s_ss, "i16x8.gt_u") \
  V(I16x8LeS, 0xfd33, s_ss, "i16x8.le_s") \
  V(I16x8LeU, 0xfd34, s_ss, "i16x8.le_u") \
  V(I16x8GeS, 0xfd35, s_ss, "i16x8.ge_s") \
  V(I16x8GeU, 0xfd36, s_ss, "i16x8.ge_u") \
  V(I32x4Eq, 0xfd37, s_ss, "i32x4.eq") \
  V(I32x4Ne, 0xfd38, s_ss, "i32x4.ne") \
  V(I32x4LtS, 0xfd39, s_ss, "i32x4.lt_s") \
  V(I32x4LtU, 0xfd3a, s_ss, "i32x4.lt_u") \
  V(I32x4GtS, 0xfd3b, s_ss, "i32x4.gt_s") \
  V(I32x4GtU, 0xfd3c, s_ss, "i32x4.gt_u") \
  V(I32x4LeS, 0xfd3d, s_ss, "i32x4.le_s") \
  V(I32x4LeU, 0xfd3e, s_ss, "i32x4.le_u") \
  V(I32x4GeS, 0xfd3f, s_ss, "i32x4.ge_s") \
  V(I32x4GeU, 0xfd40, s_ss, "i32x4.ge_u") \
  V(F32x4Eq, 0xfd41, s_ss, "f32x4.eq") \
  V(F32x4Ne, 0xfd42, s_ss, "f32x4.ne") \
  V(F32x4Lt, 0xfd43, s_ss, "f32x4.lt") \
  V(F32x4Gt, 0xfd44, s_ss, "f32x4.gt") \
  V(F32x4Le, 0xfd45, s_ss, "f32x4.le") \
  V(F32x4Ge, 0xfd46, s_ss, "f32x4.ge") \
  V(F64x2Eq, 0xfd47, s_ss, "f64x2.eq") \
  V(F64x2Ne, 0xfd48, s_ss, "f64x2.ne") \
  V(F64x2Lt, 0xfd49, s_ss, "f64x2.lt") \
  V(F64x2Gt, 0xfd4a, s_ss, "f64x2.gt") \
  V(F64x2Le, 0xfd4b, s_ss, "f64x2.le") \
  V(F64x2Ge, 0xfd4c, s_ss, "f64x2.ge") \
  V(S128Not, 0xfd4d, s_s, "v128.not") \
  V(S128And, 0xfd4e, s_ss, "v128.and") \
  V(S128AndNot, 0xfd4f, s_ss, "v128.andnot") \
  V(S128Or, 0xfd50, s_ss, "v128.or") \
  V(S128Xor, 0xfd51, s_ss, "v128.xor") \
  V(S128Select, 0xfd52, s_sss, "v128.bitselect") \
  V(V128AnyTrue, 0xfd53, i_s, "v128.any_true") \
  V(F32x4DemoteF64x2Zero, 0xfd5e, s_s, "f32x4.demote_f64x2_zero") \
  V(F64x2PromoteLowF32x4, 0xfd5f, s_s, "f64x2.promote_low_f32x4") \
  V(I8x16Abs, 0xfd60, s_s, "i8x16.abs") \
  V(I8x16Neg, 0xfd61, s_s, "i8x16.neg") \
  V(I8x16Popcnt, 0xfd62, s_s, "i8x16.popcnt") \
  V(I8x16AllTrue, 0xfd63, i_s, "i8x16.all_true") \
  V(I8x16BitMask, 0xfd64, i_s, "i8x16.bitmask") \
  V(I8x16SConvertI16x8, 0xfd65, s_ss, "i8x16.narrow_i16x8_s") \
  V(I8x16UConvertI16x8, 0xfd66, s_ss, "i8x16.narrow_i16x8_u") \
  V(F32x4Ceil, 0xfd67, s_s, "f32x4.ceil") \
  V(F32x4Floor, 0xfd68, s_s, "f32x4.floor") \
  V(F32x4Trunc, 0xfd69, s_s, "f32x4.trunc") \
  V(F32x4NearestInt, 0xfd6a, s_s, "f32x4.nearest") \
  V(I8x16Shl, 0xfd6b, s_si, "i8x16.shl") \
  V(I8x16ShrS, 0xfd6c, s_si, "i8x16.shr_s") \
  V(I8x16ShrU, 0xfd6d, s_si, "i8x16.shr_u") \
  V(I8x16Add, 0xfd6e, s_ss, "i8x16.add") \
  V(I8x16AddSatS, 0xfd6f, s_ss, "i8x16.add_sat_s") \
  V(I8x16AddSatU, 0xfd70, s_ss, "i8x16.add_sat_u") \
  V(I8x16Sub, 0xfd71, s_ss, "i8x16.sub") \
  V(I8x16SubSatS, 0xfd72, s_ss, "i8x16.sub_sat_s") \
  V(I8x16SubSatU, 0xfd73, s_ss, "i8x16.sub_sat_u") \
  V(F64x2Ceil, 0xfd74, s_s, "f64x2.ceil") \
  V(F64x2Floor, 0xfd75, s_s, "f64x2.floor") \
  V(I8x16MinS, 0xfd76, s_ss, "i8x16.min_s") \
  V(I8x16MinU, 0xfd77, s_ss, "i8x16.min_u") \
  V(I8x16MaxS, 0xfd78, s_ss, "i8x16.max_s") \
  V(I8x16MaxU, 0xfd79, s_ss, "i8x16.max_u") \
  V(F64x2Trunc, 0xfd7a, s_s, "f64x2.trunc") \
  V(I8x16RoundingAverageU, 0xfd7b, s_ss, "i8x16.avgr_u") \
  V(I16x8ExtAddPairwiseI8x16S, 0xfd7c, s_s, "i16x8.extadd_pairwise_i8x16_s") \
  V(I16x8ExtAddPairwiseI8x16U, 0xfd7d, s_s, "i16x8.extadd_pairwise_i8x16_u") \
  V(I32x4ExtAddPairwiseI16x8S, 0xfd7e, s_s, "i32x4.extadd_pairwise_i16x8_s") \
  V(I32x4ExtAddPairwiseI16x8U, 0xfd7f, s_s, "i32x4.extadd_pairwise_i16x8_u") \
  V(I16x8Abs, 0xfd80, s_s, "i16x8.abs") \
  V(I16x8Neg, 0xfd81, s_s, "i16x8.neg") \
  V(I16x8Q15MulRSatS, 0xfd82, s_ss, "i16x8.q15mulr_sat_s") \
  V(I16x8AllTrue, 0xfd83, i_s, "i16x8.all_true") \
  V(I16x8BitMask, 0xfd84, i_s, "i16x8.bitmask") \
  V(I16x8SConvertI32x4, 0xfd85, s_ss, "i16x8.narrow_i32x4_s") \
  V(I16x8UConvertI32x4, 0xfd86, s_ss, "i16x8.narrow_i32x4_u") \
  V(I16x8SConvertI8x16Low, 0xfd87, s_s, "i16x8.extend_low_i8x16_s") \
  V(I16x8SConvertI8x16High, 0xfd88, s_s, "i16x8.extend_high_i8x16_s") \
  V(I16x8UConvertI8x16Low, 0xfd89, s_s, "i16x8.extend_low_i8x16_u") \
  V(I16x8UConvertI8x16High, 0xfd8a, s_s, "i16x8.extend_high_i8x16_u") \
  V(I16x8Shl, 0xfd8b, s_si, "i16x8.shl") \
  V(I16x8ShrS, 0xfd8c, s_si, "i16x8.shr_s") \
  V(I16x8ShrU, 0xfd8d, s_si, "i16x8.shr_u") \
  V(I16x8Add, 0xfd8e, s_ss, "i16x8.add") \
  V(I16x8AddSatS, 0xfd8f, s_ss, "i16x8.add_sat_s") \
  V(I16x8AddSatU, 0xfd90, s_ss, "i16x8.add_sat_u") \
  V(I16x8Sub, 0xfd91, s_ss, "i16x8.sub") \
  V(I16x8SubSatS, 0xfd92, s_ss, "i16x8.sub_sat_s") \
  V(I16x8SubSatU, 0xfd93, s_ss, "i16x8.sub_sat_u") \
  V(F64x2NearestInt, 0xfd94, s_s, "f64x2.nearest") \
  V(I16x8Mul, 0xfd95, s_ss, "i16x8.mul") \
  V(I16x8MinS, 0xfd96, s_ss, "i16x8.min_s") \
  V(I16x8MinU, 0xfd97, s_ss, "i16x8.min_u") \
  V(I16x8MaxS, 0xfd98, s_ss, "i16x8.max_s") \
  V(I16x8MaxU, 0xfd99, s_ss, "i16x8.max_u") \
  V(I16x8RoundingAverageU, 0xfd9b, s_ss, "i16x8.avgr_u") \
  V(I16x8ExtMulLowI8x16S, 0xfd9c, s_ss, "i16x8.extmul_low_i8x16_s") \
  V(I16x8ExtMulHighI8x16S, 0xfd9d, s_ss, "i16x8.extmul_high_i8x16_s") \
  V(I16x8ExtMulLowI8x16U, 0xfd9e, s_ss, "i16x8.extmul_low_i8x16_u") \
  V(I16x8ExtMulHighI8x16U, 0xfd9f, s_ss, "i16x8.extmul_high_i8x16_u") \
  V(I32x4Abs, 0xfda0, s_s, "i32x4.abs") \
  V(I32x4Neg, 0xfda1, s_s, "i32x4.neg") \
  V(I32x4AllTrue, 0xfda3, i_s, "i32x4.all_true") \
  V(I32x4BitMask, 0xfda4, i_s, "i32x4.bitmask") \
  V(I32x4SConvertI16x8Low, 0xfda7, s_s, "i32x4.extend_low_i16x8_s") \
  V(I32x4SConvertI16x8High, 0xfda8, s_s, "i32x4.extend_high_i16x8_s") \
  V(I32x4UConvertI16x8Low, 0xfda9, s_s, "i32x4.extend_low_i16x8_u") \
  V(I32x4UConvertI16x8High, 0xfdaa, s_s, "i32x4.extend_high_i16x8_u") \
  V(I32x4Shl, 0xfdab, s_si, "i32x4.shl") \
  V(I32x4ShrS, 0xfdac, s_si, "i32x4.shr_s") \
  V(I32x4ShrU, 0xfdad, s_si, "i32x4.shr_u") \
  V(I32x4Add, 0xfdae, s_ss, "i32x4.add") \
  V(I32x4Sub, 0xfdb1, s_ss, "i32x4.sub") \
  V(I32x4Mul, 0xfdb5, s_ss, "i32x4.mul") \
  V(I32x4MinS, 0xfdb6, s_ss, "i32x4.min_s") \
  V(I32x4MinU, 0xfdb7, s_ss, "i32x4.min_u") \
  V(I32x4MaxS, 0xfdb8, s_ss, "i32x4.max_s") \
  V(I32x4MaxU, 0xfdb9, s_ss, "i32x4.max_u") \
  V(I32x4DotI16x8S, 0xfdba, s_ss, "i32x4.dot_i16x8_s") \
  V(I32x4ExtMulLowI16x8S, 0xfdbc, s_ss, "i32x4.extmul_low_i16x8_s") \
  V(I32x4ExtMulHighI16x8S, 0xfdbd, s_ss, "i32x4.extmul_high_i16x8_s") \
  V(I32x4ExtMulLowI16x8U, 0xfdbe, s_ss, "i32x4.extmul_low_i16x8_u") \
  V(I32x4ExtMulHighI16x8U, 0xfdbf, s_ss, "i32x4.extmul_high_i16x8_u") \
  V(I64x2Abs, 0xfdc0, s_s, "i64x2.abs") \
  V(I64x2Neg, 0xfdc1, s_s, "i64x2.neg") \
  V(I64x2AllTrue, 0xfdc3, i_s, "i64x2.all_true") \
  V(I64x2BitMask, 0xfdc4, i_s, "i64x2.bitmask") \
  V(I64x2SConvertI32x4Low, 0xfdc7, s_s, "i64x2.extend_low_i32x4_s") \
  V(I64x2SConvertI32x4High, 0xfdc8, s_s, "i64x2.extend_high_i32x4_s") \
  V(I64x2UConvertI32x4Low, 0xfdc9, s_s, "i64x2.extend_low_i32x4_u") \
  V(I64x2UConvertI32x4High, 0xfdca, s_s, "i64x2.extend_high_i32x4_u") \
  V(I64x2Shl, 0xfdcb, s_si, "i64x2.shl") \
  V(I64x2ShrS, 0xfdcc, s_si, "i64x2.shr_s") \
  V(I64x2ShrU, 0xfdcd, s_si, "i64x2.shr_u") \
  V(I64x2Add, 0xfdce, s_ss, "i64x2.add") \
  V(I64x2Sub, 0xfdd1, s_ss, "i64x2.sub") \
  V(I64x2Mul, 0xfdd5, s_ss, "i64x2.mul") \
  V(I64x2Eq, 0xfdd6, s_ss, "i64x2.eq") \
  V(I64x2Ne, 0xfdd7, s_ss, "i64x2.ne") \
  V(I64x2LtS, 0xfdd8, s_ss, "i64x2.lt_s") \
  V(I64x2GtS, 0xfdd9, s_ss, "i64x2.gt_s") \
  V(I64x2LeS, 0xfdda, s_ss, "i64x2.le_s") \
  V(I64x2GeS, 0xfddb, s_ss, "i64x2.ge_s") \
  V(I64x2ExtMulLowI32x4S, 0xfddc, s_ss, "i64x2.extmul_low_i32x4_s") \
  V(I64x2ExtMulHighI32x4S, 0xfddd, s_ss, "i64x2.extmul_high_i32x4_s") \
  V(I64x2ExtMulLowI32x4U, 0xfdde, s_ss, "i64x2.extmul_low_i32x4_u") \
  V(I64x2ExtMulHighI32x4U, 0xfddf, s_ss, "i64x2.extmul_high_i32x4_u") \
  V(F32x4Abs, 0xfde0, s_s, "f32x4.abs") \
  V(F32x4Neg, 0xfde1, s_s, "f32x4.neg") \
  V(F32x4Sqrt, 0xfde3, s_s, "f32x4.sqrt") \
  V(F32x4Add, 0xfde4, s_ss, "f32x4.add") \
  V(F32x4Sub, 0xfde5, s_ss, "f32x4.sub") \
  V(F32x4Mul, 0xfde6, s_ss, "f32x4.mul") \
  V(F32x4Div, 0xfde7, s_ss, "f32x4.div") \
  V(F32x4Min, 0xfde8, s_ss, "f32x4.min") \
  V(F32x4Max, 0xfde9, s_ss, "f32x4.max") \
  V(F32x4Pmin, 0xfdea, s_ss, "f32x4.pmin") \
  V(F32x4Pmax, 0xfdeb, s_ss, "f32x4.pmax") \
  V(F64x2Abs, 0xfdec, s_s, "f64x2.abs") \
  V(F64x2Neg, 0xfded, s_s, "f64x2.neg") \
  V(F64x2Sqrt, 0xfdef, s_s, "f64x2.sqrt") \
  V(F64x2Add, 0xfdf0, s_ss, "f64x2.add") \
  V(F64x2Sub, 0xfdf1, s_ss, "f64x2.sub") \
  V(F64x2Mul, 0xfdf2, s_ss, "f64x2.mul") \
  V(F64x2Div, 0xfdf3, s_ss, "f64x2.div") \
  V(F64x2Min, 0xfdf4, s_ss, "f64x2.min") \
  V(F64x2Max, 0xfdf5, s_ss, "f64x2.max") \
  V(F64x2Pmin, 0xfdf6, s_ss, "f64x2.pmin") \
  V(F64x2Pmax, 0xfdf7, s_ss, "f64x2.pmax") \
  V(I32x4SConvertF32x4, 0xfdf8, s_s, "i32x4.trunc_sat_f32x4_s") \
  V(I32x4UConvertF32x4, 0xfdf9, s_s, "i32x4.trunc_sat_f32x4_u") \
  V(F32x4SConvertI32x4, 0xfdfa, s_s, "f32x4.convert_i32x4_s") \
  V(F32x4UConvertI32x4, 0xfdfb, s_s, "f32x4.convert_i32x4_u") \
  V(I32x4TruncSatF64x2SZero, 0xfdfc, s_s, "i32x4.trunc_sat_f64x2_s_zero") \
  V(I32x4TruncSatF64x2UZero, 0xfdfd, s_s, "i32x4.trunc_sat_f64x2_u_zero") \
  V(F64x2ConvertLowI32x4S, 0xfdfe, s_s, "f64x2.convert_low_i32x4_s") \
  V(F64x2ConvertLowI32x4U, 0xfdff, s_s, "f64x2.convert_low_i32x4_u")

#define FOREACH_RELAXED_SIMD_OPCODE(V) \
  V(I8x16RelaxedSwizzle, 0xfd100, s_ss, "i8x16.relaxed_swizzle") \
  V(I32x4RelaxedTruncF32x4S, 0xfd101, s_s, "i32x4.relaxed_trunc_f32x4_s") \
  V(I32x4RelaxedTruncF32x4U, 0xfd102, s_s, "i32x4.relaxed_trunc_f32x4_u") \
  V(I32x4RelaxedTruncF64x2SZero, 0xfd103, s_s, \
    "i32x4.relaxed_trunc_f64x2_s_zero") \
  V(I32x4RelaxedTruncF64x2UZero, 0xfd104, s_s, \
    "i32x4.relaxed_trunc_f64x2_u_zero") \
  V(F32x4Qfma, 0xfd105, s_sss, "f32x4.qfma") \
  V(F32x4Qfms, 0xfd106, s_sss, "f32x4.qfms") \
  V(F64x2Qfma, 0xfd107, s_sss, "f64x2.qfma") \
  V(F64x2Qfms, 0xfd108, s_sss, "f64x2.qfms") \
  V(I8x16RelaxedLaneSelect, 0xfd109, s_sss, "i8x16.relaxed_laneselect") \
  V(I16x8RelaxedLaneSelect, 0xfd10a, s_sss, "i16x8.relaxed_laneselect") \
  V(I32x4RelaxedLaneSelect, 0xfd10b, s_sss, "i32x4.relaxed_laneselect") \
  V(I64x2RelaxedLaneSelect, 0xfd10c, s_sss, "i64x2.relaxed_laneselect") \
  V(F32x4RelaxedMin, 0xfd10d, s_ss, "f32x4.relaxed_min") \
  V(F32x4RelaxedMax, 0xfd10e, s_ss, "f32x4.relaxed_max") \
  V(F64x2RelaxedMin, 0xfd10f, s_ss, "f64x2.relaxed_min") \
  V(F64x2RelaxedMax, 0xfd110, s_ss, "f64x2.relaxed_max") \
  V(I16x8RelaxedQ15MulRS, 0xfd111, s_ss, "i16x8.relaxed_q15mulr_s") \
  V(I16x8DotI8x16I7x16S, 0xfd112, s_ss, "i16x8.dot_i8x16_i7x16_s") \
  V(I32x4DotI8x16I7x16AddS, 0xfd113, s_sss, "i32x4.dot_i8x16_i7x16_add_s") \
  V(F16x8Splat, 0xfd120, s_f, "f16x8.splat") \
  V(F16x8Abs, 0xfd130, s_s, "f16x8.abs") \
  V(F16x8Neg, 0xfd131, s_s, "f16x8.neg") \
  V(F16x8Sqrt, 0xfd132, s_s, "f16x8.sqrt") \
  V(F16x8Ceil, 0xfd133, s_s, "f16x8.ceil") \
  V(F16x8Floor, 0xfd134, s_s, "f16x8.floor") \
  V(F16x8Trunc, 0xfd135, s_s, "f16x8.trunc") \
  V(F16x8NearestInt, 0xfd136, s_s, "f16x8.nearest") \
  V(F16x8Eq, 0xfd137, s_ss, "f16x8.eq") \
  V(F16x8Ne, 0xfd138, s_ss, "f16x8.ne") \
  V(F16x8Lt, 0xfd139, s_ss, "f16x8.lt") \
  V(F16x8Gt, 0xfd13a, s_ss, "f16x8.gt") \
  V(F16x8Le, 0xfd13b, s_ss, "f16x8.le") \
  V(F16x8Ge, 0xfd13c, s_ss, "f16x8.ge") \
  V(F16x8Add, 0xfd13d, s_ss, "f16x8.add") \
  V(F16x8Sub, 0xfd13e, s_ss, "f16x8.sub") \
  V(F16x8Mul, 0xfd13f, s_ss, "f16x8.mul") \
  V(F16x8Div, 0xfd140, s_ss, "f16x8.div") \
  V(F16x8Min, 0xfd141, s_ss, "f16x8.min") \
  V(F16x8Max, 0xfd142, s_ss, "f16x8.max") \
  V(F16x8Pmin, 0xfd143, s_ss, "f16x8.pmin") \
  V(F16x8Pmax, 0xfd144, s_ss, "f16x8.pmax") \
  V(I16x8SConvertF16x8, 0xfd145, s_s, "i16x8.trunc_sat_f16x8_s") \
  V(I16x8UConvertF16x8, 0xfd146, s_s, "i16x8.trunc_sat_f16x8_u") \
  V(F16x8SConvertI16x8, 0xfd147, s_s, "f16x8.convert_i16x8_s") \
  V(F16x8UConvertI16x8, 0xfd148, s_s, "f16x8.convert_i16x8_u") \
  V(F16x8DemoteF32x4Zero, 0xfd149, s_s, "f16x8.demote_f32x4_zero") \
  V(F16x8DemoteF64x2Zero, 0xfd14a, s_s, "f16x8.demote_f64x2_zero") \
  V(F32x4PromoteLowF16x8, 0xfd14b, s_s, "f32x4.promote_low_f16x8") \
  V(F16x8Qfma, 0xfd14e, s_sss, "f16x8.madd") \
  V(F16x8Qfms, 0xfd14f, s_sss, "f16x8.nmadd")

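// Encoding note (sketch, not from the original header): table values above
// 0xff combine the prefix byte with the opcode index; in the binary format
// the prefix byte is followed by the LEB128-encoded index. For example:
//
//   constexpr uint8_t kI8x16AddBytes[] = {0xfd, 0x6e};          // 0xfd6e
//   constexpr uint8_t kF16x8SplatBytes[] = {0xfd, 0xa0, 0x02};  // 0xfd120
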
#define FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
  V(I8x16ExtractLaneS, 0xfd15, _, "i8x16.extract_lane_s") \
  V(I8x16ExtractLaneU, 0xfd16, _, "i8x16.extract_lane_u") \
  V(I16x8ExtractLaneS, 0xfd18, _, "i16x8.extract_lane_s") \
  V(I16x8ExtractLaneU, 0xfd19, _, "i16x8.extract_lane_u") \
  V(I32x4ExtractLane, 0xfd1b, _, "i32x4.extract_lane") \
  V(I64x2ExtractLane, 0xfd1d, _, "i64x2.extract_lane") \
  V(F32x4ExtractLane, 0xfd1f, _, "f32x4.extract_lane") \
  V(F64x2ExtractLane, 0xfd21, _, "f64x2.extract_lane") \
  V(F16x8ExtractLane, 0xfd121, _, "f16x8.extract_lane")

#define FOREACH_SIMD_1_OPERAND_2_PARAM_OPCODE(V) \
  V(I8x16ReplaceLane, 0xfd17, _, "i8x16.replace_lane") \
  V(I16x8ReplaceLane, 0xfd1a, _, "i16x8.replace_lane") \
  V(I32x4ReplaceLane, 0xfd1c, _, "i32x4.replace_lane") \
  V(I64x2ReplaceLane, 0xfd1e, _, "i64x2.replace_lane") \
  V(F32x4ReplaceLane, 0xfd20, _, "f32x4.replace_lane") \
  V(F64x2ReplaceLane, 0xfd22, _, "f64x2.replace_lane") \
  V(F16x8ReplaceLane, 0xfd122, _, "f16x8.replace_lane")

#define FOREACH_SIMD_0_OPERAND_OPCODE(V) \
  FOREACH_SIMD_MVP_0_OPERAND_OPCODE(V) \
  FOREACH_RELAXED_SIMD_OPCODE(V)

#define FOREACH_SIMD_1_OPERAND_OPCODE(V) \
  FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
  FOREACH_SIMD_1_OPERAND_2_PARAM_OPCODE(V)

#define FOREACH_SIMD_OPCODE(V) \
  FOREACH_SIMD_0_OPERAND_OPCODE(V) \
  FOREACH_SIMD_1_OPERAND_OPCODE(V) \
  FOREACH_SIMD_MASK_OPERAND_OPCODE(V) \
  FOREACH_SIMD_MEM_OPCODE(V) \
  FOREACH_SIMD_MEM_1_OPERAND_OPCODE(V) \
  FOREACH_SIMD_CONST_OPCODE(V)

#define FOREACH_NUMERIC_OPCODE_WITH_SIG(V) \
  V(I32SConvertSatF32, 0xfc00, i_f, "i32.trunc_sat_f32_s") \
  V(I32UConvertSatF32, 0xfc01, i_f, "i32.trunc_sat_f32_u") \
  V(I32SConvertSatF64, 0xfc02, i_d, "i32.trunc_sat_f64_s") \
  V(I32UConvertSatF64, 0xfc03, i_d, "i32.trunc_sat_f64_u") \
  V(I64SConvertSatF32, 0xfc04, l_f, "i64.trunc_sat_f32_s") \
  V(I64UConvertSatF32, 0xfc05, l_f, "i64.trunc_sat_f32_u") \
  V(I64SConvertSatF64, 0xfc06, l_d, "i64.trunc_sat_f64_s") \
  V(I64UConvertSatF64, 0xfc07, l_d, "i64.trunc_sat_f64_u") \
  V(DataDrop, 0xfc09, v_v, "data.drop") \
  V(TableInit, 0xfc0c, v_iii, "table.init") \
  V(ElemDrop, 0xfc0d, v_v, "elem.drop") \
  V(TableCopy, 0xfc0e, v_iii, "table.copy") \
  V(TableSize, 0xfc10, i_v, "table.size")

#define FOREACH_NUMERIC_OPCODE_VARIADIC(V) \
  V(MemoryInit, 0xfc08, _, "memory.init") \
  V(MemoryCopy, 0xfc0a, _, "memory.copy") \
  V(MemoryFill, 0xfc0b, _, "memory.fill") \
  V(TableGrow, 0xfc0f, _, "table.grow") \
  V(TableFill, 0xfc11, _, "table.fill")

#define FOREACH_NUMERIC_OPCODE(V) \
  FOREACH_NUMERIC_OPCODE_WITH_SIG(V) FOREACH_NUMERIC_OPCODE_VARIADIC(V)

#define FOREACH_ATOMIC_OPCODE(V) \
  V(AtomicNotify, 0xfe00, i_ii, "memory.atomic.notify", i_li) \
  V(I32AtomicWait, 0xfe01, i_iil, "memory.atomic.wait32", i_lil) \
  V(I64AtomicWait, 0xfe02, i_ill, "memory.atomic.wait64", i_lll) \
  V(I32AtomicLoad, 0xfe10, i_i, "i32.atomic.load", i_l) \
  V(I64AtomicLoad, 0xfe11, l_i, "i64.atomic.load", l_l) \
  V(I32AtomicLoad8U, 0xfe12, i_i, "i32.atomic.load8_u", i_l) \
  V(I32AtomicLoad16U, 0xfe13, i_i, "i32.atomic.load16_u", i_l) \
  V(I64AtomicLoad8U, 0xfe14, l_i, "i64.atomic.load8_u", l_l) \
  V(I64AtomicLoad16U, 0xfe15, l_i, "i64.atomic.load16_u", l_l) \
  V(I64AtomicLoad32U, 0xfe16, l_i, "i64.atomic.load32_u", l_l) \
  V(I32AtomicStore, 0xfe17, v_ii, "i32.atomic.store", v_li) \
  V(I64AtomicStore, 0xfe18, v_il, "i64.atomic.store", v_ll) \
  V(I32AtomicStore8U, 0xfe19, v_ii, "i32.atomic.store8", v_li) \
  V(I32AtomicStore16U, 0xfe1a, v_ii, "i32.atomic.store16", v_li) \
  V(I64AtomicStore8U, 0xfe1b, v_il, "i64.atomic.store8", v_ll) \
  V(I64AtomicStore16U, 0xfe1c, v_il, "i64.atomic.store16", v_ll) \
  V(I64AtomicStore32U, 0xfe1d, v_il, "i64.atomic.store32", v_ll) \
  V(I32AtomicAdd, 0xfe1e, i_ii, "i32.atomic.rmw.add", i_li) \
  V(I64AtomicAdd, 0xfe1f, l_il, "i64.atomic.rmw.add", l_ll) \
  V(I32AtomicAdd8U, 0xfe20, i_ii, "i32.atomic.rmw8.add_u", i_li) \
  V(I32AtomicAdd16U, 0xfe21, i_ii, "i32.atomic.rmw16.add_u", i_li) \
  V(I64AtomicAdd8U, 0xfe22, l_il, "i64.atomic.rmw8.add_u", l_ll) \
  V(I64AtomicAdd16U, 0xfe23, l_il, "i64.atomic.rmw16.add_u", l_ll) \
  V(I64AtomicAdd32U, 0xfe24, l_il, "i64.atomic.rmw32.add_u", l_ll) \
  V(I32AtomicSub, 0xfe25, i_ii, "i32.atomic.rmw.sub", i_li) \
  V(I64AtomicSub, 0xfe26, l_il, "i64.atomic.rmw.sub", l_ll) \
  V(I32AtomicSub8U, 0xfe27, i_ii, "i32.atomic.rmw8.sub_u", i_li) \
  V(I32AtomicSub16U, 0xfe28, i_ii, "i32.atomic.rmw16.sub_u", i_li) \
  V(I64AtomicSub8U, 0xfe29, l_il, "i64.atomic.rmw8.sub_u", l_ll) \
  V(I64AtomicSub16U, 0xfe2a, l_il, "i64.atomic.rmw16.sub_u", l_ll) \
  V(I64AtomicSub32U, 0xfe2b, l_il, "i64.atomic.rmw32.sub_u", l_ll) \
  V(I32AtomicAnd, 0xfe2c, i_ii, "i32.atomic.rmw.and", i_li) \
  V(I64AtomicAnd, 0xfe2d, l_il, "i64.atomic.rmw.and", l_ll) \
  V(I32AtomicAnd8U, 0xfe2e, i_ii, "i32.atomic.rmw8.and_u", i_li) \
  V(I32AtomicAnd16U, 0xfe2f, i_ii, "i32.atomic.rmw16.and_u", i_li) \
  V(I64AtomicAnd8U, 0xfe30, l_il, "i64.atomic.rmw8.and_u", l_ll) \
  V(I64AtomicAnd16U, 0xfe31, l_il, "i64.atomic.rmw16.and_u", l_ll) \
  V(I64AtomicAnd32U, 0xfe32, l_il, "i64.atomic.rmw32.and_u", l_ll) \
  V(I32AtomicOr, 0xfe33, i_ii, "i32.atomic.rmw.or", i_li) \
  V(I64AtomicOr, 0xfe34, l_il, "i64.atomic.rmw.or", l_ll) \
  V(I32AtomicOr8U, 0xfe35, i_ii, "i32.atomic.rmw8.or_u", i_li) \
  V(I32AtomicOr16U, 0xfe36, i_ii, "i32.atomic.rmw16.or_u", i_li) \
  V(I64AtomicOr8U, 0xfe37, l_il, "i64.atomic.rmw8.or_u", l_ll) \
  V(I64AtomicOr16U, 0xfe38, l_il, "i64.atomic.rmw16.or_u", l_ll) \
  V(I64AtomicOr32U, 0xfe39, l_il, "i64.atomic.rmw32.or_u", l_ll) \
  V(I32AtomicXor, 0xfe3a, i_ii, "i32.atomic.rmw.xor", i_li) \
  V(I64AtomicXor, 0xfe3b, l_il, "i64.atomic.rmw.xor", l_ll) \
  V(I32AtomicXor8U, 0xfe3c, i_ii, "i32.atomic.rmw8.xor_u", i_li) \
  V(I32AtomicXor16U, 0xfe3d, i_ii, "i32.atomic.rmw16.xor_u", i_li) \
  V(I64AtomicXor8U, 0xfe3e, l_il, "i64.atomic.rmw8.xor_u", l_ll) \
  V(I64AtomicXor16U, 0xfe3f, l_il, "i64.atomic.rmw16.xor_u", l_ll) \
  V(I64AtomicXor32U, 0xfe40, l_il, "i64.atomic.rmw32.xor_u", l_ll) \
  V(I32AtomicExchange, 0xfe41, i_ii, "i32.atomic.rmw.xchg", i_li) \
  V(I64AtomicExchange, 0xfe42, l_il, "i64.atomic.rmw.xchg", l_ll) \
  V(I32AtomicExchange8U, 0xfe43, i_ii, "i32.atomic.rmw8.xchg_u", i_li) \
  V(I32AtomicExchange16U, 0xfe44, i_ii, "i32.atomic.rmw16.xchg_u", i_li) \
  V(I64AtomicExchange8U, 0xfe45, l_il, "i64.atomic.rmw8.xchg_u", l_ll) \
  V(I64AtomicExchange16U, 0xfe46, l_il, "i64.atomic.rmw16.xchg_u", l_ll) \
  V(I64AtomicExchange32U, 0xfe47, l_il, "i64.atomic.rmw32.xchg_u", l_ll) \
  V(I32AtomicCompareExchange, 0xfe48, i_iii, "i32.atomic.rmw.cmpxchg", i_lii) \
  V(I64AtomicCompareExchange, 0xfe49, l_ill, "i64.atomic.rmw.cmpxchg", l_lll) \
  V(I32AtomicCompareExchange8U, 0xfe4a, i_iii, "i32.atomic.rmw8.cmpxchg_u", \
    i_lii) \
  V(I32AtomicCompareExchange16U, 0xfe4b, i_iii, "i32.atomic.rmw16.cmpxchg_u", \
    i_lii) \
  V(I64AtomicCompareExchange8U, 0xfe4c, l_ill, "i64.atomic.rmw8.cmpxchg_u", \
    l_lll) \
  V(I64AtomicCompareExchange16U, 0xfe4d, l_ill, "i64.atomic.rmw16.cmpxchg_u", \
    l_lll) \
  V(I64AtomicCompareExchange32U, 0xfe4e, l_ill, "i64.atomic.rmw32.cmpxchg_u", \
    l_lll)

#define FOREACH_ATOMIC_0_OPERAND_OPCODE(V) \
  V(AtomicFence, 0xfe03, v_v, "atomic.fence", v_v)

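// The atomic entries carry two signature shorthands: the fourth macro
// argument is the signature for 32-bit memories and the fifth the memory64
// (i64 address) variant. Illustrative consumer (AtomicSigName and
// CASE_ATOMIC_SIG are hypothetical names, not V8 API):
//
//   #define CASE_ATOMIC_SIG(name, opcode, sig, wat_name, sig64) \
//     case opcode:                                              \
//       return is_memory64 ? #sig64 : #sig;
//   constexpr const char* AtomicSigName(int opcode, bool is_memory64) {
//     switch (opcode) {
//       FOREACH_ATOMIC_OPCODE(CASE_ATOMIC_SIG)
//       FOREACH_ATOMIC_0_OPERAND_OPCODE(CASE_ATOMIC_SIG)
//       default:
//         return nullptr;
//     }
//   }
//   #undef CASE_ATOMIC_SIG
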
#define FOREACH_GC_OPCODE(V) \
  V(StructNew, 0xfb00, _, "struct.new") \
  V(StructNewDefault, 0xfb01, _, "struct.new_default") \
  V(StructGet, 0xfb02, _, "struct.get") \
  V(StructGetS, 0xfb03, _, "struct.get_s") \
  V(StructGetU, 0xfb04, _, "struct.get_u") \
  V(StructSet, 0xfb05, _, "struct.set") \
  V(ArrayNew, 0xfb06, _, "array.new") \
  V(ArrayNewDefault, 0xfb07, _, "array.new_default") \
  V(ArrayNewFixed, 0xfb08, _, "array.new_fixed") \
  V(ArrayNewData, 0xfb09, _, "array.new_data") \
  V(ArrayNewElem, 0xfb0a, _, "array.new_elem") \
  V(ArrayGet, 0xfb0b, _, "array.get") \
  V(ArrayGetS, 0xfb0c, _, "array.get_s") \
  V(ArrayGetU, 0xfb0d, _, "array.get_u") \
  V(ArraySet, 0xfb0e, _, "array.set") \
  V(ArrayLen, 0xfb0f, _, "array.len") \
  V(ArrayFill, 0xfb10, _, "array.fill") \
  V(ArrayCopy, 0xfb11, _, "array.copy") \
  V(ArrayInitData, 0xfb12, _, "array.init_data") \
  V(ArrayInitElem, 0xfb13, _, "array.init_elem") \
  V(RefTest, 0xfb14, _, "ref.test") \
  V(RefTestNull, 0xfb15, _, "ref.test null") \
  V(RefCast, 0xfb16, _, "ref.cast") \
  V(RefCastNull, 0xfb17, _, "ref.cast null") \
  V(BrOnCast, 0xfb18, _, "br_on_cast") \
  V(BrOnCastFail, 0xfb19, _, "br_on_cast_fail") \
  V(AnyConvertExtern, 0xfb1a, _, "any.convert_extern") \
  V(ExternConvertAny, 0xfb1b, _, "extern.convert_any") \
  V(RefI31, 0xfb1c, _, "ref.i31") \
  V(I31GetS, 0xfb1d, _, "i31.get_s") \
  V(I31GetU, 0xfb1e, _, "i31.get_u") \
  V(RefGetDesc, 0xfb22, _, "ref.get_desc") \
  V(RefCastDesc, 0xfb23, _, "ref.cast_desc") \
  V(RefCastDescNull, 0xfb24, _, "ref.cast_desc null") \
  V(BrOnCastDesc, 0xfb25, _, "br_on_cast_desc") \
  V(BrOnCastDescFail, 0xfb26, _, "br_on_cast_desc_fail") \
  V(RefCastNop, 0xfb4c, _, "ref.cast_nop") \
  V(StringNewUtf8, 0xfb80, _, "string.new_utf8") \
  V(StringNewWtf16, 0xfb81, _, "string.new_wtf16") \
  V(StringConst, 0xfb82, _, "string.const") \
  V(StringMeasureUtf8, 0xfb83, _, "string.measure_utf8") \
  V(StringMeasureWtf8, 0xfb84, _, "string.measure_wtf8") \
  V(StringMeasureWtf16, 0xfb85, _, "string.measure_wtf16") \
  V(StringEncodeUtf8, 0xfb86, _, "string.encode_utf8") \
  V(StringEncodeWtf16, 0xfb87, _, "string.encode_wtf16") \
  V(StringConcat, 0xfb88, _, "string.concat") \
  V(StringEq, 0xfb89, _, "string.eq") \
  V(StringIsUSVSequence, 0xfb8a, _, "string.is_usv_sequence") \
  V(StringNewLossyUtf8, 0xfb8b, _, "string.new_lossy_utf8") \
  V(StringNewWtf8, 0xfb8c, _, "string.new_wtf8") \
  V(StringEncodeLossyUtf8, 0xfb8d, _, "string.encode_lossy_utf8") \
  V(StringEncodeWtf8, 0xfb8e, _, "string.encode_wtf8") \
  V(StringNewUtf8Try, 0xfb8f, _, "string.new_utf8_try") \
  V(StringAsWtf8, 0xfb90, _, "string.as_wtf8") \
  V(StringViewWtf8Advance, 0xfb91, _, "stringview_wtf8.advance") \
  V(StringViewWtf8EncodeUtf8, 0xfb92, _, "stringview_wtf8.encode_utf8") \
  V(StringViewWtf8Slice, 0xfb93, _, "stringview_wtf8.slice") \
  V(StringViewWtf8EncodeLossyUtf8, 0xfb94, _, \
    "stringview_wtf8.encode_lossy_utf8") \
  V(StringViewWtf8EncodeWtf8, 0xfb95, _, "stringview_wtf8.encode_wtf8") \
  V(StringAsWtf16, 0xfb98, _, "string.as_wtf16") \
  V(StringViewWtf16Length, 0xfb99, _, "stringview_wtf16.length") \
  V(StringViewWtf16GetCodeunit, 0xfb9a, _, "stringview_wtf16.get_codeunit") \
  V(StringViewWtf16Encode, 0xfb9b, _, "stringview_wtf16.encode") \
  V(StringViewWtf16Slice, 0xfb9c, _, "stringview_wtf16.slice") \
  V(StringAsIter, 0xfba0, _, "string.as_iter") \
  V(StringViewIterNext, 0xfba1, _, "stringview_iter.next") \
  V(StringViewIterAdvance, 0xfba2, _, "stringview_iter.advance") \
  V(StringViewIterRewind, 0xfba3, _, "stringview_iter.rewind") \
  V(StringViewIterSlice, 0xfba4, _, "stringview_iter.slice") \
  V(StringCompare, 0xfba8, _, "string.compare") \
  V(StringFromCodePoint, 0xfba9, _, "string.from_code_point") \
  V(StringHash, 0xfbaa, _, "string.hash") \
  V(StringNewUtf8Array, 0xfbb0, _, "string.new_utf8_array") \
  V(StringNewWtf16Array, 0xfbb1, _, "string.new_wtf16_array") \
  V(StringEncodeUtf8Array, 0xfbb2, _, "string.encode_utf8_array") \
  V(StringEncodeWtf16Array, 0xfbb3, _, "string.encode_wtf16_array") \
  V(StringNewLossyUtf8Array, 0xfbb4, _, "string.new_lossy_utf8_array") \
  V(StringNewWtf8Array, 0xfbb5, _, "string.new_wtf8_array") \
  V(StringEncodeLossyUtf8Array, 0xfbb6, _, "string.encode_lossy_utf8_array") \
  V(StringEncodeWtf8Array, 0xfbb7, _, "string.encode_wtf8_array") \
  V(StringNewUtf8ArrayTry, 0xfbb8, _, "string.new_utf8_array_try")

#define FOREACH_OPCODE(V) \
  FOREACH_CONTROL_OPCODE(V) \
  FOREACH_MISC_OPCODE(V) \
  FOREACH_SIMPLE_OPCODE(V) \
  FOREACH_SIMPLE_PROTOTYPE_OPCODE(V) \
  FOREACH_STORE_MEM_OPCODE(V) \
  FOREACH_LOAD_MEM_OPCODE(V) \
  FOREACH_MISC_MEM_OPCODE(V) \
  FOREACH_ASMJS_COMPAT_OPCODE(V) \
  FOREACH_SIMD_OPCODE(V) \
  FOREACH_ATOMIC_OPCODE(V) \
  FOREACH_ATOMIC_0_OPERAND_OPCODE(V) \
  FOREACH_NUMERIC_OPCODE(V) \
  FOREACH_GC_OPCODE(V) \
  FOREACH_WASMFX_OPCODE(V)

#define FOREACH_SIGNATURE(V) \
  FOREACH_SIMD_SIGNATURE(V) \
  V(d_d, kWasmF64, kWasmF64) \
  V(d_dd, kWasmF64, kWasmF64, kWasmF64) \
  V(d_f, kWasmF64, kWasmF32) \
  V(d_i, kWasmF64, kWasmI32) \
  V(d_id, kWasmF64, kWasmI32, kWasmF64) \
  V(d_l, kWasmF64, kWasmI64) \
  V(f_d, kWasmF32, kWasmF64) \
  V(f_f, kWasmF32, kWasmF32) \
  V(f_ff, kWasmF32, kWasmF32, kWasmF32) \
  V(f_i, kWasmF32, kWasmI32) \
  V(f_if, kWasmF32, kWasmI32, kWasmF32) \
  V(f_l, kWasmF32, kWasmI64) \
  V(i_a, kWasmI32, kWasmAnyRef) \
  V(i_ci, kWasmI32, kWasmFuncRef, kWasmI32) \
  V(i_d, kWasmI32, kWasmF64) \
  V(i_dd, kWasmI32, kWasmF64, kWasmF64) \
  V(i_f, kWasmI32, kWasmF32) \
  V(i_ff, kWasmI32, kWasmF32, kWasmF32) \
  V(i_i, kWasmI32, kWasmI32) \
  V(i_ii, kWasmI32, kWasmI32, kWasmI32) \
  V(i_iii, kWasmI32, kWasmI32, kWasmI32, kWasmI32) \
  V(i_iil, kWasmI32, kWasmI32, kWasmI32, kWasmI64) \
  V(i_ill, kWasmI32, kWasmI32, kWasmI64, kWasmI64) \
  V(i_l, kWasmI32, kWasmI64) \
  V(i_li, kWasmI32, kWasmI64, kWasmI32) \
  V(i_lii, kWasmI32, kWasmI64, kWasmI32, kWasmI32) \
  V(i_lil, kWasmI32, kWasmI64, kWasmI32, kWasmI64) \
  V(i_lll, kWasmI32, kWasmI64, kWasmI64, kWasmI64) \
  V(i_ll, kWasmI32, kWasmI64, kWasmI64) \
  V(i_qq, kWasmI32, kWasmEqRef, kWasmEqRef) \
  V(l_d, kWasmI64, kWasmF64) \
  V(l_f, kWasmI64, kWasmF32) \
  V(l_i, kWasmI64, kWasmI32) \
  V(l_il, kWasmI64, kWasmI32, kWasmI64) \
  V(l_ill, kWasmI64, kWasmI32, kWasmI64, kWasmI64) \
  V(l_l, kWasmI64, kWasmI64) \
  V(l_ll, kWasmI64, kWasmI64, kWasmI64) \
  V(l_lll, kWasmI64, kWasmI64, kWasmI64, kWasmI64) \
  V(v_id, kWasmVoid, kWasmI32, kWasmF64) \
  V(v_if, kWasmVoid, kWasmI32, kWasmF32) \
  V(v_i, kWasmVoid, kWasmI32) \
  V(v_ii, kWasmVoid, kWasmI32, kWasmI32) \
  V(v_iii, kWasmVoid, kWasmI32, kWasmI32, kWasmI32) \
  V(v_il, kWasmVoid, kWasmI32, kWasmI64) \
  V(v_li, kWasmVoid, kWasmI64, kWasmI32) \
  V(v_ll, kWasmVoid, kWasmI64, kWasmI64) \

#define FOREACH_SIMD_SIGNATURE(V) \
  V(s_s, kWasmS128, kWasmS128) \
  V(s_f, kWasmS128, kWasmF32) \
  V(s_d, kWasmS128, kWasmF64) \
  V(s_ss, kWasmS128, kWasmS128, kWasmS128) \
  V(s_i, kWasmS128, kWasmI32) \
  V(s_l, kWasmS128, kWasmI64) \
  V(s_si, kWasmS128, kWasmS128, kWasmI32) \
  V(i_s, kWasmI32, kWasmS128) \
  V(v_is, kWasmVoid, kWasmI32, kWasmS128) \
  V(s_sss, kWasmS128, kWasmS128, kWasmS128, kWasmS128) \
  V(s_is, kWasmS128, kWasmI32, kWasmS128)

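// Signature shorthand: each FOREACH_SIGNATURE row is
// <name, return type, parameter types...>, and the name encodes
// "<return>_<params>" (e.g. i_ii is (i32, i32) -> i32, v_is is
// (i32, v128) -> void). Illustrative consumer (DECLARE_SIG_TYPES and
// kSigTypes_* are hypothetical names, not V8 API):
//
//   #define DECLARE_SIG_TYPES(name, ...) \
//     constexpr ValueType kSigTypes_##name[] = {__VA_ARGS__};
//   FOREACH_SIGNATURE(DECLARE_SIG_TYPES)
//   #undef DECLARE_SIG_TYPES
//
// Here kSigTypes_<name>[0] is the return type and the remaining entries are
// the parameter types.
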
#define FOREACH_PREFIX(V) \

#define DECLARE_NAMED_ENUM(name, opcode, ...) kExpr##name = opcode,
#undef DECLARE_NAMED_ENUM
#define DECLARE_PREFIX(name, opcode) k##name##Prefix = opcode,
#define DECLARE_ENUM(name) k##name,

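// Sketch of how the DECLARE_* helpers above are meant to be applied to the
// X-macro lists (the enclosing enum declarations are elided here; the prefix
// entries shown are illustrative):
//
//   enum WasmOpcode {
//     FOREACH_OPCODE(DECLARE_NAMED_ENUM)  // kExprUnreachable = 0x00, ...
//     FOREACH_PREFIX(DECLARE_PREFIX)      // e.g. kSimdPrefix = 0xfd, ...
//   };
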
  static constexpr bool IsPrefixOpcode(WasmOpcode);
  static constexpr bool IsControlOpcode(WasmOpcode);
  static constexpr bool IsExternRefOpcode(WasmOpcode);
  static constexpr bool IsThrowingOpcode(WasmOpcode);
  static constexpr bool IsRelaxedSimdOpcode(WasmOpcode);
  static constexpr bool IsFP16SimdOpcode(WasmOpcode);
  static constexpr bool IsMemoryAccessOpcode(WasmOpcode);
  static constexpr bool IsUnconditionalJump(WasmOpcode);
  static constexpr bool IsBreakable(WasmOpcode);
  static constexpr uint8_t ExtractPrefix(WasmOpcode);
  static inline const char* TrapReasonMessage(TrapReason);
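
// Usage sketch for the helpers above (illustrative only; it assumes the
// kExpr* enumerators generated from FOREACH_OPCODE):
//
//   uint8_t prefix = WasmOpcodes::ExtractPrefix(kExprI8x16Add);  // 0xfd
//   bool has_prefix =
//       WasmOpcodes::IsPrefixOpcode(static_cast<WasmOpcode>(prefix));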