v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
jump-table-assembler.cc
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/wasm/jump-table-assembler.h"

#include "src/codegen/macro-assembler-inl.h"

namespace v8 {
namespace internal {
namespace wasm {

// static
void JumpTableAssembler::GenerateLazyCompileTable(
    Address base, uint32_t num_slots, uint32_t num_imported_functions,
    Address wasm_compile_lazy_target) {
  uint32_t lazy_compile_table_size = num_slots * kLazyCompileTableSlotSize;
  WritableJitAllocation jit_allocation = ThreadIsolation::LookupJitAllocation(
      base, RoundUp<kCodeAlignment>(lazy_compile_table_size),
      ThreadIsolation::JitAllocationType::kWasmLazyCompileTable);
  // Assume enough space, so the Assembler does not try to grow the buffer.
  JumpTableAssembler jtasm(jit_allocation, base);
  for (uint32_t slot_index = 0; slot_index < num_slots; ++slot_index) {
    DCHECK_EQ(slot_index * kLazyCompileTableSlotSize, jtasm.pc_offset());
    jtasm.EmitLazyCompileJumpSlot(slot_index + num_imported_functions,
                                  wasm_compile_lazy_target);
  }
  DCHECK_EQ(lazy_compile_table_size, jtasm.pc_offset());
  FlushInstructionCache(base, lazy_compile_table_size);
}
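// Layout sketch (illustrative, not authoritative): every declared function
// gets one fixed-size slot at offset i * kLazyCompileTableSlotSize. On x64,
// for example, a slot is 10 bytes (5-byte push + 5-byte near jmp, see below),
// so a module with 1000 declared functions needs a 10000-byte lazy compile
// table, and the slot for function (num_imported_functions + i) starts at
// base + i * 10.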

void JumpTableAssembler::InitializeJumpsToLazyCompileTable(
    Address base, uint32_t num_slots, Address lazy_compile_table_start) {
  uint32_t jump_table_size = SizeForNumberOfSlots(num_slots);
  WritableJitAllocation jit_allocation = ThreadIsolation::LookupJitAllocation(
      base, RoundUp<kCodeAlignment>(jump_table_size),
      ThreadIsolation::JitAllocationType::kWasmJumpTable);
  JumpTableAssembler jtasm(jit_allocation, base);

  for (uint32_t slot_index = 0; slot_index < num_slots; ++slot_index) {
    // Make sure we write at the correct offset.
    int slot_offset =
        static_cast<int>(JumpTableAssembler::JumpSlotIndexToOffset(slot_index));

    jtasm.SkipUntil(slot_offset);

    Address target =
        lazy_compile_table_start +
        JumpTableAssembler::LazyCompileSlotIndexToOffset(slot_index);

#ifdef DEBUG
    int offset_before_emit = jtasm.pc_offset();
#endif
    // This function initializes the first jump table with jumps to the lazy
    // compile table. Both get allocated in the constructor of the
    // {NativeModule}, so they both should end up in the initial code space.
    // Jumps within one code space can always be near jumps, so the following
    // call to {EmitJumpSlot} should always succeed. If the call fails, then
    // either the jump table allocation was changed incorrectly so that the
    // lazy compile table is no longer within near-jump distance of the jump
    // table (e.g. the initial code space was too small to fit both tables), or
    // the code space was allocated larger than the maximum near-jump distance.
    CHECK(jtasm.EmitJumpSlot(target));

    DCHECK_EQ(kJumpTableSlotSize, jtasm.pc_offset() - offset_before_emit);
  }
  FlushInstructionCache(base, jump_table_size);
}
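// A sketch of why near jumps suffice here, under the assumption stated above
// that both tables live in the same initial code space: the distance between a
// jump-table slot and its lazy-compile slot is bounded by the size of that
// code space, which is kept well within the architecture's near-jump reach
// (e.g. the +/-2 GB range of an x64 rel32 jmp, or the +/-128 MB range of an
// arm64 unconditional branch), so {EmitJumpSlot} never needs the far-jump
// fallback in this function.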

template <typename T>
void JumpTableAssembler::emit(T value) {
  jit_allocation_.WriteUnalignedValue(pc_, value);
  pc_ += sizeof(T);
}

template <typename T>
DISABLE_UBSAN void JumpTableAssembler::emit(T value, RelaxedStoreTag) {
  // We disable ubsan for these stores since they don't follow the alignment
  // requirements. We instead guarantee in the jump table layout that the writes
  // will still be atomic since they don't cross a qword boundary.
#if V8_TARGET_ARCH_X64
#ifdef DEBUG
  Address write_start = pc_;
  Address write_end = write_start + sizeof(T) - 1;
  // Check that the write doesn't cross a qword boundary.
  DCHECK_EQ(write_start >> kSystemPointerSizeLog2,
            write_end >> kSystemPointerSizeLog2);
#endif
#endif
  jit_allocation_.WriteValue(pc_, value, kRelaxedStore);
  pc_ += sizeof(T);
}
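// Example of the layout guarantee the relaxed-store path relies on
// (illustrative): an x64 jump-table slot starts at an 8-byte-aligned offset,
// and its main write is a single 8-byte store covering bytes
// [slot, slot + 8), so it cannot straddle a qword boundary; the smaller 4-byte
// stores used under V8_ENABLE_CET_IBT stay within one qword for the same
// reason.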

// The implementation is compact enough to implement it inline here. If it gets
// much bigger, we might want to split it in a separate file per architecture.
#if V8_TARGET_ARCH_X64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  // Use a push, because mov to an extended register takes 6 bytes.
  const uint8_t inst[kLazyCompileTableSlotSize] = {
      0x68, 0, 0, 0, 0,  // pushq func_index
      0xe9, 0, 0, 0, 0,  // near_jmp displacement
  };

  intptr_t displacement =
      lazy_compile_target - (pc_ + kLazyCompileTableSlotSize);

  emit<uint8_t>(inst[0]);
  emit<uint32_t>(func_index);
  emit<uint8_t>(inst[5]);
  emit<int32_t>(base::checked_cast<int32_t>(displacement));
}
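// Worked example with hypothetical addresses: if this slot starts at
// pc_ = 0x1000 and lazy_compile_target = 0x2400, then
// displacement = 0x2400 - (0x1000 + 10) = 0x13f6, and the emitted bytes are
//   68 <func_index, 4 bytes LE>  e9 f6 13 00 00
// i.e. "push func_index; jmp rel32", landing on lazy_compile_target.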

bool JumpTableAssembler::EmitJumpSlot(Address target) {
#ifdef V8_ENABLE_CET_IBT
  uint32_t endbr_insn = 0xfa1e0ff3;
  uint32_t nop = 0x00401f0f;
  emit<uint32_t>(endbr_insn, kRelaxedStore);
  // Add a nop to ensure that the next block is 8 byte aligned.
  emit<uint32_t>(nop, kRelaxedStore);
#endif

  intptr_t displacement =
      target - (pc_ + kIntraSegmentJmpInstrSize);
  if (!is_int32(displacement)) return false;

  uint8_t inst[kJumpTableSlotSize] = {
      0xe9, 0, 0, 0, 0,  // near_jmp displacement
      0xcc, 0xcc, 0xcc,  // int3 * 3
  };
  int32_t displacement32 = base::checked_cast<int32_t>(displacement);
  memcpy(&inst[1], &displacement32, sizeof(int32_t));

  // The jump table is updated live, so the write has to be atomic.
  emit<uint64_t>(*reinterpret_cast<uint64_t*>(inst), kRelaxedStore);

  return true;
}

void JumpTableAssembler::EmitFarJumpSlot(Address target) {
  const uint8_t inst[kFarJumpTableSlotSize] = {
      0xff, 0x25, 0x02, 0, 0, 0,  // jmp [rip+0x2]
      0x66, 0x90,                 // Nop(2)
      0, 0, 0, 0, 0, 0, 0, 0,     // target
  };

  emit<uint64_t>(*reinterpret_cast<const uint64_t*>(inst));
  emit<uint64_t>(target);
}
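// How the far jump slot reaches its target (a sketch of the encoding above):
// "ff 25 02 00 00 00" is jmp qword ptr [rip+2]; rip points just past the
// 6-byte instruction, i.e. to slot + 6, so the load address is slot + 8, which
// is exactly where the 8-byte target is stored after the 2-byte nop. Patching
// the slot therefore only has to rewrite that pointer, see {PatchFarJumpSlot}.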

// static
void JumpTableAssembler::PatchFarJumpSlot(WritableJitAllocation& jit_allocation,
                                          Address slot, Address target) {
  // The slot needs to be pointer-size aligned so we can atomically update it.
  DCHECK(IsAligned(slot + kSystemPointerSize, kSystemPointerSize));
  // Offset of the target is at 8 bytes, see {EmitFarJumpSlot}.
  jit_allocation.WriteValue(slot + kSystemPointerSize, target, kRelaxedStore);
  // The update is atomic because the address is properly aligned.
  // Because of cache coherence, the data update will eventually be seen by all
  // cores. It's ok if they temporarily jump to the old target.
}

void JumpTableAssembler::SkipUntil(int offset) {
  DCHECK_GE(offset, pc_offset());
  pc_ += offset - pc_offset();
}

#elif V8_TARGET_ARCH_IA32
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  static_assert(kWasmCompileLazyFuncIndexRegister == edi);
  const uint8_t inst[kLazyCompileTableSlotSize] = {
      0xbf, 0, 0, 0, 0,  // mov edi, func_index
      0xe9, 0, 0, 0, 0,  // near_jmp displacement
  };
  intptr_t displacement =
      lazy_compile_target - (pc_ + kLazyCompileTableSlotSize);

  emit<uint8_t>(inst[0]);
  emit<uint32_t>(func_index);
  emit<uint8_t>(inst[5]);
  emit<int32_t>(base::checked_cast<int32_t>(displacement));
}

bool JumpTableAssembler::EmitJumpSlot(Address target) {
  intptr_t displacement = target - (pc_ + kJumpTableSlotSize);

  const uint8_t inst[kJumpTableSlotSize] = {
      0xe9, 0, 0, 0, 0,  // near_jmp displacement
  };

  // The jump table is updated live, so the writes have to be atomic.
  emit<uint8_t>(inst[0], kRelaxedStore);
  emit<int32_t>(base::checked_cast<int32_t>(displacement), kRelaxedStore);

  return true;
}

void JumpTableAssembler::EmitFarJumpSlot(Address target) {
  static_assert(kJumpTableSlotSize == kFarJumpTableSlotSize);
  EmitJumpSlot(target);
}

// static
void JumpTableAssembler::PatchFarJumpSlot(WritableJitAllocation& jit_allocation,
                                          Address slot, Address target) {
  UNREACHABLE();
}

void JumpTableAssembler::SkipUntil(int offset) {
  DCHECK_GE(offset, pc_offset());
  pc_ += offset - pc_offset();
}

#elif V8_TARGET_ARCH_ARM
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  static_assert(kWasmCompileLazyFuncIndexRegister == r4);
  // Note that below, [pc] points to the instruction after the next.
  const uint32_t inst[kLazyCompileTableSlotSize / 4] = {
      0xe59f4000,  // ldr r4, [pc]
      0xe59ff000,  // ldr pc, [pc]
      0x00000000,  // func_index
      0x00000000,  // target
  };
  emit<uint32_t>(inst[0]);
  emit<uint32_t>(inst[1]);
  emit<uint32_t>(func_index);
  emit<Address>(lazy_compile_target);
}
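// Offset arithmetic for the slot above (illustrative): on ARM, reading pc
// yields the address of the current instruction plus 8. So "ldr r4, [pc]" at
// slot + 0 loads from slot + 8 (the func_index word), and "ldr pc, [pc]" at
// slot + 4 loads from slot + 12 (the target word), transferring control to
// lazy_compile_target with the function index already in r4.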

bool JumpTableAssembler::EmitJumpSlot(Address target) {
  static_assert(kInstrSize == kInt32Size);
  static_assert(kJumpTableSlotSize == 2 * kInstrSize);

  // Load from [pc + kInstrSize] to pc. Note that {pc} points two instructions
  // after the currently executing one.
  const uint32_t inst[kJumpTableSlotSize / kInstrSize] = {
      0xe51ff004,  // ldr pc, [pc, -4]
      0x00000000,  // target
  };

  // This function is also used for patching existing jump slots and the writes
  // need to be atomic.
  emit<uint32_t>(inst[0], kRelaxedStore);
  emit<Address>(target, kRelaxedStore);
  return true;
}
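// Same pc-relative trick as above, condensed to two words (a sketch): at
// slot + 0, pc reads as slot + 8, so the load address [pc, #-4] is slot + 4,
// i.e. the word right after the ldr that holds the jump target. Writing that
// word with a relaxed atomic store is what makes live re-patching of a slot
// safe.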

void JumpTableAssembler::EmitFarJumpSlot(Address target) {
  static_assert(kJumpTableSlotSize == kFarJumpTableSlotSize);
  EmitJumpSlot(target);
}

// static
void JumpTableAssembler::PatchFarJumpSlot(WritableJitAllocation& jit_allocation,
                                          Address slot, Address target) {
  UNREACHABLE();
}

void JumpTableAssembler::SkipUntil(int offset) {
  // On this platform the jump table is not zapped with valid instructions, so
  // skipping over bytes is not allowed.
  DCHECK_EQ(offset, pc_offset());
}

#elif V8_TARGET_ARCH_ARM64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  uint16_t func_index_low = func_index & 0xffff;
  uint16_t func_index_high = func_index >> 16;

  // TODO(sroettger): This bti instruction is a temporary fix for crashes that
  // we observed in the wild. We can probably avoid this again if we change the
  // callee to jump to the far jump table instead.
  const uint32_t inst[kLazyCompileTableSlotSize / 4] = {
      0xd50324df,  // bti.jc
      0x52800008,  // mov w8, func_index_low
      0x72a00008,  // movk w8, func_index_high, LSL#0x10
      0x14000000,  // b lazy_compile_target
  };
  static_assert(kWasmCompileLazyFuncIndexRegister == x8);

  int64_t target_offset = MacroAssembler::CalculateTargetOffset(
      lazy_compile_target, RelocInfo::NO_INFO,
      reinterpret_cast<uint8_t*>(pc_ + 3 * kInstrSize));
  DCHECK(MacroAssembler::IsNearCallOffset(target_offset));

  emit<uint32_t>(inst[0]);
  emit<uint32_t>(inst[1] | Assembler::ImmMoveWide(func_index_low));
  emit<uint32_t>(inst[2] | Assembler::ImmMoveWide(func_index_high));
  emit<uint32_t>(inst[3] | Assembler::ImmUncondBranch(
                               base::checked_cast<int32_t>(target_offset)));
}
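// Example of the immediate split (hypothetical value): func_index = 0x12345
// gives func_index_low = 0x2345 and func_index_high = 0x1, so the slot
// materializes x8 via "mov w8, #0x2345; movk w8, #0x1, lsl #16" before the
// branch, which is why kWasmCompileLazyFuncIndexRegister must be x8 here.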

bool JumpTableAssembler::EmitJumpSlot(Address target) {
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
  static constexpr ptrdiff_t kCodeEntryMarkerSize = kInstrSize;
#else
  static constexpr ptrdiff_t kCodeEntryMarkerSize = 0;
#endif

  int64_t target_offset = MacroAssembler::CalculateTargetOffset(
      target, RelocInfo::NO_INFO,
      reinterpret_cast<uint8_t*>(pc_ + kCodeEntryMarkerSize));
  if (!MacroAssembler::IsNearCallOffset(target_offset)) {
    return false;
  }

#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
  uint32_t bti_inst = 0xd503245f;  // bti c
  emit<uint32_t>(bti_inst, kRelaxedStore);
#endif

  uint32_t branch_inst =
      0x14000000 |
      Assembler::ImmUncondBranch(base::checked_cast<int32_t>(target_offset));
  emit<uint32_t>(branch_inst, kRelaxedStore);

  return true;
}

void JumpTableAssembler::EmitFarJumpSlot(Address target) {
  DCHECK(MacroAssembler::DefaultTmpList().IncludesAliasOf(x16));

  const uint32_t inst[kFarJumpTableSlotSize / 4] = {
      0x58000050,  // ldr x16, #8
      0xd61f0200,  // br x16
      0x00000000,  // target[0]
      0x00000000,  // target[1]
  };
  emit<uint32_t>(inst[0]);
  emit<uint32_t>(inst[1]);
  emit<Address>(target);

  static_assert(2 * kInstrSize == kSystemPointerSize);
}
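// Sketch of the literal load above: "ldr x16, #8" is a pc-relative literal
// load whose encoded offset (imm19 = 2 in 0x58000050, scaled by 4) points 8
// bytes past the ldr itself, i.e. to slot + 8. That is exactly where the two
// target words are emitted after "br x16", so the slot loads the absolute
// target address and branches through x16.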

// static
void JumpTableAssembler::PatchFarJumpSlot(WritableJitAllocation& jit_allocation,
                                          Address slot, Address target) {
  // See {EmitFarJumpSlot} for the offset of the target (16 bytes with
  // CFI enabled, 8 bytes otherwise).
  int kTargetOffset = 2 * kInstrSize;
  // The slot needs to be pointer-size aligned so we can atomically update it.
  DCHECK(IsAligned(slot + kTargetOffset, kSystemPointerSize));
  jit_allocation.WriteValue(slot + kTargetOffset, target, kRelaxedStore);
  // The data update is guaranteed to be atomic since it's a properly aligned
  // store of a single machine word. This update will eventually be observed by
  // any concurrent [ldr] on the same address because of the data cache
  // coherence. It's ok if other cores temporarily jump to the old target.
}

void JumpTableAssembler::SkipUntil(int offset) {
  // On this platform the jump table is not zapped with valid instructions, so
  // skipping over bytes is not allowed.
  DCHECK_EQ(offset, pc_offset());
}

#elif V8_TARGET_ARCH_S390X
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  static_assert(kWasmCompileLazyFuncIndexRegister == r7);
  uint8_t inst[kLazyCompileTableSlotSize] = {
      0xc0, 0x71, 0x00, 0x00, 0x00, 0x00,     // lgfi r7, 0
      0xc0, 0x10, 0x00, 0x00, 0x00, 0x00,     // larl r1, 0
      0xe3, 0x10, 0x10, 0x12, 0x00, 0x04,     // lg r1, 18(r1)
      0x07, 0xf1,                             // br r1
      0xb9, 0x04, 0x00, 0x00,                 // nop (alignment)
      0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0  // lazy_compile_target
  };

#if V8_TARGET_LITTLE_ENDIAN
  // We need to emit the value in big endian format.
  func_index = base::bits::ReverseBytes(func_index);
#endif
  memcpy(&inst[2], &func_index, sizeof(int32_t));
  for (size_t i = 0; i < (kLazyCompileTableSlotSize - sizeof(Address)); i++) {
    emit<uint8_t>(inst[i]);
  }
  emit<Address>(lazy_compile_target);
}
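// Offset arithmetic for the slot above (a sketch): "larl r1, 0" yields the
// address of the larl itself, i.e. slot + 6 (the lgfi occupies bytes 0..5).
// The following "lg r1, 18(r1)" therefore loads from slot + 24, which is the
// 8-byte lazy_compile_target stored after the alignment nop, and "br r1" jumps
// there with the (big-endian) func_index already loaded into r7 by the lgfi.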

bool JumpTableAssembler::EmitJumpSlot(Address target) {
  intptr_t relative_target = target - pc_;

  if (!is_int32(relative_target / 2)) {
    return false;
  }

  uint8_t inst[kJumpTableSlotSize] = {
      0xc0, 0xf4, 0x00,
      0x00, 0x00, 0x00,  // brcl(al, Operand(relative_target / 2))
      0x18, 0x00         // nop (alignment)
  };

  int32_t relative_target_addr = static_cast<int32_t>(relative_target / 2);
#if V8_TARGET_LITTLE_ENDIAN
  // We need to emit the value in big endian format.
  relative_target_addr = base::bits::ReverseBytes(relative_target_addr);
#endif
  memcpy(&inst[2], &relative_target_addr, sizeof(int32_t));
  // The jump table is updated live, so the write has to be atomic.
  emit<uint64_t>(*reinterpret_cast<uint64_t*>(inst), kRelaxedStore);

  return true;
}

void JumpTableAssembler::EmitFarJumpSlot(Address target) {
  const uint8_t inst[kFarJumpTableSlotSize] = {
      0xc0, 0x10, 0x00, 0x00, 0x00, 0x00,     // larl r1, 0
      0xe3, 0x10, 0x10, 0x10, 0x00, 0x04,     // lg r1, 16(r1)
      0x07, 0xf1,                             // br r1
      0x18, 0x00,                             // nop (alignment)
      0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0  // target
  };

  for (size_t i = 0; i < (kFarJumpTableSlotSize - sizeof(Address)); i++) {
    emit<uint8_t>(inst[i]);
  }
  emit<Address>(target);
}

// static
void JumpTableAssembler::PatchFarJumpSlot(WritableJitAllocation& jit_allocation,
                                          Address slot, Address target) {
  Address target_addr = slot + 8;
  jit_allocation.WriteValue(target_addr, target, kRelaxedStore);
}

void JumpTableAssembler::SkipUntil(int offset) {
  // On this platform the jump table is not zapped with valid instructions, so
  // skipping over bytes is not allowed.
  DCHECK_EQ(offset, pc_offset());
}

#elif V8_TARGET_ARCH_MIPS64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  uint32_t func_index_low = func_index & 0xffff;
  uint32_t func_index_high = func_index >> 16;

  const uint32_t inst[kLazyCompileTableSlotSize / 4] = {
      0x3c0c0000,  // lui $t0, func_index_high
      0x358c0000,  // ori $t0, $t0, func_index_low
      0x03e00825,  // move $at, $ra
      0x04110001,  // bal 1
      0x00000000,  // nop (alignment, in delay slot)
      0xdff9000c,  // ld $t9, 12($ra) (ra = pc)
      0x03200008,  // jr $t9
      0x0020f825,  // move $ra, $at (in delay slot)
      0x00000000,  // lazy_compile_target[0]
      0x00000000,  // lazy_compile_target[1]
  };
  static_assert(kWasmCompileLazyFuncIndexRegister == t0);

  emit<uint32_t>(inst[0] | func_index_high);
  emit<uint32_t>(inst[1] | func_index_low);
  emit<uint32_t>(inst[2]);
  emit<uint32_t>(inst[3]);
  emit<uint32_t>(inst[4]);
  emit<uint32_t>(inst[5]);
  emit<uint32_t>(inst[6]);
  emit<uint32_t>(inst[7]);
  emit<Address>(lazy_compile_target);
}

bool JumpTableAssembler::EmitJumpSlot(Address target) {
  const uint32_t inst[kJumpTableSlotSize / kInstrSize] = {
      0x03e00825,  // move $at, $ra
      0x04110001,  // bal 1
      0x00000000,  // nop (alignment, in delay slot)
      0xdff9000c,  // ld $t9, 12($ra) (ra = pc)
      0x03200008,  // jr $t9
      0x0020f825,  // move $ra, $at (in delay slot)
      0x00000000,  // target[0]
      0x00000000,  // target[1]
  };

  // This function is also used for patching existing jump slots and the writes
  // need to be atomic.
  emit<uint32_t>(inst[0], kRelaxedStore);
  emit<uint32_t>(inst[1], kRelaxedStore);
  emit<uint32_t>(inst[2], kRelaxedStore);
  emit<uint32_t>(inst[3], kRelaxedStore);
  emit<uint32_t>(inst[4], kRelaxedStore);
  emit<uint32_t>(inst[5], kRelaxedStore);
  emit<Address>(target, kRelaxedStore);
  return true;
}

void JumpTableAssembler::EmitFarJumpSlot(Address target) {
  static_assert(kJumpTableSlotSize == kFarJumpTableSlotSize);
  EmitJumpSlot(target);
}

// static
void JumpTableAssembler::PatchFarJumpSlot(WritableJitAllocation& jit_allocation,
                                          Address slot, Address target) {
  UNREACHABLE();
}

void JumpTableAssembler::SkipUntil(int offset) {
  // On this platform the jump table is not zapped with valid instructions, so
  // skipping over bytes is not allowed.
  DCHECK_EQ(offset, pc_offset());
}

#elif V8_TARGET_ARCH_LOONG64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  uint32_t func_index_low_12 = func_index & 0xfff;
  uint32_t func_index_high_20 = func_index >> 12;

  const uint32_t inst[kLazyCompileTableSlotSize / 4] = {
      0x1400000c,  // lu12i.w $t0, func_index_high_20
      0x0380018c,  // ori $t0, $t0, func_index_low_12
      0x50000000,  // b lazy_compile_target
  };
  static_assert(kWasmCompileLazyFuncIndexRegister == t0);

  int64_t target_offset = MacroAssembler::CalculateTargetOffset(
      lazy_compile_target, RelocInfo::NO_INFO,
      reinterpret_cast<uint8_t*>(pc_ + 2 * kInstrSize));
  DCHECK(MacroAssembler::IsNearCallOffset(target_offset));

  uint32_t target_offset_offs26 = (target_offset & 0xfffffff) >> 2;
  uint32_t target_offset_low_16 = target_offset_offs26 & 0xffff;
  uint32_t target_offset_high_10 = target_offset_offs26 >> 16;

  emit<uint32_t>(inst[0] | func_index_high_20 << kRjShift);
  emit<uint32_t>(inst[1] | func_index_low_12 << kRkShift);
  emit<uint32_t>(inst[2] | target_offset_low_16 << kRkShift |
                 target_offset_high_10);
}

bool JumpTableAssembler::EmitJumpSlot(Address target) {
  int64_t target_offset = MacroAssembler::CalculateTargetOffset(
      target, RelocInfo::NO_INFO, reinterpret_cast<uint8_t*>(pc_));
  if (!MacroAssembler::IsNearCallOffset(target_offset)) {
    return false;
  }

  uint32_t target_offset_offs26 = (target_offset & 0xfffffff) >> 2;
  uint32_t target_offset_low_16 = target_offset_offs26 & 0xffff;
  uint32_t target_offset_high_10 = target_offset_offs26 >> 16;

  uint32_t branch_inst =
      0x50000000 | target_offset_low_16 << kRkShift | target_offset_high_10;
  emit<uint32_t>(branch_inst, kRelaxedStore);

  return true;
}
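// Worked encoding example for the 26-bit branch above (hypothetical offset):
// target_offset = 0x1000 gives offs26 = 0x1000 >> 2 = 0x400, so low_16 = 0x400
// and high_10 = 0, and the emitted word is 0x50000000 | (0x400 << 10) =
// 0x50100000, i.e. "b +0x1000". The low/high split mirrors how LoongArch packs
// offs[15:0] into bits 25:10 and offs[25:16] into bits 9:0 of the B
// instruction.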

void JumpTableAssembler::EmitFarJumpSlot(Address target) {
  const uint32_t inst[kFarJumpTableSlotSize / 4] = {
      0x18000093,  // pcaddi $t7, 4
      0x28c00273,  // ld.d $t7, $t7, 0
      0x4c000260,  // jirl $zero, $t7, 0
      0x03400000,  // nop (make target pointer-size aligned)
      0x00000000,  // target[0]
      0x00000000,  // target[1]
  };
  emit<uint32_t>(inst[0]);
  emit<uint32_t>(inst[1]);
  emit<uint32_t>(inst[2]);
  emit<uint32_t>(inst[3]);
  emit<Address>(target);
}

// static
void JumpTableAssembler::PatchFarJumpSlot(WritableJitAllocation& jit_allocation,
                                          Address slot, Address target) {
  // See {EmitFarJumpSlot} for the address of the target.
  Address target_addr = slot + kFarJumpTableSlotSize - kSystemPointerSize;
  // The slot needs to be pointer-size aligned so we can atomically update it.
  DCHECK(IsAligned(target_addr, kSystemPointerSize));
  jit_allocation.WriteValue(target_addr, target, kRelaxedStore);
  // The data update is guaranteed to be atomic since it's a properly aligned
  // store of a single machine word. This update will eventually be observed by
  // any concurrent [ld.d] on the same address because of the data cache
  // coherence. It's ok if other cores temporarily jump to the old target.
}

void JumpTableAssembler::SkipUntil(int offset) {
  // On this platform the jump table is not zapped with valid instructions, so
  // skipping over bytes is not allowed.
  DCHECK_EQ(offset, pc_offset());
}

#elif V8_TARGET_ARCH_PPC64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  static_assert(kWasmCompileLazyFuncIndexRegister == r15);
  const uint32_t inst[kLazyCompileTableSlotSize / 4] = {
      0x7c0802a6,  // mflr r0
      0x48000005,  // b(4, SetLK)
      0x7d8802a6,  // mflr ip
      0x7c0803a6,  // mtlr r0
      0x81ec0018,  // lwz r15, 24(ip)
      0xe80c0020,  // ld r0, 32(ip)
      0x7c0903a6,  // mtctr r0
      0x4e800420,  // bctr
      0x00000000,  // func_index
      0x60000000,  // nop (alignment)
      0x00000000,  // lazy_compile_target_0
      0x00000000   // lazy_compile_target_1
  };
  emit<uint32_t>(inst[0]);
  emit<uint32_t>(inst[1]);
  emit<uint32_t>(inst[2]);
  emit<uint32_t>(inst[3]);
  emit<uint32_t>(inst[4]);
  emit<uint32_t>(inst[5]);
  emit<uint32_t>(inst[6]);
  emit<uint32_t>(inst[7]);
  emit<uint32_t>(func_index);
  emit<uint32_t>(inst[9]);
  emit<Address>(lazy_compile_target);
}
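// How this slot finds its own data words (a sketch): "b(4, SetLK)" branches to
// the very next instruction but records the return address in LR, so the
// second "mflr ip" materializes ip = slot + 8. The later "lwz r15, 24(ip)"
// then reads func_index at slot + 32 and "ld r0, 32(ip)" reads the 8-byte
// lazy_compile_target at slot + 40, exactly where those words sit in the
// 12-word layout above. The original LR is preserved via r0.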

bool JumpTableAssembler::EmitJumpSlot(Address target) {
  intptr_t relative_target = target - pc_;

  if (!is_int26(relative_target)) {
    return false;
  }

  const uint32_t inst[kJumpTableSlotSize / kInstrSize] = {
      0x48000000  // b(relative_target, LeaveLK)
  };

  CHECK((relative_target & (kAAMask | kLKMask)) == 0);
  // The jump table is updated live, so the write has to be atomic.
  emit<uint32_t>(inst[0] | relative_target, kRelaxedStore);
  return true;
}

void JumpTableAssembler::EmitFarJumpSlot(Address target) {
  const uint32_t inst[kFarJumpTableSlotSize / 4] = {
      0x7c0802a6,  // mflr r0
      0x48000005,  // b(4, SetLK)
      0x7d8802a6,  // mflr ip
      0x7c0803a6,  // mtlr r0
      0xe98c0018,  // ld ip, 24(ip)
      0x7d8903a6,  // mtctr ip
      0x4e800420,  // bctr
      0x60000000,  // nop (alignment)
      0x00000000,  // target_0
      0x00000000,  // target_1
      0x60000000,  // nop
      0x60000000   // nop
  };
  emit<uint32_t>(inst[0]);
  emit<uint32_t>(inst[1]);
  emit<uint32_t>(inst[2]);
  emit<uint32_t>(inst[3]);
  emit<uint32_t>(inst[4]);
  emit<uint32_t>(inst[5]);
  emit<uint32_t>(inst[6]);
  emit<uint32_t>(inst[7]);
  emit<Address>(target);
  emit<uint32_t>(inst[10]);
  emit<uint32_t>(inst[11]);
}

// static
void JumpTableAssembler::PatchFarJumpSlot(WritableJitAllocation& jit_allocation,
                                          Address slot, Address target) {
  Address target_addr = slot + kFarJumpTableSlotSize - 8;
  jit_allocation.WriteValue(target_addr, target, kRelaxedStore);
}

void JumpTableAssembler::SkipUntil(int offset) {
  // On this platform the jump table is not zapped with valid instructions, so
  // skipping over bytes is not allowed.
  DCHECK_EQ(offset, pc_offset());
}

#elif V8_TARGET_ARCH_RISCV64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  static_assert(kLazyCompileTableSlotSize == 3 * kInstrSize);
  int64_t high_20 = (func_index + 0x800) >> 12;
  int64_t low_12 = int64_t(func_index) << 52 >> 52;

  int64_t target_offset = MacroAssembler::CalculateTargetOffset(
      lazy_compile_target, RelocInfo::NO_INFO,
      reinterpret_cast<uint8_t*>(pc_ + 2 * kInstrSize));
  DCHECK(is_int21(target_offset));
  DCHECK_EQ(target_offset & 0x1, 0);

  const uint32_t inst[kLazyCompileTableSlotSize / 4] = {
      (RO_LUI | (kWasmCompileLazyFuncIndexRegister.code() << kRdShift) |
       int32_t(high_20 << kImm20Shift)),  // lui t0, high_20
      (RO_ADDI | (kWasmCompileLazyFuncIndexRegister.code() << kRdShift) |
       (kWasmCompileLazyFuncIndexRegister.code() << kRs1Shift) |
       int32_t(low_12 << kImm12Shift)),  // addi t0, t0, low_12
      (RO_JAL | (zero_reg.code() << kRdShift) |
       uint32_t(target_offset & 0xff000) |           // bits 19-12
       uint32_t((target_offset & 0x800) << 9) |      // bit 11
       uint32_t((target_offset & 0x7fe) << 20) |     // bits 10-1
       uint32_t((target_offset & 0x100000) << 11)),  // bit 20, jal
  };

  emit<uint32_t>(inst[0]);
  emit<uint32_t>(inst[1]);
  emit<uint32_t>(inst[2]);
}

bool JumpTableAssembler::EmitJumpSlot(Address target) {
  static_assert(kInstrSize == kInt32Size);
  static_assert(kJumpTableSlotSize == 2 * kInstrSize);
  intptr_t relative_target = target - pc_;
  if (!is_int32(relative_target)) {
    return false;
  }

  uint32_t inst[kJumpTableSlotSize / kInstrSize] = {kNopByte, kNopByte};
  int64_t high_20 = (relative_target + 0x800) >> 12;
  int64_t low_12 = int64_t(relative_target) << 52 >> 52;
  inst[0] = (RO_AUIPC | (t6.code() << kRdShift) |
             int32_t(high_20 << kImm20Shift));  // auipc t6, high_20
  inst[1] =
      (RO_JALR | (zero_reg.code() << kRdShift) | (t6.code() << kRs1Shift) |
       int32_t(low_12 << kImm12Shift));  // jalr zero, t6, low_12

  // This function is also used for patching existing jump slots and the writes
  // need to be atomic.
  emit<uint64_t>(*reinterpret_cast<uint64_t*>(inst), kRelaxedStore);
  return true;
}
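// Worked example of the auipc/jalr split (hypothetical offset): for
// relative_target = 0x12345, low_12 is the sign-extended low 12 bits (0x345)
// and high_20 = (0x12345 + 0x800) >> 12 = 0x12. "auipc t6, 0x12" yields
// pc + 0x12000 and the jalr adds 0x345, reaching pc + 0x12345. The +0x800
// rounding in high_20 compensates for offsets whose low 12 bits sign-extend to
// a negative value (e.g. relative_target = 0x1801 gives low_12 = -0x7ff and
// high_20 = 0x2, and 0x2000 - 0x7ff = 0x1801).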

void JumpTableAssembler::EmitFarJumpSlot(Address target) {
  uint32_t high_20 = (int64_t(4 * kInstrSize + 0x800) >> 12);
  uint32_t low_12 = (int64_t(4 * kInstrSize) << 52 >> 52);

  const uint32_t inst[kFarJumpTableSlotSize / 4] = {
      (RO_AUIPC | (t6.code() << kRdShift) |
       (high_20 << kImm20Shift)),  // auipc t6, high_20
      (RO_LD | (t6.code() << kRdShift) | (t6.code() << kRs1Shift) |
       (low_12 << kImm12Shift)),  // ld t6, low_12(t6)
      (RO_JALR | (t6.code() << kRs1Shift) | zero_reg.code() << kRdShift),
      (kNopByte),  // nop
      0x0000,      // target[0]
      0x0000,      // target[1]
  };
  emit<uint32_t>(inst[0]);
  emit<uint32_t>(inst[1]);
  emit<uint32_t>(inst[2]);
  emit<uint32_t>(inst[3]);
  emit<Address>(target);
}
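// Offset check for the far slot above (a sketch): the pc-relative base is the
// auipc itself, and the 8-byte target is stored 4 instructions (16 bytes)
// later. With an offset of 16, high_20 = 0 and low_12 = 16, so "auipc t6, 0"
// sets t6 to the slot address and "ld t6, 16(t6)" fetches the absolute target
// that the final jalr (with rd = zero) then jumps to.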

// static
void JumpTableAssembler::PatchFarJumpSlot(WritableJitAllocation& jit_allocation,
                                          Address slot, Address target) {
  // See {EmitFarJumpSlot} for the offset of the target.
  int kTargetOffset = kFarJumpTableSlotSize - sizeof(Address);
  jit_allocation.WriteValue(slot + kTargetOffset, target, kRelaxedStore);
  // The data update is guaranteed to be atomic since it's a properly aligned
  // store of a single machine word. This update will eventually be observed by
  // any concurrent [ld] on the same address because of the data cache
  // coherence. It's ok if other cores temporarily jump to the old target.
}

void JumpTableAssembler::SkipUntil(int offset) {
  // On this platform the jump table is not zapped with valid instructions, so
  // skipping over bytes is not allowed.
  DCHECK_EQ(offset, pc_offset());
}

#elif V8_TARGET_ARCH_RISCV32
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  static_assert(kLazyCompileTableSlotSize == 3 * kInstrSize);
  int64_t high_20 = (func_index + 0x800) >> 12;
  int64_t low_12 = int64_t(func_index) << 52 >> 52;

  int64_t target_offset = MacroAssembler::CalculateTargetOffset(
      lazy_compile_target, RelocInfo::NO_INFO,
      reinterpret_cast<uint8_t*>(pc_ + 2 * kInstrSize));
  DCHECK(is_int21(target_offset));
  DCHECK_EQ(target_offset & 0x1, 0);

  const uint32_t inst[kLazyCompileTableSlotSize / 4] = {
      (RO_LUI | (kWasmCompileLazyFuncIndexRegister.code() << kRdShift) |
       int32_t(high_20 << kImm20Shift)),  // lui t0, high_20
      (RO_ADDI | (kWasmCompileLazyFuncIndexRegister.code() << kRdShift) |
       (kWasmCompileLazyFuncIndexRegister.code() << kRs1Shift) |
       int32_t(low_12 << kImm12Shift)),  // addi t0, t0, low_12
      (RO_JAL | (zero_reg.code() << kRdShift) |
       uint32_t(target_offset & 0xff000) |           // bits 19-12
       uint32_t((target_offset & 0x800) << 9) |      // bit 11
       uint32_t((target_offset & 0x7fe) << 20) |     // bits 10-1
       uint32_t((target_offset & 0x100000) << 11)),  // bit 20, jal
  };

  emit<uint32_t>(inst[0]);
  emit<uint32_t>(inst[1]);
  emit<uint32_t>(inst[2]);
}

bool JumpTableAssembler::EmitJumpSlot(Address target) {
  uint32_t high_20 = (int64_t(4 * kInstrSize + 0x800) >> 12);
  uint32_t low_12 = (int64_t(4 * kInstrSize) << 52 >> 52);

  const uint32_t inst[kJumpTableSlotSize / 4] = {
      (RO_AUIPC | (t6.code() << kRdShift) |
       (high_20 << kImm20Shift)),  // auipc t6, high_20
      (RO_LW | (t6.code() << kRdShift) | (t6.code() << kRs1Shift) |
       (low_12 << kImm12Shift)),  // lw t6, low_12(t6)
      (RO_JALR | (t6.code() << kRs1Shift) | zero_reg.code() << kRdShift),
      (kNopByte),  // nop
      0x0000,      // target
  };
  emit<uint32_t>(inst[0]);
  emit<uint32_t>(inst[1]);
  emit<uint32_t>(inst[2]);
  emit<uint32_t>(inst[3]);
  emit<Address>(target);
  return true;
}

void JumpTableAssembler::EmitFarJumpSlot(Address target) {
  static_assert(kJumpTableSlotSize == kFarJumpTableSlotSize);
  EmitJumpSlot(target);
}

// static
void JumpTableAssembler::PatchFarJumpSlot(WritableJitAllocation& jit_allocation,
                                          Address slot, Address target) {
  UNREACHABLE();
}

void JumpTableAssembler::SkipUntil(int offset) {
  // On this platform the jump table is not zapped with valid instructions, so
  // skipping over bytes is not allowed.
  DCHECK_EQ(offset, pc_offset());
}

#else
#error Unknown architecture.
#endif

}  // namespace wasm
}  // namespace internal
}  // namespace v8