v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
assembler-riscv.cc
1// Copyright (c) 1994-2006 Sun Microsystems Inc.
2// All Rights Reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions are
6// met:
7//
8// - Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// - Redistribution in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the distribution.
14//
15// - Neither the name of Sun Microsystems or the names of contributors may
16// be used to endorse or promote products derived from this software without
17// specific prior written permission.
18//
19// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31// The original source code covered by the above license has been
32// modified significantly by Google Inc.
33// Copyright 2021 the V8 project authors. All rights reserved.
34
36
37#include "src/base/bits.h"
38#include "src/base/cpu.h"
46
47namespace v8 {
48namespace internal {
49// Get the CPU features enabled by the build. For cross compilation the
50// preprocessor symbols __riscv_f and __riscv_d
51// can be defined to enable FPU instructions when building the
52// snapshot.
53static unsigned CpuFeaturesImpliedByCompiler() {
54 unsigned answer = 0;
55#if defined(__riscv_f) && defined(__riscv_d)
56 answer |= 1u << FPU;
57#endif // def __riscv_f
58
59#if (defined __riscv_vector) && (__riscv_v >= 1000000)
60 answer |= 1u << RISCV_SIMD;
61#endif // def __riscv_vector
62
63#if (defined __riscv_zba)
64 answer |= 1u << ZBA;
65#endif // def __riscv_zba
66
67#if (defined __riscv_zbb)
68 answer |= 1u << ZBB;
69#endif // def __riscv_zbb
70
71#if (defined __riscv_zbs)
72 answer |= 1u << ZBS;
73#endif // def __riscv_zbs
74
75#if (defined __riscv_zicond)
76 answer |= 1u << ZICOND;
77#endif // def __riscv_zicond
78 return answer;
79}
80
81#ifdef _RISCV_TARGET_SIMULATOR
82static unsigned SimulatorFeatures() {
83 unsigned answer = 0;
84 answer |= 1u << RISCV_SIMD;
85 answer |= 1u << ZBA;
86 answer |= 1u << ZBB;
87 answer |= 1u << ZBS;
88 answer |= 1u << ZICOND;
89 answer |= 1u << FPU;
90 return answer;
91}
92#endif
93
94bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(RISCV_SIMD); }
95
96void CpuFeatures::ProbeImpl(bool cross_compile) {
97 supported_ |= CpuFeaturesImpliedByCompiler();
98
99#ifdef _RISCV_TARGET_SIMULATOR
100 supported_ |= SimulatorFeatures();
101#endif // _RISCV_TARGET_SIMULATOR
102 // Only use statically determined features for cross compile (snapshot).
103 if (cross_compile) return;
104 // Probe for additional features at runtime.
105
106#ifndef USE_SIMULATOR
107 base::CPU cpu;
108 if (cpu.has_fpu()) supported_ |= 1u << FPU;
109 if (cpu.has_rvv()) supported_ |= 1u << RISCV_SIMD;
110 if (cpu.has_zba()) supported_ |= 1u << ZBA;
111 if (cpu.has_zbb()) supported_ |= 1u << ZBB;
112 if (cpu.has_zbs()) supported_ |= 1u << ZBS;
113 if (v8_flags.riscv_b_extension) {
114 supported_ |= (1u << ZBA) | (1u << ZBB) | (1u << ZBS);
115 }
116#ifdef V8_COMPRESS_POINTERS
117 if (cpu.riscv_mmu() == base::CPU::RV_MMU_MODE::kRiscvSV57) {
118 FATAL("SV57 is not supported");
120 }
121#endif // V8_COMPRESS_POINTERS
122#endif // USE_SIMULATOR
123 // Set a static value on whether SIMD is supported.
124 // This variable is only used for certain archs to query SupportWasmSimd128()
125 // at runtime in builtins using an extern ref. Other callers should use
126 // CpuFeatures::SupportWasmSimd128().
127 CpuFeatures::supports_wasm_simd_128_ = CpuFeatures::SupportsWasmSimd128();
128}
129
132 printf("supports_wasm_simd_128=%d\n", CpuFeatures::SupportsWasmSimd128());
133 printf("RISC-V Extension zba=%d,zbb=%d,zbs=%d,ZICOND=%d\n",
136}
137int ToNumber(Register reg) {
138 DCHECK(reg.is_valid());
139 const int kNumbers[] = {
140 0, // zero_reg
141 1, // ra
142 2, // sp
143 3, // gp
144 4, // tp
145 5, // t0
146 6, // t1
147 7, // t2
148 8, // s0/fp
149 9, // s1
150 10, // a0
151 11, // a1
152 12, // a2
153 13, // a3
154 14, // a4
155 15, // a5
156 16, // a6
157 17, // a7
158 18, // s2
159 19, // s3
160 20, // s4
161 21, // s5
162 22, // s6
163 23, // s7
164 24, // s8
165 25, // s9
166 26, // s10
167 27, // s11
168 28, // t3
169 29, // t4
170 30, // t5
171 31, // t6
172 };
173 return kNumbers[reg.code()];
174}
175
176Register ToRegister(int num) {
177 DCHECK(num >= 0 && num < kNumRegisters);
178 const Register kRegisters[] = {
179 zero_reg, ra, sp, gp, tp, t0, t1, t2, fp, s1, a0, a1, a2, a3, a4, a5,
180 a6, a7, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, t3, t4, t5, t6};
181 return kRegisters[num];
182}
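// Illustrative usage (not from the original source): ToNumber() and
// ToRegister() are inverse mappings over the RISC-V ABI register numbering
// given in the table above, e.g.
//   ToNumber(a0) == 10;  ToRegister(10) == a0;
//   ToNumber(fp) == 8;   ToRegister(8)  == fp;   // s0/fp share register number 8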
183
184// -----------------------------------------------------------------------------
185// Implementation of RelocInfo.
186
187const int RelocInfo::kApplyMask =
192
193bool RelocInfo::IsCodedSpecially() {
194 // The deserializer needs to know whether a pointer is specially coded. Being
195 // specially coded on RISC-V means that it is a lui/addi instruction, and that
196 // is always the case inside code objects.
197 return true;
198}
199
200bool RelocInfo::IsInConstantPool() { return false; }
201
202uint32_t RelocInfo::wasm_call_tag() const {
205 Instr instr1 = Assembler::instr_at(pc_ + 1 * kInstrSize);
207 DCHECK(reinterpret_cast<Instruction*>(pc_)->RdValue() ==
208 reinterpret_cast<Instruction*>(pc_ + 4)->Rs1Value());
209 return Assembler::BrachlongOffset(instr, instr1);
210 } else {
211 return static_cast<uint32_t>(
213 }
214}
215
216// -----------------------------------------------------------------------------
217// Implementation of Operand and MemOperand.
218// See assembler-riscv-inl.h for inlined constructors.
219
221 : rm_(no_reg), rmode_(rmode) {
223 value_.immediate = static_cast<intptr_t>(handle.address());
224}
225
227 int32_t smi;
228 if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
230 result.is_heap_number_request_ = true;
231 result.value_.heap_number_request = HeapNumberRequest(value);
232 return result;
233}
234
235MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
236 offset_ = offset;
237}
238
239MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
240 OffsetAddend offset_addend)
241 : Operand(rm) {
242 offset_ = unit * multiplier + offset_addend;
243}
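// Illustrative usage (assumptions, not from the original file): a MemOperand
// pairs a base register with a signed 32-bit offset, e.g.
//   MemOperand(sp, 16)                     // addresses sp + 16
//   MemOperand(fp, kSystemPointerSize, 3)  // offset_ = unit * multiplier,
//                                          // assuming offset_addend defaults to zero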
244
246 DCHECK_IMPLIES(isolate == nullptr, heap_number_requests_.empty());
247 for (auto& request : heap_number_requests_) {
248 Handle<HeapObject> object =
249 isolate->factory()->NewHeapNumber<AllocationType::kOld>(
250 request.heap_number());
251 Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
252#ifdef V8_TARGET_ARCH_RISCV64
255#else
256 set_target_value_at(pc, reinterpret_cast<uintptr_t>(object.location()));
257#endif
258 }
259}
260
261// -----------------------------------------------------------------------------
262// Specific instructions, constants, and masks.
263
265 std::unique_ptr<AssemblerBuffer> buffer)
266 : AssemblerBase(options, std::move(buffer)),
267 VU(this),
268 scratch_register_list_(DefaultTmpList()),
269 scratch_double_register_list_(DefaultFPTmpList()),
270 constpool_(this) {
272
276 // We leave space (16 * kTrampolineSlotsSize)
277 // for BlockTrampolinePoolScope buffer.
278 next_buffer_check_ = v8_flags.force_long_branches
279 ? kMaxInt
282 last_bound_pos_ = 0;
283
284 trampoline_emitted_ = v8_flags.force_long_branches;
286 block_buffer_growth_ = false;
287}
288
291
292void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
293 GetCode(isolate->main_thread_local_isolate(), desc);
294}
296 SafepointTableBuilderBase* safepoint_table_builder,
297 int handler_table_offset) {
298 // As a crutch to avoid having to add manual Align calls wherever we use a
299 // raw workflow to create InstructionStream objects (mostly in tests), add
300 // another Align call here. It does no harm - the end of the InstructionStream
301 // object is aligned to the (larger) kCodeAlignment anyways.
302 // TODO(jgruber): Consider moving responsibility for proper alignment to
303 // metadata table builders (safepoint, handler, constant pool, code
304 // comments).
306
308
309 int code_comments_size = WriteCodeComments();
310
311 DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
312
314
315 // Set up code descriptor.
316 // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
317 // this point to make CodeDesc initialization less fiddly.
318
319 static constexpr int kConstantPoolSize = 0;
320 static constexpr int kBuiltinJumpTableInfoSize = 0;
321 const int instruction_size = pc_offset();
322 const int builtin_jump_table_info_offset =
323 instruction_size - kBuiltinJumpTableInfoSize;
324 const int code_comments_offset =
325 builtin_jump_table_info_offset - code_comments_size;
326 const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
327 const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
328 ? constant_pool_offset
329 : handler_table_offset;
330 const int safepoint_table_offset =
331 (safepoint_table_builder == kNoSafepointTable)
332 ? handler_table_offset2
333 : safepoint_table_builder->safepoint_table_offset();
334 const int reloc_info_offset =
335 static_cast<int>(reloc_info_writer.pos() - buffer_->start());
336 CodeDesc::Initialize(desc, this, safepoint_table_offset,
337 handler_table_offset2, constant_pool_offset,
338 code_comments_offset, builtin_jump_table_info_offset,
339 reloc_info_offset);
340}
341
344 while ((pc_offset() & (m - 1)) != 0) {
345 NOP();
346 }
347}
348
350 // No advantage to aligning branch/call targets to more than a
351 // single instruction, that I am aware of.
352 Align(4);
353}
354
355// Labels refer to positions in the (to be) generated code.
356// There are bound, linked, and unused labels.
357//
358// Bound labels refer to known positions in the already
359// generated code. pos() is the position the label refers to.
360//
361// Linked labels refer to unknown positions in the code
362// to be generated; pos() is the position of the last
363// instruction using the label.
364
365// The link chain is terminated by an immediate value of 0 in the instruction,
366// which is otherwise illegal (a branch to offset 0 would be an infinite loop).
367// When this case is detected, return a position of -1, an otherwise illegal position.
368const int kEndOfChain = -1;
369const int kEndOfJumpChain = 0;
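// Sketch of the link chain (illustrative, not from the original source): each
// branch/jump to a not-yet-bound label encodes, in its own offset field, the
// distance back to the previous instruction that uses the same label, forming
// a singly linked list threaded through the code buffer:
//
//   label->pos() == 100            // head: the most recent use of the label
//   instr at 100: offset -40   ->  previous use at position 60
//   instr at  60: offset -40   ->  previous use at position 20
//   instr at  20: offset   0   ->  kEndOfJumpChain sentinel, end of the list
//
// target_at(pos) follows one link (returning kEndOfChain at the sentinel), and
// bind_to() further below walks the whole list, patching each instruction to
// the real target.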
370
371int Assembler::target_at(int pos, bool is_internal) {
372 if (is_internal) {
373 uintptr_t* p = reinterpret_cast<uintptr_t*>(buffer_start_ + pos);
374 uintptr_t address = *p;
375 if (address == kEndOfJumpChain) {
376 return kEndOfChain;
377 } else {
378 uintptr_t instr_address = reinterpret_cast<uintptr_t>(p);
379 DCHECK(instr_address - address < INT_MAX);
380 int delta = static_cast<int>(instr_address - address);
381 DCHECK(pos > delta);
382 return pos - delta;
383 }
384 }
386 DEBUG_PRINTF("target_at: %p (%d)\n\t",
387 reinterpret_cast<Instr*>(buffer_start_ + pos), pos);
388 Instr instr = instruction->InstructionBits();
390
391 switch (instruction->InstructionOpcodeType()) {
392 case BRANCH: {
393 int32_t imm13 = BranchOffset(instr);
394 if (imm13 == kEndOfJumpChain) {
395 // EndOfChain sentinel is returned directly, not relative to pc or pos.
396 return kEndOfChain;
397 } else {
398 return pos + imm13;
399 }
400 }
401 case JAL: {
402 int32_t imm21 = JumpOffset(instr);
403 if (imm21 == kEndOfJumpChain) {
404 // EndOfChain sentinel is returned directly, not relative to pc or pos.
405 return kEndOfChain;
406 } else {
407 return pos + imm21;
408 }
409 }
410 case JALR: {
411 int32_t imm12 = instr >> 20;
412 if (imm12 == kEndOfJumpChain) {
413 // EndOfChain sentinel is returned directly, not relative to pc or pos.
414 return kEndOfChain;
415 } else {
416 return pos + imm12;
417 }
418 }
419 case LUI: {
420 Address pc = reinterpret_cast<Address>(buffer_start_ + pos);
422 uintptr_t instr_address =
423 reinterpret_cast<uintptr_t>(buffer_start_ + pos);
424 uintptr_t imm = reinterpret_cast<uintptr_t>(pc);
425 if (imm == kEndOfJumpChain) {
426 return kEndOfChain;
427 } else {
428 DCHECK(instr_address - imm < INT_MAX);
429 int32_t delta = static_cast<int32_t>(instr_address - imm);
430 DCHECK(pos > delta);
431 return pos - delta;
432 }
433 }
434 case AUIPC: {
435 Instr instr_auipc = instr;
436 Instr instr_I = instr_at(pos + 4);
437 DCHECK(IsJalr(instr_I) || IsAddi(instr_I));
438 int32_t offset = BrachlongOffset(instr_auipc, instr_I);
439 if (offset == kEndOfJumpChain) return kEndOfChain;
440 return offset + pos;
441 }
442 case RO_C_J: {
443 int32_t offset = instruction->RvcImm11CJValue();
444 if (offset == kEndOfJumpChain) return kEndOfChain;
445 return offset + pos;
446 }
447 case RO_C_BNEZ:
448 case RO_C_BEQZ: {
449 int32_t offset = instruction->RvcImm8BValue();
450 if (offset == kEndOfJumpChain) return kEndOfChain;
451 return pos + offset;
452 }
453 default: {
454 if (instr == kEndOfJumpChain) {
455 return kEndOfChain;
456 } else {
457 int32_t imm18 =
458 ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
459 return (imm18 + pos);
460 }
461 }
462 }
463}
464
465[[nodiscard]] static inline Instr SetBranchOffset(int32_t pos,
466 int32_t target_pos,
467 Instr instr) {
468 int32_t imm = target_pos - pos;
469 DCHECK_EQ(imm & 1, 0);
471
472 instr &= ~kBImm12Mask;
473 int32_t imm12 = ((imm & 0x800) >> 4) | // bit 11
474 ((imm & 0x1e) << 7) | // bits 4-1
475 ((imm & 0x7e0) << 20) | // bits 10-5
476 ((imm & 0x1000) << 19); // bit 12
477
478 return instr | (imm12 & kBImm12Mask);
479}
480
481[[nodiscard]] static inline Instr SetLoadOffset(int32_t offset, Instr instr) {
482#if V8_TARGET_ARCH_RISCV64
483 DCHECK(Assembler::IsLd(instr));
484#elif V8_TARGET_ARCH_RISCV32
486#endif
487 DCHECK(is_int12(offset));
488 instr &= ~kImm12Mask;
489 int32_t imm12 = offset << kImm12Shift;
490 return instr | (imm12 & kImm12Mask);
491}
492
493
494[[nodiscard]] static inline Instr SetJalOffset(int32_t pos, int32_t target_pos,
495 Instr instr) {
497 int32_t imm = target_pos - pos;
498 DCHECK_EQ(imm & 1, 0);
500
501 instr &= ~kImm20Mask;
502 int32_t imm20 = (imm & 0xff000) | // bits 19-12
503 ((imm & 0x800) << 9) | // bit 11
504 ((imm & 0x7fe) << 20) | // bits 10-1
505 ((imm & 0x100000) << 11); // bit 20
506
507 return instr | (imm20 & kImm20Mask);
508}
509
510[[nodiscard]] static inline ShortInstr SetCJalOffset(int32_t pos,
511 int32_t target_pos,
512 Instr instr) {
514 int32_t imm = target_pos - pos;
515 DCHECK_EQ(imm & 1, 0);
517 instr &= ~kImm11Mask;
518 int16_t imm11 = ((imm & 0x800) >> 1) | ((imm & 0x400) >> 4) |
519 ((imm & 0x300) >> 1) | ((imm & 0x80) >> 3) |
520 ((imm & 0x40) >> 1) | ((imm & 0x20) >> 5) |
521 ((imm & 0x10) << 5) | (imm & 0xe);
522 imm11 = imm11 << kImm11Shift;
524 return instr | (imm11 & kImm11Mask);
525}
526[[nodiscard]] static inline Instr SetCBranchOffset(int32_t pos,
527 int32_t target_pos,
528 Instr instr) {
530 int32_t imm = target_pos - pos;
531 DCHECK_EQ(imm & 1, 0);
533
534 instr &= ~kRvcBImm8Mask;
535 int32_t imm8 = ((imm & 0x20) >> 5) | ((imm & 0x6)) | ((imm & 0xc0) >> 3) |
536 ((imm & 0x18) << 2) | ((imm & 0x100) >> 1);
537 imm8 = ((imm8 & 0x1f) << 2) | ((imm8 & 0xe0) << 5);
539
540 return instr | (imm8 & kRvcBImm8Mask);
541}
542
543// We have to use a temporary register for things that can be relocated even
544// if they can be encoded in RISC-V's 12 bits of immediate-offset instruction
545// space. There is no guarantee that the relocated location can be similarly
546// encoded.
548 return !RelocInfo::IsNoInfo(rmode);
549}
550
552 if (!v8_flags.riscv_debug) return;
553 disasm::NameConverter converter;
554 disasm::Disassembler disasm(converter);
556
557 disasm.InstructionDecode(disasm_buffer, pc);
558 DEBUG_PRINTF("%s\n", disasm_buffer.begin());
559}
560
561void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
562 if (is_internal) {
563 uintptr_t imm = reinterpret_cast<uintptr_t>(buffer_start_) + target_pos;
564 *reinterpret_cast<uintptr_t*>(buffer_start_ + pos) = imm;
565 return;
566 }
567 DEBUG_PRINTF("\ttarget_at_put: %p (%d) to %p (%d)\n",
568 reinterpret_cast<Instr*>(buffer_start_ + pos), pos,
569 reinterpret_cast<Instr*>(buffer_start_ + target_pos),
570 target_pos);
572 Instr instr = instruction->InstructionBits();
573
574 switch (instruction->InstructionOpcodeType()) {
575 case BRANCH: {
576 instr = SetBranchOffset(pos, target_pos, instr);
578 } break;
579 case JAL: {
581 intptr_t offset = target_pos - pos;
583 instr = SetJalOffset(pos, target_pos, instr);
585 } else {
586 Instr instr_I = instr_at(pos + 4);
587 CHECK_EQ(instr_I, kNopByte);
588 CHECK(is_int32(offset + 0x800));
589 Instr instr_auipc = AUIPC | t6.code() << kRdShift;
590 instr_I = RO_JALR | (t6.code() << kRs1Shift) |
591 (instruction->RdValue() << kRdShift);
592
593 int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
594 int32_t Lo12 = (int32_t)offset << 20 >> 20;
595
596 instr_auipc = SetHi20Offset(Hi20, instr_auipc);
597 instr_at_put(pos, instr_auipc);
598
599 instr_I = SetLo12Offset(Lo12, instr_I);
600 instr_at_put(pos + 4, instr_I);
603 }
604 } break;
605 case LUI: {
606 Address pc = reinterpret_cast<Address>(buffer_start_ + pos);
608 pc, reinterpret_cast<uintptr_t>(buffer_start_ + target_pos));
609 } break;
610 case AUIPC: {
611 Instr instr_auipc = instr;
612 Instr instr_I = instr_at(pos + 4);
613 Instruction* instruction_I = Instruction::At(buffer_start_ + pos + 4);
614 DCHECK(IsJalr(instr_I) || IsAddi(instr_I));
615
616 intptr_t offset = target_pos - pos;
617 if (is_int21(offset) && IsJalr(instr_I) &&
618 (instruction->RdValue() == instruction_I->Rs1Value())) {
619 if (v8_flags.riscv_debug) {
622 }
623 DEBUG_PRINTF("\ttarget_at_put: Replace by JAL pos:(%d) \n", pos);
624 DCHECK(is_int21(offset) && ((offset & 1) == 0));
625 Instr instr = JAL | (instruction_I->RdValue() << kRdShift);
626 instr = SetJalOffset(pos, target_pos, instr);
631 } else {
632 CHECK(is_int32(offset + 0x800));
633
634 int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
635 int32_t Lo12 = (int32_t)offset << 20 >> 20;
636
637 instr_auipc = SetHi20Offset(Hi20, instr_auipc);
638 instr_at_put(pos, instr_auipc);
639
640 const int kImm31_20Mask = ((1 << 12) - 1) << 20;
641 const int kImm11_0Mask = ((1 << 12) - 1);
642 instr_I = (instr_I & ~kImm31_20Mask) | ((Lo12 & kImm11_0Mask) << 20);
643 instr_at_put(pos + 4, instr_I);
644 }
645 } break;
646 case RO_C_J: {
647 ShortInstr short_instr = SetCJalOffset(pos, target_pos, instr);
648 instr_at_put(pos, short_instr);
649 } break;
650 case RO_C_BNEZ:
651 case RO_C_BEQZ: {
652 instr = SetCBranchOffset(pos, target_pos, instr);
654 } break;
655 default: {
656 // Emitted label constant, not part of a branch.
657 // Make label relative to Code pointer of generated InstructionStream
658 // object.
661 } break;
662 }
663
665 if (instruction->InstructionOpcodeType() == AUIPC) {
667 }
668}
669
671 if (L->is_unused()) {
672 PrintF("unused label\n");
673 } else if (L->is_bound()) {
674 PrintF("bound label to %d\n", L->pos());
675 } else if (L->is_linked()) {
676 Label l;
677 l.link_to(L->pos());
678 PrintF("unbound label");
679 while (l.is_linked()) {
680 PrintF("@ %d ", l.pos());
681 Instr instr = instr_at(l.pos());
682 if ((instr & ~kImm16Mask) == 0) {
683 PrintF("value\n");
684 } else {
685 PrintF("%d\n", instr);
686 }
688 }
689 } else {
690 PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
691 }
692}
693
695 DCHECK(0 <= pos && pos <= pc_offset()); // Must have valid binding position.
696 DEBUG_PRINTF("\tbinding %d to label %p\n", pos, L);
697 int trampoline_pos = kInvalidSlotPos;
698 bool is_internal = false;
699 if (L->is_linked() && !trampoline_emitted_) {
701 if (!is_internal_reference(L)) {
703 }
704 }
705
706 while (L->is_linked()) {
707 int fixup_pos = L->pos();
708 int dist = pos - fixup_pos;
709 is_internal = is_internal_reference(L);
710 next(L, is_internal); // Call next before overwriting link with target
711 // at fixup_pos.
712 Instr instr = instr_at(fixup_pos);
713 DEBUG_PRINTF("\tfixup: %d to %d\n", fixup_pos, dist);
714 if (is_internal) {
715 target_at_put(fixup_pos, pos, is_internal);
716 } else {
717 if (IsBranch(instr)) {
718 if (dist > kMaxBranchOffset) {
719 if (trampoline_pos == kInvalidSlotPos) {
720 trampoline_pos = get_trampoline_entry(fixup_pos);
721 CHECK_NE(trampoline_pos, kInvalidSlotPos);
722 }
723 CHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
724 DEBUG_PRINTF("\t\ttrampolining: %d\n", trampoline_pos);
725 target_at_put(fixup_pos, trampoline_pos, false);
726 fixup_pos = trampoline_pos;
727 }
728 target_at_put(fixup_pos, pos, false);
729 } else if (IsJal(instr)) {
730 if (dist > kMaxJumpOffset) {
731 if (trampoline_pos == kInvalidSlotPos) {
732 trampoline_pos = get_trampoline_entry(fixup_pos);
733 CHECK_NE(trampoline_pos, kInvalidSlotPos);
734 }
735 CHECK((trampoline_pos - fixup_pos) <= kMaxJumpOffset);
736 DEBUG_PRINTF("\t\ttrampolining: %d\n", trampoline_pos);
737 target_at_put(fixup_pos, trampoline_pos, false);
738 fixup_pos = trampoline_pos;
739 }
740 target_at_put(fixup_pos, pos, false);
741 } else {
742 target_at_put(fixup_pos, pos, false);
743 }
744 }
745 }
746 L->bind_to(pos);
747
748 // Keep track of the last bound label so we don't eliminate any instructions
749 // before a bound label.
751}
752
754 DCHECK(!L->is_bound()); // Label can only be bound once.
755 bind_to(L, pc_offset());
756}
757
758void Assembler::next(Label* L, bool is_internal) {
759 DCHECK(L->is_linked());
760 int link = target_at(L->pos(), is_internal);
761 if (link == kEndOfChain) {
762 L->Unuse();
763 } else {
764 DCHECK_GE(link, 0);
765 DEBUG_PRINTF("\tnext: %p to %p (%d)\n", L,
766 reinterpret_cast<Instr*>(buffer_start_ + link), link);
767 L->link_to(link);
768 }
769}
770
772 DCHECK(L->is_bound());
773 return is_intn((pc_offset() - L->pos()), kJumpOffsetBits);
774}
775
777 if (L == nullptr || !L->is_bound()) return true;
778 return is_intn((pc_offset() - L->pos()), bits);
779}
780
782 DCHECK(L->is_bound());
783 return is_intn((pc_offset() - L->pos()), kBranchOffsetBits);
784}
785
787 // | imm[12] | imm[10:5] | rs2 | rs1 | funct3 | imm[4:1|11] | opcode |
788 // 31 25 11 7
789 int32_t imm13 = ((instr & 0xf00) >> 7) | ((instr & 0x7e000000) >> 20) |
790 ((instr & 0x80) << 4) | ((instr & 0x80000000) >> 19);
791 imm13 = imm13 << 19 >> 19;
792 return imm13;
793}
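// Worked example (illustrative, not from the original source): a forward
// branch of +32 bytes has imm[10:5] = 0b000001 and every other immediate bit
// zero, so only instruction bit 25 is set. In the decode above, that term is
// (instr & 0x7e000000) >> 20 == 32, and the final sign extension leaves the
// positive value unchanged, so BranchOffset() returns 32.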
794
796 DCHECK(reinterpret_cast<Instruction*>(&instr_I)->InstructionType() ==
800 (instr_I & kRs1FieldMask) >> kRs1Shift);
801 int32_t imm_auipc = AuipcOffset(auipc);
802 int32_t imm12 = static_cast<int32_t>(instr_I & kImm12Mask) >> 20;
803 int32_t offset = imm12 + imm_auipc;
804 return offset;
805}
806
808 Instr instr_jalr, int32_t offset,
809 WritableJitAllocation* jit_allocation) {
810 DCHECK(IsAuipc(instr_auipc));
811 DCHECK(IsJalr(instr_jalr));
812 CHECK(is_int32(offset + 0x800));
813 int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
814 int32_t Lo12 = (int32_t)offset << 20 >> 20;
815 instr_at_put(pc, SetHi20Offset(Hi20, instr_auipc), jit_allocation);
816 instr_at_put(pc + kInstrSize, SetLo12Offset(Lo12, instr_jalr),
817 jit_allocation);
818 DCHECK(offset ==
820 return 2;
821}
822
823// Returns the next free trampoline entry.
825 int32_t trampoline_entry = kInvalidSlotPos;
827 DEBUG_PRINTF("\ttrampoline start: %d,pos: %d\n", trampoline_.start(), pos);
828 if (trampoline_.start() > pos) {
829 trampoline_entry = trampoline_.take_slot();
830 }
831
832 if (kInvalidSlotPos == trampoline_entry) {
834 }
835 }
836 return trampoline_entry;
837}
838
840 intptr_t target_pos;
841 DEBUG_PRINTF("\tjump_address: %p to %p (%d)\n", L,
842 reinterpret_cast<Instr*>(buffer_start_ + pc_offset()),
843 pc_offset());
844 if (L->is_bound()) {
845 target_pos = L->pos();
846 } else {
847 if (L->is_linked()) {
848 target_pos = L->pos(); // L's link.
849 L->link_to(pc_offset());
850 } else {
851 L->link_to(pc_offset());
852 if (!trampoline_emitted_) {
855 }
856 DEBUG_PRINTF("\tstarted link\n");
857 return kEndOfJumpChain;
858 }
859 }
860 uintptr_t imm = reinterpret_cast<uintptr_t>(buffer_start_) + target_pos;
861 if (v8_flags.riscv_c_extension)
862 DCHECK_EQ(imm & 1, 0);
863 else
864 DCHECK_EQ(imm & 3, 0);
865
866 return imm;
867}
868
870 intptr_t target_pos;
871
872 DEBUG_PRINTF("\tbranch_long_offset: %p to %p (%d)\n", L,
873 reinterpret_cast<Instr*>(buffer_start_ + pc_offset()),
874 pc_offset());
875 if (L->is_bound()) {
876 target_pos = L->pos();
877 } else {
878 if (L->is_linked()) {
879 target_pos = L->pos(); // L's link.
880 L->link_to(pc_offset());
881 } else {
882 L->link_to(pc_offset());
883 if (!trampoline_emitted_) {
886 }
887 DEBUG_PRINTF("\tstarted link\n");
888 return kEndOfJumpChain;
889 }
890 }
891 intptr_t offset = target_pos - pc_offset();
892 if (v8_flags.riscv_c_extension)
893 DCHECK_EQ(offset & 1, 0);
894 else
895 DCHECK_EQ(offset & 3, 0);
896 DCHECK(is_int32(offset));
897 VU.clear();
898 return static_cast<int32_t>(offset);
899}
900
902 int32_t target_pos;
903
904 DEBUG_PRINTF("\tbranch_offset_helper: %p to %p (%d)\n", L,
905 reinterpret_cast<Instr*>(buffer_start_ + pc_offset()),
906 pc_offset());
907 if (L->is_bound()) {
908 target_pos = L->pos();
909 DEBUG_PRINTF("\tbound: %d", target_pos);
910 } else {
911 if (L->is_linked()) {
912 target_pos = L->pos();
913 L->link_to(pc_offset());
914 DEBUG_PRINTF("\tadded to link: %d\n", target_pos);
915 } else {
916 L->link_to(pc_offset());
917 if (!trampoline_emitted_) {
920 }
921 DEBUG_PRINTF("\tstarted link\n");
922 return kEndOfJumpChain;
923 }
924 }
925
926 int32_t offset = target_pos - pc_offset();
927 DCHECK(is_intn(offset, bits));
928 DCHECK_EQ(offset & 1, 0);
929 DEBUG_PRINTF("\toffset = %d\n", offset);
930 VU.clear();
931 return offset;
932}
933
934void Assembler::label_at_put(Label* L, int at_offset) {
935 int target_pos;
936 DEBUG_PRINTF("\tlabel_at_put: %p @ %p (%d)\n", L,
937 reinterpret_cast<Instr*>(buffer_start_ + at_offset), at_offset);
938 if (L->is_bound()) {
939 target_pos = L->pos();
940 instr_at_put(at_offset, target_pos + (InstructionStream::kHeaderSize -
942 } else {
943 if (L->is_linked()) {
944 target_pos = L->pos(); // L's link.
945 int32_t imm18 = target_pos - at_offset;
946 DCHECK_EQ(imm18 & 3, 0);
947 int32_t imm16 = imm18 >> 2;
948 DCHECK(is_int16(imm16));
949 instr_at_put(at_offset, (int32_t)(imm16 & kImm16Mask));
950 } else {
951 target_pos = kEndOfJumpChain;
952 instr_at_put(at_offset, target_pos);
953 if (!trampoline_emitted_) {
956 }
957 }
958 L->link_to(at_offset);
959 }
960}
961
962//===----------------------------------------------------------------------===//
963// Instructions
964//===----------------------------------------------------------------------===//
965
966// Definitions for using compressed vs. non-compressed instructions.
967
969 if (v8_flags.riscv_c_extension)
970 c_nop();
971 else
972 nop();
973}
974
976 if (v8_flags.riscv_c_extension)
977 c_ebreak();
978 else
979 ebreak();
980}
981
982// Assembler Pseudo Instructions (Tables 25.2 and 25.3, RISC-V Unprivileged ISA)
983
984void Assembler::nop() { addi(ToRegister(0), ToRegister(0), 0); }
985
986inline int64_t signExtend(uint64_t V, int N) {
987 return int64_t(V << (64 - N)) >> (64 - N);
988}
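// Illustrative values (not from the original source): signExtend()
// reinterprets the low N bits of V as a signed two's-complement number, e.g.
//   signExtend(0xfff, 12) == -1
//   signExtend(0x7ff, 12) == 2047
//   signExtend(0x800, 12) == -2048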
989
990#if V8_TARGET_ARCH_RISCV64
991void Assembler::RV_li(Register rd, int64_t imm) {
992 UseScratchRegisterScope temps(this);
993 if (RecursiveLiCount(imm) > GeneralLiCount(imm, temps.CanAcquire())) {
994 GeneralLi(rd, imm);
995 } else {
996 RecursiveLi(rd, imm);
997 }
998}
999
1000int Assembler::RV_li_count(int64_t imm, bool is_get_temp_reg) {
1001 if (RecursiveLiCount(imm) > GeneralLiCount(imm, is_get_temp_reg)) {
1002 return GeneralLiCount(imm, is_get_temp_reg);
1003 } else {
1004 return RecursiveLiCount(imm);
1005 }
1006}
1007
1008void Assembler::GeneralLi(Register rd, int64_t imm) {
1009 // 64-bit imm is put in the register rd.
1010 // In most cases the imm is 32 bit and 2 instructions are generated. If a
1011 // temporary register is available, in the worst case, 6 instructions are
1012// generated for a full 64-bit immediate. If a temporary register is not
1013 // available the maximum will be 8 instructions. If imm is more than 32 bits
1014 // and a temp register is available, imm is divided into two 32-bit parts,
1015 // low_32 and up_32. Each part is built in a separate register. low_32 is
1016 // built before up_32. If low_32 is negative (upper 32 bits are 1), 0xffffffff
1017 // is subtracted from up_32 before up_32 is built. This compensates for 32
1018 // bits of 1's in the lower when the two registers are added. If no temp is
1019// available, the upper 32 bits are built in rd, and the lower 32 bits are
1020// divided into 3 parts (11, 11, and 10 bits). The parts are shifted and added
1021 // to the upper part built in rd.
1022 if (is_int32(imm + 0x800)) {
1023 // 32-bit case. Maximum of 2 instructions generated
1024 int64_t high_20 = ((imm + 0x800) >> 12);
1025 int64_t low_12 = imm << 52 >> 52;
1026 if (high_20) {
1027 lui(rd, (int32_t)high_20);
1028 if (low_12) {
1029 addi(rd, rd, low_12);
1030 }
1031 } else {
1032 addi(rd, zero_reg, low_12);
1033 }
1034 return;
1035 } else {
1036 UseScratchRegisterScope temps(this);
1037 // 64-bit case: divide imm into two 32-bit parts, upper and lower
1038 int64_t up_32 = imm >> 32;
1039 int64_t low_32 = imm & 0xffffffffull;
1040 Register temp_reg = rd;
1041 // Check if a temporary register is available
1042 if (up_32 == 0 || low_32 == 0) {
1043 // No temp register is needed
1044 } else {
1045 BlockTrampolinePoolScope block_trampoline_pool(this);
1046 temp_reg = temps.CanAcquire() ? temps.Acquire() : no_reg;
1047 }
1048 if (temp_reg != no_reg) {
1049 // keep track of hardware behavior for lower part in sim_low
1050 int64_t sim_low = 0;
1051 // Build lower part
1052 if (low_32 != 0) {
1053 int64_t high_20 = ((low_32 + 0x800) >> 12);
1054 int64_t low_12 = low_32 & 0xfff;
1055 if (high_20) {
1056 // Adjust to 20 bits for the case of overflow
1057 high_20 &= 0xfffff;
1058 sim_low = ((high_20 << 12) << 32) >> 32;
1059 lui(rd, (int32_t)high_20);
1060 if (low_12) {
1061 sim_low += (low_12 << 52 >> 52) | low_12;
1062 addi(rd, rd, low_12);
1063 }
1064 } else {
1065 sim_low = low_12;
1066 ori(rd, zero_reg, low_12);
1067 }
1068 }
1069 if (sim_low & 0x100000000) {
1070 // Bit 31 is 1. Either an overflow or a negative 64 bit
1071 if (up_32 == 0) {
1072 // Positive number, but overflow because of the add 0x800
1073 slli(rd, rd, 32);
1074 srli(rd, rd, 32);
1075 return;
1076 }
1077 // low_32 is a negative 64 bit after the build
1078 up_32 = (up_32 - 0xffffffff) & 0xffffffff;
1079 }
1080 if (up_32 == 0) {
1081 return;
1082 }
1083 // Build upper part in a temporary register
1084 if (low_32 == 0) {
1085 // Build upper part in rd
1086 temp_reg = rd;
1087 }
1088 int64_t high_20 = (up_32 + 0x800) >> 12;
1089 int64_t low_12 = up_32 & 0xfff;
1090 if (high_20) {
1091 // Adjust to 20 bits for the case of overflow
1092 high_20 &= 0xfffff;
1093 lui(temp_reg, (int32_t)high_20);
1094 if (low_12) {
1095 addi(temp_reg, temp_reg, low_12);
1096 }
1097 } else {
1098 ori(temp_reg, zero_reg, low_12);
1099 }
1100 // Put it at the beginning of the register
1101 slli(temp_reg, temp_reg, 32);
1102 if (low_32 != 0) {
1103 add(rd, rd, temp_reg);
1104 }
1105 return;
1106 }
1107 // No temp register. Build imm in rd.
1108 // Build upper 32 bits first in rd. Divide lower 32 bits parts and add
1109 // parts to the upper part by doing shift and add.
1110 // First build upper part in rd.
1111 int64_t high_20 = (up_32 + 0x800) >> 12;
1112 int64_t low_12 = up_32 & 0xfff;
1113 if (high_20) {
1114 // Adjust to 20 bits for the case of overflow
1115 high_20 &= 0xfffff;
1116 lui(rd, (int32_t)high_20);
1117 if (low_12) {
1118 addi(rd, rd, low_12);
1119 }
1120 } else {
1121 ori(rd, zero_reg, low_12);
1122 }
1123 // The upper part is already in rd. Each part to be added to rd has at most
1124 // 11 bits and always starts with a 1. rd is shifted by the size of the part
1125 // plus the number of zeros between the parts. Each part is added after the
1126 // left shift.
1127 uint32_t mask = 0x80000000;
1128 int32_t shift_val = 0;
1129 int32_t i;
1130 for (i = 0; i < 32; i++) {
1131 if ((low_32 & mask) == 0) {
1132 mask >>= 1;
1133 shift_val++;
1134 if (i == 31) {
1135 // rest is zero
1136 slli(rd, rd, shift_val);
1137 }
1138 continue;
1139 }
1140 // The first 1 seen
1141 int32_t part;
1142 if ((i + 11) < 32) {
1143 // Pick 11 bits
1144 part = ((uint32_t)(low_32 << i) >> i) >> (32 - (i + 11));
1145 slli(rd, rd, shift_val + 11);
1146 ori(rd, rd, part);
1147 i += 10;
1148 mask >>= 11;
1149 } else {
1150 part = (uint32_t)(low_32 << i) >> i;
1151 slli(rd, rd, shift_val + (32 - i));
1152 ori(rd, rd, part);
1153 break;
1154 }
1155 shift_val = 0;
1156 }
1157 }
1158}
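// Worked examples for RV_li/GeneralLi (illustrative, not from the original
// source), showing the lui+addi split with the +0x800 rounding trick:
//   imm = 0x12345678: high_20 = 0x12345, low_12 = 0x678
//       lui  rd, 0x12345        // rd = 0x12345000
//       addi rd, rd, 0x678      // rd = 0x12345678
//   imm = 0x12345fff: high_20 = 0x12346, low_12 = -1 (0xfff sign-extended)
//       lui  rd, 0x12346        // rd = 0x12346000
//       addi rd, rd, -1         // rd = 0x12345fff
// Larger 64-bit immediates are split into upper and lower 32-bit halves as
// described in the comment at the top of GeneralLi().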
1159
1160void Assembler::li_ptr(Register rd, int64_t imm) {
1161#ifdef RISCV_USE_SV39
1162 // Initialize rd with an address
1163 // Pointers are 39 bits
1164 // 4 fixed instructions are generated
1165 DCHECK_EQ((imm & 0xffffff8000000000ll), 0);
1166 int64_t a8 = imm & 0xff; // bits 0:7. 8 bits
1167 int64_t high_31 = (imm >> 8) & 0x7fffffff; // 31 bits
1168 int64_t high_20 = ((high_31 + 0x800) >> 12); // 19 bits
1169 int64_t low_12 = high_31 & 0xfff; // 12 bits
1170 lui(rd, (int32_t)high_20);
1171 addi(rd, rd, low_12); // 31 bits in rd.
1172 slli(rd, rd, 8); // Space for next 8 bits
1173 ori(rd, rd, a8); // 8 bits are put in.
1174#else
1175 // Initialize rd with an address
1176 // Pointers are 48 bits
1177 // 6 fixed instructions are generated
1178 DCHECK_EQ((imm & 0xfff0000000000000ll), 0);
1179 int64_t a6 = imm & 0x3f; // bits 0:5. 6 bits
1180 int64_t b11 = (imm >> 6) & 0x7ff; // bits 6:11. 11 bits
1181 int64_t high_31 = (imm >> 17) & 0x7fffffff; // 31 bits
1182 int64_t high_20 = ((high_31 + 0x800) >> 12); // 19 bits
1183 int64_t low_12 = high_31 & 0xfff; // 12 bits
1184 lui(rd, (int32_t)high_20);
1185 addi(rd, rd, low_12); // 31 bits in rd.
1186 slli(rd, rd, 11); // Space for next 11 bits
1187 ori(rd, rd, b11); // 11 bits are put in. 42 bits in rd
1188 slli(rd, rd, 6); // Space for next 6 bits
1189 ori(rd, rd, a6); // 6 bits are put in. 48 bits in rd
1190#endif
1191}
1192
1193void Assembler::li_constant(Register rd, int64_t imm) {
1194 DEBUG_PRINTF("\tli_constant(%d, %" PRIx64 " <%" PRId64 ">)\n", ToNumber(rd),
1195 imm, imm);
1196 lui(rd, (imm + (1LL << 47) + (1LL << 35) + (1LL << 23) + (1LL << 11)) >>
1197 48); // Bits 63:48
1198 addiw(rd, rd,
1199 (imm + (1LL << 35) + (1LL << 23) + (1LL << 11)) << 16 >>
1200 52); // Bits 47:36
1201 slli(rd, rd, 12);
1202 addi(rd, rd, (imm + (1LL << 23) + (1LL << 11)) << 28 >> 52); // Bits 35:24
1203 slli(rd, rd, 12);
1204 addi(rd, rd, (imm + (1LL << 11)) << 40 >> 52); // Bits 23:12
1205 slli(rd, rd, 12);
1206 addi(rd, rd, imm << 52 >> 52); // Bits 11:0
1207}
1208
1209void Assembler::li_constant32(Register rd, int32_t imm) {
1210 ASM_CODE_COMMENT(this);
1211 DEBUG_PRINTF("\tli_constant(%d, %x <%d>)\n", ToNumber(rd), imm, imm);
1212 int32_t high_20 = ((imm + 0x800) >> 12); // bits31:12
1213 int32_t low_12 = imm & 0xfff; // bits11:0
1214 lui(rd, high_20);
1215 addi(rd, rd, low_12);
1216}
1217
1218#elif V8_TARGET_ARCH_RISCV32
1219void Assembler::RV_li(Register rd, int32_t imm) {
1220 int32_t high_20 = ((imm + 0x800) >> 12);
1221 int32_t low_12 = imm & 0xfff;
1222 if (high_20) {
1223 lui(rd, high_20);
1224 if (low_12) {
1225 addi(rd, rd, low_12);
1226 }
1227 } else {
1228 addi(rd, zero_reg, low_12);
1229 }
1230}
1231
1232int Assembler::RV_li_count(int32_t imm, bool is_get_temp_reg) {
1233 int count = 0;
1234 // imitate Assembler::RV_li
1235 int32_t high_20 = ((imm + 0x800) >> 12);
1236 int32_t low_12 = imm & 0xfff;
1237 if (high_20) {
1238 count++;
1239 if (low_12) {
1240 count++;
1241 }
1242 } else {
1243 // if high_20 is 0, always need one instruction to load the low_12 bit
1244 count++;
1245 }
1246
1247 return count;
1248}
1249
1250void Assembler::li_ptr(Register rd, int32_t imm) {
1251 // Initialize rd with an address
1252 // Pointers are 32 bits
1253 // 2 fixed instructions are generated
1254 int32_t high_20 = ((imm + 0x800) >> 12); // bits31:12
1255 int32_t low_12 = imm & 0xfff; // bits11:0
1256 lui(rd, high_20);
1257 addi(rd, rd, low_12);
1258}
1259
1260void Assembler::li_constant(Register rd, int32_t imm) {
1261 ASM_CODE_COMMENT(this);
1262 DEBUG_PRINTF("\tli_constant(%d, %x <%d>)\n", ToNumber(rd), imm, imm);
1263 int32_t high_20 = ((imm + 0x800) >> 12); // bits31:12
1264 int32_t low_12 = imm & 0xfff; // bits11:0
1265 lui(rd, high_20);
1266 addi(rd, rd, low_12);
1267}
1268#endif
1269
1270// Break / Trap instructions.
1271void Assembler::break_(uint32_t code, bool break_as_stop) {
1272 // We need to invalidate breaks that could be stops as well because the
1273 // simulator expects a char pointer after the stop instruction.
1274 // See base-constants-riscv.h for explanation.
1275 DCHECK(
1277 (!break_as_stop && (code > kMaxStopCode || code <= kMaxTracepointCode)));
1278
1279 // Since ebreak does not allow an additional immediate field, we use the
1280 // immediate field of the lui instruction immediately following the ebreak
1281 // to encode the "code" info.
1282 ebreak();
1283 DCHECK(is_uint20(code));
1284 lui(zero_reg, code);
1285}
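// Illustrative emitted sequence for break_(code) (not from the original
// source): the code is carried by a lui writing to the zero register, which
// has no architectural effect but is visible to the simulator:
//   ebreak
//   lui zero_reg, code      // code recoverable from the instruction stream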
1286
1287void Assembler::stop(uint32_t code) {
1289 DCHECK_LE(code, kMaxStopCode);
1290#if defined(V8_HOST_ARCH_RISCV64) || defined(V8_HOST_ARCH_RISCV32)
1291 break_(0x54321);
1292#else // V8_HOST_ARCH_RISCV64 || V8_HOST_ARCH_RISCV32
1293 break_(code, true);
1294#endif
1295}
1296
1297// Original MIPS Instructions
1298
1299// ------------Memory-instructions-------------
1300
1302 OffsetAccessType access_type,
1303 int second_access_add_to_offset) {
1304 bool two_accesses = static_cast<bool>(access_type);
1305 DCHECK_LE(second_access_add_to_offset, 7); // Must be <= 7.
1306
1307 // is_int12 must be passed a signed value, hence the static cast below.
1308 if (is_int12(src.offset()) &&
1309 (!two_accesses || is_int12(static_cast<int32_t>(
1310 src.offset() + second_access_add_to_offset)))) {
1311 // Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified
1312 // value) fits into int12.
1313 return false;
1314 }
1315 return true;
1316}
1317
1319 OffsetAccessType access_type,
1320 int second_Access_add_to_offset) {
1321 // This method is used to adjust the base register and offset pair
1322 // for a load/store when the offset doesn't fit into int12.
1323
1324 // Must not overwrite the register 'base' while loading 'offset'.
1325 constexpr int32_t kMinOffsetForSimpleAdjustment = 0x7F8;
1326 constexpr int32_t kMaxOffsetForSimpleAdjustment =
1327 2 * kMinOffsetForSimpleAdjustment;
1328 if (0 <= src->offset() && src->offset() <= kMaxOffsetForSimpleAdjustment) {
1329 addi(scratch, src->rm(), kMinOffsetForSimpleAdjustment);
1330 src->offset_ -= kMinOffsetForSimpleAdjustment;
1331 } else if (-kMaxOffsetForSimpleAdjustment <= src->offset() &&
1332 src->offset() < 0) {
1333 addi(scratch, src->rm(), -kMinOffsetForSimpleAdjustment);
1334 src->offset_ += kMinOffsetForSimpleAdjustment;
1335 } else if (access_type == OffsetAccessType::SINGLE_ACCESS) {
1336 RV_li(scratch, (static_cast<intptr_t>(src->offset()) + 0x800) >> 12 << 12);
1337 add(scratch, scratch, src->rm());
1338 src->offset_ = src->offset() << 20 >> 20;
1339 } else {
1340 RV_li(scratch, src->offset());
1341 add(scratch, scratch, src->rm());
1342 src->offset_ = 0;
1343 }
1344 src->rm_ = scratch;
1345}
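// Worked example (illustrative, not from the original source): for a single
// access at offset 2048, which just misses the int12 range [-2048, 2047], the
// "simple adjustment" path is taken:
//   addi scratch, base, 0x7F8   // kMinOffsetForSimpleAdjustment == 2040
//   // src becomes MemOperand(scratch, 8), and 8 fits into int12.
// Offsets beyond +/-4080 fall through to the RV_li + add paths instead.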
1346
1348 intptr_t pc_delta) {
1349 if (RelocInfo::IsInternalReference(rmode)) {
1350 intptr_t* p = reinterpret_cast<intptr_t*>(pc);
1351 if (*p == kEndOfJumpChain) {
1352 return 0; // Number of instructions patched.
1353 }
1354 *p += pc_delta;
1355 return 2; // Number of instructions patched.
1356 }
1359 if (IsLui(instr)) {
1360 uintptr_t target_address = target_constant_address_at(pc) + pc_delta;
1361 DEBUG_PRINTF("\ttarget_address 0x%" PRIxPTR "\n", target_address);
1362 set_target_value_at(pc, target_address);
1363#if V8_TARGET_ARCH_RISCV64
1364#ifdef RISCV_USE_SV39
1365 return 6; // Number of instructions patched.
1366#else
1367 return 8; // Number of instructions patched.
1368#endif
1369#elif V8_TARGET_ARCH_RISCV32
1370 return 2; // Number of instructions patched.
1371#endif
1372 } else {
1373 UNIMPLEMENTED();
1374 }
1375}
1376
1378 intptr_t pc_delta) {
1380 Instr instr1 = instr_at(pc + 1 * kInstrSize);
1383 if (IsAuipc(instr) && IsJalr(instr1)) {
1384 int32_t imm;
1385 imm = BrachlongOffset(instr, instr1);
1386 imm -= pc_delta;
1387 PatchBranchlongOffset(pc, instr, instr1, imm);
1388 return;
1389 } else {
1390 UNREACHABLE();
1391 }
1392}
1393
1395 DEBUG_PRINTF("GrowBuffer: %p -> ", buffer_start_);
1396 // Compute new buffer size.
1397 int old_size = buffer_->size();
1398 int new_size = std::min(2 * old_size, old_size + 1 * MB);
1399
1400 // Some internal data structures overflow for very large buffers; callers
1401 // must ensure that kMaximalBufferSize is not too large.
1402 if (new_size > kMaximalBufferSize) {
1403 V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
1404 }
1405
1406 // Set up new buffer.
1407 std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
1408 DCHECK_EQ(new_size, new_buffer->size());
1409 uint8_t* new_start = new_buffer->start();
1410
1411 // Copy the data.
1412 intptr_t pc_delta = new_start - buffer_start_;
1413 intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
1414 size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
1415 MemMove(new_start, buffer_start_, pc_offset());
1417 reloc_size);
1418
1419 // Switch buffers.
1420 buffer_ = std::move(new_buffer);
1421 buffer_start_ = new_start;
1422 DEBUG_PRINTF("%p\n", buffer_start_);
1423 pc_ += pc_delta;
1425 reloc_info_writer.last_pc() + pc_delta);
1426
1427 // Relocate runtime entries.
1429 static_cast<size_t>(pc_offset())};
1430 base::Vector<const uint8_t> reloc_info{reloc_info_writer.pos(), reloc_size};
1431 for (RelocIterator it(instructions, reloc_info, 0); !it.done(); it.next()) {
1432 RelocInfo::Mode rmode = it.rinfo()->rmode();
1433 if (rmode == RelocInfo::INTERNAL_REFERENCE) {
1434 RelocateInternalReference(rmode, it.rinfo()->pc(), pc_delta);
1435 }
1436 }
1437
1438 DCHECK(!overflow());
1439}
1440
1441void Assembler::db(uint8_t data) {
1443 DEBUG_PRINTF("%p(%d): constant 0x%x\n", pc_, pc_offset(), data);
1444 EmitHelper(data);
1445}
1446
1447void Assembler::dd(uint32_t data) {
1449 DEBUG_PRINTF("%p(%d): constant 0x%x\n", pc_, pc_offset(), data);
1450 EmitHelper(data);
1451}
1452
1453void Assembler::dq(uint64_t data) {
1455 DEBUG_PRINTF("%p(%d): constant 0x%" PRIx64 "\n", pc_, pc_offset(), data);
1456 EmitHelper(data);
1457}
1458
1460 uintptr_t data;
1462 if (label->is_bound()) {
1463 data = reinterpret_cast<uintptr_t>(buffer_start_ + label->pos());
1464 } else {
1465 data = jump_address(label);
1466 internal_reference_positions_.insert(label->pos());
1467 }
1469 EmitHelper(data);
1470}
1471
1473 if (!ShouldRecordRelocInfo(rmode)) return;
1474 // We do not try to reuse pool constants.
1475 RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data);
1476 DCHECK_GE(buffer_space(), kMaxRelocSize); // Too late to grow buffer here.
1477 reloc_info_writer.Write(&rinfo);
1478}
1479
1481 DEBUG_PRINTF("\tBlockTrampolinePoolFor %d", instructions);
1482 CheckTrampolinePoolQuick(instructions);
1483 DEBUG_PRINTF("\tpc_offset %d,BlockTrampolinePoolBefore %d\n", pc_offset(),
1484 pc_offset() + instructions * kInstrSize);
1486}
1487
1489 // Some small sequences of instructions must not be broken up by the
1490 // insertion of a trampoline pool; such sequences are protected by setting
1491 // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
1492 // which are both checked here. Also, recursive calls to CheckTrampolinePool
1493 // are blocked by trampoline_pool_blocked_nesting_.
1494 DEBUG_PRINTF("\tpc_offset %d no_trampoline_pool_before:%d\n", pc_offset(),
1496 DEBUG_PRINTF("\ttrampoline_pool_blocked_nesting:%d\n",
1500 // Emission is currently blocked; make sure we try again as soon as
1501 // possible.
1504 } else {
1506 }
1507 return;
1508 }
1509
1512 if (unbound_labels_count_ > 0) {
1513 // First we emit jump, then we emit trampoline pool.
1514 {
1515 DEBUG_PRINTF("inserting trampoline pool at %p (%d)\n",
1516 reinterpret_cast<Instr*>(buffer_start_ + pc_offset()),
1517 pc_offset());
1518 BlockTrampolinePoolScope block_trampoline_pool(this);
1519 Label after_pool;
1520 j(&after_pool);
1521
1522 int pool_start = pc_offset();
1523 for (int i = 0; i < unbound_labels_count_; i++) {
1524 int32_t imm;
1525 imm = branch_long_offset(&after_pool);
1526 CHECK(is_int32(imm + 0x800));
1527 int32_t Hi20 = (((int32_t)imm + 0x800) >> 12);
1528 int32_t Lo12 = (int32_t)imm << 20 >> 20;
1529 auipc(t6, Hi20); // Read PC + Hi20 into t6
1530 jr(t6, Lo12); // jump PC + Hi20 + Lo12
1531 }
1532 // If unbound_labels_count_ is big enough, label after_pool will
1533 // need a trampoline too, so we must create the trampoline before
1534 // the bind operation to make sure function 'bind' can get this
1535 // information.
1537 bind(&after_pool);
1538
1539 trampoline_emitted_ = true;
1540 // As we are only going to emit trampoline once, we need to prevent any
1541 // further emission.
1543 }
1544 } else {
1545 // Number of branches to unbound label at this point is zero, so we can
1546 // move next buffer check to maximum.
1549 }
1550 return;
1551}
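// Illustrative layout of an emitted trampoline pool (not from the original
// source), one auipc/jr pair per unbound label:
//   j     after_pool           // fall-through path skips the pool
//   auipc t6, Hi20             // slot 0, initially targeting after_pool
//   jr    t6, Lo12
//   auipc t6, Hi20             // slot 1
//   jr    t6, Lo12
//   ...
//   after_pool:
// When bind_to() finds a short-range branch that cannot reach its label, it
// retargets that branch at a free slot and patches the slot to perform the
// remaining long jump (see get_trampoline_entry and target_at_put above).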
1552
1554 Address target,
1555 WritableJitAllocation* jit_allocation,
1556 ICacheFlushMode icache_flush_mode) {
1557 Instr* instr = reinterpret_cast<Instr*>(pc);
1558 if (IsAuipc(*instr)) {
1559#if V8_TARGET_ARCH_RISCV64
1560 if (IsLd(*reinterpret_cast<Instr*>(pc + 4))) {
1561#elif V8_TARGET_ARCH_RISCV32
1562 if (IsLw(*reinterpret_cast<Instr*>(pc + 4))) {
1563#endif
1564 int32_t Hi20 = AuipcOffset(*instr);
1565 int32_t Lo12 = LoadOffset(*reinterpret_cast<Instr*>(pc + 4));
1566 if (jit_allocation) {
1567 jit_allocation->WriteValue<Address>(
1568 reinterpret_cast<Address>(pc + Hi20 + Lo12), target);
1569 } else {
1570 Memory<Address>(reinterpret_cast<Address>(pc + Hi20 + Lo12)) = target;
1571 }
1572 } else {
1573 DCHECK(IsJalr(*reinterpret_cast<Instr*>(pc + 4)));
1574 intptr_t imm = (intptr_t)target - (intptr_t)pc;
1576 Instr instr1 = instr_at(pc + 1 * kInstrSize);
1577 DCHECK(is_int32(imm + 0x800));
1578 int num = PatchBranchlongOffset(pc, instr, instr1, (int32_t)imm,
1579 jit_allocation);
1580 if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
1582 }
1583 }
1584 } else {
1585 set_target_value_at(pc, target, jit_allocation, icache_flush_mode);
1586 }
1587}
1588
1590 Instr* instr = reinterpret_cast<Instr*>(pc);
1591 if (IsAuipc(*instr)) {
1592#if V8_TARGET_ARCH_RISCV64
1593 if (IsLd(*reinterpret_cast<Instr*>(pc + 4))) {
1594#elif V8_TARGET_ARCH_RISCV32
1595 if (IsLw(*reinterpret_cast<Instr*>(pc + 4))) {
1596#endif
1597 int32_t Hi20 = AuipcOffset(*instr);
1598 int32_t Lo12 = LoadOffset(*reinterpret_cast<Instr*>(pc + 4));
1599 return Memory<Address>(pc + Hi20 + Lo12);
1600 } else {
1601 DCHECK(IsJalr(*reinterpret_cast<Instr*>(pc + 4)));
1602 int32_t Hi20 = AuipcOffset(*instr);
1603 int32_t Lo12 = JalrOffset(*reinterpret_cast<Instr*>(pc + 4));
1604 return pc + Hi20 + Lo12;
1605 }
1606
1607 } else {
1609 }
1610}
1611
1612#if V8_TARGET_ARCH_RISCV64
1614#ifdef RISCV_USE_SV39
1615 Instruction* instr0 = Instruction::At((unsigned char*)pc);
1616 Instruction* instr1 = Instruction::At((unsigned char*)(pc + 1 * kInstrSize));
1617 Instruction* instr2 = Instruction::At((unsigned char*)(pc + 2 * kInstrSize));
1618 Instruction* instr3 = Instruction::At((unsigned char*)(pc + 3 * kInstrSize));
1619
1620 // Interpret instructions for address generated by li: See listing in
1621 // Assembler::set_target_address_at() just below.
1622 if (IsLui(*reinterpret_cast<Instr*>(instr0)) &&
1623 IsAddi(*reinterpret_cast<Instr*>(instr1)) &&
1624 IsSlli(*reinterpret_cast<Instr*>(instr2)) &&
1625 IsOri(*reinterpret_cast<Instr*>(instr3))) {
1626 // Assemble the 64 bit value.
1627 int64_t addr = (int64_t)(instr0->Imm20UValue() << kImm20Shift) +
1628 (int64_t)instr1->Imm12Value();
1629 addr <<= 8;
1630 addr |= (int64_t)instr3->Imm12Value();
1631#else
1632 Instruction* instr0 = Instruction::At((unsigned char*)pc);
1633 Instruction* instr1 = Instruction::At((unsigned char*)(pc + 1 * kInstrSize));
1634 Instruction* instr2 = Instruction::At((unsigned char*)(pc + 2 * kInstrSize));
1635 Instruction* instr3 = Instruction::At((unsigned char*)(pc + 3 * kInstrSize));
1636 Instruction* instr4 = Instruction::At((unsigned char*)(pc + 4 * kInstrSize));
1637 Instruction* instr5 = Instruction::At((unsigned char*)(pc + 5 * kInstrSize));
1638
1639 // Interpret instructions for address generated by li: See listing in
1640 // Assembler::set_target_address_at() just below.
1641 if (IsLui(*reinterpret_cast<Instr*>(instr0)) &&
1642 IsAddi(*reinterpret_cast<Instr*>(instr1)) &&
1643 IsSlli(*reinterpret_cast<Instr*>(instr2)) &&
1644 IsOri(*reinterpret_cast<Instr*>(instr3)) &&
1645 IsSlli(*reinterpret_cast<Instr*>(instr4)) &&
1646 IsOri(*reinterpret_cast<Instr*>(instr5))) {
1647 // Assemble the 64 bit value.
1648 int64_t addr = (int64_t)(instr0->Imm20UValue() << kImm20Shift) +
1649 (int64_t)instr1->Imm12Value();
1650 addr <<= 11;
1651 addr |= (int64_t)instr3->Imm12Value();
1652 addr <<= 6;
1653 addr |= (int64_t)instr5->Imm12Value();
1654#endif
1655 DEBUG_PRINTF("\taddr: %" PRIx64 "\n", addr);
1656 return static_cast<Address>(addr);
1657 }
1658 // We should never get here, force a bad address if we do.
1659 UNREACHABLE();
1660}
1661// On RISC-V, a 48-bit target address is stored in a 6-instruction sequence:
1662// lui(reg, (int32_t)high_20); // 19 high bits
1663// addi(reg, reg, low_12); // 12 following bits. total is 31 high bits in reg.
1664// slli(reg, reg, 11); // Space for next 11 bits
1665// ori(reg, reg, b11); // 11 bits are put in. 42 bits in reg
1666// slli(reg, reg, 6); // Space for next 6 bits
1667// ori(reg, reg, a6); // 6 bits are put in. all 48 bits in reg
1668//
1669// If RISCV_USE_SV39 is defined, a 39-bit target address is stored in a
1670// 4-instruction sequence:
1671// lui(reg, (int32_t)high_20); // 19 high bits
1672// addi(reg, reg, low_12); // 12 following bits. total is 31 high bits in reg.
1673// slli(reg, reg, 8); // Space for next 8 bits
1674// ori(reg, reg, a8); // 8 bits are put in.
1675//
1676// Patching the address must replace all instructions, and flush the i-cache.
1677// Note that this assumes the use of SV48, the 48-bit virtual memory system.
1678void Assembler::set_target_value_at(Address pc, uint64_t target,
1679 WritableJitAllocation* jit_allocation,
1680 ICacheFlushMode icache_flush_mode) {
1681 DEBUG_PRINTF("\tset_target_value_at: pc: %" PRIxPTR "\ttarget: %" PRIx64
1682 "\told: %" PRIx64 "\n",
1683 pc, target, target_address_at(pc, static_cast<Address>(0)));
1684 uint32_t* p = reinterpret_cast<uint32_t*>(pc);
1685#ifdef RISCV_USE_SV39
1686 DCHECK_EQ((target & 0xffffff8000000000ll), 0);
1687#ifdef DEBUG
1688 // Check we have the result from a li macro-instruction.
1689 Instruction* instr0 = Instruction::At((unsigned char*)pc);
1690 Instruction* instr1 = Instruction::At((unsigned char*)(pc + 1 * kInstrSize));
1691 Instruction* instr3 = Instruction::At((unsigned char*)(pc + 3 * kInstrSize));
1692 DCHECK(IsLui(*reinterpret_cast<Instr*>(instr0)) &&
1693 IsAddi(*reinterpret_cast<Instr*>(instr1)) &&
1694 IsOri(*reinterpret_cast<Instr*>(instr3)));
1695#endif
1696 int64_t a8 = target & 0xff; // bits 0:7. 8 bits
1697 int64_t high_31 = (target >> 8) & 0x7fffffff; // 31 bits
1698 int64_t high_20 = ((high_31 + 0x800) >> 12); // 19 bits
1699 int64_t low_12 = high_31 & 0xfff; // 12 bits
1700 instr_at_put(pc, (*p & 0xfff) | ((int32_t)high_20 << 12), jit_allocation);
1702 (*(p + 1) & 0xfffff) | ((int32_t)low_12 << 20), jit_allocation);
1703 instr_at_put(pc + 2 * kInstrSize, (*(p + 2) & 0xfffff) | (8 << 20),
1704 jit_allocation);
1705 instr_at_put(pc + 3 * kInstrSize, (*(p + 3) & 0xfffff) | ((int32_t)a8 << 20),
1706 jit_allocation);
1707 if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
1709 }
1710#else
1711 DCHECK_EQ((target & 0xffff000000000000ll), 0);
1712#ifdef DEBUG
1713 // Check we have the result from a li macro-instruction.
1714 Instruction* instr0 = Instruction::At((unsigned char*)pc);
1715 Instruction* instr1 = Instruction::At((unsigned char*)(pc + 1 * kInstrSize));
1716 Instruction* instr3 = Instruction::At((unsigned char*)(pc + 3 * kInstrSize));
1717 Instruction* instr5 = Instruction::At((unsigned char*)(pc + 5 * kInstrSize));
1718 DCHECK(IsLui(*reinterpret_cast<Instr*>(instr0)) &&
1719 IsAddi(*reinterpret_cast<Instr*>(instr1)) &&
1720 IsOri(*reinterpret_cast<Instr*>(instr3)) &&
1721 IsOri(*reinterpret_cast<Instr*>(instr5)));
1722#endif
1723 int64_t a6 = target & 0x3f; // bits 0:6. 6 bits
1724 int64_t b11 = (target >> 6) & 0x7ff; // bits 6:11. 11 bits
1725 int64_t high_31 = (target >> 17) & 0x7fffffff; // 31 bits
1726 int64_t high_20 = ((high_31 + 0x800) >> 12); // 19 bits
1727 int64_t low_12 = high_31 & 0xfff; // 12 bits
1728 instr_at_put(pc, (*p & 0xfff) | ((int32_t)high_20 << 12), jit_allocation);
1730 (*(p + 1) & 0xfffff) | ((int32_t)low_12 << 20), jit_allocation);
1731 instr_at_put(pc + 2 * kInstrSize, (*(p + 2) & 0xfffff) | (11 << 20),
1732 jit_allocation);
1733 instr_at_put(pc + 3 * kInstrSize, (*(p + 3) & 0xfffff) | ((int32_t)b11 << 20),
1734 jit_allocation);
1735 instr_at_put(pc + 4 * kInstrSize, (*(p + 4) & 0xfffff) | (6 << 20),
1736 jit_allocation);
1737 instr_at_put(pc + 5 * kInstrSize, (*(p + 5) & 0xfffff) | ((int32_t)a6 << 20),
1738 jit_allocation);
1739 if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
1741 }
1742#endif
1744}
1745
1746#elif V8_TARGET_ARCH_RISCV32
1748 DEBUG_PRINTF("\ttarget_constant_address_at: pc: %x\t", pc);
1750 DEBUG_PRINTF("\taddr: %x\n", addr);
1751 return static_cast<Address>(addr);
1752}
1753// On RISC-V, a 32-bit target address is stored in an 2-instruction sequence:
1754// lui(reg, high_20); // 20 high bits
1755// addi(reg, reg, low_12); // 12 following bits. total is 31 high bits in reg.
1756//
1757// Patching the address must replace all instructions, and flush the i-cache.
1758void Assembler::set_target_value_at(Address pc, uint32_t target,
1759 WritableJitAllocation* jit_allocation,
1760 ICacheFlushMode icache_flush_mode) {
1761 DEBUG_PRINTF("\tset_target_value_at: pc: %x\ttarget: %x\n", pc, target);
1762 set_target_constant32_at(pc, target, jit_allocation, icache_flush_mode);
1763}
1764#endif
1765
1767 // The constant pool marker is made of two instructions. These instructions
1768 // will never be emitted by the JIT, so checking for the first one is enough:
1769 // 0: ld x0, x0, #offset
1770 Instr instr_value = *reinterpret_cast<Instr*>(instr);
1771#if V8_TARGET_ARCH_RISCV64
1772 bool result = IsLd(instr_value) && (instr->Rs1Value() == kRegCode_zero_reg) &&
1773 (instr->RdValue() == kRegCode_zero_reg);
1774#elif V8_TARGET_ARCH_RISCV32
1775 bool result = IsLw(instr_value) && (instr->Rs1Value() == kRegCode_zero_reg) &&
1776 (instr->RdValue() == kRegCode_zero_reg);
1777#endif
1778#ifdef DEBUG
1779 // It is still worth asserting the marker is complete.
1780 // 1: j 0x0
1781 Instruction* instr_following = instr + kInstrSize;
1782 DCHECK(!result || (IsJal(*reinterpret_cast<Instr*>(instr_following)) &&
1783 instr_following->Imm20JValue() == 0 &&
1784 instr_following->RdValue() == kRegCode_zero_reg));
1785#endif
1786 return result;
1787}
1788
1790 if (IsConstantPoolAt(instr)) {
1791 return instr->Imm12Value();
1792 } else {
1793 return -1;
1794 }
1795}
1796
1798 // We only need this for debugger support, to correctly compute offsets in the
1799 // code.
1800 Assembler::BlockPoolsScope block_pools(this);
1801 RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
1802}
1803
1805 // We must generate only one instruction as this is used in scopes that
1806 // control the size of the code generated.
1807 j(0);
1808}
1809
1810// -----------------------------------------------------------------------------
1811// Assembler.
1812template <typename T>
1814 *reinterpret_cast<T*>(pc_) = x;
1815 pc_ += sizeof(x);
1816}
1817
1819 if (!is_buffer_growth_blocked()) {
1820 CheckBuffer();
1821 }
1822 DEBUG_PRINTF("%p(%d): ", pc_, pc_offset());
1823 EmitHelper(x);
1824 disassembleInstr(pc_ - sizeof(x));
1826}
1827
1828void Assembler::emit(ShortInstr x) {
1829 if (!is_buffer_growth_blocked()) {
1830 CheckBuffer();
1831 }
1832 DEBUG_PRINTF("%p(%d): ", pc_, pc_offset());
1833 EmitHelper(x);
1834 disassembleInstr(pc_ - sizeof(x));
1836}
1837
1838void Assembler::emit(uint64_t data) {
1839 DEBUG_PRINTF("%p(%d): ", pc_, pc_offset());
1841 EmitHelper(data);
1842}
1843
1844void Assembler::instr_at_put(int pos, Instr instr,
1845 WritableJitAllocation* jit_allocation) {
1846 if (jit_allocation) {
1847 jit_allocation->WriteUnalignedValue(
1848 reinterpret_cast<Address>(buffer_start_ + pos), instr);
1849 } else {
1850 *reinterpret_cast<Instr*>(buffer_start_ + pos) = instr;
1851 }
1852}
1853
1854void Assembler::instr_at_put(int pos, ShortInstr instr,
1855 WritableJitAllocation* jit_allocation) {
1856 if (jit_allocation) {
1857 jit_allocation->WriteUnalignedValue(
1858 reinterpret_cast<Address>(buffer_start_ + pos), instr);
1859 } else {
1860 *reinterpret_cast<ShortInstr*>(buffer_start_ + pos) = instr;
1861 }
1862}
1863
1864void Assembler::instr_at_put(Address pc, Instr instr,
1865 WritableJitAllocation* jit_allocation) {
1866 if (jit_allocation) {
1867 jit_allocation->WriteUnalignedValue(pc, instr);
1868 } else {
1869 *reinterpret_cast<Instr*>(pc) = instr;
1870 }
1871}
1872
1873// Constant Pool
1874
1875void ConstantPool::EmitPrologue(Alignment require_alignment) {
1876 // Recorded constant pool size is expressed in number of 32-bit words,
1877 // and includes prologue and alignment, but not the jump around the pool
1878 // and the size of the marker itself.
1879 const int marker_size = 1;
1880 int word_count =
1881 ComputeSize(Jump::kOmitted, require_alignment) / kInt32Size - marker_size;
1882#if V8_TARGET_ARCH_RISCV64
1883 assm_->ld(zero_reg, zero_reg, word_count);
1884#elif V8_TARGET_ARCH_RISCV32
1885 assm_->lw(zero_reg, zero_reg, word_count);
1886#endif
1887 assm_->EmitPoolGuard();
1888}
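// For illustration: the word_count stored above lands in the marker's 12-bit
// offset field, which ConstantPoolSizeAt() later reads back via Imm12Value().
// With kApproxMaxEntryCount == 512, even a pool of 64-bit entries (two words
// each, plus prologue and alignment) stays well inside the signed 12-bit
// maximum of 2047 words.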
1889
1890int ConstantPool::PrologueSize(Jump require_jump) const {
1891 // Prologue is:
1892 // j over ;; if require_jump
1893 // ld x0, x0, #pool_size
1894 // j 0x0
1895 int prologue_size = require_jump == Jump::kRequired ? kInstrSize : 0;
1896 prologue_size += 2 * kInstrSize;
1897 return prologue_size;
1898}
1899
1900void ConstantPool::SetLoadOffsetToConstPoolEntry(int load_offset,
1901 Instruction* entry_offset,
1902 const ConstantPoolKey& key) {
1903 Instr instr_auipc = assm_->instr_at(load_offset);
1904 Instr instr_load = assm_->instr_at(load_offset + 4);
1905 // Instruction to patch must be 'ld/lw rd, offset(rd)' with 'offset == 0'.
1906 DCHECK(assm_->IsAuipc(instr_auipc));
1907#if V8_TARGET_ARCH_RISCV64
1908 DCHECK(assm_->IsLd(instr_load));
1909#elif V8_TARGET_ARCH_RISCV32
1910 DCHECK(assm_->IsLw(instr_load));
1911#endif
1912 DCHECK_EQ(assm_->LoadOffset(instr_load), 1);
1913 DCHECK_EQ(assm_->AuipcOffset(instr_auipc), 0);
1914 int32_t distance = static_cast<int32_t>(
1915 reinterpret_cast<Address>(entry_offset) -
1916 reinterpret_cast<Address>(assm_->toAddress(load_offset)));
1917 CHECK(is_int32(distance + 0x800));
1918 int32_t Hi20 = (((int32_t)distance + 0x800) >> 12);
1919 int32_t Lo12 = (int32_t)distance << 20 >> 20;
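  // Worked example (illustrative distance): for distance == 0x1804,
  // Hi20 == (0x1804 + 0x800) >> 12 == 0x2 and Lo12 == -0x7fc, so the patched
  // auipc/load pair computes 0x2000 + (-0x7fc) == 0x1804. The +0x800 rounding
  // compensates for the load's sign-extended 12-bit offset.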
1920 assm_->instr_at_put(load_offset, SetHi20Offset(Hi20, instr_auipc));
1921 assm_->instr_at_put(load_offset + 4, SetLoadOffset(Lo12, instr_load));
1922}
1923
1924void ConstantPool::Check(Emission force_emit, Jump require_jump,
1925 size_t margin) {
1926 // Some short sequences of instructions must not be broken up by constant
1927 // pool emission; such sequences are protected by a ConstPool::BlockScope.
1928 if (IsBlocked()) {
1929 // Something is wrong if emission is forced and blocked at the same time.
1930 DCHECK_EQ(force_emit, Emission::kIfNeeded);
1931 return;
1932 }
1933
1934 // We emit a constant pool only if:
1935 // * it is not empty
1936 // * emission is forced by parameter force_emit (e.g. at function end).
1937 // * emission is mandatory or opportune according to {ShouldEmitNow}.
1938 if (!IsEmpty() && (force_emit == Emission::kForced ||
1939 ShouldEmitNow(require_jump, margin))) {
1940 // Emit veneers for branches that would go out of range during emission of
1941 // the constant pool.
1942 int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired);
1943
1944 // Check that the code buffer is large enough before emitting the constant
1945 // pool (this includes the gap to the relocation information).
1946 int needed_space = worst_case_size + assm_->kGap;
1947 while (assm_->buffer_space() <= needed_space) {
1948 assm_->GrowBuffer();
1949 }
1950
1951 EmitAndClear(require_jump);
1952 }
1953 // Since the constant pool is (now) empty, move the check offset forward by
1954 // the standard interval.
1955 SetNextCheckIn(ConstantPool::kCheckInterval);
1956}
1957
1958// Pool entries are accessed with a pc-relative load, so they cannot be more
1959// than 1 MB away. Since constant pool emission checks are interval based, and
1960// we want to keep entries close to the code, we try to emit every 64 KB.
1961const size_t ConstantPool::kMaxDistToPool32 = 1 * MB;
1962const size_t ConstantPool::kMaxDistToPool64 = 1 * MB;
1963const size_t ConstantPool::kCheckInterval = 128 * kInstrSize;
1964const size_t ConstantPool::kApproxDistToPool32 = 64 * KB;
1965const size_t ConstantPool::kApproxDistToPool64 = kApproxDistToPool32;
1966
1967const size_t ConstantPool::kOpportunityDistToPool32 = 64 * KB;
1968const size_t ConstantPool::kOpportunityDistToPool64 = 64 * KB;
1969const size_t ConstantPool::kApproxMaxEntryCount = 512;
1970
1971#if defined(V8_TARGET_ARCH_RISCV64)
1972// LLVM Code
1973//===- RISCVMatInt.cpp - Immediate materialisation -------------*- C++
1974//-*--===//
1975//
1976// Part of the LLVM Project, under the Apache License v2.0 with LLVM
1977// Exceptions. See https://llvm.org/LICENSE.txt for license information.
1978// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
1979//
1980//===----------------------------------------------------------------------===//
1981void Assembler::RecursiveLi(Register rd, int64_t val) {
1982 if (val > 0 && RecursiveLiImplCount(val) > 2) {
1983 unsigned LeadingZeros = base::bits::CountLeadingZeros((uint64_t)val);
1984 uint64_t ShiftedVal = (uint64_t)val << LeadingZeros;
1985 int countFillZero = RecursiveLiImplCount(ShiftedVal) + 1;
1986 if (countFillZero < RecursiveLiImplCount(val)) {
1987 RecursiveLiImpl(rd, ShiftedVal);
1988 srli(rd, rd, LeadingZeros);
1989 return;
1990 }
1991 }
1992 RecursiveLiImpl(rd, val);
1993}
1994
1995int Assembler::RecursiveLiCount(int64_t val) {
1996 if (val > 0 && RecursiveLiImplCount(val) > 2) {
1997 unsigned LeadingZeros = base::bits::CountLeadingZeros((uint64_t)val);
1998 uint64_t ShiftedVal = (uint64_t)val << LeadingZeros;
1999 // Fill in the bits that will be shifted out with 1s. An example where
2000 // this helps is trailing one masks with 32 or more ones. This will
2001 // generate ADDI -1 and an SRLI.
2002 int countFillZero = RecursiveLiImplCount(ShiftedVal) + 1;
2003 if (countFillZero < RecursiveLiImplCount(val)) {
2004 return countFillZero;
2005 }
2006 }
2007 return RecursiveLiImplCount(val);
2008}
2009
2010void Assembler::RecursiveLiImpl(Register rd, int64_t Val) {
2011 if (is_int32(Val)) {
2012 // Depending on the active bits in the immediate Value v, the following
2013 // instruction sequences are emitted:
2014 //
2015 // v == 0 : ADDI
2016 // v[0,12) != 0 && v[12,32) == 0 : ADDI
2017 // v[0,12) == 0 && v[12,32) != 0 : LUI
2018 // v[0,32) != 0 : LUI+ADDI(W)
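    //
    // Worked example (illustrative value): Val == 0x12345fff gives
    // Hi20 == ((0x12345fff + 0x800) >> 12) & 0xfffff == 0x12346 and Lo12 == -1,
    // so the sequence is lui rd, 0x12346 (rd == 0x12346000) followed by
    // addiw rd, rd, -1 (rd == 0x12345fff).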
2019 int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
2020 int64_t Lo12 = Val << 52 >> 52;
2021
2022 if (Hi20) {
2023 lui(rd, (int32_t)Hi20);
2024 }
2025
2026 if (Lo12 || Hi20 == 0) {
2027 if (Hi20) {
2028 addiw(rd, rd, Lo12);
2029 } else {
2030 addi(rd, zero_reg, Lo12);
2031 }
2032 }
2033 return;
2034 }
2035
2036 // In the worst case, for a full 64-bit constant, a sequence of 8
2037 // instructions (i.e., LUI+ADDIW+SLLI+ADDI+SLLI+ADDI+SLLI+ADDI) has to be
2038 // emitted. Note that the first two instructions (LUI+ADDIW) can contribute
2039 // up to 32 bits while the following ADDI instructions contribute up to 12
2040 // bits each.
2041 //
2042// At first glance, implementing this seems to be possible by simply
2043 // emitting the most significant 32 bits (LUI+ADDIW) followed by as many
2044 // left shift (SLLI) and immediate additions (ADDI) as needed. However, due
2045// to the fact that ADDI performs a sign-extended addition, doing it like
2046 // that would only be possible when at most 11 bits of the ADDI instructions
2047 // are used. Using all 12 bits of the ADDI instructions, like done by GAS,
2048 // actually requires that the constant is processed starting with the least
2049 // significant bit.
2050 //
2051 // In the following, constants are processed from LSB to MSB but instruction
2052 // emission is performed from MSB to LSB by recursively calling
2053 // generateInstSeq. In each recursion, first the lowest 12 bits are removed
2054 // from the constant and the optimal shift amount, which can be greater than
2055 // 12 bits if the constant is sparse, is determined. Then, the shifted
2056 // remaining constant is processed recursively and gets emitted as soon as
2057 // it fits into 32 bits. The emission of the shifts and additions is
2058 // subsequently performed when the recursion returns.
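  //
  // Worked example (illustrative value): Val == 0x100000001. Lo12 == 1 and
  // Hi52 == (Val + 0x800) >> 12 == 0x100000, which has 20 trailing zeros, so
  // ShiftAmount == 32 and the shifted-down Hi52 == 1. The recursion therefore
  // emits addi rd, zero_reg, 1; slli rd, rd, 32; addi rd, rd, 1.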
2059
2060 int64_t Lo12 = Val << 52 >> 52;
2061 int64_t Hi52 = ((uint64_t)Val + 0x800ull) >> 12;
2062 int ShiftAmount = 12 + base::bits::CountTrailingZeros((uint64_t)Hi52);
2063 Hi52 = signExtend(Hi52 >> (ShiftAmount - 12), 64 - ShiftAmount);
2064
2065 // If the remaining bits don't fit in 12 bits, we might be able to reduce
2066 // the shift amount in order to use LUI which will zero the lower 12 bits.
2067 bool Unsigned = false;
2068 if (ShiftAmount > 12 && !is_int12(Hi52)) {
2069 if (is_int32((uint64_t)Hi52 << 12)) {
2070 // Reduce the shift amount and add zeros to the LSBs so it will match
2071 // LUI.
2072 ShiftAmount -= 12;
2073 Hi52 = (uint64_t)Hi52 << 12;
2074 }
2075 }
2076 RecursiveLi(rd, Hi52);
2077
2078 if (Unsigned) {
2079 } else {
2080 slli(rd, rd, ShiftAmount);
2081 }
2082 if (Lo12) {
2083 addi(rd, rd, Lo12);
2084 }
2085}
2086
2087int Assembler::RecursiveLiImplCount(int64_t Val) {
2088 int count = 0;
2089 if (is_int32(Val)) {
2090 // Depending on the active bits in the immediate Value v, the following
2091 // instruction sequences are emitted:
2092 //
2093 // v == 0 : ADDI
2094 // v[0,12) != 0 && v[12,32) == 0 : ADDI
2095 // v[0,12) == 0 && v[12,32) != 0 : LUI
2096 // v[0,32) != 0 : LUI+ADDI(W)
2097 int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
2098 int64_t Lo12 = Val << 52 >> 52;
2099
2100 if (Hi20) {
2101 // lui(rd, (int32_t)Hi20);
2102 count++;
2103 }
2104
2105 if (Lo12 || Hi20 == 0) {
2106 // unsigned AddiOpc = (IsRV64 && Hi20) ? RISCV::ADDIW : RISCV::ADDI;
2107 // Res.push_back(RISCVMatInt::Inst(AddiOpc, Lo12));
2108 count++;
2109 }
2110 return count;
2111 }
2112
2113 // In the worst case, for a full 64-bit constant, a sequence of 8
2114 // instructions (i.e., LUI+ADDIW+SLLI+ADDI+SLLI+ADDI+SLLI+ADDI) has to be
2115 // emitted. Note that the first two instructions (LUI+ADDIW) can contribute
2116 // up to 32 bits while the following ADDI instructions contribute up to 12
2117 // bits each.
2118 //
2119// At first glance, implementing this seems to be possible by simply
2120 // emitting the most significant 32 bits (LUI+ADDIW) followed by as many
2121 // left shift (SLLI) and immediate additions (ADDI) as needed. However, due
2122// to the fact that ADDI performs a sign-extended addition, doing it like
2123 // that would only be possible when at most 11 bits of the ADDI instructions
2124 // are used. Using all 12 bits of the ADDI instructions, like done by GAS,
2125 // actually requires that the constant is processed starting with the least
2126 // significant bit.
2127 //
2128 // In the following, constants are processed from LSB to MSB but instruction
2129 // emission is performed from MSB to LSB by recursively calling
2130 // generateInstSeq. In each recursion, first the lowest 12 bits are removed
2131 // from the constant and the optimal shift amount, which can be greater than
2132 // 12 bits if the constant is sparse, is determined. Then, the shifted
2133 // remaining constant is processed recursively and gets emitted as soon as
2134 // it fits into 32 bits. The emission of the shifts and additions is
2135 // subsequently performed when the recursion returns.
2136
2137 int64_t Lo12 = Val << 52 >> 52;
2138 int64_t Hi52 = ((uint64_t)Val + 0x800ull) >> 12;
2139 int ShiftAmount = 12 + base::bits::CountTrailingZeros((uint64_t)Hi52);
2140 Hi52 = signExtend(Hi52 >> (ShiftAmount - 12), 64 - ShiftAmount);
2141
2142 // If the remaining bits don't fit in 12 bits, we might be able to reduce
2143 // the shift amount in order to use LUI which will zero the lower 12 bits.
2144 bool Unsigned = false;
2145 if (ShiftAmount > 12 && !is_int12(Hi52)) {
2146 if (is_int32((uint64_t)Hi52 << 12)) {
2147 // Reduce the shift amount and add zeros to the LSBs so it will match
2148 // LUI.
2149 ShiftAmount -= 12;
2150 Hi52 = (uint64_t)Hi52 << 12;
2151 }
2152 }
2153
2154 count += RecursiveLiImplCount(Hi52);
2155
2156 if (Unsigned) {
2157 } else {
2158 // slli(rd, rd, ShiftAmount);
2159 count++;
2160 }
2161 if (Lo12) {
2162 // addi(rd, rd, Lo12);
2163 count++;
2164 }
2165 return count;
2166}
2167
2168int Assembler::GeneralLiCount(int64_t imm, bool is_get_temp_reg) {
2169 int count = 0;
2170 // imitate Assembler::RV_li
2171 if (is_int32(imm + 0x800)) {
2172 // 32-bit case. Maximum of 2 instructions generated
2173 int64_t high_20 = ((imm + 0x800) >> 12);
2174 int64_t low_12 = imm << 52 >> 52;
2175 if (high_20) {
2176 count++;
2177 if (low_12) {
2178 count++;
2179 }
2180 } else {
2181 count++;
2182 }
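    // e.g. (illustrative): imm == 0x12345678 yields high_20 == 0x12345 and
    // low_12 == 0x678, so count == 2, matching the LUI+ADDIW pair RV_li emits.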
2183 return count;
2184 } else {
2185 // 64-bit case: divide imm into two 32-bit parts, upper and lower
2186 int64_t up_32 = imm >> 32;
2187 int64_t low_32 = imm & 0xffffffffull;
2188 // Check if a temporary register is available
2189 if (is_get_temp_reg) {
2190 // keep track of hardware behavior for lower part in sim_low
2191 int64_t sim_low = 0;
2192 // Build lower part
2193 if (low_32 != 0) {
2194 int64_t high_20 = ((low_32 + 0x800) >> 12);
2195 int64_t low_12 = low_32 & 0xfff;
2196 if (high_20) {
2197 // Adjust to 20 bits for the case of overflow
2198 high_20 &= 0xfffff;
2199 sim_low = ((high_20 << 12) << 32) >> 32;
2200 count++;
2201 if (low_12) {
2202 sim_low += (low_12 << 52 >> 52) | low_12;
2203 count++;
2204 }
2205 } else {
2206 sim_low = low_12;
2207 count++;
2208 }
2209 }
2210 if (sim_low & 0x100000000) {
2211 // Bit 31 is 1. Either an overflow or a negative 64-bit value.
2212 if (up_32 == 0) {
2213 // Positive number, but it overflows because of the added 0x800.
2214 count++;
2215 count++;
2216 return count;
2217 }
2218 // low_32 is a negative 64-bit value after the build.
2219 up_32 = (up_32 - 0xffffffff) & 0xffffffff;
2220 }
2221 if (up_32 == 0) {
2222 return count;
2223 }
2224 int64_t high_20 = (up_32 + 0x800) >> 12;
2225 int64_t low_12 = up_32 & 0xfff;
2226 if (high_20) {
2227 // Adjust to 20 bits for the case of overflow
2228 high_20 &= 0xfffff;
2229 count++;
2230 if (low_12) {
2231 count++;
2232 }
2233 } else {
2234 count++;
2235 }
2236 // Put it at the beginning of the register.
2237 count++;
2238 if (low_32 != 0) {
2239 count++;
2240 }
2241 return count;
2242 }
2243 // No temp register. Build imm in rd.
2244 // Build upper 32 bits first in rd. Divide the lower 32 bits into parts and
2245 // add them to the upper part by doing shift and add.
2246 // First build upper part in rd.
2247 int64_t high_20 = (up_32 + 0x800) >> 12;
2248 int64_t low_12 = up_32 & 0xfff;
2249 if (high_20) {
2250 // Adjust to 20 bits for the case of overflow
2251 high_20 &= 0xfffff;
2252 count++;
2253 if (low_12) {
2254 count++;
2255 }
2256 } else {
2257 count++;
2258 }
2259 // upper part already in rd. Each part to be added to rd has a maximum of
2260 // 11 bits, and always starts with a 1. rd is shifted by the size of the
2261 // part plus the number of zeros between the parts. Each part is added
2262 // after the left shift.
2263 uint32_t mask = 0x80000000;
2264 int32_t i;
2265 for (i = 0; i < 32; i++) {
2266 if ((low_32 & mask) == 0) {
2267 mask >>= 1;
2268 if (i == 31) {
2269 // rest is zero
2270 count++;
2271 }
2272 continue;
2273 }
2274 // The first 1 seen
2275 if ((i + 11) < 32) {
2276 // Pick 11 bits
2277 count++;
2278 count++;
2279 i += 10;
2280 mask >>= 11;
2281 } else {
2282 count++;
2283 count++;
2284 break;
2285 }
2286 }
2287 }
2288 return count;
2289}
2290#endif
2291
2294} // namespace internal
2295} // namespace v8