v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
assembler-loong64.cc
Go to the documentation of this file.
1// Copyright 2021 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
7#if V8_TARGET_ARCH_LOONG64
8
9#include "src/base/cpu.h"
16
17namespace v8 {
18namespace internal {
19
20bool CpuFeatures::SupportsWasmSimd128() { return false; }
21
22void CpuFeatures::ProbeImpl(bool cross_compile) {
23 supported_ |= 1u << FPU;
24
25 // Only use statically determined features for cross compile (snapshot).
26 if (cross_compile) return;
27
28#ifdef __loongarch__
29 // Probe for additional features at runtime.
30 base::CPU cpu;
31 supported_ |= 1u << FPU;
32#endif
33
34 // Set a static value on whether Simd is supported.
35 // This variable is only used for certain archs to query SupportWasmSimd128()
36 // at runtime in builtins using an extern ref. Other callers should use
37 // CpuFeatures::SupportWasmSimd128().
39}
40
43
44int ToNumber(Register reg) {
45 DCHECK(reg.is_valid());
46 const int kNumbers[] = {
47 0, // zero_reg
48 1, // ra
49 2, // tp
50 3, // sp
51 4, // a0 v0
52 5, // a1 v1
53 6, // a2
54 7, // a3
55 8, // a4
56 9, // a5
57 10, // a6
58 11, // a7
59 12, // t0
60 13, // t1
61 14, // t2
62 15, // t3
63 16, // t4
64 17, // t5
65 18, // t6
66 19, // t7
67 20, // t8
68 21, // x_reg
69 22, // fp
70 23, // s0
71 24, // s1
72 25, // s2
73 26, // s3
74 27, // s4
75 28, // s5
76 29, // s6
77 30, // s7
78 31, // s8
79 };
80 return kNumbers[reg.code()];
81}
82
83Register ToRegister(int num) {
84 DCHECK(num >= 0 && num < kNumRegisters);
85 const Register kRegisters[] = {
86 zero_reg, ra, tp, sp, a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, t3,
87 t4, t5, t6, t7, t8, x_reg, fp, s0, s1, s2, s3, s4, s5, s6, s7, s8};
88 return kRegisters[num];
89}
90
91// -----------------------------------------------------------------------------
92// Implementation of RelocInfo.
93
94const int RelocInfo::kApplyMask =
98
100 // The deserializer needs to know whether a pointer is specially coded. Being
101 // specially coded on LoongArch64 means that it is a lu12i_w/ori instruction,
102 // and that is always the case inside code objects.
103 return true;
104}
105
106bool RelocInfo::IsInConstantPool() { return false; }
107
108uint32_t RelocInfo::wasm_call_tag() const {
110 return static_cast<uint32_t>(
112}
113
114// -----------------------------------------------------------------------------
115// Implementation of Operand and MemOperand.
116// See assembler-loong64-inl.h for inlined constructors.
117
118Operand::Operand(Handle<HeapObject> handle)
119 : rm_(no_reg), rmode_(RelocInfo::FULL_EMBEDDED_OBJECT) {
120 value_.immediate = static_cast<intptr_t>(handle.address());
121}
122
123Operand Operand::EmbeddedNumber(double value) {
124 int32_t smi;
125 if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
126 Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
127 result.is_heap_number_request_ = true;
128 result.value_.heap_number_request = HeapNumberRequest(value);
129 return result;
130}
131
132MemOperand::MemOperand(Register base, int32_t offset)
134
135MemOperand::MemOperand(Register base, Register index)
136 : base_(base), index_(index), offset_(0) {}
137
138void Assembler::AllocateAndInstallRequestedHeapNumbers(LocalIsolate* isolate) {
139 DCHECK_IMPLIES(isolate == nullptr, heap_number_requests_.empty());
140 for (auto& request : heap_number_requests_) {
141 Handle<HeapObject> object;
142 object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
143 request.heap_number());
144 Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
145 EmbeddedObjectIndex index = AddEmbeddedObject(object);
146 if (IsLu32i_d(instr_at(pc + 2 * kInstrSize))) {
147 set_target_value_at(pc, static_cast<uint64_t>(index));
148 } else {
149 set_target_compressed_value_at(pc, static_cast<uint32_t>(index));
150 }
151 }
152}
153
154// -----------------------------------------------------------------------------
155// Specific instructions, constants, and masks.
156
157Assembler::Assembler(const AssemblerOptions& options,
158 std::unique_ptr<AssemblerBuffer> buffer)
159 : AssemblerBase(options, std::move(buffer)),
160 scratch_register_list_({t6, t7, t8}),
161 scratch_fpregister_list_({f31}) {
162 reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
163
164 last_trampoline_pool_end_ = 0;
165 no_trampoline_pool_before_ = 0;
166 trampoline_pool_blocked_nesting_ = 0;
167 // We leave space (16 * kTrampolineSlotsSize)
168 // for BlockTrampolinePoolScope buffer.
169 next_buffer_check_ = v8_flags.force_long_branches
170 ? kMaxInt
171 : kMax16BranchOffset - kTrampolineSlotsSize * 16;
172 internal_trampoline_exception_ = false;
173 last_bound_pos_ = 0;
174
175 trampoline_emitted_ = v8_flags.force_long_branches;
176 unbound_labels_count_ = 0;
177 block_buffer_growth_ = false;
178}
179
180void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
181 GetCode(isolate->main_thread_local_isolate(), desc);
182}
183void Assembler::GetCode(LocalIsolate* isolate, CodeDesc* desc,
184 SafepointTableBuilderBase* safepoint_table_builder,
185 int handler_table_offset) {
186 // As a crutch to avoid having to add manual Align calls wherever we use a
187 // raw workflow to create InstructionStream objects (mostly in tests), add
188 // another Align call here. It does no harm - the end of the InstructionStream
189 // object is aligned to the (larger) kCodeAlignment anyways.
190 // TODO(jgruber): Consider moving responsibility for proper alignment to
191 // metadata table builders (safepoint, handler, constant pool, code
192 // comments).
193 DataAlign(InstructionStream::kMetadataAlignment);
194
195 // EmitForbiddenSlotInstruction(); TODO:LOONG64 why?
196
197 int code_comments_size = WriteCodeComments();
198
199 DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
200
201 AllocateAndInstallRequestedHeapNumbers(isolate);
202
203 // Set up code descriptor.
204 // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
205 // this point to make CodeDesc initialization less fiddly.
206
207 static constexpr int kConstantPoolSize = 0;
208 static constexpr int kBuiltinJumpTableInfoSize = 0;
209 const int instruction_size = pc_offset();
210 const int builtin_jump_table_info_offset =
211 instruction_size - kBuiltinJumpTableInfoSize;
212 const int code_comments_offset =
213 builtin_jump_table_info_offset - code_comments_size;
214 const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
215 const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
216 ? constant_pool_offset
217 : handler_table_offset;
218 const int safepoint_table_offset =
219 (safepoint_table_builder == kNoSafepointTable)
220 ? handler_table_offset2
221 : safepoint_table_builder->safepoint_table_offset();
222 const int reloc_info_offset =
223 static_cast<int>(reloc_info_writer.pos() - buffer_->start());
224 CodeDesc::Initialize(desc, this, safepoint_table_offset,
225 handler_table_offset2, constant_pool_offset,
226 code_comments_offset, builtin_jump_table_info_offset,
227 reloc_info_offset);
228}
229
230void Assembler::Align(int m) {
231 // If not, the loop below won't terminate.
232 DCHECK(IsAligned(pc_offset(), kInstrSize));
233 DCHECK(m >= kInstrSize && base::bits::IsPowerOfTwo(m));
234 while ((pc_offset() & (m - 1)) != 0) {
235 nop();
236 }
237}
238
239void Assembler::CodeTargetAlign() {
240 // No advantage to aligning branch/call targets to more than
241 // single instruction, that I am aware of.
242 Align(4);
243}
244
245Register Assembler::GetRkReg(Instr instr) {
246 return Register::from_code((instr & kRkFieldMask) >> kRkShift);
247}
248
249Register Assembler::GetRjReg(Instr instr) {
250 return Register::from_code((instr & kRjFieldMask) >> kRjShift);
251}
252
253Register Assembler::GetRdReg(Instr instr) {
254 return Register::from_code((instr & kRdFieldMask) >> kRdShift);
255}
256
257uint32_t Assembler::GetRk(Instr instr) {
258 return (instr & kRkFieldMask) >> kRkShift;
259}
260
261uint32_t Assembler::GetRkField(Instr instr) { return instr & kRkFieldMask; }
262
263uint32_t Assembler::GetRj(Instr instr) {
264 return (instr & kRjFieldMask) >> kRjShift;
265}
266
267uint32_t Assembler::GetRjField(Instr instr) { return instr & kRjFieldMask; }
268
269uint32_t Assembler::GetRd(Instr instr) {
270 return (instr & kRdFieldMask) >> kRdShift;
271}
272
273uint32_t Assembler::GetRdField(Instr instr) { return instr & kRdFieldMask; }
274
275uint32_t Assembler::GetSa2(Instr instr) {
276 return (instr & kSa2FieldMask) >> kSaShift;
277}
278
279uint32_t Assembler::GetSa2Field(Instr instr) { return instr & kSa2FieldMask; }
280
281uint32_t Assembler::GetSa3(Instr instr) {
282 return (instr & kSa3FieldMask) >> kSaShift;
283}
284
285uint32_t Assembler::GetSa3Field(Instr instr) { return instr & kSa3FieldMask; }
286
287// Labels refer to positions in the (to be) generated code.
288// There are bound, linked, and unused labels.
289//
290// Bound labels refer to known positions in the already
291// generated code. pos() is the position the label refers to.
292//
293// Linked labels refer to unknown positions in the code
294// to be generated; pos() is the position of the last
295// instruction using the label.
296
297// The link chain is terminated by a value in the instruction of 0,
298// which is an otherwise illegal value (branch 0 is inf loop).
299// The instruction 16-bit offset field addresses 32-bit words, but in
300// code is conv to an 18-bit value addressing bytes, hence the -4 value.
301
302const int kEndOfChain = 0;
303// Determines the end of the Jump chain (a subset of the label link chain).
304const int kEndOfJumpChain = 0;
305
306bool Assembler::IsBranch(Instr instr) {
307 uint32_t opcode = (instr >> 26) << 26;
308 // Checks if the instruction is a branch.
309 bool isBranch = opcode == BEQZ || opcode == BNEZ || opcode == BCZ ||
310 opcode == B || opcode == BL || opcode == BEQ ||
311 opcode == BNE || opcode == BLT || opcode == BGE ||
312 opcode == BLTU || opcode == BGEU;
313 return isBranch;
314}
315
316bool Assembler::IsB(Instr instr) {
317 uint32_t opcode = (instr >> 26) << 26;
318 // Checks if the instruction is a b.
319 bool isBranch = opcode == B || opcode == BL;
320 return isBranch;
321}
322
323bool Assembler::IsBz(Instr instr) {
324 uint32_t opcode = (instr >> 26) << 26;
325 // Checks if the instruction is a branch.
326 bool isBranch = opcode == BEQZ || opcode == BNEZ || opcode == BCZ;
327 return isBranch;
328}
329
330bool Assembler::IsEmittedConstant(Instr instr) {
331 // Add GetLabelConst function?
332 uint32_t label_constant = instr & ~kImm16Mask;
333 return label_constant == 0; // Emitted label const in reg-exp engine.
334}
335
336bool Assembler::IsJ(Instr instr) {
337 uint32_t opcode = (instr >> 26) << 26;
338 // Checks if the instruction is a jump.
339 return opcode == JIRL;
340}
341
342bool Assembler::IsLu12i_w(Instr instr) {
343 uint32_t opcode = (instr >> 25) << 25;
344 return opcode == LU12I_W;
345}
346
347bool Assembler::IsOri(Instr instr) {
348 uint32_t opcode = (instr >> 22) << 22;
349 return opcode == ORI;
350}
351
352bool Assembler::IsLu32i_d(Instr instr) {
353 uint32_t opcode = (instr >> 25) << 25;
354 return opcode == LU32I_D;
355}
356
357bool Assembler::IsLu52i_d(Instr instr) {
358 uint32_t opcode = (instr >> 22) << 22;
359 return opcode == LU52I_D;
360}
361
362bool Assembler::IsMov(Instr instr, Register rd, Register rj) {
363 // Checks if the instruction is a OR with zero_reg argument (aka MOV).
364 Instr instr1 =
365 OR | zero_reg.code() << kRkShift | rj.code() << kRjShift | rd.code();
366 return instr == instr1;
367}
368
369bool Assembler::IsPcAddi(Instr instr) {
370 uint32_t opcode = (instr >> 25) << 25;
371 return opcode == PCADDI;
372}
373
374bool Assembler::IsNop(Instr instr, unsigned int type) {
375 // See Assembler::nop(type).
376 DCHECK_LT(type, 32);
377
378 Instr instr1 =
379 ANDI | ((type & kImm12Mask) << kRkShift) | (zero_reg.code() << kRjShift);
380
381 return instr == instr1;
382}
383
384static inline int32_t GetOffsetOfBranch(Instr instr,
385 Assembler::OffsetSize bits) {
386 int32_t result = 0;
387 if (bits == 16) {
388 result = (instr << 6) >> 16;
389 } else if (bits == 21) {
390 uint32_t low16 = instr << 6;
391 low16 = low16 >> 16;
392 low16 &= 0xffff;
393 int32_t hi5 = (instr << 27) >> 11;
394 result = hi5 | low16;
395 } else {
396 uint32_t low16 = instr << 6;
397 low16 = low16 >> 16;
398 low16 &= 0xffff;
399 int32_t hi10 = (instr << 22) >> 6;
400 result = hi10 | low16;
401 DCHECK_EQ(bits, 26);
402 }
403 return result << 2;
404}
405
406static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
407 if (Assembler::IsB(instr)) {
408 return Assembler::OffsetSize::kOffset26;
409 } else if (Assembler::IsBz(instr)) {
410 return Assembler::OffsetSize::kOffset21;
411 } else {
412 DCHECK(Assembler::IsBranch(instr));
413 return Assembler::OffsetSize::kOffset16;
414 }
415}
416
417static inline int32_t AddBranchOffset(int pos, Instr instr) {
418 Assembler::OffsetSize bits = OffsetSizeInBits(instr);
419
420 int32_t imm = GetOffsetOfBranch(instr, bits);
421
422 if (imm == kEndOfChain) {
423 // EndOfChain sentinel is returned directly, not relative to pc or pos.
424 return kEndOfChain;
425 } else {
426 // Handle the case that next branch position is 0.
427 // TODO(LOONG_dev): Define -4 as a constant
428 int32_t offset = pos + imm;
429 return offset == 0 ? -4 : offset;
430 }
431}
432
433int Assembler::target_at(int pos, bool is_internal) {
434 if (is_internal) {
435 int64_t* p = reinterpret_cast<int64_t*>(buffer_start_ + pos);
436 int64_t address = *p;
437 if (address == kEndOfJumpChain) {
438 return kEndOfChain;
439 } else {
440 int64_t instr_address = reinterpret_cast<int64_t>(p);
441 DCHECK(instr_address - address < INT_MAX);
442 int delta = static_cast<int>(instr_address - address);
443 DCHECK(pos > delta);
444 return pos - delta;
445 }
446 }
447 Instr instr = instr_at(pos);
448
449 // TODO(LOONG_dev) remove after remove label_at_put?
450 if ((instr & ~kImm16Mask) == 0) {
451 // Emitted label constant, not part of a branch.
452 if (instr == 0) {
453 return kEndOfChain;
454 } else {
455 int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
456 return (imm18 + pos);
457 }
458 }
459
460 // Check we have a branch, jump or pcaddi instruction.
461 DCHECK(IsBranch(instr) || IsPcAddi(instr));
462 // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
463 // the compiler uses arithmetic shifts for signed integers.
464 if (IsBranch(instr)) {
465 return AddBranchOffset(pos, instr);
466 } else if (IsPcAddi(instr)) {
467 // see LoadLabelRelative
468 int32_t si20;
469 si20 = (instr >> kRjShift) & 0xfffff;
470 if (si20 == kEndOfJumpChain) {
471 // EndOfChain sentinel is returned directly, not relative to pc or pos.
472 return kEndOfChain;
473 }
474 return pos + (si20 << 2);
475 } else {
476 UNREACHABLE();
477 }
478}
479
480static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
481 Instr instr) {
482 int32_t bits = OffsetSizeInBits(instr);
483 int32_t imm = target_pos - pos;
484 DCHECK_EQ(imm & 3, 0);
485 imm >>= 2;
486
487 DCHECK(is_intn(imm, bits));
488
489 if (bits == 16) {
490 const int32_t mask = ((1 << 16) - 1) << 10;
491 instr &= ~mask;
492 return instr | ((imm << 10) & mask);
493 } else if (bits == 21) {
494 const int32_t mask = 0x3fffc1f;
495 instr &= ~mask;
496 uint32_t low16 = (imm & kImm16Mask) << 10;
497 int32_t hi5 = (imm >> 16) & 0x1f;
498 return instr | low16 | hi5;
499 } else {
500 DCHECK_EQ(bits, 26);
501 const int32_t mask = 0x3ffffff;
502 instr &= ~mask;
503 uint32_t low16 = (imm & kImm16Mask) << 10;
504 int32_t hi10 = (imm >> 16) & 0x3ff;
505 return instr | low16 | hi10;
506 }
507}
508
509void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
510 if (is_internal) {
511 uint64_t imm = reinterpret_cast<uint64_t>(buffer_start_) + target_pos;
512 *reinterpret_cast<uint64_t*>(buffer_start_ + pos) = imm;
513 return;
514 }
515 Instr instr = instr_at(pos);
516 if ((instr & ~kImm16Mask) == 0) {
517 DCHECK(target_pos == kEndOfChain || target_pos >= 0);
518 // Emitted label constant, not part of a branch.
519 // Make label relative to Code pointer of generated Code object.
520 instr_at_put(
521 pos, target_pos + (InstructionStream::kHeaderSize - kHeapObjectTag));
522 return;
523 }
524
525 if (IsPcAddi(instr)) {
526 // For LoadLabelRelative function.
527 int32_t imm = target_pos - pos;
528 DCHECK_EQ(imm & 3, 0);
529 DCHECK(is_int22(imm));
530 uint32_t siMask = 0xfffff << kRjShift;
531 uint32_t si20 = ((imm >> 2) << kRjShift) & siMask;
532 instr = (instr & ~siMask) | si20;
533 instr_at_put(pos, instr);
534 return;
535 }
536
537 DCHECK(IsBranch(instr));
538 instr = SetBranchOffset(pos, target_pos, instr);
539 instr_at_put(pos, instr);
540}
541
542void Assembler::print(const Label* L) {
543 if (L->is_unused()) {
544 PrintF("unused label\n");
545 } else if (L->is_bound()) {
546 PrintF("bound label to %d\n", L->pos());
547 } else if (L->is_linked()) {
548 Label l;
549 l.link_to(L->pos());
550 PrintF("unbound label");
551 while (l.is_linked()) {
552 PrintF("@ %d ", l.pos());
553 Instr instr = instr_at(l.pos());
554 if ((instr & ~kImm16Mask) == 0) {
555 PrintF("value\n");
556 } else {
557 PrintF("%d\n", instr);
558 }
559 next(&l, is_internal_reference(&l));
560 }
561 } else {
562 PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
563 }
564}
565
566void Assembler::bind_to(Label* L, int pos) {
567 DCHECK(0 <= pos && pos <= pc_offset()); // Must have valid binding position.
568 int trampoline_pos = kInvalidSlotPos;
569 bool is_internal = false;
570 if (L->is_linked() && !trampoline_emitted_) {
571 unbound_labels_count_--;
572 if (!is_internal_reference(L)) {
573 next_buffer_check_ += kTrampolineSlotsSize;
574 }
575 }
576
577 while (L->is_linked()) {
578 int fixup_pos = L->pos();
579 int dist = pos - fixup_pos;
580 is_internal = is_internal_reference(L);
581 next(L, is_internal); // Call next before overwriting link with target at
582 // fixup_pos.
583 Instr instr = instr_at(fixup_pos);
584 if (is_internal) {
585 target_at_put(fixup_pos, pos, is_internal);
586 } else {
587 if (IsBranch(instr)) {
588 int branch_offset = BranchOffset(instr);
589 if (dist > branch_offset) {
590 if (trampoline_pos == kInvalidSlotPos) {
591 trampoline_pos = get_trampoline_entry(fixup_pos);
592 CHECK_NE(trampoline_pos, kInvalidSlotPos);
593 }
594 CHECK((trampoline_pos - fixup_pos) <= branch_offset);
595 target_at_put(fixup_pos, trampoline_pos, false);
596 fixup_pos = trampoline_pos;
597 }
598 target_at_put(fixup_pos, pos, false);
599 } else {
600 DCHECK(IsJ(instr) || IsLu12i_w(instr) || IsEmittedConstant(instr) ||
601 IsPcAddi(instr));
602 target_at_put(fixup_pos, pos, false);
603 }
604 }
605 }
606 L->bind_to(pos);
607
608 // Keep track of the last bound label so we don't eliminate any instructions
609 // before a bound label.
610 if (pos > last_bound_pos_) last_bound_pos_ = pos;
611}
612
613void Assembler::bind(Label* L) {
614 DCHECK(!L->is_bound()); // Label can only be bound once.
615 bind_to(L, pc_offset());
616}
617
618void Assembler::next(Label* L, bool is_internal) {
619 DCHECK(L->is_linked());
620 int link = target_at(L->pos(), is_internal);
621 if (link == kEndOfChain) {
622 L->Unuse();
623 } else if (link == -4) {
624 // Next position is pc_offset == 0
625 L->link_to(0);
626 } else {
627 DCHECK_GE(link, 0);
628 L->link_to(link);
629 }
630}
631
632bool Assembler::is_near_c(Label* L) {
633 DCHECK(L->is_bound());
634 return pc_offset() - L->pos() < kMax16BranchOffset - 4 * kInstrSize;
635}
636
637bool Assembler::is_near(Label* L, OffsetSize bits) {
638 DCHECK(L->is_bound());
639 return ((pc_offset() - L->pos()) <
640 (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize);
641}
642
643bool Assembler::is_near_a(Label* L) {
644 DCHECK(L->is_bound());
645 return pc_offset() - L->pos() <= kMax26BranchOffset - 4 * kInstrSize;
646}
647
648int Assembler::BranchOffset(Instr instr) {
649 int bits = OffsetSize::kOffset16;
650
651 uint32_t opcode = (instr >> 26) << 26;
652 switch (opcode) {
653 case B:
654 case BL:
655 bits = OffsetSize::kOffset26;
656 break;
657 case BNEZ:
658 case BEQZ:
659 case BCZ:
660 bits = OffsetSize::kOffset21;
661 break;
662 case BNE:
663 case BEQ:
664 case BLT:
665 case BGE:
666 case BLTU:
667 case BGEU:
668 case JIRL:
669 bits = OffsetSize::kOffset16;
670 break;
671 default:
672 break;
673 }
674
675 return (1 << (bits + 2 - 1)) - 1;
676}
677
678// We have to use a temporary register for things that can be relocated even
679// if they can be encoded in the LOONG's 16 bits of immediate-offset
680// instruction space. There is no guarantee that the relocated location can be
681// similarly encoded.
682bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
683 return !RelocInfo::IsNoInfo(rmode);
684}
685
686void Assembler::GenB(Opcode opcode, Register rj, int32_t si21) {
687 BlockTrampolinePoolScope block_trampoline_pool(this);
688 DCHECK((BEQZ == opcode || BNEZ == opcode) && is_int21(si21) && rj.is_valid());
689 Instr instr = opcode | (si21 & kImm16Mask) << kRkShift |
690 (rj.code() << kRjShift) | ((si21 & 0x1fffff) >> 16);
691 emit(instr);
692}
693
694void Assembler::GenB(Opcode opcode, CFRegister cj, int32_t si21, bool isEq) {
695 BlockTrampolinePoolScope block_trampoline_pool(this);
696 DCHECK(BCZ == opcode && is_int21(si21));
697 DCHECK(cj >= 0 && cj <= 7);
698 int32_t sc = (isEq ? cj : cj + 8);
699 Instr instr = opcode | (si21 & kImm16Mask) << kRkShift | (sc << kRjShift) |
700 ((si21 & 0x1fffff) >> 16);
701 emit(instr);
702}
703
704void Assembler::GenB(Opcode opcode, int32_t si26) {
705 BlockTrampolinePoolScope block_trampoline_pool(this);
706 DCHECK((B == opcode || BL == opcode) && is_int26(si26));
707 Instr instr =
708 opcode | ((si26 & kImm16Mask) << kRkShift) | ((si26 & kImm26Mask) >> 16);
709 emit(instr);
710}
711
712void Assembler::GenBJ(Opcode opcode, Register rj, Register rd, int32_t si16) {
713 BlockTrampolinePoolScope block_trampoline_pool(this);
714 DCHECK(is_int16(si16));
715 Instr instr = opcode | ((si16 & kImm16Mask) << kRkShift) |
716 (rj.code() << kRjShift) | rd.code();
717 emit(instr);
718}
719
720void Assembler::GenCmp(Opcode opcode, FPUCondition cond, FPURegister fk,
721 FPURegister fj, CFRegister cd) {
722 DCHECK(opcode == FCMP_COND_S || opcode == FCMP_COND_D);
723 Instr instr = opcode | cond << kCondShift | (fk.code() << kFkShift) |
724 (fj.code() << kFjShift) | cd;
725 emit(instr);
726}
727
728void Assembler::GenSel(Opcode opcode, CFRegister ca, FPURegister fk,
729 FPURegister fj, FPURegister rd) {
730 DCHECK((opcode == FSEL));
731 Instr instr = opcode | ca << kCondShift | (fk.code() << kFkShift) |
732 (fj.code() << kFjShift) | rd.code();
733 emit(instr);
734}
735
736void Assembler::GenRegister(Opcode opcode, Register rj, Register rd,
737 bool rjrd) {
738 DCHECK(rjrd);
739 Instr instr = 0;
740 instr = opcode | (rj.code() << kRjShift) | rd.code();
741 emit(instr);
742}
743
744void Assembler::GenRegister(Opcode opcode, FPURegister fj, FPURegister fd) {
745 Instr instr = opcode | (fj.code() << kFjShift) | fd.code();
746 emit(instr);
747}
748
749void Assembler::GenRegister(Opcode opcode, Register rj, FPURegister fd) {
750 DCHECK((opcode == MOVGR2FR_W) || (opcode == MOVGR2FR_D) ||
751 (opcode == MOVGR2FRH_W));
752 Instr instr = opcode | (rj.code() << kRjShift) | fd.code();
753 emit(instr);
754}
755
756void Assembler::GenRegister(Opcode opcode, FPURegister fj, Register rd) {
757 DCHECK((opcode == MOVFR2GR_S) || (opcode == MOVFR2GR_D) ||
758 (opcode == MOVFRH2GR_S));
759 Instr instr = opcode | (fj.code() << kFjShift) | rd.code();
760 emit(instr);
761}
762
763void Assembler::GenRegister(Opcode opcode, Register rj, FPUControlRegister fd) {
764 DCHECK((opcode == MOVGR2FCSR));
765 Instr instr = opcode | (rj.code() << kRjShift) | fd.code();
766 emit(instr);
767}
768
769void Assembler::GenRegister(Opcode opcode, FPUControlRegister fj, Register rd) {
770 DCHECK((opcode == MOVFCSR2GR));
771 Instr instr = opcode | (fj.code() << kFjShift) | rd.code();
772 emit(instr);
773}
774
775void Assembler::GenRegister(Opcode opcode, FPURegister fj, CFRegister cd) {
776 DCHECK((opcode == MOVFR2CF));
777 Instr instr = opcode | (fj.code() << kFjShift) | cd;
778 emit(instr);
779}
780
781void Assembler::GenRegister(Opcode opcode, CFRegister cj, FPURegister fd) {
782 DCHECK((opcode == MOVCF2FR));
783 Instr instr = opcode | cj << kFjShift | fd.code();
784 emit(instr);
785}
786
787void Assembler::GenRegister(Opcode opcode, Register rj, CFRegister cd) {
788 DCHECK((opcode == MOVGR2CF));
789 Instr instr = opcode | (rj.code() << kRjShift) | cd;
790 emit(instr);
791}
792
793void Assembler::GenRegister(Opcode opcode, CFRegister cj, Register rd) {
794 DCHECK((opcode == MOVCF2GR));
795 Instr instr = opcode | cj << kFjShift | rd.code();
796 emit(instr);
797}
798
799void Assembler::GenRegister(Opcode opcode, Register rk, Register rj,
800 Register rd) {
801 Instr instr =
802 opcode | (rk.code() << kRkShift) | (rj.code() << kRjShift) | rd.code();
803 emit(instr);
804}
805
806void Assembler::GenRegister(Opcode opcode, FPURegister fk, FPURegister fj,
807 FPURegister fd) {
808 Instr instr =
809 opcode | (fk.code() << kFkShift) | (fj.code() << kFjShift) | fd.code();
810 emit(instr);
811}
812
813void Assembler::GenRegister(Opcode opcode, FPURegister fa, FPURegister fk,
814 FPURegister fj, FPURegister fd) {
815 Instr instr = opcode | (fa.code() << kFaShift) | (fk.code() << kFkShift) |
816 (fj.code() << kFjShift) | fd.code();
817 emit(instr);
818}
819
820void Assembler::GenRegister(Opcode opcode, Register rk, Register rj,
821 FPURegister fd) {
822 Instr instr =
823 opcode | (rk.code() << kRkShift) | (rj.code() << kRjShift) | fd.code();
824 emit(instr);
825}
826
827void Assembler::GenImm(Opcode opcode, int32_t bit3, Register rk, Register rj,
828 Register rd) {
829 DCHECK(is_uint3(bit3));
830 Instr instr = opcode | (bit3 & 0x7) << kSaShift | (rk.code() << kRkShift) |
831 (rj.code() << kRjShift) | rd.code();
832 emit(instr);
833}
834
835void Assembler::GenImm(Opcode opcode, int32_t bit6m, int32_t bit6l, Register rj,
836 Register rd) {
837 DCHECK(is_uint6(bit6m) && is_uint6(bit6l));
838 Instr instr = opcode | (bit6m & 0x3f) << 16 | (bit6l & 0x3f) << kRkShift |
839 (rj.code() << kRjShift) | rd.code();
840 emit(instr);
841}
842
843void Assembler::GenImm(Opcode opcode, int32_t bit20, Register rd) {
844 // DCHECK(is_uint20(bit20) || is_int20(bit20));
845 Instr instr = opcode | (bit20 & 0xfffff) << kRjShift | rd.code();
846 emit(instr);
847}
848
849void Assembler::GenImm(Opcode opcode, int32_t bit15) {
850 DCHECK(is_uint15(bit15));
851 Instr instr = opcode | (bit15 & 0x7fff);
852 emit(instr);
853}
854
855void Assembler::GenImm(Opcode opcode, int32_t value, Register rj, Register rd,
856 int32_t value_bits) {
857 DCHECK(value_bits == 6 || value_bits == 12 || value_bits == 14 ||
858 value_bits == 16);
859 uint32_t imm = value & 0x3f;
860 if (value_bits == 12) {
861 imm = value & kImm12Mask;
862 } else if (value_bits == 14) {
863 imm = value & 0x3fff;
864 } else if (value_bits == 16) {
865 imm = value & kImm16Mask;
866 }
867 Instr instr = opcode | imm << kRkShift | (rj.code() << kRjShift) | rd.code();
868 emit(instr);
869}
870
871void Assembler::GenImm(Opcode opcode, int32_t bit12, Register rj,
872 FPURegister fd) {
873 DCHECK(is_int12(bit12));
874 Instr instr = opcode | ((bit12 & kImm12Mask) << kRkShift) |
875 (rj.code() << kRjShift) | fd.code();
876 emit(instr);
877}
878
879// Returns the next free trampoline entry.
880int32_t Assembler::get_trampoline_entry(int32_t pos) {
881 int32_t trampoline_entry = kInvalidSlotPos;
882 if (!internal_trampoline_exception_) {
883 if (trampoline_.start() > pos) {
884 trampoline_entry = trampoline_.take_slot();
885 }
886
887 if (kInvalidSlotPos == trampoline_entry) {
888 internal_trampoline_exception_ = true;
889 }
890 }
891 return trampoline_entry;
892}
893
894uint64_t Assembler::jump_address(Label* L) {
895 int64_t target_pos;
896 if (L->is_bound()) {
897 target_pos = L->pos();
898 } else {
899 if (L->is_linked()) {
900 target_pos = L->pos(); // L's link.
901 L->link_to(pc_offset());
902 } else {
903 L->link_to(pc_offset());
904 return kEndOfJumpChain;
905 }
906 }
907 uint64_t imm = reinterpret_cast<uint64_t>(buffer_start_) + target_pos;
908 DCHECK_EQ(imm & 3, 0);
909
910 return imm;
911}
912
913uint64_t Assembler::branch_long_offset(Label* L) {
914 int64_t target_pos;
915
916 if (L->is_bound()) {
917 target_pos = L->pos();
918 } else {
919 if (L->is_linked()) {
920 target_pos = L->pos(); // L's link.
921 L->link_to(pc_offset());
922 } else {
923 L->link_to(pc_offset());
924 return kEndOfJumpChain;
925 }
926 }
927 int64_t offset = target_pos - pc_offset();
928 DCHECK_EQ(offset & 3, 0);
929
930 return static_cast<uint64_t>(offset);
931}
932
933int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
934 int32_t target_pos;
935
936 if (L->is_bound()) {
937 target_pos = L->pos();
938 } else {
939 if (L->is_linked()) {
940 target_pos = L->pos();
941 L->link_to(pc_offset());
942 } else {
943 L->link_to(pc_offset());
944 if (!trampoline_emitted_) {
945 unbound_labels_count_++;
946 next_buffer_check_ -= kTrampolineSlotsSize;
947 }
948 return kEndOfChain;
949 }
950 }
951
952 int32_t offset = target_pos - pc_offset();
953 DCHECK(is_intn(offset, bits + 2));
954 DCHECK_EQ(offset & 3, 0);
955
956 return offset;
957}
958
959void Assembler::label_at_put(Label* L, int at_offset) {
960 int target_pos;
961 if (L->is_bound()) {
962 target_pos = L->pos();
963 instr_at_put(at_offset, target_pos + (InstructionStream::kHeaderSize -
964 kHeapObjectTag));
965 } else {
966 if (L->is_linked()) {
967 target_pos = L->pos(); // L's link.
968 int32_t imm18 = target_pos - at_offset;
969 DCHECK_EQ(imm18 & 3, 0);
970 int32_t imm16 = imm18 >> 2;
971 DCHECK(is_int16(imm16));
972 instr_at_put(at_offset, (imm16 & kImm16Mask));
973 } else {
974 target_pos = kEndOfChain;
975 instr_at_put(at_offset, 0);
976 if (!trampoline_emitted_) {
977 unbound_labels_count_++;
978 next_buffer_check_ -= kTrampolineSlotsSize;
979 }
980 }
981 L->link_to(at_offset);
982 }
983}
984
985//------- Branch and jump instructions --------
986
987void Assembler::b(int32_t offset) { GenB(B, offset); }
988
989void Assembler::bl(int32_t offset) { GenB(BL, offset); }
990
991void Assembler::beq(Register rj, Register rd, int32_t offset) {
992 GenBJ(BEQ, rj, rd, offset);
993}
994
995void Assembler::bne(Register rj, Register rd, int32_t offset) {
996 GenBJ(BNE, rj, rd, offset);
997}
998
999void Assembler::blt(Register rj, Register rd, int32_t offset) {
1000 GenBJ(BLT, rj, rd, offset);
1001}
1002
1003void Assembler::bge(Register rj, Register rd, int32_t offset) {
1004 GenBJ(BGE, rj, rd, offset);
1005}
1006
1007void Assembler::bltu(Register rj, Register rd, int32_t offset) {
1008 GenBJ(BLTU, rj, rd, offset);
1009}
1010
1011void Assembler::bgeu(Register rj, Register rd, int32_t offset) {
1012 GenBJ(BGEU, rj, rd, offset);
1013}
1014
1015void Assembler::beqz(Register rj, int32_t offset) { GenB(BEQZ, rj, offset); }
1016void Assembler::bnez(Register rj, int32_t offset) { GenB(BNEZ, rj, offset); }
1017
1018void Assembler::jirl(Register rd, Register rj, int32_t offset) {
1019 GenBJ(JIRL, rj, rd, offset);
1020}
1021
1022void Assembler::bceqz(CFRegister cj, int32_t si21) {
1023 GenB(BCZ, cj, si21, true);
1024}
1025
1026void Assembler::bcnez(CFRegister cj, int32_t si21) {
1027 GenB(BCZ, cj, si21, false);
1028}
1029
1030// -------Data-processing-instructions---------
1031
1032// Arithmetic.
// Three-register integer arithmetic; _w forms operate on 32-bit values,
// _d forms on 64-bit. Operands are passed to GenRegister as (rk, rj, rd).
void Assembler::add_w(Register rd, Register rj, Register rk) {
  GenRegister(ADD_W, rk, rj, rd);
}

void Assembler::add_d(Register rd, Register rj, Register rk) {
  GenRegister(ADD_D, rk, rj, rd);
}

void Assembler::sub_w(Register rd, Register rj, Register rk) {
  GenRegister(SUB_W, rk, rj, rd);
}

void Assembler::sub_d(Register rd, Register rj, Register rk) {
  GenRegister(SUB_D, rk, rj, rd);
}

// Immediate adds: si12 is a signed 12-bit immediate (field width 12).
void Assembler::addi_w(Register rd, Register rj, int32_t si12) {
  GenImm(ADDI_W, si12, rj, rd, 12);
}

void Assembler::addi_d(Register rd, Register rj, int32_t si12) {
  GenImm(ADDI_D, si12, rj, rd, 12);
}

// Adds si16 << 16 to rj (per the instruction name); si16 occupies a 16-bit
// field.
void Assembler::addu16i_d(Register rd, Register rj, int32_t si16) {
  GenImm(ADDU16I_D, si16, rj, rd, 16);
}

// Shift-and-add (rd = (rj << sa2) + rk). The architectural shift amount sa2
// is in [1, 4]; the encoded field stores sa2 - 1 (hence the is_uint2 check).
void Assembler::alsl_w(Register rd, Register rj, Register rk, int32_t sa2) {
  DCHECK(is_uint2(sa2 - 1));
  GenImm(ALSL_W, sa2 - 1, rk, rj, rd);
}

// Unsigned-word variant. Encodes sa2 + 3 rather than sa2 - 1 — presumably
// ALSL_WU shares encoding space with ALSL_W and the extra bit selects the
// unsigned form; confirm against the LoongArch ISA manual.
void Assembler::alsl_wu(Register rd, Register rj, Register rk, int32_t sa2) {
  DCHECK(is_uint2(sa2 - 1));
  GenImm(ALSL_WU, sa2 + 3, rk, rj, rd);
}

void Assembler::alsl_d(Register rd, Register rj, Register rk, int32_t sa2) {
  DCHECK(is_uint2(sa2 - 1));
  GenImm(ALSL_D, sa2 - 1, rk, rj, rd);
}

// Wide-immediate loads: build up a 64-bit constant 20/20/12 bits at a time.
void Assembler::lu12i_w(Register rd, int32_t si20) {
  GenImm(LU12I_W, si20, rd);
}

void Assembler::lu32i_d(Register rd, int32_t si20) {
  GenImm(LU32I_D, si20, rd);
}

void Assembler::lu52i_d(Register rd, Register rj, int32_t si12) {
  GenImm(LU52I_D, si12, rj, rd, 12);
}

// Set-on-less-than, signed/unsigned, register and immediate forms.
void Assembler::slt(Register rd, Register rj, Register rk) {
  GenRegister(SLT, rk, rj, rd);
}

void Assembler::sltu(Register rd, Register rj, Register rk) {
  GenRegister(SLTU, rk, rj, rd);
}

void Assembler::slti(Register rd, Register rj, int32_t si12) {
  GenImm(SLTI, si12, rj, rd, 12);
}

void Assembler::sltui(Register rd, Register rj, int32_t si12) {
  GenImm(SLTUI, si12, rj, rd, 12);
}

// PC-relative address computation family (20-bit immediates).
void Assembler::pcaddi(Register rd, int32_t si20) { GenImm(PCADDI, si20, rd); }

void Assembler::pcaddu12i(Register rd, int32_t si20) {
  GenImm(PCADDU12I, si20, rd);
}

void Assembler::pcaddu18i(Register rd, int32_t si20) {
  GenImm(PCADDU18I, si20, rd);
}

void Assembler::pcalau12i(Register rd, int32_t si20) {
  GenImm(PCALAU12I, si20, rd);
}
1117
// Bitwise logical operations. The trailing underscore on and_/or_/xor_
// avoids colliding with the C++ alternative operator keywords.
void Assembler::and_(Register rd, Register rj, Register rk) {
  GenRegister(AND, rk, rj, rd);
}

void Assembler::or_(Register rd, Register rj, Register rk) {
  GenRegister(OR, rk, rj, rd);
}

void Assembler::xor_(Register rd, Register rj, Register rk) {
  GenRegister(XOR, rk, rj, rd);
}

void Assembler::nor(Register rd, Register rj, Register rk) {
  GenRegister(NOR, rk, rj, rd);
}

// andn/orn: second operand is bitwise-inverted (rd = rj op ~rk).
void Assembler::andn(Register rd, Register rj, Register rk) {
  GenRegister(ANDN, rk, rj, rd);
}

void Assembler::orn(Register rd, Register rj, Register rk) {
  GenRegister(ORN, rk, rj, rd);
}

// Immediate forms take an unsigned 12-bit immediate (ui12).
void Assembler::andi(Register rd, Register rj, int32_t ui12) {
  GenImm(ANDI, ui12, rj, rd, 12);
}

void Assembler::ori(Register rd, Register rj, int32_t ui12) {
  GenImm(ORI, ui12, rj, rd, 12);
}

void Assembler::xori(Register rd, Register rj, int32_t ui12) {
  GenImm(XORI, ui12, rj, rd, 12);
}
1153
// Multiply family: mul_* gives the low half of the product, mulh_* the high
// half (signed / _u unsigned). mulw_d_w* widen 32-bit operands to a 64-bit
// product.
void Assembler::mul_w(Register rd, Register rj, Register rk) {
  GenRegister(MUL_W, rk, rj, rd);
}

void Assembler::mulh_w(Register rd, Register rj, Register rk) {
  GenRegister(MULH_W, rk, rj, rd);
}

void Assembler::mulh_wu(Register rd, Register rj, Register rk) {
  GenRegister(MULH_WU, rk, rj, rd);
}

void Assembler::mul_d(Register rd, Register rj, Register rk) {
  GenRegister(MUL_D, rk, rj, rd);
}

void Assembler::mulh_d(Register rd, Register rj, Register rk) {
  GenRegister(MULH_D, rk, rj, rd);
}

void Assembler::mulh_du(Register rd, Register rj, Register rk) {
  GenRegister(MULH_DU, rk, rj, rd);
}

void Assembler::mulw_d_w(Register rd, Register rj, Register rk) {
  GenRegister(MULW_D_W, rk, rj, rd);
}

void Assembler::mulw_d_wu(Register rd, Register rj, Register rk) {
  GenRegister(MULW_D_WU, rk, rj, rd);
}

// Divide / remainder family, signed and unsigned, 32- and 64-bit.
void Assembler::div_w(Register rd, Register rj, Register rk) {
  GenRegister(DIV_W, rk, rj, rd);
}

void Assembler::mod_w(Register rd, Register rj, Register rk) {
  GenRegister(MOD_W, rk, rj, rd);
}

void Assembler::div_wu(Register rd, Register rj, Register rk) {
  GenRegister(DIV_WU, rk, rj, rd);
}

void Assembler::mod_wu(Register rd, Register rj, Register rk) {
  GenRegister(MOD_WU, rk, rj, rd);
}

void Assembler::div_d(Register rd, Register rj, Register rk) {
  GenRegister(DIV_D, rk, rj, rd);
}

void Assembler::mod_d(Register rd, Register rj, Register rk) {
  GenRegister(MOD_D, rk, rj, rd);
}

void Assembler::div_du(Register rd, Register rj, Register rk) {
  GenRegister(DIV_DU, rk, rj, rd);
}

void Assembler::mod_du(Register rd, Register rj, Register rk) {
  GenRegister(MOD_DU, rk, rj, rd);
}
1217
1218// Shifts.
// Variable (register) shifts, 32-bit forms.
void Assembler::sll_w(Register rd, Register rj, Register rk) {
  GenRegister(SLL_W, rk, rj, rd);
}

void Assembler::srl_w(Register rd, Register rj, Register rk) {
  GenRegister(SRL_W, rk, rj, rd);
}

void Assembler::sra_w(Register rd, Register rj, Register rk) {
  GenRegister(SRA_W, rk, rj, rd);
}

void Assembler::rotr_w(Register rd, Register rj, Register rk) {
  GenRegister(ROTR_W, rk, rj, rd);
}

// Immediate 32-bit shifts: ui5 is a 5-bit shift amount, but the encoded
// field is 6 bits wide with the value 0x20 | ui5 — presumably the high bit
// distinguishes the W form within a shared opcode; confirm against the
// LoongArch ISA manual.
void Assembler::slli_w(Register rd, Register rj, int32_t ui5) {
  DCHECK(is_uint5(ui5));
  GenImm(SLLI_W, ui5 + 0x20, rj, rd, 6);
}

void Assembler::srli_w(Register rd, Register rj, int32_t ui5) {
  DCHECK(is_uint5(ui5));
  GenImm(SRLI_W, ui5 + 0x20, rj, rd, 6);
}

void Assembler::srai_w(Register rd, Register rj, int32_t ui5) {
  DCHECK(is_uint5(ui5));
  GenImm(SRAI_W, ui5 + 0x20, rj, rd, 6);
}

void Assembler::rotri_w(Register rd, Register rj, int32_t ui5) {
  DCHECK(is_uint5(ui5));
  GenImm(ROTRI_W, ui5 + 0x20, rj, rd, 6);
}

// Variable (register) shifts, 64-bit forms.
void Assembler::sll_d(Register rd, Register rj, Register rk) {
  GenRegister(SLL_D, rk, rj, rd);
}

void Assembler::srl_d(Register rd, Register rj, Register rk) {
  GenRegister(SRL_D, rk, rj, rd);
}

void Assembler::sra_d(Register rd, Register rj, Register rk) {
  GenRegister(SRA_D, rk, rj, rd);
}

void Assembler::rotr_d(Register rd, Register rj, Register rk) {
  GenRegister(ROTR_D, rk, rj, rd);
}

// Immediate 64-bit shifts: ui6 is encoded directly in the 6-bit field.
void Assembler::slli_d(Register rd, Register rj, int32_t ui6) {
  GenImm(SLLI_D, ui6, rj, rd, 6);
}

void Assembler::srli_d(Register rd, Register rj, int32_t ui6) {
  GenImm(SRLI_D, ui6, rj, rd, 6);
}

void Assembler::srai_d(Register rd, Register rj, int32_t ui6) {
  GenImm(SRAI_D, ui6, rj, rd, 6);
}

void Assembler::rotri_d(Register rd, Register rj, int32_t ui6) {
  GenImm(ROTRI_D, ui6, rj, rd, 6);
}
1286
1287// Bit twiddling.
// Sign-extend byte / halfword to word.
void Assembler::ext_w_b(Register rd, Register rj) {
  GenRegister(EXT_W_B, rj, rd);
}

void Assembler::ext_w_h(Register rd, Register rj) {
  GenRegister(EXT_W_H, rj, rd);
}

// Count leading/trailing ones/zeros, 32- and 64-bit.
void Assembler::clo_w(Register rd, Register rj) { GenRegister(CLO_W, rj, rd); }

void Assembler::clz_w(Register rd, Register rj) { GenRegister(CLZ_W, rj, rd); }

void Assembler::cto_w(Register rd, Register rj) { GenRegister(CTO_W, rj, rd); }

void Assembler::ctz_w(Register rd, Register rj) { GenRegister(CTZ_W, rj, rd); }

void Assembler::clo_d(Register rd, Register rj) { GenRegister(CLO_D, rj, rd); }

void Assembler::clz_d(Register rd, Register rj) { GenRegister(CLZ_D, rj, rd); }

void Assembler::cto_d(Register rd, Register rj) { GenRegister(CTO_D, rj, rd); }

void Assembler::ctz_d(Register rd, Register rj) { GenRegister(CTZ_D, rj, rd); }

// Byte-pick: concatenate rj:rk and select a byte-aligned window; sa gives
// the byte offset (2-bit field for _w, 3-bit for _d).
void Assembler::bytepick_w(Register rd, Register rj, Register rk, int32_t sa2) {
  DCHECK(is_uint2(sa2));
  GenImm(BYTEPICK_W, sa2, rk, rj, rd);
}

void Assembler::bytepick_d(Register rd, Register rj, Register rk, int32_t sa3) {
  GenImm(BYTEPICK_D, sa3, rk, rj, rd);
}

// Byte / halfword reversal within various element widths.
void Assembler::revb_2h(Register rd, Register rj) {
  GenRegister(REVB_2H, rj, rd);
}

void Assembler::revb_4h(Register rd, Register rj) {
  GenRegister(REVB_4H, rj, rd);
}

void Assembler::revb_2w(Register rd, Register rj) {
  GenRegister(REVB_2W, rj, rd);
}

void Assembler::revb_d(Register rd, Register rj) {
  GenRegister(REVB_D, rj, rd);
}

void Assembler::revh_2w(Register rd, Register rj) {
  GenRegister(REVH_2W, rj, rd);
}

void Assembler::revh_d(Register rd, Register rj) {
  GenRegister(REVH_D, rj, rd);
}

// Bit reversal within bytes / words / doublewords.
void Assembler::bitrev_4b(Register rd, Register rj) {
  GenRegister(BITREV_4B, rj, rd);
}

void Assembler::bitrev_8b(Register rd, Register rj) {
  GenRegister(BITREV_8B, rj, rd);
}

void Assembler::bitrev_w(Register rd, Register rj) {
  GenRegister(BITREV_W, rj, rd);
}

void Assembler::bitrev_d(Register rd, Register rj) {
  GenRegister(BITREV_D, rj, rd);
}

// Bit-string insert/extract. The 32-bit forms share the BSTR_W opcode:
// bstrins_w sets bit 5 (0x20) in the msb field only, bstrpick_w sets it in
// both fields, which is how the two operations are told apart.
void Assembler::bstrins_w(Register rd, Register rj, int32_t msbw,
                          int32_t lsbw) {
  DCHECK(is_uint5(msbw) && is_uint5(lsbw));
  GenImm(BSTR_W, msbw + 0x20, lsbw, rj, rd);
}

void Assembler::bstrins_d(Register rd, Register rj, int32_t msbd,
                          int32_t lsbd) {
  GenImm(BSTRINS_D, msbd, lsbd, rj, rd);
}

void Assembler::bstrpick_w(Register rd, Register rj, int32_t msbw,
                           int32_t lsbw) {
  DCHECK(is_uint5(msbw) && is_uint5(lsbw));
  GenImm(BSTR_W, msbw + 0x20, lsbw + 0x20, rj, rd);
}

void Assembler::bstrpick_d(Register rd, Register rj, int32_t msbd,
                           int32_t lsbd) {
  GenImm(BSTRPICK_D, msbd, lsbd, rj, rd);
}

// Conditional masking: zero rd depending on whether rk is (non)zero.
void Assembler::maskeqz(Register rd, Register rj, Register rk) {
  GenRegister(MASKEQZ, rk, rj, rd);
}

void Assembler::masknez(Register rd, Register rj, Register rk) {
  GenRegister(MASKNEZ, rk, rj, rd);
}
1390
1391// Memory-instructions
// Loads with a signed 12-bit byte offset. _b/_h/_w sign-extend; the _u
// variants zero-extend.
void Assembler::ld_b(Register rd, Register rj, int32_t si12) {
  GenImm(LD_B, si12, rj, rd, 12);
}

void Assembler::ld_h(Register rd, Register rj, int32_t si12) {
  GenImm(LD_H, si12, rj, rd, 12);
}

void Assembler::ld_w(Register rd, Register rj, int32_t si12) {
  GenImm(LD_W, si12, rj, rd, 12);
}

void Assembler::ld_d(Register rd, Register rj, int32_t si12) {
  GenImm(LD_D, si12, rj, rd, 12);
}

void Assembler::ld_bu(Register rd, Register rj, int32_t si12) {
  GenImm(LD_BU, si12, rj, rd, 12);
}

void Assembler::ld_hu(Register rd, Register rj, int32_t si12) {
  GenImm(LD_HU, si12, rj, rd, 12);
}

void Assembler::ld_wu(Register rd, Register rj, int32_t si12) {
  GenImm(LD_WU, si12, rj, rd, 12);
}

// Stores with a signed 12-bit byte offset.
void Assembler::st_b(Register rd, Register rj, int32_t si12) {
  GenImm(ST_B, si12, rj, rd, 12);
}

void Assembler::st_h(Register rd, Register rj, int32_t si12) {
  GenImm(ST_H, si12, rj, rd, 12);
}

void Assembler::st_w(Register rd, Register rj, int32_t si12) {
  GenImm(ST_W, si12, rj, rd, 12);
}

void Assembler::st_d(Register rd, Register rj, int32_t si12) {
  GenImm(ST_D, si12, rj, rd, 12);
}

// Indexed (register + register) loads and stores.
void Assembler::ldx_b(Register rd, Register rj, Register rk) {
  GenRegister(LDX_B, rk, rj, rd);
}

void Assembler::ldx_h(Register rd, Register rj, Register rk) {
  GenRegister(LDX_H, rk, rj, rd);
}

void Assembler::ldx_w(Register rd, Register rj, Register rk) {
  GenRegister(LDX_W, rk, rj, rd);
}

void Assembler::ldx_d(Register rd, Register rj, Register rk) {
  GenRegister(LDX_D, rk, rj, rd);
}

void Assembler::ldx_bu(Register rd, Register rj, Register rk) {
  GenRegister(LDX_BU, rk, rj, rd);
}

void Assembler::ldx_hu(Register rd, Register rj, Register rk) {
  GenRegister(LDX_HU, rk, rj, rd);
}

void Assembler::ldx_wu(Register rd, Register rj, Register rk) {
  GenRegister(LDX_WU, rk, rj, rd);
}

void Assembler::stx_b(Register rd, Register rj, Register rk) {
  GenRegister(STX_B, rk, rj, rd);
}

void Assembler::stx_h(Register rd, Register rj, Register rk) {
  GenRegister(STX_H, rk, rj, rd);
}

void Assembler::stx_w(Register rd, Register rj, Register rk) {
  GenRegister(STX_W, rk, rj, rd);
}

void Assembler::stx_d(Register rd, Register rj, Register rk) {
  GenRegister(STX_D, rk, rj, rd);
}

// Pointer loads/stores: si14 is a byte offset that must be 4-byte aligned
// and fit in 16 bits; it is stored right-shifted by 2 in a 14-bit field.
void Assembler::ldptr_w(Register rd, Register rj, int32_t si14) {
  DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
  GenImm(LDPTR_W, si14 >> 2, rj, rd, 14);
}

void Assembler::ldptr_d(Register rd, Register rj, int32_t si14) {
  DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
  GenImm(LDPTR_D, si14 >> 2, rj, rd, 14);
}

void Assembler::stptr_w(Register rd, Register rj, int32_t si14) {
  DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
  GenImm(STPTR_W, si14 >> 2, rj, rd, 14);
}

void Assembler::stptr_d(Register rd, Register rj, int32_t si14) {
  DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
  GenImm(STPTR_D, si14 >> 2, rj, rd, 14);
}
1499
// Atomic read-modify-write (AM*) family. Note the parameter order here is
// (rd, rk, rj) — unlike the (rd, rj, rk) order used by the emitters above —
// though the arguments forwarded to GenRegister are arranged identically.
// The _db variants additionally carry a data-barrier hint (per the mnemonic;
// confirm semantics against the LoongArch ISA manual).
void Assembler::amswap_w(Register rd, Register rk, Register rj) {
  GenRegister(AMSWAP_W, rk, rj, rd);
}

void Assembler::amswap_d(Register rd, Register rk, Register rj) {
  GenRegister(AMSWAP_D, rk, rj, rd);
}

void Assembler::amadd_w(Register rd, Register rk, Register rj) {
  GenRegister(AMADD_W, rk, rj, rd);
}

void Assembler::amadd_d(Register rd, Register rk, Register rj) {
  GenRegister(AMADD_D, rk, rj, rd);
}

void Assembler::amand_w(Register rd, Register rk, Register rj) {
  GenRegister(AMAND_W, rk, rj, rd);
}

void Assembler::amand_d(Register rd, Register rk, Register rj) {
  GenRegister(AMAND_D, rk, rj, rd);
}

void Assembler::amor_w(Register rd, Register rk, Register rj) {
  GenRegister(AMOR_W, rk, rj, rd);
}

void Assembler::amor_d(Register rd, Register rk, Register rj) {
  GenRegister(AMOR_D, rk, rj, rd);
}

void Assembler::amxor_w(Register rd, Register rk, Register rj) {
  GenRegister(AMXOR_W, rk, rj, rd);
}

void Assembler::amxor_d(Register rd, Register rk, Register rj) {
  GenRegister(AMXOR_D, rk, rj, rd);
}

void Assembler::ammax_w(Register rd, Register rk, Register rj) {
  GenRegister(AMMAX_W, rk, rj, rd);
}

void Assembler::ammax_d(Register rd, Register rk, Register rj) {
  GenRegister(AMMAX_D, rk, rj, rd);
}

void Assembler::ammin_w(Register rd, Register rk, Register rj) {
  GenRegister(AMMIN_W, rk, rj, rd);
}

void Assembler::ammin_d(Register rd, Register rk, Register rj) {
  GenRegister(AMMIN_D, rk, rj, rd);
}

void Assembler::ammax_wu(Register rd, Register rk, Register rj) {
  GenRegister(AMMAX_WU, rk, rj, rd);
}

void Assembler::ammax_du(Register rd, Register rk, Register rj) {
  GenRegister(AMMAX_DU, rk, rj, rd);
}

void Assembler::ammin_wu(Register rd, Register rk, Register rj) {
  GenRegister(AMMIN_WU, rk, rj, rd);
}

void Assembler::ammin_du(Register rd, Register rk, Register rj) {
  GenRegister(AMMIN_DU, rk, rj, rd);
}

void Assembler::amswap_db_w(Register rd, Register rk, Register rj) {
  GenRegister(AMSWAP_DB_W, rk, rj, rd);
}

void Assembler::amswap_db_d(Register rd, Register rk, Register rj) {
  GenRegister(AMSWAP_DB_D, rk, rj, rd);
}

void Assembler::amadd_db_w(Register rd, Register rk, Register rj) {
  GenRegister(AMADD_DB_W, rk, rj, rd);
}

void Assembler::amadd_db_d(Register rd, Register rk, Register rj) {
  GenRegister(AMADD_DB_D, rk, rj, rd);
}

void Assembler::amand_db_w(Register rd, Register rk, Register rj) {
  GenRegister(AMAND_DB_W, rk, rj, rd);
}

void Assembler::amand_db_d(Register rd, Register rk, Register rj) {
  GenRegister(AMAND_DB_D, rk, rj, rd);
}

void Assembler::amor_db_w(Register rd, Register rk, Register rj) {
  GenRegister(AMOR_DB_W, rk, rj, rd);
}

void Assembler::amor_db_d(Register rd, Register rk, Register rj) {
  GenRegister(AMOR_DB_D, rk, rj, rd);
}

void Assembler::amxor_db_w(Register rd, Register rk, Register rj) {
  GenRegister(AMXOR_DB_W, rk, rj, rd);
}

void Assembler::amxor_db_d(Register rd, Register rk, Register rj) {
  GenRegister(AMXOR_DB_D, rk, rj, rd);
}

void Assembler::ammax_db_w(Register rd, Register rk, Register rj) {
  GenRegister(AMMAX_DB_W, rk, rj, rd);
}

void Assembler::ammax_db_d(Register rd, Register rk, Register rj) {
  GenRegister(AMMAX_DB_D, rk, rj, rd);
}

void Assembler::ammin_db_w(Register rd, Register rk, Register rj) {
  GenRegister(AMMIN_DB_W, rk, rj, rd);
}

void Assembler::ammin_db_d(Register rd, Register rk, Register rj) {
  GenRegister(AMMIN_DB_D, rk, rj, rd);
}

void Assembler::ammax_db_wu(Register rd, Register rk, Register rj) {
  GenRegister(AMMAX_DB_WU, rk, rj, rd);
}

void Assembler::ammax_db_du(Register rd, Register rk, Register rj) {
  GenRegister(AMMAX_DB_DU, rk, rj, rd);
}

void Assembler::ammin_db_wu(Register rd, Register rk, Register rj) {
  GenRegister(AMMIN_DB_WU, rk, rj, rd);
}

void Assembler::ammin_db_du(Register rd, Register rk, Register rj) {
  GenRegister(AMMIN_DB_DU, rk, rj, rd);
}
1643
// Load-linked / store-conditional. si14 is a byte offset that must be
// 4-byte aligned and fit in 16 bits; encoded right-shifted by 2 in a
// 14-bit field (same scheme as ldptr/stptr).
void Assembler::ll_w(Register rd, Register rj, int32_t si14) {
  DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
  GenImm(LL_W, si14 >> 2, rj, rd, 14);
}

void Assembler::ll_d(Register rd, Register rj, int32_t si14) {
  DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
  GenImm(LL_D, si14 >> 2, rj, rd, 14);
}

void Assembler::sc_w(Register rd, Register rj, int32_t si14) {
  DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
  GenImm(SC_W, si14 >> 2, rj, rd, 14);
}

void Assembler::sc_d(Register rd, Register rj, int32_t si14) {
  DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
  GenImm(SC_D, si14 >> 2, rj, rd, 14);
}

// Data / instruction barriers; `hint` is encoded directly in the immediate.
void Assembler::dbar(int32_t hint) { GenImm(DBAR, hint); }

void Assembler::ibar(int32_t hint) { GenImm(IBAR, hint); }
1667
1668// Break instruction.
// Emits a BREAK with the given code. When `break_as_stop` is set the code
// must lie inside the stop-code range (kMaxWatchpointCode, kMaxStopCode];
// otherwise it must lie outside that range — so stop codes and plain break
// codes can never be confused.
void Assembler::break_(uint32_t code, bool break_as_stop) {
  DCHECK(
      (break_as_stop && code <= kMaxStopCode && code > kMaxWatchpointCode) ||
      (!break_as_stop && (code > kMaxStopCode || code <= kMaxWatchpointCode)));
  GenImm(BREAK, code);
}
1675
// Emits a stop with the given code (must be a valid stop code: in
// (kMaxWatchpointCode, kMaxStopCode]).
void Assembler::stop(uint32_t code) {
  DCHECK_GT(code, kMaxWatchpointCode);
  DCHECK_LE(code, kMaxStopCode);
#if defined(V8_HOST_ARCH_LOONG64)
  // On real hardware the stop code cannot be interpreted, so emit a fixed
  // break code instead; `code` is ignored here.
  break_(0x4321);
#else  // V8_HOST_ARCH_LOONG64
  // Cross-compile / simulator build: encode the code as a stop-range break
  // (presumably decoded by the simulator — confirm against simulator code).
  break_(code, true);
#endif
}
1685
// Floating-point arithmetic; _s is single precision, _d double precision.
// Operands are forwarded to GenRegister as (fk, fj, fd).
void Assembler::fadd_s(FPURegister fd, FPURegister fj, FPURegister fk) {
  GenRegister(FADD_S, fk, fj, fd);
}

void Assembler::fadd_d(FPURegister fd, FPURegister fj, FPURegister fk) {
  GenRegister(FADD_D, fk, fj, fd);
}

void Assembler::fsub_s(FPURegister fd, FPURegister fj, FPURegister fk) {
  GenRegister(FSUB_S, fk, fj, fd);
}

void Assembler::fsub_d(FPURegister fd, FPURegister fj, FPURegister fk) {
  GenRegister(FSUB_D, fk, fj, fd);
}

void Assembler::fmul_s(FPURegister fd, FPURegister fj, FPURegister fk) {
  GenRegister(FMUL_S, fk, fj, fd);
}

void Assembler::fmul_d(FPURegister fd, FPURegister fj, FPURegister fk) {
  GenRegister(FMUL_D, fk, fj, fd);
}

void Assembler::fdiv_s(FPURegister fd, FPURegister fj, FPURegister fk) {
  GenRegister(FDIV_S, fk, fj, fd);
}

void Assembler::fdiv_d(FPURegister fd, FPURegister fj, FPURegister fk) {
  GenRegister(FDIV_D, fk, fj, fd);
}

// Fused multiply-add family: four FP operands, forwarded as (fa, fk, fj, fd).
void Assembler::fmadd_s(FPURegister fd, FPURegister fj, FPURegister fk,
                        FPURegister fa) {
  GenRegister(FMADD_S, fa, fk, fj, fd);
}

void Assembler::fmadd_d(FPURegister fd, FPURegister fj, FPURegister fk,
                        FPURegister fa) {
  GenRegister(FMADD_D, fa, fk, fj, fd);
}

void Assembler::fmsub_s(FPURegister fd, FPURegister fj, FPURegister fk,
                        FPURegister fa) {
  GenRegister(FMSUB_S, fa, fk, fj, fd);
}

void Assembler::fmsub_d(FPURegister fd, FPURegister fj, FPURegister fk,
                        FPURegister fa) {
  GenRegister(FMSUB_D, fa, fk, fj, fd);
}

void Assembler::fnmadd_s(FPURegister fd, FPURegister fj, FPURegister fk,
                         FPURegister fa) {
  GenRegister(FNMADD_S, fa, fk, fj, fd);
}

void Assembler::fnmadd_d(FPURegister fd, FPURegister fj, FPURegister fk,
                         FPURegister fa) {
  GenRegister(FNMADD_D, fa, fk, fj, fd);
}

void Assembler::fnmsub_s(FPURegister fd, FPURegister fj, FPURegister fk,
                         FPURegister fa) {
  GenRegister(FNMSUB_S, fa, fk, fj, fd);
}

void Assembler::fnmsub_d(FPURegister fd, FPURegister fj, FPURegister fk,
                         FPURegister fa) {
  GenRegister(FNMSUB_D, fa, fk, fj, fd);
}

// Min/max; the *a variants compare by absolute value (per the mnemonic).
void Assembler::fmax_s(FPURegister fd, FPURegister fj, FPURegister fk) {
  GenRegister(FMAX_S, fk, fj, fd);
}

void Assembler::fmax_d(FPURegister fd, FPURegister fj, FPURegister fk) {
  GenRegister(FMAX_D, fk, fj, fd);
}

void Assembler::fmin_s(FPURegister fd, FPURegister fj, FPURegister fk) {
  GenRegister(FMIN_S, fk, fj, fd);
}

void Assembler::fmin_d(FPURegister fd, FPURegister fj, FPURegister fk) {
  GenRegister(FMIN_D, fk, fj, fd);
}

void Assembler::fmaxa_s(FPURegister fd, FPURegister fj, FPURegister fk) {
  GenRegister(FMAXA_S, fk, fj, fd);
}

void Assembler::fmaxa_d(FPURegister fd, FPURegister fj, FPURegister fk) {
  GenRegister(FMAXA_D, fk, fj, fd);
}

void Assembler::fmina_s(FPURegister fd, FPURegister fj, FPURegister fk) {
  GenRegister(FMINA_S, fk, fj, fd);
}

void Assembler::fmina_d(FPURegister fd, FPURegister fj, FPURegister fk) {
  GenRegister(FMINA_D, fk, fj, fd);
}
1789
// Single-operand FP operations: absolute value, negation, square root,
// reciprocal, reciprocal square root.
void Assembler::fabs_s(FPURegister fd, FPURegister fj) {
  GenRegister(FABS_S, fj, fd);
}

void Assembler::fabs_d(FPURegister fd, FPURegister fj) {
  GenRegister(FABS_D, fj, fd);
}

void Assembler::fneg_s(FPURegister fd, FPURegister fj) {
  GenRegister(FNEG_S, fj, fd);
}

void Assembler::fneg_d(FPURegister fd, FPURegister fj) {
  GenRegister(FNEG_D, fj, fd);
}

void Assembler::fsqrt_s(FPURegister fd, FPURegister fj) {
  GenRegister(FSQRT_S, fj, fd);
}

void Assembler::fsqrt_d(FPURegister fd, FPURegister fj) {
  GenRegister(FSQRT_D, fj, fd);
}

void Assembler::frecip_s(FPURegister fd, FPURegister fj) {
  GenRegister(FRECIP_S, fj, fd);
}

void Assembler::frecip_d(FPURegister fd, FPURegister fj) {
  GenRegister(FRECIP_D, fj, fd);
}

void Assembler::frsqrt_s(FPURegister fd, FPURegister fj) {
  GenRegister(FRSQRT_S, fj, fd);
}

void Assembler::frsqrt_d(FPURegister fd, FPURegister fj) {
  GenRegister(FRSQRT_D, fj, fd);
}

// Exponent manipulation and classification.
void Assembler::fscaleb_s(FPURegister fd, FPURegister fj, FPURegister fk) {
  GenRegister(FSCALEB_S, fk, fj, fd);
}

void Assembler::fscaleb_d(FPURegister fd, FPURegister fj, FPURegister fk) {
  GenRegister(FSCALEB_D, fk, fj, fd);
}

void Assembler::flogb_s(FPURegister fd, FPURegister fj) {
  GenRegister(FLOGB_S, fj, fd);
}

void Assembler::flogb_d(FPURegister fd, FPURegister fj) {
  GenRegister(FLOGB_D, fj, fd);
}

void Assembler::fcopysign_s(FPURegister fd, FPURegister fj, FPURegister fk) {
  GenRegister(FCOPYSIGN_S, fk, fj, fd);
}

void Assembler::fcopysign_d(FPURegister fd, FPURegister fj, FPURegister fk) {
  GenRegister(FCOPYSIGN_D, fk, fj, fd);
}

void Assembler::fclass_s(FPURegister fd, FPURegister fj) {
  GenRegister(FCLASS_S, fj, fd);
}

void Assembler::fclass_d(FPURegister fd, FPURegister fj) {
  GenRegister(FCLASS_D, fj, fd);
}
1861
// FP compare: evaluates `fj <cc> fk` and writes the result into condition
// flag register cd. Encoding is delegated to GenCmp.
void Assembler::fcmp_cond_s(FPUCondition cc, FPURegister fj, FPURegister fk,
                            CFRegister cd) {
  GenCmp(FCMP_COND_S, cc, fk, fj, cd);
}

void Assembler::fcmp_cond_d(FPUCondition cc, FPURegister fj, FPURegister fk,
                            CFRegister cd) {
  GenCmp(FCMP_COND_D, cc, fk, fj, cd);
}
1871
// Precision conversion between single and double.
void Assembler::fcvt_s_d(FPURegister fd, FPURegister fj) {
  GenRegister(FCVT_S_D, fj, fd);
}

void Assembler::fcvt_d_s(FPURegister fd, FPURegister fj) {
  GenRegister(FCVT_D_S, fj, fd);
}

// Integer -> float conversions (ffint_<dst>_<src>; _w word, _l long).
void Assembler::ffint_s_w(FPURegister fd, FPURegister fj) {
  GenRegister(FFINT_S_W, fj, fd);
}

void Assembler::ffint_s_l(FPURegister fd, FPURegister fj) {
  GenRegister(FFINT_S_L, fj, fd);
}

void Assembler::ffint_d_w(FPURegister fd, FPURegister fj) {
  GenRegister(FFINT_D_W, fj, fd);
}

void Assembler::ffint_d_l(FPURegister fd, FPURegister fj) {
  GenRegister(FFINT_D_L, fj, fd);
}

// Float -> integer conversions in the current rounding mode.
void Assembler::ftint_w_s(FPURegister fd, FPURegister fj) {
  GenRegister(FTINT_W_S, fj, fd);
}

void Assembler::ftint_w_d(FPURegister fd, FPURegister fj) {
  GenRegister(FTINT_W_D, fj, fd);
}

void Assembler::ftint_l_s(FPURegister fd, FPURegister fj) {
  GenRegister(FTINT_L_S, fj, fd);
}

void Assembler::ftint_l_d(FPURegister fd, FPURegister fj) {
  GenRegister(FTINT_L_D, fj, fd);
}

// Float -> integer with an explicit rounding mode encoded in the mnemonic:
// rm = toward minus infinity, rp = toward plus infinity, rz = toward zero,
// rne = to nearest even.
void Assembler::ftintrm_w_s(FPURegister fd, FPURegister fj) {
  GenRegister(FTINTRM_W_S, fj, fd);
}

void Assembler::ftintrm_w_d(FPURegister fd, FPURegister fj) {
  GenRegister(FTINTRM_W_D, fj, fd);
}

void Assembler::ftintrm_l_s(FPURegister fd, FPURegister fj) {
  GenRegister(FTINTRM_L_S, fj, fd);
}

void Assembler::ftintrm_l_d(FPURegister fd, FPURegister fj) {
  GenRegister(FTINTRM_L_D, fj, fd);
}

void Assembler::ftintrp_w_s(FPURegister fd, FPURegister fj) {
  GenRegister(FTINTRP_W_S, fj, fd);
}

void Assembler::ftintrp_w_d(FPURegister fd, FPURegister fj) {
  GenRegister(FTINTRP_W_D, fj, fd);
}

void Assembler::ftintrp_l_s(FPURegister fd, FPURegister fj) {
  GenRegister(FTINTRP_L_S, fj, fd);
}

void Assembler::ftintrp_l_d(FPURegister fd, FPURegister fj) {
  GenRegister(FTINTRP_L_D, fj, fd);
}

void Assembler::ftintrz_w_s(FPURegister fd, FPURegister fj) {
  GenRegister(FTINTRZ_W_S, fj, fd);
}

void Assembler::ftintrz_w_d(FPURegister fd, FPURegister fj) {
  GenRegister(FTINTRZ_W_D, fj, fd);
}

void Assembler::ftintrz_l_s(FPURegister fd, FPURegister fj) {
  GenRegister(FTINTRZ_L_S, fj, fd);
}

void Assembler::ftintrz_l_d(FPURegister fd, FPURegister fj) {
  GenRegister(FTINTRZ_L_D, fj, fd);
}

void Assembler::ftintrne_w_s(FPURegister fd, FPURegister fj) {
  GenRegister(FTINTRNE_W_S, fj, fd);
}

void Assembler::ftintrne_w_d(FPURegister fd, FPURegister fj) {
  GenRegister(FTINTRNE_W_D, fj, fd);
}

void Assembler::ftintrne_l_s(FPURegister fd, FPURegister fj) {
  GenRegister(FTINTRNE_L_S, fj, fd);
}

void Assembler::ftintrne_l_d(FPURegister fd, FPURegister fj) {
  GenRegister(FTINTRNE_L_D, fj, fd);
}

// Round to integral value, keeping the FP representation.
void Assembler::frint_s(FPURegister fd, FPURegister fj) {
  GenRegister(FRINT_S, fj, fd);
}

void Assembler::frint_d(FPURegister fd, FPURegister fj) {
  GenRegister(FRINT_D, fj, fd);
}

// FP register-to-register moves.
void Assembler::fmov_s(FPURegister fd, FPURegister fj) {
  GenRegister(FMOV_S, fj, fd);
}

void Assembler::fmov_d(FPURegister fd, FPURegister fj) {
  GenRegister(FMOV_D, fj, fd);
}
1991
// FP conditional select driven by condition flag `ca`; encoding is
// delegated to GenSel.
void Assembler::fsel(CFRegister ca, FPURegister fd, FPURegister fj,
                     FPURegister fk) {
  GenSel(FSEL, ca, fk, fj, fd);
}

// Moves between general registers and FPU registers. The *h variants access
// the upper word of an FPU register (per the mnemonic).
void Assembler::movgr2fr_w(FPURegister fd, Register rj) {
  GenRegister(MOVGR2FR_W, rj, fd);
}

void Assembler::movgr2fr_d(FPURegister fd, Register rj) {
  GenRegister(MOVGR2FR_D, rj, fd);
}

void Assembler::movgr2frh_w(FPURegister fd, Register rj) {
  GenRegister(MOVGR2FRH_W, rj, fd);
}

void Assembler::movfr2gr_s(Register rd, FPURegister fj) {
  GenRegister(MOVFR2GR_S, fj, rd);
}

void Assembler::movfr2gr_d(Register rd, FPURegister fj) {
  GenRegister(MOVFR2GR_D, fj, rd);
}

void Assembler::movfrh2gr_s(Register rd, FPURegister fj) {
  GenRegister(MOVFRH2GR_S, fj, rd);
}

// Moves between general registers and the FP control/status register.
void Assembler::movgr2fcsr(Register rj, FPUControlRegister fcsr) {
  GenRegister(MOVGR2FCSR, rj, fcsr);
}

void Assembler::movfcsr2gr(Register rd, FPUControlRegister fcsr) {
  GenRegister(MOVFCSR2GR, fcsr, rd);
}

// Moves involving the FP condition flag registers.
void Assembler::movfr2cf(CFRegister cd, FPURegister fj) {
  GenRegister(MOVFR2CF, fj, cd);
}

void Assembler::movcf2fr(FPURegister fd, CFRegister cj) {
  GenRegister(MOVCF2FR, cj, fd);
}

void Assembler::movgr2cf(CFRegister cd, Register rj) {
  GenRegister(MOVGR2CF, rj, cd);
}

void Assembler::movcf2gr(Register rd, CFRegister cj) {
  GenRegister(MOVCF2GR, cj, rd);
}
2044
// FP loads/stores with a 12-bit immediate byte offset.
void Assembler::fld_s(FPURegister fd, Register rj, int32_t si12) {
  GenImm(FLD_S, si12, rj, fd);
}

void Assembler::fld_d(FPURegister fd, Register rj, int32_t si12) {
  GenImm(FLD_D, si12, rj, fd);
}

void Assembler::fst_s(FPURegister fd, Register rj, int32_t si12) {
  GenImm(FST_S, si12, rj, fd);
}

void Assembler::fst_d(FPURegister fd, Register rj, int32_t si12) {
  GenImm(FST_D, si12, rj, fd);
}

// Indexed (register + register) FP loads/stores.
void Assembler::fldx_s(FPURegister fd, Register rj, Register rk) {
  GenRegister(FLDX_S, rk, rj, fd);
}

void Assembler::fldx_d(FPURegister fd, Register rj, Register rk) {
  GenRegister(FLDX_D, rk, rj, fd);
}

void Assembler::fstx_s(FPURegister fd, Register rj, Register rk) {
  GenRegister(FSTX_S, rk, rj, fd);
}

void Assembler::fstx_d(FPURegister fd, Register rj, Register rk) {
  GenRegister(FSTX_D, rk, rj, fd);
}
2076
2077void Assembler::AdjustBaseAndOffset(MemOperand* src) {
2078 // is_int12 must be passed a signed value, hence the static cast below.
2079 if ((!src->hasIndexReg() && is_int12(src->offset())) || src->hasIndexReg()) {
2080 return;
2081 }
2082 UseScratchRegisterScope temps(this);
2083 Register scratch = temps.Acquire();
2084 if (is_uint12(static_cast<int32_t>(src->offset()))) {
2085 ori(scratch, zero_reg, src->offset() & kImm12Mask);
2086 } else {
2087 lu12i_w(scratch, src->offset() >> 12 & 0xfffff);
2088 if (src->offset() & kImm12Mask) {
2089 ori(scratch, scratch, src->offset() & kImm12Mask);
2090 }
2091 }
2092 src->index_ = scratch;
2093 src->offset_ = 0;
2094}
2095
2096void Assembler::RelocateRelativeReference(
2097 RelocInfo::Mode rmode, Address pc, intptr_t pc_delta,
2098 WritableJitAllocation* jit_allocation) {
2099 DCHECK(RelocInfo::IsRelativeCodeTarget(rmode) ||
2100 RelocInfo::IsNearBuiltinEntry(rmode));
2101 Instr instr = instr_at(pc);
2103 offset = (((offset & 0x3ff) << 22 >> 6) | ((offset >> 10) & kImm16Mask)) << 2;
2104 offset -= pc_delta;
2105 offset >>= 2;
2106 offset = ((offset & kImm16Mask) << kRkShift) | ((offset & kImm26Mask) >> 16);
2107 Instr new_instr = (instr & ~kImm26Mask) | offset;
2108 instr_at_put(pc, new_instr, jit_allocation);
2109 return;
2110}
2111
// Grows the assembly buffer when it is (nearly) full, moving both the emitted
// instructions and the relocation info into the new allocation and fixing up
// every pointer that referenced the old buffer.
void Assembler::GrowBuffer() {
  // Compute new buffer size: double, but grow by at most 1 MB per step.
  int old_size = buffer_->size();
  int new_size = std::min(2 * old_size, old_size + 1 * MB);

  // Some internal data structures overflow for very large buffers,
  // they must ensure that kMaximalBufferSize is not too large.
  if (new_size > kMaximalBufferSize) {
    V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
  }

  // Set up new buffer.
  std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
  DCHECK_EQ(new_size, new_buffer->size());
  uint8_t* new_start = new_buffer->start();

  // Copy the data. Instructions live at the start of the buffer and move by
  // pc_delta; relocation info is written downward from the end of the buffer
  // and therefore moves by a different delta (rc_delta).
  intptr_t pc_delta = new_start - buffer_start_;
  intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
  size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
  MemMove(new_start, buffer_start_, pc_offset());
  MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          reloc_size);

  // Switch buffers.
  buffer_ = std::move(new_buffer);
  buffer_start_ = new_start;
  pc_ += pc_delta;
  pc_for_safepoint_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no need
  // to relocate any emitted relocation entries.

  // Relocate internal references: these are absolute pointers stored in the
  // code itself, so they must be shifted by pc_delta. The kEndOfJumpChain
  // sentinel marks an unresolved chain and is left untouched.
  for (auto pos : internal_reference_positions_) {
    Address address = reinterpret_cast<intptr_t>(buffer_start_) + pos;
    intptr_t internal_ref = ReadUnalignedValue<intptr_t>(address);
    if (internal_ref != kEndOfJumpChain) {
      internal_ref += pc_delta;
      WriteUnalignedValue<intptr_t>(address, internal_ref);
    }
  }
}
2158
2159void Assembler::db(uint8_t data) {
2160 if (!is_buffer_growth_blocked()) {
2161 CheckBuffer();
2162 }
2163 *reinterpret_cast<uint8_t*>(pc_) = data;
2164 pc_ += sizeof(uint8_t);
2165}
2166
2167void Assembler::dd(uint32_t data) {
2168 if (!is_buffer_growth_blocked()) {
2169 CheckBuffer();
2170 }
2171 *reinterpret_cast<uint32_t*>(pc_) = data;
2172 pc_ += sizeof(uint32_t);
2173}
2174
2175void Assembler::dq(uint64_t data) {
2176 if (!is_buffer_growth_blocked()) {
2177 CheckBuffer();
2178 }
2179 *reinterpret_cast<uint64_t*>(pc_) = data;
2180 pc_ += sizeof(uint64_t);
2181}
2182
2183void Assembler::dd(Label* label) {
2184 if (!is_buffer_growth_blocked()) {
2185 CheckBuffer();
2186 }
2187 uint64_t data;
2188 if (label->is_bound()) {
2189 data = reinterpret_cast<uint64_t>(buffer_start_ + label->pos());
2190 } else {
2191 data = jump_address(label);
2192 unbound_labels_count_++;
2193 internal_reference_positions_.insert(label->pos());
2194 }
2195 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2196 EmitHelper(data);
2197}
2198
2199void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
2200 if (!ShouldRecordRelocInfo(rmode)) return;
2201 // We do not try to reuse pool constants.
2202 RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data);
2203 DCHECK_GE(buffer_space(), kMaxRelocSize); // Too late to grow buffer here.
2204 reloc_info_writer.Write(&rinfo);
2205}
2206
2207void Assembler::BlockTrampolinePoolFor(int instructions) {
2208 CheckTrampolinePoolQuick(instructions);
2209 BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
2210}
2211
// Emits the trampoline pool (a block of long-range branch slots) once, when
// there are branches to still-unbound labels that might otherwise go out of
// short-branch range.
void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  // The pool is emitted at most once per Assembler.
  DCHECK(!trampoline_emitted_);
  DCHECK_GE(unbound_labels_count_, 0);
  if (unbound_labels_count_ > 0) {
    // First we emit jump (2 instructions), then we emit trampoline pool.
    {
      BlockTrampolinePoolScope block_trampoline_pool(this);
      Label after_pool;
      // Jump over the pool so fall-through execution skips it.
      b(&after_pool);
      nop();  // TODO(LOONG_dev): remove this

      int pool_start = pc_offset();
      // One two-instruction slot per branch to an unbound label.
      for (int i = 0; i < unbound_labels_count_; i++) {
        {
          b(&after_pool);
          nop();  // TODO(LOONG_dev): remove this
        }
      }
      nop();
      trampoline_ = Trampoline(pool_start, unbound_labels_count_);
      bind(&after_pool);

      trampoline_emitted_ = true;
      // As we are only going to emit trampoline once, we need to prevent any
      // further emission.
      next_buffer_check_ = kMaxInt;
    }
  } else {
    // Number of branches to unbound label at this point is zero, so we can
    // move next buffer check to maximum.
    next_buffer_check_ =
        pc_offset() + kMax16BranchOffset - kTrampolineSlotsSize * 16;
  }
  return;
}
2264
// Reads back the target address encoded at `pc`: either a pc-relative `b`
// instruction, or the 3-instruction lu12i_w/ori/lu32i_d li sequence that
// materializes a 48-bit address.
Address Assembler::target_address_at(Address pc) {
  Instr instr0 = instr_at(pc);
  if (IsB(instr0)) {
    // Decode the split 26-bit branch offset: offs[15:0] sits in bits [25:10],
    // offs[25:16] in bits [9:0]; `<< 22 >> 6` sign-extends, << 2 converts
    // instructions to bytes.
    int32_t offset = instr0 & kImm26Mask;
    offset = (((offset & 0x3ff) << 22 >> 6) | ((offset >> 10) & kImm16Mask))
             << 2;
    return pc + offset;
  }
  Instr instr1 = instr_at(pc + 1 * kInstrSize);
  Instr instr2 = instr_at(pc + 2 * kInstrSize);

  // Interpret 3 instructions for address generated by li: See listing in
  // Assembler::set_target_address_at() just below.
  DCHECK((IsLu12i_w(instr0) && (IsOri(instr1)) && (IsLu32i_d(instr2))));

  // Assemble the 48 bit value: lu32i_d supplies bits [47:32], lu12i_w bits
  // [31:12], ori bits [11:0].
  uint64_t hi20 = ((uint64_t)(instr2 >> 5) & 0xfffff) << 32;
  uint64_t mid20 = ((uint64_t)(instr0 >> 5) & 0xfffff) << 12;
  uint64_t low12 = ((uint64_t)(instr1 >> 10) & 0xfff);
  int64_t addr = static_cast<int64_t>(hi20 | mid20 | low12);

  // Sign extend to get canonical address (bits [63:48] replicate bit 47).
  addr = (addr << 16) >> 16;
  return static_cast<Address>(addr);
}
2290
2291uint32_t Assembler::target_compressed_address_at(Address pc) {
2292 Instr instr0 = instr_at(pc);
2293 Instr instr1 = instr_at(pc + 1 * kInstrSize);
2294
2295 // Interpret 2 instructions for address generated by li: See listing in
2296 // Assembler::set_target_compressed_value_at just below.
2297 DCHECK((IsLu12i_w(instr0) && (IsOri(instr1))));
2298
2299 // Assemble the 32 bit value.
2300 uint32_t hi20 = ((uint32_t)(instr0 >> 5) & 0xfffff) << 12;
2301 uint32_t low12 = ((uint32_t)(instr1 >> 10) & 0xfff);
2302 uint32_t addr = static_cast<uint32_t>(hi20 | low12);
2303
2304 return addr;
2305}
2306
// On loong64, a target address is stored in a 3-instruction sequence:
// 0: lu12i_w(rd, (j.imm64_ >> 12) & kImm20Mask);
// 1: ori(rd, rd, j.imm64_ & kImm12Mask);
// 2: lu32i_d(rd, (j.imm64_ >> 32) & kImm20Mask);
//
// Patching the address must replace all the lui & ori instructions,
// and flush the i-cache.
//
void Assembler::set_target_value_at(Address pc, uint64_t target,
                                    WritableJitAllocation* jit_allocation,
                                    ICacheFlushMode icache_flush_mode) {
  // There is an optimization where only 3 instructions are used to load address
  // in code on LOONG64 because only 48-bits of address is effectively used.
  // It relies on fact the upper [63:48] bits are not used for virtual address
  // translation and they have to be set according to value of bit 47 in order
  // get canonical address.
#ifdef DEBUG
  // Check we have the result from a li macro-instruction.
  Instr instr0 = instr_at(pc);
  Instr instr1 = instr_at(pc + kInstrSize);
  Instr instr2 = instr_at(pc + kInstrSize * 2);
  DCHECK((IsLu12i_w(instr0) && IsOri(instr1) && IsLu32i_d(instr2)) ||
         IsB(instr0));
#endif

  Instr instr = instr_at(pc);
  if (IsB(instr)) {
    // Patch a pc-relative branch instead: re-encode (target - pc) into the
    // split imm26 field (offs[15:0] at bits [25:10], offs[25:16] at [9:0]).
    int32_t offset = (target - pc) >> 2;
    CHECK(is_int26(offset));
    offset =
        ((offset & kImm16Mask) << kRkShift) | ((offset & kImm26Mask) >> 16);
    Instr new_instr = (instr & ~kImm26Mask) | offset;
    instr_at_put(pc, new_instr, jit_allocation);
    if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
      FlushInstructionCache(pc, kInstrSize);
    }
    return;
  }
  // Preserve the destination register of the original sequence.
  uint32_t rd_code = GetRd(instr);

  // Must use 3 instructions to insure patchable code.
  // lu12i_w rd, middle-20.
  // ori rd, rd, low-12.
  // lu32i_d rd, high-20.
  Instr new_instr0 =
      LU12I_W | (((target >> 12) & 0xfffff) << kRjShift) | rd_code;
  Instr new_instr1 =
      ORI | (target & 0xfff) << kRkShift | (rd_code << kRjShift) | rd_code;
  Instr new_instr2 =
      LU32I_D | (((target >> 32) & 0xfffff) << kRjShift) | rd_code;
  instr_at_put(pc, new_instr0, jit_allocation);
  instr_at_put(pc + kInstrSize, new_instr1, jit_allocation);
  instr_at_put(pc + kInstrSize * 2, new_instr2, jit_allocation);

  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
    FlushInstructionCache(pc, 3 * kInstrSize);
  }
}
2365
// Patches the 2-instruction lu12i_w/ori sequence at `pc` to load the 32-bit
// `target` value, flushing the i-cache unless the caller opts out. The
// counterpart decoder is Assembler::target_compressed_address_at above.
void Assembler::set_target_compressed_value_at(
    Address pc, uint32_t target, WritableJitAllocation* jit_allocation,
    ICacheFlushMode icache_flush_mode) {
#ifdef DEBUG
  // Check we have the result from a li macro-instruction.
  Instr instr0 = instr_at(pc);
  Instr instr1 = instr_at(pc + kInstrSize);
  DCHECK(IsLu12i_w(instr0) && IsOri(instr1));
#endif

  Instr instr = instr_at(pc);
  // Preserve the destination register of the original sequence.
  uint32_t rd_code = GetRd(instr);

  // Must use 2 instructions to insure patchable code.
  // lu12i_w rd, high-20.
  // ori rd, rd, low-12.
  Instr new_instr0 =
      LU12I_W | (((target >> 12) & 0xfffff) << kRjShift) | rd_code;
  Instr new_instr1 =
      ORI | (target & 0xfff) << kRkShift | (rd_code << kRjShift) | rd_code;
  instr_at_put(pc, new_instr0, jit_allocation);
  instr_at_put(pc + kInstrSize, new_instr1, jit_allocation);

  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
    FlushInstructionCache(pc, 2 * kInstrSize);
  }
}
2393
2394} // namespace internal
2395} // namespace v8
2396
2397#endif // V8_TARGET_ARCH_LOONG64
#define BREAK
union v8::internal::@341::BuiltinMetadata::KindSpecificData data
SourcePosition pos
static V8_INLINE Address target_address_at(Address pc, Address constant_pool)
static bool supports_wasm_simd_128_
static unsigned supported_
static void ProbeImpl(bool cross_compile)
V8_INLINE Operand(int32_t immediate, RelocInfo::Mode rmode=RelocInfo::NO_INFO)
static const int kApplyMask
Definition reloc-info.h:369
uint32_t wasm_call_tag() const
static constexpr int ModeMask(Mode mode)
Definition reloc-info.h:272
Operand const offset_
Register const value_
Register const index_
Handle< Code > code
base::OwnedVector< uint8_t > buffer_
Definition assembler.cc:111
Label label
int32_t offset
Instruction * instr
ZoneVector< RpoNumber > & result
LiftoffRegister reg
int pc_offset
uint32_t const mask
int m
Definition mul-fft.cc:294
STL namespace.
int int32_t
Definition unicode.cc:40
uintptr_t Address
Definition memory.h:13
constexpr Register no_reg
V8_INLINE IndirectHandle< T > handle(Tagged< T > object, Isolate *isolate)
Definition handles-inl.h:72
constexpr UnconditionalBranchOp BL
const int kSa3FieldMask
bool DoubleToSmiInteger(double value, int *smi_int_value)
void PrintF(const char *format,...)
Definition utils.cc:39
void FlushInstructionCache(void *start, size_t size)
constexpr int B
int ToNumber(Register reg)
constexpr int kImm16Mask
constexpr int L
const int kRkFieldMask
V8_EXPORT_PRIVATE void MemMove(void *dest, const void *src, size_t size)
Definition memcopy.h:189
constexpr bool is_intn(int64_t x, unsigned n)
Definition utils.h:568
const int kSa2FieldMask
V8_EXPORT_PRIVATE FlagValues v8_flags
Register ToRegister(int num)
const int kEndOfJumpChain
constexpr uint8_t kInstrSize
const int kRjFieldMask
constexpr int kMaxInt
Definition globals.h:374
const int kRdFieldMask
const int kEndOfChain
constexpr int kNumRegisters
#define UNREACHABLE()
Definition logging.h:67
#define DCHECK_LE(v1, v2)
Definition logging.h:490
#define CHECK(condition)
Definition logging.h:124
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define CHECK_NE(lhs, rhs)
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define DCHECK_GT(v1, v2)
Definition logging.h:487
constexpr bool IsAligned(T value, U alignment)
Definition macros.h:403