v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
assembler-ppc.cc
Go to the documentation of this file.
1// Copyright (c) 1994-2006 Sun Microsystems Inc.
2// All Rights Reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions
6// are met:
7//
8// - Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// - Redistribution in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the
14// distribution.
15//
16// - Neither the name of Sun Microsystems or the names of contributors may
17// be used to endorse or promote products derived from this software without
18// specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31// OF THE POSSIBILITY OF SUCH DAMAGE.
32
33// The original source code covered by the above license above has been
34// modified significantly by Google Inc.
35// Copyright 2014 the V8 project authors. All rights reserved.
36
38
39#if defined(__PASE__)
40#include <sys/utsname.h>
41#endif
42
43#if V8_TARGET_ARCH_PPC64
44
45#include "src/base/bits.h"
46#include "src/base/cpu.h"
50
51namespace v8 {
52namespace internal {
53
54// Get the CPU features enabled by the build.
static unsigned CpuFeaturesImpliedByCompiler() {
  // No PPC feature bits are currently implied purely by the build
  // configuration; runtime probing (ProbeImpl) decides everything.
  const unsigned implied_features = 0;
  return implied_features;
}
59
61#if V8_ENABLE_WEBASSEMBLY
62 return CpuFeatures::IsSupported(PPC_9_PLUS);
63#else
64 return false;
65#endif // V8_ENABLE_WEBASSEMBLY
66}
67
// Probes and records the CPU features supported by the current host.
// Feature bits are cumulative: Power10 implies Power9, which implies Power8.
void CpuFeatures::ProbeImpl(bool cross_compile) {
  // NOTE(review): upstream seeds supported_ from CpuFeaturesImpliedByCompiler()
  // before this point; those lines appear to be missing from this copy —
  // confirm against the original source.

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

// Probe for additional features at runtime.
#ifdef USE_SIMULATOR
  // Simulator
  supported_ |= (1u << PPC_10_PLUS);
#else
  base::CPU cpu;
  if (cpu.part() == base::CPU::kPPCPower10) {
#if defined(__PASE__)
    // Some P10 features such as prefixed isns will only be supported in future
    // ibmi versions. We only enable full power 10 features if version>7.4
    struct utsname uts;
    memset(reinterpret_cast<void*>(&uts), 0, sizeof(uts));
    int r = uname(&uts);
    CHECK_GE(r, 0);
    // NOTE(review): presumably uts.release on IBM i holds the release number
    // such that atoi() > 4 corresponds to "newer than 7.4" — confirm.
    int rel = atoi(uts.release);
    if (rel > 4) {
      supported_ |= (1u << PPC_10_PLUS);
    } else {
      // Older IBM i: cap at Power9-level features.
      supported_ |= (1u << PPC_9_PLUS);
    }
#else
    supported_ |= (1u << PPC_10_PLUS);
#endif
  } else if (cpu.part() == base::CPU::kPPCPower9) {
    supported_ |= (1u << PPC_9_PLUS);
  } else if (cpu.part() == base::CPU::kPPCPower8) {
    supported_ |= (1u << PPC_8_PLUS);
  }
#if V8_OS_LINUX
  // Use the kernel-reported icache line size when available (needed for
  // correct icache flushing granularity).
  if (cpu.icache_line_size() != base::CPU::kUnknownCacheLineSize) {
    icache_line_size_ = cpu.icache_line_size();
  }
#endif
#endif
  // Make the feature lattice cumulative.
  if (supported_ & (1u << PPC_10_PLUS)) supported_ |= (1u << PPC_9_PLUS);
  if (supported_ & (1u << PPC_9_PLUS)) supported_ |= (1u << PPC_8_PLUS);

  // Set a static value on whether Simd is supported.
  // This variable is only used for certain archs to query SupportWasmSimd128()
  // at runtime in builtins using an extern ref. Other callers should use
  // CpuFeatures::SupportWasmSimd128().
  // NOTE(review): the statement that actually caches SupportsWasmSimd128()
  // appears to be missing from this copy — confirm against upstream.
}
118
120 const char* ppc_arch = nullptr;
121 ppc_arch = "ppc64";
122 printf("target %s\n", ppc_arch);
123}
124
126 printf("PPC_8_PLUS=%d\n", CpuFeatures::IsSupported(PPC_8_PLUS));
127 printf("PPC_9_PLUS=%d\n", CpuFeatures::IsSupported(PPC_9_PLUS));
128 printf("PPC_10_PLUS=%d\n", CpuFeatures::IsSupported(PPC_10_PLUS));
129}
130
131Register ToRegister(int num) {
132 DCHECK(num >= 0 && num < kNumRegisters);
133 const Register kRegisters[] = {r0, sp, r2, r3, r4, r5, r6, r7,
134 r8, r9, r10, r11, ip, r13, r14, r15,
135 r16, r17, r18, r19, r20, r21, r22, r23,
136 r24, r25, r26, r27, r28, r29, r30, fp};
137 return kRegisters[num];
138}
139
140// -----------------------------------------------------------------------------
141// Implementation of RelocInfo
142
143const int RelocInfo::kApplyMask =
146
148 // The deserializer needs to know whether a pointer is specially
149 // coded. Being specially coded on PPC means that it is a lis/ori
150 // instruction sequence or is a constant pool entry, and these are
151 // always the case inside code objects.
152 return true;
153}
154
158 }
159 return false;
160}
161
162uint32_t RelocInfo::wasm_call_tag() const {
164 return static_cast<uint32_t>(
166}
167
168// -----------------------------------------------------------------------------
169// Implementation of Operand and MemOperand
170// See assembler-ppc-inl.h for inlined constructors
171
172Operand::Operand(Handle<HeapObject> handle) {
173 rm_ = no_reg;
174 value_.immediate = static_cast<intptr_t>(handle.address());
176}
177
178Operand Operand::EmbeddedNumber(double value) {
179 int32_t smi;
180 if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
182 result.is_heap_number_request_ = true;
183 result.value_.heap_number_request = HeapNumberRequest(value);
184 return result;
185}
186
// Memory operand with an immediate displacement: [rn + offset].
MemOperand::MemOperand(Register rn, int64_t offset)
    : ra_(rn), offset_(offset), rb_(no_reg) {}

// Memory operand with an index register: [ra + rb].
MemOperand::MemOperand(Register ra, Register rb)
    : ra_(ra), offset_(0), rb_(rb) {}

// Memory operand carrying both an index register and a displacement.
// NOTE(review): presumably the emitting instruction consumes only one of
// rb/offset depending on its addressing form — confirm at use sites.
MemOperand::MemOperand(Register ra, Register rb, int64_t offset)
    : ra_(ra), offset_(offset), rb_(rb) {}
195
// Patches every recorded heap-number request site with the address of a
// freshly allocated (old-space) HeapNumber. Called during GetCode().
void Assembler::AllocateAndInstallRequestedHeapNumbers(LocalIsolate* isolate) {
  DCHECK_IMPLIES(isolate == nullptr, heap_number_requests_.empty());
  for (auto& request : heap_number_requests_) {
    Handle<HeapObject> object =
        isolate->factory()->NewHeapNumber<AllocationType::kOld>(
            request.heap_number());
    Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
    // NOTE(review): the declaration of |constant_pool| (upstream:
    // "Address constant_pool = kNullAddress;") appears to have been lost in
    // this copy — confirm against the original source.
    set_target_address_at(pc, constant_pool, object.address(), nullptr,
                          SKIP_ICACHE_FLUSH);
  }
}
208
209// -----------------------------------------------------------------------------
210// Specific instructions, constants, and masks.
211
Assembler::Assembler(const AssemblerOptions& options,
                     std::unique_ptr<AssemblerBuffer> buffer)
    : AssemblerBase(options, std::move(buffer)),
      scratch_register_list_({ip}),
      constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits) {
  // Relocation info is written from the end of the buffer backwards, while
  // code grows forwards from pc_.
  reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);

  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  constant_pool_entry_sharing_blocked_nesting_ = 0;
  // kMaxInt: no trampoline check is due until branches are actually tracked.
  next_trampoline_check_ = kMaxInt;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;
  // -1 marks "no cmpwi-against-zero optimization candidate recorded yet".
  optimizable_cmpi_pos_ = -1;
  // With --force-long-branches, behave as if a trampoline pool was already
  // emitted so every branch takes the long form.
  trampoline_emitted_ = v8_flags.force_long_branches;
  tracked_branch_count_ = 0;
  relocations_.reserve(128);
}
230
// Convenience overload: finalize using the isolate's main-thread
// LocalIsolate.
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
  GetCode(isolate->main_thread_local_isolate(), desc);
}
// Finalizes assembly: emits the trailing constant pool, relocation info and
// code comments, resolves pending heap-number requests, and fills |desc|
// with the layout of the generated code.
void Assembler::GetCode(LocalIsolate* isolate, CodeDesc* desc,
                        SafepointTableBuilderBase* safepoint_table_builder,
                        int handler_table_offset) {
  // As a crutch to avoid having to add manual Align calls wherever we use a
  // raw workflow to create InstructionStream objects (mostly in tests), add
  // another Align call here. It does no harm - the end of the InstructionStream
  // object is aligned to the (larger) kCodeAlignment anyways.
  // TODO(jgruber): Consider moving responsibility for proper alignment to
  // metadata table builders (safepoint, handler, constant pool, code
  // comments).
  DataAlign(InstructionStream::kMetadataAlignment);

  // Emit constant pool if necessary.
  int constant_pool_size = EmitConstantPool();

  EmitRelocations();

  int code_comments_size = WriteCodeComments();

  AllocateAndInstallRequestedHeapNumbers(isolate);

  // Set up code descriptor.
  // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
  // this point to make CodeDesc initialization less fiddly.

  // Metadata offsets are computed back-to-front from the end of the emitted
  // instructions: [safepoints][handlers][constant pool][comments][jump table].
  static constexpr int kBuiltinJumpTableInfoSize = 0;
  const int instruction_size = pc_offset();
  const int builtin_jump_table_info_offset =
      instruction_size - kBuiltinJumpTableInfoSize;
  const int code_comments_offset =
      builtin_jump_table_info_offset - code_comments_size;
  const int constant_pool_offset = code_comments_offset - constant_pool_size;
  const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
                                        ? constant_pool_offset
                                        : handler_table_offset;
  const int safepoint_table_offset =
      (safepoint_table_builder == kNoSafepointTable)
          ? handler_table_offset2
          : safepoint_table_builder->safepoint_table_offset();
  const int reloc_info_offset =
      static_cast<int>(reloc_info_writer.pos() - buffer_->start());
  CodeDesc::Initialize(desc, this, safepoint_table_offset,
                       handler_table_offset2, constant_pool_offset,
                       code_comments_offset, builtin_jump_table_info_offset,
                       reloc_info_offset);
}
280
281void Assembler::Align(int m) {
282 DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
283 DCHECK_EQ(pc_offset() & (kInstrSize - 1), 0);
284 while ((pc_offset() & (m - 1)) != 0) {
285 nop();
286 }
287}
288
// Align call/jump targets to 8 bytes (two instructions).
void Assembler::CodeTargetAlign() { Align(8); }
290
291Condition Assembler::GetCondition(Instr instr) {
292 switch (instr & kCondMask) {
293 case BT:
294 return eq;
295 case BF:
296 return ne;
297 default:
299 }
300}
301
// lis is encoded as addis with RA == r0 (r0 reads as zero in this form).
bool Assembler::IsLis(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIS) && GetRA(instr) == r0;
}

// li is encoded as addi with RA == r0.
bool Assembler::IsLi(Instr instr) {
  return ((instr & kOpcodeMask) == ADDI) && GetRA(instr) == r0;
}

bool Assembler::IsAddic(Instr instr) { return (instr & kOpcodeMask) == ADDIC; }

bool Assembler::IsOri(Instr instr) { return (instr & kOpcodeMask) == ORI; }

bool Assembler::IsBranch(Instr instr) { return ((instr & kOpcodeMask) == BCX); }

// Extracts the RA (first source) register field of an instruction.
Register Assembler::GetRA(Instr instr) {
  return Register::from_code(Instruction::RAValue(instr));
}

// Extracts the RB (second source) register field of an instruction.
Register Assembler::GetRB(Instr instr) {
  return Register::from_code(Instruction::RBValue(instr));
}
323
324// This code assumes a FIXED_SEQUENCE for 64bit loads (lis/ori)
325bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3,
326 Instr instr4, Instr instr5) {
327 // Check the instructions are indeed a five part load (into r12)
328 // 3d800000 lis r12, 0
329 // 618c0000 ori r12, r12, 0
330 // 798c07c6 rldicr r12, r12, 32, 31
331 // 658c00c3 oris r12, r12, 195
332 // 618ccd40 ori r12, r12, 52544
333 return (((instr1 >> 16) == 0x3D80) && ((instr2 >> 16) == 0x618C) &&
334 (instr3 == 0x798C07C6) && ((instr4 >> 16) == 0x658C) &&
335 ((instr5 >> 16) == 0x618C));
336}
337
// EXT2 sub-opcode check: rebuilds the full opcode pattern from the primary
// and extended opcode fields before comparing with CMP.
bool Assembler::IsCmpRegister(Instr instr) {
  return (((instr & kOpcodeMask) == EXT2) &&
          ((EXT2 | (instr & kExt2OpcodeMask)) == CMP));
}

bool Assembler::IsRlwinm(Instr instr) {
  return ((instr & kOpcodeMask) == RLWINMX);
}

bool Assembler::IsAndi(Instr instr) { return ((instr & kOpcodeMask) == ANDIx); }

bool Assembler::IsRldicl(Instr instr) {
  return (((instr & kOpcodeMask) == EXT5) &&
          ((EXT5 | (instr & kExt5OpcodeMask)) == RLDICL));
}

bool Assembler::IsCmpImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == CMPI);
}

// crset is emitted as creqv with identical operands, hence the CREQV match.
bool Assembler::IsCrSet(Instr instr) {
  return (((instr & kOpcodeMask) == EXT1) &&
          ((EXT1 | (instr & kExt1OpcodeMask)) == CREQV));
}

// Returns the register compared by a cmpi/cmpwi instruction.
Register Assembler::GetCmpImmediateRegister(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return GetRA(instr);
}

// Returns the raw (not sign-extended) 16-bit immediate of a cmpi instruction.
int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return instr & kOff16Mask;
}
372
373// Labels refer to positions in the (to be) generated code.
374// There are bound, linked, and unused labels.
375//
376// Bound labels refer to known positions in the already
377// generated code. pos() is the position the label refers to.
378//
379// Linked labels refer to unknown positions in the code
380// to be generated; pos() is the position of the last
381// instruction using the label.
382
383// The link chain is terminated by a negative code position (must be aligned)
384const int kEndOfChain = -4;
385
// Dummy opcodes for unbound label mov instructions or jump table entries.
// They occupy the primary-opcode field (bits 26..31) of placeholder
// instructions and are replaced with real code by target_at_put().
enum {
  kUnboundMovLabelOffsetOpcode = 0 << 26,
  kUnboundAddLabelOffsetOpcode = 1 << 26,
  kUnboundAddLabelLongOffsetOpcode = 2 << 26,
  kUnboundMovLabelAddrOpcode = 3 << 26,
  kUnboundJumpTableEntryOpcode = 4 << 26
};
394
// Returns the position encoded in the link field of the instruction at |pos|,
// or kEndOfChain when |pos| is the last entry of a label's link chain.
int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  // check which type of branch this is 16 or 26 bit offset
  uint32_t opcode = instr & kOpcodeMask;
  int link;
  switch (opcode) {
    case BX:
      link = SIGN_EXT_IMM26(instr & kImm26Mask);
      link &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
      break;
    case BCX:
      link = SIGN_EXT_IMM16((instr & kImm16Mask));
      link &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
      break;
    case kUnboundMovLabelOffsetOpcode:
    case kUnboundAddLabelOffsetOpcode:
    case kUnboundAddLabelLongOffsetOpcode:
    case kUnboundMovLabelAddrOpcode:
    case kUnboundJumpTableEntryOpcode:
      link = SIGN_EXT_IMM26(instr & kImm26Mask);
      // Placeholder links are stored word-scaled; convert to a byte offset.
      link <<= 2;
      break;
    default:
      DCHECK(false);
      return -1;
  }

  // A zero link (branch to self) marks the end of the chain.
  if (link == 0) return kEndOfChain;
  return pos + link;
}
425
426void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
427 Instr instr = instr_at(pos);
428 uint32_t opcode = instr & kOpcodeMask;
429
430 if (is_branch != nullptr) {
431 *is_branch = (opcode == BX || opcode == BCX);
432 }
433
434 switch (opcode) {
435 case BX: {
436 int imm26 = target_pos - pos;
437 CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
438 if (imm26 == kInstrSize && !(instr & kLKMask)) {
439 // Branch to next instr without link.
440 instr = ORI; // nop: ori, 0,0,0
441 } else {
442 instr &= ((~kImm26Mask) | kAAMask | kLKMask);
443 instr |= (imm26 & kImm26Mask);
444 }
445 instr_at_put(pos, instr);
446 break;
447 }
448 case BCX: {
449 int imm16 = target_pos - pos;
450 CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
451 if (imm16 == kInstrSize && !(instr & kLKMask)) {
452 // Branch to next instr without link.
453 instr = ORI; // nop: ori, 0,0,0
454 } else {
455 instr &= ((~kImm16Mask) | kAAMask | kLKMask);
456 instr |= (imm16 & kImm16Mask);
457 }
458 instr_at_put(pos, instr);
459 break;
460 }
461 case kUnboundMovLabelOffsetOpcode: {
462 // Load the position of the label relative to the generated code object
463 // pointer in a register.
464 Register dst = Register::from_code(instr_at(pos + kInstrSize));
466 target_pos + (InstructionStream::kHeaderSize - kHeapObjectTag);
467 PatchingAssembler patcher(
468 options(), reinterpret_cast<uint8_t*>(buffer_start_ + pos), 2);
469 patcher.bitwise_mov32(dst, offset);
470 break;
471 }
472 case kUnboundAddLabelLongOffsetOpcode:
473 case kUnboundAddLabelOffsetOpcode: {
474 // dst = base + position + immediate
475 Instr operands = instr_at(pos + kInstrSize);
476 Register dst = Register::from_code((operands >> 27) & 0x1F);
477 Register base = Register::from_code((operands >> 22) & 0x1F);
478 int32_t delta = (opcode == kUnboundAddLabelLongOffsetOpcode)
479 ? static_cast<int32_t>(instr_at(pos + 2 * kInstrSize))
480 : (SIGN_EXT_IMM22(operands & kImm22Mask));
481 int32_t offset = target_pos + delta;
482 PatchingAssembler patcher(
483 options(), reinterpret_cast<uint8_t*>(buffer_start_ + pos),
484 2 + static_cast<int32_t>(opcode == kUnboundAddLabelLongOffsetOpcode));
485 patcher.bitwise_add32(dst, base, offset);
486 if (opcode == kUnboundAddLabelLongOffsetOpcode) patcher.nop();
487 break;
488 }
489 case kUnboundMovLabelAddrOpcode: {
490 // Load the address of the label in a register.
491 Register dst = Register::from_code(instr_at(pos + kInstrSize));
492 PatchingAssembler patcher(options(),
493 reinterpret_cast<uint8_t*>(buffer_start_ + pos),
494 kMovInstructionsNoConstantPool);
495 // Keep internal references relative until EmitRelocations.
496 patcher.bitwise_mov(dst, target_pos);
497 break;
498 }
499 case kUnboundJumpTableEntryOpcode: {
500 PatchingAssembler patcher(options(),
501 reinterpret_cast<uint8_t*>(buffer_start_ + pos),
502 kSystemPointerSize / kInstrSize);
503 // Keep internal references relative until EmitRelocations.
504 patcher.dp(target_pos);
505 break;
506 }
507 default:
508 DCHECK(false);
509 break;
510 }
511}
512
513int Assembler::max_reach_from(int pos) {
514 Instr instr = instr_at(pos);
515 uint32_t opcode = instr & kOpcodeMask;
516
517 // check which type of branch this is 16 or 26 bit offset
518 switch (opcode) {
519 case BX:
520 return 26;
521 case BCX:
522 return 16;
523 case kUnboundMovLabelOffsetOpcode:
524 case kUnboundAddLabelOffsetOpcode:
525 case kUnboundMovLabelAddrOpcode:
526 case kUnboundJumpTableEntryOpcode:
527 return 0; // no limit on reach
528 }
529
530 DCHECK(false);
531 return 0;
532}
533
// Resolves every fixup on |L|'s link chain to |pos|, routing branches that
// cannot reach |pos| directly through a (single, shared) trampoline slot.
void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  int32_t trampoline_pos = kInvalidSlotPos;
  bool is_branch = false;  // set by target_at_put for direct branch fixups
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    int32_t offset = pos - fixup_pos;
    int maxReach = max_reach_from(fixup_pos);
    next(L);  // call next before overwriting link with target at fixup_pos
    if (maxReach && is_intn(offset, maxReach) == false) {
      // Out of range: allocate one trampoline slot targeting |pos| and
      // retarget the short branch at the trampoline instead.
      if (trampoline_pos == kInvalidSlotPos) {
        trampoline_pos = get_trampoline_entry();
        CHECK_NE(trampoline_pos, kInvalidSlotPos);
        target_at_put(trampoline_pos, pos);
      }
      target_at_put(fixup_pos, trampoline_pos);
    } else {
      target_at_put(fixup_pos, pos, &is_branch);
    }
  }
  L->bind_to(pos);

  // A directly-resolved branch no longer needs trampoline-pool tracking.
  if (!trampoline_emitted_ && is_branch) {
    UntrackBranch();
  }

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_) last_bound_pos_ = pos;
}
564
// Binds |L| to the current assembly position.
void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}
569
570void Assembler::next(Label* L) {
571 DCHECK(L->is_linked());
572 int link = target_at(L->pos());
573 if (link == kEndOfChain) {
574 L->Unuse();
575 } else {
576 DCHECK_GE(link, 0);
577 L->link_to(link);
578 }
579}
580
581bool Assembler::is_near(Label* L, Condition cond) {
582 DCHECK(L->is_bound());
583 if (L->is_bound() == false) return false;
584
585 int maxReach = ((cond == al) ? 26 : 16);
586 int offset = L->pos() - pc_offset();
587
588 return is_intn(offset, maxReach);
589}
590
// A-form: three-operand floating-point arithmetic (frt <- op(fra, frb)).
void Assembler::a_form(Instr instr, DoubleRegister frt, DoubleRegister fra,
                       DoubleRegister frb, RCBit r) {
  emit(instr | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 | r);
}

// D-form: opcode | RT | RA | 16-bit immediate. |signed_disp| selects whether
// the immediate is validated as int16 or uint16; out-of-range values are
// printed before the CHECK fires to ease debugging.
void Assembler::d_form(Instr instr, Register rt, Register ra,
                       const intptr_t val, bool signed_disp) {
  if (signed_disp) {
    if (!is_int16(val)) {
      PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR "\n", val, val);
    }
    CHECK(is_int16(val));
  } else {
    if (!is_uint16(val)) {
      PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR
             ", is_unsigned_imm16(val)=%d, kImm16Mask=0x%x\n",
             val, val, is_uint16(val), kImm16Mask);
    }
    CHECK(is_uint16(val));
  }
  emit(instr | rt.code() * B21 | ra.code() * B16 | (kImm16Mask & val));
}

// XO-form: three-register integer arithmetic with overflow-enable (OE) and
// record (Rc) bits.
void Assembler::xo_form(Instr instr, Register rt, Register ra, Register rb,
                        OEBit o, RCBit r) {
  emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | o | r);
}
618
619void Assembler::md_form(Instr instr, Register ra, Register rs, int shift,
620 int maskbit, RCBit r) {
621 int sh0_4 = shift & 0x1F;
622 int sh5 = (shift >> 5) & 0x1;
623 int m0_4 = maskbit & 0x1F;
624 int m5 = (maskbit >> 5) & 0x1;
625
626 emit(instr | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 | m0_4 * B6 |
627 m5 * B5 | sh5 * B1 | r);
628}
629
630void Assembler::mds_form(Instr instr, Register ra, Register rs, Register rb,
631 int maskbit, RCBit r) {
632 int m0_4 = maskbit & 0x1F;
633 int m5 = (maskbit >> 5) & 0x1;
634
635 emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | m0_4 * B6 |
636 m5 * B5 | r);
637}
638
639// Returns the next free trampoline entry.
640int32_t Assembler::get_trampoline_entry() {
641 int32_t trampoline_entry = kInvalidSlotPos;
642
643 if (!internal_trampoline_exception_) {
644 trampoline_entry = trampoline_.take_slot();
645
646 if (kInvalidSlotPos == trampoline_entry) {
647 internal_trampoline_exception_ = true;
648 }
649 }
650 return trampoline_entry;
651}
652
653int Assembler::link(Label* L) {
654 int position;
655 if (L->is_bound()) {
656 position = L->pos();
657 } else {
658 if (L->is_linked()) {
659 position = L->pos(); // L's link
660 } else {
661 // was: target_pos = kEndOfChain;
662 // However, using self to mark the first reference
663 // should avoid most instances of branch offset overflow. See
664 // target_at() for where this is converted back to kEndOfChain.
666 }
667 L->link_to(pc_offset());
668 }
669
670 return position;
671}
672
673// Branch instructions.
674
// bclr: conditional branch to the link register.
void Assembler::bclr(BOfield bo, int condition_bit, LKBit lk) {
  emit(EXT1 | static_cast<uint32_t>(bo) | condition_bit * B16 | BCLRX | lk);
}

// bcctr: conditional branch to the count register.
void Assembler::bcctr(BOfield bo, int condition_bit, LKBit lk) {
  emit(EXT1 | static_cast<uint32_t>(bo) | condition_bit * B16 | BCCTRX | lk);
}

// Pseudo op - branch to link register
void Assembler::blr() { bclr(BA, 0, LeaveLK); }

// Pseudo op - branch to count register -- used for "jump"
void Assembler::bctr() { bcctr(BA, 0, LeaveLK); }

// Branch to count register, setting the link register (indirect call).
void Assembler::bctrl() { bcctr(BA, 0, SetLK); }

// bc: conditional relative branch; |branch_offset| must be 4-byte aligned
// and within signed 16-bit reach.
void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) {
  int imm16 = branch_offset;
  CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
  emit(BCX | static_cast<uint32_t>(bo) | condition_bit * B16 |
       (imm16 & kImm16Mask) | lk);
}

// b: unconditional relative branch with signed 26-bit reach.
void Assembler::b(int branch_offset, LKBit lk) {
  int imm26 = branch_offset;
  CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
  emit(BX | (imm26 & kImm26Mask) | lk);
}
703
// xori: xor immediate. For the logical immediates the source register is
// encoded in the RT slot of the D-form, hence the swapped arguments.
void Assembler::xori(Register dst, Register src, const Operand& imm) {
  d_form(XORI, src, dst, imm.immediate(), false);
}

// xoris: xor immediate shifted (operates on the upper 16 bits).
void Assembler::xoris(Register ra, Register rs, const Operand& imm) {
  d_form(XORIS, rs, ra, imm.immediate(), false);
}

// rlwinm: rotate left word by immediate |sh|, then AND with mask mb..me.
void Assembler::rlwinm(Register ra, Register rs, int sh, int mb, int me,
                       RCBit rc) {
  sh &= 0x1F;
  mb &= 0x1F;
  me &= 0x1F;
  emit(RLWINMX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
       me << 1 | rc);
}

// rlwnm: rotate left word by the amount in rb, then AND with mask mb..me.
void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me,
                      RCBit rc) {
  mb &= 0x1F;
  me &= 0x1F;
  emit(RLWNMX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | mb * B6 |
       me << 1 | rc);
}

// rlwimi: rotate left word by immediate |sh|, then insert under mask mb..me.
void Assembler::rlwimi(Register ra, Register rs, int sh, int mb, int me,
                       RCBit rc) {
  sh &= 0x1F;
  mb &= 0x1F;
  me &= 0x1F;
  emit(RLWIMIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
       me << 1 | rc);
}
737
// slwi: shift left word immediate (rlwinm pseudo op).
void Assembler::slwi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
  rlwinm(dst, src, val.immediate(), 0, 31 - val.immediate(), rc);
}

// srwi: shift right word immediate (rlwinm pseudo op).
void Assembler::srwi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
  rlwinm(dst, src, 32 - val.immediate(), val.immediate(), 31, rc);
}

// clrrwi: clear the right-most |val| bits.
void Assembler::clrrwi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
  rlwinm(dst, src, 0, 0, 31 - val.immediate(), rc);
}

// clrlwi: clear the left-most |val| bits.
void Assembler::clrlwi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
  rlwinm(dst, src, 0, val.immediate(), 31, rc);
}

// rotlw: rotate left word by the amount in rb.
void Assembler::rotlw(Register ra, Register rs, Register rb, RCBit r) {
  rlwnm(ra, rs, rb, 0, 31, r);
}

// rotlwi: rotate left word immediate.
void Assembler::rotlwi(Register ra, Register rs, int sh, RCBit r) {
  rlwinm(ra, rs, sh, 0, 31, r);
}

// rotrwi: rotate right word immediate (rotate left by 32 - sh).
void Assembler::rotrwi(Register ra, Register rs, int sh, RCBit r) {
  rlwinm(ra, rs, 32 - sh, 0, 31, r);
}

// subi: subtract immediate (addi with the immediate negated). Note that an
// immediate of INT16_MIN negates out of int16 range and trips d_form's CHECK.
void Assembler::subi(Register dst, Register src, const Operand& imm) {
  addi(dst, src, Operand(-(imm.immediate())));
}
775
// addc: add carrying (sets CA).
void Assembler::addc(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | ADDCX, dst, src1, src2, o, r);
}

// adde: add extended (consumes and sets CA).
void Assembler::adde(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | ADDEX, dst, src1, src2, o, r);
}

// addze: add to zero extended (dst = src1 + CA).
void Assembler::addze(Register dst, Register src1, OEBit o, RCBit r) {
  // a special xo_form
  emit(EXT2 | ADDZEX | dst.code() * B21 | src1.code() * B16 | o | r);
}

// sub: dst = src1 - src2. subf computes RB - RA, hence the swapped operands.
void Assembler::sub(Register dst, Register src1, Register src2, OEBit o,
                    RCBit r) {
  xo_form(EXT2 | SUBFX, dst, src2, src1, o, r);
}

// subc: subtract carrying (operands swapped as in sub).
void Assembler::subc(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | SUBFCX, dst, src2, src1, o, r);
}

// sube: subtract extended, consuming CA (operands swapped as in sub).
void Assembler::sube(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | SUBFEX, dst, src2, src1, o, r);
}

// subfic: dst = imm - src.
void Assembler::subfic(Register dst, Register src, const Operand& imm) {
  d_form(SUBFIC, dst, src, imm.immediate(), true);
}

void Assembler::add(Register dst, Register src1, Register src2, OEBit o,
                    RCBit r) {
  xo_form(EXT2 | ADDX, dst, src1, src2, o, r);
}

// Multiply low word
void Assembler::mullw(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | MULLW, dst, src1, src2, o, r);
}

// mulli: multiply low immediate.
void Assembler::mulli(Register dst, Register src, const Operand& imm) {
  d_form(MULLI, dst, src, imm.immediate(), true);
}

// Multiply hi doubleword
void Assembler::mulhd(Register dst, Register src1, Register src2, RCBit r) {
  xo_form(EXT2 | MULHD, dst, src1, src2, LeaveOE, r);
}

// Multiply hi doubleword unsigned
void Assembler::mulhdu(Register dst, Register src1, Register src2, RCBit r) {
  xo_form(EXT2 | MULHDU, dst, src1, src2, LeaveOE, r);
}

// Multiply hi word
void Assembler::mulhw(Register dst, Register src1, Register src2, RCBit r) {
  xo_form(EXT2 | MULHWX, dst, src1, src2, LeaveOE, r);
}

// Multiply hi word unsigned
void Assembler::mulhwu(Register dst, Register src1, Register src2, RCBit r) {
  xo_form(EXT2 | MULHWUX, dst, src1, src2, LeaveOE, r);
}

// Divide word
void Assembler::divw(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | DIVW, dst, src1, src2, o, r);
}

// Divide word unsigned
void Assembler::divwu(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | DIVWU, dst, src1, src2, o, r);
}
856
// addi: add immediate. r0 in the RA slot reads as literal zero, so callers
// wanting that behavior must use li instead.
void Assembler::addi(Register dst, Register src, const Operand& imm) {
  DCHECK(src != r0);  // use li instead to show intent
  d_form(ADDI, dst, src, imm.immediate(), true);
}

// addis: add immediate shifted (upper 16 bits).
void Assembler::addis(Register dst, Register src, const Operand& imm) {
  DCHECK(src != r0);  // use lis instead to show intent
  d_form(ADDIS, dst, src, imm.immediate(), true);
}

// addic: add immediate carrying (sets CA; r0 is a valid source here).
void Assembler::addic(Register dst, Register src, const Operand& imm) {
  d_form(ADDIC, dst, src, imm.immediate(), true);
}

// For the logical immediates below, the source register goes in the RT slot
// of the D-form encoding, hence the swapped argument order.
void Assembler::andi(Register ra, Register rs, const Operand& imm) {
  d_form(ANDIx, rs, ra, imm.immediate(), false);
}

void Assembler::andis(Register ra, Register rs, const Operand& imm) {
  d_form(ANDISx, rs, ra, imm.immediate(), false);
}

void Assembler::ori(Register ra, Register rs, const Operand& imm) {
  d_form(ORI, rs, ra, imm.immediate(), false);
}

void Assembler::oris(Register dst, Register src, const Operand& imm) {
  d_form(ORIS, src, dst, imm.immediate(), false);
}
886
// cmpi: compare immediate; L bit = 1 selects a 64-bit (doubleword) compare.
void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.immediate();
  int L = 1;
  DCHECK(is_int16(imm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  imm16 &= kImm16Mask;
  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
}

// cmpli: compare logical (unsigned) doubleword immediate.
void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.immediate();
  int L = 1;
  DCHECK(is_uint16(uimm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  uimm16 &= kImm16Mask;
  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
}

// cmpwi: compare word immediate; L bit = 0 selects a 32-bit compare.
void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.immediate();
  int L = 0;
  int pos = pc_offset();
  DCHECK(is_int16(imm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  imm16 &= kImm16Mask;

  // For cmpwi against 0, save position and cr for later examination
  // of potential optimization.
  if (imm16 == 0 && pos > 0 && last_bound_pos_ != pos) {
    optimizable_cmpi_pos_ = pos;
    cmpi_cr_ = cr;
  }
  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
}

// cmplwi: compare logical (unsigned) word immediate.
void Assembler::cmplwi(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.immediate();
  int L = 0;
  DCHECK(is_uint16(uimm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  uimm16 &= kImm16Mask;
  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
}

// isel: integer select — rt = (CR bit |cb| set) ? ra : rb.
void Assembler::isel(Register rt, Register ra, Register rb, int cb) {
  emit(EXT2 | ISEL | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       cb * B6);
}
935
// Pseudo op - load immediate (addi with RA == r0, which reads as zero).
void Assembler::li(Register dst, const Operand& imm) {
  d_form(ADDI, dst, r0, imm.immediate(), true);
}

// Pseudo op - load immediate shifted (sets the upper 16 bits).
void Assembler::lis(Register dst, const Operand& imm) {
  d_form(ADDIS, dst, r0, imm.immediate(), true);
}

// Pseudo op - move register
void Assembler::mr(Register dst, Register src) {
  // actually or(dst, src, src)
  orx(dst, src, src);
}
950
951void Assembler::lbz(Register dst, const MemOperand& src) {
952 DCHECK(src.ra_ != r0);
953 d_form(LBZ, dst, src.ra(), src.offset(), true);
954}
955
956void Assembler::lhz(Register dst, const MemOperand& src) {
957 DCHECK(src.ra_ != r0);
958 d_form(LHZ, dst, src.ra(), src.offset(), true);
959}
960
961void Assembler::lwz(Register dst, const MemOperand& src) {
962 DCHECK(src.ra_ != r0);
963 d_form(LWZ, dst, src.ra(), src.offset(), true);
964}
965
966void Assembler::lwzu(Register dst, const MemOperand& src) {
967 DCHECK(src.ra_ != r0);
968 d_form(LWZU, dst, src.ra(), src.offset(), true);
969}
970
971void Assembler::lha(Register dst, const MemOperand& src) {
972 DCHECK(src.ra_ != r0);
973 d_form(LHA, dst, src.ra(), src.offset(), true);
974}
975
// Load word algebraic (sign-extending 32-bit load). DS-form: the low two
// bits of the displacement field hold the sub-opcode (2 selects lwa within
// the LD major opcode), so the offset must be word-aligned.
void Assembler::lwa(Register dst, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  emit(LD | dst.code() * B21 | src.ra().code() * B16 | offset | 2);
}
983
// D-form integer stores. RA must not be r0 (reads as zero in a base slot).

// Store byte.
void Assembler::stb(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(STB, dst, src.ra(), src.offset(), true);
}

// Store halfword.
void Assembler::sth(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(STH, dst, src.ra(), src.offset(), true);
}

// Store word.
void Assembler::stw(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(STW, dst, src.ra(), src.offset(), true);
}

// Store word with update (writes effective address back to RA).
void Assembler::stwu(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(STWU, dst, src.ra(), src.offset(), true);
}
1003
// Negate: rt = -ra, with optional overflow (OE) and record (Rc) bits.
void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) {
  emit(EXT2 | NEGX | rt.code() * B21 | ra.code() * B16 | o | r);
}
1007
// 64bit specific instructions
// These are DS-form: the low two bits of the 16-bit displacement hold the
// sub-opcode (ld=0, ldu=1; std=0, stdu=1), so offsets must be word-aligned.

// Load doubleword.
void Assembler::ld(Register rd, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset);
}

// Load doubleword with update (writes effective address back to RA).
void Assembler::ldu(Register rd, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset | 1);
}

// Store doubleword.
void Assembler::std(Register rs, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset);
}

// Store doubleword with update (writes effective address back to RA).
void Assembler::stdu(Register rs, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset | 1);
}
1040
// 64-bit rotate-left instructions (MD/MDS-form).

// Rotate left doubleword immediate then clear: rotate rs left by sh, clear
// bits outside [mb, 63-sh].
void Assembler::rldic(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIC, ra, rs, sh, mb, r);
}

// Rotate left doubleword immediate then clear left (bits 0..mb-1).
void Assembler::rldicl(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDICL, ra, rs, sh, mb, r);
}

// Rotate left doubleword (shift amount in rb) then clear left.
void Assembler::rldcl(Register ra, Register rs, Register rb, int mb, RCBit r) {
  mds_form(EXT5 | RLDCL, ra, rs, rb, mb, r);
}

// Rotate left doubleword immediate then clear right (bits me+1..63).
void Assembler::rldicr(Register ra, Register rs, int sh, int me, RCBit r) {
  md_form(EXT5 | RLDICR, ra, rs, sh, me, r);
}
1056
// Pseudo ops built on rldicr/rldicl; val must be in [0, 63].

// Shift left doubleword immediate: dst = src << val.
void Assembler::sldi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
  rldicr(dst, src, val.immediate(), 63 - val.immediate(), rc);
}

// Shift right doubleword immediate (logical): dst = src >> val.
void Assembler::srdi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
  rldicl(dst, src, 64 - val.immediate(), val.immediate(), rc);
}

// Clear rightmost val bits (align down to 2^val).
void Assembler::clrrdi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
  rldicr(dst, src, 0, 63 - val.immediate(), rc);
}

// Clear leftmost val bits (zero-extend from 64-val bits).
void Assembler::clrldi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
  rldicl(dst, src, 0, val.immediate(), rc);
}
1078
// Rotate left doubleword immediate then insert: rotate rs left by sh and
// insert into ra under a mask starting at bit mb (bit-field insert).
void Assembler::rldimi(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIMI, ra, rs, sh, mb, r);
}
1082
// Shift right algebraic doubleword immediate. The 6-bit shift amount is
// split in the encoding: low 5 bits at B11, the 6th bit at B1.
void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) {
  int sh0_4 = sh & 0x1F;
  int sh5 = (sh >> 5) & 0x1;

  emit(EXT2 | SRADIX | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 |
       sh5 * B1 | r);
}
1090
// Rotate left doubleword by register amount (mask covers all 64 bits).
void Assembler::rotld(Register ra, Register rs, Register rb, RCBit r) {
  rldcl(ra, rs, rb, 0, r);
}

// Rotate left doubleword by immediate sh.
void Assembler::rotldi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, sh, 0, r);
}

// Rotate right doubleword by immediate sh, as a left rotate of 64 - sh.
// NOTE(review): assumes 0 < sh < 64 — sh == 0 would yield a rotate amount
// of 64, which does not fit the 6-bit shift field; confirm callers.
void Assembler::rotrdi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, 64 - sh, 0, r);
}
1102
// Multiply low doubleword: dst = low 64 bits of src1 * src2.
void Assembler::mulld(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | MULLD, dst, src1, src2, o, r);
}

// Divide doubleword (signed): dst = src1 / src2.
void Assembler::divd(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | DIVD, dst, src1, src2, o, r);
}

// Divide doubleword unsigned: dst = src1 / src2.
void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | DIVDU, dst, src1, src2, o, r);
}
1117
// Prefixed instructions.
// Splits a 34-bit signed immediate into the 18-bit high part (prefix) and
// 16-bit low part (suffix) used by Power10 prefixed-instruction pairs.
// Declares two locals named `prefix` and `suffix` in the caller's scope.
// The `immediate` argument is parenthesized so expansion stays correct if
// a caller ever passes a compound expression.
#define GENERATE_PREFIX_SUFFIX_BITS(immediate, prefix, suffix)    \
  CHECK(is_int34(immediate));                                     \
  int32_t prefix =                                                \
      SIGN_EXT_IMM18(((immediate) >> 16) & kImm18Mask); /* 18 bits.*/ \
  int16_t suffix = (immediate)&kImm16Mask;              /* 16 bits.*/ \
  DCHECK(is_int18(prefix));
1125
// Power10 prefixed add immediate: materializes a 34-bit immediate addition
// as a prefix word plus a trailing addi. The trampoline pool is blocked so
// the two-word sequence cannot be split by pool emission.
void Assembler::paddi(Register dst, Register src, const Operand& imm) {
  CHECK(CpuFeatures::IsSupported(PPC_10_PLUS));
  DCHECK(src != r0);  // use pli instead to show intent.
  intptr_t immediate = imm.immediate();
  GENERATE_PREFIX_SUFFIX_BITS(immediate, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));
  addi(dst, src, Operand(lo));
}
1135
// Power10 prefixed load immediate: loads a 34-bit signed immediate into dst
// using a prefix word plus li (addi from r0).
void Assembler::pli(Register dst, const Operand& imm) {
  CHECK(CpuFeatures::IsSupported(PPC_10_PLUS));
  intptr_t immediate = imm.immediate();
  GENERATE_PREFIX_SUFFIX_BITS(immediate, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));
  li(dst, Operand(lo));
}
1144
1145void Assembler::psubi(Register dst, Register src, const Operand& imm) {
1146 paddi(dst, src, Operand(-(imm.immediate())));
1147}
1148
// Power10 prefixed loads with 34-bit displacements: a prefix word carries
// the high 18 bits, then the base (MLS-form) load carries the low 16 bits.

// Prefixed load byte and zero-extend.
void Assembler::plbz(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  int64_t offset = src.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));
  lbz(dst, MemOperand(src.ra(), lo));
}

// Prefixed load halfword and zero-extend.
void Assembler::plhz(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  int64_t offset = src.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));
  lhz(dst, MemOperand(src.ra(), lo));
}

// Prefixed load halfword algebraic (sign-extend).
void Assembler::plha(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  int64_t offset = src.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));
  lha(dst, MemOperand(src.ra(), lo));
}

// Prefixed load word and zero-extend.
void Assembler::plwz(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  int64_t offset = src.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));
  lwz(dst, MemOperand(src.ra(), lo));
}
1184
// Prefixed load word algebraic: 8LS-form prefix followed by a hand-encoded
// suffix word (PPLWA) carrying the low 16 displacement bits.
void Assembler::plwa(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  int64_t offset = src.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_8ls(Operand(hi));
  emit(PPLWA | dst.code() * B21 | src.ra().code() * B16 | (lo & kImm16Mask));
}

// Prefixed load doubleword: 8LS-form prefix plus hand-encoded PPLD suffix.
void Assembler::pld(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  int64_t offset = src.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_8ls(Operand(hi));
  emit(PPLD | dst.code() * B21 | src.ra().code() * B16 | (lo & kImm16Mask));
}
1202
// Prefixed floating-point load single (32-bit, converted to double in frt).
void Assembler::plfs(DoubleRegister dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  int64_t offset = src.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));
  lfs(dst, MemOperand(src.ra(), lo));
}

// Prefixed floating-point load double.
void Assembler::plfd(DoubleRegister dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  int64_t offset = src.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));
  lfd(dst, MemOperand(src.ra(), lo));
}
1220
// Power10 prefixed stores with 34-bit displacements (MLS-form prefix plus
// the corresponding D-form store for the low 16 bits).

// Prefixed store byte.
void Assembler::pstb(Register src, const MemOperand& dst) {
  DCHECK(dst.ra_ != r0);
  int64_t offset = dst.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));
  stb(src, MemOperand(dst.ra(), lo));
}

// Prefixed store halfword.
void Assembler::psth(Register src, const MemOperand& dst) {
  DCHECK(dst.ra_ != r0);
  int64_t offset = dst.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));
  sth(src, MemOperand(dst.ra(), lo));
}

// Prefixed store word.
void Assembler::pstw(Register src, const MemOperand& dst) {
  DCHECK(dst.ra_ != r0);
  int64_t offset = dst.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));
  stw(src, MemOperand(dst.ra(), lo));
}
1247
// Prefixed store doubleword: 8LS-form prefix plus hand-encoded PPSTD suffix.
void Assembler::pstd(Register src, const MemOperand& dst) {
  DCHECK(dst.ra_ != r0);
  int64_t offset = dst.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_8ls(Operand(hi));
  emit(PPSTD | src.code() * B21 | dst.ra().code() * B16 | (lo & kImm16Mask));
}
1256
// Prefixed floating-point store single (rounds the double in src to float).
void Assembler::pstfs(const DoubleRegister src, const MemOperand& dst) {
  DCHECK(dst.ra_ != r0);
  int64_t offset = dst.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));
  stfs(src, MemOperand(dst.ra(), lo));
}

// Prefixed floating-point store double.
void Assembler::pstfd(const DoubleRegister src, const MemOperand& dst) {
  DCHECK(dst.ra_ != r0);
  int64_t offset = dst.offset();
  GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
  BlockTrampolinePoolScope block_trampoline_pool(this);
  pload_store_mls(Operand(hi));
  stfd(src, MemOperand(dst.ra(), lo));
}
#undef GENERATE_PREFIX_SUFFIX_BITS
1275
// Returns the number of instructions mov(dst, src) will emit when a fixed
// (non-optimizable) sequence is required, so callers can reserve space.
// The final DCHECK documents that this is only meaningful when the mov
// cannot be shortened to an immediate-load sequence.
int Assembler::instructions_required_for_mov(Register dst,
                                             const Operand& src) const {
  bool canOptimize =
      !(src.must_output_reloc_info(this) || is_trampoline_pool_blocked());
  if (use_constant_pool_for_mov(dst, src, canOptimize)) {
    if (ConstantPoolAccessIsInOverflow()) {
      // Overflowed pool entries need one extra instruction (an addis to
      // form the high part of the pool offset; see mov()).
      return kMovInstructionsConstantPool + 1;
    }
    return kMovInstructionsConstantPool;
  }
  DCHECK(!canOptimize);
  return kMovInstructionsNoConstantPool;
}
1289
// Decides whether mov(dst, src) should load the value from the embedded
// constant pool rather than synthesizing it with immediate instructions.
bool Assembler::use_constant_pool_for_mov(Register dst, const Operand& src,
                                          bool canOptimize) const {
  if (!V8_EMBEDDED_CONSTANT_POOL_BOOL || !is_constant_pool_available()) {
    // If there is no constant pool available, we must use a mov
    // immediate sequence.
    return false;
  }
  intptr_t value = src.immediate();
  // Overflowed pool access is not allowed when a short int32 immediate
  // sequence exists, or when dst is r0 (r0 reads as zero in the base slot
  // of the addis/ld overflow sequence emitted by mov()).
  bool allowOverflow = !((canOptimize && is_int32(value)) || dst == r0);
  if (canOptimize &&
      (is_int16(value) ||
       (CpuFeatures::IsSupported(PPC_10_PLUS) && is_int34(value)))) {
    // Prefer a single-instruction load-immediate.
    return false;
  }
  if (!allowOverflow && ConstantPoolAccessIsInOverflow()) {
    // Prefer non-relocatable two-instruction bitwise-mov32 over
    // overflow sequence.
    return false;
  }

  return true;
}
1313
1314void Assembler::EnsureSpaceFor(int space_needed) {
1315 if (buffer_space() <= (kGap + space_needed)) {
1316 GrowBuffer(space_needed);
1317 }
1318}
1319
// Returns true when this operand's relocation mode requires reloc info to
// be recorded alongside the emitted code.
bool Operand::must_output_reloc_info(const Assembler* assembler) const {
  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
    if (assembler != nullptr && assembler->predictable_code_size()) return true;
    // NOTE(review): this path dereferences `assembler` without a null
    // check, although the line above guards against nullptr — callers
    // appear to always pass a valid assembler here; confirm.
    return assembler->options().record_reloc_info_for_serialization;
  } else if (RelocInfo::IsNoInfo(rmode_)) {
    return false;
  }
  return true;
}
1329
// Primarily used for loading constants
// This should really move to be in macro-assembler as it
// is really a pseudo instruction
// Some usages of this intend for a FIXED_SEQUENCE to be used
// Todo - break this dependency so we can optimize mov() in general
// and only use the generic version when we require a fixed sequence
void Assembler::mov(Register dst, const Operand& src) {
  intptr_t value;
  if (src.IsHeapNumberRequest()) {
    // Placeholder value; the real heap number is patched in later.
    RequestHeapNumber(src.heap_number_request());
    value = 0;
  } else {
    value = src.immediate();
  }
  bool relocatable = src.must_output_reloc_info(this);
  bool canOptimize;

  // Optimization is disallowed for relocatable values, and while the
  // trampoline pool is blocked unless a short fixed-length immediate form
  // exists.
  // NOTE(review): with `||` here, an int16 value still disables
  // optimization when the pool is blocked unless the PPC_10_PLUS/int34
  // condition also holds. This is safe (falls back to the fixed-length
  // bitwise_mov sequence) but conservative — confirm `&&` was not intended.
  canOptimize =
      !(relocatable ||
        (is_trampoline_pool_blocked() &&
         (!is_int16(value) ||
          !(CpuFeatures::IsSupported(PPC_10_PLUS) && is_int34(value)))));

  if (!src.IsHeapNumberRequest() &&
      use_constant_pool_for_mov(dst, src, canOptimize)) {
    DCHECK(is_constant_pool_available());
    if (relocatable) {
      RecordRelocInfo(src.rmode_);
    }
    ConstantPoolEntry::Access access = ConstantPoolAddEntry(src.rmode_, value);
    if (access == ConstantPoolEntry::OVERFLOWED) {
      // Two-instruction overflow sequence: addis forms the high part of
      // the pool offset, then ld loads through dst.
      addis(dst, kConstantPoolRegister, Operand::Zero());
      ld(dst, MemOperand(dst, 0));
    } else {
      ld(dst, MemOperand(kConstantPoolRegister, 0));
    }
    return;
  }

  if (canOptimize) {
    if (is_int16(value)) {
      li(dst, Operand(value));
    } else if (CpuFeatures::IsSupported(PPC_10_PLUS) && is_int34(value)) {
      pli(dst, Operand(value));
    } else {
      // Build the value 16 bits at a time, skipping zero halfwords.
      uint16_t u16;
      if (is_int32(value)) {
        lis(dst, Operand(value >> 16));
      } else {
        if (is_int48(value)) {
          li(dst, Operand(value >> 32));
        } else {
          lis(dst, Operand(value >> 48));
          u16 = ((value >> 32) & 0xFFFF);
          if (u16) {
            ori(dst, dst, Operand(u16));
          }
        }
        sldi(dst, dst, Operand(32));
        u16 = ((value >> 16) & 0xFFFF);
        if (u16) {
          oris(dst, dst, Operand(u16));
        }
      }
      u16 = (value & 0xFFFF);
      if (u16) {
        ori(dst, dst, Operand(u16));
      }
    }
    return;
  }

  // Fixed-length fallback: always emits the full 5-instruction sequence.
  DCHECK(!canOptimize);
  if (relocatable) {
    RecordRelocInfo(src.rmode_);
  }
  bitwise_mov(dst, value);
}
1408
// Loads a full 64-bit value with a FIXED 5-instruction sequence
// (lis/ori/sldi/oris/ori), suitable for later patching since its length
// never depends on the value.
void Assembler::bitwise_mov(Register dst, intptr_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  int32_t hi_32 = static_cast<int32_t>(value >> 32);
  int32_t lo_32 = static_cast<int32_t>(value);
  int hi_word = static_cast<int>(hi_32 >> 16);
  int lo_word = static_cast<int>(hi_32 & 0xFFFF);
  lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
  ori(dst, dst, Operand(lo_word));
  sldi(dst, dst, Operand(32));
  hi_word = static_cast<int>(((lo_32 >> 16) & 0xFFFF));
  lo_word = static_cast<int>(lo_32 & 0xFFFF);
  oris(dst, dst, Operand(hi_word));
  ori(dst, dst, Operand(lo_word));
}
1423
// Loads a 32-bit value with a FIXED 2-instruction sequence (lis/ori),
// suitable for later patching.
void Assembler::bitwise_mov32(Register dst, int32_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  int hi_word = static_cast<int>(value >> 16);
  int lo_word = static_cast<int>(value & 0xFFFF);
  lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
  ori(dst, dst, Operand(lo_word));
}
1431
// dst = src + value using a FIXED 2-instruction sequence; the nop in the
// short-immediate branch keeps both paths the same length for patching.
void Assembler::bitwise_add32(Register dst, Register src, int32_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (is_int16(value)) {
    addi(dst, src, Operand(value));
    nop();
  } else {
    int hi_word = static_cast<int>(value >> 16);
    int lo_word = static_cast<int>(value & 0xFFFF);
    // addic sign-extends its 16-bit operand; when the low halfword has its
    // sign bit set it contributes -0x10000, so pre-increment the high word
    // to compensate.
    if (lo_word & 0x8000) hi_word++;
    addis(dst, src, Operand(SIGN_EXT_IMM16(hi_word)));
    addic(dst, dst, Operand(SIGN_EXT_IMM16(lo_word)));
  }
}
1445
// Rewrites the instruction at buffer offset pc_offset to be
// "addi dst, dst, return_address_offset", using a scratch Assembler bound
// directly onto this assembler's buffer.
void Assembler::patch_pc_address(Register dst, int pc_offset,
                                 int return_address_offset) {
  DCHECK(is_int16(return_address_offset));
  Assembler patching_assembler(
      AssemblerOptions{},
      ExternalAssemblerBuffer(buffer_start_ + pc_offset, kInstrSize + kGap));
  patching_assembler.addi(dst, dst, Operand(return_address_offset));
}
1454
// Loads into dst the offset of `label` from the start of the code object.
// For unbound labels a 2-word placeholder is emitted and patched on bind.
void Assembler::mov_label_offset(Register dst, Label* label) {
  int position = link(label);
  if (label->is_bound()) {
    // Load the position of the label relative to the generated code object.
    mov(dst,
        Operand(position + InstructionStream::kHeaderSize - kHeapObjectTag));
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain. Encode the destination register in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));

    // When the label is bound, these instructions will be patched
    // with a 2 instruction mov sequence that will load the
    // destination register with the position of the label from the
    // beginning of the code.
    //
    // target_at extracts the link and target_at_put patches the instructions.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundMovLabelOffsetOpcode | (link & kImm26Mask));
    emit(dst.code());
  }
}
1481
// dst = base + (offset of `label` in the code) + delta. Unbound labels get
// a placeholder (2 or 3 words, depending on whether delta fits 22 bits)
// that is patched when the label is bound.
void Assembler::add_label_offset(Register dst, Register base, Label* label,
                                 int delta) {
  int position = link(label);
  if (label->is_bound()) {
    // dst = base + position + delta
    position += delta;
    bitwise_add32(dst, base, position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain. Encode the operands in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));
    BlockTrampolinePoolScope block_trampoline_pool(this);

    emit((is_int22(delta) ? kUnboundAddLabelOffsetOpcode
                          : kUnboundAddLabelLongOffsetOpcode) |
         (link & kImm26Mask));
    emit(dst.code() * B27 | base.code() * B22 | (delta & kImm22Mask));

    // Large deltas travel in an extra trailing word.
    if (!is_int22(delta)) {
      emit(delta);
    }
  }
}
1509
// Loads the (eventually absolute) address of `label` into dst, recording
// an encoded internal reference so EmitRelocations can fix it up.
void Assembler::mov_label_addr(Register dst, Label* label) {
  CheckBuffer();
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
  int position = link(label);
  if (label->is_bound()) {
    // Keep internal references relative until EmitRelocations.
    bitwise_mov(dst, position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain. Encode the destination register in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));

    // When the label is bound, these instructions will be patched
    // with a multi-instruction mov sequence that will load the
    // destination register with the address of the label.
    //
    // target_at extracts the link and target_at_put patches the instructions.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundMovLabelAddrOpcode | (link & kImm26Mask));
    emit(dst.code());
    DCHECK_GE(kMovInstructionsNoConstantPool, 2);
    // Pad to the full mov-sequence length so patching never shifts code.
    for (int i = 0; i < kMovInstructionsNoConstantPool - 2; i++) nop();
  }
}
1538
// Emits a pointer-sized data word holding the address of `label`
// (e.g. a jump table entry), fixed up during EmitRelocations.
void Assembler::emit_label_addr(Label* label) {
  CheckBuffer();
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  int position = link(label);
  if (label->is_bound()) {
    // Keep internal references relative until EmitRelocations.
    dp(position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));

    // When the label is bound, the instruction(s) will be patched
    // as a jump table entry containing the label address. target_at extracts
    // the link and target_at_put patches the instruction(s).
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundJumpTableEntryOpcode | (link & kImm26Mask));
    nop();
  }
}
1563
// Special register instructions

// Condition-register XOR of bits ba and bb into bt (crxor b,b,b clears b).
void Assembler::crxor(int bt, int ba, int bb) {
  emit(EXT1 | CRXOR | bt * B21 | ba * B16 | bb * B11);
}

// Condition-register equivalence (XNOR); creqv b,b,b sets bit b.
void Assembler::creqv(int bt, int ba, int bb) {
  emit(EXT1 | CREQV | bt * B21 | ba * B16 | bb * B11);
}
1572
// Moves to/from special-purpose registers. The constant shifted into the
// SPR field is the split-encoded SPR number (LR, CTR, XER respectively).

// dst = link register.
void Assembler::mflr(Register dst) {
  emit(EXT2 | MFSPR | dst.code() * B21 | 256 << 11);  // Ignore RC bit
}

// link register = src.
void Assembler::mtlr(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 256 << 11);  // Ignore RC bit
}

// count register = src.
void Assembler::mtctr(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 288 << 11);  // Ignore RC bit
}

// XER = src.
void Assembler::mtxer(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 32 << 11);
}
1588
// Copies the 4-bit FPSCR field containing `bit` into CR field cr.
void Assembler::mcrfs(CRegister cr, FPSCRBit bit) {
  DCHECK_LT(static_cast<int>(bit), 32);
  int bf = cr.code();
  int bfa = bit / CRWIDTH;  // FPSCR field index holding the requested bit.
  emit(EXT4 | MCRFS | bf * B23 | bfa * B18);
}
1595
// dst = entire condition register.
void Assembler::mfcr(Register dst) { emit(EXT2 | MFCR | dst.code() * B21); }

// Writes the CR fields selected by the FXM mask from src.
void Assembler::mtcrf(Register src, uint8_t FXM) {
  emit(MTCRF | src.code() * B21 | FXM * B12);
}
// Direct GPR <-> FPR moves via the VSX move instructions.

// dst(GPR) = full 64-bit contents of src(FPR).
void Assembler::mffprd(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRD | src.code() * B21 | dst.code() * B16);
}

// dst(GPR) = low 32 bits of src(FPR), zero-extended.
void Assembler::mffprwz(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRWZ | src.code() * B21 | dst.code() * B16);
}

// dst(FPR) = full 64-bit contents of src(GPR).
void Assembler::mtfprd(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRD | dst.code() * B21 | src.code() * B16);
}

// dst(FPR) = low 32 bits of src(GPR), zero-extended.
void Assembler::mtfprwz(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWZ | dst.code() * B21 | src.code() * B16);
}

// dst(FPR) = low 32 bits of src(GPR), sign-extended (algebraic).
void Assembler::mtfprwa(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWA | dst.code() * B21 | src.code() * B16);
}
1620
// Exception-generating instructions and debugging support.
// Stops with a non-negative code less than kNumOfWatchedStops support
// enabling/disabling and a counter feature. See simulator-ppc.h .
// Note: on real hardware the `code` argument does not reach the generated
// code here — only the trap (bkpt) is emitted, guarded by the condition.
void Assembler::stop(Condition cond, int32_t code, CRegister cr) {
  if (cond != al) {
    // Conditional stop: branch around the trap when the condition fails.
    Label skip;
    b(NegateCondition(cond), &skip, cr);
    bkpt(0);
    bind(&skip);
  } else {
    bkpt(0);
  }
}
1634
// Emits an unconditional trap. 0x7D821008 decodes as "tw 12, r2, r2"
// (trap if r2 >= r2, always true). The imm16 argument is not encoded.
void Assembler::bkpt(uint32_t imm16) { emit(0x7D821008); }
1636
// Data cache block flush at effective address ra + rb.
void Assembler::dcbf(Register ra, Register rb) {
  emit(EXT2 | DCBF | ra.code() * B16 | rb.code() * B11);
}

// Full storage barrier (heavyweight sync).
void Assembler::sync() { emit(EXT2 | SYNC); }

// Lightweight sync (SYNC with L=1); orders most memory accesses.
void Assembler::lwsync() { emit(EXT2 | SYNC | 1 * B21); }

// Instruction cache block invalidate at ra + rb.
void Assembler::icbi(Register ra, Register rb) {
  emit(EXT2 | ICBI | ra.code() * B16 | rb.code() * B11);
}

// Instruction synchronize: discard prefetched instructions.
void Assembler::isync() { emit(EXT1 | ISYNC); }
1650
// Floating point support

// D-form FP loads; RA must not be r0 (reads as zero in the base slot).
// The "u" variants write the effective address back to RA.

// Load floating-point double.
void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(ra != r0);
  CHECK(is_int16(offset));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFD | frt.code() * B21 | ra.code() * B16 | imm16);
}

// Load floating-point double with update.
void Assembler::lfdu(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(ra != r0);
  CHECK(is_int16(offset));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFDU | frt.code() * B21 | ra.code() * B16 | imm16);
}

// Load floating-point single (converted to double format in frt).
void Assembler::lfs(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFS | frt.code() * B21 | ra.code() * B16 | imm16);
}

// Load floating-point single with update.
void Assembler::lfsu(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFSU | frt.code() * B21 | ra.code() * B16 | imm16);
}
1692
// D-form FP stores; RA must not be r0. "u" variants update RA.

// Store floating-point double.
void Assembler::stfd(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFD | frs.code() * B21 | ra.code() * B16 | imm16);
}

// Store floating-point double with update.
void Assembler::stfdu(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFDU | frs.code() * B21 | ra.code() * B16 | imm16);
}

// Store floating-point single (rounds the double in frs).
void Assembler::stfs(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFS | frs.code() * B21 | ra.code() * B16 | imm16);
}

// Store floating-point single with update.
void Assembler::stfsu(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFSU | frs.code() * B21 | ra.code() * B16 | imm16);
}
1732
// Double-precision arithmetic (A-form).

// frt = fra - frb.
void Assembler::fsub(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FSUB, frt, fra, frb, rc);
}

// frt = fra + frb.
void Assembler::fadd(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FADD, frt, fra, frb, rc);
}

// frt = fra * frc (multiplier goes in the FRC field at B6).
void Assembler::fmul(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frc, RCBit rc) {
  emit(EXT4 | FMUL | frt.code() * B21 | fra.code() * B16 | frc.code() * B6 |
       rc);
}

// frt = |frb| with the sign of fra (copy sign).
void Assembler::fcpsgn(const DoubleRegister frt, const DoubleRegister fra,
                       const DoubleRegister frb, RCBit rc) {
  emit(EXT4 | FCPSGN | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       rc);
}

// frt = fra / frb.
void Assembler::fdiv(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FDIV, frt, fra, frb, rc);
}
1759
// Unordered floating-point compare of fra and frb into CR field cr.
void Assembler::fcmpu(const DoubleRegister fra, const DoubleRegister frb,
                      CRegister cr) {
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT4 | FCMPU | cr.code() * B23 | fra.code() * B16 | frb.code() * B11);
}

// FP register move: frt = frb.
void Assembler::fmr(const DoubleRegister frt, const DoubleRegister frb,
                    RCBit rc) {
  emit(EXT4 | FMR | frt.code() * B21 | frb.code() * B11 | rc);
}
1770
// Convert double to 32-bit integer word, rounding toward zero.
void Assembler::fctiwz(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIWZ | frt.code() * B21 | frb.code() * B11);
}

// Convert double to 32-bit integer word using the current rounding mode.
void Assembler::fctiw(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIW | frt.code() * B21 | frb.code() * B11);
}

// Convert double to unsigned 32-bit integer word, rounding toward zero.
void Assembler::fctiwuz(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIWUZ | frt.code() * B21 | frb.code() * B11);
}
1782
// Round-to-integer family, each producing a double-valued integer.

// Round to nearest (ties away from zero).
void Assembler::frin(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIN | frt.code() * B21 | frb.code() * B11 | rc);
}

// Round toward zero (truncate).
void Assembler::friz(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIZ | frt.code() * B21 | frb.code() * B11 | rc);
}

// Round toward +infinity (ceil).
void Assembler::frip(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIP | frt.code() * B21 | frb.code() * B11 | rc);
}

// Round toward -infinity (floor).
void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11 | rc);
}

// Round double to single precision.
void Assembler::frsp(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRSP | frt.code() * B21 | frb.code() * B11 | rc);
}
1807
// Integer-to-floating-point conversions (operand is an integer doubleword
// held in frb's bit pattern).

// Signed 64-bit integer to double.
void Assembler::fcfid(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
}

// Unsigned 64-bit integer to double.
void Assembler::fcfidu(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);
}

// Unsigned 64-bit integer to single precision.
void Assembler::fcfidus(const DoubleRegister frt, const DoubleRegister frb,
                        RCBit rc) {
  emit(EXT3 | FCFIDUS | frt.code() * B21 | frb.code() * B11 | rc);
}

// Signed 64-bit integer to single precision.
void Assembler::fcfids(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT3 | FCFIDS | frt.code() * B21 | frb.code() * B11 | rc);
}
1827
// Double to 64-bit integer conversions; "z" variants round toward zero,
// the others use the current FPSCR rounding mode.

// Double to signed 64-bit integer.
void Assembler::fctid(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FCTID | frt.code() * B21 | frb.code() * B11 | rc);
}

// Double to signed 64-bit integer, truncating.
void Assembler::fctidz(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCTIDZ | frt.code() * B21 | frb.code() * B11 | rc);
}

// Double to unsigned 64-bit integer.
void Assembler::fctidu(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCTIDU | frt.code() * B21 | frb.code() * B11 | rc);
}

// Double to unsigned 64-bit integer, truncating.
void Assembler::fctiduz(const DoubleRegister frt, const DoubleRegister frb,
                        RCBit rc) {
  emit(EXT4 | FCTIDUZ | frt.code() * B21 | frb.code() * B11 | rc);
}
1847
// Floating select: frt = (fra >= 0.0) ? frc : frb, branch-free.
void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frc, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FSEL | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}

// Floating negate: frt = -frb (sign-bit flip).
void Assembler::fneg(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FNEG | frt.code() * B21 | frb.code() * B11 | rc);
}
1859
// Clears FPSCR bit `bit`.
void Assembler::mtfsb0(FPSCRBit bit, RCBit rc) {
  DCHECK_LT(static_cast<int>(bit), 32);
  int bt = bit;
  emit(EXT4 | MTFSB0 | bt * B21 | rc);
}

// Sets FPSCR bit `bit`.
void Assembler::mtfsb1(FPSCRBit bit, RCBit rc) {
  DCHECK_LT(static_cast<int>(bit), 32);
  int bt = bit;
  emit(EXT4 | MTFSB1 | bt * B21 | rc);
}

// Writes the 4-bit immediate into FPSCR field bf.
void Assembler::mtfsfi(int bf, int immediate, RCBit rc) {
  emit(EXT4 | MTFSFI | bf * B23 | immediate * B12 | rc);
}
1875
// frt = FPSCR contents.
void Assembler::mffs(const DoubleRegister frt, RCBit rc) {
  emit(EXT4 | MFFS | frt.code() * B21 | rc);
}

// Writes FPSCR fields selected by the FLM mask from frb; L and W are the
// instruction's field-selection control bits.
void Assembler::mtfsf(const DoubleRegister frb, bool L, int FLM, bool W,
                      RCBit rc) {
  emit(EXT4 | MTFSF | frb.code() * B11 | W * B16 | FLM * B17 | L * B25 | rc);
}
1884
// fsqrt: Floating Square Root (double precision) — frt = sqrt(frb).
void Assembler::fsqrt(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FSQRT | frt.code() * B21 | frb.code() * B11 | rc);
}
1889
// fabs: Floating Absolute Value — frt = |frb|.
void Assembler::fabs(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FABS | frt.code() * B21 | frb.code() * B11 | rc);
}
1894
// fmadd: Floating Multiply-Add — frt = (fra * frc) + frb (fused, per the
// Power ISA). As with fsel, FRB is encoded at bits 11-15 and FRC at 6-10.
void Assembler::fmadd(const DoubleRegister frt, const DoubleRegister fra,
                      const DoubleRegister frc, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FMADD | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}
1901
// fmsub: Floating Multiply-Subtract — frt = (fra * frc) - frb (fused).
void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra,
                      const DoubleRegister frc, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FMSUB | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}
1908
// Vector instructions
// mfvsrd: Move From VSR Doubleword — copies doubleword 0 of the vector
// register rs into GPR ra. SX = 1 selects the upper half of the VSX register
// file (VSRs 32..63), which overlays the vector registers per the ISA.
void Assembler::mfvsrd(const Register ra, const Simd128Register rs) {
  int SX = 1;
  emit(MFVSRD | rs.code() * B21 | ra.code() * B16 | SX);
}
1914
// mfvsrwz: Move From VSR Word and Zero — copies a word from vector register
// rs into GPR ra, zero-extended. SX = 1 selects the VR-overlay half of the
// VSX register file.
void Assembler::mfvsrwz(const Register ra, const Simd128Register rs) {
  int SX = 1;
  emit(MFVSRWZ | rs.code() * B21 | ra.code() * B16 | SX);
}
1919
// mtvsrd: Move To VSR Doubleword — copies GPR ra into doubleword 0 of the
// vector register rt. TX = 1 selects the VR-overlay half of the VSX file.
void Assembler::mtvsrd(const Simd128Register rt, const Register ra) {
  int TX = 1;
  emit(MTVSRD | rt.code() * B21 | ra.code() * B16 | TX);
}
1924
// mtvsrdd: Move To VSR Double Doubleword — packs GPRs ra and rb into the two
// doublewords of vector register rt (Power9+).
void Assembler::mtvsrdd(const Simd128Register rt, const Register ra,
                        const Register rb) {
  int TX = 1;
  emit(MTVSRDD | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | TX);
}
1930
// lxvd: load a 128-bit vector into rt from the indexed address ra + rb.
// Only the X-form (register + register) addressing exists, hence the rb
// validity check.
void Assembler::lxvd(const Simd128Register rt, const MemOperand& src) {
  CHECK(src.rb().is_valid());
  int TX = 1;
  emit(LXVD | rt.code() * B21 | src.ra().code() * B16 | src.rb().code() * B11 |
       TX);
}
1937
// lxvx: load a 128-bit vector into rt from the indexed address ra + rb
// (byte-ordered form; Power9+). X-form only, hence the rb check.
void Assembler::lxvx(const Simd128Register rt, const MemOperand& src) {
  CHECK(src.rb().is_valid());
  int TX = 1;
  emit(LXVX | rt.code() * B21 | src.ra().code() * B16 | src.rb().code() * B11 |
       TX);
}
1944
// lxsdx: load a scalar doubleword into vector register rt from ra + rb.
void Assembler::lxsdx(const Simd128Register rt, const MemOperand& src) {
  CHECK(src.rb().is_valid());
  int TX = 1;
  emit(LXSDX | rt.code() * B21 | src.ra().code() * B16 | src.rb().code() * B11 |
       TX);
}
1951
// lxsibzx: load a scalar byte (zero-extended) into vector register rt from
// ra + rb (Power9+).
void Assembler::lxsibzx(const Simd128Register rt, const MemOperand& src) {
  CHECK(src.rb().is_valid());
  int TX = 1;
  emit(LXSIBZX | rt.code() * B21 | src.ra().code() * B16 |
       src.rb().code() * B11 | TX);
}
1958
// lxsihzx: load a scalar halfword (zero-extended) into vector register rt
// from ra + rb (Power9+).
void Assembler::lxsihzx(const Simd128Register rt, const MemOperand& src) {
  CHECK(src.rb().is_valid());
  int TX = 1;
  emit(LXSIHZX | rt.code() * B21 | src.ra().code() * B16 |
       src.rb().code() * B11 | TX);
}
1965
// lxsiwzx: load a scalar word (zero-extended) into vector register rt from
// ra + rb.
void Assembler::lxsiwzx(const Simd128Register rt, const MemOperand& src) {
  CHECK(src.rb().is_valid());
  int TX = 1;
  emit(LXSIWZX | rt.code() * B21 | src.ra().code() * B16 |
       src.rb().code() * B11 | TX);
}
1972
// stxsdx: store the scalar doubleword of vector register rs to ra + rb.
void Assembler::stxsdx(const Simd128Register rs, const MemOperand& dst) {
  CHECK(dst.rb().is_valid());
  int SX = 1;
  emit(STXSDX | rs.code() * B21 | dst.ra().code() * B16 |
       dst.rb().code() * B11 | SX);
}
1979
// stxsibx: store the low scalar byte of vector register rs to ra + rb
// (Power9+).
void Assembler::stxsibx(const Simd128Register rs, const MemOperand& dst) {
  CHECK(dst.rb().is_valid());
  int SX = 1;
  emit(STXSIBX | rs.code() * B21 | dst.ra().code() * B16 |
       dst.rb().code() * B11 | SX);
}
1986
// stxsihx: store the low scalar halfword of vector register rs to ra + rb
// (Power9+).
void Assembler::stxsihx(const Simd128Register rs, const MemOperand& dst) {
  CHECK(dst.rb().is_valid());
  int SX = 1;
  emit(STXSIHX | rs.code() * B21 | dst.ra().code() * B16 |
       dst.rb().code() * B11 | SX);
}
1993
// stxsiwx: store the scalar word of vector register rs to ra + rb.
void Assembler::stxsiwx(const Simd128Register rs, const MemOperand& dst) {
  CHECK(dst.rb().is_valid());
  int SX = 1;
  emit(STXSIWX | rs.code() * B21 | dst.ra().code() * B16 |
       dst.rb().code() * B11 | SX);
}
2000
// stxvd: store the 128-bit vector rt to the indexed address ra + rb.
void Assembler::stxvd(const Simd128Register rt, const MemOperand& dst) {
  CHECK(dst.rb().is_valid());
  int SX = 1;
  emit(STXVD | rt.code() * B21 | dst.ra().code() * B16 | dst.rb().code() * B11 |
       SX);
}
2007
// stxvx: store the 128-bit vector rt to ra + rb (byte-ordered form;
// Power9+).
void Assembler::stxvx(const Simd128Register rt, const MemOperand& dst) {
  CHECK(dst.rb().is_valid());
  int SX = 1;
  emit(STXVX | rt.code() * B21 | dst.ra().code() * B16 | dst.rb().code() * B11 |
       SX);
}
2014
// xxspltib: splat the 8-bit immediate into every byte lane of vector
// register rt (Power9+).
void Assembler::xxspltib(const Simd128Register rt, const Operand& imm) {
  int TX = 1;
  // The immediate must fit the 8-bit IMM8 field; the register code is masked
  // to the 5-bit T field (TX supplies the 6th VSX register-number bit).
  CHECK(is_uint8(imm.immediate()));
  emit(XXSPLTIB | (rt.code() & 0x1F) * B21 | (imm.immediate() & 0xFF) * B11 |
       TX);
}
2021
2022// Pseudo instructions.
2023void Assembler::nop(int type) {
2024 Register reg = r0;
2025 switch (type) {
2026 case NON_MARKING_NOP:
2027 reg = r0;
2028 break;
2029 case GROUP_ENDING_NOP:
2030 reg = r2;
2031 break;
2032 case DEBUG_BREAK_NOP:
2033 reg = r3;
2034 break;
2035 default:
2036 UNIMPLEMENTED();
2037 }
2038
2039 ori(reg, reg, Operand::Zero());
2040}
2041
2042bool Assembler::IsNop(Instr instr, int type) {
2043 int reg = 0;
2044 switch (type) {
2045 case NON_MARKING_NOP:
2046 reg = 0;
2047 break;
2048 case GROUP_ENDING_NOP:
2049 reg = 2;
2050 break;
2051 case DEBUG_BREAK_NOP:
2052 reg = 3;
2053 break;
2054 default:
2055 UNIMPLEMENTED();
2056 }
2057 return instr == (ORI | reg * B21 | reg * B16);
2058}
2059
// Enlarge the code buffer so that at least 'needed' additional bytes are
// available, relocating both the emitted code and the relocation info.
void Assembler::GrowBuffer(int needed) {
  DCHECK_EQ(buffer_start_, buffer_->start());

  // Compute new buffer size: double the buffer but cap the growth step at
  // 1 MB, then grow further if that still does not satisfy 'needed'.
  int old_size = buffer_->size();
  int new_size = std::min(2 * old_size, old_size + 1 * MB);
  int space = buffer_space() + (new_size - old_size);
  new_size += (space < needed) ? needed - space : 0;

  // Some internal data structures overflow for very large buffers,
  // they must ensure that kMaximalBufferSize is not too large.
  if (new_size > kMaximalBufferSize) {
    V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
  }

  // Set up new buffer.
  std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
  DCHECK_EQ(new_size, new_buffer->size());
  uint8_t* new_start = new_buffer->start();

  // Copy the data. Code grows forward from the start of the buffer while
  // reloc info is written backwards from the end, so the two regions move by
  // different deltas: pc_delta for code, rc_delta for reloc info.
  intptr_t pc_delta = new_start - buffer_start_;
  intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
  size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
  MemMove(new_start, buffer_start_, pc_offset());
  MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          reloc_size);

  // Switch buffers.
  buffer_ = std::move(new_buffer);
  buffer_start_ = new_start;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no need
  // to relocate any emitted relocation entries.
}
2099
2100void Assembler::db(uint8_t data) {
2101 CheckBuffer();
2102 *reinterpret_cast<uint8_t*>(pc_) = data;
2103 pc_ += sizeof(uint8_t);
2104}
2105
2106void Assembler::dd(uint32_t data) {
2107 CheckBuffer();
2108 *reinterpret_cast<uint32_t*>(pc_) = data;
2109 pc_ += sizeof(uint32_t);
2110}
2111
2112void Assembler::dq(uint64_t value) {
2113 CheckBuffer();
2114 *reinterpret_cast<uint64_t*>(pc_) = value;
2115 pc_ += sizeof(uint64_t);
2116}
2117
2118void Assembler::dp(uintptr_t data) {
2119 CheckBuffer();
2120 *reinterpret_cast<uintptr_t*>(pc_) = data;
2121 pc_ += sizeof(uintptr_t);
2122}
2123
2124void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
2125 if (!ShouldRecordRelocInfo(rmode)) return;
2126 DeferredRelocInfo rinfo(pc_offset(), rmode, data);
2127 relocations_.push_back(rinfo);
2128}
2129
2130void Assembler::EmitRelocations() {
2131 EnsureSpaceFor(relocations_.size() * kMaxRelocSize);
2132
2133 for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
2134 it != relocations_.end(); it++) {
2135 RelocInfo::Mode rmode = it->rmode();
2136 Address pc = reinterpret_cast<Address>(buffer_start_) + it->position();
2137 RelocInfo rinfo(pc, rmode, it->data());
2138
2139 // Fix up internal references now that they are guaranteed to be bound.
2140 if (RelocInfo::IsInternalReference(rmode)) {
2141 // Jump table entry
2142 intptr_t pos = static_cast<intptr_t>(Memory<Address>(pc));
2143 Memory<Address>(pc) = reinterpret_cast<Address>(buffer_start_) + pos;
2144 } else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
2145 // mov sequence
2146 intptr_t pos = static_cast<intptr_t>(target_address_at(pc, kNullAddress));
2147 set_target_address_at(pc, 0,
2148 reinterpret_cast<Address>(buffer_start_) + pos,
2149 nullptr, SKIP_ICACHE_FLUSH);
2150 }
2151
2152 reloc_info_writer.Write(&rinfo);
2153 }
2154}
2155
2156void Assembler::BlockTrampolinePoolFor(int instructions) {
2157 BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
2158}
2159
void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if (trampoline_pool_blocked_nesting_ > 0) return;
  if (pc_offset() < no_trampoline_pool_before_) {
    // Pool emission is currently blocked; re-check once the protected
    // region ends.
    next_trampoline_check_ = no_trampoline_pool_before_;
    return;
  }

  DCHECK(!trampoline_emitted_);
  if (tracked_branch_count_ > 0) {
    // One pool slot (one instruction) per tracked branch.
    int size = tracked_branch_count_ * kInstrSize;

    // As we are only going to emit trampoline once, we need to prevent any
    // further emission.
    trampoline_emitted_ = true;
    next_trampoline_check_ = kMaxInt;

    // First we emit jump, then we emit trampoline pool.
    // The leading branch lets straight-line execution skip over the pool.
    b(size + kInstrSize, LeaveLK);
    for (int i = size; i > 0; i -= kInstrSize) {
      // Each slot starts as a branch past the pool; presumably rewritten to
      // the real far target when a branch is bound to it (see Trampoline).
      b(i, LeaveLK);
    }

    trampoline_ = Trampoline(pc_offset() - size, tracked_branch_count_);
  }
}
2190
// An assembler that patches exactly 'instructions' instructions in place at
// 'address', using an external (non-growing) buffer. The extra kGap bytes
// are slack required by the Assembler base class buffer handling.
PatchingAssembler::PatchingAssembler(const AssemblerOptions& options,
                                     uint8_t* address, int instructions)
    : Assembler(options, ExternalAssemblerBuffer(
                             address, instructions * kInstrSize + kGap)) {
  // No relocation info may be produced while patching: the writer must still
  // be at the (empty) end of the buffer.
  DCHECK_EQ(reloc_info_writer.pos(), buffer_start_ + buffer_->size());
}
2197
PatchingAssembler::~PatchingAssembler() {
  // Check that the code was patched as expected: exactly the declared number
  // of instructions was emitted (pc_ sits at the start of the kGap slack)
  // and no relocation info was produced.
  DCHECK_EQ(pc_, buffer_start_ + buffer_->size() - kGap);
  DCHECK_EQ(reloc_info_writer.pos(), buffer_start_ + buffer_->size());
}
2203
2204} // namespace internal
2205} // namespace v8
2206
2207#endif // V8_TARGET_ARCH_PPC64
SourcePosition pos
static const int kUnknownCacheLineSize
Definition cpu.h:73
@ kPPCPower9
Definition cpu.h:67
@ kPPCPower10
Definition cpu.h:67
@ kPPCPower8
Definition cpu.h:67
static V8_INLINE bool IsConstantPoolLoadStart(Address pc, ConstantPoolEntry::Access *access=nullptr)
static V8_INLINE Address target_address_at(Address pc, Address constant_pool)
static bool IsSupported(CpuFeature f)
static bool supports_wasm_simd_128_
static unsigned supported_
static unsigned icache_line_size_
static void ProbeImpl(bool cross_compile)
static Operand EmbeddedNumber(double number)
union v8::internal::Operand::Value value_
RelocInfo::Mode rmode_
V8_INLINE Operand(int32_t immediate, RelocInfo::Mode rmode=RelocInfo::NO_INFO)
static const int kApplyMask
Definition reloc-info.h:369
uint32_t wasm_call_tag() const
static constexpr int ModeMask(Mode mode)
Definition reloc-info.h:272
static constexpr Tagged< Smi > FromInt(int value)
Definition smi.h:38
Operand const offset_
base::OwnedVector< uint8_t > buffer_
Definition assembler.cc:111
#define V8_EMBEDDED_CONSTANT_POOL_BOOL
Definition globals.h:81
#define CRWIDTH
#define SIGN_EXT_IMM16(imm)
#define SIGN_EXT_IMM22(imm)
#define SIGN_EXT_IMM26(imm)
Label label
int32_t offset
DirectHandle< JSReceiver > options
Instruction * instr
ZoneVector< RpoNumber > & result
LiftoffRegister reg
int pc_offset
int position
Definition liveedit.cc:290
int m
Definition mul-fft.cc:294
int r
Definition mul-fft.cc:298
STL namespace.
int int32_t
Definition unicode.cc:40
unsigned short uint16_t
Definition unicode.cc:39
uintptr_t Address
Definition memory.h:13
constexpr Address kNullAddress
uint32_t WasmInterpreterRuntime int64_t r0
constexpr Register no_reg
V8_INLINE IndirectHandle< T > handle(Tagged< T > object, Isolate *isolate)
Definition handles-inl.h:72
constexpr int B21
constexpr int B16
bool DoubleToSmiInteger(double value, int *smi_int_value)
void PrintF(const char *format,...)
Definition utils.cc:39
constexpr int kImm16Mask
constexpr int L
V8_EXPORT_PRIVATE void MemMove(void *dest, const void *src, size_t size)
Definition memcopy.h:189
Condition NegateCondition(Condition cond)
constexpr bool is_intn(int64_t x, unsigned n)
Definition utils.h:568
const int kHeapObjectTag
Definition v8-internal.h:72
V8_EXPORT_PRIVATE FlagValues v8_flags
Register ToRegister(int num)
constexpr Register r11
static unsigned CpuFeaturesImpliedByCompiler()
constexpr uint8_t kInstrSize
const int kOpcodeMask
static constexpr Address kNullAddress
Definition v8-internal.h:53
constexpr int kMaxInt
Definition globals.h:374
std::unique_ptr< AssemblerBuffer > ExternalAssemblerBuffer(void *start, int size)
Definition assembler.cc:161
constexpr MiscInstructionsBits74 BX
const int kEndOfChain
constexpr int kNumRegisters
#define CHECK_GE(lhs, rhs)
#define CHECK(condition)
Definition logging.h:124
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define CHECK_NE(lhs, rhs)
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define UNIMPLEMENTED()
Definition logging.h:66
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define V8PRIdPTR
Definition macros.h:332
#define V8PRIxPTR
Definition macros.h:331
std::unique_ptr< ValueMirror > value