v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
assembler-s390.cc
Go to the documentation of this file.
1// Copyright (c) 1994-2006 Sun Microsystems Inc.
2// All Rights Reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions
6// are met:
7//
8// - Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// - Redistribution in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the
14// distribution.
15//
16// - Neither the name of Sun Microsystems or the names of contributors may
17// be used to endorse or promote products derived from this software without
18// specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31// OF THE POSSIBILITY OF SUCH DAMAGE.
32
33// The original source code covered by the above license has been
34// modified significantly by Google Inc.
35// Copyright 2014 the V8 project authors. All rights reserved.
36
38#include <set>
39#include <string>
40
41#if V8_TARGET_ARCH_S390X
42
43#if V8_HOST_ARCH_S390X && !V8_OS_ZOS
44#include <elf.h> // Required for auxv checks for STFLE support
45#include <sys/auxv.h>
46#endif
47
48#include "src/base/bits.h"
49#include "src/base/cpu.h"
53
54namespace v8 {
55namespace internal {
56
// Get the CPU features enabled by the build.
static unsigned CpuFeaturesImpliedByCompiler() {
  // No S390 feature is implied purely by compiler flags in this build.
  return 0u;
}
62
63static bool supportsCPUFeature(const char* feature) {
64#if V8_OS_ZOS
65 // TODO(gabylb): zos - use cpu_init() and cpu_supports() to test support of
66 // z/OS features when the current compiler supports them.
67 // Currently the only feature to be checked is Vector Extension Facility
68 // ("vector128" on z/OS, "vx" on LoZ) - hence the assert in case that changed.
69 assert(strcmp(feature, "vx") == 0);
70 return __is_vxf_available();
71#else
72 static std::set<std::string>& features = *new std::set<std::string>();
73 static std::set<std::string>& all_available_features =
74 *new std::set<std::string>({"iesan3", "zarch", "stfle", "msa", "ldisp",
75 "eimm", "dfp", "etf3eh", "highgprs", "te",
76 "vx"});
77 if (features.empty()) {
78#if V8_HOST_ARCH_S390X
79
80#ifndef HWCAP_S390_VX
81#define HWCAP_S390_VX 2048
82#endif
83#define CHECK_AVAILABILITY_FOR(mask, value) \
84 if (f & mask) features.insert(value);
85
86 // initialize feature vector
87 uint64_t f = getauxval(AT_HWCAP);
88 CHECK_AVAILABILITY_FOR(HWCAP_S390_ESAN3, "iesan3")
89 CHECK_AVAILABILITY_FOR(HWCAP_S390_ZARCH, "zarch")
90 CHECK_AVAILABILITY_FOR(HWCAP_S390_STFLE, "stfle")
91 CHECK_AVAILABILITY_FOR(HWCAP_S390_MSA, "msa")
92 CHECK_AVAILABILITY_FOR(HWCAP_S390_LDISP, "ldisp")
93 CHECK_AVAILABILITY_FOR(HWCAP_S390_EIMM, "eimm")
94 CHECK_AVAILABILITY_FOR(HWCAP_S390_DFP, "dfp")
95 CHECK_AVAILABILITY_FOR(HWCAP_S390_ETF3EH, "etf3eh")
96 CHECK_AVAILABILITY_FOR(HWCAP_S390_HIGH_GPRS, "highgprs")
97 CHECK_AVAILABILITY_FOR(HWCAP_S390_TE, "te")
98 CHECK_AVAILABILITY_FOR(HWCAP_S390_VX, "vx")
99#else
100 // import all features
101 features.insert(all_available_features.begin(),
102 all_available_features.end());
103#endif
104 }
105 USE(all_available_features);
106 return features.find(feature) != features.end();
107#endif // !V8_OS_ZOS
108}
109
110#undef CHECK_AVAILABILITY_FOR
111#undef HWCAP_S390_VX
112
// Check whether Store Facility STFLE instruction is available on the platform.
// Instruction returns a bit vector of the enabled hardware facilities.
static bool supportsSTFLE() {
#if V8_OS_ZOS
  return __is_stfle_available();
#elif V8_HOST_ARCH_S390X
  static bool read_tried = false;
  static uint32_t auxv_hwcap = 0;

  if (!read_tried) {
    // Open the AUXV (auxiliary vector) pseudo-file
    int fd = open("/proc/self/auxv", O_RDONLY);

    read_tried = true;
    if (fd != -1) {
      static Elf64_auxv_t buffer[16];
      Elf64_auxv_t* auxv_element;
      int bytes_read = 0;
      for (;;) {
        // Read a chunk of the AUXV
        bytes_read = read(fd, buffer, sizeof(buffer));
        // Stop on error (-1) and, crucially, on EOF (0). The previous
        // `while (bytes_read >= 0)` loop would spin forever re-reading zero
        // bytes once the file was exhausted without hitting AT_HWCAP/AT_NULL.
        if (bytes_read <= 0) break;
        // Locate and read the platform field of AUXV if it is in the chunk.
        // The end-of-data bound must be computed in bytes: bytes_read is a
        // byte count, whereas the old comparison used Elf64_auxv_t-sized
        // pointer arithmetic on both sides and walked past the data read.
        for (auxv_element = buffer;
             reinterpret_cast<char*>(auxv_element + 1) <=
                     reinterpret_cast<char*>(buffer) + bytes_read &&
                 auxv_element->a_type != AT_NULL;
             auxv_element++) {
          // We are looking for HWCAP entry in AUXV to search for STFLE support
          if (auxv_element->a_type == AT_HWCAP) {
            /* Note: Both auxv_hwcap and buffer are static */
            auxv_hwcap = auxv_element->a_un.a_val;
            goto done_reading;
          }
        }
      }
    done_reading:
      close(fd);
    }
  }

  // Did not find result
  if (0 == auxv_hwcap) {
    return false;
  }

  // HWCAP_S390_STFLE is defined to be 4 in include/asm/elf.h. Currently
  // hardcoded in case that include file does not exist.
  const uint32_t _HWCAP_S390_STFLE = 4;
  return (auxv_hwcap & _HWCAP_S390_STFLE);
#else
  // STFLE is not available on non-s390 hosts
  return false;
#endif
}
166
168#if V8_ENABLE_WEBASSEMBLY
169 return CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1);
170#else
171 return false;
172#endif // V8_ENABLE_WEBASSEMBLY
173}
174
175void CpuFeatures::ProbeImpl(bool cross_compile) {
177 icache_line_size_ = 256;
178
179 // Only use statically determined features for cross compile (snapshot).
180 if (cross_compile) return;
181
182#ifdef DEBUG
183 initialized_ = true;
184#endif
185
186 static bool performSTFLE = supportsSTFLE();
187
188// Need to define host, as we are generating inlined S390 assembly to test
189// for facilities.
190#if V8_HOST_ARCH_S390X
191 if (performSTFLE) {
192 // STFLE D(B) requires:
193 // GPR0 to specify # of double words to update minus 1.
194 // i.e. GPR0 = 0 for 1 doubleword
195 // D(B) to specify to memory location to store the facilities bits
196 // The facilities we are checking for are:
197 // Bit 45 - Distinct Operands for instructions like ARK, SRK, etc.
198 // As such, we require only 1 double word
199 int64_t facilities[3] = {0L};
200#if V8_OS_ZOS
201 int64_t reg0 = 2;
202 asm volatile(" stfle %0" : "=m"(facilities), __ZL_NR("+", r0)(reg0)::"cc");
203#else
204 int16_t reg0;
205 // LHI sets up GPR0
206 // STFLE is specified as .insn, as opcode is not recognized.
207 // We register the instructions kill r0 (LHI) and the CC (STFLE).
208 asm volatile(
209 "lhi %%r0,2\n"
210 ".insn s,0xb2b00000,%0\n"
211 : "=Q"(facilities), "=r"(reg0)
212 :
213 : "cc", "r0");
214#endif // V8_OS_ZOS
215
216 uint64_t one = static_cast<uint64_t>(1);
217 // Test for Distinct Operands Facility - Bit 45
218 if (facilities[0] & (one << (63 - 45))) {
219 supported_ |= (1u << DISTINCT_OPS);
220 }
221 // Test for General Instruction Extension Facility - Bit 34
222 if (facilities[0] & (one << (63 - 34))) {
223 supported_ |= (1u << GENERAL_INSTR_EXT);
224 }
225 // Test for Floating Point Extension Facility - Bit 37
226 if (facilities[0] & (one << (63 - 37))) {
227 supported_ |= (1u << FLOATING_POINT_EXT);
228 }
229 // Test for Vector Facility - Bit 129
230 if (facilities[2] & (one << (63 - (129 - 128))) &&
231 supportsCPUFeature("vx")) {
232 supported_ |= (1u << VECTOR_FACILITY);
233 }
234 // Test for Vector Enhancement Facility 1 - Bit 135
235 if (facilities[2] & (one << (63 - (135 - 128))) &&
236 supportsCPUFeature("vx")) {
237 supported_ |= (1u << VECTOR_ENHANCE_FACILITY_1);
238 }
239 // Test for Vector Enhancement Facility 2 - Bit 148
240 if (facilities[2] & (one << (63 - (148 - 128))) &&
241 supportsCPUFeature("vx")) {
242 supported_ |= (1u << VECTOR_ENHANCE_FACILITY_2);
243 }
244 // Test for Miscellaneous Instruction Extension Facility - Bit 58
245 if (facilities[0] & (1lu << (63 - 58))) {
246 supported_ |= (1u << MISC_INSTR_EXT2);
247 }
248 }
249#else
250 // All distinct ops instructions can be simulated
251 supported_ |= (1u << DISTINCT_OPS);
252 // RISBG can be simulated
253 supported_ |= (1u << GENERAL_INSTR_EXT);
254 supported_ |= (1u << FLOATING_POINT_EXT);
255 supported_ |= (1u << MISC_INSTR_EXT2);
256 USE(performSTFLE); // To avoid assert
257 USE(supportsCPUFeature);
258 supported_ |= (1u << VECTOR_FACILITY);
259 supported_ |= (1u << VECTOR_ENHANCE_FACILITY_1);
260 supported_ |= (1u << VECTOR_ENHANCE_FACILITY_2);
261#endif
262 supported_ |= (1u << FPU);
263
264 // Set a static value on whether Simd is supported.
265 // This variable is only used for certain archs to query SupportWasmSimd128()
266 // at runtime in builtins using an extern ref. Other callers should use
267 // CpuFeatures::SupportWasmSimd128().
269}
270
272 const char* s390_arch = "s390x";
273 PrintF("target %s\n", s390_arch);
274}
275
277 PrintF("FPU=%d\n", CpuFeatures::IsSupported(FPU));
278 PrintF("FPU_EXT=%d\n", CpuFeatures::IsSupported(FLOATING_POINT_EXT));
279 PrintF("GENERAL_INSTR=%d\n", CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
280 PrintF("DISTINCT_OPS=%d\n", CpuFeatures::IsSupported(DISTINCT_OPS));
281 PrintF("VECTOR_FACILITY=%d\n", CpuFeatures::IsSupported(VECTOR_FACILITY));
282 PrintF("VECTOR_ENHANCE_FACILITY_1=%d\n",
283 CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1));
284 PrintF("VECTOR_ENHANCE_FACILITY_2=%d\n",
285 CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2));
286 PrintF("MISC_INSTR_EXT2=%d\n", CpuFeatures::IsSupported(MISC_INSTR_EXT2));
287}
288
289Register ToRegister(int num) {
290 DCHECK(num >= 0 && num < kNumRegisters);
291 const Register kRegisters[] = {r0, r1, r2, r3, r4, r5, r6, r7,
292 r8, r9, r10, fp, ip, r13, r14, sp};
293 return kRegisters[num];
294}
295
296// -----------------------------------------------------------------------------
297// Implementation of RelocInfo
298
299const int RelocInfo::kApplyMask =
302
304 // The deserializer needs to know whether a pointer is specially
305 // coded. Being specially coded on S390 means that it is an iihf/iilf
306 // instruction sequence, and that is always the case inside code
307 // objects.
308 return true;
309}
310
311bool RelocInfo::IsInConstantPool() { return false; }
312
313uint32_t RelocInfo::wasm_call_tag() const {
315 return static_cast<uint32_t>(
317}
318
319// -----------------------------------------------------------------------------
320// Implementation of Operand and MemOperand
321// See assembler-s390-inl.h for inlined constructors
322
323Operand::Operand(Handle<HeapObject> handle) {
324 AllowHandleDereference using_location;
325 rm_ = no_reg;
326 value_.immediate = static_cast<intptr_t>(handle.address());
328}
329
330Operand Operand::EmbeddedNumber(double value) {
331 int32_t smi;
332 if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
334 result.is_heap_number_request_ = true;
335 result.value_.heap_number_request = HeapNumberRequest(value);
336 return result;
337}
338
339MemOperand::MemOperand(Register rn, int32_t offset)
340 : baseRegister(rn), indexRegister(r0), offset_(offset) {}
341
342MemOperand::MemOperand(Register rx, Register rb, int32_t offset)
343 : baseRegister(rb), indexRegister(rx), offset_(offset) {}
344
345void Assembler::AllocateAndInstallRequestedHeapNumbers(LocalIsolate* isolate) {
346 DCHECK_IMPLIES(isolate == nullptr, heap_number_requests_.empty());
347 for (auto& request : heap_number_requests_) {
348 Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
349 Handle<HeapObject> object =
350 isolate->factory()->NewHeapNumber<AllocationType::kOld>(
351 request.heap_number());
352 set_target_address_at(pc, kNullAddress, object.address(), nullptr,
353 SKIP_ICACHE_FLUSH);
354 }
355}
356
357// -----------------------------------------------------------------------------
358// Specific instructions, constants, and masks.
359
360Assembler::Assembler(const AssemblerOptions& options,
361 std::unique_ptr<AssemblerBuffer> buffer)
362 : AssemblerBase(options, std::move(buffer)),
363 scratch_register_list_(DefaultTmpList()),
364 scratch_double_register_list_(DefaultFPTmpList()) {
365 reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
366 last_bound_pos_ = 0;
367 relocations_.reserve(128);
368}
369
370void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
371 GetCode(isolate->main_thread_local_isolate(), desc);
372}
373void Assembler::GetCode(LocalIsolate* isolate, CodeDesc* desc,
374 SafepointTableBuilderBase* safepoint_table_builder,
375 int handler_table_offset) {
376 // As a crutch to avoid having to add manual Align calls wherever we use a
377 // raw workflow to create Code objects (mostly in tests), add another Align
378 // call here. It does no harm - the end of the Code object is aligned to the
379 // (larger) kCodeAlignment anyways.
380 // TODO(jgruber): Consider moving responsibility for proper alignment to
381 // metadata table builders (safepoint, handler, constant pool, code
382 // comments).
383 DataAlign(InstructionStream::kMetadataAlignment);
384
385 EmitRelocations();
386
387 int code_comments_size = WriteCodeComments();
388
389 AllocateAndInstallRequestedHeapNumbers(isolate);
390
391 // Set up code descriptor.
392 // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
393 // this point to make CodeDesc initialization less fiddly.
394
395 static constexpr int kConstantPoolSize = 0;
396 static constexpr int kBuiltinJumpTableInfoSize = 0;
397 const int instruction_size = pc_offset();
398 const int builtin_jump_table_info_offset =
399 instruction_size - kBuiltinJumpTableInfoSize;
400 const int code_comments_offset =
401 builtin_jump_table_info_offset - code_comments_size;
402 const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
403 const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
404 ? constant_pool_offset
405 : handler_table_offset;
406 const int safepoint_table_offset =
407 (safepoint_table_builder == kNoSafepointTable)
408 ? handler_table_offset2
409 : safepoint_table_builder->safepoint_table_offset();
410 const int reloc_info_offset =
411 static_cast<int>(reloc_info_writer.pos() - buffer_->start());
412 CodeDesc::Initialize(desc, this, safepoint_table_offset,
413 handler_table_offset2, constant_pool_offset,
414 code_comments_offset, builtin_jump_table_info_offset,
415 reloc_info_offset);
416}
417
418void Assembler::Align(int m) {
419 DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
420 while ((pc_offset() & (m - 1)) != 0) {
421 nop(0);
422 }
423}
424
425void Assembler::CodeTargetAlign() { Align(8); }
426
427Condition Assembler::GetCondition(Instr instr) {
428 switch (instr & kCondMask) {
429 case BT:
430 return eq;
431 case BF:
432 return ne;
433 default:
435 }
436}
437
438// This code assumes a FIXED_SEQUENCE for 64bit loads (iihf/iilf)
439bool Assembler::Is64BitLoadIntoIP(SixByteInstr instr1, SixByteInstr instr2) {
440 // Check the instructions are the iihf/iilf load into ip
441 return (((instr1 >> 32) == 0xC0C8) && ((instr2 >> 32) == 0xC0C9));
442}
443
444// Labels refer to positions in the (to be) generated code.
445// There are bound, linked, and unused labels.
446//
447// Bound labels refer to known positions in the already
448// generated code. pos() is the position the label refers to.
449//
450// Linked labels refer to unknown positions in the code
451// to be generated; pos() is the position of the last
452// instruction using the label.
453
454// The link chain is terminated by a negative code position (must be aligned)
455const int kEndOfChain = -4;
456
457// Returns the target address of the relative instructions, typically
458// of the form: pos + imm (where immediate is in # of halfwords for
459// BR* and LARL).
460int Assembler::target_at(int pos) {
461 SixByteInstr instr = instr_at(pos);
462 // check which type of branch this is 16 or 26 bit offset
463 Opcode opcode = Instruction::S390OpcodeValue(buffer_start_ + pos);
464
465 if (BRC == opcode || BRCT == opcode || BRCTG == opcode || BRXH == opcode) {
466 int16_t imm16 = SIGN_EXT_IMM16((instr & kImm16Mask));
467 imm16 <<= 1; // immediate is in # of halfwords
468 if (imm16 == 0) return kEndOfChain;
469 return pos + imm16;
470 } else if (LLILF == opcode || BRCL == opcode || LARL == opcode ||
471 BRASL == opcode || LGRL == opcode) {
472 int32_t imm32 =
473 static_cast<int32_t>(instr & (static_cast<uint64_t>(0xFFFFFFFF)));
474 if (LLILF != opcode)
475 imm32 <<= 1; // BR* + LARL treat immediate in # of halfwords
476 if (imm32 == 0) return kEndOfChain;
477 return pos + imm32;
478 } else if (BRXHG == opcode) {
479 // offset is in bits 16-31 of 48 bit instruction
480 instr = instr >> 16;
481 int16_t imm16 = SIGN_EXT_IMM16((instr & kImm16Mask));
482 imm16 <<= 1; // immediate is in # of halfwords
483 if (imm16 == 0) return kEndOfChain;
484 return pos + imm16;
485 }
486
487 // Unknown condition
488 DCHECK(false);
489 return -1;
490}
491
492// Update the target address of the current relative instruction.
493void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
494 SixByteInstr instr = instr_at(pos);
495 Opcode opcode = Instruction::S390OpcodeValue(buffer_start_ + pos);
496
497 if (is_branch != nullptr) {
498 *is_branch =
499 (opcode == BRC || opcode == BRCT || opcode == BRCTG || opcode == BRCL ||
500 opcode == BRASL || opcode == BRXH || opcode == BRXHG);
501 }
502
503 if (BRC == opcode || BRCT == opcode || BRCTG == opcode || BRXH == opcode) {
504 int16_t imm16 = target_pos - pos;
505 instr &= (~0xFFFF);
506 DCHECK(is_int16(imm16));
507 instr_at_put<FourByteInstr>(pos, instr | (imm16 >> 1));
508 return;
509 } else if (BRCL == opcode || LARL == opcode || BRASL == opcode ||
510 LGRL == opcode) {
511 // Immediate is in # of halfwords
512 int32_t imm32 = target_pos - pos;
513 instr &= (~static_cast<uint64_t>(0xFFFFFFFF));
514 instr_at_put<SixByteInstr>(pos, instr | (imm32 >> 1));
515 return;
516 } else if (LLILF == opcode) {
517 DCHECK(target_pos == kEndOfChain || target_pos >= 0);
518 // Emitted label constant, not part of a branch.
519 // Make label relative to InstructionStream pointer of generated
520 // InstructionStream object.
521 int32_t imm32 =
522 target_pos + (InstructionStream::kHeaderSize - kHeapObjectTag);
523 instr &= (~static_cast<uint64_t>(0xFFFFFFFF));
524 instr_at_put<SixByteInstr>(pos, instr | imm32);
525 return;
526 } else if (BRXHG == opcode) {
527 // Immediate is in bits 16-31 of 48 bit instruction
528 int32_t imm16 = target_pos - pos;
529 instr &= (0xFFFF0000FFFF); // clear bits 16-31
530 imm16 &= 0xFFFF; // clear high halfword
531 imm16 <<= 16;
532 // Immediate is in # of halfwords
533 instr_at_put<SixByteInstr>(pos, instr | (imm16 >> 1));
534 return;
535 }
536 DCHECK(false);
537}
538
539// Returns the maximum number of bits given instruction can address.
540int Assembler::max_reach_from(int pos) {
541 Opcode opcode = Instruction::S390OpcodeValue(buffer_start_ + pos);
542 // Check which type of instr. In theory, we can return
543 // the values below + 1, given offset is # of halfwords
544 if (BRC == opcode || BRCT == opcode || BRCTG == opcode || BRXH == opcode ||
545 BRXHG == opcode) {
546 return 16;
547 } else if (LLILF == opcode || BRCL == opcode || LARL == opcode ||
548 BRASL == opcode || LGRL == opcode) {
549 return 31; // Using 31 as workaround instead of 32 as
550 // is_intn(x,32) doesn't work on 32-bit platforms.
551 // llilf: Emitted label constant, not part of
552 // a branch (regexp PushBacktrack).
553 }
554 DCHECK(false);
555 return 16;
556}
557
558void Assembler::bind_to(Label* L, int pos) {
559 DCHECK(0 <= pos && pos <= pc_offset()); // must have a valid binding position
560 bool is_branch = false;
561 while (L->is_linked()) {
562 int fixup_pos = L->pos();
563#ifdef DEBUG
564 int32_t offset = pos - fixup_pos;
565 int maxReach = max_reach_from(fixup_pos);
566#endif
567 next(L); // call next before overwriting link with target at fixup_pos
568 DCHECK(is_intn(offset, maxReach));
569 target_at_put(fixup_pos, pos, &is_branch);
570 }
571 L->bind_to(pos);
572
573 // Keep track of the last bound label so we don't eliminate any instructions
574 // before a bound label.
575 if (pos > last_bound_pos_) last_bound_pos_ = pos;
576}
577
578void Assembler::bind(Label* L) {
579 DCHECK(!L->is_bound()); // label can only be bound once
580 bind_to(L, pc_offset());
581}
582
583void Assembler::next(Label* L) {
584 DCHECK(L->is_linked());
585 int link = target_at(L->pos());
586 if (link == kEndOfChain) {
587 L->Unuse();
588 } else {
589 DCHECK_GE(link, 0);
590 L->link_to(link);
591 }
592}
593
594int Assembler::link(Label* L) {
595 int position;
596 if (L->is_bound()) {
597 position = L->pos();
598 } else {
599 if (L->is_linked()) {
600 position = L->pos(); // L's link
601 } else {
602 // was: target_pos = kEndOfChain;
603 // However, using self to mark the first reference
604 // should avoid most instances of branch offset overflow. See
605 // target_at() for where this is converted back to kEndOfChain.
607 }
608 L->link_to(pc_offset());
609 }
610
611 return position;
612}
613
614void Assembler::load_label_offset(Register r1, Label* L) {
615 int target_pos;
616 int constant;
617 if (L->is_bound()) {
618 target_pos = L->pos();
619 constant = target_pos + (InstructionStream::kHeaderSize - kHeapObjectTag);
620 } else {
621 if (L->is_linked()) {
622 target_pos = L->pos(); // L's link
623 } else {
624 // was: target_pos = kEndOfChain;
625 // However, using branch to self to mark the first reference
626 // should avoid most instances of branch offset overflow. See
627 // target_at() for where this is converted back to kEndOfChain.
628 target_pos = pc_offset();
629 }
630 L->link_to(pc_offset());
631
632 constant = target_pos - pc_offset();
633 }
634 llilf(r1, Operand(constant));
635}
636
637// Pseudo op - branch on condition
638void Assembler::branchOnCond(Condition c, int branch_offset, bool is_bound,
639 bool force_long_branch) {
640 int offset_in_halfwords = branch_offset / 2;
641 if (is_bound && is_int16(offset_in_halfwords) && !force_long_branch) {
642 brc(c, Operand(offset_in_halfwords)); // short jump
643 } else {
644 brcl(c, Operand(offset_in_halfwords)); // long jump
645 }
646}
647
648// Exception-generating instructions and debugging support.
649// Stops with a non-negative code less than kNumOfWatchedStops support
650// enabling/disabling and a counter feature. See simulator-s390.h .
651void Assembler::stop(Condition cond, int32_t code, CRegister cr) {
652 if (cond != al) {
653 Label skip;
654 b(NegateCondition(cond), &skip, Label::kNear);
655 bkpt(0);
656 bind(&skip);
657 } else {
658 bkpt(0);
659 }
660}
661
662void Assembler::bkpt(uint32_t imm16) {
663 // GDB software breakpoint instruction
664 emit2bytes(0x0001);
665}
666
667// Pseudo instructions.
668void Assembler::nop(int type) {
669 switch (type) {
670 case 0:
671 lr(r0, r0);
672 break;
673 case DEBUG_BREAK_NOP:
674 // TODO(john.yan): Use a better NOP break
675 oill(r3, Operand::Zero());
676 break;
677#if V8_OS_ZOS
678 case BASR_CALL_TYPE_NOP:
679 emit2bytes(0x0000);
680 break;
681 case BRAS_CALL_TYPE_NOP:
682 emit2bytes(0x0001);
683 break;
684 case BRASL_CALL_TYPE_NOP:
685 emit2bytes(0x0011);
686 break;
687#endif
688 default:
690 }
691}
692
693// -------------------------
694// Load Address Instructions
695// -------------------------
696// Load Address Relative Long
697void Assembler::larl(Register r1, Label* l) {
698 larl(r1, Operand(branch_offset(l)));
699}
700
701void Assembler::lgrl(Register r1, Label* l) {
702 lgrl(r1, Operand(branch_offset(l)));
703}
704
705void Assembler::EnsureSpaceFor(int space_needed) {
706 if (buffer_space() <= (kGap + space_needed)) {
707 GrowBuffer(space_needed);
708 }
709}
710
711void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
712 DCHECK(RelocInfo::IsCodeTarget(rmode));
713 EnsureSpace ensure_space(this);
714
715 RecordRelocInfo(rmode);
716 int32_t target_index = AddCodeTarget(target);
717 brasl(r14, Operand(target_index));
718}
719
720void Assembler::jump(Handle<Code> target, RelocInfo::Mode rmode,
721 Condition cond) {
722 DCHECK(RelocInfo::IsRelativeCodeTarget(rmode));
723 EnsureSpace ensure_space(this);
724
725 RecordRelocInfo(rmode);
726 int32_t target_index = AddCodeTarget(target);
727 brcl(cond, Operand(target_index));
728}
729
730// End of S390 instructions.
731
732bool Assembler::IsNop(SixByteInstr instr, int type) {
733 DCHECK((0 == type) || (DEBUG_BREAK_NOP == type));
734 if (DEBUG_BREAK_NOP == type) {
735 return ((instr & 0xFFFFFFFF) == 0xA53B0000); // oill r3, 0
736 }
737 return ((instr & 0xFFFF) == 0x1800); // lr r0,r0
738}
739
740// dummy instruction reserved for special use.
741void Assembler::dumy(int r1, int x2, int b2, int d2) {
742#if defined(USE_SIMULATOR)
743 int op = 0xE353;
744 uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
745 (static_cast<uint64_t>(r1) & 0xF) * B36 |
746 (static_cast<uint64_t>(x2) & 0xF) * B32 |
747 (static_cast<uint64_t>(b2) & 0xF) * B28 |
748 (static_cast<uint64_t>(d2 & 0x0FFF)) * B16 |
749 (static_cast<uint64_t>(d2 & 0x0FF000)) >> 4 |
750 (static_cast<uint64_t>(op & 0x00FF));
751 emit6bytes(code);
752#endif
753}
754
755void Assembler::GrowBuffer(int needed) {
756 DCHECK_EQ(buffer_start_, buffer_->start());
757
758 // Compute new buffer size.
759 int old_size = buffer_->size();
760 int new_size = std::min(2 * old_size, old_size + 1 * MB);
761 int space = buffer_space() + (new_size - old_size);
762 new_size += (space < needed) ? needed - space : 0;
763
764 // Some internal data structures overflow for very large buffers,
765 // they must ensure that kMaximalBufferSize is not too large.
766 if (new_size > kMaximalBufferSize) {
767 V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
768 }
769
770 // Set up new buffer.
771 std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
772 DCHECK_EQ(new_size, new_buffer->size());
773 uint8_t* new_start = new_buffer->start();
774
775 // Copy the data.
776 intptr_t pc_delta = new_start - buffer_start_;
777 intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
778 size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
779 MemMove(new_start, buffer_start_, pc_offset());
780 MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
781 reloc_size);
782
783 // Switch buffers.
784 buffer_ = std::move(new_buffer);
785 buffer_start_ = new_start;
786 pc_ += pc_delta;
787 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
788 reloc_info_writer.last_pc() + pc_delta);
789
790 // None of our relocation types are pc relative pointing outside the code
791 // buffer nor pc absolute pointing inside the code buffer, so there is no need
792 // to relocate any emitted relocation entries.
793}
794
795void Assembler::db(uint8_t data) {
796 CheckBuffer();
797 *reinterpret_cast<uint8_t*>(pc_) = data;
798 pc_ += sizeof(uint8_t);
799}
800
801void Assembler::dh(uint16_t data) {
802 CheckBuffer();
803 *reinterpret_cast<uint16_t*>(pc_) = data;
804 pc_ += sizeof(uint16_t);
805}
806
807void Assembler::dd(uint32_t data) {
808 CheckBuffer();
809 *reinterpret_cast<uint32_t*>(pc_) = data;
810 pc_ += sizeof(uint32_t);
811}
812
813void Assembler::dq(uint64_t value) {
814 CheckBuffer();
815 *reinterpret_cast<uint64_t*>(pc_) = value;
816 pc_ += sizeof(uint64_t);
817}
818
819void Assembler::dp(uintptr_t data) {
820 CheckBuffer();
821 *reinterpret_cast<uintptr_t*>(pc_) = data;
822 pc_ += sizeof(uintptr_t);
823}
824
825void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
826 if (!ShouldRecordRelocInfo(rmode)) return;
827 DeferredRelocInfo rinfo(pc_offset(), rmode, data);
828 relocations_.push_back(rinfo);
829}
830
831void Assembler::emit_label_addr(Label* label) {
832 CheckBuffer();
833 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
834 int position = link(label);
835 DCHECK(label->is_bound());
836 // Keep internal references relative until EmitRelocations.
837 dp(position);
838}
839
840void Assembler::EmitRelocations() {
841 EnsureSpaceFor(relocations_.size() * kMaxRelocSize);
842
843 for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
844 it != relocations_.end(); it++) {
845 RelocInfo::Mode rmode = it->rmode();
846 Address pc = reinterpret_cast<Address>(buffer_start_) + it->position();
847 RelocInfo rinfo(pc, rmode, it->data());
848
849 // Fix up internal references now that they are guaranteed to be bound.
850 if (RelocInfo::IsInternalReference(rmode)) {
851 // Jump table entry
852 Address pos = Memory<Address>(pc);
853 Memory<Address>(pc) = reinterpret_cast<Address>(buffer_start_) + pos;
854 } else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
855 // mov sequence
856 Address pos = target_address_at(pc, 0);
857 set_target_address_at(pc, 0,
858 reinterpret_cast<Address>(buffer_start_) + pos,
859 nullptr, SKIP_ICACHE_FLUSH);
860 }
861
862 reloc_info_writer.Write(&rinfo);
863 }
864}
865
866RegList Assembler::DefaultTmpList() { return {r1, ip}; }
867DoubleRegList Assembler::DefaultFPTmpList() {
869}
870
871} // namespace internal
872} // namespace v8
873#endif // V8_TARGET_ARCH_S390X
#define one
SourcePosition pos
static V8_INLINE Address target_address_at(Address pc, Address constant_pool)
static bool IsSupported(CpuFeature f)
static bool supports_wasm_simd_128_
static unsigned supported_
static unsigned icache_line_size_
static void ProbeImpl(bool cross_compile)
static Operand EmbeddedNumber(double number)
union v8::internal::Operand::Value value_
RelocInfo::Mode rmode_
V8_INLINE Operand(int32_t immediate, RelocInfo::Mode rmode=RelocInfo::NO_INFO)
static const int kApplyMask
Definition reloc-info.h:369
uint32_t wasm_call_tag() const
static constexpr int ModeMask(Mode mode)
Definition reloc-info.h:272
static constexpr Tagged< Smi > FromInt(int value)
Definition smi.h:38
Operand const offset_
base::OwnedVector< uint8_t > buffer_
Definition assembler.cc:111
#define SIGN_EXT_IMM16(imm)
#define B32
#define B36
Label label
int32_t offset
Instruction * instr
ZoneVector< RpoNumber > & result
int pc_offset
int position
Definition liveedit.cc:290
int m
Definition mul-fft.cc:294
STL namespace.
int int32_t
Definition unicode.cc:40
unsigned short uint16_t
Definition unicode.cc:39
signed short int16_t
Definition unicode.cc:38
uintptr_t Address
Definition memory.h:13
uint32_t WasmInterpreterRuntime int64_t r0
constexpr Register no_reg
V8_INLINE IndirectHandle< T > handle(Tagged< T > object, Isolate *isolate)
Definition handles-inl.h:72
RegListBase< DoubleRegister > DoubleRegList
Definition reglist-arm.h:15
PerThreadAssertScopeDebugOnly< true, HANDLE_DEREFERENCE_ASSERT > AllowHandleDereference
uint64_t SixByteInstr
bool DoubleToSmiInteger(double value, int *smi_int_value)
void PrintF(const char *format,...)
Definition utils.cc:39
constexpr DoubleRegister kScratchDoubleReg
RegListBase< Register > RegList
Definition reglist-arm.h:14
constexpr int L
constexpr int B28
V8_EXPORT_PRIVATE void MemMove(void *dest, const void *src, size_t size)
Definition memcopy.h:189
Condition NegateCondition(Condition cond)
constexpr bool is_intn(int64_t x, unsigned n)
Definition utils.h:568
const int kHeapObjectTag
Definition v8-internal.h:72
constexpr LowDwVfpRegister kDoubleRegZero
Register ToRegister(int num)
static unsigned CpuFeaturesImpliedByCompiler()
const int kEndOfChain
constexpr int kNumRegisters
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define UNIMPLEMENTED()
Definition logging.h:66
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define USE(...)
Definition macros.h:293