v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
code-generator-s390.cc
1// Copyright 2015 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
14#include "src/compiler/osr.h"
16
17#if V8_ENABLE_WEBASSEMBLY
20#endif // V8_ENABLE_WEBASSEMBLY
21
22namespace v8 {
23namespace internal {
24namespace compiler {
25
26#define __ masm()->
27
28#define kScratchReg ip
29
30// Adds S390-specific methods to convert InstructionOperands.
31class S390OperandConverter final : public InstructionOperandConverter {
32 public:
33 S390OperandConverter(CodeGenerator* gen, Instruction* instr)
34 : InstructionOperandConverter(gen, instr) {}
35
36 size_t OutputCount() { return instr_->OutputCount(); }
37
42
47
48 bool CompareLogical() const {
49 switch (instr_->flags_condition()) {
50 case kUnsignedLessThan:
51 case kUnsignedGreaterThanOrEqual:
52 case kUnsignedLessThanOrEqual:
53 case kUnsignedGreaterThan:
54 return true;
55 default:
56 return false;
57 }
58 UNREACHABLE();
59 }
60
61 Operand InputImmediate(size_t index) {
62 Constant constant = ToConstant(instr_->InputAt(index));
63 switch (constant.type()) {
64 case Constant::kInt32:
65 return Operand(constant.ToInt32());
66 case Constant::kFloat32:
67 return Operand::EmbeddedNumber(constant.ToFloat32());
68 case Constant::kFloat64:
69 return Operand::EmbeddedNumber(constant.ToFloat64().value());
70 case Constant::kInt64:
71 return Operand(constant.ToInt64());
72 case Constant::kExternalReference:
73 return Operand(constant.ToExternalReference());
74 case Constant::kCompressedHeapObject:
75 case Constant::kHeapObject:
76 case Constant::kRpoNumber:
77 break;
78 }
79 UNREACHABLE();
80 }
81
82 MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
83 const size_t index = *first_index;
84 if (mode) *mode = AddressingModeField::decode(instr_->opcode());
85 switch (AddressingModeField::decode(instr_->opcode())) {
86 case kMode_None:
87 break;
88 case kMode_MR:
89 *first_index += 1;
90 return MemOperand(InputRegister(index + 0), 0);
91 case kMode_MRI:
92 *first_index += 2;
93 return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
94 case kMode_MRR:
95 *first_index += 2;
96 return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
97 case kMode_MRRI:
98 *first_index += 3;
99 return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
100 InputInt32(index + 2));
101 case kMode_Root:
102 *first_index += 1;
103 return MemOperand(kRootRegister, InputInt32(index));
104 }
105 UNREACHABLE();
106 }
107
108 MemOperand MemoryOperand(AddressingMode* mode = nullptr,
109 size_t first_index = 0) {
110 return MemoryOperand(mode, &first_index);
111 }
112
118
119 MemOperand SlotToMemOperand(int slot) const {
120 FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
121 return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
122 }
123
124 MemOperand InputStackSlot(size_t index) {
125 InstructionOperand* op = instr_->InputAt(index);
126 return SlotToMemOperand(AllocatedOperand::cast(op)->index());
127 }
128
129 MemOperand InputStackSlot32(size_t index) {
130#if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
131 // We want to read the 32-bits directly from memory
132 MemOperand mem = InputStackSlot(index);
133 return MemOperand(mem.rx(), mem.rb(), mem.offset() + 4);
134#else
135 return InputStackSlot(index);
136#endif
137 }
138};
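// Usage sketch (illustrative, mirroring AssembleArchInstruction further down):
// the code generator builds one converter per instruction and pulls typed
// operands from it, e.g.
//   S390OperandConverter i(this, instr);
//   Register dst = i.OutputRegister();
//   Operand imm = i.InputImmediate(1);
//   size_t index = 0;
//   AddressingMode mode = kMode_None;
//   MemOperand mem = i.MemoryOperand(&mode, &index);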
139
140static inline bool HasRegisterOutput(Instruction* instr, int index = 0) {
141 return instr->OutputCount() > 0 && instr->OutputAt(index)->IsRegister();
142}
143
144static inline bool HasFPRegisterInput(Instruction* instr, int index) {
145 return instr->InputAt(index)->IsFPRegister();
146}
147
148static inline bool HasRegisterInput(Instruction* instr, int index) {
149 return instr->InputAt(index)->IsRegister() ||
150 HasFPRegisterInput(instr, index);
151}
152
153static inline bool HasImmediateInput(Instruction* instr, size_t index) {
154 return instr->InputAt(index)->IsImmediate();
155}
156
157static inline bool HasFPStackSlotInput(Instruction* instr, size_t index) {
158 return instr->InputAt(index)->IsFPStackSlot();
159}
160
161static inline bool HasStackSlotInput(Instruction* instr, size_t index) {
162 return instr->InputAt(index)->IsStackSlot() ||
163 HasFPStackSlotInput(instr, index);
164}
165
166namespace {
167
168class OutOfLineRecordWrite final : public OutOfLineCode {
169 public:
170 OutOfLineRecordWrite(CodeGenerator* gen, Register object, MemOperand operand,
171 Register value, Register scratch0, Register scratch1,
172 RecordWriteMode mode, StubCallMode stub_mode,
173 UnwindingInfoWriter* unwinding_info_writer)
174 : OutOfLineCode(gen),
175 object_(object),
176 operand_(operand),
177 value_(value),
178 scratch0_(scratch0),
179 scratch1_(scratch1),
180 mode_(mode),
181#if V8_ENABLE_WEBASSEMBLY
182 stub_mode_(stub_mode),
183#endif // V8_ENABLE_WEBASSEMBLY
184 must_save_lr_(!gen->frame_access_state()->has_frame()),
185 unwinding_info_writer_(unwinding_info_writer),
186 zone_(gen->zone()) {
187 DCHECK(!AreAliased(object, scratch0, scratch1));
188 DCHECK(!AreAliased(value, scratch0, scratch1));
189 }
190
191 void Generate() final {
192 if (COMPRESS_POINTERS_BOOL) {
193 __ DecompressTagged(value_, value_);
194 }
195 __ CheckPageFlag(value_, scratch0_,
196 MemoryChunk::kPointersToHereAreInterestingMask, eq,
197 exit());
198 __ lay(scratch1_, operand_);
199 SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
200 ? SaveFPRegsMode::kSave
201 : SaveFPRegsMode::kIgnore;
202 if (must_save_lr_) {
203 // We need to save and restore r14 if the frame was elided.
204 __ Push(r14);
205 unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset());
206 }
207 if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
208 __ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode);
209#if V8_ENABLE_WEBASSEMBLY
210 } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
211 __ CallRecordWriteStubSaveRegisters(object_, scratch1_, save_fp_mode,
212 StubCallMode::kCallWasmRuntimeStub);
213#endif // V8_ENABLE_WEBASSEMBLY
214 } else {
215 __ CallRecordWriteStubSaveRegisters(object_, scratch1_, save_fp_mode);
216 }
217 if (must_save_lr_) {
218 // We need to save and restore r14 if the frame was elided.
219 __ Pop(r14);
220 unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
221 }
222 }
223
224 private:
225 Register const object_;
226 MemOperand const operand_;
227 Register const value_;
228 Register const scratch0_;
229 Register const scratch1_;
230 RecordWriteMode const mode_;
231#if V8_ENABLE_WEBASSEMBLY
232 StubCallMode stub_mode_;
233#endif // V8_ENABLE_WEBASSEMBLY
234 bool must_save_lr_;
235 UnwindingInfoWriter* const unwinding_info_writer_;
236 Zone* zone_;
237};
238
239Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
240 switch (condition) {
241 case kEqual:
242 return eq;
243 case kNotEqual:
244 return ne;
245 case kUnsignedLessThan:
246 // unsigned number never less than 0
247 if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
248 return CC_NOP;
249 [[fallthrough]];
250 case kSignedLessThan:
251 return lt;
252 case kUnsignedGreaterThanOrEqual:
253 // unsigned number always greater than or equal 0
254 if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
255 return CC_ALWAYS;
256 [[fallthrough]];
257 case kSignedGreaterThanOrEqual:
258 return ge;
259 case kUnsignedLessThanOrEqual:
260 // unsigned number never less than 0
261 if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
262 return CC_EQ;
263 [[fallthrough]];
264 case kSignedLessThanOrEqual:
265 return le;
266 case kUnsignedGreaterThan:
267 // unsigned number always greater than or equal 0
268 if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
269 return ne;
270 [[fallthrough]];
271 case kSignedGreaterThan:
272 return gt;
273 case kOverflow:
274 // Overflow checked for AddS64/SubS64 only.
275 switch (op) {
276 case kS390_Add32:
277 case kS390_Add64:
278 case kS390_Sub32:
279 case kS390_Sub64:
280 case kS390_Abs64:
281 case kS390_Abs32:
282 case kS390_Mul32:
283 case kS390_Mul64WithOverflow:
284 return overflow;
285 default:
286 break;
287 }
288 break;
289 case kNotOverflow:
290 switch (op) {
291 case kS390_Add32:
292 case kS390_Add64:
293 case kS390_Sub32:
294 case kS390_Sub64:
295 case kS390_Abs64:
296 case kS390_Abs32:
297 case kS390_Mul32:
298 case kS390_Mul64WithOverflow:
299 return nooverflow;
300 default:
301 break;
302 }
303 break;
304 default:
305 break;
306 }
307 UNREACHABLE();
308}
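// Note: the kS390_LoadAndTestWord32/64 special cases above rely on the fact
// that load-and-test compares against zero, so for an unsigned value
// "less than 0" can never hold (CC_NOP), "greater than or equal to 0" always
// holds (CC_ALWAYS), and "less than or equal to 0" degenerates to
// "equal to 0" (CC_EQ).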
309
310#define GET_MEMOPERAND32(ret, fi) \
311 ([&](int& ret) { \
312 AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
313 MemOperand mem(r0); \
314 if (mode != kMode_None) { \
315 size_t first_index = (fi); \
316 mem = i.MemoryOperand(&mode, &first_index); \
317 ret = first_index; \
318 } else { \
319 mem = i.InputStackSlot32(fi); \
320 } \
321 return mem; \
322 })(ret)
323
324#define GET_MEMOPERAND(ret, fi) \
325 ([&](int& ret) { \
326 AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
327 MemOperand mem(r0); \
328 if (mode != kMode_None) { \
329 size_t first_index = (fi); \
330 mem = i.MemoryOperand(&mode, &first_index); \
331 ret = first_index; \
332 } else { \
333 mem = i.InputStackSlot(fi); \
334 } \
335 return mem; \
336 })(ret)
337
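// The helper lambdas below are named by operand form: R = register,
// I = immediate, M = memory operand, D = double register. Each emits one
// instruction and returns the index of the instruction input that carries
// the "zero-extend the 32-bit result" flag, which CHECK_AND_ZERO_EXT_OUTPUT
// (further down) consumes for the 32-bit variants.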
338#define RRInstr(instr) \
339 [&]() { \
340 DCHECK(i.OutputRegister() == i.InputRegister(0)); \
341 __ instr(i.OutputRegister(), i.InputRegister(1)); \
342 return 2; \
343 }
344#define RIInstr(instr) \
345 [&]() { \
346 DCHECK(i.OutputRegister() == i.InputRegister(0)); \
347 __ instr(i.OutputRegister(), i.InputImmediate(1)); \
348 return 2; \
349 }
350#define RMInstr(instr, GETMEM) \
351 [&]() { \
352 DCHECK(i.OutputRegister() == i.InputRegister(0)); \
353 int ret = 2; \
354 __ instr(i.OutputRegister(), GETMEM(ret, 1)); \
355 return ret; \
356 }
357#define RM32Instr(instr) RMInstr(instr, GET_MEMOPERAND32)
358#define RM64Instr(instr) RMInstr(instr, GET_MEMOPERAND)
359
360#define RRRInstr(instr) \
361 [&]() { \
362 __ instr(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); \
363 return 2; \
364 }
365#define RRIInstr(instr) \
366 [&]() { \
367 __ instr(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1)); \
368 return 2; \
369 }
370#define RRMInstr(instr, GETMEM) \
371 [&]() { \
372 int ret = 2; \
373 __ instr(i.OutputRegister(), i.InputRegister(0), GETMEM(ret, 1)); \
374 return ret; \
375 }
376#define RRM32Instr(instr) RRMInstr(instr, GET_MEMOPERAND32)
377#define RRM64Instr(instr) RRMInstr(instr, GET_MEMOPERAND)
378
379#define DDInstr(instr) \
380 [&]() { \
381 DCHECK(i.OutputDoubleRegister() == i.InputDoubleRegister(0)); \
382 __ instr(i.OutputDoubleRegister(), i.InputDoubleRegister(1)); \
383 return 2; \
384 }
385
386#define DMInstr(instr) \
387 [&]() { \
388 DCHECK(i.OutputDoubleRegister() == i.InputDoubleRegister(0)); \
389 int ret = 2; \
390 __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 1)); \
391 return ret; \
392 }
393
394#define DMTInstr(instr) \
395 [&]() { \
396 DCHECK(i.OutputDoubleRegister() == i.InputDoubleRegister(0)); \
397 int ret = 2; \
398 __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 1), \
399 kScratchDoubleReg); \
400 return ret; \
401 }
402
403#define R_MInstr(instr) \
404 [&]() { \
405 int ret = 2; \
406 __ instr(i.OutputRegister(), GET_MEMOPERAND(ret, 0)); \
407 return ret; \
408 }
409
410#define R_DInstr(instr) \
411 [&]() { \
412 __ instr(i.OutputRegister(), i.InputDoubleRegister(0)); \
413 return 2; \
414 }
415
416#define D_DInstr(instr) \
417 [&]() { \
418 __ instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
419 return 2; \
420 }
421
422#define D_MInstr(instr) \
423 [&]() { \
424 int ret = 2; \
425 __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 0)); \
426 return ret; \
427 }
428
429#define D_MTInstr(instr) \
430 [&]() { \
431 int ret = 2; \
432 __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 0), \
433 kScratchDoubleReg); \
434 return ret; \
435 }
436
437static int nullInstr() { UNREACHABLE(); }
438
439template <int numOfOperand, class RType, class MType, class IType>
440static inline int AssembleOp(Instruction* instr, RType r, MType m, IType i) {
441 AddressingMode mode = AddressingModeField::decode(instr->opcode());
442 if (mode != kMode_None || HasStackSlotInput(instr, numOfOperand - 1)) {
443 return m();
444 } else if (HasRegisterInput(instr, numOfOperand - 1)) {
445 return r();
446 } else if (HasImmediateInput(instr, numOfOperand - 1)) {
447 return i();
448 } else {
449 UNREACHABLE();
450 }
451}
452
453template <class _RR, class _RM, class _RI>
454static inline int AssembleBinOp(Instruction* instr, _RR _rr, _RM _rm, _RI _ri) {
455 return AssembleOp<2>(instr, _rr, _rm, _ri);
456}
457
458template <class _R, class _M, class _I>
459static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
460 return AssembleOp<1>(instr, _r, _m, _i);
461}
462
463#define ASSEMBLE_BIN_OP(_rr, _rm, _ri) AssembleBinOp(instr, _rr, _rm, _ri)
464#define ASSEMBLE_UNARY_OP(_r, _m, _i) AssembleUnaryOp(instr, _r, _m, _i)
465
466#define CHECK_AND_ZERO_EXT_OUTPUT(num) \
467 ([&](int index) { \
468 DCHECK(HasImmediateInput(instr, (index))); \
469 int doZeroExt = i.InputInt32(index); \
470 if (doZeroExt) __ LoadU32(i.OutputRegister(), i.OutputRegister()); \
471 })(num)
472
473#define ASSEMBLE_BIN32_OP(_rr, _rm, _ri) \
474 { CHECK_AND_ZERO_EXT_OUTPUT(AssembleBinOp(instr, _rr, _rm, _ri)); }
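// Example (illustrative): with DISTINCT_OPS available, kS390_Add32 in
// AssembleArchInstruction expands to
//   ASSEMBLE_BIN32_OP(RRRInstr(ark), RM32Instr(AddS32), RRIInstr(AddS32));
// which selects the register, memory or immediate form based on the second
// input, then zero-extends the 32-bit result when the flag input asks for it.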
475
476} // namespace
477
478#define ASSEMBLE_FLOAT_UNOP(asm_instr) \
479 do { \
480 __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
481 } while (0)
482
483#define ASSEMBLE_FLOAT_BINOP(asm_instr) \
484 do { \
485 __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
486 i.InputDoubleRegister(1)); \
487 } while (0)
488
489#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr) \
490 do { \
491 AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
492 if (mode != kMode_None) { \
493 size_t first_index = 1; \
494 MemOperand operand = i.MemoryOperand(&mode, &first_index); \
495 if (i.CompareLogical()) { \
496 __ cmpl_instr(i.InputRegister(0), operand); \
497 } else { \
498 __ cmp_instr(i.InputRegister(0), operand); \
499 } \
500 } else if (HasRegisterInput(instr, 1)) { \
501 if (i.CompareLogical()) { \
502 __ cmpl_instr(i.InputRegister(0), i.InputRegister(1)); \
503 } else { \
504 __ cmp_instr(i.InputRegister(0), i.InputRegister(1)); \
505 } \
506 } else if (HasImmediateInput(instr, 1)) { \
507 if (i.CompareLogical()) { \
508 __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1)); \
509 } else { \
510 __ cmp_instr(i.InputRegister(0), i.InputImmediate(1)); \
511 } \
512 } else { \
513 DCHECK(HasStackSlotInput(instr, 1)); \
514 if (i.CompareLogical()) { \
515 __ cmpl_instr(i.InputRegister(0), i.InputStackSlot(1)); \
516 } else { \
517 __ cmp_instr(i.InputRegister(0), i.InputStackSlot(1)); \
518 } \
519 } \
520 } while (0)
521
522#define ASSEMBLE_COMPARE32(cmp_instr, cmpl_instr) \
523 do { \
524 AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
525 if (mode != kMode_None) { \
526 size_t first_index = 1; \
527 MemOperand operand = i.MemoryOperand(&mode, &first_index); \
528 if (i.CompareLogical()) { \
529 __ cmpl_instr(i.InputRegister(0), operand); \
530 } else { \
531 __ cmp_instr(i.InputRegister(0), operand); \
532 } \
533 } else if (HasRegisterInput(instr, 1)) { \
534 if (i.CompareLogical()) { \
535 __ cmpl_instr(i.InputRegister(0), i.InputRegister(1)); \
536 } else { \
537 __ cmp_instr(i.InputRegister(0), i.InputRegister(1)); \
538 } \
539 } else if (HasImmediateInput(instr, 1)) { \
540 if (i.CompareLogical()) { \
541 __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1)); \
542 } else { \
543 __ cmp_instr(i.InputRegister(0), i.InputImmediate(1)); \
544 } \
545 } else { \
546 DCHECK(HasStackSlotInput(instr, 1)); \
547 if (i.CompareLogical()) { \
548 __ cmpl_instr(i.InputRegister(0), i.InputStackSlot32(1)); \
549 } else { \
550 __ cmp_instr(i.InputRegister(0), i.InputStackSlot32(1)); \
551 } \
552 } \
553 } while (0)
554
555#define ASSEMBLE_FLOAT_COMPARE(cmp_rr_instr, cmp_rm_instr, load_instr) \
556 do { \
557 AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
558 if (mode != kMode_None) { \
559 size_t first_index = 1; \
560 MemOperand operand = i.MemoryOperand(&mode, &first_index); \
561 __ cmp_rm_instr(i.InputDoubleRegister(0), operand); \
562 } else if (HasFPRegisterInput(instr, 1)) { \
563 __ cmp_rr_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
564 } else { \
565 USE(HasFPStackSlotInput); \
566 DCHECK(HasFPStackSlotInput(instr, 1)); \
567 MemOperand operand = i.InputStackSlot(1); \
568 if (operand.offset() >= 0) { \
569 __ cmp_rm_instr(i.InputDoubleRegister(0), operand); \
570 } else { \
571 __ load_instr(kScratchDoubleReg, operand); \
572 __ cmp_rr_instr(i.InputDoubleRegister(0), kScratchDoubleReg); \
573 } \
574 } \
575 } while (0)
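// Note: ceb/cdb encode an unsigned displacement only, so a stack slot with a
// negative offset cannot be addressed directly; in that case the value is
// first loaded into kScratchDoubleReg and compared register-to-register.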
576
577// The divide instruction dr implicitly uses the register pair
578// r0 & r1 below.
579// R0:R1 = R1 / divisor; R0 holds the remainder.
580// Copy the remainder to the output register.
581#define ASSEMBLE_MODULO(div_instr, shift_instr) \
582 do { \
583 __ mov(r0, i.InputRegister(0)); \
584 __ shift_instr(r0, Operand(32)); \
585 __ div_instr(r0, i.InputRegister(1)); \
586 __ LoadU32(i.OutputRegister(), r0); \
587 } while (0)
588
589#define ASSEMBLE_FLOAT_MODULO() \
590 do { \
591 FrameScope scope(masm(), StackFrame::MANUAL); \
592 __ Push(r2, r3, r4, r5); \
593 __ PrepareCallCFunction(0, 2, kScratchReg); \
594 __ MovToFloatParameters(i.InputDoubleRegister(0), \
595 i.InputDoubleRegister(1)); \
596 __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2); \
597 __ MovFromFloatResult(i.OutputDoubleRegister()); \
598 __ Pop(r2, r3, r4, r5); \
599 } while (0)
600
601#define ASSEMBLE_IEEE754_UNOP(name) \
602 do { \
603 /* TODO(bmeurer): We should really get rid of this special instruction, */ \
604 /* and generate a CallAddress instruction instead. */ \
605 FrameScope scope(masm(), StackFrame::MANUAL); \
606 __ Push(r2, r3, r4, r5); \
607 __ PrepareCallCFunction(0, 1, kScratchReg); \
608 __ MovToFloatParameter(i.InputDoubleRegister(0)); \
609 __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
610 /* Move the result in the double result register. */ \
611 __ MovFromFloatResult(i.OutputDoubleRegister()); \
612 __ Pop(r2, r3, r4, r5); \
613 } while (0)
614
615#define ASSEMBLE_IEEE754_BINOP(name) \
616 do { \
617 /* TODO(bmeurer): We should really get rid of this special instruction, */ \
618 /* and generate a CallAddress instruction instead. */ \
619 FrameScope scope(masm(), StackFrame::MANUAL); \
620 __ Push(r2, r3, r4, r5); \
621 __ PrepareCallCFunction(0, 2, kScratchReg); \
622 __ MovToFloatParameters(i.InputDoubleRegister(0), \
623 i.InputDoubleRegister(1)); \
624 __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
625 /* Move the result in the double result register. */ \
626 __ MovFromFloatResult(i.OutputDoubleRegister()); \
627 __ Pop(r2, r3, r4, r5); \
628 } while (0)
629
630//
631// Only the MRI addressing mode is available for these instructions.
632#define ASSEMBLE_LOAD_FLOAT(asm_instr) \
633 do { \
634 DoubleRegister result = i.OutputDoubleRegister(); \
635 AddressingMode mode = kMode_None; \
636 MemOperand operand = i.MemoryOperand(&mode); \
637 __ asm_instr(result, operand); \
638 } while (0)
639
640#define ASSEMBLE_LOAD_INTEGER(asm_instr) \
641 do { \
642 Register result = i.OutputRegister(); \
643 AddressingMode mode = kMode_None; \
644 MemOperand operand = i.MemoryOperand(&mode); \
645 __ asm_instr(result, operand); \
646 } while (0)
647
648#define ASSEMBLE_LOADANDTEST64(asm_instr_rr, asm_instr_rm) \
649 { \
650 AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
651 Register dst = HasRegisterOutput(instr) ? i.OutputRegister() : r0; \
652 if (mode != kMode_None) { \
653 size_t first_index = 0; \
654 MemOperand operand = i.MemoryOperand(&mode, &first_index); \
655 __ asm_instr_rm(dst, operand); \
656 } else if (HasRegisterInput(instr, 0)) { \
657 __ asm_instr_rr(dst, i.InputRegister(0)); \
658 } else { \
659 DCHECK(HasStackSlotInput(instr, 0)); \
660 __ asm_instr_rm(dst, i.InputStackSlot(0)); \
661 } \
662 }
663
664#define ASSEMBLE_LOADANDTEST32(asm_instr_rr, asm_instr_rm) \
665 { \
666 AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
667 Register dst = HasRegisterOutput(instr) ? i.OutputRegister() : r0; \
668 if (mode != kMode_None) { \
669 size_t first_index = 0; \
670 MemOperand operand = i.MemoryOperand(&mode, &first_index); \
671 __ asm_instr_rm(dst, operand); \
672 } else if (HasRegisterInput(instr, 0)) { \
673 __ asm_instr_rr(dst, i.InputRegister(0)); \
674 } else { \
675 DCHECK(HasStackSlotInput(instr, 0)); \
676 __ asm_instr_rm(dst, i.InputStackSlot32(0)); \
677 } \
678 }
679
680#define ASSEMBLE_STORE_FLOAT32() \
681 do { \
682 size_t index = 0; \
683 AddressingMode mode = kMode_None; \
684 MemOperand operand = i.MemoryOperand(&mode, &index); \
685 DoubleRegister value = i.InputDoubleRegister(index); \
686 __ StoreF32(value, operand); \
687 } while (0)
688
689#define ASSEMBLE_STORE_DOUBLE() \
690 do { \
691 size_t index = 0; \
692 AddressingMode mode = kMode_None; \
693 MemOperand operand = i.MemoryOperand(&mode, &index); \
694 DoubleRegister value = i.InputDoubleRegister(index); \
695 __ StoreF64(value, operand); \
696 } while (0)
697
698#define ASSEMBLE_STORE_INTEGER(asm_instr) \
699 do { \
700 size_t index = 0; \
701 AddressingMode mode = kMode_None; \
702 MemOperand operand = i.MemoryOperand(&mode, &index); \
703 Register value = i.InputRegister(index); \
704 __ asm_instr(value, operand); \
705 } while (0)
706
707static inline bool is_wasm_on_be(OptimizedCompilationInfo* info) {
708#if defined(V8_ENABLE_WEBASSEMBLY) && defined(V8_TARGET_BIG_ENDIAN)
709 return info->IsWasm();
710#else
711 return false;
712#endif
713}
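// WebAssembly memory is little-endian by specification, so on big-endian
// targets the atomic helpers below byte-reverse values (lrvr/lrvgr) around
// the compare-and-swap when compiling wasm code.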
714
715#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(load_and_ext) \
716 do { \
717 Register old_value = i.InputRegister(0); \
718 Register new_value = i.InputRegister(1); \
719 Register output = i.OutputRegister(); \
720 Register addr = kScratchReg; \
721 Register temp0 = r0; \
722 Register temp1 = r1; \
723 size_t index = 2; \
724 AddressingMode mode = kMode_None; \
725 MemOperand op = i.MemoryOperand(&mode, &index); \
726 __ lay(addr, op); \
727 __ AtomicCmpExchangeU8(addr, output, old_value, new_value, temp0, temp1); \
728 __ load_and_ext(output, output); \
729 } while (false)
730
731#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(load_and_ext) \
732 do { \
733 Register old_value = i.InputRegister(0); \
734 Register new_value = i.InputRegister(1); \
735 Register output = i.OutputRegister(); \
736 Register addr = kScratchReg; \
737 Register temp0 = r0; \
738 Register temp1 = r1; \
739 size_t index = 2; \
740 AddressingMode mode = kMode_None; \
741 MemOperand op = i.MemoryOperand(&mode, &index); \
742 __ lay(addr, op); \
743 if (is_wasm_on_be(info())) { \
744 Register temp2 = \
745 GetRegisterThatIsNotOneOf(output, old_value, new_value); \
746 Register temp3 = \
747 GetRegisterThatIsNotOneOf(output, old_value, new_value, temp2); \
748 __ Push(temp2, temp3); \
749 __ lrvr(temp2, old_value); \
750 __ lrvr(temp3, new_value); \
751 __ ShiftRightU32(temp2, temp2, Operand(16)); \
752 __ ShiftRightU32(temp3, temp3, Operand(16)); \
753 __ AtomicCmpExchangeU16(addr, output, temp2, temp3, temp0, temp1); \
754 __ lrvr(output, output); \
755 __ ShiftRightU32(output, output, Operand(16)); \
756 __ Pop(temp2, temp3); \
757 } else { \
758 __ AtomicCmpExchangeU16(addr, output, old_value, new_value, temp0, \
759 temp1); \
760 } \
761 __ load_and_ext(output, output); \
762 } while (false)
763
764#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_WORD() \
765 do { \
766 Register new_val = i.InputRegister(1); \
767 Register output = i.OutputRegister(); \
768 Register addr = kScratchReg; \
769 size_t index = 2; \
770 AddressingMode mode = kMode_None; \
771 MemOperand op = i.MemoryOperand(&mode, &index); \
772 __ lay(addr, op); \
773 if (is_wasm_on_be(info())) { \
774 __ lrvr(r0, output); \
775 __ lrvr(r1, new_val); \
776 __ CmpAndSwap(r0, r1, MemOperand(addr)); \
777 __ lrvr(output, r0); \
778 } else { \
779 __ CmpAndSwap(output, new_val, MemOperand(addr)); \
780 } \
781 __ LoadU32(output, output); \
782 } while (false)
783
784#define ASSEMBLE_ATOMIC_BINOP_WORD(load_and_op, op) \
785 do { \
786 Register value = i.InputRegister(2); \
787 Register result = i.OutputRegister(0); \
788 Register addr = r1; \
789 AddressingMode mode = kMode_None; \
790 MemOperand op = i.MemoryOperand(&mode); \
791 __ lay(addr, op); \
792 if (is_wasm_on_be(info())) { \
793 Label do_cs; \
794 __ bind(&do_cs); \
795 __ LoadU32(r0, MemOperand(addr)); \
796 __ lrvr(ip, r0); \
797 __ op(ip, ip, value); \
798 __ lrvr(ip, ip); \
799 __ CmpAndSwap(r0, ip, MemOperand(addr)); \
800 __ bne(&do_cs, Label::kNear); \
801 __ lrvr(result, r0); \
802 } else { \
803 __ load_and_op(result, value, MemOperand(addr)); \
804 } \
805 __ LoadU32(result, result); \
806 } while (false)
807
808#define ASSEMBLE_ATOMIC_BINOP_WORD64(load_and_op, op) \
809 do { \
810 Register value = i.InputRegister(2); \
811 Register result = i.OutputRegister(0); \
812 Register addr = r1; \
813 AddressingMode mode = kMode_None; \
814 MemOperand op = i.MemoryOperand(&mode); \
815 __ lay(addr, op); \
816 if (is_wasm_on_be(info())) { \
817 Label do_cs; \
818 __ bind(&do_cs); \
819 __ LoadU64(r0, MemOperand(addr)); \
820 __ lrvgr(ip, r0); \
821 __ op(ip, ip, value); \
822 __ lrvgr(ip, ip); \
823 __ CmpAndSwap64(r0, ip, MemOperand(addr)); \
824 __ bne(&do_cs, Label::kNear); \
825 __ lrvgr(result, r0); \
826 break; \
827 } \
828 __ load_and_op(result, value, MemOperand(addr)); \
829 } while (false)
830
831#define ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end, \
832 maybe_reverse_bytes) \
833 do { \
834 /* At the moment this is only true when dealing with 2-byte values.*/ \
835 bool reverse_bytes = maybe_reverse_bytes && is_wasm_on_be(info()); \
836 USE(reverse_bytes); \
837 Label do_cs; \
838 __ LoadU32(prev, MemOperand(addr, offset)); \
839 __ bind(&do_cs); \
840 if (reverse_bytes) { \
841 Register temp2 = GetRegisterThatIsNotOneOf(value, result, prev); \
842 __ Push(temp2); \
843 __ lrvr(temp2, prev); \
844 __ RotateInsertSelectBits(temp2, temp2, Operand(start), Operand(end), \
845 Operand(static_cast<intptr_t>(shift_amount)), \
846 true); \
847 __ RotateInsertSelectBits(temp, value, Operand(start), Operand(end), \
848 Operand(static_cast<intptr_t>(shift_amount)), \
849 true); \
850 __ bin_inst(new_val, temp2, temp); \
851 __ lrvr(temp2, new_val); \
852 __ lr(temp, prev); \
853 __ RotateInsertSelectBits(temp, temp2, Operand(start), Operand(end), \
854 Operand(static_cast<intptr_t>(shift_amount)), \
855 false); \
856 __ Pop(temp2); \
857 } else { \
858 __ RotateInsertSelectBits(temp, value, Operand(start), Operand(end), \
859 Operand(static_cast<intptr_t>(shift_amount)), \
860 true); \
861 __ bin_inst(new_val, prev, temp); \
862 __ lr(temp, prev); \
863 __ RotateInsertSelectBits(temp, new_val, Operand(start), Operand(end), \
864 Operand::Zero(), false); \
865 } \
866 __ CmpAndSwap(prev, temp, MemOperand(addr, offset)); \
867 __ bne(&do_cs, Label::kNear); \
868 } while (false)
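// The loop above is the usual compare-and-swap retry pattern for sub-word
// atomics: splice the operand into the containing aligned word with
// RotateInsertSelectBits, attempt CmpAndSwap, and branch back to do_cs if
// another thread modified the word in the meantime.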
869
870#ifdef V8_TARGET_BIG_ENDIAN
871#define ATOMIC_BIN_OP_HALFWORD(bin_inst, index, extract_result) \
872 { \
873 constexpr int offset = -(2 * index); \
874 constexpr int shift_amount = 16 - (index * 16); \
875 constexpr int start = 48 - shift_amount; \
876 constexpr int end = start + 15; \
877 ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end, true); \
878 extract_result(); \
879 }
880#define ATOMIC_BIN_OP_BYTE(bin_inst, index, extract_result) \
881 { \
882 constexpr int offset = -(index); \
883 constexpr int shift_amount = 24 - (index * 8); \
884 constexpr int start = 56 - shift_amount; \
885 constexpr int end = start + 7; \
886 ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end, false); \
887 extract_result(); \
888 }
889#else
890#define ATOMIC_BIN_OP_HALFWORD(bin_inst, index, extract_result) \
891 { \
892 constexpr int offset = -(2 * index); \
893 constexpr int shift_amount = index * 16; \
894 constexpr int start = 48 - shift_amount; \
895 constexpr int end = start + 15; \
896 ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end, false); \
897 extract_result(); \
898 }
899#define ATOMIC_BIN_OP_BYTE(bin_inst, index, extract_result) \
900 { \
901 constexpr int offset = -(index); \
902 constexpr int shift_amount = index * 8; \
903 constexpr int start = 56 - shift_amount; \
904 constexpr int end = start + 7; \
905 ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end, false); \
906 extract_result(); \
907 }
908#endif // V8_TARGET_BIG_ENDIAN
909
910#define ASSEMBLE_ATOMIC_BINOP_HALFWORD(bin_inst, extract_result) \
911 do { \
912 Register value = i.InputRegister(2); \
913 Register result = i.OutputRegister(0); \
914 Register prev = i.TempRegister(0); \
915 Register new_val = r0; \
916 Register addr = r1; \
917 Register temp = kScratchReg; \
918 AddressingMode mode = kMode_None; \
919 MemOperand op = i.MemoryOperand(&mode); \
920 Label two, done; \
921 __ lay(addr, op); \
922 __ tmll(addr, Operand(3)); \
923 __ b(Condition(2), &two); \
924 /* word boundary */ \
925 ATOMIC_BIN_OP_HALFWORD(bin_inst, 0, extract_result); \
926 __ b(&done); \
927 __ bind(&two); \
928 /* halfword boundary */ \
929 ATOMIC_BIN_OP_HALFWORD(bin_inst, 1, extract_result); \
930 __ bind(&done); \
931 } while (false)
932
933#define ASSEMBLE_ATOMIC_BINOP_BYTE(bin_inst, extract_result) \
934 do { \
935 Register value = i.InputRegister(2); \
936 Register result = i.OutputRegister(0); \
937 Register addr = i.TempRegister(0); \
938 Register prev = r0; \
939 Register new_val = r1; \
940 Register temp = kScratchReg; \
941 AddressingMode mode = kMode_None; \
942 MemOperand op = i.MemoryOperand(&mode); \
943 Label done, one, two, three; \
944 __ lay(addr, op); \
945 __ tmll(addr, Operand(3)); \
946 __ b(Condition(1), &three); \
947 __ b(Condition(2), &two); \
948 __ b(Condition(4), &one); \
949 /* ending with 0b00 (word boundary) */ \
950 ATOMIC_BIN_OP_BYTE(bin_inst, 0, extract_result); \
951 __ b(&done); \
952 /* ending with 0b01 */ \
953 __ bind(&one); \
954 ATOMIC_BIN_OP_BYTE(bin_inst, 1, extract_result); \
955 __ b(&done); \
956 /* ending with 0b10 (hw boundary) */ \
957 __ bind(&two); \
958 ATOMIC_BIN_OP_BYTE(bin_inst, 2, extract_result); \
959 __ b(&done); \
960 /* ending with 0b11 */ \
961 __ bind(&three); \
962 ATOMIC_BIN_OP_BYTE(bin_inst, 3, extract_result); \
963 __ bind(&done); \
964 } while (false)
965
966#define ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD64() \
967 do { \
968 Register new_val = i.InputRegister(1); \
969 Register output = i.OutputRegister(); \
970 Register addr = kScratchReg; \
971 size_t index = 2; \
972 AddressingMode mode = kMode_None; \
973 MemOperand op = i.MemoryOperand(&mode, &index); \
974 __ lay(addr, op); \
975 if (is_wasm_on_be(info())) { \
976 __ lrvgr(r0, output); \
977 __ lrvgr(r1, new_val); \
978 __ CmpAndSwap64(r0, r1, MemOperand(addr)); \
979 __ lrvgr(output, r0); \
980 } else { \
981 __ CmpAndSwap64(output, new_val, MemOperand(addr)); \
982 } \
983 } while (false)
984
985void CodeGenerator::AssembleDeconstructFrame() {
986 __ LeaveFrame(StackFrame::MANUAL);
987 unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
988}
989
990void CodeGenerator::AssemblePrepareTailCall() {
991 if (frame_access_state()->has_frame()) {
992 __ RestoreFrameStateForTailCall();
993 }
994 frame_access_state()->SetFrameAccessToSP();
995}
996
997namespace {
998
999void FlushPendingPushRegisters(MacroAssembler* masm,
1000 FrameAccessState* frame_access_state,
1001 ZoneVector<Register>* pending_pushes) {
1002 switch (pending_pushes->size()) {
1003 case 0:
1004 break;
1005 case 1:
1006 masm->Push((*pending_pushes)[0]);
1007 break;
1008 case 2:
1009 masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
1010 break;
1011 case 3:
1012 masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
1013 (*pending_pushes)[2]);
1014 break;
1015 default:
1016 UNREACHABLE();
1017 }
1018 frame_access_state->IncreaseSPDelta(pending_pushes->size());
1019 pending_pushes->clear();
1020}
1021
1022void AdjustStackPointerForTailCall(
1023 MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
1024 ZoneVector<Register>* pending_pushes = nullptr,
1025 bool allow_shrinkage = true) {
1026 int current_sp_offset = state->GetSPToFPSlotCount() +
1027 StandardFrameConstants::kFixedSlotCountAboveFp;
1028 int stack_slot_delta = new_slot_above_sp - current_sp_offset;
1029 if (stack_slot_delta > 0) {
1030 if (pending_pushes != nullptr) {
1031 FlushPendingPushRegisters(masm, state, pending_pushes);
1032 }
1033 masm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
1034 state->IncreaseSPDelta(stack_slot_delta);
1035 } else if (allow_shrinkage && stack_slot_delta < 0) {
1036 if (pending_pushes != nullptr) {
1037 FlushPendingPushRegisters(masm, state, pending_pushes);
1038 }
1039 masm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
1040 state->IncreaseSPDelta(stack_slot_delta);
1041 }
1042}
1043
1044} // namespace
1045
1046void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
1047 int first_unused_slot_offset) {
1048 ZoneVector<MoveOperands*> pushes(zone());
1049 GetPushCompatibleMoves(instr, kRegisterPush, &pushes);
1050
1051 if (!pushes.empty() &&
1052 (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
1053 first_unused_slot_offset)) {
1054 S390OperandConverter g(this, instr);
1055 ZoneVector<Register> pending_pushes(zone());
1056 for (auto move : pushes) {
1057 LocationOperand destination_location(
1058 LocationOperand::cast(move->destination()));
1059 InstructionOperand source(move->source());
1060 AdjustStackPointerForTailCall(
1061 masm(), frame_access_state(),
1062 destination_location.index() - pending_pushes.size(),
1063 &pending_pushes);
1064 // Pushes of non-register data types are not supported.
1065 DCHECK(source.IsRegister());
1066 LocationOperand source_location(LocationOperand::cast(source));
1067 pending_pushes.push_back(source_location.GetRegister());
1068 // TODO(arm): We can push more than 3 registers at once. Add support in
1069 // the macro-assembler for pushing a list of registers.
1070 if (pending_pushes.size() == 3) {
1071 FlushPendingPushRegisters(masm(), frame_access_state(),
1072 &pending_pushes);
1073 }
1074 move->Eliminate();
1075 }
1076 FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
1077 }
1078 AdjustStackPointerForTailCall(masm(), frame_access_state(),
1079 first_unused_slot_offset, nullptr, false);
1080}
1081
1082void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
1083 int first_unused_slot_offset) {
1084 AdjustStackPointerForTailCall(masm(), frame_access_state(),
1085 first_unused_slot_offset);
1086}
1087
1088// Check that {kJavaScriptCallCodeStartRegister} is correct.
1089void CodeGenerator::AssembleCodeStartRegisterCheck() {
1090 Register scratch = r1;
1091 __ ComputeCodeStartAddress(scratch);
1092 __ CmpS64(scratch, kJavaScriptCallCodeStartRegister);
1093 __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
1094}
1095
1096#ifdef V8_ENABLE_LEAPTIERING
1097void CodeGenerator::AssembleDispatchHandleRegisterCheck() {
1099}
1100#endif // V8_ENABLE_LEAPTIERING
1101
1104}
1105
1106// Assembles an instruction after register allocation, producing machine code.
1107CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
1108 Instruction* instr) {
1109 S390OperandConverter i(this, instr);
1110 ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());
1111
1112 switch (opcode) {
1113 case kArchComment:
1114 __ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)),
1115 SourceLocation());
1116 break;
1117 case kArchCallCodeObject: {
1118 if (HasRegisterInput(instr, 0)) {
1119 Register reg = i.InputRegister(0);
1120 DCHECK_IMPLIES(
1121 instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
1122 reg == kJavaScriptCallCodeStartRegister);
1123 __ CallCodeObject(reg);
1124 } else {
1125 __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
1126 }
1129 break;
1130 }
1131 case kArchCallBuiltinPointer: {
1132 DCHECK(!instr->InputAt(0)->IsImmediate());
1133 Register builtin_index = i.InputRegister(0);
1134 Register target =
1135 instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister)
1136 ? kJavaScriptCallCodeStartRegister
1137 : builtin_index;
1138 __ CallBuiltinByIndex(builtin_index, target);
1141 break;
1142 }
1143#if V8_ENABLE_WEBASSEMBLY
1144 case kArchCallWasmFunction:
1145 case kArchCallWasmFunctionIndirect: {
1146 // We must not share code targets for calls to builtins for wasm code, as
1147 // they might need to be patched individually.
1148 if (instr->InputAt(0)->IsImmediate()) {
1149 DCHECK_EQ(opcode, kArchCallWasmFunction);
1150 Constant constant = i.ToConstant(instr->InputAt(0));
1151 Address wasm_code = static_cast<Address>(constant.ToInt64());
1152 __ Call(wasm_code, constant.rmode());
1153 } else if (opcode == kArchCallWasmFunctionIndirect) {
1154 __ CallWasmCodePointer(i.InputRegister(0));
1155 } else {
1156 __ Call(i.InputRegister(0));
1157 }
1160 break;
1161 }
1162 case kArchTailCallWasm:
1163 case kArchTailCallWasmIndirect: {
1164 // We must not share code targets for calls to builtins for wasm code, as
1165 // they might need to be patched individually.
1166 if (instr->InputAt(0)->IsImmediate()) {
1167 DCHECK_EQ(opcode, kArchTailCallWasm);
1168 Constant constant = i.ToConstant(instr->InputAt(0));
1169 Address wasm_code = static_cast<Address>(constant.ToInt64());
1170 __ Jump(wasm_code, constant.rmode());
1171 } else if (opcode == kArchTailCallWasmIndirect) {
1172 __ CallWasmCodePointer(i.InputRegister(0), CallJumpMode::kTailCall);
1173 } else {
1174 __ Jump(i.InputRegister(0));
1175 }
1178 break;
1179 }
1180#endif // V8_ENABLE_WEBASSEMBLY
1181 case kArchTailCallCodeObject: {
1182 if (HasRegisterInput(instr, 0)) {
1183 Register reg = i.InputRegister(0);
1184 DCHECK_IMPLIES(
1185 instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
1186 reg == kJavaScriptCallCodeStartRegister);
1187 __ JumpCodeObject(reg);
1188 } else {
1189 // We cannot use the constant pool to load the target since
1190 // we've already restored the caller's frame.
1191 ConstantPoolUnavailableScope constant_pool_unavailable(masm());
1192 __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
1193 }
1196 break;
1197 }
1198 case kArchTailCallAddress: {
1199 CHECK(!instr->InputAt(0)->IsImmediate());
1200 Register reg = i.InputRegister(0);
1201 DCHECK_IMPLIES(
1202 instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
1203 reg == kJavaScriptCallCodeStartRegister);
1204 __ Jump(reg);
1207 break;
1208 }
1209 case kArchCallJSFunction: {
1210 Register func = i.InputRegister(0);
1211 if (v8_flags.debug_code) {
1212 // Check the function's context matches the context argument.
1213 __ LoadTaggedField(kScratchReg,
1214 FieldMemOperand(func, JSFunction::kContextOffset));
1215 __ CmpS64(cp, kScratchReg);
1216 __ Assert(eq, AbortReason::kWrongFunctionContext);
1217 }
1218 uint32_t num_arguments =
1219 i.InputUint32(instr->JSCallArgumentCountInputIndex());
1220 __ CallJSFunction(func, num_arguments);
1223 break;
1224 }
1225 case kArchPrepareCallCFunction: {
1226 int const num_gp_parameters = ParamField::decode(instr->opcode());
1227 int const num_fp_parameters = FPParamField::decode(instr->opcode());
1228 __ PrepareCallCFunction(num_gp_parameters + num_fp_parameters,
1229 kScratchReg);
1230 // Frame alignment requires using FP-relative frame addressing.
1232 break;
1233 }
1234 case kArchSaveCallerRegisters: {
1235 fp_mode_ =
1236 static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
1239 // kReturnRegister0 should have been saved before entering the stub.
1240 int bytes = __ PushCallerSaved(fp_mode_, ip, kReturnRegister0);
1242 DCHECK_EQ(0, frame_access_state()->sp_delta());
1246 break;
1247 }
1248 case kArchRestoreCallerRegisters: {
1249 DCHECK(fp_mode_ ==
1250 static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
1253 // Don't overwrite the returned value.
1254 int bytes = __ PopCallerSaved(fp_mode_, ip, kReturnRegister0);
1256 DCHECK_EQ(0, frame_access_state()->sp_delta());
1259 break;
1260 }
1261 case kArchPrepareTailCall:
1262 AssemblePrepareTailCall();
1263 break;
1264 case kArchCallCFunctionWithFrameState:
1265 case kArchCallCFunction: {
1266 int const num_gp_parameters = ParamField::decode(instr->opcode());
1267 int const fp_param_field = FPParamField::decode(instr->opcode());
1268 int num_fp_parameters = fp_param_field;
1269 SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes;
1270 Label return_location;
1271 bool has_function_descriptor = false;
1272#if ABI_USES_FUNCTION_DESCRIPTORS
1273 int kNumFPParametersMask = kHasFunctionDescriptorBitMask - 1;
1274 num_fp_parameters = kNumFPParametersMask & fp_param_field;
1275 has_function_descriptor =
1276 (fp_param_field & kHasFunctionDescriptorBitMask) != 0;
1277#endif
1278 // Put the return address in a stack slot.
1279#if V8_ENABLE_WEBASSEMBLY
1280 if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
1281 // Put the return address in a stack slot.
1282 __ larl(r0, &return_location);
1283 __ StoreU64(r0,
1284 MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
1285 set_isolate_data_slots = SetIsolateDataSlots::kNo;
1286 }
1287#endif // V8_ENABLE_WEBASSEMBLY
1288 int pc_offset;
1289 if (instr->InputAt(0)->IsImmediate()) {
1290 ExternalReference ref = i.InputExternalReference(0);
1291 pc_offset = __ CallCFunction(ref, num_gp_parameters, num_fp_parameters,
1292 set_isolate_data_slots,
1293 has_function_descriptor, &return_location);
1294 } else {
1295 Register func = i.InputRegister(0);
1296 pc_offset = __ CallCFunction(func, num_gp_parameters, num_fp_parameters,
1297 set_isolate_data_slots,
1298 has_function_descriptor, &return_location);
1299 }
1300 RecordSafepoint(instr->reference_map(), pc_offset);
1301
1302 bool const needs_frame_state =
1303 (opcode == kArchCallCFunctionWithFrameState);
1304 if (needs_frame_state) {
1305 RecordDeoptInfo(instr, pc_offset);
1306 }
1307
1308 frame_access_state()->SetFrameAccessToDefault();
1309 // Ideally, we should decrement SP delta to match the change of stack
1310 // pointer in CallCFunction. However, for certain architectures (e.g.
1311 // ARM), there may be more strict alignment requirement, causing old SP
1312 // to be saved on the stack. In those cases, we can not calculate the SP
1313 // delta statically.
1314 frame_access_state()->ClearSPDelta();
1315 if (caller_registers_saved_) {
1316 // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
1317 // Here, we assume the sequence to be:
1318 // kArchSaveCallerRegisters;
1319 // kArchCallCFunction;
1320 // kArchRestoreCallerRegisters;
1321 int bytes =
1322 __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
1323 frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
1324 }
1325 break;
1326 }
1327 case kArchJmp:
1328 AssembleArchJump(i.InputRpo(0));
1329 break;
1330 case kArchBinarySearchSwitch:
1331 AssembleArchBinarySearchSwitch(instr);
1332 break;
1333 case kArchTableSwitch:
1334 AssembleArchTableSwitch(instr);
1335 break;
1336 case kArchAbortCSADcheck:
1337 DCHECK(i.InputRegister(0) == r3);
1338 {
1339 // We don't actually want to generate a pile of code for this, so just
1340 // claim there is a stack frame, without generating one.
1341 FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
1342 __ CallBuiltin(Builtin::kAbortCSADcheck);
1343 }
1344 __ stop();
1345 break;
1346 case kArchDebugBreak:
1347 __ DebugBreak();
1348 break;
1349 case kArchNop:
1350 case kArchThrowTerminator:
1351 // don't emit code for nops.
1352 break;
1353 case kArchDeoptimize: {
1354 DeoptimizationExit* exit =
1355 BuildTranslation(instr, -1, 0, 0, OutputFrameStateCombine::Ignore());
1356 __ b(exit->label());
1357 break;
1358 }
1359 case kArchRet:
1360 AssembleReturn(instr->InputAt(0));
1361 break;
1362 case kArchFramePointer:
1363 __ mov(i.OutputRegister(), fp);
1364 break;
1365 case kArchParentFramePointer:
1366 if (frame_access_state()->has_frame()) {
1367 __ LoadU64(i.OutputRegister(), MemOperand(fp, 0));
1368 } else {
1369 __ mov(i.OutputRegister(), fp);
1370 }
1371 break;
1372#if V8_ENABLE_WEBASSEMBLY
1373 case kArchStackPointer:
1374 __ mov(i.OutputRegister(), sp);
1375 break;
1376 case kArchSetStackPointer: {
1377 DCHECK(instr->InputAt(0)->IsRegister());
1378 __ mov(sp, i.InputRegister(0));
1379 break;
1380 }
1381#endif // V8_ENABLE_WEBASSEMBLY
1382 case kArchStackPointerGreaterThan: {
1383 // Potentially apply an offset to the current stack pointer before the
1384 // comparison to consider the size difference of an optimized frame versus
1385 // the contained unoptimized frames.
1386
1387 Register lhs_register = sp;
1388 uint32_t offset;
1389
1390 if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
1391 lhs_register = i.TempRegister(0);
1392 __ SubS64(lhs_register, sp, Operand(offset));
1393 }
1394
1395 constexpr size_t kValueIndex = 0;
1396 DCHECK(instr->InputAt(kValueIndex)->IsRegister());
1397 __ CmpU64(lhs_register, i.InputRegister(kValueIndex));
1398 break;
1399 }
1400 case kArchStackCheckOffset:
1401 __ LoadSmiLiteral(i.OutputRegister(),
1402 Smi::FromInt(GetStackCheckOffset()));
1403 break;
1404 case kArchTruncateDoubleToI:
1405 __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
1406 i.InputDoubleRegister(0), DetermineStubCallMode());
1407 break;
1408 case kArchStoreWithWriteBarrier: {
1409 RecordWriteMode mode = RecordWriteModeField::decode(instr->opcode());
1410 AddressingMode addressing_mode =
1411 AddressingModeField::decode(instr->opcode());
1412 Register object = i.InputRegister(0);
1413 size_t index = 0;
1414 MemOperand operand = i.MemoryOperand(&addressing_mode, &index);
1415 Register value = i.InputRegister(index);
1416 Register scratch0 = i.TempRegister(0);
1417 Register scratch1 = i.TempRegister(1);
1418
1419 if (v8_flags.debug_code) {
1420 // Checking that |value| is not a cleared weakref: our write barrier
1421 // does not support that for now.
1422 __ CmpS64(value, Operand(kClearedWeakHeapObjectLower32));
1423 __ Check(ne, AbortReason::kOperandIsCleared);
1424 }
1425
1426 OutOfLineRecordWrite* ool = zone()->New<OutOfLineRecordWrite>(
1427 this, object, operand, value, scratch0, scratch1, mode,
1428 DetermineStubCallMode(), &unwinding_info_writer_);
1429 __ StoreTaggedField(value, operand);
1430
1431 if (mode > RecordWriteMode::kValueIsPointer) {
1432 __ JumpIfSmi(value, ool->exit());
1433 }
1434 __ CheckPageFlag(object, scratch0,
1435 MemoryChunk::kPointersFromHereAreInterestingMask, ne,
1436 ool->entry());
1437 __ bind(ool->exit());
1438 break;
1439 }
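// Note: the OutOfLineRecordWrite code is reached only when the object's page
// has the "pointers from here are interesting" flag set; when the mode allows
// the value to be a Smi, a Smi value short-circuits straight to the exit. The
// common case is just the StoreTaggedField above.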
1440 case kArchStoreIndirectWithWriteBarrier:
1441 UNREACHABLE();
1442 case kArchStackSlot: {
1443 FrameOffset offset =
1444 frame_access_state()->GetFrameOffset(i.InputInt32(0));
1445 __ AddS64(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
1446 Operand(offset.offset()));
1447 break;
1448 }
1449 case kS390_Peek: {
1450 int reverse_slot = i.InputInt32(0);
1451 int offset =
1452 FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
1453 if (instr->OutputAt(0)->IsFPRegister()) {
1454 LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
1455 if (op->representation() == MachineRepresentation::kFloat64) {
1456 __ LoadF64(i.OutputDoubleRegister(), MemOperand(fp, offset));
1457 } else if (op->representation() == MachineRepresentation::kFloat32) {
1458 __ LoadF32(i.OutputFloatRegister(), MemOperand(fp, offset));
1459 } else {
1460 DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
1461 __ LoadV128(i.OutputSimd128Register(), MemOperand(fp, offset),
1462 kScratchReg);
1463 }
1464 } else {
1465 __ LoadU64(i.OutputRegister(), MemOperand(fp, offset));
1466 }
1467 break;
1468 }
1469 case kS390_Abs32:
1470 // TODO(john.yan): zero-ext
1471 __ lpr(i.OutputRegister(0), i.InputRegister(0));
1472 break;
1473 case kS390_Abs64:
1474 __ lpgr(i.OutputRegister(0), i.InputRegister(0));
1475 break;
1476 case kS390_And32:
1477 // zero-ext
1478 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1479 ASSEMBLE_BIN32_OP(RRRInstr(nrk), RM32Instr(And), RIInstr(nilf));
1480 } else {
1481 ASSEMBLE_BIN32_OP(RRInstr(nr), RM32Instr(And), RIInstr(nilf));
1482 }
1483 break;
1484 case kS390_And64:
1485 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1486 ASSEMBLE_BIN_OP(RRRInstr(ngrk), RM64Instr(ng), nullInstr);
1487 } else {
1488 ASSEMBLE_BIN_OP(RRInstr(ngr), RM64Instr(ng), nullInstr);
1489 }
1490 break;
1491 case kS390_Or32:
1492 // zero-ext
1493 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1494 ASSEMBLE_BIN32_OP(RRRInstr(ork), RM32Instr(Or), RIInstr(oilf));
1495 } else {
1496 ASSEMBLE_BIN32_OP(RRInstr(or_z), RM32Instr(Or), RIInstr(oilf));
1497 }
1498 break;
1499 case kS390_Or64:
1500 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1501 ASSEMBLE_BIN_OP(RRRInstr(ogrk), RM64Instr(og), nullInstr);
1502 } else {
1503 ASSEMBLE_BIN_OP(RRInstr(ogr), RM64Instr(og), nullInstr);
1504 }
1505 break;
1506 case kS390_Xor32:
1507 // zero-ext
1508 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1509 ASSEMBLE_BIN32_OP(RRRInstr(xrk), RM32Instr(Xor), RIInstr(xilf));
1510 } else {
1511 ASSEMBLE_BIN32_OP(RRInstr(xr), RM32Instr(Xor), RIInstr(xilf));
1512 }
1513 break;
1514 case kS390_Xor64:
1515 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1516 ASSEMBLE_BIN_OP(RRRInstr(xgrk), RM64Instr(xg), nullInstr);
1517 } else {
1518 ASSEMBLE_BIN_OP(RRInstr(xgr), RM64Instr(xg), nullInstr);
1519 }
1520 break;
1521 case kS390_ShiftLeft32:
1522 // zero-ext
1523 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1524 ASSEMBLE_BIN32_OP(RRRInstr(ShiftLeftU32), nullInstr,
1525 RRIInstr(ShiftLeftU32));
1526 } else {
1527 ASSEMBLE_BIN32_OP(RRInstr(sll), nullInstr, RIInstr(sll));
1528 }
1529 break;
1530 case kS390_ShiftLeft64:
1531 ASSEMBLE_BIN_OP(RRRInstr(sllg), nullInstr, RRIInstr(sllg));
1532 break;
1533 case kS390_ShiftRight32:
1534 // zero-ext
1535 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1536 ASSEMBLE_BIN32_OP(RRRInstr(srlk), nullInstr, RRIInstr(srlk));
1537 } else {
1538 ASSEMBLE_BIN32_OP(RRInstr(srl), nullInstr, RIInstr(srl));
1539 }
1540 break;
1541 case kS390_ShiftRight64:
1542 ASSEMBLE_BIN_OP(RRRInstr(srlg), nullInstr, RRIInstr(srlg));
1543 break;
1544 case kS390_ShiftRightArith32:
1545 // zero-ext
1546 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1547 ASSEMBLE_BIN32_OP(RRRInstr(srak), nullInstr, RRIInstr(srak));
1548 } else {
1549 ASSEMBLE_BIN32_OP(RRInstr(sra), nullInstr, RIInstr(sra));
1550 }
1551 break;
1552 case kS390_ShiftRightArith64:
1553 ASSEMBLE_BIN_OP(RRRInstr(srag), nullInstr, RRIInstr(srag));
1554 break;
1555 case kS390_RotRight32: {
1556 // zero-ext
1557 if (HasRegisterInput(instr, 1)) {
1558 __ lcgr(kScratchReg, i.InputRegister(1));
1559 __ rll(i.OutputRegister(), i.InputRegister(0), kScratchReg);
1560 } else {
1561 __ rll(i.OutputRegister(), i.InputRegister(0),
1562 Operand(32 - i.InputInt32(1)));
1563 }
1564 CHECK_AND_ZERO_EXT_OUTPUT(2);
1565 break;
1566 }
1567 case kS390_RotRight64:
1568 if (HasRegisterInput(instr, 1)) {
1569 __ lcgr(kScratchReg, i.InputRegister(1));
1570 __ rllg(i.OutputRegister(), i.InputRegister(0), kScratchReg);
1571 } else {
1573 __ rllg(i.OutputRegister(), i.InputRegister(0),
1574 Operand(64 - i.InputInt32(1)));
1575 }
1576 break;
1577 // TODO(john.yan): clean up kS390_RotLeftAnd...
1578 case kS390_RotLeftAndClear64:
1579 if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
1580 int shiftAmount = i.InputInt32(1);
1581 int endBit = 63 - shiftAmount;
1582 int startBit = 63 - i.InputInt32(2);
1583 __ RotateInsertSelectBits(i.OutputRegister(), i.InputRegister(0),
1584 Operand(startBit), Operand(endBit),
1585 Operand(shiftAmount), true);
1586 } else {
1587 int shiftAmount = i.InputInt32(1);
1588 int clearBit = 63 - i.InputInt32(2);
1589 __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
1590 __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
1591 __ srlg(i.OutputRegister(), i.OutputRegister(),
1592 Operand(clearBit + shiftAmount));
1593 __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(shiftAmount));
1594 }
1595 break;
1596 case kS390_RotLeftAndClearLeft64:
1597 if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
1598 int shiftAmount = i.InputInt32(1);
1599 int endBit = 63;
1600 int startBit = 63 - i.InputInt32(2);
1601 __ RotateInsertSelectBits(i.OutputRegister(), i.InputRegister(0),
1602 Operand(startBit), Operand(endBit),
1603 Operand(shiftAmount), true);
1604 } else {
1605 int shiftAmount = i.InputInt32(1);
1606 int clearBit = 63 - i.InputInt32(2);
1607 __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
1608 __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
1609 __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
1610 }
1611 break;
1612 case kS390_RotLeftAndClearRight64:
1613 if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
1614 int shiftAmount = i.InputInt32(1);
1615 int endBit = 63 - i.InputInt32(2);
1616 int startBit = 0;
1617 __ RotateInsertSelectBits(i.OutputRegister(), i.InputRegister(0),
1618 Operand(startBit), Operand(endBit),
1619 Operand(shiftAmount), true);
1620 } else {
1621 int shiftAmount = i.InputInt32(1);
1622 int clearBit = i.InputInt32(2);
1623 __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
1624 __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
1625 __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
1626 }
1627 break;
1628 case kS390_Add32: {
1629 // zero-ext
1630 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1631 ASSEMBLE_BIN32_OP(RRRInstr(ark), RM32Instr(AddS32), RRIInstr(AddS32));
1632 } else {
1633 ASSEMBLE_BIN32_OP(RRInstr(ar), RM32Instr(AddS32), RIInstr(AddS32));
1634 }
1635 break;
1636 }
1637 case kS390_Add64:
1638 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1639 ASSEMBLE_BIN_OP(RRRInstr(agrk), RM64Instr(ag), RRIInstr(AddS64));
1640 } else {
1641 ASSEMBLE_BIN_OP(RRInstr(agr), RM64Instr(ag), RIInstr(agfi));
1642 }
1643 break;
1644 case kS390_AddFloat:
1645 ASSEMBLE_BIN_OP(DDInstr(aebr), DMTInstr(AddFloat32), nullInstr);
1646 break;
1647 case kS390_AddDouble:
1648 ASSEMBLE_BIN_OP(DDInstr(adbr), DMTInstr(AddFloat64), nullInstr);
1649 break;
1650 case kS390_Sub32:
1651 // zero-ext
1652 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1653 ASSEMBLE_BIN32_OP(RRRInstr(srk), RM32Instr(SubS32), RRIInstr(SubS32));
1654 } else {
1655 ASSEMBLE_BIN32_OP(RRInstr(sr), RM32Instr(SubS32), RIInstr(SubS32));
1656 }
1657 break;
1658 case kS390_Sub64:
1659 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1660 ASSEMBLE_BIN_OP(RRRInstr(sgrk), RM64Instr(sg), RRIInstr(SubS64));
1661 } else {
1662 ASSEMBLE_BIN_OP(RRInstr(sgr), RM64Instr(sg), RIInstr(SubS64));
1663 }
1664 break;
1665 case kS390_SubFloat:
1666 ASSEMBLE_BIN_OP(DDInstr(sebr), DMTInstr(SubFloat32), nullInstr);
1667 break;
1668 case kS390_SubDouble:
1669 ASSEMBLE_BIN_OP(DDInstr(sdbr), DMTInstr(SubFloat64), nullInstr);
1670 break;
1671 case kS390_Mul32:
1672 // zero-ext
1673 if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
1674 ASSEMBLE_BIN32_OP(RRRInstr(msrkc), RM32Instr(msc), RIInstr(MulS32));
1675 } else {
1676 ASSEMBLE_BIN32_OP(RRInstr(MulS32), RM32Instr(MulS32), RIInstr(MulS32));
1677 }
1678 break;
1679 case kS390_Mul32WithOverflow:
1680 // zero-ext
1681 ASSEMBLE_BIN32_OP(RRRInstr(Mul32WithOverflowIfCCUnequal),
1682 RRM32Instr(Mul32WithOverflowIfCCUnequal),
1683 RRIInstr(Mul32WithOverflowIfCCUnequal));
1684 break;
1685 case kS390_Mul64:
1686 ASSEMBLE_BIN_OP(RRInstr(MulS64), RM64Instr(MulS64), RIInstr(MulS64));
1687 break;
1688 case kS390_Mul64WithOverflow: {
1689 Register dst = i.OutputRegister(), src1 = i.InputRegister(0),
1690 src2 = i.InputRegister(1);
1691 CHECK(!AreAliased(dst, src1, src2));
1692 if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
1693 __ msgrkc(dst, src1, src2);
1694 } else {
1695 // Mul high.
1696 __ MulHighS64(r1, src1, src2);
1697 // Mul low.
1698 __ mov(dst, src1);
1699 __ MulS64(dst, src2);
1700 // Test whether {high} is a sign-extension of {result}.
1701 __ ShiftRightS64(r0, dst, Operand(63));
1702 __ CmpU64(r1, r0);
1703 }
1704 break;
1705 }
1706 case kS390_MulHigh32:
1707 // zero-ext
1708 ASSEMBLE_BIN_OP(RRRInstr(MulHighS32), RRM32Instr(MulHighS32),
1709 RRIInstr(MulHighS32));
1710 break;
1711 case kS390_MulHighU32:
1712 // zero-ext
1713 ASSEMBLE_BIN_OP(RRRInstr(MulHighU32), RRM32Instr(MulHighU32),
1714 RRIInstr(MulHighU32));
1715 break;
1716 case kS390_MulHighU64:
1717 ASSEMBLE_BIN_OP(RRRInstr(MulHighU64), nullInstr, nullInstr);
1718 break;
1719 case kS390_MulHighS64:
1720 ASSEMBLE_BIN_OP(RRRInstr(MulHighS64), nullInstr, nullInstr);
1721 break;
1722 case kS390_MulFloat:
1723 ASSEMBLE_BIN_OP(DDInstr(meebr), DMTInstr(MulFloat32), nullInstr);
1724 break;
1725 case kS390_MulDouble:
1726 ASSEMBLE_BIN_OP(DDInstr(mdbr), DMTInstr(MulFloat64), nullInstr);
1727 break;
1728 case kS390_Div64:
1729 ASSEMBLE_BIN_OP(RRRInstr(DivS64), RRM64Instr(DivS64), nullInstr);
1730 break;
1731 case kS390_Div32: {
1732 // zero-ext
1733 ASSEMBLE_BIN_OP(RRRInstr(DivS32), RRM32Instr(DivS32), nullInstr);
1734 break;
1735 }
1736 case kS390_DivU64:
1737 ASSEMBLE_BIN_OP(RRRInstr(DivU64), RRM64Instr(DivU64), nullInstr);
1738 break;
1739 case kS390_DivU32: {
1740 // zero-ext
1741 ASSEMBLE_BIN_OP(RRRInstr(DivU32), RRM32Instr(DivU32), nullInstr);
1742 break;
1743 }
1744 case kS390_DivFloat:
1745 ASSEMBLE_BIN_OP(DDInstr(debr), DMTInstr(DivFloat32), nullInstr);
1746 break;
1747 case kS390_DivDouble:
1748 ASSEMBLE_BIN_OP(DDInstr(ddbr), DMTInstr(DivFloat64), nullInstr);
1749 break;
1750 case kS390_Mod32:
1751 // zero-ext
1752 ASSEMBLE_BIN_OP(RRRInstr(ModS32), RRM32Instr(ModS32), nullInstr);
1753 break;
1754 case kS390_ModU32:
1755 // zero-ext
1756 ASSEMBLE_BIN_OP(RRRInstr(ModU32), RRM32Instr(ModU32), nullInstr);
1757 break;
1758 case kS390_Mod64:
1759 ASSEMBLE_BIN_OP(RRRInstr(ModS64), RRM64Instr(ModS64), nullInstr);
1760 break;
1761 case kS390_ModU64:
1762 ASSEMBLE_BIN_OP(RRRInstr(ModU64), RRM64Instr(ModU64), nullInstr);
1763 break;
1764 case kS390_AbsFloat:
1765 __ lpebr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1766 break;
1767 case kS390_SqrtFloat:
1768 ASSEMBLE_UNARY_OP(D_DInstr(sqebr), nullInstr, nullInstr);
1769 break;
1770 case kS390_SqrtDouble:
1771 ASSEMBLE_UNARY_OP(D_DInstr(sqdbr), nullInstr, nullInstr);
1772 break;
1773 case kS390_FloorFloat:
1774 __ FloorF32(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1775 break;
1776 case kS390_CeilFloat:
1777 __ CeilF32(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1778 break;
1779 case kS390_TruncateFloat:
1780 __ TruncF32(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1781 break;
1782 // Double operations
1783 case kS390_ModDouble:
1784 ASSEMBLE_FLOAT_MODULO();
1785 break;
1786 case kIeee754Float64Acos:
1787 ASSEMBLE_IEEE754_UNOP(acos);
1788 break;
1789 case kIeee754Float64Acosh:
1790 ASSEMBLE_IEEE754_UNOP(acosh);
1791 break;
1792 case kIeee754Float64Asin:
1793 ASSEMBLE_IEEE754_UNOP(asin);
1794 break;
1795 case kIeee754Float64Asinh:
1796 ASSEMBLE_IEEE754_UNOP(asinh);
1797 break;
1798 case kIeee754Float64Atanh:
1799 ASSEMBLE_IEEE754_UNOP(atanh);
1800 break;
1801 case kIeee754Float64Atan:
1802 ASSEMBLE_IEEE754_UNOP(atan);
1803 break;
1804 case kIeee754Float64Atan2:
1805 ASSEMBLE_IEEE754_BINOP(atan2);
1806 break;
1807 case kIeee754Float64Tan:
1808 ASSEMBLE_IEEE754_UNOP(tan);
1809 break;
1810 case kIeee754Float64Tanh:
1811 ASSEMBLE_IEEE754_UNOP(tanh);
1812 break;
1813 case kIeee754Float64Cbrt:
1814 ASSEMBLE_IEEE754_UNOP(cbrt);
1815 break;
1816 case kIeee754Float64Sin:
1817 ASSEMBLE_IEEE754_UNOP(sin);
1818 break;
1819 case kIeee754Float64Sinh:
1820 ASSEMBLE_IEEE754_UNOP(sinh);
1821 break;
1822 case kIeee754Float64Cos:
1823 ASSEMBLE_IEEE754_UNOP(cos);
1824 break;
1825 case kIeee754Float64Cosh:
1826 ASSEMBLE_IEEE754_UNOP(cosh);
1827 break;
1828 case kIeee754Float64Exp:
1829 ASSEMBLE_IEEE754_UNOP(exp);
1830 break;
1831 case kIeee754Float64Expm1:
1832 ASSEMBLE_IEEE754_UNOP(expm1);
1833 break;
1834 case kIeee754Float64Log:
1835 ASSEMBLE_IEEE754_UNOP(log);
1836 break;
1837 case kIeee754Float64Log1p:
1838 ASSEMBLE_IEEE754_UNOP(log1p);
1839 break;
1840 case kIeee754Float64Log2:
1841 ASSEMBLE_IEEE754_UNOP(log2);
1842 break;
1843 case kIeee754Float64Log10:
1844 ASSEMBLE_IEEE754_UNOP(log10);
1845 break;
1846 case kIeee754Float64Pow:
1847 ASSEMBLE_IEEE754_BINOP(pow);
1848 break;
1849 case kS390_Neg32:
1850 __ lcr(i.OutputRegister(), i.InputRegister(0));
1851 CHECK_AND_ZERO_EXT_OUTPUT(1);
1852 break;
1853 case kS390_Neg64:
1854 __ lcgr(i.OutputRegister(), i.InputRegister(0));
1855 break;
1856 case kS390_MaxFloat:
1857 __ FloatMax(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1858 i.InputDoubleRegister(1));
1859 break;
1860 case kS390_MaxDouble:
1861 __ DoubleMax(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1862 i.InputDoubleRegister(1));
1863 break;
1864 case kS390_MinFloat:
1865 __ FloatMin(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1866 i.InputDoubleRegister(1));
1867 break;
1868 case kS390_FloatNearestInt:
1869 __ NearestIntF32(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1870 break;
1871 case kS390_MinDouble:
1872 __ DoubleMin(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1873 i.InputDoubleRegister(1));
1874 break;
1875 case kS390_AbsDouble:
1876 __ lpdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1877 break;
1878 case kS390_FloorDouble:
1879 __ FloorF64(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1880 break;
1881 case kS390_CeilDouble:
1882 __ CeilF64(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1883 break;
1884 case kS390_TruncateDouble:
1885 __ TruncF64(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1886 break;
1887 case kS390_RoundDouble:
1888 __ fidbra(ROUND_TO_NEAREST_AWAY_FROM_0, i.OutputDoubleRegister(),
1889 i.InputDoubleRegister(0));
1890 break;
1891 case kS390_DoubleNearestInt:
1892 __ NearestIntF64(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1893 break;
1894 case kS390_NegFloat:
1895 ASSEMBLE_UNARY_OP(D_DInstr(lcebr), nullInstr, nullInstr);
1896 break;
1897 case kS390_NegDouble:
1898 ASSEMBLE_UNARY_OP(D_DInstr(lcdbr), nullInstr, nullInstr);
1899 break;
1900 case kS390_Cntlz32: {
1901 __ CountLeadingZerosU32(i.OutputRegister(), i.InputRegister(0), r0);
1902 break;
1903 }
1904 case kS390_Cntlz64: {
1905 __ CountLeadingZerosU64(i.OutputRegister(), i.InputRegister(0), r0);
1906 break;
1907 }
1908 case kS390_Popcnt32:
1909 __ Popcnt32(i.OutputRegister(), i.InputRegister(0));
1910 break;
1911 case kS390_Popcnt64:
1912 __ Popcnt64(i.OutputRegister(), i.InputRegister(0));
1913 break;
1914 case kS390_Cmp32:
1915 ASSEMBLE_COMPARE32(CmpS32, CmpU32);
1916 break;
1917 case kS390_Cmp64:
1918 ASSEMBLE_COMPARE(CmpS64, CmpU64);
1919 break;
1920 case kS390_CmpFloat:
1921 ASSEMBLE_FLOAT_COMPARE(cebr, ceb, ley);
1922 // __ cebr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
1923 break;
1924 case kS390_CmpDouble:
1925 ASSEMBLE_FLOAT_COMPARE(cdbr, cdb, ldy);
1926 // __ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
1927 break;
1928 case kS390_Tst32:
1929 if (HasRegisterInput(instr, 1)) {
1930 __ And(r0, i.InputRegister(0), i.InputRegister(1));
1931 } else {
1932 // detect tmlh/tmhl/tmhh case
1933 Operand opnd = i.InputImmediate(1);
1934 if (is_uint16(opnd.immediate())) {
1935 __ tmll(i.InputRegister(0), opnd);
1936 } else {
1937 __ lr(r0, i.InputRegister(0));
1938 __ nilf(r0, opnd);
1939 }
1940 }
1941 break;
1942 case kS390_Tst64:
1943 if (HasRegisterInput(instr, 1)) {
1944 __ AndP(r0, i.InputRegister(0), i.InputRegister(1));
1945 } else {
1946 Operand opnd = i.InputImmediate(1);
1947 if (is_uint16(opnd.immediate())) {
1948 __ tmll(i.InputRegister(0), opnd);
1949 } else {
1950 __ AndP(r0, i.InputRegister(0), opnd);
1951 }
1952 }
1953 break;
1954 case kS390_Float64SilenceNaN: {
1955 DoubleRegister value = i.InputDoubleRegister(0);
1956 DoubleRegister result = i.OutputDoubleRegister();
1957 __ CanonicalizeNaN(result, value);
1958 break;
1959 }
1960 case kS390_Push: {
1961 int stack_decrement = i.InputInt32(0);
1962 int slots = stack_decrement / kSystemPointerSize;
1963 LocationOperand* op = LocationOperand::cast(instr->InputAt(1));
1964 MachineRepresentation rep = op->representation();
1965 int pushed_slots = ElementSizeInPointers(rep);
1966 // Slot-sized arguments are never padded but there may be a gap if
1967 // the slot allocator reclaimed other padding slots. Adjust the stack
1968 // here to skip any gap.
1969 __ AllocateStackSpace((slots - pushed_slots) * kSystemPointerSize);
1970 switch (rep) {
1971 case MachineRepresentation::kFloat32:
1972 __ lay(sp, MemOperand(sp, -kSystemPointerSize));
1973 __ StoreF32(i.InputDoubleRegister(1), MemOperand(sp));
1974 break;
1975 case MachineRepresentation::kFloat64:
1976 __ lay(sp, MemOperand(sp, -kDoubleSize));
1977 __ StoreF64(i.InputDoubleRegister(1), MemOperand(sp));
1978 break;
1979 case MachineRepresentation::kSimd128:
1980 __ lay(sp, MemOperand(sp, -kSimd128Size));
1981 __ StoreV128(i.InputDoubleRegister(1), MemOperand(sp), kScratchReg);
1982 break;
1983 default:
1984 __ Push(i.InputRegister(1));
1985 break;
1986 }
1987 frame_access_state()->IncreaseSPDelta(slots);
1988 break;
1989 }
1990 case kS390_PushFrame: {
1991 int num_slots = i.InputInt32(1);
1992 __ lay(sp, MemOperand(sp, -num_slots * kSystemPointerSize));
1993 if (instr->InputAt(0)->IsFPRegister()) {
1994 LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
1995 if (op->representation() == MachineRepresentation::kFloat64) {
1996 __ StoreF64(i.InputDoubleRegister(0), MemOperand(sp));
1997 } else {
1998 DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
1999 __ StoreF32(i.InputDoubleRegister(0), MemOperand(sp));
2000 }
2001 } else {
2002 __ StoreU64(i.InputRegister(0), MemOperand(sp));
2003 }
2004 break;
2005 }
2006 case kS390_StoreToStackSlot: {
2007 int slot = i.InputInt32(1);
2008 if (instr->InputAt(0)->IsFPRegister()) {
2009 LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
2010 if (op->representation() == MachineRepresentation::kFloat64) {
2011 __ StoreF64(i.InputDoubleRegister(0),
2012 MemOperand(sp, slot * kSystemPointerSize));
2013 } else if (op->representation() == MachineRepresentation::kFloat32) {
2014 __ StoreF32(i.InputDoubleRegister(0),
2015 MemOperand(sp, slot * kSystemPointerSize));
2016 } else {
2017 DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
2018 __ StoreV128(i.InputDoubleRegister(0),
2019 MemOperand(sp, slot * kSystemPointerSize), kScratchReg);
2020 }
2021 } else {
2022 __ StoreU64(i.InputRegister(0),
2023 MemOperand(sp, slot * kSystemPointerSize));
2024 }
2025 break;
2026 }
2027 case kS390_SignExtendWord8ToInt32:
2028 __ lbr(i.OutputRegister(), i.InputRegister(0));
2029 CHECK_AND_ZERO_EXT_OUTPUT(1);
2030 break;
2031 case kS390_SignExtendWord16ToInt32:
2032 __ lhr(i.OutputRegister(), i.InputRegister(0));
2033 CHECK_AND_ZERO_EXT_OUTPUT(1);
2034 break;
2035 case kS390_SignExtendWord8ToInt64:
2036 __ lgbr(i.OutputRegister(), i.InputRegister(0));
2037 break;
2038 case kS390_SignExtendWord16ToInt64:
2039 __ lghr(i.OutputRegister(), i.InputRegister(0));
2040 break;
2041 case kS390_SignExtendWord32ToInt64:
2042 __ lgfr(i.OutputRegister(), i.InputRegister(0));
2043 break;
2044 case kS390_Uint32ToUint64:
2045 // Zero extend
2046 __ llgfr(i.OutputRegister(), i.InputRegister(0));
2047 break;
2048 case kS390_Int64ToInt32:
2049 // sign extend
2050 __ lgfr(i.OutputRegister(), i.InputRegister(0));
2051 break;
2052 // Convert Fixed to Floating Point
2053 case kS390_Int64ToFloat32:
2054 __ ConvertInt64ToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
2055 break;
2056 case kS390_Int64ToDouble:
2057 __ ConvertInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
2058 break;
2059 case kS390_Uint64ToFloat32:
2060 __ ConvertUnsignedInt64ToFloat(i.OutputDoubleRegister(),
2061 i.InputRegister(0));
2062 break;
2063 case kS390_Uint64ToDouble:
2064 __ ConvertUnsignedInt64ToDouble(i.OutputDoubleRegister(),
2065 i.InputRegister(0));
2066 break;
2067 case kS390_Int32ToFloat32:
2068 __ ConvertIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
2069 break;
2070 case kS390_Int32ToDouble:
2071 __ ConvertIntToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
2072 break;
2073 case kS390_Uint32ToFloat32:
2074 __ ConvertUnsignedIntToFloat(i.OutputDoubleRegister(),
2075 i.InputRegister(0));
2076 break;
2077 case kS390_Uint32ToDouble:
2078 __ ConvertUnsignedIntToDouble(i.OutputDoubleRegister(),
2079 i.InputRegister(0));
2080 break;
2081 case kS390_DoubleToInt32: {
2082 Label done;
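// When a second output register is present it serves as a success flag: it
// is preset to 1 and cleared to 0 below if the conversion leaves a
// non-'normal' condition code (e.g. a NaN or out-of-range input).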
2083 if (i.OutputCount() > 1) {
2084 __ mov(i.OutputRegister(1), Operand(1));
2085 }
2086 __ ConvertDoubleToInt32(i.OutputRegister(0), i.InputDoubleRegister(0),
2087 kRoundToZero);
2088 __ b(Condition(0xE), &done, Label::kNear); // normal case
2089 if (i.OutputCount() > 1) {
2090 __ mov(i.OutputRegister(1), Operand::Zero());
2091 } else {
2092 __ mov(i.OutputRegister(0), Operand::Zero());
2093 }
2094 __ bind(&done);
2095 break;
2096 }
2097 case kS390_DoubleToUint32: {
2098 Label done;
2099 if (i.OutputCount() > 1) {
2100 __ mov(i.OutputRegister(1), Operand(1));
2101 }
2102 __ ConvertDoubleToUnsignedInt32(i.OutputRegister(0),
2103 i.InputDoubleRegister(0));
2104 __ b(Condition(0xE), &done, Label::kNear); // normal case
2105 if (i.OutputCount() > 1) {
2106 __ mov(i.OutputRegister(1), Operand::Zero());
2107 } else {
2108 __ mov(i.OutputRegister(0), Operand::Zero());
2109 }
2110 __ bind(&done);
2111 break;
2112 }
2113 case kS390_DoubleToInt64: {
2114 Label done;
2115 if (i.OutputCount() > 1) {
2116 __ mov(i.OutputRegister(1), Operand(1));
2117 }
2118 __ ConvertDoubleToInt64(i.OutputRegister(0), i.InputDoubleRegister(0));
2119 __ b(Condition(0xE), &done, Label::kNear); // normal case
2120 if (i.OutputCount() > 1) {
2121 __ mov(i.OutputRegister(1), Operand::Zero());
2122 } else {
2123 __ mov(i.OutputRegister(0), Operand::Zero());
2124 }
2125 __ bind(&done);
2126 break;
2127 }
2128 case kS390_DoubleToUint64: {
2129 Label done;
2130 if (i.OutputCount() > 1) {
2131 __ mov(i.OutputRegister(1), Operand(1));
2132 }
2133 __ ConvertDoubleToUnsignedInt64(i.OutputRegister(0),
2134 i.InputDoubleRegister(0));
2135 __ b(Condition(0xE), &done, Label::kNear); // normal case
2136 if (i.OutputCount() > 1) {
2137 __ mov(i.OutputRegister(1), Operand::Zero());
2138 } else {
2139 __ mov(i.OutputRegister(0), Operand::Zero());
2140 }
2141 __ bind(&done);
2142 break;
2143 }
2144 case kS390_Float32ToInt32: {
2145 Label done;
2146 __ ConvertFloat32ToInt32(i.OutputRegister(0), i.InputDoubleRegister(0),
2147 kRoundToZero);
2148 bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode());
2149 if (set_overflow_to_min_i32) {
2150 // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
2151 // because INT32_MIN allows easier out-of-bounds detection.
2152 __ b(Condition(0xE), &done, Label::kNear); // normal case
2153 __ llilh(i.OutputRegister(0), Operand(0x8000));
2154 }
2155 __ bind(&done);
2156 break;
2157 }
2158 case kS390_Float32ToUint32: {
2159 Label done;
2160 __ ConvertFloat32ToUnsignedInt32(i.OutputRegister(0),
2161 i.InputDoubleRegister(0));
2162 bool set_overflow_to_min_u32 = MiscField::decode(instr->opcode());
2163 if (set_overflow_to_min_u32) {
2164 // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
2165 // because 0 allows easier out-of-bounds detection.
2166 __ b(Condition(0xE), &done, Label::kNear); // normal case
2167 __ mov(i.OutputRegister(0), Operand::Zero());
2168 }
2169 __ bind(&done);
2170 break;
2171 }
2172 case kS390_Float32ToUint64: {
2173 Label done;
2174 if (i.OutputCount() > 1) {
2175 __ mov(i.OutputRegister(1), Operand(1));
2176 }
2177 __ ConvertFloat32ToUnsignedInt64(i.OutputRegister(0),
2178 i.InputDoubleRegister(0));
2179 __ b(Condition(0xE), &done, Label::kNear); // normal case
2180 if (i.OutputCount() > 1) {
2181 __ mov(i.OutputRegister(1), Operand::Zero());
2182 } else {
2183 __ mov(i.OutputRegister(0), Operand::Zero());
2184 }
2185 __ bind(&done);
2186 break;
2187 }
2188 case kS390_Float32ToInt64: {
2189 Label done;
2190 if (i.OutputCount() > 1) {
2191 __ mov(i.OutputRegister(1), Operand(1));
2192 }
2193 __ ConvertFloat32ToInt64(i.OutputRegister(0), i.InputDoubleRegister(0));
2194 __ b(Condition(0xE), &done, Label::kNear); // normal case
2195 if (i.OutputCount() > 1) {
2196 __ mov(i.OutputRegister(1), Operand::Zero());
2197 } else {
2198 __ mov(i.OutputRegister(0), Operand::Zero());
2199 }
2200 __ bind(&done);
2201 break;
2202 }
2203 case kS390_DoubleToFloat32:
2204 ASSEMBLE_UNARY_OP(D_DInstr(ledbr), nullInstr, nullInstr);
2205 break;
2206 case kS390_Float32ToDouble:
2207 ASSEMBLE_UNARY_OP(D_DInstr(ldebr), D_MInstr(LoadF32AsF64), nullInstr);
2208 break;
2209 case kS390_DoubleExtractLowWord32:
2210 __ lgdr(i.OutputRegister(), i.InputDoubleRegister(0));
2211 __ llgfr(i.OutputRegister(), i.OutputRegister());
2212 break;
2213 case kS390_DoubleExtractHighWord32:
2214 __ lgdr(i.OutputRegister(), i.InputDoubleRegister(0));
2215 __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(32));
2216 break;
2217 case kS390_DoubleFromWord32Pair:
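// Combine the two 32-bit halves (input 0 = high word, input 1 = low word)
// into a single 64-bit pattern and move it bitwise into the FP register.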
2218 __ LoadU32(kScratchReg, i.InputRegister(1));
2219 __ ShiftLeftU64(i.TempRegister(0), i.InputRegister(0), Operand(32));
2220 __ OrP(i.TempRegister(0), i.TempRegister(0), kScratchReg);
2221 __ MovInt64ToDouble(i.OutputDoubleRegister(), i.TempRegister(0));
2222 break;
2223 case kS390_DoubleInsertLowWord32:
2224 __ lgdr(kScratchReg, i.InputDoubleRegister(0));
2225 __ lr(kScratchReg, i.InputRegister(1));
2226 __ ldgr(i.OutputDoubleRegister(), kScratchReg);
2227 break;
2228 case kS390_DoubleInsertHighWord32:
2229 __ sllg(kScratchReg, i.InputRegister(1), Operand(32));
2230 __ lgdr(r0, i.InputDoubleRegister(0));
2231 __ lr(kScratchReg, r0);
2232 __ ldgr(i.OutputDoubleRegister(), kScratchReg);
2233 break;
2234 case kS390_DoubleConstruct:
2235 __ sllg(kScratchReg, i.InputRegister(0), Operand(32));
2236 __ lr(kScratchReg, i.InputRegister(1));
2237
2238 // Bitwise convert from GPR to FPR
2239 __ ldgr(i.OutputDoubleRegister(), kScratchReg);
2240 break;
2241 case kS390_LoadWordS8:
2242 ASSEMBLE_LOAD_INTEGER(LoadS8);
2243 break;
2244 case kS390_BitcastFloat32ToInt32:
2245 ASSEMBLE_UNARY_OP(R_DInstr(MovFloatToInt), R_MInstr(LoadU32), nullInstr);
2246 break;
2247 case kS390_BitcastInt32ToFloat32:
2248 __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
2249 break;
2250 case kS390_BitcastDoubleToInt64:
2251 __ MovDoubleToInt64(i.OutputRegister(), i.InputDoubleRegister(0));
2252 break;
2253 case kS390_BitcastInt64ToDouble:
2254 __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
2255 break;
2256 case kS390_LoadWordU8:
2257 ASSEMBLE_LOAD_INTEGER(LoadU8);
2258 break;
2259 case kS390_LoadWordU16:
2260 ASSEMBLE_LOAD_INTEGER(LoadU16);
2261 break;
2262 case kS390_LoadWordS16:
2263 ASSEMBLE_LOAD_INTEGER(LoadS16);
2264 break;
2265 case kS390_LoadWordU32:
2266 ASSEMBLE_LOAD_INTEGER(LoadU32);
2267 break;
2268 case kS390_LoadWordS32:
2269 ASSEMBLE_LOAD_INTEGER(LoadS32);
2270 break;
2271 case kS390_LoadReverse16:
2272 ASSEMBLE_LOAD_INTEGER(lrvh);
2273 break;
2274 case kS390_LoadReverse32:
2275 ASSEMBLE_LOAD_INTEGER(lrv);
2276 break;
2277 case kS390_LoadReverse64:
2278 ASSEMBLE_LOAD_INTEGER(lrvg);
2279 break;
2280 case kS390_LoadReverse16RR:
2281 __ lrvr(i.OutputRegister(), i.InputRegister(0));
2282 __ rll(i.OutputRegister(), i.OutputRegister(), Operand(16));
2283 break;
2284 case kS390_LoadReverse32RR:
2285 __ lrvr(i.OutputRegister(), i.InputRegister(0));
2286 break;
2287 case kS390_LoadReverse64RR:
2288 __ lrvgr(i.OutputRegister(), i.InputRegister(0));
2289 break;
2290 case kS390_LoadReverseSimd128RR:
2291 __ vlgv(r0, i.InputSimd128Register(0), MemOperand(r0, 0), Condition(3));
2292 __ vlgv(r1, i.InputSimd128Register(0), MemOperand(r0, 1), Condition(3));
2293 __ lrvgr(r0, r0);
2294 __ lrvgr(r1, r1);
2295 __ vlvg(i.OutputSimd128Register(), r0, MemOperand(r0, 1), Condition(3));
2296 __ vlvg(i.OutputSimd128Register(), r1, MemOperand(r0, 0), Condition(3));
2297 break;
2298 case kS390_LoadReverseSimd128: {
2299 AddressingMode mode = kMode_None;
2300 MemOperand operand = i.MemoryOperand(&mode);
2301 Simd128Register dst = i.OutputSimd128Register();
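// With VECTOR_ENHANCE_FACILITY_2 a single vlbr does the byte-reversed
// 128-bit load; otherwise fall back to two byte-reversed 64-bit loads
// (lrvg) recombined into the vector register with vlvgp.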
2302 if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) &&
2303 is_uint12(operand.offset())) {
2304 __ vlbr(dst, operand, Condition(4));
2305 } else {
2306 __ lrvg(r0, operand);
2307 __ lrvg(r1, MemOperand(operand.rx(), operand.rb(),
2308 operand.offset() + kSystemPointerSize));
2309 __ vlvgp(dst, r1, r0);
2310 }
2311 break;
2312 }
2313 case kS390_LoadWord64:
2314 ASSEMBLE_LOAD_INTEGER(lg);
2315 break;
2316 case kS390_LoadAndTestWord32: {
2317 ASSEMBLE_LOADANDTEST32(ltr, lt_z);
2318 break;
2319 }
2320 case kS390_LoadAndTestWord64: {
2321 ASSEMBLE_LOADANDTEST64(ltgr, ltg);
2322 break;
2323 }
2324 case kS390_LoadFloat32:
2325 ASSEMBLE_LOAD_FLOAT(LoadF32);
2326 break;
2327 case kS390_LoadDouble:
2328 ASSEMBLE_LOAD_FLOAT(LoadF64);
2329 break;
2330 case kS390_LoadSimd128: {
2331 AddressingMode mode = kMode_None;
2332 MemOperand operand = i.MemoryOperand(&mode);
2333 __ vl(i.OutputSimd128Register(), operand, Condition(0));
2334 break;
2335 }
2336 case kS390_StoreWord8:
2337 ASSEMBLE_STORE_INTEGER(StoreU8);
2338 break;
2339 case kS390_StoreWord16:
2340 ASSEMBLE_STORE_INTEGER(StoreU16);
2341 break;
2342 case kS390_StoreWord32:
2343 ASSEMBLE_STORE_INTEGER(StoreU32);
2344 break;
2345 case kS390_StoreWord64:
2346 ASSEMBLE_STORE_INTEGER(StoreU64);
2347 break;
2348 case kS390_StoreReverse16:
2349 ASSEMBLE_STORE_INTEGER(strvh);
2350 break;
2351 case kS390_StoreReverse32:
2352 ASSEMBLE_STORE_INTEGER(strv);
2353 break;
2354 case kS390_StoreReverse64:
2355 ASSEMBLE_STORE_INTEGER(strvg);
2356 break;
2357 case kS390_StoreReverseSimd128: {
2358 size_t index = 0;
2359 AddressingMode mode = kMode_None;
2360 MemOperand operand = i.MemoryOperand(&mode, &index);
2361 if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) &&
2362 is_uint12(operand.offset())) {
2363 __ vstbr(i.InputSimd128Register(index), operand, Condition(4));
2364 } else {
2365 __ vlgv(r0, i.InputSimd128Register(index), MemOperand(r0, 1),
2366 Condition(3));
2367 __ vlgv(r1, i.InputSimd128Register(index), MemOperand(r0, 0),
2368 Condition(3));
2369 __ strvg(r0, operand);
2370 __ strvg(r1, MemOperand(operand.rx(), operand.rb(),
2371 operand.offset() + kSystemPointerSize));
2372 }
2373 break;
2374 }
2375 case kS390_StoreFloat32:
2377 break;
2378 case kS390_StoreDouble:
2380 break;
2381 case kS390_StoreSimd128: {
2382 size_t index = 0;
2383 AddressingMode mode = kMode_None;
2384 MemOperand operand = i.MemoryOperand(&mode, &index);
2385 __ vst(i.InputSimd128Register(index), operand, Condition(0));
2386 break;
2387 }
2388 case kS390_Lay: {
2389 MemOperand mem = i.MemoryOperand();
2390 if (!is_int20(mem.offset())) {
2391 // Add directly to the base register in case the index register (rx) is
2392 // r0.
2393 DCHECK(is_int32(mem.offset()));
2394 __ AddS64(ip, mem.rb(), Operand(mem.offset()));
2395 mem = MemOperand(mem.rx(), ip);
2396 }
2397 __ lay(i.OutputRegister(), mem);
2398 break;
2399 }
2400 case kAtomicExchangeInt8:
2401 case kAtomicExchangeUint8: {
2402 Register base = i.InputRegister(0);
2403 Register index = i.InputRegister(1);
2404 Register value = i.InputRegister(2);
2405 Register output = i.OutputRegister();
2406 __ la(r1, MemOperand(base, index));
2407 __ AtomicExchangeU8(r1, value, output, r0);
2408 if (opcode == kAtomicExchangeInt8) {
2409 __ LoadS8(output, output);
2410 } else {
2411 __ LoadU8(output, output);
2412 }
2413 break;
2414 }
2415 case kAtomicExchangeInt16:
2416 case kAtomicExchangeUint16: {
2417 Register base = i.InputRegister(0);
2418 Register index = i.InputRegister(1);
2419 Register value = i.InputRegister(2);
2420 Register output = i.OutputRegister();
2421 bool reverse_bytes = is_wasm_on_be(info());
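// For WebAssembly on big-endian hosts the halfword is byte-reversed (staged
// in ip) before the exchange and the previous value is reversed back after,
// so memory keeps the little-endian layout wasm expects.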
2422 __ la(r1, MemOperand(base, index));
2423 Register value_ = value;
2424 if (reverse_bytes) {
2425 value_ = ip;
2426 __ lrvr(value_, value);
2427 __ ShiftRightU32(value_, value_, Operand(16));
2428 }
2429 __ AtomicExchangeU16(r1, value_, output, r0);
2430 if (reverse_bytes) {
2431 __ lrvr(output, output);
2432 __ ShiftRightU32(output, output, Operand(16));
2433 }
2434 if (opcode == kAtomicExchangeInt16) {
2435 __ lghr(output, output);
2436 } else {
2437 __ llghr(output, output);
2438 }
2439 break;
2440 }
2441 case kAtomicExchangeWord32: {
2442 Register base = i.InputRegister(0);
2443 Register index = i.InputRegister(1);
2444 Register value = i.InputRegister(2);
2445 Register output = i.OutputRegister();
2446 Label do_cs;
2447 bool reverse_bytes = is_wasm_on_be(info());
2448 __ lay(r1, MemOperand(base, index));
2449 Register value_ = value;
2450 if (reverse_bytes) {
2451 value_ = ip;
2452 __ lrvr(value_, value);
2453 }
2454 __ LoadU32(output, MemOperand(r1));
2455 __ bind(&do_cs);
2456 __ cs(output, value_, MemOperand(r1));
2457 __ bne(&do_cs, Label::kNear);
2458 if (reverse_bytes) {
2459 __ lrvr(output, output);
2460 __ LoadU32(output, output);
2461 }
2462 break;
2463 }
2464 case kAtomicCompareExchangeInt8:
2465 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(LoadS8);
2466 break;
2467 case kAtomicCompareExchangeUint8:
2468 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(LoadU8);
2469 break;
2470 case kAtomicCompareExchangeInt16:
2471 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(LoadS16);
2472 break;
2473 case kAtomicCompareExchangeUint16:
2474 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(LoadU16);
2475 break;
2476 case kAtomicCompareExchangeWord32:
2477 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_WORD(CmpAndSwap);
2478 break;
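// Sub-word atomic binops: each case below extracts the updated byte or
// halfword from the previous full-word value -- a logical shift plus a
// sign-extending load for the signed variants, a rotate-and-insert for the
// unsigned ones -- with an extra byte reversal for wasm halfwords on
// big-endian hosts.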
2479#define ATOMIC_BINOP_CASE(op, inst) \
2480 case kAtomic##op##Int8: \
2481 ASSEMBLE_ATOMIC_BINOP_BYTE(inst, [&]() { \
2482 intptr_t shift_right = static_cast<intptr_t>(shift_amount); \
2483 __ srlk(result, prev, Operand(shift_right)); \
2484 __ LoadS8(result, result); \
2485 }); \
2486 break; \
2487 case kAtomic##op##Uint8: \
2488 ASSEMBLE_ATOMIC_BINOP_BYTE(inst, [&]() { \
2489 int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
2490 __ RotateInsertSelectBits(result, prev, Operand(56), Operand(63), \
2491 Operand(static_cast<intptr_t>(rotate_left)), \
2492 true); \
2493 }); \
2494 break; \
2495 case kAtomic##op##Int16: \
2496 ASSEMBLE_ATOMIC_BINOP_HALFWORD(inst, [&]() { \
2497 intptr_t shift_right = static_cast<intptr_t>(shift_amount); \
2498 __ srlk(result, prev, Operand(shift_right)); \
2499 if (is_wasm_on_be(info())) { \
2500 __ lrvr(result, result); \
2501 __ ShiftRightS32(result, result, Operand(16)); \
2502 } \
2503 __ LoadS16(result, result); \
2504 }); \
2505 break; \
2506 case kAtomic##op##Uint16: \
2507 ASSEMBLE_ATOMIC_BINOP_HALFWORD(inst, [&]() { \
2508 int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
2509 __ RotateInsertSelectBits(result, prev, Operand(48), Operand(63), \
2510 Operand(static_cast<intptr_t>(rotate_left)), \
2511 true); \
2512 if (is_wasm_on_be(info())) { \
2513 __ lrvr(result, result); \
2514 __ ShiftRightU32(result, result, Operand(16)); \
2515 } \
2516 }); \
2517 break;
2518 ATOMIC_BINOP_CASE(Add, AddS32)
2519 ATOMIC_BINOP_CASE(Sub, SubS32)
2520 ATOMIC_BINOP_CASE(And, And)
2521 ATOMIC_BINOP_CASE(Or, Or)
2522 ATOMIC_BINOP_CASE(Xor, Xor)
2523#undef ATOMIC_BINOP_CASE
2524 case kAtomicAddWord32:
2525 ASSEMBLE_ATOMIC_BINOP_WORD(laa, AddS32);
2526 break;
2527 case kAtomicSubWord32:
2528 ASSEMBLE_ATOMIC_BINOP_WORD(LoadAndSub32, SubS32);
2529 break;
2530 case kAtomicAndWord32:
2531 ASSEMBLE_ATOMIC_BINOP_WORD(lan, AndP);
2532 break;
2533 case kAtomicOrWord32:
2534 ASSEMBLE_ATOMIC_BINOP_WORD(lao, OrP);
2535 break;
2536 case kAtomicXorWord32:
2537 ASSEMBLE_ATOMIC_BINOP_WORD(lax, XorP);
2538 break;
2539 case kS390_Word64AtomicAddUint64:
2540 ASSEMBLE_ATOMIC_BINOP_WORD64(laag, AddS64);
2541 break;
2542 case kS390_Word64AtomicSubUint64:
2543 ASSEMBLE_ATOMIC_BINOP_WORD64(LoadAndSub64, SubS64);
2544 break;
2545 case kS390_Word64AtomicAndUint64:
2546 ASSEMBLE_ATOMIC_BINOP_WORD64(lang, AndP);
2547 break;
2548 case kS390_Word64AtomicOrUint64:
2549 ASSEMBLE_ATOMIC_BINOP_WORD64(laog, OrP);
2550 break;
2551 case kS390_Word64AtomicXorUint64:
2552 ASSEMBLE_ATOMIC_BINOP_WORD64(laxg, XorP);
2553 break;
2554 case kS390_Word64AtomicExchangeUint64: {
2555 Register base = i.InputRegister(0);
2556 Register index = i.InputRegister(1);
2557 Register value = i.InputRegister(2);
2558 Register output = i.OutputRegister();
2559 bool reverse_bytes = is_wasm_on_be(info());
2560 Label do_cs;
2561 Register value_ = value;
2562 __ la(r1, MemOperand(base, index));
2563 if (reverse_bytes) {
2564 value_ = ip;
2565 __ lrvgr(value_, value);
2566 }
2567 __ lg(output, MemOperand(r1));
2568 __ bind(&do_cs);
2569 __ csg(output, value_, MemOperand(r1));
2570 __ bne(&do_cs, Label::kNear);
2571 if (reverse_bytes) {
2572 __ lrvgr(output, output);
2573 }
2574 break;
2575 }
2576 case kS390_Word64AtomicCompareExchangeUint64:
2578 break;
2579 // Simd Support.
2580#define SIMD_SHIFT_LIST(V) \
2581 V(I64x2Shl) \
2582 V(I64x2ShrS) \
2583 V(I64x2ShrU) \
2584 V(I32x4Shl) \
2585 V(I32x4ShrS) \
2586 V(I32x4ShrU) \
2587 V(I16x8Shl) \
2588 V(I16x8ShrS) \
2589 V(I16x8ShrU) \
2590 V(I8x16Shl) \
2591 V(I8x16ShrS) \
2592 V(I8x16ShrU)
2593
2594#define EMIT_SIMD_SHIFT(name) \
2595 case kS390_##name: { \
2596 __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
2597 i.InputRegister(1), kScratchDoubleReg); \
2598 break; \
2599 }
2600 SIMD_SHIFT_LIST(EMIT_SIMD_SHIFT)
2601#undef EMIT_SIMD_SHIFT
2602#undef SIMD_SHIFT_LIST
2603
2604#define SIMD_BINOP_LIST(V) \
2605 V(F64x2Add) \
2606 V(F64x2Sub) \
2607 V(F64x2Mul) \
2608 V(F64x2Div) \
2609 V(F64x2Min) \
2610 V(F64x2Max) \
2611 V(F64x2Eq) \
2612 V(F64x2Ne) \
2613 V(F64x2Lt) \
2614 V(F64x2Le) \
2615 V(F64x2Pmin) \
2616 V(F64x2Pmax) \
2617 V(F32x4Add) \
2618 V(F32x4Sub) \
2619 V(F32x4Mul) \
2620 V(F32x4Div) \
2621 V(F32x4Min) \
2622 V(F32x4Max) \
2623 V(F32x4Eq) \
2624 V(F32x4Ne) \
2625 V(F32x4Lt) \
2626 V(F32x4Le) \
2627 V(F32x4Pmin) \
2628 V(F32x4Pmax) \
2629 V(I64x2Add) \
2630 V(I64x2Sub) \
2631 V(I64x2Eq) \
2632 V(I64x2Ne) \
2633 V(I64x2GtS) \
2634 V(I64x2GeS) \
2635 V(I32x4Add) \
2636 V(I32x4Sub) \
2637 V(I32x4Mul) \
2638 V(I32x4Eq) \
2639 V(I32x4Ne) \
2640 V(I32x4GtS) \
2641 V(I32x4GeS) \
2642 V(I32x4GtU) \
2643 V(I32x4MinS) \
2644 V(I32x4MinU) \
2645 V(I32x4MaxS) \
2646 V(I32x4MaxU) \
2647 V(I16x8Add) \
2648 V(I16x8Sub) \
2649 V(I16x8Mul) \
2650 V(I16x8Eq) \
2651 V(I16x8Ne) \
2652 V(I16x8GtS) \
2653 V(I16x8GeS) \
2654 V(I16x8GtU) \
2655 V(I16x8MinS) \
2656 V(I16x8MinU) \
2657 V(I16x8MaxS) \
2658 V(I16x8MaxU) \
2659 V(I16x8RoundingAverageU) \
2660 V(I8x16Add) \
2661 V(I8x16Sub) \
2662 V(I8x16Eq) \
2663 V(I8x16Ne) \
2664 V(I8x16GtS) \
2665 V(I8x16GeS) \
2666 V(I8x16GtU) \
2667 V(I8x16MinS) \
2668 V(I8x16MinU) \
2669 V(I8x16MaxS) \
2670 V(I8x16MaxU) \
2671 V(I8x16RoundingAverageU) \
2672 V(S128And) \
2673 V(S128Or) \
2674 V(S128Xor) \
2675 V(S128AndNot)
2676
2677#define EMIT_SIMD_BINOP(name) \
2678 case kS390_##name: { \
2679 __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
2680 i.InputSimd128Register(1)); \
2681 break; \
2682 }
2683 SIMD_BINOP_LIST(EMIT_SIMD_BINOP)
2684#undef EMIT_SIMD_BINOP
2685#undef SIMD_BINOP_LIST
2686
2687#define SIMD_UNOP_LIST(V) \
2688 V(F64x2Splat, Simd128Register, DoubleRegister) \
2689 V(F64x2Abs, Simd128Register, Simd128Register) \
2690 V(F64x2Neg, Simd128Register, Simd128Register) \
2691 V(F64x2Sqrt, Simd128Register, Simd128Register) \
2692 V(F64x2Ceil, Simd128Register, Simd128Register) \
2693 V(F64x2Floor, Simd128Register, Simd128Register) \
2694 V(F64x2Trunc, Simd128Register, Simd128Register) \
2695 V(F64x2NearestInt, Simd128Register, Simd128Register) \
2696 V(F32x4Splat, Simd128Register, DoubleRegister) \
2697 V(F32x4Abs, Simd128Register, Simd128Register) \
2698 V(F32x4Neg, Simd128Register, Simd128Register) \
2699 V(F32x4Sqrt, Simd128Register, Simd128Register) \
2700 V(F32x4Ceil, Simd128Register, Simd128Register) \
2701 V(F32x4Floor, Simd128Register, Simd128Register) \
2702 V(F32x4Trunc, Simd128Register, Simd128Register) \
2703 V(F32x4NearestInt, Simd128Register, Simd128Register) \
2704 V(I64x2Splat, Simd128Register, Register) \
2705 V(I64x2Abs, Simd128Register, Simd128Register) \
2706 V(I64x2Neg, Simd128Register, Simd128Register) \
2707 V(I64x2SConvertI32x4Low, Simd128Register, Simd128Register) \
2708 V(I64x2SConvertI32x4High, Simd128Register, Simd128Register) \
2709 V(I64x2UConvertI32x4Low, Simd128Register, Simd128Register) \
2710 V(I64x2UConvertI32x4High, Simd128Register, Simd128Register) \
2711 V(I32x4Splat, Simd128Register, Register) \
2712 V(I32x4Abs, Simd128Register, Simd128Register) \
2713 V(I32x4Neg, Simd128Register, Simd128Register) \
2714 V(I32x4SConvertI16x8Low, Simd128Register, Simd128Register) \
2715 V(I32x4SConvertI16x8High, Simd128Register, Simd128Register) \
2716 V(I32x4UConvertI16x8Low, Simd128Register, Simd128Register) \
2717 V(I32x4UConvertI16x8High, Simd128Register, Simd128Register) \
2718 V(I16x8Splat, Simd128Register, Register) \
2719 V(I16x8Abs, Simd128Register, Simd128Register) \
2720 V(I16x8Neg, Simd128Register, Simd128Register) \
2721 V(I16x8SConvertI8x16Low, Simd128Register, Simd128Register) \
2722 V(I16x8SConvertI8x16High, Simd128Register, Simd128Register) \
2723 V(I16x8UConvertI8x16Low, Simd128Register, Simd128Register) \
2724 V(I16x8UConvertI8x16High, Simd128Register, Simd128Register) \
2725 V(I8x16Splat, Simd128Register, Register) \
2726 V(I8x16Abs, Simd128Register, Simd128Register) \
2727 V(I8x16Neg, Simd128Register, Simd128Register) \
2728 V(S128Not, Simd128Register, Simd128Register)
2729
2730#define EMIT_SIMD_UNOP(name, dtype, stype) \
2731 case kS390_##name: { \
2732 __ name(i.Output##dtype(), i.Input##stype(0)); \
2733 break; \
2734 }
2735 SIMD_UNOP_LIST(EMIT_SIMD_UNOP)
2736#undef EMIT_SIMD_UNOP
2737#undef SIMD_UNOP_LIST
2738
2739#define SIMD_EXTRACT_LANE_LIST(V) \
2740 V(F64x2ExtractLane, DoubleRegister) \
2741 V(F32x4ExtractLane, DoubleRegister) \
2742 V(I64x2ExtractLane, Register) \
2743 V(I32x4ExtractLane, Register) \
2744 V(I16x8ExtractLaneU, Register) \
2745 V(I16x8ExtractLaneS, Register) \
2746 V(I8x16ExtractLaneU, Register) \
2747 V(I8x16ExtractLaneS, Register)
2748
2749#define EMIT_SIMD_EXTRACT_LANE(name, dtype) \
2750 case kS390_##name: { \
2751 __ name(i.Output##dtype(), i.InputSimd128Register(0), i.InputInt8(1), \
2752 kScratchReg); \
2753 break; \
2754 }
2755 SIMD_EXTRACT_LANE_LIST(EMIT_SIMD_EXTRACT_LANE)
2756#undef EMIT_SIMD_EXTRACT_LANE
2757#undef SIMD_EXTRACT_LANE_LIST
2758
2759#define SIMD_REPLACE_LANE_LIST(V) \
2760 V(F64x2ReplaceLane, DoubleRegister) \
2761 V(F32x4ReplaceLane, DoubleRegister) \
2762 V(I64x2ReplaceLane, Register) \
2763 V(I32x4ReplaceLane, Register) \
2764 V(I16x8ReplaceLane, Register) \
2765 V(I8x16ReplaceLane, Register)
2766
2767#define EMIT_SIMD_REPLACE_LANE(name, stype) \
2768 case kS390_##name: { \
2769 __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
2770 i.Input##stype(2), i.InputInt8(1), kScratchReg); \
2771 break; \
2772 }
2773 SIMD_REPLACE_LANE_LIST(EMIT_SIMD_REPLACE_LANE)
2774#undef EMIT_SIMD_REPLACE_LANE
2775#undef SIMD_REPLACE_LANE_LIST
2776
2777#define SIMD_EXT_MUL_LIST(V) \
2778 V(I64x2ExtMulLowI32x4S) \
2779 V(I64x2ExtMulHighI32x4S) \
2780 V(I64x2ExtMulLowI32x4U) \
2781 V(I64x2ExtMulHighI32x4U) \
2782 V(I32x4ExtMulLowI16x8S) \
2783 V(I32x4ExtMulHighI16x8S) \
2784 V(I32x4ExtMulLowI16x8U) \
2785 V(I32x4ExtMulHighI16x8U) \
2786 V(I16x8ExtMulLowI8x16S) \
2787 V(I16x8ExtMulHighI8x16S) \
2788 V(I16x8ExtMulLowI8x16U) \
2789 V(I16x8ExtMulHighI8x16U)
2790
2791#define EMIT_SIMD_EXT_MUL(name) \
2792 case kS390_##name: { \
2793 __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
2794 i.InputSimd128Register(1), kScratchDoubleReg); \
2795 break; \
2796 }
2797 SIMD_EXT_MUL_LIST(EMIT_SIMD_EXT_MUL)
2798#undef EMIT_SIMD_EXT_MUL
2799#undef SIMD_EXT_MUL_LIST
2800
2801#define SIMD_ALL_TRUE_LIST(V) \
2802 V(I64x2AllTrue) \
2803 V(I32x4AllTrue) \
2804 V(I16x8AllTrue) \
2805 V(I8x16AllTrue)
2806
2807#define EMIT_SIMD_ALL_TRUE(name) \
2808 case kS390_##name: { \
2809 __ name(i.OutputRegister(), i.InputSimd128Register(0), kScratchReg, \
2810 kScratchDoubleReg); \
2811 break; \
2812 }
2813 SIMD_ALL_TRUE_LIST(EMIT_SIMD_ALL_TRUE)
2814#undef EMIT_SIMD_ALL_TRUE
2815#undef SIMD_ALL_TRUE_LIST
2816
2817#define SIMD_QFM_LIST(V) \
2818 V(F64x2Qfma) \
2819 V(F64x2Qfms) \
2820 V(F32x4Qfma) \
2821 V(F32x4Qfms)
2822
2823#define EMIT_SIMD_QFM(name) \
2824 case kS390_##name: { \
2825 __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
2826 i.InputSimd128Register(1), i.InputSimd128Register(2)); \
2827 break; \
2828 }
2829 SIMD_QFM_LIST(EMIT_SIMD_QFM)
2830#undef EMIT_SIMD_QFM
2831#undef SIMD_QFM_LIST
2832
2833#define SIMD_ADD_SUB_SAT_LIST(V) \
2834 V(I16x8AddSatS) \
2835 V(I16x8SubSatS) \
2836 V(I16x8AddSatU) \
2837 V(I16x8SubSatU) \
2838 V(I8x16AddSatS) \
2839 V(I8x16SubSatS) \
2840 V(I8x16AddSatU) \
2841 V(I8x16SubSatU)
2842
2843#define EMIT_SIMD_ADD_SUB_SAT(name) \
2844 case kS390_##name: { \
2845 __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
2846 i.InputSimd128Register(1), kScratchDoubleReg, \
2847 i.ToSimd128Register(instr->TempAt(0))); \
2848 break; \
2849 }
2850 SIMD_ADD_SUB_SAT_LIST(EMIT_SIMD_ADD_SUB_SAT)
2851#undef EMIT_SIMD_ADD_SUB_SAT
2852#undef SIMD_ADD_SUB_SAT_LIST
2853
2854#define SIMD_EXT_ADD_PAIRWISE_LIST(V) \
2855 V(I32x4ExtAddPairwiseI16x8S) \
2856 V(I32x4ExtAddPairwiseI16x8U) \
2857 V(I16x8ExtAddPairwiseI8x16S) \
2858 V(I16x8ExtAddPairwiseI8x16U)
2859
2860#define EMIT_SIMD_EXT_ADD_PAIRWISE(name) \
2861 case kS390_##name: { \
2862 __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
2863 kScratchDoubleReg, i.ToSimd128Register(instr->TempAt(0))); \
2864 break; \
2865 }
2866 SIMD_EXT_ADD_PAIRWISE_LIST(EMIT_SIMD_EXT_ADD_PAIRWISE)
2867#undef EMIT_SIMD_EXT_ADD_PAIRWISE
2868#undef SIMD_EXT_ADD_PAIRWISE_LIST
2869
2870 case kS390_I64x2Mul: {
2871 __ I64x2Mul(i.OutputSimd128Register(), i.InputSimd128Register(0),
2872 i.InputSimd128Register(1), r0, r1, ip);
2873 break;
2874 }
2875 case kS390_I32x4GeU: {
2876 __ I32x4GeU(i.OutputSimd128Register(), i.InputSimd128Register(0),
2877 i.InputSimd128Register(1), kScratchDoubleReg);
2878 break;
2879 }
2880 case kS390_I16x8GeU: {
2881 __ I16x8GeU(i.OutputSimd128Register(), i.InputSimd128Register(0),
2882 i.InputSimd128Register(1), kScratchDoubleReg);
2883 break;
2884 }
2885 case kS390_I8x16GeU: {
2886 __ I8x16GeU(i.OutputSimd128Register(), i.InputSimd128Register(0),
2887 i.InputSimd128Register(1), kScratchDoubleReg);
2888 break;
2889 }
2890 // vector boolean unops
2891 case kS390_V128AnyTrue: {
2892 __ V128AnyTrue(i.OutputRegister(), i.InputSimd128Register(0),
2893 kScratchReg);
2894 break;
2895 }
2896 // vector bitwise ops
2897 case kS390_S128Const: {
2898 uint64_t low = make_uint64(i.InputUint32(1), i.InputUint32(0));
2899 uint64_t high = make_uint64(i.InputUint32(3), i.InputUint32(2));
2900 __ S128Const(i.OutputSimd128Register(), high, low, r0, ip);
2901 break;
2902 }
2903 case kS390_S128Zero: {
2904 Simd128Register dst = i.OutputSimd128Register();
2905 __ S128Zero(dst, dst);
2906 break;
2907 }
2908 case kS390_S128AllOnes: {
2909 Simd128Register dst = i.OutputSimd128Register();
2910 __ S128AllOnes(dst, dst);
2911 break;
2912 }
2913 case kS390_S128Select: {
2914 Simd128Register dst = i.OutputSimd128Register();
2915 Simd128Register mask = i.InputSimd128Register(0);
2916 Simd128Register src1 = i.InputSimd128Register(1);
2917 Simd128Register src2 = i.InputSimd128Register(2);
2918 __ S128Select(dst, src1, src2, mask);
2919 break;
2920 }
2921 // vector conversions
2922 case kS390_I32x4SConvertF32x4: {
2923 __ I32x4SConvertF32x4(i.OutputSimd128Register(),
2924 i.InputSimd128Register(0), kScratchDoubleReg,
2925 kScratchReg);
2926 break;
2927 }
2928 case kS390_I32x4UConvertF32x4: {
2929 __ I32x4UConvertF32x4(i.OutputSimd128Register(),
2930 i.InputSimd128Register(0), kScratchDoubleReg,
2931 kScratchReg);
2932 break;
2933 }
2934 case kS390_F32x4SConvertI32x4: {
2935 __ F32x4SConvertI32x4(i.OutputSimd128Register(),
2936 i.InputSimd128Register(0), kScratchDoubleReg,
2937 kScratchReg);
2938 break;
2939 }
2940 case kS390_F32x4UConvertI32x4: {
2941 __ F32x4UConvertI32x4(i.OutputSimd128Register(),
2942 i.InputSimd128Register(0), kScratchDoubleReg,
2943 kScratchReg);
2944 break;
2945 }
2946 case kS390_I16x8SConvertI32x4: {
2947 __ I16x8SConvertI32x4(i.OutputSimd128Register(),
2948 i.InputSimd128Register(0),
2949 i.InputSimd128Register(1));
2950 break;
2951 }
2952 case kS390_I8x16SConvertI16x8: {
2953 __ I8x16SConvertI16x8(i.OutputSimd128Register(),
2954 i.InputSimd128Register(0),
2955 i.InputSimd128Register(1));
2956 break;
2957 }
2958 case kS390_I16x8UConvertI32x4: {
2959 __ I16x8UConvertI32x4(i.OutputSimd128Register(),
2960 i.InputSimd128Register(0),
2961 i.InputSimd128Register(1), kScratchDoubleReg);
2962 break;
2963 }
2964 case kS390_I8x16UConvertI16x8: {
2965 __ I8x16UConvertI16x8(i.OutputSimd128Register(),
2966 i.InputSimd128Register(0),
2967 i.InputSimd128Register(1), kScratchDoubleReg);
2968 break;
2969 }
2970 case kS390_I8x16Shuffle: {
2971 uint64_t low = make_uint64(i.InputUint32(3), i.InputUint32(2));
2972 uint64_t high = make_uint64(i.InputUint32(5), i.InputUint32(4));
2973 __ I8x16Shuffle(i.OutputSimd128Register(), i.InputSimd128Register(0),
2974 i.InputSimd128Register(1), high, low, r0, ip,
2975 kScratchDoubleReg);
2976 break;
2977 }
2978 case kS390_I8x16Swizzle: {
2979 __ I8x16Swizzle(i.OutputSimd128Register(), i.InputSimd128Register(0),
2980 i.InputSimd128Register(1), r0, r1, kScratchDoubleReg);
2981 break;
2982 }
2983 case kS390_I64x2BitMask: {
2984 __ I64x2BitMask(i.OutputRegister(), i.InputSimd128Register(0),
2985 kScratchReg, kScratchDoubleReg);
2986 break;
2987 }
2988 case kS390_I32x4BitMask: {
2989 __ I32x4BitMask(i.OutputRegister(), i.InputSimd128Register(0),
2990 kScratchReg, kScratchDoubleReg);
2991 break;
2992 }
2993 case kS390_I16x8BitMask: {
2994 __ I16x8BitMask(i.OutputRegister(), i.InputSimd128Register(0),
2995 kScratchReg, kScratchDoubleReg);
2996 break;
2997 }
2998 case kS390_I8x16BitMask: {
2999 __ I8x16BitMask(i.OutputRegister(), i.InputSimd128Register(0), r0, ip,
3000 kScratchDoubleReg);
3001 break;
3002 }
3003 case kS390_I32x4DotI16x8S: {
3004 __ I32x4DotI16x8S(i.OutputSimd128Register(), i.InputSimd128Register(0),
3005 i.InputSimd128Register(1), kScratchDoubleReg);
3006 break;
3007 }
3008
3009 case kS390_I16x8DotI8x16S: {
3010 __ I16x8DotI8x16S(i.OutputSimd128Register(), i.InputSimd128Register(0),
3011 i.InputSimd128Register(1), kScratchDoubleReg);
3012 break;
3013 }
3014 case kS390_I32x4DotI8x16AddS: {
3015 __ I32x4DotI8x16AddS(i.OutputSimd128Register(), i.InputSimd128Register(0),
3016 i.InputSimd128Register(1), i.InputSimd128Register(2),
3017 kScratchDoubleReg, i.TempSimd128Register(0));
3018 break;
3019 }
3020 case kS390_I16x8Q15MulRSatS: {
3021 __ I16x8Q15MulRSatS(i.OutputSimd128Register(), i.InputSimd128Register(0),
3022 i.InputSimd128Register(1), kScratchDoubleReg,
3023 i.ToSimd128Register(instr->TempAt(0)),
3024 i.ToSimd128Register(instr->TempAt(1)));
3025 break;
3026 }
3027 case kS390_I8x16Popcnt: {
3028 __ I8x16Popcnt(i.OutputSimd128Register(), i.InputSimd128Register(0));
3029 break;
3030 }
3031 case kS390_F64x2ConvertLowI32x4S: {
3032 __ F64x2ConvertLowI32x4S(i.OutputSimd128Register(),
3033 i.InputSimd128Register(0));
3034 break;
3035 }
3036 case kS390_F64x2ConvertLowI32x4U: {
3037 __ F64x2ConvertLowI32x4U(i.OutputSimd128Register(),
3038 i.InputSimd128Register(0));
3039 break;
3040 }
3041 case kS390_F64x2PromoteLowF32x4: {
3042 __ F64x2PromoteLowF32x4(i.OutputSimd128Register(),
3043 i.InputSimd128Register(0), kScratchDoubleReg, r0,
3044 r1, ip);
3045 break;
3046 }
3047 case kS390_F32x4DemoteF64x2Zero: {
3048 __ F32x4DemoteF64x2Zero(i.OutputSimd128Register(),
3049 i.InputSimd128Register(0), kScratchDoubleReg, r0,
3050 r1, ip);
3051 break;
3052 }
3053 case kS390_I32x4TruncSatF64x2SZero: {
3054 __ I32x4TruncSatF64x2SZero(i.OutputSimd128Register(),
3055 i.InputSimd128Register(0), kScratchDoubleReg);
3056 break;
3057 }
3058 case kS390_I32x4TruncSatF64x2UZero: {
3059 __ I32x4TruncSatF64x2UZero(i.OutputSimd128Register(),
3060 i.InputSimd128Register(0), kScratchDoubleReg);
3061 break;
3062 }
3063#define LOAD_SPLAT(type) \
3064 AddressingMode mode = kMode_None; \
3065 MemOperand operand = i.MemoryOperand(&mode); \
3066 Simd128Register dst = i.OutputSimd128Register(); \
3067 __ LoadAndSplat##type##LE(dst, operand, kScratchReg);
3068 case kS390_S128Load64Splat: {
3069 LOAD_SPLAT(64x2);
3070 break;
3071 }
3072 case kS390_S128Load32Splat: {
3073 LOAD_SPLAT(32x4);
3074 break;
3075 }
3076 case kS390_S128Load16Splat: {
3077 LOAD_SPLAT(16x8);
3078 break;
3079 }
3080 case kS390_S128Load8Splat: {
3081 LOAD_SPLAT(8x16);
3082 break;
3083 }
3084#undef LOAD_SPLAT
3085#define LOAD_EXTEND(type) \
3086 AddressingMode mode = kMode_None; \
3087 MemOperand operand = i.MemoryOperand(&mode); \
3088 Simd128Register dst = i.OutputSimd128Register(); \
3089 __ LoadAndExtend##type##LE(dst, operand, kScratchReg);
3090 case kS390_S128Load32x2U: {
3091 LOAD_EXTEND(32x2U);
3092 break;
3093 }
3094 case kS390_S128Load32x2S: {
3095 LOAD_EXTEND(32x2S);
3096 break;
3097 }
3098 case kS390_S128Load16x4U: {
3099 LOAD_EXTEND(16x4U);
3100 break;
3101 }
3102 case kS390_S128Load16x4S: {
3103 LOAD_EXTEND(16x4S);
3104 break;
3105 }
3106 case kS390_S128Load8x8U: {
3107 LOAD_EXTEND(8x8U);
3108 break;
3109 }
3110 case kS390_S128Load8x8S: {
3111 LOAD_EXTEND(8x8S);
3112 break;
3113 }
3114#undef LOAD_EXTEND
3115#define LOAD_AND_ZERO(type) \
3116 AddressingMode mode = kMode_None; \
3117 MemOperand operand = i.MemoryOperand(&mode); \
3118 Simd128Register dst = i.OutputSimd128Register(); \
3119 __ LoadV##type##ZeroLE(dst, operand, kScratchReg);
3120 case kS390_S128Load32Zero: {
3121 LOAD_AND_ZERO(32);
3122 break;
3123 }
3124 case kS390_S128Load64Zero: {
3125 LOAD_AND_ZERO(64);
3126 break;
3127 }
3128#undef LOAD_AND_ZERO
3129#undef LOAD_EXTEND
3130#define LOAD_LANE(type, lane) \
3131 AddressingMode mode = kMode_None; \
3132 size_t index = 2; \
3133 MemOperand operand = i.MemoryOperand(&mode, &index); \
3134 Simd128Register dst = i.OutputSimd128Register(); \
3135 DCHECK_EQ(dst, i.InputSimd128Register(0)); \
3136 __ LoadLane##type##LE(dst, operand, lane, kScratchReg);
3137 case kS390_S128Load8Lane: {
3138 LOAD_LANE(8, 15 - i.InputUint8(1));
3139 break;
3140 }
3141 case kS390_S128Load16Lane: {
3142 LOAD_LANE(16, 7 - i.InputUint8(1));
3143 break;
3144 }
3145 case kS390_S128Load32Lane: {
3146 LOAD_LANE(32, 3 - i.InputUint8(1));
3147 break;
3148 }
3149 case kS390_S128Load64Lane: {
3150 LOAD_LANE(64, 1 - i.InputUint8(1));
3151 break;
3152 }
3153#undef LOAD_LANE
3154#define STORE_LANE(type, lane) \
3155 AddressingMode mode = kMode_None; \
3156 size_t index = 2; \
3157 MemOperand operand = i.MemoryOperand(&mode, &index); \
3158 Simd128Register src = i.InputSimd128Register(0); \
3159 __ StoreLane##type##LE(src, operand, lane, kScratchReg);
3160 case kS390_S128Store8Lane: {
3161 STORE_LANE(8, 15 - i.InputUint8(1));
3162 break;
3163 }
3164 case kS390_S128Store16Lane: {
3165 STORE_LANE(16, 7 - i.InputUint8(1));
3166 break;
3167 }
3168 case kS390_S128Store32Lane: {
3169 STORE_LANE(32, 3 - i.InputUint8(1));
3170 break;
3171 }
3172 case kS390_S128Store64Lane: {
3173 STORE_LANE(64, 1 - i.InputUint8(1));
3174 break;
3175 }
3176#undef STORE_LANE
3177 case kS390_StoreCompressTagged: {
3178 CHECK(!instr->HasOutput());
3179 size_t index = 0;
3180 AddressingMode mode = kMode_None;
3181 MemOperand operand = i.MemoryOperand(&mode, &index);
3182 Register value = i.InputRegister(index);
3183 __ StoreTaggedField(value, operand, r1);
3184 break;
3185 }
3186 case kS390_LoadDecompressTaggedSigned: {
3187 CHECK(instr->HasOutput());
3188 __ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
3189 break;
3190 }
3191 case kS390_LoadDecompressTagged: {
3192 CHECK(instr->HasOutput());
3193 __ DecompressTagged(i.OutputRegister(), i.MemoryOperand());
3194 break;
3195 }
3196 default:
3197 UNREACHABLE();
3198 }
3199 return kSuccess;
3200}
3201
3202// Assembles branches after an instruction.
3203void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
3204 S390OperandConverter i(this, instr);
3205 Label* tlabel = branch->true_label;
3206 Label* flabel = branch->false_label;
3207 ArchOpcode op = instr->arch_opcode();
3208 FlagsCondition condition = branch->condition;
3209
3210 Condition cond = FlagsConditionToCondition(condition, op);
3211 if (op == kS390_CmpFloat || op == kS390_CmpDouble) {
3212 // check for unordered if necessary
3213 // Branching to flabel/tlabel according to what's expected by tests
3214 if (cond == le || cond == eq || cond == lt) {
3215 __ bunordered(flabel);
3216 } else if (cond == gt || cond == ne || cond == ge) {
3217 __ bunordered(tlabel);
3218 }
3219 }
3220 __ b(cond, tlabel);
3221 if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
3222}
3223
3224 void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
3225 BranchInfo* branch) {
3226 AssembleArchBranch(instr, branch);
3227}
3228
3229 void CodeGenerator::AssembleArchJumpRegardlessOfAssemblyOrder(
3230 RpoNumber target) {
3231 __ b(GetLabel(target));
3232}
3233
3234#if V8_ENABLE_WEBASSEMBLY
3235void CodeGenerator::AssembleArchTrap(Instruction* instr,
3236 FlagsCondition condition) {
3237 class OutOfLineTrap final : public OutOfLineCode {
3238 public:
3239 OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
3240 : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
3241
3242 void Generate() final {
3243 S390OperandConverter i(gen_, instr_);
3244 TrapId trap_id =
3245 static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
3246 GenerateCallToTrap(trap_id);
3247 }
3248
3249 private:
3250 void GenerateCallToTrap(TrapId trap_id) {
3251 gen_->AssembleSourcePosition(instr_);
3252 // A direct call to a wasm runtime stub defined in this module.
3253 // Just encode the stub index. This will be patched when the code
3254 // is added to the native module and copied into wasm code space.
3255 __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
3256 ReferenceMap* reference_map =
3257 gen_->zone()->New<ReferenceMap>(gen_->zone());
3258 gen_->RecordSafepoint(reference_map);
3259 if (v8_flags.debug_code) {
3260 __ stop();
3261 }
3262 }
3263
3264 Instruction* instr_;
3265 CodeGenerator* gen_;
3266 };
3267 auto ool = zone()->New<OutOfLineTrap>(this, instr);
3268 Label* tlabel = ool->entry();
3269 Label end;
3270
3271 ArchOpcode op = instr->arch_opcode();
3272 Condition cond = FlagsConditionToCondition(condition, op);
3273 if (op == kS390_CmpFloat || op == kS390_CmpDouble) {
3274 // check for unordered if necessary
3275 if (cond == le || cond == eq || cond == lt) {
3276 __ bunordered(&end);
3277 } else if (cond == gt || cond == ne || cond == ge) {
3278 __ bunordered(tlabel);
3279 }
3280 }
3281 __ b(cond, tlabel);
3282 __ bind(&end);
3283}
3284#endif // V8_ENABLE_WEBASSEMBLY
3285
3286// Assembles boolean materializations after an instruction.
3287 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
3288 FlagsCondition condition) {
3289 S390OperandConverter i(this, instr);
3290 ArchOpcode op = instr->arch_opcode();
3291 bool check_unordered = (op == kS390_CmpDouble || op == kS390_CmpFloat);
3292
3293 // Overflow checked for add/sub only.
3294 DCHECK((condition != kOverflow && condition != kNotOverflow) ||
3295 (op == kS390_Add32 || op == kS390_Add64 || op == kS390_Sub32 ||
3296 op == kS390_Sub64 || op == kS390_Mul32 ||
3297 op == kS390_Mul64WithOverflow));
3298
3299 // Materialize a full 32-bit 1 or 0 value. The result register is always the
3300 // last output of the instruction.
3301 DCHECK_NE(0u, instr->OutputCount());
3302 Register reg = i.OutputRegister(instr->OutputCount() - 1);
3303 Condition cond = FlagsConditionToCondition(condition, op);
3304 Label done;
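// Float comparisons may come back unordered (a NaN operand); in that case
// the result register is preloaded with the value expected for unordered
// inputs (0 for eq/lt/le, 1 otherwise) and the conditional materialization
// below is skipped.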
3305 if (check_unordered) {
3306 __ mov(reg, (cond == eq || cond == le || cond == lt) ? Operand::Zero()
3307 : Operand(1));
3308 __ bunordered(&done);
3309 }
3310
3311 // TODO(john.yan): use load imm high on condition here
3312 __ mov(reg, Operand::Zero());
3313 __ mov(kScratchReg, Operand(1));
3314 // locr is sufficient since reg's upper 32 bits are guaranteed to be 0
3315 __ locr(cond, reg, kScratchReg);
3316 __ bind(&done);
3317}
3318
3319 void CodeGenerator::AssembleArchConditionalBoolean(Instruction* instr) {
3320 UNREACHABLE();
3321}
3322
3323 void CodeGenerator::AssembleArchConditionalBranch(Instruction* instr,
3324 BranchInfo* branch) {
3325 UNREACHABLE();
3326}
3327
3328 void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
3329 S390OperandConverter i(this, instr);
3330 Register input = i.InputRegister(0);
3331 std::vector<std::pair<int32_t, Label*>> cases;
3332 for (size_t index = 2; index < instr->InputCount(); index += 2) {
3333 cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
3334 }
3335 AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
3336 cases.data() + cases.size());
3337}
3338
3339 void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
3340 S390OperandConverter i(this, instr);
3341 Register input = i.InputRegister(0);
3342 int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
3343 base::Vector<Label*> cases = zone()->AllocateVector<Label*>(case_count);
3344 for (int32_t index = 0; index < case_count; ++index) {
3345 cases[index] = GetLabel(i.InputRpo(index + 2));
3346 }
3347 Label* const table = AddJumpTable(cases);
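// Bounds-check the input against case_count (falling back to the default
// block), then load the target from the jump table -- indexed by the input
// scaled to pointer size -- and jump to it.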
3348 __ CmpU64(input, Operand(case_count));
3349 __ bge(GetLabel(i.InputRpo(1)));
3350 __ larl(kScratchReg, table);
3351 __ ShiftLeftU64(r1, input, Operand(kSystemPointerSizeLog2));
3352 __ LoadU64(kScratchReg, MemOperand(kScratchReg, r1));
3353 __ Jump(kScratchReg);
3354}
3355
3356 void CodeGenerator::AssembleArchSelect(Instruction* instr,
3357 FlagsCondition condition) {
3358 UNIMPLEMENTED();
3359}
3360
3361void CodeGenerator::FinishFrame(Frame* frame) {
3362 auto call_descriptor = linkage()->GetIncomingDescriptor();
3363 const DoubleRegList double_saves = call_descriptor->CalleeSavedFPRegisters();
3364
3365 // Save callee-saved Double registers.
3366 if (!double_saves.is_empty()) {
3367 frame->AlignSavedCalleeRegisterSlots();
3368 DCHECK_EQ(kNumCalleeSavedDoubles, double_saves.Count());
3369 frame->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
3370 (kDoubleSize / kSystemPointerSize));
3371 }
3372 // Save callee-saved registers.
3373 const RegList saves = call_descriptor->CalleeSavedRegisters();
3374 if (!saves.is_empty()) {
3375 // register save area does not include the fp or constant pool pointer.
3376 const int num_saves = kNumCalleeSaved - 1;
3377 frame->AllocateSavedCalleeRegisterSlots(num_saves);
3378 }
3379}
3380
3381 void CodeGenerator::AssembleConstructFrame() {
3382 auto call_descriptor = linkage()->GetIncomingDescriptor();
3383
3384 if (frame_access_state()->has_frame()) {
3385 if (call_descriptor->IsCFunctionCall()) {
3386#if V8_ENABLE_WEBASSEMBLY
3387 if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
3388 __ StubPrologue(StackFrame::C_WASM_ENTRY);
3389 // Reserve stack space for saving the c_entry_fp later.
3390 __ lay(sp, MemOperand(sp, -kSystemPointerSize));
3391#else
3392 // For balance.
3393 if (false) {
3394#endif // V8_ENABLE_WEBASSEMBLY
3395 } else {
3396 __ Push(r14, fp);
3397 __ mov(fp, sp);
3398 }
3399 } else if (call_descriptor->IsJSFunctionCall()) {
3400 __ Prologue(ip);
3401 } else {
3402 StackFrame::Type type = info()->GetOutputStackFrameType();
3403 // TODO(mbrandy): Detect cases where ip is the entrypoint (for
3404 // efficient initialization of the constant pool pointer register).
3405 __ StubPrologue(type);
3406#if V8_ENABLE_WEBASSEMBLY
3407 if (call_descriptor->IsAnyWasmFunctionCall() ||
3408 call_descriptor->IsWasmImportWrapper() ||
3409 call_descriptor->IsWasmCapiFunction()) {
3410 // For import wrappers and C-API functions, this stack slot is only used
3411 // for printing stack traces in V8. Also, it holds a WasmImportData
3412 // instead of the trusted instance data, which is taken care of in the
3413 // frames accessors.
3414 __ Push(kWasmImplicitArgRegister);
3415 }
3416 if (call_descriptor->IsWasmCapiFunction()) {
3417 // Reserve space for saving the PC later.
3418 __ lay(sp, MemOperand(sp, -kSystemPointerSize));
3419 }
3420#endif // V8_ENABLE_WEBASSEMBLY
3421 }
3423 }
3424
3425 int required_slots =
3426 frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
3427 if (info()->is_osr()) {
3428 // TurboFan OSR-compiled functions cannot be entered directly.
3429 __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
3430
3431 // Unoptimized code jumps directly to this entrypoint while the unoptimized
3432 // frame is still on the stack. Optimized code uses OSR values directly from
3433 // the unoptimized frame. Thus, all that needs to be done is to allocate the
3434 // remaining stack slots.
3435 __ RecordComment("-- OSR entrypoint --");
3436 osr_pc_offset_ = __ pc_offset();
3437 required_slots -= osr_helper()->UnoptimizedFrameSlots();
3438 }
3439
3440 const DoubleRegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
3441 const RegList saves = call_descriptor->CalleeSavedRegisters();
3442
3443 if (required_slots > 0) {
3444#if V8_ENABLE_WEBASSEMBLY
3445 if (info()->IsWasm() && required_slots * kSystemPointerSize > 4 * KB) {
3446 // For WebAssembly functions with big frames we have to do the stack
3447 // overflow check before we construct the frame. Otherwise we may not
3448 // have enough space on the stack to call the runtime for the stack
3449 // overflow.
3450 Label done;
3451
3452 // If the frame is bigger than the stack, we throw the stack overflow
3453 // exception unconditionally. Thereby we can avoid the integer overflow
3454 // check in the condition code.
3455 if (required_slots * kSystemPointerSize < v8_flags.stack_size * KB) {
3456 Register stack_limit = r1;
3457 __ LoadStackLimit(stack_limit, StackLimitKind::kRealStackLimit);
3458 __ AddS64(stack_limit, stack_limit,
3459 Operand(required_slots * kSystemPointerSize));
3460 __ CmpU64(sp, stack_limit);
3461 __ bge(&done);
3462 }
3463
3464 if (v8_flags.experimental_wasm_growable_stacks) {
3465 RegList regs_to_save;
3466 regs_to_save.set(WasmHandleStackOverflowDescriptor::GapRegister());
3467 regs_to_save.set(
3468 WasmHandleStackOverflowDescriptor::FrameBaseRegister());
3469 for (auto reg : wasm::kGpParamRegisters) regs_to_save.set(reg);
3470 __ MultiPush(regs_to_save);
3471 DoubleRegList fp_regs_to_save;
3472 for (auto reg : wasm::kFpParamRegisters) fp_regs_to_save.set(reg);
3473 __ MultiPushF64OrV128(fp_regs_to_save, r1);
3474 __ mov(WasmHandleStackOverflowDescriptor::GapRegister(),
3475 Operand(required_slots * kSystemPointerSize));
3476 __ AddS64(
3477 WasmHandleStackOverflowDescriptor::FrameBaseRegister(), fp,
3478 Operand(call_descriptor->ParameterSlotCount() * kSystemPointerSize +
3479 CommonFrameConstants::kFixedFrameSizeAboveFp));
3480 __ CallBuiltin(Builtin::kWasmHandleStackOverflow);
3481 __ MultiPopF64OrV128(fp_regs_to_save, r1);
3482 __ MultiPop(regs_to_save);
3483 } else {
3484 __ Call(static_cast<intptr_t>(Builtin::kWasmStackOverflow),
3485 RelocInfo::WASM_STUB_CALL);
3486 // The call does not return, hence we can ignore any references and just
3487 // define an empty safepoint.
3488 ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
3489 RecordSafepoint(reference_map);
3490 if (v8_flags.debug_code) __ stop();
3491 }
3492
3493 __ bind(&done);
3494 }
3495#endif // V8_ENABLE_WEBASSEMBLY
3496
3497 // Skip callee-saved and return slots, which are pushed below.
3498 required_slots -= saves.Count();
3499 required_slots -= frame()->GetReturnSlotCount();
3500 required_slots -= (kDoubleSize / kSystemPointerSize) * saves_fp.Count();
3501 __ lay(sp, MemOperand(sp, -required_slots * kSystemPointerSize));
3502 }
3503
3504 // Save callee-saved Double registers.
3505 if (!saves_fp.is_empty()) {
3506 __ MultiPushDoubles(saves_fp);
3507 DCHECK_EQ(kNumCalleeSavedDoubles, saves_fp.Count());
3508 }
3509
3510 // Save callee-saved registers.
3511 if (!saves.is_empty()) {
3512 __ MultiPush(saves);
3513 // register save area does not include the fp or constant pool pointer.
3514 }
3515
3516 const int returns = frame()->GetReturnSlotCount();
3517 // Create space for returns.
3518 __ AllocateStackSpace(returns * kSystemPointerSize);
3519
3520 if (!frame()->tagged_slots().IsEmpty()) {
3521 __ mov(kScratchReg, Operand(0));
3522 for (int spill_slot : frame()->tagged_slots()) {
3523 FrameOffset offset = frame_access_state()->GetFrameOffset(spill_slot);
3524 DCHECK(offset.from_frame_pointer());
3525 __ StoreU64(kScratchReg, MemOperand(fp, offset.offset()));
3526 }
3527 }
3528}
3529
3530void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
3531 auto call_descriptor = linkage()->GetIncomingDescriptor();
3532
3533 const int returns = frame()->GetReturnSlotCount();
3534 if (returns != 0) {
3535 // Create space for returns.
3536 __ lay(sp, MemOperand(sp, returns * kSystemPointerSize));
3537 }
3538
3539 // Restore registers.
3540 const RegList saves = call_descriptor->CalleeSavedRegisters();
3541 if (!saves.is_empty()) {
3542 __ MultiPop(saves);
3543 }
3544
3545 // Restore double registers.
3546 const DoubleRegList double_saves = call_descriptor->CalleeSavedFPRegisters();
3547 if (!double_saves.is_empty()) {
3548 __ MultiPopDoubles(double_saves);
3549 }
3550
3552
3553 S390OperandConverter g(this, nullptr);
3554 const int parameter_slots =
3555 static_cast<int>(call_descriptor->ParameterSlotCount());
3556
3557 // {additional_pop_count} is only greater than zero if {parameter_slots == 0}.
3558 // Check RawMachineAssembler::PopAndReturn.
3559 if (parameter_slots != 0) {
3560 if (additional_pop_count->IsImmediate()) {
3561 DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
3562 } else if (v8_flags.debug_code) {
3563 __ CmpS64(g.ToRegister(additional_pop_count), Operand(0));
3564 __ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue);
3565 }
3566 }
3567
3568#if V8_ENABLE_WEBASSEMBLY
3569 if (call_descriptor->IsAnyWasmFunctionCall() &&
3570 v8_flags.experimental_wasm_growable_stacks) {
3571 {
3572 UseScratchRegisterScope temps{masm()};
3573 Register scratch = temps.Acquire();
3574 __ LoadU64(scratch,
3575 MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
3576 __ CmpU64(
3577 scratch,
3578 Operand(StackFrame::TypeToMarker(StackFrame::WASM_SEGMENT_START)));
3579 }
3580 Label done;
3581 __ bne(&done);
3582 RegList regs_to_save;
3583 for (auto reg : wasm::kGpReturnRegisters) regs_to_save.set(reg);
3584 __ MultiPush(regs_to_save);
3585 DoubleRegList fp_regs_to_save;
3586 for (auto reg : wasm::kFpParamRegisters) fp_regs_to_save.set(reg);
3587 __ MultiPushF64OrV128(fp_regs_to_save, r1);
3589 __ PrepareCallCFunction(1, r0);
3590 __ CallCFunction(ExternalReference::wasm_shrink_stack(), 1);
3591 // Restore old FP. We don't need to restore old SP explicitly, because
3592 // it will be restored from FP in LeaveFrame before return.
3593 __ mov(fp, kReturnRegister0);
3594 __ MultiPopF64OrV128(fp_regs_to_save, r1);
3595 __ MultiPop(regs_to_save);
3596 __ bind(&done);
3597 }
3598#endif // V8_ENABLE_WEBASSEMBLY
3599
3600 Register argc_reg = r5;
3601 // Functions with JS linkage have at least one parameter (the receiver).
3602 // If {parameter_slots} == 0, it means it is a builtin with
3603 // kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
3604 // itself.
3605 const bool drop_jsargs = parameter_slots != 0 &&
3606 frame_access_state()->has_frame() &&
3607 call_descriptor->IsJSFunctionCall();
3608
3609 if (call_descriptor->IsCFunctionCall()) {
3610 AssembleDeconstructFrame();
3611 } else if (frame_access_state()->has_frame()) {
3612 // Canonicalize JSFunction return sites for now unless they have a variable
3613 // number of stack slot pops
3614 if (additional_pop_count->IsImmediate() &&
3615 g.ToConstant(additional_pop_count).ToInt32() == 0) {
3616 if (return_label_.is_bound()) {
3617 __ b(&return_label_);
3618 return;
3619 } else {
3620 __ bind(&return_label_);
3621 }
3622 }
3623 if (drop_jsargs) {
3624 // Get the actual argument count.
3625 DCHECK(!call_descriptor->CalleeSavedRegisters().has(argc_reg));
3626 __ LoadU64(argc_reg, MemOperand(fp, StandardFrameConstants::kArgCOffset));
3627 }
3628 AssembleDeconstructFrame();
3629 }
3630
3631 if (drop_jsargs) {
3632 // We must pop all arguments from the stack (including the receiver).
3633 // The number of arguments without the receiver is
3634 // max(argc_reg, parameter_slots-1), and the receiver is added in
3635 // DropArguments().
3636 DCHECK(!call_descriptor->CalleeSavedRegisters().has(argc_reg));
3637 if (parameter_slots > 1) {
3638 Label skip;
3639 __ CmpS64(argc_reg, Operand(parameter_slots));
3640 __ bgt(&skip);
3641 __ mov(argc_reg, Operand(parameter_slots));
3642 __ bind(&skip);
3643 }
3644 __ DropArguments(argc_reg);
3645 } else if (additional_pop_count->IsImmediate()) {
3646 int additional_count = g.ToConstant(additional_pop_count).ToInt32();
3647 __ Drop(parameter_slots + additional_count);
3648 } else if (parameter_slots == 0) {
3649 __ Drop(g.ToRegister(additional_pop_count));
3650 } else {
3651 // {additional_pop_count} is guaranteed to be zero if {parameter_slots !=
3652 // 0}. Check RawMachineAssembler::PopAndReturn.
3653 __ Drop(parameter_slots);
3654 }
3655 __ Ret();
3656}
3657
3658void CodeGenerator::FinishCode() {}
3659
3660void CodeGenerator::PrepareForDeoptimizationExits(
3661    ZoneDeque<DeoptimizationExit*>* exits) {}
3662
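// Push spills {source} into a new slot just below the current stack top: it
// grows the stack by the slot size of {rep}, records the growth in the frame
// access state and in temp_slots_, and returns the AllocatedOperand that
// describes the new slot so callers can refer to it later.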
3663AllocatedOperand CodeGenerator::Push(InstructionOperand* source) {
3664 auto rep = LocationOperand::cast(source)->representation();
3665 int new_slots = ElementSizeInPointers(rep);
3666 S390OperandConverter g(this, nullptr);
3667 int last_frame_slot_id =
3668 frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
3669 int sp_delta = frame_access_state_->sp_delta();
3670 int slot_id = last_frame_slot_id + sp_delta + new_slots;
3671 AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
3672 if (source->IsFloatStackSlot() || source->IsDoubleStackSlot()) {
3673 __ LoadU64(r1, g.ToMemOperand(source));
3674 __ Push(r1);
3675 frame_access_state()->IncreaseSPDelta(new_slots);
3676 } else {
3677 // Bump the stack pointer and assemble the move.
3678 __ lay(sp, MemOperand(sp, -(new_slots * kSystemPointerSize)));
3679 frame_access_state()->IncreaseSPDelta(new_slots);
3680 AssembleMove(source, &stack_slot);
3681 }
3682 temp_slots_ += new_slots;
3683 return stack_slot;
3684}
3685
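// Pop is the inverse of Push: it moves the value out of the temporary slot
// into {dest}, shrinks the recorded SP delta, and raises sp by the slot size
// again.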
3686void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
3687 int dropped_slots = ElementSizeInPointers(rep);
3688 S390OperandConverter g(this, nullptr);
3689 if (dest->IsFloatStackSlot() || dest->IsDoubleStackSlot()) {
3690 frame_access_state()->IncreaseSPDelta(-dropped_slots);
3691 __ Pop(r1);
3692 __ StoreU64(r1, g.ToMemOperand(dest));
3693 } else {
3694 int last_frame_slot_id =
3695 frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
3696 int sp_delta = frame_access_state_->sp_delta();
3697 int slot_id = last_frame_slot_id + sp_delta;
3698 AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
3699 AssembleMove(&stack_slot, dest);
3700 frame_access_state()->IncreaseSPDelta(-dropped_slots);
3701 __ lay(sp, MemOperand(sp, dropped_slots * kSystemPointerSize));
3702 }
3703 temp_slots_ -= dropped_slots;
3704}
3705
3706void CodeGenerator::PopTempStackSlots() {
3707  if (temp_slots_ > 0) {
3708    frame_access_state()->IncreaseSPDelta(-temp_slots_);
3709    __ lay(sp, MemOperand(sp, temp_slots_ * kSystemPointerSize));
3710    temp_slots_ = 0;
3711 }
3712}
3713
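// The helpers below are used by the gap resolver to break move cycles: the
// value is parked either in the scratch register for its representation or,
// when SetPendingMove has flagged that the scratch register will be needed
// by a pending move, in a temporary stack slot via Push/Pop.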
3714 void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
3715                                        MachineRepresentation rep) {
3716  // Must be kept in sync with {MoveTempLocationTo}.
3717  if (!IsFloatingPoint(rep) ||
3718      ((IsFloatingPoint(rep) &&
3719        !move_cycle_.pending_double_scratch_register_use))) {
3720    // The scratch register for this rep is available.
3721    int scratch_reg_code =
3722        !IsFloatingPoint(rep) ? kScratchReg.code() : kScratchDoubleReg.code();
3723    AllocatedOperand scratch(LocationOperand::REGISTER, rep, scratch_reg_code);
3724 DCHECK(!AreAliased(kScratchReg, r0, r1));
3725 AssembleMove(source, &scratch);
3726 } else {
3727 // The scratch register is blocked by pending moves. Use the stack instead.
3728 Push(source);
3729 }
3730}
3731
3732 void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest,
3733                                        MachineRepresentation rep) {
3734  if (!IsFloatingPoint(rep) ||
3735      ((IsFloatingPoint(rep) &&
3736        !move_cycle_.pending_double_scratch_register_use))) {
3737    int scratch_reg_code =
3738        !IsFloatingPoint(rep) ? kScratchReg.code() : kScratchDoubleReg.code();
3739    AllocatedOperand scratch(LocationOperand::REGISTER, rep, scratch_reg_code);
3740 DCHECK(!AreAliased(kScratchReg, r0, r1));
3741 AssembleMove(&scratch, dest);
3742 } else {
3743 Pop(dest, rep);
3744 }
3745 move_cycle_ = MoveCycleState();
3746}
3747
3748void CodeGenerator::SetPendingMove(MoveOperands* move) {
3749 if ((move->source().IsConstant() || move->source().IsFPStackSlot()) &&
3750      !move->destination().IsFPRegister()) {
3751    move_cycle_.pending_double_scratch_register_use = true;
3752  }
3753}
3754
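// Register and stack-slot moves go through LoadU64/StoreU64 (r1 serves as
// the temporary for slot-to-slot copies); constants are materialized into
// the destination register, into r1 for a GP stack slot, or into
// kScratchDoubleReg for a floating-point constant headed to a stack slot;
// FP and SIMD values use the width-specific load/store instructions or vlr.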
3755void CodeGenerator::AssembleMove(InstructionOperand* source,
3756 InstructionOperand* destination) {
3757 S390OperandConverter g(this, nullptr);
3758 // Dispatch on the source and destination operand kinds. Not all
3759 // combinations are possible.
3760 // If a move type needs the scratch register, this also needs to be recorded
3761 // in {SetPendingMove} to avoid conflicts with the gap resolver.
3762 if (source->IsRegister()) {
3763 DCHECK(destination->IsRegister() || destination->IsStackSlot());
3764 Register src = g.ToRegister(source);
3765 if (destination->IsRegister()) {
3766 __ Move(g.ToRegister(destination), src);
3767 } else {
3768 __ StoreU64(src, g.ToMemOperand(destination));
3769 }
3770 } else if (source->IsStackSlot()) {
3771 DCHECK(destination->IsRegister() || destination->IsStackSlot());
3772 MemOperand src = g.ToMemOperand(source);
3773 if (destination->IsRegister()) {
3774 __ LoadU64(g.ToRegister(destination), src);
3775 } else {
3776 Register temp = r1;
3777 __ LoadU64(temp, src, r0);
3778 __ StoreU64(temp, g.ToMemOperand(destination));
3779 }
3780 } else if (source->IsConstant()) {
3781 Constant src = g.ToConstant(source);
3782 if (destination->IsRegister() || destination->IsStackSlot()) {
3783 Register dst = destination->IsRegister() ? g.ToRegister(destination) : r1;
3784 switch (src.type()) {
3785 case Constant::kInt32:
3786 __ mov(dst, Operand(src.ToInt32(), src.rmode()));
3787 break;
3788 case Constant::kInt64:
3789 __ mov(dst, Operand(src.ToInt64(), src.rmode()));
3790 break;
3791 case Constant::kFloat32:
3792 __ mov(dst, Operand::EmbeddedNumber(src.ToFloat32()));
3793 break;
3794 case Constant::kFloat64:
3795 __ mov(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
3796          break;
3797        case Constant::kExternalReference:
3798          __ Move(dst, src.ToExternalReference());
3799 break;
3800 case Constant::kHeapObject: {
3801          Handle<HeapObject> src_object = src.ToHeapObject();
3802          RootIndex index;
3803          if (IsMaterializableFromRoot(src_object, &index)) {
3804 __ LoadRoot(dst, index);
3805 } else {
3806 __ Move(dst, src_object);
3807 }
3808 break;
3809        }
3810        case Constant::kCompressedHeapObject: {
3811          Handle<HeapObject> src_object = src.ToHeapObject();
3812          RootIndex index;
3813          if (IsMaterializableFromRoot(src_object, &index)) {
3814 __ LoadTaggedRoot(dst, index);
3815 } else {
3816 __ Move(dst, src_object, RelocInfo::COMPRESSED_EMBEDDED_OBJECT);
3817 }
3818 break;
3819        }
3820        case Constant::kRpoNumber:
3821          UNREACHABLE();  // TODO(dcarney): loading RPO constants on S390.
3822 }
3823 if (destination->IsStackSlot()) {
3824 __ StoreU64(dst, g.ToMemOperand(destination), r0);
3825 }
3826 } else {
3827 DoubleRegister dst = destination->IsFPRegister()
3828                               ? g.ToDoubleRegister(destination)
3829                               : kScratchDoubleReg;
3830      double value = (src.type() == Constant::kFloat32)
3831 ? src.ToFloat32()
3832 : src.ToFloat64().value();
3833 if (src.type() == Constant::kFloat32) {
3834 __ LoadF32<float>(dst, src.ToFloat32(), r1);
3835 } else {
3836 __ LoadF64<double>(dst, value, r1);
3837 }
3838
3839 if (destination->IsFloatStackSlot()) {
3840 __ StoreF32(dst, g.ToMemOperand(destination));
3841 } else if (destination->IsDoubleStackSlot()) {
3842 __ StoreF64(dst, g.ToMemOperand(destination));
3843 }
3844 }
3845  } else if (source->IsFPRegister()) {
3846    MachineRepresentation rep = LocationOperand::cast(source)->representation();
3847    if (rep == MachineRepresentation::kSimd128) {
3848      if (destination->IsSimd128Register()) {
3849 __ vlr(g.ToSimd128Register(destination), g.ToSimd128Register(source),
3850 Condition(0), Condition(0), Condition(0));
3851 } else {
3852 DCHECK(destination->IsSimd128StackSlot());
3853 __ StoreV128(g.ToSimd128Register(source), g.ToMemOperand(destination),
3854 r1);
3855 }
3856 } else {
3857 DoubleRegister src = g.ToDoubleRegister(source);
3858 if (destination->IsFPRegister()) {
3859 DoubleRegister dst = g.ToDoubleRegister(destination);
3860 __ Move(dst, src);
3861 } else {
3862 DCHECK(destination->IsFPStackSlot());
3863 LocationOperand* op = LocationOperand::cast(source);
3864 if (op->representation() == MachineRepresentation::kFloat64) {
3865 __ StoreF64(src, g.ToMemOperand(destination));
3866 } else {
3867 __ StoreF32(src, g.ToMemOperand(destination));
3868 }
3869 }
3870 }
3871 } else if (source->IsFPStackSlot()) {
3872 DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
3873 MemOperand src = g.ToMemOperand(source);
3874 if (destination->IsFPRegister()) {
3875 LocationOperand* op = LocationOperand::cast(source);
3876 if (op->representation() == MachineRepresentation::kFloat64) {
3877 __ LoadF64(g.ToDoubleRegister(destination), src);
3878 } else if (op->representation() == MachineRepresentation::kFloat32) {
3879 __ LoadF32(g.ToDoubleRegister(destination), src);
3880 } else {
3881 DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
3882 __ LoadV128(g.ToSimd128Register(destination), g.ToMemOperand(source),
3883 r1);
3884 }
3885 } else {
3886      LocationOperand* op = LocationOperand::cast(source);
3887      DoubleRegister temp = kScratchDoubleReg;
3888      if (op->representation() == MachineRepresentation::kFloat64) {
3889 __ LoadF64(temp, src);
3890 __ StoreF64(temp, g.ToMemOperand(destination));
3891 } else if (op->representation() == MachineRepresentation::kFloat32) {
3892 __ LoadF32(temp, src);
3893 __ StoreF32(temp, g.ToMemOperand(destination));
3894 } else {
3895 DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
3896 __ LoadV128(kScratchDoubleReg, g.ToMemOperand(source), r1);
3897 __ StoreV128(kScratchDoubleReg, g.ToMemOperand(destination), r1);
3898 }
3899 }
3900 } else {
3901 UNREACHABLE();
3902 }
3903}
3904
3905// Swapping the contents of source and destination.
3906// source and destination can be:
3907// Register,
3908// FloatRegister or DoubleRegister,
3909// Simd128Register,
3910// StackSlot,
3911// FloatStackSlot or DoubleStackSlot,
3912// or Simd128StackSlot
3913void CodeGenerator::AssembleSwap(InstructionOperand* source,
3914 InstructionOperand* destination) {
3915 S390OperandConverter g(this, nullptr);
3916 if (source->IsRegister()) {
3917 Register src = g.ToRegister(source);
3918 if (destination->IsRegister()) {
3919 __ SwapP(src, g.ToRegister(destination), kScratchReg);
3920 } else {
3921 DCHECK(destination->IsStackSlot());
3922 __ SwapP(src, g.ToMemOperand(destination), kScratchReg);
3923 }
3924 } else if (source->IsStackSlot()) {
3925 DCHECK(destination->IsStackSlot());
3926 __ SwapP(g.ToMemOperand(source), g.ToMemOperand(destination), kScratchReg,
3927 r0);
3928 } else if (source->IsFloatRegister()) {
3929 DoubleRegister src = g.ToDoubleRegister(source);
3930 if (destination->IsFloatRegister()) {
3931 __ SwapFloat32(src, g.ToDoubleRegister(destination), kScratchDoubleReg);
3932 } else {
3933 DCHECK(destination->IsFloatStackSlot());
3934 __ SwapFloat32(src, g.ToMemOperand(destination), kScratchDoubleReg);
3935 }
3936 } else if (source->IsDoubleRegister()) {
3937 DoubleRegister src = g.ToDoubleRegister(source);
3938 if (destination->IsDoubleRegister()) {
3939 __ SwapDouble(src, g.ToDoubleRegister(destination), kScratchDoubleReg);
3940 } else {
3941 DCHECK(destination->IsDoubleStackSlot());
3942 __ SwapDouble(src, g.ToMemOperand(destination), kScratchDoubleReg);
3943 }
3944 } else if (source->IsFloatStackSlot()) {
3945 DCHECK(destination->IsFloatStackSlot());
3946    __ SwapFloat32(g.ToMemOperand(source), g.ToMemOperand(destination),
3947                   kScratchDoubleReg);
3948  } else if (source->IsDoubleStackSlot()) {
3949 DCHECK(destination->IsDoubleStackSlot());
3950    __ SwapDouble(g.ToMemOperand(source), g.ToMemOperand(destination),
3951                  kScratchDoubleReg);
3952  } else if (source->IsSimd128Register()) {
3953 Simd128Register src = g.ToSimd128Register(source);
3954 if (destination->IsSimd128Register()) {
3955 __ SwapSimd128(src, g.ToSimd128Register(destination), kScratchDoubleReg);
3956 } else {
3957 DCHECK(destination->IsSimd128StackSlot());
3958 __ SwapSimd128(src, g.ToMemOperand(destination), kScratchDoubleReg);
3959 }
3960 } else if (source->IsSimd128StackSlot()) {
3961 DCHECK(destination->IsSimd128StackSlot());
3962    __ SwapSimd128(g.ToMemOperand(source), g.ToMemOperand(destination),
3963                   kScratchDoubleReg);
3964  } else {
3965 UNREACHABLE();
3966 }
3967}
3968
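// Jump tables are emitted as a sequence of absolute label addresses, one per
// target, so that the table-switch code can load the selected entry and
// branch to it.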
3969void CodeGenerator::AssembleJumpTable(base::Vector<Label*> targets) {
3970 for (auto target : targets) {
3971 __ emit_label_addr(target);
3972 }
3973}
3974
3975#undef __
3976
3977} // namespace compiler
3978} // namespace internal
3979} // namespace v8