v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
instruction-scheduler-mips64.cc
Go to the documentation of this file.
1// Copyright 2015 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
7
8namespace v8 {
9namespace internal {
10namespace compiler {
11
12bool InstructionScheduler::SchedulerSupported() { return true; }
13
15 const Instruction* instr) const {
16 switch (instr->arch_opcode()) {
17 case kMips64AbsD:
18 case kMips64AbsS:
19 case kMips64Add:
20 case kMips64AddD:
21 case kMips64AddS:
22 case kMips64And:
23 case kMips64And32:
24 case kMips64AssertEqual:
25 case kMips64BitcastDL:
26 case kMips64BitcastLD:
27 case kMips64ByteSwap32:
28 case kMips64ByteSwap64:
29 case kMips64CeilWD:
30 case kMips64CeilWS:
31 case kMips64Clz:
32 case kMips64Cmp:
33 case kMips64CmpD:
34 case kMips64CmpS:
35 case kMips64Ctz:
36 case kMips64CvtDL:
37 case kMips64CvtDS:
38 case kMips64CvtDUl:
39 case kMips64CvtDUw:
40 case kMips64CvtDW:
41 case kMips64CvtSD:
42 case kMips64CvtSL:
43 case kMips64CvtSUl:
44 case kMips64CvtSUw:
45 case kMips64CvtSW:
46 case kMips64DMulHigh:
47 case kMips64DMulHighU:
48 case kMips64DMulOvf:
49 case kMips64MulHighU:
50 case kMips64Dadd:
51 case kMips64DaddOvf:
52 case kMips64Dclz:
53 case kMips64Dctz:
54 case kMips64Ddiv:
55 case kMips64DdivU:
56 case kMips64Dext:
57 case kMips64Dins:
58 case kMips64Div:
59 case kMips64DivD:
60 case kMips64DivS:
61 case kMips64DivU:
62 case kMips64Dlsa:
63 case kMips64Dmod:
64 case kMips64DmodU:
65 case kMips64Dmul:
66 case kMips64Dpopcnt:
67 case kMips64Dror:
68 case kMips64Dsar:
69 case kMips64Dshl:
70 case kMips64Dshr:
71 case kMips64Dsub:
72 case kMips64DsubOvf:
73 case kMips64Ext:
74 case kMips64F64x2Abs:
75 case kMips64F64x2Neg:
76 case kMips64F64x2Sqrt:
77 case kMips64F64x2Add:
78 case kMips64F64x2Sub:
79 case kMips64F64x2Mul:
80 case kMips64F64x2Div:
81 case kMips64F64x2Min:
82 case kMips64F64x2Max:
83 case kMips64F64x2Eq:
84 case kMips64F64x2Ne:
85 case kMips64F64x2Lt:
86 case kMips64F64x2Le:
87 case kMips64F64x2Pmin:
88 case kMips64F64x2Pmax:
89 case kMips64F64x2Ceil:
90 case kMips64F64x2Floor:
91 case kMips64F64x2Trunc:
92 case kMips64F64x2NearestInt:
93 case kMips64F64x2ConvertLowI32x4S:
94 case kMips64F64x2ConvertLowI32x4U:
95 case kMips64F64x2PromoteLowF32x4:
96 case kMips64I64x2Splat:
97 case kMips64I64x2ExtractLane:
98 case kMips64I64x2ReplaceLane:
99 case kMips64I64x2Add:
100 case kMips64I64x2Sub:
101 case kMips64I64x2Mul:
102 case kMips64I64x2Neg:
103 case kMips64I64x2Shl:
104 case kMips64I64x2ShrS:
105 case kMips64I64x2ShrU:
106 case kMips64I64x2BitMask:
107 case kMips64I64x2Eq:
108 case kMips64I64x2Ne:
109 case kMips64I64x2GtS:
110 case kMips64I64x2GeS:
111 case kMips64I64x2Abs:
112 case kMips64I64x2SConvertI32x4Low:
113 case kMips64I64x2SConvertI32x4High:
114 case kMips64I64x2UConvertI32x4Low:
115 case kMips64I64x2UConvertI32x4High:
116 case kMips64ExtMulLow:
117 case kMips64ExtMulHigh:
118 case kMips64ExtAddPairwise:
119 case kMips64F32x4Abs:
120 case kMips64F32x4Add:
121 case kMips64F32x4Eq:
122 case kMips64F32x4ExtractLane:
123 case kMips64F32x4Lt:
124 case kMips64F32x4Le:
125 case kMips64F32x4Max:
126 case kMips64F32x4Min:
127 case kMips64F32x4Mul:
128 case kMips64F32x4Div:
129 case kMips64F32x4Ne:
130 case kMips64F32x4Neg:
131 case kMips64F32x4Sqrt:
132 case kMips64F32x4ReplaceLane:
133 case kMips64F32x4SConvertI32x4:
134 case kMips64F32x4Splat:
135 case kMips64F32x4Sub:
136 case kMips64F32x4UConvertI32x4:
137 case kMips64F32x4Pmin:
138 case kMips64F32x4Pmax:
139 case kMips64F32x4Ceil:
140 case kMips64F32x4Floor:
141 case kMips64F32x4Trunc:
142 case kMips64F32x4NearestInt:
143 case kMips64F32x4DemoteF64x2Zero:
144 case kMips64F64x2Splat:
145 case kMips64F64x2ExtractLane:
146 case kMips64F64x2ReplaceLane:
147 case kMips64Float32Max:
148 case kMips64Float32Min:
149 case kMips64Float32RoundDown:
150 case kMips64Float32RoundTiesEven:
151 case kMips64Float32RoundTruncate:
152 case kMips64Float32RoundUp:
153 case kMips64Float64ExtractLowWord32:
154 case kMips64Float64ExtractHighWord32:
155 case kMips64Float64FromWord32Pair:
156 case kMips64Float64InsertLowWord32:
157 case kMips64Float64InsertHighWord32:
158 case kMips64Float64Max:
159 case kMips64Float64Min:
160 case kMips64Float64RoundDown:
161 case kMips64Float64RoundTiesEven:
162 case kMips64Float64RoundTruncate:
163 case kMips64Float64RoundUp:
164 case kMips64Float64SilenceNaN:
165 case kMips64FloorWD:
166 case kMips64FloorWS:
167 case kMips64I16x8Add:
168 case kMips64I16x8AddSatS:
169 case kMips64I16x8AddSatU:
170 case kMips64I16x8Eq:
171 case kMips64I16x8ExtractLaneU:
172 case kMips64I16x8ExtractLaneS:
173 case kMips64I16x8GeS:
174 case kMips64I16x8GeU:
175 case kMips64I16x8GtS:
176 case kMips64I16x8GtU:
177 case kMips64I16x8MaxS:
178 case kMips64I16x8MaxU:
179 case kMips64I16x8MinS:
180 case kMips64I16x8MinU:
181 case kMips64I16x8Mul:
182 case kMips64I16x8Ne:
183 case kMips64I16x8Neg:
184 case kMips64I16x8ReplaceLane:
185 case kMips64I8x16SConvertI16x8:
186 case kMips64I16x8SConvertI32x4:
187 case kMips64I16x8SConvertI8x16High:
188 case kMips64I16x8SConvertI8x16Low:
189 case kMips64I16x8Shl:
190 case kMips64I16x8ShrS:
191 case kMips64I16x8ShrU:
192 case kMips64I16x8Splat:
193 case kMips64I16x8Sub:
194 case kMips64I16x8SubSatS:
195 case kMips64I16x8SubSatU:
196 case kMips64I8x16UConvertI16x8:
197 case kMips64I16x8UConvertI32x4:
198 case kMips64I16x8UConvertI8x16High:
199 case kMips64I16x8UConvertI8x16Low:
200 case kMips64I16x8RoundingAverageU:
201 case kMips64I16x8Abs:
202 case kMips64I16x8BitMask:
203 case kMips64I16x8Q15MulRSatS:
204 case kMips64I32x4Add:
205 case kMips64I32x4Eq:
206 case kMips64I32x4ExtractLane:
207 case kMips64I32x4GeS:
208 case kMips64I32x4GeU:
209 case kMips64I32x4GtS:
210 case kMips64I32x4GtU:
211 case kMips64I32x4MaxS:
212 case kMips64I32x4MaxU:
213 case kMips64I32x4MinS:
214 case kMips64I32x4MinU:
215 case kMips64I32x4Mul:
216 case kMips64I32x4Ne:
217 case kMips64I32x4Neg:
218 case kMips64I32x4ReplaceLane:
219 case kMips64I32x4SConvertF32x4:
220 case kMips64I32x4SConvertI16x8High:
221 case kMips64I32x4SConvertI16x8Low:
222 case kMips64I32x4Shl:
223 case kMips64I32x4ShrS:
224 case kMips64I32x4ShrU:
225 case kMips64I32x4Splat:
226 case kMips64I32x4Sub:
227 case kMips64I32x4UConvertF32x4:
228 case kMips64I32x4UConvertI16x8High:
229 case kMips64I32x4UConvertI16x8Low:
230 case kMips64I32x4Abs:
231 case kMips64I32x4BitMask:
232 case kMips64I32x4DotI16x8S:
233 case kMips64I32x4TruncSatF64x2SZero:
234 case kMips64I32x4TruncSatF64x2UZero:
235 case kMips64I8x16Add:
236 case kMips64I8x16AddSatS:
237 case kMips64I8x16AddSatU:
238 case kMips64I8x16Eq:
239 case kMips64I8x16ExtractLaneU:
240 case kMips64I8x16ExtractLaneS:
241 case kMips64I8x16GeS:
242 case kMips64I8x16GeU:
243 case kMips64I8x16GtS:
244 case kMips64I8x16GtU:
245 case kMips64I8x16MaxS:
246 case kMips64I8x16MaxU:
247 case kMips64I8x16MinS:
248 case kMips64I8x16MinU:
249 case kMips64I8x16Ne:
250 case kMips64I8x16Neg:
251 case kMips64I8x16ReplaceLane:
252 case kMips64I8x16Shl:
253 case kMips64I8x16ShrS:
254 case kMips64I8x16ShrU:
255 case kMips64I8x16Splat:
256 case kMips64I8x16Sub:
257 case kMips64I8x16SubSatS:
258 case kMips64I8x16SubSatU:
259 case kMips64I8x16RoundingAverageU:
260 case kMips64I8x16Abs:
261 case kMips64I8x16Popcnt:
262 case kMips64I8x16BitMask:
263 case kMips64Ins:
264 case kMips64Lsa:
265 case kMips64MaxD:
266 case kMips64MaxS:
267 case kMips64MinD:
268 case kMips64MinS:
269 case kMips64Mod:
270 case kMips64ModU:
271 case kMips64Mov:
272 case kMips64Mul:
273 case kMips64MulD:
274 case kMips64MulHigh:
275 case kMips64MulOvf:
276 case kMips64MulS:
277 case kMips64NegD:
278 case kMips64NegS:
279 case kMips64Nor:
280 case kMips64Nor32:
281 case kMips64Or:
282 case kMips64Or32:
283 case kMips64Popcnt:
284 case kMips64Ror:
285 case kMips64RoundWD:
286 case kMips64RoundWS:
287 case kMips64S128And:
288 case kMips64S128Or:
289 case kMips64S128Not:
290 case kMips64S128Select:
291 case kMips64S128AndNot:
292 case kMips64S128Xor:
293 case kMips64S128Const:
294 case kMips64S128Zero:
295 case kMips64S128AllOnes:
296 case kMips64S16x8InterleaveEven:
297 case kMips64S16x8InterleaveOdd:
298 case kMips64S16x8InterleaveLeft:
299 case kMips64S16x8InterleaveRight:
300 case kMips64S16x8PackEven:
301 case kMips64S16x8PackOdd:
302 case kMips64S16x2Reverse:
303 case kMips64S16x4Reverse:
304 case kMips64I64x2AllTrue:
305 case kMips64I32x4AllTrue:
306 case kMips64I16x8AllTrue:
307 case kMips64I8x16AllTrue:
308 case kMips64V128AnyTrue:
309 case kMips64S32x4InterleaveEven:
310 case kMips64S32x4InterleaveOdd:
311 case kMips64S32x4InterleaveLeft:
312 case kMips64S32x4InterleaveRight:
313 case kMips64S32x4PackEven:
314 case kMips64S32x4PackOdd:
315 case kMips64S32x4Shuffle:
316 case kMips64S8x16Concat:
317 case kMips64S8x16InterleaveEven:
318 case kMips64S8x16InterleaveOdd:
319 case kMips64S8x16InterleaveLeft:
320 case kMips64S8x16InterleaveRight:
321 case kMips64S8x16PackEven:
322 case kMips64S8x16PackOdd:
323 case kMips64S8x2Reverse:
324 case kMips64S8x4Reverse:
325 case kMips64S8x8Reverse:
326 case kMips64I8x16Shuffle:
327 case kMips64I8x16Swizzle:
328 case kMips64Sar:
329 case kMips64Seb:
330 case kMips64Seh:
331 case kMips64Shl:
332 case kMips64Shr:
333 case kMips64SqrtD:
334 case kMips64SqrtS:
335 case kMips64Sub:
336 case kMips64SubD:
337 case kMips64SubS:
338 case kMips64TruncLD:
339 case kMips64TruncLS:
340 case kMips64TruncUlD:
341 case kMips64TruncUlS:
342 case kMips64TruncUwD:
343 case kMips64TruncUwS:
344 case kMips64TruncWD:
345 case kMips64TruncWS:
346 case kMips64Tst:
347 case kMips64Xor:
348 case kMips64Xor32:
349 return kNoOpcodeFlags;
350
351 case kMips64Lb:
352 case kMips64Lbu:
353 case kMips64Ld:
354 case kMips64Ldc1:
355 case kMips64Lh:
356 case kMips64Lhu:
357 case kMips64Lw:
358 case kMips64Lwc1:
359 case kMips64Lwu:
360 case kMips64MsaLd:
361 case kMips64Peek:
362 case kMips64Uld:
363 case kMips64Uldc1:
364 case kMips64Ulh:
365 case kMips64Ulhu:
366 case kMips64Ulw:
367 case kMips64Ulwu:
368 case kMips64Ulwc1:
369 case kMips64S128LoadSplat:
370 case kMips64S128Load8x8S:
371 case kMips64S128Load8x8U:
372 case kMips64S128Load16x4S:
373 case kMips64S128Load16x4U:
374 case kMips64S128Load32x2S:
375 case kMips64S128Load32x2U:
376 case kMips64S128Load32Zero:
377 case kMips64S128Load64Zero:
378 case kMips64S128LoadLane:
379 case kMips64Word64AtomicLoadUint64:
380
381 return kIsLoadOperation;
382
383 case kMips64ModD:
384 case kMips64MsaSt:
385 case kMips64Push:
386 case kMips64Sb:
387 case kMips64Sd:
388 case kMips64Sdc1:
389 case kMips64Sh:
390 case kMips64StackClaim:
391 case kMips64StoreToStackSlot:
392 case kMips64Sw:
393 case kMips64Swc1:
394 case kMips64Usd:
395 case kMips64Usdc1:
396 case kMips64Ush:
397 case kMips64Usw:
398 case kMips64Uswc1:
399 case kMips64Sync:
400 case kMips64S128StoreLane:
401 case kMips64StoreCompressTagged:
402 case kMips64Word64AtomicStoreWord64:
403 case kMips64Word64AtomicAddUint64:
404 case kMips64Word64AtomicSubUint64:
405 case kMips64Word64AtomicAndUint64:
406 case kMips64Word64AtomicOrUint64:
407 case kMips64Word64AtomicXorUint64:
408 case kMips64Word64AtomicExchangeUint64:
409 case kMips64Word64AtomicCompareExchangeUint64:
410 return kHasSideEffect;
411
412#define CASE(Name) case k##Name:
414#undef CASE
415 // Already covered in architecture independent code.
416 UNREACHABLE();
417 }
418
419 UNREACHABLE();
420}
421
423 BRANCH = 4, // Estimated max.
424 RINT_S = 4, // Estimated.
425 RINT_D = 4, // Estimated.
426
427 MULT = 4,
428 MULTU = 4,
429 DMULT = 4,
431
432 MUL = 7,
433 DMUL = 7,
434 MUH = 7,
435 MUHU = 7,
436 DMUH = 7,
437 DMUHU = 7,
438
439 DIV = 50, // Min:11 Max:50
440 DDIV = 50,
441 DIVU = 50,
442 DDIVU = 50,
443
444 ABS_S = 4,
445 ABS_D = 4,
446 NEG_S = 4,
447 NEG_D = 4,
448 ADD_S = 4,
449 ADD_D = 4,
450 SUB_S = 4,
451 SUB_D = 4,
452 MAX_S = 4, // Estimated.
453 MIN_S = 4,
454 MAX_D = 4, // Estimated.
455 MIN_D = 4,
458 MUL_S = 4,
459
464
467
470
475
477
482
487
492
497
502
503 MOV_S = 4,
504 MOV_D = 4,
505
508
511
514
517
518 MUL_D = 5,
523
526
529
530 DIV_S = 17,
531 SQRT_S = 17,
532
533 DIV_D = 32,
534 SQRT_D = 32,
535
536 MTC1 = 4,
537 MTHC1 = 4,
538 DMTC1 = 4,
539 LWC1 = 4,
540 LDC1 = 4,
541
542 MFC1 = 1,
543 MFHC1 = 1,
544 DMFC1 = 1,
545 MFHI = 1,
546 MFLO = 1,
547 SWC1 = 1,
548 SDC1 = 1,
549};
550
// Latency of a 64-bit add. A non-register operand needs an extra
// instruction to materialize, hence the larger (estimated max) value.
int DadduLatency(bool is_operand_register = true) {
  return is_operand_register ? 1 : 2;
}
558
559int DsubuLatency(bool is_operand_register = true) {
560 return DadduLatency(is_operand_register);
561}
562
563int AndLatency(bool is_operand_register = true) {
564 return DadduLatency(is_operand_register);
565}
566
567int OrLatency(bool is_operand_register = true) {
568 return DadduLatency(is_operand_register);
569}
570
// NOR costs one cycle with register operands; estimated max of two
// otherwise (extra instruction to materialize the operand).
int NorLatency(bool is_operand_register = true) {
  return is_operand_register ? 1 : 2;
}
578
579int XorLatency(bool is_operand_register = true) {
580 return DadduLatency(is_operand_register);
581}
582
583int MulLatency(bool is_operand_register = true) {
584 if (is_operand_register) {
585 return Latency::MUL;
586 } else {
587 return Latency::MUL + 1;
588 }
589}
590
591int DmulLatency(bool is_operand_register = true) {
592 int latency = 0;
593 if (kArchVariant >= kMips64r6) {
594 latency = Latency::DMUL;
595 } else {
596 latency = Latency::DMULT + Latency::MFLO;
597 }
598 if (!is_operand_register) {
599 latency += 1;
600 }
601 return latency;
602}
603
604int MulhLatency(bool is_operand_register = true) {
605 int latency = 0;
606 if (kArchVariant >= kMips64r6) {
607 latency = Latency::MUH;
608 } else {
609 latency = Latency::MULT + Latency::MFHI;
610 }
611 if (!is_operand_register) {
612 latency += 1;
613 }
614 return latency;
615}
616
617int MulhuLatency(bool is_operand_register = true) {
618 int latency = 0;
619 if (kArchVariant >= kMips64r6) {
620 latency = Latency::MUH;
621 } else {
622 latency = Latency::MULTU + Latency::MFHI;
623 }
624 if (!is_operand_register) {
625 latency += 1;
626 }
627 return latency;
628}
629
630int DMulhLatency(bool is_operand_register = true) {
631 int latency = 0;
632 if (kArchVariant >= kMips64r6) {
633 latency = Latency::DMUH;
634 } else {
635 latency = Latency::DMULT + Latency::MFHI;
636 }
637 if (!is_operand_register) {
638 latency += 1;
639 }
640 return latency;
641}
642
643int DivLatency(bool is_operand_register = true) {
644 if (is_operand_register) {
645 return Latency::DIV;
646 } else {
647 return Latency::DIV + 1;
648 }
649}
650
651int DivuLatency(bool is_operand_register = true) {
652 if (is_operand_register) {
653 return Latency::DIVU;
654 } else {
655 return Latency::DIVU + 1;
656 }
657}
658
659int DdivLatency(bool is_operand_register = true) {
660 int latency = 0;
661 if (kArchVariant >= kMips64r6) {
662 latency = Latency::DDIV;
663 } else {
664 latency = Latency::DDIV + Latency::MFLO;
665 }
666 if (!is_operand_register) {
667 latency += 1;
668 }
669 return latency;
670}
671
672int DdivuLatency(bool is_operand_register = true) {
673 int latency = 0;
674 if (kArchVariant >= kMips64r6) {
675 latency = Latency::DDIVU;
676 } else {
677 latency = Latency::DDIVU + Latency::MFLO;
678 }
679 if (!is_operand_register) {
680 latency += 1;
681 }
682 return latency;
683}
684
685int ModLatency(bool is_operand_register = true) {
686 int latency = 0;
687 if (kArchVariant >= kMips64r6) {
688 latency = 1;
689 } else {
690 latency = Latency::DIV + Latency::MFHI;
691 }
692 if (!is_operand_register) {
693 latency += 1;
694 }
695 return latency;
696}
697
698int ModuLatency(bool is_operand_register = true) {
699 int latency = 0;
700 if (kArchVariant >= kMips64r6) {
701 latency = 1;
702 } else {
703 latency = Latency::DIVU + Latency::MFHI;
704 }
705 if (!is_operand_register) {
706 latency += 1;
707 }
708 return latency;
709}
710
711int DmodLatency(bool is_operand_register = true) {
712 int latency = 0;
713 if (kArchVariant >= kMips64r6) {
714 latency = 1;
715 } else {
716 latency = Latency::DDIV + Latency::MFHI;
717 }
718 if (!is_operand_register) {
719 latency += 1;
720 }
721 return latency;
722}
723
724int DmoduLatency(bool is_operand_register = true) {
725 int latency = 0;
726 if (kArchVariant >= kMips64r6) {
727 latency = 1;
728 } else {
729 latency = Latency::DDIV + Latency::MFHI;
730 }
731 if (!is_operand_register) {
732 latency += 1;
733 }
734 return latency;
735}
736
738 if (kArchVariant >= kMips64r6) {
739 return Latency::BRANCH + 1;
740 } else {
741 return 1;
742 }
743}
744
746 if (kArchVariant >= kMips64r6) {
747 return Latency::BRANCH + 1;
748 } else {
749 return 1;
750 }
751}
752
754 // Estimated max.
755 return DadduLatency() + 1;
756}
757
759 // Estimated.
760 return DadduLatency(false) + Latency::BRANCH + 5;
761}
762
764 // Estimated max.
765 return 1 + DadduLatency() + Latency::BRANCH + 2;
766}
767
// Untagging a Smi is a single shift.
int SmiUntagLatency() { return 1; }
769
771 // Estimated max.
772 return 2 * (DlsaLatency() + DadduLatency(false)) + 2 + Latency::BRANCH +
773 Latency::BRANCH + 2 * DsubuLatency(false) + 2 + Latency::BRANCH + 1;
774}
775
// Cost attributed to a debug assert in scheduled code.
int AssertLatency() { return 1; }
777
779 int frame_alignment = MacroAssembler::ActivationFrameAlignment();
780 if (frame_alignment > kSystemPointerSize) {
781 return 1 + DsubuLatency(false) + AndLatency(false) + 1;
782 } else {
783 return DsubuLatency(false);
784 }
785}
786
788 return 3; // Estimated max.
789}
790
792
794 if (kArchVariant >= kMips64r6) {
795 return AlignedMemoryLatency();
796 } else {
798 }
799}
800
802 if (kArchVariant >= kMips64r6) {
803 return AlignedMemoryLatency();
804 } else {
805 // Estimated max.
806 return AdjustBaseAndOffsetLatency() + 3;
807 }
808}
809
811 if (kArchVariant >= kMips64r6) {
812 return AlignedMemoryLatency();
813 } else {
814 return UlwLatency() + 1;
815 }
816}
817
819 if (kArchVariant >= kMips64r6) {
820 return AlignedMemoryLatency();
821 } else {
822 // Estimated max.
823 return AdjustBaseAndOffsetLatency() + 3;
824 }
825}
826
828 if (kArchVariant >= kMips64r6) {
829 return AlignedMemoryLatency();
830 } else {
831 return UlwLatency() + Latency::MTC1;
832 }
833}
834
836 if (kArchVariant >= kMips64r6) {
837 return AlignedMemoryLatency();
838 } else {
839 return UldLatency() + Latency::DMTC1;
840 }
841}
842
844 if (kArchVariant >= kMips64r6) {
845 return AlignedMemoryLatency();
846 } else {
847 // Estimated max.
849 }
850}
851
853 if (kArchVariant >= kMips64r6) {
854 return AlignedMemoryLatency();
855 } else {
856 return AdjustBaseAndOffsetLatency() + 2;
857 }
858}
859
861 if (kArchVariant >= kMips64r6) {
862 return AlignedMemoryLatency();
863 } else {
864 return AdjustBaseAndOffsetLatency() + 2;
865 }
866}
867
869 if (kArchVariant >= kMips64r6) {
870 return AlignedMemoryLatency();
871 } else {
872 return Latency::MFC1 + UswLatency();
873 }
874}
875
877 if (kArchVariant >= kMips64r6) {
878 return AlignedMemoryLatency();
879 } else {
880 return Latency::DMFC1 + UsdLatency();
881 }
882}
883
884int Lwc1Latency() { return AdjustBaseAndOffsetLatency() + Latency::LWC1; }
885
886int Swc1Latency() { return AdjustBaseAndOffsetLatency() + Latency::SWC1; }
887
888int Sdc1Latency() { return AdjustBaseAndOffsetLatency() + Latency::SDC1; }
889
890int Ldc1Latency() { return AdjustBaseAndOffsetLatency() + Latency::LDC1; }
891
893 int latency = DsubuLatency(false);
894 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
895 latency++;
896 }
897 return latency;
898}
899
901 int latency = DsubuLatency(false);
902 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
903 latency += Sdc1Latency();
904 }
905 return latency;
906}
907
909 int latency = MultiPushLatency();
910 if (fp_mode == SaveFPRegsMode::kSave) {
911 latency += MultiPushFPULatency();
912 }
913 return latency;
914}
915
917 int latency = DadduLatency(false);
918 for (int16_t i = 0; i < kNumRegisters; i++) {
919 latency++;
920 }
921 return latency;
922}
923
925 int latency = DadduLatency(false);
926 for (int16_t i = 0; i < kNumRegisters; i++) {
927 latency += Ldc1Latency();
928 }
929 return latency;
930}
931
933 int latency = MultiPopLatency();
934 if (fp_mode == SaveFPRegsMode::kSave) {
935 latency += MultiPopFPULatency();
936 }
937 return latency;
938}
939
941 // Estimated.
942 int latency = AndLatency(false) + Latency::BRANCH + 2 + CallLatency();
944 latency++;
945 } else {
946 latency += DadduLatency(false);
947 }
948 return latency;
949}
950
952
954 // Estimated max.
955 return Latency::BRANCH;
956}
957
959 int latency = 0;
960 if (kArchVariant >= kMips64r6) {
961 latency = DlsaLatency() + 2;
962 } else {
963 latency = 6;
964 }
965 latency += 2;
966 return latency;
967}
968
970 return Latency::BRANCH + GenerateSwitchTableLatency();
971}
972
974 // Estimated max.
975 return DadduLatency(false) + JumpLatency();
976}
977
979 // Estimated max.
980 return DadduLatency(false) + MultiPopLatency() + MultiPopFPULatency() +
981 Latency::BRANCH + DadduLatency() + 1 + DropAndRetLatency();
982}
983
985 return 2 + Latency::TRUNC_W_D + Latency::MFC1 + 2 + AndLatency(false) +
986 Latency::BRANCH;
987}
988
989int CallStubDelayedLatency() { return 1 + CallLatency(); }
990
992 // TODO(mips): This no longer reflects how TruncateDoubleToI is called.
993 return TryInlineTruncateDoubleToILatency() + 1 + DsubuLatency(false) +
995}
996
998 return AndLatency(false) + AlignedMemoryLatency() + AndLatency(false) +
999 Latency::BRANCH;
1000}
1001
// Set-less-than-unsigned: one cycle with register operands; estimated
// max of two when an operand must be materialized.
int SltuLatency(bool is_operand_register = true) {
  return is_operand_register ? 1 : 2;
}
1009
1011 return 2; // Estimated max.
1012}
1013
1015 return SltuLatency() + 2; // Estimated max.
1016}
1017
1019 if (kArchVariant >= kMips64r6 && bdslot == PROTECT) {
1021 } else {
1022 return BranchShortHelperLatency();
1023 }
1024}
1025
// A register-to-register move is a single cycle.
int MoveLatency() { return 1; }
1027
1029
1031
1033 // Estimated max.
1034 return 6;
1035}
1036
1038 // Estimated max.
1039 return 6;
1040}
1041
1043 // Estimated max.
1044 return MulLatency() + MulhLatency() + 2;
1045}
1046
// Count-leading-zeros (64-bit) is a single cycle.
int DclzLatency() { return 1; }
1048
1050 if (kArchVariant >= kMips64r6) {
1051 return 3 + DclzLatency();
1052 } else {
1053 return DadduLatency(false) + XorLatency() + AndLatency() + DclzLatency() +
1054 1 + DsubuLatency();
1055 }
1056}
1057
1059 if (kArchVariant >= kMips64r6) {
1060 return 4;
1061 } else {
1062 return DadduLatency(false) + XorLatency() + AndLatency() + 1 +
1063 DsubuLatency();
1064 }
1065}
1066
1068 return 2 + AndLatency() + DsubuLatency() + 1 + AndLatency() + 1 +
1069 AndLatency() + DadduLatency() + 1 + DadduLatency() + 1 + AndLatency() +
1070 1 + MulLatency() + 1;
1071}
1072
1074 return 2 + AndLatency() + DsubuLatency() + 1 + AndLatency() + 1 +
1075 AndLatency() + DadduLatency() + 1 + DadduLatency() + 1 + AndLatency() +
1076 1 + DmulLatency() + 1;
1077}
1078
1079int CompareFLatency() { return Latency::C_cond_S; }
1080
1082
1084
1086
1088
1090
1092 if (kArchVariant >= kMips64r6) {
1093 return Latency::NEG_S;
1094 } else {
1095 // Estimated.
1096 return CompareIsNanF32Latency() + 2 * Latency::BRANCH + Latency::NEG_S +
1097 Latency::MFC1 + 1 + XorLatency() + Latency::MTC1;
1098 }
1099}
1100
1102 if (kArchVariant >= kMips64r6) {
1103 return Latency::NEG_D;
1104 } else {
1105 // Estimated.
1106 return CompareIsNanF64Latency() + 2 * Latency::BRANCH + Latency::NEG_D +
1107 Latency::DMFC1 + 1 + XorLatency() + Latency::DMTC1;
1108 }
1109}
1110
1112 if (kArchVariant >= kMips64r6) {
1113 return Latency::RINT_D + 4;
1114 } else {
1115 // For ceil_l_d, floor_l_d, round_l_d, trunc_l_d latency is 4.
1116 return Latency::DMFC1 + 1 + Latency::BRANCH + Latency::MOV_D + 4 +
1117 Latency::DMFC1 + Latency::BRANCH + Latency::CVT_D_L + 2 +
1118 Latency::MTHC1;
1119 }
1120}
1121
1123 if (kArchVariant >= kMips64r6) {
1124 return Latency::RINT_S + 4;
1125 } else {
1126 // For ceil_w_s, floor_w_s, round_w_s, trunc_w_s latency is 4.
1127 return Latency::MFC1 + 1 + Latency::BRANCH + Latency::MOV_S + 4 +
1128 Latency::MFC1 + Latency::BRANCH + Latency::CVT_S_W + 2 +
1129 Latency::MTC1;
1130 }
1131}
1132
1134 // Estimated max.
1135 int latency = CompareIsNanF32Latency() + Latency::BRANCH;
1136 if (kArchVariant >= kMips64r6) {
1137 return latency + Latency::MAX_S;
1138 } else {
1139 return latency + 5 * Latency::BRANCH + 2 * CompareF32Latency() +
1140 Latency::MFC1 + 1 + Latency::MOV_S;
1141 }
1142}
1143
1145 // Estimated max.
1146 int latency = CompareIsNanF64Latency() + Latency::BRANCH;
1147 if (kArchVariant >= kMips64r6) {
1148 return latency + Latency::MAX_D;
1149 } else {
1150 return latency + 5 * Latency::BRANCH + 2 * CompareF64Latency() +
1151 Latency::DMFC1 + Latency::MOV_D;
1152 }
1153}
1154
1156 // Estimated max.
1157 int latency = CompareIsNanF32Latency() + Latency::BRANCH;
1158 if (kArchVariant >= kMips64r6) {
1159 return latency + Latency::MIN_S;
1160 } else {
1161 return latency + 5 * Latency::BRANCH + 2 * CompareF32Latency() +
1162 Latency::MFC1 + 1 + Latency::MOV_S;
1163 }
1164}
1165
1167 // Estimated max.
1168 int latency = CompareIsNanF64Latency() + Latency::BRANCH;
1169 if (kArchVariant >= kMips64r6) {
1170 return latency + Latency::MIN_D;
1171 } else {
1172 return latency + 5 * Latency::BRANCH + 2 * CompareF32Latency() +
1173 Latency::DMFC1 + Latency::MOV_D;
1174 }
1175}
1176
1177int TruncLSLatency(bool load_status) {
1178 int latency = Latency::TRUNC_L_S + Latency::DMFC1;
1179 if (load_status) {
1180 latency += SltuLatency() + 7;
1181 }
1182 return latency;
1183}
1184
1185int TruncLDLatency(bool load_status) {
1186 int latency = Latency::TRUNC_L_D + Latency::DMFC1;
1187 if (load_status) {
1188 latency += SltuLatency() + 7;
1189 }
1190 return latency;
1191}
1192
1194 // Estimated max.
1195 return 2 * CompareF32Latency() + CompareIsNanF32Latency() +
1196 4 * Latency::BRANCH + Latency::SUB_S + 2 * Latency::TRUNC_L_S +
1197 3 * Latency::DMFC1 + OrLatency() + Latency::MTC1 + Latency::MOV_S +
1198 SltuLatency() + 4;
1199}
1200
1202 // Estimated max.
1203 return 2 * CompareF64Latency() + CompareIsNanF64Latency() +
1204 4 * Latency::BRANCH + Latency::SUB_D + 2 * Latency::TRUNC_L_D +
1205 3 * Latency::DMFC1 + OrLatency() + Latency::DMTC1 + Latency::MOV_D +
1206 SltuLatency() + 4;
1207}
1208
1210
// Byte swap with sign extension takes two instructions.
int ByteSwapSignedLatency() { return 2; }
1212
1214 bool is_one_instruction =
1215 (kArchVariant == kMips64r6) ? is_int9(offset) : is_int16(offset);
1216 if (is_one_instruction) {
1217 return 1;
1218 } else {
1219 return 3;
1220 }
1221}
1222
1223int ExtractBitsLatency(bool sign_extend, int size) {
1224 int latency = 2;
1225 if (sign_extend) {
1226 switch (size) {
1227 case 8:
1228 case 16:
1229 case 32:
1230 latency += 1;
1231 break;
1232 default:
1233 UNREACHABLE();
1234 }
1235 }
1236 return latency;
1237}
1238
1239int InsertBitsLatency() { return 2 + DsubuLatency(false) + 2; }
1240
1242 bool is_one_instruction =
1243 (kArchVariant == kMips64r6) ? is_int9(offset) : is_int16(offset);
1244 if (is_one_instruction) {
1245 return 1;
1246 } else {
1247 return 3;
1248 }
1249}
1250
1251int Word32AtomicExchangeLatency(bool sign_extend, int size) {
1252 return DadduLatency(false) + 1 + DsubuLatency() + 2 + LlLatency(0) +
1253 ExtractBitsLatency(sign_extend, size) + InsertBitsLatency() +
1254 ScLatency(0) + BranchShortLatency() + 1;
1255}
1256
1257int Word32AtomicCompareExchangeLatency(bool sign_extend, int size) {
1258 return 2 + DsubuLatency() + 2 + LlLatency(0) +
1259 ExtractBitsLatency(sign_extend, size) + InsertBitsLatency() +
1260 ScLatency(0) + BranchShortLatency() + 1;
1261}
1262
1264 // Basic latency modeling for MIPS64 instructions. They have been determined
1265 // in empirical way.
1266 switch (instr->arch_opcode()) {
1267 case kArchCallCodeObject:
1268#if V8_ENABLE_WEBASSEMBLY
1269 case kArchCallWasmFunction:
1270#endif // V8_ENABLE_WEBASSEMBLY
1271 return CallLatency();
1272 case kArchTailCallCodeObject:
1273#if V8_ENABLE_WEBASSEMBLY
1274 case kArchTailCallWasm:
1275#endif // V8_ENABLE_WEBASSEMBLY
1276 case kArchTailCallAddress:
1277 return JumpLatency();
1278 case kArchCallJSFunction: {
1279 int latency = 0;
1280 if (v8_flags.debug_code) {
1281 latency = 1 + AssertLatency();
1282 }
1283 return latency + 1 + DadduLatency(false) + CallLatency();
1284 }
1285 case kArchPrepareCallCFunction:
1287 case kArchSaveCallerRegisters: {
1288 auto fp_mode =
1289 static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
1290 return PushCallerSavedLatency(fp_mode);
1291 }
1292 case kArchRestoreCallerRegisters: {
1293 auto fp_mode =
1294 static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
1295 return PopCallerSavedLatency(fp_mode);
1296 }
1297 case kArchPrepareTailCall:
1298 return 2;
1299 case kArchCallCFunction:
1300 return CallCFunctionLatency();
1301 case kArchJmp:
1302 return AssembleArchJumpLatency();
1303 case kArchTableSwitch:
1305 case kArchAbortCSADcheck:
1306 return CallLatency() + 1;
1307 case kArchDebugBreak:
1308 return 1;
1309 case kArchComment:
1310 case kArchNop:
1311 case kArchThrowTerminator:
1312 case kArchDeoptimize:
1313 return 0;
1314 case kArchRet:
1315 return AssemblerReturnLatency();
1316 case kArchFramePointer:
1317 return 1;
1318 case kArchParentFramePointer:
1319 // Estimated max.
1320 return AlignedMemoryLatency();
1321 case kArchTruncateDoubleToI:
1323 case kArchStoreWithWriteBarrier:
1324 return DadduLatency() + 1 + CheckPageFlagLatency();
1325 case kArchStackSlot:
1326 // Estimated max.
1327 return DadduLatency(false) + AndLatency(false) + AssertLatency() +
1328 DadduLatency(false) + AndLatency(false) + BranchShortLatency() +
1329 1 + DsubuLatency() + DadduLatency();
1330 case kIeee754Float64Acos:
1331 case kIeee754Float64Acosh:
1332 case kIeee754Float64Asin:
1333 case kIeee754Float64Asinh:
1334 case kIeee754Float64Atan:
1335 case kIeee754Float64Atanh:
1336 case kIeee754Float64Atan2:
1337 case kIeee754Float64Cos:
1338 case kIeee754Float64Cosh:
1339 case kIeee754Float64Cbrt:
1340 case kIeee754Float64Exp:
1341 case kIeee754Float64Expm1:
1342 case kIeee754Float64Log:
1343 case kIeee754Float64Log1p:
1344 case kIeee754Float64Log10:
1345 case kIeee754Float64Log2:
1346 case kIeee754Float64Pow:
1347 case kIeee754Float64Sin:
1348 case kIeee754Float64Sinh:
1349 case kIeee754Float64Tan:
1350 case kIeee754Float64Tanh:
1353 case kMips64Add:
1354 case kMips64Dadd:
1355 return DadduLatency(instr->InputAt(1)->IsRegister());
1356 case kMips64DaddOvf:
1357 return DaddOverflowLatency();
1358 case kMips64Sub:
1359 case kMips64Dsub:
1360 return DsubuLatency(instr->InputAt(1)->IsRegister());
1361 case kMips64DsubOvf:
1362 return DsubOverflowLatency();
1363 case kMips64Mul:
1364 return MulLatency();
1365 case kMips64MulOvf:
1366 case kMips64DMulOvf:
1367 return MulOverflowLatency();
1368 case kMips64MulHigh:
1369 return MulhLatency();
1370 case kMips64MulHighU:
1371 return MulhuLatency();
1372 case kMips64DMulHigh:
1373 return DMulhLatency();
1374 case kMips64Div: {
1375 int latency = DivLatency(instr->InputAt(1)->IsRegister());
1376 if (kArchVariant >= kMips64r6) {
1377 return latency++;
1378 } else {
1379 return latency + MovzLatency();
1380 }
1381 }
1382 case kMips64DivU: {
1383 int latency = DivuLatency(instr->InputAt(1)->IsRegister());
1384 if (kArchVariant >= kMips64r6) {
1385 return latency++;
1386 } else {
1387 return latency + MovzLatency();
1388 }
1389 }
1390 case kMips64Mod:
1391 return ModLatency();
1392 case kMips64ModU:
1393 return ModuLatency();
1394 case kMips64Dmul:
1395 return DmulLatency();
1396 case kMips64Ddiv: {
1397 int latency = DdivLatency();
1398 if (kArchVariant >= kMips64r6) {
1399 return latency++;
1400 } else {
1401 return latency + MovzLatency();
1402 }
1403 }
1404 case kMips64DdivU: {
1405 int latency = DdivuLatency();
1406 if (kArchVariant >= kMips64r6) {
1407 return latency++;
1408 } else {
1409 return latency + MovzLatency();
1410 }
1411 }
1412 case kMips64Dmod:
1413 return DmodLatency();
1414 case kMips64DmodU:
1415 return DmoduLatency();
1416 case kMips64Dlsa:
1417 case kMips64Lsa:
1418 return DlsaLatency();
1419 case kMips64And:
1420 return AndLatency(instr->InputAt(1)->IsRegister());
1421 case kMips64And32: {
1422 bool is_operand_register = instr->InputAt(1)->IsRegister();
1423 int latency = AndLatency(is_operand_register);
1424 if (is_operand_register) {
1425 return latency + 2;
1426 } else {
1427 return latency + 1;
1428 }
1429 }
1430 case kMips64Or:
1431 return OrLatency(instr->InputAt(1)->IsRegister());
1432 case kMips64Or32: {
1433 bool is_operand_register = instr->InputAt(1)->IsRegister();
1434 int latency = OrLatency(is_operand_register);
1435 if (is_operand_register) {
1436 return latency + 2;
1437 } else {
1438 return latency + 1;
1439 }
1440 }
1441 case kMips64Nor:
1442 return NorLatency(instr->InputAt(1)->IsRegister());
1443 case kMips64Nor32: {
1444 bool is_operand_register = instr->InputAt(1)->IsRegister();
1445 int latency = NorLatency(is_operand_register);
1446 if (is_operand_register) {
1447 return latency + 2;
1448 } else {
1449 return latency + 1;
1450 }
1451 }
1452 case kMips64Xor:
1453 return XorLatency(instr->InputAt(1)->IsRegister());
1454 case kMips64Xor32: {
1455 bool is_operand_register = instr->InputAt(1)->IsRegister();
1456 int latency = XorLatency(is_operand_register);
1457 if (is_operand_register) {
1458 return latency + 2;
1459 } else {
1460 return latency + 1;
1461 }
1462 }
1463 case kMips64Clz:
1464 case kMips64Dclz:
1465 return DclzLatency();
1466 case kMips64Ctz:
1467 return CtzLatency();
1468 case kMips64Dctz:
1469 return DctzLatency();
1470 case kMips64Popcnt:
1471 return PopcntLatency();
1472 case kMips64Dpopcnt:
1473 return DpopcntLatency();
1474 case kMips64Shl:
1475 return 1;
1476 case kMips64Shr:
1477 case kMips64Sar:
1478 return 2;
1479 case kMips64Ext:
1480 case kMips64Ins:
1481 case kMips64Dext:
1482 case kMips64Dins:
1483 case kMips64Dshl:
1484 case kMips64Dshr:
1485 case kMips64Dsar:
1486 case kMips64Ror:
1487 case kMips64Dror:
1488 return 1;
1489 case kMips64Tst:
1490 return AndLatency(instr->InputAt(1)->IsRegister());
1491 case kMips64Mov:
1492 return 1;
1493 case kMips64CmpS:
1494 return MoveLatency() + CompareF32Latency();
1495 case kMips64AddS:
1496 return Latency::ADD_S;
1497 case kMips64SubS:
1498 return Latency::SUB_S;
1499 case kMips64MulS:
1500 return Latency::MUL_S;
1501 case kMips64DivS:
1502 return Latency::DIV_S;
1503 case kMips64AbsS:
1504 return Latency::ABS_S;
1505 case kMips64NegS:
1506 return NegdLatency();
1507 case kMips64SqrtS:
1508 return Latency::SQRT_S;
1509 case kMips64MaxS:
1510 return Latency::MAX_S;
1511 case kMips64MinS:
1512 return Latency::MIN_S;
1513 case kMips64CmpD:
1514 return MoveLatency() + CompareF64Latency();
1515 case kMips64AddD:
1516 return Latency::ADD_D;
1517 case kMips64SubD:
1518 return Latency::SUB_D;
1519 case kMips64MulD:
1520 return Latency::MUL_D;
1521 case kMips64DivD:
1522 return Latency::DIV_D;
1523 case kMips64ModD:
1526 case kMips64AbsD:
1527 return Latency::ABS_D;
1528 case kMips64NegD:
1529 return NegdLatency();
1530 case kMips64SqrtD:
1531 return Latency::SQRT_D;
1532 case kMips64MaxD:
1533 return Latency::MAX_D;
1534 case kMips64MinD:
1535 return Latency::MIN_D;
1536 case kMips64Float64RoundDown:
1537 case kMips64Float64RoundTruncate:
1538 case kMips64Float64RoundUp:
1539 case kMips64Float64RoundTiesEven:
1540 return Float64RoundLatency();
1541 case kMips64Float32RoundDown:
1542 case kMips64Float32RoundTruncate:
1543 case kMips64Float32RoundUp:
1544 case kMips64Float32RoundTiesEven:
1545 return Float32RoundLatency();
1546 case kMips64Float32Max:
1547 return Float32MaxLatency();
1548 case kMips64Float64Max:
1549 return Float64MaxLatency();
1550 case kMips64Float32Min:
1551 return Float32MinLatency();
1552 case kMips64Float64Min:
1553 return Float64MinLatency();
1554 case kMips64Float64SilenceNaN:
1555 return Latency::SUB_D;
1556 case kMips64CvtSD:
1557 return Latency::CVT_S_D;
1558 case kMips64CvtDS:
1559 return Latency::CVT_D_S;
1560 case kMips64CvtDW:
1561 return Latency::MTC1 + Latency::CVT_D_W;
1562 case kMips64CvtSW:
1563 return Latency::MTC1 + Latency::CVT_S_W;
1564 case kMips64CvtSUw:
1565 return 1 + Latency::DMTC1 + Latency::CVT_S_L;
1566 case kMips64CvtSL:
1567 return Latency::DMTC1 + Latency::CVT_S_L;
1568 case kMips64CvtDL:
1569 return Latency::DMTC1 + Latency::CVT_D_L;
1570 case kMips64CvtDUw:
1571 return 1 + Latency::DMTC1 + Latency::CVT_D_L;
1572 case kMips64CvtDUl:
1573 return 2 * Latency::BRANCH + 3 + 2 * Latency::DMTC1 +
1574 2 * Latency::CVT_D_L + Latency::ADD_D;
1575 case kMips64CvtSUl:
1576 return 2 * Latency::BRANCH + 3 + 2 * Latency::DMTC1 +
1577 2 * Latency::CVT_S_L + Latency::ADD_S;
1578 case kMips64FloorWD:
1579 return Latency::FLOOR_W_D + Latency::MFC1;
1580 case kMips64CeilWD:
1581 return Latency::CEIL_W_D + Latency::MFC1;
1582 case kMips64RoundWD:
1583 return Latency::ROUND_W_D + Latency::MFC1;
1584 case kMips64TruncWD:
1585 return Latency::TRUNC_W_D + Latency::MFC1;
1586 case kMips64FloorWS:
1587 return Latency::FLOOR_W_S + Latency::MFC1;
1588 case kMips64CeilWS:
1589 return Latency::CEIL_W_S + Latency::MFC1;
1590 case kMips64RoundWS:
1591 return Latency::ROUND_W_S + Latency::MFC1;
1592 case kMips64TruncWS:
1593 return Latency::TRUNC_W_S + Latency::MFC1 + 2 + MovnLatency();
1594 case kMips64TruncLS:
1595 return TruncLSLatency(instr->OutputCount() > 1);
1596 case kMips64TruncLD:
1597 return TruncLDLatency(instr->OutputCount() > 1);
1598 case kMips64TruncUwD:
1599 // Estimated max.
1600 return CompareF64Latency() + 2 * Latency::BRANCH +
1601 2 * Latency::TRUNC_W_D + Latency::SUB_D + OrLatency() +
1602 Latency::MTC1 + Latency::MFC1 + Latency::MTHC1 + 1;
1603 case kMips64TruncUwS:
1604 // Estimated max.
1605 return CompareF32Latency() + 2 * Latency::BRANCH +
1606 2 * Latency::TRUNC_W_S + Latency::SUB_S + OrLatency() +
1607 Latency::MTC1 + 2 * Latency::MFC1 + 2 + MovzLatency();
1608 case kMips64TruncUlS:
1609 return TruncUlSLatency();
1610 case kMips64TruncUlD:
1611 return TruncUlDLatency();
1612 case kMips64BitcastDL:
1613 return Latency::DMFC1;
1614 case kMips64BitcastLD:
1615 return Latency::DMTC1;
1616 case kMips64Float64ExtractLowWord32:
1617 return Latency::MFC1;
1618 case kMips64Float64InsertLowWord32:
1619 return Latency::MFHC1 + Latency::MTC1 + Latency::MTHC1;
1620 case kMips64Float64FromWord32Pair:
1621 return Latency::MTC1 + Latency::MTHC1;
1622 case kMips64Float64ExtractHighWord32:
1623 return Latency::MFHC1;
1624 case kMips64Float64InsertHighWord32:
1625 return Latency::MTHC1;
1626 case kMips64Seb:
1627 case kMips64Seh:
1628 return 1;
1629 case kMips64Lbu:
1630 case kMips64Lb:
1631 case kMips64Lhu:
1632 case kMips64Lh:
1633 case kMips64Lwu:
1634 case kMips64Lw:
1635 case kMips64Ld:
1636 case kMips64Sb:
1637 case kMips64Sh:
1638 case kMips64Sw:
1639 case kMips64Sd:
1640 return AlignedMemoryLatency();
1641 case kMips64Lwc1:
1642 return Lwc1Latency();
1643 case kMips64Ldc1:
1644 return Ldc1Latency();
1645 case kMips64Swc1:
1646 return Swc1Latency();
1647 case kMips64Sdc1:
1648 return Sdc1Latency();
1649 case kMips64Ulhu:
1650 case kMips64Ulh:
1651 return UlhuLatency();
1652 case kMips64Ulwu:
1653 return UlwuLatency();
1654 case kMips64Ulw:
1655 return UlwLatency();
1656 case kMips64Uld:
1657 return UldLatency();
1658 case kMips64Ulwc1:
1659 return Ulwc1Latency();
1660 case kMips64Uldc1:
1661 return Uldc1Latency();
1662 case kMips64Ush:
1663 return UshLatency();
1664 case kMips64Usw:
1665 return UswLatency();
1666 case kMips64Usd:
1667 return UsdLatency();
1668 case kMips64Uswc1:
1669 return Uswc1Latency();
1670 case kMips64Usdc1:
1671 return Usdc1Latency();
1672 case kMips64Push: {
1673 int latency = 0;
1674 if (instr->InputAt(0)->IsFPRegister()) {
1675 latency = Sdc1Latency() + DsubuLatency(false);
1676 } else {
1677 latency = PushLatency();
1678 }
1679 return latency;
1680 }
1681 case kMips64Peek: {
1682 int latency = 0;
1683 if (instr->OutputAt(0)->IsFPRegister()) {
1684 auto op = LocationOperand::cast(instr->OutputAt(0));
1685 switch (op->representation()) {
1687 latency = Ldc1Latency();
1688 break;
1690 latency = Latency::LWC1;
1691 break;
1692 default:
1693 UNREACHABLE();
1694 }
1695 } else {
1696 latency = AlignedMemoryLatency();
1697 }
1698 return latency;
1699 }
1700 case kMips64StackClaim:
1701 return DsubuLatency(false);
1702 case kMips64StoreToStackSlot: {
1703 int latency = 0;
1704 if (instr->InputAt(0)->IsFPRegister()) {
1705 if (instr->InputAt(0)->IsSimd128Register()) {
1706 latency = 1; // Estimated value.
1707 } else {
1708 latency = Sdc1Latency();
1709 }
1710 } else {
1711 latency = AlignedMemoryLatency();
1712 }
1713 return latency;
1714 }
1715 case kMips64ByteSwap64:
1716 return ByteSwapSignedLatency();
1717 case kMips64ByteSwap32:
1718 return ByteSwapSignedLatency();
1719 case kAtomicLoadInt8:
1720 case kAtomicLoadUint8:
1721 case kAtomicLoadInt16:
1722 case kAtomicLoadUint16:
1723 case kAtomicLoadWord32:
1724 return 2;
1725 case kAtomicStoreWord8:
1726 case kAtomicStoreWord16:
1727 case kAtomicStoreWord32:
1728 return 3;
1729 case kAtomicExchangeInt8:
1730 return Word32AtomicExchangeLatency(true, 8);
1731 case kAtomicExchangeUint8:
1732 return Word32AtomicExchangeLatency(false, 8);
1733 case kAtomicExchangeInt16:
1734 return Word32AtomicExchangeLatency(true, 16);
1735 case kAtomicExchangeUint16:
1736 return Word32AtomicExchangeLatency(false, 16);
1737 case kAtomicExchangeWord32:
1738 return 2 + LlLatency(0) + 1 + ScLatency(0) + BranchShortLatency() + 1;
1739 case kAtomicCompareExchangeInt8:
1740 return Word32AtomicCompareExchangeLatency(true, 8);
1741 case kAtomicCompareExchangeUint8:
1742 return Word32AtomicCompareExchangeLatency(false, 8);
1743 case kAtomicCompareExchangeInt16:
1744 return Word32AtomicCompareExchangeLatency(true, 16);
1745 case kAtomicCompareExchangeUint16:
1746 return Word32AtomicCompareExchangeLatency(false, 16);
1747 case kAtomicCompareExchangeWord32:
1748 return 3 + LlLatency(0) + BranchShortLatency() + 1 + ScLatency(0) +
1749 BranchShortLatency() + 1;
1750 case kMips64AssertEqual:
1751 return AssertLatency();
1752 default:
1753 return 1;
1754 }
1755}
1756
1757} // namespace compiler
1758} // namespace internal
1759} // namespace v8
static constexpr T decode(U value)
Definition bit-field.h:66
static int ActivationFrameAlignment()
int GetTargetInstructionFlags(const Instruction *instr) const
static int GetInstructionLatency(const Instruction *instr)
static LocationOperand * cast(InstructionOperand *op)
@ kMips64r6
static const ArchVariants kArchVariant
#define COMMON_ARCH_OPCODE_LIST(V)
int32_t offset
Instruction * instr
int NorLatency(bool is_operand_register=true)
int MulLatency(bool is_operand_register=true)
int MulhuLatency(bool is_operand_register=true)
int DsubuLatency(bool is_operand_register=true)
int MulhLatency(bool is_operand_register=true)
int Word32AtomicExchangeLatency(bool sign_extend, int size)
int ExtractBitsLatency(bool sign_extend, int size)
int ModuLatency(bool is_operand_register=true)
int DdivLatency(bool is_operand_register=true)
int DivLatency(bool is_operand_register=true)
int OrLatency(bool is_operand_register=true)
int DmoduLatency(bool is_operand_register=true)
int DmulLatency(bool is_operand_register=true)
int DivuLatency(bool is_operand_register=true)
int DmodLatency(bool is_operand_register=true)
int PopCallerSavedLatency(SaveFPRegsMode fp_mode)
int XorLatency(bool is_operand_register=true)
int DMulhLatency(bool is_operand_register=true)
int DadduLatency(bool is_operand_register=true)
int DdivuLatency(bool is_operand_register=true)
int AndLatency(bool is_operand_register=true)
int PushCallerSavedLatency(SaveFPRegsMode fp_mode)
int Word32AtomicCompareExchangeLatency(bool sign_extend, int size)
int SltuLatency(bool is_operand_register=true)
int ModLatency(bool is_operand_register=true)
constexpr int kSystemPointerSize
Definition globals.h:410
V8_EXPORT_PRIVATE FlagValues v8_flags
constexpr int kNumRegisters