int64-lowering-reducer.h
// Copyright 2023 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_TURBOSHAFT_INT64_LOWERING_REDUCER_H_
#define V8_COMPILER_TURBOSHAFT_INT64_LOWERING_REDUCER_H_

#if !V8_ENABLE_WEBASSEMBLY
#error This header should only be included if WebAssembly is enabled.
#endif  // !V8_ENABLE_WEBASSEMBLY

#include "src/compiler/turboshaft/assembler.h"
#include "src/compiler/turboshaft/index.h"
#include "src/compiler/turboshaft/operations.h"
#include "src/compiler/turboshaft/phase.h"
#include "src/compiler/wasm-call-descriptors.h"
#include "src/compiler/wasm-compiler.h"
#include "src/wasm/wasm-engine.h"
#include "src/zone/zone-containers.h"

namespace v8::internal::compiler::turboshaft {

#include "src/compiler/turboshaft/define-assembler-macros.inc"
// This reducer is run on 32 bit platforms to lower unsupported 64 bit integer
// operations to supported 32 bit operations.
template <class Next>
class Int64LoweringReducer : public Next {
 public:
  TURBOSHAFT_REDUCER_BOILERPLATE(Int64Lowering)

  // We could use V<Union<Word32, Word32Pair>> for Word32OrWord32Pair instead
  // of OpIndex, but this would clash with the expected return types of
  // ReduceWordBinop/ReduceShift/etc.
  using Word32OrWord32Pair = OpIndex;

  Int64LoweringReducer() {
    wasm::CallOrigin origin = __ data()->is_js_to_wasm()
                                  ? wasm::kCalledFromJS
                                  : wasm::kCalledFromWasm;
    // To compute the machine signature, it doesn't matter whether types
    // are canonicalized, just use whichever signature is present (functions
    // will have one and wrappers the other).
    if (__ data()->wasm_module_sig()) {
      sig_ =
          CreateMachineSignature(zone_, __ data()->wasm_module_sig(), origin);
    } else {
      sig_ = CreateMachineSignature(zone_, __ data()->wasm_canonical_sig(),
                                    origin);
    }

    InitializeIndexMaps();
  }

  Word32OrWord32Pair REDUCE(WordBinop)(Word32OrWord32Pair left,
                                       Word32OrWord32Pair right,
                                       WordBinopOp::Kind kind,
                                       WordRepresentation rep) {
    if (rep == WordRepresentation::Word64()) {
      V<Word32Pair> left_pair = V<Word32Pair>::Cast(left);
      V<Word32Pair> right_pair = V<Word32Pair>::Cast(right);
      switch (kind) {
        case WordBinopOp::Kind::kAdd:
          return LowerPairBinOp(left_pair, right_pair,
                                Word32PairBinopOp::Kind::kAdd);
        case WordBinopOp::Kind::kSub:
          return LowerPairBinOp(left_pair, right_pair,
                                Word32PairBinopOp::Kind::kSub);
        case WordBinopOp::Kind::kMul:
          return LowerPairBinOp(left_pair, right_pair,
                                Word32PairBinopOp::Kind::kMul);
        case WordBinopOp::Kind::kBitwiseAnd:
          return LowerBitwiseAnd(left_pair, right_pair);
        case WordBinopOp::Kind::kBitwiseOr:
          return LowerBitwiseOr(left_pair, right_pair);
        case WordBinopOp::Kind::kBitwiseXor:
          return LowerBitwiseXor(left_pair, right_pair);
        default:
          FATAL("WordBinopOp kind %d not supported by int64 lowering",
                static_cast<int>(kind));
      }
    }
    return Next::ReduceWordBinop(left, right, kind, rep);
  }
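
  // LowerPairBinOp (below) feeds both halves into a single Word32PairBinop,
  // which performs the full 64-bit operation on (low, high) pairs. E.g. for
  // kAdd, 0x00000000'FFFFFFFF + 1 = 0x00000001'00000000: the carry out of
  // the low words propagates into the high words.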

  Word32OrWord32Pair REDUCE(Shift)(Word32OrWord32Pair left, V<Word32> right,
                                   ShiftOp::Kind kind, WordRepresentation rep) {
    if (rep == WordRepresentation::Word64()) {
      V<Word32Pair> left_pair = V<Word32Pair>::Cast(left);
      switch (kind) {
        case ShiftOp::Kind::kShiftLeft:
          return LowerPairShiftOp(left_pair, right,
                                  Word32PairBinopOp::Kind::kShiftLeft);
        case ShiftOp::Kind::kShiftRightArithmetic:
          return LowerPairShiftOp(
              left_pair, right,
              Word32PairBinopOp::Kind::kShiftRightArithmetic);
        case ShiftOp::Kind::kShiftRightLogical:
          return LowerPairShiftOp(left_pair, right,
                                  Word32PairBinopOp::Kind::kShiftRightLogical);
        case ShiftOp::Kind::kRotateRight:
          return LowerRotateRight(left_pair, right);
        default:
          FATAL("ShiftOp kind %d not supported by int64 lowering",
                static_cast<int>(kind));
      }
    }
    return Next::ReduceShift(left, right, kind, rep);
  }
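
  // The count of a lowered 64-bit shift is a single Word32 (see
  // LowerPairShiftOp below); as in wasm, only the count modulo 64 is
  // significant, e.g. shifting by 65 behaves like shifting by 1.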

  V<Word32> REDUCE(Comparison)(V<Any> left, V<Any> right,
                               ComparisonOp::Kind kind,
                               RegisterRepresentation rep) {
    if (rep != WordRepresentation::Word64()) {
      return Next::ReduceComparison(left, right, kind, rep);
    }

    auto [left_low, left_high] = Unpack(V<Word32Pair>::Cast(left));
    auto [right_low, right_high] = Unpack(V<Word32Pair>::Cast(right));
    V<Word32> high_comparison;
    V<Word32> low_comparison;
    switch (kind) {
      case ComparisonOp::Kind::kEqual:
        // TODO(wasm): Use explicit comparisons and && here?
        return __ Word32Equal(
            __ Word32BitwiseOr(__ Word32BitwiseXor(left_low, right_low),
                               __ Word32BitwiseXor(left_high, right_high)),
            0);
      case ComparisonOp::Kind::kSignedLessThan:
        high_comparison = __ Int32LessThan(left_high, right_high);
        low_comparison = __ Uint32LessThan(left_low, right_low);
        break;
      case ComparisonOp::Kind::kSignedLessThanOrEqual:
        high_comparison = __ Int32LessThan(left_high, right_high);
        low_comparison = __ Uint32LessThanOrEqual(left_low, right_low);
        break;
      case ComparisonOp::Kind::kUnsignedLessThan:
        high_comparison = __ Uint32LessThan(left_high, right_high);
        low_comparison = __ Uint32LessThan(left_low, right_low);
        break;
      case ComparisonOp::Kind::kUnsignedLessThanOrEqual:
        high_comparison = __ Uint32LessThan(left_high, right_high);
        low_comparison = __ Uint32LessThanOrEqual(left_low, right_low);
        break;
    }

    return __ Word32BitwiseOr(
        high_comparison,
        __ Word32BitwiseAnd(__ Word32Equal(left_high, right_high),
                            low_comparison));
  }
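
  // The lowering above is the standard lexicographic decomposition:
  //   a < b  <=>  high(a) < high(b)
  //          ||  (high(a) == high(b) && low(a) < low(b))
  // where the high-word comparison keeps the signedness of the original
  // comparison and the low words always compare unsigned.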

  V<Any> REDUCE(Call)(V<CallTarget> callee, OptionalV<FrameState> frame_state,
                      base::Vector<const OpIndex> arguments,
                      const TSCallDescriptor* descriptor, OpEffects effects) {
    const bool is_tail_call = false;
    return LowerCall(callee, frame_state, arguments, descriptor, effects,
                     is_tail_call);
  }

  OpIndex REDUCE(TailCall)(OpIndex callee,
                           base::Vector<const OpIndex> arguments,
                           const TSCallDescriptor* descriptor) {
    const bool is_tail_call = true;
    OptionalV<FrameState> frame_state = OptionalV<FrameState>::Nullopt();
    return LowerCall(callee, frame_state, arguments, descriptor,
                     OpEffects().CanCallAnything(), is_tail_call);
  }

  OpIndex REDUCE(Constant)(ConstantOp::Kind kind, ConstantOp::Storage value) {
    if (kind == ConstantOp::Kind::kWord64) {
      uint32_t high = value.integral >> 32;
      uint32_t low = value.integral & std::numeric_limits<uint32_t>::max();
      return __ Tuple(__ Word32Constant(low), __ Word32Constant(high));
    }
    return Next::ReduceConstant(kind, value);
  }
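
  // Example: the i64 constant 0x00000001'00000005 is lowered to
  // Tuple(Word32Constant(5), Word32Constant(1)): low word first, high second.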

  OpIndex REDUCE(Parameter)(int32_t parameter_index, RegisterRepresentation rep,
                            const char* debug_name = "") {
    int32_t param_count = static_cast<int32_t>(sig_->parameter_count());
    // Handle special indices (closure, context).
    if (parameter_index < 0) {
      return Next::ReduceParameter(parameter_index, rep, debug_name);
    }
    if (parameter_index > param_count) {
      // Parameters past the signature only need to be shifted by the number
      // of i64 parameters, each of which was split into two i32 parameters.
      int param_offset =
          std::count(sig_->parameters().begin(), sig_->parameters().end(),
                     MachineRepresentation::kWord64);
      return Next::ReduceParameter(parameter_index + param_offset, rep,
                                   debug_name);
    }
    int32_t new_index = param_index_map_[parameter_index];
    if (rep == RegisterRepresentation::Word64()) {
      rep = RegisterRepresentation::Word32();
      return __ Tuple(Next::ReduceParameter(new_index, rep),
                      Next::ReduceParameter(new_index + 1, rep));
    }
    return Next::ReduceParameter(new_index, rep, debug_name);
  }

  V<None> REDUCE(Return)(V<Word32> pop_count,
                         base::Vector<const OpIndex> return_values,
                         bool spill_caller_frame_slots) {
    if (!returns_i64_) {
      return Next::ReduceReturn(pop_count, return_values,
                                spill_caller_frame_slots);
    }
    base::SmallVector<OpIndex, 8> lowered_values;
    for (size_t i = 0; i < sig_->return_count(); ++i) {
      if (sig_->GetReturn(i) == MachineRepresentation::kWord64) {
        auto [low, high] = Unpack(return_values[i]);
        lowered_values.push_back(low);
        lowered_values.push_back(high);
      } else {
        lowered_values.push_back(return_values[i]);
      }
    }
    return Next::ReduceReturn(pop_count, base::VectorOf(lowered_values),
                              spill_caller_frame_slots);
  }

  Word32OrWord32Pair REDUCE(WordUnary)(Word32OrWord32Pair input,
                                       WordUnaryOp::Kind kind,
                                       WordRepresentation rep) {
    if (rep == RegisterRepresentation::Word64()) {
      V<Word32Pair> input_pair = V<Word32Pair>::Cast(input);
      switch (kind) {
        case WordUnaryOp::Kind::kCountLeadingZeros:
          return LowerClz(input_pair);
        case WordUnaryOp::Kind::kCountTrailingZeros:
          return LowerCtz(input_pair);
        case WordUnaryOp::Kind::kPopCount:
          return LowerPopCount(input_pair);
        case WordUnaryOp::Kind::kSignExtend8:
          return LowerSignExtend(
              __ Word32SignExtend8(Unpack(input_pair).first));
        case WordUnaryOp::Kind::kSignExtend16:
          return LowerSignExtend(
              __ Word32SignExtend16(Unpack(input_pair).first));
        case WordUnaryOp::Kind::kReverseBytes: {
          auto [low, high] = Unpack(input_pair);
          V<Word32> reversed_low = __ Word32ReverseBytes(low);
          V<Word32> reversed_high = __ Word32ReverseBytes(high);
          return __ Tuple(reversed_high, reversed_low);
        }
        default:
          FATAL("WordUnaryOp kind %d not supported by int64 lowering",
                static_cast<int>(kind));
      }
    }
    return Next::ReduceWordUnary(input, kind, rep);
  }

  OpIndex REDUCE(Change)(OpIndex input, ChangeOp::Kind kind,
                         ChangeOp::Assumption assumption,
                         RegisterRepresentation from,
                         RegisterRepresentation to) {
    auto word32 = RegisterRepresentation::Word32();
    auto word64 = RegisterRepresentation::Word64();
    auto float64 = RegisterRepresentation::Float64();
    using Kind = ChangeOp::Kind;
    if (from != word64 && to != word64) {
      return Next::ReduceChange(input, kind, assumption, from, to);
    }

    if (from == word32 && to == word64) {
      if (kind == Kind::kZeroExtend) {
        return __ Tuple(V<Word32>::Cast(input), __ Word32Constant(0));
      }
      if (kind == Kind::kSignExtend) {
        return LowerSignExtend(input);
      }
    }
    if (from == float64 && to == word64) {
      if (kind == Kind::kBitcast) {
        return __ Tuple(__ Float64ExtractLowWord32(input),
                        __ Float64ExtractHighWord32(input));
      }
    }
    if (from == word64 && to == float64) {
      if (kind == Kind::kBitcast) {
        auto input_w32p = V<Word32Pair>::Cast(input);
        return __ BitcastWord32PairToFloat64(
            __ template Projection<1>(input_w32p),
            __ template Projection<0>(input_w32p));
      }
    }
    if (from == word64 && to == word32 && kind == Kind::kTruncate) {
      auto input_w32p = V<Word32Pair>::Cast(input);
      return __ template Projection<0>(input_w32p);
    }
    std::stringstream str;
    str << "ChangeOp " << kind << " from " << from << " to " << to
        << " not supported by int64 lowering";
    FATAL("%s", str.str().c_str());
  }
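
  // Bitcast example: the f64 1.0 has bit pattern 0x3FF00000'00000000, so
  // lowering Bitcast(Float64 -> Word64) yields the pair
  // (low = 0x00000000, high = 0x3FF00000). Note that
  // BitcastWord32PairToFloat64 takes the high word first.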

  std::pair<OptionalV<Word32>, int32_t> IncreaseOffset(OptionalV<Word32> index,
                                                       int32_t offset,
                                                       int32_t add_offset,
                                                       bool tagged_base) {
    // Note that the offset will just wrap around. Still, we need to always
    // use an offset that is not std::numeric_limits<int32_t>::min() on tagged
    // loads.
    // TODO(dmercadier): Replace LoadOp::OffsetIsValid by taking care of this
    // special case in the LoadStoreSimplificationReducer instead.
    int32_t new_offset =
        static_cast<uint32_t>(offset) + static_cast<uint32_t>(add_offset);
    OptionalV<Word32> new_index = index;
    if (!LoadOp::OffsetIsValid(new_offset, tagged_base)) {
      // We cannot encode the new offset so we use the old offset
      // instead and use the Index to represent the extra offset.
      new_offset = offset;
      if (index.has_value()) {
        new_index = __ Word32Add(new_index.value(), add_offset);
      } else {
        new_index = __ Word32Constant(sizeof(int32_t));
      }
    }
    return {new_index, new_offset};
  }
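
  // Example: an i64 access at offset INT32_MAX - 2 would need INT32_MAX + 2
  // for its high word, which is not encodable; in that case the old offset is
  // kept and the extra 4 bytes are folded into the index instead.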

  OpIndex REDUCE(Load)(OpIndex base, OptionalOpIndex index, LoadOp::Kind kind,
                       MemoryRepresentation loaded_rep,
                       RegisterRepresentation result_rep, int32_t offset,
                       uint8_t element_scale) {
    if (kind.is_atomic) {
      if (loaded_rep == MemoryRepresentation::Int64() ||
          loaded_rep == MemoryRepresentation::Uint64()) {
        // TODO(jkummerow): Support non-zero scales in AtomicWord32PairOp, and
        // remove the corresponding bailout in MachineOptimizationReducer to
        // allow generating them.
        CHECK_EQ(element_scale, 0);
        return __ AtomicWord32PairLoad(base, index, offset);
      }
      if (result_rep == RegisterRepresentation::Word64()) {
        return __ Tuple(
            __ Load(base, index, kind, loaded_rep,
                    RegisterRepresentation::Word32(), offset, element_scale),
            __ Word32Constant(0));
      }
    }
    if (loaded_rep == MemoryRepresentation::Int64() ||
        loaded_rep == MemoryRepresentation::Uint64()) {
      auto [high_index, high_offset] =
          IncreaseOffset(index, offset, sizeof(int32_t), kind.tagged_base);
      return __ Tuple(
          Next::ReduceLoad(base, index, kind, MemoryRepresentation::Int32(),
                           RegisterRepresentation::Word32(), offset,
                           element_scale),
          Next::ReduceLoad(
              base, high_index, kind, MemoryRepresentation::Int32(),
              RegisterRepresentation::Word32(), high_offset, element_scale));
    }
    return Next::ReduceLoad(base, index, kind, loaded_rep, result_rep, offset,
                            element_scale);
  }
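
  // A non-atomic i64 load at offset k thus becomes two i32 loads at offsets k
  // (low word) and k + 4 (high word), matching the little-endian layout of
  // the 32-bit platforms this reducer targets.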

  V<None> REDUCE(Store)(OpIndex base, OptionalOpIndex index, OpIndex value,
                        StoreOp::Kind kind, MemoryRepresentation stored_rep,
                        WriteBarrierKind write_barrier, int32_t offset,
                        uint8_t element_size_log2,
                        bool maybe_initializing_or_transitioning,
                        IndirectPointerTag maybe_indirect_pointer_tag) {
    if (stored_rep == MemoryRepresentation::Int64() ||
        stored_rep == MemoryRepresentation::Uint64()) {
      auto [low, high] = Unpack(value);
      if (kind.is_atomic) {
        // TODO(jkummerow): Support non-zero scales in AtomicWord32PairOp, and
        // remove the corresponding bailout in MachineOptimizationReducer to
        // allow generating them.
        CHECK_EQ(element_size_log2, 0);
        return __ AtomicWord32PairStore(base, index, low, high, offset);
      }
      // low store
      Next::ReduceStore(base, index, low, kind, MemoryRepresentation::Int32(),
                        write_barrier, offset, element_size_log2,
                        maybe_initializing_or_transitioning,
                        maybe_indirect_pointer_tag);
      // high store
      auto [high_index, high_offset] =
          IncreaseOffset(index, offset, sizeof(int32_t), kind.tagged_base);
      Next::ReduceStore(
          base, high_index, high, kind, MemoryRepresentation::Int32(),
          write_barrier, high_offset, element_size_log2,
          maybe_initializing_or_transitioning, maybe_indirect_pointer_tag);
      return V<None>::Invalid();
    }
    return Next::ReduceStore(base, index, value, kind, stored_rep,
                             write_barrier, offset, element_size_log2,
                             maybe_initializing_or_transitioning,
                             maybe_indirect_pointer_tag);
  }

  OpIndex REDUCE(AtomicRMW)(OpIndex base, OpIndex index, OpIndex value,
                            OptionalOpIndex expected, AtomicRMWOp::BinOp bin_op,
                            RegisterRepresentation in_out_rep,
                            MemoryRepresentation memory_rep,
                            MemoryAccessKind kind) {
    if (in_out_rep != RegisterRepresentation::Word64()) {
      return Next::ReduceAtomicRMW(base, index, value, expected, bin_op,
                                   in_out_rep, memory_rep, kind);
    }
    auto [value_low, value_high] = Unpack(value);
    if (memory_rep == MemoryRepresentation::Int64() ||
        memory_rep == MemoryRepresentation::Uint64()) {
      if (bin_op == AtomicRMWOp::BinOp::kCompareExchange) {
        auto [expected_low, expected_high] = Unpack(expected.value());
        return __ AtomicWord32PairCompareExchange(
            base, index, value_low, value_high, expected_low, expected_high);
      } else {
        return __ AtomicWord32PairBinop(base, index, value_low, value_high,
                                        bin_op);
      }
    }

    OpIndex new_expected = OpIndex::Invalid();
    if (bin_op == AtomicRMWOp::BinOp::kCompareExchange) {
      auto [expected_low, expected_high] = Unpack(expected.value());
      new_expected = expected_low;
    }
    return __ Tuple(Next::ReduceAtomicRMW(
                        base, index, value_low, new_expected, bin_op,
                        RegisterRepresentation::Word32(), memory_rep, kind),
                    __ Word32Constant(0));
  }
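
  // For i64 RMW ops on narrower memory (e.g. i64.atomic.rmw8.add_u), only the
  // low word participates: the loaded value is zero-extended, so the high
  // word of the result is a constant 0 and the high words of the operands are
  // dropped.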

  OpIndex REDUCE(Phi)(base::Vector<const OpIndex> inputs,
                      RegisterRepresentation rep) {
    if (rep == RegisterRepresentation::Word64()) {
      base::SmallVector<OpIndex, 8> inputs_low;
      base::SmallVector<OpIndex, 8> inputs_high;
      auto word32 = RegisterRepresentation::Word32();
      inputs_low.reserve(inputs.size());
      inputs_high.reserve(inputs.size());
      for (OpIndex input : inputs) {
        auto input_w32p = V<Word32Pair>::Cast(input);
        inputs_low.push_back(__ template Projection<0>(input_w32p));
        inputs_high.push_back(__ template Projection<1>(input_w32p));
      }
      return __ Tuple(Next::ReducePhi(base::VectorOf(inputs_low), word32),
                      Next::ReducePhi(base::VectorOf(inputs_high), word32));
    }
    return Next::ReducePhi(inputs, rep);
  }

  OpIndex REDUCE(PendingLoopPhi)(OpIndex input, RegisterRepresentation rep) {
    if (rep == RegisterRepresentation::Word64()) {
      auto input_w32p = V<Word32Pair>::Cast(input);
      V<Word32> low = __ PendingLoopPhi(__ template Projection<0>(input_w32p));
      V<Word32> high =
          __ PendingLoopPhi(__ template Projection<1>(input_w32p));
      return __ Tuple(low, high);
    }
    return Next::ReducePendingLoopPhi(input, rep);
  }

  void FixLoopPhi(const PhiOp& input_phi, OpIndex output_index,
                  Block* output_graph_loop) {
    if (input_phi.rep == RegisterRepresentation::Word64()) {
      const TupleOp& tuple = __ Get(output_index).template Cast<TupleOp>();
      DCHECK_EQ(tuple.input_count, 2);
      OpIndex new_inputs[2] = {__ MapToNewGraph(input_phi.input(0)),
                               __ MapToNewGraph(input_phi.input(1))};
      for (size_t i = 0; i < 2; ++i) {
        OpIndex phi_index = tuple.input(i);
        if (!output_graph_loop->Contains(phi_index)) {
          continue;
        }
#ifdef DEBUG
        const PendingLoopPhiOp& pending_phi =
            __ Get(phi_index).template Cast<PendingLoopPhiOp>();
        DCHECK_EQ(
            pending_phi.first(),
            __ Projection(new_inputs[0], i, RegisterRepresentation::Word32()));
#endif
        __ output_graph().template Replace<PhiOp>(
            phi_index,
            base::VectorOf({__ Projection(new_inputs[0], i,
                                          RegisterRepresentation::Word32()),
                            __ Projection(new_inputs[1], i,
                                          RegisterRepresentation::Word32())}),
            RegisterRepresentation::Word32());
      }
      return;
    }
    return Next::FixLoopPhi(input_phi, output_index, output_graph_loop);
  }

  V<Simd128> REDUCE(Simd128Splat)(V<Any> input, Simd128SplatOp::Kind kind) {
    // TODO(14108): Introduce I32-pair splat for better codegen.
    if (kind != Simd128SplatOp::Kind::kI64x2) {
      return Next::ReduceSimd128Splat(input, kind);
    }
    auto [low, high] = Unpack(V<Word32Pair>::Cast(input));
    V<Simd128> base = __ Simd128Splat(low, Simd128SplatOp::Kind::kI32x4);
    V<Simd128> first_replaced = __ Simd128ReplaceLane(
        base, high, Simd128ReplaceLaneOp::Kind::kI32x4, 1);
    return __ Simd128ReplaceLane(first_replaced, high,
                                 Simd128ReplaceLaneOp::Kind::kI32x4, 3);
  }
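
  // The resulting lane pattern is [low, high, low, high] when read as i32x4,
  // i.e. [x, x] when read as i64x2.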

  V<Any> REDUCE(Simd128ExtractLane)(V<Simd128> input,
                                    Simd128ExtractLaneOp::Kind kind,
                                    uint8_t lane) {
    if (kind != Simd128ExtractLaneOp::Kind::kI64x2) {
      return Next::ReduceSimd128ExtractLane(input, kind, lane);
    }
    V<Word32> low = V<Word32>::Cast(__ Simd128ExtractLane(
        input, Simd128ExtractLaneOp::Kind::kI32x4, 2 * lane));
    V<Word32> high = V<Word32>::Cast(__ Simd128ExtractLane(
        input, Simd128ExtractLaneOp::Kind::kI32x4, 2 * lane + 1));
    return __ Tuple(low, high);
  }

  V<Simd128> REDUCE(Simd128ReplaceLane)(V<Simd128> into, V<Any> new_lane,
                                        Simd128ReplaceLaneOp::Kind kind,
                                        uint8_t lane) {
    // TODO(14108): Introduce I32-pair lane replacement for better codegen.
    if (kind != Simd128ReplaceLaneOp::Kind::kI64x2) {
      return Next::ReduceSimd128ReplaceLane(into, new_lane, kind, lane);
    }
    auto [low, high] = Unpack(V<Word32Pair>::Cast(new_lane));
    V<Simd128> low_replaced = __ Simd128ReplaceLane(
        into, low, Simd128ReplaceLaneOp::Kind::kI32x4, 2 * lane);
    return __ Simd128ReplaceLane(
        low_replaced, high, Simd128ReplaceLaneOp::Kind::kI32x4, 2 * lane + 1);
  }

  OpIndex REDUCE(FrameState)(base::Vector<const OpIndex> inputs, bool inlined,
                             const FrameStateData* data) {
    bool has_int64_input = false;

    for (MachineType type : data->machine_types) {
      if (RegisterRepresentation::FromMachineType(type) ==
          RegisterRepresentation::Word64()) {
        has_int64_input = true;
        break;
      }
    }
    if (!has_int64_input) {
      return Next::ReduceFrameState(inputs, inlined, data);
    }
    FrameStateData::Builder builder;
    if (inlined) {
      builder.AddParentFrameState(V<FrameState>::Cast(inputs[0]));
    }
    const FrameStateFunctionInfo* function_info =
        data->frame_state_info.function_info();
    uint16_t lowered_parameter_count = function_info->parameter_count();
    int lowered_local_count = function_info->local_count();

    for (size_t i = inlined; i < inputs.size(); ++i) {
      // In case of inlining the parent FrameState is an additional input,
      // however, it doesn't have an entry in the machine_types vector, so that
      // index has to be adapted.
      size_t machine_type_index = i - inlined;
      if (RegisterRepresentation::FromMachineType(
              data->machine_types[machine_type_index]) ==
          RegisterRepresentation::Word64()) {
        auto [low, high] = Unpack(V<Word32Pair>::Cast(inputs[i]));
        builder.AddInput(MachineType::Int32(), low);
        builder.AddInput(MachineType::Int32(), high);
        // Note that the first input (after the optional parent FrameState) is
        // the JSClosure, so the first parameter is at index 1 (+1 in case of
        // nested inlining).
        if (i <= inlined + function_info->parameter_count()) {
          ++lowered_parameter_count;
        } else {
          ++lowered_local_count;
        }
      } else {
        // Just copy over the existing input.
        builder.AddInput(data->machine_types[machine_type_index], inputs[i]);
      }
    }
    Zone* zone = Asm().data()->compilation_zone();
    auto* function_info_lowered = zone->New<compiler::FrameStateFunctionInfo>(
        compiler::FrameStateType::kLiftoffFunction, lowered_parameter_count,
        function_info->max_arguments(), lowered_local_count,
        function_info->shared_info(), kNullMaybeHandle,
        function_info->wasm_liftoff_frame_size(),
        function_info->wasm_function_index());
    const FrameStateInfo& frame_state_info = data->frame_state_info;
    auto* frame_state_info_lowered = zone->New<compiler::FrameStateInfo>(
        frame_state_info.bailout_id(), frame_state_info.state_combine(),
        function_info_lowered);

    return Next::ReduceFrameState(
        builder.Inputs(), builder.inlined(),
        builder.AllocateFrameStateData(*frame_state_info_lowered, zone));
  }
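
  // Example: a Liftoff frame state with parameters (i32, i64) and one i64
  // local is rebuilt with parameters (i32, i32, i32) and locals (i32, i32);
  // the counts grow by one for each lowered i64.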

 private:
  bool CheckPairOrPairOp(V<Word32Pair> input) {
#ifdef DEBUG
    if (const TupleOp* tuple = matcher_.TryCast<TupleOp>(input)) {
      DCHECK_EQ(2, tuple->input_count);
      RegisterRepresentation word32 = RegisterRepresentation::Word32();
      ValidateOpInputRep(__ output_graph(), tuple->input(0), word32);
      ValidateOpInputRep(__ output_graph(), tuple->input(1), word32);
    } else if (const DidntThrowOp* didnt_throw =
                   matcher_.TryCast<DidntThrowOp>(input)) {
      // If it's a call, it must be a call that returns exactly one i64.
      // (Note that the CallDescriptor has already been lowered to [i32, i32].)
      const CallOp& call =
          __ Get(didnt_throw->throwing_operation()).template Cast<CallOp>();
      DCHECK_EQ(call.descriptor->descriptor->ReturnCount(), 2);
      DCHECK_EQ(call.descriptor->descriptor->GetReturnType(0),
                MachineType::Int32());
      DCHECK_EQ(call.descriptor->descriptor->GetReturnType(1),
                MachineType::Int32());
    } else {
      DCHECK(matcher_.Is<Word32PairBinopOp>(input));
    }
#endif
    return true;
  }

  std::pair<V<Word32>, V<Word32>> Unpack(V<Word32Pair> input) {
    DCHECK(CheckPairOrPairOp(input));
    return {__ template Projection<0>(input), __ template Projection<1>(input)};
  }

  V<Word32Pair> LowerSignExtend(V<Word32> input) {
    // We use SAR to preserve the sign in the high word.
    return __ Tuple(input, __ Word32ShiftRightArithmetic(input, 31));
  }
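
  // Example: sign-extending the i32 -2 (0xFFFFFFFE) gives the pair
  // (low = 0xFFFFFFFE, high = 0xFFFFFFFF), since the arithmetic shift by 31
  // broadcasts the sign bit across the high word.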

  V<Word32Pair> LowerClz(V<Word32Pair> input) {
    auto [low, high] = Unpack(input);
    ScopedVar<Word32> result(this);
    IF (__ Word32Equal(high, 0)) {
      result = __ Word32Add(32, __ Word32CountLeadingZeros(low));
    } ELSE {
      result = __ Word32CountLeadingZeros(high);
    }

    return __ Tuple<Word32, Word32>(result, __ Word32Constant(0));
  }
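
  // Example: clz64(5) = 32 + clz32(5) = 32 + 29 = 61 (high word is 0), while
  // clz64(1 << 32) = clz32(1) = 31. The count always fits in the low word, so
  // the high word of the returned pair is 0.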

  V<Word32Pair> LowerCtz(V<Word32Pair> input) {
    DCHECK(SupportedOperations::word32_ctz());
    auto [low, high] = Unpack(input);
    ScopedVar<Word32> result(this);
    IF (__ Word32Equal(low, 0)) {
      result = __ Word32Add(32, __ Word32CountTrailingZeros(high));
    } ELSE {
      result = __ Word32CountTrailingZeros(low);
    }

    return __ Tuple<Word32, Word32>(result, __ Word32Constant(0));
  }

  V<Word32Pair> LowerPopCount(V<Word32Pair> input) {
    DCHECK(SupportedOperations::word32_popcnt());
    auto [low, high] = Unpack(input);
    return __ Tuple(
        __ Word32Add(__ Word32PopCount(low), __ Word32PopCount(high)),
        __ Word32Constant(0));
  }

  V<Word32Pair> LowerPairBinOp(V<Word32Pair> left, V<Word32Pair> right,
                               Word32PairBinopOp::Kind kind) {
    auto [left_low, left_high] = Unpack(left);
    auto [right_low, right_high] = Unpack(right);
    return __ Word32PairBinop(left_low, left_high, right_low, right_high,
                              kind);
  }

  V<Word32Pair> LowerPairShiftOp(V<Word32Pair> left, V<Word32> right,
                                 Word32PairBinopOp::Kind kind) {
    auto [left_low, left_high] = Unpack(left);
    // Note: The rhs of a 64 bit shift is a 32 bit value in turboshaft.
    V<Word32> right_high = __ Word32Constant(0);
    return __ Word32PairBinop(left_low, left_high, right, right_high, kind);
  }

  V<Word32Pair> LowerBitwiseAnd(V<Word32Pair> left, V<Word32Pair> right) {
    auto [left_low, left_high] = Unpack(left);
    auto [right_low, right_high] = Unpack(right);
    V<Word32> low_result = __ Word32BitwiseAnd(left_low, right_low);
    V<Word32> high_result = __ Word32BitwiseAnd(left_high, right_high);
    return __ Tuple(low_result, high_result);
  }

  V<Word32Pair> LowerBitwiseOr(V<Word32Pair> left, V<Word32Pair> right) {
    auto [left_low, left_high] = Unpack(left);
    auto [right_low, right_high] = Unpack(right);
    V<Word32> low_result = __ Word32BitwiseOr(left_low, right_low);
    V<Word32> high_result = __ Word32BitwiseOr(left_high, right_high);
    return __ Tuple(low_result, high_result);
  }

  V<Word32Pair> LowerBitwiseXor(V<Word32Pair> left, V<Word32Pair> right) {
    auto [left_low, left_high] = Unpack(left);
    auto [right_low, right_high] = Unpack(right);
    V<Word32> low_result = __ Word32BitwiseXor(left_low, right_low);
    V<Word32> high_result = __ Word32BitwiseXor(left_high, right_high);
    return __ Tuple(low_result, high_result);
  }

  V<Word32Pair> LowerRotateRight(V<Word32Pair> left, V<Word32> right) {
    // This reducer assumes that all rotates are mapped to rotate right.
    DCHECK(!SupportedOperations::word64_rol());
    auto [left_low, left_high] = Unpack(left);
    V<Word32> shift = right;
    uint32_t constant_shift = 0;

    if (matcher_.MatchIntegralWord32Constant(shift, &constant_shift)) {
      // Precondition: 0 <= shift < 64.
      uint32_t shift_value = constant_shift & 0x3F;
      if (shift_value == 0) {
        // No-op, return original tuple.
        return left;
      }
      if (shift_value == 32) {
        // Swap low and high of left.
        return __ Tuple(left_high, left_low);
      }

      V<Word32> low_input = left_high;
      V<Word32> high_input = left_low;
      if (shift_value < 32) {
        low_input = left_low;
        high_input = left_high;
      }

      uint32_t masked_shift_value = shift_value & 0x1F;
      V<Word32> masked_shift = __ Word32Constant(masked_shift_value);
      V<Word32> inv_shift = __ Word32Constant(32 - masked_shift_value);

      V<Word32> low_node = __ Word32BitwiseOr(
          __ Word32ShiftRightLogical(low_input, masked_shift),
          __ Word32ShiftLeft(high_input, inv_shift));
      V<Word32> high_node = __ Word32BitwiseOr(
          __ Word32ShiftRightLogical(high_input, masked_shift),
          __ Word32ShiftLeft(low_input, inv_shift));
      return __ Tuple(low_node, high_node);
    }
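
    // Example for the constant path: rotr64(x, 40) takes the >= 32 branch, so
    // the halves swap roles and masked_shift = 40 & 0x1F = 8; the new low
    // word is (high(x) >>> 8) | (low(x) << 24), and symmetrically for high.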

    V<Word32> safe_shift = shift;
    if (!SupportedOperations::word32_shift_is_safe()) {
      // safe_shift = shift % 32
      safe_shift = __ Word32BitwiseAnd(shift, 0x1F);
    }
    V<Word32> all_bits_set = __ Word32Constant(-1);
    V<Word32> inv_mask = __ Word32BitwiseXor(
        __ Word32ShiftRightLogical(all_bits_set, safe_shift), all_bits_set);
    V<Word32> bit_mask = __ Word32BitwiseXor(inv_mask, all_bits_set);

    V<Word32> less_than_32 = __ Int32LessThan(shift, 32);
    // The low word and the high word can be swapped either at the input or
    // at the output. We swap the inputs so that shift does not have to be
    // kept for so long in a register.
    ScopedVar<Word32> var_low(this, left_high);
    ScopedVar<Word32> var_high(this, left_low);
    IF (less_than_32) {
      var_low = left_low;
      var_high = left_high;
    }

    V<Word32> rotate_low = __ Word32RotateRight(var_low, safe_shift);
    V<Word32> rotate_high = __ Word32RotateRight(var_high, safe_shift);

    V<Word32> low_node =
        __ Word32BitwiseOr(__ Word32BitwiseAnd(rotate_low, bit_mask),
                           __ Word32BitwiseAnd(rotate_high, inv_mask));
    V<Word32> high_node =
        __ Word32BitwiseOr(__ Word32BitwiseAnd(rotate_high, bit_mask),
                           __ Word32BitwiseAnd(rotate_low, inv_mask));
    return __ Tuple(low_node, high_node);
  }

  V<Any> LowerCall(V<CallTarget> callee, OptionalV<FrameState> frame_state,
                   base::Vector<const OpIndex> arguments,
                   const TSCallDescriptor* descriptor, OpEffects effects,
                   bool is_tail_call) {
    // Iterate over the call descriptor to skip lowering if the signature does
    // not contain an i64.
    const CallDescriptor* call_descriptor = descriptor->descriptor;
    size_t param_count = call_descriptor->ParameterCount();
    size_t i64_params = 0;
    for (size_t i = 0; i < param_count; ++i) {
      i64_params += call_descriptor->GetParameterType(i).representation() ==
                    MachineRepresentation::kWord64;
    }
    size_t return_count = call_descriptor->ReturnCount();
    size_t i64_returns = 0;
    for (size_t i = 0; i < return_count; ++i) {
      i64_returns += call_descriptor->GetReturnType(i).representation() ==
                     MachineRepresentation::kWord64;
    }
    if (i64_params + i64_returns == 0) {
      // No lowering required.
      return is_tail_call ? Next::ReduceTailCall(callee, arguments, descriptor)
                          : Next::ReduceCall(callee, frame_state, arguments,
                                             descriptor, effects);
    }

    // Transform the BigIntToI64 call descriptor into BigIntToI32Pair (this is
    // the only use case currently, it may be extended in the future).
    // The correct target is already set during graph building.
    CallDescriptor* maybe_special_replacement =
        wasm::GetWasmEngine()->call_descriptors()->GetLoweredCallDescriptor(
            call_descriptor);
    if (maybe_special_replacement) call_descriptor = maybe_special_replacement;
    // Create descriptor with 2 i32s for every i64.
    const CallDescriptor* lowered_descriptor =
        GetI32WasmCallDescriptor(__ graph_zone(), call_descriptor);

    // Map the arguments by unpacking i64 arguments (which have already been
    // lowered to Tuple(i32, i32)).
    base::SmallVector<OpIndex, 16> lowered_args;
    lowered_args.reserve(param_count + i64_params);

    DCHECK_EQ(param_count, arguments.size());
    for (size_t i = 0; i < param_count; ++i) {
      if (call_descriptor->GetParameterType(i).representation() ==
          MachineRepresentation::kWord64) {
        auto [low, high] = Unpack(arguments[i]);
        lowered_args.push_back(low);
        lowered_args.push_back(high);
      } else {
        lowered_args.push_back(arguments[i]);
      }
    }

    auto lowered_ts_descriptor =
        TSCallDescriptor::Create(lowered_descriptor, descriptor->can_throw,
                                 descriptor->lazy_deopt_on_throw,
                                 __ graph_zone());
    OpIndex call =
        is_tail_call
            ? Next::ReduceTailCall(callee, base::VectorOf(lowered_args),
                                   lowered_ts_descriptor)
            : Next::ReduceCall(callee, frame_state,
                               base::VectorOf(lowered_args),
                               lowered_ts_descriptor, effects);
    if (is_tail_call) {
      // Tail calls don't return anything to the calling function.
      return call;
    }
    if (i64_returns == 0 || return_count == 0) {
      return call;
    } else if (return_count == 1) {
      // There isn't any projection in the input graph for calls returning
      // exactly one value. Return a tuple of projections for the int64.
      DCHECK_EQ(i64_returns, 1);
      return call;
    }

    // Wrap the call node with a tuple of projections of the lowered call.
    // Example for a call returning [int64, int32]:
    //   In:  Call(...) -> [int64, int32]
    //   Out: call = Call() -> [int32, int32, int32]
    //        Tuple(
    //          Tuple(Projection(call, 0), Projection(call, 1)),
    //          Projection(call, 2))
    //
    // This way projections on the original call node will be automatically
    // "rewired" to the correct projection of the lowered call.
    auto word32 = RegisterRepresentation::Word32();
    base::SmallVector<V<Any>, 16> tuple_inputs;
    tuple_inputs.reserve(return_count);
    size_t projection_index = 0;  // Index of the lowered call results.

    for (size_t i = 0; i < return_count; ++i) {
      MachineRepresentation machine_rep =
          call_descriptor->GetReturnType(i).representation();
      if (machine_rep == MachineRepresentation::kWord64) {
        tuple_inputs.push_back(
            __ Tuple(__ Projection(call, projection_index, word32),
                     __ Projection(call, projection_index + 1, word32)));
        projection_index += 2;
      } else {
        tuple_inputs.push_back(__ Projection(
            call, projection_index++,
            RegisterRepresentation::FromMachineRepresentation(machine_rep)));
      }
    }
    DCHECK_EQ(projection_index, return_count + i64_returns);
    return __ Tuple(base::VectorOf(tuple_inputs));
  }

  void InitializeIndexMaps() {
    // Add one implicit parameter in front.
    param_index_map_.push_back(0);
    int32_t new_index = 0;
    for (size_t i = 0; i < sig_->parameter_count(); ++i) {
      param_index_map_.push_back(++new_index);
      if (sig_->GetParam(i) == MachineRepresentation::kWord64) {
        // i64 becomes [i32 low, i32 high], so the next parameter index is
        // shifted by one.
        ++new_index;
      }
    }

    returns_i64_ = std::any_of(sig_->returns().begin(), sig_->returns().end(),
                               [](const MachineRepresentation rep) {
                                 return rep == MachineRepresentation::kWord64;
                               });
  }
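
  // Example: for a signature (i32, i64, i32) the map is [0, 1, 2, 4]; the
  // lowered i64 occupies new parameter indices 2 and 3, so the trailing i32
  // moves from old index 3 to new index 4.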

  const Signature<MachineRepresentation>* sig_ = nullptr;
  Zone* zone_ = __ graph_zone();
  ZoneVector<int32_t> param_index_map_{__ phase_zone()};
  bool returns_i64_ = false;  // Returns at least one i64.
  const OperationMatcher& matcher_{__ matcher()};
};

#include "src/compiler/turboshaft/undef-assembler-macros.inc"

}  // namespace v8::internal::compiler::turboshaft

#endif  // V8_COMPILER_TURBOSHAFT_INT64_LOWERING_REDUCER_H_