v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
maglev-assembler-riscv.cc
1// Copyright 2024 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
9namespace v8 {
10namespace internal {
11namespace maglev {
12
13#define __ masm->
14
15void SubSizeAndTagObject(MaglevAssembler* masm, Register object,
16 Register size_in_bytes) {
17 __ SubWord(object, object, Operand(size_in_bytes));
18 __ AddWord(object, object, Operand(kHeapObjectTag));
19}
20
21void SubSizeAndTagObject(MaglevAssembler* masm, Register object,
22 int size_in_bytes) {
23 __ AddWord(object, object, Operand(kHeapObjectTag - size_in_bytes));
24}
25
26template <typename T>
27void AllocateRaw(MaglevAssembler* masm, Isolate* isolate,
28 RegisterSnapshot register_snapshot, Register object,
29 T size_in_bytes, AllocationType alloc_type,
30 AllocationAlignment alignment) {
31 DCHECK(masm->allow_allocate());
32 // TODO(victorgomes): Call the runtime for large object allocation.
33 // TODO(victorgomes): Support double alignment.
34 DCHECK_EQ(alignment, kTaggedAligned);
35 if (v8_flags.single_generation) {
36 alloc_type = AllocationType::kOld;
37 }
38 ExternalReference top = SpaceAllocationTopAddress(isolate, alloc_type);
39 ExternalReference limit = SpaceAllocationLimitAddress(isolate, alloc_type);
40
41 ZoneLabelRef done(masm);
42 MaglevAssembler::TemporaryRegisterScope temps(masm);
43 Register scratch = temps.AcquireScratch();
44 // We are a bit short on registers, so we use the same register for {object}
45 // and {new_top}. Once we have defined {new_top}, we don't use {object} until
46 // {new_top} is used for the last time. And there (at the end of this
47 // function), we recover the original {object} from {new_top} by subtracting
48 // {size_in_bytes}.
49 Register new_top = object;
50 // Check if there is enough space.
51 __ LoadWord(object, __ ExternalReferenceAsOperand(top, scratch));
52 __ AddWord(new_top, object, Operand(size_in_bytes));
53 __ LoadWord(scratch, __ ExternalReferenceAsOperand(limit, scratch));
54
55 // Call runtime if new_top >= limit.
56 __ MacroAssembler::Branch(
57 __ MakeDeferredCode(
58 [](MaglevAssembler* masm, RegisterSnapshot register_snapshot,
59 Register object, AllocationType alloc_type, T size_in_bytes,
60 ZoneLabelRef done) {
61 AllocateSlow(masm, register_snapshot, object,
62 AllocateBuiltin(alloc_type), size_in_bytes, done);
63 },
64 register_snapshot, object, alloc_type, size_in_bytes, done),
65 ge, new_top, Operand(scratch));
66
67 // Store new top and tag object.
68 __ Move(__ ExternalReferenceAsOperand(top, scratch), new_top);
69 SubSizeAndTagObject(masm, object, size_in_bytes);
70 __ bind(*done);
71}
72
73void MaglevAssembler::Allocate(RegisterSnapshot register_snapshot,
74 Register object, int size_in_bytes,
75 AllocationType alloc_type,
76 AllocationAlignment alignment) {
77 AllocateRaw(this, isolate_, register_snapshot, object, size_in_bytes,
78 alloc_type, alignment);
79}
80
81void MaglevAssembler::Allocate(RegisterSnapshot register_snapshot,
82 Register object, Register size_in_bytes,
83 AllocationType alloc_type,
84 AllocationAlignment alignment) {
85 AllocateRaw(this, isolate_, register_snapshot, object, size_in_bytes,
86 alloc_type, alignment);
87}
88
89void MaglevAssembler::OSRPrologue(Graph* graph) {
90 DCHECK(graph->is_osr());
91 CHECK(!graph->has_recursive_calls());
92
93 uint32_t source_frame_size =
94 graph->min_maglev_stackslots_for_unoptimized_frame_size();
95
96 if (v8_flags.maglev_assert_stack_size && v8_flags.debug_code) {
97 MaglevAssembler::TemporaryRegisterScope temps(this);
98 Register scratch = temps.AcquireScratch();
99 int32_t expected_osr_stack_size =
100 source_frame_size * kSystemPointerSize +
101 StandardFrameConstants::kFixedFrameSizeFromFp;
102 AddWord(scratch, sp, Operand(expected_osr_stack_size));
103 MacroAssembler::SbxCheck(eq, AbortReason::kOsrUnexpectedStackSize, scratch,
104 Operand(fp));
105 }
106
107 uint32_t target_frame_size =
108 graph->tagged_stack_slots() + graph->untagged_stack_slots();
109 // CHECK_EQ(target_frame_size % 2, 1);
110 CHECK_LE(source_frame_size, target_frame_size);
111 if (source_frame_size < target_frame_size) {
112 ASM_CODE_COMMENT_STRING(this, "Growing frame for OSR");
113 uint32_t additional_tagged =
114 source_frame_size < graph->tagged_stack_slots()
115 ? graph->tagged_stack_slots() - source_frame_size
116 : 0;
117 for (size_t i = 0; i < additional_tagged; ++i) {
118 Push(zero_reg);
119 }
120 uint32_t size_so_far = source_frame_size + additional_tagged;
121 CHECK_LE(size_so_far, target_frame_size);
122 if (size_so_far < target_frame_size) {
123 Sub64(sp, sp,
124 Operand((target_frame_size - size_so_far) * kSystemPointerSize));
125 }
126 }
127}
128
129void MaglevAssembler::Prologue(Graph* graph) {
130 ASM_CODE_COMMENT(this);
131 MaglevAssembler::TemporaryRegisterScope temps(this);
132 // We add two extra registers to the scope. Ideally we could add all the
133 // allocatable general registers, except Context, JSFunction, NewTarget and
134 // ArgCount. Unfortunately, OptimizeCodeOrTailCallOptimizedCodeSlot and
135 // LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing pick random registers and
136 // we could alias those.
137 // TODO(victorgomes): Fix these builtins to either use the scope or pass the
138 // used registers manually.
139 temps.Include({s7, s8}); // Use registers not overlapping with flags,
140 // feedback vector, and so on.
141 DCHECK(!graph->is_osr());
142
143 CallTarget();
145
146 if (graph->has_recursive_calls()) {
147 BindCallTarget(code_gen_state()->entry_label());
148 }
149
150 // Tiering support.
151#ifndef V8_ENABLE_LEAPTIERING
152 if (v8_flags.turbofan) {
153 using D = MaglevOptimizeCodeOrTailCallOptimizedCodeSlotDescriptor;
154 Register flags = D::GetRegisterParameter(D::kFlags);
155 Register feedback_vector = D::GetRegisterParameter(D::kFeedbackVector);
156 DCHECK(!AreAliased(
157 flags, feedback_vector,
158 kJavaScriptCallArgCountRegister, // flags - t4, feedback - a6,
159 // kJavaScriptCallArgCountRegister -
160 // a0
161 kJSFunctionRegister, kContextRegister,
162 kJavaScriptCallNewTargetRegister));
163 DCHECK(!temps.Available().has(flags));
164 DCHECK(!temps.Available().has(feedback_vector));
165 Move(feedback_vector,
166 compilation_info()->toplevel_compilation_unit()->feedback().object());
167 Label needs_processing, done;
168 LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
169 flags, feedback_vector, CodeKind::MAGLEV, &needs_processing);
170 Jump(&done);
171 bind(&needs_processing);
172 TailCallBuiltin(Builtin::kMaglevOptimizeCodeOrTailCallOptimizedCodeSlot);
173 bind(&done);
174 }
175#endif
176
177 EnterFrame(StackFrame::MAGLEV);
178 // Save arguments in frame.
179 // TODO(leszeks): Consider eliding this frame if we don't make any calls
180 // that could clobber these registers.
181 // Push the context and the JSFunction.
182 Push(kContextRegister);
183 Push(kJSFunctionRegister);
184 // Push the actual argument count and a _possible_ stack slot.
185 Push(kJavaScriptCallArgCountRegister);
186 // Initialize stack slots.
187 if (graph->tagged_stack_slots() > 0) {
188 ASM_CODE_COMMENT_STRING(this, "Initializing stack slots");
189
190 // Magic value. Experimentally, an unroll size of 8 doesn't seem any
191 // worse than fully unrolled pushes.
192 const int kLoopUnrollSize = 8;
193 int tagged_slots = graph->tagged_stack_slots();
194
195 if (tagged_slots < 2 * kLoopUnrollSize) {
196 // If the frame is small enough, just unroll the frame fill
197 // completely.
198 for (int i = 0; i < tagged_slots; ++i) {
199 Push(zero_reg);
200 }
201 } else {
202 // Extract the first few slots to round to the unroll size.
203 int first_slots = tagged_slots % kLoopUnrollSize;
204 for (int i = 0; i < first_slots; ++i) {
205 Push(zero_reg);
206 }
207 MaglevAssembler::TemporaryRegisterScope temps(this);
208 Register count = temps.AcquireScratch();
209 Move(count, tagged_slots / kLoopUnrollSize);
210 // We enter the loop unconditionally, so make sure we need to loop at
211 // least once.
212 DCHECK_GT(tagged_slots / kLoopUnrollSize, 0);
213 Label loop;
214 bind(&loop);
215 for (int i = 0; i < kLoopUnrollSize; ++i) {
216 Push(zero_reg);
217 }
218 Sub64(count, count, Operand(1));
219 MacroAssembler::Branch(&loop, gt, count, Operand(zero_reg), Label::kNear);
220 }
221 }
222 if (graph->untagged_stack_slots() > 0) {
223 // Extend sp by the size of the remaining untagged part of the frame,
224 // no need to initialise these.
225 Sub64(sp, sp, Operand(graph->untagged_stack_slots() * kSystemPointerSize));
226 }
227}
228
229void MaglevAssembler::MaybeEmitDeoptBuiltinsCall(size_t eager_deopt_count,
230 Label* eager_deopt_entry,
231 size_t lazy_deopt_count,
232 Label* lazy_deopt_entry) {
234
235 DCHECK_GE(Deoptimizer::kLazyDeoptExitSize, Deoptimizer::kEagerDeoptExitSize);
236
237 MaglevAssembler::TemporaryRegisterScope scope(this);
238 Register scratch = scope.AcquireScratch();
239 if (eager_deopt_count > 0) {
240 bind(eager_deopt_entry);
241 LoadEntryFromBuiltin(Builtin::kDeoptimizationEntry_Eager, scratch);
242 MacroAssembler::Jump(scratch);
243 }
244 if (lazy_deopt_count > 0) {
245 bind(lazy_deopt_entry);
246 LoadEntryFromBuiltin(Builtin::kDeoptimizationEntry_Lazy, scratch);
247 MacroAssembler::Jump(scratch);
248 }
249}
250
251void MaglevAssembler::LoadSingleCharacterString(Register result,
252 Register char_code,
253 Register scratch) {
254 DCHECK_NE(char_code, scratch);
255 if (v8_flags.debug_code) {
256 MacroAssembler::Assert(less_equal, AbortReason::kUnexpectedValue, char_code,
257 Operand(String::kMaxOneByteCharCode));
258 }
259 Register table = scratch;
260 LoadRoot(table, RootIndex::kSingleCharacterStringTable);
261 LoadTaggedFieldByIndex(result, table, char_code, kTaggedSize,
262 OFFSET_OF_DATA_START(FixedArray));
263}
264
265void MaglevAssembler::StringFromCharCode(RegisterSnapshot register_snapshot,
266 Label* char_code_fits_one_byte,
267 Register result, Register char_code,
268 Register scratch,
269 CharCodeMaskMode mask_mode) {
270 ZeroExtendWord(char_code, char_code);
271 AssertZeroExtended(char_code);
272 DCHECK_NE(char_code, scratch);
273 ZoneLabelRef done(this);
274 if (mask_mode == CharCodeMaskMode::kMustApplyMask) {
275 And(char_code, char_code, Operand(0xFFFF));
276 }
277 // Allocate two-bytes string if {char_code} doesn't fit one byte.
278 MacroAssembler::Branch( // FIXME: reimplement with JumpToDeferredIf
279 MakeDeferredCode(
280 [](MaglevAssembler* masm, RegisterSnapshot register_snapshot,
281 ZoneLabelRef done, Register result, Register char_code,
282 Register scratch) {
283 MaglevAssembler::TemporaryRegisterScope temps(masm);
284 // Ensure that {result} never aliases {scratch}, otherwise use
285 // a temporary register to restore {result} at the end.
286 const bool need_restore_result = (scratch == result);
287 Register string =
288 need_restore_result ? temps.AcquireScratch() : result;
289 // Ensure that {char_code} never aliases {result}, otherwise use
290 // the given {scratch} register.
291 if (char_code == result) {
292 __ Move(scratch, char_code);
293 char_code = scratch;
294 }
295 DCHECK(char_code != string);
296 DCHECK(scratch != string);
297 DCHECK(!register_snapshot.live_tagged_registers.has(char_code));
298 register_snapshot.live_registers.set(char_code);
299 __ AllocateTwoByteString(register_snapshot, string, 1);
300 __ And(scratch, char_code, Operand(0xFFFF));
301 __ Sh(scratch, FieldMemOperand(
302 string, OFFSET_OF_DATA_START(SeqTwoByteString)));
303 if (need_restore_result) {
304 __ Move(result, string);
305 }
306 __ jmp(*done);
307 },
308 register_snapshot, done, result, char_code, scratch),
309 Ugreater_equal, char_code, Operand(String::kMaxOneByteCharCode));
310
311 if (char_code_fits_one_byte != nullptr) {
312 bind(char_code_fits_one_byte);
313 }
314 LoadSingleCharacterString(result, char_code, scratch);
315 bind(*done);
316}
317// Sets equality flag in pseudo flags reg.
318void MaglevAssembler::IsObjectType(Register object, Register scratch1,
319 Register scratch2, InstanceType type) {
320 ASM_CODE_COMMENT(this);
321 constexpr Register flags = MaglevAssembler::GetFlagsRegister();
322 Label ConditionMet, Done;
323 if (V8_STATIC_ROOTS_BOOL &&
324 InstanceTypeChecker::UniqueMapOfInstanceType(type)) {
325 LoadCompressedMap(scratch1, object);
326 std::optional<RootIndex> expected =
327 InstanceTypeChecker::UniqueMapOfInstanceType(type);
328 Tagged_t expected_ptr = ReadOnlyRootPtr(*expected);
329 li(scratch2, expected_ptr);
330 Sll32(scratch2, scratch2, Operand(0));
331 MacroAssembler::Branch(&ConditionMet, Condition::kEqual, scratch1,
332 Operand(scratch2), Label::kNear);
333 } else {
334 CompareObjectTypeAndJump(object, scratch1, scratch2, type,
335 Condition::kEqual, &ConditionMet, Label::kNear);
336 }
337 Li(flags, 1); // Condition is not met by default and
338 // flags is set after a scratch is used,
339 // so no harm if they are aliased.
340 Jump(&Done, Label::kNear);
341 bind(&ConditionMet);
342 Mv(flags, zero_reg); // Condition is met
343 bind(&Done);
344}
345
346void MaglevAssembler::StringCharCodeOrCodePointAt(
347 BuiltinStringPrototypeCharCodeOrCodePointAt::Mode mode,
348 RegisterSnapshot& register_snapshot, Register result, Register string,
349 Register index, Register instance_type, [[maybe_unused]] Register scratch2,
350 Label* result_fits_one_byte) {
351 ZoneLabelRef done(this);
352 Label seq_string;
353 Label cons_string;
354 Label sliced_string;
355
356 Label* deferred_runtime_call = MakeDeferredCode(
357 [](MaglevAssembler* masm,
358 BuiltinStringPrototypeCharCodeOrCodePointAt::Mode mode,
359 RegisterSnapshot register_snapshot, ZoneLabelRef done, Register result,
360 Register string, Register index) {
361 DCHECK(!register_snapshot.live_registers.has(result));
362 DCHECK(!register_snapshot.live_registers.has(string));
363 DCHECK(!register_snapshot.live_registers.has(index));
364 {
365 SaveRegisterStateForCall save_register_state(masm, register_snapshot);
366 __ SmiTag(index);
367 __ Push(string, index);
368 __ Move(kContextRegister, masm->native_context().object());
369 // This call does not throw nor can it deopt.
370 if (mode ==
371 BuiltinStringPrototypeCharCodeOrCodePointAt::kCodePointAt) {
372 __ CallRuntime(Runtime::kStringCodePointAt);
373 } else {
374 DCHECK_EQ(mode,
375 BuiltinStringPrototypeCharCodeOrCodePointAt::kCharCodeAt);
376 __ CallRuntime(Runtime::kStringCharCodeAt);
377 }
378 save_register_state.DefineSafepoint();
379 __ Move(result, kReturnRegister0);
380 __ SmiUntag(result);
381 }
382 __ jmp(*done);
383 },
384 mode, register_snapshot, done, result, string, index);
385
386 // We might need to try more than one time for ConsString, SlicedString and
387 // ThinString.
388 Label loop;
389 bind(&loop);
390
391 if (v8_flags.debug_code) {
392 Register scratch = instance_type;
393
394 // Check if {string} is a string.
395 AssertObjectTypeInRange(string, FIRST_STRING_TYPE, LAST_STRING_TYPE,
396 AbortReason::kUnexpectedValue);
397
398 Lw(scratch, FieldMemOperand(string, offsetof(String, length_)));
399 Check(kUnsignedLessThan, AbortReason::kUnexpectedValue, index,
400 Operand(scratch));
401 }
402
403 // Get instance type.
404 LoadInstanceType(instance_type, string);
405
406 {
407 MaglevAssembler::TemporaryRegisterScope temps(this);
408 Register representation = temps.AcquireScratch();
409
410 // TODO(victorgomes): Add fast path for external strings.
411 And(representation, instance_type, Operand(kStringRepresentationMask));
412 MacroAssembler::Branch(&seq_string, kEqual, representation,
413 Operand(kSeqStringTag), Label::kNear);
414 MacroAssembler::Branch(&cons_string, kEqual, representation,
415 Operand(kConsStringTag), Label::kNear);
416 MacroAssembler::Branch(&sliced_string, kEqual, representation,
417 Operand(kSlicedStringTag), Label::kNear);
418 MacroAssembler::Branch(deferred_runtime_call, kNotEqual, representation,
419 Operand(kThinStringTag));
420 // Fallthrough to thin string.
421 }
422
423 // Is a thin string.
424 {
425 LoadTaggedField(string, string, offsetof(ThinString, actual_));
426 MacroAssembler::Branch(&loop, Label::kNear);
427 }
428
429 bind(&sliced_string);
430 {
431 MaglevAssembler::TemporaryRegisterScope temps(this);
432 Register offset = temps.AcquireScratch();
433
434 LoadAndUntagTaggedSignedField(offset, string,
435 offsetof(SlicedString, offset_));
436 LoadTaggedField(string, string, offsetof(SlicedString, parent_));
437 Add32(index, index, Operand(offset));
438 MacroAssembler::Branch(&loop, Label::kNear);
439 }
440
441 bind(&cons_string);
442 {
443 // Reuse {instance_type} register here, since CompareRoot requires a scratch
444 // register as well.
445 Register second_string = instance_type;
446 LoadTaggedFieldWithoutDecompressing(second_string, string,
447 offsetof(ConsString, second_));
448 CompareRoot(second_string,
449 RootIndex::kempty_string); // Sets the flag to 1 if not equal
450 JumpIf(ne, deferred_runtime_call); // Jump if the flag is not 0
451 LoadTaggedField(string, string, offsetof(ConsString, first_));
452 MacroAssembler::Branch(&loop,
453 Label::kNear); // Try again with first string.
454 }
455
456 bind(&seq_string);
457 {
458 Label two_byte_string;
459 And(instance_type, instance_type, Operand(kStringEncodingMask));
460 MacroAssembler::Branch(&two_byte_string, equal, instance_type,
461 Operand(kTwoByteStringTag), Label::kNear);
462 // The result of one-byte string will be the same for both modes
463 // (CharCodeAt/CodePointAt), since it cannot be the first half of a
464 // surrogate pair.
465 AddWord(result, string, Operand(index));
466 Lbu(result, MemOperand(result, OFFSET_OF_DATA_START(SeqOneByteString) -
467 kHeapObjectTag));
468 MacroAssembler::Branch(result_fits_one_byte);
469
470 bind(&two_byte_string);
471 // {instance_type} is unused from this point, so we can use it as scratch.
472 Register scratch = instance_type;
473
474 Register scaled_index = scratch;
475 Sll32(scaled_index, index, Operand(1));
476 AddWord(result, string, Operand(scaled_index));
477 Lhu(result, MemOperand(result, OFFSET_OF_DATA_START(SeqTwoByteString) -
478 kHeapObjectTag));
479
480 if (mode == BuiltinStringPrototypeCharCodeOrCodePointAt::kCodePointAt) {
481 Register first_code_point = scratch;
482 And(first_code_point, result, Operand(0xfc00));
483 MacroAssembler::Branch(*done, kNotEqual, first_code_point,
484 Operand(0xd800), Label::kNear);
485
486 Register length = scratch;
487 Lw(length, FieldMemOperand(string, offsetof(String, length_)));
488 Add32(index, index, Operand(1));
489 MacroAssembler::Branch(*done, kGreaterThanEqual, index, Operand(length),
490 Label::kNear);
491
492 Register second_code_point = scratch;
493 Sll32(second_code_point, index, Operand(1));
494 AddWord(second_code_point, string, second_code_point);
495 Lhu(second_code_point,
496 MemOperand(second_code_point,
497 OFFSET_OF_DATA_START(SeqTwoByteString) - kHeapObjectTag));
498
499 // {index} is not needed at this point.
500 Register scratch2 = index;
501 And(scratch2, second_code_point, Operand(0xfc00));
502 MacroAssembler::Branch(*done, kNotEqual, scratch2, Operand(0xdc00),
503 Label::kNear);
504
505 int surrogate_offset = 0x10000 - (0xd800 << 10) - 0xdc00;
506 Add32(second_code_point, second_code_point, Operand(surrogate_offset));
507 Sll32(result, result, Operand(10));
508 Add32(result, result, Operand(second_code_point));
509 }
510
511 // Fallthrough.
512 }
513
514 bind(*done);
515
516 if (v8_flags.debug_code) {
517 // We make sure that the user of this macro is not relying on string and
518 // index being preserved, by clobbering them here.
519 if (result != string) {
520 Li(string, 0xdeadbeef);
521 }
522 if (result != index) {
523 Li(index, 0xdeadbeef);
524 }
525 }
526}
527
528void MaglevAssembler::TruncateDoubleToInt32(Register dst, DoubleRegister src) {
529 ZoneLabelRef done(this);
530 Label* slow_path = MakeDeferredCode(
531 [](MaglevAssembler* masm, DoubleRegister src, Register dst,
532 ZoneLabelRef done) {
533 __ push(ra);
534 __ AllocateStackSpace(kDoubleSize);
535 __ StoreDouble(src, MemOperand(sp, 0));
536 __ CallBuiltin(Builtin::kDoubleToI);
537 __ LoadWord(dst, MemOperand(sp, 0));
538 __ AddWord(sp, sp, Operand(kDoubleSize));
539 __ pop(ra);
540 __ Jump(*done);
541 },
542 src, dst, done);
543 TryInlineTruncateDoubleToI(dst, src, *done);
544 Jump(slow_path);
545 bind(*done);
546 ZeroExtendWord(dst, dst); // FIXME: is zero extension really needed here?
547}
548
549void MaglevAssembler::TryTruncateDoubleToInt32(Register dst, DoubleRegister src,
550 Label* fail) {
551 MaglevAssembler::TemporaryRegisterScope temps(this);
552 DoubleRegister converted_back = temps.AcquireScratchDouble();
553 Register rcmp = temps.AcquireScratch();
554
555 // Convert the input float64 value to int32.
556 Trunc_w_d(dst, src);
557 // Convert that int32 value back to float64.
558 Cvt_d_w(converted_back, dst);
559 // Check that the result of the float64->int32->float64 is equal to the input
560 // (i.e. that the conversion didn't truncate).
561 CompareF64(rcmp, EQ, src, converted_back); // rcmp is 0 if not equal
562 MacroAssembler::Branch(
563 fail, eq, rcmp, Operand(zero_reg)); // if we don't know branch distance
564 // then let's use MacroAssembler::Branch, it will make sure we fit
565
566 // Check if {input} is -0.
567 Label check_done;
568 BranchShort(&check_done, ne, dst, Operand(zero_reg));
569
570 // In case of 0, we need to check for the IEEE 0 pattern (which is all zeros).
571 MacroAssembler::ExtractHighWordFromF64(
572 rcmp, src); // FIXME: should we enable this in MaglevAssembler as well?
573
574 MacroAssembler::Branch(fail, ne, rcmp, Operand(zero_reg));
575
576 bind(&check_done);
577}
578
579void MaglevAssembler::TryTruncateDoubleToUint32(Register dst,
580 DoubleRegister src,
581 Label* fail) {
582 MaglevAssembler::TemporaryRegisterScope temps(this);
583 DoubleRegister converted_back = temps.AcquireScratchDouble();
584 Register rcmp = temps.AcquireScratch();
585
586 // Convert the input float64 value to uint32.
587 Trunc_uw_d(dst, src);
588 // Convert that uint32 value back to float64.
589 Cvt_d_uw(converted_back, dst);
590 // Check that the result of the float64->uint32->float64 is equal to the input
591 // (i.e. that the conversion didn't truncate).
592 CompareF64(rcmp, EQ, src, converted_back); // rcmp is 0 if not equal
593 MacroAssembler::Branch(fail, eq, rcmp, Operand(zero_reg));
594
595 // Check if {input} is -0.
596 Label check_done;
597 BranchShort(&check_done, ne, dst, Operand(zero_reg));
598
599 // In case of 0, we need to check for the IEEE 0 pattern (which is all zeros).
600 MacroAssembler::ExtractHighWordFromF64(
601 rcmp, src); // FIXME: should we enable this in MaglevAssembler as well?
602
603 MacroAssembler::Branch(fail, ne, rcmp, Operand(zero_reg));
604
605 bind(&check_done);
606}
607
608void MaglevAssembler::TryChangeFloat64ToIndex(Register result,
609 DoubleRegister value,
610 Label* success, Label* fail) {
611 MaglevAssembler::TemporaryRegisterScope temps(this);
612 DoubleRegister converted_back = temps.AcquireScratchDouble();
613 Register rcmp = temps.AcquireScratch();
614
615 // Convert the input float64 value to int32.
616 Trunc_w_d(result, value);
617 // Convert that int32 value back to float64.
618 Cvt_d_w(converted_back, result);
619 // Check that the result of the float64->int32->float64 is equal to
620 // the input (i.e. that the conversion didn't truncate).
621 CompareF64(rcmp, EQ, value, converted_back); // rcmp is 0 if not equal
622 MacroAssembler::Branch(fail, eq, rcmp, Operand(zero_reg));
623 Jump(success);
624}
625
626} // namespace maglev
627} // namespace internal
628} // namespace v8