v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
maglev-assembler-arm.cc
// Copyright 2023 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/codegen/interface-descriptors-inl.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/maglev/maglev-assembler-inl.h"
#include "src/maglev/maglev-graph.h"

namespace v8 {
namespace internal {
namespace maglev {

#define __ masm->

namespace {
void SubSizeAndTagObject(MaglevAssembler* masm, Register object,
                         Register size_in_bytes) {
  __ sub(object, object, size_in_bytes, LeaveCC);
  __ add(object, object, Operand(kHeapObjectTag), LeaveCC);
}

void SubSizeAndTagObject(MaglevAssembler* masm, Register object,
                         int size_in_bytes) {
  __ add(object, object, Operand(kHeapObjectTag - size_in_bytes), LeaveCC);
}

template <typename T>
void AllocateRaw(MaglevAssembler* masm, Isolate* isolate,
                 RegisterSnapshot register_snapshot, Register object,
                 T size_in_bytes, AllocationType alloc_type,
                 AllocationAlignment alignment) {
  // TODO(victorgomes): Call the runtime for large object allocation.
  // TODO(victorgomes): Support double alignment.
  DCHECK(masm->allow_allocate());
  DCHECK_EQ(alignment, kTaggedAligned);
  if (v8_flags.single_generation) {
    alloc_type = AllocationType::kOld;
  }
  ExternalReference top = SpaceAllocationTopAddress(isolate, alloc_type);
  ExternalReference limit = SpaceAllocationLimitAddress(isolate, alloc_type);
  ZoneLabelRef done(masm);
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register scratch = temps.AcquireScratch();
  // We are a bit short on registers, so we use the same register for {object}
  // and {new_top}. Once we have defined {new_top}, we don't use {object} until
  // {new_top} is used for the last time. And there (at the end of this
  // function), we recover the original {object} from {new_top} by subtracting
  // {size_in_bytes}.
  Register new_top = object;
  // Check if there is enough space.
  __ ldr(object, __ ExternalReferenceAsOperand(top, scratch));
  __ add(new_top, object, Operand(size_in_bytes), LeaveCC);
  __ ldr(scratch, __ ExternalReferenceAsOperand(limit, scratch));
  __ cmp(new_top, scratch);
  // Otherwise call runtime.
  __ JumpToDeferredIf(kUnsignedGreaterThanEqual, AllocateSlow<T>,
                      register_snapshot, object, AllocateBuiltin(alloc_type),
                      size_in_bytes, done);
  // Store new top and tag object.
  __ Move(__ ExternalReferenceAsOperand(top, scratch), new_top);
  SubSizeAndTagObject(masm, object, size_in_bytes);
  __ bind(*done);
}
}  // namespace
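
// Illustrative aside (not part of the original file): the code AllocateRaw
// emits is a bump-pointer fast path over the space's {top, limit} words. A
// plain C++ model of that fast path, with hypothetical names, looks like:
namespace {
[[maybe_unused]] uintptr_t TryAllocateFastSketch(uintptr_t* top,
                                                 uintptr_t limit,
                                                 uintptr_t size_in_bytes) {
  uintptr_t new_top = *top + size_in_bytes;
  // Mirrors the kUnsignedGreaterThanEqual jump to AllocateSlow above.
  if (new_top >= limit) return 0;
  uintptr_t object = *top;         // untagged start of the new object
  *top = new_top;                  // publish the new allocation top
  return object + kHeapObjectTag;  // tag it as a heap object
}
}  // namespace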

void MaglevAssembler::Allocate(RegisterSnapshot register_snapshot,
                               Register object, int size_in_bytes,
                               AllocationType alloc_type,
                               AllocationAlignment alignment) {
  AllocateRaw(this, isolate_, register_snapshot, object, size_in_bytes,
              alloc_type, alignment);
}

void MaglevAssembler::Allocate(RegisterSnapshot register_snapshot,
                               Register object, Register size_in_bytes,
                               AllocationType alloc_type,
                               AllocationAlignment alignment) {
  AllocateRaw(this, isolate_, register_snapshot, object, size_in_bytes,
              alloc_type, alignment);
}

void MaglevAssembler::OSRPrologue(Graph* graph) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();

  DCHECK(graph->is_osr());
  CHECK(!graph->has_recursive_calls());

  uint32_t source_frame_size =
      graph->min_maglev_stackslots_for_unoptimized_frame_size();

  if (v8_flags.debug_code) {
    add(scratch, sp,
        Operand(source_frame_size * kSystemPointerSize +
                StandardFrameConstants::kFixedFrameSizeFromFp));
    cmp(scratch, fp);
    Assert(eq, AbortReason::kOsrUnexpectedStackSize);
  }

  uint32_t target_frame_size =
      graph->tagged_stack_slots() + graph->untagged_stack_slots();
  CHECK_LE(source_frame_size, target_frame_size);

  if (source_frame_size < target_frame_size) {
    ASM_CODE_COMMENT_STRING(this, "Growing frame for OSR");
    uint32_t additional_tagged =
        source_frame_size < graph->tagged_stack_slots()
            ? graph->tagged_stack_slots() - source_frame_size
            : 0;
    if (additional_tagged) {
      Move(scratch, 0);
    }
    for (size_t i = 0; i < additional_tagged; ++i) {
      Push(scratch);
    }
    uint32_t size_so_far = source_frame_size + additional_tagged;
    CHECK_LE(size_so_far, target_frame_size);
    if (size_so_far < target_frame_size) {
      sub(sp, sp,
          Operand((target_frame_size - size_so_far) * kSystemPointerSize));
    }
  }
}
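
// Illustrative aside (not part of the original file): the frame growth above
// splits the extra slots into a tagged part that must be pushed zero-
// initialized (so the GC only ever sees valid tagged values) and an untagged
// remainder that is reserved with a plain sp adjustment. The arithmetic:
namespace {
constexpr uint32_t OsrAdditionalTaggedSlotsSketch(uint32_t source_frame_size,
                                                  uint32_t tagged_stack_slots) {
  return source_frame_size < tagged_stack_slots
             ? tagged_stack_slots - source_frame_size
             : 0;
}
// Hypothetical example: unoptimized frame of 3 slots, Maglev frame of
// 5 tagged + 2 untagged slots => push 2 zeroed slots, then sp -= 2 slots.
static_assert(OsrAdditionalTaggedSlotsSketch(3, 5) == 2,
              "two tagged slots are pushed and zeroed");
static_assert(OsrAdditionalTaggedSlotsSketch(6, 5) == 0,
              "a larger source frame already covers the tagged area");
}  // namespace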

void MaglevAssembler::Prologue(Graph* graph) {
  TemporaryRegisterScope temps(this);
  temps.Include({r4, r8});

  DCHECK(!graph->is_osr());

  if (graph->has_recursive_calls()) {
    bind(code_gen_state()->entry_label());
  }

#ifndef V8_ENABLE_LEAPTIERING
  // Tiering support.
  if (v8_flags.turbofan) {
    using D = MaglevOptimizeCodeOrTailCallOptimizedCodeSlotDescriptor;
    Register flags = D::GetRegisterParameter(D::kFlags);
    Register feedback_vector = D::GetRegisterParameter(D::kFeedbackVector);
    DCHECK(!AreAliased(flags, feedback_vector, kJavaScriptCallArgCountRegister,
                       kJSFunctionRegister, kContextRegister,
                       kJavaScriptCallNewTargetRegister));
    DCHECK(!temps.Available().has(flags));
    DCHECK(!temps.Available().has(feedback_vector));
    Move(feedback_vector,
         compilation_info()->toplevel_compilation_unit()->feedback().object());
    Condition needs_processing =
        LoadFeedbackVectorFlagsAndCheckIfNeedsProcessing(flags, feedback_vector,
                                                         CodeKind::MAGLEV);
    // Tail call on Arm produces 3 instructions, so we emit that in deferred
    // code.
    JumpToDeferredIf(needs_processing, [](MaglevAssembler* masm) {
      __ TailCallBuiltin(
          Builtin::kMaglevOptimizeCodeOrTailCallOptimizedCodeSlot);
    });
  }
#endif  // !V8_ENABLE_LEAPTIERING

  EnterFrame(StackFrame::MAGLEV);
  // Save arguments in frame.
  // TODO(leszeks): Consider eliding this frame if we don't make any calls
  // that could clobber these registers.
  Push(kContextRegister);
  Push(kJSFunctionRegister);              // Callee's JS function.
  Push(kJavaScriptCallArgCountRegister);  // Actual argument count.

  // Initialize stack slots.
  if (graph->tagged_stack_slots() > 0) {
    ASM_CODE_COMMENT_STRING(this, "Initializing stack slots");
    TemporaryRegisterScope temps(this);
    Register scratch = temps.AcquireScratch();
    Move(scratch, 0);

    // Magic value. Experimentally, an unroll size of 8 doesn't seem any
    // worse than fully unrolled pushes.
    const int kLoopUnrollSize = 8;
    int tagged_slots = graph->tagged_stack_slots();
    if (tagged_slots < kLoopUnrollSize) {
      // If the frame is small enough, just unroll the frame fill
      // completely.
      for (int i = 0; i < tagged_slots; ++i) {
        Push(scratch);
      }
    } else {
      // Extract the first few slots to round to the unroll size.
      int first_slots = tagged_slots % kLoopUnrollSize;
      for (int i = 0; i < first_slots; ++i) {
        Push(scratch);
      }
      Register unroll_counter = temps.AcquireScratch();
      Move(unroll_counter, tagged_slots / kLoopUnrollSize);
      // We enter the loop unconditionally, so make sure we need to loop at
      // least once.
      DCHECK_GT(tagged_slots / kLoopUnrollSize, 0);
      Label loop;
      bind(&loop);
      for (int i = 0; i < kLoopUnrollSize; ++i) {
        Push(scratch);
      }
      sub(unroll_counter, unroll_counter, Operand(1), SetCC);
      b(kGreaterThan, &loop);
    }
  }
  if (graph->untagged_stack_slots() > 0) {
    // Extend sp by the size of the remaining untagged part of the frame;
    // no need to initialise these slots.
    sub(sp, sp, Operand(graph->untagged_stack_slots() * kSystemPointerSize));
  }
}
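
// Illustrative aside (not part of the original file): the slot-zeroing code
// above first pushes tagged_slots % kLoopUnrollSize slots, then runs
// tagged_slots / kLoopUnrollSize loop iterations of kLoopUnrollSize pushes
// each, so every tagged slot is written exactly once. For example:
static_assert(27 % 8 + (27 / 8) * 8 == 27,
              "3 remainder pushes plus 3 unrolled blocks of 8 cover 27 slots");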

void MaglevAssembler::MaybeEmitDeoptBuiltinsCall(size_t eager_deopt_count,
                                                 Label* eager_deopt_entry,
                                                 size_t lazy_deopt_count,
                                                 Label* lazy_deopt_entry) {
  CheckConstPool(true, false);
}

void MaglevAssembler::LoadSingleCharacterString(Register result,
                                                Register char_code,
                                                Register scratch) {
  DCHECK_NE(char_code, scratch);
  if (v8_flags.debug_code) {
    cmp(char_code, Operand(String::kMaxOneByteCharCode));
    Assert(kUnsignedLessThanEqual, AbortReason::kUnexpectedValue);
  }
  Register table = scratch;
  LoadRoot(table, RootIndex::kSingleCharacterStringTable);
  add(table, table, Operand(char_code, LSL, kTaggedSizeLog2));
  ldr(result, FieldMemOperand(table, OFFSET_OF_DATA_START(FixedArray)));
}

void MaglevAssembler::StringFromCharCode(RegisterSnapshot register_snapshot,
                                         Label* char_code_fits_one_byte,
                                         Register result, Register char_code,
                                         Register scratch,
                                         CharCodeMaskMode mask_mode) {
  AssertZeroExtended(char_code);
  DCHECK_NE(char_code, scratch);
  ZoneLabelRef done(this);
  if (mask_mode == CharCodeMaskMode::kMustApplyMask) {
    and_(char_code, char_code, Operand(0xFFFF));
  }
  cmp(char_code, Operand(String::kMaxOneByteCharCode));
  JumpToDeferredIf(
      kUnsignedGreaterThan,
      [](MaglevAssembler* masm, RegisterSnapshot register_snapshot,
         ZoneLabelRef done, Register result, Register char_code,
         Register scratch) {
        // Be sure to save {char_code}. If it aliases with {result}, use
        // the scratch register.
        // TODO(victorgomes): This is probably not needed any more, because
        // we now ensure that results registers don't alias with inputs/temps.
        // Confirm, and drop this check.
        if (char_code == result) {
          __ Move(scratch, char_code);
          char_code = scratch;
        }
        DCHECK_NE(char_code, result);
        DCHECK(!register_snapshot.live_tagged_registers.has(char_code));
        register_snapshot.live_registers.set(char_code);
        __ AllocateTwoByteString(register_snapshot, result, 1);
        __ strh(char_code, FieldMemOperand(
                               result, OFFSET_OF_DATA_START(SeqTwoByteString)));
        __ b(*done);
      },
      register_snapshot, done, result, char_code, scratch);
  if (char_code_fits_one_byte != nullptr) {
    bind(char_code_fits_one_byte);
  }
  LoadSingleCharacterString(result, char_code, scratch);
  bind(*done);
}
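
// Illustrative aside (not part of the original file): StringFromCharCode
// masks the char code to 16 bits when requested (String.fromCharCode applies
// ToUint16) and then either reuses a cached single-character string or
// allocates a fresh two-byte string. Assuming String::kMaxOneByteCharCode is
// 0xFF, the split is:
namespace {
constexpr bool CharCodeFitsOneByteSketch(uint32_t char_code) {
  return (char_code & 0xFFFF) <= 0xFF;
}
static_assert(CharCodeFitsOneByteSketch(0x41),
              "'A' comes from the single-character string table");
static_assert(!CharCodeFitsOneByteSketch(0x20AC),
              "U+20AC needs a freshly allocated two-byte string");
}  // namespace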

void MaglevAssembler::StringCharCodeOrCodePointAt(
    BuiltinStringPrototypeCharCodeOrCodePointAt::Mode mode,
    RegisterSnapshot& register_snapshot, Register result, Register string,
    Register index, Register instance_type, Register scratch2,
    Label* result_fits_one_byte) {
  ZoneLabelRef done(this);
  Label seq_string;
  Label cons_string;
  Label sliced_string;

  Label* deferred_runtime_call = MakeDeferredCode(
      [](MaglevAssembler* masm,
         BuiltinStringPrototypeCharCodeOrCodePointAt::Mode mode,
         RegisterSnapshot register_snapshot, ZoneLabelRef done, Register result,
         Register string, Register index) {
        DCHECK(!register_snapshot.live_registers.has(result));
        DCHECK(!register_snapshot.live_registers.has(string));
        DCHECK(!register_snapshot.live_registers.has(index));
        {
          SaveRegisterStateForCall save_register_state(masm, register_snapshot);
          __ SmiTag(index);
          __ Push(string, index);
          __ Move(kContextRegister, masm->native_context().object());
          // This call does not throw nor can deopt.
          if (mode ==
              BuiltinStringPrototypeCharCodeOrCodePointAt::kCodePointAt) {
            __ CallRuntime(Runtime::kStringCodePointAt);
          } else {
            DCHECK_EQ(mode,
                      BuiltinStringPrototypeCharCodeOrCodePointAt::kCharCodeAt);
            __ CallRuntime(Runtime::kStringCharCodeAt);
          }
          save_register_state.DefineSafepoint();
          __ SmiUntag(kReturnRegister0);
          __ Move(result, kReturnRegister0);
        }
        __ b(*done);
      },
      mode, register_snapshot, done, result, string, index);

  // We might need to try more than once for ConsString, SlicedString and
  // ThinString.
  Label loop;
  bind(&loop);

  if (v8_flags.debug_code) {
    // Check if {string} is a string.
    AssertObjectTypeInRange(string, FIRST_STRING_TYPE, LAST_STRING_TYPE,
                            AbortReason::kUnexpectedValue);

    Register scratch = instance_type;
    ldr(scratch, FieldMemOperand(string, offsetof(String, length_)));
    cmp(index, scratch);
    Check(lo, AbortReason::kUnexpectedValue);
  }

  // Get instance type.
  LoadInstanceType(instance_type, string);

  {
    TemporaryRegisterScope temps(this);
    Register representation = temps.AcquireScratch();

    // TODO(victorgomes): Add fast path for external strings.
    and_(representation, instance_type, Operand(kStringRepresentationMask));
    cmp(representation, Operand(kSeqStringTag));
    b(eq, &seq_string);
    cmp(representation, Operand(kConsStringTag));
    b(eq, &cons_string);
    cmp(representation, Operand(kSlicedStringTag));
    b(eq, &sliced_string);
    cmp(representation, Operand(kThinStringTag));
    b(ne, deferred_runtime_call);
    // Fallthrough to thin string.
  }

  // Is a thin string.
  {
    ldr(string, FieldMemOperand(string, offsetof(ThinString, actual_)));
    b(&loop);
  }

  bind(&sliced_string);
  {
    TemporaryRegisterScope temps(this);
    Register offset = temps.AcquireScratch();

    LoadAndUntagTaggedSignedField(offset, string,
                                  offsetof(SlicedString, offset_));
    LoadTaggedField(string, string, offsetof(SlicedString, parent_));
    add(index, index, offset);
    b(&loop);
  }

  bind(&cons_string);
  {
    // Reuse the {instance_type} register here, since CompareRoot requires a
    // scratch register as well.
    Register second_string = instance_type;
    ldr(second_string, FieldMemOperand(string, offsetof(ConsString, second_)));
    CompareRoot(second_string, RootIndex::kempty_string);
    b(ne, deferred_runtime_call);
    ldr(string, FieldMemOperand(string, offsetof(ConsString, first_)));
    b(&loop);  // Try again with first string.
  }

  bind(&seq_string);
  {
    Label two_byte_string;
    tst(instance_type, Operand(kOneByteStringTag));
    b(eq, &two_byte_string);
    // The result of a one-byte string will be the same for both modes
    // (CharCodeAt/CodePointAt), since it cannot be the first half of a
    // surrogate pair.
    add(index, index,
        Operand(OFFSET_OF_DATA_START(SeqOneByteString) - kHeapObjectTag));
    ldrb(result, MemOperand(string, index));
    b(result_fits_one_byte);

    bind(&two_byte_string);
    // {instance_type} is unused from this point, so we can use it as scratch.
    Register scratch = instance_type;
    lsl(scratch, index, Operand(1));
    add(scratch, scratch,
        Operand(OFFSET_OF_DATA_START(SeqTwoByteString) - kHeapObjectTag));

    if (mode == BuiltinStringPrototypeCharCodeOrCodePointAt::kCharCodeAt) {
      ldrh(result, MemOperand(string, scratch));
    } else {
      DCHECK_EQ(mode,
                BuiltinStringPrototypeCharCodeOrCodePointAt::kCodePointAt);
      Register string_backup = string;
      if (result == string) {
        string_backup = scratch2;
        Move(string_backup, string);
      }
      ldrh(result, MemOperand(string, scratch));

      Register first_code_point = scratch;
      and_(first_code_point, result, Operand(0xfc00));
      cmp(first_code_point, Operand(0xd800));
      b(ne, *done);

      Register length = scratch;
      ldr(length, FieldMemOperand(string_backup, offsetof(String, length_)));
      add(index, index, Operand(1));
      cmp(index, length);
      b(ge, *done);

      Register second_code_point = scratch;
      lsl(index, index, Operand(1));
      add(index, index,
          Operand(OFFSET_OF_DATA_START(SeqTwoByteString) - kHeapObjectTag));
      ldrh(second_code_point, MemOperand(string_backup, index));

      // {index} is not needed at this point.
      Register scratch2 = index;
      and_(scratch2, second_code_point, Operand(0xfc00));
      cmp(scratch2, Operand(0xdc00));
      b(ne, *done);

      int surrogate_offset = 0x10000 - (0xd800 << 10) - 0xdc00;
      add(second_code_point, second_code_point, Operand(surrogate_offset));
      lsl(result, result, Operand(10));
      add(result, result, second_code_point);
    }

    // Fallthrough.
  }

  bind(*done);

  if (v8_flags.debug_code) {
    // Make sure that users of this macro are not relying on {string} and
    // {index} surviving; clobber them with a recognizable pattern.
    if (result != string) {
      Move(string, 0xdeadbeef);
    }
    if (result != index) {
      Move(index, 0xdeadbeef);
    }
  }
}
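
// Illustrative aside (not part of the original file): the surrogate_offset
// constant above folds the standard UTF-16 surrogate-pair decoding
//   0x10000 + ((lead - 0xD800) << 10) + (trail - 0xDC00)
// into the single add performed on {result}. Example pair for U+1F600
// (lead 0xD83D, trail 0xDE00):
static_assert((0xD83D << 10) + 0xDE00 + (0x10000 - (0xD800 << 10) - 0xDC00) ==
                  0x10000 + ((0xD83D - 0xD800) << 10) + (0xDE00 - 0xDC00),
              "the folded constant matches the standard decoding");
static_assert(
    0x10000 + ((0xD83D - 0xD800) << 10) + (0xDE00 - 0xDC00) == 0x1F600,
    "the example pair decodes to U+1F600");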

void MaglevAssembler::TruncateDoubleToInt32(Register dst, DoubleRegister src) {
  ZoneLabelRef done(this);
  Label* slow_path = MakeDeferredCode(
      [](MaglevAssembler* masm, DoubleRegister src, Register dst,
         ZoneLabelRef done) {
        __ push(lr);
        __ AllocateStackSpace(kDoubleSize);
        __ vstr(src, MemOperand(sp, 0));
        __ CallBuiltin(Builtin::kDoubleToI);
        __ ldr(dst, MemOperand(sp, 0));
        __ add(sp, sp, Operand(kDoubleSize));
        __ pop(lr);
        __ Jump(*done);
      },
      src, dst, done);
  TryInlineTruncateDoubleToI(dst, src, *done);
  Jump(slow_path);
  bind(*done);
}

void MaglevAssembler::TryTruncateDoubleToInt32(Register dst, DoubleRegister src,
                                               Label* fail) {
  UseScratchRegisterScope temps(this);
  LowDwVfpRegister low_double = temps.AcquireLowD();
  SwVfpRegister temp_vfps = low_double.low();
  DoubleRegister converted_back = low_double;
  Label done;

  // Convert the input float64 value to int32.
  vcvt_s32_f64(temp_vfps, src);
  vmov(dst, temp_vfps);

  // Convert that int32 value back to float64.
  vcvt_f64_s32(converted_back, temp_vfps);

  // Check that the result of the float64->int32->float64 round trip is equal
  // to the input (i.e. that the conversion didn't truncate).
  VFPCompareAndSetFlags(src, converted_back);
  JumpIf(kNotEqual, fail);

  // Check if {input} is -0.
  tst(dst, dst);
  JumpIf(kNotEqual, &done);

  // In case of 0, we need to check the high bits for the IEEE -0 pattern.
  {
    Register high_word32_of_input = temps.Acquire();
    VmovHigh(high_word32_of_input, src);
    cmp(high_word32_of_input, Operand(0));
    JumpIf(kLessThan, fail);
  }

  bind(&done);
}
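
// Illustrative aside (not part of the original file): when the truncated
// result is 0, only the sign bit distinguishes +0.0 from -0.0, and in the
// IEEE 754 encoding that bit lives in the upper 32-bit word. The
// VmovHigh/cmp/JumpIf(kLessThan) sequence above is equivalent to this plain
// C++ check (memcpy assumed visible through the existing includes):
namespace {
[[maybe_unused]] bool IsMinusZeroSketch(double value) {
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));
  int32_t high_word = static_cast<int32_t>(bits >> 32);
  return value == 0.0 && high_word < 0;  // -0.0 has high word 0x80000000
}
}  // namespace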

void MaglevAssembler::TryTruncateDoubleToUint32(Register dst,
                                                DoubleRegister src,
                                                Label* fail) {
  UseScratchRegisterScope temps(this);
  LowDwVfpRegister low_double = temps.AcquireLowD();
  SwVfpRegister temp_vfps = low_double.low();
  DoubleRegister converted_back = low_double;
  Label done;

  // Convert the input float64 value to uint32.
  vcvt_u32_f64(temp_vfps, src);
  vmov(dst, temp_vfps);

  // Convert that uint32 value back to float64.
  vcvt_f64_u32(converted_back, temp_vfps);

  // Check that the result of the float64->uint32->float64 round trip is equal
  // to the input (i.e. that the conversion didn't truncate).
  VFPCompareAndSetFlags(src, converted_back);
  JumpIf(kNotEqual, fail);

  // Check if {input} is -0.
  tst(dst, dst);
  JumpIf(kNotEqual, &done);

  // In case of 0, we need to check the high bits for the IEEE -0 pattern.
  {
    Register high_word32_of_input = temps.Acquire();
    VmovHigh(high_word32_of_input, src);
    cmp(high_word32_of_input, Operand(0));
    JumpIf(kLessThan, fail);
  }

  bind(&done);
}

void MaglevAssembler::TryChangeFloat64ToIndex(Register result,
                                              DoubleRegister value,
                                              Label* success, Label* fail) {
  UseScratchRegisterScope temps(this);
  LowDwVfpRegister low_double = temps.AcquireLowD();
  SwVfpRegister temp_vfps = low_double.low();
  DoubleRegister converted_back = low_double;
  // Convert the input float64 value to int32.
  vcvt_s32_f64(temp_vfps, value);
  vmov(result, temp_vfps);
  // Convert that int32 value back to float64.
  vcvt_f64_s32(converted_back, temp_vfps);
  // Check that the result of the float64->int32->float64 round trip is equal
  // to the input (i.e. that the conversion didn't truncate).
  VFPCompareAndSetFlags(value, converted_back);
  JumpIf(kNotEqual, fail);
  Jump(success);
}
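
// Illustrative aside (not part of the original file): the three Try* helpers
// above share the same round-trip idea: convert float64 -> int32 -> float64
// and compare against the original, so any fractional or out-of-range input
// fails. A plain C++ model for in-range inputs (the hardware vcvt saturates
// out-of-range values rather than invoking undefined behaviour):
namespace {
[[maybe_unused]] bool RoundTripsAsInt32Sketch(double value) {
  int32_t truncated = static_cast<int32_t>(value);  // in-range inputs only
  return static_cast<double>(truncated) == value;
}
}  // namespace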

}  // namespace maglev
}  // namespace internal
}  // namespace v8