v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
maglev-assembler-x64.cc
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/logging.h"

namespace v8 {
namespace internal {
namespace maglev {

#define __ masm->

namespace {
void LoadNewAllocationTop(MaglevAssembler* masm, Register new_top,
                          Register object, int size_in_bytes) {
  __ leaq(new_top, Operand(object, size_in_bytes));
}

void LoadNewAllocationTop(MaglevAssembler* masm, Register new_top,
                          Register object, Register size_in_bytes) {
  __ Move(new_top, object);
  __ addq(new_top, size_in_bytes);
}

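// AllocateRaw implements the inline bump-pointer fast path: it loads the
// space's allocation top, computes the new top, and compares it against the
// allocation limit. If the object does not fit, a deferred call to
// AllocateSlow invokes the allocation builtin under the given register
// snapshot. The returned object is tagged by adding kHeapObjectTag.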
template <typename T>
void AllocateRaw(MaglevAssembler* masm, Isolate* isolate,
                 RegisterSnapshot register_snapshot, Register object,
                 T size_in_bytes, AllocationType alloc_type,
                 AllocationAlignment alignment) {
  // TODO(victorgomes): Call the runtime for large object allocation.
  // TODO(victorgomes): Support double alignment.
  DCHECK_EQ(alignment, kTaggedAligned);
  if (v8_flags.single_generation) {
    alloc_type = AllocationType::kOld;
  }
  ExternalReference top = SpaceAllocationTopAddress(isolate, alloc_type);
  ExternalReference limit = SpaceAllocationLimitAddress(isolate, alloc_type);
  ZoneLabelRef done(masm);
  Register new_top = kScratchRegister;
  // Check if there is enough space.
  __ Move(object, __ ExternalReferenceAsOperand(top));
  LoadNewAllocationTop(masm, new_top, object, size_in_bytes);
  __ cmpq(new_top, __ ExternalReferenceAsOperand(limit));
  // Otherwise call runtime.
  __ JumpToDeferredIf(kUnsignedGreaterThanEqual, AllocateSlow<T>,
                      register_snapshot, object, AllocateBuiltin(alloc_type),
                      size_in_bytes, done);
  // Store new top and tag object.
  __ movq(__ ExternalReferenceAsOperand(top), new_top);
  __ addq(object, Immediate(kHeapObjectTag));
  __ bind(*done);
}
}  // namespace

void MaglevAssembler::Allocate(RegisterSnapshot register_snapshot,
                               Register object, int size_in_bytes,
                               AllocationType alloc_type,
                               AllocationAlignment alignment) {
  AllocateRaw(this, isolate_, register_snapshot, object, size_in_bytes,
              alloc_type, alignment);
}

void MaglevAssembler::Allocate(RegisterSnapshot register_snapshot,
                               Register object, Register size_in_bytes,
                               AllocationType alloc_type,
                               AllocationAlignment alignment) {
  AllocateRaw(this, isolate_, register_snapshot, object, size_in_bytes,
              alloc_type, alignment);
}

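// Loads the cached one-character string for a char code in the range
// [0, String::kMaxOneByteCharCode] from the single character string table
// root (a FixedArray indexed by char code).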
void MaglevAssembler::LoadSingleCharacterString(Register result,
                                                Register char_code,
                                                Register scratch) {
  AssertZeroExtended(char_code);
  if (v8_flags.debug_code) {
    cmpq(char_code, Immediate(String::kMaxOneByteCharCode));
    Assert(below_equal, AbortReason::kUnexpectedValue);
  }
  DCHECK_NE(char_code, scratch);
  Register table = scratch;
  LoadRoot(table, RootIndex::kSingleCharacterStringTable);
  LoadTaggedFieldByIndex(result, table, char_code, kTaggedSize,
                         OFFSET_OF_DATA_START(FixedArray));
}

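// Materializes a string from a char code. Char codes up to
// String::kMaxOneByteCharCode are served from the single character string
// table; larger char codes allocate a one-element SeqTwoByteString in
// deferred code.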
void MaglevAssembler::StringFromCharCode(RegisterSnapshot register_snapshot,
                                         Label* char_code_fits_one_byte,
                                         Register result, Register char_code,
                                         Register scratch,
                                         CharCodeMaskMode mask_mode) {
  DCHECK_NE(char_code, scratch);
  ZoneLabelRef done(this);
  if (mask_mode == CharCodeMaskMode::kMustApplyMask) {
    andl(char_code, Immediate(0xFFFF));
  }
  cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
  JumpToDeferredIf(
      above,
      [](MaglevAssembler* masm, RegisterSnapshot register_snapshot,
         ZoneLabelRef done, Register result, Register char_code,
         Register scratch) {
        // Be sure to save {char_code}. If it aliases with {result}, use
        // the scratch register.
        // TODO(victorgomes): This is probably not needed any more, because
        // we now ensure that result registers don't alias with inputs/temps.
        // Confirm, and drop this check.
        if (char_code == result) {
          // This is guaranteed to be true since we've already checked
          // char_code != scratch.
          DCHECK_NE(scratch, result);
          __ Move(scratch, char_code);
          char_code = scratch;
        }
        DCHECK(!register_snapshot.live_tagged_registers.has(char_code));
        register_snapshot.live_registers.set(char_code);
        __ AllocateTwoByteString(register_snapshot, result, 1);
        __ movw(FieldOperand(result, OFFSET_OF_DATA_START(SeqTwoByteString)),
                char_code);
        __ jmp(*done);
      },
      register_snapshot, done, result, char_code, scratch);
  if (char_code_fits_one_byte != nullptr) {
    bind(char_code_fits_one_byte);
  }
  LoadSingleCharacterString(result, char_code, scratch);
  bind(*done);
}

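// Loads the char code (or code point) of the character at {index} in
// {string}. Thin strings, sliced strings and cons strings with an empty
// second part are unwrapped in a loop until a sequential string is found;
// all other cases fall back to the runtime via a deferred call. In
// kCodePointAt mode, a lead surrogate is combined with a following trail
// surrogate when present.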
void MaglevAssembler::StringCharCodeOrCodePointAt(
    BuiltinStringPrototypeCharCodeOrCodePointAt::Mode mode,
    RegisterSnapshot& register_snapshot, Register result, Register string,
    Register index, Register scratch1, Register scratch2,
    Label* result_fits_one_byte) {
  ZoneLabelRef done(this);
  Label seq_string;
  Label cons_string;
  Label sliced_string;

  Label* deferred_runtime_call = MakeDeferredCode(
      [](MaglevAssembler* masm,
         BuiltinStringPrototypeCharCodeOrCodePointAt::Mode mode,
         RegisterSnapshot register_snapshot, ZoneLabelRef done, Register result,
         Register string, Register index) {
        DCHECK(!register_snapshot.live_registers.has(result));
        DCHECK(!register_snapshot.live_registers.has(string));
        DCHECK(!register_snapshot.live_registers.has(index));
        {
          SaveRegisterStateForCall save_register_state(masm, register_snapshot);
          __ Push(string);
          __ SmiTag(index);
          __ Push(index);
          __ Move(kContextRegister, masm->native_context().object());
          // This call does not throw and cannot deopt.
          if (mode ==
              BuiltinStringPrototypeCharCodeOrCodePointAt::kCodePointAt) {
            __ CallRuntime(Runtime::kStringCodePointAt);
          } else {
            DCHECK_EQ(mode,
                      BuiltinStringPrototypeCharCodeOrCodePointAt::kCharCodeAt);
            __ CallRuntime(Runtime::kStringCharCodeAt);
          }
          save_register_state.DefineSafepoint();
          __ SmiUntag(kReturnRegister0);
          __ Move(result, kReturnRegister0);
        }
        __ jmp(*done);
      },
      mode, register_snapshot, done, result, string, index);

  // We might need to try more than once for ConsString, SlicedString and
  // ThinString.
  Label loop;
  bind(&loop);

  if (v8_flags.debug_code) {
    // Check if {string} is a string.
    AssertNotSmi(string);
    LoadMap(scratch1, string);
    CmpInstanceTypeRange(scratch1, scratch1, FIRST_STRING_TYPE,
                         LAST_STRING_TYPE);
    Check(below_equal, AbortReason::kUnexpectedValue);

    movl(scratch1, FieldOperand(string, offsetof(String, length_)));
    cmpl(index, scratch1);
    Check(below, AbortReason::kUnexpectedValue);
  }

#if V8_STATIC_ROOTS_BOOL
  Register map = scratch1;
  LoadMapForCompare(map, string);
#else
  Register instance_type = scratch1;
  // Get instance type.
  LoadInstanceType(instance_type, string);
#endif

  {
#if V8_STATIC_ROOTS_BOOL
    using StringTypeRange = InstanceTypeChecker::kUniqueMapRangeOfStringType;
    // Check the string map ranges in dense increasing order, to avoid needing
    // to subtract away the lower bound.
    static_assert(StringTypeRange::kSeqString.first == 0);
    CompareInt32AndJumpIf(map, StringTypeRange::kSeqString.second,
                          kUnsignedLessThanEqual, &seq_string, Label::kNear);

    static_assert(StringTypeRange::kSeqString.second + Map::kSize ==
                  StringTypeRange::kExternalString.first);
    CompareInt32AndJumpIf(map, StringTypeRange::kExternalString.second,
                          kUnsignedLessThanEqual, deferred_runtime_call);
    // TODO(victorgomes): Add fast path for external strings.

    static_assert(StringTypeRange::kExternalString.second + Map::kSize ==
                  StringTypeRange::kConsString.first);
    CompareInt32AndJumpIf(map, StringTypeRange::kConsString.second,
                          kUnsignedLessThanEqual, &cons_string, Label::kNear);

    static_assert(StringTypeRange::kConsString.second + Map::kSize ==
                  StringTypeRange::kSlicedString.first);
    CompareInt32AndJumpIf(map, StringTypeRange::kSlicedString.second,
                          kUnsignedLessThanEqual, &sliced_string, Label::kNear);

    static_assert(StringTypeRange::kSlicedString.second + Map::kSize ==
                  StringTypeRange::kThinString.first);
    // No need to check for thin strings, they're the last string map.
    static_assert(StringTypeRange::kThinString.second ==
                  InstanceTypeChecker::kStringMapUpperBound);
    // Fallthrough to thin string.
#else
    // TODO(victorgomes): Add fast path for external strings.
    Register representation = kScratchRegister;
    movl(representation, instance_type);
    andl(representation, Immediate(kStringRepresentationMask));
    cmpl(representation, Immediate(kSeqStringTag));
    j(equal, &seq_string, Label::kNear);
    cmpl(representation, Immediate(kConsStringTag));
    j(equal, &cons_string, Label::kNear);
    cmpl(representation, Immediate(kSlicedStringTag));
    j(equal, &sliced_string, Label::kNear);
    cmpl(representation, Immediate(kThinStringTag));
    j(not_equal, deferred_runtime_call);
    // Fallthrough to thin string.
#endif
  }

  // Is a thin string.
  {
    LoadTaggedField(string, string, offsetof(ThinString, actual_));
    jmp(&loop, Label::kNear);
  }

  bind(&sliced_string);
  {
    Register offset = scratch1;
    LoadAndUntagTaggedSignedField(offset, string,
                                  offsetof(SlicedString, offset_));
    LoadTaggedField(string, string, offsetof(SlicedString, parent_));
    addl(index, offset);
    jmp(&loop, Label::kNear);
  }

  bind(&cons_string);
  {
    CompareRoot(FieldOperand(string, offsetof(ConsString, second_)),
                RootIndex::kempty_string);
    j(not_equal, deferred_runtime_call);
    LoadTaggedField(string, string, offsetof(ConsString, first_));
    jmp(&loop, Label::kNear);  // Try again with first string.
  }

  bind(&seq_string);
  {
    Label two_byte_string;
#if V8_STATIC_ROOTS_BOOL
    if (InstanceTypeChecker::kTwoByteStringMapBit == 0) {
      TestInt32AndJumpIfAllClear(map,
                                 InstanceTypeChecker::kStringMapEncodingMask,
                                 &two_byte_string, Label::kNear);
    } else {
      TestInt32AndJumpIfAnySet(map, InstanceTypeChecker::kStringMapEncodingMask,
                               &two_byte_string, Label::kNear);
    }
#else
    andl(instance_type, Immediate(kStringEncodingMask));
    cmpl(instance_type, Immediate(kTwoByteStringTag));
    j(equal, &two_byte_string, Label::kNear);
#endif
    // The result for a one-byte string is the same in both modes
    // (CharCodeAt/CodePointAt), since a one-byte character cannot be the
    // first half of a surrogate pair.
    movzxbl(result, FieldOperand(string, index, times_1,
                                 OFFSET_OF_DATA_START(SeqOneByteString)));
    jmp(result_fits_one_byte);
    bind(&two_byte_string);

    if (mode == BuiltinStringPrototypeCharCodeOrCodePointAt::kCharCodeAt) {
      movzxwl(result, FieldOperand(string, index, times_2,
                                   OFFSET_OF_DATA_START(SeqTwoByteString)));
    } else {
      DCHECK_EQ(mode,
                BuiltinStringPrototypeCharCodeOrCodePointAt::kCodePointAt);
      Register string_backup = string;
      if (result == string) {
        string_backup = scratch2;
        movq(string_backup, string);
      }
      movzxwl(result, FieldOperand(string, index, times_2,
                                   OFFSET_OF_DATA_START(SeqTwoByteString)));

      Register first_code_point = scratch1;
      movl(first_code_point, result);
      andl(first_code_point, Immediate(0xfc00));
      cmpl(first_code_point, Immediate(0xd800));
      j(not_equal, *done);

      Register length = scratch1;
      StringLength(length, string_backup);
      incl(index);
      cmpl(index, length);
      j(greater_equal, *done);

      Register second_code_point = scratch1;
      movzxwl(second_code_point,
              FieldOperand(string_backup, index, times_2,
                           OFFSET_OF_DATA_START(SeqTwoByteString)));
      movl(scratch2, second_code_point);
      andl(scratch2, Immediate(0xfc00));
      cmpl(scratch2, Immediate(0xdc00));
      j(not_equal, *done);

      // Combine the surrogate pair: the code point is
      // (lead << 10) + trail + (0x10000 - (0xD800 << 10) - 0xDC00).
      int surrogate_offset = 0x10000 - (0xd800 << 10) - 0xdc00;
      addl(second_code_point, Immediate(surrogate_offset));
      shll(result, Immediate(10));
      addl(result, second_code_point);
    }

    // Fallthrough.
  }

  bind(*done);

  if (v8_flags.debug_code) {
    // Make sure that the user of this macro is not relying on {string} and
    // {index} being preserved: clobber them with a recognizable value.
    if (result != string) {
      movl(string, Immediate(0xdeadbeef));
    }
    if (result != index) {
      movl(index, Immediate(0xdeadbeef));
    }
  }
}

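// Truncates a float64 to an int32. Values that the 64-bit cvttsd2si fast path
// cannot handle take a deferred path through the DoubleToI builtin; the final
// movl zero-extends so only the low 32 bits of the result are kept.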
void MaglevAssembler::TruncateDoubleToInt32(Register dst, DoubleRegister src) {
  ZoneLabelRef done(this);

  Cvttsd2siq(dst, src);
  // Check whether the Cvt overflowed. Cvttsd2siq produces the sentinel
  // 0x8000000000000000 (INT64_MIN) on overflow, and that is the only value
  // for which `cmpq dst, 1` sets the overflow flag.
  cmpq(dst, Immediate(1));
  JumpToDeferredIf(
      overflow,
      [](MaglevAssembler* masm, DoubleRegister src, Register dst,
         ZoneLabelRef done) {
        // Push the double register onto the stack as an input argument.
        __ AllocateStackSpace(kDoubleSize);
        __ Movsd(MemOperand(rsp, 0), src);
        __ CallBuiltin(Builtin::kDoubleToI);
        // DoubleToI sets the result on the stack, pop the result off the stack.
        // Avoid using `pop` to not mix implicit and explicit rsp updates.
        __ movl(dst, MemOperand(rsp, 0));
        __ addq(rsp, Immediate(kDoubleSize));
        __ jmp(*done);
      },
      src, dst, done);
  bind(*done);
  // Zero extend the converted value to complete the truncation.
  movl(dst, dst);
}

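// Tries to convert a float64 to an int32 without loss: the value is
// truncated, converted back to float64 and compared against the input. Any
// mismatch (including NaN, detected via the parity flag) or a negative-zero
// input jumps to {fail}.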
void MaglevAssembler::TryTruncateDoubleToInt32(Register dst, DoubleRegister src,
                                               Label* fail) {
  // Truncating conversion of the input float64 value to an int32.
  Cvttpd2dq(kScratchDoubleReg, src);
  // Convert that int32 value back to float64.
  Cvtdq2pd(kScratchDoubleReg, kScratchDoubleReg);
  // Check that the result of the float64->int32->float64 is equal to the input
  // (i.e. that the conversion didn't truncate).
  Ucomisd(kScratchDoubleReg, src);
  JumpIf(parity_even, fail);
  JumpIf(not_equal, fail);

  // Move to general purpose register.
  Cvttsd2si(dst, src);

  // Check if {input} is -0.
  Label check_done;
  cmpl(dst, Immediate(0));
  j(not_equal, &check_done);

  // In case of 0, we need to check the high bits for the IEEE -0 pattern.
  Register high_word32_of_input = kScratchRegister;
  Pextrd(high_word32_of_input, src, 1);
  cmpl(high_word32_of_input, Immediate(0));
  JumpIf(less, fail);

  bind(&check_done);
}

void MaglevAssembler::TryTruncateDoubleToUint32(Register dst,
                                                DoubleRegister src,
                                                Label* fail) {
  DoubleRegister converted_back = kScratchDoubleReg;

  // Convert the input float64 value to int64.
  Cvttsd2siq(dst, src);
  // Truncate and zero extend to uint32.
  movl(dst, dst);
  // Convert that value back to float64.
  Cvtqsi2sd(converted_back, dst);
  // Check that the result of the float64->uint32->float64 is equal to the
  // input (i.e. that the conversion didn't truncate).
  Ucomisd(src, converted_back);
  JumpIf(parity_even, fail);
  JumpIf(not_equal, fail);

  // Check if {input} is -0.
  Label check_done;
  cmpl(dst, Immediate(0));
  j(not_equal, &check_done);

  // In case of 0, we need to check the high bits for the IEEE -0 pattern.
  Register high_word32_of_input = kScratchRegister;
  Pextrd(high_word32_of_input, src, 1);
  cmpl(high_word32_of_input, Immediate(0));
  JumpIf(less, fail);

  bind(&check_done);
}

void MaglevAssembler::TryChangeFloat64ToIndex(Register result,
                                              DoubleRegister value,
                                              Label* success, Label* fail) {
  // Truncating conversion of the input float64 value to an int32.
  Cvttpd2dq(kScratchDoubleReg, value);
  // Convert that int32 value back to float64.
  Cvtdq2pd(kScratchDoubleReg, kScratchDoubleReg);
  // Check that the result of the float64->int32->float64 is equal to
  // the input (i.e. that the conversion didn't truncate).
  Ucomisd(value, kScratchDoubleReg);
  JumpIf(parity_even, fail);
  JumpIf(kNotEqual, fail);

  // Move to general purpose register.
  Cvttsd2si(result, value);
  Jump(success);
}

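// Prologue for on-stack-replacement code: the unoptimized frame is already on
// the stack, so this only (optionally) verifies its size and then grows the
// frame to the target size, pushing zeros for any additional tagged slots.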
void MaglevAssembler::OSRPrologue(Graph* graph) {
  DCHECK(graph->is_osr());
  CHECK(!graph->has_recursive_calls());

  uint32_t source_frame_size =
      graph->min_maglev_stackslots_for_unoptimized_frame_size();

  if (V8_ENABLE_SANDBOX_BOOL || v8_flags.debug_code) {
    movq(kScratchRegister, rbp);
    subq(kScratchRegister, rsp);
    cmpq(kScratchRegister,
         Immediate(source_frame_size * kSystemPointerSize +
                   StandardFrameConstants::kFixedFrameSizeFromFp));
    SbxCheck(equal, AbortReason::kOsrUnexpectedStackSize);
  }

  uint32_t target_frame_size =
      graph->tagged_stack_slots() + graph->untagged_stack_slots();
  CHECK_LE(source_frame_size, target_frame_size);

  if (source_frame_size < target_frame_size) {
    ASM_CODE_COMMENT_STRING(this, "Growing frame for OSR");
    Move(kScratchRegister, 0);
    uint32_t additional_tagged =
        source_frame_size < graph->tagged_stack_slots()
            ? graph->tagged_stack_slots() - source_frame_size
            : 0;
    for (size_t i = 0; i < additional_tagged; ++i) {
      pushq(kScratchRegister);
    }
    uint32_t size_so_far = source_frame_size + additional_tagged;
    CHECK_LE(size_so_far, target_frame_size);
    if (size_so_far < target_frame_size) {
      subq(rsp,
           Immediate((target_frame_size - size_so_far) * kSystemPointerSize));
    }
  }
}

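// Regular (non-OSR) prologue: emit the tiering check (unless Leaptiering is
// enabled), build the Maglev frame, and zero-initialize the tagged stack
// slots so they never hold uninitialized values when the GC walks the frame.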
void MaglevAssembler::Prologue(Graph* graph) {
  DCHECK(!graph->is_osr());

  CodeEntry();

  if (graph->has_recursive_calls()) {
    BindJumpTarget(code_gen_state()->entry_label());
  }

#ifndef V8_ENABLE_LEAPTIERING
  // Tiering support.
  if (v8_flags.turbofan) {
    using D = MaglevOptimizeCodeOrTailCallOptimizedCodeSlotDescriptor;
    Register feedback_vector = D::GetRegisterParameter(D::kFeedbackVector);
    DCHECK(!AreAliased(feedback_vector, kJavaScriptCallArgCountRegister,
                       kJSFunctionRegister, kContextRegister,
                       kJavaScriptCallNewTargetRegister,
                       kJavaScriptCallDispatchHandleRegister));
    Move(feedback_vector,
         compilation_info()->toplevel_compilation_unit()->feedback().object());
    TailCallBuiltin(Builtin::kMaglevOptimizeCodeOrTailCallOptimizedCodeSlot,
                    CheckFeedbackVectorFlagsNeedsProcessing(feedback_vector,
                                                            CodeKind::MAGLEV));
  }
#endif  // !V8_ENABLE_LEAPTIERING

  EnterFrame(StackFrame::MAGLEV);
  // Save arguments in frame.
  // TODO(leszeks): Consider eliding this frame if we don't make any calls
  // that could clobber these registers.
  Push(kContextRegister);
  Push(kJSFunctionRegister);              // Callee's JS function.
  Push(kJavaScriptCallArgCountRegister);  // Actual argument count.

  // Initialize stack slots.
  if (graph->tagged_stack_slots() > 0) {
    ASM_CODE_COMMENT_STRING(this, "Initializing stack slots");
    // TODO(leszeks): Consider filling with xmm + movdqa instead.
    Move(rax, 0);

    // Magic value. Experimentally, an unroll size of 8 doesn't seem any
    // worse than fully unrolled pushes.
    const int kLoopUnrollSize = 8;
    int tagged_slots = graph->tagged_stack_slots();
    if (tagged_slots < 2 * kLoopUnrollSize) {
      // If the frame is small enough, just unroll the frame fill
      // completely.
      for (int i = 0; i < tagged_slots; ++i) {
        pushq(rax);
      }
    } else {
      // Extract the first few slots to round to the unroll size.
      int first_slots = tagged_slots % kLoopUnrollSize;
      for (int i = 0; i < first_slots; ++i) {
        pushq(rax);
      }
      Move(rbx, tagged_slots / kLoopUnrollSize);
      // We enter the loop unconditionally, so make sure we need to loop at
      // least once.
      DCHECK_GT(tagged_slots / kLoopUnrollSize, 0);
      Label loop;
      bind(&loop);
      for (int i = 0; i < kLoopUnrollSize; ++i) {
        pushq(rax);
      }
      decl(rbx);
      j(greater, &loop);
    }
  }
  if (graph->untagged_stack_slots() > 0) {
    // Extend rsp by the size of the remaining untagged part of the frame,
    // no need to initialise these.
    subq(rsp, Immediate(graph->untagged_stack_slots() * kSystemPointerSize));
  }
}

void MaglevAssembler::MaybeEmitDeoptBuiltinsCall(size_t eager_deopt_count,
                                                 Label* eager_deopt_entry,
                                                 size_t lazy_deopt_count,
                                                 Label* lazy_deopt_entry) {}

}  // namespace maglev
}  // namespace internal
}  // namespace v8