v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
maglev-assembler-s390.cc
1// Copyright 2023 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/codegen/interface-descriptors-inl.h"
6#include "src/deoptimizer/deoptimizer.h"
7#include "src/maglev/maglev-assembler-inl.h"
8#include "src/maglev/maglev-graph.h"
9
10namespace v8 {
11namespace internal {
12namespace maglev {
13
14#define __ masm->
15
16namespace {
17void SubSizeAndTagObject(MaglevAssembler* masm, Register object,
18 Register size_in_bytes) {
19 __ SubS64(object, size_in_bytes);
20 __ AddS64(object, Operand(kHeapObjectTag));
21}
22
23void SubSizeAndTagObject(MaglevAssembler* masm, Register object,
24 int size_in_bytes) {
25 DCHECK(is_int20(kHeapObjectTag - size_in_bytes));
26 __ lay(object, MemOperand(object, kHeapObjectTag - size_in_bytes));
27}
28
29template <typename T>
30void AllocateRaw(MaglevAssembler* masm, Isolate* isolate,
31 RegisterSnapshot register_snapshot, Register object,
32 T size_in_bytes, AllocationType alloc_type,
33 AllocationAlignment alignment) {
34 // TODO(victorgomes): Call the runtime for large object allocation.
35 // TODO(victorgomes): Support double alignment.
36 DCHECK(masm->allow_allocate());
37 DCHECK_EQ(alignment, kTaggedAligned);
38 if (v8_flags.single_generation) {
39 alloc_type = AllocationType::kOld;
40 }
41 ExternalReference top = SpaceAllocationTopAddress(isolate, alloc_type);
42 ExternalReference limit = SpaceAllocationLimitAddress(isolate, alloc_type);
43 ZoneLabelRef done(masm);
44 MaglevAssembler::TemporaryRegisterScope temps(masm);
45 Register scratch = temps.AcquireScratch();
46 // We are a bit short on registers, so we use the same register for {object}
47 // and {new_top}. Once we have defined {new_top}, we don't use {object} until
48 // {new_top} is used for the last time. And there (at the end of this
49 // function), we recover the original {object} from {new_top} by subtracting
50 // {size_in_bytes}.
51 Register new_top = object;
52 // Check if there is enough space.
53 __ LoadU64(object, __ ExternalReferenceAsOperand(top, scratch));
54 __ AddU64(object, size_in_bytes);
55 __ LoadU64(scratch, __ ExternalReferenceAsOperand(limit, scratch));
56 __ CmpU64(new_top, scratch);
57 // Otherwise call runtime.
58 __ JumpToDeferredIf(ge, AllocateSlow<T>,
59 register_snapshot, object, AllocateBuiltin(alloc_type),
60 size_in_bytes, done);
61 // Store new top and tag object.
62 __ Move(__ ExternalReferenceAsOperand(top, scratch), new_top);
63 SubSizeAndTagObject(masm, object, size_in_bytes);
64 __ bind(*done);
65}
66} // namespace
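
The AllocateRaw helper above is a standard bump-pointer allocation: load the current allocation top, add the requested size, compare the new top against the space limit, and defer to the allocation builtin when the limit is reached; otherwise the new top is stored back and the object start is recovered and tagged by SubSizeAndTagObject. The following standalone C++ sketch (illustrative only, not part of this file; BumpAllocate and Space are made-up names) shows the same pointer arithmetic:

#include <cstdint>
#include <cstdio>

constexpr uintptr_t kHeapObjectTag = 1;  // V8 tags heap pointers with a low bit

struct Space {
  uintptr_t top;    // current allocation top
  uintptr_t limit;  // end of the currently usable region
};

// Returns a tagged address on success, or 0 where the real code would defer
// to the AllocateSlow path.
uintptr_t BumpAllocate(Space* space, uintptr_t size_in_bytes) {
  uintptr_t new_top = space->top + size_in_bytes;
  if (new_top >= space->limit) return 0;             // limit reached: slow path
  space->top = new_top;                              // store new top
  return new_top - size_in_bytes + kHeapObjectTag;   // SubSizeAndTagObject
}

int main() {
  Space space{0x1000, 0x2000};
  uintptr_t obj = BumpAllocate(&space, 0x20);
  std::printf("tagged: %#lx, top: %#lx\n", static_cast<unsigned long>(obj),
              static_cast<unsigned long>(space.top));
}
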
67
68void MaglevAssembler::Allocate(RegisterSnapshot register_snapshot,
69 Register object, int size_in_bytes,
70 AllocationType alloc_type,
71 AllocationAlignment alignment) {
72 AllocateRaw(this, isolate_, register_snapshot, object, size_in_bytes,
73 alloc_type, alignment);
74}
75
76void MaglevAssembler::Allocate(RegisterSnapshot register_snapshot,
77 Register object, Register size_in_bytes,
78 AllocationType alloc_type,
79 AllocationAlignment alignment) {
80 AllocateRaw(this, isolate_, register_snapshot, object, size_in_bytes,
81 alloc_type, alignment);
82}
83
84void MaglevAssembler::OSRPrologue(Graph* graph) {
85 TemporaryRegisterScope temps(this);
86 Register scratch = temps.AcquireScratch();
87
88 DCHECK(graph->is_osr());
89 CHECK(!graph->has_recursive_calls());
90
91 uint32_t source_frame_size =
92 graph->min_maglev_stackslots_for_unoptimized_frame_size();
93
94 if (v8_flags.debug_code) {
95 lgr(scratch, sp);
96 lay(scratch,
97 MemOperand(scratch, source_frame_size * kSystemPointerSize +
98 StandardFrameConstants::kFixedFrameSizeFromFp));
99 CmpU64(scratch, fp);
100 Assert(eq, AbortReason::kOsrUnexpectedStackSize);
101 }
102
103 uint32_t target_frame_size =
104 graph->tagged_stack_slots() + graph->untagged_stack_slots();
105 CHECK_LE(source_frame_size, target_frame_size);
106
107 if (source_frame_size < target_frame_size) {
108 ASM_CODE_COMMENT_STRING(this, "Growing frame for OSR");
109 uint32_t additional_tagged =
110 source_frame_size < graph->tagged_stack_slots()
111 ? graph->tagged_stack_slots() - source_frame_size
112 : 0;
113 if (additional_tagged) {
114 Move(scratch, 0);
115 }
116 for (size_t i = 0; i < additional_tagged; ++i) {
117 Push(scratch);
118 }
119 uint32_t size_so_far = source_frame_size + additional_tagged;
120 CHECK_LE(size_so_far, target_frame_size);
121 if (size_so_far < target_frame_size) {
122 lay(sp, MemOperand(
123 sp, -(target_frame_size - size_so_far) * kSystemPointerSize));
124 }
125 }
126}
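
OSRPrologue grows the unoptimized frame to the Maglev frame in two steps: any tagged slots beyond the source frame are pushed as zeros, and the remaining (untagged) difference is covered by moving sp. A minimal sketch of that size computation, with hypothetical names (not part of this file):

#include <cassert>
#include <cstdint>

struct OsrFrameGrowth {
  uint32_t tagged_to_push;    // slots that must be pushed as zero
  uint32_t untagged_to_skip;  // slots covered by adjusting sp only
};

OsrFrameGrowth ComputeOsrFrameGrowth(uint32_t source_frame_size,
                                     uint32_t tagged_stack_slots,
                                     uint32_t untagged_stack_slots) {
  uint32_t target_frame_size = tagged_stack_slots + untagged_stack_slots;
  assert(source_frame_size <= target_frame_size);
  // Only tagged slots not already covered by the source frame need zeroing.
  uint32_t tagged_to_push = source_frame_size < tagged_stack_slots
                                ? tagged_stack_slots - source_frame_size
                                : 0;
  uint32_t size_so_far = source_frame_size + tagged_to_push;
  return {tagged_to_push, target_frame_size - size_so_far};
}

int main() {
  OsrFrameGrowth g = ComputeOsrFrameGrowth(4, 6, 3);
  return g.tagged_to_push == 2 && g.untagged_to_skip == 3 ? 0 : 1;
}
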
127
128void MaglevAssembler::Prologue(Graph* graph) {
129 TemporaryRegisterScope temps(this);
130 temps.Include({r6, r8});
131 Register scratch = temps.AcquireScratch();
132 DCHECK(!graph->is_osr());
133
134 BailoutIfDeoptimized(scratch);
135
136 if (graph->has_recursive_calls()) {
137 bind(code_gen_state()->entry_label());
138 }
139
140#ifndef V8_ENABLE_LEAPTIERING
141 // Tiering support.
142 if (v8_flags.turbofan) {
143 using D = MaglevOptimizeCodeOrTailCallOptimizedCodeSlotDescriptor;
144 Register flags = D::GetRegisterParameter(D::kFlags);
145 Register feedback_vector = D::GetRegisterParameter(D::kFeedbackVector);
146 DCHECK(!AreAliased(flags, feedback_vector, kJavaScriptCallArgCountRegister,
147 kJSFunctionRegister, kContextRegister,
148 kJavaScriptCallNewTargetRegister));
149 DCHECK(!temps.Available().has(flags));
150 DCHECK(!temps.Available().has(feedback_vector));
151 Move(feedback_vector,
152 compilation_info()->toplevel_compilation_unit()->feedback().object());
153 TailCallBuiltin(Builtin::kMaglevOptimizeCodeOrTailCallOptimizedCodeSlot,
154 LoadFeedbackVectorFlagsAndCheckIfNeedsProcessing(
155 flags, feedback_vector, CodeKind::MAGLEV));
156 }
157#endif // !V8_ENABLE_LEAPTIERING
158
159 EnterFrame(StackFrame::MAGLEV);
160 // Save arguments in frame.
161 // TODO(leszeks): Consider eliding this frame if we don't make any calls
162 // that could clobber these registers.
163 Push(kContextRegister);
164 Push(kJSFunctionRegister); // Callee's JS function.
165 Push(kJavaScriptCallArgCountRegister); // Actual argument count.
166
167 // Initialize stack slots.
168 if (graph->tagged_stack_slots() > 0) {
169 ASM_CODE_COMMENT_STRING(this, "Initializing stack slots");
170 Move(scratch, 0);
171
172 // Magic value. Experimentally, an unroll size of 8 doesn't seem any
173 // worse than fully unrolled pushes.
174 const int kLoopUnrollSize = 8;
175 int tagged_slots = graph->tagged_stack_slots();
176 if (tagged_slots < kLoopUnrollSize) {
177 // If the frame is small enough, just unroll the frame fill
178 // completely.
179 for (int i = 0; i < tagged_slots; ++i) {
180 Push(scratch);
181 }
182 } else {
183 // Extract the first few slots to round to the unroll size.
184 int first_slots = tagged_slots % kLoopUnrollSize;
185 for (int i = 0; i < first_slots; ++i) {
186 Push(scratch);
187 }
188 Register unroll_counter = temps.AcquireScratch();
189 Move(unroll_counter, tagged_slots / kLoopUnrollSize);
190 // We enter the loop unconditionally, so make sure we need to loop at
191 // least once.
192 DCHECK_GT(tagged_slots / kLoopUnrollSize, 0);
193 Label loop;
194 bind(&loop);
195 for (int i = 0; i < kLoopUnrollSize; ++i) {
196 Push(scratch);
197 }
198 SubS32(unroll_counter, Operand(1));
199 bgt(&loop);
200 }
201 }
202 if (graph->untagged_stack_slots() > 0) {
203 // Extend sp by the size of the remaining untagged part of the frame,
204 // no need to initialise these.
205 lay(sp,
206 MemOperand(sp, -graph->untagged_stack_slots() * kSystemPointerSize));
207 }
208}
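
The tagged-slot initialization in Prologue pushes zero with a remainder-first, unroll-by-8 loop: tagged_slots % 8 pushes are peeled off first, then a counted loop pushes eight slots per iteration. A standalone sketch of the same structure (illustrative only; FillTaggedSlots is a made-up name):

#include <cstdint>
#include <cstdio>
#include <vector>

// Same shape as the prologue's fill loop: small frames are fully unrolled,
// larger ones peel the remainder and then loop in blocks of eight.
void FillTaggedSlots(std::vector<intptr_t>& stack, int tagged_slots) {
  const int kLoopUnrollSize = 8;
  if (tagged_slots < kLoopUnrollSize) {
    for (int i = 0; i < tagged_slots; ++i) stack.push_back(0);
    return;
  }
  int first_slots = tagged_slots % kLoopUnrollSize;  // peel the remainder
  for (int i = 0; i < first_slots; ++i) stack.push_back(0);
  for (int counter = tagged_slots / kLoopUnrollSize; counter > 0; --counter) {
    for (int i = 0; i < kLoopUnrollSize; ++i) stack.push_back(0);  // unrolled body
  }
}

int main() {
  std::vector<intptr_t> stack;
  FillTaggedSlots(stack, 19);
  std::printf("%zu slots\n", stack.size());  // 19
}
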
209
210void MaglevAssembler::MaybeEmitDeoptBuiltinsCall(size_t eager_deopt_count,
211 Label* eager_deopt_entry,
212 size_t lazy_deopt_count,
213 Label* lazy_deopt_entry) {}
214
215void MaglevAssembler::LoadSingleCharacterString(Register result,
216 Register char_code,
217 Register scratch) {
218 DCHECK_NE(char_code, scratch);
219 if (v8_flags.debug_code) {
220 CmpU32(char_code, Operand(String::kMaxOneByteCharCode));
221 Assert(le, AbortReason::kUnexpectedValue);
222 }
223 Register table = scratch;
224 LoadRoot(table, RootIndex::kSingleCharacterStringTable);
225 LoadTaggedFieldByIndex(result, table, char_code, kTaggedSize,
226 OFFSET_OF_DATA_START(FixedArray));
227}
228
229void MaglevAssembler::StringFromCharCode(RegisterSnapshot register_snapshot,
230 Label* char_code_fits_one_byte,
231 Register result, Register char_code,
232 Register scratch,
233 CharCodeMaskMode mask_mode) {
234 AssertZeroExtended(char_code);
235 DCHECK_NE(char_code, scratch);
236 ZoneLabelRef done(this);
237 if (mask_mode == CharCodeMaskMode::kMustApplyMask) {
238 AndP(char_code, char_code, Operand(0xFFFF));
239 }
240 CmpU32(char_code, Operand(String::kMaxOneByteCharCode));
241 JumpToDeferredIf(
242 gt,
243 [](MaglevAssembler* masm, RegisterSnapshot register_snapshot,
244 ZoneLabelRef done, Register result, Register char_code,
245 Register scratch) {
246 // Be sure to save {char_code}. If it aliases with {result}, use
247 // the scratch register.
248 // TODO(victorgomes): This is probably not needed any more, because
249 // we now ensure that result registers don't alias with inputs/temps.
250 // Confirm, and drop this check.
251 if (char_code == result) {
252 __ Move(scratch, char_code);
253 char_code = scratch;
254 }
255 DCHECK(char_code != result);
256 DCHECK(!register_snapshot.live_tagged_registers.has(char_code));
257 register_snapshot.live_registers.set(char_code);
258 __ AllocateTwoByteString(register_snapshot, result, 1);
259 __ StoreU16(
260 char_code,
261 FieldMemOperand(result, OFFSET_OF_DATA_START(SeqTwoByteString)));
262 __ b(*done);
263 },
264 register_snapshot, done, result, char_code, scratch);
265 if (char_code_fits_one_byte != nullptr) {
266 bind(char_code_fits_one_byte);
267 }
268 LoadSingleCharacterString(result, char_code, scratch);
269 bind(*done);
270}
271
272void MaglevAssembler::StringCharCodeOrCodePointAt(
273 BuiltinStringPrototypeCharCodeOrCodePointAt::Mode mode,
274 RegisterSnapshot& register_snapshot, Register result, Register string,
275 Register index, Register instance_type, Register scratch2,
276 Label* result_fits_one_byte) {
277 ZoneLabelRef done(this);
278 Label seq_string;
279 Label cons_string;
280 Label sliced_string;
281
282 Label* deferred_runtime_call = MakeDeferredCode(
283 [](MaglevAssembler* masm,
284 BuiltinStringPrototypeCharCodeOrCodePointAt::Mode mode,
285 RegisterSnapshot register_snapshot, ZoneLabelRef done, Register result,
286 Register string, Register index) {
287 DCHECK(!register_snapshot.live_registers.has(result));
288 DCHECK(!register_snapshot.live_registers.has(string));
289 DCHECK(!register_snapshot.live_registers.has(index));
290 {
291 SaveRegisterStateForCall save_register_state(masm, register_snapshot);
292 __ SmiTag(index);
293 __ Push(string, index);
294 __ Move(kContextRegister, masm->native_context().object());
295 // This call does not throw nor can deopt.
296 if (mode ==
297 BuiltinStringPrototypeCharCodeOrCodePointAt::kCodePointAt) {
298 __ CallRuntime(Runtime::kStringCodePointAt);
299 } else {
300 DCHECK_EQ(mode,
301 BuiltinStringPrototypeCharCodeOrCodePointAt::kCharCodeAt);
302 __ CallRuntime(Runtime::kStringCharCodeAt);
303 }
304 save_register_state.DefineSafepoint();
305 __ SmiUntag(kReturnRegister0);
306 __ Move(result, kReturnRegister0);
307 }
308 __ b(*done);
309 },
310 mode, register_snapshot, done, result, string, index);
311
312 // We might need to try more than one time for ConsString, SlicedString and
313 // ThinString.
314 Label loop;
315 bind(&loop);
316
317 if (v8_flags.debug_code) {
318 // Check if {string} is a string.
319 AssertObjectTypeInRange(string, FIRST_STRING_TYPE, LAST_STRING_TYPE,
320 AbortReason::kUnexpectedValue);
321
322 Register scratch = instance_type;
323
324 LoadU32(scratch, FieldMemOperand(string, offsetof(String, length_)));
325 CmpS32(index, scratch);
326 Check(lt, AbortReason::kUnexpectedValue);
327 }
328
329 // Get instance type.
330 LoadInstanceType(instance_type, string);
331
332 {
333 TemporaryRegisterScope temps(this);
334 Register representation = temps.AcquireScratch();
335
336 // TODO(victorgomes): Add fast path for external strings.
337 And(representation, instance_type, Operand(kStringRepresentationMask));
338 CmpS32(representation, Operand(kSeqStringTag));
339 beq(&seq_string);
340 CmpS32(representation, Operand(kConsStringTag));
341 beq(&cons_string);
342 CmpS32(representation, Operand(kSlicedStringTag));
343 beq(&sliced_string);
344 CmpS32(representation, Operand(kThinStringTag));
345 bne(deferred_runtime_call);
346 // Fallthrough to thin string.
347 }
348
349 // Is a thin string.
350 {
351 LoadTaggedField(string,
352 FieldMemOperand(string, offsetof(ThinString, actual_)));
353 b(&loop);
354 }
355
356 bind(&sliced_string);
357 {
358 TemporaryRegisterScope temps(this);
359 Register offset = temps.AcquireScratch();
360
361 LoadAndUntagTaggedSignedField(offset, string,
362 offsetof(SlicedString, offset_));
363 LoadTaggedField(string, string, offsetof(SlicedString, parent_));
364 AddS32(index, index, offset);
365 b(&loop);
366 }
367
368 bind(&cons_string);
369 {
370 // Reuse {instance_type} register here, since CompareRoot requires a scratch
371 // register as well.
372 Register second_string = instance_type;
373 LoadU64(second_string,
374 FieldMemOperand(string, offsetof(ConsString, second_)));
375 CompareRoot(second_string, RootIndex::kempty_string);
376 bne(deferred_runtime_call);
377 LoadTaggedField(string,
378 FieldMemOperand(string, offsetof(ConsString, first_)));
379 b(&loop); // Try again with first string.
380 }
381
382 bind(&seq_string);
383 {
384 Label two_byte_string;
385 And(instance_type, Operand(kStringEncodingMask));
386 CmpS32(instance_type, Operand(kTwoByteStringTag));
387 beq(&two_byte_string);
388 // The result for a one-byte string will be the same for both modes
389 // (CharCodeAt/CodePointAt), since it cannot be the first half of a
390 // surrogate pair.
391 // AndP(index, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
392 LoadU8(result, FieldMemOperand(string, index,
393 OFFSET_OF_DATA_START(SeqOneByteString)));
394 b(result_fits_one_byte);
395
396 bind(&two_byte_string);
397 // {instance_type} is unused from this point, so we can use it as scratch.
398 Register scratch = instance_type;
399 ShiftLeftU64(scratch, index, Operand(1));
400 AddU64(scratch,
401 Operand(OFFSET_OF_DATA_START(SeqTwoByteString) - kHeapObjectTag));
402
403 if (mode == BuiltinStringPrototypeCharCodeOrCodePointAt::kCharCodeAt) {
404 LoadU16(result, MemOperand(string, scratch));
405 } else {
406 DCHECK_EQ(mode,
407 BuiltinStringPrototypeCharCodeOrCodePointAt::kCodePointAt);
408 Register string_backup = string;
409 if (result == string) {
410 string_backup = scratch2;
411 Move(string_backup, string);
412 }
413 LoadU16(result, MemOperand(string, scratch));
414
415 Register first_code_point = scratch;
416 And(first_code_point, result, Operand(0xfc00));
417 CmpS32(first_code_point, Operand(0xd800));
418 bne(*done);
419
420 Register length = scratch;
421 LoadU32(length, FieldMemOperand(string, offsetof(String, length_)));
422 AddS32(index, index, Operand(1));
423 CmpS32(index, length);
424 bge(*done);
425
426 Register second_code_point = scratch;
427 ShiftLeftU32(index, index, Operand(1));
428 AddU32(index,
429 Operand(OFFSET_OF_DATA_START(SeqTwoByteString) - kHeapObjectTag));
430 LoadU16(second_code_point, MemOperand(string_backup, index));
431
432 // {index} is not needed at this point.
433 Register scratch2 = index;
434 And(scratch2, second_code_point, Operand(0xfc00));
435 CmpS32(scratch2, Operand(0xdc00));
436 bne(*done);
437
438 int surrogate_offset = 0x10000 - (0xd800 << 10) - 0xdc00;
439 AddS32(second_code_point, second_code_point, Operand(surrogate_offset));
440 ShiftLeftU32(result, result, Operand(10));
441 AddS32(result, result, second_code_point);
442 }
443
444 // Fallthrough.
445 }
446
447 bind(*done);
448
449 if (v8_flags.debug_code) {
450 // We make sure that the user of this macro is not relying on {string} and
451 // {index} not being clobbered.
452 if (result != string) {
453 Move(string, 0xdeadbeef);
454 }
455 if (result != index) {
456 Move(index, 0xdeadbeef);
457 }
458 }
459}
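
In the two-byte CodePointAt path above, a lead surrogate (0xD800..0xDBFF) followed by a trail surrogate (0xDC00..0xDFFF) is combined using surrogate_offset = 0x10000 - (0xd800 << 10) - 0xdc00, so the combination is just a shift and two additions. A standalone check of that arithmetic (illustrative only; CombineSurrogates is a made-up name):

#include <cassert>
#include <cstdint>
#include <cstdio>

// (lead << 10) + trail + surrogate_offset, as in the CodePointAt path.
uint32_t CombineSurrogates(uint32_t lead, uint32_t trail) {
  assert((lead & 0xfc00) == 0xd800);   // same lead-surrogate test as the code
  assert((trail & 0xfc00) == 0xdc00);  // same trail-surrogate test as the code
  const int32_t surrogate_offset = 0x10000 - (0xd800 << 10) - 0xdc00;
  return (lead << 10) + trail + surrogate_offset;
}

int main() {
  // U+1F600 is encoded as the surrogate pair 0xD83D 0xDE00 in UTF-16.
  std::printf("%#x\n", static_cast<unsigned>(CombineSurrogates(0xD83D, 0xDE00)));
}
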
460
461void MaglevAssembler::TruncateDoubleToInt32(Register dst, DoubleRegister src) {
462 ZoneLabelRef done(this);
463 Label* slow_path = MakeDeferredCode(
464 [](MaglevAssembler* masm, DoubleRegister src, Register dst,
465 ZoneLabelRef done) {
466 __ push(r14);
467 __ AllocateStackSpace(kDoubleSize);
468 __ StoreF64(src, MemOperand(sp));
469 __ CallBuiltin(Builtin::kDoubleToI);
470 __ LoadU64(dst, MemOperand(sp));
471 __ lay(sp, MemOperand(sp, kDoubleSize));
472 __ pop(r14);
473 __ Jump(*done);
474 },
475 src, dst, done);
476 TryInlineTruncateDoubleToI(dst, src, *done);
477 Jump(slow_path);
478 bind(*done);
479 // Zero extend the converted value to complete the truncation.
480 LoadU32(dst, dst);
481}
482
483void MaglevAssembler::TryTruncateDoubleToInt32(Register dst, DoubleRegister src,
484 Label* fail) {
485 TemporaryRegisterScope temps(this);
486 DoubleRegister temp = temps.AcquireScratchDouble();
487 Label done;
488
489 // Convert the input float64 value to int32.
490 ConvertDoubleToInt32(dst, src);
491
492 // Convert that int32 value back to float64.
493 ConvertIntToDouble(temp, dst);
494
495 // Check that the result of the float64->int32->float64 is equal to the input
496 // (i.e. that the conversion didn't truncate).
497 CmpF64(src, temp);
498 JumpIf(ne, fail);
499
500 // Check if {input} is -0.
501 CmpS32(dst, Operand::Zero());
502 JumpIf(ne, &done);
503
504 // In case of 0, we need to check the high bits for the IEEE -0 pattern.
505 {
506 MovDoubleToInt64(r1, src);
507 ShiftRightS64(r1, r1, Operand(63));
508 CmpS64(r1, Operand(0));
509 JumpIf(lt, fail);
510 }
511
512 bind(&done);
513}
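
TryTruncateDoubleToInt32 above succeeds only when the float64->int32->float64 round trip reproduces the input exactly and, for a zero result, the sign bit does not indicate -0. A standalone sketch of that predicate in plain C++ (illustrative only, not part of this file; TryTruncateToInt32 is a stand-in name, and the range guard replaces the hardware conversion):

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <optional>

std::optional<int32_t> TryTruncateToInt32(double value) {
  // Guard the range so the cast below is well defined (the assembly relies on
  // the conversion instruction instead); NaN also fails this check.
  if (!(value >= -2147483648.0 && value <= 2147483647.0)) return std::nullopt;
  int32_t truncated = static_cast<int32_t>(value);         // float64 -> int32
  double round_tripped = static_cast<double>(truncated);   // int32 -> float64
  if (round_tripped != value) return std::nullopt;         // value was truncated
  if (truncated == 0) {
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof bits);
    if (bits >> 63) return std::nullopt;                   // IEEE -0.0 pattern
  }
  return truncated;
}

int main() {
  std::printf("%d %d %d\n", TryTruncateToInt32(42.0).has_value(),
              TryTruncateToInt32(42.5).has_value(),
              TryTruncateToInt32(-0.0).has_value());  // 1 0 0
}
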
514
515void MaglevAssembler::TryTruncateDoubleToUint32(Register dst,
516 DoubleRegister src,
517 Label* fail) {
518 TemporaryRegisterScope temps(this);
519 DoubleRegister temp = temps.AcquireScratchDouble();
520 Label done;
521
522 // Convert the input float64 value to uint32.
523 ConvertDoubleToUnsignedInt32(dst, src);
524
525 // Convert that uint32 value back to float64.
526 ConvertUnsignedIntToDouble(temp, dst);
527
528 // Check that the result of the float64->uint32->float64 is equal to the input
529 // (i.e. that the conversion didn't truncate).
530 CmpF64(src, temp);
531 JumpIf(ne, fail);
532
533 // Check if {input} is -0.
534 CmpS32(dst, Operand::Zero());
535 JumpIf(ne, &done);
536
537 // In case of 0, we need to check the high bits for the IEEE -0 pattern.
538 {
539 MovDoubleToInt64(r1, src);
540 ShiftRightS64(r1, r1, Operand(63));
541 CmpS64(r1, Operand(0));
542 JumpIf(lt, fail);
543 }
544
545 bind(&done);
546}
547
548void MaglevAssembler::TryChangeFloat64ToIndex(Register result,
549 DoubleRegister value,
550 Label* success, Label* fail) {
551 TemporaryRegisterScope temps(this);
552 DoubleRegister temp = temps.AcquireScratchDouble();
553 // Convert the input float64 value to int32.
554 ConvertDoubleToInt32(result, value);
555 // Convert that int32 value back to float64.
556 ConvertIntToDouble(temp, result);
557 // Check that the result of the float64->int32->float64 is equal to
558 // the input (i.e. that the conversion didn't truncate).
559 CmpF64(value, temp);
560 JumpIf(ne, fail);
561 Jump(success);
562}
563
564} // namespace maglev
565} // namespace internal
566} // namespace v8