v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
macro-assembler-x64.cc
1// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include <climits>
6#include <cstdint>
7
9
10#if V8_TARGET_ARCH_X64
11
12#include <optional>
13
14#include "src/base/bits.h"
28#include "src/common/globals.h"
29#include "src/debug/debug.h"
37#include "src/objects/smi.h"
40
41// Satisfy cpplint check, but don't include platform-specific header. It is
42// included recursively via macro-assembler.h.
43#if 0
44#include "src/codegen/x64/macro-assembler-x64.h"
45#endif
46
47#define __ ACCESS_MASM(masm)
48
49namespace v8 {
50namespace internal {
51
52Operand StackArgumentsAccessor::GetArgumentOperand(int index) const {
53 DCHECK_GE(index, 0);
54 // arg[0] = rsp + kPCOnStackSize;
55 // arg[i] = arg[0] + i * kSystemPointerSize;
56 return Operand(rsp, kPCOnStackSize + index * kSystemPointerSize);
57}
58
59void MacroAssembler::CodeEntry() {
60 endbr64();
61}
62
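// Loads the value referenced by {source} into {destination}. When the root
// array is available and root-relative access is enabled, this is a single
// kRootRegister-relative load; otherwise it falls back to load_rax or a
// generic external-reference operand.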
63void MacroAssembler::Load(Register destination, ExternalReference source) {
64 if (root_array_available_ && options().enable_root_relative_access) {
65 intptr_t delta = RootRegisterOffsetForExternalReference(isolate(), source);
66 if (is_int32(delta)) {
67 movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
68 return;
69 }
70 }
71 // Safe code.
72 if (destination == rax && !options().isolate_independent_code) {
73 load_rax(source);
74 } else {
75 movq(destination, ExternalReferenceAsOperand(source));
76 }
77}
78
79void MacroAssembler::Store(ExternalReference destination, Register source) {
80 if (root_array_available_ && options().enable_root_relative_access) {
81 intptr_t delta =
82 RootRegisterOffsetForExternalReference(isolate(), destination);
83 if (is_int32(delta)) {
84 movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
85 return;
86 }
87 }
88 // Safe code.
89 if (source == rax && !options().isolate_independent_code) {
90 store_rax(destination);
91 } else {
92 movq(ExternalReferenceAsOperand(destination), source);
93 }
94}
95
96void MacroAssembler::LoadFromConstantsTable(Register destination,
97 int constant_index) {
98 DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
99 LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
100 LoadTaggedField(destination,
101 FieldOperand(destination, FixedArray::OffsetOfElementAt(
102 constant_index)));
103}
104
105void MacroAssembler::LoadRootRegisterOffset(Register destination,
106 intptr_t offset) {
107 DCHECK(is_int32(offset));
108 if (offset == 0) {
109 Move(destination, kRootRegister);
110 } else {
111 leaq(destination, Operand(kRootRegister, static_cast<int32_t>(offset)));
112 }
113}
114
115void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) {
116 movq(destination, Operand(kRootRegister, offset));
117}
118
119void MacroAssembler::StoreRootRelative(int32_t offset, Register value) {
120 movq(Operand(kRootRegister, offset), value);
121}
122
123void MacroAssembler::LoadAddress(Register destination,
124 ExternalReference source) {
125 if (root_array_available()) {
126 if (source.IsIsolateFieldId()) {
127 leaq(destination,
128 Operand(kRootRegister, source.offset_from_root_register()));
129 return;
130 }
131 if (options().enable_root_relative_access) {
132 intptr_t delta =
133 RootRegisterOffsetForExternalReference(isolate(), source);
134 if (is_int32(delta)) {
135 leaq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
136 return;
137 }
138 } else if (options().isolate_independent_code) {
139 IndirectLoadExternalReference(destination, source);
140 return;
141 }
142 }
143 Move(destination, source);
144}
145
146Operand MacroAssembler::ExternalReferenceAsOperand(ExternalReference reference,
147 Register scratch) {
148 if (root_array_available()) {
149 if (reference.IsIsolateFieldId()) {
150 return Operand(kRootRegister, reference.offset_from_root_register());
151 }
152 if (options().enable_root_relative_access) {
153 int64_t delta =
154 RootRegisterOffsetForExternalReference(isolate(), reference);
155 if (is_int32(delta)) {
156 return Operand(kRootRegister, static_cast<int32_t>(delta));
157 }
158 }
159 if (options().isolate_independent_code) {
160 if (IsAddressableThroughRootRegister(isolate(), reference)) {
161 // Some external references can be efficiently loaded as an offset from
162 // kRootRegister.
163 intptr_t offset =
164 RootRegisterOffsetForExternalReference(isolate(), reference);
165 CHECK(is_int32(offset));
166 return Operand(kRootRegister, static_cast<int32_t>(offset));
167 } else {
168 // Otherwise, do a memory load from the external reference table.
169 movq(scratch, Operand(kRootRegister,
170 RootRegisterOffsetForExternalReferenceTableEntry(
171 isolate(), reference)));
172 return Operand(scratch, 0);
173 }
174 }
175 }
176 Move(scratch, reference);
177 return Operand(scratch, 0);
178}
179
180void MacroAssembler::PushAddress(ExternalReference source) {
181 LoadAddress(kScratchRegister, source);
182 Push(kScratchRegister);
183}
184
185Operand MacroAssembler::RootAsOperand(RootIndex index) {
186 DCHECK(root_array_available());
187 return Operand(kRootRegister, RootRegisterOffsetForRootIndex(index));
188}
189
190void MacroAssembler::LoadTaggedRoot(Register destination, RootIndex index) {
191 static_assert(!CanBeImmediate(RootIndex::kUndefinedValue) ||
192 std::is_same_v<Tagged_t, uint32_t>);
193 if (CanBeImmediate(index)) {
194 mov_tagged(destination,
195 Immediate(static_cast<uint32_t>(ReadOnlyRootPtr(index))));
196 return;
197 }
198 DCHECK(root_array_available_);
199 mov_tagged(destination, RootAsOperand(index));
200}
201
202void MacroAssembler::LoadRoot(Register destination, RootIndex index) {
203 if (CanBeImmediate(index)) {
204 DecompressTagged(destination,
205 static_cast<uint32_t>(ReadOnlyRootPtr(index)));
206 return;
207 }
208 DCHECK(root_array_available_);
209 movq(destination, RootAsOperand(index));
210}
211
212void MacroAssembler::PushRoot(RootIndex index) {
213 DCHECK(root_array_available_);
214 Push(RootAsOperand(index));
215}
216
217void MacroAssembler::CompareRoot(Register with, RootIndex index,
218 ComparisonMode mode) {
219 if (mode == ComparisonMode::kFullPointer ||
220 !base::IsInRange(index, RootIndex::kFirstStrongOrReadOnlyRoot,
221 RootIndex::kLastStrongOrReadOnlyRoot)) {
222 // Some smi roots contain system pointer size values like stack limits.
223 cmpq(with, RootAsOperand(index));
224 return;
225 }
226 CompareTaggedRoot(with, index);
227}
228
229void MacroAssembler::CompareTaggedRoot(Register with, RootIndex index) {
230 AssertSmiOrHeapObjectInMainCompressionCage(with);
231 if (CanBeImmediate(index)) {
232 cmp_tagged(with, Immediate(static_cast<uint32_t>(ReadOnlyRootPtr(index))));
233 return;
234 }
235 DCHECK(root_array_available_);
236 // Some smi roots contain system pointer size values like stack limits.
237 DCHECK(base::IsInRange(index, RootIndex::kFirstStrongOrReadOnlyRoot,
238 RootIndex::kLastStrongOrReadOnlyRoot));
239 cmp_tagged(with, RootAsOperand(index));
240}
241
242void MacroAssembler::CompareRoot(Operand with, RootIndex index) {
243 if (CanBeImmediate(index)) {
244 cmp_tagged(with, Immediate(static_cast<uint32_t>(ReadOnlyRootPtr(index))));
245 return;
246 }
248 DCHECK(!with.AddressUsesRegister(kScratchRegister));
249 if (base::IsInRange(index, RootIndex::kFirstStrongOrReadOnlyRoot,
250 RootIndex::kLastStrongOrReadOnlyRoot)) {
251 mov_tagged(kScratchRegister, RootAsOperand(index));
252 cmp_tagged(with, kScratchRegister);
253 } else {
254 // Some smi roots contain system pointer size values like stack limits.
255 LoadRoot(kScratchRegister, index);
256 cmpq(with, kScratchRegister);
257 }
258}
259
260void MacroAssembler::LoadCompressedMap(Register destination, Register object) {
262 mov_tagged(destination, FieldOperand(object, HeapObject::kMapOffset));
263}
264
265void MacroAssembler::LoadMap(Register destination, Register object) {
266 LoadTaggedField(destination, FieldOperand(object, HeapObject::kMapOffset));
267#ifdef V8_MAP_PACKING
268 UnpackMapWord(destination);
269#endif
270}
271
272void MacroAssembler::LoadFeedbackVector(Register dst, Register closure,
273 Label* fbv_undef,
274 Label::Distance distance) {
275 Label done;
276
277 // Load the feedback vector from the closure.
278 TaggedRegister feedback_cell(dst);
279 LoadTaggedField(feedback_cell,
280 FieldOperand(closure, JSFunction::kFeedbackCellOffset));
281 LoadTaggedField(dst, FieldOperand(feedback_cell, FeedbackCell::kValueOffset));
282
283 // Check if feedback vector is valid.
284 IsObjectType(dst, FEEDBACK_VECTOR_TYPE, rcx);
285 j(equal, &done, Label::kNear);
286
287 // Not valid, load undefined.
288 LoadRoot(dst, RootIndex::kUndefinedValue);
289 jmp(fbv_undef, distance);
290
291 bind(&done);
292}
293
294void MacroAssembler::LoadTaggedField(Register destination,
295 Operand field_operand) {
296 if (COMPRESS_POINTERS_BOOL) {
297 DecompressTagged(destination, field_operand);
298 } else {
299 mov_tagged(destination, field_operand);
300 }
301}
302
303void MacroAssembler::LoadTaggedField(TaggedRegister destination,
304 Operand field_operand) {
305 LoadTaggedFieldWithoutDecompressing(destination.reg(), field_operand);
306}
307
308void MacroAssembler::LoadTaggedFieldWithoutDecompressing(
309 Register destination, Operand field_operand) {
310 mov_tagged(destination, field_operand);
311}
312
313#ifdef V8_MAP_PACKING
314void MacroAssembler::UnpackMapWord(Register r) {
315 // Clear the top two bytes (which may include metadata). Must be in sync with
316 // MapWord::Unpack, and vice versa.
317 shlq(r, Immediate(16));
318 shrq(r, Immediate(16));
319 xorq(r, Immediate(Internals::kMapWordXorMask));
320}
321#endif
322
323void MacroAssembler::LoadTaggedSignedField(Register destination,
324 Operand field_operand) {
325 if (COMPRESS_POINTERS_BOOL) {
326 DecompressTaggedSigned(destination, field_operand);
327 } else {
328 mov_tagged(destination, field_operand);
329 }
330}
331
332void MacroAssembler::PushTaggedField(Operand field_operand, Register scratch) {
333 if (COMPRESS_POINTERS_BOOL) {
334 DCHECK(!field_operand.AddressUsesRegister(scratch));
335 DecompressTagged(scratch, field_operand);
336 Push(scratch);
337 } else {
338 Push(field_operand);
339 }
340}
341
342void MacroAssembler::SmiUntagField(Register dst, Operand src) {
343 SmiUntag(dst, src);
344}
345
346void MacroAssembler::SmiUntagFieldUnsigned(Register dst, Operand src) {
347 SmiUntagUnsigned(dst, src);
348}
349
350void MacroAssembler::StoreTaggedField(Operand dst_field_operand,
351 Immediate value) {
352 if (COMPRESS_POINTERS_BOOL) {
353 movl(dst_field_operand, value);
354 } else {
355 movq(dst_field_operand, value);
356 }
357}
358
359void MacroAssembler::StoreTaggedField(Operand dst_field_operand,
360 Register value) {
361 if (COMPRESS_POINTERS_BOOL) {
362 movl(dst_field_operand, value);
363 } else {
364 movq(dst_field_operand, value);
365 }
366}
367
368void MacroAssembler::StoreTaggedSignedField(Operand dst_field_operand,
369 Tagged<Smi> value) {
370 if (SmiValuesAre32Bits()) {
371 Move(kScratchRegister, value);
372 movq(dst_field_operand, kScratchRegister);
373 } else {
374 StoreTaggedField(dst_field_operand, Immediate(value));
375 }
376}
377
378void MacroAssembler::AtomicStoreTaggedField(Operand dst_field_operand,
379 Register value) {
380 if (COMPRESS_POINTERS_BOOL) {
381 movl(kScratchRegister, value);
382 xchgl(kScratchRegister, dst_field_operand);
383 } else {
384 movq(kScratchRegister, value);
385 xchgq(kScratchRegister, dst_field_operand);
386 }
387}
388
389void MacroAssembler::DecompressTaggedSigned(Register destination,
390 Operand field_operand) {
391 ASM_CODE_COMMENT(this);
392 movl(destination, field_operand);
393}
394
395void MacroAssembler::DecompressTagged(Register destination,
396 Operand field_operand) {
397 ASM_CODE_COMMENT(this);
398 movl(destination, field_operand);
399 addq(destination, kPtrComprCageBaseRegister);
400}
401
402void MacroAssembler::DecompressTagged(Register destination, Register source) {
403 ASM_CODE_COMMENT(this);
404 movl(destination, source);
405 addq(destination, kPtrComprCageBaseRegister);
406}
407
408void MacroAssembler::DecompressTagged(Register destination,
409 Tagged_t immediate) {
410 ASM_CODE_COMMENT(this);
411 leaq(destination,
412 Operand(kPtrComprCageBaseRegister, static_cast<int32_t>(immediate)));
413}
414
415void MacroAssembler::DecompressProtected(Register destination,
416 Operand field_operand) {
417#if V8_ENABLE_SANDBOX
418 ASM_CODE_COMMENT(this);
419 movl(destination, field_operand);
421 orq(destination,
422 Operand{kRootRegister, IsolateData::trusted_cage_base_offset()});
423#else
424 UNREACHABLE();
425#endif // V8_ENABLE_SANDBOX
426}
427
428void MacroAssembler::RecordWriteField(Register object, int offset,
429 Register value, Register slot_address,
430 SaveFPRegsMode save_fp,
431 SmiCheck smi_check,
432 ReadOnlyCheck ro_check,
433 SlotDescriptor slot) {
434 ASM_CODE_COMMENT(this);
435 DCHECK(!AreAliased(object, value, slot_address));
436 // First, check if a write barrier is even needed. The tests below
437 // catch stores of Smis and read-only objects.
438 Label done;
439
440#if V8_STATIC_ROOTS_BOOL
441 if (ro_check == ReadOnlyCheck::kInline) {
442 // Quick check for Read-only and small Smi values.
443 static_assert(StaticReadOnlyRoot::kLastAllocatedRoot < kRegularPageSize);
445 }
446#endif // V8_STATIC_ROOTS_BOOL
447
448 // Skip barrier if writing a smi.
449 if (smi_check == SmiCheck::kInline) {
450 JumpIfSmi(value, &done);
451 }
452
453 // Although the object register is tagged, the offset is relative to the start
454 // of the object, so the offset must be a multiple of kTaggedSize.
455 DCHECK(IsAligned(offset, kTaggedSize));
456
457 leaq(slot_address, FieldOperand(object, offset));
458 if (v8_flags.slow_debug_code) {
459 ASM_CODE_COMMENT_STRING(this, "Debug check slot_address");
460 Label ok;
461 testb(slot_address, Immediate(kTaggedSize - 1));
462 j(zero, &ok, Label::kNear);
463 int3();
464 bind(&ok);
465 }
466
467 RecordWrite(object, slot_address, value, save_fp, SmiCheck::kOmit,
468 ReadOnlyCheck::kOmit, slot);
469
470 bind(&done);
471
472 // Clobber clobbered input registers when running with the debug-code flag
473 // turned on to provoke errors.
474 if (v8_flags.slow_debug_code) {
475 ASM_CODE_COMMENT_STRING(this, "Zap scratch registers");
476 Move(value, kZapValue, RelocInfo::NO_INFO);
477 Move(slot_address, kZapValue, RelocInfo::NO_INFO);
478 }
479}
480
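// Sandboxed pointers are stored as offsets from the pointer-compression cage
// base, shifted left by kSandboxedPointerShift. Encode/Decode convert between
// raw pointers and this representation.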
481void MacroAssembler::EncodeSandboxedPointer(Register value) {
482 ASM_CODE_COMMENT(this);
483#ifdef V8_ENABLE_SANDBOX
484 subq(value, kPtrComprCageBaseRegister);
485 shlq(value, Immediate(kSandboxedPointerShift));
486#else
487 UNREACHABLE();
488#endif
489}
490
491void MacroAssembler::DecodeSandboxedPointer(Register value) {
492 ASM_CODE_COMMENT(this);
493#ifdef V8_ENABLE_SANDBOX
494 shrq(value, Immediate(kSandboxedPointerShift));
495 addq(value, kPtrComprCageBaseRegister);
496#else
497 UNREACHABLE();
498#endif
499}
500
501void MacroAssembler::LoadSandboxedPointerField(Register destination,
502 Operand field_operand) {
503 ASM_CODE_COMMENT(this);
504 movq(destination, field_operand);
505 DecodeSandboxedPointer(destination);
506}
507
508void MacroAssembler::StoreSandboxedPointerField(Operand dst_field_operand,
509 Register value) {
510 ASM_CODE_COMMENT(this);
512 DCHECK(!dst_field_operand.AddressUsesRegister(kScratchRegister));
513 movq(kScratchRegister, value);
514 EncodeSandboxedPointer(kScratchRegister);
515 movq(dst_field_operand, kScratchRegister);
516}
517
519 Register destination, Operand field_operand,
520 ExternalPointerTagRange tag_range, Register scratch,
521 IsolateRootLocation isolateRootLocation) {
522 DCHECK(!AreAliased(destination, scratch));
523#ifdef V8_ENABLE_SANDBOX
524 DCHECK(!tag_range.IsEmpty());
526 DCHECK(!field_operand.AddressUsesRegister(scratch));
527 if (isolateRootLocation == IsolateRootLocation::kInRootRegister) {
529 // TODO(saelo): consider using an ExternalReference here.
530 movq(scratch,
531 Operand(kRootRegister,
532 IsolateData::external_pointer_table_offset() +
534 } else {
535 DCHECK(isolateRootLocation == IsolateRootLocation::kInScratchRegister);
536 movq(scratch,
537 Operand(scratch,
538 IsolateData::external_pointer_table_offset() +
540 }
541 movl(destination, field_operand);
542 shrq(destination, Immediate(kExternalPointerIndexShift));
543 static_assert(kExternalPointerTableEntrySize == 8);
544 movq(destination, Operand(scratch, destination, times_8, 0));
545
546 // We don't expect to see empty fields here. If this is ever needed, consider
547 // using a dedicated empty value entry for those tags instead (i.e. an entry
548 // with the right tag and nullptr payload).
550
551 if (tag_range.Size() == 1) {
552 // The common and simple case: we expect exactly one tag.
553 movq(scratch, destination);
554 shrq(scratch, Immediate(kExternalPointerTagShift));
555 andl(scratch, Immediate(kExternalPointerShiftedTagMask));
556 cmpl(scratch, Immediate(tag_range.first));
557 SbxCheck(equal, AbortReason::kExternalPointerTagMismatch);
558 movq(scratch, Immediate64(kExternalPointerPayloadMask));
559 andq(destination, scratch);
560 } else {
561 // Not currently supported. Implement once needed.
563 UNREACHABLE();
564 }
565#else
566 movq(destination, field_operand);
567#endif // V8_ENABLE_SANDBOX
568}
569
571 Operand field_operand,
573 Register scratch) {
574#ifdef V8_ENABLE_SANDBOX
575 LoadIndirectPointerField(destination, field_operand, tag, scratch);
576#else
577 LoadTaggedField(destination, field_operand);
578#endif // V8_ENABLE_SANDBOX
579}
580
581void MacroAssembler::StoreTrustedPointerField(Operand dst_field_operand,
582 Register value) {
583#ifdef V8_ENABLE_SANDBOX
584 StoreIndirectPointerField(dst_field_operand, value);
585#else
586 StoreTaggedField(dst_field_operand, value);
587#endif // V8_ENABLE_SANDBOX
588}
589
591 Operand field_operand,
593 Register scratch) {
594#ifdef V8_ENABLE_SANDBOX
595 DCHECK(!AreAliased(destination, scratch));
596 Register handle = scratch;
597 movl(handle, field_operand);
598 ResolveIndirectPointerHandle(destination, handle, tag);
599#else
600 UNREACHABLE();
601#endif // V8_ENABLE_SANDBOX
602}
603
604void MacroAssembler::StoreIndirectPointerField(Operand dst_field_operand,
605 Register value) {
606#ifdef V8_ENABLE_SANDBOX
608 FieldOperand(value, ExposedTrustedObject::kSelfIndirectPointerOffset));
609 movl(dst_field_operand, kScratchRegister);
610#else
611 UNREACHABLE();
612#endif // V8_ENABLE_SANDBOX
613}
614
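// Handle resolution for the sandbox: indirect pointer handles are indices into
// either the trusted pointer table or the code pointer table, selected by the
// handle's marker bit or by the statically known tag.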
615#ifdef V8_ENABLE_SANDBOX
616void MacroAssembler::ResolveIndirectPointerHandle(Register destination,
617 Register handle,
618 IndirectPointerTag tag) {
619 // The tag implies which pointer table to use.
620 if (tag == kUnknownIndirectPointerTag) {
621 // In this case we have to rely on the handle marking to determine which
622 // pointer table to use.
623 Label is_trusted_pointer_handle, done;
624 testl(handle, Immediate(kCodePointerHandleMarker));
625 j(zero, &is_trusted_pointer_handle, Label::kNear);
626 ResolveCodePointerHandle(destination, handle);
627 jmp(&done, Label::kNear);
628 bind(&is_trusted_pointer_handle);
629 ResolveTrustedPointerHandle(destination, handle,
631 bind(&done);
632 } else if (tag == kCodeIndirectPointerTag) {
633 ResolveCodePointerHandle(destination, handle);
634 } else {
635 ResolveTrustedPointerHandle(destination, handle, tag);
636 }
637}
638
639void MacroAssembler::ResolveTrustedPointerHandle(Register destination,
640 Register handle,
641 IndirectPointerTag tag) {
642 DCHECK_NE(tag, kCodeIndirectPointerTag);
644 shrl(handle, Immediate(kTrustedPointerHandleShift));
645 static_assert(kTrustedPointerTableEntrySize == 8);
648 Operand{kRootRegister, IsolateData::trusted_pointer_table_offset()});
649 movq(destination, Operand{destination, handle, times_8, 0});
650 // Untag the pointer and remove the marking bit in one operation.
651 Register tag_reg = handle;
652 movq(tag_reg, Immediate64(~(tag | kTrustedPointerTableMarkBit)));
653 andq(destination, tag_reg);
654}
655
656void MacroAssembler::ResolveCodePointerHandle(Register destination,
657 Register handle) {
659 Register table = destination;
660 LoadCodePointerTableBase(table);
661 shrl(handle, Immediate(kCodePointerHandleShift));
662 // The code pointer table entry size is 16 bytes, so we have to do an
663 // explicit shift first (times_16 doesn't exist).
667 // The LSB is used as marking bit by the code pointer table, so here we have
668 // to set it using a bitwise OR as it may or may not be set.
669 orq(destination, Immediate(kHeapObjectTag));
670}
671
672void MacroAssembler::LoadCodeEntrypointViaCodePointer(Register destination,
673 Operand field_operand,
674 CodeEntrypointTag tag) {
676 DCHECK(!field_operand.AddressUsesRegister(kScratchRegister));
678 LoadCodePointerTableBase(kScratchRegister);
679 movl(destination, field_operand);
680 shrl(destination, Immediate(kCodePointerHandleShift));
683 if (tag != 0) {
684 // Can this be improved?
685 movq(kScratchRegister, Immediate64(tag));
687 }
688}
689
690void MacroAssembler::LoadCodePointerTableBase(Register destination) {
691#ifdef V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
692 if (!options().isolate_independent_code && isolate()) {
693 // Embed the code pointer table address into the code.
695 ExternalReference::code_pointer_table_base_address(isolate()));
696 } else {
697 // Force indirect load via root register as a workaround for
698 // isolate-independent code (for example, for Wasm).
701 }
702#else
703 // Embed the code pointer table address into the code.
705 ExternalReference::global_code_pointer_table_base_address());
706#endif // V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
707}
708#endif // V8_ENABLE_SANDBOX
709
710#ifdef V8_ENABLE_LEAPTIERING
711void MacroAssembler::LoadEntrypointFromJSDispatchTable(
712 Register destination, Register dispatch_handle) {
713 DCHECK(!AreAliased(destination, dispatch_handle, kScratchRegister));
714 LoadAddress(kScratchRegister, ExternalReference::js_dispatch_table_address());
715 movq(destination, dispatch_handle);
716 shrl(destination, Immediate(kJSDispatchHandleShift));
719 JSDispatchEntry::kEntrypointOffset));
720}
721
722void MacroAssembler::LoadEntrypointFromJSDispatchTable(
723 Register destination, JSDispatchHandle dispatch_handle) {
725 LoadAddress(kScratchRegister, ExternalReference::js_dispatch_table_address());
726 // WARNING: This offset calculation is only safe if we have already stored a
727 // RelocInfo for the dispatch handle, e.g. in CallJSDispatchEntry, (thus
728 // keeping the dispatch entry alive) _and_ because the entries are not
729 // compacted (thus meaning that the offset calculation is not invalidated by
730 // a compaction).
731 // TODO(leszeks): Make this less of a footgun.
732 static_assert(!JSDispatchTable::kSupportsCompaction);
733 int offset = JSDispatchTable::OffsetOfEntry(dispatch_handle) +
734 JSDispatchEntry::kEntrypointOffset;
736}
737
738void MacroAssembler::LoadParameterCountFromJSDispatchTable(
739 Register destination, Register dispatch_handle) {
740 DCHECK(!AreAliased(destination, dispatch_handle, kScratchRegister));
741 LoadAddress(kScratchRegister, ExternalReference::js_dispatch_table_address());
742 movq(destination, dispatch_handle);
743 shrl(destination, Immediate(kJSDispatchHandleShift));
745 static_assert(JSDispatchEntry::kParameterCountMask == 0xffff);
747 JSDispatchEntry::kCodeObjectOffset));
748}
749
750void MacroAssembler::LoadEntrypointAndParameterCountFromJSDispatchTable(
751 Register entrypoint, Register parameter_count, Register dispatch_handle) {
752 DCHECK(!AreAliased(entrypoint, parameter_count, dispatch_handle,
754 LoadAddress(kScratchRegister, ExternalReference::js_dispatch_table_address());
756 movq(offset, dispatch_handle);
757 shrl(offset, Immediate(kJSDispatchHandleShift));
759 movq(entrypoint, Operand(kScratchRegister, offset, times_1,
760 JSDispatchEntry::kEntrypointOffset));
761 static_assert(JSDispatchEntry::kParameterCountMask == 0xffff);
763 JSDispatchEntry::kCodeObjectOffset));
764}
765#endif
766
768 Operand field_operand) {
770#ifdef V8_ENABLE_SANDBOX
771 DecompressProtected(destination, field_operand);
772#else
773 LoadTaggedField(destination, field_operand);
774#endif
775}
776
777void MacroAssembler::CallEphemeronKeyBarrier(Register object,
778 Register slot_address,
779 SaveFPRegsMode fp_mode) {
780 ASM_CODE_COMMENT(this);
781 DCHECK(!AreAliased(object, slot_address));
785
787 Register slot_address_parameter =
789 MovePair(slot_address_parameter, slot_address, object_parameter, object);
790
793}
794
796 Register slot_address,
797 SaveFPRegsMode fp_mode,
798 IndirectPointerTag tag) {
799 ASM_CODE_COMMENT(this);
800 DCHECK(!AreAliased(object, slot_address));
801 // TODO(saelo) if necessary, we could introduce a "SaveRegisters" version of
802 // this function and make this code not save clobbered registers. It's
803 // probably not currently worth the effort though since stores to indirect
804 // pointer fields are fairly rare.
807 object, slot_address);
809
810 Register object_parameter =
812 Register slot_address_parameter =
814 MovePair(slot_address_parameter, slot_address, object_parameter, object);
815
816 Register tag_parameter =
818 Move(tag_parameter, tag);
819
822}
823
825 Register slot_address,
826 SaveFPRegsMode fp_mode,
827 StubCallMode mode) {
828 ASM_CODE_COMMENT(this);
829 DCHECK(!AreAliased(object, slot_address));
834 Register slot_address_parameter =
836 MovePair(object_parameter, object, slot_address_parameter, slot_address);
837
838 CallRecordWriteStub(object_parameter, slot_address_parameter, fp_mode, mode);
840}
841
842void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address,
843 SaveFPRegsMode fp_mode,
844 StubCallMode mode) {
845 ASM_CODE_COMMENT(this);
846 // Use CallRecordWriteStubSaveRegisters if the object and slot registers
847 // need to be caller saved.
850#if V8_ENABLE_WEBASSEMBLY
851 if (mode == StubCallMode::kCallWasmRuntimeStub) {
852 // Use {near_call} for direct Wasm call within a module.
853 intptr_t wasm_target =
854 static_cast<intptr_t>(wasm::WasmCode::GetRecordWriteBuiltin(fp_mode));
856#else
857 if (false) {
858#endif
859 } else {
861 }
862}
863
864#ifdef V8_IS_TSAN
865void MacroAssembler::CallTSANStoreStub(Register address, Register value,
866 SaveFPRegsMode fp_mode, int size,
867 StubCallMode mode,
868 std::memory_order order) {
869 ASM_CODE_COMMENT(this);
870 DCHECK(!AreAliased(address, value));
871 TSANStoreDescriptor descriptor;
872 RegList registers = descriptor.allocatable_registers();
873
875
876 Register address_parameter(
877 descriptor.GetRegisterParameter(TSANStoreDescriptor::kAddress));
878 Register value_parameter(
879 descriptor.GetRegisterParameter(TSANStoreDescriptor::kValue));
880
881 // Prepare argument registers for calling GetTSANStoreStub.
882 MovePair(address_parameter, address, value_parameter, value);
883
884#if V8_ENABLE_WEBASSEMBLY
885 if (mode != StubCallMode::kCallWasmRuntimeStub) {
886 // JS functions and Wasm wrappers.
887 CallBuiltin(CodeFactory::GetTSANStoreStub(fp_mode, size, order));
888 } else {
889 // Wasm functions should call builtins through their far jump table.
890 auto wasm_target = static_cast<intptr_t>(
891 wasm::WasmCode::GetTSANStoreBuiltin(fp_mode, size, order));
893 }
894#else
895 CallBuiltin(CodeFactory::GetTSANStoreStub(fp_mode, size, order));
896#endif // V8_ENABLE_WEBASSEMBLY
897
899}
900
901void MacroAssembler::CallTSANRelaxedLoadStub(Register address,
902 SaveFPRegsMode fp_mode, int size,
903 StubCallMode mode) {
904 TSANLoadDescriptor descriptor;
905 RegList registers = descriptor.allocatable_registers();
906
908
909 Register address_parameter(
910 descriptor.GetRegisterParameter(TSANLoadDescriptor::kAddress));
911
912 // Prepare argument registers for calling TSANRelaxedLoad.
913 Move(address_parameter, address);
914
915#if V8_ENABLE_WEBASSEMBLY
916 if (mode != StubCallMode::kCallWasmRuntimeStub) {
917 // JS functions and Wasm wrappers.
918 CallBuiltin(CodeFactory::GetTSANRelaxedLoadStub(fp_mode, size));
919 } else {
920 // Wasm functions should call builtins through their far jump table.
921 auto wasm_target = static_cast<intptr_t>(
922 wasm::WasmCode::GetTSANRelaxedLoadBuiltin(fp_mode, size));
924 }
925#else
926 CallBuiltin(CodeFactory::GetTSANRelaxedLoadStub(fp_mode, size));
927#endif // V8_ENABLE_WEBASSEMBLY
928
930}
931#endif // V8_IS_TSAN
932
933void MacroAssembler::RecordWrite(Register object, Register slot_address,
934 Register value, SaveFPRegsMode fp_mode,
935 SmiCheck smi_check, ReadOnlyCheck ro_check,
936 SlotDescriptor slot) {
937 ASM_CODE_COMMENT(this);
938 DCHECK(!AreAliased(object, slot_address, value));
939 AssertNotSmi(object);
940
941 if (v8_flags.disable_write_barriers) {
942 return;
943 }
944
945 if (v8_flags.slow_debug_code) {
946 ASM_CODE_COMMENT_STRING(this, "Debug check slot_address");
947 Label ok;
948 if (slot.contains_indirect_pointer()) {
949 Push(object); // Use object register as scratch
950 Register scratch = object;
951 Push(slot_address); // Use slot address register to load the value into
952 Register value_in_slot = slot_address;
953 LoadIndirectPointerField(value_in_slot, Operand(slot_address, 0),
954 slot.indirect_pointer_tag(), scratch);
955 cmp_tagged(value, value_in_slot);
956 // These pops don't affect the flag registers, so we can do them before
957 // the conditional jump below.
958 Pop(slot_address);
959 Pop(object);
960 } else {
961 cmp_tagged(value, Operand(slot_address, 0));
962 }
963 j(equal, &ok, Label::kNear);
964 int3();
965 bind(&ok);
966 }
967
968 // First, check if a write barrier is even needed. The tests below
969 // catch stores of smis and read-only objects, as well as stores into the
970 // young generation.
971 Label done;
972
973#if V8_STATIC_ROOTS_BOOL
974 if (ro_check == ReadOnlyCheck::kInline) {
975 // Quick check for Read-only and small Smi values.
976 static_assert(StaticReadOnlyRoot::kLastAllocatedRoot < kRegularPageSize);
978 }
979#endif // V8_STATIC_ROOTS_BOOL
980
981 if (smi_check == SmiCheck::kInline) {
982 // Skip barrier if writing a smi.
983 JumpIfSmi(value, &done);
984 }
985
986 if (slot.contains_indirect_pointer()) {
987 // The indirect pointer write barrier is only enabled during marking.
988 JumpIfNotMarking(&done);
989 } else {
990#if V8_ENABLE_STICKY_MARK_BITS_BOOL
991 DCHECK(!AreAliased(kScratchRegister, object, slot_address, value));
992 Label stub_call;
993
994 JumpIfMarking(&stub_call);
995
996 // Save the slot_address in the xmm scratch register.
997 movq(kScratchDoubleReg, slot_address);
998 Register scratch0 = slot_address;
999 CheckMarkBit(object, kScratchRegister, scratch0, carry, &done);
1001 not_zero, &done, Label::kFar);
1002 CheckMarkBit(value, kScratchRegister, scratch0, carry, &done);
1003 movq(slot_address, kScratchDoubleReg);
1004 bind(&stub_call);
1005#else // !V8_ENABLE_STICKY_MARK_BITS_BOOL
1006 CheckPageFlag(value,
1007 value, // Used as scratch.
1009 Label::kNear);
1010
1011 CheckPageFlag(object,
1012 value, // Used as scratch.
1014 Label::kNear);
1015#endif // !V8_ENABLE_STICKY_MARK_BITS_BOOL
1016 }
1017
1018 if (slot.contains_direct_pointer()) {
1019 CallRecordWriteStub(object, slot_address, fp_mode,
1021 } else {
1022 DCHECK(slot.contains_indirect_pointer());
1023 CallIndirectPointerBarrier(object, slot_address, fp_mode,
1024 slot.indirect_pointer_tag());
1025 }
1026
1027 bind(&done);
1028
1029 // Clobber clobbered registers when running with the debug-code flag
1030 // turned on to provoke errors.
1031 if (v8_flags.slow_debug_code) {
1032 ASM_CODE_COMMENT_STRING(this, "Zap scratch registers");
1033 Move(slot_address, kZapValue, RelocInfo::NO_INFO);
1034 Move(value, kZapValue, RelocInfo::NO_INFO);
1035 }
1036}
1037
1038void MacroAssembler::Check(Condition cc, AbortReason reason) {
1039 Label L;
1040 j(cc, &L, Label::kNear);
1041 Abort(reason);
1042 // Control will not return here.
1043 bind(&L);
1044}
1045
1046void MacroAssembler::SbxCheck(Condition cc, AbortReason reason) {
1047 Check(cc, reason);
1048}
1049
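// Verifies that rsp satisfies the platform's activation frame alignment and
// traps with int3 if it does not. A no-op when the required alignment does not
// exceed the system pointer size.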
1050void MacroAssembler::CheckStackAlignment() {
1051 int frame_alignment = base::OS::ActivationFrameAlignment();
1052 int frame_alignment_mask = frame_alignment - 1;
1053 if (frame_alignment > kSystemPointerSize) {
1054 ASM_CODE_COMMENT(this);
1055 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
1056 Label alignment_as_expected;
1057 testq(rsp, Immediate(frame_alignment_mask));
1058 j(zero, &alignment_as_expected, Label::kNear);
1059 // Abort if stack is not aligned.
1060 int3();
1061 bind(&alignment_as_expected);
1062 }
1063}
1064
1065void MacroAssembler::AlignStackPointer() {
1066 const int kFrameAlignment = base::OS::ActivationFrameAlignment();
1067 if (kFrameAlignment > 0) {
1068 DCHECK(base::bits::IsPowerOfTwo(kFrameAlignment));
1069 DCHECK(is_int8(kFrameAlignment));
1070 andq(rsp, Immediate(-kFrameAlignment));
1071 }
1072}
1073
1074void MacroAssembler::Abort(AbortReason reason) {
1075 ASM_CODE_COMMENT(this);
1076 if (v8_flags.code_comments) {
1077 RecordComment("Abort message:", SourceLocation{});
1078 RecordComment(GetAbortReason(reason), SourceLocation{});
1079 }
1080
1081 // Without debug code, save the code size and just trap.
1082 if (!v8_flags.debug_code || v8_flags.trap_on_abort) {
1083 int3();
1084 return;
1085 }
1086
1087 if (should_abort_hard()) {
1088 // We don't care if we constructed a frame. Just pretend we did.
1089 FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
1090 Move(kCArgRegs[0], static_cast<int>(reason));
1091 PrepareCallCFunction(1);
1092 LoadAddress(rax, ExternalReference::abort_with_reason());
1093 call(rax);
1094 return;
1095 }
1096
1097 Move(rdx, Smi::FromInt(static_cast<int>(reason)));
1098
1099 {
1100 // We don't actually want to generate a pile of code for this, so just
1101 // claim there is a stack frame, without generating one.
1102 FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
1103 if (root_array_available()) {
1104 // Generate an indirect call via builtins entry table here in order to
1105 // ensure that the interpreter_entry_return_pc_offset is the same for
1106 // InterpreterEntryTrampoline and InterpreterEntryTrampolineForProfiling
1107 // when v8_flags.debug_code is enabled.
1108 Call(EntryFromBuiltinAsOperand(Builtin::kAbort));
1109 } else {
1110 CallBuiltin(Builtin::kAbort);
1111 }
1112 }
1113
1114 // Control will not return here.
1115 int3();
1116}
1117
1118void MacroAssembler::CallRuntime(const Runtime::Function* f,
1119 int num_arguments) {
1120 ASM_CODE_COMMENT(this);
1121 // If the expected number of arguments of the runtime function is
1122 // constant, we check that the actual number of arguments match the
1123 // expectation.
1124 CHECK(f->nargs < 0 || f->nargs == num_arguments);
1125
1126 // TODO(1236192): Most runtime routines don't need the number of
1127 // arguments passed in because it is constant. At some point we
1128 // should remove this need and make the runtime routine entry code
1129 // smarter.
1130 Move(rax, num_arguments);
1131 LoadAddress(rbx, ExternalReference::Create(f));
1132
1133 bool switch_to_central = options().is_wasm;
1134 CallBuiltin(Builtins::RuntimeCEntry(f->result_size, switch_to_central));
1135}
1136
1137void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
1138 // ----------- S t a t e -------------
1139 // -- rsp[0] : return address
1140 // -- rsp[8] : argument num_arguments - 1
1141 // ...
1142 // -- rsp[8 * num_arguments] : argument 0 (receiver)
1143 //
1144 // For runtime functions with variable arguments:
1145 // -- rax : number of arguments
1146 // -----------------------------------
1147 ASM_CODE_COMMENT(this);
1148 const Runtime::Function* function = Runtime::FunctionForId(fid);
1149 DCHECK_EQ(1, function->result_size);
1150 if (function->nargs >= 0) {
1151 Move(rax, function->nargs);
1152 }
1153 JumpToExternalReference(ExternalReference::Create(fid));
1154}
1155
1156void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
1157 bool builtin_exit_frame) {
1158 ASM_CODE_COMMENT(this);
1159 // Set the entry point and jump to the C entry runtime stub.
1160 LoadAddress(rbx, ext);
1161 TailCallBuiltin(Builtins::CEntry(1, ArgvMode::kStack, builtin_exit_frame));
1162}
1163
1164namespace {
1165
1166#ifndef V8_ENABLE_LEAPTIERING
1167// Only used when leaptiering is disabled.
1168void TailCallOptimizedCodeSlot(MacroAssembler* masm,
1169 Register optimized_code_entry, Register closure,
1170 Register scratch1, Register scratch2,
1171 JumpMode jump_mode) {
1172 // ----------- S t a t e -------------
1173 // rax : actual argument count
1174 // rdx : new target (preserved for callee if needed, and caller)
1175 // rsi : current context, used for the runtime call
1176 // rdi : target function (preserved for callee if needed, and caller)
1177 // -----------------------------------
1178 ASM_CODE_COMMENT(masm);
1180 DCHECK(!AreAliased(rax, rdx, closure, rsi, optimized_code_entry, scratch1,
1181 scratch2));
1182
1183 Label heal_optimized_code_slot;
1184
1185 // If the optimized code is cleared, go to runtime to update the optimization
1186 // marker field.
1187 __ LoadWeakValue(optimized_code_entry, &heal_optimized_code_slot);
1188
1189 // The entry references a CodeWrapper object. Unwrap it now.
1191 optimized_code_entry,
1192 FieldOperand(optimized_code_entry, CodeWrapper::kCodeOffset), scratch1);
1193
1194 // Check if the optimized code is marked for deopt. If it is, call the
1195 // runtime to clear it.
1196 __ AssertCode(optimized_code_entry);
1197 __ TestCodeIsMarkedForDeoptimization(optimized_code_entry);
1198 __ j(not_zero, &heal_optimized_code_slot);
1199
1200 // Optimized code is good, get it into the closure and link the closure into
1201 // the optimized functions list, then tail call the optimized code.
1202 __ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure,
1203 scratch1, scratch2);
1204 static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
1205 __ Move(rcx, optimized_code_entry);
1206 __ JumpCodeObject(rcx, kJSEntrypointTag, jump_mode);
1207
1208 // Optimized code slot contains deoptimized code or code is cleared and
1209 // optimized code marker isn't updated. Evict the code, update the marker
1210 // and re-enter the closure's code.
1211 __ bind(&heal_optimized_code_slot);
1212 __ GenerateTailCallToReturnedCode(Runtime::kHealOptimizedCodeSlot, jump_mode);
1213}
1214#endif // V8_ENABLE_LEAPTIERING
1215
1216} // namespace
1217
1218#ifdef V8_ENABLE_DEBUG_CODE
1219void MacroAssembler::AssertFeedbackCell(Register object, Register scratch) {
1220 if (v8_flags.debug_code) {
1221 IsObjectType(object, FEEDBACK_CELL_TYPE, scratch);
1222 Assert(equal, AbortReason::kExpectedFeedbackCell);
1223 }
1224}
1225void MacroAssembler::AssertFeedbackVector(Register object, Register scratch) {
1226 if (v8_flags.debug_code) {
1227 IsObjectType(object, FEEDBACK_VECTOR_TYPE, scratch);
1228 Assert(equal, AbortReason::kExpectedFeedbackVector);
1229 }
1230}
1231#endif // V8_ENABLE_DEBUG_CODE
1232
1234 Runtime::FunctionId function_id, JumpMode jump_mode) {
1235 // ----------- S t a t e -------------
1236 // -- rax : actual argument count (preserved for callee)
1237 // -- rdx : new target (preserved for callee)
1238 // -- rdi : target function (preserved for callee)
1239 // -- r15 : dispatch handle (preserved for callee)
1240 // -----------------------------------
1241 ASM_CODE_COMMENT(this);
1242 {
1243 FrameScope scope(this, StackFrame::INTERNAL);
1244 // Push a copy of the target function, the new target, the actual argument
1245 // count, and the dispatch handle.
1250#ifdef V8_ENABLE_LEAPTIERING
1251 // No need to SmiTag since dispatch handles always look like Smis.
1252 static_assert(kJSDispatchHandleShift > 0);
1254#endif
1255 // Function is also the parameter to the runtime call.
1257
1258 CallRuntime(function_id, 1);
1259 movq(rcx, rax);
1260
1261 // Restore target function, new target, actual argument count, and dispatch
1262 // handle.
1263#ifdef V8_ENABLE_LEAPTIERING
1265#endif
1270 }
1271 static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
1272 JumpCodeObject(rcx, kJSEntrypointTag, jump_mode);
1273}
1274
1276 Register optimized_code, Register closure, Register scratch1,
1277 Register slot_address) {
1278 ASM_CODE_COMMENT(this);
1279 DCHECK(!AreAliased(optimized_code, closure, scratch1, slot_address));
1281
1282#ifdef V8_ENABLE_LEAPTIERING
1283 UNREACHABLE();
1284#else
1285 // Store the optimized code in the closure.
1286 AssertCode(optimized_code);
1287 StoreCodePointerField(FieldOperand(closure, JSFunction::kCodeOffset),
1288 optimized_code);
1289
1290 // Write barrier clobbers scratch1 below.
1291 Register value = scratch1;
1292 movq(value, optimized_code);
1293
1294 RecordWriteField(closure, JSFunction::kCodeOffset, value, slot_address,
1297#endif // V8_ENABLE_LEAPTIERING
1298}
1299
1300#ifndef V8_ENABLE_LEAPTIERING
1301
1302// Read off the flags in the feedback vector and check if there
1303// is optimized code or a tiering state that needs to be processed.
1305 Register feedback_vector, CodeKind current_code_kind) {
1306 ASM_CODE_COMMENT(this);
1307 DCHECK(CodeKindCanTierUp(current_code_kind));
1308 uint32_t flag_mask =
1310 testw(FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset),
1311 Immediate(flag_mask));
1312 return not_zero;
1313}
1314
1316 Register feedback_vector, CodeKind current_code_kind,
1317 Label* flags_need_processing) {
1318 ASM_CODE_COMMENT(this);
1319 j(CheckFeedbackVectorFlagsNeedsProcessing(feedback_vector, current_code_kind),
1320 flags_need_processing);
1321}
1322
1324 Register feedback_vector, Register closure, JumpMode jump_mode) {
1325 ASM_CODE_COMMENT(this);
1326 DCHECK(!AreAliased(feedback_vector, closure));
1327
1328 Label maybe_has_optimized_code, maybe_needs_logging;
1329 // Check if optimized code is available.
1330 testw(FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset),
1332 j(zero, &maybe_needs_logging);
1333
1334 GenerateTailCallToReturnedCode(Runtime::kCompileOptimized, jump_mode);
1335
1336 bind(&maybe_needs_logging);
1337 testw(FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset),
1338 Immediate(FeedbackVector::LogNextExecutionBit::kMask));
1339 j(zero, &maybe_has_optimized_code);
1340 GenerateTailCallToReturnedCode(Runtime::kFunctionLogNextExecution, jump_mode);
1341
1342 bind(&maybe_has_optimized_code);
1343 Register optimized_code_entry = kJavaScriptCallCodeStartRegister;
1345 optimized_code_entry,
1346 FieldOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset));
1347 TailCallOptimizedCodeSlot(this, optimized_code_entry, closure, r9,
1349 jump_mode);
1350}
1351
1352#endif // !V8_ENABLE_LEAPTIERING
1353
1355 Register exclusion) const {
1356 int bytes = 0;
1357 RegList saved_regs = kCallerSaved - exclusion;
1358 bytes += kSystemPointerSize * saved_regs.Count();
1359
1360 // R12 to r15 are callee save on all platforms.
1361 if (fp_mode == SaveFPRegsMode::kSave) {
1363 }
1364
1365 return bytes;
1366}
1367
1369 Register exclusion) {
1370 ASM_CODE_COMMENT(this);
1371 int bytes = 0;
1372 bytes += PushAll(kCallerSaved - exclusion);
1373 if (fp_mode == SaveFPRegsMode::kSave) {
1375 }
1376
1377 return bytes;
1378}
1379
1380int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
1381 ASM_CODE_COMMENT(this);
1382 int bytes = 0;
1383 if (fp_mode == SaveFPRegsMode::kSave) {
1385 }
1386 bytes += PopAll(kCallerSaved - exclusion);
1387
1388 return bytes;
1389}
1390
1392 int bytes = 0;
1393 for (Register reg : registers) {
1394 pushq(reg);
1395 bytes += kSystemPointerSize;
1396 }
1397 return bytes;
1398}
1399
1401 int bytes = 0;
1402 for (Register reg : base::Reversed(registers)) {
1403 popq(reg);
1404 bytes += kSystemPointerSize;
1405 }
1406 return bytes;
1407}
1408
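// Spills the given XMM registers to the stack. {stack_slot_size} is either
// kDoubleSize (scalar doubles) or 2 * kDoubleSize (full 128-bit values).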
1409int MacroAssembler::PushAll(DoubleRegList registers, int stack_slot_size) {
1410 if (registers.is_empty()) return 0;
1411 const int delta = stack_slot_size * registers.Count();
1412 AllocateStackSpace(delta);
1413 int slot = 0;
1414 for (XMMRegister reg : registers) {
1415 if (stack_slot_size == kDoubleSize) {
1416 Movsd(Operand(rsp, slot), reg);
1417 } else {
1418 DCHECK_EQ(stack_slot_size, 2 * kDoubleSize);
1419 Movdqu(Operand(rsp, slot), reg);
1420 }
1421 slot += stack_slot_size;
1422 }
1423 DCHECK_EQ(slot, delta);
1424 return delta;
1425}
1426
1427int MacroAssembler::PopAll(DoubleRegList registers, int stack_slot_size) {
1428 if (registers.is_empty()) return 0;
1429 int slot = 0;
1430 for (XMMRegister reg : registers) {
1431 if (stack_slot_size == kDoubleSize) {
1432 Movsd(reg, Operand(rsp, slot));
1433 } else {
1434 DCHECK_EQ(stack_slot_size, 2 * kDoubleSize);
1435 Movdqu(reg, Operand(rsp, slot));
1436 }
1437 slot += stack_slot_size;
1438 }
1439 DCHECK_EQ(slot, stack_slot_size * registers.Count());
1440 addq(rsp, Immediate(slot));
1441 return slot;
1442}
1443
1444void MacroAssembler::Movq(XMMRegister dst, Register src) {
1445 if (CpuFeatures::IsSupported(AVX)) {
1446 CpuFeatureScope avx_scope(this, AVX);
1447 vmovq(dst, src);
1448 } else {
1449 movq(dst, src);
1450 }
1451}
1452
1453void MacroAssembler::Movq(Register dst, XMMRegister src) {
1454 if (CpuFeatures::IsSupported(AVX)) {
1455 CpuFeatureScope avx_scope(this, AVX);
1456 vmovq(dst, src);
1457 } else {
1458 movq(dst, src);
1459 }
1460}
1461
1462void MacroAssembler::Pextrq(Register dst, XMMRegister src, int8_t imm8) {
1463 if (CpuFeatures::IsSupported(AVX)) {
1464 CpuFeatureScope avx_scope(this, AVX);
1465 vpextrq(dst, src, imm8);
1466 } else {
1467 CpuFeatureScope sse_scope(this, SSE4_1);
1468 pextrq(dst, src, imm8);
1469 }
1470}
1471
1473 if (CpuFeatures::IsSupported(AVX)) {
1474 CpuFeatureScope scope(this, AVX);
1475 vcvtss2sd(dst, src, src);
1476 } else {
1477 cvtss2sd(dst, src);
1478 }
1479}
1480
1481void MacroAssembler::Cvtss2sd(XMMRegister dst, Operand src) {
1482 if (CpuFeatures::IsSupported(AVX)) {
1483 CpuFeatureScope scope(this, AVX);
1484 vcvtss2sd(dst, dst, src);
1485 } else {
1486 cvtss2sd(dst, src);
1487 }
1488}
1489
1491 if (CpuFeatures::IsSupported(AVX)) {
1492 CpuFeatureScope scope(this, AVX);
1493 vcvtsd2ss(dst, src, src);
1494 } else {
1495 cvtsd2ss(dst, src);
1496 }
1497}
1498
1499void MacroAssembler::Cvtsd2ss(XMMRegister dst, Operand src) {
1500 if (CpuFeatures::IsSupported(AVX)) {
1501 CpuFeatureScope scope(this, AVX);
1502 vcvtsd2ss(dst, dst, src);
1503 } else {
1504 cvtsd2ss(dst, src);
1505 }
1506}
1507
1508void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
1509 if (CpuFeatures::IsSupported(AVX)) {
1510 CpuFeatureScope scope(this, AVX);
1511 vcvtlsi2sd(dst, kScratchDoubleReg, src);
1512 } else {
1513 xorpd(dst, dst);
1514 cvtlsi2sd(dst, src);
1515 }
1516}
1517
1518void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Operand src) {
1519 if (CpuFeatures::IsSupported(AVX)) {
1520 CpuFeatureScope scope(this, AVX);
1521 vcvtlsi2sd(dst, kScratchDoubleReg, src);
1522 } else {
1523 xorpd(dst, dst);
1524 cvtlsi2sd(dst, src);
1525 }
1526}
1527
1528void MacroAssembler::Cvtlsi2ss(XMMRegister dst, Register src) {
1529 if (CpuFeatures::IsSupported(AVX)) {
1530 CpuFeatureScope scope(this, AVX);
1531 vcvtlsi2ss(dst, kScratchDoubleReg, src);
1532 } else {
1533 xorps(dst, dst);
1534 cvtlsi2ss(dst, src);
1535 }
1536}
1537
1538void MacroAssembler::Cvtlsi2ss(XMMRegister dst, Operand src) {
1539 if (CpuFeatures::IsSupported(AVX)) {
1540 CpuFeatureScope scope(this, AVX);
1541 vcvtlsi2ss(dst, kScratchDoubleReg, src);
1542 } else {
1543 xorps(dst, dst);
1544 cvtlsi2ss(dst, src);
1545 }
1546}
1547
1548void MacroAssembler::Cvtqsi2ss(XMMRegister dst, Register src) {
1549 if (CpuFeatures::IsSupported(AVX)) {
1550 CpuFeatureScope scope(this, AVX);
1551 vcvtqsi2ss(dst, kScratchDoubleReg, src);
1552 } else {
1553 xorps(dst, dst);
1554 cvtqsi2ss(dst, src);
1555 }
1556}
1557
1558void MacroAssembler::Cvtqsi2ss(XMMRegister dst, Operand src) {
1559 if (CpuFeatures::IsSupported(AVX)) {
1560 CpuFeatureScope scope(this, AVX);
1561 vcvtqsi2ss(dst, kScratchDoubleReg, src);
1562 } else {
1563 xorps(dst, dst);
1564 cvtqsi2ss(dst, src);
1565 }
1566}
1567
1568void MacroAssembler::Cvtqsi2sd(XMMRegister dst, Register src) {
1569 if (CpuFeatures::IsSupported(AVX)) {
1570 CpuFeatureScope scope(this, AVX);
1571 vcvtqsi2sd(dst, kScratchDoubleReg, src);
1572 } else {
1573 xorpd(dst, dst);
1574 cvtqsi2sd(dst, src);
1575 }
1576}
1577
1578void MacroAssembler::Cvtqsi2sd(XMMRegister dst, Operand src) {
1579 if (CpuFeatures::IsSupported(AVX)) {
1580 CpuFeatureScope scope(this, AVX);
1581 vcvtqsi2sd(dst, kScratchDoubleReg, src);
1582 } else {
1583 xorpd(dst, dst);
1584 cvtqsi2sd(dst, src);
1585 }
1586}
1587
1588void MacroAssembler::Cvtlui2ss(XMMRegister dst, Register src) {
1589 // Zero-extend the 32 bit value to 64 bit.
1590 movl(kScratchRegister, src);
1591 Cvtqsi2ss(dst, kScratchRegister);
1592}
1593
1594void MacroAssembler::Cvtlui2ss(XMMRegister dst, Operand src) {
1595 // Zero-extend the 32 bit value to 64 bit.
1596 movl(kScratchRegister, src);
1597 Cvtqsi2ss(dst, kScratchRegister);
1598}
1599
1600void MacroAssembler::Cvtlui2sd(XMMRegister dst, Register src) {
1601 // Zero-extend the 32 bit value to 64 bit.
1602 movl(kScratchRegister, src);
1603 Cvtqsi2sd(dst, kScratchRegister);
1604}
1605
1606void MacroAssembler::Cvtlui2sd(XMMRegister dst, Operand src) {
1607 // Zero-extend the 32 bit value to 64 bit.
1608 movl(kScratchRegister, src);
1609 Cvtqsi2sd(dst, kScratchRegister);
1610}
1611
1612void MacroAssembler::Cvtqui2ss(XMMRegister dst, Register src) {
1613 Label done;
1614 Cvtqsi2ss(dst, src);
1615 testq(src, src);
1616 j(positive, &done, Label::kNear);
1617
1618 // Compute {src/2 | (src&1)} (retain the LSB to avoid rounding errors).
1619 if (src != kScratchRegister) movq(kScratchRegister, src);
1620 shrq(kScratchRegister, Immediate(1));
1621 // The LSB is shifted into CF. If it is set, set the LSB in {tmp}.
1622 Label msb_not_set;
1623 j(not_carry, &msb_not_set, Label::kNear);
1624 orq(kScratchRegister, Immediate(1));
1625 bind(&msb_not_set);
1626 Cvtqsi2ss(dst, kScratchRegister);
1627 Addss(dst, dst);
1628 bind(&done);
1629}
1630
1631void MacroAssembler::Cvtqui2ss(XMMRegister dst, Operand src) {
1632 movq(kScratchRegister, src);
1633 Cvtqui2ss(dst, kScratchRegister);
1634}
1635
1636void MacroAssembler::Cvtqui2sd(XMMRegister dst, Register src) {
1637 Label done;
1638 Cvtqsi2sd(dst, src);
1639 testq(src, src);
1640 j(positive, &done, Label::kNear);
1641
1642 // Compute {src/2 | (src&1)} (retain the LSB to avoid rounding errors).
1643 if (src != kScratchRegister) movq(kScratchRegister, src);
1644 shrq(kScratchRegister, Immediate(1));
1645 // The LSB is shifted into CF. If it is set, set the LSB in {tmp}.
1646 Label msb_not_set;
1647 j(not_carry, &msb_not_set, Label::kNear);
1648 orq(kScratchRegister, Immediate(1));
1649 bind(&msb_not_set);
1650 Cvtqsi2sd(dst, kScratchRegister);
1651 Addsd(dst, dst);
1652 bind(&done);
1653}
1654
1655void MacroAssembler::Cvtqui2sd(XMMRegister dst, Operand src) {
1656 movq(kScratchRegister, src);
1657 Cvtqui2sd(dst, kScratchRegister);
1658}
1659
1660void MacroAssembler::Cvttss2si(Register dst, XMMRegister src) {
1661 if (CpuFeatures::IsSupported(AVX)) {
1662 CpuFeatureScope scope(this, AVX);
1663 vcvttss2si(dst, src);
1664 } else {
1665 cvttss2si(dst, src);
1666 }
1667}
1668
1669void MacroAssembler::Cvttss2si(Register dst, Operand src) {
1670 if (CpuFeatures::IsSupported(AVX)) {
1671 CpuFeatureScope scope(this, AVX);
1672 vcvttss2si(dst, src);
1673 } else {
1674 cvttss2si(dst, src);
1675 }
1676}
1677
1678void MacroAssembler::Cvttsd2si(Register dst, XMMRegister src) {
1679 if (CpuFeatures::IsSupported(AVX)) {
1680 CpuFeatureScope scope(this, AVX);
1681 vcvttsd2si(dst, src);
1682 } else {
1683 cvttsd2si(dst, src);
1684 }
1685}
1686
1687void MacroAssembler::Cvttsd2si(Register dst, Operand src) {
1688 if (CpuFeatures::IsSupported(AVX)) {
1689 CpuFeatureScope scope(this, AVX);
1690 vcvttsd2si(dst, src);
1691 } else {
1692 cvttsd2si(dst, src);
1693 }
1694}
1695
1696void MacroAssembler::Cvttss2siq(Register dst, XMMRegister src) {
1697 if (CpuFeatures::IsSupported(AVX)) {
1698 CpuFeatureScope scope(this, AVX);
1699 vcvttss2siq(dst, src);
1700 } else {
1701 cvttss2siq(dst, src);
1702 }
1703}
1704
1705void MacroAssembler::Cvttss2siq(Register dst, Operand src) {
1706 if (CpuFeatures::IsSupported(AVX)) {
1707 CpuFeatureScope scope(this, AVX);
1708 vcvttss2siq(dst, src);
1709 } else {
1710 cvttss2siq(dst, src);
1711 }
1712}
1713
1714void MacroAssembler::Cvttsd2siq(Register dst, XMMRegister src) {
1715 if (CpuFeatures::IsSupported(AVX)) {
1716 CpuFeatureScope scope(this, AVX);
1717 vcvttsd2siq(dst, src);
1718 } else {
1719 cvttsd2siq(dst, src);
1720 }
1721}
1722
1723void MacroAssembler::Cvttsd2siq(Register dst, Operand src) {
1724 if (CpuFeatures::IsSupported(AVX)) {
1725 CpuFeatureScope scope(this, AVX);
1726 vcvttsd2siq(dst, src);
1727 } else {
1728 cvttsd2siq(dst, src);
1729 }
1730}
1731
1732void MacroAssembler::Cvtph2pd(XMMRegister dst, XMMRegister src) {
1733 ASM_CODE_COMMENT(this);
1734 CpuFeatureScope f16c_scope(this, F16C);
1735 CpuFeatureScope avx_scope(this, AVX);
1736
1737 vcvtph2ps(dst, src);
1738 Cvtss2sd(dst, dst);
1739}
1740
1741void MacroAssembler::Cvtpd2ph(XMMRegister dst, XMMRegister src, Register tmp) {
1742 ASM_CODE_COMMENT(this);
1743 CpuFeatureScope f16c_scope(this, F16C);
1744 CpuFeatureScope avx_scope(this, AVX);
1745 Register tmp2 = kScratchRegister;
1746 DCHECK_NE(tmp, tmp2);
1747 DCHECK_NE(dst, src);
1748
1749 // Conversion algo from
1750 // https://github.com/tc39/proposal-float16array/issues/12#issuecomment-2256642971
1751 Label f32tof16;
1752 // Convert Float64 -> Float32.
1753 Cvtsd2ss(dst, src);
1754 vmovd(tmp, dst);
1755 // Mask off sign bit.
1756 andl(tmp, Immediate(kFP32WithoutSignMask));
1757 // Underflow to zero.
1758 cmpl(tmp, Immediate(kFP32MinFP16ZeroRepresentable));
1759 j(below, &f32tof16);
1760 // Overflow to infinity.
1761 cmpl(tmp, Immediate(kFP32MaxFP16Representable));
1762 j(above_equal, &f32tof16);
1763 // Detection of subnormal numbers.
1764 cmpl(tmp, Immediate(kFP32SubnormalThresholdOfFP16));
1765 setcc(above_equal, tmp2);
1766 movzxbl(tmp2, tmp2);
1767 // Compute 0x1000 for normal and 0x0000 for denormal numbers.
1768 shll(tmp2, Immediate(12));
1769 // Look at the last thirteen bits of the mantissa which will be shifted out
1770 // when converting from float32 to float16. (The round and sticky bits.)
1771 // Normal numbers: If the round bit is set and sticky bits are zero, then
1772 // adjust the float32 mantissa.
1773 // Denormal numbers: If all bits are zero, then adjust the mantissa.
1774 andl(tmp, Immediate(0x1fff));
1775 // Check round and sticky bits.
1776 cmpl(tmp, tmp2);
1777 j(not_equal, &f32tof16);
1778
1779 // Adjust mantissa by -1/0/+1.
1780 Move(kScratchDoubleReg, static_cast<uint32_t>(1));
1781 psignd(kScratchDoubleReg, src);
1782 paddd(dst, kScratchDoubleReg);
1783
1784 bind(&f32tof16);
1785 // Convert Float32 -> Float16.
1786 vcvtps2ph(dst, dst, 4);
1787}
1788
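// x64 has no float-to-unsigned-integer instructions, so the helpers below
// convert via the signed variants and, for inputs above the signed range,
// subtract 2^63 (or 2^31), convert again, and set the top bit of the result.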
1789namespace {
1790template <typename OperandOrXMMRegister, bool is_double>
1791void ConvertFloatToUint64(MacroAssembler* masm, Register dst,
1792 OperandOrXMMRegister src, Label* fail) {
1793 Label success;
1794 // There does not exist a native float-to-uint instruction, so we have to use
1795 // a float-to-int, and postprocess the result.
1796 if (is_double) {
1797 masm->Cvttsd2siq(dst, src);
1798 } else {
1799 masm->Cvttss2siq(dst, src);
1800 }
1801 // If the result of the conversion is positive, we are already done.
1802 masm->testq(dst, dst);
1803 masm->j(positive, &success);
1804 // The result of the first conversion was negative, which means that the
1805 // input value was not within the positive int64 range. We subtract 2^63
1806 // and convert it again to see if it is within the uint64 range.
1807 if (is_double) {
1808 masm->Move(kScratchDoubleReg, -9223372036854775808.0);
1809 masm->Addsd(kScratchDoubleReg, src);
1810 masm->Cvttsd2siq(dst, kScratchDoubleReg);
1811 } else {
1812 masm->Move(kScratchDoubleReg, -9223372036854775808.0f);
1813 masm->Addss(kScratchDoubleReg, src);
1814 masm->Cvttss2siq(dst, kScratchDoubleReg);
1815 }
1816 masm->testq(dst, dst);
1817 // The only possible negative value here is 0x8000000000000000, which is
1818 // used on x64 to indicate an integer overflow.
1819 masm->j(negative, fail ? fail : &success);
1820 // The input value is within uint64 range and the second conversion worked
1821 // successfully, but we still have to undo the subtraction we did
1822 // earlier.
1823 masm->Move(kScratchRegister, 0x8000000000000000);
1824 masm->orq(dst, kScratchRegister);
1825 masm->bind(&success);
1826}
1827
1828template <typename OperandOrXMMRegister, bool is_double>
1829void ConvertFloatToUint32(MacroAssembler* masm, Register dst,
1830 OperandOrXMMRegister src, Label* fail) {
1831 Label success;
1832 // There does not exist a native float-to-uint instruction, so we have to use
1833 // a float-to-int, and postprocess the result.
1834 if (is_double) {
1835 masm->Cvttsd2si(dst, src);
1836 } else {
1837 masm->Cvttss2si(dst, src);
1838 }
1839 // If the result of the conversion is positive, we are already done.
1840 masm->testl(dst, dst);
1841 masm->j(positive, &success);
1842 // The result of the first conversion was negative, which means that the
1843 // input value was not within the positive int32 range. We subtract 2^31
1844 // and convert it again to see if it is within the uint32 range.
1845 if (is_double) {
1846 masm->Move(kScratchDoubleReg, -2147483648.0);
1847 masm->Addsd(kScratchDoubleReg, src);
1848 masm->Cvttsd2si(dst, kScratchDoubleReg);
1849 } else {
1850 masm->Move(kScratchDoubleReg, -2147483648.0f);
1851 masm->Addss(kScratchDoubleReg, src);
1852 masm->Cvttss2si(dst, kScratchDoubleReg);
1853 }
1854 masm->testl(dst, dst);
1855 // The only possible negative value here is 0x80000000, which is
1856 // used on x64 to indicate an integer overflow.
1857 masm->j(negative, fail ? fail : &success);
1858 // The input value is within uint32 range and the second conversion worked
1859 // successfully, but we still have to undo the subtraction we did
1860 // earlier.
1861 masm->Move(kScratchRegister, 0x80000000);
1862 masm->orl(dst, kScratchRegister);
1863 masm->bind(&success);
1864}
1865} // namespace
1866
1867void MacroAssembler::Cvttsd2uiq(Register dst, Operand src, Label* fail) {
1868 ConvertFloatToUint64<Operand, true>(this, dst, src, fail);
1869}
1870
1871void MacroAssembler::Cvttsd2uiq(Register dst, XMMRegister src, Label* fail) {
1872 ConvertFloatToUint64<XMMRegister, true>(this, dst, src, fail);
1873}
1874
1875void MacroAssembler::Cvttsd2ui(Register dst, Operand src, Label* fail) {
1876 ConvertFloatToUint32<Operand, true>(this, dst, src, fail);
1877}
1878
1879void MacroAssembler::Cvttsd2ui(Register dst, XMMRegister src, Label* fail) {
1880 ConvertFloatToUint32<XMMRegister, true>(this, dst, src, fail);
1881}
1882
1883void MacroAssembler::Cvttss2uiq(Register dst, Operand src, Label* fail) {
1884 ConvertFloatToUint64<Operand, false>(this, dst, src, fail);
1885}
1886
1887void MacroAssembler::Cvttss2uiq(Register dst, XMMRegister src, Label* fail) {
1888 ConvertFloatToUint64<XMMRegister, false>(this, dst, src, fail);
1889}
1890
1891void MacroAssembler::Cvttss2ui(Register dst, Operand src, Label* fail) {
1892 ConvertFloatToUint32<Operand, false>(this, dst, src, fail);
1893}
1894
1895void MacroAssembler::Cvttss2ui(Register dst, XMMRegister src, Label* fail) {
1896 ConvertFloatToUint32<XMMRegister, false>(this, dst, src, fail);
1897}
1898
1899void MacroAssembler::Cmpeqss(XMMRegister dst, XMMRegister src) {
1900 if (CpuFeatures::IsSupported(AVX)) {
1901 CpuFeatureScope avx_scope(this, AVX);
1902 vcmpeqss(dst, src);
1903 } else {
1904 cmpeqss(dst, src);
1905 }
1906}
1907
1908void MacroAssembler::Cmpeqsd(XMMRegister dst, XMMRegister src) {
1909 if (CpuFeatures::IsSupported(AVX)) {
1910 CpuFeatureScope avx_scope(this, AVX);
1911 vcmpeqsd(dst, src);
1912 } else {
1913 cmpeqsd(dst, src);
1914 }
1915}
1916
1917void MacroAssembler::S256Not(YMMRegister dst, YMMRegister src,
1918 YMMRegister scratch) {
1919 ASM_CODE_COMMENT(this);
1920 CpuFeatureScope avx2_scope(this, AVX2);
1921 if (dst == src) {
1922 vpcmpeqd(scratch, scratch, scratch);
1923 vpxor(dst, dst, scratch);
1924 } else {
1925 vpcmpeqd(dst, dst, dst);
1926 vpxor(dst, dst, src);
1927 }
1928}
1929
1930void MacroAssembler::S256Select(YMMRegister dst, YMMRegister mask,
1931 YMMRegister src1, YMMRegister src2,
1932 YMMRegister scratch) {
1933 ASM_CODE_COMMENT(this);
1934 CpuFeatureScope avx2_scope(this, AVX2);
1935 // v256.select = v256.or(v256.and(v1, c), v256.andnot(v2, c)).
1936 // pandn(x, y) = !x & y, so we have to flip the mask and input.
1937 vpandn(scratch, mask, src2);
1938 vpand(dst, src1, mask);
1939 vpor(dst, dst, scratch);
1940}
1941
1942// ----------------------------------------------------------------------------
1943// Smi tagging, untagging and tag detection.
1944
1945Register MacroAssembler::GetSmiConstant(Tagged<Smi> source) {
1946 Move(kScratchRegister, source);
1947 return kScratchRegister;
1948}
1949
1950void MacroAssembler::Cmp(Register dst, int32_t src) {
1951 if (src == 0) {
1952 testl(dst, dst);
1953 } else {
1954 cmpl(dst, Immediate(src));
1955 }
1956}
1957
1958void MacroAssembler::I64x4Mul(YMMRegister dst, YMMRegister lhs, YMMRegister rhs,
1959 YMMRegister tmp1, YMMRegister tmp2) {
1960 ASM_CODE_COMMENT(this);
1961 DCHECK(!AreAliased(dst, tmp1, tmp2));
1962 DCHECK(!AreAliased(lhs, tmp1, tmp2));
1963 DCHECK(!AreAliased(rhs, tmp1, tmp2));
1965 CpuFeatureScope avx_scope(this, AVX2);
1966 // 1. Multiply high dword of each qword of left with right.
1967 vpsrlq(tmp1, lhs, uint8_t{32});
1968 vpmuludq(tmp1, tmp1, rhs);
1969 // 2. Multiply high dword of each qword of right with left.
1970 vpsrlq(tmp2, rhs, uint8_t{32});
1971 vpmuludq(tmp2, tmp2, lhs);
1972 // 3. Add 1 and 2, then shift left by 32 (this is the high dword of result).
1973 vpaddq(tmp2, tmp2, tmp1);
1974 vpsllq(tmp2, tmp2, uint8_t{32});
1975 // 4. Multiply low dwords (this is the low dword of result).
1976 vpmuludq(dst, lhs, rhs);
1977 // 5. Add 3 and 4.
1978 vpaddq(dst, dst, tmp2);
1979}
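// Editor's note: per lane, the AVX2 sequence above assembles a 64x64->64-bit
// multiply from 32x32->64-bit pieces (vpmuludq). A scalar sketch of the same
// arithmetic (illustrative only, not part of this file; the function name is
// made up):
static inline uint64_t IllustrativeMul64ViaDwords(uint64_t a, uint64_t b) {
  uint64_t a_lo = a & 0xFFFFFFFFu, a_hi = a >> 32;
  uint64_t b_lo = b & 0xFFFFFFFFu, b_hi = b >> 32;
  // Steps 1-3: the cross products only contribute to the high dword.
  uint64_t high = (a_hi * b_lo + b_hi * a_lo) << 32;
  // Step 4: the low dwords give the full low 64-bit product.
  uint64_t low = a_lo * b_lo;
  // Step 5: combine; the a_hi*b_hi term overflows out of the 64-bit result.
  return low + high;
}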
1980
1981#define DEFINE_ISPLAT(name, suffix, instr_mov) \
1982 void MacroAssembler::name(YMMRegister dst, Register src) { \
1983 ASM_CODE_COMMENT(this); \
1984 DCHECK(CpuFeatures::IsSupported(AVX) && CpuFeatures::IsSupported(AVX2)); \
1985 CpuFeatureScope avx_scope(this, AVX); \
1986 CpuFeatureScope avx2_scope(this, AVX2); \
1987 instr_mov(dst, src); \
1988 vpbroadcast##suffix(dst, dst); \
1989 } \
1990 \
1991 void MacroAssembler::name(YMMRegister dst, Operand src) { \
1992 ASM_CODE_COMMENT(this); \
1993 DCHECK(CpuFeatures::IsSupported(AVX2)); \
1994 CpuFeatureScope avx2_scope(this, AVX2); \
1995 vpbroadcast##suffix(dst, src); \
1996 }
1997
1998MACRO_ASM_X64_ISPLAT_LIST(DEFINE_ISPLAT)
1999
2000#undef DEFINE_ISPLAT
2001
2003 ASM_CODE_COMMENT(this);
2005 CpuFeatureScope avx2_scope(this, AVX2);
2006 vbroadcastsd(dst, src);
2007}
2008
2010 ASM_CODE_COMMENT(this);
2012 CpuFeatureScope avx2_scope(this, AVX2);
2013 vbroadcastss(dst, src);
2014}
2015
2017 YMMRegister scratch) {
2018 ASM_CODE_COMMENT(this);
2020 CpuFeatureScope avx_scope(this, AVX);
2021 CpuFeatureScope avx2_scope(this, AVX2);
2022 vminpd(scratch, lhs, rhs);
2023 vminpd(dst, rhs, lhs);
2024 vorpd(scratch, scratch, dst);
2025 vcmpunordpd(dst, dst, scratch);
2026 vorpd(scratch, scratch, dst);
2027 vpsrlq(dst, dst, uint8_t{13});
2028 vandnpd(dst, dst, scratch);
2029}
2030
2032 YMMRegister scratch) {
2033 ASM_CODE_COMMENT(this);
2035 CpuFeatureScope avx_scope(this, AVX);
2036 CpuFeatureScope avx2_scope(this, AVX2);
2037 vmaxpd(scratch, lhs, rhs);
2038 vmaxpd(dst, rhs, lhs);
2039 vxorpd(dst, dst, scratch);
2040 vorpd(scratch, scratch, dst);
2041 vsubpd(scratch, scratch, dst);
2042 vcmpunordpd(dst, dst, scratch);
2043 vpsrlq(dst, dst, uint8_t{13});
2044 vandnpd(dst, dst, scratch);
2045}
2046
2048 YMMRegister scratch) {
2049 ASM_CODE_COMMENT(this);
2051 CpuFeatureScope avx_scope(this, AVX);
2052 CpuFeatureScope avx2_scope(this, AVX2);
2053 vminps(scratch, lhs, rhs);
2054 vminps(dst, rhs, lhs);
2055 vorps(scratch, scratch, dst);
2056 vcmpunordps(dst, dst, scratch);
2057 vorps(scratch, scratch, dst);
2058 vpsrld(dst, dst, uint8_t{10});
2059 vandnps(dst, dst, scratch);
2060}
2061
2063 YMMRegister scratch) {
2064 ASM_CODE_COMMENT(this);
2066 CpuFeatureScope avx_scope(this, AVX);
2067 CpuFeatureScope avx2_scope(this, AVX2);
2068 vmaxps(scratch, lhs, rhs);
2069 vmaxps(dst, rhs, lhs);
2070 vxorps(dst, dst, scratch);
2071 vorps(scratch, scratch, dst);
2072 vsubps(scratch, scratch, dst);
2073 vcmpunordps(dst, dst, scratch);
2074 vpsrld(dst, dst, uint8_t{10});
2075 vandnps(dst, dst, scratch);
2076}
2077
2079 YMMRegister scratch, YMMRegister scratch2) {
2080 ASM_CODE_COMMENT(this);
2081 CpuFeatureScope f16c_scope(this, F16C);
2082 CpuFeatureScope avx_scope(this, AVX);
2083 CpuFeatureScope avx2_scope(this, AVX2);
2084 vcvtph2ps(scratch, lhs);
2085 vcvtph2ps(scratch2, rhs);
2086 // The minps instruction doesn't propagate NaNs and +0's in its first
2087 // operand. Perform minps in both orders, merge the results, and adjust.
2088 vminps(dst, scratch, scratch2);
2089 vminps(scratch, scratch2, scratch);
2090 // Propagate -0's and NaNs, which may be non-canonical.
2091 vorps(scratch, scratch, dst);
2092 // Canonicalize NaNs by quieting and clearing the payload.
2093 vcmpunordps(dst, dst, scratch);
2094 vorps(scratch, scratch, dst);
2095 vpsrld(dst, dst, uint8_t{10});
2096 vandnps(dst, dst, scratch);
2097 vcvtps2ph(dst, dst, 0);
2098}
2099
2101 YMMRegister scratch, YMMRegister scratch2) {
2102 ASM_CODE_COMMENT(this);
2103 CpuFeatureScope f16c_scope(this, F16C);
2104 CpuFeatureScope avx_scope(this, AVX);
2105 CpuFeatureScope avx2_scope(this, AVX2);
2106 vcvtph2ps(scratch, lhs);
2107 vcvtph2ps(scratch2, rhs);
2108 // The maxps instruction doesn't propagate NaNs and +0's in its first
2109 // operand. Perform maxps in both orders, merge the results, and adjust.
2110 vmaxps(dst, scratch, scratch2);
2111 vmaxps(scratch, scratch2, scratch);
2112 // Find discrepancies.
2113 vxorps(dst, dst, scratch);
2114 // Propagate NaNs, which may be non-canonical.
2115 vorps(scratch, scratch, dst);
2116 // Propagate sign discrepancy and (subtle) quiet NaNs.
2117 vsubps(scratch, scratch, dst);
2118 // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
2119 vcmpunordps(dst, dst, scratch);
2120 vpsrld(dst, dst, uint8_t{10});
2121 vandnps(dst, dst, scratch);
2122 vcvtps2ph(dst, dst, 0);
2123}
2124
2125// 1. Zero extend 4 packed 32-bit integers in src1 to 4 packed 64-bit integers
2126// in scratch
2127// 2. Zero extend 4 packed 32-bit integers in src2 to 4 packed 64-bit integers
2128// in dst
2129// 3. Multiply packed doubleword integers in scratch with dst; the extended
2130// zeros are ignored.
2132 XMMRegister src2, YMMRegister scratch,
2133 bool is_signed) {
2134 ASM_CODE_COMMENT(this);
2136 CpuFeatureScope avx_scope(this, AVX2);
2137 vpmovzxdq(scratch, src1);
2138 vpmovzxdq(dst, src2);
2139 if (is_signed) {
2140 vpmuldq(dst, scratch, dst);
2141 } else {
2142 vpmuludq(dst, scratch, dst);
2143 }
2144}
2145
2146// 1. Extend 8 packed 16-bit integers in src1 to 8 packed 32-bit integers in
2147// scratch
2148// 2. Extend 8 packed 16-bit integers in src2 to 8 packed 32-bit integers in dst
2149// 3. Multiply the packed doubleword integers in scratch and dst and store the
2150// low 32 bits of each product in dst.
2152 XMMRegister src2, YMMRegister scratch,
2153 bool is_signed) {
2154 ASM_CODE_COMMENT(this);
2156 CpuFeatureScope avx_scope(this, AVX2);
2157 is_signed ? vpmovsxwd(scratch, src1) : vpmovzxwd(scratch, src1);
2158 is_signed ? vpmovsxwd(dst, src2) : vpmovzxwd(dst, src2);
2159 vpmulld(dst, dst, scratch);
2160}
2161
2163 XMMRegister src2, YMMRegister scratch,
2164 bool is_signed) {
2165 ASM_CODE_COMMENT(this);
2167 CpuFeatureScope avx_scope(this, AVX2);
2168 is_signed ? vpmovsxbw(scratch, src1) : vpmovzxbw(scratch, src1);
2169 is_signed ? vpmovsxbw(dst, src2) : vpmovzxbw(dst, src2);
2170 vpmullw(dst, dst, scratch);
2171}
2172
2173void MacroAssembler::I32x8ExtAddPairwiseI16x16S(YMMRegister dst,
2174 YMMRegister src,
2175 YMMRegister scratch) {
2176 ASM_CODE_COMMENT(this);
2178 CpuFeatureScope avx2_scope(this, AVX2);
2179 Move(scratch, uint32_t{1});
2180 vpbroadcastw(scratch, scratch);
 2181 // vpmaddwd multiplies signed words in src and scratch, producing
2182 // signed doublewords, then adds pairwise.
2183 // src = |l0|l1|...|l14|l15|
2184 // dst = |l0*1+l1*1|l2*1+l3*1|...|l14*1+l15*1|
2185 vpmaddwd(dst, src, scratch);
2186}
2187
2188void MacroAssembler::I32x8ExtAddPairwiseI16x16U(YMMRegister dst,
2189 YMMRegister src,
2190 YMMRegister scratch) {
2191 ASM_CODE_COMMENT(this);
2193 CpuFeatureScope avx2_scope(this, AVX2);
2194 // src = |l0|l1|...l14|l15|
2195 // scratch = |0|l0|0|l2|...|0|l14|
2196 vpsrld(scratch, src, 16);
2197 // dst = |0|l1|0|l3|...|0|l15|
2198 vpblendw(dst, src, scratch, 0xAA);
2199 vpaddd(dst, dst, scratch);
2200}
2201
2202void MacroAssembler::I16x16ExtAddPairwiseI8x32S(YMMRegister dst,
2203 YMMRegister src,
2204 YMMRegister scratch) {
2205 ASM_CODE_COMMENT(this);
2207 CpuFeatureScope avx2_scope(this, AVX2);
2208 Move(scratch, uint32_t{1});
2209 vpbroadcastb(scratch, scratch);
 2210 // pmaddubsw treats the first operand as unsigned, so the vector of 1s in
 2211 // scratch must be the first operand.
 2212 // src = |l0|l1|...|l30|l31|
 2213 // dst = |l0*1+l1*1|l2*1+l3*1|...|l30*1+l31*1|
2214 vpmaddubsw(dst, scratch, src);
2215}
2216
2217void MacroAssembler::I16x16ExtAddPairwiseI8x32U(YMMRegister dst,
2218 YMMRegister src,
2219 YMMRegister scratch) {
2220 ASM_CODE_COMMENT(this);
2222 CpuFeatureScope avx2_scope(this, AVX2);
2223 Move(scratch, uint32_t{1});
2224 vpbroadcastb(scratch, scratch);
2225 vpmaddubsw(dst, src, scratch);
2226}
2227
2229 YMMRegister tmp, Register scratch) {
2230 ASM_CODE_COMMENT(this);
2232 CpuFeatureScope avx_scope(this, AVX);
2233 CpuFeatureScope avx2_scope(this, AVX2);
2234 Operand int32_overflow_as_float = ExternalReferenceAsOperand(
2235 ExternalReference::address_of_wasm_i32x8_int32_overflow_as_float(),
2236 scratch);
2237 // This algorithm works by:
 2238 // 1. lanes with NaNs are zeroed
 2239 // 2. lanes greater than or equal to 2147483648.0f (MAX_INT32+1) are set to 0xffff'ffff
2240 // 3. cvttps2dq sets all out of range lanes to 0x8000'0000
2241 // a. correct for underflows (< MIN_INT32)
2242 // b. wrong for overflow, and we know which lanes overflow from 2.
2243 // 4. adjust for 3b by xor-ing 2 and 3
2244 // a. 0x8000'0000 xor 0xffff'ffff = 0x7fff'ffff (MAX_INT32)
2245 vcmpeqps(tmp, src, src);
2246 vandps(dst, src, tmp);
2247 vcmpgeps(tmp, src, int32_overflow_as_float);
2248 vcvttps2dq(dst, dst);
2249 vpxor(dst, dst, tmp);
2250}
2251
2253 YMMRegister tmp, Register scratch) {
2254 ASM_CODE_COMMENT(this);
2257
2258 CpuFeatureScope f16c_scope(this, F16C);
2259 CpuFeatureScope avx_scope(this, AVX);
2260 CpuFeatureScope avx2_scope(this, AVX2);
2261
2262 Operand op = ExternalReferenceAsOperand(
2263 ExternalReference::address_of_wasm_i32x8_int32_overflow_as_float(),
2264 scratch);
2265 // Convert source f16 to f32.
2266 vcvtph2ps(dst, src);
 2267 // Compare it to itself; NaN lanes become 0 because NaN does not equal itself.
2268 vcmpeqps(tmp, dst, dst);
2269 // Reset NaNs.
2270 vandps(dst, dst, tmp);
2271 // Detect positive Infinity as an overflow above MAX_INT32.
2272 vcmpgeps(tmp, dst, op);
2273 // Convert f32 to i32.
2274 vcvttps2dq(dst, dst);
 2275 // cvttps2dq sets all out-of-range lanes to 0x8000'0000, but since the source
 2276 // values are results of a conversion from f16, and therefore well below
 2277 // MAX_INT32, only +Infinity is an issue.
2278 // Convert all infinities to MAX_INT32 and let vpackssdw
2279 // clamp it to MAX_INT16 later.
2280 // 0x8000'0000 xor 0xffff'ffff(from 2 steps before) = 0x7fff'ffff (MAX_INT32)
2281 vpxor(dst, dst, tmp);
2282 // We now have 8 i32 values. Using one character per 16 bits:
2283 // dst: [AABBCCDDEEFFGGHH]
2284 // Create a copy of the upper four values in the lower half of {tmp}
2285 // (so the upper half of the immediate doesn't matter):
2286 vpermq(tmp, dst, 0x4E); // 0b01001110
2287 // tmp: [EEFFGGHHAABBCCDD]
2288 // Now pack them together as i16s. Note that {vpackssdw} interleaves
2289 // 128-bit chunks from each input, and takes care of saturating each
2290 // value to kMinInt16 and kMaxInt16. We will then ignore the upper half
2291 // of {dst}.
2292 vpackssdw(dst, dst, tmp);
2293 // dst: [EFGHABCDABCDEFGH]
2294 // <--><--><--><-->
2295 // ↑ ↑ ↑ └── from lower half of {dst}
2296 // │ │ └────── from lower half of {tmp}
2297 // │ └────────── from upper half of {dst} (ignored)
2298 // └────────────── from upper half of {tmp} (ignored)
2299}
2300
2302 YMMRegister tmp) {
2303 ASM_CODE_COMMENT(this);
2306
2307 CpuFeatureScope f16c_scope(this, F16C);
2308 CpuFeatureScope avx_scope(this, AVX);
2309 CpuFeatureScope avx2_scope(this, AVX2);
2310
2311 Operand op = ExternalReferenceAsOperand(
2312 ExternalReference::address_of_wasm_i32x8_int32_overflow_as_float(),
2314 vcvtph2ps(dst, src);
2315 // NAN->0, negative->0.
2316 vpxor(tmp, tmp, tmp);
2317 vmaxps(dst, dst, tmp);
2318 // Detect positive Infinity as an overflow above MAX_INT32.
2319 vcmpgeps(tmp, dst, op);
2320 // Convert to int.
2321 vcvttps2dq(dst, dst);
 2322 // cvttps2dq sets all out-of-range lanes to 0x8000'0000, but since the source
 2323 // values are results of a conversion from f16, and therefore well below
 2324 // MAX_INT32, only +Infinity is an issue.
 2325 // Convert all infinities to MAX_INT32 and let vpackusdw
 2326 // clamp it to MAX_UINT16 later.
2327 // 0x8000'0000 xor 0xffff'ffff(from 2 steps before) = 0x7fff'ffff (MAX_INT32)
2328 vpxor(dst, dst, tmp);
2329 // Move high part to a spare register.
2330 // See detailed comment in {I16x8SConvertF16x8} for how this works.
2331 vpermq(tmp, dst, 0x4E); // 0b01001110
2332 vpackusdw(dst, dst, tmp);
2333}
2334
2336 XMMRegister src2, XMMRegister src3,
2337 YMMRegister tmp, YMMRegister tmp2) {
2338 CpuFeatureScope fma3_scope(this, FMA3);
2339 CpuFeatureScope f16c_scope(this, F16C);
2340
2341 if (dst.code() == src2.code()) {
2342 vcvtph2ps(dst, dst);
2343 vcvtph2ps(tmp, src1);
2344 vcvtph2ps(tmp2, src3);
2345 vfmadd213ps(dst, tmp, tmp2);
2346 } else if (dst.code() == src3.code()) {
2347 vcvtph2ps(dst, dst);
2348 vcvtph2ps(tmp, src2);
2349 vcvtph2ps(tmp2, src1);
2350 vfmadd231ps(dst, tmp, tmp2);
2351 } else {
2352 vcvtph2ps(dst, src1);
2353 vcvtph2ps(tmp, src2);
2354 vcvtph2ps(tmp2, src3);
2355 vfmadd213ps(dst, tmp, tmp2);
2356 }
2357 vcvtps2ph(dst, dst, 0);
2358}
2359
2361 XMMRegister src2, XMMRegister src3,
2362 YMMRegister tmp, YMMRegister tmp2) {
2363 CpuFeatureScope fma3_scope(this, FMA3);
2364 CpuFeatureScope f16c_scope(this, F16C);
2365
2366 if (dst.code() == src2.code()) {
2367 vcvtph2ps(dst, dst);
2368 vcvtph2ps(tmp, src1);
2369 vcvtph2ps(tmp2, src3);
2370 vfnmadd213ps(dst, tmp, tmp2);
2371 } else if (dst.code() == src3.code()) {
2372 vcvtph2ps(dst, dst);
2373 vcvtph2ps(tmp, src2);
2374 vcvtph2ps(tmp2, src1);
2375 vfnmadd231ps(dst, tmp, tmp2);
2376 } else {
2377 vcvtph2ps(dst, src1);
2378 vcvtph2ps(tmp, src2);
2379 vcvtph2ps(tmp2, src3);
2380 vfnmadd213ps(dst, tmp, tmp2);
2381 }
2382 vcvtps2ph(dst, dst, 0);
2383}
2384
2386 YMMRegister src2, YMMRegister src3,
2387 YMMRegister tmp) {
2388 QFMA(ps);
2389}
2390
2392 YMMRegister src2, YMMRegister src3,
2393 YMMRegister tmp) {
2394 QFMS(ps);
2395}
2396
2398 YMMRegister src2, YMMRegister src3,
2399 YMMRegister tmp) {
2400 QFMA(pd);
2401}
2402
2404 YMMRegister src2, YMMRegister src3,
2405 YMMRegister tmp) {
2406 QFMS(pd);
2407}
2408
2410 YMMRegister src2, YMMRegister src3,
2411 YMMRegister scratch,
2412 YMMRegister splat_reg) {
2413 ASM_CODE_COMMENT(this);
 2415 // This is guaranteed by the instruction selector.
2416 DCHECK_EQ(dst, src3);
2417 if (CpuFeatures::IsSupported(AVX_VNNI_INT8)) {
2418 CpuFeatureScope avx_vnni_int8_scope(this, AVX_VNNI_INT8);
2419 vpdpbssd(dst, src2, src1);
2420 return;
2421 } else if (CpuFeatures::IsSupported(AVX_VNNI)) {
2422 CpuFeatureScope avx_scope(this, AVX_VNNI);
2423 vpdpbusd(dst, src2, src1);
2424 return;
2425 }
2426
2427 DCHECK_NE(scratch, splat_reg);
2428 CpuFeatureScope avx_scope(this, AVX);
2429 CpuFeatureScope avx2_scope(this, AVX2);
2430 // splat_reg = i16x16.splat(1)
2431 vpcmpeqd(splat_reg, splat_reg, splat_reg);
2432 vpsrlw(splat_reg, splat_reg, uint8_t{15});
2433 vpmaddubsw(scratch, src2, src1);
2434 vpmaddwd(scratch, splat_reg, scratch);
2435 vpaddd(dst, src3, scratch);
2436}
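// Editor's note: the non-VNNI fallback above computes, per i32 lane, a dot
// product of four byte pairs plus an accumulator. Because one of the two byte
// inputs is restricted to 7 bits (per the I7x32 in the name), the intermediate
// i16 pair sums of vpmaddubsw cannot saturate. A scalar sketch of one output
// lane (illustrative only, not part of this file; the function name is made
// up):
static inline int32_t IllustrativeDotI8x4AddS(const int8_t s1[4],
                                              const int8_t s2[4], int32_t acc) {
  int32_t dot = 0;
  for (int k = 0; k < 4; ++k) dot += int32_t{s1[k]} * int32_t{s2[k]};
  return acc + dot;
}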
2437
2439 YMMRegister scratch1,
2440 YMMRegister scratch2) {
2441 ASM_CODE_COMMENT(this);
2443 CpuFeatureScope avx_scope(this, AVX);
2444 CpuFeatureScope avx2_scope(this, AVX2);
2445
2446 // NAN->0, negative->0.
2447 vpxor(scratch1, scratch1, scratch1);
2448 vmaxps(dst, src, scratch1);
2449 // scratch1: float representation of max_signed.
2450 vpcmpeqd(scratch1, scratch1, scratch1);
2451 vpsrld(scratch1, scratch1, uint8_t{1}); // 0x7fffffff
2452 vcvtdq2ps(scratch1, scratch1); // 0x4f000000
2453 // scratch2: convert (src-max_signed).
2454 // Set positive overflow lanes to 0x7FFFFFFF.
2455 // Set negative lanes to 0.
2456 vsubps(scratch2, dst, scratch1);
2457
2458 vcmpleps(scratch1, scratch1, scratch2);
2459 vcvttps2dq(scratch2, scratch2);
2460 vpxor(scratch2, scratch2, scratch1);
2461 vpxor(scratch1, scratch1, scratch1);
2462 vpmaxsd(scratch2, scratch2, scratch1);
2463 // Convert to int. Overflow lanes above max_signed will be 0x80000000.
2464 vcvttps2dq(dst, dst);
2465 // Add (src-max_signed) for overflow lanes.
2466 vpaddd(dst, dst, scratch2);
2467}
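// Editor's note: the lane semantics of the sequence above amount to a
// truncating, saturating float-to-uint32 conversion. A scalar sketch
// (illustrative only, not part of this file; the function name is made up):
static inline uint32_t IllustrativeTruncF32ToU32Sat(float v) {
  if (!(v > 0.0f)) return 0;                    // NaN and non-positive -> 0
  if (v >= 4294967296.0f) return 0xFFFFFFFFu;   // overflow saturates to max
  return static_cast<uint32_t>(v);              // in-range values truncate
}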
2468
2469void MacroAssembler::SmiTag(Register reg) {
2470 static_assert(kSmiTag == 0);
2473 DCHECK_EQ(kSmiShift, 1);
2474 addl(reg, reg);
2475 } else {
2476 shlq(reg, Immediate(kSmiShift));
2477 }
2478#ifdef ENABLE_SLOW_DCHECKS
2480#endif
2481}
2482
2483void MacroAssembler::SmiTag(Register dst, Register src) {
2484 DCHECK(dst != src);
2486 movl(dst, src);
2487 } else {
2488 movq(dst, src);
2489 }
2490 SmiTag(dst);
2491}
2492
2493void MacroAssembler::SmiUntag(Register reg) {
2494 static_assert(kSmiTag == 0);
2496 // TODO(v8:7703): Is there a way to avoid this sign extension when pointer
2497 // compression is enabled?
2499 sarl(reg, Immediate(kSmiShift));
2500 movsxlq(reg, reg);
2501 } else {
2502 sarq(reg, Immediate(kSmiShift));
2503 }
2504}
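// Editor's note: with pointer compression (31-bit Smis) kSmiShift is 1, so
// tagging doubles the value in the low 32 bits and untagging is an arithmetic
// shift right by one followed by sign extension. A scalar sketch of that
// configuration only (illustrative, not part of this file; the helper names
// are made up):
static inline uint32_t IllustrativeSmiTag31(int32_t value) {
  return static_cast<uint32_t>(value) << 1;  // addl(reg, reg)
}
static inline int32_t IllustrativeSmiUntag31(uint32_t tagged) {
  return static_cast<int32_t>(tagged) >> 1;  // sarl(reg, 1), then movsxlq
}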
2505
2507 static_assert(kSmiTag == 0);
2511 shrl(reg, Immediate(kSmiShift));
2512 } else {
2513 shrq(reg, Immediate(kSmiShift));
2514 }
2515}
2516
2517void MacroAssembler::SmiUntag(Register dst, Register src) {
2518 DCHECK(dst != src);
2520 movsxlq(dst, src);
2521 } else {
2522 movq(dst, src);
2523 }
2524 // TODO(v8:7703): Call SmiUntag(reg) if we can find a way to avoid the extra
2525 // mov when pointer compression is enabled.
2526 static_assert(kSmiTag == 0);
2528 sarq(dst, Immediate(kSmiShift));
2529}
2530
2531void MacroAssembler::SmiUntag(Register dst, Operand src) {
2532 if (SmiValuesAre32Bits()) {
2533 // Sign extend to 64-bit.
2534 movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
2535 } else {
2538 movsxlq(dst, src);
2539 } else {
2540 movq(dst, src);
2541 }
2542 sarq(dst, Immediate(kSmiShift));
2543 }
2544}
2545
2546void MacroAssembler::SmiUntagUnsigned(Register dst, Operand src) {
2547 if (SmiValuesAre32Bits()) {
2548 // Zero extend to 64-bit.
2549 movl(dst, Operand(src, kSmiShift / kBitsPerByte));
2550 } else {
2553 movl(dst, src);
2555 shrl(dst, Immediate(kSmiShift));
2556 } else {
2557 movq(dst, src);
2558 shrq(dst, Immediate(kSmiShift));
2559 }
2560 }
2561}
2562
2563void MacroAssembler::SmiToInt32(Register reg) {
2564 AssertSmi(reg);
2565 static_assert(kSmiTag == 0);
2568 sarl(reg, Immediate(kSmiShift));
2569 } else {
2570 shrq(reg, Immediate(kSmiShift));
2571 }
2572}
2573
2574void MacroAssembler::SmiToInt32(Register dst, Register src) {
2575 if (dst != src) {
2576 mov_tagged(dst, src);
2577 }
2578 SmiToInt32(dst);
2579}
2580
2581void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
2582 AssertSmi(smi1);
2583 AssertSmi(smi2);
2584 cmp_tagged(smi1, smi2);
2585}
2586
2587void MacroAssembler::SmiCompare(Register dst, Tagged<Smi> src) {
2588 AssertSmi(dst);
2589 Cmp(dst, src);
2590}
2591
2592void MacroAssembler::Cmp(Register dst, Tagged<Smi> src) {
2593 if (src.value() == 0) {
2594 test_tagged(dst, dst);
2595 } else if (COMPRESS_POINTERS_BOOL) {
2596 cmp_tagged(dst, Immediate(src));
2597 } else {
2599 Register constant_reg = GetSmiConstant(src);
2600 cmp_tagged(dst, constant_reg);
2601 }
2602}
2603
2604void MacroAssembler::SmiCompare(Register dst, Operand src) {
2605 AssertSmi(dst);
2606 AssertSmi(src);
2607 cmp_tagged(dst, src);
2608}
2609
2610void MacroAssembler::SmiCompare(Operand dst, Register src) {
2611 AssertSmi(dst);
2612 AssertSmi(src);
2613 cmp_tagged(dst, src);
2614}
2615
2616void MacroAssembler::SmiCompare(Operand dst, Tagged<Smi> src) {
2617 AssertSmi(dst);
2618 if (SmiValuesAre32Bits()) {
2619 cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src.value()));
2620 } else {
2622 cmpl(dst, Immediate(src));
2623 }
2624}
2625
2626void MacroAssembler::Cmp(Operand dst, Tagged<Smi> src) {
2627 // The Operand cannot use the smi register.
2628 Register smi_reg = GetSmiConstant(src);
2629 DCHECK(!dst.AddressUsesRegister(smi_reg));
2630 cmp_tagged(dst, smi_reg);
2631}
2632
2634#ifdef V8_COMPRESS_POINTERS
2635 ASM_CODE_COMMENT(this);
2636 static constexpr unsigned int clobber_mask = 0x515151;
2637 static constexpr int rot_to_unused =
2639 rolq(src, Immediate(rot_to_unused));
2640 xorq(src, Immediate(clobber_mask));
2641 rorq(src, Immediate(rot_to_unused));
2642#endif
2643}
2644
2645Condition MacroAssembler::CheckSmi(Register src) {
2646 static_assert(kSmiTag == 0);
2647 testb(src, Immediate(kSmiTagMask));
2648 return zero;
2649}
2650
2652 static_assert(kSmiTag == 0);
2653 testb(src, Immediate(kSmiTagMask));
2654 return zero;
2655}
2656
2657void MacroAssembler::JumpIfSmi(Register src, Label* on_smi,
2659 Condition smi = CheckSmi(src);
2660 j(smi, on_smi, near_jump);
2661}
2662
2663void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi,
2665 Condition smi = CheckSmi(src);
2666 j(NegateCondition(smi), on_not_smi, near_jump);
2667}
2668
2669void MacroAssembler::JumpIfNotSmi(Operand src, Label* on_not_smi,
2671 Condition smi = CheckSmi(src);
2672 j(NegateCondition(smi), on_not_smi, near_jump);
2673}
2674
2675void MacroAssembler::SmiAddConstant(Operand dst, Tagged<Smi> constant) {
2676 if (constant.value() != 0) {
2677 if (SmiValuesAre32Bits()) {
2678 addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant.value()));
2679 } else {
2681 if (kTaggedSize == kInt64Size) {
2682 // Sign-extend value after addition
2683 movl(kScratchRegister, dst);
2684 addl(kScratchRegister, Immediate(constant));
2686 movq(dst, kScratchRegister);
2687 } else {
2689 addl(dst, Immediate(constant));
2690 }
2691 }
2692 }
2693}
2694
2695SmiIndex MacroAssembler::SmiToIndex(Register dst, Register src, int shift) {
2696 if (SmiValuesAre32Bits()) {
2697 DCHECK(is_uint6(shift));
2698 // There is a possible optimization if shift is in the range 60-63, but that
2699 // will (and must) never happen.
2700 if (dst != src) {
2701 movq(dst, src);
2702 }
2703 if (shift < kSmiShift) {
2704 sarq(dst, Immediate(kSmiShift - shift));
2705 } else {
2706 shlq(dst, Immediate(shift - kSmiShift));
2707 }
2708 return SmiIndex(dst, times_1);
2709 } else {
2711 // We have to sign extend the index register to 64-bit as the SMI might
2712 // be negative.
2713 movsxlq(dst, src);
2714 if (shift < kSmiShift) {
2715 sarq(dst, Immediate(kSmiShift - shift));
2716 } else if (shift != kSmiShift) {
2717 if (shift - kSmiShift <= static_cast<int>(times_8)) {
2718 return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiShift));
2719 }
2720 shlq(dst, Immediate(shift - kSmiShift));
2721 }
2722 return SmiIndex(dst, times_1);
2723 }
2724}
2725
2726void MacroAssembler::Switch(Register scratch, Register reg, int case_value_base,
2727 Label** labels, int num_labels) {
2728 Register table = scratch;
2729 Label fallthrough, jump_table;
2730 if (case_value_base != 0) {
2731 subq(reg, Immediate(case_value_base));
2732 }
2733 cmpq(reg, Immediate(num_labels));
2734 j(above_equal, &fallthrough);
2735 leaq(table, MemOperand(&jump_table));
2736#ifdef V8_ENABLE_CET_IBT
2737 // Add the notrack prefix to disable landing pad enforcement.
2738 jmp(MemOperand(table, reg, times_8, 0), /*notrack=*/true);
2739#else
2740 jmp(MemOperand(table, reg, times_8, 0));
2741#endif
2742 // Emit the jump table inline, under the assumption that it's not too big.
2744 bind(&jump_table);
2745 for (int i = 0; i < num_labels; ++i) {
2746 dq(labels[i]);
2747 }
2748 bind(&fallthrough);
2749}
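// Editor's note: the emitted sequence above is an inline jump table: rebase the
// case value, bounds-check it with one unsigned compare, then jump indirectly
// through a table of 8-byte code addresses. A scalar sketch of the dispatch
// (illustrative only, not part of this file; the function name is made up):
static inline const void* IllustrativeSwitchTarget(const void* const* table,
                                                   int64_t value,
                                                   int case_value_base,
                                                   int num_labels,
                                                   const void* fallthrough) {
  uint64_t index = static_cast<uint64_t>(value - case_value_base);
  if (index >= static_cast<uint64_t>(num_labels)) return fallthrough;
  return table[index];  // jmp [table + index * 8]
}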
2750
2751void MacroAssembler::Push(Tagged<Smi> source) {
2752 intptr_t smi = static_cast<intptr_t>(source.ptr());
2753 if (is_int32(smi)) {
2754 Push(Immediate(static_cast<int32_t>(smi)));
2755 return;
2756 }
2757 int first_byte_set = base::bits::CountTrailingZeros64(smi) / 8;
2758 int last_byte_set = (63 - base::bits::CountLeadingZeros64(smi)) / 8;
2759 if (first_byte_set == last_byte_set) {
2760 // This sequence has only 7 bytes, compared to the 12 bytes below.
2761 Push(Immediate(0));
2762 movb(Operand(rsp, first_byte_set),
2763 Immediate(static_cast<int8_t>(smi >> (8 * first_byte_set))));
2764 return;
2765 }
2766 Register constant = GetSmiConstant(source);
2767 Push(constant);
2768}
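// Editor's note: the short 7-byte encoding above applies when all set bits of
// the 64-bit Smi fall within a single byte; pushing zero and then patching that
// one byte in the stack slot reproduces the full value. A sketch of the value
// that ends up on the stack (illustrative only, not part of this file; the
// function name is made up):
static inline uint64_t IllustrativeOneBytePushValue(int byte_index,
                                                    uint8_t byte) {
  uint64_t slot = 0;                                        // Push(Immediate(0))
  slot |= static_cast<uint64_t>(byte) << (8 * byte_index);  // movb into the slot
  return slot;
}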
2769
2770// ----------------------------------------------------------------------------
2771
2772void MacroAssembler::Move(Register dst, Tagged<Smi> source) {
2773 static_assert(kSmiTag == 0);
2774 int value = source.value();
2775 if (value == 0) {
2776 xorl(dst, dst);
2777 } else if (SmiValuesAre32Bits()) {
2778 Move(dst, source.ptr(), RelocInfo::NO_INFO);
2779 } else {
2780 uint32_t uvalue = static_cast<uint32_t>(source.ptr());
2781 Move(dst, uvalue);
2782 }
2783}
2784
2785void MacroAssembler::Move(Operand dst, intptr_t x) {
2786 if (is_int32(x)) {
2787 movq(dst, Immediate(static_cast<int32_t>(x)));
2788 } else {
2790 movq(dst, kScratchRegister);
2791 }
2792}
2793
2794void MacroAssembler::Move(Register dst, ExternalReference ext) {
2795 // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
2796 // non-isolate-independent code. In many cases it might be cheaper than
2797 // embedding the relocatable value.
2798 if (root_array_available()) {
2799 if (ext.IsIsolateFieldId()) {
2800 leaq(dst, Operand(kRootRegister, ext.offset_from_root_register()));
2801 return;
2802 } else if (options().isolate_independent_code) {
2804 return;
2805 }
2806 }
2807 // External references should not get created with IDs if
2808 // `!root_array_available()`.
2809 CHECK(!ext.IsIsolateFieldId());
2810 movq(dst, Immediate64(ext.address(), RelocInfo::EXTERNAL_REFERENCE));
2811}
2812
2813void MacroAssembler::Move(Register dst, Register src) {
2814 if (dst != src) {
2815 movq(dst, src);
2816 }
2817}
2818
2819void MacroAssembler::Move(Register dst, Operand src) { movq(dst, src); }
2820void MacroAssembler::Move(Register dst, Immediate src) {
2821 if (src.rmode() == RelocInfo::Mode::NO_INFO) {
2822 Move(dst, src.value());
2823 } else {
2824 movl(dst, src);
2825 }
2826}
2827
2829 if (dst != src) {
2830 Movaps(dst, src);
2831 }
2832}
2833
2834void MacroAssembler::MovePair(Register dst0, Register src0, Register dst1,
2835 Register src1) {
2836 if (dst0 != src1) {
2837 // Normal case: Writing to dst0 does not destroy src1.
2838 Move(dst0, src0);
2839 Move(dst1, src1);
2840 } else if (dst1 != src0) {
2841 // Only dst0 and src1 are the same register,
2842 // but writing to dst1 does not destroy src0.
2843 Move(dst1, src1);
2844 Move(dst0, src0);
2845 } else {
2846 // dst0 == src1, and dst1 == src0, a swap is required:
2847 // dst0 \/ src0
2848 // dst1 /\ src1
2849 xchgq(dst0, dst1);
2850 }
2851}
2852
2853void MacroAssembler::MoveNumber(Register dst, double value) {
2854 int32_t smi;
2855 if (DoubleToSmiInteger(value, &smi)) {
2856 Move(dst, Smi::FromInt(smi));
2857 } else {
2858 movq_heap_number(dst, value);
2859 }
2860}
2861
2862void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
2863 if (src == 0) {
2864 Xorps(dst, dst);
2865 } else {
2866 unsigned nlz = base::bits::CountLeadingZeros(src);
2867 unsigned ntz = base::bits::CountTrailingZeros(src);
2868 unsigned pop = base::bits::CountPopulation(src);
2869 DCHECK_NE(0u, pop);
2870 if (pop + ntz + nlz == 32) {
2871 Pcmpeqd(dst, dst);
2872 if (ntz) Pslld(dst, static_cast<uint8_t>(ntz + nlz));
2873 if (nlz) Psrld(dst, static_cast<uint8_t>(nlz));
2874 } else {
2875 movl(kScratchRegister, Immediate(src));
2876 Movd(dst, kScratchRegister);
2877 }
2878 }
2879}
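// Editor's note: the pop + ntz + nlz == 32 test above detects constants whose
// set bits form one contiguous run. Such a constant can be rebuilt from the
// all-ones pattern with two shifts and no general-purpose scratch register
// (illustrative only, not part of this file; the function name is made up):
static inline uint32_t IllustrativeContiguousRunConstant(unsigned nlz,
                                                         unsigned ntz) {
  uint32_t bits = 0xFFFFFFFFu;  // Pcmpeqd(dst, dst)
  bits <<= (ntz + nlz);         // Pslld by ntz + nlz
  return bits >> nlz;           // Psrld by nlz
}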
2880
2881void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
2882 if (src == 0) {
2883 Xorpd(dst, dst);
2884 } else {
2885 unsigned nlz = base::bits::CountLeadingZeros(src);
2886 unsigned ntz = base::bits::CountTrailingZeros(src);
2887 unsigned pop = base::bits::CountPopulation(src);
2888 DCHECK_NE(0u, pop);
2889 if (pop + ntz + nlz == 64) {
2890 Pcmpeqd(dst, dst);
2891 if (ntz) Psllq(dst, static_cast<uint8_t>(ntz + nlz));
2892 if (nlz) Psrlq(dst, static_cast<uint8_t>(nlz));
2893 } else {
2894 uint32_t lower = static_cast<uint32_t>(src);
2895 uint32_t upper = static_cast<uint32_t>(src >> 32);
2896 if (upper == 0) {
2897 Move(dst, lower);
2898 } else {
2899 movq(kScratchRegister, src);
2900 Movq(dst, kScratchRegister);
2901 }
2902 }
2903 }
2904}
2905
2906void MacroAssembler::Move(XMMRegister dst, uint64_t high, uint64_t low) {
2907 if (high == low) {
2908 Move(dst, low);
2909 Punpcklqdq(dst, dst);
2910 return;
2911 }
2912
2913 Move(dst, low);
2914 movq(kScratchRegister, high);
2915 Pinsrq(dst, dst, kScratchRegister, uint8_t{1});
2916}
2917
2918// ----------------------------------------------------------------------------
2919
2920void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
2921 if (IsSmi(*source)) {
2922 Cmp(dst, Cast<Smi>(*source));
2923 } else if (root_array_available_ && options().isolate_independent_code) {
2924 // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
2925 // non-isolate-independent code. In many cases it might be cheaper than
2926 // embedding the relocatable value.
2927 // TODO(v8:9706): Fix-it! This load will always uncompress the value
2928 // even when we are loading a compressed embedded object.
2930 cmp_tagged(dst, kScratchRegister);
2931 } else if (COMPRESS_POINTERS_BOOL) {
2933 DCHECK(is_uint32(index));
2934 cmpl(dst, Immediate(static_cast<int>(index),
2936 } else {
2938 Immediate64(source.address(), RelocInfo::FULL_EMBEDDED_OBJECT));
2939 cmpq(dst, kScratchRegister);
2940 }
2941}
2942
2943void MacroAssembler::Cmp(Operand dst, Handle<Object> source) {
2944 if (IsSmi(*source)) {
2945 Cmp(dst, Cast<Smi>(*source));
2946 } else if (root_array_available_ && options().isolate_independent_code) {
2947 // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
2948 // non-isolate-independent code. In many cases it might be cheaper than
2949 // embedding the relocatable value.
2950 // TODO(v8:9706): Fix-it! This load will always uncompress the value
2951 // even when we are loading a compressed embedded object.
2953 cmp_tagged(dst, kScratchRegister);
2954 } else if (COMPRESS_POINTERS_BOOL) {
2956 DCHECK(is_uint32(index));
2957 cmpl(dst, Immediate(static_cast<int>(index),
2959 } else {
2962 cmp_tagged(dst, kScratchRegister);
2963 }
2964}
2965
2966void MacroAssembler::CompareRange(Register value, unsigned lower_limit,
2967 unsigned higher_limit) {
2968 ASM_CODE_COMMENT(this);
2969 DCHECK_LT(lower_limit, higher_limit);
2970 if (lower_limit != 0) {
2971 leal(kScratchRegister, Operand(value, 0u - lower_limit));
2972 cmpl(kScratchRegister, Immediate(higher_limit - lower_limit));
2973 } else {
2974 cmpl(value, Immediate(higher_limit));
2975 }
2976}
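// Editor's note: CompareRange folds a two-sided bounds check into a single
// unsigned comparison: values below lower_limit wrap around to large unsigned
// numbers after the subtraction. A scalar sketch of the condition that the
// subsequent below_equal branch tests (illustrative only, not part of this
// file; the function name is made up):
static inline bool IllustrativeIsInRange(uint32_t value, uint32_t lower_limit,
                                         uint32_t higher_limit) {
  return (value - lower_limit) <= (higher_limit - lower_limit);
}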
2977
2978void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
2979 unsigned higher_limit, Label* on_in_range,
2981 CompareRange(value, lower_limit, higher_limit);
2982 j(below_equal, on_in_range, near_jump);
2983}
2984
2985void MacroAssembler::Push(Handle<HeapObject> source) {
2986 Move(kScratchRegister, source);
2988}
2989
2990void MacroAssembler::PushArray(Register array, Register size, Register scratch,
2991 PushArrayOrder order) {
2992 DCHECK(!AreAliased(array, size, scratch));
2993 Register counter = scratch;
2994 Label loop, entry;
2995 if (order == PushArrayOrder::kReverse) {
2996 Move(counter, 0);
2997 jmp(&entry);
2998 bind(&loop);
2999 Push(Operand(array, counter, times_system_pointer_size, 0));
3000 incq(counter);
3001 bind(&entry);
3002 cmpq(counter, size);
3003 j(less, &loop, Label::kNear);
3004 } else {
3005 movq(counter, size);
3006 jmp(&entry);
3007 bind(&loop);
3008 Push(Operand(array, counter, times_system_pointer_size, 0));
3009 bind(&entry);
3010 decq(counter);
3011 j(greater_equal, &loop, Label::kNear);
3012 }
3013}
3014
3015void MacroAssembler::Move(Register result, Handle<HeapObject> object,
3016 RelocInfo::Mode rmode) {
3017 // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
3018 // non-isolate-independent code. In many cases it might be cheaper than
3019 // embedding the relocatable value.
3020 if (root_array_available_ && options().isolate_independent_code) {
3021 // TODO(v8:9706): Fix-it! This load will always uncompress the value
3022 // even when we are loading a compressed embedded object.
3024 } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
3025 EmbeddedObjectIndex index = AddEmbeddedObject(object);
3026 DCHECK(is_uint32(index));
3027 movl(result, Immediate(static_cast<int>(index), rmode));
3028 } else {
3030 movq(result, Immediate64(object.address(), rmode));
3031 }
3032}
3033
3034void MacroAssembler::Move(Operand dst, Handle<HeapObject> object,
3035 RelocInfo::Mode rmode) {
3036 Move(kScratchRegister, object, rmode);
3037 movq(dst, kScratchRegister);
3038}
3039
3040void MacroAssembler::Drop(int stack_elements) {
3041 if (stack_elements > 0) {
3042 addq(rsp, Immediate(stack_elements * kSystemPointerSize));
3043 }
3044}
3045
3046void MacroAssembler::DropUnderReturnAddress(int stack_elements,
3047 Register scratch) {
3048 DCHECK_GT(stack_elements, 0);
3049 if (stack_elements == 1) {
3050 popq(MemOperand(rsp, 0));
3051 return;
3052 }
3053
3054 PopReturnAddressTo(scratch);
3055 Drop(stack_elements);
3056 PushReturnAddressFrom(scratch);
3057}
3058
3059void MacroAssembler::DropArguments(Register count) {
3060 leaq(rsp, Operand(rsp, count, times_system_pointer_size, 0));
3061}
3062
3063void MacroAssembler::DropArguments(Register count, Register scratch) {
3064 DCHECK(!AreAliased(count, scratch));
3065 PopReturnAddressTo(scratch);
3066 DropArguments(count);
3067 PushReturnAddressFrom(scratch);
3068}
3069
3071 Register receiver,
3072 Register scratch) {
3073 DCHECK(!AreAliased(argc, receiver, scratch));
3074 PopReturnAddressTo(scratch);
3075 DropArguments(argc);
3076 Push(receiver);
3077 PushReturnAddressFrom(scratch);
3078}
3079
3081 Operand receiver,
3082 Register scratch) {
3083 DCHECK(!AreAliased(argc, scratch));
3084 DCHECK(!receiver.AddressUsesRegister(scratch));
3085 PopReturnAddressTo(scratch);
3086 DropArguments(argc);
3087 Push(receiver);
3088 PushReturnAddressFrom(scratch);
3089}
3090
3091void MacroAssembler::Push(Register src) { pushq(src); }
3092
3093void MacroAssembler::Push(Operand src) { pushq(src); }
3094
3095void MacroAssembler::PushQuad(Operand src) { pushq(src); }
3096
3097void MacroAssembler::Push(Immediate value) { pushq(value); }
3098
3099void MacroAssembler::PushImm32(int32_t imm32) { pushq_imm32(imm32); }
3100
3101void MacroAssembler::Pop(Register dst) { popq(dst); }
3102
3103void MacroAssembler::Pop(Operand dst) { popq(dst); }
3104
3105void MacroAssembler::PopQuad(Operand dst) { popq(dst); }
3106
3107void MacroAssembler::Jump(const ExternalReference& reference) {
3110 isolate(), reference)));
3111}
3112
3113void MacroAssembler::Jump(Operand op) { jmp(op); }
3114
3115void MacroAssembler::Jump(Operand op, Condition cc) {
3116 Label skip;
3117 j(NegateCondition(cc), &skip, Label::kNear);
3118 Jump(op);
3119 bind(&skip);
3120}
3121
3125}
3126
3128 Condition cc) {
3129 Label skip;
3130 j(NegateCondition(cc), &skip, Label::kNear);
3131 Jump(destination, rmode);
3132 bind(&skip);
3133}
3134
3135void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
3136 DCHECK_IMPLIES(options().isolate_independent_code,
3139 if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin)) {
3140 TailCallBuiltin(builtin);
3141 return;
3142 }
3144 jmp(code_object, rmode);
3145}
3146
3147void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
3148 Condition cc) {
3149 DCHECK_IMPLIES(options().isolate_independent_code,
3152 if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin)) {
3153 TailCallBuiltin(builtin, cc);
3154 return;
3155 }
3157 j(cc, code_object, rmode);
3158}
3159
3160void MacroAssembler::Call(ExternalReference ext) {
3163}
3164
3165void MacroAssembler::Call(Operand op) {
3166 if (!CpuFeatures::IsSupported(INTEL_ATOM)) {
3167 call(op);
3168 } else {
3171 }
3172}
3173
3177}
3178
3179void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
3180 DCHECK_IMPLIES(options().isolate_independent_code,
3183 if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin)) {
3184 CallBuiltin(builtin);
3185 return;
3186 }
3188 call(code_object, rmode);
3189}
3190
3193 return Operand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin));
3194}
3195
3196Operand MacroAssembler::EntryFromBuiltinIndexAsOperand(Register builtin_index) {
3197 if (SmiValuesAre32Bits()) {
3198 // The builtin_index register contains the builtin index as a Smi.
3199 Move(kScratchRegister, builtin_index); // Callee checks for equality.
3202 IsolateData::builtin_entry_table_offset());
3203 } else {
3205
3206 // The builtin_index register contains the builtin index as a Smi.
3207 // Untagging is folded into the indexing operand below (we use
3208 // times_half_system_pointer_size since smis are already shifted by one).
3209 return Operand(kRootRegister, builtin_index, times_half_system_pointer_size,
3210 IsolateData::builtin_entry_table_offset());
3211 }
3212}
3213
3214void MacroAssembler::CallBuiltinByIndex(Register builtin_index) {
3215 Call(EntryFromBuiltinIndexAsOperand(builtin_index));
3216}
3217
3220 switch (options().builtin_call_jump_mode) {
3223 break;
3225 near_call(static_cast<intptr_t>(builtin), RelocInfo::NEAR_BUILTIN_ENTRY);
3226 break;
3229 break;
3231 Handle<Code> code = isolate()->builtins()->code_handle(builtin);
3233 break;
3234 }
3235 }
3236}
3237
3240 CommentForOffHeapTrampoline("tail call", builtin));
3241 switch (options().builtin_call_jump_mode) {
3244 break;
3246 near_jmp(static_cast<intptr_t>(builtin), RelocInfo::NEAR_BUILTIN_ENTRY);
3247 break;
3250 break;
3252 Handle<Code> code = isolate()->builtins()->code_handle(builtin);
3254 break;
3255 }
3256 }
3257}
3258
3261 CommentForOffHeapTrampoline("tail call", builtin));
3262 switch (options().builtin_call_jump_mode) {
3265 break;
3267 near_j(cc, static_cast<intptr_t>(builtin), RelocInfo::NEAR_BUILTIN_ENTRY);
3268 break;
3271 break;
3273 Handle<Code> code = isolate()->builtins()->code_handle(builtin);
3274 j(cc, code, RelocInfo::CODE_TARGET);
3275 break;
3276 }
3277 }
3278}
3279
3281 Register code_object,
3282 CodeEntrypointTag tag) {
3283 ASM_CODE_COMMENT(this);
3284#ifdef V8_ENABLE_SANDBOX
3285 LoadCodeEntrypointViaCodePointer(
3286 destination, FieldOperand(code_object, Code::kSelfIndirectPointerOffset),
3287 tag);
3288#else
3289 movq(destination, FieldOperand(code_object, Code::kInstructionStartOffset));
3290#endif
3291}
3292
3293void MacroAssembler::CallCodeObject(Register code_object,
3294 CodeEntrypointTag tag) {
3295 LoadCodeInstructionStart(code_object, code_object, tag);
3296 call(code_object);
3297}
3298
3299void MacroAssembler::JumpCodeObject(Register code_object, CodeEntrypointTag tag,
3300 JumpMode jump_mode) {
3301 // TODO(saelo): can we avoid using this for JavaScript functions
3302 // (kJSEntrypointTag) and instead use a variant that ensures that the caller
3303 // and callee agree on the signature (i.e. parameter count)?
3304 LoadCodeInstructionStart(code_object, code_object, tag);
3305 switch (jump_mode) {
3306 case JumpMode::kJump:
3307 jmp(code_object);
3308 return;
3310 pushq(code_object);
3311 Ret();
3312 return;
3313 }
3314}
3315
3316void MacroAssembler::CallJSFunction(Register function_object,
3317 uint16_t argument_count) {
3318#ifdef V8_ENABLE_LEAPTIERING
3319 static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
3320 static_assert(kJavaScriptCallDispatchHandleRegister == r15, "ABI mismatch");
3321 movl(r15, FieldOperand(function_object, JSFunction::kDispatchHandleOffset));
3322 LoadEntrypointAndParameterCountFromJSDispatchTable(rcx, rbx, r15);
3323 // Force a safe crash if the parameter count doesn't match.
3324 cmpl(rbx, Immediate(argument_count));
3325 SbxCheck(less_equal, AbortReason::kJSSignatureMismatch);
3326 call(rcx);
3327#else
3329 LoadTaggedField(rcx, FieldOperand(function_object, JSFunction::kCodeOffset));
3331#endif
3332}
3333
3334#if V8_ENABLE_LEAPTIERING
3335void MacroAssembler::CallJSDispatchEntry(JSDispatchHandle dispatch_handle,
3336 uint16_t argument_count) {
3337 static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
3338 static_assert(kJavaScriptCallDispatchHandleRegister == r15, "ABI mismatch");
3340 Immediate(dispatch_handle.value(), RelocInfo::JS_DISPATCH_HANDLE));
3341 // WARNING: This entrypoint load is only safe because we are storing a
3342 // RelocInfo for the dispatch handle in the movl above (thus keeping the
3343 // dispatch entry alive) _and_ because the entrypoints are not compactable
3344 // (thus meaning that the calculation in the entrypoint load is not
3345 // invalidated by a compaction).
3346 // TODO(leszeks): Make this less of a footgun.
3347 static_assert(!JSDispatchTable::kSupportsCompaction);
3348 LoadEntrypointFromJSDispatchTable(rcx, dispatch_handle);
3349 CHECK_EQ(argument_count,
3350 IsolateGroup::current()->js_dispatch_table()->GetParameterCount(
3351 dispatch_handle));
3352 call(rcx);
3353}
3354#endif
3355
3356void MacroAssembler::JumpJSFunction(Register function_object,
3357 JumpMode jump_mode) {
3359#ifdef V8_ENABLE_LEAPTIERING
3360 // This implementation is not currently used because callers usually need
3361 // to load both entry point and parameter count and then do something with
3362 // the latter before the actual call.
3363 UNREACHABLE();
3364#else
3365 LoadTaggedField(rcx, FieldOperand(function_object, JSFunction::kCodeOffset));
3366 JumpCodeObject(rcx, kJSEntrypointTag, jump_mode);
3367#endif
3368}
3369
3370#ifdef V8_ENABLE_WEBASSEMBLY
3371
3372void MacroAssembler::CallWasmCodePointer(Register target,
3373 uint64_t signature_hash,
3374 CallJumpMode call_jump_mode) {
3375 ASM_CODE_COMMENT(this);
3376 Move(kScratchRegister, ExternalReference::wasm_code_pointer_table());
3377
3378#ifdef V8_ENABLE_SANDBOX
3379 static_assert(sizeof(wasm::WasmCodePointerTableEntry) == 16);
3380 // Check that using a 32-bit shift is valid for any valid code pointer.
3381 static_assert(wasm::WasmCodePointerTable::kMaxWasmCodePointers <=
3382 (kMaxInt >> 4));
3383 shll(target, Immediate(4));
3384 // Add `target` and `kScratchRegister` early to free `kScratchRegister` again.
3385 addq(target, kScratchRegister);
3386
3387 Operand signature_hash_op{target,
3388 wasm::WasmCodePointerTable::kOffsetOfSignatureHash};
3389 if (is_int32(signature_hash)) {
3390 // cmpq sign-extends the 32-bit immediate.
3391 cmpq(signature_hash_op, Immediate(static_cast<int32_t>(signature_hash)));
3392 } else {
3393 Move(kScratchRegister, signature_hash);
3394 cmpq(kScratchRegister, signature_hash_op);
3395 }
3396 Label fail, ok;
3397 j(Condition::kNotEqual, &fail, Label::Distance::kNear);
3399
3400 bind(&fail);
3401 Abort(AbortReason::kWasmSignatureMismatch);
3402
3403 bind(&ok);
3404 Operand target_op{target, 0};
3405#else
3406 static_assert(sizeof(wasm::WasmCodePointerTableEntry) == 8);
3407 Operand target_op{kScratchRegister, target, ScaleFactor::times_8, 0};
3408#endif
3409
3410 if (call_jump_mode == CallJumpMode::kTailCall) {
3411 jmp(target_op);
3412 } else {
3413 call(target_op);
3414 }
3415}
3416
3417void MacroAssembler::CallWasmCodePointerNoSignatureCheck(Register target) {
3418 Move(kScratchRegister, ExternalReference::wasm_code_pointer_table());
3419
3420#ifdef V8_ENABLE_SANDBOX
3421 static_assert(sizeof(wasm::WasmCodePointerTableEntry) == 16);
3422 // Check that using a 32-bit shift is valid for any valid code pointer.
3423 static_assert(wasm::WasmCodePointerTable::kMaxWasmCodePointers <=
3424 (kMaxInt >> 4));
3425 shll(target, Immediate(4));
3426 call(Operand(kScratchRegister, target, ScaleFactor::times_1, 0));
3427#else
3428 static_assert(sizeof(wasm::WasmCodePointerTableEntry) == 8);
3429 call(Operand(kScratchRegister, target, ScaleFactor::times_8, 0));
3430#endif
3431}
3432
3433void MacroAssembler::LoadWasmCodePointer(Register dst, Operand src) {
3434 static_assert(sizeof(WasmCodePointer) == 4);
3435 movl(dst, src);
3436}
3437
3438#endif
3439
3440void MacroAssembler::PextrdPreSse41(Register dst, XMMRegister src,
3441 uint8_t imm8) {
3442 if (imm8 == 0) {
3443 Movd(dst, src);
3444 return;
3445 }
3446 DCHECK_EQ(1, imm8);
3447 movq(dst, src);
3448 shrq(dst, Immediate(32));
3449}
3450
3451namespace {
3452template <typename Op>
3453void PinsrdPreSse41Helper(MacroAssembler* masm, XMMRegister dst, Op src,
3454 uint8_t imm8, uint32_t* load_pc_offset) {
3455 masm->Movd(kScratchDoubleReg, src);
3456 if (load_pc_offset) *load_pc_offset = masm->pc_offset();
3457 if (imm8 == 1) {
3458 masm->punpckldq(dst, kScratchDoubleReg);
3459 } else {
3460 DCHECK_EQ(0, imm8);
3461 masm->Movss(dst, kScratchDoubleReg);
3462 }
3463}
3464} // namespace
3465
3466void MacroAssembler::PinsrdPreSse41(XMMRegister dst, Register src, uint8_t imm8,
3467 uint32_t* load_pc_offset) {
3468 PinsrdPreSse41Helper(this, dst, src, imm8, load_pc_offset);
3469}
3470
3471void MacroAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8,
3472 uint32_t* load_pc_offset) {
3473 PinsrdPreSse41Helper(this, dst, src, imm8, load_pc_offset);
3474}
3475
3476void MacroAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Register src2,
3477 uint8_t imm8, uint32_t* load_pc_offset) {
3478 PinsrHelper(this, &Assembler::vpinsrq, &Assembler::pinsrq, dst, src1, src2,
3479 imm8, load_pc_offset, {SSE4_1});
3480}
3481
3482void MacroAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2,
3483 uint8_t imm8, uint32_t* load_pc_offset) {
3484 PinsrHelper(this, &Assembler::vpinsrq, &Assembler::pinsrq, dst, src1, src2,
3485 imm8, load_pc_offset, {SSE4_1});
3486}
3487
3488void MacroAssembler::Lzcntl(Register dst, Register src) {
3489 if (CpuFeatures::IsSupported(LZCNT)) {
3490 CpuFeatureScope scope(this, LZCNT);
3491 lzcntl(dst, src);
3492 return;
3493 }
3494 Label not_zero_src;
3495 bsrl(dst, src);
3496 j(not_zero, &not_zero_src, Label::kNear);
3497 Move(dst, 63); // 63^31 == 32
3498 bind(&not_zero_src);
3499 xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x
3500}
3501
3502void MacroAssembler::Lzcntl(Register dst, Operand src) {
3503 if (CpuFeatures::IsSupported(LZCNT)) {
3504 CpuFeatureScope scope(this, LZCNT);
3505 lzcntl(dst, src);
3506 return;
3507 }
3508 Label not_zero_src;
3509 bsrl(dst, src);
3510 j(not_zero, &not_zero_src, Label::kNear);
3511 Move(dst, 63); // 63^31 == 32
3512 bind(&not_zero_src);
3513 xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x
3514}
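// Editor's note: the BSR-based fallback above relies on 31 ^ x == 31 - x for
// x in [0, 31], so one XOR turns the index of the highest set bit into a
// leading-zero count; for a zero input (where BSR leaves the destination
// undefined) dst is set to 63 so that 63 ^ 31 == 32. A scalar sketch
// (illustrative only, not part of this file; the function name is made up):
static inline uint32_t IllustrativeLzcnt32(uint32_t src) {
  if (src == 0) return 63u ^ 31u;  // == 32, the defined lzcnt(0) result
  uint32_t index = 0;              // bsrl: index of the most significant set bit
  for (uint32_t x = src; x >>= 1;) ++index;
  return index ^ 31u;
}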
3515
3516void MacroAssembler::Lzcntq(Register dst, Register src) {
3517 if (CpuFeatures::IsSupported(LZCNT)) {
3518 CpuFeatureScope scope(this, LZCNT);
3519 lzcntq(dst, src);
3520 return;
3521 }
3522 Label not_zero_src;
3523 bsrq(dst, src);
3524 j(not_zero, &not_zero_src, Label::kNear);
3525 Move(dst, 127); // 127^63 == 64
3526 bind(&not_zero_src);
3527 xorl(dst, Immediate(63)); // for x in [0..63], 63^x == 63 - x
3528}
3529
3530void MacroAssembler::Lzcntq(Register dst, Operand src) {
3531 if (CpuFeatures::IsSupported(LZCNT)) {
3532 CpuFeatureScope scope(this, LZCNT);
3533 lzcntq(dst, src);
3534 return;
3535 }
3536 Label not_zero_src;
3537 bsrq(dst, src);
3538 j(not_zero, &not_zero_src, Label::kNear);
3539 Move(dst, 127); // 127^63 == 64
3540 bind(&not_zero_src);
3541 xorl(dst, Immediate(63)); // for x in [0..63], 63^x == 63 - x
3542}
3543
3544void MacroAssembler::Tzcntq(Register dst, Register src) {
3545 if (CpuFeatures::IsSupported(BMI1)) {
3546 CpuFeatureScope scope(this, BMI1);
3547 tzcntq(dst, src);
3548 return;
3549 }
3550 Label not_zero_src;
3551 bsfq(dst, src);
3552 j(not_zero, &not_zero_src, Label::kNear);
3553 // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
3554 Move(dst, 64);
3555 bind(&not_zero_src);
3556}
3557
3558void MacroAssembler::Tzcntq(Register dst, Operand src) {
3559 if (CpuFeatures::IsSupported(BMI1)) {
3560 CpuFeatureScope scope(this, BMI1);
3561 tzcntq(dst, src);
3562 return;
3563 }
3564 Label not_zero_src;
3565 bsfq(dst, src);
3566 j(not_zero, &not_zero_src, Label::kNear);
3567 // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
3568 Move(dst, 64);
3569 bind(&not_zero_src);
3570}
3571
3572void MacroAssembler::Tzcntl(Register dst, Register src) {
3573 if (CpuFeatures::IsSupported(BMI1)) {
3574 CpuFeatureScope scope(this, BMI1);
3575 tzcntl(dst, src);
3576 return;
3577 }
3578 Label not_zero_src;
3579 bsfl(dst, src);
3580 j(not_zero, &not_zero_src, Label::kNear);
3581 Move(dst, 32); // The result of tzcnt is 32 if src = 0.
3582 bind(&not_zero_src);
3583}
3584
3585void MacroAssembler::Tzcntl(Register dst, Operand src) {
3586 if (CpuFeatures::IsSupported(BMI1)) {
3587 CpuFeatureScope scope(this, BMI1);
3588 tzcntl(dst, src);
3589 return;
3590 }
3591 Label not_zero_src;
3592 bsfl(dst, src);
3593 j(not_zero, &not_zero_src, Label::kNear);
3594 Move(dst, 32); // The result of tzcnt is 32 if src = 0.
3595 bind(&not_zero_src);
3596}
3597
3598void MacroAssembler::Popcntl(Register dst, Register src) {
3599 if (CpuFeatures::IsSupported(POPCNT)) {
3600 CpuFeatureScope scope(this, POPCNT);
3601 popcntl(dst, src);
3602 return;
3603 }
3604 UNREACHABLE();
3605}
3606
3607void MacroAssembler::Popcntl(Register dst, Operand src) {
3608 if (CpuFeatures::IsSupported(POPCNT)) {
3609 CpuFeatureScope scope(this, POPCNT);
3610 popcntl(dst, src);
3611 return;
3612 }
3613 UNREACHABLE();
3614}
3615
3616void MacroAssembler::Popcntq(Register dst, Register src) {
3617 if (CpuFeatures::IsSupported(POPCNT)) {
3618 CpuFeatureScope scope(this, POPCNT);
3619 popcntq(dst, src);
3620 return;
3621 }
3622 UNREACHABLE();
3623}
3624
3625void MacroAssembler::Popcntq(Register dst, Operand src) {
3626 if (CpuFeatures::IsSupported(POPCNT)) {
3627 CpuFeatureScope scope(this, POPCNT);
3628 popcntq(dst, src);
3629 return;
3630 }
3631 UNREACHABLE();
3632}
3633
3635 // Adjust this code if not the case.
3637 static_assert(StackHandlerConstants::kNextOffset == 0);
3638
3639 Push(Immediate(0)); // Padding.
3640
3641 // Link the current handler as the next handler.
3642 ExternalReference handler_address =
3643 ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
3644 Push(ExternalReferenceAsOperand(handler_address));
3645
3646 // Set this new handler as the current one.
3647 movq(ExternalReferenceAsOperand(handler_address), rsp);
3648}
3649
3651 static_assert(StackHandlerConstants::kNextOffset == 0);
3652 ExternalReference handler_address =
3653 ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
3654 Pop(ExternalReferenceAsOperand(handler_address));
3655 addq(rsp, Immediate(StackHandlerConstants::kSize - kSystemPointerSize));
3656}
3657
3658void MacroAssembler::Ret() { ret(0); }
3659
3660void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
3661 if (is_uint16(bytes_dropped)) {
3662 ret(bytes_dropped);
3663 } else {
3664 PopReturnAddressTo(scratch);
3665 addq(rsp, Immediate(bytes_dropped));
3666 PushReturnAddressFrom(scratch);
3667 ret(0);
3668 }
3669}
3670
3671void MacroAssembler::IncsspqIfSupported(Register number_of_words,
3672 Register scratch) {
3673 // Optimized code can validate at runtime whether the cpu supports the
3674 // incsspq instruction, so it shouldn't use this method.
3675 CHECK(isolate()->IsGeneratingEmbeddedBuiltins());
3676 DCHECK_NE(number_of_words, scratch);
3677 Label not_supported;
3678 ExternalReference supports_cetss =
3679 ExternalReference::supports_cetss_address();
3680 Operand supports_cetss_operand =
3681 ExternalReferenceAsOperand(supports_cetss, scratch);
3682 cmpb(supports_cetss_operand, Immediate(0));
3683 j(equal, &not_supported, Label::kNear);
3684 incsspq(number_of_words);
3685 bind(&not_supported);
3686}
3687
3688#if V8_STATIC_ROOTS_BOOL
3689void MacroAssembler::CompareInstanceTypeWithUniqueCompressedMap(
3690 Register map, InstanceType type) {
3691 std::optional<RootIndex> expected =
3693 CHECK(expected);
3694 Tagged_t expected_ptr = ReadOnlyRootPtr(*expected);
3695 cmp_tagged(map, Immediate(expected_ptr));
3696}
3697
3698void MacroAssembler::IsObjectTypeFast(Register object, InstanceType type,
3699 Register compressed_map_scratch) {
3700 ASM_CODE_COMMENT(this);
3702 LoadCompressedMap(compressed_map_scratch, object);
3703 CompareInstanceTypeWithUniqueCompressedMap(compressed_map_scratch, type);
3704}
3705#endif // V8_STATIC_ROOTS_BOOL
3706
3707void MacroAssembler::IsObjectType(Register heap_object, InstanceType type,
3708 Register map) {
3709#if V8_STATIC_ROOTS_BOOL
3711 LoadCompressedMap(map, heap_object);
3712 CompareInstanceTypeWithUniqueCompressedMap(map, type);
3713 return;
3714 }
3715#endif // V8_STATIC_ROOTS_BOOL
3716 CmpObjectType(heap_object, type, map);
3717}
3718
3719void MacroAssembler::IsObjectTypeInRange(Register heap_object,
3720 InstanceType lower_limit,
3721 InstanceType higher_limit,
3722 Register scratch) {
3723 DCHECK_LT(lower_limit, higher_limit);
3724#if V8_STATIC_ROOTS_BOOL
3725 if (auto range = InstanceTypeChecker::UniqueMapRangeOfInstanceTypeRange(
3726 lower_limit, higher_limit)) {
3727 LoadCompressedMap(scratch, heap_object);
3728 CompareRange(scratch, range->first, range->second);
3729 return;
3730 }
3731#endif // V8_STATIC_ROOTS_BOOL
3732 LoadMap(scratch, heap_object);
3733 CmpInstanceTypeRange(scratch, scratch, lower_limit, higher_limit);
3734}
3735
3736void MacroAssembler::JumpIfJSAnyIsNotPrimitive(Register heap_object,
3737 Register scratch, Label* target,
3738 Label::Distance distance,
3739 Condition cc) {
3740 CHECK(cc == Condition::kUnsignedLessThan ||
3741 cc == Condition::kUnsignedGreaterThanEqual);
3743#ifdef DEBUG
3744 Label ok;
3745 LoadMap(scratch, heap_object);
3746 CmpInstanceTypeRange(scratch, scratch, FIRST_JS_RECEIVER_TYPE,
3747 LAST_JS_RECEIVER_TYPE);
3748 j(Condition::kUnsignedLessThanEqual, &ok, Label::Distance::kNear);
3749 LoadMap(scratch, heap_object);
3750 CmpInstanceTypeRange(scratch, scratch, FIRST_PRIMITIVE_HEAP_OBJECT_TYPE,
3751 LAST_PRIMITIVE_HEAP_OBJECT_TYPE);
3752 j(Condition::kUnsignedLessThanEqual, &ok, Label::Distance::kNear);
3753 Abort(AbortReason::kInvalidReceiver);
3754 bind(&ok);
3755#endif // DEBUG
3756
 3757 // All primitive objects' maps are allocated at the start of the read-only
 3758 // heap. Thus JS_RECEIVERs must have maps with larger (compressed)
 3759 // addresses.
3760 LoadCompressedMap(scratch, heap_object);
3761 cmp_tagged(scratch, Immediate(InstanceTypeChecker::kNonJsReceiverMapLimit));
3762 } else {
3763 static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
3764 CmpObjectType(heap_object, FIRST_JS_RECEIVER_TYPE, scratch);
3765 }
3766 j(cc, target, distance);
3767}
3768
3769void MacroAssembler::CmpObjectType(Register heap_object, InstanceType type,
3770 Register map) {
3771 LoadMap(map, heap_object);
3772 CmpInstanceType(map, type);
3773}
3774
3775void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
3776 cmpw(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
3777}
3778
3779void MacroAssembler::CmpInstanceTypeRange(Register map,
3780 Register instance_type_out,
3781 InstanceType lower_limit,
3782 InstanceType higher_limit) {
3783 DCHECK_LT(lower_limit, higher_limit);
3784 movzxwl(instance_type_out, FieldOperand(map, Map::kInstanceTypeOffset));
3785 CompareRange(instance_type_out, lower_limit, higher_limit);
3786}
3787
3789 const int kByteWithDeoptBitOffset = 0 * kByteSize;
3790 const int kByteWithDeoptBitOffsetInBits = kByteWithDeoptBitOffset * 8;
3791 static_assert(V8_TARGET_LITTLE_ENDIAN == 1);
3792 static_assert(FIELD_SIZE(Code::kFlagsOffset) * kBitsPerByte == 32);
3793 static_assert(Code::kMarkedForDeoptimizationBit >
3794 kByteWithDeoptBitOffsetInBits);
3795 testb(FieldOperand(code, Code::kFlagsOffset + kByteWithDeoptBitOffset),
3796 Immediate(1 << (Code::kMarkedForDeoptimizationBit -
3797 kByteWithDeoptBitOffsetInBits)));
3798}
3799
3800void MacroAssembler::TestCodeIsTurbofanned(Register code) {
3801 testl(FieldOperand(code, Code::kFlagsOffset),
3802 Immediate(1 << Code::kIsTurbofannedBit));
3803}
3804
3805Immediate MacroAssembler::ClearedValue() const {
3806 return Immediate(static_cast<int32_t>(i::ClearedValue(isolate()).ptr()));
3807}
3808
3809#ifdef V8_ENABLE_DEBUG_CODE
3810void MacroAssembler::AssertNotSmi(Register object) {
3811 if (!v8_flags.debug_code) return;
3812 ASM_CODE_COMMENT(this);
3813 Condition is_smi = CheckSmi(object);
3814 Check(NegateCondition(is_smi), AbortReason::kOperandIsASmi);
3815}
3816
3817void MacroAssembler::AssertSmi(Register object) {
3818 if (!v8_flags.debug_code) return;
3819 ASM_CODE_COMMENT(this);
3820 Condition is_smi = CheckSmi(object);
3821 Check(is_smi, AbortReason::kOperandIsNotASmi);
3822#ifdef ENABLE_SLOW_DCHECKS
3824#endif
3825}
3826
3827void MacroAssembler::AssertSmi(Operand object) {
3828 if (!v8_flags.debug_code) return;
3829 ASM_CODE_COMMENT(this);
3830 Condition is_smi = CheckSmi(object);
3831 Check(is_smi, AbortReason::kOperandIsNotASmi);
3832}
3833
3834void MacroAssembler::AssertZeroExtended(Register int32_register) {
3835 if (!v8_flags.slow_debug_code) return;
3836 ASM_CODE_COMMENT(this);
3837 DCHECK_NE(int32_register, kScratchRegister);
3838 movl(kScratchRegister, Immediate(kMaxUInt32)); // zero-extended
3839 cmpq(int32_register, kScratchRegister);
3840 Check(below_equal, AbortReason::k32BitValueInRegisterIsNotZeroExtended);
3841}
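// Worked example: the movl zero-extends, so kScratchRegister holds
// 0x00000000'FFFFFFFF. A properly zero-extended 32-bit value always compares
// below_equal that constant in the unsigned 64-bit cmpq, while a stale value
// such as 0x1'00000000 compares above and trips the Check.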
3842
3843void MacroAssembler::AssertSignedBitOfSmiIsZero(Register smi_register) {
3844 if (!v8_flags.slow_debug_code) return;
3845 ASM_CODE_COMMENT(this);
3847 testl(smi_register, Immediate(int32_t{0x10000000}));
3848 Check(zero, AbortReason::kSignedBitOfSmiIsNotZero);
3849}
3850
3851void MacroAssembler::AssertMap(Register object) {
3852 if (!v8_flags.debug_code) return;
3853 ASM_CODE_COMMENT(this);
3854 testb(object, Immediate(kSmiTagMask));
3855 Check(not_equal, AbortReason::kOperandIsNotAMap);
3856 Push(object);
3857 LoadMap(object, object);
3858 CmpInstanceType(object, MAP_TYPE);
3859 popq(object);
3860 Check(equal, AbortReason::kOperandIsNotAMap);
3861}
3862
3863void MacroAssembler::AssertCode(Register object) {
3864 if (!v8_flags.debug_code) return;
3865 ASM_CODE_COMMENT(this);
3866 testb(object, Immediate(kSmiTagMask));
3867 Check(not_equal, AbortReason::kOperandIsNotACode);
3868 Push(object);
3869 LoadMap(object, object);
3870 CmpInstanceType(object, CODE_TYPE);
3871 popq(object);
3872 Check(equal, AbortReason::kOperandIsNotACode);
3873}
3874
3875void MacroAssembler::AssertSmiOrHeapObjectInMainCompressionCage(
3876 Register object) {
3877 if (!PointerCompressionIsEnabled()) return;
3878 if (!v8_flags.debug_code) return;
3879 ASM_CODE_COMMENT(this);
3880 Label ok;
3881 // We may not have any scratch registers so we preserve our input register.
3882 pushq(object);
3883 j(CheckSmi(object), &ok);
3884 // Clear the lower 32 bits.
3885 shrq(object, Immediate(32));
3886 shlq(object, Immediate(32));
3887 // Either the value is now equal to the pointer compression cage base or it's
3888 // zero if we got a compressed pointer register as input.
3889 j(zero, &ok);
3890 cmpq(object, kPtrComprCageBaseRegister);
3891 Check(equal, AbortReason::kObjectNotTagged);
3892 bind(&ok);
3893 popq(object);
3894}
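// Worked example (the cage base value is illustrative): with the pointer
// compression cage based at 0x0000'1000'0000'0000, a full tagged pointer into
// the main cage reduces to exactly the cage base once the two shifts clear its
// lower 32 bits, while an already-compressed 32-bit value shifts down to zero;
// both cases reach the ok label without aborting.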
3895
3896void MacroAssembler::AssertConstructor(Register object) {
3897 if (!v8_flags.debug_code) return;
3898 ASM_CODE_COMMENT(this);
3899 testb(object, Immediate(kSmiTagMask));
3900 Check(not_equal, AbortReason::kOperandIsASmiAndNotAConstructor);
3901 Push(object);
3902 LoadMap(object, object);
3903 testb(FieldOperand(object, Map::kBitFieldOffset),
3904 Immediate(Map::Bits1::IsConstructorBit::kMask));
3905 Pop(object);
3906 Check(not_zero, AbortReason::kOperandIsNotAConstructor);
3907}
3908
3909void MacroAssembler::AssertFunction(Register object) {
3910 if (!v8_flags.debug_code) return;
3911 ASM_CODE_COMMENT(this);
3912 testb(object, Immediate(kSmiTagMask));
3913 Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
3914 Push(object);
3915 LoadMap(object, object);
3916 CmpInstanceTypeRange(object, object, FIRST_JS_FUNCTION_TYPE,
3917 LAST_JS_FUNCTION_TYPE);
3918 Pop(object);
3919 Check(below_equal, AbortReason::kOperandIsNotAFunction);
3920}
3921
3922void MacroAssembler::AssertCallableFunction(Register object) {
3923 if (!v8_flags.debug_code) return;
3924 ASM_CODE_COMMENT(this);
3925 testb(object, Immediate(kSmiTagMask));
3926 Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
3927 Push(object);
3928 LoadMap(object, object);
3929 CmpInstanceTypeRange(object, object, FIRST_CALLABLE_JS_FUNCTION_TYPE,
3930 LAST_CALLABLE_JS_FUNCTION_TYPE);
3931 Pop(object);
3932 Check(below_equal, AbortReason::kOperandIsNotACallableFunction);
3933}
3934
3935void MacroAssembler::AssertBoundFunction(Register object) {
3936 if (!v8_flags.debug_code) return;
3937 ASM_CODE_COMMENT(this);
3938 testb(object, Immediate(kSmiTagMask));
3939 Check(not_equal, AbortReason::kOperandIsASmiAndNotABoundFunction);
3940 Push(object);
3941 IsObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
3942 Pop(object);
3943 Check(equal, AbortReason::kOperandIsNotABoundFunction);
3944}
3945
3946void MacroAssembler::AssertGeneratorObject(Register object) {
3947 if (!v8_flags.debug_code) return;
3948 ASM_CODE_COMMENT(this);
3949 testb(object, Immediate(kSmiTagMask));
3950 Check(not_equal, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
3951
3952 // Load map
3953 Register map = object;
3954 Push(object);
3955 LoadMap(map, object);
3956
3957 // Check if JSGeneratorObject
3958 CmpInstanceTypeRange(map, kScratchRegister, FIRST_JS_GENERATOR_OBJECT_TYPE,
3959 LAST_JS_GENERATOR_OBJECT_TYPE);
3960 // Restore generator object to register and perform assertion
3961 Pop(object);
3962 Check(below_equal, AbortReason::kOperandIsNotAGeneratorObject);
3963}
3964
3965void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
3966 if (!v8_flags.debug_code) return;
3967 ASM_CODE_COMMENT(this);
3968 Label done_checking;
3969 AssertNotSmi(object);
3970 Cmp(object, isolate()->factory()->undefined_value());
3971 j(equal, &done_checking);
3972 Register map = object;
3973 Push(object);
3974 LoadMap(map, object);
3975 Cmp(map, isolate()->factory()->allocation_site_map());
3976 Pop(object);
3977 Assert(equal, AbortReason::kExpectedUndefinedOrCell);
3978 bind(&done_checking);
3979}
3980
3981void MacroAssembler::AssertJSAny(Register object, Register map_tmp,
3982 AbortReason abort_reason) {
3983 if (!v8_flags.debug_code) return;
3984
3985 ASM_CODE_COMMENT(this);
3986 DCHECK(!AreAliased(object, map_tmp));
3987 Label ok;
3988
3989 Label::Distance dist = DEBUG_BOOL ? Label::kFar : Label::kNear;
3990
3991 JumpIfSmi(object, &ok, dist);
3992
3993 LoadMap(map_tmp, object);
3994 CmpInstanceType(map_tmp, LAST_NAME_TYPE);
3995 j(below_equal, &ok, dist);
3996
3997 CmpInstanceType(map_tmp, FIRST_JS_RECEIVER_TYPE);
3998 j(above_equal, &ok, dist);
3999
4000 CompareRoot(map_tmp, RootIndex::kHeapNumberMap);
4001 j(equal, &ok, dist);
4002
4003 CompareRoot(map_tmp, RootIndex::kBigIntMap);
4004 j(equal, &ok, dist);
4005
4006 CompareRoot(object, RootIndex::kUndefinedValue);
4007 j(equal, &ok, dist);
4008
4009 CompareRoot(object, RootIndex::kTrueValue);
4010 j(equal, &ok, dist);
4011
4012 CompareRoot(object, RootIndex::kFalseValue);
4013 j(equal, &ok, dist);
4014
4015 CompareRoot(object, RootIndex::kNullValue);
4016 j(equal, &ok, dist);
4017
4018 Abort(abort_reason);
4019
4020 bind(&ok);
4021}
4022
4023void MacroAssembler::Assert(Condition cc, AbortReason reason) {
4024 if (v8_flags.debug_code) Check(cc, reason);
4025}
4026
4027void MacroAssembler::AssertUnreachable(AbortReason reason) {
4028 if (v8_flags.debug_code) Abort(reason);
4029}
4030#endif // V8_ENABLE_DEBUG_CODE
4031
4032void MacroAssembler::LoadWeakValue(Register in_out, Label* target_if_cleared) {
4033 cmpl(in_out, Immediate(kClearedWeakHeapObjectLower32));
4034 j(equal, target_if_cleared);
4035
4036 andq(in_out, Immediate(~static_cast<int32_t>(kWeakHeapObjectMask)));
4037}
4038
4039void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value) {
4040 DCHECK_GT(value, 0);
4041 if (v8_flags.native_code_counters && counter->Enabled()) {
4042 ASM_CODE_COMMENT(this);
4043 Operand counter_operand =
4045 // This operation has to be exactly 32-bit wide in case the external
4046 // reference table redirects the counter to a uint32_t dummy_stats_counter_
4047 // field.
4048 if (value == 1) {
4049 incl(counter_operand);
4050 } else {
4051 addl(counter_operand, Immediate(value));
4052 }
4053 }
4054}
4055
4056void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value) {
4057 DCHECK_GT(value, 0);
4058 if (v8_flags.native_code_counters && counter->Enabled()) {
4059 ASM_CODE_COMMENT(this);
4060 Operand counter_operand =
4062 // This operation has to be exactly 32-bit wide in case the external
4063 // reference table redirects the counter to a uint32_t dummy_stats_counter_
4064 // field.
4065 if (value == 1) {
4066 decl(counter_operand);
4067 } else {
4068 subl(counter_operand, Immediate(value));
4069 }
4070 }
4071}
4072
4073#ifdef V8_ENABLE_LEAPTIERING
4074void MacroAssembler::InvokeFunction(
4075 Register function, Register new_target, Register actual_parameter_count,
4076 InvokeType type, ArgumentAdaptionMode argument_adaption_mode) {
4077 ASM_CODE_COMMENT(this);
4078 DCHECK_EQ(function, rdi);
4079 LoadTaggedField(rsi, FieldOperand(function, JSFunction::kContextOffset));
4080 InvokeFunctionCode(rdi, new_target, actual_parameter_count, type,
4081 argument_adaption_mode);
4082}
4083
4084void MacroAssembler::InvokeFunctionCode(
4085 Register function, Register new_target, Register actual_parameter_count,
4086 InvokeType type, ArgumentAdaptionMode argument_adaption_mode) {
4087 ASM_CODE_COMMENT(this);
4088 // You can't call a function without a valid frame.
4090 DCHECK_EQ(function, rdi);
4091 DCHECK_IMPLIES(new_target.is_valid(), new_target == rdx);
4092
4093 Register dispatch_handle = kJavaScriptCallDispatchHandleRegister;
4094 movl(dispatch_handle,
4095 FieldOperand(function, JSFunction::kDispatchHandleOffset));
4096
4097 AssertFunction(function);
4098
4099 // On function call, call into the debugger if necessary.
4100 Label debug_hook, continue_after_hook;
4101 {
4102 ExternalReference debug_hook_active =
4103 ExternalReference::debug_hook_on_function_call_address(isolate());
4104 Operand debug_hook_active_operand =
4105 ExternalReferenceAsOperand(debug_hook_active);
4106 cmpb(debug_hook_active_operand, Immediate(0));
4107 j(not_equal, &debug_hook);
4108 }
4109 bind(&continue_after_hook);
4110
4111 // Clear the new.target register if not given.
4112 if (!new_target.is_valid()) {
4113 LoadRoot(rdx, RootIndex::kUndefinedValue);
4114 }
4115
4116 if (argument_adaption_mode == ArgumentAdaptionMode::kAdapt) {
4117 Register expected_parameter_count = rbx;
4118 LoadParameterCountFromJSDispatchTable(expected_parameter_count,
4119 dispatch_handle);
4120 InvokePrologue(expected_parameter_count, actual_parameter_count, type);
4121 }
4122
4123 // We call indirectly through the code field in the function to
4124 // allow recompilation to take effect without changing any of the
4125 // call sites.
4126 static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
4127 LoadEntrypointFromJSDispatchTable(rcx, dispatch_handle);
4128 switch (type) {
4129 case InvokeType::kCall:
4130 call(rcx);
4131 break;
4132 case InvokeType::kJump:
4133 jmp(rcx);
4134 break;
4135 }
4136 Label done;
4137 jmp(&done, Label::kNear);
4138
4139 // Deferred debug hook.
4140 bind(&debug_hook);
4141 CallDebugOnFunctionCall(function, new_target, dispatch_handle,
4142 actual_parameter_count);
4143 jmp(&continue_after_hook);
4144
4145 bind(&done);
4146}
4147#else
4148void MacroAssembler::InvokeFunction(Register function, Register new_target,
4149 Register actual_parameter_count,
4150 InvokeType type) {
4151 ASM_CODE_COMMENT(this);
4152 LoadTaggedField(
4153 rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
4154 movzxwq(rbx,
4155 FieldOperand(rbx, SharedFunctionInfo::kFormalParameterCountOffset));
4156
4157 InvokeFunction(function, new_target, rbx, actual_parameter_count, type);
4158}
4159
4160void MacroAssembler::InvokeFunction(Register function, Register new_target,
4161 Register expected_parameter_count,
4162 Register actual_parameter_count,
4163 InvokeType type) {
4164 DCHECK_EQ(function, rdi);
4165 LoadTaggedField(rsi, FieldOperand(function, JSFunction::kContextOffset));
4166 InvokeFunctionCode(rdi, new_target, expected_parameter_count,
4167 actual_parameter_count, type);
4168}
4169
4170void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
4171 Register expected_parameter_count,
4172 Register actual_parameter_count,
4173 InvokeType type) {
4174 ASM_CODE_COMMENT(this);
4175 // You can't call a function without a valid frame.
4177 DCHECK_EQ(function, rdi);
4178 DCHECK_IMPLIES(new_target.is_valid(), new_target == rdx);
4179
4180 AssertFunction(function);
4181
4182 // On function call, call into the debugger if necessary.
4183 Label debug_hook, continue_after_hook;
4184 {
4185 ExternalReference debug_hook_active =
4186 ExternalReference::debug_hook_on_function_call_address(isolate());
4187 Operand debug_hook_active_operand =
4188 ExternalReferenceAsOperand(debug_hook_active);
4189 cmpb(debug_hook_active_operand, Immediate(0));
4190 j(not_equal, &debug_hook);
4191 }
4192 bind(&continue_after_hook);
4193
4194 // Clear the new.target register if not given.
4195 if (!new_target.is_valid()) {
4196 LoadRoot(rdx, RootIndex::kUndefinedValue);
4197 }
4198
4199 Label done;
4200 InvokePrologue(expected_parameter_count, actual_parameter_count, type);
4201 // We call indirectly through the code field in the function to
4202 // allow recompilation to take effect without changing any of the
4203 // call sites.
4204 static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
4205 constexpr int unused_argument_count = 0;
4206 switch (type) {
4207 case InvokeType::kCall:
4208 CallJSFunction(function, unused_argument_count);
4209 break;
4210 case InvokeType::kJump:
4211 JumpJSFunction(function);
4212 break;
4213 }
4214 jmp(&done, Label::kNear);
4215
4216 // Deferred debug hook.
4217 bind(&debug_hook);
4218 CallDebugOnFunctionCall(function, new_target, expected_parameter_count,
4219 actual_parameter_count);
4220 jmp(&continue_after_hook);
4221
4222 bind(&done);
4223}
4224#endif // V8_ENABLE_LEAPTIERING
4225
4226Operand MacroAssembler::StackLimitAsOperand(StackLimitKind kind) {
4227 DCHECK(root_array_available());
4228 intptr_t offset = kind == StackLimitKind::kRealStackLimit
4229 ? IsolateData::real_jslimit_offset()
4230 : IsolateData::jslimit_offset();
4231
4232 CHECK(is_int32(offset));
4233 return Operand(kRootRegister, static_cast<int32_t>(offset));
4234}
4235
4236void MacroAssembler::StackOverflowCheck(
4237 Register num_args, Label* stack_overflow,
4238 Label::Distance stack_overflow_distance) {
4239 ASM_CODE_COMMENT(this);
4240 DCHECK_NE(num_args, kScratchRegister);
4241 // Check the stack for overflow. We are not trying to catch
4242 // interruptions (e.g. debug break and preemption) here, so the "real stack
4243 // limit" is checked.
4244 movq(kScratchRegister, rsp);
4245 // Make kScratchRegister the space we have left. The stack might already be
4246 // overflowed here which will cause kScratchRegister to become negative.
4247 subq(kScratchRegister, StackLimitAsOperand(StackLimitKind::kRealStackLimit));
4248 // TODO(victorgomes): Use the ia32 approach with leaq, since it requires
4249 // fewer instructions.
4250 sarq(kScratchRegister, Immediate(kSystemPointerSizeLog2));
4251 // Check if the arguments will overflow the stack.
4252 cmpq(kScratchRegister, num_args);
4253 // Signed comparison.
4254 // TODO(victorgomes): Save some bytes in the builtins that use stack checks
4255 // by jumping to a builtin that throws the exception.
4256 j(less_equal, stack_overflow, stack_overflow_distance);
4257}
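// Worked example: after subtracting the real stack limit, kScratchRegister
// holds the headroom in bytes between rsp and the limit, and the arithmetic
// shift turns that into a slot count. With 4 KiB of headroom and 8-byte slots
// that is 512 slots, so num_args == 600 takes the less_equal branch to
// stack_overflow while num_args == 100 falls through.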
4258
4259void MacroAssembler::InvokePrologue(Register expected_parameter_count,
4260 Register actual_parameter_count,
4261 InvokeType type) {
4262 ASM_CODE_COMMENT(this);
4263 if (expected_parameter_count == actual_parameter_count) {
4264 Move(rax, actual_parameter_count);
4265 return;
4266 }
4267 Label regular_invoke;
4268
4269 // If overapplication or if the actual argument count is equal to the
4270 // formal parameter count, no need to push extra undefined values.
4271 subq(expected_parameter_count, actual_parameter_count);
4272 j(less_equal, &regular_invoke, Label::kFar);
4273
4274 Label stack_overflow;
4275 StackOverflowCheck(expected_parameter_count, &stack_overflow);
4276
4277 // Underapplication. Move the arguments already on the stack, including the
4278 // receiver and the return address.
4279 {
4280 Label copy, check;
4281 Register src = r8, dest = rsp, num = r9, current = r11;
4282 movq(src, rsp);
4283 leaq(kScratchRegister,
4284 Operand(expected_parameter_count, times_system_pointer_size, 0));
4285 AllocateStackSpace(kScratchRegister);
4286 // Extra words are for the return address (if a jump).
4287 int extra_words =
4289
4290 leaq(num, Operand(rax, extra_words)); // Number of words to copy.
4291 Move(current, 0);
4292 // Fall-through to the loop body because there are non-zero words to copy.
4293 bind(&copy);
4294 movq(kScratchRegister,
4295 Operand(src, current, times_system_pointer_size, 0));
4296 movq(Operand(dest, current, times_system_pointer_size, 0),
4297 kScratchRegister);
4298 incq(current);
4299 bind(&check);
4300 cmpq(current, num);
4301 j(less, &copy);
4302 leaq(r8, Operand(rsp, num, times_system_pointer_size, 0));
4303 }
4304 // Fill remaining expected arguments with undefined values.
4305 LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
4306 {
4307 Label loop;
4308 bind(&loop);
4309 decq(expected_parameter_count);
4310 movq(Operand(r8, expected_parameter_count, times_system_pointer_size, 0),
4311 kScratchRegister);
4312 j(greater, &loop, Label::kNear);
4313 }
4314 jmp(&regular_invoke);
4315
4316 bind(&stack_overflow);
4317 {
4318 FrameScope frame(
4319 this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
4320 CallRuntime(Runtime::kThrowStackOverflow);
4321 int3(); // This should be unreachable.
4322 }
4323 bind(&regular_invoke);
4324}
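// Illustrative flow: for expected_parameter_count == 5 and
// actual_parameter_count == 3, the difference of 2 slots is allocated, the
// words already on the stack are copied down into the new space, and the two
// vacated slots are filled with the undefined value before control reaches
// regular_invoke.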
4325
4327 Register fun, Register new_target,
4328 Register expected_parameter_count_or_dispatch_handle,
4329 Register actual_parameter_count) {
4330 ASM_CODE_COMMENT(this);
4331 // Load receiver to pass it later to DebugOnFunctionCall hook.
4332 // Receiver is located on top of the stack if we have a frame (usually a
4333 // construct frame), or after the return address if we do not yet have a
4334 // frame.
4335 movq(kScratchRegister, Operand(rsp, has_frame() ? 0 : kSystemPointerSize));
4336
4337 FrameScope frame(
4338 this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
4339
4340 SmiTag(expected_parameter_count_or_dispatch_handle);
4341 Push(expected_parameter_count_or_dispatch_handle);
4342
4343 SmiTag(actual_parameter_count);
4344 Push(actual_parameter_count);
4345 SmiUntag(actual_parameter_count);
4346
4347 if (new_target.is_valid()) {
4348 Push(new_target);
4349 }
4350 Push(fun);
4351 Push(fun);
4353 CallRuntime(Runtime::kDebugOnFunctionCall);
4354 Pop(fun);
4355 if (new_target.is_valid()) {
4356 Pop(new_target);
4357 }
4358 Pop(actual_parameter_count);
4359 SmiUntag(actual_parameter_count);
4360 Pop(expected_parameter_count_or_dispatch_handle);
4361 SmiUntag(expected_parameter_count_or_dispatch_handle);
4362}
4363
4364void MacroAssembler::StubPrologue(StackFrame::Type type) {
4365 ASM_CODE_COMMENT(this);
4366 pushq(rbp); // Caller's frame pointer.
4367 movq(rbp, rsp);
4368 Push(Immediate(StackFrame::TypeToMarker(type)));
4369}
4370
4371void MacroAssembler::Prologue() {
4372 ASM_CODE_COMMENT(this);
4373 pushq(rbp); // Caller's frame pointer.
4374 movq(rbp, rsp);
4375 Push(kContextRegister); // Callee's context.
4376 Push(kJSFunctionRegister); // Callee's JS function.
4377 Push(kJavaScriptCallArgCountRegister); // Actual argument count.
4378}
4379
4380void MacroAssembler::EnterFrame(StackFrame::Type type) {
4381 ASM_CODE_COMMENT(this);
4382 pushq(rbp);
4383 movq(rbp, rsp);
4384 if (!StackFrame::IsJavaScript(type)) {
4387 Push(Immediate(StackFrame::TypeToMarker(type)));
4388 }
4389#if V8_ENABLE_WEBASSEMBLY
4390 if (type == StackFrame::WASM) Push(kWasmImplicitArgRegister);
4391#endif // V8_ENABLE_WEBASSEMBLY
4392}
4393
4394void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4395 ASM_CODE_COMMENT(this);
4396 // TODO(v8:11429): Consider passing BASELINE instead, and checking for
4397 // IsJSFrame or similar. Could then unify with manual frame leaves in the
4398 // interpreter too.
4399 if (v8_flags.debug_code && !StackFrame::IsJavaScript(type)) {
4400 cmpq(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
4401 Immediate(StackFrame::TypeToMarker(type)));
4402 Check(equal, AbortReason::kStackFrameTypesMustMatch);
4403 }
4404 movq(rsp, rbp);
4405 popq(rbp);
4406}
4407
4408#if defined(V8_TARGET_OS_WIN) || defined(V8_TARGET_OS_MACOS)
4409void MacroAssembler::AllocateStackSpace(Register bytes_scratch) {
4410 ASM_CODE_COMMENT(this);
4411 // On Windows and on macOS, we cannot increment the stack size by more than
4412 // one page (minimum page size is 4KB) without accessing at least one byte on
4413 // the page. Check this:
4414 // https://msdn.microsoft.com/en-us/library/aa227153(v=vs.60).aspx.
4415 Label check_offset;
4416 Label touch_next_page;
4417 jmp(&check_offset);
4418 bind(&touch_next_page);
4419 subq(rsp, Immediate(kStackPageSize));
4420 // Just to touch the page, before we increment further.
4421 movb(Operand(rsp, 0), Immediate(0));
4422 subq(bytes_scratch, Immediate(kStackPageSize));
4423
4424 bind(&check_offset);
4425 cmpq(bytes_scratch, Immediate(kStackPageSize));
4426 j(greater_equal, &touch_next_page);
4427
4428 subq(rsp, bytes_scratch);
4429}
4430
4431void MacroAssembler::AllocateStackSpace(int bytes) {
4432 ASM_CODE_COMMENT(this);
4433 DCHECK_GE(bytes, 0);
4434 while (bytes >= kStackPageSize) {
4435 subq(rsp, Immediate(kStackPageSize));
4436 movb(Operand(rsp, 0), Immediate(0));
4437 bytes -= kStackPageSize;
4438 }
4439 if (bytes == 0) return;
4440 subq(rsp, Immediate(bytes));
4441}
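// Worked example (assuming kStackPageSize == 4096): AllocateStackSpace(10000)
// emits two page-sized decrements, each followed by a one-byte probe store,
// and then a final subq of 1808 bytes, so every newly used page is touched
// before rsp moves past it.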
4442#endif
4443
4444void MacroAssembler::EnterExitFrame(int extra_slots,
4445 StackFrame::Type frame_type,
4446 Register c_function) {
4447 ASM_CODE_COMMENT(this);
4448 DCHECK(frame_type == StackFrame::EXIT ||
4449 frame_type == StackFrame::BUILTIN_EXIT ||
4450 frame_type == StackFrame::API_ACCESSOR_EXIT ||
4451 frame_type == StackFrame::API_CALLBACK_EXIT);
4452
4453 // Set up the frame structure on the stack.
4454 // All constants are relative to the frame pointer of the exit frame.
4459 pushq(rbp);
4460 movq(rbp, rsp);
4461
4462 Push(Immediate(StackFrame::TypeToMarker(frame_type)));
4464 Push(Immediate(0)); // Saved entry sp, patched below.
4465
4466 DCHECK(!AreAliased(rbp, kContextRegister, c_function));
4467 using ER = ExternalReference;
4468 Store(ER::Create(IsolateAddressId::kCEntryFPAddress, isolate()), rbp);
4469 Store(ER::Create(IsolateAddressId::kContextAddress, isolate()),
4470 kContextRegister);
4471 Store(ER::Create(IsolateAddressId::kCFunctionAddress, isolate()), c_function);
4472
4473#ifdef V8_TARGET_OS_WIN
4474 // Note this is only correct under the assumption that the caller hasn't
4475 // already accounted for the home stack slots.
4476 // TODO(jgruber): This is a bit hacky since the caller in most cases still
4477 // needs to know about the home stack slots in order to address reserved
4478 // slots. Consider moving this fully into caller code.
4479 extra_slots += kWindowsHomeStackSlots;
4480#endif
4482
4484
4485 // Patch the saved entry sp.
4486 movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
4487}
4488
4489void MacroAssembler::LeaveExitFrame() {
4490 ASM_CODE_COMMENT(this);
4491
4492 leave();
4493
4494 // Restore the current context from top and clear it in debug mode.
4495 ExternalReference context_address =
4496 ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
4497 Operand context_operand = ExternalReferenceAsOperand(context_address);
4498 movq(rsi, context_operand);
4499#ifdef DEBUG
4500 Move(context_operand, Context::kInvalidContext);
4501#endif
4502
4503 // Clear the top frame.
4504 ExternalReference c_entry_fp_address =
4505 ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
4506 Operand c_entry_fp_operand = ExternalReferenceAsOperand(c_entry_fp_address);
4507 Move(c_entry_fp_operand, 0);
4508}
4509
4510void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
4511 ASM_CODE_COMMENT(this);
4512 // Load native context.
4513 LoadMap(dst, rsi);
4514 LoadTaggedField(
4515 dst,
4516 FieldOperand(dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
4517 // Load value from native context.
4518 LoadTaggedField(dst, Operand(dst, Context::SlotOffset(index)));
4519}
4520
4521void MacroAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
4522 CodeKind min_opt_level,
4523 Register feedback_vector,
4524 FeedbackSlot slot,
4525 Label* on_result,
4526 Label::Distance distance) {
4527 ASM_CODE_COMMENT(this);
4528 Label fallthrough, on_mark_deopt;
4529 LoadTaggedField(
4530 scratch_and_result,
4531 FieldOperand(feedback_vector,
4532 FeedbackVector::OffsetOfElementAt(slot.ToInt())));
4533 LoadWeakValue(scratch_and_result, &fallthrough);
4534
4535 // Is it marked_for_deoptimization? If yes, clear the slot.
4536 {
4537 // The entry references a CodeWrapper object. Unwrap it now.
4539 scratch_and_result,
4540 FieldOperand(scratch_and_result, CodeWrapper::kCodeOffset),
4542
4543 TestCodeIsMarkedForDeoptimization(scratch_and_result);
4544
4545 if (min_opt_level == CodeKind::TURBOFAN_JS) {
4546 j(not_zero, &on_mark_deopt, Label::Distance::kNear);
4547
4548 TestCodeIsTurbofanned(scratch_and_result);
4549 j(not_zero, on_result, distance);
4550 jmp(&fallthrough);
4551 } else {
4552 DCHECK_EQ(min_opt_level, CodeKind::MAGLEV);
4553 j(equal, on_result, distance);
4554 }
4555
4556 bind(&on_mark_deopt);
4558 FieldOperand(feedback_vector,
4559 FeedbackVector::OffsetOfElementAt(slot.ToInt())),
4560 ClearedValue());
4561 }
4562
4563 bind(&fallthrough);
4564 Move(scratch_and_result, 0);
4565}
4566
4567int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
4568 DCHECK_GE(num_arguments, 0);
4569#ifdef V8_TARGET_OS_WIN
4570 return std::max(num_arguments, kWindowsHomeStackSlots);
4571#else
4572 return std::max(num_arguments - kRegisterPassedArguments, 0);
4573#endif
4574}
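// Worked example: for a call with 8 arguments, the Windows ABI needs
// max(8, kWindowsHomeStackSlots) == 8 slots (four home slots for the register
// arguments plus four genuine stack arguments), while the System V ABI passes
// the first six arguments in registers and needs max(8 - 6, 0) == 2 slots.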
4575
4576void MacroAssembler::PrepareCallCFunction(int num_arguments) {
4577 ASM_CODE_COMMENT(this);
4578 int frame_alignment = base::OS::ActivationFrameAlignment();
4579 DCHECK_NE(frame_alignment, 0);
4580 DCHECK_GE(num_arguments, 0);
4581
4582 // Make stack end at alignment and allocate space for arguments and old rsp.
4583 movq(kScratchRegister, rsp);
4584 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
4585 int argument_slots_on_stack =
4586 ArgumentStackSlotsForCFunctionCall(num_arguments);
4587 AllocateStackSpace((argument_slots_on_stack + 1) * kSystemPointerSize);
4588 andq(rsp, Immediate(-frame_alignment));
4589 movq(Operand(rsp, argument_slots_on_stack * kSystemPointerSize),
4590 kScratchRegister);
4591}
4592
4593int MacroAssembler::CallCFunction(ExternalReference function, int num_arguments,
4594 SetIsolateDataSlots set_isolate_data_slots,
4595 Label* return_location) {
4596 // Note: The "CallCFunction" code comment will be generated by the other
4597 // CallCFunction method called below.
4598 LoadAddress(rax, function);
4599 return CallCFunction(rax, num_arguments, set_isolate_data_slots,
4600 return_location);
4601}
4602
4603int MacroAssembler::CallCFunction(Register function, int num_arguments,
4604 SetIsolateDataSlots set_isolate_data_slots,
4605 Label* return_location) {
4606 ASM_CODE_COMMENT(this);
4607 DCHECK_LE(num_arguments, kMaxCParameters);
4608 DCHECK(has_frame());
4609 // Check stack alignment.
4610 if (v8_flags.debug_code) {
4612 }
4613
4614 // Save the frame pointer and PC so that the stack layout remains iterable,
4615 // even without an ExitFrame which normally exists between JS and C frames.
4616 Label get_pc;
4617
4618 if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
4619 DCHECK(!AreAliased(kScratchRegister, function));
4620 leaq(kScratchRegister, Operand(&get_pc, 0));
4621
4623 movq(ExternalReferenceAsOperand(IsolateFieldId::kFastCCallCallerPC),
4624 kScratchRegister);
4625 movq(ExternalReferenceAsOperand(IsolateFieldId::kFastCCallCallerFP), rbp);
4626 }
4627
4628 call(function);
4629 int call_pc_offset = pc_offset();
4630 bind(&get_pc);
4631 if (return_location) bind(return_location);
4632
4633 if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
4634 // We don't unset the PC; the FP is the source of truth.
4635 movq(ExternalReferenceAsOperand(IsolateFieldId::kFastCCallCallerFP),
4636 Immediate(0));
4637 }
4638
4640 DCHECK_GE(num_arguments, 0);
4641 int argument_slots_on_stack =
4642 ArgumentStackSlotsForCFunctionCall(num_arguments);
4643 movq(rsp, Operand(rsp, argument_slots_on_stack * kSystemPointerSize));
4644
4645 return call_pc_offset;
4646}
4647
4648void MacroAssembler::MemoryChunkHeaderFromObject(Register object,
4649 Register header) {
4650 constexpr intptr_t alignment_mask =
4651 MemoryChunk::GetAlignmentMaskForAssembler();
4652 if (header == object) {
4653 andq(header, Immediate(~alignment_mask));
4654 } else {
4655 movq(header, Immediate(~alignment_mask));
4656 andq(header, object);
4657 }
4658}
4659
4660void MacroAssembler::CheckPageFlag(Register object, Register scratch, int mask,
4661 Condition cc, Label* condition_met,
4662 Label::Distance condition_met_distance) {
4663 ASM_CODE_COMMENT(this);
4664 DCHECK(cc == zero || cc == not_zero);
4665 MemoryChunkHeaderFromObject(object, scratch);
4666 if (mask < (1 << kBitsPerByte)) {
4667 testb(Operand(scratch, MemoryChunk::FlagsOffset()),
4668 Immediate(static_cast<uint8_t>(mask)));
4669 } else {
4670 testl(Operand(scratch, MemoryChunk::FlagsOffset()), Immediate(mask));
4671 }
4672 j(cc, condition_met, condition_met_distance);
4673}
4674
4675void MacroAssembler::JumpIfMarking(Label* is_marking,
4676 Label::Distance condition_met_distance) {
4677 testb(Operand(kRootRegister, IsolateData::is_marking_flag_offset()),
4678 Immediate(static_cast<uint8_t>(1)));
4679 j(not_zero, is_marking, condition_met_distance);
4680}
4681
4682void MacroAssembler::JumpIfNotMarking(Label* not_marking,
4683 Label::Distance condition_met_distance) {
4684 testb(Operand(kRootRegister, IsolateData::is_marking_flag_offset()),
4685 Immediate(static_cast<uint8_t>(1)));
4686 j(zero, not_marking, condition_met_distance);
4687}
4688
4689void MacroAssembler::CheckMarkBit(Register object, Register scratch0,
4690 Register scratch1, Condition cc,
4691 Label* condition_met,
4692 Label::Distance condition_met_distance) {
4693 ASM_CODE_COMMENT(this);
4694 DCHECK(cc == carry || cc == not_carry);
4695 DCHECK(!AreAliased(object, scratch0, scratch1));
4696
4697 // Computing cell.
4698 MemoryChunkHeaderFromObject(object, scratch0);
4699#ifdef V8_ENABLE_SANDBOX
4700 movl(scratch0, Operand(scratch0, MemoryChunk::MetadataIndexOffset()));
4701 andl(scratch0,
4702 Immediate(MemoryChunkConstants::kMetadataPointerTableSizeMask));
4703 shll(scratch0, Immediate(kSystemPointerSizeLog2));
4704 LoadAddress(scratch1,
4705 ExternalReference::memory_chunk_metadata_table_address());
4706 movq(scratch0, Operand(scratch1, scratch0, times_1, 0));
4707#else // !V8_ENABLE_SANDBOX
4708 movq(scratch0, Operand(scratch0, MemoryChunk::MetadataOffset()));
4709#endif // !V8_ENABLE_SANDBOX
4710 if (v8_flags.slow_debug_code) {
4711 Push(object);
4712 movq(scratch1, Operand(scratch0, MemoryChunkMetadata::AreaStartOffset()));
4713 MemoryChunkHeaderFromObject(scratch1, scratch1);
4714 MemoryChunkHeaderFromObject(object, object);
4715 cmpq(object, scratch1);
4716 Check(equal, AbortReason::kMetadataAreaStartDoesNotMatch);
4717 Pop(object);
4718 }
4719 addq(scratch0, Immediate(MutablePageMetadata::MarkingBitmapOffset()));
4720
4721 movq(scratch1, object);
4722 andq(scratch1, Immediate(MemoryChunk::GetAlignmentMaskForAssembler()));
4723 // It's important not to fold the next two shifts.
4724 shrq(scratch1, Immediate(kTaggedSizeLog2 + MarkingBitmap::kBitsPerCellLog2));
4725 shlq(scratch1, Immediate(kBitsPerByteLog2));
4726 addq(scratch0, scratch1);
4727
4728 // Computing mask.
4729 movq(scratch1, object);
4730 andq(scratch1, Immediate(MemoryChunk::GetAlignmentMaskForAssembler()));
4731 shrq(scratch1, Immediate(kTaggedSizeLog2));
4732 andq(scratch1, Immediate(MarkingBitmap::kBitIndexMask));
4733 btq(Operand(scratch0, 0), scratch1);
4734
4735 j(cc, condition_met, condition_met_distance);
4736}
4737
4738void MacroAssembler::ComputeCodeStartAddress(Register dst) {
4739 Label current;
4740 bind(&current);
4741 int pc = pc_offset();
4742 // Load effective address to get the address of the current instruction.
4743 leaq(dst, Operand(&current, -pc));
4744}
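// Illustrative example: if this leaq is emitted at pc offset 0x40 within the
// generated code, the label-relative operand resolves to &current - 0x40,
// i.e. the address of the first instruction of the current code object.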
4745
4746// Check if the code object is marked for deoptimization. If it is, then it
4747// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
4748// to:
4749// 1. read from memory the word that contains that bit, which can be found in
4750// the flags in the referenced {Code} object;
4751// 2. test kMarkedForDeoptimizationBit in those flags; and
4752// 3. if it is not zero then it jumps to the builtin.
4753//
4754// Note: With leaptiering we simply assert the code is not deoptimized.
4755void MacroAssembler::BailoutIfDeoptimized(Register scratch) {
4756 int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
4757 if (v8_flags.debug_code || !V8_ENABLE_LEAPTIERING_BOOL) {
4758 LoadProtectedPointerField(
4759 scratch, Operand(kJavaScriptCallCodeStartRegister, offset));
4760 TestCodeIsMarkedForDeoptimization(scratch);
4761 }
4762#ifdef V8_ENABLE_LEAPTIERING
4763 if (v8_flags.debug_code) {
4764 Assert(zero, AbortReason::kInvalidDeoptimizedCode);
4765 }
4766#else
4767 TailCallBuiltin(Builtin::kCompileLazyDeoptimizedCode, not_zero);
4768#endif
4769}
4770
4771void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
4772 DeoptimizeKind kind, Label* ret,
4773 Label*) {
4774 ASM_CODE_COMMENT(this);
4775 // Note: Assembler::call is used here on purpose to guarantee fixed-size
4776 // exits even on Atom CPUs; see MacroAssembler::Call for Atom-specific
4777 // performance tuning which emits a different instruction sequence.
4782}
4783
4784void MacroAssembler::Trap() { int3(); }
4785void MacroAssembler::DebugBreak() { int3(); }
4786
4787// Calls an API function. Allocates HandleScope, extracts returned value
4788// from handle and propagates exceptions. Clobbers C argument registers
4789// and C caller-saved registers. Restores context. On return removes
4790// (*argc_operand + slots_to_drop_on_return) * kSystemPointerSize
4791// (GCed, includes the call JS arguments space and the additional space
4792// allocated for the fast call).
4793void CallApiFunctionAndReturn(MacroAssembler* masm, bool with_profiling,
4794 Register function_address,
4795 ExternalReference thunk_ref, Register thunk_arg,
4796 int slots_to_drop_on_return,
4797 MemOperand* argc_operand,
4798 MemOperand return_value_operand) {
4799 ASM_CODE_COMMENT(masm);
4800 Label propagate_exception;
4801 Label delete_allocated_handles;
4802 Label leave_exit_frame;
4803
4804 using ER = ExternalReference;
4805
4806 Isolate* isolate = masm->isolate();
4807 MemOperand next_mem_op = __ ExternalReferenceAsOperand(
4808 ER::handle_scope_next_address(isolate), no_reg);
4809 MemOperand limit_mem_op = __ ExternalReferenceAsOperand(
4810 ER::handle_scope_limit_address(isolate), no_reg);
4811 MemOperand level_mem_op = __ ExternalReferenceAsOperand(
4812 ER::handle_scope_level_address(isolate), no_reg);
4813
4814 Register return_value = rax;
4815 Register scratch = kCArgRegs[3];
4816
4817 // Allocate HandleScope in callee-saved registers.
4818 // We will need to restore the HandleScope after the call to the API function;
4819 // by allocating it in callee-saved registers it will be preserved by C code.
4820 Register prev_next_address_reg = r12;
4821 Register prev_limit_reg = r15;
4822
4823 // C arguments (kCArgRegs[0/1]) are expected to be initialized outside, so
4824 // this function must not corrupt them. kScratchRegister might be used
4825 // implicitly by the macro assembler.
4826 DCHECK(!AreAliased(kCArgRegs[0], kCArgRegs[1], // C args
4827 return_value, scratch, kScratchRegister,
4828 prev_next_address_reg, prev_limit_reg));
4829 // function_address and thunk_arg might overlap but this function must not
4830 // corrupt them until the call is made (i.e. overlap with return_value is
4831 // fine).
4832 DCHECK(!AreAliased(function_address, // incoming parameters
4833 scratch, kScratchRegister, prev_next_address_reg,
4834 prev_limit_reg));
4835 DCHECK(!AreAliased(thunk_arg, // incoming parameters
4836 scratch, kScratchRegister, prev_next_address_reg,
4837 prev_limit_reg));
4838 {
4839 ASM_CODE_COMMENT_STRING(masm,
4840 "Allocate HandleScope in callee-save registers.");
4841 __ movq(prev_next_address_reg, next_mem_op);
4842 __ movq(prev_limit_reg, limit_mem_op);
4843 __ addl(level_mem_op, Immediate(1));
4844 }
4845
4846 Label profiler_or_side_effects_check_enabled, done_api_call;
4847 if (with_profiling) {
4848 __ RecordComment("Check if profiler or side effects check is enabled");
4849 __ cmpb(__ ExternalReferenceAsOperand(IsolateFieldId::kExecutionMode),
4850 Immediate(0));
4851 __ j(not_zero, &profiler_or_side_effects_check_enabled);
4852#ifdef V8_RUNTIME_CALL_STATS
4853 __ RecordComment("Check if RCS is enabled");
4854 __ Move(scratch, ER::address_of_runtime_stats_flag());
4855 __ cmpl(Operand(scratch, 0), Immediate(0));
4856 __ j(not_zero, &profiler_or_side_effects_check_enabled);
4857#endif // V8_RUNTIME_CALL_STATS
4858 }
4859
4860 __ RecordComment("Call the api function directly.");
4861 __ call(function_address);
4862 __ bind(&done_api_call);
4863
4864 __ RecordComment("Load the value from ReturnValue");
4865 __ movq(return_value, return_value_operand);
4866
4867 {
4868 ASM_CODE_COMMENT_STRING(
4869 masm,
4870 "No more valid handles (the result handle was the last one)."
4871 "Restore previous handle scope.");
4872 __ subl(level_mem_op, Immediate(1));
4873 __ Assert(above_equal, AbortReason::kInvalidHandleScopeLevel);
4874 __ movq(next_mem_op, prev_next_address_reg);
4875 __ cmpq(prev_limit_reg, limit_mem_op);
4876 __ j(not_equal, &delete_allocated_handles);
4877 }
4878
4879 __ RecordComment("Leave the API exit frame.");
4880 __ bind(&leave_exit_frame);
4881
4882 Register argc_reg = prev_limit_reg;
4883 if (argc_operand != nullptr) {
4884 __ movq(argc_reg, *argc_operand);
4885 }
4886 __ LeaveExitFrame();
4887
4888 {
4889 ASM_CODE_COMMENT_STRING(masm,
4890 "Check if the function scheduled an exception.");
4892 __ ExternalReferenceAsOperand(ER::exception_address(isolate), no_reg),
4893 RootIndex::kTheHoleValue);
4894 __ j(not_equal, &propagate_exception);
4895 }
4896
4897 __ AssertJSAny(return_value, scratch,
4898 AbortReason::kAPICallReturnedInvalidObject);
4899
4900 if (argc_operand == nullptr) {
4901 DCHECK_NE(slots_to_drop_on_return, 0);
4902 __ ret(slots_to_drop_on_return * kSystemPointerSize);
4903 } else {
4904 __ PopReturnAddressTo(scratch);
4905 // {argc_operand} was loaded into {argc_reg} above.
4906 __ leaq(rsp, Operand(rsp, argc_reg, times_system_pointer_size,
4907 slots_to_drop_on_return * kSystemPointerSize));
4908 // Push and ret (instead of jmp) to keep the RSB and the CET shadow stack
4909 // balanced.
4910 __ PushReturnAddressFrom(scratch);
4911 __ ret(0);
4912 }
4913 if (with_profiling) {
4914 ASM_CODE_COMMENT_STRING(masm, "Call the api function via thunk wrapper.");
4915 // Call the api function via thunk wrapper.
4916 __ bind(&profiler_or_side_effects_check_enabled);
4917 // Additional parameter is the address of the actual callback function.
4918 if (thunk_arg.is_valid()) {
4919 MemOperand thunk_arg_mem_op = __ ExternalReferenceAsOperand(
4920 IsolateFieldId::kApiCallbackThunkArgument);
4921 __ movq(thunk_arg_mem_op, thunk_arg);
4922 }
4923 __ Call(thunk_ref);
4924 __ jmp(&done_api_call);
4925 }
4926 __ RecordComment("An exception was thrown. Propagate it.");
4927 __ bind(&propagate_exception);
4928 __ TailCallRuntime(Runtime::kPropagateException);
4929 {
4930 ASM_CODE_COMMENT_STRING(
4931 masm, "HandleScope limit has changed. Delete allocated extensions.");
4932 __ bind(&delete_allocated_handles);
4933 __ movq(limit_mem_op, prev_limit_reg);
4934 // Save the return value in a callee-save register.
4935 Register saved_result = prev_limit_reg;
4936 __ movq(saved_result, return_value);
4937 __ LoadAddress(kCArgRegs[0], ER::isolate_address());
4938 __ Call(ER::delete_handle_scope_extensions());
4939 __ movq(return_value, saved_result);
4940 __ jmp(&leave_exit_frame);
4941 }
4942}
4943
4944} // namespace internal
4945} // namespace v8
4946
4947#undef __
4948
4949#endif // V8_TARGET_ARCH_X64
void Cvttss2ui(Register dst, XMMRegister src, XMMRegister tmp)
void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE
void I32x8TruncF32x8U(YMMRegister dst, YMMRegister src, YMMRegister scratch1, YMMRegister scratch2)
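The members above are bare declarations, so here is a hedged illustration of how a few of them compose. GenerateIsJSFunctionCheck is a hypothetical helper, not part of this file; it only uses signatures listed above (CheckSmi, LoadMap, CmpInstanceType) plus the assembler's j/Label machinery, and it assumes the usual v8::internal environment (MacroAssembler, Register, Condition, InstanceType, Label) is available.

// Hypothetical sketch, not V8 source: composing a few of the listed helpers.
namespace v8::internal {

void GenerateIsJSFunctionCheck(MacroAssembler* masm, Register object,
                               Register scratch, Label* bailout) {
  // Smis carry no map, so route them to the bailout path first.
  Condition is_smi = masm->CheckSmi(object);
  masm->j(is_smi, bailout);
  // Load the object's map and compare its instance type.
  masm->LoadMap(scratch, object);
  masm->CmpInstanceType(scratch, JS_FUNCTION_TYPE);
  masm->j(not_equal, bailout);
  // Falls through when object is a JSFunction.
}

}  // namespace v8::internal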
static constexpr uint32_t kBitsPerCellLog2
Definition marking.h:99
static constexpr uint32_t kBitIndexMask
Definition marking.h:101
static constexpr intptr_t AreaStartOffset()
static constexpr MainThreadFlags kIsInReadOnlyHeapMask
static constexpr MainThreadFlags kPointersToHereAreInterestingMask
static constexpr intptr_t FlagsOffset()
static constexpr intptr_t MetadataOffset()
static constexpr MainThreadFlags kPointersFromHereAreInterestingMask
static constexpr intptr_t GetAlignmentMaskForAssembler()
static constexpr intptr_t MarkingBitmapOffset()
constexpr unsigned Count() const
static constexpr bool IsCompressedEmbeddedObject(Mode mode)
Definition reloc-info.h:206
static constexpr bool IsCodeTarget(Mode mode)
Definition reloc-info.h:196
static constexpr bool IsFullEmbeddedObject(Mode mode)
Definition reloc-info.h:203
static constexpr bool IsImmortalImmovable(RootIndex root_index)
Definition roots.h:616
static V8_EXPORT_PRIVATE const Function * FunctionForId(FunctionId id)
Definition runtime.cc:350
void PinsrHelper(Assembler *assm, AvxFn< Op > avx, NoAvxFn< Op > noavx, XMMRegister dst, XMMRegister src1, Op src2, uint8_t imm8, uint32_t *load_pc_offset=nullptr, std::optional< CpuFeature > feature=std::nullopt)
static SlotDescriptor ForCodePointerSlot()
Definition assembler.h:311
static constexpr Tagged< Smi > FromInt(int value)
Definition smi.h:38
Operand GetArgumentOperand(int index) const
static constexpr int32_t TypeToMarker(Type type)
Definition frames.h:196
static bool IsJavaScript(Type t)
Definition frames.h:284
static constexpr RegList ComputeSavedRegisters(Register object, Register slot_address=no_reg)
static constexpr Builtin GetRecordWriteBuiltin(SaveFPRegsMode fp_mode)
#define ASM_CODE_COMMENT_STRING(asm,...)
Definition assembler.h:618
#define ASM_CODE_COMMENT(asm)
Definition assembler.h:617
#define V8_ENABLE_LEAPTIERING_BOOL
Definition globals.h:151
#define COMPRESS_POINTERS_BOOL
Definition globals.h:99
#define DEBUG_BOOL
Definition globals.h:87
#define V8_ENABLE_SANDBOX_BOOL
Definition globals.h:160
#define QFMA(ps_or_pd)
#define QFMS(ps_or_pd)
#define MACRO_ASM_X64_ISPLAT_LIST(V)
ReadOnlyCheck
SmiCheck
ComparisonMode
ArgumentAdaptionMode
InvokeType
SetIsolateDataSlots
JumpMode
@ kPushAndReturn
constexpr unsigned CountLeadingZeros(T value)
Definition bits.h:100
constexpr unsigned CountTrailingZeros64(uint64_t value)
Definition bits.h:164
constexpr unsigned CountLeadingZeros64(uint64_t value)
Definition bits.h:125
constexpr unsigned CountTrailingZeros(T value)
Definition bits.h:144
constexpr unsigned CountPopulation(T value)
Definition bits.h:26
constexpr bool IsPowerOfTwo(T value)
Definition bits.h:187
auto Reversed(T &t)
Definition iterator.h:105
constexpr bool IsInRange(T value, U lower_limit, U higher_limit)
Definition bounds.h:20
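The base::bits helpers listed above (CountPopulation, CountTrailingZeros, CountLeadingZeros, IsPowerOfTwo) share their semantics with the C++20 <bit> facilities. The following standalone sketch is an analogy only, independent of the V8 tree, and is not how V8 implements them.

// Standalone analogy (not V8 code): same semantics via C++20 <bit>.
#include <bit>
#include <cassert>
#include <cstdint>

int main() {
  uint32_t mask = 0x00F0u;
  assert(std::popcount(mask) == 4);      // CountPopulation
  assert(std::countr_zero(mask) == 4);   // CountTrailingZeros
  assert(std::countl_zero(mask) == 24);  // CountLeadingZeros
  assert(std::has_single_bit(0x40u));    // IsPowerOfTwo, for non-zero values
  return 0;
}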
V8_INLINE constexpr std::optional< RootIndex > UniqueMapOfInstanceType(InstanceType type)
constexpr Register no_reg
V8_INLINE IndirectHandle< T > handle(Tagged< T > object, Isolate *isolate)
Definition handles-inl.h:72
constexpr Register kRootRegister
constexpr int kCodePointerTableEntrySizeLog2
constexpr int kByteSize
Definition globals.h:395
constexpr uint64_t kExternalPointerTagShift
RegListBase< DoubleRegister > DoubleRegList
Definition reglist-arm.h:15
constexpr int kTaggedSize
Definition globals.h:542
constexpr int kInt64Size
Definition globals.h:402
constexpr int kBitsPerByte
Definition globals.h:682
constexpr uint32_t kFP32SubnormalThresholdOfFP16
Definition conversions.h:66
bool DoubleToSmiInteger(double value, int *smi_int_value)
static V8_INLINE constexpr bool IsSharedExternalPointerType(ExternalPointerTagRange tag_range)
constexpr DoubleRegister kScratchDoubleReg
constexpr uint64_t kExternalPointerPayloadMask
const int kSmiTagSize
Definition v8-internal.h:87
RegListBase< Register > RegList
Definition reglist-arm.h:14
constexpr ExternalPointerTagRange kAnyExternalPointerTagRange(kFirstExternalPointerTag, kLastExternalPointerTag)
constexpr bool CodeKindCanTierUp(CodeKind kind)
Definition code-kind.h:95
constexpr Register kJavaScriptCallTargetRegister
constexpr int kPCOnStackSize
Definition globals.h:412
constexpr uint32_t kFP32MinFP16ZeroRepresentable
Definition conversions.h:64
constexpr int kCodePointerTableEntryCodeObjectOffset
constexpr int kTrustedPointerTableEntrySize
Operand FieldOperand(Register object, int offset)
const Address kWeakHeapObjectMask
Definition globals.h:967
V8_INLINE constexpr bool IsSmi(TaggedImpl< kRefType, StorageType > obj)
Definition objects.h:665
constexpr Register kJavaScriptCallArgCountRegister
Address Tagged_t
Definition globals.h:547
constexpr int kSystemPointerSizeLog2
Definition globals.h:494
base::StrongAlias< JSDispatchHandleAliasTag, uint32_t > JSDispatchHandle
Definition globals.h:557
TagRange< ExternalPointerTag > ExternalPointerTagRange
static const int kRegisterPassedArguments
constexpr int kFPOnStackSize
Definition globals.h:413
constexpr int kSystemPointerSize
Definition globals.h:410
const RegList kCallerSaved
Definition reglist-arm.h:42
const char * GetAbortReason(AbortReason reason)
constexpr int kTaggedSizeLog2
Definition globals.h:543
static constexpr int kMaxCParameters
constexpr uint32_t kZapValue
Definition globals.h:1005
constexpr bool SmiValuesAre31Bits()
constexpr Register kScratchRegister
Condition NegateCondition(Condition cond)
constexpr int kInt32Size
Definition globals.h:401
@ LAST_CALLABLE_JS_FUNCTION_TYPE
@ FIRST_CALLABLE_JS_FUNCTION_TYPE
constexpr Register kWasmImplicitArgRegister
constexpr Register kContextRegister
V8_EXPORT_PRIVATE bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
constexpr uint32_t kTrustedPointerHandleShift
constexpr uint32_t kFP32MaxFP16Representable
Definition conversions.h:65
constexpr uint32_t kCodePointerHandleShift
const int kHeapObjectTag
Definition v8-internal.h:72
Tagged< ClearedWeakValue > ClearedValue(PtrComprCageBase cage_base)
const int kSmiShiftSize
const int kSmiValueSize
V8_EXPORT_PRIVATE FlagValues v8_flags
constexpr uint32_t kFP32WithoutSignMask
Definition conversions.h:63
constexpr bool SmiValuesAre32Bits()
constexpr Register kJavaScriptCallCodeStartRegister
constexpr int kJSDispatchTableEntrySizeLog2
Definition globals.h:562
constexpr Register r11
constexpr Register kPtrComprCageBaseRegister
const intptr_t kSmiTagMask
Definition v8-internal.h:88
void CallApiFunctionAndReturn(MacroAssembler *masm, bool with_profiling, Register function_address, ExternalReference thunk_ref, Register thunk_arg, int slots_to_drop_on_return, MemOperand *argc_operand, MemOperand return_value_operand)
const int kSmiTag
Definition v8-internal.h:86
constexpr int kMaxInt
Definition globals.h:374
@ times_half_system_pointer_size
constexpr int kBitsPerByteLog2
Definition globals.h:683
constexpr uint64_t kTrustedPointerTableMarkBit
constexpr Register kCArgRegs[]
constexpr int kDoubleSize
Definition globals.h:407
constexpr Register kJavaScriptCallDispatchHandleRegister
static constexpr DoubleRegList kAllocatableDoubleRegisters
Definition reglist.h:43
constexpr uint32_t kCodePointerHandleMarker
const uint32_t kClearedWeakHeapObjectLower32
Definition globals.h:981
constexpr uint32_t kMaxUInt32
Definition globals.h:387
bool is_signed(Condition cond)
constexpr Register kJavaScriptCallNewTargetRegister
constexpr Register kJSFunctionRegister
constexpr uint64_t kExternalPointerShiftedTagMask
static V8_INLINE constexpr bool ExternalPointerCanBeEmpty(ExternalPointerTagRange tag_range)
constexpr bool PointerCompressionIsEnabled()
Tagged< To > Cast(Tagged< From > value, const v8::SourceLocation &loc=INIT_SOURCE_LOCATION_IN_DEBUG)
Definition casting.h:150
Local< T > Handle
#define DCHECK_LE(v1, v2)
Definition logging.h:490
#define CHECK(condition)
Definition logging.h:124
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define CHECK_EQ(lhs, rhs)
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define DCHECK_GT(v1, v2)
Definition logging.h:487
constexpr bool IsAligned(T value, U alignment)
Definition macros.h:403
#define FIELD_SIZE(Name)
Definition utils.h:259
#define V8_STATIC_ROOTS_BOOL
Definition v8config.h:1001
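Several of the constants indexed above (kSmiTag, kSmiTagSize, kSmiShiftSize, kSmiTagMask) describe the Smi tagging scheme that helpers such as CheckSmi and JumpIfNotSmi test for. The following is a standalone sketch, not V8 code; the concrete values are assumptions taken from the common 64-bit configuration with 31-bit Smi values, and the real definitions live in v8-internal.h.

// Standalone sketch of Smi tagging under the assumed 31-bit-Smi configuration.
#include <cassert>
#include <cstdint>

constexpr intptr_t kSmiTag = 0;   // assumed value
constexpr int kSmiTagSize = 1;    // assumed value
constexpr int kSmiShiftSize = 0;  // assumed value (31-bit Smi values)

constexpr intptr_t TagSmi(int32_t value) {
  // The integer lives in the upper bits; the low tag bit stays 0, which is
  // what the Smi checks above test against kSmiTagMask.
  return (static_cast<intptr_t>(value) << (kSmiTagSize + kSmiShiftSize)) | kSmiTag;
}

int main() {
  assert(TagSmi(5) == 10);       // value shifted left by the tag width
  assert((TagSmi(5) & 1) == 0);  // tag bit clear, i.e. a Smi
  return 0;
}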