v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
builtins-loong64.cc
1// Copyright 2021 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#if V8_TARGET_ARCH_LOONG64
6
12#include "src/debug/debug.h"
17// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
21#include "src/heap/heap-inl.h"
22#include "src/objects/cell.h"
23#include "src/objects/foreign.h"
27#include "src/objects/smi.h"
28#include "src/runtime/runtime.h"
29
30#if V8_ENABLE_WEBASSEMBLY
35#endif // V8_ENABLE_WEBASSEMBLY
36
37namespace v8 {
38namespace internal {
39
40#define __ ACCESS_MASM(masm)
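// Note: ACCESS_MASM(masm) expands to "masm->", so a line such as
// "__ Push(a0)" below is shorthand for "masm->Push(a0)". This shorthand is
// used by every builtin generator in this file.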
41
42void Builtins::Generate_Adaptor(MacroAssembler* masm,
43 int formal_parameter_count, Address address) {
45 __ TailCallBuiltin(
46 Builtins::AdaptorWithBuiltinExitFrame(formal_parameter_count));
47}
48
49namespace {
50
51enum class ArgumentsElementType {
52 kRaw, // Push arguments as they are.
53 kHandle // Dereference arguments before pushing.
54};
55
56void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
57 Register scratch, Register scratch2,
58 ArgumentsElementType element_type) {
59 DCHECK(!AreAliased(array, argc, scratch));
60 Label loop, entry;
61 __ Sub_d(scratch, argc, Operand(kJSArgcReceiverSlots));
62 __ Branch(&entry);
63 __ bind(&loop);
64 __ Alsl_d(scratch2, scratch, array, kSystemPointerSizeLog2);
65 __ Ld_d(scratch2, MemOperand(scratch2, 0));
66 if (element_type == ArgumentsElementType::kHandle) {
67 __ Ld_d(scratch2, MemOperand(scratch2, 0));
68 }
69 __ Push(scratch2);
70 __ bind(&entry);
71 __ Add_d(scratch, scratch, Operand(-1));
72 __ Branch(&loop, greater_equal, scratch, Operand(zero_reg));
73}
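// Note on Generate_PushArguments: the loop walks the indices from
// (argc - kJSArgcReceiverSlots - 1) down to 0, so the last JS argument is
// pushed first and the first argument ends up closest to the stack top;
// callers push the receiver separately afterwards. With kHandle, each slot
// of |array| holds a pointer to the value (a handle) and is dereferenced
// once before being pushed.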
74
75void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
76 // ----------- S t a t e -------------
77 // -- a0 : number of arguments
78 // -- a1 : constructor function
79 // -- a3 : new target
80 // -- cp : context
81 // -- ra : return address
82 // -- sp[...]: constructor arguments
83 // -----------------------------------
84
85 // Enter a construct frame.
86 {
87 FrameScope scope(masm, StackFrame::CONSTRUCT);
88
89 // Preserve the incoming parameters on the stack.
90 __ Push(cp, a0);
91
92 // Set up pointer to first argument (skip receiver).
93 __ Add_d(
94 t2, fp,
96 // Copy arguments and receiver to the expression stack.
97 // t2: Pointer to start of arguments.
98 // a0: Number of arguments.
99 Generate_PushArguments(masm, t2, a0, t3, t0, ArgumentsElementType::kRaw);
100 // The receiver for the builtin/api call.
101 __ PushRoot(RootIndex::kTheHoleValue);
102
103 // Call the function.
104 // a0: number of arguments (untagged)
105 // a1: constructor function
106 // a3: new target
107 __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
108
109 // Restore context from the frame.
111 // Restore arguments count from the frame.
113 // Leave construct frame.
114 }
115
116 // Remove caller arguments from the stack and return.
117 __ DropArguments(t3);
118 __ Ret();
119}
120
121} // namespace
122
123// The construct stub for ES5 constructor functions and ES6 class constructors.
124void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
125 // ----------- S t a t e -------------
126 // -- a0: number of arguments (untagged)
127 // -- a1: constructor function
128 // -- a3: new target
129 // -- cp: context
130 // -- ra: return address
131 // -- sp[...]: constructor arguments
132 // -----------------------------------
133
134 // Enter a construct frame.
135 FrameScope scope(masm, StackFrame::MANUAL);
136 Label post_instantiation_deopt_entry, not_create_implicit_receiver;
137 __ EnterFrame(StackFrame::CONSTRUCT);
138
139 // Preserve the incoming parameters on the stack.
140 __ Push(cp, a0, a1);
141 __ PushRoot(RootIndex::kUndefinedValue);
142 __ Push(a3);
143
144 // ----------- S t a t e -------------
145 // -- sp[0*kSystemPointerSize]: new target
146 // -- sp[1*kSystemPointerSize]: padding
147 // -- a1 and sp[2*kSystemPointerSize]: constructor function
148 // -- sp[3*kSystemPointerSize]: number of arguments
149 // -- sp[4*kSystemPointerSize]: context
150 // -----------------------------------
151
152 __ LoadTaggedField(
153 t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
154 __ Ld_wu(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
155 __ DecodeField<SharedFunctionInfo::FunctionKindBits>(t2);
156 __ JumpIfIsInRange(
157 t2, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
158 static_cast<uint32_t>(FunctionKind::kDerivedConstructor),
159 &not_create_implicit_receiver);
160
161 // If not derived class constructor: Allocate the new receiver object.
162 __ CallBuiltin(Builtin::kFastNewObject);
163 __ Branch(&post_instantiation_deopt_entry);
164
165 // Else: use TheHoleValue as receiver for constructor call
166 __ bind(&not_create_implicit_receiver);
167 __ LoadRoot(a0, RootIndex::kTheHoleValue);
168
169 // ----------- S t a t e -------------
170 // -- a0: receiver
171 // -- Slot 4 / sp[0*kSystemPointerSize]: new target
172 // -- Slot 3 / sp[1*kSystemPointerSize]: padding
173 // -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
174 // -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments
175 // -- Slot 0 / sp[4*kSystemPointerSize]: context
176 // -----------------------------------
177 // Deoptimizer enters here.
178 masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
179 masm->pc_offset());
180 __ bind(&post_instantiation_deopt_entry);
181
182 // Restore new target.
183 __ Pop(a3);
184
185 // Push the allocated receiver to the stack.
186 __ Push(a0);
187
188 // We need two copies because we may have to return the original one
189 // and the calling conventions dictate that the called function pops the
190 // receiver. The second copy is pushed after the arguments; we save it in a6
191 // since a0 will be clobbered by the call's return value.
192 __ mov(a6, a0);
193
194 // Set up pointer to last argument.
195 __ Add_d(
196 t2, fp,
198
199 // ----------- S t a t e -------------
200 // -- a3: new target
201 // -- sp[0*kSystemPointerSize]: implicit receiver
202 // -- sp[1*kSystemPointerSize]: implicit receiver
203 // -- sp[2*kSystemPointerSize]: padding
204 // -- sp[3*kSystemPointerSize]: constructor function
205 // -- sp[4*kSystemPointerSize]: number of arguments
206 // -- sp[5*kSystemPointerSize]: context
207 // -----------------------------------
208
209 // Restore constructor function and argument count.
212
213 Label stack_overflow;
214 __ StackOverflowCheck(a0, t0, t1, &stack_overflow);
215
216 // TODO(victorgomes): When the arguments adaptor is completely removed, we
217 // should get the formal parameter count and copy the arguments in its
218 // correct position (including any undefined), instead of delaying this to
219 // InvokeFunction.
220
221 // Copy arguments and receiver to the expression stack.
222 // t2: Pointer to start of argument.
223 // a0: Number of arguments.
224 Generate_PushArguments(masm, t2, a0, t0, t1, ArgumentsElementType::kRaw);
225 // We need two copies because we may have to return the original one
226 // and the calling conventions dictate that the called function pops the
227 // receiver. The second copy is pushed after the arguments.
228 __ Push(a6);
229
230 // Call the function.
231 __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
232
233 // If the result is an object (in the ECMA sense), we should get rid
234 // of the receiver and use the result; see ECMA-262 section 13.2.2-7
235 // on page 74.
236 Label use_receiver, do_throw, leave_and_return, check_receiver;
237
238 // If the result is undefined, we jump out to using the implicit receiver.
239 __ JumpIfNotRoot(a0, RootIndex::kUndefinedValue, &check_receiver);
240
241 // Otherwise we do a smi check and fall through to check if the return value
242 // is a valid receiver.
243
244 // Throw away the result of the constructor invocation and use the
245 // on-stack receiver as the result.
246 __ bind(&use_receiver);
247 __ Ld_d(a0, MemOperand(sp, 0 * kSystemPointerSize));
248 __ JumpIfRoot(a0, RootIndex::kTheHoleValue, &do_throw);
249
250 __ bind(&leave_and_return);
251 // Restore arguments count from the frame.
253 // Leave construct frame.
254 __ LeaveFrame(StackFrame::CONSTRUCT);
255
256 // Remove caller arguments from the stack and return.
257 __ DropArguments(a1);
258 __ Ret();
259
260 __ bind(&check_receiver);
261 __ JumpIfSmi(a0, &use_receiver);
262
263 // Check if the type of the result is not an object in the ECMA sense.
264 __ JumpIfJSAnyIsNotPrimitive(a0, t2, &leave_and_return);
265 __ Branch(&use_receiver);
266
267 __ bind(&do_throw);
268 // Restore the context from the frame.
270 __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
271 __ break_(0xCC);
272
273 __ bind(&stack_overflow);
274 // Restore the context from the frame.
276 __ CallRuntime(Runtime::kThrowStackOverflow);
277 __ break_(0xCC);
278}
279
280void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
281 Generate_JSBuiltinsConstructStubHelper(masm);
282}
283
284static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
285 Register scratch) {
286 DCHECK(!AreAliased(code, scratch));
287 // Verify that the code kind is baseline code via the CodeKind.
288 __ Ld_d(scratch, FieldMemOperand(code, Code::kFlagsOffset));
289 __ DecodeField<Code::KindField>(scratch);
290 __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
291 Operand(static_cast<int>(CodeKind::BASELINE)));
292}
293
294// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
295// the more general dispatch.
296static void GetSharedFunctionInfoBytecodeOrBaseline(
297 MacroAssembler* masm, Register sfi, Register bytecode, Register scratch1,
298 Label* is_baseline, Label* is_unavailable) {
299 DCHECK(!AreAliased(bytecode, scratch1));
300 ASM_CODE_COMMENT(masm);
301 Label done;
302
303 Register data = bytecode;
304 __ LoadTrustedPointerField(
305 data,
306 FieldMemOperand(sfi, SharedFunctionInfo::kTrustedFunctionDataOffset),
308
309 __ GetObjectType(data, scratch1, scratch1);
310
311#ifndef V8_JITLESS
312 if (v8_flags.debug_code) {
313 Label not_baseline;
314 __ Branch(&not_baseline, ne, scratch1, Operand(CODE_TYPE));
315 AssertCodeIsBaseline(masm, data, scratch1);
316 __ Branch(is_baseline);
317 __ bind(&not_baseline);
318 } else {
319 __ Branch(is_baseline, eq, scratch1, Operand(CODE_TYPE));
320 }
321#endif // !V8_JITLESS
322
323 __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
324 __ LoadProtectedPointerField(
325 bytecode, FieldMemOperand(data, InterpreterData::kBytecodeArrayOffset));
326
327 __ bind(&done);
328
329 __ GetObjectType(bytecode, scratch1, scratch1);
330 __ Branch(is_unavailable, ne, scratch1, Operand(BYTECODE_ARRAY_TYPE));
331}
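// Note: on fall-through, |bytecode| holds a BytecodeArray taken either
// directly from the SharedFunctionInfo's trusted data or unwrapped from an
// InterpreterData object. If the trusted data is baseline Code, the helper
// branches to |is_baseline|; if no BytecodeArray can be found, it branches
// to |is_unavailable|.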
332
333// static
334void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
335 // ----------- S t a t e -------------
336 // -- a0 : the value to pass to the generator
337 // -- a1 : the JSGeneratorObject to resume
338 // -- ra : return address
339 // -----------------------------------
340 // Store input value into generator object.
341 __ StoreTaggedField(
342 a0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
343 __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, a0,
345 // Check that a1 is still valid; RecordWrite might have clobbered it.
346 __ AssertGeneratorObject(a1);
347
348 // Load suspended function and context.
349 __ LoadTaggedField(a5,
350 FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
351 __ LoadTaggedField(cp, FieldMemOperand(a5, JSFunction::kContextOffset));
352
353 // Flood function if we are stepping.
354 Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
355 Label stepping_prepared;
356 ExternalReference debug_hook =
357 ExternalReference::debug_hook_on_function_call_address(masm->isolate());
358 __ li(a6, debug_hook);
359 __ Ld_b(a6, MemOperand(a6, 0));
360 __ Branch(&prepare_step_in_if_stepping, ne, a6, Operand(zero_reg));
361
362 // Flood function if we need to continue stepping in the suspended generator.
363 ExternalReference debug_suspended_generator =
364 ExternalReference::debug_suspended_generator_address(masm->isolate());
365 __ li(a6, debug_suspended_generator);
366 __ Ld_d(a6, MemOperand(a6, 0));
367 __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(a6));
368 __ bind(&stepping_prepared);
369
370 // Check the stack for overflow. We are not trying to catch interruptions
371 // (i.e. debug break and preemption) here, so check the "real stack limit".
372 Label stack_overflow;
373 __ LoadStackLimit(kScratchReg,
374 MacroAssembler::StackLimitKind::kRealStackLimit);
375 __ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
376
378
379 // Compute actual arguments count value as a formal parameter count without
380 // receiver, loaded from the dispatch table entry or shared function info.
381#if V8_ENABLE_LEAPTIERING
384 Register scratch = t5;
385 __ Ld_w(dispatch_handle,
386 FieldMemOperand(a5, JSFunction::kDispatchHandleOffset));
387 __ LoadEntrypointAndParameterCountFromJSDispatchTable(
388 code, argc, dispatch_handle, scratch);
389
390 // In case the formal parameter count is kDontAdaptArgumentsSentinel the
391 // actual arguments count should be set accordingly.
393 Label is_bigger;
394 __ BranchShort(&is_bigger, kGreaterThan, argc, Operand(JSParameterCount(0)));
395 __ li(argc, Operand(JSParameterCount(0)));
396 __ bind(&is_bigger);
397#else
398 __ LoadTaggedField(
399 argc, FieldMemOperand(a5, JSFunction::kSharedFunctionInfoOffset));
400 __ Ld_hu(argc, FieldMemOperand(
401 argc, SharedFunctionInfo::kFormalParameterCountOffset));
402
403 // Generator functions are always created from user code and thus the
404 // formal parameter count is never equal to kDontAdaptArgumentsSentinel,
405 // which is used only for certain non-generator builtin functions.
406#endif // V8_ENABLE_LEAPTIERING
407
408 // ----------- S t a t e -------------
409 // -- a0 : actual arguments count
410 // -- a1 : the JSGeneratorObject to resume
411 // -- a2 : target code object (leaptiering only)
412 // -- a4 : dispatch handle (leaptiering only)
413 // -- a5 : generator function
414 // -- cp : generator context
415 // -- ra : return address
416 // -----------------------------------
417
418 // Copy the function arguments from the generator object's register file.
419 {
420 Label done_loop, loop;
421 __ Sub_d(a3, argc, Operand(kJSArgcReceiverSlots));
422 __ LoadTaggedField(
423 t1,
424 FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset));
425 __ bind(&loop);
426 __ Sub_d(a3, a3, Operand(1));
427 __ Branch(&done_loop, lt, a3, Operand(zero_reg));
428 __ Alsl_d(kScratchReg, a3, t1, kTaggedSizeLog2);
429 __ LoadTaggedField(
432 __ Push(kScratchReg);
433 __ Branch(&loop);
434 __ bind(&done_loop);
435 // Push receiver.
436 __ LoadTaggedField(kScratchReg,
437 FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
438 __ Push(kScratchReg);
439 }
440
441 // Underlying function needs to have bytecode available.
442 if (v8_flags.debug_code) {
443 Label ok, is_baseline, is_unavailable;
444 Register sfi = a3;
445 Register bytecode = a3;
446 __ LoadTaggedField(
447 sfi, FieldMemOperand(a5, JSFunction::kSharedFunctionInfoOffset));
448 GetSharedFunctionInfoBytecodeOrBaseline(masm, sfi, bytecode, t5,
449 &is_baseline, &is_unavailable);
450 __ Branch(&ok);
451
452 __ bind(&is_unavailable);
453 __ Abort(AbortReason::kMissingBytecodeArray);
454
455 __ bind(&is_baseline);
456 __ GetObjectType(a3, a3, bytecode);
457 __ Assert(eq, AbortReason::kMissingBytecodeArray, bytecode,
458 Operand(CODE_TYPE));
459 __ bind(&ok);
460 }
461
462 // Resume (Ignition/TurboFan) generator object.
463 {
464 // We abuse new.target both to indicate that this is a resume call and to
465 // pass in the generator object. In ordinary calls, new.target is always
466 // undefined because generator functions are non-constructable.
467 __ Move(a3, a1); // new.target
468 __ Move(a1, a5); // target
469#if V8_ENABLE_LEAPTIERING
470 // Actual arguments count and code start are already initialized above.
471 __ Jump(code);
472#else
473 // Actual arguments count is already initialized above.
474 __ JumpJSFunction(a1);
475#endif // V8_ENABLE_LEAPTIERING
476 }
477
478 __ bind(&prepare_step_in_if_stepping);
479 {
480 FrameScope scope(masm, StackFrame::INTERNAL);
481 __ Push(a1, a5);
482 // Push hole as receiver since we do not use it for stepping.
483 __ PushRoot(RootIndex::kTheHoleValue);
484 __ CallRuntime(Runtime::kDebugOnFunctionCall);
485 __ Pop(a1);
486 }
487 __ LoadTaggedField(a5,
488 FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
489 __ Branch(&stepping_prepared);
490
491 __ bind(&prepare_step_in_suspended_generator);
492 {
493 FrameScope scope(masm, StackFrame::INTERNAL);
494 __ Push(a1);
495 __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
496 __ Pop(a1);
497 }
498 __ LoadTaggedField(a5,
499 FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
500 __ Branch(&stepping_prepared);
501
502 __ bind(&stack_overflow);
503 {
504 FrameScope scope(masm, StackFrame::INTERNAL);
505 __ CallRuntime(Runtime::kThrowStackOverflow);
506 __ break_(0xCC); // This should be unreachable.
507 }
508}
509
510void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
511 FrameScope scope(masm, StackFrame::INTERNAL);
512 __ Push(a1);
513 __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
514}
515
516// Clobbers scratch1 and scratch2; preserves all other registers.
517static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
518 Register scratch1, Register scratch2) {
519 // Check the stack for overflow. We are not trying to catch
520 // interruptions (e.g. debug break and preemption) here, so the "real stack
521 // limit" is checked.
522 Label okay;
523 __ LoadStackLimit(scratch1, MacroAssembler::StackLimitKind::kRealStackLimit);
524 // Make scratch1 the space we have left. The stack might already be
525 // overflowed here, which will cause scratch1 to become negative.
526 __ sub_d(scratch1, sp, scratch1);
527 // Check if the arguments will overflow the stack.
528 __ slli_d(scratch2, argc, kSystemPointerSizeLog2);
529 __ Branch(&okay, gt, scratch1, Operand(scratch2)); // Signed comparison.
530
531 // Out of stack space.
532 __ CallRuntime(Runtime::kThrowStackOverflow);
533
534 __ bind(&okay);
535}
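// Note on the check above: the comparison is deliberately signed. If the
// stack is already exhausted, sp - limit is negative in scratch1, so the
// "gt" branch is not taken and execution falls through to
// Runtime::kThrowStackOverflow.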
536
537namespace {
538
539// Called with the native C calling convention. The corresponding function
540// signature is either:
541//
542// using JSEntryFunction = GeneratedCode<Address(
543// Address root_register_value, Address new_target, Address target,
544// Address receiver, intptr_t argc, Address** args)>;
545// or
546// using JSEntryFunction = GeneratedCode<Address(
547// Address root_register_value, MicrotaskQueue* microtask_queue)>;
548void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
549 Builtin entry_trampoline) {
550 Label invoke, handler_entry, exit;
551
552 {
553 NoRootArrayScope no_root_array(masm);
554
555 // Registers:
556 // either
557 // a0: root register value
558 // a1: entry address
559 // a2: function
560 // a3: receiver
561 // a4: argc
562 // a5: argv
563 // or
564 // a0: root register value
565 // a1: microtask_queue
566
567 // Save callee saved registers on the stack.
568 __ MultiPush(kCalleeSaved | ra);
569
570 // Save callee-saved FPU registers.
571 __ MultiPushFPU(kCalleeSavedFPU);
572 // Set up the reserved register for 0.0.
573 __ Move(kDoubleRegZero, 0.0);
574
575 // Initialize the root register.
576 // C calling convention. The first argument is passed in a0.
577 __ mov(kRootRegister, a0);
578
579#ifdef V8_COMPRESS_POINTERS
580 // Initialize the pointer cage base register.
581 __ LoadRootRelative(kPtrComprCageBaseRegister,
582 IsolateData::cage_base_offset());
583#endif
584 }
585
586 // a1: entry address
587 // a2: function
588 // a3: receiver
589 // a4: argc
590 // a5: argv
591
592 // We build an EntryFrame.
593 __ li(s1, Operand(-1)); // Push a bad frame pointer to fail if it is used.
594 __ li(s2, Operand(StackFrame::TypeToMarker(type)));
595 __ li(s3, Operand(StackFrame::TypeToMarker(type)));
596 ExternalReference c_entry_fp = ExternalReference::Create(
597 IsolateAddressId::kCEntryFPAddress, masm->isolate());
598 __ li(s5, c_entry_fp);
599 __ Ld_d(s4, MemOperand(s5, 0));
600 __ Push(s1, s2, s3, s4);
601
602 // Clear c_entry_fp, now we've pushed its previous value to the stack.
603 // If the c_entry_fp is not already zero and we don't clear it, the
604 // StackFrameIteratorForProfiler will assume we are executing C++ and miss the
605 // JS frames on top.
606 __ St_d(zero_reg, MemOperand(s5, 0));
607
608 __ LoadIsolateField(s1, IsolateFieldId::kFastCCallCallerFP);
609 __ Ld_d(s2, MemOperand(s1, 0));
610 __ St_d(zero_reg, MemOperand(s1, 0));
611 __ LoadIsolateField(s1, IsolateFieldId::kFastCCallCallerPC);
612 __ Ld_d(s3, MemOperand(s1, 0));
613 __ St_d(zero_reg, MemOperand(s1, 0));
614 __ Push(s2, s3);
615
616 // Set up frame pointer for the frame to be pushed.
618
619 // Registers:
620 // either
621 // a1: entry address
622 // a2: function
623 // a3: receiver
624 // a4: argc
625 // a5: argv
626 // or
627 // a1: microtask_queue
628 //
629 // Stack:
630 // fast api call pc |
631 // fast api call fp |
632 // C entry FP |
633 // function slot | entry frame
634 // context slot |
635 // bad fp (0xFF...F) |
636 // callee saved registers + ra
637
638 // If this is the outermost JS call, set js_entry_sp value.
639 Label non_outermost_js;
640 ExternalReference js_entry_sp = ExternalReference::Create(
641 IsolateAddressId::kJSEntrySPAddress, masm->isolate());
642 __ li(s1, js_entry_sp);
643 __ Ld_d(s2, MemOperand(s1, 0));
644 __ Branch(&non_outermost_js, ne, s2, Operand(zero_reg));
645 __ St_d(fp, MemOperand(s1, 0));
647 Label cont;
648 __ b(&cont);
649 __ nop(); // Padding nop; LoongArch has no branch delay slots (likely kept from the MIPS port).
650 __ bind(&non_outermost_js);
651 __ li(s3, Operand(StackFrame::INNER_JSENTRY_FRAME));
652 __ bind(&cont);
653 __ Push(s3);
654
655 // Jump to a faked try block that does the invoke, with a faked catch
656 // block that sets the exception.
657 __ jmp(&invoke);
658 __ bind(&handler_entry);
659
660 // Store the current pc as the handler offset. It's used later to create the
661 // handler table.
662 masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
663
664 // Caught exception: Store result (exception) in the exception
665 // field in the JSEnv and return a failure sentinel. Coming in here the
666 // fp will be invalid because the PushStackHandler below sets it to 0 to
667 // signal the existence of the JSEntry frame.
668 __ li(s1, ExternalReference::Create(IsolateAddressId::kExceptionAddress,
669 masm->isolate()));
670 __ St_d(a0,
671 MemOperand(s1, 0)); // We come back from 'invoke'. result is in a0.
672 __ LoadRoot(a0, RootIndex::kException);
673 __ b(&exit);
674 __ nop(); // Padding nop; LoongArch has no branch delay slots (likely kept from the MIPS port).
675
676 // Invoke: Link this frame into the handler chain.
677 __ bind(&invoke);
678 __ PushStackHandler();
679 // If an exception not caught by another handler occurs, this handler
680 // returns control to the code after the bal(&invoke) above, which
681 // restores all kCalleeSaved registers (including cp and fp) to their
682 // saved values before returning a failure to C.
683 //
684 // Registers:
685 // either
686 // a0: root register value
687 // a1: entry address
688 // a2: function
689 // a3: receiver
690 // a4: argc
691 // a5: argv
692 // or
693 // a0: root register value
694 // a1: microtask_queue
695 //
696 // Stack:
697 // handler frame
698 // entry frame
699 // fast api call pc
700 // fast api call fp
701 // C entry FP
702 // function slot
703 // context slot
704 // bad fp (0xFF...F)
705 // callee saved registers + ra
706
707 // Invoke the function by calling through JS entry trampoline builtin and
708 // pop the faked function when we return.
709 __ CallBuiltin(entry_trampoline);
710
711 // Unlink this frame from the handler chain.
712 __ PopStackHandler();
713
714 __ bind(&exit); // a0 holds result
715 // Check if the current stack frame is marked as the outermost JS frame.
716 Label non_outermost_js_2;
717 __ Pop(a5);
718 __ Branch(&non_outermost_js_2, ne, a5,
720 __ li(a5, js_entry_sp);
721 __ St_d(zero_reg, MemOperand(a5, 0));
722 __ bind(&non_outermost_js_2);
723
724 // Restore the top frame descriptors from the stack.
725 __ Pop(a4, a5);
726 __ LoadIsolateField(a6, IsolateFieldId::kFastCCallCallerFP);
727 __ St_d(a4, MemOperand(a6, 0));
728 __ LoadIsolateField(a6, IsolateFieldId::kFastCCallCallerPC);
729 __ St_d(a5, MemOperand(a6, 0));
730
731 __ Pop(a5);
732 __ li(a4, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
733 masm->isolate()));
734 __ St_d(a5, MemOperand(a4, 0));
735
736 // Reset the stack to the callee saved registers.
738
739 // Restore callee-saved fpu registers.
740 __ MultiPopFPU(kCalleeSavedFPU);
741
742 // Restore callee saved registers from the stack.
743 __ MultiPop(kCalleeSaved | ra);
744 // Return.
745 __ Jump(ra);
746}
747
748} // namespace
749
750void Builtins::Generate_JSEntry(MacroAssembler* masm) {
751 Generate_JSEntryVariant(masm, StackFrame::ENTRY, Builtin::kJSEntryTrampoline);
752}
753
754void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
755 Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
756 Builtin::kJSConstructEntryTrampoline);
757}
758
759void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
760 Generate_JSEntryVariant(masm, StackFrame::ENTRY,
761 Builtin::kRunMicrotasksTrampoline);
762}
763
764static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
765 bool is_construct) {
766 // ----------- S t a t e -------------
767 // -- a1: new.target
768 // -- a2: function
769 // -- a3: receiver_pointer
770 // -- a4: argc
771 // -- a5: argv
772 // -----------------------------------
773
774 // Enter an internal frame.
775 {
776 FrameScope scope(masm, StackFrame::INTERNAL);
777
778 // Setup the context (we need to use the caller context from the isolate).
779 ExternalReference context_address = ExternalReference::Create(
780 IsolateAddressId::kContextAddress, masm->isolate());
781 __ li(cp, context_address);
782 __ Ld_d(cp, MemOperand(cp, 0));
783
784 // Push the function and the receiver onto the stack.
785 __ Push(a2);
786
787 // Check if we have enough stack space to push all arguments.
788 __ mov(a6, a4);
789 Generate_CheckStackOverflow(masm, a6, a0, s2);
790
791 // Copy arguments to the stack.
792 // a4: argc
793 // a5: argv, i.e. points to first arg
794 Generate_PushArguments(masm, a5, a4, s1, s2, ArgumentsElementType::kHandle);
795
796 // Push the receiver.
797 __ Push(a3);
798
799 // a0: argc
800 // a1: function
801 // a3: new.target
802 __ mov(a3, a1);
803 __ mov(a1, a2);
804 __ mov(a0, a4);
805
806 // Initialize all JavaScript callee-saved registers, since they will be seen
807 // by the garbage collector as part of handlers.
808 __ LoadRoot(a4, RootIndex::kUndefinedValue);
809 __ mov(a5, a4);
810 __ mov(s1, a4);
811 __ mov(s2, a4);
812 __ mov(s3, a4);
813 __ mov(s4, a4);
814 __ mov(s5, a4);
815#ifndef V8_COMPRESS_POINTERS
816 __ mov(s8, a4);
817#endif
818 // s6 holds the root address. Do not clobber.
819 // s7 is cp. Do not init.
820 // s8 is the pointer cage base register (kPtrComprCageBaseRegister).
821
822 // Invoke the code.
823 Builtin builtin = is_construct ? Builtin::kConstruct : Builtins::Call();
824 __ CallBuiltin(builtin);
825
826 // Leave internal frame.
827 }
828 __ Jump(ra);
829}
830
831void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
832 Generate_JSEntryTrampolineHelper(masm, false);
833}
834
835void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
836 Generate_JSEntryTrampolineHelper(masm, true);
837}
838
839void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
840 // a1: microtask_queue
842 __ TailCallBuiltin(Builtin::kRunMicrotasks);
843}
844
845static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
846 Register scratch2) {
847 Register params_size = scratch1;
848
849 // Get the count of the formal parameters + receiver (in slots).
850 __ Ld_d(params_size,
852 __ Ld_hu(params_size,
853 FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
854
855 Register actual_params_size = scratch2;
856 // Compute the count of the actual parameters + receiver (in slots).
857 __ Ld_d(actual_params_size,
859
860 // If actual is bigger than formal, then we should use it to free up the stack
861 // arguments.
862 __ slt(t2, params_size, actual_params_size);
863 __ Movn(params_size, actual_params_size, t2);
864
865 // Leave the frame (also dropping the register file).
866 __ LeaveFrame(StackFrame::INTERPRETED);
867
868 // Drop arguments.
869 __ DropArguments(params_size);
870}
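// Note: taking max(formal, actual) above handles over-application. As a
// rough example (slot counts, assuming they include the receiver slot as
// elsewhere in this file): a function declared with 2 parameters but called
// with 5 arguments must drop the larger, actual count of slots, otherwise
// the extra arguments would be left behind once the interpreted frame is
// torn down.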
871
872// Advance the current bytecode offset. This simulates what all bytecode
873// handlers do upon completion of the underlying operation. Will bail out to a
874// label if the bytecode (without prefix) is a return bytecode. Will not advance
875// the bytecode offset if the current bytecode is a JumpLoop, instead just
876// re-executing the JumpLoop to jump to the correct bytecode.
877static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
878 Register bytecode_array,
879 Register bytecode_offset,
880 Register bytecode, Register scratch1,
881 Register scratch2, Register scratch3,
882 Label* if_return) {
883 Register bytecode_size_table = scratch1;
884
885 // The bytecode offset value will be increased by one in wide and extra wide
886 // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
887 // will restore the original bytecode. In order to simplify the code, we have
888 // a backup of it.
889 Register original_bytecode_offset = scratch3;
890 DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode,
891 bytecode_size_table, original_bytecode_offset));
892 __ Move(original_bytecode_offset, bytecode_offset);
893 __ li(bytecode_size_table, ExternalReference::bytecode_size_table_address());
894
895 // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
896 Label process_bytecode, extra_wide;
897 static_assert(0 == static_cast<int>(interpreter::Bytecode::kWide));
898 static_assert(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
899 static_assert(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
900 static_assert(3 ==
901 static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
902 __ Branch(&process_bytecode, hi, bytecode, Operand(3));
903 __ And(scratch2, bytecode, Operand(1));
904 __ Branch(&extra_wide, ne, scratch2, Operand(zero_reg));
905
906 // Load the next bytecode and update table to the wide scaled table.
907 __ Add_d(bytecode_offset, bytecode_offset, Operand(1));
908 __ Add_d(scratch2, bytecode_array, bytecode_offset);
909 __ Ld_bu(bytecode, MemOperand(scratch2, 0));
910 __ Add_d(bytecode_size_table, bytecode_size_table,
912 __ jmp(&process_bytecode);
913
914 __ bind(&extra_wide);
915 // Load the next bytecode and update table to the extra wide scaled table.
916 __ Add_d(bytecode_offset, bytecode_offset, Operand(1));
917 __ Add_d(scratch2, bytecode_array, bytecode_offset);
918 __ Ld_bu(bytecode, MemOperand(scratch2, 0));
919 __ Add_d(bytecode_size_table, bytecode_size_table,
921
922 __ bind(&process_bytecode);
923
924// Bailout to the return label if this is a return bytecode.
925#define JUMP_IF_EQUAL(NAME) \
926 __ Branch(if_return, eq, bytecode, \
927 Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
929#undef JUMP_IF_EQUAL
930
931 // If this is a JumpLoop, re-execute it to perform the jump to the beginning
932 // of the loop.
933 Label end, not_jump_loop;
934 __ Branch(&not_jump_loop, ne, bytecode,
935 Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
936 // We need to restore the original bytecode_offset since we might have
937 // increased it to skip the wide / extra-wide prefix bytecode.
938 __ Move(bytecode_offset, original_bytecode_offset);
939 __ jmp(&end);
940
941 __ bind(&not_jump_loop);
942 // Otherwise, load the size of the current bytecode and advance the offset.
943 __ Add_d(scratch2, bytecode_size_table, bytecode);
944 __ Ld_b(scratch2, MemOperand(scratch2, 0));
945 __ Add_d(bytecode_offset, bytecode_offset, scratch2);
946
947 __ bind(&end);
948}
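// Worked example of the prefix handling above (a sketch): for a stream
// [kWide, kAddSmi, <16-bit operands>], the first dispatch lands on kWide.
// The helper advances the offset by one, reloads |bytecode| (now kAddSmi)
// and switches to the wide bytecode size table, so the doubled operand
// widths are accounted for when the offset is finally advanced.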
949
950namespace {
951
952void ResetSharedFunctionInfoAge(MacroAssembler* masm, Register sfi) {
953 __ St_h(zero_reg, FieldMemOperand(sfi, SharedFunctionInfo::kAgeOffset));
954}
955
956void ResetJSFunctionAge(MacroAssembler* masm, Register js_function,
957 Register scratch) {
958 __ LoadTaggedField(
959 scratch,
960 FieldMemOperand(js_function, JSFunction::kSharedFunctionInfoOffset));
961 ResetSharedFunctionInfoAge(masm, scratch);
962}
963
964void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm,
965 Register feedback_vector, Register scratch) {
966 DCHECK(!AreAliased(feedback_vector, scratch));
967 __ Ld_bu(scratch,
968 FieldMemOperand(feedback_vector, FeedbackVector::kOsrStateOffset));
969 __ And(scratch, scratch, Operand(~FeedbackVector::OsrUrgencyBits::kMask));
970 __ St_b(scratch,
971 FieldMemOperand(feedback_vector, FeedbackVector::kOsrStateOffset));
972}
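// Note: the helper above clears only FeedbackVector::OsrUrgencyBits inside
// the osr_state byte and writes the byte back, leaving the remaining OSR
// state bits untouched. It is called from the interpreter and baseline
// prologues below to reset the on-stack-replacement urgency on entry.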
973
974} // namespace
975
976// static
977void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
978 UseScratchRegisterScope temps(masm);
979 temps.Include({s1, s2, s3});
980 auto descriptor =
981 Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
982 Register closure = descriptor.GetRegisterParameter(
983 BaselineOutOfLinePrologueDescriptor::kClosure);
984 // Load the feedback cell and vector from the closure.
985 Register feedback_cell = temps.Acquire();
986 Register feedback_vector = temps.Acquire();
987 __ LoadTaggedField(feedback_cell,
988 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
989 __ LoadTaggedField(
990 feedback_vector,
991 FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset));
992 {
993 UseScratchRegisterScope temps(masm);
994 Register scratch = temps.Acquire();
995 __ AssertFeedbackVector(feedback_vector, scratch);
996 }
997
998#ifndef V8_ENABLE_LEAPTIERING
999 // Check for a tiering state.
1000 Label flags_need_processing;
1001 Register flags = no_reg;
1002 {
1003 UseScratchRegisterScope temps(masm);
1004 flags = temps.Acquire();
1005 // flags is only used in |flags_need_processing|;
1006 // outside of that path the register can be reused.
1007 __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
1008 flags, feedback_vector, CodeKind::BASELINE, &flags_need_processing);
1009 }
1010#endif // !V8_ENABLE_LEAPTIERING
1011
1012 {
1013 UseScratchRegisterScope temps(masm);
1014 ResetFeedbackVectorOsrUrgency(masm, feedback_vector, temps.Acquire());
1015 }
1016 // Increment invocation count for the function.
1017 {
1018 UseScratchRegisterScope temps(masm);
1019 Register invocation_count = temps.Acquire();
1020 __ Ld_w(invocation_count,
1021 FieldMemOperand(feedback_vector,
1022 FeedbackVector::kInvocationCountOffset));
1023 __ Add_w(invocation_count, invocation_count, Operand(1));
1024 __ St_w(invocation_count,
1025 FieldMemOperand(feedback_vector,
1026 FeedbackVector::kInvocationCountOffset));
1027 }
1028
1029 FrameScope frame_scope(masm, StackFrame::MANUAL);
1030 {
1031 ASM_CODE_COMMENT_STRING(masm, "Frame Setup");
1032 // Normally the first thing we'd do here is Push(ra, fp), but we already
1033 // entered the frame in BaselineCompiler::Prologue, as we had to use the
1034 // value ra before the call to this BaselineOutOfLinePrologue builtin.
1035 Register callee_context = descriptor.GetRegisterParameter(
1036 BaselineOutOfLinePrologueDescriptor::kCalleeContext);
1037 Register callee_js_function = descriptor.GetRegisterParameter(
1038 BaselineOutOfLinePrologueDescriptor::kClosure);
1039 {
1040 UseScratchRegisterScope temps(masm);
1041 ResetJSFunctionAge(masm, callee_js_function, temps.Acquire());
1042 }
1043 __ Push(callee_context, callee_js_function);
1044 DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
1045 DCHECK_EQ(callee_js_function, kJSFunctionRegister);
1046
1047 Register argc = descriptor.GetRegisterParameter(
1048 BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
1049 // We'll use the bytecode for both code age/OSR resetting, and pushing onto
1050 // the frame, so load it into a register.
1051 Register bytecode_array = descriptor.GetRegisterParameter(
1052 BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
1053 __ Push(argc, bytecode_array, feedback_cell, feedback_vector);
1054
1055 {
1056 UseScratchRegisterScope temps(masm);
1057 Register invocation_count = temps.Acquire();
1058 __ AssertFeedbackVector(feedback_vector, invocation_count);
1059 }
1060 }
1061
1062 Label call_stack_guard;
1063 Register frame_size = descriptor.GetRegisterParameter(
1064 BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
1065 {
1066 ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt check");
1067 // Stack check. This folds the checks for both the interrupt stack limit
1068 // check and the real stack limit into one by just checking for the
1069 // interrupt limit. The interrupt limit is either equal to the real stack
1070 // limit or tighter. By ensuring we have space until that limit after
1071 // building the frame we can quickly precheck both at once.
1072 UseScratchRegisterScope temps(masm);
1073 Register sp_minus_frame_size = temps.Acquire();
1074 __ Sub_d(sp_minus_frame_size, sp, frame_size);
1075 Register interrupt_limit = temps.Acquire();
1076 __ LoadStackLimit(interrupt_limit,
1077 MacroAssembler::StackLimitKind::kInterruptStackLimit);
1078 __ Branch(&call_stack_guard, Uless, sp_minus_frame_size,
1079 Operand(interrupt_limit));
1080 }
1081
1082 // Do "fast" return to the caller pc in ra.
1083 // TODO(v8:11429): Document this frame setup better.
1084 __ Ret();
1085
1086#ifndef V8_ENABLE_LEAPTIERING
1087 __ bind(&flags_need_processing);
1088 {
1089 ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
1090 UseScratchRegisterScope temps(masm);
1091 temps.Exclude(flags);
1092 // Ensure the flags register is not allocated again.
1093 // Drop the frame created by the baseline call.
1094 __ Pop(ra, fp);
1095 __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector);
1096 __ Trap();
1097 }
1098#endif // !V8_ENABLE_LEAPTIERING
1099
1100 __ bind(&call_stack_guard);
1101 {
1102 ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
1103 FrameScope frame_scope(masm, StackFrame::INTERNAL);
1104 // Save incoming new target or generator
1106#ifdef V8_ENABLE_LEAPTIERING
1107 // No need to SmiTag as dispatch handles always look like Smis.
1108 static_assert(kJSDispatchHandleShift > 0);
1110#endif
1111 __ SmiTag(frame_size);
1112 __ Push(frame_size);
1113 __ CallRuntime(Runtime::kStackGuardWithGap);
1114#ifdef V8_ENABLE_LEAPTIERING
1116#endif
1118 }
1119 __ Ret();
1120 temps.Exclude({s1, s2, s3});
1121}
1122
1123// static
1124void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) {
1125 // We're here because we got deopted during BaselineOutOfLinePrologue's stack
1126 // check. Undo all its frame creation and call into the interpreter instead.
1127
1128 // Drop the feedback vector, the bytecode offset (was the feedback vector
1129 // but got replaced during deopt) and bytecode array.
1130 __ Drop(3);
1131
1132 // Context, closure, argc.
1135
1136 // Drop frame pointer
1137 __ LeaveFrame(StackFrame::BASELINE);
1138
1139 // Enter the interpreter.
1140 __ TailCallBuiltin(Builtin::kInterpreterEntryTrampoline);
1141}
1142
1143// Generate code for entering a JS function with the interpreter.
1144// On entry to the function the receiver and arguments have been pushed on the
1145// stack left to right.
1146//
1147// The live registers are:
1148// o a0 : actual argument count
1149// o a1: the JS function object being called.
1150// o a3: the incoming new target or generator object
1151// o a4: the dispatch handle through which we were called
1152// o cp: our context
1153// o fp: the caller's frame pointer
1154// o sp: stack pointer
1155// o ra: return address
1156//
1157// The function builds an interpreter frame. See InterpreterFrameConstants in
1158// frame-constants.h for its layout.
1159void Builtins::Generate_InterpreterEntryTrampoline(
1160 MacroAssembler* masm, InterpreterEntryTrampolineMode mode) {
1161 Register closure = a1;
1162
1163 // Get the bytecode array from the function object and load it into
1164 // kInterpreterBytecodeArrayRegister.
1165 Register sfi = a5;
1166 __ LoadTaggedField(
1167 sfi, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
1168 ResetSharedFunctionInfoAge(masm, sfi);
1169
1170
1171 // The bytecode array could have been flushed from the shared function info,
1172 // if so, call into CompileLazy.
1173 Label is_baseline, compile_lazy;
1174 GetSharedFunctionInfoBytecodeOrBaseline(
1175 masm, sfi, kInterpreterBytecodeArrayRegister, kScratchReg, &is_baseline,
1176 &compile_lazy);
1177
1178#ifdef V8_ENABLE_SANDBOX
1179 // Validate the parameter count. This protects against an attacker swapping
1180 // the bytecode (or the dispatch handle) such that the parameter count of the
1181 // dispatch entry doesn't match the one of the BytecodeArray.
1182 // TODO(saelo): instead of this validation step, it would probably be nicer
1183 // if we could store the BytecodeArray directly in the dispatch entry and
1184 // load it from there. Then we can easily guarantee that the parameter count
1185 // of the entry matches the parameter count of the bytecode.
1188 __ LoadParameterCountFromJSDispatchTable(a6, dispatch_handle, a7);
1190 BytecodeArray::kParameterSizeOffset));
1191 __ SbxCheck(eq, AbortReason::kJSSignatureMismatch, a6, Operand(a7));
1192#endif // V8_ENABLE_SANDBOX
1193
1194 Label push_stack_frame;
1195 Register feedback_vector = a2;
1196 __ LoadFeedbackVector(feedback_vector, closure, a5, &push_stack_frame);
1197
1198#ifndef V8_JITLESS
1199#ifndef V8_ENABLE_LEAPTIERING
1200 // If feedback vector is valid, check for optimized code and update invocation
1201 // count.
1202
1203 // Check the tiering state.
1204 Label flags_need_processing;
1205 Register flags = t0;
1206 __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
1207 flags, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
1208 &flags_need_processing);
1209#endif // !V8_ENABLE_LEAPTIERING
1210
1211 ResetFeedbackVectorOsrUrgency(masm, feedback_vector, a5);
1212
1213 // Increment invocation count for the function.
1214 __ Ld_w(a5, FieldMemOperand(feedback_vector,
1215 FeedbackVector::kInvocationCountOffset));
1216 __ Add_w(a5, a5, Operand(1));
1217 __ St_w(a5, FieldMemOperand(feedback_vector,
1218 FeedbackVector::kInvocationCountOffset));
1219
1220 // Open a frame scope to indicate that there is a frame on the stack. The
1221 // MANUAL indicates that the scope shouldn't actually generate code to set up
1222 // the frame (that is done below).
1223#else
1224 // Note: By omitting the above code in jitless mode we also disable:
1225 // - kFlagsLogNextExecution: only used for logging/profiling; and
1226 // - kInvocationCountOffset: only used for tiering heuristics and code
1227 // coverage.
1228#endif // !V8_JITLESS
1229
1230 __ bind(&push_stack_frame);
1231 FrameScope frame_scope(masm, StackFrame::MANUAL);
1232 __ PushStandardFrame(closure);
1233
1234 // Load initial bytecode offset.
1237
1238 // Push bytecode array, Smi tagged bytecode array offset and the feedback
1239 // vector.
1241 __ Push(kInterpreterBytecodeArrayRegister, a5, feedback_vector);
1242
1243 // Allocate the local and temporary register file on the stack.
1244 Label stack_overflow;
1245 {
1246 // Load frame size (word) from the BytecodeArray object.
1248 BytecodeArray::kFrameSizeOffset));
1249
1250 // Do a stack check to ensure we don't go over the limit.
1251 __ Sub_d(a6, sp, Operand(a5));
1252 __ LoadStackLimit(a2, MacroAssembler::StackLimitKind::kRealStackLimit);
1253 __ Branch(&stack_overflow, lo, a6, Operand(a2));
1254
1255 // If ok, push undefined as the initial value for all register file entries.
1256 Label loop_header;
1257 Label loop_check;
1258 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1259 __ Branch(&loop_check);
1260 __ bind(&loop_header);
1261 // TODO(rmcilroy): Consider doing more than one push per loop iteration.
1263 // Continue loop if not done.
1264 __ bind(&loop_check);
1265 __ Sub_d(a5, a5, Operand(kSystemPointerSize));
1266 __ Branch(&loop_header, ge, a5, Operand(zero_reg));
1267 }
1268
1269 // If the bytecode array has a valid incoming new target or generator object
1270 // register, initialize it with incoming value which was passed in a3.
1271 Label no_incoming_new_target_or_generator_register;
1272 __ Ld_w(a5, FieldMemOperand(
1274 BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
1275 __ Branch(&no_incoming_new_target_or_generator_register, eq, a5,
1276 Operand(zero_reg));
1277 __ Alsl_d(a5, a5, fp, kSystemPointerSizeLog2);
1278 __ St_d(a3, MemOperand(a5, 0));
1279 __ bind(&no_incoming_new_target_or_generator_register);
1280
1281 // Perform interrupt stack check.
1282 // TODO(solanes): Merge with the real stack limit check above.
1283 Label stack_check_interrupt, after_stack_check_interrupt;
1284 __ LoadStackLimit(a5, MacroAssembler::StackLimitKind::kInterruptStackLimit);
1285 __ Branch(&stack_check_interrupt, lo, sp, Operand(a5));
1286 __ bind(&after_stack_check_interrupt);
1287
1288 // The accumulator is already loaded with undefined.
1289
1290 // Load the dispatch table into a register and dispatch to the bytecode
1291 // handler at the current bytecode offset.
1292 Label do_dispatch;
1293 __ bind(&do_dispatch);
1295 ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1298 __ Ld_bu(a7, MemOperand(t5, 0));
1303
1304 __ RecordComment("--- InterpreterEntryReturnPC point ---");
1306 masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(
1307 masm->pc_offset());
1308 } else {
1310 // Both versions must be the same up to this point, otherwise the builtins
1311 // will not be interchangeable.
1312 CHECK_EQ(
1313 masm->isolate()->heap()->interpreter_entry_return_pc_offset().value(),
1314 masm->pc_offset());
1315 }
1316
1317 // Any returns to the entry trampoline are either due to the return bytecode
1318 // or the interpreter tail calling a builtin and then a dispatch.
1319
1320 // Get bytecode array and bytecode offset from the stack frame.
1326
1327 // Either return, or advance to the next bytecode and dispatch.
1328 Label do_return;
1331 __ Ld_bu(a1, MemOperand(a1, 0));
1334 a5, &do_return);
1335 __ jmp(&do_dispatch);
1336
1337 __ bind(&do_return);
1338 // The return value is in a0.
1339 LeaveInterpreterFrame(masm, t0, t1);
1340 __ Jump(ra);
1341
1342 __ bind(&stack_check_interrupt);
1343 // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
1344 // for the call to the StackGuard.
1350 __ CallRuntime(Runtime::kStackGuard);
1351
1352 // After the call, restore the bytecode array, bytecode offset and accumulator
1353 // registers again. Also, restore the bytecode offset in the stack to its
1354 // previous value.
1359 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1360
1363
1364 __ jmp(&after_stack_check_interrupt);
1365
1366#ifndef V8_JITLESS
1367#ifndef V8_ENABLE_LEAPTIERING
1368 __ bind(&flags_need_processing);
1369 __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector);
1370#endif // !V8_ENABLE_LEAPTIERING
1371
1372 __ bind(&is_baseline);
1373 {
1374#ifndef V8_ENABLE_LEAPTIERING
1375 // Load the feedback vector from the closure.
1376 __ LoadTaggedField(
1377 feedback_vector,
1378 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1379 __ LoadTaggedField(
1380 feedback_vector,
1381 FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset));
1382
1383 Label install_baseline_code;
1384 // Check if feedback vector is valid. If not, call prepare for baseline to
1385 // allocate it.
1386 __ LoadTaggedField(
1387 t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
1388 __ Ld_hu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
1389 __ Branch(&install_baseline_code, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
1390
1391 // Check for a tiering state.
1392 __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
1393 flags, feedback_vector, CodeKind::BASELINE, &flags_need_processing);
1394
1395 // TODO(loong64, 42204201): This fastcase is difficult to support with the
1396 // sandbox as it requires getting write access to the dispatch table. See
1397 // `JSFunction::UpdateCode`. We might want to remove it for all
1398 // configurations as it does not seem to be performance sensitive.
1399
1400 // Load the baseline code into the closure.
1402 static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
1403 __ ReplaceClosureCodeWithOptimizedCode(a2, closure);
1404 __ JumpCodeObject(a2, kJSEntrypointTag);
1405
1406 __ bind(&install_baseline_code);
1407#endif // !V8_ENABLE_LEAPTIERING
1408
1409 __ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
1410 }
1411#endif // !V8_JITLESS
1412
1413 __ bind(&compile_lazy);
1414 __ GenerateTailCallToReturnedCode(Runtime::kCompileLazy);
1415 // Unreachable code.
1416 __ break_(0xCC);
1417
1418 __ bind(&stack_overflow);
1419 __ CallRuntime(Runtime::kThrowStackOverflow);
1420 // Unreachable code.
1421 __ break_(0xCC);
1422}
1423
1424static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
1425 Register start_address,
1426 Register scratch, Register scratch2) {
1427 // Find the address of the last argument.
1428 __ Sub_d(scratch, num_args, Operand(1));
1429 __ slli_d(scratch, scratch, kSystemPointerSizeLog2);
1430 __ Sub_d(start_address, start_address, scratch);
1431
1432 // Push the arguments.
1433 __ PushArray(start_address, num_args, scratch, scratch2,
1435}
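// Note: |start_address| points at the first argument and the interpreter
// lays out the following arguments at decreasing addresses, which is why the
// code above computes the last argument's address by subtracting
// (num_args - 1) slots before handing the range to PushArray.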
1436
1437// static
1439 MacroAssembler* masm, ConvertReceiverMode receiver_mode,
1442 // ----------- S t a t e -------------
1443 // -- a0 : the number of arguments
1444 // -- a2 : the address of the first argument to be pushed. Subsequent
1445 // arguments should be consecutive above this, in the same order as
1446 // they are to be pushed onto the stack.
1447 // -- a1 : the target to call (can be any Object).
1448 // -----------------------------------
1449 Label stack_overflow;
1451 // The spread argument should not be pushed.
1452 __ Sub_d(a0, a0, Operand(1));
1453 }
1454
1455 if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1456 __ Sub_d(a3, a0, Operand(kJSArgcReceiverSlots));
1457 } else {
1458 __ mov(a3, a0);
1459 }
1460
1461 __ StackOverflowCheck(a3, a4, t0, &stack_overflow);
1462
1463 // This function modifies a2, t0 and a4.
1464 GenerateInterpreterPushArgs(masm, a3, a2, a4, t0);
1465
1466 if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1467 __ PushRoot(RootIndex::kUndefinedValue);
1468 }
1469
1471 // Pass the spread in the register a2.
1472 // a2 already points to the penultimate argument; the spread
1473 // is below that.
1474 __ Ld_d(a2, MemOperand(a2, -kSystemPointerSize));
1475 }
1476
1477 // Call the target.
1479 __ TailCallBuiltin(Builtin::kCallWithSpread);
1480 } else {
1481 __ TailCallBuiltin(Builtins::Call(receiver_mode));
1482 }
1483
1484 __ bind(&stack_overflow);
1485 {
1486 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1487 // Unreachable code.
1488 __ break_(0xCC);
1489 }
1490}
1491
1492// static
1494 MacroAssembler* masm, InterpreterPushArgsMode mode) {
1495 // ----------- S t a t e -------------
1496 // -- a0 : argument count
1497 // -- a3 : new target
1498 // -- a1 : constructor to call
1499 // -- a2 : allocation site feedback if available, undefined otherwise.
1500 // -- a4 : address of the first argument
1501 // -----------------------------------
1502 Label stack_overflow;
1503 __ StackOverflowCheck(a0, a5, t0, &stack_overflow);
1504
1506 // The spread argument should not be pushed.
1507 __ Sub_d(a0, a0, Operand(1));
1508 }
1509
1510 Register argc_without_receiver = a6;
1511 __ Sub_d(argc_without_receiver, a0, Operand(kJSArgcReceiverSlots));
1512
1513 // Push the arguments. This function modifies t0, a4 and a5.
1514 GenerateInterpreterPushArgs(masm, argc_without_receiver, a4, a5, t0);
1515
1516 // Push a slot for the receiver.
1517 __ Push(zero_reg);
1518
1520 // Pass the spread in the register a2.
1521 // a4 already points to the penultimate argument, the spread
1522 // lies in the next interpreter register.
1523 __ Ld_d(a2, MemOperand(a4, -kSystemPointerSize));
1524 } else {
1525 __ AssertUndefinedOrAllocationSite(a2, t0);
1526 }
1527
1529 __ AssertFunction(a1);
1530
1531 // Tail call to the function-specific construct stub (still in the caller
1532 // context at this point).
1533 __ TailCallBuiltin(Builtin::kArrayConstructorImpl);
1534 } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1535 // Call the constructor with a0, a1, and a3 unmodified.
1536 __ TailCallBuiltin(Builtin::kConstructWithSpread);
1537 } else {
1539 // Call the constructor with a0, a1, and a3 unmodified.
1540 __ TailCallBuiltin(Builtin::kConstruct);
1541 }
1542
1543 __ bind(&stack_overflow);
1544 {
1545 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1546 // Unreachable code.
1547 __ break_(0xCC);
1548 }
1549}
1550
1551// static
1553 MacroAssembler* masm, ForwardWhichFrame which_frame) {
1554 // ----------- S t a t e -------------
1555 // -- a3 : new target
1556 // -- a1 : constructor to call
1557 // -----------------------------------
1558 Label stack_overflow;
1559
1560 // Load the frame pointer into a4.
1561 switch (which_frame) {
1563 __ Move(a4, fp);
1564 break;
1567 break;
1568 }
1569
1570 // Load the argument count into a0.
1572 __ StackOverflowCheck(a0, a5, t0, &stack_overflow);
1573
1574 // Point a4 to the base of the argument list to forward, excluding the
1575 // receiver.
1576 __ Add_d(a4, a4,
1579
1580 // Copy arguments on the stack. a5 and t0 are scratch registers.
1581 Register argc_without_receiver = a6;
1582 __ Sub_d(argc_without_receiver, a0, Operand(kJSArgcReceiverSlots));
1583 __ PushArray(a4, argc_without_receiver, a5, t0);
1584
1585 // Push a slot for the receiver.
1586 __ Push(zero_reg);
1587
1588 // Call the constructor with a0, a1, and a3 unmodified.
1589 __ TailCallBuiltin(Builtin::kConstruct);
1590
1591 __ bind(&stack_overflow);
1592 {
1593 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1594 __ break_(0xCC);
1595 }
1596}
1597
1598namespace {
1599
1600void NewImplicitReceiver(MacroAssembler* masm) {
1601 // ----------- S t a t e -------------
1602 // -- a0 : the number of arguments
1603 // -- a1 : constructor to call (checked to be a JSFunction)
1604 // -- a3 : new target
1605 //
1606 // Stack:
1607 // -- Implicit Receiver
1608 // -- [arguments without receiver]
1609 // -- Implicit Receiver
1610 // -- Context
1611 // -- FastConstructMarker
1612 // -- FramePointer
1613 // -----------------------------------
1614 Register implicit_receiver = a4;
1615
1616 // Save live registers.
1617 __ SmiTag(a0);
1618 __ Push(a0, a1, a3);
1619 __ CallBuiltin(Builtin::kFastNewObject);
1620 // Save result.
1621 __ Move(implicit_receiver, a0);
1622 // Restore live registers.
1623 __ Pop(a0, a1, a3);
1624 __ SmiUntag(a0);
1625
1626 // Patch implicit receiver (in arguments)
1627 __ StoreReceiver(implicit_receiver);
1628 // Patch second implicit (in construct frame)
1629 __ St_d(implicit_receiver,
1631
1632 // Restore context.
1634}
1635
1636} // namespace
1637
1638// static
1639void Builtins::Generate_InterpreterPushArgsThenFastConstructFunction(
1640 MacroAssembler* masm) {
1641 // ----------- S t a t e -------------
1642 // -- a0 : argument count
1643 // -- a1 : constructor to call (checked to be a JSFunction)
1644 // -- a3 : new target
1645 // -- a4 : address of the first argument
1646 // -- cp : context pointer
1647 // -----------------------------------
1648 __ AssertFunction(a1);
1649
1650 // Check if target has a [[Construct]] internal method.
1651 Label non_constructor;
1652 __ LoadMap(a2, a1);
1653 __ Ld_bu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
1654 __ And(a2, a2, Operand(Map::Bits1::IsConstructorBit::kMask));
1655 __ Branch(&non_constructor, eq, a2, Operand(zero_reg));
1656
1657 // Add a stack check before pushing arguments.
1658 Label stack_overflow;
1659 __ StackOverflowCheck(a0, a2, a5, &stack_overflow);
1660
1661 // Enter a construct frame.
1662 FrameScope scope(masm, StackFrame::MANUAL);
1663 __ EnterFrame(StackFrame::FAST_CONSTRUCT);
1664
1665 // Implicit receiver stored in the construct frame.
1666 __ LoadRoot(a2, RootIndex::kTheHoleValue);
1667 __ Push(cp, a2);
1668
1669 // Push arguments + implicit receiver.
1670 Register argc_without_receiver = a7;
1671 __ Sub_d(argc_without_receiver, a0, Operand(kJSArgcReceiverSlots));
1672 GenerateInterpreterPushArgs(masm, argc_without_receiver, a4, a5, a6);
1673 __ Push(a2);
1674
1675 // Check if it is a builtin call.
1676 Label builtin_call;
1677 __ LoadTaggedField(
1678 a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
1679 __ Ld_wu(a2, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
1680 __ And(a5, a2, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
1681 __ Branch(&builtin_call, ne, a5, Operand(zero_reg));
1682
1683 // Check if we need to create an implicit receiver.
1684 Label not_create_implicit_receiver;
1685 __ DecodeField<SharedFunctionInfo::FunctionKindBits>(a2);
1686 __ JumpIfIsInRange(
1687 a2, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
1688 static_cast<uint32_t>(FunctionKind::kDerivedConstructor),
1689 &not_create_implicit_receiver);
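  // Derived class constructors skip the allocation: their |this| stays the
  // hole until super() runs, so no implicit receiver is created here.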
1690 NewImplicitReceiver(masm);
1691 __ bind(&not_create_implicit_receiver);
1692
1693 // Call the function.
1694 __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
1695
1696 // ----------- S t a t e -------------
1697 // -- a0 constructor result
1698 //
1699 // Stack:
1700 // -- Implicit Receiver
1701 // -- Context
1702 // -- FastConstructMarker
1703 // -- FramePointer
1704 // -----------------------------------
1705
1706 // Store offset of return address for deoptimizer.
1707 masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
1708 masm->pc_offset());
1709
1710 // If the result is an object (in the ECMA sense), we should get rid
1711 // of the receiver and use the result; see ECMA-262 section 13.2.2-7
1712 // on page 74.
1713 Label use_receiver, do_throw, leave_and_return, check_receiver;
1714
1715 // If the result is undefined, we jump out to using the implicit receiver.
1716 __ JumpIfNotRoot(a0, RootIndex::kUndefinedValue, &check_receiver);
1717
1718 // Throw away the result of the constructor invocation and use the
1719 // on-stack receiver as the result.
1720 __ bind(&use_receiver);
1721 __ Ld_d(a0,
1723 __ JumpIfRoot(a0, RootIndex::kTheHoleValue, &do_throw);
1724
1725 __ bind(&leave_and_return);
1726 // Leave construct frame.
1727 __ LeaveFrame(StackFrame::FAST_CONSTRUCT);
1728 __ Ret();
1729
1730 // Otherwise we do a smi check and fall through to check if the return value
1731 // is a valid receiver.
1732 __ bind(&check_receiver);
1733
1734 // If the result is a smi, it is *not* an object in the ECMA sense.
1735 __ JumpIfSmi(a0, &use_receiver);
1736
1737 // Check if the type of the result is not an object in the ECMA sense.
1738 __ JumpIfJSAnyIsNotPrimitive(a0, a4, &leave_and_return);
1739 __ Branch(&use_receiver);
1740
1741 __ bind(&builtin_call);
1742 // TODO(victorgomes): Check the possibility to turn this into a tailcall.
1743 __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
1744 __ LeaveFrame(StackFrame::FAST_CONSTRUCT);
1745 __ Ret();
1746
1747 __ bind(&do_throw);
1748 // Restore the context from the frame.
1750 __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
1751 // Unreachable code.
1752 __ break_(0xCC);
1753
1754 __ bind(&stack_overflow);
1755 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1756 // Unreachable code.
1757 __ break_(0xCC);
1758
1759 // Called Construct on an Object that doesn't have a [[Construct]] internal
1760 // method.
1761 __ bind(&non_constructor);
1762 __ TailCallBuiltin(Builtin::kConstructedNonConstructable);
1763}
1764
1765static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
1766 // Set the return address to the correct point in the interpreter entry
1767 // trampoline.
1768 Label builtin_trampoline, trampoline_loaded;
1769 Tagged<Smi> interpreter_entry_return_pc_offset(
1770 masm->isolate()->heap()->interpreter_entry_return_pc_offset());
1771 DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
1772
1773 // If the SFI function_data is an InterpreterData, the function will have a
1774 // custom copy of the interpreter entry trampoline for profiling. If so,
1775 // get the custom trampoline, otherwise grab the entry address of the global
1776 // trampoline.
1778 __ LoadTaggedField(
1779 t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
1780 __ LoadTrustedPointerField(
1781 t0, FieldMemOperand(t0, SharedFunctionInfo::kTrustedFunctionDataOffset),
1783 __ JumpIfObjectType(&builtin_trampoline, ne, t0, INTERPRETER_DATA_TYPE,
1785
1786 __ LoadProtectedPointerField(
1787 t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
1788 __ LoadCodeInstructionStart(t0, t0, kJSEntrypointTag);
1789 __ Branch(&trampoline_loaded);
1790
1791 __ bind(&builtin_trampoline);
1792 __ li(t0, ExternalReference::
1793 address_of_interpreter_entry_trampoline_instruction_start(
1794 masm->isolate()));
1795 __ Ld_d(t0, MemOperand(t0, 0));
1796
1797 __ bind(&trampoline_loaded);
1798 __ Add_d(ra, t0, Operand(interpreter_entry_return_pc_offset.value()));
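  // interpreter_entry_return_pc_offset is the offset, within the interpreter
  // entry trampoline, of the instruction right after its call into the
  // dispatch loop. Using trampoline_start + that offset as the return address
  // means that when the interpreted function eventually returns, it lands on
  // the trampoline's normal exit sequence.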
1799
1800 // Initialize the dispatch table register.
1802 ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1803
1804 // Get the bytecode array pointer from the frame.
1807
1808 if (v8_flags.debug_code) {
1809 // Check function data field is actually a BytecodeArray object.
1811 __ Assert(ne,
1812 AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
1813 kScratchReg, Operand(zero_reg));
1814 __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
1815 __ Assert(eq,
1816 AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
1817 a1, Operand(BYTECODE_ARRAY_TYPE));
1818 }
1819
1820 // Get the target bytecode offset from the frame.
1823
1824 if (v8_flags.debug_code) {
1825 Label okay;
1828 // Unreachable code.
1829 __ break_(0xCC);
1830 __ bind(&okay);
1831 }
1832
1833 // Dispatch to the target bytecode.
1836 __ Ld_bu(a7, MemOperand(a1, 0));
1840}
1841
1842void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
1843 // Advance the current bytecode offset stored within the given interpreter
1844 // stack frame. This simulates what all bytecode handlers do upon completion
1845 // of the underlying operation.
1851
1852 Label enter_bytecode, function_entry_bytecode;
1853 __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
1856
1857 // Load the current bytecode.
1860 __ Ld_bu(a1, MemOperand(a1, 0));
1861
1862 // Advance to the next bytecode.
1863 Label if_return;
1866 a4, &if_return);
1867
1868 __ bind(&enter_bytecode);
1869 // Convert new bytecode offset to a Smi and save in the stackframe.
1872
1874
1875 __ bind(&function_entry_bytecode);
1876 // If the code deoptimizes during the implicit function entry stack interrupt
1877 // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
1878 // not a valid bytecode offset. Detect this case and advance to the first
1879 // actual bytecode.
1882 __ Branch(&enter_bytecode);
1883
1884 // We should never take the if_return path.
1885 __ bind(&if_return);
1886 __ Abort(AbortReason::kInvalidBytecodeAdvance);
1887}
1888
1889void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
1891}
1892
1893namespace {
1894void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
1895 bool javascript_builtin,
1896 bool with_result) {
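  // The deoptimizer has already built a continuation frame: spill slots for
  // every allocatable general register, the builtin index stored as a Smi,
  // the fixed frame header, and the saved return address. The code below
  // restores the registers, fp, and ra from that frame and then jumps to the
  // builtin looked up by index.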
1897 const RegisterConfiguration* config(RegisterConfiguration::Default());
1898 int allocatable_register_count = config->num_allocatable_general_registers();
1899 UseScratchRegisterScope temps(masm);
1900 Register scratch = temps.Acquire();
1901 if (with_result) {
1902 if (javascript_builtin) {
1903 __ mov(scratch, a0);
1904 } else {
1905 // Overwrite the hole inserted by the deoptimizer with the return value
1906 // from the LAZY deopt point.
1907 __ St_d(a0,
1908 MemOperand(
1909 sp, config->num_allocatable_general_registers() *
1912 }
1913 }
1914 for (int i = allocatable_register_count - 1; i >= 0; --i) {
1915 int code = config->GetAllocatableGeneralCode(i);
1916 __ Pop(Register::from_code(code));
1917 if (javascript_builtin && code == kJavaScriptCallArgCountRegister.code()) {
1919 }
1920 }
1921
1922 if (with_result && javascript_builtin) {
1923 // Overwrite the hole inserted by the deoptimizer with the return value from
 1924 // the LAZY deopt point. a0 contains the arguments count, the return value
1925 // from LAZY is always the last argument.
1926 constexpr int return_value_offset =
1929 __ Add_d(a0, a0, Operand(return_value_offset));
1930 __ Alsl_d(t0, a0, sp, kSystemPointerSizeLog2);
1931 __ St_d(scratch, MemOperand(t0, 0));
1932 // Recover arguments count.
1933 __ Sub_d(a0, a0, Operand(return_value_offset));
1934 }
1935
1936 __ Ld_d(
1937 fp,
1939 // Load builtin index (stored as a Smi) and use it to get the builtin start
1940 // address from the builtins table.
1941 __ Pop(t0);
1942 __ Add_d(sp, sp,
1944 __ Pop(ra);
1945 __ LoadEntryFromBuiltinIndex(t0, t0);
1946 __ Jump(t0);
1947}
1948} // namespace
1949
1950void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
1951 Generate_ContinueToBuiltinHelper(masm, false, false);
1952}
1953
1954void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
1955 MacroAssembler* masm) {
1956 Generate_ContinueToBuiltinHelper(masm, false, true);
1957}
1958
1959void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
1960 Generate_ContinueToBuiltinHelper(masm, true, false);
1961}
1962
1963void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
1964 MacroAssembler* masm) {
1965 Generate_ContinueToBuiltinHelper(masm, true, true);
1966}
1967
1968void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
1969 {
1970 FrameScope scope(masm, StackFrame::INTERNAL);
1971 __ CallRuntime(Runtime::kNotifyDeoptimized);
1972 }
1973
1975 __ Ld_d(a0, MemOperand(sp, 0 * kSystemPointerSize));
1976 __ Add_d(sp, sp, Operand(1 * kSystemPointerSize)); // Remove state.
1977 __ Ret();
1978}
1979
1980namespace {
1981
1982void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
1983 Operand offset = Operand(zero_reg)) {
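  // entry_address + offset is the OSR entry inside the optimized code (the
  // loop that triggered OSR); placing it in ra and executing Ret transfers
  // control there while leaving the current frame in place for the optimized
  // code to take over.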
1984 __ Add_d(ra, entry_address, offset);
1985 // And "return" to the OSR entry point of the function.
1986 __ Ret();
1987}
1988
1989enum class OsrSourceTier {
1990 kInterpreter,
1991 kBaseline,
1992};
1993
1994void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
1995 Register maybe_target_code,
1996 Register expected_param_count) {
1997 Label jump_to_optimized_code;
1998 {
1999 // If maybe_target_code is not null, no need to call into runtime. A
2000 // precondition here is: if maybe_target_code is an InstructionStream
2001 // object, it must NOT be marked_for_deoptimization (callers must ensure
2002 // this).
2003 __ CompareTaggedAndBranch(&jump_to_optimized_code, ne, maybe_target_code,
2004 Operand(Smi::zero()));
2005 }
2006
2007 ASM_CODE_COMMENT(masm);
2008 {
2009 FrameScope scope(masm, StackFrame::INTERNAL);
2010 __ Push(expected_param_count);
2011 __ CallRuntime(Runtime::kCompileOptimizedOSR);
2012 __ Pop(expected_param_count);
2013 }
2014
2015 // If the code object is null, just return to the caller.
2016 __ CompareTaggedAndBranch(&jump_to_optimized_code, ne, maybe_target_code,
2017 Operand(Smi::zero()));
2018 __ Ret();
2019
2020 __ bind(&jump_to_optimized_code);
2021
2022 const Register scratch(a2);
2023 CHECK(!AreAliased(maybe_target_code, expected_param_count, scratch));
2024
2025 // OSR entry tracing.
2026 {
2027 Label next;
2028 __ li(scratch, ExternalReference::address_of_log_or_trace_osr());
2029 __ Ld_bu(scratch, MemOperand(scratch, 0));
2030 __ Branch(&next, eq, scratch, Operand(zero_reg));
2031
2032 {
2033 FrameScope scope(masm, StackFrame::INTERNAL);
2034 // Preserve arguments.
2035 __ Push(maybe_target_code, expected_param_count);
2036 __ CallRuntime(Runtime::kLogOrTraceOptimizedOSREntry, 0);
2037 __ Pop(maybe_target_code, expected_param_count);
2038 }
2039
2040 __ bind(&next);
2041 }
2042
2043 if (source == OsrSourceTier::kInterpreter) {
 2044 // Drop the handler frame that is sitting on top of the actual
 2045 // JavaScript frame. This is the case when OSR is triggered from bytecode.
2046 __ LeaveFrame(StackFrame::STUB);
2047 }
2048
 2049 // Check that we are actually jumping to an OSR code object. Among other
 2050 // things, this ensures that the object contains deoptimization data below.
2051 __ Ld_wu(scratch, FieldMemOperand(maybe_target_code, Code::kOsrOffsetOffset));
2052 __ Check(Condition::kNotEqual, AbortReason::kExpectedOsrCode, scratch,
2053 Operand(BytecodeOffset::None().ToInt()));
2054
2055 // Check the target has a matching parameter count. This ensures that the OSR
2056 // code will correctly tear down our frame when leaving.
2057 __ Ld_hu(scratch,
2058 FieldMemOperand(maybe_target_code, Code::kParameterCountOffset));
2059 __ SmiUntag(expected_param_count);
2060 __ SbxCheck(Condition::kEqual, AbortReason::kOsrUnexpectedStackSize, scratch,
2061 Operand(expected_param_count));
2062
2063 // Load deoptimization data from the code object.
2064 // <deopt_data> = <code>[#deoptimization_data_offset]
2065 __ LoadProtectedPointerField(
2066 scratch, MemOperand(maybe_target_code,
2067 Code::kDeoptimizationDataOrInterpreterDataOffset -
2069
2070 // Load the OSR entrypoint offset from the deoptimization data.
2071 // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
2072 __ SmiUntagField(
2076
2077 __ LoadCodeInstructionStart(maybe_target_code, maybe_target_code,
2079
2080 // Compute the target address = code_entry + osr_offset
2081 // <entry_addr> = <code_entry> + <osr_offset>
2082 Generate_OSREntry(masm, maybe_target_code, Operand(scratch));
2083}
2084} // namespace
2085
2086void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
2087 using D = OnStackReplacementDescriptor;
2088 static_assert(D::kParameterCount == 2);
2089 OnStackReplacement(masm, OsrSourceTier::kInterpreter,
2090 D::MaybeTargetCodeRegister(),
2091 D::ExpectedParameterCountRegister());
2092}
2093
2094void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
2095 using D = OnStackReplacementDescriptor;
2096 static_assert(D::kParameterCount == 2);
2097
2098 __ Ld_d(kContextRegister,
2100 OnStackReplacement(masm, OsrSourceTier::kBaseline,
2101 D::MaybeTargetCodeRegister(),
2102 D::ExpectedParameterCountRegister());
2103}
2104
2105// static
2106void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
2107 // ----------- S t a t e -------------
2108 // -- a0 : argc
2109 // -- sp[0] : receiver
 2110 // -- sp[8] : thisArg
 2111 // -- sp[16] : argArray
2112 // -----------------------------------
2113
2114 Register argc = a0;
2115 Register arg_array = a2;
2116 Register receiver = a1;
2117 Register this_arg = a5;
2118 Register undefined_value = a3;
2119 Register scratch = a4;
2120
2121 __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
2122
2123 // 1. Load receiver into a1, argArray into a2 (if present), remove all
2124 // arguments from the stack (including the receiver), and push thisArg (if
2125 // present) instead.
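  // Branch-free defaulting of optional arguments: scratch starts as the
  // number of arguments actually passed and is decremented as each one is
  // consumed; Movz(dst, undefined_value, scratch) replaces dst with undefined
  // exactly when scratch is zero, i.e. when the corresponding argument is
  // missing.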
2126 {
2127 __ Sub_d(scratch, argc, JSParameterCount(0));
2129 __ Ld_d(arg_array, MemOperand(sp, 2 * kSystemPointerSize));
2130 __ Movz(arg_array, undefined_value, scratch); // if argc == 0
2131 __ Movz(this_arg, undefined_value, scratch); // if argc == 0
2132 __ Sub_d(scratch, scratch, Operand(1));
2133 __ Movz(arg_array, undefined_value, scratch); // if argc == 1
2134 __ Ld_d(receiver, MemOperand(sp, 0));
2135 __ DropArgumentsAndPushNewReceiver(argc, this_arg);
2136 }
2137
2138 // ----------- S t a t e -------------
2139 // -- a2 : argArray
2140 // -- a1 : receiver
2141 // -- a3 : undefined root value
2142 // -- sp[0] : thisArg
2143 // -----------------------------------
2144
2145 // 2. We don't need to check explicitly for callable receiver here,
2146 // since that's the first thing the Call/CallWithArrayLike builtins
2147 // will do.
2148
2149 // 3. Tail call with no arguments if argArray is null or undefined.
2150 Label no_arguments;
2151 __ LoadRoot(scratch, RootIndex::kNullValue);
2152 __ CompareTaggedAndBranch(&no_arguments, eq, arg_array, Operand(scratch));
2153 __ CompareTaggedAndBranch(&no_arguments, eq, arg_array,
2154 Operand(undefined_value));
2155
2156 // 4a. Apply the receiver to the given argArray.
2157 __ TailCallBuiltin(Builtin::kCallWithArrayLike);
2158
2159 // 4b. The argArray is either null or undefined, so we tail call without any
2160 // arguments to the receiver.
2161 __ bind(&no_arguments);
2162 {
2163 __ li(a0, JSParameterCount(0));
2164 DCHECK(receiver == a1);
2165 __ TailCallBuiltin(Builtins::Call());
2166 }
2167}
2168
2169// static
2170void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
2171 // 1. Get the callable to call (passed as receiver) from the stack.
2172 { __ Pop(a1); }
2173
2174 // 2. Make sure we have at least one argument.
2175 // a0: actual number of arguments
2176 {
2177 Label done;
2178 __ Branch(&done, ne, a0, Operand(JSParameterCount(0)));
2179 __ PushRoot(RootIndex::kUndefinedValue);
2180 __ Add_d(a0, a0, Operand(1));
2181 __ bind(&done);
2182 }
2183
2184 // 3. Adjust the actual number of arguments.
2185 __ addi_d(a0, a0, -1);
2186
2187 // 4. Call the callable.
2188 __ TailCallBuiltin(Builtins::Call());
2189}
2190
2191void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
2192 // ----------- S t a t e -------------
2193 // -- a0 : argc
2194 // -- sp[0] : receiver
2195 // -- sp[8] : target (if argc >= 1)
2196 // -- sp[16] : thisArgument (if argc >= 2)
2197 // -- sp[24] : argumentsList (if argc == 3)
2198 // -----------------------------------
2199
2200 Register argc = a0;
2201 Register arguments_list = a2;
2202 Register target = a1;
2203 Register this_argument = a5;
2204 Register undefined_value = a3;
2205 Register scratch = a4;
2206
2207 __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
2208
2209 // 1. Load target into a1 (if present), argumentsList into a2 (if present),
2210 // remove all arguments from the stack (including the receiver), and push
2211 // thisArgument (if present) instead.
2212 {
 2213 // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
2214 // consistent state for a simple pop operation.
2215
2216 __ Sub_d(scratch, argc, Operand(JSParameterCount(0)));
2217 __ Ld_d(target, MemOperand(sp, kSystemPointerSize));
2218 __ Ld_d(this_argument, MemOperand(sp, 2 * kSystemPointerSize));
2219 __ Ld_d(arguments_list, MemOperand(sp, 3 * kSystemPointerSize));
2220 __ Movz(arguments_list, undefined_value, scratch); // if argc == 0
2221 __ Movz(this_argument, undefined_value, scratch); // if argc == 0
2222 __ Movz(target, undefined_value, scratch); // if argc == 0
2223 __ Sub_d(scratch, scratch, Operand(1));
2224 __ Movz(arguments_list, undefined_value, scratch); // if argc == 1
2225 __ Movz(this_argument, undefined_value, scratch); // if argc == 1
2226 __ Sub_d(scratch, scratch, Operand(1));
2227 __ Movz(arguments_list, undefined_value, scratch); // if argc == 2
2228
2229 __ DropArgumentsAndPushNewReceiver(argc, this_argument);
2230 }
2231
2232 // ----------- S t a t e -------------
2233 // -- a2 : argumentsList
2234 // -- a1 : target
2235 // -- a3 : undefined root value
2236 // -- sp[0] : thisArgument
2237 // -----------------------------------
2238
2239 // 2. We don't need to check explicitly for callable target here,
2240 // since that's the first thing the Call/CallWithArrayLike builtins
2241 // will do.
2242
2243 // 3. Apply the target to the given argumentsList.
2244 __ TailCallBuiltin(Builtin::kCallWithArrayLike);
2245}
2246
2247void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
2248 // ----------- S t a t e -------------
2249 // -- a0 : argc
2250 // -- sp[0] : receiver
2251 // -- sp[8] : target
2252 // -- sp[16] : argumentsList
2253 // -- sp[24] : new.target (optional)
2254 // -----------------------------------
2255
2256 Register argc = a0;
2257 Register arguments_list = a2;
2258 Register target = a1;
2259 Register new_target = a3;
2260 Register undefined_value = a4;
2261 Register scratch = a5;
2262
2263 __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
2264
2265 // 1. Load target into a1 (if present), argumentsList into a2 (if present),
2266 // new.target into a3 (if present, otherwise use target), remove all
2267 // arguments from the stack (including the receiver), and push thisArgument
2268 // (if present) instead.
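  // Same branch-free Movz defaulting as in Reflect.apply above; additionally,
  // new.target falls back to target unless it was explicitly passed as the
  // third argument.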
2269 {
 2270 // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
2271 // consistent state for a simple pop operation.
2272
2273 __ Sub_d(scratch, argc, Operand(JSParameterCount(0)));
2274 __ Ld_d(target, MemOperand(sp, kSystemPointerSize));
2275 __ Ld_d(arguments_list, MemOperand(sp, 2 * kSystemPointerSize));
2277 __ Movz(arguments_list, undefined_value, scratch); // if argc == 0
2278 __ Movz(new_target, undefined_value, scratch); // if argc == 0
2279 __ Movz(target, undefined_value, scratch); // if argc == 0
2280 __ Sub_d(scratch, scratch, Operand(1));
2281 __ Movz(arguments_list, undefined_value, scratch); // if argc == 1
2282 __ Movz(new_target, target, scratch); // if argc == 1
2283 __ Sub_d(scratch, scratch, Operand(1));
2284 __ Movz(new_target, target, scratch); // if argc == 2
2285
2286 __ DropArgumentsAndPushNewReceiver(argc, undefined_value);
2287 }
2288
2289 // ----------- S t a t e -------------
2290 // -- a2 : argumentsList
2291 // -- a1 : target
2292 // -- a3 : new.target
2293 // -- sp[0] : receiver (undefined)
2294 // -----------------------------------
2295
2296 // 2. We don't need to check explicitly for constructor target here,
2297 // since that's the first thing the Construct/ConstructWithArrayLike
2298 // builtins will do.
2299
2300 // 3. We don't need to check explicitly for constructor new.target here,
2301 // since that's the second thing the Construct/ConstructWithArrayLike
2302 // builtins will do.
2303
2304 // 4. Construct the target with the given new.target and argumentsList.
2305 __ TailCallBuiltin(Builtin::kConstructWithArrayLike);
2306}
2307
2308namespace {
2309
2310// Allocate new stack space for |count| arguments and shift all existing
2311// arguments already on the stack. |pointer_to_new_space_out| points to the
2312// first free slot on the stack to copy additional arguments to and
2313// |argc_in_out| is updated to include |count|.
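// Example: with |count| == 2 and existing arguments [receiver, a, b]
// (receiver at sp[0]), the stack becomes
//   sp -> [receiver, a, b, <free>, <free>]
// and |pointer_to_new_space_out| points at the first free slot.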
2314void Generate_AllocateSpaceAndShiftExistingArguments(
2315 MacroAssembler* masm, Register count, Register argc_in_out,
2316 Register pointer_to_new_space_out, Register scratch1, Register scratch2,
2317 Register scratch3) {
2318 DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1,
2319 scratch2));
2320 Register old_sp = scratch1;
2321 Register new_space = scratch2;
2322 __ mov(old_sp, sp);
2323 __ slli_d(new_space, count, kSystemPointerSizeLog2);
2324 __ Sub_d(sp, sp, Operand(new_space));
2325
2326 Register end = scratch2;
2327 Register value = scratch3;
2328 Register dest = pointer_to_new_space_out;
2329 __ mov(dest, sp);
2330 __ Alsl_d(end, argc_in_out, old_sp, kSystemPointerSizeLog2);
2331 Label loop, done;
2332 __ Branch(&done, ge, old_sp, Operand(end));
2333 __ bind(&loop);
2334 __ Ld_d(value, MemOperand(old_sp, 0));
2335 __ St_d(value, MemOperand(dest, 0));
2336 __ Add_d(old_sp, old_sp, Operand(kSystemPointerSize));
2337 __ Add_d(dest, dest, Operand(kSystemPointerSize));
2338 __ Branch(&loop, lt, old_sp, Operand(end));
2339 __ bind(&done);
2340
2341 // Update total number of arguments.
2342 __ Add_d(argc_in_out, argc_in_out, count);
2343}
2344
2345} // namespace
2346
2347// static
2348void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
2349 Builtin target_builtin) {
2350 // ----------- S t a t e -------------
2351 // -- a1 : target
2352 // -- a0 : number of parameters on the stack
2353 // -- a2 : arguments list (a FixedArray)
2354 // -- a4 : len (number of elements to push from args)
2355 // -- a3 : new.target (for [[Construct]])
2356 // -----------------------------------
2357 if (v8_flags.debug_code) {
2358 // Allow a2 to be a FixedArray, or a FixedDoubleArray if a4 == 0.
2359 Label ok, fail;
2360 __ AssertNotSmi(a2);
2361 __ GetObjectType(a2, a5, a5);
2362 __ Branch(&ok, eq, a5, Operand(FIXED_ARRAY_TYPE));
2363 __ Branch(&fail, ne, a5, Operand(FIXED_DOUBLE_ARRAY_TYPE));
2364 __ Branch(&ok, eq, a4, Operand(zero_reg));
2365 // Fall through.
2366 __ bind(&fail);
2367 __ Abort(AbortReason::kOperandIsNotAFixedArray);
2368
2369 __ bind(&ok);
2370 }
2371
2372 Register args = a2;
2373 Register len = a4;
2374
2375 // Check for stack overflow.
2376 Label stack_overflow;
2377 __ StackOverflowCheck(len, kScratchReg, a5, &stack_overflow);
2378
2379 // Move the arguments already in the stack,
2380 // including the receiver and the return address.
2381 // a4: Number of arguments to make room for.
2382 // a0: Number of arguments already on the stack.
2383 // a7: Points to first free slot on the stack after arguments were shifted.
2384 Generate_AllocateSpaceAndShiftExistingArguments(masm, a4, a0, a7, a6, t0, t1);
2385
2386 // Push arguments onto the stack (thisArgument is already on the stack).
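  // Elements of a holey array may be the-hole; the loop below replaces any
  // hole with undefined before pushing, which is what spreading a holey
  // array is required to produce.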
2387 {
2388 Label done, push, loop;
2389 Register src = a6;
2390 Register scratch = len;
2391
2392 __ addi_d(src, args, OFFSET_OF_DATA_START(FixedArray) - kHeapObjectTag);
2393 __ Branch(&done, eq, len, Operand(zero_reg));
2394 __ slli_d(scratch, len, kSystemPointerSizeLog2);
2395 __ Sub_d(scratch, sp, Operand(scratch));
2396#if !V8_STATIC_ROOTS_BOOL
2397 // We do not use the Branch(reg, RootIndex) macro without static roots,
2398 // as it would do a LoadRoot behind the scenes and we want to avoid that
2399 // in a loop.
2400 __ LoadTaggedRoot(t1, RootIndex::kTheHoleValue);
2401#endif // !V8_STATIC_ROOTS_BOOL
2402 __ bind(&loop);
2403 __ LoadTaggedField(a5, MemOperand(src, 0));
2404 __ addi_d(src, src, kTaggedSize);
2405#if V8_STATIC_ROOTS_BOOL
2406 __ Branch(&push, ne, a5, RootIndex::kTheHoleValue);
2407#else
2408 __ slli_w(t0, a5, 0);
2409 __ Branch(&push, ne, t0, Operand(t1));
2410#endif
2411 __ LoadRoot(a5, RootIndex::kUndefinedValue);
2412 __ bind(&push);
2413 __ St_d(a5, MemOperand(a7, 0));
2414 __ Add_d(a7, a7, Operand(kSystemPointerSize));
2415 __ Add_d(scratch, scratch, Operand(kSystemPointerSize));
2416 __ Branch(&loop, ne, scratch, Operand(sp));
2417 __ bind(&done);
2418 }
2419
2420 // Tail-call to the actual Call or Construct builtin.
2421 __ TailCallBuiltin(target_builtin);
2422
2423 __ bind(&stack_overflow);
2424 __ TailCallRuntime(Runtime::kThrowStackOverflow);
2425}
2426
2427// static
2428void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
2429 CallOrConstructMode mode,
2430 Builtin target_builtin) {
2431 // ----------- S t a t e -------------
2432 // -- a0 : the number of arguments
2433 // -- a3 : the new.target (for [[Construct]] calls)
2434 // -- a1 : the target to call (can be any Object)
2435 // -- a2 : start index (to support rest parameters)
2436 // -----------------------------------
2437
2438 // Check if new.target has a [[Construct]] internal method.
2439 if (mode == CallOrConstructMode::kConstruct) {
2440 Label new_target_constructor, new_target_not_constructor;
2441 __ JumpIfSmi(a3, &new_target_not_constructor);
2442 __ LoadTaggedField(t1, FieldMemOperand(a3, HeapObject::kMapOffset));
2443 __ Ld_bu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
2444 __ And(t1, t1, Operand(Map::Bits1::IsConstructorBit::kMask));
2445 __ Branch(&new_target_constructor, ne, t1, Operand(zero_reg));
2446 __ bind(&new_target_not_constructor);
2447 {
2448 FrameScope scope(masm, StackFrame::MANUAL);
2449 __ EnterFrame(StackFrame::INTERNAL);
2450 __ Push(a3);
2451 __ CallRuntime(Runtime::kThrowNotConstructor);
2452 }
2453 __ bind(&new_target_constructor);
2454 }
2455
2456 Label stack_done, stack_overflow;
2458 __ Sub_d(a7, a7, Operand(kJSArgcReceiverSlots));
2459 __ Sub_d(a7, a7, a2);
2460 __ Branch(&stack_done, le, a7, Operand(zero_reg));
2461 {
2462 // Check for stack overflow.
2463 __ StackOverflowCheck(a7, a4, a5, &stack_overflow);
2464
2465 // Forward the arguments from the caller frame.
2466
2467 // Point to the first argument to copy (skipping the receiver).
2468 __ Add_d(a6, fp,
2471 __ Alsl_d(a6, a2, a6, kSystemPointerSizeLog2);
2472
2473 // Move the arguments already in the stack,
2474 // including the receiver and the return address.
2475 // a7: Number of arguments to make room for.
2476 // a0: Number of arguments already on the stack.
2477 // a2: Points to first free slot on the stack after arguments were shifted.
2478 Generate_AllocateSpaceAndShiftExistingArguments(masm, a7, a0, a2, t0, t1,
2479 t2);
2480
2481 // Copy arguments from the caller frame.
2482 // TODO(victorgomes): Consider using forward order as potentially more cache
2483 // friendly.
2484 {
2485 Label loop;
2486 __ bind(&loop);
2487 {
2488 __ Sub_w(a7, a7, Operand(1));
2489 __ Alsl_d(t0, a7, a6, kSystemPointerSizeLog2);
2490 __ Ld_d(kScratchReg, MemOperand(t0, 0));
2491 __ Alsl_d(t0, a7, a2, kSystemPointerSizeLog2);
2492 __ St_d(kScratchReg, MemOperand(t0, 0));
2493 __ Branch(&loop, ne, a7, Operand(zero_reg));
2494 }
2495 }
2496 }
2497 __ bind(&stack_done);
2498 // Tail-call to the actual Call or Construct builtin.
2499 __ TailCallBuiltin(target_builtin);
2500
2501 __ bind(&stack_overflow);
2502 __ TailCallRuntime(Runtime::kThrowStackOverflow);
2503}
2504
2505// static
2506void Builtins::Generate_CallFunction(MacroAssembler* masm,
2507 ConvertReceiverMode mode) {
2508 // ----------- S t a t e -------------
2509 // -- a0 : the number of arguments
2510 // -- a1 : the function to call (checked to be a JSFunction)
2511 // -----------------------------------
2512 __ AssertFunction(a1);
2513
2514 __ LoadTaggedField(
2515 a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2516
2517 // Enter the context of the function; ToObject has to run in the function
2518 // context, and we also need to take the global proxy from the function
2519 // context in case of conversion.
2520 __ LoadTaggedField(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
2521 // We need to convert the receiver for non-native sloppy mode functions.
2522 Label done_convert;
2523 __ Ld_wu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
2524 __ And(kScratchReg, a3,
2525 Operand(SharedFunctionInfo::IsNativeBit::kMask |
2526 SharedFunctionInfo::IsStrictBit::kMask));
2527 __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg));
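  // Receiver conversion for sloppy-mode, non-native functions
  // (OrdinaryCallBindThis): null and undefined become the global proxy,
  // and any other primitive receiver is boxed via ToObject.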
2528 {
2529 // ----------- S t a t e -------------
2530 // -- a0 : the number of arguments
2531 // -- a1 : the function to call (checked to be a JSFunction)
2532 // -- a2 : the shared function info.
2533 // -- cp : the function context.
2534 // -----------------------------------
2535
2537 // Patch receiver to global proxy.
2538 __ LoadGlobalProxy(a3);
2539 } else {
2540 Label convert_to_object, convert_receiver;
2541 __ LoadReceiver(a3);
2542 __ JumpIfSmi(a3, &convert_to_object);
2543 __ JumpIfJSAnyIsNotPrimitive(a3, a4, &done_convert);
2545 Label convert_global_proxy;
2546 __ JumpIfRoot(a3, RootIndex::kUndefinedValue, &convert_global_proxy);
2547 __ JumpIfNotRoot(a3, RootIndex::kNullValue, &convert_to_object);
2548 __ bind(&convert_global_proxy);
2549 {
2550 // Patch receiver to global proxy.
2551 __ LoadGlobalProxy(a3);
2552 }
2553 __ Branch(&convert_receiver);
2554 }
2555 __ bind(&convert_to_object);
2556 {
2557 // Convert receiver using ToObject.
2558 // TODO(bmeurer): Inline the allocation here to avoid building the frame
2559 // in the fast case? (fall back to AllocateInNewSpace?)
2560 FrameScope scope(masm, StackFrame::INTERNAL);
2561 __ SmiTag(a0);
2562 __ Push(a0, a1);
2563 __ mov(a0, a3);
2564 __ Push(cp);
2565 __ CallBuiltin(Builtin::kToObject);
2566 __ Pop(cp);
2567 __ mov(a3, a0);
2568 __ Pop(a0, a1);
2569 __ SmiUntag(a0);
2570 }
2571 __ LoadTaggedField(
2572 a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2573 __ bind(&convert_receiver);
2574 }
2575 __ StoreReceiver(a3);
2576 }
2577 __ bind(&done_convert);
2578
2579 // ----------- S t a t e -------------
2580 // -- a0 : the number of arguments
2581 // -- a1 : the function to call (checked to be a JSFunction)
2582 // -- a2 : the shared function info.
2583 // -- cp : the function context.
2584 // -----------------------------------
2585
2586#ifdef V8_ENABLE_LEAPTIERING
2587 __ InvokeFunctionCode(a1, no_reg, a0, InvokeType::kJump);
2588#else
2589 __ Ld_hu(
2590 a2, FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
2591 __ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump);
2592#endif // V8_ENABLE_LEAPTIERING
2593}
2594
2595// static
2596void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
2597 // ----------- S t a t e -------------
2598 // -- a0 : the number of arguments
2599 // -- a1 : the function to call (checked to be a JSBoundFunction)
2600 // -----------------------------------
2601 __ AssertBoundFunction(a1);
2602
2603 // Patch the receiver to [[BoundThis]].
2604 {
2605 __ LoadTaggedField(t0,
2606 FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
2607 __ StoreReceiver(t0);
2608 }
2609
2610 // Load [[BoundArguments]] into a2 and length of that into a4.
2611 __ LoadTaggedField(
2612 a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
2613 __ SmiUntagField(a4, FieldMemOperand(a2, offsetof(FixedArray, length_)));
2614
2615 // ----------- S t a t e -------------
2616 // -- a0 : the number of arguments
2617 // -- a1 : the function to call (checked to be a JSBoundFunction)
2618 // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
2619 // -- a4 : the number of [[BoundArguments]]
2620 // -----------------------------------
2621
2622 // Reserve stack space for the [[BoundArguments]].
2623 {
2624 Label done;
2625 __ slli_d(a5, a4, kSystemPointerSizeLog2);
2626 __ Sub_d(t0, sp, Operand(a5));
2627 // Check the stack for overflow. We are not trying to catch interruptions
2628 // (i.e. debug break and preemption) here, so check the "real stack limit".
2629 __ LoadStackLimit(kScratchReg,
2630 MacroAssembler::StackLimitKind::kRealStackLimit);
2631 __ Branch(&done, hs, t0, Operand(kScratchReg));
2632 {
2633 FrameScope scope(masm, StackFrame::MANUAL);
2634 __ EnterFrame(StackFrame::INTERNAL);
2635 __ CallRuntime(Runtime::kThrowStackOverflow);
2636 }
2637 __ bind(&done);
2638 }
2639
2640 // Pop receiver.
2641 __ Pop(t0);
2642
2643 // Push [[BoundArguments]].
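  // The receiver was popped above, so the loop pushes the bound arguments
  // from last to first; once the receiver is pushed back, the stack reads
  // receiver, bound arguments in order, then the original call arguments.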
2644 {
2645 Label loop, done_loop;
2646 __ SmiUntagField(a4, FieldMemOperand(a2, offsetof(FixedArray, length_)));
2647 __ Add_d(a0, a0, Operand(a4));
2648 __ Add_d(a2, a2,
2649 Operand(OFFSET_OF_DATA_START(FixedArray) - kHeapObjectTag));
2650 __ bind(&loop);
2651 __ Sub_d(a4, a4, Operand(1));
2652 __ Branch(&done_loop, lt, a4, Operand(zero_reg));
2653 __ Alsl_d(a5, a4, a2, kTaggedSizeLog2);
2654 __ LoadTaggedField(kScratchReg, MemOperand(a5, 0));
2655 __ Push(kScratchReg);
2656 __ Branch(&loop);
2657 __ bind(&done_loop);
2658 }
2659
2660 // Push receiver.
2661 __ Push(t0);
2662
2663 // Call the [[BoundTargetFunction]] via the Call builtin.
2664 __ LoadTaggedField(
2665 a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2666 __ TailCallBuiltin(Builtins::Call());
2667}
2668
2669// static
2670void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
2671 // ----------- S t a t e -------------
2672 // -- a0 : the number of arguments
2673 // -- a1 : the target to call (can be any Object).
2674 // -----------------------------------
2675
2676 Register target = a1;
2677 Register map = t1;
2678 Register instance_type = t2;
2679 Register scratch = t3;
2680 DCHECK(!AreAliased(a0, target, map, instance_type, scratch));
2681
2682 Label non_callable, class_constructor;
2683 __ JumpIfSmi(target, &non_callable);
2684 __ LoadMap(map, target);
2685 __ GetInstanceTypeRange(map, instance_type, FIRST_CALLABLE_JS_FUNCTION_TYPE,
2686 scratch);
2687 __ TailCallBuiltin(Builtins::CallFunction(mode), ls, scratch,
2690 __ TailCallBuiltin(Builtin::kCallBoundFunction, eq, instance_type,
2691 Operand(JS_BOUND_FUNCTION_TYPE));
2692
2693 // Check if target has a [[Call]] internal method.
2694 {
2695 Register flags = t1;
2696 __ Ld_bu(flags, FieldMemOperand(map, Map::kBitFieldOffset));
2697 map = no_reg;
2698 __ And(flags, flags, Operand(Map::Bits1::IsCallableBit::kMask));
2699 __ Branch(&non_callable, eq, flags, Operand(zero_reg));
2700 }
2701
2702 __ TailCallBuiltin(Builtin::kCallProxy, eq, instance_type,
2703 Operand(JS_PROXY_TYPE));
2704
 2705 // Check if the target is a wrapped function and call the CallWrappedFunction
 2706 // builtin.
2707 __ TailCallBuiltin(Builtin::kCallWrappedFunction, eq, instance_type,
2708 Operand(JS_WRAPPED_FUNCTION_TYPE));
2709
2710 // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
2711 // Check that the function is not a "classConstructor".
2712 __ Branch(&class_constructor, eq, instance_type,
2713 Operand(JS_CLASS_CONSTRUCTOR_TYPE));
2714
2715 // 2. Call to something else, which might have a [[Call]] internal method (if
2716 // not we raise an exception).
2717 // Overwrite the original receiver with the (original) target.
2718 __ StoreReceiver(target);
2719 // Let the "call_as_function_delegate" take care of the rest.
2720 __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
2721 __ TailCallBuiltin(
2723
2724 // 3. Call to something that is not callable.
2725 __ bind(&non_callable);
2726 {
2727 FrameScope scope(masm, StackFrame::INTERNAL);
2728 __ Push(target);
2729 __ CallRuntime(Runtime::kThrowCalledNonCallable);
2730 }
2731
2732 // 4. The function is a "classConstructor", need to raise an exception.
2733 __ bind(&class_constructor);
2734 {
2735 FrameScope frame(masm, StackFrame::INTERNAL);
2736 __ Push(target);
2737 __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
2738 }
2739}
2740
2741void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
2742 // ----------- S t a t e -------------
2743 // -- a0 : the number of arguments
2744 // -- a1 : the constructor to call (checked to be a JSFunction)
2745 // -- a3 : the new target (checked to be a constructor)
2746 // -----------------------------------
2747 __ AssertConstructor(a1);
2748 __ AssertFunction(a1);
2749
2750 // Calling convention for function specific ConstructStubs require
2751 // a2 to contain either an AllocationSite or undefined.
2752 __ LoadRoot(a2, RootIndex::kUndefinedValue);
2753
2754 Label call_generic_stub;
2755
2756 // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
2757 __ LoadTaggedField(
2758 a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2759 __ Ld_wu(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset));
2760 __ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
2761 __ Branch(&call_generic_stub, eq, a4, Operand(zero_reg));
2762
2763 __ TailCallBuiltin(Builtin::kJSBuiltinsConstructStub);
2764
2765 __ bind(&call_generic_stub);
2766 __ TailCallBuiltin(Builtin::kJSConstructStubGeneric);
2767}
2768
2769// static
2770void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
2771 // ----------- S t a t e -------------
2772 // -- a0 : the number of arguments
2773 // -- a1 : the function to call (checked to be a JSBoundFunction)
2774 // -- a3 : the new target (checked to be a constructor)
2775 // -----------------------------------
2776 __ AssertConstructor(a1);
2777 __ AssertBoundFunction(a1);
2778
2779 // Load [[BoundArguments]] into a2 and length of that into a4.
2780 __ LoadTaggedField(
2781 a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
2782 __ SmiUntagField(a4, FieldMemOperand(a2, offsetof(FixedArray, length_)));
2783
2784 // ----------- S t a t e -------------
2785 // -- a0 : the number of arguments
2786 // -- a1 : the function to call (checked to be a JSBoundFunction)
2787 // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
2788 // -- a3 : the new target (checked to be a constructor)
2789 // -- a4 : the number of [[BoundArguments]]
2790 // -----------------------------------
2791
2792 // Reserve stack space for the [[BoundArguments]].
2793 {
2794 Label done;
2795 __ slli_d(a5, a4, kSystemPointerSizeLog2);
2796 __ Sub_d(t0, sp, Operand(a5));
2797 // Check the stack for overflow. We are not trying to catch interruptions
2798 // (i.e. debug break and preemption) here, so check the "real stack limit".
2799 __ LoadStackLimit(kScratchReg,
2800 MacroAssembler::StackLimitKind::kRealStackLimit);
2801 __ Branch(&done, hs, t0, Operand(kScratchReg));
2802 {
2803 FrameScope scope(masm, StackFrame::MANUAL);
2804 __ EnterFrame(StackFrame::INTERNAL);
2805 __ CallRuntime(Runtime::kThrowStackOverflow);
2806 }
2807 __ bind(&done);
2808 }
2809
2810 // Pop receiver.
2811 __ Pop(t0);
2812
2813 // Push [[BoundArguments]].
2814 {
2815 Label loop, done_loop;
2816 __ SmiUntagField(a4, FieldMemOperand(a2, offsetof(FixedArray, length_)));
2817 __ Add_d(a0, a0, Operand(a4));
2818 __ Add_d(a2, a2,
2819 Operand(OFFSET_OF_DATA_START(FixedArray) - kHeapObjectTag));
2820 __ bind(&loop);
2821 __ Sub_d(a4, a4, Operand(1));
2822 __ Branch(&done_loop, lt, a4, Operand(zero_reg));
2823 __ Alsl_d(a5, a4, a2, kTaggedSizeLog2);
2824 __ LoadTaggedField(kScratchReg, MemOperand(a5, 0));
2825 __ Push(kScratchReg);
2826 __ Branch(&loop);
2827 __ bind(&done_loop);
2828 }
2829
2830 // Push receiver.
2831 __ Push(t0);
2832
2833 // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
2834 {
2835 Label skip_load;
2836 __ CompareTaggedAndBranch(&skip_load, ne, a1, Operand(a3));
2837 __ LoadTaggedField(
2838 a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2839 __ bind(&skip_load);
2840 }
2841
2842 // Construct the [[BoundTargetFunction]] via the Construct builtin.
2843 __ LoadTaggedField(
2844 a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2845 __ TailCallBuiltin(Builtin::kConstruct);
2846}
2847
2848// static
2849void Builtins::Generate_Construct(MacroAssembler* masm) {
2850 // ----------- S t a t e -------------
2851 // -- a0 : the number of arguments
2852 // -- a1 : the constructor to call (can be any Object)
2853 // -- a3 : the new target (either the same as the constructor or
2854 // the JSFunction on which new was invoked initially)
2855 // -----------------------------------
2856
2857 Register target = a1;
2858 Register map = t1;
2859 Register instance_type = t2;
2860 Register scratch = t3;
2861 DCHECK(!AreAliased(a0, target, map, instance_type, scratch));
2862
2863 // Check if target is a Smi.
2864 Label non_constructor, non_proxy;
2865 __ JumpIfSmi(target, &non_constructor);
2866
2867 // Check if target has a [[Construct]] internal method.
2868 __ LoadTaggedField(map, FieldMemOperand(target, HeapObject::kMapOffset));
2869 {
2870 Register flags = t3;
2871 __ Ld_bu(flags, FieldMemOperand(map, Map::kBitFieldOffset));
2872 __ And(flags, flags, Operand(Map::Bits1::IsConstructorBit::kMask));
2873 __ Branch(&non_constructor, eq, flags, Operand(zero_reg));
2874 }
2875
2876 // Dispatch based on instance type.
2877 __ GetInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE, scratch);
2878 __ TailCallBuiltin(Builtin::kConstructFunction, ls, scratch,
2879 Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
2880
2881 // Only dispatch to bound functions after checking whether they are
2882 // constructors.
2883 __ TailCallBuiltin(Builtin::kConstructBoundFunction, eq, instance_type,
2884 Operand(JS_BOUND_FUNCTION_TYPE));
2885
2886 // Only dispatch to proxies after checking whether they are constructors.
2887 __ Branch(&non_proxy, ne, instance_type, Operand(JS_PROXY_TYPE));
2888 __ TailCallBuiltin(Builtin::kConstructProxy);
2889
2890 // Called Construct on an exotic Object with a [[Construct]] internal method.
2891 __ bind(&non_proxy);
2892 {
2893 // Overwrite the original receiver with the (original) target.
2894 __ StoreReceiver(target);
2895 // Let the "call_as_constructor_delegate" take care of the rest.
2896 __ LoadNativeContextSlot(target,
2897 Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
2898 __ TailCallBuiltin(Builtins::CallFunction());
2899 }
2900
2901 // Called Construct on an Object that doesn't have a [[Construct]] internal
2902 // method.
2903 __ bind(&non_constructor);
2904 __ TailCallBuiltin(Builtin::kConstructedNonConstructable);
2905}
2906
2907#if V8_ENABLE_WEBASSEMBLY
2908// Compute register lists for parameters to be saved. We save all parameter
2909// registers (see wasm-linkage.h). They might be overwritten in the runtime
2910// call below. We don't have any callee-saved registers in wasm, so no need to
2911// store anything else.
2912constexpr RegList kSavedGpRegs = ([]() constexpr {
2913 RegList saved_gp_regs;
2914 for (Register gp_param_reg : wasm::kGpParamRegisters) {
2915 saved_gp_regs.set(gp_param_reg);
2916 }
2917
2918 // The instance data has already been stored in the fixed part of the frame.
2919 saved_gp_regs.clear(kWasmImplicitArgRegister);
2920 // All set registers were unique.
2921 CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters) - 1);
2923 saved_gp_regs.Count());
2924 return saved_gp_regs;
2925})();
2926
2927constexpr DoubleRegList kSavedFpRegs = ([]() constexpr {
2928 DoubleRegList saved_fp_regs;
2929 for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
2930 saved_fp_regs.set(fp_param_reg);
2931 }
2932
2933 CHECK_EQ(saved_fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
2935 saved_fp_regs.Count());
2936 return saved_fp_regs;
2937})();
2938
2939// When entering this builtin, we have just created a Wasm stack frame:
2940//
2941// [ Wasm instance data ] <-- sp
2942// [ WASM frame marker ]
2943// [ saved fp ] <-- fp
2944//
2945// Add the feedback vector to the stack.
2946//
2947// [ feedback vector ] <-- sp
2948// [ Wasm instance data ]
2949// [ WASM frame marker ]
2950// [ saved fp ] <-- fp
2951void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
2952 Register func_index = wasm::kLiftoffFrameSetupFunctionReg;
2953 Register vector = t1;
2954 Register scratch = t2;
2955 Label allocate_vector, done;
2956
2957 __ LoadTaggedField(
2959 WasmTrustedInstanceData::kFeedbackVectorsOffset));
2960 __ Alsl_d(vector, func_index, vector, kTaggedSizeLog2);
2961 __ LoadTaggedField(vector,
2962 FieldMemOperand(vector, OFFSET_OF_DATA_START(FixedArray)));
2963 __ JumpIfSmi(vector, &allocate_vector);
2964 __ bind(&done);
2965 __ Push(vector);
2966 __ Ret();
2967
2968 __ bind(&allocate_vector);
2969 // Feedback vector doesn't exist yet. Call the runtime to allocate it.
2970 // We temporarily change the frame type for this, because we need special
2971 // handling by the stack walker in case of GC.
2972 __ li(scratch, StackFrame::TypeToMarker(StackFrame::WASM_LIFTOFF_SETUP));
2974
2975 // Save registers.
2976 __ MultiPush(kSavedGpRegs);
2977 __ MultiPushFPU(kSavedFpRegs);
2978 __ Push(ra);
2979
2980 // Arguments to the runtime function: instance data, func_index, and an
2981 // additional stack slot for the NativeModule.
2982 __ SmiTag(func_index);
2983 __ Push(kWasmImplicitArgRegister, func_index, zero_reg);
2984 __ Move(cp, Smi::zero());
2985 __ CallRuntime(Runtime::kWasmAllocateFeedbackVector, 3);
2986 __ mov(vector, kReturnRegister0);
2987
2988 // Restore registers and frame type.
2989 __ Pop(ra);
2990 __ MultiPopFPU(kSavedFpRegs);
2991 __ MultiPop(kSavedGpRegs);
2993 MemOperand(fp, WasmFrameConstants::kWasmInstanceDataOffset));
2994 __ li(scratch, StackFrame::TypeToMarker(StackFrame::WASM));
2996 __ Branch(&done);
2997}
2998
2999void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
3000 // The function index was put in t0 by the jump table trampoline.
 3001 // Convert it to a Smi for the runtime call.
3003
3004 {
3005 HardAbortScope hard_abort(masm); // Avoid calls to Abort.
3006 FrameScope scope(masm, StackFrame::INTERNAL);
3007
3008 // Save registers that we need to keep alive across the runtime call.
3010 __ MultiPush(kSavedGpRegs);
3011 __ MultiPushFPU(kSavedFpRegs);
3012
3013 // kFixedFrameSizeFromFp is hard coded to include space for Simd
3014 // registers, so we still need to allocate extra (unused) space on the stack
3015 // as if they were saved.
3016 __ Sub_d(sp, sp, kSavedFpRegs.Count() * kDoubleSize);
3017
3019
3020 // Initialize the JavaScript context with 0. CEntry will use it to
3021 // set the current context on the isolate.
3022 __ Move(kContextRegister, Smi::zero());
3023 __ CallRuntime(Runtime::kWasmCompileLazy, 2);
3024
 3025 // Untag the returned Smi into t0, for later use.
3026 static_assert(!kSavedGpRegs.has(t0));
3027 __ SmiUntag(t0, a0);
3028
3029 __ Add_d(sp, sp, kSavedFpRegs.Count() * kDoubleSize);
3030 // Restore registers.
3031 __ MultiPopFPU(kSavedFpRegs);
3032 __ MultiPop(kSavedGpRegs);
3034 }
3035
3036 // The runtime function returned the jump table slot offset as a Smi (now in
3037 // t0). Use that to compute the jump target.
3038 static_assert(!kSavedGpRegs.has(t1));
3040 WasmTrustedInstanceData::kJumpTableStartOffset));
3041 __ Add_d(t0, t1, Operand(t0));
3042
3043 // Finally, jump to the jump table slot for the function.
3044 __ Jump(t0);
3045}
3046
3047void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
3048 HardAbortScope hard_abort(masm); // Avoid calls to Abort.
3049 {
3050 FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
3051
 3052 // Save all parameter registers. They might hold live values; we restore
3053 // them after the runtime call.
3056
3057 // Initialize the JavaScript context with 0. CEntry will use it to
3058 // set the current context on the isolate.
3059 __ Move(cp, Smi::zero());
3060 __ CallRuntime(Runtime::kWasmDebugBreak, 0);
3061
3062 // Restore registers.
3065 }
3066 __ Ret();
3067}
3068
3069namespace {
3070// Check that the stack was in the old state (if generated code assertions are
3071// enabled), and switch to the new state.
3072void SwitchStackState(MacroAssembler* masm, Register jmpbuf, Register tmp,
3074 wasm::JumpBuffer::StackState new_state) {
3075#if V8_ENABLE_SANDBOX
3076 __ Ld_w(tmp, MemOperand(jmpbuf, wasm::kJmpBufStateOffset));
3077 Label ok;
3078 __ JumpIfEqual(tmp, old_state, &ok);
3079 __ Trap();
3080 __ bind(&ok);
3081#endif
3082 __ li(tmp, Operand(new_state));
3083 __ St_w(tmp, MemOperand(jmpbuf, wasm::kJmpBufStateOffset));
3084}
3085
3086// Switch the stack pointer.
3087void SwitchStackPointer(MacroAssembler* masm, Register jmpbuf) {
3088 __ Ld_d(sp, MemOperand(jmpbuf, wasm::kJmpBufSpOffset));
3089}
3090
3091void FillJumpBuffer(MacroAssembler* masm, Register jmpbuf, Label* target,
3092 Register tmp) {
3093 __ mov(tmp, sp);
3094 __ St_d(tmp, MemOperand(jmpbuf, wasm::kJmpBufSpOffset));
3095 __ St_d(fp, MemOperand(jmpbuf, wasm::kJmpBufFpOffset));
3096 __ LoadStackLimit(tmp, __ StackLimitKind::kRealStackLimit);
3097 __ St_d(tmp, MemOperand(jmpbuf, wasm::kJmpBufStackLimitOffset));
3098
3099 __ LoadLabelRelative(tmp, target);
3100 // Stash the address in the jump buffer.
3101 __ St_d(tmp, MemOperand(jmpbuf, wasm::kJmpBufPcOffset));
3102}
3103
3104void LoadJumpBuffer(MacroAssembler* masm, Register jmpbuf, bool load_pc,
3105 Register tmp, wasm::JumpBuffer::StackState expected_state) {
3106 SwitchStackPointer(masm, jmpbuf);
3107 __ Ld_d(fp, MemOperand(jmpbuf, wasm::kJmpBufFpOffset));
3108 SwitchStackState(masm, jmpbuf, tmp, expected_state, wasm::JumpBuffer::Active);
3109 if (load_pc) {
3110 __ Ld_d(tmp, MemOperand(jmpbuf, wasm::kJmpBufPcOffset));
3111 __ Jump(tmp);
3112 }
3113 // The stack limit in StackGuard is set separately under the ExecutionAccess
3114 // lock.
3115}
3116
3117void SaveState(MacroAssembler* masm, Register active_continuation, Register tmp,
3118 Label* suspend) {
3119 Register jmpbuf = tmp;
3120 __ LoadExternalPointerField(
3121 jmpbuf,
3122 FieldMemOperand(active_continuation,
3123 WasmContinuationObject::kStackOffset),
3125 __ Add_d(jmpbuf, jmpbuf, wasm::StackMemory::jmpbuf_offset());
3126
3127 UseScratchRegisterScope temps(masm);
3128 FillJumpBuffer(masm, jmpbuf, suspend, temps.Acquire());
3129}
3130
3131void LoadTargetJumpBuffer(MacroAssembler* masm, Register target_continuation,
3132 Register tmp,
3133 wasm::JumpBuffer::StackState expected_state) {
3134 Register target_jmpbuf = target_continuation;
3135 __ LoadExternalPointerField(
3136 target_jmpbuf,
3137 FieldMemOperand(target_continuation,
3138 WasmContinuationObject::kStackOffset),
3140 __ Add_d(target_jmpbuf, target_jmpbuf, wasm::StackMemory::jmpbuf_offset());
3141
3142 __ St_d(zero_reg,
3143 MemOperand(fp, StackSwitchFrameConstants::kGCScanSlotCountOffset));
3144 // Switch stack!
3145 LoadJumpBuffer(masm, target_jmpbuf, false, tmp, expected_state);
3146}
3147
3148// Updates the stack limit and central stack info, and validates the switch.
3149void SwitchStacks(MacroAssembler* masm, Register old_continuation,
3150 bool return_switch,
3151 const std::initializer_list<Register> keep) {
3152 using ER = ExternalReference;
3153
3154 for (auto reg : keep) {
3155 __ Push(reg);
3156 }
3157
3158 {
3159 __ PrepareCallCFunction(2, a0);
3160 FrameScope scope(masm, StackFrame::MANUAL);
3161 __ li(kCArgRegs[0], ExternalReference::isolate_address(masm->isolate()));
3162 __ mov(kCArgRegs[1], old_continuation);
3163 __ CallCFunction(
3164 return_switch ? ER::wasm_return_switch() : ER::wasm_switch_stacks(), 2);
3165 }
3166
3167 for (auto it = std::rbegin(keep); it != std::rend(keep); ++it) {
3168 __ Pop(*it);
3169 }
3170}
3171
3172void ReloadParentContinuation(MacroAssembler* masm, Register return_reg,
3173 Register return_value, Register context,
3174 Register tmp1, Register tmp2, Register tmp3) {
3175 Register active_continuation = tmp1;
3176 __ LoadRoot(active_continuation, RootIndex::kActiveContinuation);
3177
3178 // Set a null pointer in the jump buffer's SP slot to indicate to the stack
3179 // frame iterator that this stack is empty.
3180 Register jmpbuf = tmp2;
3181 __ LoadExternalPointerField(
3182 jmpbuf,
3183 FieldMemOperand(active_continuation,
3184 WasmContinuationObject::kStackOffset),
3186 __ Add_d(jmpbuf, jmpbuf, wasm::StackMemory::jmpbuf_offset());
3187 __ St_d(zero_reg, MemOperand(jmpbuf, wasm::kJmpBufSpOffset));
3188 {
3189 UseScratchRegisterScope temps(masm);
3190 Register scratch = temps.Acquire();
3191 SwitchStackState(masm, jmpbuf, scratch, wasm::JumpBuffer::Active,
3193 }
3194 Register parent = tmp2;
3195 __ LoadTaggedField(parent,
3196 FieldMemOperand(active_continuation,
3197 WasmContinuationObject::kParentOffset));
3198
3199 // Update active continuation root.
3200 int32_t active_continuation_offset =
3202 RootIndex::kActiveContinuation);
3203 __ St_d(parent, MemOperand(kRootRegister, active_continuation_offset));
3204 jmpbuf = parent;
3205 __ LoadExternalPointerField(
3206 jmpbuf, FieldMemOperand(parent, WasmContinuationObject::kStackOffset),
3208 __ Add_d(jmpbuf, jmpbuf, wasm::StackMemory::jmpbuf_offset());
3209
3210 // Switch stack!
3211 SwitchStacks(masm, active_continuation, true,
3212 {return_reg, return_value, context, jmpbuf});
3213 LoadJumpBuffer(masm, jmpbuf, false, tmp3, wasm::JumpBuffer::Inactive);
3214}
3215
3216void RestoreParentSuspender(MacroAssembler* masm, Register tmp1,
3217 Register tmp2) {
3218 Register suspender = tmp1;
3219 __ LoadRoot(suspender, RootIndex::kActiveSuspender);
3220 __ LoadTaggedField(
3221 suspender,
3222 FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset));
3223 int32_t active_suspender_offset =
3225 RootIndex::kActiveSuspender);
3226 __ St_d(suspender, MemOperand(kRootRegister, active_suspender_offset));
3227}
3228
3229void ResetStackSwitchFrameStackSlots(MacroAssembler* masm) {
3230 __ St_d(zero_reg,
3231 MemOperand(fp, StackSwitchFrameConstants::kResultArrayOffset));
3232 __ St_d(zero_reg,
3233 MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset));
3234}
3235
3236// TODO(irezvov): Consolidate with arm64 RegisterAllocator.
3237class RegisterAllocator {
3238 public:
3239 class Scoped {
3240 public:
3241 Scoped(RegisterAllocator* allocator, Register* reg)
3242 : allocator_(allocator), reg_(reg) {}
3243 ~Scoped() { allocator_->Free(reg_); }
3244
3245 private:
3246 RegisterAllocator* allocator_;
3247 Register* reg_;
3248 };
3249
3250 explicit RegisterAllocator(const RegList& registers)
3252 void Ask(Register* reg) {
3253 DCHECK_EQ(*reg, no_reg);
3254 DCHECK(!available_.is_empty());
3255 *reg = available_.PopFirst();
3256 allocated_registers_.push_back(reg);
3257 }
3258
3259 bool registerIsAvailable(const Register& reg) { return available_.has(reg); }
3260
3261 void Pinned(const Register& requested, Register* reg) {
3262 if (!registerIsAvailable(requested)) {
 3263 printf("%s register is occupied!", RegisterName(requested));
3264 }
3265 DCHECK(registerIsAvailable(requested));
3266 *reg = requested;
3267 Reserve(requested);
3268 allocated_registers_.push_back(reg);
3269 }
3270
3271 void Free(Register* reg) {
3272 DCHECK_NE(*reg, no_reg);
3273 available_.set(*reg);
3274 *reg = no_reg;
3276 find(allocated_registers_.begin(), allocated_registers_.end(), reg));
3277 }
3278
3279 void Reserve(const Register& reg) {
3280 if (reg == no_reg) {
3281 return;
3282 }
3283 DCHECK(registerIsAvailable(reg));
3284 available_.clear(reg);
3285 }
3286
3287 void Reserve(const Register& reg1, const Register& reg2,
3288 const Register& reg3 = no_reg, const Register& reg4 = no_reg,
3289 const Register& reg5 = no_reg, const Register& reg6 = no_reg) {
3290 Reserve(reg1);
3291 Reserve(reg2);
3292 Reserve(reg3);
3293 Reserve(reg4);
3294 Reserve(reg5);
3295 Reserve(reg6);
3296 }
3297
3298 bool IsUsed(const Register& reg) {
3299 return initial_.has(reg) && !registerIsAvailable(reg);
3300 }
3301
3302 void ResetExcept(const Register& reg1 = no_reg, const Register& reg2 = no_reg,
3303 const Register& reg3 = no_reg, const Register& reg4 = no_reg,
3304 const Register& reg5 = no_reg,
3305 const Register& reg6 = no_reg) {
3306 available_ = initial_;
3307 available_.clear(reg1);
3308 available_.clear(reg2);
3309 available_.clear(reg3);
3310 available_.clear(reg4);
3311 available_.clear(reg5);
3312 available_.clear(reg6);
3313
3314 auto it = allocated_registers_.begin();
3315 while (it != allocated_registers_.end()) {
3316 if (registerIsAvailable(**it)) {
3317 **it = no_reg;
3318 it = allocated_registers_.erase(it);
3319 } else {
3320 it++;
3321 }
3322 }
3323 }
3324
3325 static RegisterAllocator WithAllocatableGeneralRegisters() {
3326 RegList list;
3327 const RegisterConfiguration* config(RegisterConfiguration::Default());
3328
3329 for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
3330 int code = config->GetAllocatableGeneralCode(i);
3331 Register candidate = Register::from_code(code);
3332 list.set(candidate);
3333 }
3334 return RegisterAllocator(list);
3335 }
3336
3337 private:
3338 std::vector<Register*> allocated_registers_;
3339 const RegList initial_;
3340 RegList available_;
3341};
3342
3343#define DEFINE_REG(Name) \
3344 Register Name = no_reg; \
3345 regs.Ask(&Name);
3346
3347#define DEFINE_REG_W(Name) \
3348 DEFINE_REG(Name); \
3349 Name = Name.W();
3350
3351#define ASSIGN_REG(Name) regs.Ask(&Name);
3352
3353#define ASSIGN_REG_W(Name) \
3354 ASSIGN_REG(Name); \
3355 Name = Name.W();
3356
3357#define DEFINE_PINNED(Name, Reg) \
3358 Register Name = no_reg; \
3359 regs.Pinned(Reg, &Name);
3360
3361#define ASSIGN_PINNED(Name, Reg) regs.Pinned(Reg, &Name);
3362
3363#define DEFINE_SCOPED(Name) \
3364 DEFINE_REG(Name) \
3365 RegisterAllocator::Scoped scope_##Name(&regs, &Name);
3366
3367#define FREE_REG(Name) regs.Free(&Name);
3368
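// Editor's note: a minimal usage sketch, not part of the original file, showing
// how the RegisterAllocator and the DEFINE_* / ASSIGN_* / FREE_* macros above
// are meant to interact. The guard macro, the function name, and the choice of
// a1 are illustrative assumptions.
#ifdef REGISTER_ALLOCATOR_USAGE_SKETCH
void RegisterAllocatorUsageSketch() {
  auto regs = RegisterAllocator::WithAllocatableGeneralRegisters();
  DEFINE_PINNED(closure, a1);  // Register closure = no_reg; regs.Pinned(a1, &closure);
  DEFINE_REG(scratch);         // Picks an arbitrary free allocatable register.
  regs.ResetExcept(closure);   // Releases every allocation except `closure`.
  FREE_REG(closure);           // Hands a1 back to the allocator.
}
#endif
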
3369// Loads the context field of the WasmTrustedInstanceData or WasmImportData
3370// depending on the data's type, and places the result in the input register.
3371void GetContextFromImplicitArg(MacroAssembler* masm, Register data,
3372 Register scratch) {
3373 Label instance;
3374 Label end;
3375 __ LoadTaggedField(scratch, FieldMemOperand(data, HeapObject::kMapOffset));
3376 __ Ld_hu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3377 __ Branch(&instance, eq, scratch, Operand(WASM_TRUSTED_INSTANCE_DATA_TYPE));
3378
3379 __ LoadTaggedField(
3380 data, FieldMemOperand(data, WasmImportData::kNativeContextOffset));
3381 __ jmp(&end);
3382
3383 __ bind(&instance);
3384 __ LoadTaggedField(
3385 data,
3386 FieldMemOperand(data, WasmTrustedInstanceData::kNativeContextOffset));
3387 __ bind(&end);
3388}
3389
3390} // namespace
3391
3392void Builtins::Generate_WasmToJsWrapperAsm(MacroAssembler* masm) {
3393 // Push registers in reverse order so that they are on the stack like
3394 // in an array, with the first item being at the lowest address.
3395 constexpr int cnt_fp = arraysize(wasm::kFpParamRegisters);
3396 constexpr int cnt_gp = arraysize(wasm::kGpParamRegisters) - 1;
3397 int required_stack_space = cnt_fp * kDoubleSize + cnt_gp * kSystemPointerSize;
3398 __ Sub_d(sp, sp, Operand(required_stack_space));
3399 for (int i = cnt_fp - 1; i >= 0; i--) {
3401 MemOperand(sp, i * kDoubleSize + cnt_gp * kSystemPointerSize));
3402 }
3403
3404 // Skip wasm::kGpParamRegisters[0]; the first GP parameter register is handled separately.
3405 for (int i = cnt_gp; i >= 1; i--) {
3407 MemOperand(sp, (i - 1) * kSystemPointerSize));
3408 }
3409 // Reserve a slot for the signature.
3410 __ Push(zero_reg);
3411 __ TailCallBuiltin(Builtin::kWasmToJsWrapperCSA);
3412}
3413
3414void Builtins::Generate_WasmTrapHandlerLandingPad(MacroAssembler* masm) {
3415 // This builtin gets called from the WebAssembly trap handler when an
3416 // out-of-bounds memory access happened or when a null reference gets
3417 // dereferenced. This builtin then fakes a call from the instruction that
3418 // triggered the signal to the runtime. This is done by setting a return
3419 // address and then jumping to a builtin which will call further to the
3420 // runtime.
3421 // As the return address we use the fault address + 1. Using the fault address
3422 // itself would cause problems with safepoints and source positions.
3423 //
3424 // The problem with safepoints is that a safepoint has to be registered at the
3425 // return address, and that at most one safepoint should be registered at a
3426 // location. However, there could already be a safepoint registered at the
3427 // fault address if the fault address is the return address of a call.
3428 //
3429 // The problem with source positions is that the stack trace code looks for
3430 // the source position of a call before the return address. The source
3431 // position of the faulty memory access, however, is recorded at the fault
3432 // address. Therefore the stack trace code would not find the source position
3433 // if we used the fault address as the return address.
3435 __ TailCallBuiltin(Builtin::kWasmTrapHandlerThrowTrap);
3436}
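// Editor's note: a schematic sketch, not part of the original file, of the
// return-address adjustment described in the comment above. The helper name
// and the guard macro are illustrative assumptions; only the "+ 1" offset is
// taken from the original explanation.
#ifdef TRAP_HANDLER_LANDING_PAD_SKETCH
uintptr_t FakedReturnAddressForTrap(uintptr_t fault_pc) {
  // Using fault_pc itself could collide with a safepoint that is already
  // registered at that address (when it is the return address of a call), and
  // the stack trace code would look up the source position of the instruction
  // before it rather than of the faulting access. Offsetting by one byte
  // avoids both problems while still identifying the faulting code.
  return fault_pc + 1;
}
#endif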
3437
3438void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
3439 auto regs = RegisterAllocator::WithAllocatableGeneralRegisters();
3440 // Set up the stackframe.
3441 __ EnterFrame(StackFrame::STACK_SWITCH);
3442
3443 DEFINE_PINNED(suspender, a0);
3445
3446 __ Sub_d(
3447 sp, sp,
3448 Operand(StackSwitchFrameConstants::kNumSpillSlots * kSystemPointerSize));
3449 // Set a sentinel value for the spill slots visited by the GC.
3450 ResetStackSwitchFrameStackSlots(masm);
3451
3452 // -------------------------------------------
3453 // Save current state in active jump buffer.
3454 // -------------------------------------------
3455 Label resume;
3457 __ LoadRoot(continuation, RootIndex::kActiveContinuation);
3458 DEFINE_REG(jmpbuf);
3459 DEFINE_REG(scratch);
3460 __ LoadExternalPointerField(
3461 jmpbuf,
3462 FieldMemOperand(continuation, WasmContinuationObject::kStackOffset),
3464 __ Add_d(jmpbuf, jmpbuf, wasm::StackMemory::jmpbuf_offset());
3465 FillJumpBuffer(masm, jmpbuf, &resume, scratch);
3466 SwitchStackState(masm, jmpbuf, scratch, wasm::JumpBuffer::Active,
3468 regs.ResetExcept(suspender, continuation);
3469
3470 DEFINE_REG(suspender_continuation);
3471 __ LoadTaggedField(
3472 suspender_continuation,
3473 FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset));
3474 if (v8_flags.debug_code) {
3475 // -------------------------------------------
3476 // Check that the suspender's continuation is the active continuation.
3477 // -------------------------------------------
3478 // TODO(thibaudm): Once we add core stack-switching instructions, this
3479 // check will not hold anymore: it's possible that the active continuation
3480 // changed (due to an internal switch), so we have to update the suspender.
3481 Label ok;
3482 __ Branch(&ok, eq, suspender_continuation, Operand(continuation));
3483 __ Trap();
3484 __ bind(&ok);
3485 }
3486 // -------------------------------------------
3487 // Update roots.
3488 // -------------------------------------------
3489 DEFINE_REG(caller);
3490 __ LoadTaggedField(caller,
3491 FieldMemOperand(suspender_continuation,
3492 WasmContinuationObject::kParentOffset));
3493 int32_t active_continuation_offset =
3495 RootIndex::kActiveContinuation);
3496 __ St_d(caller, MemOperand(kRootRegister, active_continuation_offset));
3497 DEFINE_REG(parent);
3498 __ LoadTaggedField(
3499 parent, FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset));
3500 int32_t active_suspender_offset =
3502 RootIndex::kActiveSuspender);
3503 __ St_d(parent, MemOperand(kRootRegister, active_suspender_offset));
3504 regs.ResetExcept(suspender, caller, continuation);
3505
3506 // -------------------------------------------
3507 // Load jump buffer.
3508 // -------------------------------------------
3509 SwitchStacks(masm, continuation, false, {caller, suspender});
3511 ASSIGN_REG(jmpbuf);
3512 __ LoadExternalPointerField(
3513 jmpbuf, FieldMemOperand(caller, WasmContinuationObject::kStackOffset),
3515 __ Add_d(jmpbuf, jmpbuf, wasm::StackMemory::jmpbuf_offset());
3516 __ LoadTaggedField(
3518 FieldMemOperand(suspender, WasmSuspenderObject::kPromiseOffset));
3519 MemOperand GCScanSlotPlace =
3520 MemOperand(fp, StackSwitchFrameConstants::kGCScanSlotCountOffset);
3521 __ St_d(zero_reg, GCScanSlotPlace);
3522 ASSIGN_REG(scratch)
3523 LoadJumpBuffer(masm, jmpbuf, true, scratch, wasm::JumpBuffer::Inactive);
3524 __ Trap();
3525 __ bind(&resume);
3526 __ LeaveFrame(StackFrame::STACK_SWITCH);
3527 __ Ret();
3528}
3529
3530namespace {
3531// Resume the suspender stored in the closure. We generate two variants of this
3532// builtin: the onFulfilled variant resumes execution at the saved PC and
3533 // forwards the value, while the onRejected variant throws the value.
3534
3535void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
3536 auto regs = RegisterAllocator::WithAllocatableGeneralRegisters();
3537 __ EnterFrame(StackFrame::STACK_SWITCH);
3538
3539 DEFINE_PINNED(closure, kJSFunctionRegister); // a1
3540
3541 __ Sub_d(
3542 sp, sp,
3543 Operand(StackSwitchFrameConstants::kNumSpillSlots * kSystemPointerSize));
3544 // Set a sentinel value for the spill slots visited by the GC.
3545 ResetStackSwitchFrameStackSlots(masm);
3546
3547 regs.ResetExcept(closure);
3548
3549 // -------------------------------------------
3550 // Load suspender from closure.
3551 // -------------------------------------------
3552 DEFINE_REG(sfi);
3553 __ LoadTaggedField(
3554 sfi,
3555 MemOperand(
3556 closure,
3558 FREE_REG(closure);
3559 // The suspender has to live in the write-barrier ObjectRegister so that it
3560 // can be passed to the RecordWriteField calls later.
3562 DEFINE_REG(resume_data);
3563 __ LoadTaggedField(
3564 resume_data,
3565 FieldMemOperand(sfi, SharedFunctionInfo::kUntrustedFunctionDataOffset));
3566 __ LoadTaggedField(
3567 suspender,
3568 FieldMemOperand(resume_data, WasmResumeData::kSuspenderOffset));
3569 regs.ResetExcept(suspender);
3570
3571 // -------------------------------------------
3572 // Save current state.
3573 // -------------------------------------------
3574 Label suspend;
3575 DEFINE_REG(active_continuation);
3576 __ LoadRoot(active_continuation, RootIndex::kActiveContinuation);
3577 DEFINE_REG(current_jmpbuf);
3578 DEFINE_REG(scratch);
3579 __ LoadExternalPointerField(
3580 current_jmpbuf,
3581 FieldMemOperand(active_continuation,
3582 WasmContinuationObject::kStackOffset),
3584 __ Add_d(current_jmpbuf, current_jmpbuf, wasm::StackMemory::jmpbuf_offset());
3585 FillJumpBuffer(masm, current_jmpbuf, &suspend, scratch);
3586 SwitchStackState(masm, current_jmpbuf, scratch, wasm::JumpBuffer::Active,
3588 FREE_REG(current_jmpbuf);
3589
3590 // -------------------------------------------
3591 // Set the suspender and continuation parents and update the roots
3592 // -------------------------------------------
3593 DEFINE_REG(active_suspender);
3594 __ LoadRoot(active_suspender, RootIndex::kActiveSuspender);
3595 __ StoreTaggedField(
3596 active_suspender,
3597 FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset));
3598 __ RecordWriteField(suspender, WasmSuspenderObject::kParentOffset,
3599 active_suspender, kRAHasBeenSaved,
3601 int32_t active_suspender_offset =
3603 RootIndex::kActiveSuspender);
3604 __ St_d(suspender, MemOperand(kRootRegister, active_suspender_offset));
3605
3606 // The next line loads a field from suspender, but target_continuation has
3607 // to reuse the same register so it can be passed to RecordWriteField.
3608 // Free suspender here to repurpose its pinned register, then load from it.
3609 FREE_REG(suspender);
3611 suspender = target_continuation;
3612 __ LoadTaggedField(
3613 target_continuation,
3614 FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset));
3615 suspender = no_reg;
3616
3617 __ StoreTaggedField(active_continuation,
3618 FieldMemOperand(target_continuation,
3619 WasmContinuationObject::kParentOffset));
3620 DEFINE_REG(old_continuation);
3621 __ Move(old_continuation, active_continuation);
3622 __ RecordWriteField(
3623 target_continuation, WasmContinuationObject::kParentOffset,
3624 active_continuation, kRAHasBeenSaved, SaveFPRegsMode::kIgnore);
3625 int32_t active_continuation_offset =
3627 RootIndex::kActiveContinuation);
3628 __ St_d(target_continuation,
3629 MemOperand(kRootRegister, active_continuation_offset));
3630
3631 SwitchStacks(masm, old_continuation, false, {target_continuation});
3632
3633 regs.ResetExcept(target_continuation);
3634
3635 // -------------------------------------------
3636 // Load state from target jmpbuf (longjmp).
3637 // -------------------------------------------
3638 regs.Reserve(kReturnRegister0);
3639 DEFINE_REG(target_jmpbuf);
3640 ASSIGN_REG(scratch);
3641 __ LoadExternalPointerField(
3642 target_jmpbuf,
3643 FieldMemOperand(target_continuation,
3644 WasmContinuationObject::kStackOffset),
3646 __ Add_d(target_jmpbuf, target_jmpbuf, wasm::StackMemory::jmpbuf_offset());
3647 // Move resolved value to return register.
3649 MemOperand GCScanSlotPlace =
3650 MemOperand(fp, StackSwitchFrameConstants::kGCScanSlotCountOffset);
3651 __ St_d(zero_reg, GCScanSlotPlace);
3652 if (on_resume == wasm::OnResume::kThrow) {
3653 // Switch to the continuation's stack without restoring the PC.
3654 LoadJumpBuffer(masm, target_jmpbuf, false, scratch,
3656 // Pop this frame now. The unwinder expects that the first STACK_SWITCH
3657 // frame is the outermost one.
3658 __ LeaveFrame(StackFrame::STACK_SWITCH);
3659 // Forward the onRejected value to kThrow.
3660 __ Push(kReturnRegister0);
3661 __ CallRuntime(Runtime::kThrow);
3662 } else {
3663 // Resume the continuation normally.
3664 LoadJumpBuffer(masm, target_jmpbuf, true, scratch,
3666 }
3667 __ Trap();
3668 __ bind(&suspend);
3669 __ LeaveFrame(StackFrame::STACK_SWITCH);
3670 // Pop receiver + parameter.
3671 __ Add_d(sp, sp, Operand(2 * kSystemPointerSize));
3672 __ Ret();
3673}
3674} // namespace
3675
3676void Builtins::Generate_WasmResume(MacroAssembler* masm) {
3677 Generate_WasmResumeHelper(masm, wasm::OnResume::kContinue);
3678}
3679
3680void Builtins::Generate_WasmReject(MacroAssembler* masm) {
3681 Generate_WasmResumeHelper(masm, wasm::OnResume::kThrow);
3682}
3683
3684void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
3685 // Only needed on x64.
3686 __ Trap();
3687}
3688
3689namespace {
3690void SwitchToAllocatedStack(MacroAssembler* masm, RegisterAllocator& regs,
3691 Register wasm_instance, Register wrapper_buffer,
3692 Register& original_fp, Register& new_wrapper_buffer,
3693 Label* suspend) {
3694 ResetStackSwitchFrameStackSlots(masm);
3695 DEFINE_SCOPED(scratch)
3696 DEFINE_REG(target_continuation)
3697 __ LoadRoot(target_continuation, RootIndex::kActiveContinuation);
3698 DEFINE_REG(parent_continuation)
3699 __ LoadTaggedField(parent_continuation,
3700 FieldMemOperand(target_continuation,
3701 WasmContinuationObject::kParentOffset));
3702
3703 SaveState(masm, parent_continuation, scratch, suspend);
3704
3705 SwitchStacks(masm, parent_continuation, false,
3706 {wasm_instance, wrapper_buffer});
3707
3708 FREE_REG(parent_continuation);
3709 // Save the old stack's fp in t1, and use it to access the parameters in
3710 // the parent frame.
3711 regs.Pinned(t1, &original_fp);
3712 __ mov(original_fp, fp);
3713 __ LoadRoot(target_continuation, RootIndex::kActiveContinuation);
3714 LoadTargetJumpBuffer(masm, target_continuation, scratch,
3716 FREE_REG(target_continuation);
3717
3718 // Push the loaded fp. We know it is null, because there is no frame yet,
3719 // so we could also push 0 directly. In any case we need to push it,
3720 // because this marks the base of the stack segment for
3721 // the stack frame iterator.
3722 __ EnterFrame(StackFrame::STACK_SWITCH);
3723
3724 int stack_space =
3725 RoundUp(StackSwitchFrameConstants::kNumSpillSlots * kSystemPointerSize +
3726 JSToWasmWrapperFrameConstants::kWrapperBufferSize,
3727 16);
3728 __ Sub_d(sp, sp, Operand(stack_space));
3729
3730 ASSIGN_REG(new_wrapper_buffer)
3731
3732 __ mov(new_wrapper_buffer, sp);
3733 // Copy data needed for return handling from old wrapper buffer to new one.
3734 // kWrapperBufferRefReturnCount will be copied too, because 8 bytes are copied
3735 // at the same time.
3736 static_assert(JSToWasmWrapperFrameConstants::kWrapperBufferRefReturnCount ==
3737 JSToWasmWrapperFrameConstants::kWrapperBufferReturnCount + 4);
3738 __ Ld_d(scratch,
3739 MemOperand(wrapper_buffer,
3740 JSToWasmWrapperFrameConstants::kWrapperBufferReturnCount));
3741 __ St_d(scratch,
3742 MemOperand(new_wrapper_buffer,
3743 JSToWasmWrapperFrameConstants::kWrapperBufferReturnCount));
3744 __ Ld_d(
3745 scratch,
3746 MemOperand(
3747 wrapper_buffer,
3748 JSToWasmWrapperFrameConstants::kWrapperBufferSigRepresentationArray));
3749 __ St_d(
3750 scratch,
3751 MemOperand(
3752 new_wrapper_buffer,
3753 JSToWasmWrapperFrameConstants::kWrapperBufferSigRepresentationArray));
3754}
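// Editor's note: an illustrative sketch, not part of the original file, of why
// the single 8-byte load/store pair above moves both return counts at once.
// The struct and guard macro are assumptions; the layout follows the
// static_assert (the ref-return count sits 4 bytes after the return count).
#ifdef WRAPPER_BUFFER_COPY_SKETCH
struct WrapperBufferReturnCountsSketch {
  uint32_t return_count;      // at kWrapperBufferReturnCount
  uint32_t ref_return_count;  // at kWrapperBufferReturnCount + 4
};
static_assert(sizeof(WrapperBufferReturnCountsSketch) == 8,
              "one 64-bit copy covers both counters");
#endif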
3755
3756void SwitchBackAndReturnPromise(MacroAssembler* masm, RegisterAllocator& regs,
3757 wasm::Promise mode, Label* return_promise) {
3758 regs.ResetExcept();
3759 // The return value of the wasm function becomes the parameter of the
3760 // FulfillPromise builtin, and the promise is the return value of this
3761 // wrapper.
3762 static const Builtin_FulfillPromise_InterfaceDescriptor desc;
3763 DEFINE_PINNED(promise, desc.GetRegisterParameter(0));
3764 DEFINE_PINNED(return_value, desc.GetRegisterParameter(1));
3765 DEFINE_SCOPED(tmp);
3766 DEFINE_SCOPED(tmp2);
3767 DEFINE_SCOPED(tmp3);
3768 if (mode == wasm::kPromise) {
3769 __ mov(return_value, kReturnRegister0);
3770 __ LoadRoot(promise, RootIndex::kActiveSuspender);
3771 __ LoadTaggedField(
3772 promise, FieldMemOperand(promise, WasmSuspenderObject::kPromiseOffset));
3773 }
3774
3775 __ Ld_d(kContextRegister,
3776 MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset));
3777 GetContextFromImplicitArg(masm, kContextRegister, tmp);
3778
3779 ReloadParentContinuation(masm, promise, return_value, kContextRegister, tmp,
3780 tmp2, tmp3);
3781 RestoreParentSuspender(masm, tmp, tmp2);
3782
3783 if (mode == wasm::kPromise) {
3784 __ li(tmp, Operand(1));
3785 __ St_d(tmp,
3786 MemOperand(fp, StackSwitchFrameConstants::kGCScanSlotCountOffset));
3787 __ Push(promise);
3788 __ CallBuiltin(Builtin::kFulfillPromise);
3789 __ Pop(promise);
3790 }
3791 FREE_REG(promise);
3792 FREE_REG(return_value);
3793
3794 __ bind(return_promise);
3795}
3796
3797void GenerateExceptionHandlingLandingPad(MacroAssembler* masm,
3798 RegisterAllocator& regs,
3799 Label* return_promise) {
3800 regs.ResetExcept();
3801 static const Builtin_RejectPromise_InterfaceDescriptor desc;
3802 DEFINE_PINNED(promise, desc.GetRegisterParameter(0));
3803 DEFINE_PINNED(reason, desc.GetRegisterParameter(1));
3804 DEFINE_PINNED(debug_event, desc.GetRegisterParameter(2));
3805 int catch_handler = __ pc_offset();
3806
3807 DEFINE_SCOPED(thread_in_wasm_flag_addr);
3808 thread_in_wasm_flag_addr = a2;
3809
3810 // Unset thread_in_wasm_flag.
3811 __ Ld_d(
3812 thread_in_wasm_flag_addr,
3814 __ St_w(zero_reg, MemOperand(thread_in_wasm_flag_addr, 0));
3815
3816 // The exception becomes the parameter of the RejectPromise builtin, and the
3817 // promise is the return value of this wrapper.
3818 __ mov(reason, kReturnRegister0);
3819 __ LoadRoot(promise, RootIndex::kActiveSuspender);
3820 __ LoadTaggedField(
3821 promise, FieldMemOperand(promise, WasmSuspenderObject::kPromiseOffset));
3822
3823 __ Ld_d(kContextRegister,
3824 MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset));
3825
3826 DEFINE_SCOPED(tmp);
3827 DEFINE_SCOPED(tmp2);
3828 DEFINE_SCOPED(tmp3);
3829 GetContextFromImplicitArg(masm, kContextRegister, tmp);
3830 ReloadParentContinuation(masm, promise, reason, kContextRegister, tmp, tmp2,
3831 tmp3);
3832 RestoreParentSuspender(masm, tmp, tmp2);
3833
3834 __ li(tmp, Operand(1));
3835 __ St_d(tmp,
3836 MemOperand(fp, StackSwitchFrameConstants::kGCScanSlotCountOffset));
3837 __ Push(promise);
3838 __ LoadRoot(debug_event, RootIndex::kTrueValue);
3839 __ CallBuiltin(Builtin::kRejectPromise);
3840 __ Pop(promise);
3841
3842 // Run the rest of the wrapper normally (deconstruct the frame, ...).
3843 __ jmp(return_promise);
3844
3845 masm->isolate()->builtins()->SetJSPIPromptHandlerOffset(catch_handler);
3846}
3847
3848void JSToWasmWrapperHelper(MacroAssembler* masm, wasm::Promise mode) {
3849 bool stack_switch = mode == wasm::kPromise || mode == wasm::kStressSwitch;
3850 auto regs = RegisterAllocator::WithAllocatableGeneralRegisters();
3851
3852 __ EnterFrame(stack_switch ? StackFrame::STACK_SWITCH
3853 : StackFrame::JS_TO_WASM);
3854
3855 __ AllocateStackSpace(StackSwitchFrameConstants::kNumSpillSlots *
3857
3858 // Load the implicit argument (instance data or import data) from the frame.
3860 __ Ld_d(implicit_arg,
3861 MemOperand(fp, JSToWasmWrapperFrameConstants::kImplicitArgOffset));
3862
3863 DEFINE_PINNED(wrapper_buffer,
3865
3866 Label suspend;
3867 Register original_fp = no_reg;
3868 Register new_wrapper_buffer = no_reg;
3869 if (stack_switch) {
3870 SwitchToAllocatedStack(masm, regs, implicit_arg, wrapper_buffer,
3871 original_fp, new_wrapper_buffer, &suspend);
3872 } else {
3873 original_fp = fp;
3874 new_wrapper_buffer = wrapper_buffer;
3875 }
3876
3877 regs.ResetExcept(original_fp, wrapper_buffer, implicit_arg,
3878 new_wrapper_buffer);
3879
3880 {
3881 __ St_d(
3882 new_wrapper_buffer,
3883 MemOperand(fp, JSToWasmWrapperFrameConstants::kWrapperBufferOffset));
3884 if (stack_switch) {
3885 __ St_d(implicit_arg,
3886 MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset));
3887 DEFINE_SCOPED(scratch)
3888 __ Ld_d(
3889 scratch,
3890 MemOperand(original_fp,
3891 JSToWasmWrapperFrameConstants::kResultArrayParamOffset));
3892 __ St_d(scratch,
3893 MemOperand(fp, StackSwitchFrameConstants::kResultArrayOffset));
3894 }
3895 }
3896 {
3897 DEFINE_SCOPED(result_size);
3898 __ Ld_d(result_size, MemOperand(wrapper_buffer,
3899 JSToWasmWrapperFrameConstants::
3900 kWrapperBufferStackReturnBufferSize));
3901 __ slli_d(result_size, result_size, kSystemPointerSizeLog2);
3902 __ Sub_d(sp, sp, result_size);
3903 }
3904
3905 __ St_d(
3906 sp,
3907 MemOperand(
3908 new_wrapper_buffer,
3909 JSToWasmWrapperFrameConstants::kWrapperBufferStackReturnBufferStart));
3910
3911 if (stack_switch) {
3912 FREE_REG(new_wrapper_buffer)
3913 }
3914 FREE_REG(implicit_arg)
3915 for (auto reg : wasm::kGpParamRegisters) {
3916 regs.Reserve(reg);
3917 }
3918
3919 // The first GP parameter holds the trusted instance data or the import data.
3920 // This is handled specially.
3921 int stack_params_offset =
3924 int param_padding = stack_params_offset & kSystemPointerSize;
3925 stack_params_offset += param_padding;
3926
3927 {
3928 DEFINE_SCOPED(params_start);
3929 __ Ld_d(
3930 params_start,
3931 MemOperand(wrapper_buffer,
3932 JSToWasmWrapperFrameConstants::kWrapperBufferParamStart));
3933 {
3934 // Push stack parameters on the stack.
3935 DEFINE_SCOPED(params_end);
3936 __ Ld_d(
3937 params_end,
3938 MemOperand(wrapper_buffer,
3939 JSToWasmWrapperFrameConstants::kWrapperBufferParamEnd));
3940 DEFINE_SCOPED(last_stack_param);
3941
3942 __ Add_d(last_stack_param, params_start, Operand(stack_params_offset));
3943 Label loop_start;
3944 __ bind(&loop_start);
3945
3946 Label finish_stack_params;
3947 __ Branch(&finish_stack_params, ge, last_stack_param,
3948 Operand(params_end));
3949
3950 // Push parameter
3951 {
3952 DEFINE_SCOPED(scratch);
3953 __ Sub_d(params_end, params_end, Operand(kSystemPointerSize));
3954 __ Ld_d(scratch, MemOperand(params_end, 0));
3955 __ Push(scratch);
3956 }
3957
3958 __ Branch(&loop_start);
3959
3960 __ bind(&finish_stack_params);
3961 }
3962
3963 size_t next_offset = 0;
3964 for (size_t i = 1; i < arraysize(wasm::kGpParamRegisters); ++i) {
3965 // Check that {params_start} does not overlap with any of the parameter
3966 // registers, so that we don't overwrite it by accident with the loads
3967 // below.
3968 DCHECK_NE(params_start, wasm::kGpParamRegisters[i]);
3970 MemOperand(params_start, next_offset));
3971 next_offset += kSystemPointerSize;
3972 }
3973
3974 next_offset += param_padding;
3975 for (size_t i = 0; i < arraysize(wasm::kFpParamRegisters); ++i) {
3977 MemOperand(params_start, next_offset));
3978 next_offset += kDoubleSize;
3979 }
3980 DCHECK_EQ(next_offset, stack_params_offset);
3981 }
3982
3983 {
3984 DEFINE_SCOPED(thread_in_wasm_flag_addr);
3985 __ Ld_d(thread_in_wasm_flag_addr,
3988 DEFINE_SCOPED(scratch);
3989 __ li(scratch, Operand(1));
3990 __ St_w(scratch, MemOperand(thread_in_wasm_flag_addr, 0));
3991 }
3992
3993 __ St_d(zero_reg,
3994 MemOperand(fp, StackSwitchFrameConstants::kGCScanSlotCountOffset));
3995 {
3996 DEFINE_SCOPED(call_target);
3997 __ LoadWasmCodePointer(
3998 call_target,
3999 MemOperand(wrapper_buffer,
4000 JSToWasmWrapperFrameConstants::kWrapperBufferCallTarget));
4001 // We do the call without a signature check here, since the wrapper loaded
4002 // the signature from the same trusted object as the call target to set up
4003 // the stack layout. We could add a signature hash and pass it through to
4004 // verify it here, but an attacker that could corrupt the signature could
4005 // also corrupt that signature hash (which is outside of the sandbox).
4006 __ CallWasmCodePointerNoSignatureCheck(call_target);
4007 }
4008
4009 regs.ResetExcept();
4010 // The wrapper_buffer has to be in a2 as the correct parameter register.
4011 regs.Reserve(kReturnRegister0, kReturnRegister1);
4012 ASSIGN_PINNED(wrapper_buffer, a2);
4013 {
4014 DEFINE_SCOPED(thread_in_wasm_flag_addr);
4015 __ Ld_d(thread_in_wasm_flag_addr,
4018 __ St_w(zero_reg, MemOperand(thread_in_wasm_flag_addr, 0));
4019 }
4020
4021 __ Ld_d(wrapper_buffer,
4022 MemOperand(fp, JSToWasmWrapperFrameConstants::kWrapperBufferOffset));
4023
4025 MemOperand(
4026 wrapper_buffer,
4027 JSToWasmWrapperFrameConstants::kWrapperBufferFPReturnRegister1));
4029 MemOperand(
4030 wrapper_buffer,
4031 JSToWasmWrapperFrameConstants::kWrapperBufferFPReturnRegister2));
4033 MemOperand(
4034 wrapper_buffer,
4035 JSToWasmWrapperFrameConstants::kWrapperBufferGPReturnRegister1));
4037 MemOperand(
4038 wrapper_buffer,
4039 JSToWasmWrapperFrameConstants::kWrapperBufferGPReturnRegister2));
4040
4041 // Call the return value builtin with
4042 // a0: wasm instance.
4043 // a1: the result JSArray for multi-return.
4044 // a2: pointer to the byte buffer which contains all parameters.
4045 if (stack_switch) {
4046 __ Ld_d(a1, MemOperand(fp, StackSwitchFrameConstants::kResultArrayOffset));
4047 __ Ld_d(a0, MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset));
4048 } else {
4049 __ Ld_d(
4050 a1,
4051 MemOperand(fp, JSToWasmWrapperFrameConstants::kResultArrayParamOffset));
4052 __ Ld_d(a0,
4053 MemOperand(fp, JSToWasmWrapperFrameConstants::kImplicitArgOffset));
4054 }
4055
4056 Register scratch = a3;
4057 GetContextFromImplicitArg(masm, a0, scratch);
4058 __ Call(BUILTIN_CODE(masm->isolate(), JSToWasmHandleReturns),
4060
4061 Label return_promise;
4062 if (stack_switch) {
4063 SwitchBackAndReturnPromise(masm, regs, mode, &return_promise);
4064 }
4065 __ bind(&suspend);
4066
4067 __ LeaveFrame(stack_switch ? StackFrame::STACK_SWITCH
4068 : StackFrame::JS_TO_WASM);
4069 // Despite returning to different locations for the regular and stack-switching
4070 // versions, the incoming argument count matches in both cases:
4071 // instance and result array without suspending, or
4072 // promise resolve/reject params for the callback.
4073 __ Add_d(sp, sp, Operand(2 * kSystemPointerSize));
4074 __ Ret();
4075
4076 // Catch handler for the stack-switching wrapper: reject the promise with the
4077 // thrown exception.
4078 if (mode == wasm::kPromise) {
4079 GenerateExceptionHandlingLandingPad(masm, regs, &return_promise);
4080 }
4081}
4082} // namespace
4083
4084void Builtins::Generate_JSToWasmWrapperAsm(MacroAssembler* masm) {
4085 JSToWasmWrapperHelper(masm, wasm::kNoPromise);
4086}
4087
4088void Builtins::Generate_WasmReturnPromiseOnSuspendAsm(MacroAssembler* masm) {
4089 JSToWasmWrapperHelper(masm, wasm::kPromise);
4090}
4091
4092void Builtins::Generate_JSToWasmStressSwitchStacksAsm(MacroAssembler* masm) {
4093 JSToWasmWrapperHelper(masm, wasm::kStressSwitch);
4094}
4095
4096namespace {
4097
4098static constexpr Register kOldSPRegister = s3;
4099static constexpr Register kSwitchFlagRegister = s4;
4100
4101void SwitchToTheCentralStackIfNeeded(MacroAssembler* masm, Register argc_input,
4102 Register target_input,
4103 Register argv_input) {
4104 using ER = ExternalReference;
4105
4106 __ mov(kSwitchFlagRegister, zero_reg);
4107 __ mov(kOldSPRegister, sp);
4108
4109 // Using a2-a4 as temporary registers, because they will be rewritten
4110 // before exiting to native code anyway.
4111
4112 ER on_central_stack_flag_loc = ER::Create(
4113 IsolateAddressId::kIsOnCentralStackFlagAddress, masm->isolate());
4114 const Register& on_central_stack_flag = a2;
4115 __ li(on_central_stack_flag, on_central_stack_flag_loc);
4116 __ Ld_b(on_central_stack_flag, MemOperand(on_central_stack_flag, 0));
4117
4118 Label do_not_need_to_switch;
4119 __ Branch(&do_not_need_to_switch, ne, on_central_stack_flag,
4120 Operand(zero_reg));
4121
4122 // Switch to central stack.
4123 Register central_stack_sp = a4;
4124 DCHECK(!AreAliased(central_stack_sp, argc_input, argv_input, target_input));
4125 {
4126 __ Push(argc_input, target_input, argv_input);
4127 __ PrepareCallCFunction(2, a0);
4128 __ li(kCArgRegs[0], ER::isolate_address(masm->isolate()));
4129 __ mov(kCArgRegs[1], kOldSPRegister);
4130 __ CallCFunction(ER::wasm_switch_to_the_central_stack(), 2,
4132 __ mov(central_stack_sp, kReturnRegister0);
4133 __ Pop(argc_input, target_input, argv_input);
4134 }
4135
4136 static constexpr int kReturnAddressSlotOffset = 1 * kSystemPointerSize;
4137 static constexpr int kPadding = 1 * kSystemPointerSize;
4138 __ Sub_d(sp, central_stack_sp, Operand(kReturnAddressSlotOffset + kPadding));
4139 __ li(kSwitchFlagRegister, 1);
4140
4141 // Update the sp saved in the frame.
4142 // It will be used to calculate the callee pc during GC.
4143 // The pc is going to be on the new stack segment, so rewrite it here.
4144 __ Add_d(central_stack_sp, sp, Operand(kSystemPointerSize));
4145 __ St_d(central_stack_sp, MemOperand(fp, ExitFrameConstants::kSPOffset));
4146
4147 __ bind(&do_not_need_to_switch);
4148}
4149
4150void SwitchFromTheCentralStackIfNeeded(MacroAssembler* masm) {
4151 using ER = ExternalReference;
4152
4153 Label no_stack_change;
4154
4155 __ Branch(&no_stack_change, eq, kSwitchFlagRegister, Operand(zero_reg));
4156
4157 {
4159 __ PrepareCallCFunction(1, a0);
4160 __ li(kCArgRegs[0], ER::isolate_address(masm->isolate()));
4161 __ CallCFunction(ER::wasm_switch_from_the_central_stack(), 1,
4164 }
4165
4166 __ mov(sp, kOldSPRegister);
4167
4168 __ bind(&no_stack_change);
4169}
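// Editor's note: a pseudocode sketch, not part of the original file, of the
// switch/restore protocol implemented by the two helpers above. Every *Sketch
// name is a hypothetical stand-in; the two Wasm* calls stand for the
// ER::wasm_switch_to/from_the_central_stack external references invoked by the
// generated code, and the flag/old-SP locals mirror kSwitchFlagRegister and
// kOldSPRegister.
#ifdef CENTRAL_STACK_SWITCH_SKETCH
void RunCEntryOnCentralStackSketch(Isolate* isolate) {
  uintptr_t old_sp = ReadStackPointerSketch();  // kOldSPRegister
  bool switched = false;                        // kSwitchFlagRegister
  if (!IsOnCentralStackSketch(isolate)) {
    uintptr_t central_sp = WasmSwitchToTheCentralStackSketch(isolate, old_sp);
    // Leave room for the return-address slot plus one slot of padding, as the
    // generated code does before updating ExitFrameConstants::kSPOffset.
    WriteStackPointerSketch(central_sp - 2 * kSystemPointerSize);
    switched = true;
  }
  // ... run the C++ builtin on whichever stack is now active ...
  if (switched) {
    WasmSwitchFromTheCentralStackSketch(isolate);
    WriteStackPointerSketch(old_sp);
  }
}
#endif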
4170
4171} // namespace
4172
4173#endif // V8_ENABLE_WEBASSEMBLY
4174
4175void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
4176 ArgvMode argv_mode, bool builtin_exit_frame,
4177 bool switch_to_central_stack) {
4178 // Called from JavaScript; parameters are on stack as if calling JS function
4179 // a0: number of arguments including receiver
4180 // a1: pointer to C++ function
4181 // fp: frame pointer (restored after C call)
4182 // sp: stack pointer (restored as callee's sp after C call)
4183 // cp: current context (C callee-saved)
4184
4185 // If argv_mode == ArgvMode::kRegister:
4186 // a2: pointer to the first argument
4187
4188 using ER = ExternalReference;
4189
4190 // Move input arguments to more convenient registers.
4191 static constexpr Register argc_input = a0;
4192 static constexpr Register target_fun = s1; // C callee-saved
4193 static constexpr Register argv = a1;
4194 static constexpr Register scratch = a3;
4195 static constexpr Register argc_sav = s0; // C callee-saved
4196
4197 __ mov(target_fun, argv);
4198
4199 if (argv_mode == ArgvMode::kRegister) {
4200 // Move argv into the correct register.
4201 __ mov(argv, a2);
4202 } else {
4203 // Compute the argv pointer in a callee-saved register.
4204 __ Alsl_d(argv, argc_input, sp, kSystemPointerSizeLog2);
4205 __ Sub_d(argv, argv, kSystemPointerSize);
4206 }
4207
4208 // Enter the exit frame that transitions from JavaScript to C++.
4209 FrameScope scope(masm, StackFrame::MANUAL);
4210 __ EnterExitFrame(
4211 scratch, 0,
4212 builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
4213
4214 // Store a copy of argc in callee-saved registers for later.
4215 __ mov(argc_sav, argc_input);
4216
4217 // a0: number of arguments including receiver
4218 // s0: number of arguments including receiver (C callee-saved)
4219 // a1: pointer to first argument
4220 // s1: pointer to builtin function (C callee-saved)
4221
4222 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. The
4223 // LoongArch64 ABI does not require reserving argument slots on the stack.
4224
4225 __ AssertStackIsAligned();
4226
4227#if V8_ENABLE_WEBASSEMBLY
4228 if (switch_to_central_stack) {
4229 SwitchToTheCentralStackIfNeeded(masm, argc_input, target_fun, argv);
4230 }
4231#endif // V8_ENABLE_WEBASSEMBLY
4232
4233 // Call C built-in.
4234 // a0 = argc, a1 = argv, a2 = isolate, s1 = target_fun
4235 DCHECK_EQ(kCArgRegs[0], argc_input);
4236 DCHECK_EQ(kCArgRegs[1], argv);
4237 __ li(kCArgRegs[2], ER::isolate_address());
4238
4239 __ StoreReturnAddressAndCall(target_fun);
4240
4241#if V8_ENABLE_WEBASSEMBLY
4242 if (switch_to_central_stack) {
4243 SwitchFromTheCentralStackIfNeeded(masm);
4244 }
4245#endif // V8_ENABLE_WEBASSEMBLY
4246
4247 // Result returned in a0 or a1:a0 - do not destroy these registers!
4248
4249 // Check result for exception sentinel.
4250 Label exception_returned;
4251 // The returned value may be a trusted object, living outside of the main
4252 // pointer compression cage, so we need to use full pointer comparison here.
4253 __ CompareRootAndBranch(a0, RootIndex::kException, eq, &exception_returned,
4255
4256 // Check that there is no exception, otherwise we
4257 // should have returned the exception sentinel.
4258 if (v8_flags.debug_code) {
4259 Label okay;
4260 ER exception_address =
4261 ER::Create(IsolateAddressId::kExceptionAddress, masm->isolate());
4262 __ Ld_d(scratch, __ ExternalReferenceAsOperand(exception_address, no_reg));
4263 // Cannot use check here as it attempts to generate call into runtime.
4264 __ Branch(&okay, eq, scratch, RootIndex::kTheHoleValue);
4265 __ stop();
4266 __ bind(&okay);
4267 }
4268
4269 // Exit C frame and return.
4270 // a0:a1: result
4271 // sp: stack pointer
4272 // fp: frame pointer
4273 // s0: still holds argc (C callee-saved).
4274 __ LeaveExitFrame(scratch);
4275 if (argv_mode == ArgvMode::kStack) {
4276 DCHECK(!AreAliased(scratch, argc_sav));
4277 __ Alsl_d(sp, argc_sav, sp, kSystemPointerSizeLog2);
4278 }
4279
4280 __ Ret();
4281
4282 // Handling of exception.
4283 __ bind(&exception_returned);
4284
4285 ER pending_handler_context_address = ER::Create(
4286 IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
4287 ER pending_handler_entrypoint_address = ER::Create(
4288 IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
4289 ER pending_handler_fp_address =
4290 ER::Create(IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
4291 ER pending_handler_sp_address =
4292 ER::Create(IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
4293
4294 // Ask the runtime for help to determine the handler. This will set a0 to
4295 // contain the current exception, so don't clobber it.
4296 {
4297 FrameScope scope(masm, StackFrame::MANUAL);
4298 __ PrepareCallCFunction(3, 0, a0);
4299 __ mov(kCArgRegs[0], zero_reg);
4300 __ mov(kCArgRegs[1], zero_reg);
4301 __ li(kCArgRegs[2], ER::isolate_address());
4302 __ CallCFunction(ER::Create(Runtime::kUnwindAndFindExceptionHandler), 3,
4304 }
4305
4306 // Retrieve the handler context, SP and FP.
4307 __ li(cp, pending_handler_context_address);
4308 __ Ld_d(cp, MemOperand(cp, 0));
4309 __ li(sp, pending_handler_sp_address);
4310 __ Ld_d(sp, MemOperand(sp, 0));
4311 __ li(fp, pending_handler_fp_address);
4312 __ Ld_d(fp, MemOperand(fp, 0));
4313
4314 // If the handler is a JS frame, restore the context to the frame. Note that
4315 // the context will be set to (cp == 0) for non-JS frames.
4316 Label zero;
4317 __ Branch(&zero, eq, cp, Operand(zero_reg));
4319 __ bind(&zero);
4320
4321 // Clear c_entry_fp, like we do in `LeaveExitFrame`.
4322 ER c_entry_fp_address =
4323 ER::Create(IsolateAddressId::kCEntryFPAddress, masm->isolate());
4324 __ St_d(zero_reg, __ ExternalReferenceAsOperand(c_entry_fp_address, no_reg));
4325
4326 // Compute the handler entry address and jump to it.
4327 __ Ld_d(scratch, __ ExternalReferenceAsOperand(
4328 pending_handler_entrypoint_address, no_reg));
4329 __ Jump(scratch);
4330}
4331
4332#if V8_ENABLE_WEBASSEMBLY
4333void Builtins::Generate_WasmHandleStackOverflow(MacroAssembler* masm) {
4334 using ER = ExternalReference;
4335 Register frame_base = WasmHandleStackOverflowDescriptor::FrameBaseRegister();
4337 {
4338 DCHECK_NE(kCArgRegs[1], frame_base);
4339 DCHECK_NE(kCArgRegs[3], frame_base);
4340 __ mov(kCArgRegs[3], gap);
4341 __ mov(kCArgRegs[1], sp);
4342 __ sub_d(kCArgRegs[2], frame_base, kCArgRegs[1]);
4343 __ mov(kCArgRegs[4], fp);
4344 FrameScope scope(masm, StackFrame::INTERNAL);
4345 __ Push(kCArgRegs[3]);
4346 __ li(kCArgRegs[0], ER::isolate_address());
4347 __ PrepareCallCFunction(5, kScratchReg);
4348 __ CallCFunction(ER::wasm_grow_stack(), 5);
4349 __ Pop(gap);
4351 }
4352 Label call_runtime;
4353 // wasm_grow_stack returns zero if it cannot grow a stack.
4354 __ BranchShort(&call_runtime, eq, kReturnRegister0, Operand(zero_reg));
4355 {
4356 UseScratchRegisterScope temps(masm);
4357 Register new_fp = temps.Acquire();
4358 // Calculate old FP - SP offset to adjust FP accordingly to new SP.
4359 __ sub_d(new_fp, fp, sp);
4360 __ add_d(new_fp, kReturnRegister0, new_fp);
4361 __ mov(fp, new_fp);
4362 }
4363 __ mov(sp, kReturnRegister0);
4364 {
4365 UseScratchRegisterScope temps(masm);
4366 Register scratch = temps.Acquire();
4367 __ li(scratch, StackFrame::TypeToMarker(StackFrame::WASM_SEGMENT_START));
4369 }
4370 __ Ret();
4371
4372 __ bind(&call_runtime);
4373 // If wasm_grow_stack returns zero, the interruption or stack overflow
4374 // should be handled by a runtime call.
4375 {
4377 MemOperand(fp, WasmFrameConstants::kWasmInstanceDataOffset));
4378 __ LoadTaggedField(
4380 WasmTrustedInstanceData::kNativeContextOffset));
4381 FrameScope scope(masm, StackFrame::MANUAL);
4382 __ EnterFrame(StackFrame::INTERNAL);
4383 __ SmiTag(gap);
4384 __ Push(gap);
4385 __ CallRuntime(Runtime::kWasmStackGuard);
4386 __ LeaveFrame(StackFrame::INTERNAL);
4387 __ Ret();
4388 }
4389}
4390#endif // V8_ENABLE_WEBASSEMBLY
4391
4392void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
4393 Label done;
4394 Register result_reg = t0;
4395
4396 Register scratch = GetRegisterThatIsNotOneOf(result_reg);
4397 Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch);
4398 Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2);
4399 DoubleRegister double_scratch = kScratchDoubleReg;
4400
4401 // Account for saved regs.
4402 const int kArgumentOffset = 4 * kSystemPointerSize;
4403
4404 __ Push(result_reg);
4405 __ Push(scratch, scratch2, scratch3);
4406
4407 // Load double input.
4408 __ Fld_d(double_scratch, MemOperand(sp, kArgumentOffset));
4409
4410 // Try a conversion to a signed integer.
4411 __ TryInlineTruncateDoubleToI(result_reg, double_scratch, &done);
4412
4413 // Load the double value and perform a manual truncation.
4414 Register input_high = scratch2;
4415 Register input_low = scratch3;
4416
4417 // TryInlineTruncateDoubleToI destroys kScratchDoubleReg, so reload it.
4418 __ Ld_d(result_reg, MemOperand(sp, kArgumentOffset));
4419
4420 // Extract the biased exponent in result.
4421 __ bstrpick_d(input_high, result_reg,
4424
4425 __ Sub_d(scratch, input_high,
4427 Label not_zero;
4428 __ Branch(&not_zero, lt, scratch, Operand(zero_reg));
4429 __ mov(result_reg, zero_reg);
4430 __ Branch(&done);
4431 __ bind(&not_zero);
4432
4433 // Isolate the mantissa bits, and set the implicit '1'.
4434 __ bstrpick_d(input_low, result_reg, HeapNumber::kMantissaBits - 1, 0);
4435 __ Or(input_low, input_low, Operand(1ULL << HeapNumber::kMantissaBits));
4436
4437 Label lessthan_zero_reg;
4438 __ Branch(&lessthan_zero_reg, ge, result_reg, Operand(zero_reg));
4439 __ Sub_d(input_low, zero_reg, Operand(input_low));
4440 __ bind(&lessthan_zero_reg);
4441
4442 // Shift the mantissa bits into the correct place. We know that we have to shift
4443 // it left here, because exponent >= 63 >= kMantissaBits.
4444 __ Sub_d(input_high, input_high,
4446 __ sll_w(result_reg, input_low, input_high);
4447
4448 __ bind(&done);
4449
4450 __ St_d(result_reg, MemOperand(sp, kArgumentOffset));
4451 __ Pop(scratch, scratch2, scratch3);
4452 __ Pop(result_reg);
4453 __ Ret();
4454}
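// Editor's note: a standalone C++ sketch, not part of the original file, of
// the manual truncation performed above for values the inline FPU path
// rejects (per the comment above, the unbiased exponent is at least 63 on
// this path). The helper name and guard macro are illustrative; the bit
// manipulation mirrors the assembly.
#ifdef DOUBLE_TO_I_MANUAL_TRUNCATION_SKETCH
#include <cstdint>
#include <cstring>
int32_t ManualTruncateToInt32(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof(bits));
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;  // unbias
  // A left shift of 32 or more zeroes out the low word, so the result is 0
  // (this also covers Infinity and NaN).
  if (exponent - 52 >= 32) return 0;
  // Mantissa with the implicit leading 1, negated for negative inputs.
  int64_t mantissa =
      static_cast<int64_t>(bits & ((uint64_t{1} << 52) - 1)) | (int64_t{1} << 52);
  if (bits >> 63) mantissa = -mantissa;
  // Only exponents >= 63 reach this path, so the shift amount (exponent - 52)
  // is a left shift of the low 32 mantissa bits, mirroring sll_w above.
  return static_cast<int32_t>(static_cast<uint32_t>(mantissa)
                              << (exponent - 52));
}
#endif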
4455
4456void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm,
4457 CallApiCallbackMode mode) {
4458 // ----------- S t a t e -------------
4459 // CallApiCallbackMode::kOptimizedNoProfiling/kOptimized modes:
4460 // -- a1 : api function address
4461 // Both modes:
4462 // -- a2 : arguments count (not including the receiver)
4463 // -- a3 : FunctionTemplateInfo
4464 // -- cp : context
4465 // -- sp[0] : receiver
4466 // -- sp[8] : first argument
4467 // -- ...
4468 // -- sp[(argc) * 8] : last argument
4469 // -----------------------------------
4470
4471 Register function_callback_info_arg = kCArgRegs[0];
4472
4473 Register api_function_address = no_reg;
4474 Register argc = no_reg;
4475 Register func_templ = no_reg;
4476 Register topmost_script_having_context = no_reg;
4477 Register scratch = t0;
4478
4479 switch (mode) {
4481 argc = CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister();
4482 topmost_script_having_context = CallApiCallbackGenericDescriptor::
4484 func_templ =
4486 break;
4487
4490 // Caller context is always equal to current context because we don't
4491 // inline Api calls cross-context.
4492 topmost_script_having_context = kContextRegister;
4493 api_function_address =
4494 CallApiCallbackOptimizedDescriptor::ApiFunctionAddressRegister();
4496 func_templ =
4498 break;
4499 }
4500 DCHECK(!AreAliased(api_function_address, topmost_script_having_context, argc,
4501 func_templ, scratch));
4502
4503 using FCA = FunctionCallbackArguments;
4504 using ER = ExternalReference;
4505 using FC = ApiCallbackExitFrameConstants;
4506
4507 static_assert(FCA::kArgsLength == 6);
4508 static_assert(FCA::kNewTargetIndex == 5);
4509 static_assert(FCA::kTargetIndex == 4);
4510 static_assert(FCA::kReturnValueIndex == 3);
4511 static_assert(FCA::kContextIndex == 2);
4512 static_assert(FCA::kIsolateIndex == 1);
4513 static_assert(FCA::kUnusedIndex == 0);
4514
4515 // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
4516 //
4517 // Target state:
4518 // sp[0 * kSystemPointerSize]: kUnused <= FCA::implicit_args_
4519 // sp[1 * kSystemPointerSize]: kIsolate
4520 // sp[2 * kSystemPointerSize]: kContext
4521 // sp[3 * kSystemPointerSize]: undefined (kReturnValue)
4522 // sp[4 * kSystemPointerSize]: kTarget
4523 // sp[5 * kSystemPointerSize]: undefined (kNewTarget)
4524 // Existing state:
4525 // sp[6 * kSystemPointerSize]: <= FCA:::values_
4526
4527 __ StoreRootRelative(IsolateData::topmost_script_having_context_offset(),
4528 topmost_script_having_context);
4529 if (mode == CallApiCallbackMode::kGeneric) {
4530 api_function_address = ReassignRegister(topmost_script_having_context);
4531 }
4532
4533 // Reserve space on the stack.
4534 __ Sub_d(sp, sp, Operand(FCA::kArgsLength * kSystemPointerSize));
4535
4536 // kIsolate.
4537 __ li(scratch, ER::isolate_address());
4538 __ St_d(scratch, MemOperand(sp, FCA::kIsolateIndex * kSystemPointerSize));
4539
4540 // kContext.
4541 __ St_d(cp, MemOperand(sp, FCA::kContextIndex * kSystemPointerSize));
4542
4543 // kReturnValue.
4544 __ LoadRoot(scratch, RootIndex::kUndefinedValue);
4545 __ St_d(scratch, MemOperand(sp, FCA::kReturnValueIndex * kSystemPointerSize));
4546
4547 // kTarget.
4548 __ St_d(func_templ, MemOperand(sp, FCA::kTargetIndex * kSystemPointerSize));
4549
4550 // kNewTarget.
4551 __ St_d(scratch, MemOperand(sp, FCA::kNewTargetIndex * kSystemPointerSize));
4552
4553 // kUnused.
4554 __ St_d(scratch, MemOperand(sp, FCA::kUnusedIndex * kSystemPointerSize));
4555
4556 FrameScope frame_scope(masm, StackFrame::MANUAL);
4557 if (mode == CallApiCallbackMode::kGeneric) {
4558 __ LoadExternalPointerField(
4559 api_function_address,
4560 FieldMemOperand(func_templ,
4561 FunctionTemplateInfo::kMaybeRedirectedCallbackOffset),
4563 }
4564
4565 __ EnterExitFrame(scratch, FC::getExtraSlotsCountFrom<ExitFrameConstants>(),
4566 StackFrame::API_CALLBACK_EXIT);
4567
4568 MemOperand argc_operand = MemOperand(fp, FC::kFCIArgcOffset);
4569 {
4570 ASM_CODE_COMMENT_STRING(masm, "Initialize FunctionCallbackInfo");
4571 // FunctionCallbackInfo::length_.
4572 // TODO(ishell): pass JSParameterCount(argc) to simplify things on the
4573 // caller end.
4574 __ St_d(argc, argc_operand);
4575
4576 // FunctionCallbackInfo::implicit_args_.
4577 __ Add_d(scratch, fp, Operand(FC::kImplicitArgsArrayOffset));
4578 __ St_d(scratch, MemOperand(fp, FC::kFCIImplicitArgsOffset));
4579
4580 // FunctionCallbackInfo::values_ (points at JS arguments on the stack).
4581 __ Add_d(scratch, fp, Operand(FC::kFirstArgumentOffset));
4582 __ St_d(scratch, MemOperand(fp, FC::kFCIValuesOffset));
4583 }
4584
4585 __ RecordComment("v8::FunctionCallback's argument.");
4586 // function_callback_info_arg = v8::FunctionCallbackInfo&
4587 __ Add_d(function_callback_info_arg, fp,
4588 Operand(FC::kFunctionCallbackInfoOffset));
4589
4590 DCHECK(
4591 !AreAliased(api_function_address, scratch, function_callback_info_arg));
4592
4593 ExternalReference thunk_ref = ER::invoke_function_callback(mode);
4594 Register no_thunk_arg = no_reg;
4595
4596 MemOperand return_value_operand = MemOperand(fp, FC::kReturnValueOffset);
4597 static constexpr int kSlotsToDropOnReturn =
4598 FC::kFunctionCallbackInfoArgsLength + kJSArgcReceiverSlots;
4599
4600 const bool with_profiling =
4602 CallApiFunctionAndReturn(masm, with_profiling, api_function_address,
4603 thunk_ref, no_thunk_arg, kSlotsToDropOnReturn,
4604 &argc_operand, return_value_operand);
4605}
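// Editor's note: an illustrative view, not part of the original file, of the
// implicit_args block the code above builds on the stack. The struct and guard
// macro are assumptions, but the slot order matches the FCA::k*Index
// static_asserts and the "Target state" comment.
#ifdef FCA_IMPLICIT_ARGS_LAYOUT_SKETCH
struct ImplicitArgsLayoutSketch {
  Address unused;        // FCA::kUnusedIndex      == 0
  Address isolate;       // FCA::kIsolateIndex     == 1
  Address context;       // FCA::kContextIndex     == 2
  Address return_value;  // FCA::kReturnValueIndex == 3 (undefined initially)
  Address target;        // FCA::kTargetIndex      == 4 (the FunctionTemplateInfo)
  Address new_target;    // FCA::kNewTargetIndex   == 5 (undefined here)
  // FCA::values_ (the JS arguments) starts right after this block.
};
#endif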
4606
4607void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
4608 // ----------- S t a t e -------------
4609 // -- cp : context
4610 // -- a1 : receiver
4611 // -- a3 : accessor info
4612 // -- a0 : holder
4613 // -----------------------------------
4614
4615 Register name_arg = kCArgRegs[0];
4616 Register property_callback_info_arg = kCArgRegs[1];
4617
4618 Register api_function_address = a2;
4619 Register receiver = ApiGetterDescriptor::ReceiverRegister();
4622 Register scratch = a4;
4623 Register undef = a5;
4624 Register scratch2 = a6;
4625
4626 DCHECK(!AreAliased(receiver, holder, callback, scratch, undef, scratch2));
4627
4628 // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
4629 // name below the exit frame to make GC aware of them.
4630 using PCA = PropertyCallbackArguments;
4631 using ER = ExternalReference;
4632 using FC = ApiAccessorExitFrameConstants;
4633
4634 static_assert(PCA::kPropertyKeyIndex == 0);
4635 static_assert(PCA::kShouldThrowOnErrorIndex == 1);
4636 static_assert(PCA::kHolderIndex == 2);
4637 static_assert(PCA::kIsolateIndex == 3);
4638 static_assert(PCA::kHolderV2Index == 4);
4639 static_assert(PCA::kReturnValueIndex == 5);
4640 static_assert(PCA::kDataIndex == 6);
4641 static_assert(PCA::kThisIndex == 7);
4642 static_assert(PCA::kArgsLength == 8);
4643
4644 // Set up v8::PropertyCallbackInfo's (PCI) args_ on the stack as follows:
4645 // Target state:
4646 // sp[0 * kSystemPointerSize]: name <= PCI:args_
4647 // sp[1 * kSystemPointerSize]: kShouldThrowOnErrorIndex
4648 // sp[2 * kSystemPointerSize]: kHolderIndex
4649 // sp[3 * kSystemPointerSize]: kIsolateIndex
4650 // sp[4 * kSystemPointerSize]: kHolderV2Index
4651 // sp[5 * kSystemPointerSize]: kReturnValueIndex
4652 // sp[6 * kSystemPointerSize]: kDataIndex
4653 // sp[7 * kSystemPointerSize]: kThisIndex / receiver
4654
4655 __ LoadTaggedField(scratch,
4656 FieldMemOperand(callback, AccessorInfo::kDataOffset));
4657 __ LoadRoot(undef, RootIndex::kUndefinedValue);
4658 __ li(scratch2, ER::isolate_address());
4659 Register holderV2 = zero_reg;
4660 __ Push(receiver, scratch, // kThisIndex, kDataIndex
4661 undef, holderV2); // kReturnValueIndex, kHolderV2Index
4662 __ Push(scratch2, holder); // kIsolateIndex, kHolderIndex
4663
4664 // |name_arg| clashes with |holder|, so we need to push holder first.
4665 __ LoadTaggedField(name_arg,
4666 FieldMemOperand(callback, AccessorInfo::kNameOffset));
4667 static_assert(kDontThrow == 0);
4668 Register should_throw_on_error =
4669 zero_reg; // should_throw_on_error -> kDontThrow
4670 __ Push(should_throw_on_error, name_arg);
4671
4672 __ RecordComment("Load api_function_address");
4673 __ LoadExternalPointerField(
4674 api_function_address,
4675 FieldMemOperand(callback, AccessorInfo::kMaybeRedirectedGetterOffset),
4677
4678 FrameScope frame_scope(masm, StackFrame::MANUAL);
4679 __ EnterExitFrame(scratch, FC::getExtraSlotsCountFrom<ExitFrameConstants>(),
4680 StackFrame::API_ACCESSOR_EXIT);
4681
4682 __ RecordComment("Create v8::PropertyCallbackInfo object on the stack.");
4683 // property_callback_info_arg = v8::PropertyCallbackInfo&
4684 __ Add_d(property_callback_info_arg, fp, Operand(FC::kArgsArrayOffset));
4685
4686 DCHECK(!AreAliased(api_function_address, property_callback_info_arg, name_arg,
4687 callback, scratch, scratch2));
4688
4689#ifdef V8_ENABLE_DIRECT_HANDLE
4690 // name_arg = Local<Name>(name), name value was pushed to GC-ed stack space.
4691 // |name_arg| is already initialized above.
4692#else
4693 // name_arg = Local<Name>(&name), which is &args_array[kPropertyKeyIndex].
4694 static_assert(PCA::kPropertyKeyIndex == 0);
4695 __ mov(name_arg, property_callback_info_arg);
4696#endif
4697
4698 ER thunk_ref = ER::invoke_accessor_getter_callback();
4699 // Pass AccessorInfo to thunk wrapper in case profiler or side-effect
4700 // checking is enabled.
4701 Register thunk_arg = callback;
4702
4703 MemOperand return_value_operand = MemOperand(fp, FC::kReturnValueOffset);
4704 static constexpr int kSlotsToDropOnReturn =
4705 FC::kPropertyCallbackInfoArgsLength;
4706 MemOperand* const kUseStackSpaceConstant = nullptr;
4707
4708 const bool with_profiling = true;
4709 CallApiFunctionAndReturn(masm, with_profiling, api_function_address,
4710 thunk_ref, thunk_arg, kSlotsToDropOnReturn,
4711 kUseStackSpaceConstant, return_value_operand);
4712}
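// Editor's note: the analogous illustrative view, not part of the original
// file, of the PropertyCallbackArguments array built above. The struct and
// guard macro are assumptions; the slot order matches the PCA::k*Index
// static_asserts.
#ifdef PCA_ARGS_LAYOUT_SKETCH
struct PropertyCallbackArgsLayoutSketch {
  Address property_key;           // PCA::kPropertyKeyIndex        == 0
  Address should_throw_on_error;  // PCA::kShouldThrowOnErrorIndex == 1
  Address holder;                 // PCA::kHolderIndex             == 2
  Address isolate;                // PCA::kIsolateIndex            == 3
  Address holder_v2;              // PCA::kHolderV2Index           == 4
  Address return_value;           // PCA::kReturnValueIndex        == 5
  Address data;                   // PCA::kDataIndex               == 6
  Address this_object;            // PCA::kThisIndex               == 7 (receiver)
};
#endif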
4713
4714void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
4715 // The sole purpose of DirectCEntry is for movable callers (e.g. any general
4716 // purpose InstructionStream object) to be able to call into C functions that
4717 // may trigger GC and thus move the caller.
4718 //
4719 // DirectCEntry places the return address on the stack (updated by the GC),
4720 // making the call GC safe. The irregexp backend relies on this.
4721
4722 __ St_d(ra, MemOperand(sp, 0)); // Store the return address.
4723 __ Call(t5); // Call the C++ function.
4724 __ Ld_d(ra, MemOperand(sp, 0)); // Return to calling code.
4725
4726 // TODO(LOONG_dev): LOONG64 Check this assert.
4727 if (v8_flags.debug_code && v8_flags.enable_slow_asserts) {
4728 // In case of an error the return address may point to a memory area
4729 // filled with kZapValue by the GC. Dereference the address and check for
4730 // this.
4731 __ Ld_d(a4, MemOperand(ra, 0));
4732 __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, a4,
4733 Operand(reinterpret_cast<uint64_t>(kZapValue)));
4734 }
4735
4736 __ Jump(ra);
4737}
4738
4739namespace {
4740
4741// This code tries to be close to ia32 code so that any changes can be
4742// easily ported.
4743void Generate_DeoptimizationEntry(MacroAssembler* masm,
4744 DeoptimizeKind deopt_kind) {
4745 Isolate* isolate = masm->isolate();
4746
4747 // Unlike on ARM we don't save all the registers, just the useful ones.
4748 // For the rest, there are gaps on the stack, so the offsets remain the same.
4750
4751 RegList restored_regs = kJSCallerSaved | kCalleeSaved;
4752 RegList saved_regs = restored_regs | sp | ra;
4753
4754 const int kSimd128RegsSize = kSimd128Size * Simd128Register::kNumRegisters;
4755
4756 // Save all allocatable simd128 / double registers before messing with them.
4757 // TODO(loong64): Add simd support here.
4758 __ Sub_d(sp, sp, Operand(kSimd128RegsSize));
4759 const RegisterConfiguration* config = RegisterConfiguration::Default();
4760 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
4761 int code = config->GetAllocatableDoubleCode(i);
4762 const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
4763 int offset = code * kSimd128Size;
4764 __ Fst_d(fpu_reg, MemOperand(sp, offset));
4765 }
4766
4767 // Push saved_regs (needed to populate FrameDescription::registers_).
4768 // Leave gaps for other registers.
4769 __ Sub_d(sp, sp, kNumberOfRegisters * kSystemPointerSize);
4770 for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
4771 if ((saved_regs.bits() & (1 << i)) != 0) {
4773 }
4774 }
4775
4776 __ li(a2,
4777 ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate));
4778 __ St_d(fp, MemOperand(a2, 0));
4779
4780 const int kSavedRegistersAreaSize =
4781 (kNumberOfRegisters * kSystemPointerSize) + kSimd128RegsSize;
4782
4783 // Get the address of the location in the code object (a2) (return
4784 // address for lazy deoptimization) and compute the fp-to-sp delta in
4785 // register a3.
4786 __ mov(a2, ra);
4787 __ Add_d(a3, sp, Operand(kSavedRegistersAreaSize));
4788
4789 __ sub_d(a3, fp, a3);
4790
4791 // Allocate a new deoptimizer object.
4792 __ PrepareCallCFunction(5, a4);
4793 // Pass five arguments, according to the LoongArch64 ABI.
4794 __ mov(a0, zero_reg);
4795 Label context_check;
4797 __ JumpIfSmi(a1, &context_check);
4799 __ bind(&context_check);
4800 __ li(a1, Operand(static_cast<int>(deopt_kind)));
4801 // a2: code address or 0 already loaded.
4802 // a3: already has fp-to-sp delta.
4804
4805 // Call Deoptimizer::New().
4806 {
4807 AllowExternalCallThatCantCauseGC scope(masm);
4808 __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
4809 }
4810
4811 // Preserve "deoptimizer" object in register a0 and get the input
4812 // frame descriptor pointer to a1 (deoptimizer->input_);
4813 // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
4814 __ Ld_d(a1, MemOperand(a0, Deoptimizer::input_offset()));
4815
4816 // Copy core registers into FrameDescription::registers_[kNumRegisters].
4818 for (int i = 0; i < kNumberOfRegisters; i++) {
4819 int offset =
4821 if ((saved_regs.bits() & (1 << i)) != 0) {
4822 __ Ld_d(a2, MemOperand(sp, i * kSystemPointerSize));
4823 __ St_d(a2, MemOperand(a1, offset));
4824 } else if (v8_flags.debug_code) {
4825 __ li(a2, Operand(kDebugZapValue));
4826 __ St_d(a2, MemOperand(a1, offset));
4827 }
4828 }
4829
4830 // Copy simd128 / double registers to the input frame.
4831 // TODO(loong64): Add simd support here.
4832 int simd128_regs_offset = FrameDescription::simd128_registers_offset();
4833 for (int i = 0; i < config->num_allocatable_simd128_registers(); ++i) {
4834 int code = config->GetAllocatableSimd128Code(i);
4835 int dst_offset = code * kSimd128Size + simd128_regs_offset;
4836 int src_offset =
4838 __ Fld_d(f0, MemOperand(sp, src_offset));
4839 __ Fst_d(f0, MemOperand(a1, dst_offset));
4840 }
4841
4842 // Remove the saved registers from the stack.
4843 __ Add_d(sp, sp, Operand(kSavedRegistersAreaSize));
4844
4845 // Compute a pointer to the unwinding limit in register a2; that is
4846 // the first stack slot not part of the input frame.
4848 __ add_d(a2, a2, sp);
4849
4850 // Unwind the stack down to - but not including - the unwinding
4851 // limit and copy the contents of the activation frame to the input
4852 // frame description.
4853 __ Add_d(a3, a1, Operand(FrameDescription::frame_content_offset()));
4854 Label pop_loop;
4855 Label pop_loop_header;
4856 __ Branch(&pop_loop_header);
4857 __ bind(&pop_loop);
4858 __ Pop(a4);
4859 __ St_d(a4, MemOperand(a3, 0));
4860 __ addi_d(a3, a3, sizeof(uint64_t));
4861 __ bind(&pop_loop_header);
4862 __ BranchShort(&pop_loop, ne, a2, Operand(sp));
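// The loop above walks sp up to the unwinding limit in a2, copying every
// slot of the optimized frame into FrameDescription::frame_content_.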
4863 // Compute the output frame in the deoptimizer.
4864 __ Push(a0); // Preserve deoptimizer object across call.
4865 // a0: deoptimizer object; a1: scratch.
4866 __ PrepareCallCFunction(1, a1);
4867 // Call Deoptimizer::ComputeOutputFrames().
4868 {
4869 AllowExternalCallThatCantCauseGC scope(masm);
4870 __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
4871 }
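// Deoptimizer::ComputeOutputFrames() fills deoptimizer->output_ with one
// FrameDescription per frame to materialize; the loops below turn those
// descriptions into real stack frames.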
4872 __ Pop(a0); // Restore deoptimizer object (class Deoptimizer).
4873
4874 __ Ld_d(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
4875
4876 // Replace the current (input) frame with the output frames.
4877 Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
4878 // Outer loop state: a4 = current "FrameDescription** output_",
4879 // a1 = one past the last FrameDescription**.
4880 __ Ld_w(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
4881 __ Ld_d(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
4882 __ Alsl_d(a1, a1, a4, kSystemPointerSizeLog2);
4883 __ Branch(&outer_loop_header);
4884
4885 __ bind(&outer_push_loop);
4886 Register current_frame = a2;
4887 Register frame_size = a3;
4888 __ Ld_d(current_frame, MemOperand(a4, 0));
4889 __ Ld_d(frame_size,
4890 MemOperand(current_frame, FrameDescription::frame_size_offset()));
4891 __ Branch(&inner_loop_header);
4892
4893 __ bind(&inner_push_loop);
4894 __ Sub_d(frame_size, frame_size, Operand(sizeof(uint64_t)));
4895 __ Add_d(a6, current_frame, Operand(frame_size));
4896 __ Ld_d(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
4897 __ Push(a7);
4898
4899 __ bind(&inner_loop_header);
4900 __ BranchShort(&inner_push_loop, ne, frame_size, Operand(zero_reg));
4901
4902 __ Add_d(a4, a4, Operand(kSystemPointerSize));
4903
4904 __ bind(&outer_loop_header);
4905 __ BranchShort(&outer_push_loop, lt, a4, Operand(a1));
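// The outer loop visits each FrameDescription* in output_[0..output_count),
// while the inner loop pushes that frame's contents from the highest offset
// downwards, so the materialized frames end up contiguous on the stack.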
4906
4907 // TODO(loong64): Add simd support here.
4908 for (int i = 0; i < config->num_allocatable_simd128_registers(); ++i) {
4909 int code = config->GetAllocatableSimd128Code(i);
4910 const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
4911 int src_offset = code * kSimd128Size + simd128_regs_offset;
4912 __ Fld_d(fpu_reg, MemOperand(current_frame, src_offset));
4913 }
4914
4915 // Push pc and continuation from the last output frame.
4916 __ Ld_d(a6, MemOperand(current_frame, FrameDescription::pc_offset()));
4917 __ Push(a6);
4918 __ Ld_d(a6,
4919 MemOperand(current_frame, FrameDescription::continuation_offset()));
4920 __ Push(a6);
4921
4922 // Technically restoring 'at' should work unless zero_reg is also restored
4923 // but it's safer to check for this.
4924 DCHECK(!(restored_regs.has(t7)));
4925 // Restore the registers from the last output frame.
4926 __ mov(t7, current_frame);
4927 for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
4928 int offset =
4929 (i * kSystemPointerSize) + FrameDescription::registers_offset();
4930 if ((restored_regs.bits() & (1 << i)) != 0) {
4931 __ Ld_d(ToRegister(i), MemOperand(t7, offset));
4932 }
4933 }
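// t7 is used as the base register here because it is not in restored_regs
// (see the DCHECK above), so it stays valid until every other register has
// been reloaded from the last output frame.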
4934
4935 // If the continuation is non-zero (JavaScript), branch to the continuation.
4936 // For Wasm just return to the pc from the last output frame in the lr
4937 // register.
4938 Label end;
4939 __ Pop(t7); // Get continuation, leave pc on stack.
4940 __ Pop(ra);
4941 __ BranchShort(&end, eq, t7, Operand(zero_reg));
4942 __ Jump(t7);
4943 __ bind(&end);
4944 __ Jump(ra);
4945}
4946
4947} // namespace
4948
4949void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
4950 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
4951}
4952
4953void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
4954 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
4955}
4956
4957// If there is baseline code on the shared function info, converts an
4958// interpreter frame into a baseline frame and continues execution in baseline
4959// code. Otherwise execution continues with bytecode.
4960void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
4961 MacroAssembler* masm) {
4962 Label start;
4963 __ bind(&start);
4964
4965 // Get function from the frame.
4966 Register closure = a1;
4967 __ Ld_d(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
4968
4969 // Get the InstructionStream object from the shared function info.
4970 Register code_obj = s1;
4971 __ LoadTaggedField(
4972 code_obj,
4973 FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
4974
4975 ResetSharedFunctionInfoAge(masm, code_obj);
4976
4977 __ LoadTrustedPointerField(
4978 code_obj,
4979 FieldMemOperand(code_obj, SharedFunctionInfo::kTrustedFunctionDataOffset),
4980 kCodeIndirectPointerTag);
4981
4982 // For OSR entry it is safe to assume we always have baseline code.
4983 if (v8_flags.debug_code) {
4984 __ GetObjectType(code_obj, t2, t2);
4985 __ Assert(eq, AbortReason::kExpectedBaselineData, t2, Operand(CODE_TYPE));
4986 AssertCodeIsBaseline(masm, code_obj, t2);
4987 }
4988
4989 // Load the feedback cell and vector.
4990 Register feedback_cell = a2;
4991 Register feedback_vector = t5;
4992 __ LoadTaggedField(feedback_cell,
4993 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
4994 __ LoadTaggedField(
4995 feedback_vector,
4996 FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset));
4997
4998 Label install_baseline_code;
4999 // Check if feedback vector is valid. If not, call prepare for baseline to
5000 // allocate it.
5001 __ JumpIfObjectType(&install_baseline_code, ne, feedback_vector,
5002 FEEDBACK_VECTOR_TYPE, t2);
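// Without a valid feedback vector there is no baseline code to enter, so
// jump to install_baseline_code below, which calls into the runtime and then
// retries from the start.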
5003
5004 // Save BytecodeOffset from the stack frame.
5007 // Replace bytecode offset with feedback cell.
5010 __ St_d(feedback_cell,
5012 feedback_cell = no_reg;
5013 // Update feedback vector cache.
5016 __ St_d(feedback_vector,
5018 feedback_vector = no_reg;
5019
5020 // Compute baseline pc for bytecode offset.
5021 Register get_baseline_pc = a3;
5022 __ li(get_baseline_pc,
5023 ExternalReference::baseline_pc_for_next_executed_bytecode());
5024
5028
5029 // Get bytecode array from the stack frame.
5030 __ Ld_d(kInterpreterBytecodeArrayRegister,
5031 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
5032 // Save the accumulator register, since it's clobbered by the below call.
5033 __ Push(kInterpreterAccumulatorRegister);
5034 {
5035 __ Move(kCArgRegs[0], code_obj);
5036 __ Move(kCArgRegs[1], kInterpreterBytecodeOffsetRegister);
5037 __ Move(kCArgRegs[2], kInterpreterBytecodeArrayRegister);
5038 FrameScope scope(masm, StackFrame::INTERNAL);
5039 __ PrepareCallCFunction(3, 0, a4);
5040 __ CallCFunction(get_baseline_pc, 3, 0);
5041 }
5042 __ LoadCodeInstructionStart(code_obj, code_obj, kJSEntrypointTag);
5043 __ Add_d(code_obj, code_obj, kReturnRegister0);
5044 __ Pop(kInterpreterAccumulatorRegister);
5045
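// The C call above returns the baseline pc offset for the current bytecode;
// adding it to the code object's instruction start yields the absolute entry
// pc used for OSR below.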
5046 // TODO(liuyu): Remove Ld as arm64 after register reallocation.
5049 Generate_OSREntry(masm, code_obj);
5050 __ Trap(); // Unreachable.
5051
5052 __ bind(&install_baseline_code);
5053 {
5054 FrameScope scope(masm, StackFrame::INTERNAL);
5055 __ Push(kInterpreterAccumulatorRegister);
5056 __ Push(closure);
5057 __ CallRuntime(Runtime::kInstallBaselineCode, 1);
5058 __ Pop(kInterpreterAccumulatorRegister);
5059 }
5060 // Retry from the start after installing baseline code.
5061 __ Branch(&start);
5062}
5063
5064void Builtins::Generate_RestartFrameTrampoline(MacroAssembler* masm) {
5065 // Restart the current frame:
5066 // - Look up current function on the frame.
5067 // - Leave the frame.
5068 // - Restart the frame by calling the function.
5069
5070 __ Ld_d(a1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
5071 __ Ld_d(a0, MemOperand(fp, StandardFrameConstants::kArgCOffset));
5072 __ LeaveFrame(StackFrame::INTERPRETED);
5073
5074 // The arguments are already on the stack (including any necessary
5075 // padding), so we should not try to massage them again.
5076#ifdef V8_ENABLE_LEAPTIERING
5077 __ InvokeFunction(a1, a0, InvokeType::kJump,
5078 ArgumentAdaptionMode::kDontAdapt);
5079#else
5080 __ li(a2, Operand(kDontAdaptArgumentsSentinel));
5081 __ InvokeFunction(a1, a2, a0, InvokeType::kJump);
5082#endif
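// Both paths skip argument adaption: the caller's arguments are reused
// exactly as they already sit on the stack.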
5083}
5084
5085#undef __
5086
5087} // namespace internal
5088} // namespace v8
5089
5090#endif // V8_TARGET_ARCH_LOONG64