V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
builtins-arm64.cc
1// Copyright 2013 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#if V8_TARGET_ARCH_ARM64
6
12// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
15#include "src/debug/debug.h"
19#include "src/heap/heap-inl.h"
21#include "src/objects/cell.h"
22#include "src/objects/foreign.h"
27#include "src/objects/smi.h"
28#include "src/runtime/runtime.h"
29
30#if V8_ENABLE_WEBASSEMBLY
33#include "src/wasm/stacks.h"
37#endif // V8_ENABLE_WEBASSEMBLY
38
39#if defined(V8_OS_WIN)
41#endif // V8_OS_WIN
42
43namespace v8 {
44namespace internal {
45
46#define __ ACCESS_MASM(masm)
47
48namespace {
49constexpr int kReceiverOnStackSize = kSystemPointerSize;
50} // namespace
51
52void Builtins::Generate_Adaptor(MacroAssembler* masm,
53 int formal_parameter_count, Address address) {
54 __ CodeEntry();
55
57 __ TailCallBuiltin(
58 Builtins::AdaptorWithBuiltinExitFrame(formal_parameter_count));
59}
60
61namespace {
62
63void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
64 // ----------- S t a t e -------------
65 // -- x0 : number of arguments
66 // -- x1 : constructor function
67 // -- x3 : new target
68 // -- cp : context
69 // -- lr : return address
70 // -- sp[...]: constructor arguments
71 // -----------------------------------
72
73 ASM_LOCATION("Builtins::Generate_JSConstructStubHelper");
74 Label stack_overflow;
75
76 __ StackOverflowCheck(x0, &stack_overflow);
77
78 // Enter a construct frame.
79 {
80 FrameScope scope(masm, StackFrame::CONSTRUCT);
81 Label already_aligned;
82 Register argc = x0;
83
84 if (v8_flags.debug_code) {
85 // Check that FrameScope pushed the context on to the stack already.
86 __ Peek(x2, 0);
87 __ Cmp(x2, cp);
88 __ Check(eq, AbortReason::kUnexpectedValue);
89 }
90
91 // Push number of arguments.
92 __ Push(argc, padreg);
93
94 // Round up to maintain alignment.
95 Register slot_count = x2;
96 Register slot_count_without_rounding = x12;
97 __ Add(slot_count_without_rounding, argc, 1);
98 __ Bic(slot_count, slot_count_without_rounding, 1);
99 __ Claim(slot_count);
100
101 // Preserve the incoming parameters on the stack.
102 __ LoadRoot(x4, RootIndex::kTheHoleValue);
103
104 // Compute a pointer to the slot immediately above the location on the
105 // stack to which arguments will be later copied.
106 __ SlotAddress(x2, argc);
107
108 // Store padding, if needed.
109 __ Tbnz(slot_count_without_rounding, 0, &already_aligned);
110 __ Str(padreg, MemOperand(x2));
111 __ Bind(&already_aligned);
112
113 // TODO(victorgomes): When the arguments adaptor is completely removed, we
114 // should get the formal parameter count and copy the arguments in its
115 // correct position (including any undefined), instead of delaying this to
116 // InvokeFunction.
117
118 // Copy arguments to the expression stack.
119 {
120 Register count = x2;
121 Register dst = x10;
122 Register src = x11;
123 __ SlotAddress(dst, 0);
124 // Poke the hole (receiver).
125 __ Str(x4, MemOperand(dst));
126 __ Add(dst, dst, kSystemPointerSize); // Skip receiver.
127 __ Add(src, fp,
128 StandardFrameConstants::kCallerSPOffset +
129 kSystemPointerSize); // Skip receiver.
130 __ Sub(count, argc, kJSArgcReceiverSlots);
131 __ CopyDoubleWords(dst, src, count);
132 }
133
134 // ----------- S t a t e -------------
135 // -- x0: number of arguments (untagged)
136 // -- x1: constructor function
137 // -- x3: new target
138 // If argc is odd:
139 // -- sp[0*kSystemPointerSize]: the hole (receiver)
140 // -- sp[1*kSystemPointerSize]: argument 1
141 // -- ...
142 // -- sp[(n-1)*kSystemPointerSize]: argument (n - 1)
143 // -- sp[(n+0)*kSystemPointerSize]: argument n
144 // -- sp[(n+1)*kSystemPointerSize]: padding
145 // -- sp[(n+2)*kSystemPointerSize]: padding
146 // -- sp[(n+3)*kSystemPointerSize]: number of arguments
147 // -- sp[(n+4)*kSystemPointerSize]: context (pushed by FrameScope)
148 // If argc is even:
149 // -- sp[0*kSystemPointerSize]: the hole (receiver)
150 // -- sp[1*kSystemPointerSize]: argument 1
151 // -- ...
152 // -- sp[(n-1)*kSystemPointerSize]: argument (n - 1)
153 // -- sp[(n+0)*kSystemPointerSize]: argument n
154 // -- sp[(n+1)*kSystemPointerSize]: padding
155 // -- sp[(n+2)*kSystemPointerSize]: number of arguments
156 // -- sp[(n+3)*kSystemPointerSize]: context (pushed by FrameScope)
157 // -----------------------------------
158
159 // Call the function.
160 __ InvokeFunctionWithNewTarget(x1, x3, argc, InvokeType::kCall);
161
162 // Restore the context from the frame.
163 __ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
164 // Restore arguments count from the frame. Use fp relative
165 // addressing to avoid the circular dependency between padding existence and
166 // argc parity.
167 __ Ldr(x1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
168 // Leave construct frame.
169 }
170
171 // Remove caller arguments from the stack and return.
172 __ DropArguments(x1);
173 __ Ret();
174
175 __ Bind(&stack_overflow);
176 {
177 FrameScope scope(masm, StackFrame::INTERNAL);
178 __ CallRuntime(Runtime::kThrowStackOverflow);
179 __ Unreachable();
180 }
181}
182
183} // namespace
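
[Editorial aside, not part of builtins-arm64.cc] The Add/Bic pair in the helper above rounds the claimed slot count up to an even number so that sp stays 16-byte aligned on arm64; the Tbnz/Str pair then parks padreg in the spare slot when argc was odd. A minimal C++ sketch of that arithmetic (the RoundSlotsUpToEven name is invented for illustration):

// Editorial sketch only; not V8 code.
#include <cstdint>

// Mirrors "Add x, argc, 1; Bic x, x, 1": round up to the next even slot count.
constexpr int64_t RoundSlotsUpToEven(int64_t slots) {
  return (slots + 1) & ~int64_t{1};
}

static_assert(RoundSlotsUpToEven(3) == 4, "odd counts gain one padding slot");
static_assert(RoundSlotsUpToEven(4) == 4, "even counts are unchanged");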
184
185// The construct stub for ES5 constructor functions and ES6 class constructors.
186void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
187 // ----------- S t a t e -------------
188 // -- x0 : number of arguments
189 // -- x1 : constructor function
190 // -- x3 : new target
191 // -- lr : return address
192 // -- cp : context pointer
193 // -- sp[...]: constructor arguments
194 // -----------------------------------
195
196 ASM_LOCATION("Builtins::Generate_JSConstructStubGeneric");
197
198 FrameScope scope(masm, StackFrame::MANUAL);
199 // Enter a construct frame.
200 __ EnterFrame(StackFrame::CONSTRUCT);
201 Label post_instantiation_deopt_entry, not_create_implicit_receiver;
202
203 if (v8_flags.debug_code) {
204 // Check that FrameScope pushed the context on to the stack already.
205 __ Peek(x2, 0);
206 __ Cmp(x2, cp);
207 __ Check(eq, AbortReason::kUnexpectedValue);
208 }
209
210 // Preserve the incoming parameters on the stack.
211 __ Push(x0, x1, padreg, x3);
212
213 // ----------- S t a t e -------------
214 // -- sp[0*kSystemPointerSize]: new target
215 // -- sp[1*kSystemPointerSize]: padding
216 // -- x1 and sp[2*kSystemPointerSize]: constructor function
217 // -- sp[3*kSystemPointerSize]: number of arguments
218 // -- sp[4*kSystemPointerSize]: context (pushed by FrameScope)
219 // -----------------------------------
220
221 __ LoadTaggedField(
222 x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
223 __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
224 __ DecodeField<SharedFunctionInfo::FunctionKindBits>(w4);
225 __ JumpIfIsInRange(
226 w4, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
227 static_cast<uint32_t>(FunctionKind::kDerivedConstructor),
228 &not_create_implicit_receiver);
229
230 // If not derived class constructor: Allocate the new receiver object.
231 __ CallBuiltin(Builtin::kFastNewObject);
232
233 __ B(&post_instantiation_deopt_entry);
234
235 // Else: use TheHoleValue as receiver for constructor call
236 __ Bind(&not_create_implicit_receiver);
237 __ LoadRoot(x0, RootIndex::kTheHoleValue);
238
239 // ----------- S t a t e -------------
240 // -- x0: receiver
241 // -- Slot 4 / sp[0*kSystemPointerSize]: new target
242 // -- Slot 3 / sp[1*kSystemPointerSize]: padding
243 // -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
244 // -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments
245 // -- Slot 0 / sp[4*kSystemPointerSize]: context
246 // -----------------------------------
247 // Deoptimizer enters here.
248 masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
249 masm->pc_offset());
250
251 __ Bind(&post_instantiation_deopt_entry);
252
253 // Restore new target from the top of the stack.
254 __ Peek(x3, 0 * kSystemPointerSize);
255
256 // Restore constructor function and argument count.
257 __ Ldr(x1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
258 __ Ldr(x12, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
259
260 // Copy arguments to the expression stack. The called function pops the
261 // receiver along with its arguments, so we need an extra receiver on the
262 // stack, in case we have to return it later.
263
264 // Overwrite the new target with a receiver.
265 __ Poke(x0, 0);
266
267 // Push two further copies of the receiver. One will be popped by the called
268 // function. The second acts as padding if the number of arguments plus
269 // receiver is odd - pushing receiver twice avoids branching. It also means
270 // that we don't have to handle the even and odd cases specially on
271 // InvokeFunction's return, as top of stack will be the receiver in either
272 // case.
273 __ Push(x0, x0);
274
275 // ----------- S t a t e -------------
276 // -- x3: new target
277 // -- x12: number of arguments (untagged)
278 // -- sp[0*kSystemPointerSize]: implicit receiver (overwrite if argc
279 // odd)
280 // -- sp[1*kSystemPointerSize]: implicit receiver
281 // -- sp[2*kSystemPointerSize]: implicit receiver
282 // -- sp[3*kSystemPointerSize]: padding
283 // -- x1 and sp[4*kSystemPointerSize]: constructor function
284 // -- sp[5*kSystemPointerSize]: number of arguments
285 // -- sp[6*kSystemPointerSize]: context
286 // -----------------------------------
287
288 // Round the number of arguments down to the next even number, and claim
289 // slots for the arguments. If the number of arguments was odd, the last
290 // argument will overwrite one of the receivers pushed above.
291 Register argc_without_receiver = x11;
292 __ Sub(argc_without_receiver, x12, kJSArgcReceiverSlots);
293 __ Bic(x10, x12, 1);
294
295 // Check if we have enough stack space to push all arguments.
296 Label stack_overflow;
297 __ StackOverflowCheck(x10, &stack_overflow);
298 __ Claim(x10);
299
300 // TODO(victorgomes): When the arguments adaptor is completely removed, we
301 // should get the formal parameter count and copy the arguments in its
302 // correct position (including any undefined), instead of delaying this to
303 // InvokeFunction.
304
305 // Copy the arguments.
306 {
307 Register count = x2;
308 Register dst = x10;
309 Register src = x11;
310 __ Mov(count, argc_without_receiver);
311 __ Poke(x0, 0); // Add the receiver.
312 __ SlotAddress(dst, 1); // Skip receiver.
313 __ Add(src, fp,
314 StandardFrameConstants::kCallerSPOffset + kSystemPointerSize);
315 __ CopyDoubleWords(dst, src, count);
316 }
317
318 // Call the function.
319 __ Mov(x0, x12);
320 __ InvokeFunctionWithNewTarget(x1, x3, x0, InvokeType::kCall);
321
322 // If the result is an object (in the ECMA sense), we should get rid
323 // of the receiver and use the result; see ECMA-262 section 13.2.2-7
324 // on page 74.
325 Label use_receiver, do_throw, leave_and_return, check_receiver;
326
327 // If the result is undefined, we jump out to using the implicit receiver.
328 __ CompareRoot(x0, RootIndex::kUndefinedValue);
329 __ B(ne, &check_receiver);
330
331 // Throw away the result of the constructor invocation and use the
332 // on-stack receiver as the result.
333 __ Bind(&use_receiver);
334 __ Peek(x0, 0 * kSystemPointerSize);
335 __ CompareRoot(x0, RootIndex::kTheHoleValue);
336 __ B(eq, &do_throw);
337
338 __ Bind(&leave_and_return);
339 // Restore arguments count from the frame.
340 __ Ldr(x1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
341 // Leave construct frame.
342 __ LeaveFrame(StackFrame::CONSTRUCT);
343 // Remove caller arguments from the stack and return.
344 __ DropArguments(x1);
345 __ Ret();
346
347 // Otherwise we do a smi check and fall through to check if the return value
348 // is a valid receiver.
349 __ bind(&check_receiver);
350
351 // If the result is a smi, it is *not* an object in the ECMA sense.
352 __ JumpIfSmi(x0, &use_receiver);
353
354 // Check if the type of the result is not an object in the ECMA sense.
355 __ JumpIfJSAnyIsNotPrimitive(x0, x4, &leave_and_return);
356 __ B(&use_receiver);
357
358 __ Bind(&do_throw);
359 // Restore the context from the frame.
360 __ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
361 __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
362 __ Unreachable();
363
364 __ Bind(&stack_overflow);
365 // Restore the context from the frame.
366 __ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
367 __ CallRuntime(Runtime::kThrowStackOverflow);
368 __ Unreachable();
369}
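
[Editorial aside, not part of builtins-arm64.cc] The check_receiver/use_receiver blocks above encode the rule that a constructor call returns the constructor's explicit return value only when that value is an object in the ECMA sense; anything else falls back to the implicit receiver kept on the stack, and a derived constructor whose receiver slot still holds the hole throws. A hedged C++ sketch of that decision, using invented stand-in types rather than V8's object model:

// Editorial sketch only; types and names are stand-ins, not V8 API.
enum class ConstructOutcome { kUseResult, kUseReceiver, kThrowNonObject };

struct ValueInfo {
  bool is_undefined;
  bool is_js_object;   // "object in the ECMA sense": not a Smi, not a primitive
  bool is_the_hole;    // receiver slot of a derived constructor
};

ConstructOutcome SelectConstructResult(const ValueInfo& result,
                                       const ValueInfo& stack_receiver) {
  if (!result.is_undefined && result.is_js_object) {
    return ConstructOutcome::kUseResult;          // leave_and_return
  }
  if (stack_receiver.is_the_hole) {
    return ConstructOutcome::kThrowNonObject;     // do_throw
  }
  return ConstructOutcome::kUseReceiver;          // use_receiver
}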
370void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
371 Generate_JSBuiltinsConstructStubHelper(masm);
372}
373
374void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
375 FrameScope scope(masm, StackFrame::INTERNAL);
376 __ PushArgument(x1);
377 __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
378 __ Unreachable();
379}
380
381static void AssertCodeIsBaselineAllowClobber(MacroAssembler* masm,
382 Register code, Register scratch) {
383 // Verify that the code kind is baseline code via the CodeKind.
384 __ Ldr(scratch, FieldMemOperand(code, Code::kFlagsOffset));
385 __ DecodeField<Code::KindField>(scratch);
386 __ Cmp(scratch, Operand(static_cast<int>(CodeKind::BASELINE)));
387 __ Assert(eq, AbortReason::kExpectedBaselineData);
388}
389
390static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
391 Register scratch) {
392 DCHECK(!AreAliased(code, scratch));
393 return AssertCodeIsBaselineAllowClobber(masm, code, scratch);
394}
395
396static void CheckSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
397 Register data,
398 Register scratch,
399 Label* is_baseline,
400 Label* is_bytecode) {
401#if V8_STATIC_ROOTS_BOOL
402 __ IsObjectTypeFast(data, scratch, CODE_TYPE);
403#else
404 __ CompareObjectType(data, scratch, scratch, CODE_TYPE);
405#endif // V8_STATIC_ROOTS_BOOL
406 if (v8_flags.debug_code) {
407 Label not_baseline;
408 __ B(ne, &not_baseline);
409 AssertCodeIsBaseline(masm, data, scratch);
410 __ B(eq, is_baseline);
411 __ Bind(&not_baseline);
412 } else {
413 __ B(eq, is_baseline);
414 }
415
416#if V8_STATIC_ROOTS_BOOL
417 // scratch already contains the compressed map.
418 __ CompareInstanceTypeWithUniqueCompressedMap(scratch, Register::no_reg(),
419 INTERPRETER_DATA_TYPE);
420#else
421 // scratch already contains the instance type.
422 __ Cmp(scratch, INTERPRETER_DATA_TYPE);
423#endif // V8_STATIC_ROOTS_BOOL
424 __ B(ne, is_bytecode);
425}
426
427// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
428// the more general dispatch.
429static void GetSharedFunctionInfoBytecodeOrBaseline(
430 MacroAssembler* masm, Register sfi, Register bytecode, Register scratch1,
431 Label* is_baseline, Label* is_unavailable) {
432 DCHECK(!AreAliased(bytecode, scratch1));
433 ASM_CODE_COMMENT(masm);
434 Label done;
435
436 Register data = bytecode;
437 __ LoadTrustedPointerField(
438 data,
439 FieldMemOperand(sfi, SharedFunctionInfo::kTrustedFunctionDataOffset),
440 kUnknownIndirectPointerTag);
441
442 if (V8_JITLESS_BOOL) {
443 __ IsObjectType(data, scratch1, scratch1, INTERPRETER_DATA_TYPE);
444 __ B(ne, &done);
445 } else {
446 CheckSharedFunctionInfoBytecodeOrBaseline(masm, data, scratch1, is_baseline,
447 &done);
448 }
449
450 __ LoadProtectedPointerField(
451 bytecode, FieldMemOperand(data, InterpreterData::kBytecodeArrayOffset));
452
453 __ Bind(&done);
454 __ IsObjectType(bytecode, scratch1, scratch1, BYTECODE_ARRAY_TYPE);
455 __ B(ne, is_unavailable);
456}
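
[Editorial aside, not part of builtins-arm64.cc] The two helpers above boil down to a three-way classification of the SharedFunctionInfo's function data: baseline Code, bytecode (possibly wrapped in an InterpreterData), or nothing usable. A hedged C++ sketch of that control flow with an invented enum, just to make the branches explicit:

// Editorial sketch only; the enum is invented and not V8's representation.
enum class FunctionDataKind { kBaselineCode, kInterpreterData, kBytecodeArray, kOther };
enum class Resolution { kBaseline, kBytecode, kUnavailable };

Resolution ResolveFunctionData(FunctionDataKind kind) {
  switch (kind) {
    case FunctionDataKind::kBaselineCode:     // CODE_TYPE, asserted to be baseline
      return Resolution::kBaseline;
    case FunctionDataKind::kInterpreterData:  // unwrapped to its BytecodeArray
    case FunctionDataKind::kBytecodeArray:
      return Resolution::kBytecode;
    default:                                  // e.g. not compiled yet
      return Resolution::kUnavailable;
  }
}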
457
458// static
459void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
460 // ----------- S t a t e -------------
461 // -- x0 : the value to pass to the generator
462 // -- x1 : the JSGeneratorObject to resume
463 // -- lr : return address
464 // -----------------------------------
465
466 // Store input value into generator object.
467 __ StoreTaggedField(
468 x0, FieldMemOperand(x1, JSGeneratorObject::kInputOrDebugPosOffset));
469 __ RecordWriteField(x1, JSGeneratorObject::kInputOrDebugPosOffset, x0,
470 kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore);
471 // Check that x1 is still valid, RecordWrite might have clobbered it.
472 __ AssertGeneratorObject(x1);
473
474 // Load suspended function and context.
475 __ LoadTaggedField(x5,
476 FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
477 __ LoadTaggedField(cp, FieldMemOperand(x5, JSFunction::kContextOffset));
478
479 // Flood function if we are stepping.
480 Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
481 Label stepping_prepared;
482 ExternalReference debug_hook =
483 ExternalReference::debug_hook_on_function_call_address(masm->isolate());
484 __ Mov(x10, debug_hook);
485 __ Ldrsb(x10, MemOperand(x10));
486 __ CompareAndBranch(x10, Operand(0), ne, &prepare_step_in_if_stepping);
487
488 // Flood function if we need to continue stepping in the suspended generator.
489 ExternalReference debug_suspended_generator =
490 ExternalReference::debug_suspended_generator_address(masm->isolate());
491 __ Mov(x10, debug_suspended_generator);
492 __ Ldr(x10, MemOperand(x10));
493 __ CompareAndBranch(x10, Operand(x1), eq,
494 &prepare_step_in_suspended_generator);
495 __ Bind(&stepping_prepared);
496
497 // Check the stack for overflow. We are not trying to catch interruptions
498 // (i.e. debug break and preemption) here, so check the "real stack limit".
499 Label stack_overflow;
500 __ LoadStackLimit(x10, StackLimitKind::kRealStackLimit);
501 __ Cmp(sp, x10);
502 __ B(lo, &stack_overflow);
503
505
506 // Compute actual arguments count value as a formal parameter count without
507 // receiver, loaded from the dispatch table entry or shared function info.
508#if V8_ENABLE_LEAPTIERING
511 Register scratch = x20;
512 __ Ldr(dispatch_handle.W(),
513 FieldMemOperand(x5, JSFunction::kDispatchHandleOffset));
514 __ LoadEntrypointAndParameterCountFromJSDispatchTable(
515 code, argc, dispatch_handle, scratch);
516
517 // In case the formal parameter count is kDontAdaptArgumentsSentinel the
518 // actual arguments count should be set accordingly.
520 __ Cmp(argc, Operand(JSParameterCount(0)));
521 __ Csel(argc, argc, Operand(JSParameterCount(0)), kGreaterThan);
522#else
523 __ LoadTaggedField(
524 argc, FieldMemOperand(x5, JSFunction::kSharedFunctionInfoOffset));
525 __ Ldrh(argc.W(), FieldMemOperand(
526 argc, SharedFunctionInfo::kFormalParameterCountOffset));
527
528 // Generator functions are always created from user code and thus the
529 // formal parameter count is never equal to kDontAdaptArgumentsSentinel,
530 // which is used only for certain non-generator builtin functions.
531#endif // V8_ENABLE_LEAPTIERING
532
533 // Claim slots for arguments and receiver (rounded up to a multiple of two).
534 static_assert(JSParameterCount(0) == 1); // argc includes receiver
535 __ Add(x11, argc, 1);
536 __ Bic(x11, x11, 1);
537 __ Claim(x11);
538
539 // Store padding (which might be replaced by the last argument).
540 __ Sub(x11, x11, 1);
541 __ Poke(padreg, Operand(x11, LSL, kSystemPointerSizeLog2));
542
543 // Poke receiver into highest claimed slot.
544 __ LoadTaggedField(x6,
545 FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset));
546 __ Poke(x6, __ ReceiverOperand());
547
548 // ----------- S t a t e -------------
549 // -- x0 : actual arguments count
550 // -- x1 : the JSGeneratorObject to resume
551 // -- x2 : target code object (leaptiering only)
552 // -- x4 : dispatch handle (leaptiering only)
553 // -- x5 : generator function
554 // -- cp : generator context
555 // -- lr : return address
556 // -- sp[0 .. arg count] : claimed for receiver and args
557 // -----------------------------------
558
559 // Copy the function arguments from the generator object's register file.
560 {
561 Label loop, done;
562 __ Sub(x10, argc, kJSArgcReceiverSlots);
563 __ Cbz(x10, &done);
564 __ LoadTaggedField(
565 x6,
566 FieldMemOperand(x1, JSGeneratorObject::kParametersAndRegistersOffset));
567
568 __ SlotAddress(x12, x10);
569 __ Add(x6, x6, Operand(x10, LSL, kTaggedSizeLog2));
570 __ Add(x6, x6, Operand(OFFSET_OF_DATA_START(FixedArray) - kHeapObjectTag));
571 __ Bind(&loop);
572 __ Sub(x10, x10, 1);
573 __ LoadTaggedField(x11, MemOperand(x6, -kTaggedSize, PreIndex));
574 __ Str(x11, MemOperand(x12, -kSystemPointerSize, PostIndex));
575 __ Cbnz(x10, &loop);
576 __ Bind(&done);
577 }
578
579 // Underlying function needs to have bytecode available.
580 if (v8_flags.debug_code) {
581 Label ok, is_baseline, is_unavailable;
582 Register sfi = x10;
583 Register bytecode = x10;
584 Register scratch = x11;
585 __ LoadTaggedField(
586 sfi, FieldMemOperand(x5, JSFunction::kSharedFunctionInfoOffset));
587 GetSharedFunctionInfoBytecodeOrBaseline(masm, sfi, bytecode, scratch,
588 &is_baseline, &is_unavailable);
589 __ B(&ok);
590
591 __ Bind(&is_unavailable);
592 __ Abort(AbortReason::kMissingBytecodeArray);
593
594 __ Bind(&is_baseline);
595 __ IsObjectType(bytecode, scratch, scratch, CODE_TYPE);
596 __ Assert(eq, AbortReason::kMissingBytecodeArray);
597
598 __ Bind(&ok);
599 }
600
601 // Resume (Ignition/TurboFan) generator object.
602 {
603 // We abuse new.target both to indicate that this is a resume call and to
604 // pass in the generator object. In ordinary calls, new.target is always
605 // undefined because generator functions are non-constructable.
606 __ Mov(x3, x1); // new.target
607 __ Mov(x1, x5); // target
608#if V8_ENABLE_LEAPTIERING
609 // We jump through x17 here because for Branch Identification (BTI) we use
610 // "Call" (`bti c`) rather than "Jump" (`bti j`) landing pads for
611 // tail-called code. See TailCallBuiltin for more information.
612 DCHECK_NE(code, x17);
613 __ Mov(x17, code);
614 // Actual arguments count and code start are already initialized above.
615 __ Jump(x17);
616#else
617 // Actual arguments count is already initialized above.
618 __ JumpJSFunction(x1);
619#endif // V8_ENABLE_LEAPTIERING
620 }
621
622 __ Bind(&prepare_step_in_if_stepping);
623 {
624 FrameScope scope(masm, StackFrame::INTERNAL);
625 // Push hole as receiver since we do not use it for stepping.
626 __ LoadRoot(x6, RootIndex::kTheHoleValue);
627 __ Push(x1, padreg, x5, x6);
628 __ CallRuntime(Runtime::kDebugOnFunctionCall);
629 __ Pop(padreg, x1);
630 __ LoadTaggedField(x5,
631 FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
632 }
633 __ B(&stepping_prepared);
634
635 __ Bind(&prepare_step_in_suspended_generator);
636 {
637 FrameScope scope(masm, StackFrame::INTERNAL);
638 __ Push(x1, padreg);
639 __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
640 __ Pop(padreg, x1);
641 __ LoadTaggedField(x5,
642 FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
643 }
644 __ B(&stepping_prepared);
645
646 __ bind(&stack_overflow);
647 {
648 FrameScope scope(masm, StackFrame::INTERNAL);
649 __ CallRuntime(Runtime::kThrowStackOverflow);
650 __ Unreachable(); // This should be unreachable.
651 }
652}
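
[Editorial aside, not part of builtins-arm64.cc] The copy loop above moves the suspended arguments out of the generator's parameters-and-registers FixedArray into the freshly claimed stack slots, walking both cursors downward (Ldr with pre-decrement, Str with a negative post-index). A small C++ sketch of the same reverse copy, illustrative only:

// Editorial sketch only; raw pointers stand in for tagged slots.
#include <cstdint>

void CopyGeneratorParameters(uintptr_t* dst_highest_slot,
                             const uintptr_t* src_past_end, int count) {
  while (count-- > 0) {
    *dst_highest_slot = *--src_past_end;  // read the next value from the end
    --dst_highest_slot;                   // and fill the stack area downwards
  }
}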
653
654namespace {
655
656// Called with the native C calling convention. The corresponding function
657// signature is either:
658//
659// using JSEntryFunction = GeneratedCode<Address(
660// Address root_register_value, Address new_target, Address target,
661// Address receiver, intptr_t argc, Address** argv)>;
662// or
663// using JSEntryFunction = GeneratedCode<Address(
664// Address root_register_value, MicrotaskQueue* microtask_queue)>;
665//
666// Input is either:
667// x0: root_register_value.
668// x1: new_target.
669// x2: target.
670// x3: receiver.
671// x4: argc.
672// x5: argv.
673// or
674// x0: root_register_value.
675// x1: microtask_queue.
676// Output:
677// x0: result.
678void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
679 Builtin entry_trampoline) {
680 Label invoke, handler_entry, exit;
681
682 {
683 NoRootArrayScope no_root_array(masm);
684
685#if defined(V8_OS_WIN)
686 // In order to allow Windows debugging tools to reconstruct a call stack, we
687 // must generate information describing how to recover at least fp, sp, and
688 // pc for the calling frame. Here, JSEntry registers offsets to
689 // xdata_encoder which then emits the offset values as part of the unwind
690 // data accordingly.
691 win64_unwindinfo::XdataEncoder* xdata_encoder = masm->GetXdataEncoder();
692 if (xdata_encoder) {
693 xdata_encoder->onFramePointerAdjustment(
696 }
697#endif
698
699 __ PushCalleeSavedRegisters();
700
701 // Set up the reserved register for 0.0.
702 __ Fmov(fp_zero, 0.0);
703
704 // Initialize the root register.
705 // C calling convention. The first argument is passed in x0.
706 __ Mov(kRootRegister, x0);
707
708#ifdef V8_COMPRESS_POINTERS
709 // Initialize the pointer cage base register.
710 __ LoadRootRelative(kPtrComprCageBaseRegister,
711 IsolateData::cage_base_offset());
712#endif
713 }
714
715 // Set up fp. It points to the {fp, lr} pair pushed as the last step in
716 // PushCalleeSavedRegisters.
717 static_assert(
720 __ Mov(fp, sp);
721
722 // Build an entry frame (see layout below).
723
724 // Push frame type markers.
725 __ Mov(x12, StackFrame::TypeToMarker(type));
726 __ Push(x12, xzr);
727
728 __ Mov(x11, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
729 masm->isolate()));
730 __ Ldr(x10, MemOperand(x11)); // x10 = C entry FP.
731
732 // Clear c_entry_fp, now we've loaded its value to be pushed on the stack.
733 // If the c_entry_fp is not already zero and we don't clear it, the
734 // StackFrameIteratorForProfiler will assume we are executing C++ and miss the
735 // JS frames on top.
736 __ Str(xzr, MemOperand(x11));
737
738 // Set js_entry_sp if this is the outermost JS call.
739 Label done;
740 ExternalReference js_entry_sp = ExternalReference::Create(
741 IsolateAddressId::kJSEntrySPAddress, masm->isolate());
742 __ Mov(x12, js_entry_sp);
743 __ Ldr(x11, MemOperand(x12)); // x11 = previous JS entry SP.
744
745 // Select between the inner and outermost frame marker, based on the JS entry
746 // sp. We assert that the inner marker is zero, so we can use xzr to save a
747 // move instruction.
749 __ Cmp(x11, 0); // If x11 is zero, this is the outermost frame.
750 // x11 = JS entry frame marker.
752 __ B(ne, &done);
753 __ Str(fp, MemOperand(x12));
754
755 __ Bind(&done);
756
757 __ LoadIsolateField(x9, IsolateFieldId::kFastCCallCallerFP);
758 __ Ldr(x7, MemOperand(x9));
759 __ Str(xzr, MemOperand(x9));
760 __ LoadIsolateField(x9, IsolateFieldId::kFastCCallCallerPC);
761 __ Ldr(x8, MemOperand(x9));
762 __ Str(xzr, MemOperand(x9));
763 __ Push(x10, x11, x7, x8);
764
765 // The frame set up looks like this:
766 // sp[0] : fast api call pc.
767 // sp[1] : fast api call fp.
768 // sp[2] : JS entry frame marker.
769 // sp[3] : C entry FP.
770 // sp[4] : stack frame marker (0).
771 // sp[5] : stack frame marker (type).
772 // sp[6] : saved fp <- fp points here.
773 // sp[7] : saved lr
774 // sp[8,26) : other saved registers
775
776 // Jump to a faked try block that does the invoke, with a faked catch
777 // block that sets the exception.
778 __ B(&invoke);
779
780 // Prevent the constant pool from being emitted between the record of the
781 // handler_entry position and the first instruction of the sequence here.
782 // There is no risk because Assembler::Emit() emits the instruction before
783 // checking for constant pool emission, but we do not want to depend on
784 // that.
785 {
786 Assembler::BlockPoolsScope block_pools(masm);
787
788 // Store the current pc as the handler offset. It's used later to create the
789 // handler table.
790 __ BindExceptionHandler(&handler_entry);
791 masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
792
793 // Caught exception: Store result (exception) in the exception
794 // field in the JSEnv and return a failure sentinel. Coming in here the
795 // fp will be invalid because UnwindAndFindHandler sets it to 0 to
796 // signal the existence of the JSEntry frame.
797 __ Mov(x10, ExternalReference::Create(IsolateAddressId::kExceptionAddress,
798 masm->isolate()));
799 }
800 __ Str(x0, MemOperand(x10));
801 __ LoadRoot(x0, RootIndex::kException);
802 __ B(&exit);
803
804 // Invoke: Link this frame into the handler chain.
805 __ Bind(&invoke);
806
807 // Push new stack handler.
809 "Unexpected offset for StackHandlerConstants::kSize");
811 "Unexpected offset for StackHandlerConstants::kNextOffset");
812
813 // Link the current handler as the next handler.
814 __ Mov(x11, ExternalReference::Create(IsolateAddressId::kHandlerAddress,
815 masm->isolate()));
816 __ Ldr(x10, MemOperand(x11));
817 __ Push(padreg, x10);
818
819 // Set this new handler as the current one.
820 {
821 UseScratchRegisterScope temps(masm);
822 Register scratch = temps.AcquireX();
823 __ Mov(scratch, sp);
824 __ Str(scratch, MemOperand(x11));
825 }
826
827 // If an exception not caught by another handler occurs, this handler
828 // returns control to the code after the B(&invoke) above, which
829 // restores all callee-saved registers (including cp and fp) to their
830 // saved values before returning a failure to C.
831 //
832 // Invoke the function by calling through JS entry trampoline builtin and
833 // pop the faked function when we return.
834 __ CallBuiltin(entry_trampoline);
835
836 // Pop the stack handler and unlink this frame from the handler chain.
838 "Unexpected offset for StackHandlerConstants::kNextOffset");
839 __ Pop(x10, padreg);
840 __ Mov(x11, ExternalReference::Create(IsolateAddressId::kHandlerAddress,
841 masm->isolate()));
843 __ Str(x10, MemOperand(x11));
844
845 __ Bind(&exit);
846 // x0 holds the result.
847 // The stack pointer points to the top of the entry frame pushed on entry from
848 // C++ (at the beginning of this stub):
849 // sp[0] : fast api call pc.
850 // sp[1] : fast api call fp.
851 // sp[2] : JS entry frame marker.
852 // sp[3] : C entry FP.
853 // sp[4] : stack frame marker (0).
854 // sp[5] : stack frame marker (type).
855 // sp[6] : saved fp <- fp points here.
856 // sp[7] : saved lr
857 // sp[8,26) : other saved registers
858
859 __ Pop(x10, x11);
860 __ LoadIsolateField(x8, IsolateFieldId::kFastCCallCallerPC);
861 __ Str(x10, MemOperand(x8));
862 __ LoadIsolateField(x9, IsolateFieldId::kFastCCallCallerFP);
863 __ Str(x11, MemOperand(x9));
864
865 // Check if the current stack frame is marked as the outermost JS frame.
866 Label non_outermost_js_2;
867 {
868 Register c_entry_fp = x11;
869 __ PeekPair(x10, c_entry_fp, 0);
871 __ B(ne, &non_outermost_js_2);
872 __ Mov(x12, js_entry_sp);
873 __ Str(xzr, MemOperand(x12));
874 __ Bind(&non_outermost_js_2);
875
876 // Restore the top frame descriptors from the stack.
877 __ Mov(x12, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
878 masm->isolate()));
879 __ Str(c_entry_fp, MemOperand(x12));
880 }
881
882 // Reset the stack to the callee saved registers.
883 static_assert(
885 "Size of entry frame is not a multiple of 16 bytes");
886 // fast_c_call_caller_fp and fast_c_call_caller_pc have already been popped.
887 int drop_count =
889 __ Drop(drop_count);
890 // Restore the callee-saved registers and return.
891 __ PopCalleeSavedRegisters();
892 __ Ret();
893}
894
895} // namespace
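
[Editorial aside, not part of builtins-arm64.cc] The sp[0]..sp[7] comments in Generate_JSEntryVariant describe the entry frame that the pushes build. Summarized as a plain struct, lowest address (sp) first; the type and field names are invented for illustration and each slot is one system pointer:

// Editorial sketch only; not a type V8 defines.
#include <cstdint>

struct JSEntryFrameSketch {
  uintptr_t fast_api_call_pc;       // sp[0]
  uintptr_t fast_api_call_fp;       // sp[1]
  uintptr_t js_entry_frame_marker;  // sp[2]: outermost marker, or 0 for inner frames
  uintptr_t c_entry_fp;             // sp[3]: saved c_entry_fp (cleared in the isolate)
  uintptr_t frame_marker_zero;      // sp[4]: stack frame marker (0)
  uintptr_t frame_marker_type;      // sp[5]: StackFrame::TypeToMarker(type)
  uintptr_t saved_fp;               // sp[6]: fp points here
  uintptr_t saved_lr;               // sp[7]
  // sp[8, 26): callee-saved registers pushed by PushCalleeSavedRegisters.
};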
896
897void Builtins::Generate_JSEntry(MacroAssembler* masm) {
898 Generate_JSEntryVariant(masm, StackFrame::ENTRY, Builtin::kJSEntryTrampoline);
899}
900
901void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
902 Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
903 Builtin::kJSConstructEntryTrampoline);
904}
905
906void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
907 Generate_JSEntryVariant(masm, StackFrame::ENTRY,
908 Builtin::kRunMicrotasksTrampoline);
909}
910
911// Input:
912// x1: new.target.
913// x2: function.
914// x3: receiver.
915// x4: argc.
916// x5: argv.
917// Output:
918// x0: result.
919static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
920 bool is_construct) {
921 Register new_target = x1;
922 Register function = x2;
923 Register receiver = x3;
924 Register argc = x4;
925 Register argv = x5;
926 Register scratch = x10;
927 Register slots_to_claim = x11;
928
929 {
930 // Enter an internal frame.
931 FrameScope scope(masm, StackFrame::INTERNAL);
932
933 // Setup the context (we need to use the caller context from the isolate).
934 __ Mov(scratch, ExternalReference::Create(IsolateAddressId::kContextAddress,
935 masm->isolate()));
936 __ Ldr(cp, MemOperand(scratch));
937
938 // Claim enough space for the arguments and the function, including an
939 // optional slot of padding.
940 constexpr int additional_slots = 2;
941 __ Add(slots_to_claim, argc, additional_slots);
942 __ Bic(slots_to_claim, slots_to_claim, 1);
943
944 // Check if we have enough stack space to push all arguments.
945 Label enough_stack_space, stack_overflow;
946 __ StackOverflowCheck(slots_to_claim, &stack_overflow);
947 __ B(&enough_stack_space);
948
949 __ Bind(&stack_overflow);
950 __ CallRuntime(Runtime::kThrowStackOverflow);
951 __ Unreachable();
952
953 __ Bind(&enough_stack_space);
954 __ Claim(slots_to_claim);
955
956 // Store padding (which might be overwritten).
957 __ SlotAddress(scratch, slots_to_claim);
958 __ Str(padreg, MemOperand(scratch, -kSystemPointerSize));
959
960 // Store receiver on the stack.
961 __ Poke(receiver, 0);
962 // Store function on the stack.
963 __ SlotAddress(scratch, argc);
964 __ Str(function, MemOperand(scratch));
965
966 // Copy arguments to the stack in a loop, in reverse order.
967 // x4: argc.
968 // x5: argv.
969 Label loop, done;
970
971 // Skip the argument set up if we have no arguments.
972 __ Cmp(argc, JSParameterCount(0));
973 __ B(eq, &done);
974
975 // scratch has been set to point to the location of the function, which
976 // marks the end of the argument copy.
977 __ SlotAddress(x0, 1); // Skips receiver.
978 __ Bind(&loop);
979 // Load the handle.
980 __ Ldr(x11, MemOperand(argv, kSystemPointerSize, PostIndex));
981 // Dereference the handle.
982 __ Ldr(x11, MemOperand(x11));
983 // Poke the result into the stack.
984 __ Str(x11, MemOperand(x0, kSystemPointerSize, PostIndex));
985 // Loop if we've not reached the end of copy marker.
986 __ Cmp(x0, scratch);
987 __ B(lt, &loop);
988
989 __ Bind(&done);
990
991 __ Mov(x0, argc);
992 __ Mov(x3, new_target);
993 __ Mov(x1, function);
994 // x0: argc.
995 // x1: function.
996 // x3: new.target.
997
998 // Initialize all JavaScript callee-saved registers, since they will be seen
999 // by the garbage collector as part of handlers.
1000 // The original values have been saved in JSEntry.
1001 __ LoadRoot(x19, RootIndex::kUndefinedValue);
1002 __ Mov(x20, x19);
1003 __ Mov(x21, x19);
1004 __ Mov(x22, x19);
1005 __ Mov(x23, x19);
1006 __ Mov(x24, x19);
1007 __ Mov(x25, x19);
1008#ifndef V8_COMPRESS_POINTERS
1009 __ Mov(x28, x19);
1010#endif
1011 // Don't initialize the reserved registers.
1012 // x26 : root register (kRootRegister).
1013 // x27 : context pointer (cp).
1014 // x28 : pointer cage base register (kPtrComprCageBaseRegister).
1015 // x29 : frame pointer (fp).
1016
1017 Builtin builtin = is_construct ? Builtin::kConstruct : Builtins::Call();
1018 __ CallBuiltin(builtin);
1019
1020 // Exit the JS internal frame and remove the parameters (except function),
1021 // and return.
1022 }
1023
1024 // Result is in x0. Return.
1025 __ Ret();
1026}
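
[Editorial aside, not part of builtins-arm64.cc] The argument loop above treats each argv entry as a handle, i.e. a pointer to a slot holding the tagged value, so every entry is loaded once and dereferenced once before being poked onto the stack. A minimal C++ sketch of that double load, illustrative only:

// Editorial sketch only; uintptr_t stands in for a tagged value.
#include <cstdint>

void CopyArgumentsFromHandles(uintptr_t* dst_slots, uintptr_t* const* argv,
                              int argc_without_receiver) {
  for (int i = 0; i < argc_without_receiver; ++i) {
    dst_slots[i] = *argv[i];  // load the handle, then dereference it
  }
}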
1027
1028void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
1030}
1031
1032void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
1034}
1035
1036void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
1037 // This expects two C++ function parameters passed by Invoke() in
1038 // execution.cc.
1039 // x0: root_register_value
1040 // x1: microtask_queue
1041
1043 __ TailCallBuiltin(Builtin::kRunMicrotasks);
1044}
1045
1046static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
1047 Register scratch2) {
1048 ASM_CODE_COMMENT(masm);
1049 Register params_size = scratch1;
1050 // Get the size of the formal parameters + receiver (in bytes).
1051 __ Ldr(params_size,
1053 __ Ldrh(params_size.W(),
1054 FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
1055
1056 Register actual_params_size = scratch2;
1057 // Compute the size of the actual parameters + receiver (in bytes).
1058 __ Ldr(actual_params_size,
1060
1061 // If actual is bigger than formal, then we should use it to free up the stack
1062 // arguments.
1063 __ Cmp(params_size, actual_params_size);
1064 __ Csel(params_size, actual_params_size, params_size, kLessThan);
1065
1066 // Leave the frame (also dropping the register file).
1067 __ LeaveFrame(StackFrame::INTERPRETED);
1068
1069 // Drop receiver + arguments.
1070 __ DropArguments(params_size);
1071}
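
[Editorial aside, not part of builtins-arm64.cc] The Cmp/Csel pair in LeaveInterpreterFrame drops the larger of the two counts, so that calling a two-parameter function as f(1, 2, 3) still frees all three pushed arguments plus the receiver. In C++ terms (illustrative only):

// Editorial sketch only: both sizes are in bytes and already include the receiver.
#include <algorithm>
#include <cstdint>

int64_t BytesToDrop(int64_t formal_params_size, int64_t actual_params_size) {
  return std::max(formal_params_size, actual_params_size);
}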
1072
1073// Advance the current bytecode offset. This simulates what all bytecode
1074// handlers do upon completion of the underlying operation. Will bail out to a
1075// label if the bytecode (without prefix) is a return bytecode. Will not advance
1076// the bytecode offset if the current bytecode is a JumpLoop, instead just
1077// re-executing the JumpLoop to jump to the correct bytecode.
1078static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
1079 Register bytecode_array,
1080 Register bytecode_offset,
1081 Register bytecode, Register scratch1,
1082 Register scratch2, Label* if_return) {
1083 ASM_CODE_COMMENT(masm);
1084 Register bytecode_size_table = scratch1;
1085
1086 // The bytecode offset value will be increased by one in wide and extra wide
1087 // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
1088 // will restore the original bytecode. In order to simplify the code, we have
1089 // a backup of it.
1090 Register original_bytecode_offset = scratch2;
1091 DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
1092 bytecode, original_bytecode_offset));
1093
1094 __ Mov(bytecode_size_table, ExternalReference::bytecode_size_table_address());
1095 __ Mov(original_bytecode_offset, bytecode_offset);
1096
1097 // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
1098 Label process_bytecode, extra_wide;
1099 static_assert(0 == static_cast<int>(interpreter::Bytecode::kWide));
1100 static_assert(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
1101 static_assert(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
1102 static_assert(3 ==
1103 static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
1104 __ Cmp(bytecode, Operand(0x3));
1105 __ B(hi, &process_bytecode);
1106 __ Tst(bytecode, Operand(0x1));
1107 // The code to load the next bytecode is common to both wide and extra wide.
1108 // We can hoist them up here since they do not modify the flags after Tst.
1109 __ Add(bytecode_offset, bytecode_offset, Operand(1));
1110 __ Ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
1111 __ B(ne, &extra_wide);
1112
1113 // Update table to the wide scaled table.
1114 __ Add(bytecode_size_table, bytecode_size_table,
1116 __ B(&process_bytecode);
1117
1118 __ Bind(&extra_wide);
1119 // Update table to the extra wide scaled table.
1120 __ Add(bytecode_size_table, bytecode_size_table,
1122
1123 __ Bind(&process_bytecode);
1124
1125// Bailout to the return label if this is a return bytecode.
1126#define JUMP_IF_EQUAL(NAME) \
1127 __ Cmp(x1, Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
1128 __ B(if_return, eq);
1130#undef JUMP_IF_EQUAL
1131
1132 // If this is a JumpLoop, re-execute it to perform the jump to the beginning
1133 // of the loop.
1134 Label end, not_jump_loop;
1135 __ Cmp(bytecode, Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
1136 __ B(ne, &not_jump_loop);
1137 // We need to restore the original bytecode_offset since we might have
1138 // increased it to skip the wide / extra-wide prefix bytecode.
1139 __ Mov(bytecode_offset, original_bytecode_offset);
1140 __ B(&end);
1141
1142 __ bind(&not_jump_loop);
1143 // Otherwise, load the size of the current bytecode and advance the offset.
1144 __ Ldrb(scratch1.W(), MemOperand(bytecode_size_table, bytecode));
1145 __ Add(bytecode_offset, bytecode_offset, scratch1);
1146
1147 __ Bind(&end);
1148}
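
[Editorial aside, not part of builtins-arm64.cc] AdvanceBytecodeOffsetOrReturn skips a Wide/ExtraWide (or DebugBreak-wide) prefix while selecting the matching operand-scale size table, bails out for return bytecodes, rewinds to re-execute JumpLoop, and otherwise adds the bytecode's size. A hedged C++ sketch of that flow; IsReturn, kJumpLoop and the size tables are assumed stand-ins for the real interpreter tables:

// Editorial sketch only; not V8's interpreter API.
#include <cstdint>

bool IsReturn(uint8_t bytecode);        // assumed predicate
extern const uint8_t kJumpLoop;         // assumed bytecode value
extern const uint8_t* kSizeTables[3];   // single / double / quadruple operand scale

// Returns the new offset, or -1 to signal "return bytecode reached".
int64_t AdvanceOffset(const uint8_t* bytecode_array, int64_t offset) {
  const int64_t original_offset = offset;
  uint8_t bytecode = bytecode_array[offset];
  int scale_index = 0;
  if (bytecode <= 3) {                     // Wide/ExtraWide/DebugBreak(Extra)Wide prefix
    scale_index = (bytecode & 1) ? 2 : 1;  // odd prefixes are the ExtraWide ones
    bytecode = bytecode_array[++offset];   // load the bytecode behind the prefix
  }
  if (IsReturn(bytecode)) return -1;                    // bail out to if_return
  if (bytecode == kJumpLoop) return original_offset;    // re-execute JumpLoop
  return offset + kSizeTables[scale_index][bytecode];   // normal advance
}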
1149
1150namespace {
1151
1152void ResetSharedFunctionInfoAge(MacroAssembler* masm, Register sfi) {
1153 __ Strh(wzr, FieldMemOperand(sfi, SharedFunctionInfo::kAgeOffset));
1154}
1155
1156void ResetJSFunctionAge(MacroAssembler* masm, Register js_function,
1157 Register scratch) {
1158 const Register shared_function_info(scratch);
1159 __ LoadTaggedField(
1160 shared_function_info,
1161 FieldMemOperand(js_function, JSFunction::kSharedFunctionInfoOffset));
1162 ResetSharedFunctionInfoAge(masm, shared_function_info);
1163}
1164
1165void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm,
1166 Register feedback_vector, Register scratch) {
1167 DCHECK(!AreAliased(feedback_vector, scratch));
1168 __ Ldrb(scratch,
1169 FieldMemOperand(feedback_vector, FeedbackVector::kOsrStateOffset));
1170 __ And(scratch, scratch, Operand(~FeedbackVector::OsrUrgencyBits::kMask));
1171 __ Strb(scratch,
1172 FieldMemOperand(feedback_vector, FeedbackVector::kOsrStateOffset));
1173}
1174
1175} // namespace
1176
1177// static
1178void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
1179 UseScratchRegisterScope temps(masm);
1180 // Need a few extra registers
1181 temps.Include(CPURegList(kXRegSizeInBits, {x12, x13, x14, x15}));
1182
1183 auto descriptor =
1184 Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
1185 Register closure = descriptor.GetRegisterParameter(
1186 BaselineOutOfLinePrologueDescriptor::kClosure);
1187 // Load the feedback cell and vector from the closure.
1188 Register feedback_cell = temps.AcquireX();
1189 Register feedback_vector = temps.AcquireX();
1190 Register scratch = temps.AcquireX();
1191 __ LoadTaggedField(feedback_cell,
1192 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1193 __ LoadTaggedField(
1194 feedback_vector,
1195 FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset));
1196 __ AssertFeedbackVector(feedback_vector, scratch);
1197
1198#ifndef V8_ENABLE_LEAPTIERING
1199 // Check the tiering state.
1200 Label flags_need_processing;
1201 Register flags = temps.AcquireW();
1202 __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
1203 flags, feedback_vector, CodeKind::BASELINE, &flags_need_processing);
1204#endif // !V8_ENABLE_LEAPTIERING
1205
1206 {
1207 UseScratchRegisterScope temps(masm);
1208 ResetFeedbackVectorOsrUrgency(masm, feedback_vector, temps.AcquireW());
1209 }
1210
1211 // Increment invocation count for the function.
1212 {
1213 UseScratchRegisterScope temps(masm);
1214 Register invocation_count = temps.AcquireW();
1215 __ Ldr(invocation_count,
1216 FieldMemOperand(feedback_vector,
1217 FeedbackVector::kInvocationCountOffset));
1218 __ Add(invocation_count, invocation_count, Operand(1));
1219 __ Str(invocation_count,
1220 FieldMemOperand(feedback_vector,
1221 FeedbackVector::kInvocationCountOffset));
1222 }
1223
1224 FrameScope frame_scope(masm, StackFrame::MANUAL);
1225 {
1226 ASM_CODE_COMMENT_STRING(masm, "Frame Setup");
1227 // Normally the first thing we'd do here is Push(lr, fp), but we already
1228 // entered the frame in BaselineCompiler::Prologue, as we had to use the
1229 // value lr before the call to this BaselineOutOfLinePrologue builtin.
1230
1231 Register callee_context = descriptor.GetRegisterParameter(
1232 BaselineOutOfLinePrologueDescriptor::kCalleeContext);
1233 Register callee_js_function = descriptor.GetRegisterParameter(
1234 BaselineOutOfLinePrologueDescriptor::kClosure);
1235 {
1236 UseScratchRegisterScope temps(masm);
1237 ResetJSFunctionAge(masm, callee_js_function, temps.AcquireX());
1238 }
1239 __ Push(callee_context, callee_js_function);
1240 DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
1241 DCHECK_EQ(callee_js_function, kJSFunctionRegister);
1242
1243 Register argc = descriptor.GetRegisterParameter(
1244 BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
1245 // We'll use the bytecode for both code age/OSR resetting, and pushing onto
1246 // the frame, so load it into a register.
1247 Register bytecode_array = descriptor.GetRegisterParameter(
1248 BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
1249 __ Push(argc, bytecode_array, feedback_cell, feedback_vector);
1250 __ AssertFeedbackVector(feedback_vector, scratch);
1251 }
1252
1253 Label call_stack_guard;
1254 Register frame_size = descriptor.GetRegisterParameter(
1255 BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
1256 {
1257 ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt check");
1258 // Stack check. This folds the checks for both the interrupt stack limit
1259 // check and the real stack limit into one by just checking for the
1260 // interrupt limit. The interrupt limit is either equal to the real stack
1261 // limit or tighter. By ensuring we have space until that limit after
1262 // building the frame we can quickly precheck both at once.
1263 UseScratchRegisterScope temps(masm);
1264
1265 Register sp_minus_frame_size = temps.AcquireX();
1266 __ Sub(sp_minus_frame_size, sp, frame_size);
1267 Register interrupt_limit = temps.AcquireX();
1268 __ LoadStackLimit(interrupt_limit, StackLimitKind::kInterruptStackLimit);
1269 __ Cmp(sp_minus_frame_size, interrupt_limit);
1270 __ B(lo, &call_stack_guard);
1271 }
1272
1273 // Do "fast" return to the caller pc in lr.
1274 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1275 __ Ret();
1276
1277#ifndef V8_ENABLE_LEAPTIERING
1278 __ bind(&flags_need_processing);
1279 {
1280 ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
1281 // Drop the frame created by the baseline call.
1282 __ Pop<MacroAssembler::kAuthLR>(fp, lr);
1283 __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector);
1284 __ Trap();
1285 }
1286#endif // !V8_ENABLE_LEAPTIERING
1287
1288 __ bind(&call_stack_guard);
1289 {
1290 ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
1291 Register new_target = descriptor.GetRegisterParameter(
1292 BaselineOutOfLinePrologueDescriptor::kJavaScriptCallNewTarget);
1293
1294 FrameScope frame_scope(masm, StackFrame::INTERNAL);
1295 // Save incoming new target or generator
1296 Register maybe_dispatch_handle = V8_ENABLE_LEAPTIERING_BOOL
1298 : padreg;
1299 // No need to SmiTag as dispatch handles always look like Smis.
1300 static_assert(kJSDispatchHandleShift > 0);
1301 __ Push(maybe_dispatch_handle, new_target);
1302 __ SmiTag(frame_size);
1303 __ PushArgument(frame_size);
1304 __ CallRuntime(Runtime::kStackGuardWithGap);
1305 __ Pop(new_target, maybe_dispatch_handle);
1306 }
1307 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1308 __ Ret();
1309}
1310
1311// static
1312void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) {
1313 // We're here because we got deopted during BaselineOutOfLinePrologue's stack
1314 // check. Undo all its frame creation and call into the interpreter instead.
1315
1316 // Drop the feedback vector and the bytecode offset (was the feedback vector
1317 // but got replaced during deopt).
1318 __ Drop(2);
1319
1320 // Bytecode array, argc, Closure, Context.
1323
1324 // Drop frame pointer
1325 __ LeaveFrame(StackFrame::BASELINE);
1326
1327 // Enter the interpreter.
1328 __ TailCallBuiltin(Builtin::kInterpreterEntryTrampoline);
1329}
1330
1331// Generate code for entering a JS function with the interpreter.
1332// On entry to the function the receiver and arguments have been pushed on the
1333// stack left to right.
1334//
1335// The live registers are:
1336// - x0: actual argument count
1337// - x1: the JS function object being called.
1338// - x3: the incoming new target or generator object
1339// - x4: the dispatch handle through which we were called
1340// - cp: our context.
1341// - fp: our caller's frame pointer.
1342// - lr: return address.
1343//
1344// The function builds an interpreter frame. See InterpreterFrameConstants in
1345// frame-constants.h for its layout.
1347 MacroAssembler* masm, InterpreterEntryTrampolineMode mode) {
1348 Register closure = x1;
1349
1350 // Get the bytecode array from the function object and load it into
1351 // kInterpreterBytecodeArrayRegister.
1352 Register sfi = x5;
1353 __ LoadTaggedField(
1354 sfi, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
1355 ResetSharedFunctionInfoAge(masm, sfi);
1356
1357 // The bytecode array could have been flushed from the shared function info,
1358 // if so, call into CompileLazy.
1359 Label is_baseline, compile_lazy;
1362 x11, &is_baseline, &compile_lazy);
1363
1364#ifdef V8_ENABLE_SANDBOX
1365 // Validate the parameter count. This protects against an attacker swapping
1366 // the bytecode (or the dispatch handle) such that the parameter count of the
1367 // dispatch entry doesn't match the one of the BytecodeArray.
1368 // TODO(saelo): instead of this validation step, it would probably be nicer
1369 // if we could store the BytecodeArray directly in the dispatch entry and
1370 // load it from there. Then we can easily guarantee that the parameter count
1371 // of the entry matches the parameter count of the bytecode.
1374 __ LoadParameterCountFromJSDispatchTable(x6, dispatch_handle, x7);
1376 BytecodeArray::kParameterSizeOffset));
1377 __ Cmp(x6, x7);
1378 __ SbxCheck(eq, AbortReason::kJSSignatureMismatch);
1379#endif // V8_ENABLE_SANDBOX
1380
1381 Label push_stack_frame;
1382 Register feedback_vector = x2;
1383 __ LoadFeedbackVector(feedback_vector, closure, x7, &push_stack_frame);
1384
1385#ifndef V8_JITLESS
1386#ifndef V8_ENABLE_LEAPTIERING
1387 // If feedback vector is valid, check for optimized code and update invocation
1388 // count.
1389 Label flags_need_processing;
1390 Register flags = w7;
1391 __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
1392 flags, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
1393 &flags_need_processing);
1394#endif // !V8_ENABLE_LEAPTIERING
1395
1396 ResetFeedbackVectorOsrUrgency(masm, feedback_vector, w7);
1397
1398 // Increment invocation count for the function.
1399 __ Ldr(w10, FieldMemOperand(feedback_vector,
1400 FeedbackVector::kInvocationCountOffset));
1401 __ Add(w10, w10, Operand(1));
1402 __ Str(w10, FieldMemOperand(feedback_vector,
1403 FeedbackVector::kInvocationCountOffset));
1404
1405 // Open a frame scope to indicate that there is a frame on the stack. The
1406 // MANUAL indicates that the scope shouldn't actually generate code to set up
1407 // the frame (that is done below).
1408#else
1409 // Note: By omitting the above code in jitless mode we also disable:
1410 // - kFlagsLogNextExecution: only used for logging/profiling; and
1411 // - kInvocationCountOffset: only used for tiering heuristics and code
1412 // coverage.
1413#endif // !V8_JITLESS
1414
1415 __ Bind(&push_stack_frame);
1416 FrameScope frame_scope(masm, StackFrame::MANUAL);
1417 __ Push<MacroAssembler::kSignLR>(lr, fp);
1418 __ mov(fp, sp);
1419 __ Push(cp, closure);
1420
1421 // Load the initial bytecode offset.
1424
1425 // Push actual argument count, bytecode array, Smi tagged bytecode array
1426 // offset and the feedback vector.
1429 __ Push(x6, feedback_vector);
1430
1431 // Allocate the local and temporary register file on the stack.
1432 Label stack_overflow;
1433 {
1434 // Load frame size from the BytecodeArray object.
1436 BytecodeArray::kFrameSizeOffset));
1438 BytecodeArray::kMaxArgumentsOffset));
1439 __ Add(w12, w11, Operand(w12, LSL, kSystemPointerSizeLog2));
1440
1441 // Do a stack check to ensure we don't go over the limit.
1442 __ Sub(x10, sp, Operand(x12));
1443 {
1444 UseScratchRegisterScope temps(masm);
1445 Register scratch = temps.AcquireX();
1446 __ LoadStackLimit(scratch, StackLimitKind::kRealStackLimit);
1447 __ Cmp(x10, scratch);
1448 }
1449 __ B(lo, &stack_overflow);
1450
1451 // If ok, push undefined as the initial value for all register file entries.
1452 // Note: there should always be at least one stack slot for the return
1453 // register in the register file.
1454 Label loop_header;
1455 __ Lsr(x11, x11, kSystemPointerSizeLog2);
1456 // Round up the number of registers to a multiple of 2, to align the stack
1457 // to 16 bytes.
1458 __ Add(x11, x11, 1);
1459 __ Bic(x11, x11, 1);
1460 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1461 __ PushMultipleTimes(kInterpreterAccumulatorRegister, x11);
1462 __ Bind(&loop_header);
1463 }
1464
1465 // If the bytecode array has a valid incoming new target or generator object
1466 // register, initialize it with incoming value which was passed in x3.
1467 Label no_incoming_new_target_or_generator_register;
1468 __ Ldrsw(x10,
1471 BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
1472 __ Cbz(x10, &no_incoming_new_target_or_generator_register);
1473 __ Str(x3, MemOperand(fp, x10, LSL, kSystemPointerSizeLog2));
1474 __ Bind(&no_incoming_new_target_or_generator_register);
1475
1476 // Perform interrupt stack check.
1477 // TODO(solanes): Merge with the real stack limit check above.
1478 Label stack_check_interrupt, after_stack_check_interrupt;
1479 __ LoadStackLimit(x10, StackLimitKind::kInterruptStackLimit);
1480 __ Cmp(sp, x10);
1481 __ B(lo, &stack_check_interrupt);
1482 __ Bind(&after_stack_check_interrupt);
1483
1484 // The accumulator is already loaded with undefined.
1485
1486 // Load the dispatch table into a register and dispatch to the bytecode
1487 // handler at the current bytecode offset.
1488 Label do_dispatch;
1489 __ bind(&do_dispatch);
1490 __ Mov(
1492 ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1495 __ Mov(x1, Operand(x23, LSL, kSystemPointerSizeLog2));
1499
1500 __ RecordComment("--- InterpreterEntryReturnPC point ---");
1502 masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(
1503 masm->pc_offset());
1504 } else {
1506 // Both versions must be the same up to this point otherwise the builtins
1507 // will not be interchangeable.
1508 CHECK_EQ(
1509 masm->isolate()->heap()->interpreter_entry_return_pc_offset().value(),
1510 masm->pc_offset());
1511 }
1512
1513 // Any returns to the entry trampoline are either due to the return bytecode
1514 // or the interpreter tail calling a builtin and then a dispatch.
1515
1516 __ JumpTarget();
1517
1518 // Get bytecode array and bytecode offset from the stack frame.
1523
1524 // Either return, or advance to the next bytecode and dispatch.
1525 Label do_return;
1530 &do_return);
1531 __ B(&do_dispatch);
1532
1533 __ bind(&do_return);
1534 // The return value is in x0.
1535 LeaveInterpreterFrame(masm, x2, x5);
1536 __ Ret();
1537
1538 __ bind(&stack_check_interrupt);
1539 // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
1540 // for the call to the StackGuard.
1546 __ CallRuntime(Runtime::kStackGuard);
1547
1548 // After the call, restore the bytecode array, bytecode offset and accumulator
1549 // registers again. Also, restore the bytecode offset in the stack to its
1550 // previous value.
1555 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1556
1559
1560 __ jmp(&after_stack_check_interrupt);
1561
1562#ifndef V8_JITLESS
1563#ifndef V8_ENABLE_LEAPTIERING
1564 __ bind(&flags_need_processing);
1565 __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector);
1566#endif // !V8_ENABLE_LEAPTIERING
1567
1568 __ bind(&is_baseline);
1569 {
1570#ifndef V8_ENABLE_LEAPTIERING
1571 // Load the feedback vector from the closure.
1572 __ LoadTaggedField(
1573 feedback_vector,
1574 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1575 __ LoadTaggedField(
1576 feedback_vector,
1577 FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset));
1578
1579 Label install_baseline_code;
1580 // Check if feedback vector is valid. If not, call prepare for baseline to
1581 // allocate it.
1582 __ LoadTaggedField(
1583 x7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
1584 __ Ldrh(x7, FieldMemOperand(x7, Map::kInstanceTypeOffset));
1585 __ Cmp(x7, FEEDBACK_VECTOR_TYPE);
1586 __ B(ne, &install_baseline_code);
1587
1588 // Check the tiering state.
1589 __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
1590 flags, feedback_vector, CodeKind::BASELINE, &flags_need_processing);
1591
1592 // TODO(olivf, 42204201): This fastcase is difficult to support with the
1593 // sandbox as it requires getting write access to the dispatch table. See
1594 // `JSFunction::UpdateCode`. We might want to remove it for all
1595 // configurations as it does not seem to be performance sensitive.
1596
1597 // Load the baseline code into the closure.
1599 static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
1600 __ ReplaceClosureCodeWithOptimizedCode(x2, closure);
1601 __ JumpCodeObject(x2, kJSEntrypointTag);
1602
1603 __ bind(&install_baseline_code);
1604#endif // !V8_ENABLE_LEAPTIERING
1605
1606 __ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
1607 }
1608#endif // !V8_JITLESS
1609
1610 __ bind(&compile_lazy);
1611 __ GenerateTailCallToReturnedCode(Runtime::kCompileLazy);
1612 __ Unreachable(); // Should not return.
1613
1614 __ bind(&stack_overflow);
1615 __ CallRuntime(Runtime::kThrowStackOverflow);
1616 __ Unreachable(); // Should not return.
1617}
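
[Editorial aside, not part of builtins-arm64.cc] The dispatch sequence in the trampoline loads the current bytecode byte, scales it by the pointer size, and jumps to the handler stored at that index in the isolate's dispatch table. Roughly, in C++ (illustrative only; the handler type and table are stand-ins):

// Editorial sketch only; the real code tail-jumps and keeps the dedicated
// interpreter registers (accumulator, bytecode array, offset) live across it.
#include <cstdint>

using BytecodeHandler = void (*)();

void DispatchCurrentBytecode(const uint8_t* bytecode_array, int64_t bytecode_offset,
                             BytecodeHandler const* dispatch_table) {
  const uint8_t bytecode = bytecode_array[bytecode_offset];  // Ldrb
  BytecodeHandler handler = dispatch_table[bytecode];        // Ldr with scaled index
  handler();
}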
1618
1619static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
1620 Register first_arg_index,
1621 Register spread_arg_out,
1622 ConvertReceiverMode receiver_mode,
1624 ASM_CODE_COMMENT(masm);
1625 Register last_arg_addr = x10;
1626 Register stack_addr = x11;
1627 Register slots_to_claim = x12;
1628 Register slots_to_copy = x13;
1629
1630 DCHECK(!AreAliased(num_args, first_arg_index, last_arg_addr, stack_addr,
1631 slots_to_claim, slots_to_copy));
1632 // spread_arg_out may alias with the first_arg_index input.
1633 DCHECK(!AreAliased(spread_arg_out, last_arg_addr, stack_addr, slots_to_claim,
1634 slots_to_copy));
1635
1637 // Exclude final spread from slots to claim and the number of arguments.
1638 __ Sub(num_args, num_args, 1);
1639 }
1640
1641 // Round up to an even number of slots.
1642 __ Add(slots_to_claim, num_args, 1);
1643 __ Bic(slots_to_claim, slots_to_claim, 1);
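      // The Add/Bic pair computes (num_args + 1) & ~1, i.e. num_args rounded up
      // to the next even number, so the claimed area keeps sp 16-byte aligned
      // (two kSystemPointerSize slots per 16 bytes).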
1644
1645 __ Claim(slots_to_claim);
1646 {
1647 // Store padding, which may be overwritten.
1648 UseScratchRegisterScope temps(masm);
1649 Register scratch = temps.AcquireX();
1650 __ Sub(scratch, slots_to_claim, 1);
1651 __ Poke(padreg, Operand(scratch, LSL, kSystemPointerSizeLog2));
1652 }
1653
1654 const bool skip_receiver =
1656 if (skip_receiver) {
1657 __ Sub(slots_to_copy, num_args, kJSArgcReceiverSlots);
1658 } else {
1659 __ Mov(slots_to_copy, num_args);
1660 }
1661 __ SlotAddress(stack_addr, skip_receiver ? 1 : 0);
1662
1663 __ Sub(last_arg_addr, first_arg_index,
1664 Operand(slots_to_copy, LSL, kSystemPointerSizeLog2));
1665 __ Add(last_arg_addr, last_arg_addr, kSystemPointerSize);
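      // last_arg_addr now points at the lowest-addressed of the slots_to_copy
      // arguments: first_arg_index is the highest address and later arguments
      // sit at successively lower addresses (the spread argument, if any, is
      // one slot below last_arg_addr).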
1666
1667 // Load the final spread argument into spread_arg_out, if necessary.
1669 __ Ldr(spread_arg_out, MemOperand(last_arg_addr, -kSystemPointerSize));
1670 }
1671
1672 __ CopyDoubleWords(stack_addr, last_arg_addr, slots_to_copy,
1674
1675 if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1676 // Store "undefined" as the receiver arg if we need to.
1677 Register receiver = x14;
1678 __ LoadRoot(receiver, RootIndex::kUndefinedValue);
1679 __ Poke(receiver, 0);
1680 }
1681}
1682
1683// static
1685 MacroAssembler* masm, ConvertReceiverMode receiver_mode,
1688 // ----------- S t a t e -------------
1689 // -- x0 : the number of arguments
1690 // -- x2 : the address of the first argument to be pushed. Subsequent
1691 // arguments should be consecutive above this, in the same order as
1692 // they are to be pushed onto the stack.
1693 // -- x1 : the target to call (can be any Object).
1694 // -----------------------------------
1695
1696 // Push the arguments. num_args may be updated according to mode.
1697 // spread_arg_out will be updated to contain the last spread argument, when
1698 // mode == InterpreterPushArgsMode::kWithFinalSpread.
1699 Register num_args = x0;
1700 Register first_arg_index = x2;
1701 Register spread_arg_out =
1703 GenerateInterpreterPushArgs(masm, num_args, first_arg_index, spread_arg_out,
1704 receiver_mode, mode);
1705
1706 // Call the target.
1708 __ TailCallBuiltin(Builtin::kCallWithSpread);
1709 } else {
1710 __ TailCallBuiltin(Builtins::Call(receiver_mode));
1711 }
1712}
1713
1714// static
1716 MacroAssembler* masm, InterpreterPushArgsMode mode) {
1717 // ----------- S t a t e -------------
1718 // -- x0 : argument count
1719 // -- x3 : new target
1720 // -- x1 : constructor to call
1721 // -- x2 : allocation site feedback if available, undefined otherwise
1722 // -- x4 : address of the first argument
1723 // -----------------------------------
1724 __ AssertUndefinedOrAllocationSite(x2);
1725
1726 // Push the arguments. num_args may be updated according to mode.
1727 // spread_arg_out will be updated to contain the last spread argument, when
1728 // mode == InterpreterPushArgsMode::kWithFinalSpread.
1729 Register num_args = x0;
1730 Register first_arg_index = x4;
1731 Register spread_arg_out =
1733 GenerateInterpreterPushArgs(masm, num_args, first_arg_index, spread_arg_out,
1735
1737 __ AssertFunction(x1);
1738
1739 // Tail call to the array construct stub (still in the caller
1740 // context at this point).
1741 __ TailCallBuiltin(Builtin::kArrayConstructorImpl);
1742 } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1743 // Call the constructor with x0, x1, and x3 unmodified.
1744 __ TailCallBuiltin(Builtin::kConstructWithSpread);
1745 } else {
1747 // Call the constructor with x0, x1, and x3 unmodified.
1748 __ TailCallBuiltin(Builtin::kConstruct);
1749 }
1750}
1751
1752// static
1754 MacroAssembler* masm, ForwardWhichFrame which_frame) {
1755 // ----------- S t a t e -------------
1756 // -- x3 : new target
1757 // -- x1 : constructor to call
1758 // -----------------------------------
1759 Label stack_overflow;
1760
1761 // Load the frame pointer into x4.
1762 switch (which_frame) {
1764 __ Move(x4, fp);
1765 break;
1768 break;
1769 }
1770
1771 // Load the argument count into x0.
1773
1774 // Point x4 to the base of the argument list to forward, excluding the
1775 // receiver.
1776 __ Add(x4, x4,
1779
1780 Register stack_addr = x11;
1781 Register slots_to_claim = x12;
1782 Register argc_without_receiver = x13;
1783
1784 // Round up to even number of slots.
1785 __ Add(slots_to_claim, x0, 1);
1786 __ Bic(slots_to_claim, slots_to_claim, 1);
1787
1788 __ StackOverflowCheck(slots_to_claim, &stack_overflow);
1789
1790 // Adjust the stack pointer.
1791 __ Claim(slots_to_claim);
1792 {
1793 // Store padding, which may be overwritten.
1794 UseScratchRegisterScope temps(masm);
1795 Register scratch = temps.AcquireX();
1796 __ Sub(scratch, slots_to_claim, 1);
1797 __ Poke(padreg, Operand(scratch, LSL, kSystemPointerSizeLog2));
1798 }
1799
1800 // Copy the arguments.
1801 __ Sub(argc_without_receiver, x0, kJSArgcReceiverSlots);
1802 __ SlotAddress(stack_addr, 1);
1803 __ CopyDoubleWords(stack_addr, x4, argc_without_receiver);
1804
1805 // Push a slot for the receiver to be constructed.
1806 __ Mov(x14, Operand(0));
1807 __ Poke(x14, 0);
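      // Zero is Smi 0, a valid tagged filler for the receiver slot; the actual
      // receiver is created later by the Construct machinery.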
1808
1809 // Call the constructor with x0, x1, and x3 unmodified.
1810 __ TailCallBuiltin(Builtin::kConstruct);
1811
1812 __ Bind(&stack_overflow);
1813 {
1814 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1815 __ Unreachable();
1816 }
1817}
1818
1819namespace {
1820
1821void NewImplicitReceiver(MacroAssembler* masm) {
1822 // ----------- S t a t e -------------
1823 // -- x0 : the number of arguments
1824 // -- x1 : constructor to call (checked to be a JSFunction)
1825 // -- x3 : new target
1826 //
1827 // Stack:
1828 // -- Implicit Receiver
1829 // -- [arguments without receiver]
1830 // -- Implicit Receiver
1831 // -- Context
1832 // -- FastConstructMarker
1833 // -- FramePointer
1834 // -----------------------------------
1835 Register implicit_receiver = x4;
1836
1837 // Save live registers.
1838 __ SmiTag(x0);
1839 __ Push(x0, x1, x3, padreg);
1840 __ CallBuiltin(Builtin::kFastNewObject);
1841 // Save result.
1842 __ Mov(implicit_receiver, x0);
1843 // Restore live registers.
1844 __ Pop(padreg, x3, x1, x0);
1845 __ SmiUntag(x0);
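      // x0 (the argument count) is Smi-tagged before being saved so that its
      // stack slot holds a valid tagged value, rather than a raw integer, while
      // FastNewObject may trigger GC.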
1846
1847  // Patch the implicit receiver (in the arguments).
1848  __ Poke(implicit_receiver, 0 * kSystemPointerSize);
1849  // Patch the second implicit receiver (in the construct frame).
1850 __ Str(implicit_receiver,
1852
1853 // Restore context.
1855}
1856
1857} // namespace
1858
1859// static
1860void Builtins::Generate_InterpreterPushArgsThenFastConstructFunction(
1861 MacroAssembler* masm) {
1862 // ----------- S t a t e -------------
1863 // -- x0 : argument count
1864 // -- x1 : constructor to call (checked to be a JSFunction)
1865 // -- x3 : new target
1866 // -- x4 : address of the first argument
1867 // -- cp : context pointer
1868 // -----------------------------------
1869 __ AssertFunction(x1);
1870
1871 // Check if target has a [[Construct]] internal method.
1872 Label non_constructor;
1873 __ LoadMap(x2, x1);
1874 __ Ldrb(x2, FieldMemOperand(x2, Map::kBitFieldOffset));
1875 __ TestAndBranchIfAllClear(x2, Map::Bits1::IsConstructorBit::kMask,
1876 &non_constructor);
1877
1878 // Enter a construct frame.
1879 FrameScope scope(masm, StackFrame::MANUAL);
1880 __ EnterFrame(StackFrame::FAST_CONSTRUCT);
1881
1882 if (v8_flags.debug_code) {
1883 // Check that FrameScope pushed the context on to the stack already.
1884 __ Peek(x2, 0);
1885 __ Cmp(x2, cp);
1886 __ Check(eq, AbortReason::kUnexpectedValue);
1887 }
1888
1889 // Implicit receiver stored in the construct frame.
1890 __ LoadRoot(x2, RootIndex::kTheHoleValue);
1891 __ Push(x2, padreg);
1892
1893 // Push arguments + implicit receiver.
1897 __ Poke(x2, 0 * kSystemPointerSize);
1898
1899 // Check if it is a builtin call.
1900 Label builtin_call;
1901 __ LoadTaggedField(
1902 x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
1903 __ Ldr(w2, FieldMemOperand(x2, SharedFunctionInfo::kFlagsOffset));
1904 __ TestAndBranchIfAnySet(w2, SharedFunctionInfo::ConstructAsBuiltinBit::kMask,
1905 &builtin_call);
1906
1907 // Check if we need to create an implicit receiver.
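      // Derived class constructors receive their "this" from super(), so no
      // implicit receiver object is created for them.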
1908 Label not_create_implicit_receiver;
1909 __ DecodeField<SharedFunctionInfo::FunctionKindBits>(w2);
1910 __ JumpIfIsInRange(
1911 w2, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
1912 static_cast<uint32_t>(FunctionKind::kDerivedConstructor),
1913 &not_create_implicit_receiver);
1914 NewImplicitReceiver(masm);
1915 __ bind(&not_create_implicit_receiver);
1916
1917 // Call the function.
1918 __ InvokeFunctionWithNewTarget(x1, x3, x0, InvokeType::kCall);
1919
1920 // ----------- S t a t e -------------
1921 // -- x0 constructor result
1922 //
1923 // Stack:
1924 // -- Implicit Receiver
1925 // -- Context
1926 // -- FastConstructMarker
1927 // -- FramePointer
1928 // -----------------------------------
1929
1930 // Store offset of return address for deoptimizer.
1931 masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
1932 masm->pc_offset());
1933
1934 // If the result is an object (in the ECMA sense), we should get rid
1935 // of the receiver and use the result; see ECMA-262 section 13.2.2-7
1936 // on page 74.
1937 Label use_receiver, do_throw, leave_and_return, check_receiver;
1938
1939 // If the result is undefined, we jump out to using the implicit receiver.
1940 __ CompareRoot(x0, RootIndex::kUndefinedValue);
1941 __ B(ne, &check_receiver);
1942
1943 // Throw away the result of the constructor invocation and use the
1944 // on-stack receiver as the result.
1945 __ Bind(&use_receiver);
1946 __ Ldr(x0,
1948 __ CompareRoot(x0, RootIndex::kTheHoleValue);
1949 __ B(eq, &do_throw);
1950
1951 __ Bind(&leave_and_return);
1952 // Leave construct frame.
1953 __ LeaveFrame(StackFrame::FAST_CONSTRUCT);
1954 __ Ret();
1955
1956 // Otherwise we do a smi check and fall through to check if the return value
1957 // is a valid receiver.
1958 __ bind(&check_receiver);
1959
1960 // If the result is a smi, it is *not* an object in the ECMA sense.
1961 __ JumpIfSmi(x0, &use_receiver);
1962
1963 // Check if the type of the result is not an object in the ECMA sense.
1964 __ JumpIfJSAnyIsNotPrimitive(x0, x4, &leave_and_return);
1965 __ B(&use_receiver);
1966
1967 __ bind(&builtin_call);
1968 // TODO(victorgomes): Check the possibility to turn this into a tailcall.
1969 __ InvokeFunctionWithNewTarget(x1, x3, x0, InvokeType::kCall);
1970 __ LeaveFrame(StackFrame::FAST_CONSTRUCT);
1971 __ Ret();
1972
1973 __ Bind(&do_throw);
1974 // Restore the context from the frame.
1976 __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
1977 __ Unreachable();
1978
1979 // Called Construct on an Object that doesn't have a [[Construct]] internal
1980 // method.
1981 __ bind(&non_constructor);
1982 __ TailCallBuiltin(Builtin::kConstructedNonConstructable);
1983}
1984
1985static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
1986 // Initialize the dispatch table register.
1987 __ Mov(
1989 ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1990
1991 // Get the bytecode array pointer from the frame.
1994
1995 if (v8_flags.debug_code) {
1996 // Check function data field is actually a BytecodeArray object.
1997 __ AssertNotSmi(
1999 AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
2000 __ IsObjectType(kInterpreterBytecodeArrayRegister, x1, x1,
2001 BYTECODE_ARRAY_TYPE);
2002 __ Assert(
2003 eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
2004 }
2005
2006 // Get the target bytecode offset from the frame.
2009
2010 if (v8_flags.debug_code) {
2011 Label okay;
2014 __ B(ge, &okay);
2015 __ Unreachable();
2016 __ bind(&okay);
2017 }
2018
2019 // Dispatch to the target bytecode.
2022 __ Mov(x1, Operand(x23, LSL, kSystemPointerSizeLog2));
2025
2026 {
2027 UseScratchRegisterScope temps(masm);
2028 temps.Exclude(x17);
2030 __ Call(x17);
2031 }
2032
2033 // We return here after having executed the function in the interpreter.
2034 // Now jump to the correct point in the interpreter entry trampoline.
2035 Label builtin_trampoline, trampoline_loaded;
2036 Tagged<Smi> interpreter_entry_return_pc_offset(
2037 masm->isolate()->heap()->interpreter_entry_return_pc_offset());
2038 DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
2039
2040 // If the SFI function_data is an InterpreterData, the function will have a
2041 // custom copy of the interpreter entry trampoline for profiling. If so,
2042 // get the custom trampoline, otherwise grab the entry address of the global
2043 // trampoline.
2045 __ LoadTaggedField(
2046 x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
2047 __ LoadTrustedPointerField(
2048 x1, FieldMemOperand(x1, SharedFunctionInfo::kTrustedFunctionDataOffset),
2050 __ IsObjectType(x1, kInterpreterDispatchTableRegister,
2051 kInterpreterDispatchTableRegister, INTERPRETER_DATA_TYPE);
2052 __ B(ne, &builtin_trampoline);
2053
2054 __ LoadProtectedPointerField(
2055 x1, FieldMemOperand(x1, InterpreterData::kInterpreterTrampolineOffset));
2056 __ LoadCodeInstructionStart(x1, x1, kJSEntrypointTag);
2057 __ B(&trampoline_loaded);
2058
2059 __ Bind(&builtin_trampoline);
2060 __ Mov(x1, ExternalReference::
2061 address_of_interpreter_entry_trampoline_instruction_start(
2062 masm->isolate()));
2063 __ Ldr(x1, MemOperand(x1));
2064
2065 __ Bind(&trampoline_loaded);
2066
2067 {
2068 UseScratchRegisterScope temps(masm);
2069 temps.Exclude(x17);
2070 __ Add(x17, x1, Operand(interpreter_entry_return_pc_offset.value()));
2071 __ Br(x17);
2072 }
2073}
2074
2075void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
2076 // Get bytecode array and bytecode offset from the stack frame.
2081
2082 Label enter_bytecode, function_entry_bytecode;
2086 __ B(eq, &function_entry_bytecode);
2087
2088 // Load the current bytecode.
2091
2092 // Advance to the next bytecode.
2093 Label if_return;
2096 &if_return);
2097
2098 __ bind(&enter_bytecode);
2099 // Convert new bytecode offset to a Smi and save in the stackframe.
2102
2104
2105 __ bind(&function_entry_bytecode);
2106 // If the code deoptimizes during the implicit function entry stack interrupt
2107 // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
2108 // not a valid bytecode offset. Detect this case and advance to the first
2109 // actual bytecode.
2112 __ B(&enter_bytecode);
2113
2114 // We should never take the if_return path.
2115 __ bind(&if_return);
2116 __ Abort(AbortReason::kInvalidBytecodeAdvance);
2117}
2118
2119void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
2121}
2122
2123namespace {
2124void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
2125 bool javascript_builtin,
2126 bool with_result) {
2127 const RegisterConfiguration* config(RegisterConfiguration::Default());
2128 int allocatable_register_count = config->num_allocatable_general_registers();
2130 (allocatable_register_count +
2132 allocatable_register_count)) *
2134
2135 UseScratchRegisterScope temps(masm);
2136 Register scratch = temps.AcquireX(); // Temp register is not allocatable.
2137
2138 // Set up frame pointer.
2139 __ Add(fp, sp, frame_size);
2140
2141 if (with_result) {
2142 if (javascript_builtin) {
2143 __ mov(scratch, x0);
2144 } else {
2145 // Overwrite the hole inserted by the deoptimizer with the return value
2146 // from the LAZY deopt point.
2147 __ Str(x0, MemOperand(
2149 }
2150 }
2151
2152 // Restore registers in pairs.
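      // Two registers per Ldp, walking down from the highest allocation index;
      // a single Ldr below handles the remaining register when the count is odd.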
2154 allocatable_register_count * kSystemPointerSize;
2155 for (int i = allocatable_register_count - 1; i > 0; i -= 2) {
2156 int code1 = config->GetAllocatableGeneralCode(i);
2157 int code2 = config->GetAllocatableGeneralCode(i - 1);
2158 Register reg1 = Register::from_code(code1);
2159 Register reg2 = Register::from_code(code2);
2160 __ Ldp(reg1, reg2, MemOperand(fp, offset));
2162 }
2163
2164 // Restore first register separately, if number of registers is odd.
2165 if (allocatable_register_count % 2 != 0) {
2166 int code = config->GetAllocatableGeneralCode(0);
2167 __ Ldr(Register::from_code(code), MemOperand(fp, offset));
2168 }
2169
2170 if (javascript_builtin) __ SmiUntag(kJavaScriptCallArgCountRegister);
2171
2172 if (javascript_builtin && with_result) {
2173 // Overwrite the hole inserted by the deoptimizer with the return value from
2174    // the LAZY deopt point. x0 contains the argument count; the return value
2175    // from LAZY is always the last argument.
2176 constexpr int return_offset =
2180 __ add(x0, x0, return_offset);
2181 __ Str(scratch, MemOperand(fp, x0, LSL, kSystemPointerSizeLog2));
2182 // Recover argument count.
2183 __ sub(x0, x0, return_offset);
2184 }
2185
2186 // Load builtin index (stored as a Smi) and use it to get the builtin start
2187 // address from the builtins table.
2188 Register builtin = scratch;
2189 __ Ldr(
2190 builtin,
2192
2193 // Restore fp, lr.
2194 __ Mov(sp, fp);
2195 __ Pop<MacroAssembler::kAuthLR>(fp, lr);
2196
2197 __ LoadEntryFromBuiltinIndex(builtin, builtin);
2198 __ Jump(builtin);
2199}
2200} // namespace
2201
2202void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
2203 Generate_ContinueToBuiltinHelper(masm, false, false);
2204}
2205
2206void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
2207 MacroAssembler* masm) {
2208 Generate_ContinueToBuiltinHelper(masm, false, true);
2209}
2210
2211void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
2212 Generate_ContinueToBuiltinHelper(masm, true, false);
2213}
2214
2215void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
2216 MacroAssembler* masm) {
2217 Generate_ContinueToBuiltinHelper(masm, true, true);
2218}
2219
2220void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
2221 {
2222 FrameScope scope(masm, StackFrame::INTERNAL);
2223 __ CallRuntime(Runtime::kNotifyDeoptimized);
2224 }
2225
2226 // Pop TOS register and padding.
2228 __ Pop(x0, padreg);
2229 __ Ret();
2230}
2231
2232namespace {
2233
2234void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
2235 Operand offset = Operand(0)) {
2236 // Pop the return address to this function's caller from the return stack
2237 // buffer, since we'll never return to it.
2238 Label jump;
2239 __ Adr(lr, &jump);
2240 __ Ret();
2241
2242 __ Bind(&jump);
2243
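      // The branch below goes through x17: with BTI enabled, an indirect branch
      // via x16/x17 may land on the "BTI c" pad emitted at the target code
      // entry, which a branch through an arbitrary register could not.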
2244 UseScratchRegisterScope temps(masm);
2245 temps.Exclude(x17);
2246 if (offset.IsZero()) {
2247 __ Mov(x17, entry_address);
2248 } else {
2249 __ Add(x17, entry_address, offset);
2250 }
2251 __ Br(x17);
2252}
2253
2254enum class OsrSourceTier {
2255 kInterpreter,
2256 kBaseline,
2257};
2258
2259void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
2260 Register maybe_target_code,
2261 Register expected_param_count) {
2262 Label jump_to_optimized_code;
2263 {
2264 // If maybe_target_code is not null, no need to call into runtime. A
2265 // precondition here is: if maybe_target_code is an InstructionStream
2266 // object, it must NOT be marked_for_deoptimization (callers must ensure
2267 // this).
2268 __ CompareTaggedAndBranch(maybe_target_code, Smi::zero(), ne,
2269 &jump_to_optimized_code);
2270 }
2271
2272 ASM_CODE_COMMENT(masm);
2273 {
2274 FrameScope scope(masm, StackFrame::INTERNAL);
2275 __ Push(expected_param_count, padreg);
2276 __ CallRuntime(Runtime::kCompileOptimizedOSR);
2277 DCHECK_EQ(maybe_target_code, x0);
2278 __ Pop(padreg, expected_param_count);
2279 }
2280
2281 // If the code object is null, just return to the caller.
2282 __ CompareTaggedAndBranch(maybe_target_code, Smi::zero(), ne,
2283 &jump_to_optimized_code);
2284 __ Ret();
2285
2286 __ Bind(&jump_to_optimized_code);
2287
2288 const Register scratch(x2);
2289 CHECK(!AreAliased(maybe_target_code, expected_param_count, scratch));
2290
2291 // OSR entry tracing.
2292 {
2293 Label next;
2294 __ Mov(scratch, ExternalReference::address_of_log_or_trace_osr());
2295 __ Ldrsb(scratch, MemOperand(scratch));
2296    __ Tst(scratch, 0xFF);  // Test the least-significant byte.
2297 __ B(eq, &next);
2298
2299 {
2300 FrameScope scope(masm, StackFrame::INTERNAL);
2301 // Preserve arguments.
2302 __ Push(maybe_target_code, expected_param_count);
2303 __ CallRuntime(Runtime::kLogOrTraceOptimizedOSREntry, 0);
2304 __ Pop(expected_param_count, maybe_target_code);
2305 }
2306
2307 __ Bind(&next);
2308 }
2309
2310 if (source == OsrSourceTier::kInterpreter) {
2311    // Drop the handler frame that is sitting on top of the actual
2312    // JavaScript frame. This is the case when OSR is triggered from bytecode.
2313 __ LeaveFrame(StackFrame::STUB);
2314 }
2315
2316  // Check that we are actually jumping to an OSR code object. Among other
2317  // things, this ensures that the object contains deoptimization data below.
2318 __ Ldr(scratch.W(),
2319 FieldMemOperand(maybe_target_code, Code::kOsrOffsetOffset));
2320 __ Cmp(scratch.W(), BytecodeOffset::None().ToInt());
2321 __ SbxCheck(Condition::kNotEqual, AbortReason::kExpectedOsrCode);
2322
2323  // Check that the target has a matching parameter count. This ensures that the OSR
2324 // code will correctly tear down our frame when leaving.
2325 __ Ldrh(scratch.W(),
2326 FieldMemOperand(maybe_target_code, Code::kParameterCountOffset));
2327 __ SmiUntag(expected_param_count);
2328 __ Cmp(scratch.W(), expected_param_count.W());
2329 __ SbxCheck(Condition::kEqual, AbortReason::kOsrUnexpectedStackSize);
2330
2331 // Load deoptimization data from the code object.
2332 // <deopt_data> = <code>[#deoptimization_data_offset]
2333 __ LoadProtectedPointerField(
2334 scratch,
2335 FieldMemOperand(maybe_target_code,
2336 Code::kDeoptimizationDataOrInterpreterDataOffset));
2337
2338 // Load the OSR entrypoint offset from the deoptimization data.
2339 // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
2340 __ SmiUntagField(
2341 scratch,
2344
2345 __ LoadCodeInstructionStart(maybe_target_code, maybe_target_code,
2347
2348 // Compute the target address = code_entry + osr_offset
2349 // <entry_addr> = <code_entry> + <osr_offset>
2350 Generate_OSREntry(masm, maybe_target_code, scratch);
2351}
2352
2353} // namespace
2354
2355void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
2356 using D = OnStackReplacementDescriptor;
2357 static_assert(D::kParameterCount == 2);
2358 OnStackReplacement(masm, OsrSourceTier::kInterpreter,
2359 D::MaybeTargetCodeRegister(),
2360 D::ExpectedParameterCountRegister());
2361}
2362
2363void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
2364 using D = OnStackReplacementDescriptor;
2365 static_assert(D::kParameterCount == 2);
2366
2367 __ ldr(kContextRegister,
2369 OnStackReplacement(masm, OsrSourceTier::kBaseline,
2370 D::MaybeTargetCodeRegister(),
2371 D::ExpectedParameterCountRegister());
2372}
2373
2374#ifdef V8_ENABLE_MAGLEV
2375
2376// static
2377void Builtins::Generate_MaglevFunctionEntryStackCheck(MacroAssembler* masm,
2378 bool save_new_target) {
2379 // Input (x0): Stack size (Smi).
2380 // This builtin can be invoked just after Maglev's prologue.
2381 // All registers are available, except (possibly) new.target.
2382 ASM_CODE_COMMENT(masm);
2383 {
2384 FrameScope scope(masm, StackFrame::INTERNAL);
2385 __ AssertSmi(x0);
2386 if (save_new_target) {
2388 __ AssertSmiOrHeapObjectInMainCompressionCage(
2390 }
2392 }
2393 __ PushArgument(x0);
2394 __ CallRuntime(Runtime::kStackGuardWithGap, 1);
2395 if (save_new_target) {
2397 }
2398 }
2399 __ Ret();
2400}
2401
2402#endif // V8_ENABLE_MAGLEV
2403
2404// static
2405void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
2406 // ----------- S t a t e -------------
2407 // -- x0 : argc
2408 // -- sp[0] : receiver
2409 // -- sp[8] : thisArg (if argc >= 1)
2410 // -- sp[16] : argArray (if argc == 2)
2411 // -----------------------------------
2412
2413 ASM_LOCATION("Builtins::Generate_FunctionPrototypeApply");
2414
2415 Register argc = x0;
2416 Register receiver = x1;
2417 Register arg_array = x2;
2418 Register this_arg = x3;
2419 Register undefined_value = x4;
2420 Register null_value = x5;
2421
2422 __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
2423 __ LoadRoot(null_value, RootIndex::kNullValue);
2424
2425 // 1. Load receiver into x1, argArray into x2 (if present), remove all
2426 // arguments from the stack (including the receiver), and push thisArg (if
2427 // present) instead.
2428 {
2429 Label done;
2430 __ Mov(this_arg, undefined_value);
2431 __ Mov(arg_array, undefined_value);
2432 __ Peek(receiver, 0);
2433 __ Cmp(argc, Immediate(JSParameterCount(1)));
2434 __ B(lt, &done);
2436 __ B(eq, &done);
2437 __ Peek(arg_array, 2 * kSystemPointerSize);
2438 __ bind(&done);
2439 }
2440 __ DropArguments(argc);
2441 __ PushArgument(this_arg);
2442
2443 // ----------- S t a t e -------------
2444 // -- x2 : argArray
2445 // -- x1 : receiver
2446 // -- sp[0] : thisArg
2447 // -----------------------------------
2448
2449 // 2. We don't need to check explicitly for callable receiver here,
2450 // since that's the first thing the Call/CallWithArrayLike builtins
2451 // will do.
2452
2453 // 3. Tail call with no arguments if argArray is null or undefined.
2454 Label no_arguments;
2455 __ CmpTagged(arg_array, null_value);
2456 __ CcmpTagged(arg_array, undefined_value, ZFlag, ne);
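      // The CmpTagged/CcmpTagged pair leaves the Z flag set exactly when
      // arg_array is null or undefined: if the first compare was not-equal, the
      // conditional compare against undefined runs; otherwise the flags are
      // forced to eq.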
2457 __ B(eq, &no_arguments);
2458
2459 // 4a. Apply the receiver to the given argArray.
2460 __ TailCallBuiltin(Builtin::kCallWithArrayLike);
2461
2462 // 4b. The argArray is either null or undefined, so we tail call without any
2463 // arguments to the receiver.
2464 __ Bind(&no_arguments);
2465 {
2466 __ Mov(x0, JSParameterCount(0));
2467 DCHECK_EQ(receiver, x1);
2468 __ TailCallBuiltin(Builtins::Call());
2469 }
2470}
2471
2472// static
2473void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
2474 Register argc = x0;
2475 Register function = x1;
2476
2477 ASM_LOCATION("Builtins::Generate_FunctionPrototypeCall");
2478
2479 // 1. Get the callable to call (passed as receiver) from the stack.
2480 __ Peek(function, __ ReceiverOperand());
2481
2482 // 2. Handle case with no arguments.
2483 {
2484 Label non_zero;
2485 Register scratch = x10;
2486 __ Cmp(argc, JSParameterCount(0));
2487 __ B(gt, &non_zero);
2488 __ LoadRoot(scratch, RootIndex::kUndefinedValue);
2489 // Overwrite receiver with undefined, which will be the new receiver.
2490 // We do not need to overwrite the padding slot above it with anything.
2491 __ Poke(scratch, 0);
2492 // Call function. The argument count is already zero.
2493 __ TailCallBuiltin(Builtins::Call());
2494 __ Bind(&non_zero);
2495 }
2496
2497 Label arguments_ready;
2498  // 3. Shift the arguments. How we do this depends on whether the argument
2499  // count is even or odd, i.e. whether an alignment padding slot exists.
2500 {
2501 Label even;
2502 Register copy_from = x10;
2503 Register copy_to = x11;
2504 Register count = x12;
2505 UseScratchRegisterScope temps(masm);
2506 Register argc_without_receiver = temps.AcquireX();
2507 __ Sub(argc_without_receiver, argc, kJSArgcReceiverSlots);
2508
2509 // CopyDoubleWords changes the count argument.
2510 __ Mov(count, argc_without_receiver);
2511 __ Tbz(argc_without_receiver, 0, &even);
2512
2513 // Shift arguments one slot down on the stack (overwriting the original
2514 // receiver).
2515 __ SlotAddress(copy_from, 1);
2516 __ Sub(copy_to, copy_from, kSystemPointerSize);
2517 __ CopyDoubleWords(copy_to, copy_from, count);
2518 // Overwrite the duplicated remaining last argument.
2519 __ Poke(padreg, Operand(argc_without_receiver, LSL, kXRegSizeLog2));
2520 __ B(&arguments_ready);
2521
2522 // Copy arguments one slot higher in memory, overwriting the original
2523 // receiver and padding.
2524 __ Bind(&even);
2525 __ SlotAddress(copy_from, count);
2526 __ Add(copy_to, copy_from, kSystemPointerSize);
2527 __ CopyDoubleWords(copy_to, copy_from, count,
2529 __ Drop(2);
2530 }
2531
2532 // 5. Adjust argument count to make the original first argument the new
2533 // receiver and call the callable.
2534 __ Bind(&arguments_ready);
2535 __ Sub(argc, argc, 1);
2536 __ TailCallBuiltin(Builtins::Call());
2537}
2538
2539void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
2540 // ----------- S t a t e -------------
2541 // -- x0 : argc
2542 // -- sp[0] : receiver
2543 // -- sp[8] : target (if argc >= 1)
2544 // -- sp[16] : thisArgument (if argc >= 2)
2545 // -- sp[24] : argumentsList (if argc == 3)
2546 // -----------------------------------
2547
2548 ASM_LOCATION("Builtins::Generate_ReflectApply");
2549
2550 Register argc = x0;
2551 Register arguments_list = x2;
2552 Register target = x1;
2553 Register this_argument = x4;
2554 Register undefined_value = x3;
2555
2556 __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
2557
2558 // 1. Load target into x1 (if present), argumentsList into x2 (if present),
2559 // remove all arguments from the stack (including the receiver), and push
2560 // thisArgument (if present) instead.
2561 {
2562 Label done;
2563 __ Mov(target, undefined_value);
2564 __ Mov(this_argument, undefined_value);
2565 __ Mov(arguments_list, undefined_value);
2566 __ Cmp(argc, Immediate(JSParameterCount(1)));
2567 __ B(lt, &done);
2568 __ Peek(target, kSystemPointerSize);
2569 __ B(eq, &done);
2570 __ Peek(this_argument, 2 * kSystemPointerSize);
2571 __ Cmp(argc, Immediate(JSParameterCount(3)));
2572 __ B(lt, &done);
2573 __ Peek(arguments_list, 3 * kSystemPointerSize);
2574 __ bind(&done);
2575 }
2576 __ DropArguments(argc);
2577 __ PushArgument(this_argument);
2578
2579 // ----------- S t a t e -------------
2580 // -- x2 : argumentsList
2581 // -- x1 : target
2582 // -- sp[0] : thisArgument
2583 // -----------------------------------
2584
2585 // 2. We don't need to check explicitly for callable target here,
2586 // since that's the first thing the Call/CallWithArrayLike builtins
2587 // will do.
2588
2589 // 3. Apply the target to the given argumentsList.
2590 __ TailCallBuiltin(Builtin::kCallWithArrayLike);
2591}
2592
2593void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
2594 // ----------- S t a t e -------------
2595 // -- x0 : argc
2596 // -- sp[0] : receiver
2597 // -- sp[8] : target
2598 // -- sp[16] : argumentsList
2599 // -- sp[24] : new.target (optional)
2600 // -----------------------------------
2601
2602 ASM_LOCATION("Builtins::Generate_ReflectConstruct");
2603
2604 Register argc = x0;
2605 Register arguments_list = x2;
2606 Register target = x1;
2607 Register new_target = x3;
2608 Register undefined_value = x4;
2609
2610 __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
2611
2612 // 1. Load target into x1 (if present), argumentsList into x2 (if present),
2613 // new.target into x3 (if present, otherwise use target), remove all
2614 // arguments from the stack (including the receiver), and push thisArgument
2615 // (if present) instead.
2616 {
2617 Label done;
2618 __ Mov(target, undefined_value);
2619 __ Mov(arguments_list, undefined_value);
2620 __ Mov(new_target, undefined_value);
2621 __ Cmp(argc, Immediate(JSParameterCount(1)));
2622 __ B(lt, &done);
2623 __ Peek(target, kSystemPointerSize);
2624 __ B(eq, &done);
2625 __ Peek(arguments_list, 2 * kSystemPointerSize);
2626 __ Mov(new_target, target); // new.target defaults to target
2627 __ Cmp(argc, Immediate(JSParameterCount(3)));
2628 __ B(lt, &done);
2629 __ Peek(new_target, 3 * kSystemPointerSize);
2630 __ bind(&done);
2631 }
2632
2633 __ DropArguments(argc);
2634
2635 // Push receiver (undefined).
2636 __ PushArgument(undefined_value);
2637
2638 // ----------- S t a t e -------------
2639 // -- x2 : argumentsList
2640 // -- x1 : target
2641 // -- x3 : new.target
2642 // -- sp[0] : receiver (undefined)
2643 // -----------------------------------
2644
2645 // 2. We don't need to check explicitly for constructor target here,
2646 // since that's the first thing the Construct/ConstructWithArrayLike
2647 // builtins will do.
2648
2649 // 3. We don't need to check explicitly for constructor new.target here,
2650 // since that's the second thing the Construct/ConstructWithArrayLike
2651 // builtins will do.
2652
2653 // 4. Construct the target with the given new.target and argumentsList.
2654 __ TailCallBuiltin(Builtin::kConstructWithArrayLike);
2655}
2656
2657namespace {
2658
2659// Prepares the stack for copying the varargs. First we claim the necessary
2660// slots, taking care of potential padding. Then we copy the existing arguments
2661// one slot up or one slot down, as needed.
2662void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
2663 Register len) {
2664 Label exit, even, init;
2665 Register slots_to_copy = x10;
2666 Register slots_to_claim = x12;
2667
2668 __ Mov(slots_to_copy, argc);
2669 __ Mov(slots_to_claim, len);
2670 __ Tbz(slots_to_claim, 0, &even);
2671
2672 // Claim space we need. If argc (without receiver) is even, slots_to_claim =
2673 // len + 1, as we need one extra padding slot. If argc (without receiver) is
2674 // odd, we know that the original arguments will have a padding slot we can
2675 // reuse (since len is odd), so slots_to_claim = len - 1.
2676 {
2677 Register scratch = x11;
2678 __ Add(slots_to_claim, len, 1);
2679 __ And(scratch, argc, 1);
2680 __ Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
2681 }
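      // I.e. slots_to_claim = (len + 1) - 2 * (argc & 1), where argc includes
      // the receiver slot: len + 1 when argc is even, len - 1 when argc is odd.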
2682
2683 __ Bind(&even);
2684 __ Cbz(slots_to_claim, &exit);
2685 __ Claim(slots_to_claim);
2686 // An alignment slot may have been allocated above. If the number of stack
2687  // parameters is 0, then we have to initialize the alignment slot.
2688 __ Cbz(slots_to_copy, &init);
2689
2690 // Move the arguments already in the stack including the receiver.
2691 {
2692 Register src = x11;
2693 Register dst = x12;
2694 __ SlotAddress(src, slots_to_claim);
2695 __ SlotAddress(dst, 0);
2696 __ CopyDoubleWords(dst, src, slots_to_copy);
2697 __ jmp(&exit);
2698 }
2699  // Initialize the alignment slot with a meaningful value. This is only
2700  // necessary if slots_to_copy is 0, because otherwise the alignment slot
2701  // already contains a valid value. In case slots_to_copy is even, the
2702  // alignment slot contains the last parameter passed over the stack. In case
2703  // slots_to_copy is odd, the alignment slot is the one that already existed
2704  // when CallVarArgs (or similar) was called, and it was already initialized
2705  // for that call.
2706 {
2707 __ Bind(&init);
2708 // This code here is only reached when the number of stack parameters is 0.
2709 // In that case we have to initialize the alignment slot if there is one.
2710 __ Tbz(len, 0, &exit);
2711 __ Str(xzr, MemOperand(sp, len, LSL, kSystemPointerSizeLog2));
2712 }
2713 __ Bind(&exit);
2714}
2715
2716} // namespace
2717
2718// static
2719// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
2720void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
2721 Builtin target_builtin) {
2722 // ----------- S t a t e -------------
2723 // -- x1 : target
2724 // -- x0 : number of parameters on the stack
2725 // -- x2 : arguments list (a FixedArray)
2726 // -- x4 : len (number of elements to push from args)
2727 // -- x3 : new.target (for [[Construct]])
2728 // -----------------------------------
2729 if (v8_flags.debug_code) {
2730 // Allow x2 to be a FixedArray, or a FixedDoubleArray if x4 == 0.
2731 Label ok, fail;
2732 __ AssertNotSmi(x2, AbortReason::kOperandIsNotAFixedArray);
2733 __ LoadTaggedField(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
2734 __ Ldrh(x13, FieldMemOperand(x10, Map::kInstanceTypeOffset));
2735 __ Cmp(x13, FIXED_ARRAY_TYPE);
2736 __ B(eq, &ok);
2737 __ Cmp(x13, FIXED_DOUBLE_ARRAY_TYPE);
2738 __ B(ne, &fail);
2739 __ Cmp(x4, 0);
2740 __ B(eq, &ok);
2741 // Fall through.
2742 __ bind(&fail);
2743 __ Abort(AbortReason::kOperandIsNotAFixedArray);
2744
2745 __ bind(&ok);
2746 }
2747
2748 Register arguments_list = x2;
2749 Register argc = x0;
2750 Register len = x4;
2751
2752 Label stack_overflow;
2753 __ StackOverflowCheck(len, &stack_overflow);
2754
2755 // Skip argument setup if we don't need to push any varargs.
2756 Label done;
2757 __ Cbz(len, &done);
2758
2759 Generate_PrepareForCopyingVarargs(masm, argc, len);
2760
2761 // Push varargs.
2762 {
2763 Label loop;
2764 Register src = x10;
2765 Register undefined_value = x12;
2766 Register scratch = x13;
2767 __ Add(src, arguments_list,
2768 OFFSET_OF_DATA_START(FixedArray) - kHeapObjectTag);
2769#if !V8_STATIC_ROOTS_BOOL
2770 // We do not use the CompareRoot macro without static roots as it would do a
2771 // LoadRoot behind the scenes and we want to avoid that in a loop.
2772 Register the_hole_value = x11;
2773 __ LoadTaggedRoot(the_hole_value, RootIndex::kTheHoleValue);
2774#endif // !V8_STATIC_ROOTS_BOOL
2775 __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
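        // undefined_value replaces any holes found in the arguments FixedArray
        // (see the Csel below), so callees never observe the_hole.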
2776 // TODO(all): Consider using Ldp and Stp.
2777 Register dst = x16;
2778 __ SlotAddress(dst, argc);
2779 __ Add(argc, argc, len); // Update new argc.
2780 __ Bind(&loop);
2781 __ Sub(len, len, 1);
2782 __ LoadTaggedField(scratch, MemOperand(src, kTaggedSize, PostIndex));
2783#if V8_STATIC_ROOTS_BOOL
2784 __ CompareRoot(scratch, RootIndex::kTheHoleValue);
2785#else
2786 __ CmpTagged(scratch, the_hole_value);
2787#endif
2788 __ Csel(scratch, scratch, undefined_value, ne);
2789 __ Str(scratch, MemOperand(dst, kSystemPointerSize, PostIndex));
2790 __ Cbnz(len, &loop);
2791 }
2792 __ Bind(&done);
2793 // Tail-call to the actual Call or Construct builtin.
2794 __ TailCallBuiltin(target_builtin);
2795
2796 __ bind(&stack_overflow);
2797 __ TailCallRuntime(Runtime::kThrowStackOverflow);
2798}
2799
2800// static
2801void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
2802 CallOrConstructMode mode,
2803 Builtin target_builtin) {
2804 // ----------- S t a t e -------------
2805 // -- x0 : the number of arguments
2806 // -- x3 : the new.target (for [[Construct]] calls)
2807 // -- x1 : the target to call (can be any Object)
2808 // -- x2 : start index (to support rest parameters)
2809 // -----------------------------------
2810
2811 Register argc = x0;
2812 Register start_index = x2;
2813
2814 // Check if new.target has a [[Construct]] internal method.
2815 if (mode == CallOrConstructMode::kConstruct) {
2816 Label new_target_constructor, new_target_not_constructor;
2817 __ JumpIfSmi(x3, &new_target_not_constructor);
2818 __ LoadTaggedField(x5, FieldMemOperand(x3, HeapObject::kMapOffset));
2819 __ Ldrb(x5, FieldMemOperand(x5, Map::kBitFieldOffset));
2820 __ TestAndBranchIfAnySet(x5, Map::Bits1::IsConstructorBit::kMask,
2821 &new_target_constructor);
2822 __ Bind(&new_target_not_constructor);
2823 {
2824 FrameScope scope(masm, StackFrame::MANUAL);
2825 __ EnterFrame(StackFrame::INTERNAL);
2826 __ PushArgument(x3);
2827 __ CallRuntime(Runtime::kThrowNotConstructor);
2828 __ Unreachable();
2829 }
2830 __ Bind(&new_target_constructor);
2831 }
2832
2833 Register len = x6;
2834 Label stack_done, stack_overflow;
2836 __ Subs(len, len, kJSArgcReceiverSlots);
2837 __ Subs(len, len, start_index);
2838 __ B(le, &stack_done);
2839 // Check for stack overflow.
2840 __ StackOverflowCheck(len, &stack_overflow);
2841
2842 Generate_PrepareForCopyingVarargs(masm, argc, len);
2843
2844 // Push varargs.
2845 {
2846 Register args_fp = x5;
2847 Register dst = x13;
2848    // Point to the first argument to copy from (skipping the receiver).
2849 __ Add(args_fp, fp,
2851 __ lsl(start_index, start_index, kSystemPointerSizeLog2);
2852 __ Add(args_fp, args_fp, start_index);
2853 // Point to the position to copy to.
2854 __ SlotAddress(dst, argc);
2855 // Update total number of arguments.
2856 __ Add(argc, argc, len);
2857 __ CopyDoubleWords(dst, args_fp, len);
2858 }
2859
2860 __ Bind(&stack_done);
2861 // Tail-call to the actual Call or Construct builtin.
2862 __ TailCallBuiltin(target_builtin);
2863
2864 __ Bind(&stack_overflow);
2865 __ TailCallRuntime(Runtime::kThrowStackOverflow);
2866}
2867
2868// static
2869void Builtins::Generate_CallFunction(MacroAssembler* masm,
2870 ConvertReceiverMode mode) {
2871 ASM_LOCATION("Builtins::Generate_CallFunction");
2872 // ----------- S t a t e -------------
2873 // -- x0 : the number of arguments
2874 // -- x1 : the function to call (checked to be a JSFunction)
2875 // -----------------------------------
2876 __ AssertCallableFunction(x1);
2877
2878 __ LoadTaggedField(
2879 x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
2880
2881 // Enter the context of the function; ToObject has to run in the function
2882 // context, and we also need to take the global proxy from the function
2883 // context in case of conversion.
2884 __ LoadTaggedField(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
2885 // We need to convert the receiver for non-native sloppy mode functions.
2886 Label done_convert;
2887 __ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kFlagsOffset));
2888 __ TestAndBranchIfAnySet(w3,
2889 SharedFunctionInfo::IsNativeBit::kMask |
2890 SharedFunctionInfo::IsStrictBit::kMask,
2891 &done_convert);
2892 {
2893 // ----------- S t a t e -------------
2894 // -- x0 : the number of arguments
2895 // -- x1 : the function to call (checked to be a JSFunction)
2896 // -- x2 : the shared function info.
2897 // -- cp : the function context.
2898 // -----------------------------------
2899
2901 // Patch receiver to global proxy.
2902 __ LoadGlobalProxy(x3);
2903 } else {
2904 Label convert_to_object, convert_receiver;
2905 __ Peek(x3, __ ReceiverOperand());
2906 __ JumpIfSmi(x3, &convert_to_object);
2907 __ JumpIfJSAnyIsNotPrimitive(x3, x4, &done_convert);
2909 Label convert_global_proxy;
2910 __ JumpIfRoot(x3, RootIndex::kUndefinedValue, &convert_global_proxy);
2911 __ JumpIfNotRoot(x3, RootIndex::kNullValue, &convert_to_object);
2912 __ Bind(&convert_global_proxy);
2913 {
2914 // Patch receiver to global proxy.
2915 __ LoadGlobalProxy(x3);
2916 }
2917 __ B(&convert_receiver);
2918 }
2919 __ Bind(&convert_to_object);
2920 {
2921 // Convert receiver using ToObject.
2922 // TODO(bmeurer): Inline the allocation here to avoid building the frame
2923 // in the fast case? (fall back to AllocateInNewSpace?)
2924 FrameScope scope(masm, StackFrame::INTERNAL);
2925 __ SmiTag(x0);
2926 __ Push(padreg, x0, x1, cp);
2927 __ Mov(x0, x3);
2928 __ CallBuiltin(Builtin::kToObject);
2929 __ Mov(x3, x0);
2930 __ Pop(cp, x1, x0, padreg);
2931 __ SmiUntag(x0);
2932 }
2933 __ LoadTaggedField(
2934 x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
2935 __ Bind(&convert_receiver);
2936 }
2937 __ Poke(x3, __ ReceiverOperand());
2938 }
2939 __ Bind(&done_convert);
2940
2941 // ----------- S t a t e -------------
2942 // -- x0 : the number of arguments
2943 // -- x1 : the function to call (checked to be a JSFunction)
2944 // -- x2 : the shared function info.
2945 // -- cp : the function context.
2946 // -----------------------------------
2947
2948#ifdef V8_ENABLE_LEAPTIERING
2949 __ InvokeFunctionCode(x1, no_reg, x0, InvokeType::kJump);
2950#else
2951 __ Ldrh(x2,
2952 FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
2953 __ InvokeFunctionCode(x1, no_reg, x2, x0, InvokeType::kJump);
2954#endif // V8_ENABLE_LEAPTIERING
2955}
2956
2957namespace {
2958
2959void Generate_PushBoundArguments(MacroAssembler* masm) {
2960 // ----------- S t a t e -------------
2961 // -- x0 : the number of arguments
2962 // -- x1 : target (checked to be a JSBoundFunction)
2963 // -- x3 : new.target (only in case of [[Construct]])
2964 // -----------------------------------
2965
2966 Register bound_argc = x4;
2967 Register bound_argv = x2;
2968
2969 // Load [[BoundArguments]] into x2 and length of that into x4.
2970 Label no_bound_arguments;
2971 __ LoadTaggedField(
2972 bound_argv, FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset));
2973 __ SmiUntagField(bound_argc,
2974 FieldMemOperand(bound_argv, offsetof(FixedArray, length_)));
2975 __ Cbz(bound_argc, &no_bound_arguments);
2976 {
2977 // ----------- S t a t e -------------
2978 // -- x0 : the number of arguments
2979 // -- x1 : target (checked to be a JSBoundFunction)
2980 // -- x2 : the [[BoundArguments]] (implemented as FixedArray)
2981 // -- x3 : new.target (only in case of [[Construct]])
2982 // -- x4 : the number of [[BoundArguments]]
2983 // -----------------------------------
2984
2985 Register argc = x0;
2986
2987 // Check for stack overflow.
2988 {
2989 // Check the stack for overflow. We are not trying to catch interruptions
2990 // (i.e. debug break and preemption) here, so check the "real stack
2991 // limit".
2992 Label done;
2993 __ LoadStackLimit(x10, StackLimitKind::kRealStackLimit);
2994 // Make x10 the space we have left. The stack might already be overflowed
2995 // here which will cause x10 to become negative.
2996 __ Sub(x10, sp, x10);
2997 // Check if the arguments will overflow the stack.
2998 __ Cmp(x10, Operand(bound_argc, LSL, kSystemPointerSizeLog2));
2999 __ B(gt, &done);
3000 __ TailCallRuntime(Runtime::kThrowStackOverflow);
3001 __ Bind(&done);
3002 }
3003
3004 Label copy_bound_args;
3005 Register total_argc = x15;
3006 Register slots_to_claim = x12;
3007 Register scratch = x10;
3008 Register receiver = x14;
3009
3010 __ Sub(argc, argc, kJSArgcReceiverSlots);
3011 __ Add(total_argc, argc, bound_argc);
3012 __ Peek(receiver, 0);
3013
3014 // Round up slots_to_claim to an even number if it is odd.
3015 __ Add(slots_to_claim, bound_argc, 1);
3016 __ Bic(slots_to_claim, slots_to_claim, 1);
3017 __ Claim(slots_to_claim, kSystemPointerSize);
3018
3019 __ Tbz(bound_argc, 0, &copy_bound_args);
3020 {
3021 Label argc_even;
3022 __ Tbz(argc, 0, &argc_even);
3023      // The argument count is odd (with the receiver it's even), so there's no
3024 // alignment padding above the arguments and we have to "add" it. We
3025 // claimed bound_argc + 1, since it is odd and it was rounded up. +1 here
3026 // is for stack alignment padding.
3027 // 1. Shift args one slot down.
3028 {
3029 Register copy_from = x11;
3030 Register copy_to = x12;
3031 __ SlotAddress(copy_to, slots_to_claim);
3032 __ Add(copy_from, copy_to, kSystemPointerSize);
3033 __ CopyDoubleWords(copy_to, copy_from, argc);
3034 }
3035 // 2. Write a padding in the last slot.
3036 __ Add(scratch, total_argc, 1);
3037 __ Str(padreg, MemOperand(sp, scratch, LSL, kSystemPointerSizeLog2));
3038 __ B(&copy_bound_args);
3039
3040 __ Bind(&argc_even);
3041      // The argument count is even (with the receiver it's odd), so there's an
3042 // alignment padding above the arguments and we can reuse it. We need to
3043 // claim bound_argc - 1, but we claimed bound_argc + 1, since it is odd
3044 // and it was rounded up.
3045      // 1. Drop 2 slots (we claimed bound_argc + 1 but only need bound_argc - 1).
3046 __ Drop(2);
3047 // 2. Shift args one slot up.
3048 {
3049 Register copy_from = x11;
3050 Register copy_to = x12;
3051 __ SlotAddress(copy_to, total_argc);
3052 __ Sub(copy_from, copy_to, kSystemPointerSize);
3053 __ CopyDoubleWords(copy_to, copy_from, argc,
3055 }
3056 }
3057
3058 // If bound_argc is even, there is no alignment massage to do, and we have
3059 // already claimed the correct number of slots (bound_argc).
3060 __ Bind(&copy_bound_args);
3061
3062 // Copy the receiver back.
3063 __ Poke(receiver, 0);
3064 // Copy [[BoundArguments]] to the stack (below the receiver).
3065 {
3066 Label loop;
3067 Register counter = bound_argc;
3068 Register copy_to = x12;
3069 __ Add(bound_argv, bound_argv,
3070 OFFSET_OF_DATA_START(FixedArray) - kHeapObjectTag);
3071 __ SlotAddress(copy_to, 1);
3072 __ Bind(&loop);
3073 __ Sub(counter, counter, 1);
3074 __ LoadTaggedField(scratch,
3075 MemOperand(bound_argv, kTaggedSize, PostIndex));
3076 __ Str(scratch, MemOperand(copy_to, kSystemPointerSize, PostIndex));
3077 __ Cbnz(counter, &loop);
3078 }
3079 // Update argc.
3080 __ Add(argc, total_argc, kJSArgcReceiverSlots);
3081 }
3082 __ Bind(&no_bound_arguments);
3083}
3084
3085} // namespace
3086
3087// static
3088void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
3089 // ----------- S t a t e -------------
3090 // -- x0 : the number of arguments
3091 // -- x1 : the function to call (checked to be a JSBoundFunction)
3092 // -----------------------------------
3093 __ AssertBoundFunction(x1);
3094
3095 // Patch the receiver to [[BoundThis]].
3096 __ LoadTaggedField(x10,
3097 FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset));
3098 __ Poke(x10, __ ReceiverOperand());
3099
3100 // Push the [[BoundArguments]] onto the stack.
3101 Generate_PushBoundArguments(masm);
3102
3103 // Call the [[BoundTargetFunction]] via the Call builtin.
3104 __ LoadTaggedField(
3105 x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
3106 __ TailCallBuiltin(Builtins::Call());
3107}
3108
3109// static
3110void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
3111 // ----------- S t a t e -------------
3112 // -- x0 : the number of arguments
3113 // -- x1 : the target to call (can be any Object).
3114 // -----------------------------------
3115 Register target = x1;
3116 Register map = x4;
3117 Register instance_type = x5;
3118 DCHECK(!AreAliased(x0, target, map, instance_type));
3119
3120 Label non_callable, class_constructor;
3121 __ JumpIfSmi(target, &non_callable);
3122 __ LoadMap(map, target);
3123 __ CompareInstanceTypeRange(map, instance_type,
3126 __ TailCallBuiltin(Builtins::CallFunction(mode), ls);
3127 __ Cmp(instance_type, JS_BOUND_FUNCTION_TYPE);
3128 __ TailCallBuiltin(Builtin::kCallBoundFunction, eq);
3129
3130 // Check if target has a [[Call]] internal method.
3131 {
3132 Register flags = x4;
3133 __ Ldrb(flags, FieldMemOperand(map, Map::kBitFieldOffset));
3134 map = no_reg;
3135 __ TestAndBranchIfAllClear(flags, Map::Bits1::IsCallableBit::kMask,
3136 &non_callable);
3137 }
3138
3139  // Check if target is a proxy and call the CallProxy builtin.
3140 __ Cmp(instance_type, JS_PROXY_TYPE);
3141 __ TailCallBuiltin(Builtin::kCallProxy, eq);
3142
3143  // Check if target is a wrapped function and call the CallWrappedFunction
3144  // builtin.
3145 __ Cmp(instance_type, JS_WRAPPED_FUNCTION_TYPE);
3146 __ TailCallBuiltin(Builtin::kCallWrappedFunction, eq);
3147
3148 // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
3149 // Check that the function is not a "classConstructor".
3150 __ Cmp(instance_type, JS_CLASS_CONSTRUCTOR_TYPE);
3151 __ B(eq, &class_constructor);
3152
3153 // 2. Call to something else, which might have a [[Call]] internal method (if
3154 // not we raise an exception).
3155 // Overwrite the original receiver with the (original) target.
3156 __ Poke(target, __ ReceiverOperand());
3157
3158 // Let the "call_as_function_delegate" take care of the rest.
3159 __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
3160 __ TailCallBuiltin(
3162
3163 // 3. Call to something that is not callable.
3164 __ bind(&non_callable);
3165 {
3166 FrameScope scope(masm, StackFrame::INTERNAL);
3167 __ PushArgument(target);
3168 __ CallRuntime(Runtime::kThrowCalledNonCallable);
3169 __ Unreachable();
3170 }
3171
3172 // 4. The function is a "classConstructor", need to raise an exception.
3173 __ bind(&class_constructor);
3174 {
3175 FrameScope frame(masm, StackFrame::INTERNAL);
3176 __ PushArgument(target);
3177 __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
3178 __ Unreachable();
3179 }
3180}
3181
3182// static
3183void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
3184 // ----------- S t a t e -------------
3185 // -- x0 : the number of arguments
3186 // -- x1 : the constructor to call (checked to be a JSFunction)
3187 // -- x3 : the new target (checked to be a constructor)
3188 // -----------------------------------
3189 __ AssertConstructor(x1);
3190 __ AssertFunction(x1);
3191
3192  // The calling convention for function-specific ConstructStubs requires
3193  // x2 to contain either an AllocationSite or undefined.
3194 __ LoadRoot(x2, RootIndex::kUndefinedValue);
3195
3196 Label call_generic_stub;
3197
3198 // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
3199 __ LoadTaggedField(
3200 x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
3201 __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
3202 __ TestAndBranchIfAllClear(
3203 w4, SharedFunctionInfo::ConstructAsBuiltinBit::kMask, &call_generic_stub);
3204
3205 __ TailCallBuiltin(Builtin::kJSBuiltinsConstructStub);
3206
3207 __ bind(&call_generic_stub);
3208 __ TailCallBuiltin(Builtin::kJSConstructStubGeneric);
3209}
3210
3211// static
3212void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
3213 // ----------- S t a t e -------------
3214 // -- x0 : the number of arguments
3215 // -- x1 : the function to call (checked to be a JSBoundFunction)
3216 // -- x3 : the new target (checked to be a constructor)
3217 // -----------------------------------
3218 __ AssertConstructor(x1);
3219 __ AssertBoundFunction(x1);
3220
3221 // Push the [[BoundArguments]] onto the stack.
3222 Generate_PushBoundArguments(masm);
3223
3224 // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
3225 {
3226 Label done;
3227 __ CmpTagged(x1, x3);
3228 __ B(ne, &done);
3229 __ LoadTaggedField(
3230 x3, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
3231 __ Bind(&done);
3232 }
3233
3234 // Construct the [[BoundTargetFunction]] via the Construct builtin.
3235 __ LoadTaggedField(
3236 x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
3237 __ TailCallBuiltin(Builtin::kConstruct);
3238}
3239
3240// static
3241void Builtins::Generate_Construct(MacroAssembler* masm) {
3242 // ----------- S t a t e -------------
3243 // -- x0 : the number of arguments
3244 // -- x1 : the constructor to call (can be any Object)
3245 // -- x3 : the new target (either the same as the constructor or
3246 // the JSFunction on which new was invoked initially)
3247 // -----------------------------------
3248 Register target = x1;
3249 Register map = x4;
3250 Register instance_type = x5;
3251 DCHECK(!AreAliased(x0, target, map, instance_type));
3252
3253 // Check if target is a Smi.
3254 Label non_constructor, non_proxy;
3255 __ JumpIfSmi(target, &non_constructor);
3256
3257 // Check if target has a [[Construct]] internal method.
3258 __ LoadTaggedField(map, FieldMemOperand(target, HeapObject::kMapOffset));
3259 {
3260 Register flags = x2;
3261 DCHECK(!AreAliased(x0, target, map, instance_type, flags));
3262 __ Ldrb(flags, FieldMemOperand(map, Map::kBitFieldOffset));
3263 __ TestAndBranchIfAllClear(flags, Map::Bits1::IsConstructorBit::kMask,
3264 &non_constructor);
3265 }
3266
3267 // Dispatch based on instance type.
3268 __ CompareInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE,
3269 LAST_JS_FUNCTION_TYPE);
3270 __ TailCallBuiltin(Builtin::kConstructFunction, ls);
3271
3272 // Only dispatch to bound functions after checking whether they are
3273 // constructors.
3274 __ Cmp(instance_type, JS_BOUND_FUNCTION_TYPE);
3275 __ TailCallBuiltin(Builtin::kConstructBoundFunction, eq);
3276
3277 // Only dispatch to proxies after checking whether they are constructors.
3278 __ Cmp(instance_type, JS_PROXY_TYPE);
3279 __ B(ne, &non_proxy);
3280 __ TailCallBuiltin(Builtin::kConstructProxy);
3281
3282 // Called Construct on an exotic Object with a [[Construct]] internal method.
3283 __ bind(&non_proxy);
3284 {
3285 // Overwrite the original receiver with the (original) target.
3286 __ Poke(target, __ ReceiverOperand());
3287
3288 // Let the "call_as_constructor_delegate" take care of the rest.
3289 __ LoadNativeContextSlot(target,
3290 Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
3291 __ TailCallBuiltin(Builtins::CallFunction());
3292 }
3293
3294 // Called Construct on an Object that doesn't have a [[Construct]] internal
3295 // method.
3296 __ bind(&non_constructor);
3297 __ TailCallBuiltin(Builtin::kConstructedNonConstructable);
3298}
3299
3300#if V8_ENABLE_WEBASSEMBLY
3301// Compute register lists for parameters to be saved. We save all parameter
3302// registers (see wasm-linkage.h). They might be overwritten in runtime
3303// calls. We don't have any callee-saved registers in wasm, so no need to
3304// store anything else.
3305constexpr RegList kSavedGpRegs = ([]() constexpr {
3306 RegList saved_gp_regs;
3307 for (Register gp_param_reg : wasm::kGpParamRegisters) {
3308 saved_gp_regs.set(gp_param_reg);
3309 }
3310 // The instance data has already been stored in the fixed part of the frame.
3311 saved_gp_regs.clear(kWasmImplicitArgRegister);
3312 // All set registers were unique. The instance is skipped.
3313 CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters) - 1);
3314 // We push a multiple of 16 bytes.
3315 CHECK_EQ(0, saved_gp_regs.Count() % 2);
3316  CHECK_EQ(WasmLiftoffFrameConstants::kNumberOfSavedGpParamRegs,
3317           saved_gp_regs.Count());
3318 return saved_gp_regs;
3319})();
3320
3321constexpr DoubleRegList kSavedFpRegs = ([]() constexpr {
3322 DoubleRegList saved_fp_regs;
3323 for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
3324 saved_fp_regs.set(fp_param_reg);
3325 }
3326
3327 CHECK_EQ(saved_fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
3328  CHECK_EQ(WasmLiftoffFrameConstants::kNumberOfSavedFpParamRegs,
3329           saved_fp_regs.Count());
3330 return saved_fp_regs;
3331})();
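// Illustrative note (not part of the original source): the two constexpr
// lambdas above simply fold the parameter-register arrays into register lists
// at compile time. A minimal standalone sketch of the same pattern, assuming a
// hypothetical two-element register array:
//
//   constexpr Register kRegs[] = {x2, x3};
//   constexpr RegList kSaved = ([]() constexpr {
//     RegList list;
//     for (Register r : kRegs) list.set(r);
//     return list;  // evaluated entirely at compile time
//   })();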
3332
3333// When entering this builtin, we have just created a Wasm stack frame:
3334//
3335// [ Wasm instance data ] <-- sp
3336// [ WASM frame marker ]
3337// [ saved fp ] <-- fp
3338//
3339// Due to stack alignment restrictions, this builtin adds the feedback vector
3340// plus a filler to the stack. The stack pointer will be
3341// moved an appropriate distance by {PatchPrepareStackFrame}.
3342//
3343// [ (unused) ] <-- sp
3344// [ feedback vector ]
3345// [ Wasm instance data ]
3346// [ WASM frame marker ]
3347// [ saved fp ] <-- fp
3348void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
3349 Register func_index = wasm::kLiftoffFrameSetupFunctionReg;
3350 Register vector = x9;
3351 Register scratch = x10;
3352 Label allocate_vector, done;
3353
3354  __ LoadTaggedField(
3355      vector, FieldMemOperand(kWasmImplicitArgRegister,
3356                              WasmTrustedInstanceData::kFeedbackVectorsOffset));
3357 __ Add(vector, vector, Operand(func_index, LSL, kTaggedSizeLog2));
3358 __ LoadTaggedField(vector,
3359 FieldMemOperand(vector, OFFSET_OF_DATA_START(FixedArray)));
3360 __ JumpIfSmi(vector, &allocate_vector);
3361 __ bind(&done);
3362 __ Push(vector, xzr);
3363 __ Ret();
3364
3365 __ bind(&allocate_vector);
3366 // Feedback vector doesn't exist yet. Call the runtime to allocate it.
3367 // We temporarily change the frame type for this, because we need special
3368 // handling by the stack walker in case of GC.
3369  __ Mov(scratch, StackFrame::TypeToMarker(StackFrame::WASM_LIFTOFF_SETUP));
3370  __ Str(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
3371 // Save registers.
3372 __ PushXRegList(kSavedGpRegs);
3373 __ PushQRegList(kSavedFpRegs);
3374 __ Push<MacroAssembler::kSignLR>(lr, xzr); // xzr is for alignment.
3375
3376 // Arguments to the runtime function: instance data, func_index, and an
3377 // additional stack slot for the NativeModule. The first pushed register
3378 // is for alignment. {x0} and {x1} are picked arbitrarily.
3379 __ SmiTag(func_index);
3380 __ Push(x0, kWasmImplicitArgRegister, func_index, x1);
3381 __ Mov(cp, Smi::zero());
3382 __ CallRuntime(Runtime::kWasmAllocateFeedbackVector, 3);
3383 __ Mov(vector, kReturnRegister0);
3384
3385 // Restore registers and frame type.
3386 __ Pop<MacroAssembler::kAuthLR>(xzr, lr);
3387 __ PopQRegList(kSavedFpRegs);
3388 __ PopXRegList(kSavedGpRegs);
3389 // Restore the instance data from the frame.
3390  __ Ldr(kWasmImplicitArgRegister,
3391         MemOperand(fp, WasmFrameConstants::kWasmInstanceDataOffset));
3392  __ Mov(scratch, StackFrame::TypeToMarker(StackFrame::WASM));
3393  __ Str(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
3394  __ B(&done);
3395}
3396
3397void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
3398 // The function index was put in w8 by the jump table trampoline.
3399 // Sign extend and convert to Smi for the runtime call.
3400  __ Sxtw(kWasmCompileLazyFuncIndexRegister,
3401          kWasmCompileLazyFuncIndexRegister.W());
3402  __ SmiTag(kWasmCompileLazyFuncIndexRegister);
3403
3404 UseScratchRegisterScope temps(masm);
3405 temps.Exclude(x17);
3406 {
3407 HardAbortScope hard_abort(masm); // Avoid calls to Abort.
3408 FrameScope scope(masm, StackFrame::INTERNAL);
3409 // Manually save the instance data (which kSavedGpRegs skips because its
3410 // other use puts it into the fixed frame anyway). The stack slot is valid
3411 // because the {FrameScope} (via {EnterFrame}) always reserves it (for stack
3412 // alignment reasons). The instance is needed because once this builtin is
3413 // done, we'll call a regular Wasm function.
3414    __ Str(kWasmImplicitArgRegister,
3415           MemOperand(fp, WasmFrameConstants::kWasmInstanceDataOffset));
3416
3417 // Save registers that we need to keep alive across the runtime call.
3418 __ PushXRegList(kSavedGpRegs);
3419 __ PushQRegList(kSavedFpRegs);
3420    __ PushQRegList(kSavedFpRegs);
3421    __ Push(kWasmImplicitArgRegister, kWasmCompileLazyFuncIndexRegister);
3422 // Initialize the JavaScript context with 0. CEntry will use it to
3423 // set the current context on the isolate.
3424 __ Mov(cp, Smi::zero());
3425 __ CallRuntime(Runtime::kWasmCompileLazy, 2);
3426
3427    // Untag the returned Smi into x17 (ip1), for later use.
3428 static_assert(!kSavedGpRegs.has(x17));
3429    __ SmiUntag(x17, kReturnRegister0);
3430
3431 // Restore registers.
3432 __ PopQRegList(kSavedFpRegs);
3433 __ PopXRegList(kSavedGpRegs);
3434 // Restore the instance data from the frame.
3435    __ Ldr(kWasmImplicitArgRegister,
3436           MemOperand(fp, WasmFrameConstants::kWasmInstanceDataOffset));
3437 }
3438
3439 // The runtime function returned the jump table slot offset as a Smi (now in
3440 // x17). Use that to compute the jump target. Use x17 (ip1) for the branch
3441 // target, to be compliant with CFI.
3442 constexpr Register temp = x8;
3443 static_assert(!kSavedGpRegs.has(temp));
3444  __ Ldr(temp, FieldMemOperand(kWasmImplicitArgRegister,
3445                               WasmTrustedInstanceData::kJumpTableStartOffset));
3446 __ add(x17, temp, Operand(x17));
3447 // Finally, jump to the jump table slot for the function.
3448 __ Jump(x17);
3449}
3450
3451void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
3452 HardAbortScope hard_abort(masm); // Avoid calls to Abort.
3453 {
3454 FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
3455
3456 // Save all parameter registers. They might hold live values, we restore
3457 // them after the runtime call.
3458    __ PushXRegList(WasmDebugBreakFrameConstants::kPushedGpRegs);
3459    __ PushQRegList(WasmDebugBreakFrameConstants::kPushedFpRegs);
3460
3461 // Initialize the JavaScript context with 0. CEntry will use it to
3462 // set the current context on the isolate.
3463 __ Move(cp, Smi::zero());
3464 __ CallRuntime(Runtime::kWasmDebugBreak, 0);
3465
3466 // Restore registers.
3467    __ PopQRegList(WasmDebugBreakFrameConstants::kPushedFpRegs);
3468    __ PopXRegList(WasmDebugBreakFrameConstants::kPushedGpRegs);
3469  }
3470 __ Ret();
3471}
3472
3473namespace {
3474// Check that the stack was in the old state (if generated code assertions are
3475// enabled), and switch to the new state.
3476void SwitchStackState(MacroAssembler* masm, Register stack, Register tmp,
3477                      wasm::JumpBuffer::StackState old_state,
3478                      wasm::JumpBuffer::StackState new_state) {
3479#if V8_ENABLE_SANDBOX
3480 __ Ldr(tmp.W(), MemOperand(stack, wasm::kStackStateOffset));
3481 __ Cmp(tmp.W(), old_state);
3482 Label ok;
3483 __ B(&ok, eq);
3484 __ Trap();
3485 __ bind(&ok);
3486#endif
3487 __ Mov(tmp.W(), new_state);
3488 __ Str(tmp.W(), MemOperand(stack, wasm::kStackStateOffset));
3489}
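// Roughly, the code emitted above behaves like the following C-style sketch
// (added note; the state check is only generated in sandbox builds):
//
//   if (stack->state != old_state) trap();   // V8_ENABLE_SANDBOX only
//   stack->state = new_state;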
3490
3491// Switch the stack pointer. Also switch the simulator's stack limit when
3492// running on the simulator. This needs to be done as close as possible to
3493// changing the stack pointer, as a mismatch between the stack pointer and the
3494// simulator's stack limit can cause stack access check failures.
3495void SwitchStackPointerAndSimulatorStackLimit(MacroAssembler* masm,
3496 Register stack, Register tmp) {
3497 if (masm->options().enable_simulator_code) {
3498 UseScratchRegisterScope temps(masm);
3499 temps.Exclude(x16);
3500 __ Ldr(tmp, MemOperand(stack, wasm::kStackSpOffset));
3501 __ Ldr(x16, MemOperand(stack, wasm::kStackLimitOffset));
3502 __ Mov(sp, tmp);
3503    __ hlt(kImmExceptionIsSwitchStackLimit);
3504  } else {
3505 __ Ldr(tmp, MemOperand(stack, wasm::kStackSpOffset));
3506 __ Mov(sp, tmp);
3507 }
3508}
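// In pseudo-code, the helper above amounts to (added note):
//
//   sp = stack->sp;
//   if (simulator) simulator_stack_limit = stack->limit;  // passed via x16
//
// keeping the two updates adjacent so the simulator never observes an sp that
// is inconsistent with its stack limit.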
3509
3510void FillJumpBuffer(MacroAssembler* masm, Register stack, Label* pc,
3511 Register tmp) {
3512 __ Mov(tmp, sp);
3513 __ Str(tmp, MemOperand(stack, wasm::kStackSpOffset));
3514 __ Str(fp, MemOperand(stack, wasm::kStackFpOffset));
3515 __ LoadStackLimit(tmp, StackLimitKind::kRealStackLimit);
3516 __ Str(tmp, MemOperand(stack, wasm::kStackLimitOffset));
3517 __ Adr(tmp, pc);
3518 __ Str(tmp, MemOperand(stack, wasm::kStackPcOffset));
3519}
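// The jump buffer filled above is essentially a small setjmp-style record.
// Sketch of the fields written (added note; names follow the wasm::kStack*
// offsets used above):
//
//   jmpbuf->sp    = sp;
//   jmpbuf->fp    = fp;
//   jmpbuf->limit = real_stack_limit;
//   jmpbuf->pc    = &resume_label;   // address of the label passed as {pc}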
3520
3521void LoadJumpBuffer(MacroAssembler* masm, Register stack, bool load_pc,
3522 Register tmp, wasm::JumpBuffer::StackState expected_state) {
3523 SwitchStackPointerAndSimulatorStackLimit(masm, stack, tmp);
3524 __ Ldr(fp, MemOperand(stack, wasm::kStackFpOffset));
3525 SwitchStackState(masm, stack, tmp, expected_state, wasm::JumpBuffer::Active);
3526 if (load_pc) {
3527 __ Ldr(tmp, MemOperand(stack, wasm::kStackPcOffset));
3528 __ Br(tmp);
3529 }
3530 // The stack limit in StackGuard is set separately under the ExecutionAccess
3531 // lock.
3532}
3533
3534void LoadTargetJumpBuffer(MacroAssembler* masm, Register target_stack,
3535 Register tmp,
3536 wasm::JumpBuffer::StackState expected_state) {
3537 __ Str(xzr,
3538 MemOperand(fp, StackSwitchFrameConstants::kGCScanSlotCountOffset));
3539 // Switch stack!
3540 LoadJumpBuffer(masm, target_stack, false, tmp, expected_state);
3541}
3542
3543// Updates the stack limit and central stack info, and validates the switch.
3544void SwitchStacks(MacroAssembler* masm, Register old_stack, bool return_switch,
3545 const std::initializer_list<CPURegister> keep) {
3546 using ER = ExternalReference;
3547 for (size_t i = 0; i < (keep.size() & ~0x1); i += 2) {
3548 __ Push(keep.begin()[i], keep.begin()[i + 1]);
3549 }
3550 if (keep.size() % 2 == 1) {
3551 __ Push(*(keep.end() - 1), padreg);
3552 }
3553 {
3554 FrameScope scope(masm, StackFrame::MANUAL);
3555 // Move {old_stack} first in case it aliases kCArgRegs[0].
3556 __ Mov(kCArgRegs[1], old_stack);
3557 __ Mov(kCArgRegs[0], ExternalReference::isolate_address(masm->isolate()));
3558 __ CallCFunction(
3559 return_switch ? ER::wasm_return_switch() : ER::wasm_switch_stacks(), 2);
3560 }
3561 if (keep.size() % 2 == 1) {
3562 __ Pop(padreg, *(keep.end() - 1));
3563 }
3564 for (size_t i = (keep.size() & ~0x1); i > 0; i -= 2) {
3565 __ Pop(keep.begin()[i - 1], keep.begin()[i - 2]);
3566 }
3567}
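// Note on the push/pop pairing above (added): arm64 requires sp to stay
// 16-byte aligned, so the registers in {keep} are saved in pairs and an odd
// tail is padded with {padreg}. For example, for keep = {x0, x1, x2} the
// emitted sequence is:
//
//   Push(x0, x1); Push(x2, padreg); ... Pop(padreg, x2); Pop(x1, x0);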
3568
3569void ReloadParentStack(MacroAssembler* masm, Register return_reg,
3570 Register return_value, Register context, Register tmp1,
3571 Register tmp2, Register tmp3) {
3572 Register active_stack = tmp1;
3573 __ LoadRootRelative(active_stack, IsolateData::active_stack_offset());
3574
3575 // Set a null pointer in the jump buffer's SP slot to indicate to the stack
3576 // frame iterator that this stack is empty.
3577 __ Str(xzr, MemOperand(active_stack, wasm::kStackSpOffset));
3578 {
3579 UseScratchRegisterScope temps(masm);
3580 Register scratch = temps.AcquireX();
3581 SwitchStackState(masm, active_stack, scratch, wasm::JumpBuffer::Active,
3582                     wasm::JumpBuffer::Retired);
3583  }
3584 Register parent = tmp2;
3585 __ Ldr(parent, MemOperand(active_stack, wasm::kStackParentOffset));
3586
3587 // Update active stack.
3588 __ StoreRootRelative(IsolateData::active_stack_offset(), parent);
3589
3590 // Switch stack!
3591 SwitchStacks(masm, active_stack, true,
3592 {return_reg, return_value, context, parent});
3593 LoadJumpBuffer(masm, parent, false, tmp3, wasm::JumpBuffer::Inactive);
3594}
3595
3596void RestoreParentSuspender(MacroAssembler* masm, Register tmp1) {
3597 Register suspender = tmp1;
3598 __ LoadRoot(suspender, RootIndex::kActiveSuspender);
3599 __ LoadTaggedField(
3600 suspender,
3601 FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset));
3602 int32_t active_suspender_offset =
3603      MacroAssembler::RootRegisterOffsetForRootIndex(
3604          RootIndex::kActiveSuspender);
3605 __ Str(suspender, MemOperand(kRootRegister, active_suspender_offset));
3606}
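// Equivalent root update in pseudo-code (added note):
//
//   ActiveSuspender = ActiveSuspender->parent;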
3607
3608void ResetStackSwitchFrameStackSlots(MacroAssembler* masm) {
3609 __ Str(xzr, MemOperand(fp, StackSwitchFrameConstants::kResultArrayOffset));
3610 __ Str(xzr, MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset));
3611}
3612
3613// TODO(irezvov): Consolidate with arm RegisterAllocator.
3614class RegisterAllocator {
3615 public:
3616 class Scoped {
3617 public:
3618 Scoped(RegisterAllocator* allocator, Register* reg):
3619 allocator_(allocator), reg_(reg) {}
3620 ~Scoped() { allocator_->Free(reg_); }
3621 private:
3622 RegisterAllocator* allocator_;
3623 Register* reg_;
3624 };
3625
3626 explicit RegisterAllocator(const CPURegList& registers)
3627      : initial_(registers), available_(registers) {}
3628
3629  void Ask(Register* reg) {
3630 DCHECK_EQ(*reg, no_reg);
3631 DCHECK(!available_.IsEmpty());
3632 *reg = available_.PopLowestIndex().X();
3633 allocated_registers_.push_back(reg);
3634 }
3635
3636 void Pinned(const Register& requested, Register* reg) {
3637 DCHECK(available_.IncludesAliasOf(requested));
3638 *reg = requested;
3639 Reserve(requested);
3640 allocated_registers_.push_back(reg);
3641 }
3642
3643 void Free(Register* reg) {
3644 DCHECK_NE(*reg, no_reg);
3645 available_.Combine(*reg);
3646 *reg = no_reg;
3647    allocated_registers_.erase(
3648        find(allocated_registers_.begin(), allocated_registers_.end(), reg));
3649 }
3650
3651 void Reserve(const Register& reg) {
3652 if (reg == NoReg) {
3653 return;
3654 }
3655 DCHECK(available_.IncludesAliasOf(reg));
3656 available_.Remove(reg);
3657 }
3658
3659 void Reserve(const Register& reg1,
3660 const Register& reg2,
3661 const Register& reg3 = NoReg,
3662 const Register& reg4 = NoReg,
3663 const Register& reg5 = NoReg,
3664 const Register& reg6 = NoReg) {
3665 Reserve(reg1);
3666 Reserve(reg2);
3667 Reserve(reg3);
3668 Reserve(reg4);
3669 Reserve(reg5);
3670 Reserve(reg6);
3671 }
3672
3673 bool IsUsed(const Register& reg) {
3674 return initial_.IncludesAliasOf(reg)
3675 && !available_.IncludesAliasOf(reg);
3676 }
3677
3678 void ResetExcept(const Register& reg1 = NoReg,
3679 const Register& reg2 = NoReg,
3680 const Register& reg3 = NoReg,
3681 const Register& reg4 = NoReg,
3682 const Register& reg5 = NoReg,
3683 const Register& reg6 = NoReg) {
3684    available_ = initial_;
3685    if (reg1 != NoReg) {
3686 available_.Remove(reg1, reg2, reg3, reg4);
3687 }
3688 if (reg5 != NoReg) {
3689 available_.Remove(reg5, reg6);
3690 }
3691 auto it = allocated_registers_.begin();
3692 while (it != allocated_registers_.end()) {
3693 if (available_.IncludesAliasOf(**it)) {
3694 **it = no_reg;
3695 it = allocated_registers_.erase(it);
3696 } else {
3697 it++;
3698 }
3699 }
3700 }
3701
3702 static RegisterAllocator WithAllocatableGeneralRegisters() {
3703 CPURegList list(kXRegSizeInBits, RegList());
3704 const RegisterConfiguration* config(RegisterConfiguration::Default());
3705 list.set_bits(config->allocatable_general_codes_mask());
3706 return RegisterAllocator(list);
3707 }
3708
3709 private:
3710 std::vector<Register*> allocated_registers_;
3711 const CPURegList initial_;
3712 CPURegList available_;
3713};
3714
3715#define DEFINE_REG(Name) \
3716 Register Name = no_reg; \
3717 regs.Ask(&Name);
3718
3719#define DEFINE_REG_W(Name) \
3720 DEFINE_REG(Name); \
3721 Name = Name.W();
3722
3723#define ASSIGN_REG(Name) \
3724 regs.Ask(&Name);
3725
3726#define ASSIGN_REG_W(Name) \
3727 ASSIGN_REG(Name); \
3728 Name = Name.W();
3729
3730#define DEFINE_PINNED(Name, Reg) \
3731 Register Name = no_reg; \
3732 regs.Pinned(Reg, &Name);
3733
3734#define ASSIGN_PINNED(Name, Reg) regs.Pinned(Reg, &Name);
3735
3736#define DEFINE_SCOPED(Name) \
3737 DEFINE_REG(Name) \
3738 RegisterAllocator::Scoped scope_##Name(&regs, &Name);
3739
3740#define FREE_REG(Name) regs.Free(&Name);
3741
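// Illustrative usage of the allocator macros above (a sketch added for
// clarity, not code that exists elsewhere in this file):
//
//   auto regs = RegisterAllocator::WithAllocatableGeneralRegisters();
//   DEFINE_PINNED(arg, x0);   // claim a specific register
//   DEFINE_REG(tmp);          // claim any free allocatable register
//   ...
//   FREE_REG(tmp);            // return it to the pool
//   regs.ResetExcept(arg);    // release everything else that is still held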
3742// Loads the context field of the WasmTrustedInstanceData or WasmImportData
3743// depending on the data's type, and places the result in the input register.
3744void GetContextFromImplicitArg(MacroAssembler* masm, Register data,
3745 Register scratch) {
3746 __ LoadTaggedField(scratch, FieldMemOperand(data, HeapObject::kMapOffset));
3747 __ CompareInstanceType(scratch, scratch, WASM_TRUSTED_INSTANCE_DATA_TYPE);
3748 Label instance;
3749 Label end;
3750 __ B(eq, &instance);
3751 __ LoadTaggedField(
3752 data, FieldMemOperand(data, WasmImportData::kNativeContextOffset));
3753 __ jmp(&end);
3754 __ bind(&instance);
3755 __ LoadTaggedField(
3756 data,
3757 FieldMemOperand(data, WasmTrustedInstanceData::kNativeContextOffset));
3758 __ bind(&end);
3759}
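// In effect, the helper above does the following (added sketch):
//
//   context = IsWasmTrustedInstanceData(data)
//                 ? trusted_instance_data->native_context()
//                 : wasm_import_data->native_context();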
3760
3761} // namespace
3762
3763void Builtins::Generate_WasmToJsWrapperAsm(MacroAssembler* masm) {
3764 // Push registers in reverse order so that they are on the stack like
3765 // in an array, with the first item being at the lowest address.
3770
3774 // Reserve a slot for the signature, and one for stack alignment.
3775 __ Push(xzr, xzr);
3776 __ TailCallBuiltin(Builtin::kWasmToJsWrapperCSA);
3777}
3778
3779void Builtins::Generate_WasmTrapHandlerLandingPad(MacroAssembler* masm) {
3781 WasmFrameConstants::kProtectedInstructionReturnAddressOffset);
3782 __ TailCallBuiltin(Builtin::kWasmTrapHandlerThrowTrap);
3783}
3784
3785void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
3786 auto regs = RegisterAllocator::WithAllocatableGeneralRegisters();
3787 // Set up the stackframe.
3788 __ EnterFrame(StackFrame::STACK_SWITCH);
3789
3790 DEFINE_PINNED(suspender, x0);
3792
3793 __ Sub(sp, sp,
3794 Immediate(StackSwitchFrameConstants::kNumSpillSlots *
3795                   kSystemPointerSize));
3796  // Set a sentinel value for the spill slots visited by the GC.
3797 ResetStackSwitchFrameStackSlots(masm);
3798
3799 // -------------------------------------------
3800 // Save current state in active jump buffer.
3801 // -------------------------------------------
3802 Label resume;
3803 DEFINE_REG(stack);
3804 __ LoadRootRelative(stack, IsolateData::active_stack_offset());
3805 DEFINE_REG(scratch);
3806 FillJumpBuffer(masm, stack, &resume, scratch);
3807 SwitchStackState(masm, stack, scratch, wasm::JumpBuffer::Active,
3808                   wasm::JumpBuffer::Suspended);
3809  regs.ResetExcept(suspender, stack);
3810
3811 DEFINE_REG(suspender_stack);
3812 __ LoadExternalPointerField(
3813 suspender_stack,
3814 FieldMemOperand(suspender, WasmSuspenderObject::kStackOffset),
3816 if (v8_flags.debug_code) {
3817 // -------------------------------------------
3818 // Check that the suspender's stack is the active stack.
3819 // -------------------------------------------
3820 // TODO(thibaudm): Once we add core stack-switching instructions, this
3821 // check will not hold anymore: it's possible that the active stack changed
3822 // (due to an internal switch), so we have to update the suspender.
3823 __ cmp(suspender_stack, stack);
3824 Label ok;
3825 __ B(&ok, eq);
3826 __ Trap();
3827 __ bind(&ok);
3828 }
3829 // -------------------------------------------
3830 // Update roots.
3831 // -------------------------------------------
3832 DEFINE_REG(caller);
3833 __ Ldr(caller, MemOperand(suspender_stack, wasm::kStackParentOffset));
3834 __ StoreRootRelative(IsolateData::active_stack_offset(), caller);
3835 DEFINE_REG(parent);
3836 __ LoadTaggedField(
3837 parent, FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset));
3838 int32_t active_suspender_offset =
3839      MacroAssembler::RootRegisterOffsetForRootIndex(
3840          RootIndex::kActiveSuspender);
3841 __ Str(parent, MemOperand(kRootRegister, active_suspender_offset));
3842 regs.ResetExcept(suspender, caller, stack);
3843
3844 // -------------------------------------------
3845 // Load jump buffer.
3846 // -------------------------------------------
3847 SwitchStacks(masm, stack, false, {caller, suspender});
3848 FREE_REG(stack);
3849 __ LoadTaggedField(
3850      kReturnRegister0,
3851      FieldMemOperand(suspender, WasmSuspenderObject::kPromiseOffset));
3852 MemOperand GCScanSlotPlace =
3853 MemOperand(fp, StackSwitchFrameConstants::kGCScanSlotCountOffset);
3854 __ Str(xzr, GCScanSlotPlace);
3855 ASSIGN_REG(scratch)
3856 LoadJumpBuffer(masm, caller, true, scratch, wasm::JumpBuffer::Inactive);
3857 __ Trap();
3858 __ Bind(&resume, BranchTargetIdentifier::kBtiJump);
3859 __ LeaveFrame(StackFrame::STACK_SWITCH);
3860 __ Ret(lr);
3861}
3862
3863namespace {
3864// Resume the suspender stored in the closure. We generate two variants of this
3865// builtin: the onFulfilled variant resumes execution at the saved PC and
3866// forwards the value, the onRejected variant throws the value.
3867
3868void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
3869 auto regs = RegisterAllocator::WithAllocatableGeneralRegisters();
3870 __ EnterFrame(StackFrame::STACK_SWITCH);
3871
3872 DEFINE_PINNED(closure, kJSFunctionRegister); // x1
3873
3874 __ Sub(sp, sp,
3875         Immediate(StackSwitchFrameConstants::kNumSpillSlots *
3876                   kSystemPointerSize));
3877 // Set a sentinel value for the spill slots visited by the GC.
3878 ResetStackSwitchFrameStackSlots(masm);
3879
3880 regs.ResetExcept(closure);
3881
3882 // -------------------------------------------
3883 // Load suspender from closure.
3884 // -------------------------------------------
3885 DEFINE_REG(sfi);
3886 __ LoadTaggedField(
3887 sfi,
3888 MemOperand(
3889 closure,
3890          wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction()));
3891  FREE_REG(closure);
3892  // The suspender should be in the write barrier's ObjectRegister so that it
3893  // can be used in RecordWriteField calls later.
3894  DEFINE_PINNED(suspender, WriteBarrierDescriptor::ObjectRegister());
3895  DEFINE_REG(resume_data);
3896 __ LoadTaggedField(
3897 resume_data,
3898 FieldMemOperand(sfi, SharedFunctionInfo::kUntrustedFunctionDataOffset));
3899  // The write barrier uses a fixed register for the host object. The next
3900  // barrier is on the suspender, so load the suspender into that register.
3901 __ LoadTaggedField(
3902 suspender,
3903 FieldMemOperand(resume_data, WasmResumeData::kSuspenderOffset));
3904 regs.ResetExcept(suspender);
3905
3906 // -------------------------------------------
3907 // Save current state.
3908 // -------------------------------------------
3909 Label suspend;
3910 DEFINE_REG(active_stack);
3911 __ LoadRootRelative(active_stack, IsolateData::active_stack_offset());
3912 DEFINE_REG(scratch);
3913 FillJumpBuffer(masm, active_stack, &suspend, scratch);
3914 SwitchStackState(masm, active_stack, scratch, wasm::JumpBuffer::Active,
3915                   wasm::JumpBuffer::Inactive);
3916
3917 // -------------------------------------------
3918 // Set the suspender and stack parents and update the roots
3919 // -------------------------------------------
3920 DEFINE_REG(active_suspender);
3921 __ LoadRoot(active_suspender, RootIndex::kActiveSuspender);
3922 __ StoreTaggedField(
3923 active_suspender,
3924 FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset));
3925 __ RecordWriteField(suspender, WasmSuspenderObject::kParentOffset,
3926                      active_suspender, kLRHasBeenSaved,
3927                      SaveFPRegsMode::kIgnore);
3928  int32_t active_suspender_offset =
3929      MacroAssembler::RootRegisterOffsetForRootIndex(
3930          RootIndex::kActiveSuspender);
3931 __ Str(suspender, MemOperand(kRootRegister, active_suspender_offset));
3932
3933 DEFINE_REG(target_stack);
3934 __ LoadExternalPointerField(
3935 target_stack,
3936 FieldMemOperand(suspender, WasmSuspenderObject::kStackOffset),
3938 FREE_REG(suspender);
3939
3940 __ StoreRootRelative(IsolateData::active_stack_offset(), target_stack);
3941
3942 SwitchStacks(masm, active_stack, false, {target_stack});
3943
3944 regs.ResetExcept(target_stack);
3945
3946 // -------------------------------------------
3947 // Load state from target jmpbuf (longjmp).
3948 // -------------------------------------------
3949 regs.Reserve(kReturnRegister0);
3950 ASSIGN_REG(scratch);
3951 // Move resolved value to return register.
3953 MemOperand GCScanSlotPlace =
3954 MemOperand(fp, StackSwitchFrameConstants::kGCScanSlotCountOffset);
3955 __ Str(xzr, GCScanSlotPlace);
3956 if (on_resume == wasm::OnResume::kThrow) {
3957 // Switch without restoring the PC.
3958 LoadJumpBuffer(masm, target_stack, false, scratch,
3959                   wasm::JumpBuffer::Suspended);
3960    // Pop this frame now. The unwinder expects that the first STACK_SWITCH
3961 // frame is the outermost one.
3962 __ LeaveFrame(StackFrame::STACK_SWITCH);
3963 // Forward the onRejected value to kThrow.
3964 __ Push(xzr, kReturnRegister0);
3965 __ CallRuntime(Runtime::kThrow);
3966 } else {
3967 // Resume the stack normally.
3968 LoadJumpBuffer(masm, target_stack, true, scratch,
3969                   wasm::JumpBuffer::Suspended);
3970  }
3971 __ Trap();
3972 __ Bind(&suspend, BranchTargetIdentifier::kBtiJump);
3973 __ LeaveFrame(StackFrame::STACK_SWITCH);
3974 // Pop receiver + parameter.
3975 __ DropArguments(2);
3976 __ Ret(lr);
3977}
3978} // namespace
3979
3980void Builtins::Generate_WasmResume(MacroAssembler* masm) {
3981 Generate_WasmResumeHelper(masm, wasm::OnResume::kContinue);
3982}
3983
3984void Builtins::Generate_WasmReject(MacroAssembler* masm) {
3985 Generate_WasmResumeHelper(masm, wasm::OnResume::kThrow);
3986}
3987
3988void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
3989 // Only needed on x64.
3990 __ Trap();
3991}
3992namespace {
3993void SwitchToAllocatedStack(MacroAssembler* masm, RegisterAllocator& regs,
3994 Register wasm_instance, Register wrapper_buffer,
3995 Register& original_fp, Register& new_wrapper_buffer,
3996 Label* suspend) {
3997 ResetStackSwitchFrameStackSlots(masm);
3998 DEFINE_SCOPED(scratch)
3999 DEFINE_REG(parent_stack)
4000 __ LoadRootRelative(parent_stack, IsolateData::active_stack_offset());
4001 __ Ldr(parent_stack, MemOperand(parent_stack, wasm::kStackParentOffset));
4002 FillJumpBuffer(masm, parent_stack, suspend, scratch);
4003 SwitchStacks(masm, parent_stack, false, {wasm_instance, wrapper_buffer});
4004 FREE_REG(parent_stack);
4005 // Save the old stack's fp in x9, and use it to access the parameters in
4006 // the parent frame.
4007 regs.Pinned(x9, &original_fp);
4008 __ Mov(original_fp, fp);
4009 DEFINE_REG(target_stack);
4010 __ LoadRootRelative(target_stack, IsolateData::active_stack_offset());
4011 LoadTargetJumpBuffer(masm, target_stack, scratch,
4012                       wasm::JumpBuffer::Suspended);
4013  FREE_REG(target_stack);
4014 // Push the loaded fp. We know it is null, because there is no frame yet,
4015 // so we could also push 0 directly. In any case we need to push it,
4016 // because this marks the base of the stack segment for
4017 // the stack frame iterator.
4018 __ EnterFrame(StackFrame::STACK_SWITCH);
4019 int stack_space =
4020 RoundUp(StackSwitchFrameConstants::kNumSpillSlots * kSystemPointerSize +
4021 JSToWasmWrapperFrameConstants::kWrapperBufferSize,
4022 16);
4023 __ Sub(sp, sp, Immediate(stack_space));
4024 ASSIGN_REG(new_wrapper_buffer)
4025 __ Mov(new_wrapper_buffer, sp);
4026 // Copy data needed for return handling from old wrapper buffer to new one.
4027 // kWrapperBufferRefReturnCount will be copied too, because 8 bytes are copied
4028 // at the same time.
4029 static_assert(JSToWasmWrapperFrameConstants::kWrapperBufferRefReturnCount ==
4030 JSToWasmWrapperFrameConstants::kWrapperBufferReturnCount + 4);
4031 __ Ldr(scratch,
4032 MemOperand(wrapper_buffer,
4033 JSToWasmWrapperFrameConstants::kWrapperBufferReturnCount));
4034 __ Str(scratch,
4035 MemOperand(new_wrapper_buffer,
4036 JSToWasmWrapperFrameConstants::kWrapperBufferReturnCount));
4037 __ Ldr(
4038 scratch,
4039 MemOperand(
4040 wrapper_buffer,
4041 JSToWasmWrapperFrameConstants::kWrapperBufferSigRepresentationArray));
4042 __ Str(
4043 scratch,
4044 MemOperand(
4045 new_wrapper_buffer,
4046 JSToWasmWrapperFrameConstants::kWrapperBufferSigRepresentationArray));
4047}
4048
4049void SwitchBackAndReturnPromise(MacroAssembler* masm, RegisterAllocator& regs,
4050 wasm::Promise mode, Label* return_promise) {
4051 regs.ResetExcept();
4052 // The return value of the wasm function becomes the parameter of the
4053 // FulfillPromise builtin, and the promise is the return value of this
4054 // wrapper.
4055 static const Builtin_FulfillPromise_InterfaceDescriptor desc;
4056 DEFINE_PINNED(promise, desc.GetRegisterParameter(0));
4057 DEFINE_PINNED(return_value, desc.GetRegisterParameter(1));
4058 DEFINE_SCOPED(tmp);
4059 DEFINE_SCOPED(tmp2);
4060 DEFINE_SCOPED(tmp3);
4061 if (mode == wasm::kPromise) {
4062 __ Move(return_value, kReturnRegister0);
4063 __ LoadRoot(promise, RootIndex::kActiveSuspender);
4064 __ LoadTaggedField(
4065 promise, FieldMemOperand(promise, WasmSuspenderObject::kPromiseOffset));
4066 }
4067 __ Ldr(kContextRegister,
4068 MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset));
4069 GetContextFromImplicitArg(masm, kContextRegister, tmp);
4070
4071 ReloadParentStack(masm, promise, return_value, kContextRegister, tmp, tmp2,
4072 tmp3);
4073 RestoreParentSuspender(masm, tmp);
4074
4075 if (mode == wasm::kPromise) {
4076 __ Mov(tmp, 1);
4077 __ Str(tmp,
4078 MemOperand(fp, StackSwitchFrameConstants::kGCScanSlotCountOffset));
4079 __ Push(padreg, promise);
4080 __ CallBuiltin(Builtin::kFulfillPromise);
4081 __ Pop(promise, padreg);
4082 }
4083 FREE_REG(promise);
4084 FREE_REG(return_value);
4085 __ bind(return_promise);
4086}
4087
4088void GenerateExceptionHandlingLandingPad(MacroAssembler* masm,
4089 RegisterAllocator& regs,
4090 Label* return_promise) {
4091 regs.ResetExcept();
4092 static const Builtin_RejectPromise_InterfaceDescriptor desc;
4093 DEFINE_PINNED(promise, desc.GetRegisterParameter(0));
4094 DEFINE_PINNED(reason, desc.GetRegisterParameter(1));
4095 DEFINE_PINNED(debug_event, desc.GetRegisterParameter(2));
4096 int catch_handler = __ pc_offset();
4097 __ JumpTarget();
4098
4099 DEFINE_SCOPED(thread_in_wasm_flag_addr);
4100 thread_in_wasm_flag_addr = x2;
4101 // Unset thread_in_wasm_flag.
4102 __ Ldr(
4103      thread_in_wasm_flag_addr,
4104      MemOperand(kRootRegister, Isolate::thread_in_wasm_flag_address_offset()));
4105  __ Str(wzr, MemOperand(thread_in_wasm_flag_addr, 0));
4106
4107 // The exception becomes the parameter of the RejectPromise builtin, and the
4108 // promise is the return value of this wrapper.
4109 __ Move(reason, kReturnRegister0);
4110 __ LoadRoot(promise, RootIndex::kActiveSuspender);
4111 __ LoadTaggedField(
4112 promise, FieldMemOperand(promise, WasmSuspenderObject::kPromiseOffset));
4113
4114 __ Ldr(kContextRegister,
4115 MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset));
4116
4117 DEFINE_SCOPED(tmp);
4118 DEFINE_SCOPED(tmp2);
4119 DEFINE_SCOPED(tmp3);
4120 GetContextFromImplicitArg(masm, kContextRegister, tmp);
4121 ReloadParentStack(masm, promise, reason, kContextRegister, tmp, tmp2, tmp3);
4122 RestoreParentSuspender(masm, tmp);
4123
4124 __ Mov(tmp, 1);
4125 __ Str(tmp,
4126 MemOperand(fp, StackSwitchFrameConstants::kGCScanSlotCountOffset));
4127 __ Push(padreg, promise);
4128 __ LoadRoot(debug_event, RootIndex::kTrueValue);
4129 __ CallBuiltin(Builtin::kRejectPromise);
4130 __ Pop(promise, padreg);
4131
4132 // Run the rest of the wrapper normally (deconstruct the frame, ...).
4133 __ jmp(return_promise);
4134
4135 masm->isolate()->builtins()->SetJSPIPromptHandlerOffset(catch_handler);
4136}
4137
4138void JSToWasmWrapperHelper(MacroAssembler* masm, wasm::Promise mode) {
4139 bool stack_switch = mode == wasm::kPromise || mode == wasm::kStressSwitch;
4140 auto regs = RegisterAllocator::WithAllocatableGeneralRegisters();
4141
4142 __ EnterFrame(stack_switch ? StackFrame::STACK_SWITCH
4143 : StackFrame::JS_TO_WASM);
4144
4145 __ Sub(sp, sp,
4146 Immediate(StackSwitchFrameConstants::kNumSpillSlots *
4147                   kSystemPointerSize));
4148
4149 // Load the implicit argument (instance data or import data) from the frame.
4150  DEFINE_PINNED(implicit_arg, kWasmImplicitArgRegister);
4151  __ Ldr(implicit_arg,
4152 MemOperand(fp, JSToWasmWrapperFrameConstants::kImplicitArgOffset));
4153
4154 DEFINE_PINNED(wrapper_buffer,
4155                WasmJSToWasmWrapperDescriptor::WrapperBufferRegister());
4156
4157 Label suspend;
4158 Register original_fp = no_reg;
4159 Register new_wrapper_buffer = no_reg;
4160 if (stack_switch) {
4161 SwitchToAllocatedStack(masm, regs, implicit_arg, wrapper_buffer,
4162 original_fp, new_wrapper_buffer, &suspend);
4163 } else {
4164 original_fp = fp;
4165 new_wrapper_buffer = wrapper_buffer;
4166 }
4167
4168 regs.ResetExcept(original_fp, wrapper_buffer, implicit_arg,
4169 new_wrapper_buffer);
4170
4171 {
4172 __ Str(new_wrapper_buffer,
4173 MemOperand(fp, JSToWasmWrapperFrameConstants::kWrapperBufferOffset));
4174 if (stack_switch) {
4175 __ Str(implicit_arg,
4176 MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset));
4177 DEFINE_SCOPED(scratch)
4178 __ Ldr(
4179 scratch,
4180 MemOperand(original_fp,
4181 JSToWasmWrapperFrameConstants::kResultArrayParamOffset));
4182 __ Str(scratch,
4183 MemOperand(fp, StackSwitchFrameConstants::kResultArrayOffset));
4184 }
4185 }
4186 {
4187 DEFINE_SCOPED(result_size);
4188 __ Ldr(result_size,
4189 MemOperand(wrapper_buffer, JSToWasmWrapperFrameConstants::
4190 kWrapperBufferStackReturnBufferSize));
4191 // The `result_size` is the number of slots needed on the stack to store the
4192 // return values of the wasm function. If `result_size` is an odd number, we
4193 // have to add `1` to preserve stack pointer alignment.
4194 __ Add(result_size, result_size, 1);
4195 __ Bic(result_size, result_size, 1);
4196 __ Sub(sp, sp, Operand(result_size, LSL, kSystemPointerSizeLog2));
4197 }
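  // Worked example for the rounding above (added note): Add(1) followed by
  // Bic(..., 1) rounds an odd slot count up to the next even one and leaves an
  // even count unchanged, e.g. 3 -> 4 and 4 -> 4, so the Sub keeps sp 16-byte
  // aligned.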
4198 {
4199 DEFINE_SCOPED(scratch);
4200 __ Mov(scratch, sp);
4201 __ Str(scratch, MemOperand(new_wrapper_buffer,
4202 JSToWasmWrapperFrameConstants::
4203 kWrapperBufferStackReturnBufferStart));
4204 }
4205 if (stack_switch) {
4206 FREE_REG(new_wrapper_buffer)
4207 }
4208 FREE_REG(implicit_arg)
4209 for (auto reg : wasm::kGpParamRegisters) {
4210 regs.Reserve(reg);
4211 }
4212
4213 // The first GP parameter holds the trusted instance data or the import data.
4214 // This is handled specially.
4215 int stack_params_offset =
4216      (arraysize(wasm::kGpParamRegisters) - 1) * kSystemPointerSize +
4217      arraysize(wasm::kFpParamRegisters) * kDoubleSize;
4218
4219 {
4220 DEFINE_SCOPED(params_start);
4221 __ Ldr(params_start,
4222 MemOperand(wrapper_buffer,
4223 JSToWasmWrapperFrameConstants::kWrapperBufferParamStart));
4224 {
4225 // Push stack parameters on the stack.
4226 DEFINE_SCOPED(params_end);
4227 __ Ldr(params_end,
4228 MemOperand(wrapper_buffer,
4229 JSToWasmWrapperFrameConstants::kWrapperBufferParamEnd));
4230 DEFINE_SCOPED(last_stack_param);
4231
4232 __ Add(last_stack_param, params_start, Immediate(stack_params_offset));
4233 Label loop_start;
4234 {
4235 DEFINE_SCOPED(scratch);
4236 // Check if there is an even number of parameters, so no alignment
4237 // needed.
4238 __ Sub(scratch, params_end, last_stack_param);
4239 __ TestAndBranchIfAllClear(scratch, 0x8, &loop_start);
4240
4241 // Push the first parameter with alignment.
4242 __ Ldr(scratch, MemOperand(params_end, -kSystemPointerSize, PreIndex));
4243 __ Push(xzr, scratch);
4244 }
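      // Note (added): params_end - last_stack_param is the byte size of the
      // remaining stack parameters. Each slot is 8 bytes, so bit 3 being set
      // means an odd number of slots remain, and one parameter is pushed
      // together with xzr first to keep sp 16-byte aligned.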
4245 __ bind(&loop_start);
4246
4247 Label finish_stack_params;
4248 __ Cmp(last_stack_param, params_end);
4249 __ B(ge, &finish_stack_params);
4250
4251 // Push parameter
4252 {
4253 DEFINE_SCOPED(scratch1);
4254 DEFINE_SCOPED(scratch2);
4255 __ Ldp(scratch2, scratch1,
4256 MemOperand(params_end, -2 * kSystemPointerSize, PreIndex));
4257 __ Push(scratch1, scratch2);
4258 }
4259 __ jmp(&loop_start);
4260
4261 __ bind(&finish_stack_params);
4262 }
4263
4264 size_t next_offset = 0;
4265 for (size_t i = 1; i < arraysize(wasm::kGpParamRegisters); i += 2) {
4266 // Check that {params_start} does not overlap with any of the parameter
4267 // registers, so that we don't overwrite it by accident with the loads
4268 // below.
4269 DCHECK_NE(params_start, wasm::kGpParamRegisters[i]);
4270 DCHECK_NE(params_start, wasm::kGpParamRegisters[i + 1]);
4271      __ Ldp(wasm::kGpParamRegisters[i], wasm::kGpParamRegisters[i + 1],
4272             MemOperand(params_start, next_offset));
4273 next_offset += 2 * kSystemPointerSize;
4274 }
4275
4276 for (size_t i = 0; i < arraysize(wasm::kFpParamRegisters); i += 2) {
4277      __ Ldp(wasm::kFpParamRegisters[i], wasm::kFpParamRegisters[i + 1],
4278             MemOperand(params_start, next_offset));
4279 next_offset += 2 * kDoubleSize;
4280 }
4281 DCHECK_EQ(next_offset, stack_params_offset);
4282 }
4283
4284 {
4285 DEFINE_SCOPED(thread_in_wasm_flag_addr);
4286    __ Ldr(thread_in_wasm_flag_addr,
4287           MemOperand(kRootRegister,
4288                      Isolate::thread_in_wasm_flag_address_offset()));
4289    DEFINE_SCOPED(scratch);
4290 __ Mov(scratch, 1);
4291 __ Str(scratch.W(), MemOperand(thread_in_wasm_flag_addr, 0));
4292 }
4293 __ Str(xzr,
4294 MemOperand(fp, StackSwitchFrameConstants::kGCScanSlotCountOffset));
4295 {
4296 DEFINE_SCOPED(call_target);
4297 __ LoadWasmCodePointer(
4298 call_target,
4299 MemOperand(wrapper_buffer,
4300 JSToWasmWrapperFrameConstants::kWrapperBufferCallTarget));
4301 // We do the call without a signature check here, since the wrapper loaded
4302 // the signature from the same trusted object as the call target to set up
4303 // the stack layout. We could add a signature hash and pass it through to
4304 // verify it here, but an attacker that could corrupt the signature could
4305 // also corrupt that signature hash (which is outside of the sandbox).
4306 __ CallWasmCodePointerNoSignatureCheck(call_target);
4307 }
4308 regs.ResetExcept();
4309 // The wrapper_buffer has to be in x2 as the correct parameter register.
4310 regs.Reserve(kReturnRegister0, kReturnRegister1);
4311 ASSIGN_PINNED(wrapper_buffer, x2);
4312 {
4313 DEFINE_SCOPED(thread_in_wasm_flag_addr);
4314    __ Ldr(thread_in_wasm_flag_addr,
4315           MemOperand(kRootRegister,
4316                      Isolate::thread_in_wasm_flag_address_offset()));
4317    __ Str(wzr, MemOperand(thread_in_wasm_flag_addr, 0));
4318 }
4319
4320 __ Ldr(wrapper_buffer,
4321 MemOperand(fp, JSToWasmWrapperFrameConstants::kWrapperBufferOffset));
4322
4323  __ Ldr(wasm::kFpReturnRegisters[0],
4324         MemOperand(
4325             wrapper_buffer,
4326             JSToWasmWrapperFrameConstants::kWrapperBufferFPReturnRegister1));
4327  __ Ldr(wasm::kFpReturnRegisters[1],
4328         MemOperand(
4329             wrapper_buffer,
4330             JSToWasmWrapperFrameConstants::kWrapperBufferFPReturnRegister2));
4331  __ Ldr(wasm::kGpReturnRegisters[0],
4332         MemOperand(
4333             wrapper_buffer,
4334             JSToWasmWrapperFrameConstants::kWrapperBufferGPReturnRegister1));
4335  __ Ldr(wasm::kGpReturnRegisters[1],
4336         MemOperand(
4337             wrapper_buffer,
4338             JSToWasmWrapperFrameConstants::kWrapperBufferGPReturnRegister2));
4339 // Call the return value builtin with
4340 // x0: wasm instance.
4341 // x1: the result JSArray for multi-return.
4342 // x2: pointer to the byte buffer which contains all parameters.
4343 if (stack_switch) {
4344 __ Ldr(x1, MemOperand(fp, StackSwitchFrameConstants::kResultArrayOffset));
4345 __ Ldr(x0, MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset));
4346 } else {
4347 __ Ldr(x1, MemOperand(
4348 fp, JSToWasmWrapperFrameConstants::kResultArrayParamOffset));
4349 __ Ldr(x0,
4350 MemOperand(fp, JSToWasmWrapperFrameConstants::kImplicitArgOffset));
4351 }
4352 Register scratch = x3;
4353 GetContextFromImplicitArg(masm, x0, scratch);
4354 __ CallBuiltin(Builtin::kJSToWasmHandleReturns);
4355
4356 Label return_promise;
4357 if (stack_switch) {
4358 SwitchBackAndReturnPromise(masm, regs, mode, &return_promise);
4359 }
4360 __ Bind(&suspend, BranchTargetIdentifier::kBtiJump);
4361
4362 __ LeaveFrame(stack_switch ? StackFrame::STACK_SWITCH
4363 : StackFrame::JS_TO_WASM);
4364  // Despite returning to a different location for the regular and the
4365  // stack-switching versions, the incoming argument count matches in both
4366  // cases: either the instance and the result array (no suspend), or the
4367  // promise resolve/reject params for the callback.
4368 constexpr int64_t stack_arguments_in = 2;
4369 __ DropArguments(stack_arguments_in);
4370 __ Ret();
4371
4372 // Catch handler for the stack-switching wrapper: reject the promise with the
4373 // thrown exception.
4374 if (mode == wasm::kPromise) {
4375 GenerateExceptionHandlingLandingPad(masm, regs, &return_promise);
4376 }
4377}
4378} // namespace
4379
4380void Builtins::Generate_JSToWasmWrapperAsm(MacroAssembler* masm) {
4381 JSToWasmWrapperHelper(masm, wasm::kNoPromise);
4382}
4383
4384void Builtins::Generate_WasmReturnPromiseOnSuspendAsm(MacroAssembler* masm) {
4385 JSToWasmWrapperHelper(masm, wasm::kPromise);
4386}
4387
4388void Builtins::Generate_JSToWasmStressSwitchStacksAsm(MacroAssembler* masm) {
4389 JSToWasmWrapperHelper(masm, wasm::kStressSwitch);
4390}
4391
4392namespace {
4393void SwitchSimulatorStackLimit(MacroAssembler* masm) {
4394 if (masm->options().enable_simulator_code) {
4395 UseScratchRegisterScope temps(masm);
4396 temps.Exclude(x16);
4397 __ LoadStackLimit(x16, StackLimitKind::kRealStackLimit);
4398    __ hlt(kImmExceptionIsSwitchStackLimit);
4399  }
4400}
4401
4402static constexpr Register kOldSPRegister = x23;
4403static constexpr Register kSwitchFlagRegister = x24;
4404
4405void SwitchToTheCentralStackIfNeeded(MacroAssembler* masm, Register argc_input,
4406 Register target_input,
4407 Register argv_input) {
4408 using ER = ExternalReference;
4409
4410 __ Mov(kSwitchFlagRegister, 0);
4411 __ Mov(kOldSPRegister, sp);
4412
4413 // Using x2-x4 as temporary registers, because they will be rewritten
4414 // before exiting to native code anyway.
4415
4416 ER on_central_stack_flag_loc = ER::Create(
4417 IsolateAddressId::kIsOnCentralStackFlagAddress, masm->isolate());
4418 const Register& on_central_stack_flag = x2;
4419 __ Mov(on_central_stack_flag, on_central_stack_flag_loc);
4420 __ Ldrb(on_central_stack_flag, MemOperand(on_central_stack_flag));
4421
4422 Label do_not_need_to_switch;
4423 __ Cbnz(on_central_stack_flag, &do_not_need_to_switch);
4424 // Switch to central stack.
4425
4426 static constexpr Register central_stack_sp = x4;
4427 DCHECK(!AreAliased(central_stack_sp, argc_input, argv_input, target_input));
4428 {
4429 __ Push(argc_input, target_input, argv_input, padreg);
4430 __ Mov(kCArgRegs[0], ER::isolate_address());
4431 __ Mov(kCArgRegs[1], kOldSPRegister);
4432 __ CallCFunction(ER::wasm_switch_to_the_central_stack(), 2,
4433                     SetIsolateDataSlots::kNo);
4434    __ Mov(central_stack_sp, kReturnRegister0);
4435 __ Pop(padreg, argv_input, target_input, argc_input);
4436 }
4437 {
4438 // Update the sp saved in the frame.
4439 // It will be used to calculate the callee pc during GC.
4440 // The pc is going to be on the new stack segment, so rewrite it here.
4441 UseScratchRegisterScope temps{masm};
4442 Register new_sp_after_call = temps.AcquireX();
4443 __ Sub(new_sp_after_call, central_stack_sp, kSystemPointerSize);
4444 __ Str(new_sp_after_call, MemOperand(fp, ExitFrameConstants::kSPOffset));
4445 }
4446
4447 SwitchSimulatorStackLimit(masm);
4448
4449 static constexpr int kReturnAddressSlotOffset = 1 * kSystemPointerSize;
4450 static constexpr int kPadding = 1 * kSystemPointerSize;
4451 __ Sub(sp, central_stack_sp, kReturnAddressSlotOffset + kPadding);
4452 __ Mov(kSwitchFlagRegister, 1);
4453
4454 __ bind(&do_not_need_to_switch);
4455}
4456
4457void SwitchFromTheCentralStackIfNeeded(MacroAssembler* masm) {
4458 using ER = ExternalReference;
4459
4460 Label no_stack_change;
4461 __ Cbz(kSwitchFlagRegister, &no_stack_change);
4462
4463  {
4464    __ Push(kReturnRegister0, kReturnRegister1);
4465    __ Mov(kCArgRegs[0], ER::isolate_address());
4466    __ CallCFunction(ER::wasm_switch_from_the_central_stack(), 1,
4467                     SetIsolateDataSlots::kNo);
4468    __ Pop(kReturnRegister1, kReturnRegister0);
4469  }
4470
4471 SwitchSimulatorStackLimit(masm);
4472
4473 __ Mov(sp, kOldSPRegister);
4474
4475 __ bind(&no_stack_change);
4476}
4477
4478} // namespace
4479
4480#endif // V8_ENABLE_WEBASSEMBLY
4481
4482void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
4483 ArgvMode argv_mode, bool builtin_exit_frame,
4484 bool switch_to_central_stack) {
4485 ASM_LOCATION("CEntry::Generate entry");
4486
4487 using ER = ExternalReference;
4488
4489 // Register parameters:
4490 // x0: argc (including receiver, untagged)
4491 // x1: target
4492 // If argv_mode == ArgvMode::kRegister:
4493 // x11: argv (pointer to first argument)
4494 //
4495 // The stack on entry holds the arguments and the receiver, with the receiver
4496 // at the highest address:
4497 //
4498 // sp[argc-1]: receiver
4499 // sp[argc-2]: arg[argc-2]
4500 // ... ...
4501 // sp[1]: arg[1]
4502 // sp[0]: arg[0]
4503 //
4504 // The arguments are in reverse order, so that arg[argc-2] is actually the
4505 // first argument to the target function and arg[0] is the last.
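  // Worked example (added note), argc == 3, i.e. a receiver plus two
  // arguments: sp[2] holds the receiver, sp[1] holds arg[1] (the first
  // argument passed to the target), and sp[0] holds arg[0] (the last).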
4506 static constexpr Register argc_input = x0;
4507 static constexpr Register target_input = x1;
4508 // Initialized below if ArgvMode::kStack.
4509 static constexpr Register argv_input = x11;
4510
4511 if (argv_mode == ArgvMode::kStack) {
4512 // Derive argv from the stack pointer so that it points to the first
4513 // argument.
4514 __ SlotAddress(argv_input, argc_input);
4515 __ Sub(argv_input, argv_input, kReceiverOnStackSize);
4516 }
4517
4518 // If ArgvMode::kStack, argc is reused below and must be retained across the
4519 // call in a callee-saved register.
4520 static constexpr Register argc = x22;
4521
4522 // Enter the exit frame.
4523 const int kNoExtraSpace = 0;
4524 FrameScope scope(masm, StackFrame::MANUAL);
4525 __ EnterExitFrame(
4526 x10, kNoExtraSpace,
4527 builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
4528
4529 if (argv_mode == ArgvMode::kStack) {
4530 __ Mov(argc, argc_input);
4531 }
4532
4533#if V8_ENABLE_WEBASSEMBLY
4534 if (switch_to_central_stack) {
4535 SwitchToTheCentralStackIfNeeded(masm, argc_input, target_input, argv_input);
4536 }
4537#endif // V8_ENABLE_WEBASSEMBLY
4538
4539 // x21 : argv
4540 // x22 : argc
4541 // x23 : call target
4542 //
4543 // The stack (on entry) holds the arguments and the receiver, with the
4544 // receiver at the highest address:
4545 //
4546 // argv[8]: receiver
4547 // argv -> argv[0]: arg[argc-2]
4548 // ... ...
4549 // argv[...]: arg[1]
4550 // argv[...]: arg[0]
4551 //
4552 // Immediately below (after) this is the exit frame, as constructed by
4553 // EnterExitFrame:
4554 // fp[8]: CallerPC (lr)
4555 // fp -> fp[0]: CallerFP (old fp)
4556 // fp[-8]: Space reserved for SPOffset.
4557 // fp[-16]: CodeObject()
4558 // sp[...]: Saved doubles, if saved_doubles is true.
4559 // sp[16]: Alignment padding, if necessary.
4560 // sp[8]: Preserved x22 (used for argc).
4561 // sp -> sp[0]: Space reserved for the return address.
4562
4563 // TODO(jgruber): Swap these registers in the calling convention instead.
4564 static_assert(target_input == x1);
4565 static_assert(argv_input == x11);
4566 __ Swap(target_input, argv_input);
4567 static constexpr Register target = x11;
4568 static constexpr Register argv = x1;
4569 static_assert(!AreAliased(argc_input, argc, target, argv));
4570
4571 // Prepare AAPCS64 arguments to pass to the builtin.
4572 static_assert(argc_input == x0); // Already in the right spot.
4573 static_assert(argv == x1); // Already in the right spot.
4574 __ Mov(x2, ER::isolate_address());
4575
4576 __ StoreReturnAddressAndCall(target);
4577
4578 // Result returned in x0 or x1:x0 - do not destroy these registers!
4579
4580 // x0 result0 The return code from the call.
4581 // x1 result1 For calls which return ObjectPair.
4582 // x22 argc .. only if ArgvMode::kStack.
4583 const Register& result = x0;
4584
4585 // Check result for exception sentinel.
4586 Label exception_returned;
4587 // The returned value may be a trusted object, living outside of the main
4588 // pointer compression cage, so we need to use full pointer comparison here.
4589 __ CompareRoot(result, RootIndex::kException, ComparisonMode::kFullPointer);
4590 __ B(eq, &exception_returned);
4591
4592#if V8_ENABLE_WEBASSEMBLY
4593 if (switch_to_central_stack) {
4594 SwitchFromTheCentralStackIfNeeded(masm);
4595 }
4596#endif // V8_ENABLE_WEBASSEMBLY
4597
4598 // The call succeeded, so unwind the stack and return.
4599 if (argv_mode == ArgvMode::kStack) {
4600 __ Mov(x11, argc); // x11 used as scratch, just til DropArguments below.
4601 __ LeaveExitFrame(x10, x9);
4602 __ DropArguments(x11);
4603 } else {
4604 __ LeaveExitFrame(x10, x9);
4605 }
4606
4607 __ AssertFPCRState();
4608 __ Ret();
4609
4610 // Handling of exception.
4611 __ Bind(&exception_returned);
4612
4613 // Ask the runtime for help to determine the handler. This will set x0 to
4614 // contain the current exception, don't clobber it.
4615 {
4616 FrameScope scope(masm, StackFrame::MANUAL);
4617 __ Mov(x0, 0); // argc.
4618 __ Mov(x1, 0); // argv.
4619 __ Mov(x2, ER::isolate_address());
4620 __ CallCFunction(ER::Create(Runtime::kUnwindAndFindExceptionHandler), 3,
4621                     SetIsolateDataSlots::kNo);
4622  }
4623
4624 // Retrieve the handler context, SP and FP.
4625 __ Mov(cp, ER::Create(IsolateAddressId::kPendingHandlerContextAddress,
4626 masm->isolate()));
4627 __ Ldr(cp, MemOperand(cp));
4628 {
4629 UseScratchRegisterScope temps(masm);
4630 Register scratch = temps.AcquireX();
4631 __ Mov(scratch, ER::Create(IsolateAddressId::kPendingHandlerSPAddress,
4632 masm->isolate()));
4633 __ Ldr(scratch, MemOperand(scratch));
4634 __ Mov(sp, scratch);
4635 }
4636 __ Mov(fp, ER::Create(IsolateAddressId::kPendingHandlerFPAddress,
4637 masm->isolate()));
4638 __ Ldr(fp, MemOperand(fp));
4639
4640 // If the handler is a JS frame, restore the context to the frame. Note that
4641 // the context will be set to (cp == 0) for non-JS frames.
4642 Label not_js_frame;
4643 __ Cbz(cp, &not_js_frame);
4644  __ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4645  __ Bind(&not_js_frame);
4646
4647 {
4648 // Clear c_entry_fp, like we do in `LeaveExitFrame`.
4649 UseScratchRegisterScope temps(masm);
4650 Register scratch = temps.AcquireX();
4651 __ Mov(scratch,
4652 ER::Create(IsolateAddressId::kCEntryFPAddress, masm->isolate()));
4653 __ Str(xzr, MemOperand(scratch));
4654 }
4655
4656 // Compute the handler entry address and jump to it. We use x17 here for the
4657 // jump target, as this jump can occasionally end up at the start of
4658 // InterpreterEnterAtBytecode, which when CFI is enabled starts with
4659 // a "BTI c".
4660 UseScratchRegisterScope temps(masm);
4661 temps.Exclude(x17);
4662 __ Mov(x17, ER::Create(IsolateAddressId::kPendingHandlerEntrypointAddress,
4663 masm->isolate()));
4664 __ Ldr(x17, MemOperand(x17));
4665 __ Br(x17);
4666}
4667
4668#if V8_ENABLE_WEBASSEMBLY
4669void Builtins::Generate_WasmHandleStackOverflow(MacroAssembler* masm) {
4670 using ER = ExternalReference;
4671 Register frame_base = WasmHandleStackOverflowDescriptor::FrameBaseRegister();
4672  Register gap = WasmHandleStackOverflowDescriptor::GapRegister();
4673  {
4674 DCHECK_NE(kCArgRegs[1], frame_base);
4675 DCHECK_NE(kCArgRegs[3], frame_base);
4676 __ Mov(kCArgRegs[3], gap);
4677 __ Mov(kCArgRegs[1], sp);
4678 __ Sub(kCArgRegs[2], frame_base, kCArgRegs[1]);
4679 __ Mov(kCArgRegs[4], fp);
4680 FrameScope scope(masm, StackFrame::INTERNAL);
4681 __ Push(kCArgRegs[3], padreg);
4682 __ Mov(kCArgRegs[0], ER::isolate_address());
4683 __ CallCFunction(ER::wasm_grow_stack(), 5);
4684 __ Pop(padreg, gap);
4685    DCHECK_NE(kReturnRegister0, gap);
4686  }
4687 Label call_runtime;
4688 // wasm_grow_stack returns zero if it cannot grow a stack.
4689 __ Cbz(kReturnRegister0, &call_runtime);
4690 {
4691 UseScratchRegisterScope temps(masm);
4692 Register new_fp = temps.AcquireX();
4693 // Calculate old FP - SP offset to adjust FP accordingly to new SP.
4694 __ Mov(new_fp, sp);
4695 __ Sub(new_fp, fp, new_fp);
4696 __ Add(new_fp, kReturnRegister0, new_fp);
4697 __ Mov(fp, new_fp);
4698 }
4699 SwitchSimulatorStackLimit(masm);
4700 __ Mov(sp, kReturnRegister0);
4701 {
4702 UseScratchRegisterScope temps(masm);
4703 Register scratch = temps.AcquireX();
4704 __ Mov(scratch, StackFrame::TypeToMarker(StackFrame::WASM_SEGMENT_START));
4705    __ Str(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
4706  }
4707 __ Ret();
4708
4709 __ bind(&call_runtime);
4710 // If wasm_grow_stack returns zero interruption or stack overflow
4711 // should be handled by runtime call.
4712  {
4713    __ Ldr(kWasmImplicitArgRegister,
4714           MemOperand(fp, WasmFrameConstants::kWasmInstanceDataOffset));
4715    __ LoadTaggedField(
4716        cp, FieldMemOperand(kWasmImplicitArgRegister,
4717                            WasmTrustedInstanceData::kNativeContextOffset));
4718 FrameScope scope(masm, StackFrame::MANUAL);
4719 __ EnterFrame(StackFrame::INTERNAL);
4720 __ SmiTag(gap);
4721 __ PushArgument(gap);
4722 __ CallRuntime(Runtime::kWasmStackGuard);
4723 __ LeaveFrame(StackFrame::INTERNAL);
4724 __ Ret();
4725 }
4726}
4727#endif // V8_ENABLE_WEBASSEMBLY
4728
4729void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
4730 Label done;
4731 Register result = x7;
4732
4733 DCHECK(result.Is64Bits());
4734
4735 HardAbortScope hard_abort(masm); // Avoid calls to Abort.
4736 UseScratchRegisterScope temps(masm);
4737 Register scratch1 = temps.AcquireX();
4738 Register scratch2 = temps.AcquireX();
4739 DoubleRegister double_scratch = temps.AcquireD();
4740
4741 // Account for saved regs.
4742 const int kArgumentOffset = 2 * kSystemPointerSize;
4743
4744 __ Push(result, scratch1); // scratch1 is also pushed to preserve alignment.
4745 __ Peek(double_scratch, kArgumentOffset);
4746
4747 // Try to convert with a FPU convert instruction. This handles all
4748 // non-saturating cases.
4749 __ TryConvertDoubleToInt64(result, double_scratch, &done);
4750 __ Fmov(result, double_scratch);
4751
4752 // If we reach here we need to manually convert the input to an int32.
4753
4754 // Extract the exponent.
4755 Register exponent = scratch1;
4756  __ Ubfx(exponent, result, HeapNumber::kMantissaBits,
4757          HeapNumber::kExponentBits);
4758
4759  // If the exponent is >= 84 (kMantissaBits + 32), the result is always 0 since
4760  // the mantissa gets shifted completely out of the int32_t result.
4761  __ Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
4762 __ CzeroX(result, ge);
4763 __ B(ge, &done);
4764
4765 // The Fcvtzs sequence handles all cases except where the conversion causes
4766 // signed overflow in the int64_t target. Since we've already handled
4767 // exponents >= 84, we can guarantee that 63 <= exponent < 84.
4768
4769 if (v8_flags.debug_code) {
4770 __ Cmp(exponent, HeapNumber::kExponentBias + 63);
4771 // Exponents less than this should have been handled by the Fcvt case.
4772 __ Check(ge, AbortReason::kUnexpectedValue);
4773 }
4774
4775 // Isolate the mantissa bits, and set the implicit '1'.
4776 Register mantissa = scratch2;
4777 __ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
4778 __ Orr(mantissa, mantissa, 1ULL << HeapNumber::kMantissaBits);
4779
4780 // Negate the mantissa if necessary.
4781 __ Tst(result, kXSignMask);
4782 __ Cneg(mantissa, mantissa, ne);
4783
4784 // Shift the mantissa bits in the correct place. We know that we have to shift
4785 // it left here, because exponent >= 63 >= kMantissaBits.
4786 __ Sub(exponent, exponent,
4787         HeapNumber::kExponentBias + HeapNumber::kMantissaBits);
4788  __ Lsl(result, mantissa, exponent);
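  // Worked example (added note): for the input 2^63, the biased exponent field
  // is kExponentBias + 63, the mantissa with its implicit bit is 1 << 52, and
  // the shift amount computed above is 63 - 52 = 11. The Lsl therefore yields
  // 1 << 63, whose low 32 bits are 0, matching ToInt32(2^63) == 0.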
4789
4790 __ Bind(&done);
4791 __ Poke(result, kArgumentOffset);
4792 __ Pop(scratch1, result);
4793 __ Ret();
4794}
4795
4796void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm,
4797 CallApiCallbackMode mode) {
4798 // ----------- S t a t e -------------
4799 // CallApiCallbackMode::kOptimizedNoProfiling/kOptimized modes:
4800 // -- x1 : api function address
4801 // Both modes:
4802 // -- x2 : arguments count (not including the receiver)
4803 // -- x3 : FunctionTemplateInfo
4804 // -- cp : context
4805 // -- sp[0] : receiver
4806 // -- sp[8] : first argument
4807 // -- ...
4808 // -- sp[(argc) * 8] : last argument
4809 // -----------------------------------
4810
4811 Register function_callback_info_arg = kCArgRegs[0];
4812
4813 Register api_function_address = no_reg;
4814 Register argc = no_reg;
4815 Register func_templ = no_reg;
4816 Register topmost_script_having_context = no_reg;
4817 Register scratch = x4;
4818
4819  switch (mode) {
4820    case CallApiCallbackMode::kGeneric:
4821      argc = CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister();
4822      topmost_script_having_context = CallApiCallbackGenericDescriptor::
4823          TopmostScriptHavingContextRegister();
4824      func_templ =
4825          CallApiCallbackGenericDescriptor::FunctionTemplateInfoRegister();
4826      break;
4827
4828    case CallApiCallbackMode::kOptimizedNoProfiling:
4829    case CallApiCallbackMode::kOptimized:
4830      // Caller context is always equal to current context because we don't
4831      // inline Api calls cross-context.
4832      topmost_script_having_context = kContextRegister;
4833      api_function_address =
4834          CallApiCallbackOptimizedDescriptor::ApiFunctionAddressRegister();
4835      argc = CallApiCallbackOptimizedDescriptor::ActualArgumentsCountRegister();
4836      func_templ =
4837          CallApiCallbackOptimizedDescriptor::FunctionTemplateInfoRegister();
4838      break;
4839 }
4840 DCHECK(!AreAliased(api_function_address, topmost_script_having_context, argc,
4841 func_templ, scratch));
4842
4843 using FCA = FunctionCallbackArguments;
4844 using ER = ExternalReference;
4845 using FC = ApiCallbackExitFrameConstants;
4846
4847 static_assert(FCA::kArgsLength == 6);
4848 static_assert(FCA::kNewTargetIndex == 5);
4849 static_assert(FCA::kTargetIndex == 4);
4850 static_assert(FCA::kReturnValueIndex == 3);
4851 static_assert(FCA::kContextIndex == 2);
4852 static_assert(FCA::kIsolateIndex == 1);
4853 static_assert(FCA::kUnusedIndex == 0);
4854
4855 // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
4856 // Target state:
4857 // sp[0 * kSystemPointerSize]: kUnused <= FCA::implicit_args_
4858 // sp[1 * kSystemPointerSize]: kIsolate
4859 // sp[2 * kSystemPointerSize]: kContext
4860 // sp[3 * kSystemPointerSize]: undefined (kReturnValue)
4861 // sp[4 * kSystemPointerSize]: kTarget
4862 // sp[5 * kSystemPointerSize]: undefined (kNewTarget)
4863 // Existing state:
4864  //   sp[6 * kSystemPointerSize]:  <= FCA::values_
4865
4866 __ StoreRootRelative(IsolateData::topmost_script_having_context_offset(),
4867 topmost_script_having_context);
4868
4869 if (mode == CallApiCallbackMode::kGeneric) {
4870 api_function_address = ReassignRegister(topmost_script_having_context);
4871 }
4872
4873 // Reserve space on the stack.
4874 static constexpr int kStackSize = FCA::kArgsLength;
4875 static_assert(kStackSize % 2 == 0);
4876 __ Claim(kStackSize, kSystemPointerSize);
4877
4878 // kIsolate.
4879 __ Mov(scratch, ER::isolate_address());
4880 __ Str(scratch, MemOperand(sp, FCA::kIsolateIndex * kSystemPointerSize));
4881
4882 // kContext.
4883 __ Str(cp, MemOperand(sp, FCA::kContextIndex * kSystemPointerSize));
4884
4885 // kReturnValue.
4886 __ LoadRoot(scratch, RootIndex::kUndefinedValue);
4887 __ Str(scratch, MemOperand(sp, FCA::kReturnValueIndex * kSystemPointerSize));
4888
4889 // kTarget.
4890 __ Str(func_templ, MemOperand(sp, FCA::kTargetIndex * kSystemPointerSize));
4891
4892 // kNewTarget.
4893 __ Str(scratch, MemOperand(sp, FCA::kNewTargetIndex * kSystemPointerSize));
4894
4895 // kUnused.
4896 __ Str(scratch, MemOperand(sp, FCA::kUnusedIndex * kSystemPointerSize));
4897
4898 FrameScope frame_scope(masm, StackFrame::MANUAL);
4899 if (mode == CallApiCallbackMode::kGeneric) {
4900 __ LoadExternalPointerField(
4901 api_function_address,
4902 FieldMemOperand(func_templ,
4903                        FunctionTemplateInfo::kMaybeRedirectedCallbackOffset),
4904        kFunctionTemplateInfoCallbackTag);
4905  }
4906
4907 __ EnterExitFrame(scratch, FC::getExtraSlotsCountFrom<ExitFrameConstants>(),
4908 StackFrame::API_CALLBACK_EXIT);
4909
4910 // This is a workaround for performance regression observed on Apple Silicon
4911 // (https://crbug.com/347741609): reading argc value after the call via
4912 // MemOperand argc_operand = MemOperand(fp, FC::kFCIArgcOffset);
4913 // is noticeably slower than using sp-based access:
4914 MemOperand argc_operand = ExitFrameStackSlotOperand(FCA::kLengthOffset);
4915 if (v8_flags.debug_code) {
4916 // Ensure sp-based calculation of FC::length_'s address matches the
4917 // fp-based one.
4918 Label ok;
4919 // +kSystemPointerSize is for the slot at [sp] which is reserved in all
4920 // ExitFrames for storing the return PC.
4921 __ Add(scratch, sp,
4922 FCA::kLengthOffset + kSystemPointerSize - FC::kFCIArgcOffset);
4923 __ cmp(scratch, fp);
4924 __ B(eq, &ok);
4925 __ DebugBreak();
4926 __ Bind(&ok);
4927 }
4928 {
4929 ASM_CODE_COMMENT_STRING(masm, "Initialize v8::FunctionCallbackInfo");
4930 // FunctionCallbackInfo::length_.
4931 // TODO(ishell): pass JSParameterCount(argc) to simplify things on the
4932 // caller end.
4933 __ Str(argc, argc_operand);
4934
4935 // FunctionCallbackInfo::implicit_args_.
4936 __ Add(scratch, fp, Operand(FC::kImplicitArgsArrayOffset));
4937 __ Str(scratch, MemOperand(fp, FC::kFCIImplicitArgsOffset));
4938
4939 // FunctionCallbackInfo::values_ (points at JS arguments on the stack).
4940 __ Add(scratch, fp, Operand(FC::kFirstArgumentOffset));
4941 __ Str(scratch, MemOperand(fp, FC::kFCIValuesOffset));
4942 }
4943
4944 __ RecordComment("v8::FunctionCallback's argument.");
4945 // function_callback_info_arg = v8::FunctionCallbackInfo&
4946 __ Add(function_callback_info_arg, fp,
4947 Operand(FC::kFunctionCallbackInfoOffset));
4948
4949 DCHECK(!AreAliased(api_function_address, function_callback_info_arg));
4950
4951 ExternalReference thunk_ref = ER::invoke_function_callback(mode);
4952 Register no_thunk_arg = no_reg;
4953
4954 MemOperand return_value_operand = MemOperand(fp, FC::kReturnValueOffset);
4955 static constexpr int kSlotsToDropOnReturn =
4956 FC::kFunctionCallbackInfoArgsLength + kJSArgcReceiverSlots;
4957
4958 const bool with_profiling =
4959 mode != CallApiCallbackMode::kOptimizedNoProfiling;
4960 CallApiFunctionAndReturn(masm, with_profiling, api_function_address,
4961 thunk_ref, no_thunk_arg, kSlotsToDropOnReturn,
4962 &argc_operand, return_value_operand);
4963}
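// For reference, a minimal embedder-side sketch of the callback this stub
// eventually dispatches to, written against the public API in include/v8.h
// only (the function and property names below are illustrative, not taken
// from this file):
//
//   void MyNativeFunction(const v8::FunctionCallbackInfo<v8::Value>& info) {
//     v8::Isolate* isolate = info.GetIsolate();  // backed by kIsolateIndex
//     int argc = info.Length();                  // FunctionCallbackInfo::length_
//     v8::Local<v8::Value> receiver = info.This();
//     // Whatever is set here ends up in the kReturnValue slot that was
//     // initialized to undefined above.
//     info.GetReturnValue().Set(v8::Number::New(isolate, argc));
//   }
//
// Such a callback is typically registered through
// v8::FunctionTemplate::New(isolate, MyNativeFunction).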
4964
4965void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
4966 // ----------- S t a t e -------------
4967 // -- cp : context
4968 // -- x1 : receiver
4969 // -- x3 : accessor info
4970 // -- x0 : holder
4971 // -----------------------------------
4972
4973 Register name_arg = kCArgRegs[0];
4974 Register property_callback_info_arg = kCArgRegs[1];
4975
4976 Register api_function_address = x2;
4977 Register receiver = ApiGetterDescriptor::ReceiverRegister();
4978 Register holder = ApiGetterDescriptor::HolderRegister();
4979 Register callback = ApiGetterDescriptor::CallbackRegister();
4980 Register scratch = x4;
4981 Register undef = x5;
4982 Register scratch2 = x6;
4983
4984 DCHECK(!AreAliased(receiver, holder, callback, scratch, undef, scratch2));
4985
4986 // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
4987 // name below the exit frame to make GC aware of them.
4988 using PCA = PropertyCallbackArguments;
4989 using ER = ExternalReference;
4990 using FC = ApiAccessorExitFrameConstants;
4991
4992 static_assert(PCA::kPropertyKeyIndex == 0);
4993 static_assert(PCA::kShouldThrowOnErrorIndex == 1);
4994 static_assert(PCA::kHolderIndex == 2);
4995 static_assert(PCA::kIsolateIndex == 3);
4996 static_assert(PCA::kHolderV2Index == 4);
4997 static_assert(PCA::kReturnValueIndex == 5);
4998 static_assert(PCA::kDataIndex == 6);
4999 static_assert(PCA::kThisIndex == 7);
5000 static_assert(PCA::kArgsLength == 8);
5001
5002 // Set up v8::PropertyCallbackInfo's (PCI) args_ on the stack as follows:
5003 // Target state:
5004 // sp[0 * kSystemPointerSize]: name <= PCI::args_
5005 // sp[1 * kSystemPointerSize]: kShouldThrowOnErrorIndex
5006 // sp[2 * kSystemPointerSize]: kHolderIndex
5007 // sp[3 * kSystemPointerSize]: kIsolateIndex
5008 // sp[4 * kSystemPointerSize]: kHolderV2Index
5009 // sp[5 * kSystemPointerSize]: kReturnValueIndex
5010 // sp[6 * kSystemPointerSize]: kDataIndex
5011 // sp[7 * kSystemPointerSize]: kThisIndex / receiver
5012
5013 __ LoadTaggedField(scratch,
5014 FieldMemOperand(callback, AccessorInfo::kDataOffset));
5015 __ LoadRoot(undef, RootIndex::kUndefinedValue);
5016 __ Mov(scratch2, ER::isolate_address());
5017 Register holderV2 = xzr;
5018 __ Push(receiver, scratch, // kThisIndex, kDataIndex
5019 undef, holderV2, // kReturnValueIndex, kHolderV2Index
5020 scratch2, holder); // kIsolateIndex, kHolderIndex
5021
5022 // |name_arg| clashes with |holder|, so we need to push holder first.
5023 __ LoadTaggedField(name_arg,
5024 FieldMemOperand(callback, AccessorInfo::kNameOffset));
5025 static_assert(kDontThrow == 0);
5026 Register should_throw_on_error = xzr; // should_throw_on_error -> kDontThrow
5027 __ Push(should_throw_on_error, name_arg);
5028
5029 __ RecordComment("Load api_function_address");
5030 __ LoadExternalPointerField(
5031 api_function_address,
5032 FieldMemOperand(callback, AccessorInfo::kMaybeRedirectedGetterOffset),
5033 kAccessorInfoGetterTag);
5034
5035 FrameScope frame_scope(masm, StackFrame::MANUAL);
5036 __ EnterExitFrame(scratch, FC::getExtraSlotsCountFrom<ExitFrameConstants>(),
5037 StackFrame::API_ACCESSOR_EXIT);
5038
5039 __ RecordComment("Create v8::PropertyCallbackInfo object on the stack.");
5040 // property_callback_info_arg = v8::PropertyCallbackInfo&
5041 __ Add(property_callback_info_arg, fp, Operand(FC::kArgsArrayOffset));
5042
5043 DCHECK(!AreAliased(api_function_address, property_callback_info_arg, name_arg,
5044 callback, scratch, scratch2));
5045
5046#ifdef V8_ENABLE_DIRECT_HANDLE
5047 // name_arg = Local<Name>(name), name value was pushed to GC-ed stack space.
5048 // |name_arg| is already initialized above.
5049#else
5050 // name_arg = Local<Name>(&name), which is &args_array[kPropertyKeyIndex].
5051 static_assert(PCA::kPropertyKeyIndex == 0);
5052 __ mov(name_arg, property_callback_info_arg);
5053#endif
5054
5055 ExternalReference thunk_ref = ER::invoke_accessor_getter_callback();
5056 // Pass AccessorInfo to thunk wrapper in case profiler or side-effect
5057 // checking is enabled.
5058 Register thunk_arg = callback;
5059
5060 MemOperand return_value_operand = MemOperand(fp, FC::kReturnValueOffset);
5061 static constexpr int kSlotsToDropOnReturn =
5062 FC::kPropertyCallbackInfoArgsLength;
5063 MemOperand* const kUseStackSpaceConstant = nullptr;
5064
5065 const bool with_profiling = true;
5066 CallApiFunctionAndReturn(masm, with_profiling, api_function_address,
5067 thunk_ref, thunk_arg, kSlotsToDropOnReturn,
5068 kUseStackSpaceConstant, return_value_operand);
5069}
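// For reference, a minimal embedder-side sketch of the getter this stub
// calls, using only the public API in include/v8.h (names below are
// illustrative, not taken from this file):
//
//   void MyGetter(v8::Local<v8::Name> name,
//                 const v8::PropertyCallbackInfo<v8::Value>& info) {
//     v8::Isolate* isolate = info.GetIsolate();  // backed by kIsolateIndex
//     v8::Local<v8::Value> data = info.Data();   // backed by kDataIndex
//     // Lands in the kReturnValueIndex slot prepared above.
//     info.GetReturnValue().Set(v8::String::NewFromUtf8Literal(isolate, "x"));
//   }
//
// Such a getter is typically installed with
// v8::ObjectTemplate::SetNativeDataProperty(name, MyGetter).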
5070
5071void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
5072 // The sole purpose of DirectCEntry is for movable callers (e.g. any general
5073 // purpose InstructionStream object) to be able to call into C functions that
5074 // may trigger GC and thus move the caller.
5075 //
5076 // DirectCEntry places the return address on the stack (updated by the GC),
5077 // making the call GC safe. The irregexp backend relies on this.
5078
5079 __ Poke<MacroAssembler::kSignLR>(lr, 0); // Store the return address.
5080 __ Blr(x10); // Call the C++ function.
5081 __ Peek<MacroAssembler::kAuthLR>(lr, 0); // Return to calling code.
5082 __ AssertFPCRState();
5083 __ Ret();
5084}
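// Concretely: because the return address is kept in the stack slot written by
// the Poke above (and not only in lr), a GC triggered by the C++ callee that
// moves the calling InstructionStream can update that slot, and the Peek
// below then returns to the relocated code.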
5085
5086namespace {
5087
5088template <typename RegisterT>
5089void CopyRegListToFrame(MacroAssembler* masm, const Register& dst,
5090 int dst_offset, const CPURegList& reg_list,
5091 const RegisterT& temp0, const RegisterT& temp1,
5092 int src_offset = 0) {
5093 ASM_CODE_COMMENT(masm);
5094 DCHECK_EQ(reg_list.Count() % 2, 0);
5095 UseScratchRegisterScope temps(masm);
5096 CPURegList copy_to_input = reg_list;
5097 int reg_size = reg_list.RegisterSizeInBytes();
5098 DCHECK_EQ(temp0.SizeInBytes(), reg_size);
5099 DCHECK_EQ(temp1.SizeInBytes(), reg_size);
5100
5101 // Compute some temporary addresses to avoid having the macro assembler set
5102 // up a temp with an offset for accesses out of the range of the addressing
5103 // mode.
5104 Register src = temps.AcquireX();
5105 masm->Add(src, sp, src_offset);
5106 masm->Add(dst, dst, dst_offset);
5107
5108 // Write reg_list into the frame pointed to by dst.
5109 for (int i = 0; i < reg_list.Count(); i += 2) {
5110 masm->Ldp(temp0, temp1, MemOperand(src, i * reg_size));
5111
5112 CPURegister reg0 = copy_to_input.PopLowestIndex();
5113 CPURegister reg1 = copy_to_input.PopLowestIndex();
5114 int offset0 = reg0.code() * reg_size;
5115 int offset1 = reg1.code() * reg_size;
5116
5117 // Pair up adjacent stores, otherwise write them separately.
5118 if (offset1 == offset0 + reg_size) {
5119 masm->Stp(temp0, temp1, MemOperand(dst, offset0));
5120 } else {
5121 masm->Str(temp0, MemOperand(dst, offset0));
5122 masm->Str(temp1, MemOperand(dst, offset1));
5123 }
5124 }
5125 masm->Sub(dst, dst, dst_offset);
5126}
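// The pairing rule above, spelled out with a small worked example: registers
// are popped from the list in ascending code order, so two consecutive pops
// hit adjacent frame slots exactly when their codes differ by one (the
// register codes below are illustrative):
//
//   codes {0, 1, 4, 6}, reg_size = 8
//     pair (0, 1): offsets 0 and 8   -> adjacent, emit one Stp at [dst + 0]
//     pair (4, 6): offsets 32 and 48 -> not adjacent, emit Str at [dst + 32]
//                                       and Str at [dst + 48]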
5127
5128void RestoreRegList(MacroAssembler* masm, const CPURegList& reg_list,
5129 const Register& src_base, int src_offset) {
5130 ASM_CODE_COMMENT(masm);
5131 DCHECK_EQ(reg_list.Count() % 2, 0);
5132 UseScratchRegisterScope temps(masm);
5133 CPURegList restore_list = reg_list;
5134 int reg_size = restore_list.RegisterSizeInBytes();
5135
5136 // Compute a temporary address to avoid having the macro assembler set
5137 // up a temp with an offset for accesses out of the range of the addressing
5138 // mode.
5139 Register src = temps.AcquireX();
5140 masm->Add(src, src_base, src_offset);
5141
5142 // No need to restore padreg.
5143 restore_list.Remove(padreg);
5144
5145 // Restore every register in restore_list from src.
5146 while (!restore_list.IsEmpty()) {
5147 CPURegister reg0 = restore_list.PopLowestIndex();
5148 CPURegister reg1 = restore_list.PopLowestIndex();
5149 int offset0 = reg0.code() * reg_size;
5150
5151 if (reg1 == NoCPUReg) {
5152 masm->Ldr(reg0, MemOperand(src, offset0));
5153 break;
5154 }
5155
5156 int offset1 = reg1.code() * reg_size;
5157
5158 // Pair up adjacent loads, otherwise read them separately.
5159 if (offset1 == offset0 + reg_size) {
5160 masm->Ldp(reg0, reg1, MemOperand(src, offset0));
5161 } else {
5162 masm->Ldr(reg0, MemOperand(src, offset0));
5163 masm->Ldr(reg1, MemOperand(src, offset1));
5164 }
5165 }
5166}
5167
5168void Generate_DeoptimizationEntry(MacroAssembler* masm,
5169 DeoptimizeKind deopt_kind) {
5170 Isolate* isolate = masm->isolate();
5171
5172 // TODO(all): This code needs to be revisited. We probably only need to save
5173 // caller-saved registers here. Callee-saved registers can be stored directly
5174 // in the input frame.
5175
5176 // Save all allocatable simd128 / double registers.
5177 CPURegList saved_simd128_registers(
5180 RegisterConfiguration::Default()->allocatable_simd128_codes_mask()));
5181 DCHECK_EQ(saved_simd128_registers.Count() % 2, 0);
5182 __ PushCPURegList(saved_simd128_registers);
5183
5184 // We save all the registers except sp, lr, the platform register (x18) and the
5185 // masm scratches.
5186 CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 28);
5187 saved_registers.Remove(ip0);
5188 saved_registers.Remove(ip1);
5189 saved_registers.Remove(x18);
5190 saved_registers.Combine(fp);
5191 saved_registers.Align();
5192 DCHECK_EQ(saved_registers.Count() % 2, 0);
5193 __ PushCPURegList(saved_registers);
5194
5195 __ Mov(x3, Operand(ExternalReference::Create(
5196 IsolateAddressId::kCEntryFPAddress, isolate)));
5197 __ Str(fp, MemOperand(x3));
5198
5199 const int kSavedRegistersAreaSize =
5200 (saved_registers.Count() * kXRegSize) +
5201 (saved_simd128_registers.Count() * kQRegSize);
5202
5203 // Floating point registers are saved on the stack above core registers.
5204 const int kSimd128RegistersOffset = saved_registers.Count() * kXRegSize;
5205
5206 Register code_object = x2;
5207 Register fp_to_sp = x3;
5208 // Get the address of the location in the code object. This is the return
5209 // address for lazy deoptimization.
5210 __ Mov(code_object, lr);
5211 // Compute the fp-to-sp delta.
5212 __ Add(fp_to_sp, sp, kSavedRegistersAreaSize);
5213 __ Sub(fp_to_sp, fp, fp_to_sp);
5214
5215 // Allocate a new deoptimizer object.
5216 __ Ldr(x1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
5217
5218 // Ensure we can safely load from below fp.
5219 DCHECK_GT(kSavedRegistersAreaSize, -StandardFrameConstants::kFunctionOffset);
5220 __ Ldr(x0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
5221
5222 // If x1 is a smi, zero x0.
5223 __ Tst(x1, kSmiTagMask);
5224 __ CzeroX(x0, eq);
5225
5226 __ Mov(x1, static_cast<int>(deopt_kind));
5227 // Following arguments are already loaded:
5228 // - x2: code object address
5229 // - x3: fp-to-sp delta
5230 __ Mov(x4, ExternalReference::isolate_address());
5231
5232 {
5233 // Call Deoptimizer::New().
5234 AllowExternalCallThatCantCauseGC scope(masm);
5235 __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
5236 }
5237
5238 // Preserve "deoptimizer" object in register x0.
5239 Register deoptimizer = x0;
5240
5241 // Get the input frame descriptor pointer.
5242 __ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));
5243
5244 // Copy core registers into the input frame.
5245 CopyRegListToFrame(masm, x1, FrameDescription::registers_offset(),
5246 saved_registers, x2, x3);
5247
5248 // Copy simd128 / double registers to the input frame.
5249 CopyRegListToFrame(masm, x1, FrameDescription::simd128_registers_offset(),
5250 saved_simd128_registers, q2, q3, kSimd128RegistersOffset);
5251
5252 // Mark the stack as not iterable for the CPU profiler which won't be able to
5253 // walk the stack without the return address.
5254 {
5255 UseScratchRegisterScope temps(masm);
5256 Register is_iterable = temps.AcquireX();
5257 __ LoadIsolateField(is_iterable, IsolateFieldId::kStackIsIterable);
5258 __ strb(xzr, MemOperand(is_iterable));
5259 }
5260
5261 // Remove the saved registers from the stack.
5262 DCHECK_EQ(kSavedRegistersAreaSize % kXRegSize, 0);
5263 __ Drop(kSavedRegistersAreaSize / kXRegSize);
5264
5265 // Compute a pointer to the unwinding limit in register x2; that is
5266 // the first stack slot not part of the input frame.
5267 Register unwind_limit = x2;
5268 __ Ldr(unwind_limit, MemOperand(x1, FrameDescription::frame_size_offset()));
5269
5270 // Unwind the stack down to - but not including - the unwinding
5271 // limit and copy the contents of the activation frame to the input
5272 // frame description.
5273 __ Add(x3, x1, FrameDescription::frame_content_offset());
5274 __ SlotAddress(x1, 0);
5275 __ Lsr(unwind_limit, unwind_limit, kSystemPointerSizeLog2);
5276 __ Mov(x5, unwind_limit);
5277 __ CopyDoubleWords(x3, x1, x5);
5278 // Since {unwind_limit} is the frame size up to the parameter count, we might
5279 // end up with an unaligned stack pointer. This is later recovered when
5280 // setting the stack pointer to {caller_frame_top_offset}.
5281 __ Bic(unwind_limit, unwind_limit, 1);
5282 __ Drop(unwind_limit);
5283
5284 // Compute the output frame in the deoptimizer.
5285 __ Push(padreg, x0); // Preserve deoptimizer object across call.
5286 {
5287 // Call Deoptimizer::ComputeOutputFrames().
5288 AllowExternalCallThatCantCauseGC scope(masm);
5289 __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
5290 }
5291 __ Pop(x4, padreg); // Restore deoptimizer object (class Deoptimizer).
5292
5293 {
5294 UseScratchRegisterScope temps(masm);
5295 Register scratch = temps.AcquireX();
5296 __ Ldr(scratch, MemOperand(x4, Deoptimizer::caller_frame_top_offset()));
5297 __ Mov(sp, scratch);
5298 }
5299
5300 // Replace the current (input) frame with the output frames.
5301 Label outer_push_loop, outer_loop_header;
5304 __ Add(x1, x0, Operand(x1, LSL, kSystemPointerSizeLog2));
5305 __ B(&outer_loop_header);
5306
5307 __ Bind(&outer_push_loop);
5308 Register current_frame = x2;
5309 Register frame_size = x3;
5310 __ Ldr(current_frame, MemOperand(x0, kSystemPointerSize, PostIndex));
5311 __ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
5312 __ Lsr(frame_size, x3, kSystemPointerSizeLog2);
5313 __ Claim(frame_size, kXRegSize, /*assume_sp_aligned=*/false);
5314
5315 __ Add(x7, current_frame, FrameDescription::frame_content_offset());
5316 __ SlotAddress(x6, 0);
5317 __ CopyDoubleWords(x6, x7, frame_size);
5318
5319 __ Bind(&outer_loop_header);
5320 __ Cmp(x0, x1);
5321 __ B(lt, &outer_push_loop);
5322
5323 RestoreRegList(masm, saved_simd128_registers, current_frame,
5324 FrameDescription::simd128_registers_offset());
5325
5326 {
5327 UseScratchRegisterScope temps(masm);
5328 Register is_iterable = temps.AcquireX();
5329 Register one = x4;
5330 __ LoadIsolateField(is_iterable, IsolateFieldId::kStackIsIterable);
5331 __ Mov(one, Operand(1));
5332 __ strb(one, MemOperand(is_iterable));
5333 }
5334
5335 // TODO(all): ARM copies a lot (if not all) of the last output frame onto the
5336 // stack, then pops it all into registers. Here, we try to load it directly
5337 // into the relevant registers. Is this correct? If so, we should improve the
5338 // ARM code.
5339
5340 // Restore registers from the last output frame.
5341 // Note that lr is not in the list of saved_registers and will be restored
5342 // later. We can use it to hold the address of the last output frame while
5343 // reloading the other registers.
5344 DCHECK(!saved_registers.IncludesAliasOf(lr));
5345 Register last_output_frame = lr;
5346 __ Mov(last_output_frame, current_frame);
5347
5348 RestoreRegList(masm, saved_registers, last_output_frame,
5349 FrameDescription::registers_offset());
5350
5351 UseScratchRegisterScope temps(masm);
5352 temps.Exclude(x17);
5353 Register continuation = x17;
5354 __ Ldr(continuation, MemOperand(last_output_frame,
5355 FrameDescription::continuation_offset()));
5356 __ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
5357#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
5358 __ Autibsp();
5359#endif
5360 // If the continuation is non-zero (JavaScript), branch to the continuation.
5361 // For Wasm just return to the pc from the last output frame in the lr
5362 // register.
5363 Label end;
5364 __ CompareAndBranch(continuation, 0, eq, &end);
5365 __ Br(continuation);
5366 __ Bind(&end);
5367 __ Ret();
5368}
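// Recap of the hand-off above: the saved register state plus the unwound
// activation frame become the Deoptimizer's single input FrameDescription;
// Deoptimizer::ComputeOutputFrames() (a C++ call) translates that into one or
// more output FrameDescriptions; the outer_push_loop then claims stack space
// for each output frame in turn and copies its frame_content onto the real
// stack, so the frame processed last ends up nearest the new sp. Registers,
// lr and the continuation pc are finally reloaded from that last output
// frame.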
5369
5370} // namespace
5371
5372void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
5373 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
5374}
5375
5376void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
5377 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
5378}
5379
5380// If there is baseline code on the shared function info, converts an
5381// interpreter frame into a baseline frame and continues execution in baseline
5382// code. Otherwise execution continues with bytecode.
5383void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
5384 MacroAssembler* masm) {
5385 Label start;
5386 __ bind(&start);
5387
5388 // Get function from the frame.
5389 Register closure = x1;
5390 __ Ldr(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
5391
5392 // Get the InstructionStream object from the shared function info.
5393 Register code_obj = x22;
5394 __ LoadTaggedField(
5395 code_obj,
5396 FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
5397
5398 ResetSharedFunctionInfoAge(masm, code_obj);
5399
5400 __ LoadTrustedPointerField(
5401 code_obj,
5402 FieldMemOperand(code_obj, SharedFunctionInfo::kTrustedFunctionDataOffset),
5404
5405 // For OSR entry it is safe to assume we always have baseline code.
5406 if (v8_flags.debug_code) {
5407 __ IsObjectType(code_obj, x3, x3, CODE_TYPE);
5408 __ Assert(eq, AbortReason::kExpectedBaselineData);
5409 AssertCodeIsBaseline(masm, code_obj, x3);
5410 }
5411
5412 // Load the feedback cell and vector.
5413 Register feedback_cell = x2;
5414 Register feedback_vector = x15;
5415 __ LoadTaggedField(feedback_cell,
5416 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
5417 __ LoadTaggedField(
5418 feedback_vector,
5419 FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset));
5420
5421 Label install_baseline_code;
5422 // Check if feedback vector is valid. If not, call prepare for baseline to
5423 // allocate it.
5424 __ IsObjectType(feedback_vector, x3, x3, FEEDBACK_VECTOR_TYPE);
5425 __ B(ne, &install_baseline_code);
5426
5427 // Save BytecodeOffset from the stack frame.
5430 // Replace bytecode offset with feedback cell.
5433 __ Str(feedback_cell,
5435 feedback_cell = no_reg;
5436 // Update feedback vector cache.
5439 __ Str(feedback_vector,
5441 feedback_vector = no_reg;
5442
5443 // Compute baseline pc for bytecode offset.
5444 Register get_baseline_pc = x3;
5445 __ Mov(get_baseline_pc,
5446 ExternalReference::baseline_pc_for_next_executed_bytecode());
5447
5450
5451 // Get bytecode array from the stack frame.
5454 // Save the accumulator register, since it's clobbered by the below call.
5456 {
5457 __ Mov(kCArgRegs[0], code_obj);
5460 FrameScope scope(masm, StackFrame::INTERNAL);
5461 __ CallCFunction(get_baseline_pc, 3, 0);
5462 }
5463 __ LoadCodeInstructionStart(code_obj, code_obj, kJSEntrypointTag);
5464 __ Add(code_obj, code_obj, kReturnRegister0);
5466
5467 Generate_OSREntry(masm, code_obj);
5468 __ Trap(); // Unreachable.
5469
5470 __ bind(&install_baseline_code);
5471 {
5472 FrameScope scope(masm, StackFrame::INTERNAL);
5474 __ PushArgument(closure);
5475 __ CallRuntime(Runtime::kInstallBaselineCode, 1);
5477 }
5478 // Retry from the start after installing baseline code.
5479 __ B(&start);
5480}
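// Recap of the flow above: if the closure already has a FeedbackVector, the
// interpreter frame is converted in place (the bytecode offset and bytecode
// array slots are replaced by the feedback cell and vector), the matching pc
// in the baseline code is obtained via
// ExternalReference::baseline_pc_for_next_executed_bytecode(), and execution
// jumps there through Generate_OSREntry. Otherwise Runtime::kInstallBaselineCode
// is called and the whole sequence is retried from |start|.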
5481
5482void Builtins::Generate_RestartFrameTrampoline(MacroAssembler* masm) {
5483 // Frame is being dropped:
5484 // - Look up current function on the frame.
5485 // - Leave the frame.
5486 // - Restart the frame by calling the function.
5487
5488 __ Ldr(x1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
5489 __ Ldr(x0, MemOperand(fp, StandardFrameConstants::kArgCOffset));
5490
5491 __ LeaveFrame(StackFrame::INTERPRETED);
5492
5493 // The arguments are already on the stack (including any necessary padding),
5494 // so we should not try to massage them again.
5495#ifdef V8_ENABLE_LEAPTIERING
5496 __ InvokeFunction(x1, x0, InvokeType::kJump,
5498#else
5499 __ Mov(x2, kDontAdaptArgumentsSentinel);
5500 __ InvokeFunction(x1, x2, x0, InvokeType::kJump);
5501#endif
5502}
5503
5504#undef __
5505
5506} // namespace internal
5507} // namespace v8
5508
5509 #endif // V8_TARGET_ARCH_ARM64