v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
builtins-mips64.cc
1// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#if V8_TARGET_ARCH_MIPS64
6
11#include "src/debug/debug.h"
16// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
20#include "src/heap/heap-inl.h"
21#include "src/objects/cell.h"
22#include "src/objects/foreign.h"
26#include "src/objects/smi.h"
27#include "src/runtime/runtime.h"
28
29#if V8_ENABLE_WEBASSEMBLY
33#endif // V8_ENABLE_WEBASSEMBLY
34
35namespace v8 {
36namespace internal {
37
38#define __ ACCESS_MASM(masm)
39
40void Builtins::Generate_Adaptor(MacroAssembler* masm,
41 int formal_parameter_count, Address address) {
43 __ TailCallBuiltin(
44 Builtins::AdaptorWithBuiltinExitFrame(formal_parameter_count));
45}
46
47namespace {
48
49enum class ArgumentsElementType {
50 kRaw, // Push arguments as they are.
51 kHandle // Dereference arguments before pushing.
52};
53
54void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
55 Register scratch, Register scratch2,
56 ArgumentsElementType element_type) {
57 DCHECK(!AreAliased(array, argc, scratch));
58 Label loop, entry;
59 __ Dsubu(scratch, argc, Operand(kJSArgcReceiverSlots));
60 __ Branch(&entry);
61 __ bind(&loop);
62 __ Dlsa(scratch2, array, scratch, kSystemPointerSizeLog2);
63 __ Ld(scratch2, MemOperand(scratch2));
64 if (element_type == ArgumentsElementType::kHandle) {
65 __ Ld(scratch2, MemOperand(scratch2));
66 }
67 __ push(scratch2);
68 __ bind(&entry);
69 __ Daddu(scratch, scratch, Operand(-1));
70 __ Branch(&loop, greater_equal, scratch, Operand(zero_reg));
71}
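
The loop above pushes the arguments from array onto the stack in reverse index order (highest index first), skipping the receiver slot(s) counted in argc, and dereferencing each element once when the elements are handles. A minimal standalone C++ sketch of the same logic, not part of this file, assuming a receiver-slot count of 1 and using a std::vector as the downward-growing stack:

#include <cstdint>
#include <vector>

enum class ArgumentsElementType { kRaw, kHandle };

constexpr int kJSArgcReceiverSlots = 1;  // assumption for this sketch

// Mirrors the Dsubu / Dlsa / Ld / push loop of Generate_PushArguments.
void PushArgumentsModel(const intptr_t* array, int argc,
                        ArgumentsElementType element_type,
                        std::vector<intptr_t>* stack) {
  for (int i = argc - kJSArgcReceiverSlots - 1; i >= 0; --i) {
    intptr_t value = array[i];  // Ld(scratch2, array + i * kSystemPointerSize)
    if (element_type == ArgumentsElementType::kHandle) {
      // A handle is a pointer to the actual value; dereference once more.
      value = *reinterpret_cast<const intptr_t*>(value);
    }
    stack->push_back(value);    // push(scratch2)
  }
}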
72
73void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
74 // ----------- S t a t e -------------
75 // -- a0 : number of arguments
76 // -- a1 : constructor function
77 // -- a3 : new target
78 // -- cp : context
79 // -- ra : return address
80 // -- sp[...]: constructor arguments
81 // -----------------------------------
82
83 // Enter a construct frame.
84 {
85 FrameScope scope(masm, StackFrame::CONSTRUCT);
86
87 // Preserve the incoming parameters on the stack.
88 __ Push(cp, a0);
89
90 // Set up pointer to first argument (skip receiver).
91 __ Daddu(
92 t2, fp,
94 // Copy arguments and receiver to the expression stack.
95 // t2: Pointer to start of arguments.
96 // a0: Number of arguments.
97 Generate_PushArguments(masm, t2, a0, t3, t0, ArgumentsElementType::kRaw);
98 // The receiver for the builtin/api call.
99 __ PushRoot(RootIndex::kTheHoleValue);
100
101 // Call the function.
102 // a0: number of arguments (untagged)
103 // a1: constructor function
104 // a3: new target
105 __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
106
107 // Restore context from the frame.
109 // Restore arguments count from the frame.
111 // Leave construct frame.
112 }
113
114 // Remove caller arguments from the stack and return.
115 __ DropArguments(t3);
116 __ Ret();
117}
118
119} // namespace
120
121// The construct stub for ES5 constructor functions and ES6 class constructors.
122void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
123 // ----------- S t a t e -------------
124 // -- a0: number of arguments (untagged)
125 // -- a1: constructor function
126 // -- a3: new target
127 // -- cp: context
128 // -- ra: return address
129 // -- sp[...]: constructor arguments
130 // -----------------------------------
131
132 // Enter a construct frame.
133 FrameScope scope(masm, StackFrame::MANUAL);
134 Label post_instantiation_deopt_entry, not_create_implicit_receiver;
135 __ EnterFrame(StackFrame::CONSTRUCT);
136
137 // Preserve the incoming parameters on the stack.
138 __ Push(cp, a0, a1);
139 __ PushRoot(RootIndex::kUndefinedValue);
140 __ Push(a3);
141
142 // ----------- S t a t e -------------
143 // -- sp[0*kSystemPointerSize]: new target
144 // -- sp[1*kSystemPointerSize]: padding
145 // -- a1 and sp[2*kSystemPointerSize]: constructor function
146 // -- sp[3*kSystemPointerSize]: number of arguments
147 // -- sp[4*kSystemPointerSize]: context
148 // -----------------------------------
149
150 __ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
151 __ lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
152 __ DecodeField<SharedFunctionInfo::FunctionKindBits>(t2);
153 __ JumpIfIsInRange(
154 t2, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
155 static_cast<uint32_t>(FunctionKind::kDerivedConstructor),
156 &not_create_implicit_receiver);
157
158 // If not derived class constructor: Allocate the new receiver object.
159 __ CallBuiltin(Builtin::kFastNewObject);
160 __ Branch(&post_instantiation_deopt_entry);
161
162 // Else: use TheHoleValue as receiver for constructor call
163 __ bind(&not_create_implicit_receiver);
164 __ LoadRoot(v0, RootIndex::kTheHoleValue);
165
166 // ----------- S t a t e -------------
167 // -- v0: receiver
168 // -- Slot 4 / sp[0*kSystemPointerSize]: new target
169 // -- Slot 3 / sp[1*kSystemPointerSize]: padding
170 // -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
171 // -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments
172 // -- Slot 0 / sp[4*kSystemPointerSize]: context
173 // -----------------------------------
174 // Deoptimizer enters here.
175 masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
176 masm->pc_offset());
177 __ bind(&post_instantiation_deopt_entry);
178
179 // Restore new target.
180 __ Pop(a3);
181
182 // Push the allocated receiver to the stack.
183 __ Push(v0);
184
185 // We need two copies because we may have to return the original one
186 // and the calling conventions dictate that the called function pops the
187 // receiver. The second copy is pushed after the arguments; we save it in a6,
188 // since v0 will hold the return value of the call.
189 __ mov(a6, v0);
190
191 // Set up pointer to last argument.
192 __ Daddu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset +
194
195 // ----------- S t a t e -------------
196 // -- a3: new target
197 // -- sp[0*kSystemPointerSize]: implicit receiver
198 // -- sp[1*kSystemPointerSize]: implicit receiver
199 // -- sp[2*kSystemPointerSize]: padding
200 // -- sp[3*kSystemPointerSize]: constructor function
201 // -- sp[4*kSystemPointerSize]: number of arguments
202 // -- sp[5*kSystemPointerSize]: context
203 // -----------------------------------
204
205 // Restore constructor function and argument count.
208
209 Label stack_overflow;
210 __ StackOverflowCheck(a0, t0, t1, &stack_overflow);
211
212 // TODO(victorgomes): When the arguments adaptor is completely removed, we
213 // should get the formal parameter count and copy the arguments in its
214 // correct position (including any undefined), instead of delaying this to
215 // InvokeFunction.
216
217 // Copy arguments and receiver to the expression stack.
218 // t2: Pointer to start of argument.
219 // a0: Number of arguments.
220 Generate_PushArguments(masm, t2, a0, t0, t1, ArgumentsElementType::kRaw);
221 // We need two copies because we may have to return the original one
222 // and the calling conventions dictate that the called function pops the
223 // receiver. The second copy is pushed after the arguments.
224 __ Push(a6);
225
226 // Call the function.
227 __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
228
229 // If the result is an object (in the ECMA sense), we should get rid
230 // of the receiver and use the result; see ECMA-262 section 13.2.2-7
231 // on page 74.
232 Label use_receiver, do_throw, leave_and_return, check_receiver;
233
234 // If the result is undefined, we jump out to using the implicit receiver.
235 __ JumpIfNotRoot(v0, RootIndex::kUndefinedValue, &check_receiver);
236
237 // Otherwise we do a smi check and fall through to check if the return value
238 // is a valid receiver.
239
240 // Throw away the result of the constructor invocation and use the
241 // on-stack receiver as the result.
242 __ bind(&use_receiver);
243 __ Ld(v0, MemOperand(sp, 0 * kSystemPointerSize));
244 __ JumpIfRoot(v0, RootIndex::kTheHoleValue, &do_throw);
245
246 __ bind(&leave_and_return);
247 // Restore arguments count from the frame.
249 // Leave construct frame.
250 __ LeaveFrame(StackFrame::CONSTRUCT);
251
252 // Remove caller arguments from the stack and return.
253 __ DropArguments(a1);
254 __ Ret();
255
256 __ bind(&check_receiver);
257 __ JumpIfSmi(v0, &use_receiver);
258
259 // If the type of the result (stored in its map) is less than
260 // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
261 __ GetObjectType(v0, t2, t2);
262 static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
263 __ Branch(&leave_and_return, greater_equal, t2,
264 Operand(FIRST_JS_RECEIVER_TYPE));
265 __ Branch(&use_receiver);
266
267 __ bind(&do_throw);
268 // Restore the context from the frame.
270 __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
271 __ break_(0xCC);
272
273 __ bind(&stack_overflow);
274 // Restore the context from the frame.
276 __ CallRuntime(Runtime::kThrowStackOverflow);
277 __ break_(0xCC);
278}
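
For reference, the receiver-vs-result selection that the checks around use_receiver, check_receiver and do_throw implement can be summarized in a small standalone sketch (not V8 code; Value and its flags are placeholders for the tagged-value and map instance-type checks):

#include <stdexcept>

struct Value {
  bool is_undefined = false;
  bool is_smi = false;
  bool is_js_receiver = false;  // instance type >= FIRST_JS_RECEIVER_TYPE
  bool is_the_hole = false;
};

// Mirrors the control flow after InvokeFunctionWithNewTarget above.
Value SelectConstructResult(const Value& call_result,
                            const Value& on_stack_receiver) {
  if (!call_result.is_undefined && !call_result.is_smi &&
      call_result.is_js_receiver) {
    return call_result;  // leave_and_return with the constructor's result
  }
  if (on_stack_receiver.is_the_hole) {
    // do_throw: Runtime::kThrowConstructorReturnedNonObject
    throw std::runtime_error("constructor returned non-object");
  }
  return on_stack_receiver;  // use_receiver
}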
279
280void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
281 Generate_JSBuiltinsConstructStubHelper(masm);
282}
283
284static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
285 Register scratch) {
286 DCHECK(!AreAliased(code, scratch));
287 // Verify that the code kind is baseline code via the CodeKind.
288 __ Ld(scratch, FieldMemOperand(code, Code::kFlagsOffset));
289 __ DecodeField<Code::KindField>(scratch);
290 __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
291 Operand(static_cast<int>(CodeKind::BASELINE)));
292}
293
294// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
295// the more general dispatch.
297 MacroAssembler* masm, Register sfi, Register bytecode, Register scratch1,
298 Label* is_baseline, Label* is_unavailable) {
299 Label done;
300
301 Register data = bytecode;
302 __ Ld(data,
303 FieldMemOperand(sfi, SharedFunctionInfo::kTrustedFunctionDataOffset));
304
305
306 __ GetObjectType(data, scratch1, scratch1);
307
308#ifndef V8_JITLESS
309 if (v8_flags.debug_code) {
310 Label not_baseline;
311 __ Branch(&not_baseline, ne, scratch1, Operand(CODE_TYPE));
312 AssertCodeIsBaseline(masm, data, scratch1);
313 __ Branch(is_baseline);
314 __ bind(&not_baseline);
315 } else {
316 __ Branch(is_baseline, eq, scratch1, Operand(CODE_TYPE));
317 }
318#endif // !V8_JITLESS
319
320 __ Branch(&done, eq, scratch1, Operand(BYTECODE_ARRAY_TYPE));
321
322 __ Branch(is_unavailable, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
323 __ Ld(data, FieldMemOperand(data, InterpreterData::kBytecodeArrayOffset));
324 __ bind(&done);
325}
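
The helper classifies the shared function info's trusted function data: baseline Code, a BytecodeArray, or InterpreterData wrapping a BytecodeArray; anything else means bytecode is unavailable. A rough standalone sketch (not V8 code, placeholder types, and with the V8_JITLESS special-casing of the Code check omitted):

enum class DataType { kCode, kBytecodeArray, kInterpreterData, kOther };
enum class Outcome { kIsBaseline, kHasBytecode, kUnavailable };

struct FunctionData {
  DataType type;
  const FunctionData* interpreter_bytecode;  // set for kInterpreterData
};

// Mirrors the GetObjectType / Branch chain above.
Outcome ClassifyFunctionData(const FunctionData& data,
                             const FunctionData** bytecode_out) {
  switch (data.type) {
    case DataType::kCode:             // baseline code: jump to is_baseline
      return Outcome::kIsBaseline;
    case DataType::kBytecodeArray:    // bytecode directly available
      *bytecode_out = &data;
      return Outcome::kHasBytecode;
    case DataType::kInterpreterData:  // unwrap kBytecodeArrayOffset
      *bytecode_out = data.interpreter_bytecode;
      return Outcome::kHasBytecode;
    default:                          // jump to is_unavailable
      return Outcome::kUnavailable;
  }
}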
326
327// static
328void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
329 // ----------- S t a t e -------------
330 // -- v0 : the value to pass to the generator
331 // -- a1 : the JSGeneratorObject to resume
332 // -- ra : return address
333 // -----------------------------------
334 // Store input value into generator object.
335 __ Sd(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
336 __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3,
338 // Check that a1 is still valid; RecordWrite might have clobbered it.
339 __ AssertGeneratorObject(a1);
340
341 // Load suspended function and context.
342 __ Ld(a5, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
343 __ Ld(cp, FieldMemOperand(a5, JSFunction::kContextOffset));
344
345 // Flood function if we are stepping.
346 Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
347 Label stepping_prepared;
348 ExternalReference debug_hook =
349 ExternalReference::debug_hook_on_function_call_address(masm->isolate());
350 __ li(a6, debug_hook);
351 __ Lb(a6, MemOperand(a6));
352 __ Branch(&prepare_step_in_if_stepping, ne, a6, Operand(zero_reg));
353
354 // Flood function if we need to continue stepping in the suspended generator.
355 ExternalReference debug_suspended_generator =
356 ExternalReference::debug_suspended_generator_address(masm->isolate());
357 __ li(a6, debug_suspended_generator);
358 __ Ld(a6, MemOperand(a6));
359 __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(a6));
360 __ bind(&stepping_prepared);
361
362 // Check the stack for overflow. We are not trying to catch interruptions
363 // (i.e. debug break and preemption) here, so check the "real stack limit".
364 Label stack_overflow;
365 __ LoadStackLimit(kScratchReg,
366 MacroAssembler::StackLimitKind::kRealStackLimit);
367 __ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
368
369 // ----------- S t a t e -------------
370 // -- a1 : the JSGeneratorObject to resume
371 // -- a4 : generator function
372 // -- cp : generator context
373 // -- ra : return address
374 // -----------------------------------
375
376 // Push holes for arguments to generator function. Since the parser forced
377 // context allocation for any variables in generators, the actual argument
378 // values have already been copied into the context and these dummy values
379 // will never be used.
381#if V8_ENABLE_LEAPTIERING
384 __ Lw(dispatch_handle,
385 FieldMemOperand(a5, JSFunction::kDispatchHandleOffset));
386 __ LoadEntrypointAndParameterCountFromJSDispatchTable(code, argc,
387 dispatch_handle, t3);
388 // In case the formal parameter count is kDontAdaptArgumentsSentinel, the
389 // actual arguments count should be set accordingly.
391 __ li(t1, Operand(JSParameterCount(0)));
392 __ slt(t3, argc, t1);
393 __ movn(argc, t1, t3);
394
395#else
396 __ Ld(argc, FieldMemOperand(a5, JSFunction::kSharedFunctionInfoOffset));
397 __ Lhu(argc, FieldMemOperand(
398 argc, SharedFunctionInfo::kFormalParameterCountOffset));
399#endif // V8_ENABLE_LEAPTIERING
400 {
401 Label done_loop, loop;
402 __ Dsubu(a3, argc, Operand(kJSArgcReceiverSlots));
403 __ Ld(t1, FieldMemOperand(
404 a1, JSGeneratorObject::kParametersAndRegistersOffset));
405 __ bind(&loop);
406 __ Dsubu(a3, a3, Operand(1));
407 __ Branch(&done_loop, lt, a3, Operand(zero_reg));
409 __ Ld(kScratchReg,
411 __ Push(kScratchReg);
412 __ Branch(&loop);
413 __ bind(&done_loop);
414 // Push receiver.
415 __ Ld(kScratchReg, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
416 __ Push(kScratchReg);
417 }
418
419 // Underlying function needs to have bytecode available.
420 if (v8_flags.debug_code) {
421 Label is_baseline, is_unavailable, ok;
422 __ Ld(a3, FieldMemOperand(a5, JSFunction::kSharedFunctionInfoOffset));
423 GetSharedFunctionInfoBytecodeOrBaseline(masm, a3, a3, t3, &is_baseline,
424 &is_unavailable);
425 __ jmp(&ok);
426
427 __ bind(&is_unavailable);
428 __ Abort(AbortReason::kMissingBytecodeArray);
429
430 __ bind(&is_baseline);
431 __ GetObjectType(a3, a3, a3);
432 __ Assert(eq, AbortReason::kMissingBytecodeArray, a3, Operand(CODE_TYPE));
433
434 __ bind(&ok);
435 }
436
437 // Resume (Ignition/TurboFan) generator object.
438 {
439 // We abuse new.target both to indicate that this is a resume call and to
440 // pass in the generator object. In ordinary calls, new.target is always
441 // undefined because generator functions are non-constructable.
442 __ Move(a3, a1);
443 __ Move(a1, a5);
444#if V8_ENABLE_LEAPTIERING
445 __ Jump(code);
446#else
447 // Actual arguments count is already initialized above.
448 __ JumpJSFunction(a1);
449#endif // V8_ENABLE_LEAPTIERING
450 }
451
452 __ bind(&prepare_step_in_if_stepping);
453 {
454 FrameScope scope(masm, StackFrame::INTERNAL);
455 __ Push(a1, a5);
456 // Push hole as receiver since we do not use it for stepping.
457 __ PushRoot(RootIndex::kTheHoleValue);
458 __ CallRuntime(Runtime::kDebugOnFunctionCall);
459 __ Pop(a1);
460 }
461 __ Branch(USE_DELAY_SLOT, &stepping_prepared);
462 __ Ld(a5, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
463
464 __ bind(&prepare_step_in_suspended_generator);
465 {
466 FrameScope scope(masm, StackFrame::INTERNAL);
467 __ Push(a1);
468 __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
469 __ Pop(a1);
470 }
471 __ Branch(USE_DELAY_SLOT, &stepping_prepared);
472 __ Ld(a5, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
473
474 __ bind(&stack_overflow);
475 {
476 FrameScope scope(masm, StackFrame::INTERNAL);
477 __ CallRuntime(Runtime::kThrowStackOverflow);
478 __ break_(0xCC); // This should be unreachable.
479 }
480}
481
482void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
483 FrameScope scope(masm, StackFrame::INTERNAL);
484 __ Push(a1);
485 __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
486}
487
488// Clobbers scratch1 and scratch2; preserves all other registers.
489static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
490 Register scratch1, Register scratch2) {
491 // Check the stack for overflow. We are not trying to catch
492 // interruptions (e.g. debug break and preemption) here, so the "real stack
493 // limit" is checked.
494 Label okay;
495 __ LoadStackLimit(scratch1, MacroAssembler::StackLimitKind::kRealStackLimit);
496 // Make scratch1 the space we have left. The stack might already be overflowed
497 // here, which will cause scratch1 to become negative.
498 __ dsubu(scratch1, sp, scratch1);
499 // Check if the arguments will overflow the stack.
500 __ dsll(scratch2, argc, kSystemPointerSizeLog2);
501 __ Branch(&okay, gt, scratch1, Operand(scratch2)); // Signed comparison.
502
503 // Out of stack space.
504 __ CallRuntime(Runtime::kThrowStackOverflow);
505
506 __ bind(&okay);
507}
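
The arithmetic of this check is a signed comparison of the remaining stack space against the bytes needed for the arguments; a standalone sketch (not V8 code):

#include <cstdint>

constexpr int kSystemPointerSizeLog2 = 3;  // 64-bit slots on mips64

// Returns true when pushing `argc` pointer-sized slots would overflow.
bool WouldOverflowStack(intptr_t sp, intptr_t real_stack_limit, intptr_t argc) {
  intptr_t space_left = sp - real_stack_limit;   // may already be negative
  intptr_t bytes_needed = argc << kSystemPointerSizeLog2;
  return !(space_left > bytes_needed);           // signed, as in the Branch above
}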
508
509namespace {
510
511// Called with the native C calling convention. The corresponding function
512// signature is either:
513//
514// using JSEntryFunction = GeneratedCode<Address(
515// Address root_register_value, Address new_target, Address target,
516// Address receiver, intptr_t argc, Address** args)>;
517// or
518// using JSEntryFunction = GeneratedCode<Address(
519// Address root_register_value, MicrotaskQueue* microtask_queue)>;
520void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
521 Builtin entry_trampoline) {
522 Label invoke, handler_entry, exit;
523
524 {
525 NoRootArrayScope no_root_array(masm);
526
527 // TODO(plind): unify the ABI description here.
528 // Registers:
529 // either
530 // a0: root register value
531 // a1: entry address
532 // a2: function
533 // a3: receiver
534 // a4: argc
535 // a5: argv
536 // or
537 // a0: root register value
538 // a1: microtask_queue
539 //
540 // Stack:
541 // 0 arg slots on mips64 (4 arg slots on mips)
542
543 // Save callee saved registers on the stack.
544 __ MultiPush(kCalleeSaved | ra);
545
546 // Save callee-saved FPU registers.
547 __ MultiPushFPU(kCalleeSavedFPU);
548 // Set up the reserved register for 0.0.
549 __ Move(kDoubleRegZero, 0.0);
550
551 // Initialize the root register.
552 // C calling convention. The first argument is passed in a0.
553 __ mov(kRootRegister, a0);
554 }
555
556 // a1: entry address
557 // a2: function
558 // a3: receiver
559 // a4: argc
560 // a5: argv
561
562 // We build an EntryFrame.
563 __ li(s1, Operand(-1)); // Push a bad frame pointer to fail if it is used.
564 __ li(s2, Operand(StackFrame::TypeToMarker(type)));
565 __ li(s3, Operand(StackFrame::TypeToMarker(type)));
566 ExternalReference c_entry_fp = ExternalReference::Create(
567 IsolateAddressId::kCEntryFPAddress, masm->isolate());
568 __ li(s5, c_entry_fp);
569 __ Ld(s4, MemOperand(s5));
570 __ Push(s1, s2, s3, s4);
571
572 // Clear c_entry_fp, now we've pushed its previous value to the stack.
573 // If the c_entry_fp is not already zero and we don't clear it, the
574 // StackFrameIteratorForProfiler will assume we are executing C++ and miss the
575 // JS frames on top.
576 __ Sd(zero_reg, MemOperand(s5));
577
578 __ LoadIsolateField(s1, IsolateFieldId::kFastCCallCallerFP);
579 __ Ld(s2, MemOperand(s1, 0));
580 __ Sd(zero_reg, MemOperand(s1, 0));
581 __ LoadIsolateField(s1, IsolateFieldId::kFastCCallCallerPC);
582 __ Ld(s3, MemOperand(s1, 0));
583 __ Sd(zero_reg, MemOperand(s1, 0));
584 __ Push(s2, s3);
585
586 // Set up frame pointer for the frame to be pushed.
588
589 // Registers:
590 // either
591 // a1: entry address
592 // a2: function
593 // a3: receiver
594 // a4: argc
595 // a5: argv
596 // or
597 // a1: microtask_queue
598 //
599 // Stack:
600 // fast api call pc |
601 // fast api call fp |
602 // C entry FP |
603 // function slot | entry frame
604 // context slot |
605 // bad fp (0xFF...F) |
606 // callee saved registers + ra
607
608 // If this is the outermost JS call, set js_entry_sp value.
609 Label non_outermost_js;
610 ExternalReference js_entry_sp = ExternalReference::Create(
611 IsolateAddressId::kJSEntrySPAddress, masm->isolate());
612 __ li(s1, js_entry_sp);
613 __ Ld(s2, MemOperand(s1));
614 __ Branch(&non_outermost_js, ne, s2, Operand(zero_reg));
615 __ Sd(fp, MemOperand(s1));
617 Label cont;
618 __ b(&cont);
619 __ nop(); // Branch delay slot nop.
620 __ bind(&non_outermost_js);
621 __ li(s3, Operand(StackFrame::INNER_JSENTRY_FRAME));
622 __ bind(&cont);
623 __ push(s3);
624
625 // Jump to a faked try block that does the invoke, with a faked catch
626 // block that sets the exception.
627 __ jmp(&invoke);
628 __ bind(&handler_entry);
629
630 // Store the current pc as the handler offset. It's used later to create the
631 // handler table.
632 masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
633
634 // Caught exception: Store result (exception) in the exception
635 // field in the JSEnv and return a failure sentinel. Coming in here the
636 // fp will be invalid because the PushStackHandler below sets it to 0 to
637 // signal the existence of the JSEntry frame.
638 __ li(s1, ExternalReference::Create(IsolateAddressId::kExceptionAddress,
639 masm->isolate()));
640 __ Sd(v0, MemOperand(s1)); // We come back from 'invoke'. result is in v0.
641 __ LoadRoot(v0, RootIndex::kException);
642 __ b(&exit); // b exposes branch delay slot.
643 __ nop(); // Branch delay slot nop.
644
645 // Invoke: Link this frame into the handler chain.
646 __ bind(&invoke);
647 __ PushStackHandler();
648 // If an exception that is not caught by another handler occurs, this handler
649 // returns control to the code after the bal(&invoke) above, which
650 // restores all kCalleeSaved registers (including cp and fp) to their
651 // saved values before returning a failure to C.
652 //
653 // Registers:
654 // either
655 // a0: root register value
656 // a1: entry address
657 // a2: function
658 // a3: receiver
659 // a4: argc
660 // a5: argv
661 // or
662 // a0: root register value
663 // a1: microtask_queue
664 //
665 // Stack:
666 // handler frame
667 // entry frame
668 // fast api call pc
669 // fast api call fp
670 // C entry FP
671 // function slot
672 // context slot
673 // bad fp (0xFF...F)
674 // callee saved registers + ra
675
676 // Invoke the function by calling through JS entry trampoline builtin and
677 // pop the faked function when we return.
678 __ CallBuiltin(entry_trampoline);
679
680 // Unlink this frame from the handler chain.
681 __ PopStackHandler();
682
683 __ bind(&exit); // v0 holds result
684 // Check if the current stack frame is marked as the outermost JS frame.
685 Label non_outermost_js_2;
686 __ pop(a5);
687 __ Branch(&non_outermost_js_2, ne, a5,
689 __ li(a5, js_entry_sp);
690 __ Sd(zero_reg, MemOperand(a5));
691 __ bind(&non_outermost_js_2);
692
693 // Restore the top frame descriptors from the stack.
694 __ Pop(a4, a5);
695 __ LoadIsolateField(a6, IsolateFieldId::kFastCCallCallerFP);
696 __ Sd(a4, MemOperand(a6, 0));
697 __ LoadIsolateField(a6, IsolateFieldId::kFastCCallCallerPC);
698 __ Sd(a5, MemOperand(a6, 0));
699
700 __ pop(a5);
701 __ li(a4, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
702 masm->isolate()));
703 __ Sd(a5, MemOperand(a4));
704
705 // Reset the stack to the callee saved registers.
707
708 // Restore callee-saved fpu registers.
709 __ MultiPopFPU(kCalleeSavedFPU);
710
711 // Restore callee saved registers from the stack.
712 __ MultiPop(kCalleeSaved | ra);
713 // Return.
714 __ Jump(ra);
715}
716
717} // namespace
718
719void Builtins::Generate_JSEntry(MacroAssembler* masm) {
720 Generate_JSEntryVariant(masm, StackFrame::ENTRY, Builtin::kJSEntryTrampoline);
721}
722
723void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
724 Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
725 Builtin::kJSConstructEntryTrampoline);
726}
727
728void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
729 Generate_JSEntryVariant(masm, StackFrame::ENTRY,
730 Builtin::kRunMicrotasksTrampoline);
731}
732
733static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
734 bool is_construct) {
735 // ----------- S t a t e -------------
736 // -- a1: new.target
737 // -- a2: function
738 // -- a3: receiver_pointer
739 // -- a4: argc
740 // -- a5: argv
741 // -----------------------------------
742
743 // Enter an internal frame.
744 {
745 FrameScope scope(masm, StackFrame::INTERNAL);
746
747 // Setup the context (we need to use the caller context from the isolate).
748 ExternalReference context_address = ExternalReference::Create(
749 IsolateAddressId::kContextAddress, masm->isolate());
750 __ li(cp, context_address);
751 __ Ld(cp, MemOperand(cp));
752
753 // Push the function onto the stack.
754 __ Push(a2);
755
756 // Check if we have enough stack space to push all arguments.
757 __ mov(a6, a4);
758 Generate_CheckStackOverflow(masm, a6, a0, s2);
759
760 // Copy arguments to the stack.
761 // a4: argc
762 // a5: argv, i.e. points to first arg
763 Generate_PushArguments(masm, a5, a4, s1, s2, ArgumentsElementType::kHandle);
764
765 // Push the receiver.
766 __ Push(a3);
767
768 // a0: argc
769 // a1: function
770 // a3: new.target
771 __ mov(a3, a1);
772 __ mov(a1, a2);
773 __ mov(a0, a4);
774
775 // Initialize all JavaScript callee-saved registers, since they will be seen
776 // by the garbage collector as part of handlers.
777 __ LoadRoot(a4, RootIndex::kUndefinedValue);
778 __ mov(a5, a4);
779 __ mov(s1, a4);
780 __ mov(s2, a4);
781 __ mov(s3, a4);
782 __ mov(s4, a4);
783 __ mov(s5, a4);
784 // s6 holds the root address. Do not clobber.
785 // s7 is cp. Do not init.
786
787 // Invoke the code.
788 Builtin builtin = is_construct ? Builtin::kConstruct : Builtins::Call();
789 __ CallBuiltin(builtin);
790
791 // Leave internal frame.
792 }
793 __ Jump(ra);
794}
795
796void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
798}
799
800void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
802}
803
804void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
805 // a1: microtask_queue
807 __ TailCallBuiltin(Builtin::kRunMicrotasks);
808}
809
810static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
811 Register scratch2) {
812 Register params_size = scratch1;
813
814 // Get the size of the formal parameters + receiver (in bytes).
815 __ Ld(params_size,
817 __ Lhu(params_size,
818 FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
819
820 Register actual_params_size = scratch2;
821 // Compute the size of the actual parameters + receiver (in bytes).
822 __ Ld(actual_params_size,
824
825 // If actual is bigger than formal, then we should use it to free up the stack
826 // arguments.
827 __ slt(t2, params_size, actual_params_size);
828 __ movn(params_size, actual_params_size, t2);
829
830 // Leave the frame (also dropping the register file).
831 __ LeaveFrame(StackFrame::INTERPRETED);
832
833 // Drop arguments.
834 __ DropArguments(params_size);
835}
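
The slt/movn pair above is a branchless max: the frame teardown drops whichever is larger, the formal parameter size or the actual argument size (both including the receiver). A standalone sketch (not V8 code):

#include <cstdint>

// params_size: formal parameters + receiver; actual_params_size: actual
// arguments + receiver; both in the units already prepared by the caller.
intptr_t SizeToDrop(intptr_t params_size, intptr_t actual_params_size) {
  bool t2 = params_size < actual_params_size;  // slt(t2, params_size, actual_params_size)
  if (t2) params_size = actual_params_size;    // movn(params_size, actual_params_size, t2)
  return params_size;
}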
836
837// Advance the current bytecode offset. This simulates what all bytecode
838// handlers do upon completion of the underlying operation. Will bail out to a
839// label if the bytecode (without prefix) is a return bytecode. Will not advance
840// the bytecode offset if the current bytecode is a JumpLoop, instead just
841// re-executing the JumpLoop to jump to the correct bytecode.
842static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
843 Register bytecode_array,
844 Register bytecode_offset,
845 Register bytecode, Register scratch1,
846 Register scratch2, Register scratch3,
847 Label* if_return) {
848 Register bytecode_size_table = scratch1;
849
850 // The bytecode offset value will be increased by one in wide and extra wide
851 // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
852 // will restore the original bytecode. In order to simplify the code, we have
853 // a backup of it.
854 Register original_bytecode_offset = scratch3;
855 DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode,
856 bytecode_size_table, original_bytecode_offset));
857 __ Move(original_bytecode_offset, bytecode_offset);
858 __ li(bytecode_size_table, ExternalReference::bytecode_size_table_address());
859
860 // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
861 Label process_bytecode, extra_wide;
862 static_assert(0 == static_cast<int>(interpreter::Bytecode::kWide));
863 static_assert(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
864 static_assert(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
865 static_assert(3 ==
866 static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
867 __ Branch(&process_bytecode, hi, bytecode, Operand(3));
868 __ And(scratch2, bytecode, Operand(1));
869 __ Branch(&extra_wide, ne, scratch2, Operand(zero_reg));
870
871 // Load the next bytecode and update table to the wide scaled table.
872 __ Daddu(bytecode_offset, bytecode_offset, Operand(1));
873 __ Daddu(scratch2, bytecode_array, bytecode_offset);
874 __ Lbu(bytecode, MemOperand(scratch2));
875 __ Daddu(bytecode_size_table, bytecode_size_table,
877 __ jmp(&process_bytecode);
878
879 __ bind(&extra_wide);
880 // Load the next bytecode and update table to the extra wide scaled table.
881 __ Daddu(bytecode_offset, bytecode_offset, Operand(1));
882 __ Daddu(scratch2, bytecode_array, bytecode_offset);
883 __ Lbu(bytecode, MemOperand(scratch2));
884 __ Daddu(bytecode_size_table, bytecode_size_table,
886
887 __ bind(&process_bytecode);
888
889// Bail out to the return label if this is a return bytecode.
890#define JUMP_IF_EQUAL(NAME) \
891 __ Branch(if_return, eq, bytecode, \
892 Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
894#undef JUMP_IF_EQUAL
895
896 // If this is a JumpLoop, re-execute it to perform the jump to the beginning
897 // of the loop.
898 Label end, not_jump_loop;
899 __ Branch(&not_jump_loop, ne, bytecode,
900 Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
901 // We need to restore the original bytecode_offset since we might have
902 // increased it to skip the wide / extra-wide prefix bytecode.
903 __ Move(bytecode_offset, original_bytecode_offset);
904 __ jmp(&end);
905
906 __ bind(&not_jump_loop);
907 // Otherwise, load the size of the current bytecode and advance the offset.
908 __ Daddu(scratch2, bytecode_size_table, bytecode);
909 __ Lb(scratch2, MemOperand(scratch2));
910 __ Daddu(bytecode_offset, bytecode_offset, scratch2);
911
912 __ bind(&end);
913}
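
A standalone model of the advance logic (not V8 code): prefix bytecodes 0..3 select a wide or extra-wide operand-size table, a return bytecode bails out, JumpLoop keeps the original offset, and everything else advances by its size-table entry. The values of kJumpLoop and kReturn and the table contents here are illustrative; in V8 the sizes come from the bytecode size table referenced above.

#include <cstdint>
#include <stdexcept>

// Only kWide..kDebugBreakExtraWide match the static_asserts in the builtin;
// kJumpLoop and kReturn are illustrative placeholders.
enum Bytecode : uint8_t {
  kWide = 0, kExtraWide = 1, kDebugBreakWide = 2, kDebugBreakExtraWide = 3,
  kJumpLoop = 4, kReturn = 5
};

int AdvanceOffsetModel(const uint8_t* bytecode_array, int offset,
                       const int8_t* single_size_table,
                       const int8_t* wide_size_table,
                       const int8_t* extra_wide_size_table) {
  const int original_offset = offset;
  const int8_t* size_table = single_size_table;
  uint8_t bytecode = bytecode_array[offset];
  if (bytecode <= 3) {                        // wide / extra-wide prefix
    const bool extra_wide = (bytecode & 1) != 0;
    ++offset;                                 // step over the prefix
    bytecode = bytecode_array[offset];
    size_table = extra_wide ? extra_wide_size_table : wide_size_table;
  }
  if (bytecode == kReturn) {
    throw std::runtime_error("return bytecode");  // builtin jumps to if_return
  }
  if (bytecode == kJumpLoop) return original_offset;  // re-execute JumpLoop
  return offset + size_table[bytecode];
}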
914
915namespace {
916
917void ResetSharedFunctionInfoAge(MacroAssembler* masm, Register sfi) {
918 __ Sh(zero_reg, FieldMemOperand(sfi, SharedFunctionInfo::kAgeOffset));
919}
920
921void ResetJSFunctionAge(MacroAssembler* masm, Register js_function,
922 Register scratch) {
923 __ Ld(scratch,
924 FieldMemOperand(js_function, JSFunction::kSharedFunctionInfoOffset));
925 ResetSharedFunctionInfoAge(masm, scratch);
926}
927
928void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm,
929 Register feedback_vector, Register scratch) {
930 DCHECK(!AreAliased(feedback_vector, scratch));
931 __ Lbu(scratch,
932 FieldMemOperand(feedback_vector, FeedbackVector::kOsrStateOffset));
933 __ And(scratch, scratch, Operand(~FeedbackVector::OsrUrgencyBits::kMask));
934 __ Sb(scratch,
935 FieldMemOperand(feedback_vector, FeedbackVector::kOsrStateOffset));
936}
937} // namespace
938
939// static
940void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
941 UseScratchRegisterScope temps(masm);
942 temps.Include({s1, s2, s3});
943 auto descriptor =
944 Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
945 Register closure = descriptor.GetRegisterParameter(
946 BaselineOutOfLinePrologueDescriptor::kClosure);
947 // Load the feedback vector from the closure.
948 Register feedback_cell = temps.Acquire();
949 Register feedback_vector = temps.Acquire();
950 __ Ld(feedback_cell,
951 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
952 __ Ld(feedback_vector,
953 FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset));
954 {
955 UseScratchRegisterScope temps(masm);
956 Register scratch = temps.Acquire();
957 __ AssertFeedbackVector(feedback_vector, scratch);
958 }
959
960#ifndef V8_ENABLE_LEAPTIERING
961 // Check for a tiering state.
962 Label flags_need_processing;
963 Register flags = no_reg;
964 {
965 UseScratchRegisterScope temps(masm);
966 flags = temps.Acquire();
967 // flags will be used only in |flags_need_processing|;
968 // outside of it the register can be reused.
969 __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
970 flags, feedback_vector, CodeKind::BASELINE, &flags_need_processing);
971 }
972#endif // !V8_ENABLE_LEAPTIERING
973
974 {
975 UseScratchRegisterScope temps(masm);
976 ResetFeedbackVectorOsrUrgency(masm, feedback_vector, temps.Acquire());
977 }
978 // Increment invocation count for the function.
979 {
980 UseScratchRegisterScope temps(masm);
981 Register invocation_count = temps.Acquire();
982 __ Lw(invocation_count,
983 FieldMemOperand(feedback_vector,
984 FeedbackVector::kInvocationCountOffset));
985 __ Addu(invocation_count, invocation_count, Operand(1));
986 __ Sw(invocation_count,
987 FieldMemOperand(feedback_vector,
988 FeedbackVector::kInvocationCountOffset));
989 }
990
991 FrameScope frame_scope(masm, StackFrame::MANUAL);
992 {
993 ASM_CODE_COMMENT_STRING(masm, "Frame Setup");
994 // Normally the first thing we'd do here is Push(ra, fp), but we already
995 // entered the frame in BaselineCompiler::Prologue, as we had to use the
996 // value of ra before the call to this BaselineOutOfLinePrologue builtin.
997 Register callee_context = descriptor.GetRegisterParameter(
998 BaselineOutOfLinePrologueDescriptor::kCalleeContext);
999 Register callee_js_function = descriptor.GetRegisterParameter(
1000 BaselineOutOfLinePrologueDescriptor::kClosure);
1001 {
1002 UseScratchRegisterScope temps(masm);
1003 ResetJSFunctionAge(masm, callee_js_function, temps.Acquire());
1004 }
1005 __ Push(callee_context, callee_js_function);
1006 DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
1007 DCHECK_EQ(callee_js_function, kJSFunctionRegister);
1008
1009 Register argc = descriptor.GetRegisterParameter(
1010 BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
1011 // We'll use the bytecode for both code age/OSR resetting, and pushing onto
1012 // the frame, so load it into a register.
1013 Register bytecode_array = descriptor.GetRegisterParameter(
1014 BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
1015 __ Push(argc, bytecode_array, feedback_cell, feedback_vector);
1016
1017 {
1018 UseScratchRegisterScope temps(masm);
1019 Register invocation_count = temps.Acquire();
1020 __ AssertFeedbackVector(feedback_vector, invocation_count);
1021 }
1022 }
1023
1024 Label call_stack_guard;
1025 Register frame_size = descriptor.GetRegisterParameter(
1026 BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
1027 {
1028 ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt check");
1029 // Stack check. This folds the checks for both the interrupt stack limit
1030 // check and the real stack limit into one by just checking for the
1031 // interrupt limit. The interrupt limit is either equal to the real stack
1032 // limit or tighter. By ensuring we have space until that limit after
1033 // building the frame we can quickly precheck both at once.
1034 UseScratchRegisterScope temps(masm);
1035 Register sp_minus_frame_size = temps.Acquire();
1036 __ Dsubu(sp_minus_frame_size, sp, frame_size);
1037 Register interrupt_limit = temps.Acquire();
1038 __ LoadStackLimit(interrupt_limit,
1039 MacroAssembler::StackLimitKind::kInterruptStackLimit);
1040 __ Branch(&call_stack_guard, Uless, sp_minus_frame_size,
1041 Operand(interrupt_limit));
1042 }
1043
1044 // Do "fast" return to the caller pc in ra.
1045 // TODO(v8:11429): Document this frame setup better.
1046 __ Ret();
1047
1048#ifndef V8_ENABLE_LEAPTIERING
1049 __ bind(&flags_need_processing);
1050 {
1051 ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
1052 UseScratchRegisterScope temps(masm);
1053 temps.Exclude(flags);
1054 // Ensure the flags register is not allocated again.
1055 // Drop the frame created by the baseline call.
1056 __ Pop(ra, fp);
1057 __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector);
1058 __ Trap();
1059 }
1060#endif // !V8_ENABLE_LEAPTIERING
1061
1062 __ bind(&call_stack_guard);
1063 {
1064 ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
1065 FrameScope frame_scope(masm, StackFrame::INTERNAL);
1066 // Save incoming new target or generator
1068#ifdef V8_ENABLE_LEAPTIERING
1069 // No need to SmiTag as dispatch handles always look like Smis.
1070 static_assert(kJSDispatchHandleShift > 0);
1072#endif
1073 __ SmiTag(frame_size);
1074 __ Push(frame_size);
1075 __ CallRuntime(Runtime::kStackGuardWithGap);
1076#ifdef V8_ENABLE_LEAPTIERING
1078#endif
1080 }
1081 __ Ret();
1082 temps.Exclude({s1, s2, s3});
1083}
1084
1085// static
1086void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) {
1087 // We're here because we got deopted during BaselineOutOfLinePrologue's stack
1088 // check. Undo all its frame creation and call into the interpreter instead.
1089
1090 // Drop the feedback vector, the bytecode offset (was the feedback vector
1091 // but got replaced during deopt) and bytecode array.
1092 __ Drop(3);
1093
1094 // Context, closure, argc.
1097
1098 // Drop frame pointer
1099 __ LeaveFrame(StackFrame::BASELINE);
1100
1101 // Enter the interpreter.
1102 __ TailCallBuiltin(Builtin::kInterpreterEntryTrampoline);
1103}
1104
1105// Generate code for entering a JS function with the interpreter.
1106// On entry to the function the receiver and arguments have been pushed on the
1107// stack left to right.
1108//
1109// The live registers are:
1110// o a0 : actual argument count
1111// o a1: the JS function object being called.
1112// o a3: the incoming new target or generator object
1113// o cp: our context
1114// o fp: the caller's frame pointer
1115// o sp: stack pointer
1116// o ra: return address
1117//
1118// The function builds an interpreter frame. See InterpreterFrameConstants in
1119// frame-constants.h for its layout.
1121 MacroAssembler* masm, InterpreterEntryTrampolineMode mode) {
1122 Register closure = a1;
1123
1124 // Get the bytecode array from the function object and load it into
1125 // kInterpreterBytecodeArrayRegister.
1126 Register sfi = a5;
1127 __ Ld(sfi, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
1128 ResetSharedFunctionInfoAge(masm, sfi);
1129
1130 // The bytecode array could have been flushed from the shared function info,
1131 // if so, call into CompileLazy.
1132 Label is_baseline, compile_lazy;
1134 masm, sfi, kInterpreterBytecodeArrayRegister, kScratchReg2, &is_baseline,
1135 &compile_lazy);
1136
1137 Label push_stack_frame;
1138 Register feedback_vector = a2;
1139 __ LoadFeedbackVector(feedback_vector, closure, a5, &push_stack_frame);
1140
1141#ifndef V8_JITLESS
1142#ifndef V8_ENABLE_LEAPTIERING
1143 // If feedback vector is valid, check for optimized code and update invocation
1144 // count.
1145
1146 // Check the tiering state.
1147 Label flags_need_processing;
1148 Register flags = a5;
1149 __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
1150 flags, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
1151 &flags_need_processing);
1152#endif // !V8_ENABLE_LEAPTIERING
1153 ResetFeedbackVectorOsrUrgency(masm, feedback_vector, a5);
1154 // Increment invocation count for the function.
1155 __ Lw(a5, FieldMemOperand(feedback_vector,
1156 FeedbackVector::kInvocationCountOffset));
1157 __ Addu(a5, a5, Operand(1));
1158 __ Sw(a5, FieldMemOperand(feedback_vector,
1159 FeedbackVector::kInvocationCountOffset));
1160
1161 // Open a frame scope to indicate that there is a frame on the stack. The
1162 // MANUAL indicates that the scope shouldn't actually generate code to set up
1163 // the frame (that is done below).
1164#else
1165 // Note: By omitting the above code in jitless mode we also disable:
1166 // - kFlagsLogNextExecution: only used for logging/profiling; and
1167 // - kInvocationCountOffset: only used for tiering heuristics and code
1168 // coverage.
1169#endif // !V8_JITLESS
1170
1171 __ bind(&push_stack_frame);
1172 FrameScope frame_scope(masm, StackFrame::MANUAL);
1173 __ PushStandardFrame(closure);
1174
1175 // Load initial bytecode offset.
1178
1179 // Push bytecode array, Smi tagged bytecode array offset, and the feedback
1180 // vector.
1182 __ Push(kInterpreterBytecodeArrayRegister, a5, feedback_vector);
1183
1184 // Allocate the local and temporary register file on the stack.
1185 Label stack_overflow;
1186 {
1187 // Load frame size (word) from the BytecodeArray object.
1189 BytecodeArray::kFrameSizeOffset));
1190
1191 // Do a stack check to ensure we don't go over the limit.
1192 __ Dsubu(a6, sp, Operand(a5));
1193 __ LoadStackLimit(a2, MacroAssembler::StackLimitKind::kRealStackLimit);
1194 __ Branch(&stack_overflow, lo, a6, Operand(a2));
1195
1196 // If ok, push undefined as the initial value for all register file entries.
1197 Label loop_header;
1198 Label loop_check;
1199 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1200 __ Branch(&loop_check);
1201 __ bind(&loop_header);
1202 // TODO(rmcilroy): Consider doing more than one push per loop iteration.
1204 // Continue loop if not done.
1205 __ bind(&loop_check);
1206 __ Dsubu(a5, a5, Operand(kSystemPointerSize));
1207 __ Branch(&loop_header, ge, a5, Operand(zero_reg));
1208 }
1209
1210 // If the bytecode array has a valid incoming new target or generator object
1211 // register, initialize it with the incoming value, which was passed in a3.
1212 Label no_incoming_new_target_or_generator_register;
1213 __ Lw(a5, FieldMemOperand(
1215 BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
1216 __ Branch(&no_incoming_new_target_or_generator_register, eq, a5,
1217 Operand(zero_reg));
1218 __ Dlsa(a5, fp, a5, kSystemPointerSizeLog2);
1219 __ Sd(a3, MemOperand(a5));
1220 __ bind(&no_incoming_new_target_or_generator_register);
1221
1222 // Perform interrupt stack check.
1223 // TODO(solanes): Merge with the real stack limit check above.
1224 Label stack_check_interrupt, after_stack_check_interrupt;
1225 __ LoadStackLimit(a5, MacroAssembler::StackLimitKind::kInterruptStackLimit);
1226 __ Branch(&stack_check_interrupt, lo, sp, Operand(a5));
1227 __ bind(&after_stack_check_interrupt);
1228
1229 // The accumulator is already loaded with undefined.
1230
1231 // Load the dispatch table into a register and dispatch to the bytecode
1232 // handler at the current bytecode offset.
1233 Label do_dispatch;
1234 __ bind(&do_dispatch);
1236 ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1239 __ Lbu(a7, MemOperand(a0));
1244
1245 __ RecordComment("--- InterpreterEntryReturnPC point ---");
1247 masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(
1248 masm->pc_offset());
1249 } else {
1251 // Both versions must be the same up to this point, otherwise the builtins
1252 // will not be interchangeable.
1253 CHECK_EQ(
1254 masm->isolate()->heap()->interpreter_entry_return_pc_offset().value(),
1255 masm->pc_offset());
1256 }
1257
1258 // Any returns to the entry trampoline are either due to the return bytecode
1259 // or the interpreter tail calling a builtin and then a dispatch.
1260
1261 // Get bytecode array and bytecode offset from the stack frame.
1267
1268 // Either return, or advance to the next bytecode and dispatch.
1269 Label do_return;
1272 __ Lbu(a1, MemOperand(a1));
1275 a5, &do_return);
1276 __ jmp(&do_dispatch);
1277
1278 __ bind(&do_return);
1279 // The return value is in v0.
1280 LeaveInterpreterFrame(masm, t0, t1);
1281 __ Jump(ra);
1282
1283 __ bind(&stack_check_interrupt);
1284 // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
1285 // for the call to the StackGuard.
1291 __ CallRuntime(Runtime::kStackGuard);
1292
1293 // After the call, restore the bytecode array, bytecode offset and accumulator
1294 // registers again. Also, restore the bytecode offset in the stack to its
1295 // previous value.
1300 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1301
1304
1305 __ jmp(&after_stack_check_interrupt);
1306
1307#ifndef V8_JITLESS
1308#ifndef V8_ENABLE_LEAPTIERING
1309 __ bind(&flags_need_processing);
1310 __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector);
1311#endif // !V8_ENABLE_LEAPTIERING
1312 __ bind(&is_baseline);
1313 {
1314#ifndef V8_ENABLE_LEAPTIERING
1315 // Load the feedback vector from the closure.
1316 __ Ld(feedback_vector,
1317 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1318 __ Ld(feedback_vector,
1319 FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset));
1320
1321 Label install_baseline_code;
1322 // Check if feedback vector is valid. If not, call prepare for baseline to
1323 // allocate it.
1324 __ Ld(t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
1325 __ Lhu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
1326 __ Branch(&install_baseline_code, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
1327
1328 // Check for a tiering state.
1329 __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
1330 flags, feedback_vector, CodeKind::BASELINE, &flags_need_processing);
1331
1332 // TODO(mips64, 42204201): This fastcase is difficult to support with the
1333 // sandbox as it requires getting write access to the dispatch table. See
1334 // `JSFunction::UpdateCode`. We might want to remove it for all
1335 // configurations as it does not seem to be performance sensitive.
1336
1337 // Load the baseline code into the closure.
1339 static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
1340 __ ReplaceClosureCodeWithOptimizedCode(a2, closure, t0, t1);
1341 __ JumpCodeObject(a2, kJSEntrypointTag);
1342
1343 __ bind(&install_baseline_code);
1344
1345#endif // V8_ENABLE_LEAPTIERING
1346 __ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
1347 }
1348#endif // !V8_JITLESS
1349
1350 __ bind(&compile_lazy);
1351 __ GenerateTailCallToReturnedCode(Runtime::kCompileLazy);
1352 // Unreachable code.
1353 __ break_(0xCC);
1354
1355 __ bind(&stack_overflow);
1356 __ CallRuntime(Runtime::kThrowStackOverflow);
1357 // Unreachable code.
1358 __ break_(0xCC);
1359}
1360
1361static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
1362 Register start_address,
1363 Register scratch, Register scratch2) {
1364 // Find the address of the last argument.
1365 __ Dsubu(scratch, num_args, Operand(1));
1366 __ dsll(scratch, scratch, kSystemPointerSizeLog2);
1367 __ Dsubu(start_address, start_address, scratch);
1368
1369 // Push the arguments.
1370 __ PushArray(start_address, num_args, scratch, scratch2,
1372}
1373
1374// static
1376 MacroAssembler* masm, ConvertReceiverMode receiver_mode,
1379 // ----------- S t a t e -------------
1380 // -- a0 : the number of arguments
1381 // -- a2 : the address of the first argument to be pushed. Subsequent
1382 // arguments should be consecutive above this, in the same order as
1383 // they are to be pushed onto the stack.
1384 // -- a1 : the target to call (can be any Object).
1385 // -----------------------------------
1386 Label stack_overflow;
1388 // The spread argument should not be pushed.
1389 __ Dsubu(a0, a0, Operand(1));
1390 }
1391
1392 if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1393 __ Dsubu(a3, a0, Operand(kJSArgcReceiverSlots));
1394 } else {
1395 __ mov(a3, a0);
1396 }
1397
1398 __ StackOverflowCheck(a3, a4, t0, &stack_overflow);
1399
1400 // This function modifies a2, t0 and a4.
1401 GenerateInterpreterPushArgs(masm, a3, a2, a4, t0);
1402
1403 if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1404 __ PushRoot(RootIndex::kUndefinedValue);
1405 }
1406
1408 // Pass the spread in the register a2.
1409 // a2 already points to the penultimate argument; the spread
1410 // is below that.
1411 __ Ld(a2, MemOperand(a2, -kSystemPointerSize));
1412 }
1413
1414 // Call the target.
1416 __ TailCallBuiltin(Builtin::kCallWithSpread);
1417 } else {
1418 __ TailCallBuiltin(Builtins::Call(receiver_mode));
1419 }
1420
1421 __ bind(&stack_overflow);
1422 {
1423 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1424 // Unreachable code.
1425 __ break_(0xCC);
1426 }
1427}
1428
1429// static
1431 MacroAssembler* masm, InterpreterPushArgsMode mode) {
1432 // ----------- S t a t e -------------
1433 // -- a0 : argument count
1434 // -- a3 : new target
1435 // -- a1 : constructor to call
1436 // -- a2 : allocation site feedback if available, undefined otherwise.
1437 // -- a4 : address of the first argument
1438 // -----------------------------------
1439 Label stack_overflow;
1440 __ StackOverflowCheck(a0, a5, t0, &stack_overflow);
1441
1443 // The spread argument should not be pushed.
1444 __ Dsubu(a0, a0, Operand(1));
1445 }
1446
1447 Register argc_without_receiver = a6;
1448 __ Dsubu(argc_without_receiver, a0, Operand(kJSArgcReceiverSlots));
1449 // Push the arguments. This function modifies t0, a4 and a5.
1450 GenerateInterpreterPushArgs(masm, argc_without_receiver, a4, a5, t0);
1451
1452 // Push a slot for the receiver.
1453 __ push(zero_reg);
1454
1456 // Pass the spread in the register a2.
1457 // a4 already points to the penultimate argument; the spread
1458 // lies in the next interpreter register.
1459 __ Ld(a2, MemOperand(a4, -kSystemPointerSize));
1460 } else {
1461 __ AssertUndefinedOrAllocationSite(a2, t0);
1462 }
1463
1465 __ AssertFunction(a1);
1466
1467 // Tail call to the function-specific construct stub (still in the caller
1468 // context at this point).
1469 __ TailCallBuiltin(Builtin::kArrayConstructorImpl);
1470 } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1471 // Call the constructor with a0, a1, and a3 unmodified.
1472 __ TailCallBuiltin(Builtin::kConstructWithSpread);
1473 } else {
1475 // Call the constructor with a0, a1, and a3 unmodified.
1476 __ TailCallBuiltin(Builtin::kConstruct);
1477 }
1478
1479 __ bind(&stack_overflow);
1480 {
1481 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1482 // Unreachable code.
1483 __ break_(0xCC);
1484 }
1485}
1486
1487// static
1489 MacroAssembler* masm, ForwardWhichFrame which_frame) {
1490 // ----------- S t a t e -------------
1491 // -- a3 : new target
1492 // -- a1 : constructor to call
1493 // -----------------------------------
1494 Label stack_overflow;
1495
1496 // Load the frame pointer into a4.
1497 switch (which_frame) {
1499 __ Move(a4, fp);
1500 break;
1503 break;
1504 }
1505
1506 // Load the argument count into a0.
1508 __ StackOverflowCheck(a0, a5, t0, &stack_overflow);
1509
1510 // Point a4 to the base of the argument list to forward, excluding the
1511 // receiver.
1512 __ Daddu(a4, a4,
1515
1516 // Copy arguments on the stack. a5 and t0 are scratch registers.
1517 Register argc_without_receiver = a6;
1518 __ Dsubu(argc_without_receiver, a0, Operand(kJSArgcReceiverSlots));
1519 __ PushArray(a4, argc_without_receiver, a5, t0);
1520
1521 // Push a slot for the receiver.
1522 __ push(zero_reg);
1523
1524 // Call the constructor with a0, a1, and a3 unmodified.
1525 __ TailCallBuiltin(Builtin::kConstruct);
1526
1527 __ bind(&stack_overflow);
1528 {
1529 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1530 // Unreachable code.
1531 __ break_(0xCC);
1532 }
1533}
1534
1535namespace {
1536
1537void NewImplicitReceiver(MacroAssembler* masm) {
1538 // ----------- S t a t e -------------
1539 // -- a0 : the number of arguments
1540 // -- a1 : constructor to call (checked to be a JSFunction)
1541 // -- a3 : new target
1542 //
1543 // Stack:
1544 // -- Implicit Receiver
1545 // -- [arguments without receiver]
1546 // -- Implicit Receiver
1547 // -- Context
1548 // -- FastConstructMarker
1549 // -- FramePointer
1550 // -----------------------------------
1551
1552 // Save live registers.
1553 __ SmiTag(a0);
1554 __ Push(a0, a1, a3);
1555 __ CallBuiltin(Builtin::kFastNewObject);
1556 __ Pop(a0, a1, a3);
1557 __ SmiUntag(a0);
1558
1559 // Patch implicit receiver (in arguments)
1560 __ StoreReceiver(v0);
1561 // Patch second implicit (in construct frame)
1562 __ Sd(v0,
1564
1565 // Restore context.
1567}
1568
1569} // namespace
1570
1571// static
1572void Builtins::Generate_InterpreterPushArgsThenFastConstructFunction(
1573 MacroAssembler* masm) {
1574 // ----------- S t a t e -------------
1575 // -- a0 : argument count
1576 // -- a1 : constructor to call (checked to be a JSFunction)
1577 // -- a3 : new target
1578 // -- a4 : address of the first argument
1579 // -- cp : context pointer
1580 // -----------------------------------
1581 __ AssertFunction(a1);
1582
1583 // Check if target has a [[Construct]] internal method.
1584 Label non_constructor;
1585 __ LoadMap(a2, a1);
1586 __ Lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
1587 __ And(a2, a2, Operand(Map::Bits1::IsConstructorBit::kMask));
1588 __ Branch(&non_constructor, eq, a2, Operand(zero_reg));
1589
1590 // Add a stack check before pushing arguments.
1591 Label stack_overflow;
1592 __ StackOverflowCheck(a0, a2, a5, &stack_overflow);
1593
1594 // Enter a construct frame.
1595 FrameScope scope(masm, StackFrame::MANUAL);
1596 __ EnterFrame(StackFrame::FAST_CONSTRUCT);
1597
1598 // Implicit receiver stored in the construct frame.
1599 __ LoadRoot(a2, RootIndex::kTheHoleValue);
1600 __ Push(cp, a2);
1601
1602 // Push arguments + implicit receiver.
1603 Register argc_without_receiver = a7;
1604 __ Dsubu(argc_without_receiver, a0, Operand(kJSArgcReceiverSlots));
1605 GenerateInterpreterPushArgs(masm, argc_without_receiver, a4, a5, a6);
1606 __ Push(a2);
1607
1608 // Check if it is a builtin call.
1609 Label builtin_call;
1610 __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
1611 __ Lwu(a2, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
1612 __ And(a5, a2, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
1613 __ Branch(&builtin_call, ne, a5, Operand(zero_reg));
1614
1615 // Check if we need to create an implicit receiver.
1616 Label not_create_implicit_receiver;
1617 __ DecodeField<SharedFunctionInfo::FunctionKindBits>(a2);
1618 __ JumpIfIsInRange(
1619 a2, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
1620 static_cast<uint32_t>(FunctionKind::kDerivedConstructor),
1621 &not_create_implicit_receiver);
1622 NewImplicitReceiver(masm);
1623 __ bind(&not_create_implicit_receiver);
1624
1625 // Call the function.
1626 __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
1627
1628 // ----------- S t a t e -------------
1629 // -- v0 constructor result
1630 //
1631 // Stack:
1632 // -- Implicit Receiver
1633 // -- Context
1634 // -- FastConstructMarker
1635 // -- FramePointer
1636 // -----------------------------------
1637
1638 // Store offset of return address for deoptimizer.
1639 masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
1640 masm->pc_offset());
1641
1642 // If the result is an object (in the ECMA sense), we should get rid
1643 // of the receiver and use the result; see ECMA-262 section 13.2.2-7
1644 // on page 74.
1645 Label use_receiver, do_throw, leave_and_return, check_receiver;
1646
1647 // If the result is undefined, we jump out to using the implicit receiver.
1648 __ JumpIfNotRoot(v0, RootIndex::kUndefinedValue, &check_receiver);
1649
1650 // Throw away the result of the constructor invocation and use the
1651 // on-stack receiver as the result.
1652 __ bind(&use_receiver);
1653 __ Ld(v0,
1655 __ JumpIfRoot(v0, RootIndex::kTheHoleValue, &do_throw);
1656
1657 __ bind(&leave_and_return);
1658 // Leave construct frame.
1659 __ LeaveFrame(StackFrame::FAST_CONSTRUCT);
1660 __ Ret();
1661
1662 // Otherwise we do a smi check and fall through to check if the return value
1663 // is a valid receiver.
1664 __ bind(&check_receiver);
1665
1666 // If the result is a smi, it is *not* an object in the ECMA sense.
1667 __ JumpIfSmi(v0, &use_receiver);
1668
1669 // Check if the type of the result is not an object in the ECMA sense.
1670 __ GetObjectType(v0, a4, a4);
1671 __ Branch(&leave_and_return, hs, a4, Operand(FIRST_JS_RECEIVER_TYPE));
1672 __ Branch(&use_receiver);
1673
1674 __ bind(&builtin_call);
1675  // TODO(victorgomes): Check whether this can be turned into a tail call.
1676 __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
1677 __ LeaveFrame(StackFrame::FAST_CONSTRUCT);
1678 __ Ret();
1679
1680 __ bind(&do_throw);
1681 // Restore the context from the frame.
1683 __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
1684 // Unreachable code.
1685 __ break_(0xCC);
1686
1687 __ bind(&stack_overflow);
1688 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1689 // Unreachable code.
1690 __ break_(0xCC);
1691
1692 // Called Construct on an Object that doesn't have a [[Construct]] internal
1693 // method.
1694 __ bind(&non_constructor);
1695 __ TailCallBuiltin(Builtin::kConstructedNonConstructable);
1696}
1697
1698static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
1699 // Set the return address to the correct point in the interpreter entry
1700 // trampoline.
1701 Label builtin_trampoline, trampoline_loaded;
1702 Tagged<Smi> interpreter_entry_return_pc_offset(
1703 masm->isolate()->heap()->interpreter_entry_return_pc_offset());
1704 DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
1705
1706 // If the SFI function_data is an InterpreterData, the function will have a
1707 // custom copy of the interpreter entry trampoline for profiling. If so,
1708 // get the custom trampoline, otherwise grab the entry address of the global
1709 // trampoline.
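  // In rough pseudo-C, the trampoline selection below is (field and helper
  // names are approximations, not the exact C++ API):
  //
  //   data  = function->shared()->trusted_function_data();
  //   entry = IsInterpreterData(data)
  //               ? data->interpreter_trampoline()->instruction_start()
  //               : *interpreter_entry_trampoline_instruction_start_address;
  //   ra    = entry + interpreter_entry_return_pc_offset;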
1711 __ Ld(t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
1712 __ Ld(t0,
1713 FieldMemOperand(t0, SharedFunctionInfo::kTrustedFunctionDataOffset));
1714 __ GetObjectType(t0, kInterpreterDispatchTableRegister,
1716 __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister,
1717 Operand(INTERPRETER_DATA_TYPE));
1718
1719 __ Ld(t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
1720 __ LoadCodeInstructionStart(t0, t0, kJSEntrypointTag);
1721 __ Branch(&trampoline_loaded);
1722
1723 __ bind(&builtin_trampoline);
1724 __ li(t0, ExternalReference::
1725 address_of_interpreter_entry_trampoline_instruction_start(
1726 masm->isolate()));
1727 __ Ld(t0, MemOperand(t0));
1728
1729 __ bind(&trampoline_loaded);
1730 __ Daddu(ra, t0, Operand(interpreter_entry_return_pc_offset.value()));
1731
1732 // Initialize the dispatch table register.
1734 ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1735
1736 // Get the bytecode array pointer from the frame.
1739
1740 if (v8_flags.debug_code) {
1741 // Check function data field is actually a BytecodeArray object.
1743 __ Assert(ne,
1744 AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
1745 kScratchReg, Operand(zero_reg));
1746 __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
1747 __ Assert(eq,
1748 AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
1749 a1, Operand(BYTECODE_ARRAY_TYPE));
1750 }
1751
1752 // Get the target bytecode offset from the frame.
1755
1756 if (v8_flags.debug_code) {
1757 Label okay;
1760 // Unreachable code.
1761 __ break_(0xCC);
1762 __ bind(&okay);
1763 }
1764
1765 // Dispatch to the target bytecode.
1768 __ Lbu(a7, MemOperand(a1));
1772}
1773
1774void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
1775 // Advance the current bytecode offset stored within the given interpreter
1776 // stack frame. This simulates what all bytecode handlers do upon completion
1777 // of the underlying operation.
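  // Conceptually (a sketch; helper names are illustrative):
  //
  //   offset = frame[kInterpreterBytecodeOffset];
  //   bc     = bytecode_array[offset];
  //   if (IsReturnBytecode(bc)) abort();                // see if_return below
  //   frame[kInterpreterBytecodeOffset] = Smi(offset + Size(bc));
  //
  // with the function-entry sentinel offset first mapped to the first real
  // bytecode (see function_entry_bytecode below).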
1783
1784 Label enter_bytecode, function_entry_bytecode;
1785 __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
1788
1789 // Load the current bytecode.
1792 __ Lbu(a1, MemOperand(a1));
1793
1794 // Advance to the next bytecode.
1795 Label if_return;
1798 a4, &if_return);
1799
1800 __ bind(&enter_bytecode);
1801 // Convert new bytecode offset to a Smi and save in the stackframe.
1804
1806
1807 __ bind(&function_entry_bytecode);
1808 // If the code deoptimizes during the implicit function entry stack interrupt
1809 // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
1810 // not a valid bytecode offset. Detect this case and advance to the first
1811 // actual bytecode.
1814 __ Branch(&enter_bytecode);
1815
1816 // We should never take the if_return path.
1817 __ bind(&if_return);
1818 __ Abort(AbortReason::kInvalidBytecodeAdvance);
1819}
1820
1821void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
1822  Generate_InterpreterEnterBytecode(masm);
1823}
1824
1825namespace {
1826void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
1827 bool javascript_builtin,
1828 bool with_result) {
1829 const RegisterConfiguration* config(RegisterConfiguration::Default());
1830 int allocatable_register_count = config->num_allocatable_general_registers();
1831 UseScratchRegisterScope temps(masm);
1832 Register scratch = temps.Acquire();
1833
1834 if (with_result) {
1835 if (javascript_builtin) {
1836 __ mov(scratch, v0);
1837 } else {
1838 // Overwrite the hole inserted by the deoptimizer with the return value
1839 // from the LAZY deopt point.
1840 __ Sd(v0,
1841 MemOperand(sp,
1842 config->num_allocatable_general_registers() *
1845 }
1846 }
1847 for (int i = allocatable_register_count - 1; i >= 0; --i) {
1848 int code = config->GetAllocatableGeneralCode(i);
1849 __ Pop(Register::from_code(code));
1850 if (javascript_builtin && code == kJavaScriptCallArgCountRegister.code()) {
1852 }
1853 }
1854
1855 if (with_result && javascript_builtin) {
1856    // Overwrite the hole inserted by the deoptimizer with the return value
1857    // from the LAZY deopt point. a0 contains the arguments count; the return
1858    // value from LAZY is always the last argument.
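    // In effect (illustrative arithmetic, not the exact frame constants):
    //   sp[argc + return_value_offset] = saved_return_value;
    // where return_value_offset is derived from
    // BuiltinContinuationFrameConstants and accounts for the continuation
    // frame slots still sitting between sp and the JS arguments at this point.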
1859 constexpr int return_value_offset =
1862 __ Daddu(a0, a0, Operand(return_value_offset));
1863 __ Dlsa(t0, sp, a0, kSystemPointerSizeLog2);
1864 __ Sd(scratch, MemOperand(t0));
1865 // Recover arguments count.
1866 __ Dsubu(a0, a0, Operand(return_value_offset));
1867 }
1868
1869 __ Ld(fp, MemOperand(
1871 // Load builtin index (stored as a Smi) and use it to get the builtin start
1872 // address from the builtins table.
1873 __ Pop(t0);
1874 __ Daddu(sp, sp,
1876 __ Pop(ra);
1877 __ LoadEntryFromBuiltinIndex(t0, t0);
1878 __ Jump(t0);
1879}
1880} // namespace
1881
1882void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
1883 Generate_ContinueToBuiltinHelper(masm, false, false);
1884}
1885
1886void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
1887 MacroAssembler* masm) {
1888 Generate_ContinueToBuiltinHelper(masm, false, true);
1889}
1890
1891void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
1892 Generate_ContinueToBuiltinHelper(masm, true, false);
1893}
1894
1895void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
1896 MacroAssembler* masm) {
1897 Generate_ContinueToBuiltinHelper(masm, true, true);
1898}
1899
1900void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
1901 {
1902 FrameScope scope(masm, StackFrame::INTERNAL);
1903 __ CallRuntime(Runtime::kNotifyDeoptimized);
1904 }
1905
1907 __ Ld(v0, MemOperand(sp, 0 * kSystemPointerSize));
1908 __ Ret(USE_DELAY_SLOT);
1909  // Safe to fill the delay slot; Daddu will emit one instruction.
1910 __ Daddu(sp, sp, Operand(1 * kSystemPointerSize)); // Remove state.
1911}
1912
1913namespace {
1914
1915void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
1916 Operand offset = Operand(zero_reg)) {
1917 __ Daddu(ra, entry_address, offset);
1918 // And "return" to the OSR entry point of the function.
1919 __ Ret();
1920}
1921
1922enum class OsrSourceTier {
1923 kInterpreter,
1924 kBaseline,
1925};
1926
1927void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
1928 Register maybe_target_code,
1929 Register expected_param_count) {
1930 Label jump_to_optimized_code;
1931 {
1932 // If maybe_target_code is not null, no need to call into runtime. A
1933 // precondition here is: if maybe_target_code is an InstructionStream
1934 // object, it must NOT be marked_for_deoptimization (callers must ensure
1935 // this).
1936 __ Branch(&jump_to_optimized_code, ne, maybe_target_code,
1937 Operand(Smi::zero()));
1938 }
1939
1940 ASM_CODE_COMMENT(masm);
1941 {
1942 FrameScope scope(masm, StackFrame::INTERNAL);
1943 __ Push(expected_param_count);
1944 __ CallRuntime(Runtime::kCompileOptimizedOSR);
1945 __ Pop(expected_param_count);
1946 }
1947
1948 // If the code object is null, just return to the caller.
1949 __ Ret(eq, maybe_target_code, Operand(Smi::zero()));
1950 __ bind(&jump_to_optimized_code);
1951
1952 const Register scratch(a2);
1953 CHECK(!AreAliased(maybe_target_code, expected_param_count, scratch));
1954 // OSR entry tracing.
1955 {
1956 Label next;
1957 __ li(scratch, ExternalReference::address_of_log_or_trace_osr());
1958 __ Lbu(scratch, MemOperand(scratch));
1959 __ Branch(&next, eq, scratch, Operand(zero_reg));
1960
1961 {
1962 FrameScope scope(masm, StackFrame::INTERNAL);
1963 __ Push(maybe_target_code, expected_param_count);
1964 __ CallRuntime(Runtime::kLogOrTraceOptimizedOSREntry, 0);
1965 __ Pop(maybe_target_code, expected_param_count);
1966 }
1967
1968 __ bind(&next);
1969 }
1970
1971 if (source == OsrSourceTier::kInterpreter) {
1972    // Drop the handler frame that is sitting on top of the actual
1973    // JavaScript frame. This is the case when OSR is triggered from bytecode.
1974 __ LeaveFrame(StackFrame::STUB);
1975 }
1976
1977  // Check that we are actually jumping to an OSR code object. Among other
1978  // things, this ensures that the object contains deoptimization data below.
1979 __ Lwu(scratch, FieldMemOperand(maybe_target_code, Code::kOsrOffsetOffset));
1980 __ Check(ne, AbortReason::kExpectedOsrCode, scratch,
1981 Operand(BytecodeOffset::None().ToInt()));
1982
1983 // Check the target has a matching parameter count. This ensures that the OSR
1984 // code will correctly tear down our frame when leaving.
1985 __ Lhu(scratch,
1986 FieldMemOperand(maybe_target_code, Code::kParameterCountOffset));
1987 __ SmiUntag(expected_param_count);
1988 __ Check(eq, AbortReason::kOsrUnexpectedStackSize, scratch,
1989 Operand(expected_param_count));
1990
1991 // Load deoptimization data from the code object.
1992 // <deopt_data> = <code>[#deoptimization_data_offset]
1993 __ Ld(scratch, MemOperand(maybe_target_code,
1994 Code::kDeoptimizationDataOrInterpreterDataOffset -
1996
1997 // Load the OSR entrypoint offset from the deoptimization data.
1998 // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
1999 __ SmiUntag(scratch,
2003
2004 __ LoadCodeInstructionStart(maybe_target_code, maybe_target_code,
2006
2007 // Compute the target address = code_entry + osr_offset
2008 // <entry_addr> = <code_entry> + <osr_offset>
2009 Generate_OSREntry(masm, maybe_target_code, Operand(scratch));
2010}
2011} // namespace
2012
2013void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
2014 using D = OnStackReplacementDescriptor;
2015 static_assert(D::kParameterCount == 2);
2016 OnStackReplacement(masm, OsrSourceTier::kInterpreter,
2017 D::MaybeTargetCodeRegister(),
2018 D::ExpectedParameterCountRegister());
2019}
2020
2021void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
2022 using D = OnStackReplacementDescriptor;
2023 static_assert(D::kParameterCount == 2);
2024
2027 OnStackReplacement(masm, OsrSourceTier::kBaseline,
2028 D::MaybeTargetCodeRegister(),
2029 D::ExpectedParameterCountRegister());
2030}
2031
2032// static
2033void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
2034 // ----------- S t a t e -------------
2035 // -- a0 : argc
2036 // -- sp[0] : receiver
2037 // -- sp[4] : thisArg
2038 // -- sp[8] : argArray
2039 // -----------------------------------
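  // JS-level behaviour being implemented, as an informal sketch of
  // Function.prototype.apply (not the normative spec steps):
  //
  //   function apply(thisArg, argArray) {
  //     if (argArray === undefined || argArray === null) {
  //       return Call(this, thisArg);
  //     }
  //     return Call(this, thisArg, ...CreateListFromArrayLike(argArray));
  //   }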
2040
2041 Register argc = a0;
2042 Register arg_array = a2;
2043 Register receiver = a1;
2044 Register this_arg = a5;
2045 Register undefined_value = a3;
2046 Register scratch = a4;
2047
2048 __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
2049
2050 // 1. Load receiver into a1, argArray into a2 (if present), remove all
2051 // arguments from the stack (including the receiver), and push thisArg (if
2052 // present) instead.
2053 {
2054 __ Dsubu(scratch, argc, JSParameterCount(0));
2056 __ Ld(arg_array, MemOperand(sp, 2 * kSystemPointerSize));
2057 __ Movz(arg_array, undefined_value, scratch); // if argc == 0
2058 __ Movz(this_arg, undefined_value, scratch); // if argc == 0
2059 __ Dsubu(scratch, scratch, Operand(1));
2060 __ Movz(arg_array, undefined_value, scratch); // if argc == 1
2061 __ Ld(receiver, MemOperand(sp));
2062 __ DropArgumentsAndPushNewReceiver(argc, this_arg);
2063 }
2064
2065 // ----------- S t a t e -------------
2066 // -- a2 : argArray
2067 // -- a1 : receiver
2068 // -- a3 : undefined root value
2069 // -- sp[0] : thisArg
2070 // -----------------------------------
2071
2072 // 2. We don't need to check explicitly for callable receiver here,
2073 // since that's the first thing the Call/CallWithArrayLike builtins
2074 // will do.
2075
2076 // 3. Tail call with no arguments if argArray is null or undefined.
2077 Label no_arguments;
2078 __ JumpIfRoot(arg_array, RootIndex::kNullValue, &no_arguments);
2079 __ Branch(&no_arguments, eq, arg_array, Operand(undefined_value));
2080
2081 // 4a. Apply the receiver to the given argArray.
2082 __ TailCallBuiltin(Builtin::kCallWithArrayLike);
2083
2084 // 4b. The argArray is either null or undefined, so we tail call without any
2085 // arguments to the receiver.
2086 __ bind(&no_arguments);
2087 {
2088 __ li(a0, JSParameterCount(0));
2089 DCHECK(receiver == a1);
2090 __ TailCallBuiltin(Builtins::Call());
2091 }
2092}
2093
2094// static
2095void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
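  // Informal sketch of Function.prototype.call (not the normative steps):
  //
  //   function call(thisArg, ...args) {
  //     return Call(this, thisArg, ...args);
  //   }
  //
  // The code below only needs to pop the callable, guarantee a receiver slot,
  // and fix up the argument count before tail-calling the Call builtin.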
2096 // 1. Get the callable to call (passed as receiver) from the stack.
2097 {
2098 __ Pop(a1);
2099 }
2100
2101 // 2. Make sure we have at least one argument.
2102 // a0: actual number of arguments
2103 {
2104 Label done;
2105 __ Branch(&done, ne, a0, Operand(JSParameterCount(0)));
2106 __ PushRoot(RootIndex::kUndefinedValue);
2107 __ Daddu(a0, a0, Operand(1));
2108 __ bind(&done);
2109 }
2110
2111 // 3. Adjust the actual number of arguments.
2112 __ daddiu(a0, a0, -1);
2113
2114 // 4. Call the callable.
2115 __ TailCallBuiltin(Builtins::Call());
2116}
2117
2118void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
2119 // ----------- S t a t e -------------
2120 // -- a0 : argc
2121 // -- sp[0] : receiver
2122 // -- sp[8] : target (if argc >= 1)
2123 // -- sp[16] : thisArgument (if argc >= 2)
2124 // -- sp[24] : argumentsList (if argc == 3)
2125 // -----------------------------------
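  // Informal sketch of Reflect.apply (illustrative only):
  //
  //   function apply(target, thisArgument, argumentsList) {
  //     return Call(target, thisArgument,
  //                 ...CreateListFromArrayLike(argumentsList));
  //   }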
2126
2127 Register argc = a0;
2128 Register arguments_list = a2;
2129 Register target = a1;
2130 Register this_argument = a5;
2131 Register undefined_value = a3;
2132 Register scratch = a4;
2133
2134 __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
2135
2136 // 1. Load target into a1 (if present), argumentsList into a2 (if present),
2137 // remove all arguments from the stack (including the receiver), and push
2138 // thisArgument (if present) instead.
2139 {
2140    // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
2141    // consistent state for a simple pop operation.
2142
2143 __ Dsubu(scratch, argc, Operand(JSParameterCount(0)));
2144 __ Ld(target, MemOperand(sp, kSystemPointerSize));
2145 __ Ld(this_argument, MemOperand(sp, 2 * kSystemPointerSize));
2146 __ Ld(arguments_list, MemOperand(sp, 3 * kSystemPointerSize));
2147 __ Movz(arguments_list, undefined_value, scratch); // if argc == 0
2148 __ Movz(this_argument, undefined_value, scratch); // if argc == 0
2149 __ Movz(target, undefined_value, scratch); // if argc == 0
2150 __ Dsubu(scratch, scratch, Operand(1));
2151 __ Movz(arguments_list, undefined_value, scratch); // if argc == 1
2152 __ Movz(this_argument, undefined_value, scratch); // if argc == 1
2153 __ Dsubu(scratch, scratch, Operand(1));
2154 __ Movz(arguments_list, undefined_value, scratch); // if argc == 2
2155
2156 __ DropArgumentsAndPushNewReceiver(argc, this_argument);
2157 }
2158
2159 // ----------- S t a t e -------------
2160 // -- a2 : argumentsList
2161 // -- a1 : target
2162 // -- a3 : undefined root value
2163 // -- sp[0] : thisArgument
2164 // -----------------------------------
2165
2166 // 2. We don't need to check explicitly for callable target here,
2167 // since that's the first thing the Call/CallWithArrayLike builtins
2168 // will do.
2169
2170 // 3. Apply the target to the given argumentsList.
2171 __ TailCallBuiltin(Builtin::kCallWithArrayLike);
2172}
2173
2174void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
2175 // ----------- S t a t e -------------
2176 // -- a0 : argc
2177 // -- sp[0] : receiver
2178 // -- sp[8] : target
2179 // -- sp[16] : argumentsList
2180 // -- sp[24] : new.target (optional)
2181 // -----------------------------------
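  // Informal sketch of Reflect.construct (illustrative only):
  //
  //   function construct(target, argumentsList, newTarget = target) {
  //     return Construct(target,
  //                      ...CreateListFromArrayLike(argumentsList),
  //                      newTarget);
  //   }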
2182
2183 Register argc = a0;
2184 Register arguments_list = a2;
2185 Register target = a1;
2186 Register new_target = a3;
2187 Register undefined_value = a4;
2188 Register scratch = a5;
2189
2190 __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
2191
2192 // 1. Load target into a1 (if present), argumentsList into a2 (if present),
2193 // new.target into a3 (if present, otherwise use target), remove all
2194 // arguments from the stack (including the receiver), and push thisArgument
2195 // (if present) instead.
2196 {
2197    // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
2198    // consistent state for a simple pop operation.
2199
2200 __ Dsubu(scratch, argc, Operand(JSParameterCount(0)));
2201 __ Ld(target, MemOperand(sp, kSystemPointerSize));
2202 __ Ld(arguments_list, MemOperand(sp, 2 * kSystemPointerSize));
2204 __ Movz(arguments_list, undefined_value, scratch); // if argc == 0
2205 __ Movz(new_target, undefined_value, scratch); // if argc == 0
2206 __ Movz(target, undefined_value, scratch); // if argc == 0
2207 __ Dsubu(scratch, scratch, Operand(1));
2208 __ Movz(arguments_list, undefined_value, scratch); // if argc == 1
2209 __ Movz(new_target, target, scratch); // if argc == 1
2210 __ Dsubu(scratch, scratch, Operand(1));
2211 __ Movz(new_target, target, scratch); // if argc == 2
2212
2213 __ DropArgumentsAndPushNewReceiver(argc, undefined_value);
2214 }
2215
2216 // ----------- S t a t e -------------
2217 // -- a2 : argumentsList
2218 // -- a1 : target
2219 // -- a3 : new.target
2220 // -- sp[0] : receiver (undefined)
2221 // -----------------------------------
2222
2223 // 2. We don't need to check explicitly for constructor target here,
2224 // since that's the first thing the Construct/ConstructWithArrayLike
2225 // builtins will do.
2226
2227 // 3. We don't need to check explicitly for constructor new.target here,
2228 // since that's the second thing the Construct/ConstructWithArrayLike
2229 // builtins will do.
2230
2231 // 4. Construct the target with the given new.target and argumentsList.
2232 __ TailCallBuiltin(Builtin::kConstructWithArrayLike);
2233}
2234
2235namespace {
2236
2237// Allocate new stack space for |count| arguments and shift all existing
2238// arguments already on the stack. |pointer_to_new_space_out| points to the
2239// first free slot on the stack to copy additional arguments to and
2240// |argc_in_out| is updated to include |count|.
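// A rough C-level equivalent of the shuffle performed below (sketch only;
// the real code works in registers and copies from low to high addresses):
//
//   old_sp = sp;
//   sp    -= count * kSystemPointerSize;
//   memmove(sp, old_sp, argc_in_out * kSystemPointerSize);
//   pointer_to_new_space_out = sp + argc_in_out * kSystemPointerSize;
//   argc_in_out += count;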
2241void Generate_AllocateSpaceAndShiftExistingArguments(
2242 MacroAssembler* masm, Register count, Register argc_in_out,
2243 Register pointer_to_new_space_out, Register scratch1, Register scratch2,
2244 Register scratch3) {
2245 DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1,
2246 scratch2));
2247 Register old_sp = scratch1;
2248 Register new_space = scratch2;
2249 __ mov(old_sp, sp);
2250 __ dsll(new_space, count, kSystemPointerSizeLog2);
2251 __ Dsubu(sp, sp, Operand(new_space));
2252
2253 Register end = scratch2;
2254 Register value = scratch3;
2255 Register dest = pointer_to_new_space_out;
2256 __ mov(dest, sp);
2257 __ Dlsa(end, old_sp, argc_in_out, kSystemPointerSizeLog2);
2258 Label loop, done;
2259 __ Branch(&done, ge, old_sp, Operand(end));
2260 __ bind(&loop);
2261 __ Ld(value, MemOperand(old_sp, 0));
2262 __ Sd(value, MemOperand(dest, 0));
2263 __ Daddu(old_sp, old_sp, Operand(kSystemPointerSize));
2264 __ Daddu(dest, dest, Operand(kSystemPointerSize));
2265 __ Branch(&loop, lt, old_sp, Operand(end));
2266 __ bind(&done);
2267
2268 // Update total number of arguments.
2269 __ Daddu(argc_in_out, argc_in_out, count);
2270}
2271
2272} // namespace
2273
2274// static
2275void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
2276 Builtin target_builtin) {
2277 // ----------- S t a t e -------------
2278 // -- a1 : target
2279 // -- a0 : number of parameters on the stack
2280 // -- a2 : arguments list (a FixedArray)
2281 // -- a4 : len (number of elements to push from args)
2282 // -- a3 : new.target (for [[Construct]])
2283 // -----------------------------------
2284 if (v8_flags.debug_code) {
2285 // Allow a2 to be a FixedArray, or a FixedDoubleArray if a4 == 0.
2286 Label ok, fail;
2287 __ AssertNotSmi(a2);
2288 __ GetObjectType(a2, t8, t8);
2289 __ Branch(&ok, eq, t8, Operand(FIXED_ARRAY_TYPE));
2290 __ Branch(&fail, ne, t8, Operand(FIXED_DOUBLE_ARRAY_TYPE));
2291 __ Branch(&ok, eq, a4, Operand(zero_reg));
2292 // Fall through.
2293 __ bind(&fail);
2294 __ Abort(AbortReason::kOperandIsNotAFixedArray);
2295
2296 __ bind(&ok);
2297 }
2298
2299 Register args = a2;
2300 Register len = a4;
2301
2302 // Check for stack overflow.
2303 Label stack_overflow;
2304 __ StackOverflowCheck(len, kScratchReg, a5, &stack_overflow);
2305
2306 // Move the arguments already in the stack,
2307 // including the receiver and the return address.
2308 // a4: Number of arguments to make room for.
2309 // a0: Number of arguments already on the stack.
2310 // a7: Points to first free slot on the stack after arguments were shifted.
2311 Generate_AllocateSpaceAndShiftExistingArguments(masm, a4, a0, a7, a6, t0, t1);
2312
2313 // Push arguments onto the stack (thisArgument is already on the stack).
2314 {
2315 Label done, push, loop;
2316 Register src = a6;
2317 Register scratch = len;
2318
2319 __ daddiu(src, args, OFFSET_OF_DATA_START(FixedArray) - kHeapObjectTag);
2320 __ Branch(&done, eq, len, Operand(zero_reg), i::USE_DELAY_SLOT);
2321 __ dsll(scratch, len, kSystemPointerSizeLog2);
2322 __ Dsubu(scratch, sp, Operand(scratch));
2323 __ LoadRoot(t1, RootIndex::kTheHoleValue);
2324 __ bind(&loop);
2325 __ Ld(a5, MemOperand(src));
2326 __ daddiu(src, src, kSystemPointerSize);
2327 __ Branch(&push, ne, a5, Operand(t1));
2328 __ LoadRoot(a5, RootIndex::kUndefinedValue);
2329 __ bind(&push);
2330 __ Sd(a5, MemOperand(a7, 0));
2331 __ Daddu(a7, a7, Operand(kSystemPointerSize));
2332 __ Daddu(scratch, scratch, Operand(kSystemPointerSize));
2333 __ Branch(&loop, ne, scratch, Operand(sp));
2334 __ bind(&done);
2335 }
2336
2337 // Tail-call to the actual Call or Construct builtin.
2338 __ TailCallBuiltin(target_builtin);
2339
2340 __ bind(&stack_overflow);
2341 __ TailCallRuntime(Runtime::kThrowStackOverflow);
2342}
2343
2344// static
2345void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
2346 CallOrConstructMode mode,
2347 Builtin target_builtin) {
2348 // ----------- S t a t e -------------
2349 // -- a0 : the number of arguments
2350 // -- a3 : the new.target (for [[Construct]] calls)
2351 // -- a1 : the target to call (can be any Object)
2352 // -- a2 : start index (to support rest parameters)
2353 // -----------------------------------
2354
2355 // Check if new.target has a [[Construct]] internal method.
2356 if (mode == CallOrConstructMode::kConstruct) {
2357 Label new_target_constructor, new_target_not_constructor;
2358 __ JumpIfSmi(a3, &new_target_not_constructor);
2360 __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
2361 __ And(t1, t1, Operand(Map::Bits1::IsConstructorBit::kMask));
2362 __ Branch(&new_target_constructor, ne, t1, Operand(zero_reg));
2363 __ bind(&new_target_not_constructor);
2364 {
2365 FrameScope scope(masm, StackFrame::MANUAL);
2366 __ EnterFrame(StackFrame::INTERNAL);
2367 __ Push(a3);
2368 __ CallRuntime(Runtime::kThrowNotConstructor);
2369 }
2370 __ bind(&new_target_constructor);
2371 }
2372
2373 Label stack_done, stack_overflow;
2375 __ Dsubu(a7, a7, Operand(kJSArgcReceiverSlots));
2376 __ Dsubu(a7, a7, a2);
2377 __ Branch(&stack_done, le, a7, Operand(zero_reg));
2378 {
2379 // Check for stack overflow.
2380 __ StackOverflowCheck(a7, a4, a5, &stack_overflow);
2381
2382 // Forward the arguments from the caller frame.
2383
2384 // Point to the first argument to copy (skipping the receiver).
2385 __ Daddu(a6, fp,
2388 __ Dlsa(a6, a6, a2, kSystemPointerSizeLog2);
2389
2390 // Move the arguments already in the stack,
2391 // including the receiver and the return address.
2392 // a7: Number of arguments to make room for.
2393 // a0: Number of arguments already on the stack.
2394 // a2: Points to first free slot on the stack after arguments were shifted.
2395 Generate_AllocateSpaceAndShiftExistingArguments(masm, a7, a0, a2, t0, t1,
2396 t2);
2397
2398 // Copy arguments from the caller frame.
2399 // TODO(victorgomes): Consider using forward order as potentially more cache
2400 // friendly.
2401 {
2402 Label loop;
2403 __ bind(&loop);
2404 {
2405 __ Subu(a7, a7, Operand(1));
2406 __ Dlsa(t0, a6, a7, kSystemPointerSizeLog2);
2407 __ Ld(kScratchReg, MemOperand(t0));
2408 __ Dlsa(t0, a2, a7, kSystemPointerSizeLog2);
2409 __ Sd(kScratchReg, MemOperand(t0));
2410 __ Branch(&loop, ne, a7, Operand(zero_reg));
2411 }
2412 }
2413 }
2414 __ bind(&stack_done);
2415 // Tail-call to the actual Call or Construct builtin.
2416 __ TailCallBuiltin(target_builtin);
2417
2418 __ bind(&stack_overflow);
2419 __ TailCallRuntime(Runtime::kThrowStackOverflow);
2420}
2421
2422// static
2423void Builtins::Generate_CallFunction(MacroAssembler* masm,
2424 ConvertReceiverMode mode) {
2425 // ----------- S t a t e -------------
2426 // -- a0 : the number of arguments
2427 // -- a1 : the function to call (checked to be a JSFunction)
2428 // -----------------------------------
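  // Receiver conversion rule implemented below (sketch): for non-native,
  // sloppy-mode callees a primitive receiver is boxed via ToObject and
  // null/undefined become the global proxy; strict-mode and native functions
  // see the receiver unchanged. For example:
  //
  //   function f() { return this; }
  //   f.call(42);         // sloppy: a Number wrapper object
  //   f.call(undefined);  // sloppy: the global proxy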
2429 __ AssertCallableFunction(a1);
2430
2431 __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2432
2433 // Enter the context of the function; ToObject has to run in the function
2434 // context, and we also need to take the global proxy from the function
2435 // context in case of conversion.
2436 __ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
2437 // We need to convert the receiver for non-native sloppy mode functions.
2438 Label done_convert;
2439 __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
2440 __ And(kScratchReg, a3,
2441 Operand(SharedFunctionInfo::IsNativeBit::kMask |
2442 SharedFunctionInfo::IsStrictBit::kMask));
2443 __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg));
2444 {
2445 // ----------- S t a t e -------------
2446 // -- a0 : the number of arguments
2447 // -- a1 : the function to call (checked to be a JSFunction)
2448 // -- a2 : the shared function info.
2449 // -- cp : the function context.
2450 // -----------------------------------
2451
2453 // Patch receiver to global proxy.
2454 __ LoadGlobalProxy(a3);
2455 } else {
2456 Label convert_to_object, convert_receiver;
2457 __ LoadReceiver(a3);
2458 __ JumpIfSmi(a3, &convert_to_object);
2459 static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
2460 __ GetObjectType(a3, a4, a4);
2461 __ Branch(&done_convert, hs, a4, Operand(FIRST_JS_RECEIVER_TYPE));
2463 Label convert_global_proxy;
2464 __ JumpIfRoot(a3, RootIndex::kUndefinedValue, &convert_global_proxy);
2465 __ JumpIfNotRoot(a3, RootIndex::kNullValue, &convert_to_object);
2466 __ bind(&convert_global_proxy);
2467 {
2468 // Patch receiver to global proxy.
2469 __ LoadGlobalProxy(a3);
2470 }
2471 __ Branch(&convert_receiver);
2472 }
2473 __ bind(&convert_to_object);
2474 {
2475 // Convert receiver using ToObject.
2476 // TODO(bmeurer): Inline the allocation here to avoid building the frame
2477 // in the fast case? (fall back to AllocateInNewSpace?)
2478 FrameScope scope(masm, StackFrame::INTERNAL);
2479 __ SmiTag(a0);
2480 __ Push(a0, a1);
2481 __ mov(a0, a3);
2482 __ Push(cp);
2483 __ CallBuiltin(Builtin::kToObject);
2484 __ Pop(cp);
2485 __ mov(a3, v0);
2486 __ Pop(a0, a1);
2487 __ SmiUntag(a0);
2488 }
2489 __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2490 __ bind(&convert_receiver);
2491 }
2492 __ StoreReceiver(a3);
2493 }
2494 __ bind(&done_convert);
2495
2496 // ----------- S t a t e -------------
2497 // -- a0 : the number of arguments
2498 // -- a1 : the function to call (checked to be a JSFunction)
2499 // -- a2 : the shared function info.
2500 // -- cp : the function context.
2501 // -----------------------------------
2502#ifdef V8_ENABLE_LEAPTIERING
2503 __ InvokeFunctionCode(a1, no_reg, a0, InvokeType::kJump);
2504#else
2505 __ Lhu(a2,
2506 FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
2507 __ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump);
2508#endif
2509}
2510
2511// static
2512void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
2513 // ----------- S t a t e -------------
2514 // -- a0 : the number of arguments
2515 // -- a1 : the function to call (checked to be a JSBoundFunction)
2516 // -----------------------------------
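  // What a bound-function call amounts to (illustrative sketch):
  //
  //   f.bind(boundThis, ...boundArgs)(...callArgs)
  //     === Call(f, boundThis, ...boundArgs, ...callArgs)
  //
  // so the code below patches the receiver, splices [[BoundArguments]] in
  // front of the pushed arguments, and tail-calls Call on
  // [[BoundTargetFunction]].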
2517 __ AssertBoundFunction(a1);
2518
2519 // Patch the receiver to [[BoundThis]].
2520 {
2521 __ Ld(t0, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
2522 __ StoreReceiver(t0);
2523 }
2524
2525 // Load [[BoundArguments]] into a2 and length of that into a4.
2526 __ Ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
2527 __ SmiUntag(a4, FieldMemOperand(a2, offsetof(FixedArray, length_)));
2528
2529 // ----------- S t a t e -------------
2530 // -- a0 : the number of arguments
2531 // -- a1 : the function to call (checked to be a JSBoundFunction)
2532 // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
2533 // -- a4 : the number of [[BoundArguments]]
2534 // -----------------------------------
2535
2536 // Reserve stack space for the [[BoundArguments]].
2537 {
2538 Label done;
2539 __ dsll(a5, a4, kSystemPointerSizeLog2);
2540 __ Dsubu(t0, sp, Operand(a5));
2541 // Check the stack for overflow. We are not trying to catch interruptions
2542 // (i.e. debug break and preemption) here, so check the "real stack limit".
2543 __ LoadStackLimit(kScratchReg,
2544 MacroAssembler::StackLimitKind::kRealStackLimit);
2545 __ Branch(&done, hs, t0, Operand(kScratchReg));
2546 {
2547 FrameScope scope(masm, StackFrame::MANUAL);
2548 __ EnterFrame(StackFrame::INTERNAL);
2549 __ CallRuntime(Runtime::kThrowStackOverflow);
2550 }
2551 __ bind(&done);
2552 }
2553
2554 // Pop receiver.
2555 __ Pop(t0);
2556
2557 // Push [[BoundArguments]].
2558 {
2559 Label loop, done_loop;
2560 __ SmiUntag(a4, FieldMemOperand(a2, offsetof(FixedArray, length_)));
2561 __ Daddu(a0, a0, Operand(a4));
2562 __ Daddu(a2, a2,
2563 Operand(OFFSET_OF_DATA_START(FixedArray) - kHeapObjectTag));
2564 __ bind(&loop);
2565 __ Dsubu(a4, a4, Operand(1));
2566 __ Branch(&done_loop, lt, a4, Operand(zero_reg));
2567 __ Dlsa(a5, a2, a4, kSystemPointerSizeLog2);
2568 __ Ld(kScratchReg, MemOperand(a5));
2569 __ Push(kScratchReg);
2570 __ Branch(&loop);
2571 __ bind(&done_loop);
2572 }
2573
2574 // Push receiver.
2575 __ Push(t0);
2576
2577 // Call the [[BoundTargetFunction]] via the Call builtin.
2578 __ Ld(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2579 __ TailCallBuiltin(Builtins::Call());
2580}
2581
2582// static
2583void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
2584 // ----------- S t a t e -------------
2585 // -- a0 : the number of arguments
2586 // -- a1 : the target to call (can be any Object).
2587 // -----------------------------------
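  // Dispatch summary for the checks below (sketch):
  //   callable JSFunction   -> CallFunction(mode)
  //   JSBoundFunction       -> CallBoundFunction
  //   JSProxy               -> CallProxy
  //   JSWrappedFunction     -> CallWrappedFunction
  //   class constructor     -> throw (cannot be called without new)
  //   other callable object -> CALL_AS_FUNCTION_DELEGATE
  //   everything else       -> ThrowCalledNonCallable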
2588 Register target = a1;
2589 Register map = t1;
2590 Register instance_type = t2;
2591 Register scratch = t8;
2592 DCHECK(!AreAliased(a0, target, map, instance_type, scratch));
2593
2594 Label non_callable, class_constructor;
2595 __ JumpIfSmi(target, &non_callable);
2596 __ LoadMap(map, target);
2597 __ GetInstanceTypeRange(map, instance_type, FIRST_CALLABLE_JS_FUNCTION_TYPE,
2598 scratch);
2599 __ TailCallBuiltin(Builtins::CallFunction(mode), ls, scratch,
2602 __ TailCallBuiltin(Builtin::kCallBoundFunction, eq, instance_type,
2603 Operand(JS_BOUND_FUNCTION_TYPE));
2604
2605 // Check if target has a [[Call]] internal method.
2606 {
2607 Register flags = t1;
2608 __ Lbu(flags, FieldMemOperand(map, Map::kBitFieldOffset));
2609 map = no_reg;
2610 __ And(flags, flags, Operand(Map::Bits1::IsCallableBit::kMask));
2611 __ Branch(&non_callable, eq, flags, Operand(zero_reg));
2612 }
2613
2614 __ TailCallBuiltin(Builtin::kCallProxy, eq, instance_type,
2615 Operand(JS_PROXY_TYPE));
2616
2617  // Check if the target is a wrapped function and dispatch to the
2618  // CallWrappedFunction builtin.
2619 __ TailCallBuiltin(Builtin::kCallWrappedFunction, eq, instance_type,
2620 Operand(JS_WRAPPED_FUNCTION_TYPE));
2621
2622 // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
2623 // Check that the function is not a "classConstructor".
2624 __ Branch(&class_constructor, eq, instance_type,
2625 Operand(JS_CLASS_CONSTRUCTOR_TYPE));
2626
2627 // 2. Call to something else, which might have a [[Call]] internal method (if
2628 // not we raise an exception).
2629 // Overwrite the original receiver with the (original) target.
2630 __ StoreReceiver(target);
2631 // Let the "call_as_function_delegate" take care of the rest.
2632 __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
2633 __ TailCallBuiltin(
2635
2636 // 3. Call to something that is not callable.
2637 __ bind(&non_callable);
2638 {
2639 FrameScope scope(masm, StackFrame::INTERNAL);
2640 __ Push(target);
2641 __ CallRuntime(Runtime::kThrowCalledNonCallable);
2642 }
2643
2644 // 4. The function is a "classConstructor", need to raise an exception.
2645 __ bind(&class_constructor);
2646 {
2647 FrameScope frame(masm, StackFrame::INTERNAL);
2648 __ Push(target);
2649 __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
2650 }
2651}
2652
2653void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
2654 // ----------- S t a t e -------------
2655 // -- a0 : the number of arguments
2656 // -- a1 : the constructor to call (checked to be a JSFunction)
2657 // -- a3 : the new target (checked to be a constructor)
2658 // -----------------------------------
2659 __ AssertConstructor(a1);
2660 __ AssertFunction(a1);
2661
2662 // Calling convention for function specific ConstructStubs require
2663 // a2 to contain either an AllocationSite or undefined.
2664 __ LoadRoot(a2, RootIndex::kUndefinedValue);
2665
2666 Label call_generic_stub;
2667
2668 // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
2669 __ Ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2670 __ lwu(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset));
2671 __ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
2672 __ Branch(&call_generic_stub, eq, a4, Operand(zero_reg));
2673
2674 __ TailCallBuiltin(Builtin::kJSBuiltinsConstructStub);
2675
2676 __ bind(&call_generic_stub);
2677 __ TailCallBuiltin(Builtin::kJSConstructStubGeneric);
2678}
2679
2680// static
2681void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
2682 // ----------- S t a t e -------------
2683 // -- a0 : the number of arguments
2684 // -- a1 : the function to call (checked to be a JSBoundFunction)
2685 // -- a3 : the new target (checked to be a constructor)
2686 // -----------------------------------
2687 __ AssertConstructor(a1);
2688 __ AssertBoundFunction(a1);
2689
2690 // Load [[BoundArguments]] into a2 and length of that into a4.
2691 __ Ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
2692 __ SmiUntag(a4, FieldMemOperand(a2, offsetof(FixedArray, length_)));
2693
2694 // ----------- S t a t e -------------
2695 // -- a0 : the number of arguments
2696 // -- a1 : the function to call (checked to be a JSBoundFunction)
2697 // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
2698 // -- a3 : the new target (checked to be a constructor)
2699 // -- a4 : the number of [[BoundArguments]]
2700 // -----------------------------------
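  // Same [[BoundArguments]] splicing as the bound-function [[Call]] path
  // above, plus (sketch):
  //   new.target = (new.target === bound_function) ? [[BoundTargetFunction]]
  //                                                : new.target;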
2701
2702 // Reserve stack space for the [[BoundArguments]].
2703 {
2704 Label done;
2705 __ dsll(a5, a4, kSystemPointerSizeLog2);
2706 __ Dsubu(t0, sp, Operand(a5));
2707 // Check the stack for overflow. We are not trying to catch interruptions
2708 // (i.e. debug break and preemption) here, so check the "real stack limit".
2709 __ LoadStackLimit(kScratchReg,
2710 MacroAssembler::StackLimitKind::kRealStackLimit);
2711 __ Branch(&done, hs, t0, Operand(kScratchReg));
2712 {
2713 FrameScope scope(masm, StackFrame::MANUAL);
2714 __ EnterFrame(StackFrame::INTERNAL);
2715 __ CallRuntime(Runtime::kThrowStackOverflow);
2716 }
2717 __ bind(&done);
2718 }
2719
2720 // Pop receiver.
2721 __ Pop(t0);
2722
2723 // Push [[BoundArguments]].
2724 {
2725 Label loop, done_loop;
2726 __ SmiUntag(a4, FieldMemOperand(a2, offsetof(FixedArray, length_)));
2727 __ Daddu(a0, a0, Operand(a4));
2728 __ Daddu(a2, a2,
2729 Operand(OFFSET_OF_DATA_START(FixedArray) - kHeapObjectTag));
2730 __ bind(&loop);
2731 __ Dsubu(a4, a4, Operand(1));
2732 __ Branch(&done_loop, lt, a4, Operand(zero_reg));
2733 __ Dlsa(a5, a2, a4, kSystemPointerSizeLog2);
2734 __ Ld(kScratchReg, MemOperand(a5));
2735 __ Push(kScratchReg);
2736 __ Branch(&loop);
2737 __ bind(&done_loop);
2738 }
2739
2740 // Push receiver.
2741 __ Push(t0);
2742
2743 // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
2744 {
2745 Label skip_load;
2746 __ Branch(&skip_load, ne, a1, Operand(a3));
2747 __ Ld(a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2748 __ bind(&skip_load);
2749 }
2750
2751 // Construct the [[BoundTargetFunction]] via the Construct builtin.
2752 __ Ld(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2753 __ TailCallBuiltin(Builtin::kConstruct);
2754}
2755
2756// static
2757void Builtins::Generate_Construct(MacroAssembler* masm) {
2758 // ----------- S t a t e -------------
2759 // -- a0 : the number of arguments
2760 // -- a1 : the constructor to call (can be any Object)
2761 // -- a3 : the new target (either the same as the constructor or
2762 // the JSFunction on which new was invoked initially)
2763 // -----------------------------------
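  // Dispatch summary for the checks below (sketch):
  //   JSFunction            -> ConstructFunction
  //   JSBoundFunction       -> ConstructBoundFunction
  //   JSProxy               -> ConstructProxy
  //   other constructor     -> CALL_AS_CONSTRUCTOR_DELEGATE
  //   Smi / non-constructor -> ConstructedNonConstructable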
2764
2765 Register target = a1;
2766 Register map = t1;
2767 Register instance_type = t2;
2768 Register scratch = t8;
2769 DCHECK(!AreAliased(a0, target, map, instance_type, scratch));
2770
2771 // Check if target is a Smi.
2772 Label non_constructor, non_proxy;
2773 __ JumpIfSmi(target, &non_constructor);
2774
2775 // Check if target has a [[Construct]] internal method.
2776 __ ld(map, FieldMemOperand(target, HeapObject::kMapOffset));
2777 {
2778 Register flags = t3;
2779 __ Lbu(flags, FieldMemOperand(map, Map::kBitFieldOffset));
2780 __ And(flags, flags, Operand(Map::Bits1::IsConstructorBit::kMask));
2781 __ Branch(&non_constructor, eq, flags, Operand(zero_reg));
2782 }
2783
2784 // Dispatch based on instance type.
2785 __ GetInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE, scratch);
2786 __ TailCallBuiltin(Builtin::kConstructFunction, ls, scratch,
2787 Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
2788
2789 // Only dispatch to bound functions after checking whether they are
2790 // constructors.
2791 __ TailCallBuiltin(Builtin::kConstructBoundFunction, eq, instance_type,
2792 Operand(JS_BOUND_FUNCTION_TYPE));
2793
2794 // Only dispatch to proxies after checking whether they are constructors.
2795 __ Branch(&non_proxy, ne, instance_type, Operand(JS_PROXY_TYPE));
2796 __ TailCallBuiltin(Builtin::kConstructProxy);
2797
2798 // Called Construct on an exotic Object with a [[Construct]] internal method.
2799 __ bind(&non_proxy);
2800 {
2801 // Overwrite the original receiver with the (original) target.
2802 __ StoreReceiver(target);
2803 // Let the "call_as_constructor_delegate" take care of the rest.
2804 __ LoadNativeContextSlot(target,
2805 Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
2806 __ TailCallBuiltin(Builtins::CallFunction());
2807 }
2808
2809 // Called Construct on an Object that doesn't have a [[Construct]] internal
2810 // method.
2811 __ bind(&non_constructor);
2812 __ TailCallBuiltin(Builtin::kConstructedNonConstructable);
2813}
2814
2815#if V8_ENABLE_WEBASSEMBLY
2816// Compute register lists for parameters to be saved. We save all parameter
2817// registers (see wasm-linkage.h). They might be overwritten in the runtime
2818// call below. We don't have any callee-saved registers in wasm, so no need to
2819// store anything else.
2820constexpr RegList kSavedGpRegs = ([]() constexpr {
2821 RegList saved_gp_regs;
2822 for (Register gp_param_reg : wasm::kGpParamRegisters) {
2823 saved_gp_regs.set(gp_param_reg);
2824 }
2825
2826 // The instance data has already been stored in the fixed part of the frame.
2827 saved_gp_regs.clear(kWasmImplicitArgRegister);
2828 // All set registers were unique.
2829 CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters) - 1);
2831 saved_gp_regs.Count());
2832 return saved_gp_regs;
2833})();
2834
2835constexpr DoubleRegList kSavedFpRegs = ([]() constexpr {
2836 DoubleRegList saved_fp_regs;
2837 for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
2838 saved_fp_regs.set(fp_param_reg);
2839 }
2840
2841 CHECK_EQ(saved_fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
2843 saved_fp_regs.Count());
2844 return saved_fp_regs;
2845})();
2846
2847// When entering this builtin, we have just created a Wasm stack frame:
2848//
2849// [ Wasm instance data ] <-- sp
2850// [ WASM frame marker ]
2851// [ saved fp ] <-- fp
2852//
2853// Add the feedback vector to the stack.
2854//
2855// [ feedback vector ] <-- sp
2856// [ Wasm instance data ]
2857// [ WASM frame marker ]
2858// [ saved fp ] <-- fp
2859void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
2860 Register func_index = wasm::kLiftoffFrameSetupFunctionReg;
2861 Register vector = t1;
2862 Register scratch = t2;
2863 Label allocate_vector, done;
2864
2865 __ Ld(vector,
2867 WasmTrustedInstanceData::kFeedbackVectorsOffset));
2868 __ Dlsa(vector, vector, func_index, kTaggedSizeLog2);
2869 __ Ld(vector, FieldMemOperand(vector, OFFSET_OF_DATA_START(FixedArray)));
2870 __ JumpIfSmi(vector, &allocate_vector);
2871 __ bind(&done);
2872 __ Push(vector);
2873 __ Ret();
2874
2875 __ bind(&allocate_vector);
2876 // Feedback vector doesn't exist yet. Call the runtime to allocate it.
2877 // We temporarily change the frame type for this, because we need special
2878 // handling by the stack walker in case of GC.
2879 __ li(scratch, StackFrame::TypeToMarker(StackFrame::WASM_LIFTOFF_SETUP));
2881
2882 // Save registers.
2883 __ MultiPush(kSavedGpRegs);
2884 __ MultiPushFPU(kSavedFpRegs);
2885 __ Push(ra);
2886
2887 // Arguments to the runtime function: instance data, func_index, and an
2888 // additional stack slot for the NativeModule.
2889 __ SmiTag(func_index);
2890 __ Push(kWasmImplicitArgRegister, func_index, zero_reg);
2891 __ Move(cp, Smi::zero());
2892 __ CallRuntime(Runtime::kWasmAllocateFeedbackVector, 3);
2893 __ mov(vector, kReturnRegister0);
2894
2895 // Restore registers and frame type.
2896 __ Pop(ra);
2897 __ MultiPopFPU(kSavedFpRegs);
2898 __ MultiPop(kSavedGpRegs);
2900 MemOperand(fp, WasmFrameConstants::kWasmInstanceDataOffset));
2901 __ li(scratch, StackFrame::TypeToMarker(StackFrame::WASM));
2903 __ Branch(&done);
2904}
2905
2906void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
2907 // The function index was put in t0 by the jump table trampoline.
2908 // Convert to Smi for the runtime call
2910
2911 {
2912 HardAbortScope hard_abort(masm); // Avoid calls to Abort.
2913 FrameScope scope(masm, StackFrame::INTERNAL);
2914
2915 // Save registers that we need to keep alive across the runtime call.
2917 __ MultiPush(kSavedGpRegs);
2918    // Check whether the machine supports SIMD; if so, push the vector
2919    // registers, otherwise push only the double registers.
2920 Label push_doubles, simd_pushed;
2921 __ li(a1, ExternalReference::supports_wasm_simd_128_address());
2922 // If > 0 then simd is available.
2923 __ Lbu(a1, MemOperand(a1));
2924 __ Branch(&push_doubles, le, a1, Operand(zero_reg));
2925 // Save vector registers.
2926 {
2927 CpuFeatureScope msa_scope(
2929 __ MultiPushMSA(kSavedFpRegs);
2930 }
2931 __ Branch(&simd_pushed);
2932 __ bind(&push_doubles);
2933 __ MultiPushFPU(kSavedFpRegs);
2934 // kFixedFrameSizeFromFp is hard coded to include space for Simd
2935 // registers, so we still need to allocate extra (unused) space on the stack
2936 // as if they were saved.
2937 __ Dsubu(sp, sp, kSavedFpRegs.Count() * kDoubleSize);
2938 __ bind(&simd_pushed);
2939
2941
2942 // Initialize the JavaScript context with 0. CEntry will use it to
2943 // set the current context on the isolate.
2944 __ Move(kContextRegister, Smi::zero());
2945 __ CallRuntime(Runtime::kWasmCompileLazy, 2);
2946
2947 // Restore registers.
2948 Label pop_doubles, simd_popped;
2949 __ li(a1, ExternalReference::supports_wasm_simd_128_address());
2950 // If > 0 then simd is available.
2951 __ Lbu(a1, MemOperand(a1));
2952 __ Branch(&pop_doubles, le, a1, Operand(zero_reg));
2953 // Pop vector registers.
2954 {
2955 CpuFeatureScope msa_scope(
2957 __ MultiPopMSA(kSavedFpRegs);
2958 }
2959 __ Branch(&simd_popped);
2960 __ bind(&pop_doubles);
2961 __ Daddu(sp, sp, kSavedFpRegs.Count() * kDoubleSize);
2962 __ MultiPopFPU(kSavedFpRegs);
2963 __ bind(&simd_popped);
2964 __ MultiPop(kSavedGpRegs);
2966 }
2967
2968 // Untag the returned Smi, for later use.
2969 static_assert(!kSavedGpRegs.has(v0));
2970 __ SmiUntag(v0);
2971
2972  // The runtime function returned the jump table slot offset as a Smi (now
2973  // untagged in v0). Use that to compute the jump target.
2974 static_assert(!kSavedGpRegs.has(t8));
2976 WasmTrustedInstanceData::kJumpTableStartOffset));
2977 __ Daddu(t8, v0, t8);
2978
2979 // Finally, jump to the jump table slot for the function.
2980 __ Jump(t8);
2981}
2982
2983void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
2984 HardAbortScope hard_abort(masm); // Avoid calls to Abort.
2985 {
2986 FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
2987
2988 // Save all parameter registers. They might hold live values, we restore
2989 // them after the runtime call.
2992
2993 // Initialize the JavaScript context with 0. CEntry will use it to
2994 // set the current context on the isolate.
2995 __ Move(cp, Smi::zero());
2996 __ CallRuntime(Runtime::kWasmDebugBreak, 0);
2997
2998 // Restore registers.
3001 }
3002 __ Ret();
3003}
3004
3005void Builtins::Generate_WasmReturnPromiseOnSuspendAsm(MacroAssembler* masm) {
3006 __ Trap();
3007}
3008
3009void Builtins::Generate_JSToWasmStressSwitchStacksAsm(MacroAssembler* masm) {
3010 __ Trap();
3011}
3012
3013void Builtins::Generate_WasmToJsWrapperAsm(MacroAssembler* masm) {
3014 // Push registers in reverse order so that they are on the stack like
3015 // in an array, with the first item being at the lowest address.
3016 constexpr int cnt_fp = arraysize(wasm::kFpParamRegisters);
3017 constexpr int cnt_gp = arraysize(wasm::kGpParamRegisters) - 1;
3018 int required_stack_space = cnt_fp * kDoubleSize + cnt_gp * kSystemPointerSize;
3019 __ Dsubu(sp, sp, Operand(required_stack_space));
3020 for (int i = cnt_fp - 1; i >= 0; i--) {
3022 MemOperand(sp, i * kDoubleSize + cnt_gp * kSystemPointerSize));
3023 }
3024
3025 // Without wasm::kGpParamRegisters[0] here.
3026 for (int i = cnt_gp; i >= 1; i--) {
3028 MemOperand(sp, (i - 1) * kSystemPointerSize));
3029 }
3030 // Reserve a slot for the signature.
3031 __ Push(zero_reg);
3032 __ TailCallBuiltin(Builtin::kWasmToJsWrapperCSA);
3033}
3034
3035void Builtins::Generate_WasmTrapHandlerLandingPad(MacroAssembler* masm) {
3036 __ Trap();
3037}
3038
3039void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
3040 // TODO(v8:12191): Implement for this platform.
3041 __ Trap();
3042}
3043
3044void Builtins::Generate_WasmResume(MacroAssembler* masm) {
3045 // TODO(v8:12191): Implement for this platform.
3046 __ Trap();
3047}
3048
3049void Builtins::Generate_WasmReject(MacroAssembler* masm) {
3050 // TODO(v8:12191): Implement for this platform.
3051 __ Trap();
3052}
3053
3054void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
3055 // Only needed on x64.
3056 __ Trap();
3057}
3058
3059void Builtins::Generate_JSToWasmWrapperAsm(MacroAssembler* masm) { __ Trap(); }
3060
3061#endif // V8_ENABLE_WEBASSEMBLY
3062
3063void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
3064 ArgvMode argv_mode, bool builtin_exit_frame,
3065 bool switch_to_central_stack) {
3066  // Called from JavaScript; parameters are on the stack as if calling a JS function
3067 // a0: number of arguments including receiver
3068 // a1: pointer to builtin function
3069 // fp: frame pointer (restored after C call)
3070 // sp: stack pointer (restored as callee's sp after C call)
3071 // cp: current context (C callee-saved)
3072 //
3073 // If argv_mode == ArgvMode::kRegister:
3074 // a2: pointer to the first argument
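  // The callee is a C++ builtin whose signature is approximately
  //   Address Builtin(int argc, Address* argv, Isolate* isolate);
  // so a0/a1/a2 are set up as (argc, argv, isolate) below (a sketch of the
  // convention, not a verbatim declaration).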
3075
3076 using ER = ExternalReference;
3077
3078 // Move input arguments to more convenient registers.
3079 static constexpr Register argc_input = a0;
3080 static constexpr Register target_fun = s1; // C callee-saved
3081 static constexpr Register argv = a1;
3082 static constexpr Register scratch = a3;
3083 static constexpr Register argc_sav = s0; // C callee-saved
3084
3085 __ mov(target_fun, argv);
3086
3087 if (argv_mode == ArgvMode::kRegister) {
3088 // Move argv into the correct register.
3089 __ mov(argv, a2);
3090 } else {
3091 // Compute the argv pointer in a callee-saved register.
3092 __ Dlsa(argv, sp, argc_input, kSystemPointerSizeLog2);
3093 __ Dsubu(argv, argv, kSystemPointerSize);
3094 }
3095
3096 // Enter the exit frame that transitions from JavaScript to C++.
3097 FrameScope scope(masm, StackFrame::MANUAL);
3098 __ EnterExitFrame(
3099 scratch, 0,
3100 builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
3101
3102 // Store a copy of argc in callee-saved registers for later.
3103 __ mov(argc_sav, argc_input);
3104
3105 // a0: number of arguments including receiver
3106 // s0: number of arguments including receiver (C callee-saved)
3107 // a1: pointer to first argument
3108 // s1: pointer to builtin function (C callee-saved)
3109
3110 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
3111 // also need to reserve the 4 argument slots on the stack.
3112
3113 __ AssertStackIsAligned();
3114
3115 // Call C built-in.
3116 // a0 = argc, a1 = argv, a2 = isolate, s1 = target_fun
3117 DCHECK_EQ(kCArgRegs[0], argc_input);
3118 DCHECK_EQ(kCArgRegs[1], argv);
3119 __ li(kCArgRegs[2], ER::isolate_address());
3120
3121 __ StoreReturnAddressAndCall(target_fun);
3122
3123 // Result returned in v0 or v1:v0 - do not destroy these registers!
3124
3125 // Check result for exception sentinel.
3126 Label exception_returned;
3127 __ LoadRoot(a4, RootIndex::kException);
3128 __ Branch(&exception_returned, eq, a4, Operand(v0));
3129
3130 // Check that there is no exception, otherwise we
3131 // should have returned the exception sentinel.
3132 if (v8_flags.debug_code) {
3133 Label okay;
3134 ER exception_address =
3135 ER::Create(IsolateAddressId::kExceptionAddress, masm->isolate());
3136 __ Ld(scratch, __ ExternalReferenceAsOperand(exception_address, no_reg));
3137 __ LoadRoot(a4, RootIndex::kTheHoleValue);
3138 // Cannot use check here as it attempts to generate call into runtime.
3139 __ Branch(&okay, eq, a4, Operand(scratch));
3140 __ stop();
3141 __ bind(&okay);
3142 }
3143
3144 // Exit C frame and return.
3145 // v0:v1: result
3146 // sp: stack pointer
3147 // fp: frame pointer
3148  // s0: still holds argc (C callee-saved).
3149 __ LeaveExitFrame(scratch);
3150 if (argv_mode == ArgvMode::kStack) {
3151 DCHECK(!AreAliased(scratch, argc_sav));
3152 __ Dlsa(sp, sp, argc_sav, kPointerSizeLog2);
3153 }
3154
3155 __ Ret();
3156
3157 // Handling of exception.
3158 __ bind(&exception_returned);
3159
3160 ER pending_handler_context_address = ER::Create(
3161 IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
3162 ER pending_handler_entrypoint_address = ER::Create(
3163 IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
3164 ER pending_handler_fp_address =
3165 ER::Create(IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
3166 ER pending_handler_sp_address =
3167 ER::Create(IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
3168
3169  // Ask the runtime for help to determine the handler. This will set v0 to
3170  // contain the current exception; don't clobber it.
3171 {
3172 FrameScope scope(masm, StackFrame::MANUAL);
3173 __ PrepareCallCFunction(3, 0, a0);
3174 __ mov(kCArgRegs[0], zero_reg);
3175 __ mov(kCArgRegs[1], zero_reg);
3176 __ li(kCArgRegs[2], ER::isolate_address());
3177 __ CallCFunction(ER::Create(Runtime::kUnwindAndFindExceptionHandler), 3,
3179 }
3180
3181 // Retrieve the handler context, SP and FP.
3182 __ li(cp, pending_handler_context_address);
3183 __ Ld(cp, MemOperand(cp));
3184 __ li(sp, pending_handler_sp_address);
3185 __ Ld(sp, MemOperand(sp));
3186 __ li(fp, pending_handler_fp_address);
3187 __ Ld(fp, MemOperand(fp));
3188
3189 // If the handler is a JS frame, restore the context to the frame. Note that
3190 // the context will be set to (cp == 0) for non-JS frames.
3191 Label zero;
3192 __ Branch(&zero, eq, cp, Operand(zero_reg));
3194 __ bind(&zero);
3195
3196 // Clear c_entry_fp, like we do in `LeaveExitFrame`.
3197 ER c_entry_fp_address =
3198 ER::Create(IsolateAddressId::kCEntryFPAddress, masm->isolate());
3199 __ Sd(zero_reg, __ ExternalReferenceAsOperand(c_entry_fp_address, no_reg));
3200
3201 // Compute the handler entry address and jump to it.
3202 __ Ld(t9, __ ExternalReferenceAsOperand(pending_handler_entrypoint_address,
3203 no_reg));
3204 __ Jump(t9);
3205}
3206
3207#if V8_ENABLE_WEBASSEMBLY
3208void Builtins::Generate_WasmHandleStackOverflow(MacroAssembler* masm) {
3209 __ Trap();
3210}
3211#endif // V8_ENABLE_WEBASSEMBLY
3212
3213void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
3214 Label done;
3215 Register result_reg = t0;
3216
3217 Register scratch = GetRegisterThatIsNotOneOf(result_reg);
3218 Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch);
3219 Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2);
3220 DoubleRegister double_scratch = kScratchDoubleReg;
3221
3222 // Account for saved regs.
3223 const int kArgumentOffset = 4 * kSystemPointerSize;
3224
3225 __ Push(result_reg);
3226 __ Push(scratch, scratch2, scratch3);
3227
3228 // Load double input.
3229 __ Ldc1(double_scratch, MemOperand(sp, kArgumentOffset));
3230
3231 // Try a conversion to a signed integer.
3232 __ Trunc_w_d(double_scratch, double_scratch);
3233 // Move the converted value into the result register.
3234 __ mfc1(scratch3, double_scratch);
3235
3236 // Retrieve the FCSR.
3237 __ cfc1(scratch, FCSR);
3238
3239 // Check for overflow and NaNs.
3240 __ And(scratch, scratch,
3243 // If we had no exceptions then set result_reg and we are done.
3244 Label error;
3245 __ Branch(&error, ne, scratch, Operand(zero_reg));
3246 __ Move(result_reg, scratch3);
3247 __ Branch(&done);
3248 __ bind(&error);
3249
3250 // Load the double value and perform a manual truncation.
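  // The manual path below is, in spirit, the following sketch (names are
  // illustrative; the real code works on the raw high/low mantissa words):
  //
  //   exp = biased_exponent(input_high);
  //   if (exp is the Inf/NaN pattern) return 0;
  //   if (exp - (kMantissaBits + 31) > 0) return 0;  // all bits shifted out
  //   result = mantissa bits of (input_high:input_low), shifted into place;
  //   return sign_bit_set(input_high) ? -result : result;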
3251 Register input_high = scratch2;
3252 Register input_low = scratch3;
3253
3254 __ Lw(input_low, MemOperand(sp, kArgumentOffset + Register::kMantissaOffset));
3255 __ Lw(input_high,
3256 MemOperand(sp, kArgumentOffset + Register::kExponentOffset));
3257
3258 Label normal_exponent;
3259 // Extract the biased exponent in result.
3260 __ Ext(result_reg, input_high, HeapNumber::kExponentShift,
3262
3263 // Check for Infinity and NaNs, which should return 0.
3264 __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
3265 __ Movz(result_reg, zero_reg, scratch);
3266 __ Branch(&done, eq, scratch, Operand(zero_reg));
3267
3268 // Express exponent as delta to (number of mantissa bits + 31).
3269 __ Subu(result_reg, result_reg,
3271
3272 // If the delta is strictly positive, all bits would be shifted away,
3273 // which means that we can return 0.
3274 __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
3275 __ mov(result_reg, zero_reg);
3276 __ Branch(&done);
3277
3278 __ bind(&normal_exponent);
3279 const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
3280 // Calculate shift.
3281 __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
3282
3283 // Save the sign.
3284 Register sign = result_reg;
3285 result_reg = no_reg;
3286 __ And(sign, input_high, Operand(HeapNumber::kSignMask));
3287
3288 // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
3289 // to check for this specific case.
3290 Label high_shift_needed, high_shift_done;
3291 __ Branch(&high_shift_needed, lt, scratch, Operand(32));
3292 __ mov(input_high, zero_reg);
3293 __ Branch(&high_shift_done);
3294 __ bind(&high_shift_needed);
3295
3296 // Set the implicit 1 before the mantissa part in input_high.
3297 __ Or(input_high, input_high,
3298 Operand(1 << HeapNumber::kMantissaBitsInTopWord));
3299 // Shift the mantissa bits to the correct position.
3300 // We don't need to clear non-mantissa bits as they will be shifted away.
3301 // If they weren't, it would mean that the answer is in the 32bit range.
3302 __ sllv(input_high, input_high, scratch);
3303
3304 __ bind(&high_shift_done);
3305
3306 // Replace the shifted bits with bits from the lower mantissa word.
3307 Label pos_shift, shift_done;
3308 __ li(kScratchReg, 32);
3309 __ subu(scratch, kScratchReg, scratch);
3310 __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
3311
3312 // Negate scratch.
3313 __ Subu(scratch, zero_reg, scratch);
3314 __ sllv(input_low, input_low, scratch);
3315 __ Branch(&shift_done);
3316
3317 __ bind(&pos_shift);
3318 __ srlv(input_low, input_low, scratch);
3319
3320 __ bind(&shift_done);
3321 __ Or(input_high, input_high, Operand(input_low));
3322 // Restore sign if necessary.
3323 __ mov(scratch, sign);
3324 result_reg = sign;
3325 sign = no_reg;
3326 __ Subu(result_reg, zero_reg, input_high);
3327 __ Movz(result_reg, input_high, scratch);
3328
3329 __ bind(&done);
3330
3331 __ Sd(result_reg, MemOperand(sp, kArgumentOffset));
3332 __ Pop(scratch, scratch2, scratch3);
3333 __ Pop(result_reg);
3334 __ Ret();
3335}
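
As a cross-check, the slow path above computes ECMAScript-style truncation: the low 32 bits of the integer part of the double, with Infinity and NaN mapped to zero. A minimal stand-alone C++ sketch of the same computation (the helper name is hypothetical and not part of V8):

#include <cstdint>
#include <cstring>

// Hypothetical helper: mirrors the manual truncation in Generate_DoubleToI.
int32_t TruncateDoubleToInt32(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof bits);
  int biased_exponent = static_cast<int>((bits >> 52) & 0x7FF);
  if (biased_exponent == 0x7FF) return 0;  // Infinity or NaN.
  uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);
  if (biased_exponent != 0) mantissa |= uint64_t{1} << 52;  // implicit 1 bit
  int shift = biased_exponent - 1075;  // value == mantissa * 2^shift
  uint32_t magnitude;
  if (shift >= 32) {
    magnitude = 0;  // every bit of the integer part is a multiple of 2^32
  } else if (shift >= 0) {
    magnitude = static_cast<uint32_t>(mantissa << shift);
  } else if (shift > -53) {
    magnitude = static_cast<uint32_t>(mantissa >> -shift);  // truncate fraction
  } else {
    magnitude = 0;  // |input| < 1
  }
  uint32_t result = (bits >> 63) ? 0u - magnitude : magnitude;  // apply sign
  return static_cast<int32_t>(result);
}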
3336
3337void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm,
3338 CallApiCallbackMode mode) {
3339 // ----------- S t a t e -------------
3340 // CallApiCallbackMode::kOptimizedNoProfiling/kOptimized modes:
3341 // -- a1 : api function address
3342 // Both modes:
3343 // -- a2 : arguments count (not including the receiver)
3344 // -- a3 : FunctionTemplateInfo
3345 // -- cp : context
3346 // -- sp[0] : receiver
3347 // -- sp[8] : first argument
3348 // -- ...
3349 // -- sp[(argc) * 8] : last argument
3350 // -----------------------------------
3351
3352 Register function_callback_info_arg = kCArgRegs[0];
3353
3354 Register api_function_address = no_reg;
3355 Register argc = no_reg;
3356 Register func_templ = no_reg;
3357 Register topmost_script_having_context = no_reg;
3358 Register scratch = t0;
3359
3360 switch (mode) {
3361 case CallApiCallbackMode::kGeneric:
3362 argc = CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister();
3363 topmost_script_having_context = CallApiCallbackGenericDescriptor::
3364 TopmostScriptHavingContextRegister();
3365 func_templ =
3366 CallApiCallbackGenericDescriptor::FunctionTemplateInfoRegister();
3367 break;
3368
3369 case CallApiCallbackMode::kOptimizedNoProfiling:
3370 case CallApiCallbackMode::kOptimized:
3371 // Caller context is always equal to current context because we don't
3372 // inline Api calls cross-context.
3373 topmost_script_having_context = kContextRegister;
3374 api_function_address =
3375 CallApiCallbackOptimizedDescriptor::ApiFunctionAddressRegister();
3376 argc = CallApiCallbackOptimizedDescriptor::ActualArgumentsCountRegister();
3377 func_templ =
3378 CallApiCallbackOptimizedDescriptor::FunctionTemplateInfoRegister();
3379 break;
3380 }
3381 DCHECK(!AreAliased(api_function_address, topmost_script_having_context, argc,
3382 func_templ, scratch));
3383
3384 using FCA = FunctionCallbackArguments;
3385 using ER = ExternalReference;
3386 using FC = ApiCallbackExitFrameConstants;
3387
3388 static_assert(FCA::kArgsLength == 6);
3389 static_assert(FCA::kNewTargetIndex == 5);
3390 static_assert(FCA::kTargetIndex == 4);
3391 static_assert(FCA::kReturnValueIndex == 3);
3392 static_assert(FCA::kContextIndex == 2);
3393 static_assert(FCA::kIsolateIndex == 1);
3394 static_assert(FCA::kUnusedIndex == 0);
3395
3396 // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
3397 //
3398 // Target state:
3399 // sp[0 * kSystemPointerSize]: kUnused <= FCA::implicit_args_
3400 // sp[1 * kSystemPointerSize]: kIsolate
3401 // sp[2 * kSystemPointerSize]: kContext
3402 // sp[3 * kSystemPointerSize]: undefined (kReturnValue)
3403 // sp[4 * kSystemPointerSize]: kTarget
3404 // sp[5 * kSystemPointerSize]: undefined (kNewTarget)
3405 // Existing state:
3406 // sp[6 * kSystemPointerSize]: <= FCA::values_
3407
3408 __ StoreRootRelative(IsolateData::topmost_script_having_context_offset(),
3409 topmost_script_having_context);
3410 if (mode == CallApiCallbackMode::kGeneric) {
3411 api_function_address = ReassignRegister(topmost_script_having_context);
3412 }
3413
3414 // Reserve space on the stack.
3415 __ Dsubu(sp, sp, Operand(FCA::kArgsLength * kSystemPointerSize));
3416
3417 // kIsolate.
3418 __ li(scratch, ER::isolate_address());
3419 __ Sd(scratch, MemOperand(sp, FCA::kIsolateIndex * kSystemPointerSize));
3420
3421 // kContext.
3422 __ Sd(cp, MemOperand(sp, FCA::kContextIndex * kSystemPointerSize));
3423
3424 // kReturnValue.
3425 __ LoadRoot(scratch, RootIndex::kUndefinedValue);
3426 __ Sd(scratch, MemOperand(sp, FCA::kReturnValueIndex * kSystemPointerSize));
3427
3428 // kTarget.
3429 __ Sd(func_templ, MemOperand(sp, FCA::kTargetIndex * kSystemPointerSize));
3430
3431 // kNewTarget.
3432 __ Sd(scratch, MemOperand(sp, FCA::kNewTargetIndex * kSystemPointerSize));
3433
3434 // kUnused.
3435 __ Sd(scratch, MemOperand(sp, FCA::kUnusedIndex * kSystemPointerSize));
3436
3437 FrameScope frame_scope(masm, StackFrame::MANUAL);
3438 if (mode == CallApiCallbackMode::kGeneric) {
3439 __ Ld(
3440 api_function_address,
3441 FieldMemOperand(func_templ,
3442 FunctionTemplateInfo::kMaybeRedirectedCallbackOffset));
3443 }
3444
3445 __ EnterExitFrame(scratch, FC::getExtraSlotsCountFrom<ExitFrameConstants>(),
3446 StackFrame::API_CALLBACK_EXIT);
3447
3448 MemOperand argc_operand = MemOperand(fp, FC::kFCIArgcOffset);
3449 {
3450 ASM_CODE_COMMENT_STRING(masm, "Initialize FunctionCallbackInfo");
3451 // FunctionCallbackInfo::length_.
3452 // TODO(ishell): pass JSParameterCount(argc) to simplify things on the
3453 // caller end.
3454 __ Sd(argc, argc_operand);
3455
3456 // FunctionCallbackInfo::implicit_args_.
3457 __ Daddu(scratch, fp, Operand(FC::kImplicitArgsArrayOffset));
3458 __ Sd(scratch, MemOperand(fp, FC::kFCIImplicitArgsOffset));
3459
3460 // FunctionCallbackInfo::values_ (points at JS arguments on the stack).
3461 __ Daddu(scratch, fp, Operand(FC::kFirstArgumentOffset));
3462 __ Sd(scratch, MemOperand(fp, FC::kFCIValuesOffset));
3463 }
3464
3465 __ RecordComment("v8::FunctionCallback's argument.");
3466 // function_callback_info_arg = v8::FunctionCallbackInfo&
3467 __ Daddu(function_callback_info_arg, fp,
3468 Operand(FC::kFunctionCallbackInfoOffset));
3469
3470 DCHECK(
3471 !AreAliased(api_function_address, scratch, function_callback_info_arg));
3472
3473 ExternalReference thunk_ref = ER::invoke_function_callback(mode);
3474 Register no_thunk_arg = no_reg;
3475
3476 MemOperand return_value_operand = MemOperand(fp, FC::kReturnValueOffset);
3477 static constexpr int kSlotsToDropOnReturn =
3478 FC::kFunctionCallbackInfoArgsLength + kJSArgcReceiverSlots;
3479
3480 const bool with_profiling =
3482 CallApiFunctionAndReturn(masm, with_profiling, api_function_address,
3483 thunk_ref, no_thunk_arg, kSlotsToDropOnReturn,
3484 &argc_operand, return_value_operand);
3485}
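
The exit frame set up above is what the invoked v8::FunctionCallback observes through its FunctionCallbackInfo argument. A minimal, hypothetical embedder callback of that shape, using only the public V8 API (name and logic illustrative):

#include <v8.h>

// Illustrative callback: sums its numeric arguments and returns the result.
void SumCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
  v8::Isolate* isolate = info.GetIsolate();
  v8::Local<v8::Context> context = isolate->GetCurrentContext();
  double sum = 0;
  for (int i = 0; i < info.Length(); ++i) {  // Length() excludes the receiver.
    sum += info[i]->NumberValue(context).FromMaybe(0);
  }
  info.GetReturnValue().Set(v8::Number::New(isolate, sum));
}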
3486
3487void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
3488 // ----------- S t a t e -------------
3489 // -- cp : context
3490 // -- a1 : receiver
3491 // -- a3 : accessor info
3492 // -- a0 : holder
3493 // -----------------------------------
3494
3495 Register name_arg = kCArgRegs[0];
3496 Register property_callback_info_arg = kCArgRegs[1];
3497
3498 Register api_function_address = a2;
3499 Register receiver = ApiGetterDescriptor::ReceiverRegister();
3500 Register holder = ApiGetterDescriptor::HolderRegister();
3501 Register callback = ApiGetterDescriptor::CallbackRegister();
3502 Register scratch = a4;
3503 Register undef = a5;
3504 Register scratch2 = a6;
3505
3506 DCHECK(!AreAliased(receiver, holder, callback, scratch, undef, scratch2));
3507
3508 // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
3509 // name below the exit frame to make GC aware of them.
3510 using PCA = PropertyCallbackArguments;
3511 using ER = ExternalReference;
3512 using FC = ApiAccessorExitFrameConstants;
3513
3514 static_assert(PCA::kPropertyKeyIndex == 0);
3515 static_assert(PCA::kShouldThrowOnErrorIndex == 1);
3516 static_assert(PCA::kHolderIndex == 2);
3517 static_assert(PCA::kIsolateIndex == 3);
3518 static_assert(PCA::kHolderV2Index == 4);
3519 static_assert(PCA::kReturnValueIndex == 5);
3520 static_assert(PCA::kDataIndex == 6);
3521 static_assert(PCA::kThisIndex == 7);
3522 static_assert(PCA::kArgsLength == 8);
3523
3524 // Set up PropertyCallbackInfo's (PCI) args_ on the stack as follows:
3525 // Target state:
3526 // sp[0 * kSystemPointerSize]: name <= PCI:args_
3527 // sp[1 * kSystemPointerSize]: kShouldThrowOnErrorIndex
3528 // sp[2 * kSystemPointerSize]: kHolderIndex
3529 // sp[3 * kSystemPointerSize]: kIsolateIndex
3530 // sp[4 * kSystemPointerSize]: kHolderV2Index
3531 // sp[5 * kSystemPointerSize]: kReturnValueIndex
3532 // sp[6 * kSystemPointerSize]: kDataIndex
3533 // sp[7 * kSystemPointerSize]: kThisIndex / receiver
3534
3535 __ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
3536 __ LoadRoot(undef, RootIndex::kUndefinedValue);
3537 __ li(scratch2, ER::isolate_address());
3538 Register holderV2 = zero_reg;
3539 __ Push(receiver, scratch, // kThisIndex, kDataIndex
3540 undef, holderV2); // kReturnValueIndex, kHolderV2Index
3541 __ Push(scratch2, holder); // kIsolateIndex, kHolderIndex
3542
3543 // |name_arg| clashes with |holder|, so we need to push holder first.
3544 __ Ld(name_arg, FieldMemOperand(callback, AccessorInfo::kNameOffset));
3545
3546 static_assert(kDontThrow == 0);
3547 Register should_throw_on_error =
3548 zero_reg; // should_throw_on_error -> kDontThrow
3549 __ Push(should_throw_on_error, name_arg);
3550
3551 __ RecordComment("Load api_function_address");
3552 __ Ld(api_function_address,
3553 FieldMemOperand(callback, AccessorInfo::kMaybeRedirectedGetterOffset));
3554
3555 FrameScope frame_scope(masm, StackFrame::MANUAL);
3556 __ EnterExitFrame(scratch, FC::getExtraSlotsCountFrom<ExitFrameConstants>(),
3557 StackFrame::API_ACCESSOR_EXIT);
3558
3559 __ RecordComment("Create v8::PropertyCallbackInfo object on the stack.");
3560 // property_callback_info_arg = v8::PropertyCallbackInfo&
3561 __ Daddu(property_callback_info_arg, fp, Operand(FC::kArgsArrayOffset));
3562
3563 DCHECK(!AreAliased(api_function_address, property_callback_info_arg, name_arg,
3564 callback, scratch, scratch2));
3565
3566#ifdef V8_ENABLE_DIRECT_HANDLE
3567 // name_arg = Local<Name>(name), name value was pushed to GC-ed stack space.
3568 // |name_arg| is already initialized above.
3569#else
3570 // name_arg = Local<Name>(&name), which is &args_array[kPropertyKeyIndex].
3571 static_assert(PCA::kPropertyKeyIndex == 0);
3572 __ mov(name_arg, property_callback_info_arg);
3573#endif
3574
3575 ER thunk_ref = ER::invoke_accessor_getter_callback();
3576 // Pass AccessorInfo to thunk wrapper in case profiler or side-effect
3577 // checking is enabled.
3578 Register thunk_arg = callback;
3579
3580 MemOperand return_value_operand = MemOperand(fp, FC::kReturnValueOffset);
3581 static constexpr int kSlotsToDropOnReturn =
3582 FC::kPropertyCallbackInfoArgsLength;
3583 MemOperand* const kUseStackSpaceConstant = nullptr;
3584
3585 const bool with_profiling = true;
3586 CallApiFunctionAndReturn(masm, with_profiling, api_function_address,
3587 thunk_ref, thunk_arg, kSlotsToDropOnReturn,
3588 kUseStackSpaceConstant, return_value_operand);
3589}
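
For comparison, the accessor exit frame above dispatches to a v8::AccessorNameGetterCallback. A minimal hypothetical getter with that signature (illustrative, not taken from this file):

#include <v8.h>

// Hypothetical named-property getter: |name| corresponds to the property key
// pushed at PCA::kPropertyKeyIndex; the result goes into the return value slot.
void VersionGetter(v8::Local<v8::Name> name,
                   const v8::PropertyCallbackInfo<v8::Value>& info) {
  v8::Isolate* isolate = info.GetIsolate();
  info.GetReturnValue().Set(v8::String::NewFromUtf8Literal(isolate, "1.0.0"));
}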
3590
3591void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
3592 // The sole purpose of DirectCEntry is for movable callers (e.g. any general
3593 // purpose InstructionStream object) to be able to call into C functions that
3594 // may trigger GC and thus move the caller.
3595 //
3596 // DirectCEntry places the return address on the stack (updated by the GC),
3597 // making the call GC safe. The irregexp backend relies on this.
3598
3599 // Make place for arguments to fit C calling convention. Callers use
3600 // EnterExitFrame/LeaveExitFrame so they handle stack restoring and we don't
3601 // have to do that here. Any caller must drop kCArgsSlotsSize stack space
3602 // after the call.
3603 __ daddiu(sp, sp, -kCArgsSlotsSize);
3604
3605 __ Sd(ra, MemOperand(sp, kCArgsSlotsSize)); // Store the return address.
3606 __ Call(t9); // Call the C++ function.
3607 __ Ld(t9, MemOperand(sp, kCArgsSlotsSize)); // Return to calling code.
3608
3609 if (v8_flags.debug_code && v8_flags.enable_slow_asserts) {
3610 // In case of an error the return address may point to a memory area
3611 // filled with kZapValue by the GC. Dereference the address and check for
3612 // this.
3613 __ Uld(a4, MemOperand(t9));
3614 __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, a4,
3615 Operand(reinterpret_cast<uint64_t>(kZapValue)));
3616 }
3617
3618 __ Jump(t9);
3619}
3620
3621namespace {
3622
3623// This code tries to be close to ia32 code so that any changes can be
3624// easily ported.
3625void Generate_DeoptimizationEntry(MacroAssembler* masm,
3626 DeoptimizeKind deopt_kind) {
3627 Isolate* isolate = masm->isolate();
3628
3629 // Unlike on ARM we don't save all the registers, just the useful ones.
3630 // For the rest, there are gaps on the stack, so the offsets remain the same.
3632
3633 RegList restored_regs = kJSCallerSaved | kCalleeSaved;
3634 RegList saved_regs = restored_regs | sp | ra;
3635
3636 const int kMSARegsSize = kSimd128Size * MSARegister::kNumRegisters;
3637
3638 // Save all allocatable simd128 / double registers before messing with them.
3639 __ Dsubu(sp, sp, Operand(kMSARegsSize));
3640 const RegisterConfiguration* config = RegisterConfiguration::Default();
3641 {
3642 // Check if machine has simd support, if so save vector registers.
3643 // If not then save double registers.
3644 Label no_simd, done;
3645 UseScratchRegisterScope temps(masm);
3646 Register scratch = temps.Acquire();
3647
3648 __ li(scratch, ExternalReference::supports_wasm_simd_128_address());
3649 // If > 0 then simd is available.
3650 __ Lbu(scratch, MemOperand(scratch));
3651 __ Branch(&no_simd, le, scratch, Operand(zero_reg));
3652
3653 CpuFeatureScope msa_scope(
3654 masm, MIPS_SIMD, CpuFeatureScope::kDontCheckSupported);
3655 for (int i = 0; i < config->num_allocatable_simd128_registers(); ++i) {
3656 int code = config->GetAllocatableSimd128Code(i);
3657 int offset = code * kSimd128Size;
3658 const MSARegister fpu_reg = MSARegister::from_code(code);
3659 __ st_d(fpu_reg, MemOperand(sp, offset));
3660 }
3661 __ Branch(&done);
3662
3663 __ bind(&no_simd);
3664 for (int i = 0; i < config->num_allocatable_simd128_registers(); ++i) {
3665 int code = config->GetAllocatableSimd128Code(i);
3666 int offset = code * kSimd128Size;
3667 const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
3668 __ Sdc1(fpu_reg, MemOperand(sp, offset));
3669 }
3670
3671 __ bind(&done);
3672 }
3673
3674 // Push saved_regs (needed to populate FrameDescription::registers_).
3675 // Leave gaps for other registers.
3676 __ Dsubu(sp, sp, kNumberOfRegisters * kSystemPointerSize);
3677 for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
3678 if ((saved_regs.bits() & (1 << i)) != 0) {
3679 __ push(ToRegister(i));
3680 }
3681 }
3682
3683 __ li(a2,
3684 ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate));
3685 __ Sd(fp, MemOperand(a2));
3686
3687 const int kSavedRegistersAreaSize =
3688 (kNumberOfRegisters * kSystemPointerSize) + kMSARegsSize;
3689
3690 // Get the address of the location in the code object (a2) (return
3691 // address for lazy deoptimization) and compute the fp-to-sp delta in
3692 // register a3.
3693 __ mov(a2, ra);
3694 __ Daddu(a3, sp, Operand(kSavedRegistersAreaSize));
3695
3696 __ Dsubu(a3, fp, a3);
3697
3698 // Allocate a new deoptimizer object.
3699 __ PrepareCallCFunction(5, a4);
3700 // Pass five arguments, according to n64 ABI.
3701 __ mov(a0, zero_reg);
3702 Label context_check;
3703 __ Ld(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
3704 __ JumpIfSmi(a1, &context_check);
3705 __ Ld(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
3706 __ bind(&context_check);
3707 __ li(a1, Operand(static_cast<int>(deopt_kind)));
3708 // a2: code address or 0 already loaded.
3709 // a3: already has fp-to-sp delta.
3710 __ li(a4, ExternalReference::isolate_address());
3711 
3712 // Call Deoptimizer::New().
3713 {
3714 AllowExternalCallThatCantCauseGC scope(masm);
3715 __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
3716 }
3717
3718 // Preserve "deoptimizer" object in register v0 and get the input
3719 // frame descriptor pointer to a1 (deoptimizer->input_);
3720 // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
3721 __ mov(a0, v0);
3722 __ Ld(a1, MemOperand(a0, Deoptimizer::input_offset()));
3723 
3724 // Copy core registers into FrameDescription::registers_[kNumRegisters].
3725 DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
3726 for (int i = 0; i < kNumberOfRegisters; i++) {
3727 int offset =
3728 (i * kSystemPointerSize) + FrameDescription::registers_offset();
3729 if ((saved_regs.bits() & (1 << i)) != 0) {
3730 __ Ld(a2, MemOperand(sp, i * kSystemPointerSize));
3731 __ Sd(a2, MemOperand(a1, offset));
3732 } else if (v8_flags.debug_code) {
3733 __ li(a2, kDebugZapValue);
3734 __ Sd(a2, MemOperand(a1, offset));
3735 }
3736 }
3737
3738 // Copy simd128 / double registers to the input frame.
3739 int simd128_regs_offset = FrameDescription::simd128_registers_offset();
3740 {
3741 // Check if machine has simd support, if so copy vector registers.
3742 // If not then copy double registers.
3743 Label no_simd, done;
3744 UseScratchRegisterScope temps(masm);
3745 Register scratch = temps.Acquire();
3746
3747 __ li(scratch, ExternalReference::supports_wasm_simd_128_address());
3748 // If > 0 then simd is available.
3749 __ Lbu(scratch, MemOperand(scratch));
3750 __ Branch(&no_simd, le, scratch, Operand(zero_reg));
3751
3752 CpuFeatureScope msa_scope(
3753 masm, MIPS_SIMD, CpuFeatureScope::kDontCheckSupported);
3754 for (int i = 0; i < config->num_allocatable_simd128_registers(); ++i) {
3755 int code = config->GetAllocatableSimd128Code(i);
3756 int dst_offset = code * kSimd128Size + simd128_regs_offset;
3757 int src_offset =
3758 code * kSimd128Size + kNumberOfRegisters * kSystemPointerSize;
3759 __ ld_d(w0, MemOperand(sp, src_offset));
3760 __ st_d(w0, MemOperand(a1, dst_offset));
3761 }
3762 __ Branch(&done);
3763
3764 __ bind(&no_simd);
3765 for (int i = 0; i < config->num_allocatable_simd128_registers(); ++i) {
3766 int code = config->GetAllocatableSimd128Code(i);
3767 int dst_offset = code * kSimd128Size + simd128_regs_offset;
3768 int src_offset =
3769 code * kSimd128Size + kNumberOfRegisters * kSystemPointerSize;
3770 __ Ldc1(f0, MemOperand(sp, src_offset));
3771 __ Sdc1(f0, MemOperand(a1, dst_offset));
3772 }
3773
3774 __ bind(&done);
3775 }
3776
3777 // Remove the saved registers from the stack.
3778 __ Daddu(sp, sp, Operand(kSavedRegistersAreaSize));
3779
3780 // Compute a pointer to the unwinding limit in register a2; that is
3781 // the first stack slot not part of the input frame.
3782 __ Ld(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
3783 __ Daddu(a2, a2, sp);
3784
3785 // Unwind the stack down to - but not including - the unwinding
3786 // limit and copy the contents of the activation frame to the input
3787 // frame description.
3788 __ Daddu(a3, a1, Operand(FrameDescription::frame_content_offset()));
3789 Label pop_loop;
3790 Label pop_loop_header;
3791 __ BranchShort(&pop_loop_header);
3792 __ bind(&pop_loop);
3793 __ pop(a4);
3794 __ Sd(a4, MemOperand(a3, 0));
3795 __ daddiu(a3, a3, sizeof(uint64_t));
3796 __ bind(&pop_loop_header);
3797 __ BranchShort(&pop_loop, ne, a2, Operand(sp));
3798 // Compute the output frame in the deoptimizer.
3799 __ push(a0); // Preserve deoptimizer object across call.
3800 // a0: deoptimizer object; a1: scratch.
3801 __ PrepareCallCFunction(1, a1);
3802 // Call Deoptimizer::ComputeOutputFrames().
3803 {
3804 AllowExternalCallThatCantCauseGC scope(masm);
3805 __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
3806 }
3807 __ pop(a0); // Restore deoptimizer object (class Deoptimizer).
3808 
3809 __ Ld(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
3810 
3811 // Replace the current (input) frame with the output frames.
3812 Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
3813 // Outer loop state: a4 = current "FrameDescription** output_",
3814 // a1 = one past the last FrameDescription**.
3815 __ Lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
3816 __ Ld(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
3817 __ Dlsa(a1, a4, a1, kSystemPointerSizeLog2);
3818 __ BranchShort(&outer_loop_header);
3819
3820 __ bind(&outer_push_loop);
3821 Register current_frame = a2;
3822 Register frame_size = a3;
3823 __ Ld(current_frame, MemOperand(a4, 0));
3824 __ Ld(frame_size,
3825 MemOperand(current_frame, FrameDescription::frame_size_offset()));
3826 __ BranchShort(&inner_loop_header);
3827
3828 __ bind(&inner_push_loop);
3829 __ Dsubu(frame_size, frame_size, Operand(sizeof(uint64_t)));
3830 __ Daddu(a6, current_frame, Operand(frame_size));
3831 __ Ld(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
3832 __ push(a7);
3833
3834 __ bind(&inner_loop_header);
3835 __ BranchShort(&inner_push_loop, ne, frame_size, Operand(zero_reg));
3836
3837 __ Daddu(a4, a4, Operand(kSystemPointerSize));
3838
3839 __ bind(&outer_loop_header);
3840 __ BranchShort(&outer_push_loop, lt, a4, Operand(a1));
3841
3842 {
3843 // Check if machine has simd support, if so restore vector registers.
3844 // If not then restore double registers.
3845 Label no_simd, done;
3846 UseScratchRegisterScope temps(masm);
3847 Register scratch = temps.Acquire();
3848
3849 __ li(scratch, ExternalReference::supports_wasm_simd_128_address());
3850 // If > 0 then simd is available.
3851 __ Lbu(scratch, MemOperand(scratch));
3852 __ Branch(&no_simd, le, scratch, Operand(zero_reg));
3853
3854 CpuFeatureScope msa_scope(
3855 masm, MIPS_SIMD, CpuFeatureScope::kDontCheckSupported);
3856 for (int i = 0; i < config->num_allocatable_simd128_registers(); ++i) {
3857 int code = config->GetAllocatableSimd128Code(i);
3858 int src_offset = code * kSimd128Size + simd128_regs_offset;
3859 const MSARegister fpu_reg = MSARegister::from_code(code);
3860 __ ld_d(fpu_reg, MemOperand(current_frame, src_offset));
3861 }
3862 __ Branch(&done);
3863
3864 __ bind(&no_simd);
3865 for (int i = 0; i < config->num_allocatable_simd128_registers(); ++i) {
3866 int code = config->GetAllocatableSimd128Code(i);
3867 int src_offset = code * kSimd128Size + simd128_regs_offset;
3868 const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
3869 __ Ldc1(fpu_reg, MemOperand(current_frame, src_offset));
3870 }
3871
3872 __ bind(&done);
3873 }
3874
3875 // Push pc and continuation from the last output frame.
3876 __ Ld(a6, MemOperand(current_frame, FrameDescription::pc_offset()));
3877 __ push(a6);
3878 __ Ld(a6, MemOperand(current_frame, FrameDescription::continuation_offset()));
3879 __ push(a6);
3880
3881 // Technically restoring 'at' should work unless zero_reg is also restored
3882 // but it's safer to check for this.
3883 DCHECK(!(restored_regs.has(at)));
3884 // Restore the registers from the last output frame.
3885 __ mov(at, current_frame);
3886 for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
3887 int offset =
3888 (i * kSystemPointerSize) + FrameDescription::registers_offset();
3889 if ((restored_regs.bits() & (1 << i)) != 0) {
3890 __ Ld(ToRegister(i), MemOperand(at, offset));
3891 }
3892 }
3893
3894 // If the continuation is non-zero (JavaScript), branch to the continuation.
3895 // For Wasm just return to the pc from the last output frame in the lr
3896 // register.
3897 Label end;
3898 __ pop(at); // Get continuation, leave pc on stack.
3899 __ pop(ra);
3900 __ Branch(&end, eq, at, Operand(zero_reg));
3901 __ Jump(at);
3902
3903 __ bind(&end);
3904 __ Jump(ra);
3905}
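
The core-register copy loops above rely on the saved-register stack area and FrameDescription::registers_ sharing the same index, with unsaved registers simply leaving gaps. A small stand-alone C++ sketch of that copy, with hypothetical types and names:

#include <cstdint>

// Illustrative only: bit i of |saved_regs_bits| says whether register i was
// pushed; unsaved slots stay untouched, so the same index works for both the
// stack area and registers_[].
constexpr int kNumRegs = 32;

struct FrameDescriptionSketch {
  uint64_t registers_[kNumRegs];
};

void CopySavedRegisters(const uint64_t stack_area[kNumRegs],
                        uint32_t saved_regs_bits,
                        FrameDescriptionSketch* input_frame) {
  for (int i = 0; i < kNumRegs; ++i) {
    if (saved_regs_bits & (1u << i)) {
      input_frame->registers_[i] = stack_area[i];
    }
  }
}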
3906
3907} // namespace
3908
3909void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
3910 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
3911}
3912
3913void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
3914 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
3915}
3916
3917// If there is baseline code on the shared function info, converts an
3918// interpreter frame into a baseline frame and continues execution in baseline
3919// code. Otherwise execution continues with bytecode.
3920void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
3921 MacroAssembler* masm) {
3922 Label start;
3923 __ bind(&start);
3924
3925 // Get function from the frame.
3926 Register closure = a1;
3927 __ Ld(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
3928 
3929 // Get the InstructionStream object from the shared function info.
3930 Register code_obj = s1;
3931 __ Ld(code_obj,
3932 FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
3933
3934 ResetSharedFunctionInfoAge(masm, code_obj);
3935
3936 __ Ld(code_obj,
3937 FieldMemOperand(code_obj,
3938 SharedFunctionInfo::kTrustedFunctionDataOffset));
3939
3940 // For OSR entry it is safe to assume we always have baseline code.
3941 if (v8_flags.debug_code) {
3942 __ GetObjectType(code_obj, t2, t2);
3943 __ Assert(eq, AbortReason::kExpectedBaselineData, t2, Operand(CODE_TYPE));
3944 AssertCodeIsBaseline(masm, code_obj, t2);
3945 }
3946
3947 // Load the feedback cell and vector.
3948 Register feedback_cell = a2;
3949 Register feedback_vector = t8;
3950 __ Ld(feedback_cell,
3951 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
3952 __ Ld(feedback_vector,
3953 FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset));
3954
3955 Label install_baseline_code;
3956 // Check if feedback vector is valid. If not, call prepare for baseline to
3957 // allocate it.
3958 __ GetObjectType(feedback_vector, t2, t2);
3959 __ Branch(&install_baseline_code, ne, t2, Operand(FEEDBACK_VECTOR_TYPE));
3960
3961 // Save BytecodeOffset from the stack frame.
3962 __ SmiUntag(kInterpreterBytecodeOffsetRegister,
3963 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
3964 // Replace BytecodeOffset with feedback cell.
3965 static_assert(InterpreterFrameConstants::kBytecodeOffsetFromFp ==
3966 BaselineFrameConstants::kFeedbackCellFromFp);
3967 __ Sd(feedback_cell,
3968 MemOperand(fp, BaselineFrameConstants::kFeedbackCellFromFp));
3969 feedback_cell = no_reg;
3970 // Update feedback vector cache.
3971 static_assert(InterpreterFrameConstants::kFeedbackVectorFromFp ==
3972 BaselineFrameConstants::kFeedbackVectorFromFp);
3973 __ Sd(feedback_vector,
3974 MemOperand(fp, InterpreterFrameConstants::kFeedbackVectorFromFp));
3975 feedback_vector = no_reg;
3976
3977 // Compute baseline pc for bytecode offset.
3978 Register get_baseline_pc = a3;
3979 __ li(get_baseline_pc,
3980 ExternalReference::baseline_pc_for_next_executed_bytecode());
3981 
3982 __ Dsubu(kInterpreterBytecodeOffsetRegister,
3983 kInterpreterBytecodeOffsetRegister,
3984 (OFFSET_OF_DATA_START(BytecodeArray) - kHeapObjectTag));
3985 
3986 // Get bytecode array from the stack frame.
3987 __ Ld(kInterpreterBytecodeArrayRegister,
3988 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
3989 // Save the accumulator register, since it's clobbered by the below call.
3990 __ Push(kInterpreterAccumulatorRegister);
3991 {
3992 __ Move(kCArgRegs[0], code_obj);
3993 __ Move(kCArgRegs[1], kInterpreterBytecodeOffsetRegister);
3994 __ Move(kCArgRegs[2], kInterpreterBytecodeArrayRegister);
3995 FrameScope scope(masm, StackFrame::INTERNAL);
3996 __ PrepareCallCFunction(3, 0, a4);
3997 __ CallCFunction(get_baseline_pc, 3, 0);
3998 }
3999 __ LoadCodeInstructionStart(code_obj, code_obj, kJSEntrypointTag);
4000 __ Daddu(code_obj, code_obj, kReturnRegister0);
4001 __ Pop(kInterpreterAccumulatorRegister);
4002 
4003 // TODO(liuyu): Remove Ld as arm64 after register reallocation.
4004 __ Ld(kInterpreterBytecodeArrayRegister,
4005 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
4006 Generate_OSREntry(masm, code_obj);
4007 __ Trap(); // Unreachable.
4008
4009 __ bind(&install_baseline_code);
4010 {
4011 FrameScope scope(masm, StackFrame::INTERNAL);
4012 __ Push(kInterpreterAccumulatorRegister);
4013 __ Push(closure);
4014 __ CallRuntime(Runtime::kInstallBaselineCode, 1);
4015 __ Pop(kInterpreterAccumulatorRegister);
4016 }
4017 // Retry from the start after installing baseline code.
4018 __ Branch(&start);
4019}
4020
4021void Builtins::Generate_RestartFrameTrampoline(MacroAssembler* masm) {
4022 // Frame is being dropped:
4023 // - Look up current function on the frame.
4024 // - Leave the frame.
4025 // - Restart the frame by calling the function.
4026 
4027 __ Ld(a1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
4028 __ Ld(a0, MemOperand(fp, StandardFrameConstants::kArgCOffset));
4029 
4030 // Pop return address and frame.
4031 __ LeaveFrame(StackFrame::INTERPRETED);
4032#ifdef V8_ENABLE_LEAPTIERING
4033 __ InvokeFunction(a1, a0, InvokeType::kJump,
4034 ArgumentAdaptionMode::kDontAdapt);
4035#else
4036 __ li(a2, Operand(kDontAdaptArgumentsSentinel));
4037
4038 __ InvokeFunction(a1, a2, a0, InvokeType::kJump);
4039#endif
4040}
4041
4042#undef __
4043
4044} // namespace internal
4045} // namespace v8
4046
4047#endif // V8_TARGET_ARCH_MIPS64