v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
builtins-riscv.cc
1// Copyright 2021 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
10#include "src/debug/debug.h"
15// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
18#include "src/heap/heap-inl.h"
19#include "src/objects/cell.h"
20#include "src/objects/foreign.h"
24#include "src/objects/smi.h"
25#include "src/runtime/runtime.h"
26
27#if V8_ENABLE_WEBASSEMBLY
32#endif // V8_ENABLE_WEBASSEMBLY
33
34namespace v8 {
35namespace internal {
36
37#define __ ACCESS_MASM(masm)
38
40 int formal_parameter_count, Address address) {
41 ASM_CODE_COMMENT(masm);
43 __ TailCallBuiltin(
44 Builtins::AdaptorWithBuiltinExitFrame(formal_parameter_count));
45}
46
47namespace {
48
49enum class ArgumentsElementType {
50 kRaw, // Push arguments as they are.
51 kHandle // Dereference arguments before pushing.
52};
53
54void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
55 Register scratch, Register scratch2,
56 ArgumentsElementType element_type) {
57 ASM_CODE_COMMENT(masm);
58 DCHECK(!AreAliased(array, argc, scratch));
59 Label loop, entry;
60 __ SubWord(scratch, argc, Operand(kJSArgcReceiverSlots));
61 __ Branch(&entry);
62 __ bind(&loop);
63 __ CalcScaledAddress(scratch2, array, scratch, kSystemPointerSizeLog2);
64 __ LoadWord(scratch2, MemOperand(scratch2));
65 if (element_type == ArgumentsElementType::kHandle) {
66 __ LoadWord(scratch2, MemOperand(scratch2));
67 }
68 __ push(scratch2);
69 __ bind(&entry);
70 __ AddWord(scratch, scratch, Operand(-1));
71 __ Branch(&loop, greater_equal, scratch, Operand(zero_reg));
72}
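// Illustrative sketch (not literal V8 source) of what Generate_PushArguments
// emits; names mirror the assembler code above:
//
//   // Arguments are pushed from last to first, so argument 0 ends up at sp.
//   for (intptr_t i = argc - kJSArgcReceiverSlots - 1; i >= 0; --i) {
//     intptr_t value = array[i];
//     if (element_type == ArgumentsElementType::kHandle) {
//       value = *reinterpret_cast<intptr_t*>(value);  // dereference the handle
//     }
//     Push(value);
//   }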
73
74void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
75 // ----------- S t a t e -------------
76 // -- a0 : number of arguments
77 // -- a1 : constructor function
78 // -- a3 : new target
79 // -- cp : context
80 // -- ra : return address
81 // -- sp[...]: constructor arguments
82 // -----------------------------------
83
84 // Enter a construct frame.
85 {
86 FrameScope scope(masm, StackFrame::CONSTRUCT);
87
88 // Preserve the incoming parameters on the stack.
89 __ Push(cp, a0);
90
91 // Set up pointer to first argument (skip receiver).
92 __ AddWord(
93 t2, fp,
95 // t2: Pointer to start of arguments.
96 // a0: Number of arguments.
97 {
98 UseScratchRegisterScope temps(masm);
99 temps.Include(t0);
100 Generate_PushArguments(masm, t2, a0, temps.Acquire(), temps.Acquire(),
101 ArgumentsElementType::kRaw);
102 }
103 // The receiver for the builtin/api call.
104 __ PushRoot(RootIndex::kTheHoleValue);
105
106 // Call the function.
107 // a0: number of arguments (untagged)
108 // a1: constructor function
109 // a3: new target
110 __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
111
112 // Restore context from the frame.
114 // Restore arguments count from the frame.
115 __ LoadWord(kScratchReg,
117 // Leave construct frame.
118 }
119
120 // Remove caller arguments from the stack and return.
121 __ DropArguments(kScratchReg);
122 __ Ret();
123}
124
125} // namespace
126
127// The construct stub for ES5 constructor functions and ES6 class constructors.
128void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
129 // ----------- S t a t e -------------
130 // -- a0: number of arguments (untagged)
131 // -- a1: constructor function
132 // -- a3: new target
133 // -- cp: context
134 // -- ra: return address
135 // -- sp[...]: constructor arguments
136 // -----------------------------------
137 // Enter a construct frame.
138 FrameScope scope(masm, StackFrame::MANUAL);
139 Label post_instantiation_deopt_entry, not_create_implicit_receiver;
140 __ EnterFrame(StackFrame::CONSTRUCT);
141
142 // Preserve the incoming parameters on the stack.
143 __ Push(cp, a0, a1);
144 __ PushRoot(RootIndex::kUndefinedValue);
145 __ Push(a3);
146
147 // ----------- S t a t e -------------
148 // -- sp[0*kSystemPointerSize]: new target
149 // -- sp[1*kSystemPointerSize]: padding
150 // -- a1 and sp[2*kSystemPointerSize]: constructor function
151 // -- sp[3*kSystemPointerSize]: number of arguments
152 // -- sp[4*kSystemPointerSize]: context
153 // -----------------------------------
154 {
155 UseScratchRegisterScope temps(masm);
156 temps.Include(t1, t2);
157 Register func_info = temps.Acquire();
158 __ LoadTaggedField(
159 func_info, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
160 __ Load32U(func_info,
161 FieldMemOperand(func_info, SharedFunctionInfo::kFlagsOffset));
162 __ DecodeField<SharedFunctionInfo::FunctionKindBits>(func_info);
163 __ JumpIfIsInRange(
164 func_info,
165 static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
166 static_cast<uint32_t>(FunctionKind::kDerivedConstructor),
167 &not_create_implicit_receiver);
168 // If not derived class constructor: Allocate the new receiver object.
169 __ CallBuiltin(Builtin::kFastNewObject);
170 __ BranchShort(&post_instantiation_deopt_entry);
171
172 // Else: use TheHoleValue as receiver for constructor call
173 __ bind(&not_create_implicit_receiver);
174 __ LoadRoot(a0, RootIndex::kTheHoleValue);
175 }
176 // ----------- S t a t e -------------
177 // -- a0: receiver
178 // -- Slot 4 / sp[0*kSystemPointerSize]: new target
179 // -- Slot 3 / sp[1*kSystemPointerSize]: padding
180 // -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
181 // -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments
182 // -- Slot 0 / sp[4*kSystemPointerSize]: context
183 // -----------------------------------
184 // Deoptimizer enters here.
185 masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
186 masm->pc_offset());
187 __ bind(&post_instantiation_deopt_entry);
188
189 // Restore new target.
190 __ Pop(a3);
191
192 // Push the allocated receiver to the stack.
193 __ Push(a0);
194
195 // We need two copies because we may have to return the original one
196 // and the calling conventions dictate that the called function pops the
197 // receiver. The second copy is pushed after the arguments; we save it in
198 // a6 since a0 will hold the return value of CallRuntime.
199 __ Move(a6, a0);
200
201 // Set up pointer to first argument (skip receiver).
202 __ AddWord(
203 t2, fp,
205
206 // ----------- S t a t e -------------
207 // -- a3: new target
208 // -- sp[0*kSystemPointerSize]: implicit receiver
209 // -- sp[1*kSystemPointerSize]: implicit receiver
210 // -- sp[2*kSystemPointerSize]: padding
211 // -- sp[3*kSystemPointerSize]: constructor function
212 // -- sp[4*kSystemPointerSize]: number of arguments
213 // -- sp[5*kSystemPointerSize]: context
214 // -----------------------------------
215
216 // Restore constructor function and argument count.
219
220 Label stack_overflow;
221 {
222 UseScratchRegisterScope temps(masm);
223 __ StackOverflowCheck(a0, temps.Acquire(), temps.Acquire(),
224 &stack_overflow);
225 }
226 // TODO(victorgomes): When the arguments adaptor is completely removed, we
227 // should get the formal parameter count and copy the arguments in its
228 // correct position (including any undefined), instead of delaying this to
229 // InvokeFunction.
230
231 // Copy arguments and receiver to the expression stack.
232 // t2: Pointer to start of argument.
233 // a0: Number of arguments.
234 {
235 UseScratchRegisterScope temps(masm);
236 Generate_PushArguments(masm, t2, a0, temps.Acquire(), temps.Acquire(),
237 ArgumentsElementType::kRaw);
238 }
239 // We need two copies because we may have to return the original one
240 // and the calling conventions dictate that the called function pops the
241 // receiver. The second copy is pushed after the arguments.
242 __ Push(a6);
243
244 // Call the function.
245 __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
246
247 // If the result is an object (in the ECMA sense), we should get rid
248 // of the receiver and use the result; see ECMA-262 section 13.2.2-7
249 // on page 74.
250 Label use_receiver, do_throw, leave_and_return, check_receiver;
251
252 // If the result is undefined, we jump out to using the implicit receiver.
253 __ JumpIfNotRoot(a0, RootIndex::kUndefinedValue, &check_receiver);
254
255 // Otherwise we do a smi check and fall through to check if the return value
256 // is a valid receiver.
257
258 // Throw away the result of the constructor invocation and use the
259 // on-stack receiver as the result.
260 __ bind(&use_receiver);
261 __ LoadWord(a0, MemOperand(sp, 0 * kSystemPointerSize));
262 __ JumpIfRoot(a0, RootIndex::kTheHoleValue, &do_throw);
263
264 __ bind(&leave_and_return);
265 // Restore arguments count from the frame.
267 // Leave construct frame.
268 __ LeaveFrame(StackFrame::CONSTRUCT);
269
270 // Remove caller arguments from the stack and return.
271 __ DropArguments(a1);
272 __ Ret();
273
274 __ bind(&check_receiver);
275 __ JumpIfSmi(a0, &use_receiver);
276
277 // If the type of the result (stored in its map) is less than
278 // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
279 {
280 UseScratchRegisterScope temps(masm);
281 temps.Include(t1, t2);
282 Register map = temps.Acquire(), type = temps.Acquire();
283 __ GetObjectType(a0, map, type);
284
285 static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
286 __ Branch(&leave_and_return, greater_equal, type,
287 Operand(FIRST_JS_RECEIVER_TYPE));
288 __ Branch(&use_receiver);
289 }
290 __ bind(&do_throw);
291 // Restore the context from the frame.
293 __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
294 __ break_(0xCC);
295
296 __ bind(&stack_overflow);
297 // Restore the context from the frame.
299 __ CallRuntime(Runtime::kThrowStackOverflow);
300 __ break_(0xCC);
301}
302
303void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
304 Generate_JSBuiltinsConstructStubHelper(masm);
305}
306
308 Register scratch) {
309 DCHECK(!AreAliased(code, scratch));
310 // Verify that the code kind is baseline code via the CodeKind.
311 __ LoadWord(scratch, FieldMemOperand(code, Code::kFlagsOffset));
312 __ DecodeField<Code::KindField>(scratch);
313 __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
314 Operand(static_cast<int64_t>(CodeKind::BASELINE)));
315}
316
317// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
318// the more general dispatch.
320 MacroAssembler* masm, Register sfi, Register bytecode, Register scratch1,
321 Label* is_baseline, Label* is_unavailable) {
322 DCHECK(!AreAliased(bytecode, scratch1));
323 ASM_CODE_COMMENT(masm);
324 Label done;
325
326 Register data = bytecode;
327 __ LoadTrustedPointerField(
328 data,
329 FieldMemOperand(sfi, SharedFunctionInfo::kTrustedFunctionDataOffset),
331
332 __ GetObjectType(data, scratch1, scratch1);
333#ifndef V8_JITLESS
334 if (v8_flags.debug_code) {
335 Label not_baseline;
336 __ Branch(&not_baseline, ne, scratch1, Operand(CODE_TYPE));
337 AssertCodeIsBaseline(masm, data, scratch1);
338 __ Branch(is_baseline);
339 __ bind(&not_baseline);
340 } else {
341 __ Branch(is_baseline, eq, scratch1, Operand(CODE_TYPE));
342 }
343#endif // !V8_JITLESS
344 __ Branch(&done, eq, scratch1, Operand(BYTECODE_ARRAY_TYPE));
345 __ Branch(is_unavailable, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
346 __ LoadProtectedPointerField(
347 bytecode, FieldMemOperand(data, InterpreterData::kBytecodeArrayOffset));
348 __ bind(&done);
349}
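// Illustrative sketch (not literal V8 source) of the dispatch above, covering
// only the instance types the code actually checks (the CODE_TYPE check is
// compiled out in jitless builds):
//
//   data = sfi->trusted_function_data;
//   if (type(data) == CODE_TYPE)             goto is_baseline;   // baseline code
//   if (type(data) == BYTECODE_ARRAY_TYPE)   { bytecode = data; goto done; }
//   if (type(data) != INTERPRETER_DATA_TYPE) goto is_unavailable; // e.g. flushed
//   bytecode = data->bytecode_array;          // InterpreterData case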
350
351// static
352void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
353 // ----------- S t a t e -------------
354 // -- a0 : the value to pass to the generator
355 // -- a1 : the JSGeneratorObject to resume
356 // -- ra : return address
357 // -----------------------------------
358 // Store input value into generator object.
359 __ StoreTaggedField(
360 a0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
361 __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, a0,
363 // Check that a1 is still valid, RecordWrite might have clobbered it.
364 __ AssertGeneratorObject(a1);
365
366 // Load suspended function and context.
367 __ LoadTaggedField(a4,
368 FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
369 __ LoadTaggedField(cp, FieldMemOperand(a4, JSFunction::kContextOffset));
370
371 // Flood function if we are stepping.
372 Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
373 Label stepping_prepared;
374 ExternalReference debug_hook =
375 ExternalReference::debug_hook_on_function_call_address(masm->isolate());
376 __ li(a5, debug_hook);
377 __ Lb(a5, MemOperand(a5));
378 __ Branch(&prepare_step_in_if_stepping, ne, a5, Operand(zero_reg));
379
380 // Flood function if we need to continue stepping in the suspended generator.
381 ExternalReference debug_suspended_generator =
382 ExternalReference::debug_suspended_generator_address(masm->isolate());
383 __ li(a5, debug_suspended_generator);
384 __ LoadWord(a5, MemOperand(a5));
385 __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(a5));
386 __ bind(&stepping_prepared);
387
388 // Check the stack for overflow. We are not trying to catch interruptions
389 // (i.e. debug break and preemption) here, so check the "real stack limit".
390 Label stack_overflow;
392 __ Branch(&stack_overflow, Uless, sp, Operand(kScratchReg));
393
394 // ----------- S t a t e -------------
395 // -- a1 : the JSGeneratorObject to resume
396 // -- a4 : generator function
397 // -- cp : generator context
398 // -- ra : return address
399 // -----------------------------------
400
401 // Push holes for arguments to generator function. Since the parser forced
402 // context allocation for any variables in generators, the actual argument
403 // values have already been copied into the context and these dummy values
404 // will never be used.
405 __ LoadTaggedField(
406 a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
407 __ Lhu(a3,
408 FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
409 __ SubWord(a3, a3, Operand(kJSArgcReceiverSlots));
410 __ LoadTaggedField(
411 t1,
412 FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset));
413 {
414 Label done_loop, loop;
415 __ bind(&loop);
416 __ SubWord(a3, a3, Operand(1));
417 __ Branch(&done_loop, lt, a3, Operand(zero_reg), Label::Distance::kNear);
418 __ CalcScaledAddress(kScratchReg, t1, a3, kTaggedSizeLog2);
419 __ LoadTaggedField(
422 __ Push(kScratchReg);
423 __ Branch(&loop);
424 __ bind(&done_loop);
425 // Push receiver.
426 __ LoadTaggedField(kScratchReg,
427 FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
428 __ Push(kScratchReg);
429 }
430
431 // Underlying function needs to have bytecode available.
432 if (v8_flags.debug_code) {
433 Label ok, is_baseline, is_unavailable;
434 Register sfi = a3;
435 Register bytecode = a3;
436 __ LoadTaggedField(
437 sfi, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
438 GetSharedFunctionInfoBytecodeOrBaseline(masm, sfi, bytecode, t5,
439 &is_baseline, &is_unavailable);
440 __ Branch(&ok);
441 __ bind(&is_unavailable);
442 __ Abort(AbortReason::kMissingBytecodeArray);
443 __ bind(&is_baseline);
444 __ GetObjectType(bytecode, t5, t5);
445 __ Assert(eq, AbortReason::kMissingBytecodeArray, t5, Operand(CODE_TYPE));
446 __ bind(&ok);
447 }
448
449 // Resume (Ignition/TurboFan) generator object.
450 {
451 // TODO(40931165): use parameter count from JSDispatchTable and validate
452 // that it matches the number of values in the JSGeneratorObject.
453 __ LoadTaggedField(
454 a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
455 __ Lhu(a0, FieldMemOperand(
456 a0, SharedFunctionInfo::kFormalParameterCountOffset));
457 // We abuse new.target both to indicate that this is a resume call and to
458 // pass in the generator object. In ordinary calls, new.target is always
459 // undefined because generator functions are non-constructable.
460 __ Move(a3, a1);
461 __ Move(a1, a4);
462 __ JumpJSFunction(a1);
463 }
464
465 __ bind(&prepare_step_in_if_stepping);
466 {
467 FrameScope scope(masm, StackFrame::INTERNAL);
468 __ Push(a1, a4);
469 // Push hole as receiver since we do not use it for stepping.
470 __ PushRoot(RootIndex::kTheHoleValue);
471 __ CallRuntime(Runtime::kDebugOnFunctionCall);
472 __ Pop(a1);
473 }
474 __ LoadTaggedField(a4,
475 FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
476 __ Branch(&stepping_prepared);
477
478 __ bind(&prepare_step_in_suspended_generator);
479 {
480 FrameScope scope(masm, StackFrame::INTERNAL);
481 __ Push(a1);
482 __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
483 __ Pop(a1);
484 }
485 __ LoadTaggedField(a4,
486 FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
487 __ Branch(&stepping_prepared);
488
489 __ bind(&stack_overflow);
490 {
491 FrameScope scope(masm, StackFrame::INTERNAL);
492 __ CallRuntime(Runtime::kThrowStackOverflow);
493 __ break_(0xCC); // This should be unreachable.
494 }
495}
496
497void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
498 FrameScope scope(masm, StackFrame::INTERNAL);
499 __ Push(a1);
500 __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
501}
502
503// Clobbers scratch1 and scratch2; preserves all other registers.
505 Register scratch1, Register scratch2) {
506 // Check the stack for overflow. We are not trying to catch
507 // interruptions (e.g. debug break and preemption) here, so the "real stack
508 // limit" is checked.
509 Label okay;
510 __ LoadStackLimit(scratch1, StackLimitKind::kRealStackLimit);
511 // Make scratch1 the space we have left. The stack might already be
512 // overflowed here, which will cause scratch1 to become negative.
513 __ SubWord(scratch1, sp, scratch1);
514 // Check if the arguments will overflow the stack.
515 __ SllWord(scratch2, argc, kSystemPointerSizeLog2);
516 __ Branch(&okay, gt, scratch1, Operand(scratch2),
517 Label::Distance::kNear); // Signed comparison.
518
519 // Out of stack space.
520 __ CallRuntime(Runtime::kThrowStackOverflow);
521
522 __ bind(&okay);
523}
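// Illustrative sketch (not literal V8 source) of the check above:
//
//   space_left = sp - real_stack_limit;            // may already be negative
//   needed     = argc << kSystemPointerSizeLog2;   // bytes about to be pushed
//   if (space_left <= needed) CallRuntime(Runtime::kThrowStackOverflow);  // signed compare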
524
525namespace {
526
527// Called with the native C calling convention. The corresponding function
528// signature is either:
529//
530// using JSEntryFunction = GeneratedCode<Address(
531// Address root_register_value, Address new_target, Address target,
532// Address receiver, intptr_t argc, Address** args)>;
533// or
534// using JSEntryFunction = GeneratedCode<Address(
535// Address root_register_value, MicrotaskQueue* microtask_queue)>;
536void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
537 Builtin entry_trampoline) {
538 Label invoke, handler_entry, exit;
539
540 {
541 NoRootArrayScope no_root_array(masm);
542
543 // TODO(plind): unify the ABI description here.
544 // Registers:
545 // either
546 // a0: root register value
547 // a1: entry address
548 // a2: function
549 // a3: receiver
550 // a4: argc
551 // a5: argv
552 // or
553 // a0: root register value
554 // a1: microtask_queue
555
556 // Save callee saved registers on the stack.
557 __ MultiPush(kCalleeSaved | ra);
558
559 // Save callee-saved FPU registers.
560 __ MultiPushFPU(kCalleeSavedFPU);
561 // Set up the reserved register for 0.0.
562 __ LoadFPRImmediate(kDoubleRegZero, 0.0);
563 __ LoadFPRImmediate(kSingleRegZero, 0.0f);
564
565 // Initialize the root register.
566 // C calling convention. The first argument is passed in a0.
567 __ Move(kRootRegister, a0);
568
569#ifdef V8_COMPRESS_POINTERS
570 // Initialize the pointer cage base register.
571 __ LoadRootRelative(kPtrComprCageBaseRegister,
572 IsolateData::cage_base_offset());
573#endif
574 }
575
576 // a1: entry address
577 // a2: function
578 // a3: receiver
579 // a4: argc
580 // a5: argv
581
582 // We build an EntryFrame.
583 __ li(s1, Operand(-1)); // Push a bad frame pointer to fail if it is used.
584 __ li(s2, Operand(StackFrame::TypeToMarker(type)));
585 __ li(s3, Operand(StackFrame::TypeToMarker(type)));
586 ExternalReference c_entry_fp = ExternalReference::Create(
587 IsolateAddressId::kCEntryFPAddress, masm->isolate());
588 __ li(s5, c_entry_fp);
589 __ LoadWord(s4, MemOperand(s5));
590 __ Push(s1, s2, s3, s4);
591 // Clear c_entry_fp, now we've pushed its previous value to the stack.
592 // If the c_entry_fp is not already zero and we don't clear it, the
593 // StackFrameIteratorForProfiler will assume we are executing C++ and miss the
594 // JS frames on top.
595 __ StoreWord(zero_reg, MemOperand(s5));
596
597 __ LoadIsolateField(s1, IsolateFieldId::kFastCCallCallerFP);
598 __ LoadWord(s2, MemOperand(s1, 0));
599 __ StoreWord(zero_reg, MemOperand(s1, 0));
600
601 __ LoadIsolateField(s1, IsolateFieldId::kFastCCallCallerPC);
602 __ LoadWord(s3, MemOperand(s1, 0));
603 __ StoreWord(zero_reg, MemOperand(s1, 0));
604 __ Push(s2, s3);
605 // Set up frame pointer for the frame to be pushed.
607 // Registers:
608 // either
609 // a1: entry address
610 // a2: function
611 // a3: receiver
612 // a4: argc
613 // a5: argv
614 // or
615 // a1: microtask_queue
616 //
617 // Stack:
618 // fast api call pc
619 // fast api call fp
620 // caller fp |
621 // function slot | entry frame
622 // context slot |
623 // bad fp (0xFF...F) |
624 // callee saved registers + ra
625
626 // If this is the outermost JS call, set js_entry_sp value.
627 Label non_outermost_js;
628 ExternalReference js_entry_sp = ExternalReference::Create(
629 IsolateAddressId::kJSEntrySPAddress, masm->isolate());
630 __ li(s1, js_entry_sp);
631 __ LoadWord(s2, MemOperand(s1));
632 __ Branch(&non_outermost_js, ne, s2, Operand(zero_reg),
634 __ StoreWord(fp, MemOperand(s1));
636 Label cont;
637 __ Branch(&cont);
638 __ bind(&non_outermost_js);
639 __ li(s3, Operand(StackFrame::INNER_JSENTRY_FRAME));
640 __ bind(&cont);
641 __ push(s3);
642
643 // Jump to a faked try block that does the invoke, with a faked catch
644 // block that sets the exception.
645 __ BranchShort(&invoke);
646 __ bind(&handler_entry);
647
648 // Store the current pc as the handler offset. It's used later to create the
649 // handler table.
650 masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
651
652 // Caught exception: Store result (exception) in the exception
653 // field in the JSEnv and return a failure sentinel. Coming in here the
654 // fp will be invalid because the PushStackHandler below sets it to 0 to
655 // signal the existence of the JSEntry frame.
656 __ li(s1, ExternalReference::Create(IsolateAddressId::kExceptionAddress,
657 masm->isolate()));
658 __ StoreWord(a0,
659 MemOperand(s1)); // We come back from 'invoke'. result is in a0.
660 __ LoadRoot(a0, RootIndex::kException);
661 __ BranchShort(&exit);
662
663 // Invoke: Link this frame into the handler chain.
664 __ bind(&invoke);
665 __ PushStackHandler();
666 // If an exception not caught by another handler occurs, this handler
667 // returns control to the code after the BranchShort(&invoke) above, which
668 // restores all kCalleeSaved registers (including cp and fp) to their
669 // saved values before returning a failure to C.
670 //
671 // Registers:
672 // either
673 // a0: root register value
674 // a1: entry address
675 // a2: function
676 // a3: receiver
677 // a4: argc
678 // a5: argv
679 // or
680 // a0: root register value
681 // a1: microtask_queue
682 //
683 // Stack:
684 // fast api call pc.
685 // fast api call fp.
686 // JS entry frame marker
687 // caller fp |
688 // function slot | entry frame
689 // context slot |
690 // bad fp (0xFF...F) |
691 // handler frame
692 // entry frame
693 // callee saved registers + ra
694 // [ O32: 4 args slots]
695 // args
696 //
697 // Invoke the function by calling through JS entry trampoline builtin and
698 // pop the faked function when we return.
699 __ CallBuiltin(entry_trampoline);
700
701 // Unlink this frame from the handler chain.
702 __ PopStackHandler();
703
704 __ bind(&exit); // a0 holds result
705 // Check if the current stack frame is marked as the outermost JS frame.
706
707 Label non_outermost_js_2;
708 __ pop(a5);
709 __ Branch(&non_outermost_js_2, ne, a5,
712 __ li(a5, js_entry_sp);
713 __ StoreWord(zero_reg, MemOperand(a5));
714 __ bind(&non_outermost_js_2);
715
716 __ Pop(s2, s3);
717 __ LoadIsolateField(s1, IsolateFieldId::kFastCCallCallerFP);
718 __ StoreWord(s2, MemOperand(s1, 0));
719 __ LoadIsolateField(s1, IsolateFieldId::kFastCCallCallerPC);
720 __ StoreWord(s3, MemOperand(s1, 0));
721 // Restore the top frame descriptors from the stack.
722 __ pop(a5);
723 __ li(a4, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
724 masm->isolate()));
725 __ StoreWord(a5, MemOperand(a4));
726
727 // Reset the stack to the callee saved registers.
729
730 // Restore callee-saved fpu registers.
731 __ MultiPopFPU(kCalleeSavedFPU);
732
733 // Restore callee saved registers from the stack.
734 __ MultiPop(kCalleeSaved | ra);
735 // Return.
736 __ Jump(ra);
737}
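// Illustrative sketch (not literal V8 source): the C++ side reaches this stub
// through the GeneratedCode wrapper, using the first signature documented
// above, roughly:
//
//   using JSEntryFunction = GeneratedCode<Address(
//       Address root_register_value, Address new_target, Address target,
//       Address receiver, intptr_t argc, Address** args)>;
//   auto entry = JSEntryFunction::FromAddress(isolate, code->instruction_start());
//   Address result = entry.Call(isolate->isolate_root(), new_target, target,
//                               receiver, argc, args);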
738
739} // namespace
740
741void Builtins::Generate_JSEntry(MacroAssembler* masm) {
742 Generate_JSEntryVariant(masm, StackFrame::ENTRY, Builtin::kJSEntryTrampoline);
743}
744
745void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
746 Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
747 Builtin::kJSConstructEntryTrampoline);
748}
749
750void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
751 Generate_JSEntryVariant(masm, StackFrame::ENTRY,
752 Builtin::kRunMicrotasksTrampoline);
753}
754
756 bool is_construct) {
757 // ----------- S t a t e -------------
758 // -- a1: new.target
759 // -- a2: function
760 // -- a3: receiver_pointer
761 // -- a4: argc
762 // -- a5: argv
763 // -----------------------------------
764
765 // Enter an internal frame.
766 {
767 FrameScope scope(masm, StackFrame::INTERNAL);
768
769 // Setup the context (we need to use the caller context from the isolate).
771 IsolateAddressId::kContextAddress, masm->isolate());
772 __ li(cp, context_address);
773 __ LoadWord(cp, MemOperand(cp));
774
775 // Push the function onto the stack.
776 __ Push(a2);
777
778 // Check if we have enough stack space to push all arguments.
779 __ mv(a6, a4);
780 Generate_CheckStackOverflow(masm, a6, a0, s2);
781
782 // Copy arguments to the stack.
783 // a4: argc
784 // a5: argv, i.e. points to first arg
785 {
786 UseScratchRegisterScope temps(masm);
787 Generate_PushArguments(masm, a5, a4, temps.Acquire(), temps.Acquire(),
788 ArgumentsElementType::kHandle);
789 }
790 // Push the receiver.
791 __ Push(a3);
792
793 // a0: argc
794 // a1: function
795 // a3: new.target
796 __ Move(a3, a1);
797 __ Move(a1, a2);
798 __ Move(a0, a4);
799
800 // Initialize all JavaScript callee-saved registers, since they will be seen
801 // by the garbage collector as part of handlers.
802 __ LoadRoot(a4, RootIndex::kUndefinedValue);
803 __ Move(a5, a4);
804 __ Move(s1, a4);
805 __ Move(s2, a4);
806 __ Move(s3, a4);
807 __ Move(s4, a4);
808 __ Move(s5, a4);
809 __ Move(s8, a4);
810 __ Move(s9, a4);
811 __ Move(s10, a4);
812#ifndef V8_COMPRESS_POINTERS
813 __ Move(s11, a4);
814#endif
815 // s6 holds the root address. Do not clobber.
816 // s7 is cp. Do not init.
817 // s11 is the pointer cage base register (kPtrComprCageBaseRegister).
818
819 // Invoke the code.
820 Builtin builtin = is_construct ? Builtin::kConstruct : Builtins::Call();
821 __ CallBuiltin(builtin);
822
823 // Leave internal frame.
824 }
825 __ Jump(ra);
826}
827
828void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
830}
831
832void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
834}
835
836void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
837 // a1: microtask_queue
839 __ TailCallBuiltin(Builtin::kRunMicrotasks);
840}
841
843 Register scratch2) {
844 ASM_CODE_COMMENT(masm);
845 Register params_size = scratch1;
846
847 // Get the size of the formal parameters + receiver (in bytes).
848 __ LoadWord(params_size,
850 __ Lhu(params_size,
851 FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
852
853 Register actual_params_size = scratch2;
854 Label L1;
855 // Compute the size of the actual parameters + receiver (in bytes).
856 __ LoadWord(actual_params_size,
858 // If actual is bigger than formal, then we should use it to free up the stack
859 // arguments.
860 __ Branch(&L1, le, actual_params_size, Operand(params_size),
862 __ Move(params_size, actual_params_size);
863 __ bind(&L1);
864
865 // Leave the frame (also dropping the register file).
866 __ LeaveFrame(StackFrame::INTERPRETED);
867
868 // Drop receiver + arguments.
869 __ DropArguments(params_size);
870}
871
872// Advance the current bytecode offset. This simulates what all bytecode
873// handlers do upon completion of the underlying operation. Will bail out to a
874// label if the bytecode (without prefix) is a return bytecode. Will not advance
875// the bytecode offset if the current bytecode is a JumpLoop, instead just
876// re-executing the JumpLoop to jump to the correct bytecode.
878 Register bytecode_array,
879 Register bytecode_offset,
880 Register bytecode, Register scratch1,
881 Register scratch2, Register scratch3,
882 Label* if_return) {
883 ASM_CODE_COMMENT(masm);
884 Register bytecode_size_table = scratch1;
885
886 // The bytecode offset value will be increased by one in wide and extra wide
887 // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
888 // will restore the original bytecode. In order to simplify the code, we have
889 // a backup of it.
890 Register original_bytecode_offset = scratch3;
891 DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode,
892 bytecode_size_table, original_bytecode_offset));
893 __ Move(original_bytecode_offset, bytecode_offset);
894 __ li(bytecode_size_table, ExternalReference::bytecode_size_table_address());
895
896 // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
897 Label process_bytecode, extra_wide;
898 static_assert(0 == static_cast<int>(interpreter::Bytecode::kWide));
899 static_assert(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
900 static_assert(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
901 static_assert(3 ==
902 static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
903 __ Branch(&process_bytecode, Ugreater, bytecode, Operand(3),
905 __ And(scratch2, bytecode, Operand(1));
906 __ Branch(&extra_wide, ne, scratch2, Operand(zero_reg),
908
909 // Load the next bytecode and update table to the wide scaled table.
910 __ AddWord(bytecode_offset, bytecode_offset, Operand(1));
911 __ AddWord(scratch2, bytecode_array, bytecode_offset);
912 __ Lbu(bytecode, MemOperand(scratch2));
913 __ AddWord(bytecode_size_table, bytecode_size_table,
915 __ BranchShort(&process_bytecode);
916
917 __ bind(&extra_wide);
918 // Load the next bytecode and update table to the extra wide scaled table.
919 __ AddWord(bytecode_offset, bytecode_offset, Operand(1));
920 __ AddWord(scratch2, bytecode_array, bytecode_offset);
921 __ Lbu(bytecode, MemOperand(scratch2));
922 __ AddWord(bytecode_size_table, bytecode_size_table,
924
925 __ bind(&process_bytecode);
926
927// Bailout to the return label if this is a return bytecode.
928#define JUMP_IF_EQUAL(NAME) \
929 __ Branch(if_return, eq, bytecode, \
930 Operand(static_cast<int64_t>(interpreter::Bytecode::k##NAME)));
932#undef JUMP_IF_EQUAL
933
934 // If this is a JumpLoop, re-execute it to perform the jump to the beginning
935 // of the loop.
936 Label end, not_jump_loop;
937 __ Branch(&not_jump_loop, ne, bytecode,
938 Operand(static_cast<int64_t>(interpreter::Bytecode::kJumpLoop)),
940 // We need to restore the original bytecode_offset since we might have
941 // increased it to skip the wide / extra-wide prefix bytecode.
942 __ Move(bytecode_offset, original_bytecode_offset);
943 __ BranchShort(&end);
944
945 __ bind(&not_jump_loop);
946 // Otherwise, load the size of the current bytecode and advance the offset.
947 __ AddWord(scratch2, bytecode_size_table, bytecode);
948 __ Lb(scratch2, MemOperand(scratch2));
949 __ AddWord(bytecode_offset, bytecode_offset, scratch2);
950
951 __ bind(&end);
952}
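// Illustrative sketch (not literal V8 source) of the advance logic above:
//
//   if (bytecode <= kDebugBreakExtraWide) {          // Wide/ExtraWide prefixes, values 0..3
//     bytecode = bytecode_array[++bytecode_offset];  // load the prefixed bytecode
//     bytecode_size_table += <offset of the (extra-)wide-scaled size table>;
//   }
//   if (bytecode is a return bytecode)  goto *if_return;
//   if (bytecode == kJumpLoop)          bytecode_offset = original_bytecode_offset;
//   else                                bytecode_offset += bytecode_size_table[bytecode];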
953
954namespace {
955void ResetSharedFunctionInfoAge(MacroAssembler* masm, Register sfi) {
956 __ Sh(zero_reg, FieldMemOperand(sfi, SharedFunctionInfo::kAgeOffset));
957}
958
959void ResetJSFunctionAge(MacroAssembler* masm, Register js_function,
960 Register scratch) {
961 const Register shared_function_info(scratch);
962 __ LoadTaggedField(
963 shared_function_info,
964 FieldMemOperand(js_function, JSFunction::kSharedFunctionInfoOffset));
965 ResetSharedFunctionInfoAge(masm, shared_function_info);
966}
967
968void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm,
969 Register feedback_vector, Register scratch) {
970 DCHECK(!AreAliased(feedback_vector, scratch));
971 __ Lbu(scratch,
972 FieldMemOperand(feedback_vector, FeedbackVector::kOsrStateOffset));
973 __ And(scratch, scratch, Operand(~FeedbackVector::OsrUrgencyBits::kMask));
974 __ Sb(scratch,
975 FieldMemOperand(feedback_vector, FeedbackVector::kOsrStateOffset));
976}
977
978} // namespace
979
980// static
981void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
982 ASM_CODE_COMMENT(masm);
983 UseScratchRegisterScope temps(masm);
984 temps.Include({kScratchReg, kScratchReg2, s1});
985 auto descriptor =
986 Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
987 Register closure = descriptor.GetRegisterParameter(
988 BaselineOutOfLinePrologueDescriptor::kClosure);
989 // Load the feedback cell and vector from the closure.
990 Register feedback_cell = temps.Acquire();
991 Register feedback_vector = temps.Acquire();
992 __ LoadTaggedField(feedback_cell,
993 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
994 __ LoadTaggedField(
995 feedback_vector,
996 FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset));
997 {
998 UseScratchRegisterScope temp(masm);
999 Register type = temps.Acquire();
1000 __ AssertFeedbackVector(feedback_vector, type);
1001 }
1002
1003#ifndef V8_ENABLE_LEAPTIERING
1004 // Check for a tiering state.
1005 Label flags_need_processing;
1006 Register flags = temps.Acquire();
1007 __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
1008 flags, feedback_vector, CodeKind::BASELINE, &flags_need_processing);
1009#endif // !V8_ENABLE_LEAPTIERING
1010
1011 {
1012 UseScratchRegisterScope temps(masm);
1013 ResetFeedbackVectorOsrUrgency(masm, feedback_vector, temps.Acquire());
1014 }
1015 // Increment invocation count for the function.
1016 {
1017 UseScratchRegisterScope temps(masm);
1018 Register invocation_count = temps.Acquire();
1019 __ Lw(invocation_count,
1020 FieldMemOperand(feedback_vector,
1021 FeedbackVector::kInvocationCountOffset));
1022 __ Add32(invocation_count, invocation_count, Operand(1));
1023 __ Sw(invocation_count,
1024 FieldMemOperand(feedback_vector,
1025 FeedbackVector::kInvocationCountOffset));
1026 }
1027
1028 FrameScope frame_scope(masm, StackFrame::MANUAL);
1029 {
1030 ASM_CODE_COMMENT_STRING(masm, "Frame Setup");
1031 // Normally the first thing we'd do here is Push(ra, fp), but we already
1032 // entered the frame in BaselineCompiler::Prologue, as we had to use the
1033 // value of ra before the call to this BaselineOutOfLinePrologue builtin.
1034 Register callee_context = descriptor.GetRegisterParameter(
1035 BaselineOutOfLinePrologueDescriptor::kCalleeContext);
1036 Register callee_js_function = descriptor.GetRegisterParameter(
1037 BaselineOutOfLinePrologueDescriptor::kClosure);
1038 {
1039 UseScratchRegisterScope temps(masm);
1040 ResetJSFunctionAge(masm, callee_js_function, temps.Acquire());
1041 }
1042 __ Push(callee_context, callee_js_function);
1043 DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
1044 DCHECK_EQ(callee_js_function, kJSFunctionRegister);
1045
1046 Register argc = descriptor.GetRegisterParameter(
1047 BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
1048 // We'll use the bytecode for both code age/OSR resetting, and pushing onto
1049 // the frame, so load it into a register.
1050 Register bytecode_array = descriptor.GetRegisterParameter(
1051 BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
1052 __ Push(argc, bytecode_array, feedback_cell, feedback_vector);
1053 // Baseline code frames store the feedback vector where interpreter would
1054 // store the bytecode offset.
1055 {
1056 UseScratchRegisterScope temp(masm);
1057 Register type = temps.Acquire();
1058 __ AssertFeedbackVector(feedback_vector, type);
1059 }
1060 }
1061
1062 Label call_stack_guard;
1063 Register frame_size = descriptor.GetRegisterParameter(
1064 BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
1065 {
1066 ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt check");
1067 // Stack check. This folds the checks for both the interrupt stack limit
1068 // check and the real stack limit into one by just checking for the
1069 // interrupt limit. The interrupt limit is either equal to the real stack
1070 // limit or tighter. By ensuring we have space until that limit after
1071 // building the frame we can quickly precheck both at once.
1072 UseScratchRegisterScope temps(masm);
1073 Register sp_minus_frame_size = temps.Acquire();
1074 __ SubWord(sp_minus_frame_size, sp, frame_size);
1075 Register interrupt_limit = temps.Acquire();
1076 __ LoadStackLimit(interrupt_limit, StackLimitKind::kInterruptStackLimit);
1077 __ Branch(&call_stack_guard, Uless, sp_minus_frame_size,
1078 Operand(interrupt_limit));
1079 }
1080
1081 // Do "fast" return to the caller pc in ra.
1082 // TODO(v8:11429): Document this frame setup better.
1083 __ Ret();
1084
1085#ifndef V8_ENABLE_LEAPTIERING
1086 __ bind(&flags_need_processing);
1087 {
1088 ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
1089 // Drop the frame created by the baseline call.
1090 __ Pop(ra, fp);
1091 __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector);
1092 __ Trap();
1093 }
1094#endif // !V8_ENABLE_LEAPTIERING
1095
1096 __ bind(&call_stack_guard);
1097 {
1098 ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
1099 FrameScope frame_scope(masm, StackFrame::INTERNAL);
1100 // Save incoming new target or generator
1102#if defined(V8_ENABLE_LEAPTIERING) && defined(V8_TARGET_ARCH_RISCV64)
1103 // No need to SmiTag as dispatch handles always look like Smis.
1104 static_assert(kJSDispatchHandleShift > 0);
1106#endif
1107 __ SmiTag(frame_size);
1108 __ Push(frame_size);
1109 __ CallRuntime(Runtime::kStackGuardWithGap);
1110#if defined(V8_ENABLE_LEAPTIERING) && defined(V8_TARGET_ARCH_RISCV64)
1112#endif
1114 }
1115 __ Ret();
1116 temps.Exclude({kScratchReg, kScratchReg2, s1});
1117}
1118
1119// static
1120void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) {
1121 // We're here because we got deopted during BaselineOutOfLinePrologue's stack
1122 // check. Undo all its frame creation and call into the interpreter instead.
1123
1124 // Drop bytecode offset (was the feedback vector but got replaced during
1125 // deopt) and bytecode array.
1126 __ AddWord(sp, sp, Operand(3 * kSystemPointerSize));
1127
1128 // Context, closure, argc.
1131
1132 // Drop frame pointer
1133 __ LeaveFrame(StackFrame::BASELINE);
1134
1135 // Enter the interpreter.
1136 __ TailCallBuiltin(Builtin::kInterpreterEntryTrampoline);
1137}
1138
1139// Generate code for entering a JS function with the interpreter.
1140// On entry to the function the receiver and arguments have been pushed on the
1141// stack left to right.
1142//
1143// The live registers are:
1144// o a0 : actual argument count
1145// o a1: the JS function object being called.
1146// o a3: the incoming new target or generator object
1147// o a4: the dispatch handle through which we were called
1148// o cp: our context
1149// o fp: the caller's frame pointer
1150// o sp: stack pointer
1151// o ra: return address
1152//
1153// The function builds an interpreter frame. See InterpreterFrameConstants in
1154// frames-constants.h for its layout.
1156 MacroAssembler* masm, InterpreterEntryTrampolineMode mode) {
1157 Register closure = a1;
1158 // Get the bytecode array from the function object and load it into
1159 // kInterpreterBytecodeArrayRegister.
1160 Register sfi = a5;
1161 __ LoadTaggedField(
1162 sfi, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
1163 ResetSharedFunctionInfoAge(masm, sfi);
1164
1165 // The bytecode array could have been flushed from the shared function info,
1166 // if so, call into CompileLazy.
1167 Label is_baseline, compile_lazy;
1169 masm, sfi, kInterpreterBytecodeArrayRegister, kScratchReg, &is_baseline,
1170 &compile_lazy);
1171
1172#ifdef V8_ENABLE_SANDBOX
1173 // Validate the parameter count. This protects against an attacker swapping
1174 // the bytecode (or the dispatch handle) such that the parameter count of the
1175 // dispatch entry doesn't match the one of the BytecodeArray.
1176 // TODO(saelo): instead of this validation step, it would probably be nicer
1177 // if we could store the BytecodeArray directly in the dispatch entry and
1178 // load it from there. Then we can easily guarantee that the parameter count
1179 // of the entry matches the parameter count of the bytecode.
1181 Register dispatch_handle = kJavaScriptCallDispatchHandleRegister; // a4
1182 __ LoadParameterCountFromJSDispatchTable(a2, dispatch_handle, a6);
1184 BytecodeArray::kParameterSizeOffset));
1185 __ SbxCheck(eq, AbortReason::kJSSignatureMismatch, a2, Operand(a6));
1186#endif // V8_ENABLE_SANDBOX
1187
1188 Label push_stack_frame;
1189 Register feedback_vector = a2;
1190 __ LoadFeedbackVector(feedback_vector, closure, a6, &push_stack_frame);
1191
1192#ifndef V8_JITLESS
1193#ifndef V8_ENABLE_LEAPTIERING
1194 // If feedback vector is valid, check for optimized code and update invocation
1195 // count.
1196
1197 // Check the tiering state.
1198 Label flags_need_processing;
1199 Register flags = a6;
1200 __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
1201 flags, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
1202 &flags_need_processing);
1203#endif // V8_ENABLE_LEAPTIERING
1204 ResetFeedbackVectorOsrUrgency(masm, feedback_vector, a4);
1205
1206 // Increment invocation count for the function.
1207 __ Lw(a6, FieldMemOperand(feedback_vector,
1208 FeedbackVector::kInvocationCountOffset));
1209 __ Add32(a6, a6, Operand(1));
1210 __ Sw(a6, FieldMemOperand(feedback_vector,
1211 FeedbackVector::kInvocationCountOffset));
1212
1213 // Open a frame scope to indicate that there is a frame on the stack. The
1214 // MANUAL indicates that the scope shouldn't actually generate code to set up
1215 // the frame (that is done below).
1216#else
1217 // Note: By omitting the above code in jitless mode we also disable:
1218 // - kFlagsLogNextExecution: only used for logging/profiling; and
1219 // - kInvocationCountOffset: only used for tiering heuristics and code
1220 // coverage.
1221#endif // !V8_JITLESS
1222
1223 __ bind(&push_stack_frame);
1224 FrameScope frame_scope(masm, StackFrame::MANUAL);
1225 __ PushStandardFrame(closure);
1226
1227 // Load initial bytecode offset.
1230
1231 // Push bytecode array, Smi tagged bytecode array offset, and the feedback
1232 // vector.
1234 __ Push(kInterpreterBytecodeArrayRegister, a6, feedback_vector);
1235
1236 // Allocate the local and temporary register file on the stack.
1237 Label stack_overflow;
1238 {
1239 // Load frame size (word) from the BytecodeArray object.
1241 BytecodeArray::kFrameSizeOffset));
1242
1243 // Do a stack check to ensure we don't go over the limit.
1244 __ SubWord(a5, sp, Operand(a6));
1245 __ LoadStackLimit(a2, StackLimitKind::kRealStackLimit);
1246 __ Branch(&stack_overflow, Uless, a5, Operand(a2));
1247
1248 // If ok, push undefined as the initial value for all register file entries.
1249 Label loop_header;
1250 Label loop_check;
1251 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1252 __ BranchShort(&loop_check);
1253 __ bind(&loop_header);
1254 // TODO(rmcilroy): Consider doing more than one push per loop iteration.
1256 // Continue loop if not done.
1257 __ bind(&loop_check);
1258 __ SubWord(a6, a6, Operand(kSystemPointerSize));
1259 __ Branch(&loop_header, ge, a6, Operand(zero_reg));
1260 }
1261
1262 // If the bytecode array has a valid incoming new target or generator object
1263 // register, initialize it with incoming value which was passed in a3.
1264 Label no_incoming_new_target_or_generator_register;
1265 __ Lw(a5, FieldMemOperand(
1267 BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
1268 __ Branch(&no_incoming_new_target_or_generator_register, eq, a5,
1269 Operand(zero_reg), Label::Distance::kNear);
1270 __ CalcScaledAddress(a5, fp, a5, kSystemPointerSizeLog2);
1271 __ StoreWord(a3, MemOperand(a5));
1272 __ bind(&no_incoming_new_target_or_generator_register);
1273
1274 // Perform interrupt stack check.
1275 // TODO(solanes): Merge with the real stack limit check above.
1276 Label stack_check_interrupt, after_stack_check_interrupt;
1277 __ LoadStackLimit(a5, StackLimitKind::kInterruptStackLimit);
1278 __ Branch(&stack_check_interrupt, Uless, sp, Operand(a5),
1280 __ bind(&after_stack_check_interrupt);
1281
1282 // Load the dispatch table into a register and dispatch to the bytecode
1283 // handler at the current bytecode offset.
1284 Label do_dispatch;
1285 __ bind(&do_dispatch);
1287 ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1290 __ Lbu(a7, MemOperand(a1));
1291 __ CalcScaledAddress(kScratchReg, kInterpreterDispatchTableRegister, a7,
1295
1296 __ RecordComment("--- InterpreterEntryReturnPC point ---");
1298 masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(
1299 masm->pc_offset());
1300 } else {
1302 // Both versions must be the same up to this point otherwise the builtins
1303 // will not be interchangeable.
1304 CHECK_EQ(
1305 masm->isolate()->heap()->interpreter_entry_return_pc_offset().value(),
1306 masm->pc_offset());
1307 }
1308
1309 // Any returns to the entry trampoline are either due to the return bytecode
1310 // or the interpreter tail calling a builtin and then a dispatch.
1311
1312 // Get bytecode array and bytecode offset from the stack frame.
1318
1319 // Either return, or advance to the next bytecode and dispatch.
1320 Label do_return;
1323 __ Lbu(a1, MemOperand(a1));
1326 a5, &do_return);
1327 __ Branch(&do_dispatch);
1328
1329 __ bind(&do_return);
1330 // The return value is in a0.
1331 LeaveInterpreterFrame(masm, t0, t1);
1332 __ Jump(ra);
1333
1334 __ bind(&stack_check_interrupt);
1335 // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
1336 // for the call to the StackGuard.
1340 __ StoreWord(
1343 __ CallRuntime(Runtime::kStackGuard);
1344
1345 // After the call, restore the bytecode array, bytecode offset and accumulator
1346 // registers again. Also, restore the bytecode offset in the stack to its
1347 // previous value.
1352 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1353
1355 __ StoreWord(
1357
1358 __ Branch(&after_stack_check_interrupt);
1359
1360#ifndef V8_JITLESS
1361#ifndef V8_ENABLE_LEAPTIERING
1362 __ bind(&flags_need_processing);
1363 __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector);
1364#endif // !V8_ENABLE_LEAPTIERING
1365 __ bind(&is_baseline);
1366 {
1367#ifndef V8_ENABLE_LEAPTIERING
1368 // Load the feedback vector from the closure.
1369 __ LoadTaggedField(
1370 feedback_vector,
1371 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1372 __ LoadTaggedField(
1373 feedback_vector,
1374 FieldMemOperand(feedback_vector, FeedbackCell::kValueOffset));
1375
1376 Label install_baseline_code;
1377 // Check if feedback vector is valid. If not, call prepare for baseline to
1378 // allocate it.
1379 __ LoadTaggedField(
1380 t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
1381 __ Lhu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
1382 __ Branch(&install_baseline_code, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
1383
1384 // Check for a tiering state.
1385 __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
1386 flags, feedback_vector, CodeKind::BASELINE, &flags_need_processing);
1387
1388 // TODO(olivf, 42204201): This fastcase is difficult to support with the
1389 // sandbox as it requires getting write access to the dispatch table. See
1390 // `JSFunction::UpdateCode`. We might want to remove it for all
1391 // configurations as it does not seem to be performance sensitive.
1392 // Load the baseline code into the closure.
1394 static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
1395 __ ReplaceClosureCodeWithOptimizedCode(a2, closure);
1396 __ JumpCodeObject(a2, kJSEntrypointTag);
1397
1398 __ bind(&install_baseline_code);
1399#endif // !V8_ENABLE_LEAPTIERING
1400 __ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
1401 }
1402#endif // !V8_JITLESS
1403
1404 __ bind(&compile_lazy);
1405 __ GenerateTailCallToReturnedCode(Runtime::kCompileLazy);
1406 // Unreachable code.
1407 __ break_(0xCC);
1408
1409 __ bind(&stack_overflow);
1410 __ CallRuntime(Runtime::kThrowStackOverflow);
1411 // Unreachable code.
1412 __ break_(0xCC);
1413}
1414
1416 Register start_address,
1417 Register scratch) {
1418 ASM_CODE_COMMENT(masm);
1419 // Find the address of the last argument.
1420 __ SubWord(scratch, num_args, Operand(1));
1421 __ SllWord(scratch, scratch, kSystemPointerSizeLog2);
1422 __ SubWord(start_address, start_address, scratch);
1423
1424 // Push the arguments.
1425 __ PushArray(start_address, num_args,
1427}
1428
1429// static
1431 MacroAssembler* masm, ConvertReceiverMode receiver_mode,
1434 // ----------- S t a t e -------------
1435 // -- a0 : the number of arguments
1436 // -- a2 : the address of the first argument to be pushed. Subsequent
1437 // arguments should be consecutive above this, in the same order as
1438 // they are to be pushed onto the stack.
1439 // -- a1 : the target to call (can be any Object).
1440 // -----------------------------------
1441 Label stack_overflow;
1443 // The spread argument should not be pushed.
1444 __ SubWord(a0, a0, Operand(1));
1445 }
1446
1447 if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1448 __ SubWord(a3, a0, Operand(kJSArgcReceiverSlots));
1449 } else {
1450 __ Move(a3, a0);
1451 }
1452 __ StackOverflowCheck(a3, a4, t0, &stack_overflow);
1453
1454 // This function modifies a2 and a4.
1455 GenerateInterpreterPushArgs(masm, a3, a2, a4);
1456 if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1457 __ PushRoot(RootIndex::kUndefinedValue);
1458 }
1459
1461 // Pass the spread in the register a2.
1462 // a2 already points to the penultimate argument, the spread
1463 // is below that.
1464 __ LoadWord(a2, MemOperand(a2, -kSystemPointerSize));
1465 }
1466
1467 // Call the target.
1469 __ TailCallBuiltin(Builtin::kCallWithSpread);
1470 } else {
1471 __ TailCallBuiltin(Builtins::Call(receiver_mode));
1472 }
1473
1474 __ bind(&stack_overflow);
1475 {
1476 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1477 // Unreachable code.
1478 __ break_(0xCC);
1479 }
1480}
1481
1482// static
1485 // ----------- S t a t e -------------
1486 // -- a0 : argument count
1487 // -- a3 : new target
1488 // -- a1 : constructor to call
1489 // -- a2 : allocation site feedback if available, undefined otherwise.
1490 // -- a4 : address of the first argument
1491 // -----------------------------------
1492 Label stack_overflow;
1493 __ StackOverflowCheck(a0, a5, t0, &stack_overflow);
1494
1496 // The spread argument should not be pushed.
1497 __ SubWord(a0, a0, Operand(1));
1498 }
1499 Register argc_without_receiver = a6;
1500 __ SubWord(argc_without_receiver, a0, Operand(kJSArgcReceiverSlots));
1501 // Push the arguments. This function modifies a4 and a5.
1502 GenerateInterpreterPushArgs(masm, argc_without_receiver, a4, a5);
1503
1504 // Push a slot for the receiver.
1505 __ push(zero_reg);
1506
1508 // Pass the spread in the register a2.
1509 // a4 already points to the penultimate argument, the spread
1510 // lies in the next interpreter register.
1511 __ LoadWord(a2, MemOperand(a4, -kSystemPointerSize));
1512 } else {
1513 __ AssertUndefinedOrAllocationSite(a2, t0);
1514 }
1515
1517 __ AssertFunction(a1);
1518
1519 // Tail call to the function-specific construct stub (still in the caller
1520 // context at this point).
1521 __ TailCallBuiltin(Builtin::kArrayConstructorImpl);
1522 } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1523 // Call the constructor with a0, a1, and a3 unmodified.
1524 __ TailCallBuiltin(Builtin::kConstructWithSpread);
1525 } else {
1527 // Call the constructor with a0, a1, and a3 unmodified.
1528 __ TailCallBuiltin(Builtin::kConstruct);
1529 }
1530
1531 __ bind(&stack_overflow);
1532 {
1533 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1534 // Unreachable code.
1535 __ break_(0xCC);
1536 }
1537}
1538
1539// static
1541 MacroAssembler* masm, ForwardWhichFrame which_frame) {
1542 // ----------- S t a t e -------------
1543 // -- a3 : new target
1544 // -- a1 : constructor to call
1545 // -----------------------------------
1546 Label stack_overflow;
1547
1548 // Load the frame pointer into a4.
1549 switch (which_frame) {
1551 __ Move(a4, fp);
1552 break;
1555 break;
1556 }
1557
1558 // Load the argument count into a0.
1560 __ StackOverflowCheck(a0, a5, t0, &stack_overflow);
1561
1562 // Point a4 to the base of the argument list to forward, excluding the
1563 // receiver.
1564 __ AddWord(a4, a4,
1567
1568 // Copy arguments on the stack. a5 is a scratch register.
1569 Register argc_without_receiver = a6;
1570 __ SubWord(argc_without_receiver, a0, Operand(kJSArgcReceiverSlots));
1571 __ PushArray(a4, argc_without_receiver);
1572
1573 // Push a slot for the receiver.
1574 __ push(zero_reg);
1575
1576 // Call the constructor with a0, a1, and a3 unmodified.
1577 __ TailCallBuiltin(Builtin::kConstruct);
1578
1579 __ bind(&stack_overflow);
1580 {
1581 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1582 // Unreachable code.
1583 __ break_(0xCC);
1584 }
1585}
1586
1587namespace {
1588
1589void NewImplicitReceiver(MacroAssembler* masm) {
1590 // ----------- S t a t e -------------
1591 // -- a0 : the number of arguments
1592 // -- a1 : constructor to call (checked to be a JSFunction)
1593 // -- a3 : new target
1594 //
1595 // Stack:
1596 // -- Implicit Receiver
1597 // -- [arguments without receiver]
1598 // -- Implicit Receiver
1599 // -- Context
1600 // -- FastConstructMarker
1601 // -- FramePointer
1602 // -----------------------------------
1603 Register implicit_receiver = a4;
1604
1605 // Save live registers.
1606 __ SmiTag(a0);
1607 __ Push(a0, a1, a3);
1608 __ CallBuiltin(Builtin::kFastNewObject);
1609 // Save result.
1610 __ mv(implicit_receiver, a0);
1611 __ Pop(a0, a1, a3);
1612 __ SmiUntag(a0);
1613
1614 // Patch implicit receiver (in arguments)
1615 __ StoreReceiver(implicit_receiver);
1616 // Patch the second implicit receiver (in the construct frame).
1617 __ StoreWord(
1618 implicit_receiver,
1620
1621 // Restore context.
1623}
1624
1625} // namespace
1626
1627// static
1628void Builtins::Generate_InterpreterPushArgsThenFastConstructFunction(
1629 MacroAssembler* masm) {
1630 // ----------- S t a t e -------------
1631 // -- a0 : argument count
1632 // -- a1 : constructor to call (checked to be a JSFunction)
1633 // -- a3 : new target
1634 // -- a4 : address of the first argument
1635 // -- cp : context pointer
1636 // -----------------------------------
1637 __ AssertFunction(a1);
1638
1639 // Check if target has a [[Construct]] internal method.
1640 Label non_constructor;
1641 __ LoadMap(a2, a1);
1642 __ Lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
1643 __ And(a2, a2, Operand(Map::Bits1::IsConstructorBit::kMask));
1644 __ Branch(&non_constructor, eq, a2, Operand(zero_reg));
1645
1646 // Add a stack check before pushing arguments.
1647 Label stack_overflow;
1648 __ StackOverflowCheck(a0, a2, a5, &stack_overflow);
1649
1650 // Enter a construct frame.
1651 FrameScope scope(masm, StackFrame::MANUAL);
1652 __ EnterFrame(StackFrame::FAST_CONSTRUCT);
1653
1654 // Implicit receiver stored in the construct frame.
1655 __ LoadRoot(a2, RootIndex::kTheHoleValue);
1656 __ Push(cp, a2);
1657
1658 // Push arguments + implicit receiver.
1659 Register argc_without_receiver = a7;
1660 __ SubWord(argc_without_receiver, a0, Operand(kJSArgcReceiverSlots));
1661 GenerateInterpreterPushArgs(masm, argc_without_receiver, a4, a5);
1662 __ Push(a2);
1663
1664 // Check if it is a builtin call.
1665 Label builtin_call;
1666 __ LoadTaggedField(
1667 a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
1668 __ Load32U(a2, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
1669 __ And(a5, a2, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
1670 __ Branch(&builtin_call, ne, a5, Operand(zero_reg));
1671
1672 // Check if we need to create an implicit receiver.
1673 Label not_create_implicit_receiver;
1674 __ DecodeField<SharedFunctionInfo::FunctionKindBits>(a2);
1675 __ JumpIfIsInRange(
1676 a2, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
1677 static_cast<uint32_t>(FunctionKind::kDerivedConstructor),
1678 &not_create_implicit_receiver);
1679 NewImplicitReceiver(masm);
1680 __ bind(&not_create_implicit_receiver);
1681
1682 // Call the function.
1683 __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
1684
1685 // ----------- S t a t e -------------
1686 //  -- a0 : constructor result
1687 //
1688 // Stack:
1689 // -- Implicit Receiver
1690 // -- Context
1691 // -- FastConstructMarker
1692 // -- FramePointer
1693 // -----------------------------------
1694
1695 // Store offset of return address for deoptimizer.
1696 masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
1697 masm->pc_offset());
1698
1699 // If the result is an object (in the ECMA sense), we should get rid
1700 // of the receiver and use the result; see ECMA-262 section 13.2.2-7
1701 // on page 74.
1702 Label use_receiver, do_throw, leave_and_return, check_receiver;
1703
1704 // If the result is undefined, we jump out to using the implicit receiver.
1705 __ JumpIfNotRoot(a0, RootIndex::kUndefinedValue, &check_receiver);
1706 // Throw away the result of the constructor invocation and use the
1707 // on-stack receiver as the result.
1708 __ bind(&use_receiver);
1709 __ LoadWord(
1711 __ JumpIfRoot(a0, RootIndex::kTheHoleValue, &do_throw);
1712
1713 __ bind(&leave_and_return);
1714 // Leave construct frame.
1715 __ LeaveFrame(StackFrame::FAST_CONSTRUCT);
1716 __ Ret();
1717
1718 // Otherwise we do a smi check and fall through to check if the return value
1719 // is a valid receiver.
1720 __ bind(&check_receiver);
1721
1722 // If the result is a smi, it is *not* an object in the ECMA sense.
1723 __ JumpIfSmi(a0, &use_receiver);
1724
1725 // Check if the type of the result is not an object in the ECMA sense.
1726 __ JumpIfJSAnyIsNotPrimitive(a0, a4, &leave_and_return);
1727 __ Branch(&use_receiver);
1728
1729 __ bind(&builtin_call);
1730 // TODO(victorgomes): Check the possibility to turn this into a tailcall.
1731 __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
1732 __ LeaveFrame(StackFrame::FAST_CONSTRUCT);
1733 __ Ret();
1734
1735 __ bind(&do_throw);
1736 // Restore the context from the frame.
1738 __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
1739 // Unreachable code.
1740 __ Trap();
1741
1742 __ bind(&stack_overflow);
1743 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1744 // Unreachable code.
1745 __ Trap();
1746
1747 // Called Construct on an Object that doesn't have a [[Construct]] internal
1748 // method.
1749 __ bind(&non_constructor);
1750 __ TailCallBuiltin(Builtin::kConstructedNonConstructable);
1751}
1752
1754 // Set the return address to the correct point in the interpreter entry
1755 // trampoline.
1756 Label builtin_trampoline, trampoline_loaded;
1757 Tagged<Smi> interpreter_entry_return_pc_offset(
1758 masm->isolate()->heap()->interpreter_entry_return_pc_offset());
1759 DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
1760
1761 // If the SFI function_data is an InterpreterData, the function will have a
1762 // custom copy of the interpreter entry trampoline for profiling. If so,
1763 // get the custom trampoline, otherwise grab the entry address of the global
1764 // trampoline.
1766 __ LoadTaggedField(
1767 t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
1768 __ LoadTrustedPointerField(
1769 t0, FieldMemOperand(t0, SharedFunctionInfo::kTrustedFunctionDataOffset),
1771 __ GetObjectType(t0, kInterpreterDispatchTableRegister,
1773 __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister,
1774 Operand(INTERPRETER_DATA_TYPE), Label::Distance::kNear);
1775
1776 __ LoadProtectedPointerField(
1777 t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
1778 __ LoadCodeInstructionStart(t0, t0, kJSEntrypointTag);
1779 __ BranchShort(&trampoline_loaded);
1780
1781 __ bind(&builtin_trampoline);
1782 __ li(t0, ExternalReference::
1783 address_of_interpreter_entry_trampoline_instruction_start(
1784 masm->isolate()));
1785 __ LoadWord(t0, MemOperand(t0));
1786
1787 __ bind(&trampoline_loaded);
1788 __ AddWord(ra, t0, Operand(interpreter_entry_return_pc_offset.value()));
1789
1790 // Initialize the dispatch table register.
1792 ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1793
1794 // Get the bytecode array pointer from the frame.
1797
1798 if (v8_flags.debug_code) {
1799 // Check function data field is actually a BytecodeArray object.
1801 __ Assert(ne,
1802 AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
1803 kScratchReg, Operand(zero_reg));
1804 __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
1805 __ Assert(eq,
1806 AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
1807 a1, Operand(BYTECODE_ARRAY_TYPE));
1808 }
1809
1810 // Get the target bytecode offset from the frame.
1813
1814 if (v8_flags.debug_code) {
1815 Label okay;
1819 // Unreachable code.
1820 __ break_(0xCC);
1821 __ bind(&okay);
1822 }
1823
1824 // Dispatch to the target bytecode.
1827 __ Lbu(a7, MemOperand(a1));
1828 __ CalcScaledAddress(a1, kInterpreterDispatchTableRegister, a7,
1832}
1833
1834void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
1835 // Advance the current bytecode offset stored within the given interpreter
1836 // stack frame. This simulates what all bytecode handlers do upon completion
1837 // of the underlying operation.
1843
1844 Label enter_bytecode, function_entry_bytecode;
1845 __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
1848
1849 // Load the current bytecode.
1852 __ Lbu(a1, MemOperand(a1));
1853
1854 // Advance to the next bytecode.
1855 Label if_return;
1858 a4, &if_return);
1859
1860 __ bind(&enter_bytecode);
1861 // Convert new bytecode offset to a Smi and save in the stackframe.
1863 __ StoreWord(
1865
1867
1868 __ bind(&function_entry_bytecode);
1869 // If the code deoptimizes during the implicit function entry stack interrupt
1870 // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
1871 // not a valid bytecode offset. Detect this case and advance to the first
1872 // actual bytecode.
1875 __ Branch(&enter_bytecode);
1876
1877 // We should never take the if_return path.
1878 __ bind(&if_return);
1879 __ Abort(AbortReason::kInvalidBytecodeAdvance);
1880}
1881
1882void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
1884}
1885
1886namespace {
1887void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
1888 bool javascript_builtin,
1889 bool with_result) {
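 // In outline (descriptive note): the deoptimizer has pushed the allocatable
 // registers and, when a result is expected, a hole that is overwritten with
 // the return value below; the registers are then restored and control jumps
 // to the requested builtin.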
1890 const RegisterConfiguration* config(RegisterConfiguration::Default());
1891 int allocatable_register_count = config->num_allocatable_general_registers();
1892 UseScratchRegisterScope temp(masm);
1893 Register scratch = temp.Acquire();
1894 if (with_result) {
1895 if (javascript_builtin) {
1896 __ Move(scratch, a0);
1897 } else {
1898 // Overwrite the hole inserted by the deoptimizer with the return value
1899 // from the LAZY deopt point.
1900 __ StoreWord(
1901 a0, MemOperand(
1902 sp, config->num_allocatable_general_registers() *
1905 }
1906 }
1907 for (int i = allocatable_register_count - 1; i >= 0; --i) {
1908 int code = config->GetAllocatableGeneralCode(i);
1909 __ Pop(Register::from_code(code));
1910 if (javascript_builtin && code == kJavaScriptCallArgCountRegister.code()) {
1912 }
1913 }
1914
1915 if (with_result && javascript_builtin) {
1916    // Overwrite the hole inserted by the deoptimizer with the return value from
1917    // the LAZY deopt point. a0 contains the arguments count; the return value
1918    // from LAZY is always the last argument.
1919 constexpr int return_value_offset =
1922 __ AddWord(a0, a0, Operand(return_value_offset));
1923 __ CalcScaledAddress(t0, sp, a0, kSystemPointerSizeLog2);
1924 __ StoreWord(scratch, MemOperand(t0));
1925 // Recover arguments count.
1926 __ SubWord(a0, a0, Operand(return_value_offset));
1927 }
1928
1929 __ LoadWord(
1930 fp,
1932 // Load builtin index (stored as a Smi) and use it to get the builtin start
1933 // address from the builtins table.
1934 __ Pop(t6);
1935 __ AddWord(sp, sp,
1937 __ Pop(ra);
1938 __ LoadEntryFromBuiltinIndex(t6, t6);
1939 __ Jump(t6);
1940}
1941} // namespace
1942
1943void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
1944 Generate_ContinueToBuiltinHelper(masm, false, false);
1945}
1946
1947void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
1948 MacroAssembler* masm) {
1949 Generate_ContinueToBuiltinHelper(masm, false, true);
1950}
1951
1952void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
1953 Generate_ContinueToBuiltinHelper(masm, true, false);
1954}
1955
1956void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
1957 MacroAssembler* masm) {
1958 Generate_ContinueToBuiltinHelper(masm, true, true);
1959}
1960
1961void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
1962 {
1963 FrameScope scope(masm, StackFrame::INTERNAL);
1964 __ CallRuntime(Runtime::kNotifyDeoptimized);
1965 }
1966
1968 __ LoadWord(a0, MemOperand(sp, 0 * kSystemPointerSize));
1969 __ AddWord(sp, sp, Operand(1 * kSystemPointerSize)); // Remove state.
1970 __ Ret();
1971}
1972
1973namespace {
1974
1975void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
1976 Operand offset = Operand(0)) {
1977 __ AddWord(ra, entry_address, offset);
1978 // And "return" to the OSR entry point of the function.
1979 __ Ret();
1980}
1981
1982enum class OsrSourceTier {
1983 kInterpreter,
1984 kBaseline,
1985};
1986
1987void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
1988 Register maybe_target_code,
1989 Register expected_param_count) {
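 // In outline (descriptive note): if no target code was supplied, ask the
 // runtime to compile OSR code; then verify the returned code object and
 // "return" into it at the OSR entry offset recorded in its deoptimization
 // data.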
1990 Label jump_to_optimized_code;
1991 {
1992 // If maybe_target_code is not null, no need to call into runtime. A
1993 // precondition here is: if maybe_target_code is an InstructionStream
1994 // object, it must NOT be marked_for_deoptimization (callers must ensure
1995 // this).
1996 __ CompareTaggedAndBranch(&jump_to_optimized_code, ne, maybe_target_code,
1997 Operand(Smi::zero()));
1998 }
1999 ASM_CODE_COMMENT(masm);
2000 {
2001 FrameScope scope(masm, StackFrame::INTERNAL);
2002 __ Push(expected_param_count);
2003 __ CallRuntime(Runtime::kCompileOptimizedOSR);
2004 __ Pop(expected_param_count);
2005 }
2006
2007  // If the code object is null, just return to the caller.
2009 __ CompareTaggedAndBranch(&jump_to_optimized_code, ne, maybe_target_code,
2010 Operand(Smi::zero()));
2011 __ Ret();
2012 DCHECK_EQ(maybe_target_code, a0); // Already in the right spot.
2013
2014 __ bind(&jump_to_optimized_code);
2015
2016 const Register scratch(a2);
2017 CHECK(!AreAliased(maybe_target_code, expected_param_count, scratch));
2018 // OSR entry tracing.
2019 {
2020 Label next;
2021 __ li(scratch, ExternalReference::address_of_log_or_trace_osr());
2022 __ Lbu(scratch, MemOperand(scratch));
2023 __ Branch(&next, eq, scratch, Operand(zero_reg));
2024
2025 {
2026 FrameScope scope(masm, StackFrame::INTERNAL);
2027 __ Push(maybe_target_code, expected_param_count);
2028 __ CallRuntime(Runtime::kLogOrTraceOptimizedOSREntry, 0);
2029 __ Pop(maybe_target_code, expected_param_count);
2030 }
2031
2032 __ bind(&next);
2033 }
2034
2035 if (source == OsrSourceTier::kInterpreter) {
2036    // Drop the handler frame that is sitting on top of the actual
2037    // JavaScript frame. This is the case when OSR is triggered from bytecode.
2038 __ LeaveFrame(StackFrame::STUB);
2039 }
2040
2041  // Check that we are actually jumping to an OSR code object. Among other
2042  // things, this ensures that the object contains the deoptimization data loaded below.
2043 __ Load32U(scratch,
2044 FieldMemOperand(maybe_target_code, Code::kOsrOffsetOffset));
2045 __ SbxCheck(ne, AbortReason::kExpectedOsrCode, scratch,
2046 Operand(BytecodeOffset::None().ToInt()));
2047
2048 // Check the target has a matching parameter count. This ensures that the OSR
2049 // code will correctly tear down our frame when leaving.
2050 __ Lhu(scratch,
2051 FieldMemOperand(maybe_target_code, Code::kParameterCountOffset));
2052 __ SmiUntag(expected_param_count);
2053 __ SbxCheck(eq, AbortReason::kOsrUnexpectedStackSize, scratch,
2054 Operand(expected_param_count));
2055
2056 // Load deoptimization data from the code object.
2057 // <deopt_data> = <code>[#deoptimization_data_offset]
2058 __ LoadProtectedPointerField(
2059 scratch,
2060 FieldMemOperand(maybe_target_code,
2061 Code::kDeoptimizationDataOrInterpreterDataOffset));
2062
2063 // Load the OSR entrypoint offset from the deoptimization data.
2064 // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
2065 __ SmiUntagField(
2066 scratch,
2069
2070 __ LoadCodeInstructionStart(maybe_target_code, maybe_target_code,
2072
2073 // Compute the target address = code_entry + osr_offset
2074 // <entry_addr> = <code_entry> + <osr_offset>
2075 Generate_OSREntry(masm, maybe_target_code, Operand(scratch));
2076}
2077} // namespace
2078
2079void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
2080 using D = OnStackReplacementDescriptor;
2081 static_assert(D::kParameterCount == 2);
2082 OnStackReplacement(masm, OsrSourceTier::kInterpreter,
2083 D::MaybeTargetCodeRegister(),
2084 D::ExpectedParameterCountRegister());
2085}
2086
2087void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
2088 using D = OnStackReplacementDescriptor;
2089 static_assert(D::kParameterCount == 2);
2090
2091 __ LoadWord(kContextRegister,
2093 OnStackReplacement(masm, OsrSourceTier::kBaseline,
2094 D::MaybeTargetCodeRegister(),
2095 D::ExpectedParameterCountRegister());
2096}
2097
2098#ifdef V8_ENABLE_MAGLEV
2099
2100void Builtins::Generate_MaglevFunctionEntryStackCheck(MacroAssembler* masm,
2101 bool save_new_target) {
2102 // Input (a0): Stack size (Smi).
2103 // This builtin can be invoked just after Maglev's prologue.
2104 // All registers are available, except (possibly) new.target.
2105 Register stack_size = kCArgRegs[0];
2106 ASM_CODE_COMMENT(masm);
2107 {
2108 FrameScope scope(masm, StackFrame::INTERNAL);
2109 __ AssertSmi(stack_size);
2110 if (save_new_target) {
2112 __ AssertSmiOrHeapObjectInMainCompressionCage(
2114 }
2116 }
2117 __ Push(stack_size);
2118 __ CallRuntime(Runtime::kStackGuardWithGap, 1);
2119 if (save_new_target) {
2121 }
2122 }
2123 __ Ret();
2124}
2125
2126#endif // V8_ENABLE_MAGLEV
2127
2128// static
2129void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
2130 // ----------- S t a t e -------------
2131 // -- a0 : argc
2132 // -- sp[0] : receiver
2133 // -- sp[4] : thisArg
2134 // -- sp[8] : argArray
2135 // -----------------------------------
2136
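// Illustrative JS mapping (not part of the builtin itself): for a call like
// `f.apply(thisArg, [x, y])` the stack on entry holds the receiver `f`, then
// `thisArg`, then the array `[x, y]`, matching the layout above; missing
// arguments default to undefined below.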
2137 Register argc = a0;
2138 Register arg_array = a2;
2139 Register receiver = a1;
2140 Register this_arg = a5;
2141 Register undefined_value = a3;
2142 Register scratch = a4;
2143
2144 __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
2145
2146 // 1. Load receiver into a1, argArray into a2 (if present), remove all
2147 // arguments from the stack (including the receiver), and push thisArg (if
2148 // present) instead.
2149 {
2150    // Claim (2 - argc) dummy arguments from the stack, to put the stack in a
2151    // consistent state for a simple pop operation.
2152 __ LoadWord(this_arg, MemOperand(sp, kSystemPointerSize));
2153 __ LoadWord(arg_array, MemOperand(sp, 2 * kSystemPointerSize));
2154 __ SubWord(scratch, argc, JSParameterCount(0));
2155 if (CpuFeatures::IsSupported(ZICOND)) {
2156 __ MoveIfZero(arg_array, undefined_value, scratch); // if argc == 0
2157 __ MoveIfZero(this_arg, undefined_value, scratch); // if argc == 0
2158 __ SubWord(scratch, scratch, Operand(1));
2159 __ MoveIfZero(arg_array, undefined_value, scratch); // if argc == 1
2160 } else {
2161 Label done0, done1;
2162 __ Branch(&done0, ne, scratch, Operand(zero_reg), Label::Distance::kNear);
2163 __ Move(arg_array, undefined_value); // if argc == 0
2164 __ Move(this_arg, undefined_value); // if argc == 0
2165 __ bind(&done0); // else (i.e., argc > 0)
2166
2167 __ Branch(&done1, ne, scratch, Operand(1), Label::Distance::kNear);
2168 __ Move(arg_array, undefined_value); // if argc == 1
2169 __ bind(&done1); // else (i.e., argc > 1)
2170 }
2171 __ LoadWord(receiver, MemOperand(sp));
2172 __ DropArgumentsAndPushNewReceiver(argc, this_arg);
2173 }
2174
2175 // ----------- S t a t e -------------
2176 // -- a2 : argArray
2177 // -- a1 : receiver
2178 // -- a3 : undefined root value
2179 // -- sp[0] : thisArg
2180 // -----------------------------------
2181
2182 // 2. We don't need to check explicitly for callable receiver here,
2183 // since that's the first thing the Call/CallWithArrayLike builtins
2184 // will do.
2185
2186 // 3. Tail call with no arguments if argArray is null or undefined.
2187 Label no_arguments;
2188 __ LoadRoot(scratch, RootIndex::kNullValue);
2189 __ CompareTaggedAndBranch(&no_arguments, eq, arg_array, Operand(scratch));
2190 __ CompareTaggedAndBranch(&no_arguments, eq, arg_array,
2191 Operand(undefined_value));
2192
2193 // 4a. Apply the receiver to the given argArray.
2194 __ TailCallBuiltin(Builtin::kCallWithArrayLike);
2195
2196 // 4b. The argArray is either null or undefined, so we tail call without any
2197 // arguments to the receiver.
2198 __ bind(&no_arguments);
2199 {
2200 __ li(a0, JSParameterCount(0));
2201 DCHECK(receiver == a1);
2202 __ TailCallBuiltin(Builtins::Call());
2203 }
2204}
2205
2206// static
2207void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
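  // Illustrative JS mapping (not part of the builtin itself): for
  // `f.call(thisArg, x)` the callable `f` arrives as the receiver on the
  // stack and is popped into a1 below; the remaining arguments stay in place.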
2208 // 1. Get the callable to call (passed as receiver) from the stack.
2209 { __ Pop(a1); }
2210
2211 // 2. Make sure we have at least one argument.
2212 // a0: actual number of arguments
2213 {
2214 Label done;
2215 __ Branch(&done, ne, a0, Operand(JSParameterCount(0)),
2217 __ PushRoot(RootIndex::kUndefinedValue);
2218 __ AddWord(a0, a0, Operand(1));
2219 __ bind(&done);
2220 }
2221
2222 // 3. Adjust the actual number of arguments.
2223 __ AddWord(a0, a0, -1);
2224
2225 // 4. Call the callable.
2226 __ TailCallBuiltin(Builtins::Call());
2227}
2228
2229void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
2230 // ----------- S t a t e -------------
2231 // -- a0 : argc
2232 // -- sp[0] : receiver
2233 // -- sp[8] : target (if argc >= 1)
2234 // -- sp[16] : thisArgument (if argc >= 2)
2235 // -- sp[24] : argumentsList (if argc == 3)
2236 // -----------------------------------
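// Illustrative JS mapping (not part of the builtin itself):
// `Reflect.apply(target, thisArgument, argumentsList)` corresponds to the
// three optional stack slots above; any missing argument defaults to
// undefined below.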
2237
2238 Register argc = a0;
2239 Register arguments_list = a2;
2240 Register target = a1;
2241 Register this_argument = a5;
2242 Register undefined_value = a3;
2243
2244 __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
2245
2246 // 1. Load target into a1 (if present), argumentsList into a2 (if present),
2247 // remove all arguments from the stack (including the receiver), and push
2248 // thisArgument (if present) instead.
2249 {
2250    // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
2251 // consistent state for a simple pop operation.
2252
2253 __ LoadWord(target, MemOperand(sp, kSystemPointerSize));
2254 __ LoadWord(this_argument, MemOperand(sp, 2 * kSystemPointerSize));
2255 __ LoadWord(arguments_list, MemOperand(sp, 3 * kSystemPointerSize));
2256
2257 UseScratchRegisterScope temps(masm);
2258 Register scratch = temps.Acquire();
2259 __ SubWord(scratch, argc, Operand(JSParameterCount(0)));
2260 if (CpuFeatures::IsSupported(ZICOND)) {
2261 __ MoveIfZero(arguments_list, undefined_value, scratch); // if argc == 0
2262 __ MoveIfZero(this_argument, undefined_value, scratch); // if argc == 0
2263 __ MoveIfZero(target, undefined_value, scratch); // if argc == 0
2264 __ SubWord(scratch, scratch, Operand(1));
2265 __ MoveIfZero(arguments_list, undefined_value, scratch); // if argc == 1
2266 __ MoveIfZero(this_argument, undefined_value, scratch); // if argc == 1
2267 __ SubWord(scratch, scratch, Operand(1));
2268 __ MoveIfZero(arguments_list, undefined_value, scratch); // if argc == 2
2269 } else {
2270 Label done0, done1, done2;
2271 __ Branch(&done0, ne, scratch, Operand(zero_reg), Label::Distance::kNear);
2272 __ Move(arguments_list, undefined_value); // if argc == 0
2273 __ Move(this_argument, undefined_value); // if argc == 0
2274 __ Move(target, undefined_value); // if argc == 0
2275 __ bind(&done0); // argc != 0
2276
2277 __ Branch(&done1, ne, scratch, Operand(1), Label::Distance::kNear);
2278 __ Move(arguments_list, undefined_value); // if argc == 1
2279 __ Move(this_argument, undefined_value); // if argc == 1
2280 __ bind(&done1); // argc > 1
2281
2282 __ Branch(&done2, ne, scratch, Operand(2), Label::Distance::kNear);
2283 __ Move(arguments_list, undefined_value); // if argc == 2
2284 __ bind(&done2); // argc > 2
2285 }
2286
2287 __ DropArgumentsAndPushNewReceiver(argc, this_argument);
2288 }
2289
2290 // ----------- S t a t e -------------
2291 // -- a2 : argumentsList
2292 // -- a1 : target
2293 // -- a3 : undefined root value
2294 // -- sp[0] : thisArgument
2295 // -----------------------------------
2296
2297 // 2. We don't need to check explicitly for callable target here,
2298 // since that's the first thing the Call/CallWithArrayLike builtins
2299 // will do.
2300
2301 // 3. Apply the target to the given argumentsList.
2302 __ TailCallBuiltin(Builtin::kCallWithArrayLike);
2303}
2304
2305void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
2306 // ----------- S t a t e -------------
2307 // -- a0 : argc
2308 // -- sp[0] : receiver
2309 // -- sp[8] : target
2310 // -- sp[16] : argumentsList
2311 // -- sp[24] : new.target (optional)
2312 // -----------------------------------
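// Illustrative JS mapping (not part of the builtin itself):
// `Reflect.construct(target, argumentsList, newTarget)`; if newTarget is
// absent it defaults to target, as implemented below.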
2313 Register argc = a0;
2314 Register arguments_list = a2;
2315 Register target = a1;
2316 Register new_target = a3;
2317 Register undefined_value = a4;
2318
2319 __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
2320
2321 // 1. Load target into a1 (if present), argumentsList into a2 (if present),
2322 // new.target into a3 (if present, otherwise use target), remove all
2323 // arguments from the stack (including the receiver), and push thisArgument
2324 // (if present) instead.
2325 {
2326    // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
2327 // consistent state for a simple pop operation.
2328 __ LoadWord(target, MemOperand(sp, kSystemPointerSize));
2329 __ LoadWord(arguments_list, MemOperand(sp, 2 * kSystemPointerSize));
2330 __ LoadWord(new_target, MemOperand(sp, 3 * kSystemPointerSize));
2331
2332 UseScratchRegisterScope temps(masm);
2333 Register scratch = temps.Acquire();
2334 __ SubWord(scratch, argc, Operand(JSParameterCount(0)));
2335 if (CpuFeatures::IsSupported(ZICOND)) {
2336 __ MoveIfZero(arguments_list, undefined_value, scratch); // if argc == 0
2337 __ MoveIfZero(new_target, undefined_value, scratch); // if argc == 0
2338 __ MoveIfZero(target, undefined_value, scratch); // if argc == 0
2339 __ SubWord(scratch, scratch, Operand(1));
2340 __ MoveIfZero(arguments_list, undefined_value, scratch); // if argc == 1
2341 __ MoveIfZero(new_target, target, scratch); // if argc == 1
2342 __ SubWord(scratch, scratch, Operand(1));
2343 __ MoveIfZero(new_target, target, scratch); // if argc == 2
2344 } else {
2345 Label done0, done1, done2;
2346 __ Branch(&done0, ne, scratch, Operand(zero_reg), Label::Distance::kNear);
2347 __ Move(arguments_list, undefined_value); // if argc == 0
2348 __ Move(new_target, undefined_value); // if argc == 0
2349 __ Move(target, undefined_value); // if argc == 0
2350 __ bind(&done0);
2351
2352 __ Branch(&done1, ne, scratch, Operand(1), Label::Distance::kNear);
2353 __ Move(arguments_list, undefined_value); // if argc == 1
2354 __ Move(new_target, target); // if argc == 1
2355 __ bind(&done1);
2356
2357 __ Branch(&done2, ne, scratch, Operand(2), Label::Distance::kNear);
2358 __ Move(new_target, target); // if argc == 2
2359 __ bind(&done2);
2360 }
2361
2362 __ DropArgumentsAndPushNewReceiver(argc, undefined_value);
2363 }
2364
2365 // ----------- S t a t e -------------
2366 // -- a2 : argumentsList
2367 // -- a1 : target
2368 // -- a3 : new.target
2369 // -- sp[0] : receiver (undefined)
2370 // -----------------------------------
2371
2372 // 2. We don't need to check explicitly for constructor target here,
2373 // since that's the first thing the Construct/ConstructWithArrayLike
2374 // builtins will do.
2375
2376 // 3. We don't need to check explicitly for constructor new.target here,
2377 // since that's the second thing the Construct/ConstructWithArrayLike
2378 // builtins will do.
2379
2380 // 4. Construct the target with the given new.target and argumentsList.
2381 __ TailCallBuiltin(Builtin::kConstructWithArrayLike);
2382}
2383
2384namespace {
2385
2386// Allocate new stack space for |count| arguments and shift all existing
2387// arguments already on the stack. |pointer_to_new_space_out| points to the
2388// first free slot on the stack to copy additional arguments to and
2389// |argc_in_out| is updated to include |count|.
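// Sketch of the effect (descriptive note): the |argc_in_out| existing slots
// are copied down by |count| system-pointer slots, so afterwards
// |pointer_to_new_space_out| addresses the lowest of the |count| freed slots
// directly above the shifted arguments.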
2390void Generate_AllocateSpaceAndShiftExistingArguments(
2391 MacroAssembler* masm, Register count, Register argc_in_out,
2392 Register pointer_to_new_space_out) {
2393 UseScratchRegisterScope temps(masm);
2394 Register scratch1 = temps.Acquire();
2395 Register scratch2 = temps.Acquire();
2396 Register scratch3 = temps.Acquire();
2397 DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1,
2398 scratch2));
2399 Register old_sp = scratch1;
2400 Register new_space = scratch2;
2401 __ mv(old_sp, sp);
2402 __ slli(new_space, count, kSystemPointerSizeLog2);
2403 __ SubWord(sp, sp, Operand(new_space));
2404
2405 Register end = scratch2;
2406 Register value = scratch3;
2407 Register dest = pointer_to_new_space_out;
2408 __ mv(dest, sp);
2409 __ CalcScaledAddress(end, old_sp, argc_in_out, kSystemPointerSizeLog2);
2410 Label loop, done;
2411 __ Branch(&done, ge, old_sp, Operand(end));
2412 __ bind(&loop);
2413 __ LoadWord(value, MemOperand(old_sp, 0));
2414 __ StoreWord(value, MemOperand(dest, 0));
2415 __ AddWord(old_sp, old_sp, Operand(kSystemPointerSize));
2416 __ AddWord(dest, dest, Operand(kSystemPointerSize));
2417 __ Branch(&loop, lt, old_sp, Operand(end));
2418 __ bind(&done);
2419
2420 // Update total number of arguments.
2421 __ AddWord(argc_in_out, argc_in_out, count);
2422}
2423
2424} // namespace
2425
2426// static
2428 Builtin target_builtin) {
2429 UseScratchRegisterScope temps(masm);
2430 temps.Include(t1, t0);
2431 // ----------- S t a t e -------------
2432 // -- a1 : target
2433 // -- a0 : number of parameters on the stack
2434 // -- a2 : arguments list (a FixedArray)
2435 // -- a4 : len (number of elements to push from args)
2436 // -- a3 : new.target (for [[Construct]])
2437 // -----------------------------------
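  // In outline (descriptive note): make room on the stack, push the |len|
  // elements of the FixedArray in a2 (replacing holes with undefined), and
  // tail-call |target_builtin|.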
2438 if (v8_flags.debug_code) {
2439 // Allow a2 to be a FixedArray, or a FixedDoubleArray if a4 == 0.
2440 Label ok, fail;
2441 __ AssertNotSmi(a2);
2442 __ GetObjectType(a2, kScratchReg, kScratchReg);
2443 __ Branch(&ok, eq, kScratchReg, Operand(FIXED_ARRAY_TYPE),
2445 __ Branch(&fail, ne, kScratchReg, Operand(FIXED_DOUBLE_ARRAY_TYPE),
2447 __ Branch(&ok, eq, a4, Operand(zero_reg), Label::Distance::kNear);
2448 // Fall through.
2449 __ bind(&fail);
2450 __ Abort(AbortReason::kOperandIsNotAFixedArray);
2451
2452 __ bind(&ok);
2453 }
2454
2455 Register args = a2;
2456 Register len = a4;
2457
2458 // Check for stack overflow.
2459 Label stack_overflow;
2460 __ StackOverflowCheck(len, kScratchReg, a5, &stack_overflow);
2461
2462 // Move the arguments already in the stack,
2463 // including the receiver and the return address.
2464 // a4: Number of arguments to make room for.
2465 // a0: Number of arguments already on the stack.
2466 // a7: Points to first free slot on the stack after arguments were shifted.
2467 Generate_AllocateSpaceAndShiftExistingArguments(masm, a4, a0, a7);
2468
2469 // Push arguments onto the stack (thisArgument is already on the stack).
2470 {
2471 Label done, push, loop;
2472 Register src = a6;
2473 Register scratch = len;
2474 UseScratchRegisterScope temps(masm);
2476 __ Branch(&done, eq, len, Operand(zero_reg), Label::Distance::kNear);
2477 __ SllWord(scratch, len, kTaggedSizeLog2);
2478 __ SubWord(scratch, sp, Operand(scratch));
2479#if !V8_STATIC_ROOTS_BOOL
2480 // We do not use the Branch(reg, RootIndex) macro without static roots,
2481 // as it would do a LoadRoot behind the scenes and we want to avoid that
2482 // in a loop.
2483 Register hole_value = temps.Acquire();
2484 __ LoadTaggedRoot(hole_value, RootIndex::kTheHoleValue);
2485#endif // !V8_STATIC_ROOTS_BOOL
2486 __ bind(&loop);
2487 __ LoadTaggedField(a5, MemOperand(src));
2488 __ AddWord(src, src, kTaggedSize);
2489#if V8_STATIC_ROOTS_BOOL
2490 __ CompareRootAndBranch(a5, RootIndex::kTheHoleValue, ne, &push);
2491#else
2492 __ CompareTaggedAndBranch(&push, ne, a5, Operand(hole_value));
2493#endif
2494 __ LoadRoot(a5, RootIndex::kUndefinedValue);
2495 __ bind(&push);
2496 __ StoreWord(a5, MemOperand(a7, 0));
2497 __ AddWord(a7, a7, Operand(kSystemPointerSize));
2498 __ AddWord(scratch, scratch, Operand(kTaggedSize));
2499 __ Branch(&loop, ne, scratch, Operand(sp));
2500 __ bind(&done);
2501 }
2502
2503 // Tail-call to the actual Call or Construct builtin.
2504 __ TailCallBuiltin(target_builtin);
2505
2506 __ bind(&stack_overflow);
2507 __ TailCallRuntime(Runtime::kThrowStackOverflow);
2508}
2509
2510// static
2513 Builtin target_builtin) {
2514 // ----------- S t a t e -------------
2515 // -- a0 : the number of arguments
2516 // -- a3 : the new.target (for [[Construct]] calls)
2517 // -- a1 : the target to call (can be any Object)
2518 // -- a2 : start index (to support rest parameters)
2519 // -----------------------------------
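  // In outline (descriptive note): copy the caller frame's arguments,
  // starting at the start index in a2, onto the current stack and tail-call
  // |target_builtin|; this is how rest parameters are forwarded.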
2520 UseScratchRegisterScope temps(masm);
2521 temps.Include(t0, t1);
2522 temps.Include(t2);
2523 // Check if new.target has a [[Construct]] internal method.
2524 if (mode == CallOrConstructMode::kConstruct) {
2525 Label new_target_constructor, new_target_not_constructor;
2526 UseScratchRegisterScope temps(masm);
2527 Register scratch = temps.Acquire();
2528 __ JumpIfSmi(a3, &new_target_not_constructor);
2529 __ LoadTaggedField(scratch, FieldMemOperand(a3, HeapObject::kMapOffset));
2530 __ Lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
2531 __ And(scratch, scratch, Operand(Map::Bits1::IsConstructorBit::kMask));
2532 __ Branch(&new_target_constructor, ne, scratch, Operand(zero_reg),
2534 __ bind(&new_target_not_constructor);
2535 {
2536 FrameScope scope(masm, StackFrame::MANUAL);
2537 __ EnterFrame(StackFrame::INTERNAL);
2538 __ Push(a3);
2539 __ CallRuntime(Runtime::kThrowNotConstructor);
2540 }
2541 __ bind(&new_target_constructor);
2542 }
2543
2544 __ Move(a6, fp);
2546
2547 Label stack_done, stack_overflow;
2548 __ SubWord(a7, a7, Operand(kJSArgcReceiverSlots));
2549 __ SubWord(a7, a7, a2);
2550 __ Branch(&stack_done, le, a7, Operand(zero_reg));
2551 {
2552 // Check for stack overflow.
2553 __ StackOverflowCheck(a7, a4, a5, &stack_overflow);
2554
2555 // Forward the arguments from the caller frame.
2556
2557 // Point to the first argument to copy (skipping the receiver).
2558 __ AddWord(a6, a6,
2561 __ CalcScaledAddress(a6, a6, a2, kSystemPointerSizeLog2);
2562
2563 // Move the arguments already in the stack,
2564 // including the receiver and the return address.
2565 // a7: Number of arguments to make room for.
2566 // a0: Number of arguments already on the stack.
2567 // a2: Points to first free slot on the stack after arguments were shifted.
2568 Generate_AllocateSpaceAndShiftExistingArguments(masm, a7, a0, a2);
2569
2570 // Copy arguments from the caller frame.
2571 // TODO(victorgomes): Consider using forward order as potentially more cache
2572 // friendly.
2573 {
2574 Label loop;
2575 __ bind(&loop);
2576 {
2577 UseScratchRegisterScope temps(masm);
2578 Register scratch = temps.Acquire(), addr = temps.Acquire();
2579 __ Sub32(a7, a7, Operand(1));
2580 __ CalcScaledAddress(addr, a6, a7, kSystemPointerSizeLog2);
2581 __ LoadWord(scratch, MemOperand(addr));
2582 __ CalcScaledAddress(addr, a2, a7, kSystemPointerSizeLog2);
2583 __ StoreWord(scratch, MemOperand(addr));
2584 __ Branch(&loop, ne, a7, Operand(zero_reg));
2585 }
2586 }
2587 }
2588 __ bind(&stack_done);
2589 // Tail-call to the actual Call or Construct builtin.
2590 __ TailCallBuiltin(target_builtin);
2591
2592 __ bind(&stack_overflow);
2593 __ TailCallRuntime(Runtime::kThrowStackOverflow);
2594}
2595
2596// static
2598 ConvertReceiverMode mode) {
2599 // ----------- S t a t e -------------
2600 // -- a0 : the number of arguments
2601 // -- a1 : the function to call (checked to be a JSFunction)
2602 // -----------------------------------
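  // Illustrative JS mapping (not part of the builtin itself): for sloppy-mode,
  // non-native functions a null or undefined receiver is replaced with the
  // global proxy and primitive receivers are boxed via ToObject, so inside
  // `f.call(null)` the value of `this` is the global proxy.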
2603 __ AssertFunction(a1);
2604
2605 __ LoadTaggedField(
2606 a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2607
2608 // Enter the context of the function; ToObject has to run in the function
2609 // context, and we also need to take the global proxy from the function
2610 // context in case of conversion.
2611 __ LoadTaggedField(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
2612 // We need to convert the receiver for non-native sloppy mode functions.
2613 Label done_convert;
2614 __ Load32U(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
2615 __ And(kScratchReg, a3,
2616 Operand(SharedFunctionInfo::IsNativeBit::kMask |
2617 SharedFunctionInfo::IsStrictBit::kMask));
2618 __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg));
2619 {
2620 // ----------- S t a t e -------------
2621 // -- a0 : the number of arguments
2622 // -- a1 : the function to call (checked to be a JSFunction)
2623 // -- a2 : the shared function info.
2624 // -- cp : the function context.
2625 // -----------------------------------
2626
2628 // Patch receiver to global proxy.
2629 __ LoadGlobalProxy(a3);
2630 } else {
2631 Label convert_to_object, convert_receiver;
2632 __ LoadReceiver(a3);
2633 __ JumpIfSmi(a3, &convert_to_object);
2634 __ JumpIfJSAnyIsNotPrimitive(a3, a4, &done_convert);
2636 Label convert_global_proxy;
2637 __ JumpIfRoot(a3, RootIndex::kUndefinedValue, &convert_global_proxy);
2638 __ JumpIfNotRoot(a3, RootIndex::kNullValue, &convert_to_object);
2639 __ bind(&convert_global_proxy);
2640 {
2641 // Patch receiver to global proxy.
2642 __ LoadGlobalProxy(a3);
2643 }
2644 __ Branch(&convert_receiver);
2645 }
2646 __ bind(&convert_to_object);
2647 {
2648 // Convert receiver using ToObject.
2649 // TODO(bmeurer): Inline the allocation here to avoid building the frame
2650 // in the fast case? (fall back to AllocateInNewSpace?)
2651 FrameScope scope(masm, StackFrame::INTERNAL);
2652 __ SmiTag(a0);
2653 __ Push(a0, a1);
2654 __ Move(a0, a3);
2655 __ Push(cp);
2656 __ CallBuiltin(Builtin::kToObject);
2657 __ Pop(cp);
2658 __ Move(a3, a0);
2659 __ Pop(a0, a1);
2660 __ SmiUntag(a0);
2661 }
2662 __ LoadTaggedField(
2663 a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2664 __ bind(&convert_receiver);
2665 }
2666 __ StoreReceiver(a3);
2667 }
2668 __ bind(&done_convert);
2669
2670 // ----------- S t a t e -------------
2671 // -- a0 : the number of arguments
2672 // -- a1 : the function to call (checked to be a JSFunction)
2673 // -- a2 : the shared function info.
2674 // -- cp : the function context.
2675 // -----------------------------------
2676#if defined(V8_ENABLE_LEAPTIERING) && defined(V8_TARGET_ARCH_RISCV64)
2677 __ InvokeFunctionCode(a1, no_reg, a0, InvokeType::kJump);
2678#else
2679 __ Lhu(a2,
2680 FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
2681 __ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump);
2682#endif
2683}
2684
2685namespace {
2686
2687void Generate_PushBoundArguments(MacroAssembler* masm) {
2688 // ----------- S t a t e -------------
2689 // -- a0 : the number of arguments
2690 // -- a1 : target (checked to be a JSBoundFunction)
2691 // -- a3 : new.target (only in case of [[Construct]])
2692 // -----------------------------------
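  // Illustrative JS mapping (not part of the builtin itself): for
  // `const g = f.bind(thisArg, 1, 2); g(3)` the [[BoundArguments]] `1, 2`
  // loaded below are pushed in front of the call-site argument `3`.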
2693 UseScratchRegisterScope temps(masm);
2694 temps.Include(t0, t1);
2695 Register bound_argc = a4;
2696 Register bound_argv = a2;
2697 // Load [[BoundArguments]] into a2 and length of that into a4.
2698 Label no_bound_arguments;
2699 __ LoadTaggedField(
2700 bound_argv, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
2701 __ SmiUntagField(bound_argc,
2702 FieldMemOperand(bound_argv, offsetof(FixedArray, length_)));
2703 __ Branch(&no_bound_arguments, eq, bound_argc, Operand(zero_reg));
2704 {
2705 // ----------- S t a t e -------------
2706 // -- a0 : the number of arguments
2707 // -- a1 : target (checked to be a JSBoundFunction)
2708 // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
2709 // -- a3 : new.target (only in case of [[Construct]])
2710 // -- a4: the number of [[BoundArguments]]
2711 // -----------------------------------
2712 UseScratchRegisterScope temps(masm);
2713 Register scratch = temps.Acquire();
2714 Label done;
2715 // Reserve stack space for the [[BoundArguments]].
2716 {
2717 // Check the stack for overflow. We are not trying to catch interruptions
2718 // (i.e. debug break and preemption) here, so check the "real stack
2719 // limit".
2720 __ StackOverflowCheck(a4, temps.Acquire(), temps.Acquire(), nullptr,
2721 &done);
2722 {
2723 FrameScope scope(masm, StackFrame::MANUAL);
2724 __ EnterFrame(StackFrame::INTERNAL);
2725 __ CallRuntime(Runtime::kThrowStackOverflow);
2726 }
2727 __ bind(&done);
2728 }
2729
2730 // Pop receiver.
2731 __ Pop(scratch);
2732
2733 // Push [[BoundArguments]].
2734 {
2735 Label loop, done_loop;
2736 __ SmiUntag(a4, FieldMemOperand(a2, offsetof(FixedArray, length_)));
2737 __ AddWord(a0, a0, Operand(a4));
2738 __ AddWord(a2, a2,
2739 Operand(OFFSET_OF_DATA_START(FixedArray) - kHeapObjectTag));
2740 __ bind(&loop);
2741 __ SubWord(a4, a4, Operand(1));
2742 __ Branch(&done_loop, lt, a4, Operand(zero_reg), Label::Distance::kNear);
2743 __ CalcScaledAddress(a5, a2, a4, kTaggedSizeLog2);
2744 __ LoadTaggedField(kScratchReg, MemOperand(a5));
2745 __ Push(kScratchReg);
2746 __ Branch(&loop);
2747 __ bind(&done_loop);
2748 }
2749
2750 // Push receiver.
2751 __ Push(scratch);
2752 }
2753 __ bind(&no_bound_arguments);
2754}
2755
2756} // namespace
2757
2758// static
2760 // ----------- S t a t e -------------
2761 // -- a0 : the number of arguments
2762 // -- a1 : the function to call (checked to be a JSBoundFunction)
2763 // -----------------------------------
2764 __ AssertBoundFunction(a1);
2765
2766 // Patch the receiver to [[BoundThis]].
2767 {
2768 UseScratchRegisterScope temps(masm);
2769 Register scratch = temps.Acquire();
2770 __ LoadTaggedField(scratch,
2771 FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
2772 __ StoreReceiver(scratch);
2773 }
2774
2775 // Push the [[BoundArguments]] onto the stack.
2776 Generate_PushBoundArguments(masm);
2777
2778 // Call the [[BoundTargetFunction]] via the Call builtin.
2779 __ LoadTaggedField(
2780 a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2781 __ TailCallBuiltin(Builtins::Call());
2782}
2783
2784// static
2786 // ----------- S t a t e -------------
2787 // -- a0 : the number of arguments
2788 // -- a1 : the target to call (can be any Object).
2789 // -----------------------------------
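  // In outline (descriptive note): dispatch on the target's instance type to
  // the matching Call* builtin (plain, bound, proxy, or wrapped function),
  // fall back to the call-as-function delegate for other callables, and throw
  // for non-callables and class constructors.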
2790
2791 Register target = a1;
2792 Register map = t1;
2793 Register instance_type = t2;
2794 Register scratch = t6;
2795 DCHECK(!AreAliased(a0, target, map, instance_type, scratch));
2796
2797 Label non_callable, class_constructor;
2798 __ JumpIfSmi(target, &non_callable);
2799 __ LoadMap(map, target);
2800 __ GetInstanceTypeRange(map, instance_type, FIRST_CALLABLE_JS_FUNCTION_TYPE,
2801 scratch);
2802 __ TailCallBuiltin(Builtins::CallFunction(mode), ule, scratch,
2805 __ TailCallBuiltin(Builtin::kCallBoundFunction, eq, instance_type,
2806 Operand(JS_BOUND_FUNCTION_TYPE));
2807
2808 // Check if target has a [[Call]] internal method.
2809 {
2810 Register flags = t1;
2811 __ Lbu(flags, FieldMemOperand(map, Map::kBitFieldOffset));
2812 map = no_reg;
2813 __ And(flags, flags, Operand(Map::Bits1::IsCallableBit::kMask));
2814 __ Branch(&non_callable, eq, flags, Operand(zero_reg));
2815 }
2816
2817 __ TailCallBuiltin(Builtin::kCallProxy, eq, instance_type,
2818 Operand(JS_PROXY_TYPE));
2819
2820  // Check if the target is a wrapped function and, if so, tail call the
2821  // CallWrappedFunction builtin.
2822 __ TailCallBuiltin(Builtin::kCallWrappedFunction, eq, instance_type,
2823 Operand(JS_WRAPPED_FUNCTION_TYPE));
2824
2825 // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
2826 // Check that the function is not a "classConstructor".
2827 __ Branch(&class_constructor, eq, instance_type,
2828 Operand(JS_CLASS_CONSTRUCTOR_TYPE));
2829
2830 // 2. Call to something else, which might have a [[Call]] internal method (if
2831 // not we raise an exception).
2832 // Overwrite the original receiver with the (original) target.
2833 __ StoreReceiver(target);
2834 // Let the "call_as_function_delegate" take care of the rest.
2835 __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
2836 __ TailCallBuiltin(
2838
2839 // 3. Call to something that is not callable.
2840 __ bind(&non_callable);
2841 {
2842 FrameScope scope(masm, StackFrame::INTERNAL);
2843 __ Push(target);
2844 __ CallRuntime(Runtime::kThrowCalledNonCallable);
2845 }
2846
2847 // 4. The function is a "classConstructor", need to raise an exception.
2848 __ bind(&class_constructor);
2849 {
2850 FrameScope frame(masm, StackFrame::INTERNAL);
2851 __ Push(target);
2852 __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
2853 }
2854}
2855
2856void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
2857 // ----------- S t a t e -------------
2858 // -- a0 : the number of arguments
2859 // -- a1 : the constructor to call (checked to be a JSFunction)
2860 // -- a3 : the new target (checked to be a constructor)
2861 // -----------------------------------
2862 __ AssertConstructor(a1);
2863 __ AssertFunction(a1);
2864
2865 // Calling convention for function specific ConstructStubs require
2866 // a2 to contain either an AllocationSite or undefined.
2867 __ LoadRoot(a2, RootIndex::kUndefinedValue);
2868
2869 Label call_generic_stub;
2870
2871 // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
2872 __ LoadTaggedField(
2873 a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2874 __ Load32U(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset));
2875 __ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
2876 __ Branch(&call_generic_stub, eq, a4, Operand(zero_reg),
2878
2879 __ TailCallBuiltin(Builtin::kJSBuiltinsConstructStub);
2880
2881 __ bind(&call_generic_stub);
2882 __ TailCallBuiltin(Builtin::kJSConstructStubGeneric);
2883}
2884
2885// static
2886void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
2887 // ----------- S t a t e -------------
2888 // -- a0 : the number of arguments
2889 // -- a1 : the function to call (checked to be a JSBoundFunction)
2890 // -- a3 : the new target (checked to be a constructor)
2891 // -----------------------------------
2892 __ AssertBoundFunction(a1);
2893
2894 // Push the [[BoundArguments]] onto the stack.
2895 Generate_PushBoundArguments(masm);
2896
2897 // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
2898 Label skip;
2899 __ CompareTaggedAndBranch(&skip, ne, a1, Operand(a3));
2900 __ LoadTaggedField(
2901 a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2902 __ bind(&skip);
2903
2904 // Construct the [[BoundTargetFunction]] via the Construct builtin.
2905 __ LoadTaggedField(
2906 a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2907 __ TailCallBuiltin(Builtin::kConstruct);
2908}
2909
2910void Builtins::Generate_Construct(MacroAssembler* masm) {
2911 // ----------- S t a t e -------------
2912 // -- a0 : the number of arguments
2913 // -- a1 : the constructor to call (can be any Object)
2914 // -- a3 : the new target (either the same as the constructor or
2915 // the JSFunction on which new was invoked initially)
2916 // -----------------------------------
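  // In outline (descriptive note): dispatch on the target's instance type to
  // the matching Construct* builtin, fall back to the constructor delegate
  // for other exotic constructables, and throw for anything without a
  // [[Construct]] internal method.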
2917
2918 Register target = a1;
2919 Register map = t1;
2920 Register instance_type = t2;
2921 Register scratch = t6;
2922 DCHECK(!AreAliased(a0, target, map, instance_type, scratch));
2923
2924 // Check if target is a Smi.
2925 Label non_constructor, non_proxy;
2926 __ JumpIfSmi(target, &non_constructor);
2927
2928 // Check if target has a [[Construct]] internal method.
2929 __ LoadTaggedField(map, FieldMemOperand(target, HeapObject::kMapOffset));
2930 {
2931 Register flags = t3;
2932 __ Lbu(flags, FieldMemOperand(map, Map::kBitFieldOffset));
2933 __ And(flags, flags, Operand(Map::Bits1::IsConstructorBit::kMask));
2934 __ Branch(&non_constructor, eq, flags, Operand(zero_reg));
2935 }
2936
2937 // Dispatch based on instance type.
2938 __ GetInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE, scratch);
2939 __ TailCallBuiltin(Builtin::kConstructFunction, Uless_equal, scratch,
2940 Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
2941
2942 // Only dispatch to bound functions after checking whether they are
2943 // constructors.
2944 __ TailCallBuiltin(Builtin::kConstructBoundFunction, eq, instance_type,
2945 Operand(JS_BOUND_FUNCTION_TYPE));
2946
2947 // Only dispatch to proxies after checking whether they are constructors.
2948 __ Branch(&non_proxy, ne, instance_type, Operand(JS_PROXY_TYPE));
2949 __ TailCallBuiltin(Builtin::kConstructProxy);
2950
2951 // Called Construct on an exotic Object with a [[Construct]] internal method.
2952 __ bind(&non_proxy);
2953 {
2954 // Overwrite the original receiver with the (original) target.
2955 __ StoreReceiver(target);
2956 // Let the "call_as_constructor_delegate" take care of the rest.
2957 __ LoadNativeContextSlot(target,
2958 Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
2959 __ TailCallBuiltin(Builtins::CallFunction());
2960 }
2961
2962 // Called Construct on an Object that doesn't have a [[Construct]] internal
2963 // method.
2964 __ bind(&non_constructor);
2965 __ TailCallBuiltin(Builtin::kConstructedNonConstructable);
2966}
2967
2968#if V8_ENABLE_WEBASSEMBLY
2969// Compute register lists for parameters to be saved. We save all parameter
2970// registers (see wasm-linkage.h). They might be overwritten in the runtime
2971// call below. We don't have any callee-saved registers in wasm, so no need to
2972// store anything else.
2973constexpr RegList kSavedGpRegs = ([]() constexpr {
2976 "frame size mismatch");
2977 RegList saved_gp_regs;
2978 for (Register gp_param_reg : wasm::kGpParamRegisters) {
2979 saved_gp_regs.set(gp_param_reg);
2980 }
2981
2982 // The instance data has already been stored in the fixed part of the frame.
2983 saved_gp_regs.clear(kWasmImplicitArgRegister);
2984 // All set registers were unique.
2985 CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters) - 1);
2987 saved_gp_regs.Count());
2988 return saved_gp_regs;
2989})();
2990
2991constexpr DoubleRegList kSavedFpRegs = ([]() constexpr {
2994 "frame size mismatch");
2995 DoubleRegList saved_fp_regs;
2996 for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
2997 saved_fp_regs.set(fp_param_reg);
2998 }
2999
3000 CHECK_EQ(saved_fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
3002 saved_fp_regs.Count());
3003 return saved_fp_regs;
3004})();
3005
3006// When entering this builtin, we have just created a Wasm stack frame:
3007//
3008// [ Wasm instance data ] <-- sp
3009// [ WASM frame marker ]
3010// [ saved fp ] <-- fp
3011//
3012// Add the feedback vector to the stack.
3013//
3014// [ feedback vector ] <-- sp
3015// [ Wasm instance data ]
3016// [ WASM frame marker ]
3017// [ saved fp ] <-- fp
3018void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
3019 Register func_index = wasm::kLiftoffFrameSetupFunctionReg;
3020 Register vector = t1;
3021 Register scratch = t2;
3022 Label allocate_vector, done;
3023
3024 __ LoadTaggedField(
3026 WasmTrustedInstanceData::kFeedbackVectorsOffset));
3027 __ CalcScaledAddress(vector, vector, func_index, kTaggedSizeLog2);
3028 __ LoadTaggedField(vector,
3029 FieldMemOperand(vector, OFFSET_OF_DATA_START(FixedArray)));
3030 __ JumpIfSmi(vector, &allocate_vector);
3031 __ bind(&done);
3032 __ Push(vector);
3033 __ Ret();
3034
3035 __ bind(&allocate_vector);
3036 // Feedback vector doesn't exist yet. Call the runtime to allocate it.
3037 // We temporarily change the frame type for this, because we need special
3038 // handling by the stack walker in case of GC.
3039 __ li(scratch, StackFrame::TypeToMarker(StackFrame::WASM_LIFTOFF_SETUP));
3040 __ StoreWord(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
3041
3042 // Save registers.
3043 __ MultiPush(kSavedGpRegs);
3044 __ MultiPushFPU(kSavedFpRegs);
3045 __ Push(ra);
3046
3047 // Arguments to the runtime function: instance data, func_index, and an
3048 // additional stack slot for the NativeModule.
3049 __ SmiTag(func_index);
3050 __ Push(kWasmImplicitArgRegister, func_index, zero_reg);
3051 __ Move(cp, Smi::zero());
3052 __ CallRuntime(Runtime::kWasmAllocateFeedbackVector, 3);
3053 __ mv(vector, kReturnRegister0);
3054
3055 // Restore registers and frame type.
3056 __ Pop(ra);
3057 __ MultiPopFPU(kSavedFpRegs);
3058 __ MultiPop(kSavedGpRegs);
3060 MemOperand(fp, WasmFrameConstants::kWasmInstanceDataOffset));
3061 __ li(scratch, StackFrame::TypeToMarker(StackFrame::WASM));
3062 __ StoreWord(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
3063 __ Branch(&done);
3064}
3065
3066void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
3067 // The function index was put in t0 by the jump table trampoline.
3068 // Convert to Smi for the runtime call
3070
3071 {
3072 HardAbortScope hard_abort(masm); // Avoid calls to Abort.
3073 FrameScope scope(masm, StackFrame::INTERNAL);
3074
3075 // Save registers that we need to keep alive across the runtime call.
3077 __ MultiPush(kSavedGpRegs);
3078 __ MultiPushFPU(kSavedFpRegs);
3079
3081 // Initialize the JavaScript context with 0. CEntry will use it to
3082 // set the current context on the isolate.
3083 __ Move(kContextRegister, Smi::zero());
3084 __ CallRuntime(Runtime::kWasmCompileLazy, 2);
3085
3086 __ SmiUntag(s1, a0); // move return value to s1 since a0 will be restored
3087 // to the value before the call
3088 CHECK(!kSavedGpRegs.has(s1));
3089
3090 // Restore registers.
3091 __ MultiPopFPU(kSavedFpRegs);
3092 __ MultiPop(kSavedGpRegs);
3094 }
3095
3096  // The runtime function returned the jump table slot offset as a Smi (now in
3097  // s1). Use that to compute the jump target.
3098 __ LoadWord(kScratchReg,
3100 WasmTrustedInstanceData::kJumpTableStartOffset));
3101 __ AddWord(s1, s1, Operand(kScratchReg));
3102 // Finally, jump to the entrypoint.
3103 __ Jump(s1);
3104}
3105
3106void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
3107 HardAbortScope hard_abort(masm); // Avoid calls to Abort.
3108 {
3109 FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
3110
3111 // Save all parameter registers. They might hold live values, we restore
3112 // them after the runtime call.
3115
3116 // Initialize the JavaScript context with 0. CEntry will use it to
3117 // set the current context on the isolate.
3118 __ Move(cp, Smi::zero());
3119 __ CallRuntime(Runtime::kWasmDebugBreak, 0);
3120
3121 // Restore registers.
3124 }
3125 __ Ret();
3126}
3127
3128#endif // V8_ENABLE_WEBASSEMBLY
3129
3130namespace {
3131void SwitchSimulatorStackLimit(MacroAssembler* masm) {
3132 if (masm->options().enable_simulator_code) {
3133 UseScratchRegisterScope temps(masm);
3134 temps.Exclude(kSimulatorBreakArgument);
3135 __ RecordComment("-- Set simulator stack limit --");
3137 __ break_(kExceptionIsSwitchStackLimit, false);
3138 }
3139}
3140
3141static constexpr Register kOldSPRegister = s9;
3142static constexpr Register kSwitchFlagRegister = s10;
3143
3144void SwitchToTheCentralStackIfNeeded(MacroAssembler* masm, Register argc_input,
3145 Register target_input,
3146 Register argv_input) {
3147 using ER = ExternalReference;
3148
3149 __ li(kSwitchFlagRegister, 0);
3150 __ mv(kOldSPRegister, sp);
3151
3152  // Using a2-a4 as temporary registers, because they will be rewritten
3153  // before exiting to native code anyway.
3154
3155 ER on_central_stack_flag_loc = ER::Create(
3156 IsolateAddressId::kIsOnCentralStackFlagAddress, masm->isolate());
3157 const Register& on_central_stack_flag = a2;
3158 __ li(on_central_stack_flag, on_central_stack_flag_loc);
3159 __ Lb(on_central_stack_flag, MemOperand(on_central_stack_flag));
3160
3161 Label do_not_need_to_switch;
3162 __ Branch(&do_not_need_to_switch, ne, on_central_stack_flag,
3163 Operand(zero_reg));
3164 // Switch to central stack.
3165
3166 static constexpr Register central_stack_sp = a4;
3167 DCHECK(!AreAliased(central_stack_sp, argc_input, argv_input, target_input));
3168 {
3169 __ Push(argc_input, target_input, argv_input);
3170 __ PrepareCallCFunction(2, argc_input);
3171 __ li(kCArgRegs[0], ER::isolate_address(masm->isolate()));
3172 __ mv(kCArgRegs[1], kOldSPRegister);
3173 __ CallCFunction(ER::wasm_switch_to_the_central_stack(), 2,
3175 __ mv(central_stack_sp, kReturnRegister0);
3176 __ Pop(argc_input, target_input, argv_input);
3177 }
3178
3179 SwitchSimulatorStackLimit(masm);
3180
3181 static constexpr int kReturnAddressSlotOffset = 1 * kSystemPointerSize;
3182 static constexpr int kPadding = 1 * kSystemPointerSize;
3183 __ SubWord(sp, central_stack_sp, kReturnAddressSlotOffset + kPadding);
3184
3185#ifdef V8_TARGET_ARCH_RISCV32
3186 __ EnforceStackAlignment();
3187#endif
3188 __ li(kSwitchFlagRegister, 1);
3189
3190 // Update the sp saved in the frame.
3191 // It will be used to calculate the callee pc during GC.
3192 // The pc is going to be on the new stack segment, so rewrite it here.
3193 __ AddWord(central_stack_sp, sp, kSystemPointerSize);
3194 __ StoreWord(central_stack_sp, MemOperand(fp, ExitFrameConstants::kSPOffset));
3195
3196 __ bind(&do_not_need_to_switch);
3197}
3198
3199void SwitchFromTheCentralStackIfNeeded(MacroAssembler* masm) {
3200 using ER = ExternalReference;
3201
3202 Label no_stack_change;
3203 __ Branch(&no_stack_change, eq, kSwitchFlagRegister, Operand(zero_reg));
3204
3205 {
3207 __ li(kCArgRegs[0], ER::isolate_address(masm->isolate()));
3209 __ PrepareCallCFunction(1, kReturnRegister1);
3210 __ CallCFunction(ER::wasm_switch_from_the_central_stack(), 1,
3213 }
3214
3215 SwitchSimulatorStackLimit(masm);
3216
3217 __ mv(sp, kOldSPRegister);
3218
3219 __ bind(&no_stack_change);
3220}
3221} // namespace
3222
3223void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
3224 ArgvMode argv_mode, bool builtin_exit_frame,
3225 bool switch_to_central_stack) {
3226 // Called from JavaScript; parameters are on stack as if calling JS function
3227 // a0: number of arguments including receiver
3228 // a1: pointer to c++ function
3229 // fp: frame pointer (restored after C call)
3230 // sp: stack pointer (restored as callee's sp after C call)
3231 // cp: current context (C callee-saved)
3232 // If argv_mode == ArgvMode::kRegister:
3233 // a2: pointer to the first argument
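  // In outline (descriptive note): enter an exit frame, call the C++ target
  // via StoreReturnAddressAndCall, then either return the result or, if the
  // exception sentinel was returned, unwind to the pending handler (below).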
3234 using ER = ExternalReference;
3235
3236 static constexpr Register argc_input = a0;
3237 static constexpr Register target_input = a1;
3238 // Initialized below if ArgvMode::kStack.
3239 static constexpr Register argv_input = s1;
3240 static constexpr Register argc_sav = s3;
3241 static constexpr Register scratch = a3;
3242 if (argv_mode == ArgvMode::kRegister) {
3243 // Move argv into the correct register.
3244 __ Move(s1, a2);
3245 } else {
3246 // Compute the argv pointer in a callee-saved register.
3247 __ CalcScaledAddress(s1, sp, a0, kSystemPointerSizeLog2);
3248 __ SubWord(s1, s1, kSystemPointerSize);
3249 }
3250
3251 // Enter the exit frame that transitions from JavaScript to C++.
3252 FrameScope scope(masm, StackFrame::MANUAL);
3253 __ EnterExitFrame(
3254 scratch, 0,
3255 builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
3256
3257 // s3: number of arguments including receiver (C callee-saved)
3258 // s1: pointer to first argument (C callee-saved)
3259 // s2: pointer to builtin function (C callee-saved)
3260
3261 // Prepare arguments for C routine.
3262 // a0 = argc
3263 __ Move(argc_sav, argc_input);
3264 __ Move(s2, target_input);
3265
3266 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
3267 // also need to reserve the 4 argument slots on the stack.
3268
3269 __ AssertStackIsAligned();
3270
3271#if V8_ENABLE_WEBASSEMBLY
3272 if (switch_to_central_stack) {
3273 SwitchToTheCentralStackIfNeeded(masm, argc_input, target_input, argv_input);
3274 }
3275#endif // V8_ENABLE_WEBASSEMBLY
3276
3277 // a0 = argc, a1 = argv, a2 = isolate
3278 __ li(a2, ER::isolate_address(masm->isolate()));
3279 __ Move(a1, s1);
3280
3281 __ StoreReturnAddressAndCall(s2);
3282
3283 // Check result for exception sentinel.
3284 Label exception_returned;
3285 // The returned value may be a trusted object, living outside of the main
3286 // pointer compression cage, so we need to use full pointer comparison here.
3287 __ CompareRootAndBranch(a0, RootIndex::kException, eq, &exception_returned,
3288 ComparisonMode::kFullPointer);
3289
3290 // Result returned in a0 or a1:a0 - do not destroy these registers!
3291#if V8_ENABLE_WEBASSEMBLY
3292 if (switch_to_central_stack) {
3293 SwitchFromTheCentralStackIfNeeded(masm);
3294 }
3295#endif // V8_ENABLE_WEBASSEMBLY
3296 // Exit C frame and return.
3297 // a0:a1: result
3298 // sp: stack pointer
3299 // fp: frame pointer
3300 // s3: still holds argc (C callee-saved).
3301 __ LeaveExitFrame(scratch);
3302 if (argv_mode == ArgvMode::kStack) {
3303 DCHECK(!AreAliased(scratch, argc_sav));
3304 __ DropArguments(argc_sav);
3305 }
3306 __ Ret();
3307
3308 // Handling of exception.
3309 __ bind(&exception_returned);
3310
3311 ER pending_handler_context_address = ER::Create(
3312 IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
3313 ER pending_handler_entrypoint_address = ER::Create(
3314 IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
3315 ER pending_handler_fp_address =
3316 ER::Create(IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
3317 ER pending_handler_sp_address =
3318 ER::Create(IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
3319
3320 // Ask the runtime for help to determine the handler. This will set a0 to
3321 // contain the current exception; don't clobber it.
3322 ER find_handler = ER::Create(Runtime::kUnwindAndFindExceptionHandler);
3323 {
3324 FrameScope scope(masm, StackFrame::MANUAL);
3325 __ PrepareCallCFunction(3, 0, a0);
3326 __ Move(a0, zero_reg);
3327 __ Move(a1, zero_reg);
3328 __ li(a2, ER::isolate_address());
3329 __ CallCFunction(find_handler, 3, SetIsolateDataSlots::kNo);
3330 }
3331
3332 // Retrieve the handler context, SP and FP.
3333 __ li(cp, pending_handler_context_address);
3334 __ LoadWord(cp, MemOperand(cp));
3335 __ li(sp, pending_handler_sp_address);
3336 __ LoadWord(sp, MemOperand(sp));
3337 __ li(fp, pending_handler_fp_address);
3338 __ LoadWord(fp, MemOperand(fp));
3339
3340 // If the handler is a JS frame, restore the context to the frame. Note that
3341 // the context will be set to (cp == 0) for non-JS frames.
3342 Label zero;
3343 __ Branch(&zero, eq, cp, Operand(zero_reg), Label::Distance::kNear);
3344 __ StoreWord(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3345 __ bind(&zero);
3346
3347 // Clear c_entry_fp, like we do in `LeaveExitFrame`.
3348 ER c_entry_fp_address =
3349 ER::Create(IsolateAddressId::kCEntryFPAddress, masm->isolate());
3350 __ StoreWord(zero_reg,
3351 __ ExternalReferenceAsOperand(c_entry_fp_address, no_reg));
3352
3353 // Compute the handler entry address and jump to it.
3354 __ LoadWord(scratch, __ ExternalReferenceAsOperand(
3355 pending_handler_entrypoint_address, no_reg));
3356 __ Jump(scratch);
3357}
3358
3359#if V8_ENABLE_WEBASSEMBLY
3360void Builtins::Generate_WasmHandleStackOverflow(MacroAssembler* masm) {
3361 using ER = ExternalReference;
3362 Register frame_base = WasmHandleStackOverflowDescriptor::FrameBaseRegister();
3363 Register gap = WasmHandleStackOverflowDescriptor::GapRegister();
3364 {
3365 DCHECK_NE(kCArgRegs[1], frame_base);
3366 DCHECK_NE(kCArgRegs[3], frame_base);
3367 __ mv(kCArgRegs[3], gap);
3368 __ mv(kCArgRegs[1], sp);
3369 __ SubWord(kCArgRegs[2], frame_base, kCArgRegs[1]);
3370 __ mv(kCArgRegs[4], fp);
3371 FrameScope scope(masm, StackFrame::INTERNAL);
3372 __ Push(kCArgRegs[3]);
3373 __ li(kCArgRegs[0], ER::isolate_address());
3374 __ PrepareCallCFunction(5, kScratchReg);
3375 __ CallCFunction(ER::wasm_grow_stack(), 5);
3376 __ Pop(gap);
3378 }
3379 Label call_runtime;
3380 // wasm_grow_stack returns zero if it cannot grow a stack.
3381 __ BranchShort(&call_runtime, eq, kReturnRegister0, Operand(zero_reg));
3382 {
3383 UseScratchRegisterScope temps(masm);
3384 Register new_fp = temps.Acquire();
3385 // Calculate the old FP - SP offset to adjust the FP relative to the new SP.
3386 __ SubWord(new_fp, fp, sp);
3387 __ AddWord(new_fp, kReturnRegister0, new_fp);
3388 __ mv(fp, new_fp);
3389 }
3390 __ mv(sp, kReturnRegister0);
3391 {
3392 UseScratchRegisterScope temps(masm);
3393 Register scratch = temps.Acquire();
3394 __ li(scratch, StackFrame::TypeToMarker(StackFrame::WASM_SEGMENT_START));
3395 __ StoreWord(scratch,
3397 }
3398 __ Ret();
3399
3400 __ bind(&call_runtime);
3401 // If wasm_grow_stack returns zero, the interruption or stack overflow
3402 // should be handled by a runtime call.
3403 {
3405 MemOperand(fp, WasmFrameConstants::kWasmInstanceDataOffset));
3406 __ LoadTaggedField(
3408 WasmTrustedInstanceData::kNativeContextOffset));
3409 FrameScope scope(masm, StackFrame::MANUAL);
3410 __ EnterFrame(StackFrame::INTERNAL);
3411 __ SmiTag(gap);
3412 __ Push(gap);
3413 __ CallRuntime(Runtime::kWasmStackGuard);
3414 __ LeaveFrame(StackFrame::INTERNAL);
3415 __ Ret();
3416 }
3417}
3418#endif // V8_ENABLE_WEBASSEMBLY
3419
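// Truncates the double argument on the stack to a 32-bit integer with
// wrap-around (modulo 2^32) semantics: a fast FPU truncation is attempted
// first, and if it does not succeed (e.g. NaN, infinities, or out-of-range
// values) the result is computed by shifting the mantissa manually.
// For example, 2147483648.0 (2^31) converts to -2147483648.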
3420void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
3421 Label done;
3422 Register result_reg = t0;
3423
3424 Register scratch = GetRegisterThatIsNotOneOf(result_reg);
3425 Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch);
3426 Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2);
3427 DoubleRegister double_scratch = kScratchDoubleReg;
3428
3429 // Account for saved regs.
3430 const int kArgumentOffset = 4 * kSystemPointerSize;
3431
3432 __ Push(result_reg);
3433 __ Push(scratch, scratch2, scratch3);
3434
3435 // Load double input.
3436 __ LoadDouble(double_scratch, MemOperand(sp, kArgumentOffset));
3437
3438 // Try a conversion to a signed integer; if an exception occurs, scratch is
3439 // set to 0.
3440 __ Trunc_w_d(scratch3, double_scratch, scratch);
3441
3442 // If we had no exceptions then set result_reg and we are done.
3443 Label error;
3444 __ Branch(&error, eq, scratch, Operand(zero_reg), Label::Distance::kNear);
3445 __ Move(result_reg, scratch3);
3446 __ Branch(&done);
3447 __ bind(&error);
3448
3449 // Load the double value and perform a manual truncation.
3450 Register input_high = scratch2;
3451 Register input_low = scratch3;
3452
3453 __ Lw(input_low, MemOperand(sp, kArgumentOffset + Register::kMantissaOffset));
3454 __ Lw(input_high,
3455 MemOperand(sp, kArgumentOffset + Register::kExponentOffset));
3456
3457 Label normal_exponent;
3458 // Extract the biased exponent in result.
3459 __ ExtractBits(result_reg, input_high, HeapNumber::kExponentShift,
3460 HeapNumber::kExponentBits);
3461
3462 // Check for Infinity and NaNs, which should return 0.
3463 __ Sub32(scratch, result_reg, HeapNumber::kExponentMask);
3464 __ LoadZeroIfConditionZero(
3465 result_reg,
3466 scratch); // result_reg = scratch == 0 ? 0 : result_reg
3467 __ Branch(&done, eq, scratch, Operand(zero_reg));
3468
3469 // Express exponent as delta to (number of mantissa bits + 31).
3470 __ Sub32(result_reg, result_reg,
3471 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
3472
3473 // If the delta is strictly positive, all bits would be shifted away,
3474 // which means that we can return 0.
3475 __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg),
3476 Label::Distance::kNear);
3477 __ Move(result_reg, zero_reg);
3478 __ Branch(&done);
3479
3480 __ bind(&normal_exponent);
3481 const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
3482 // Calculate shift.
3483 __ Add32(scratch, result_reg,
3484 Operand(kShiftBase + HeapNumber::kMantissaBits));
3485
3486 // Save the sign.
3487 Register sign = result_reg;
3488 result_reg = no_reg;
3489 __ And(sign, input_high, Operand(HeapNumber::kSignMask));
3490
3491 // We must specially handle shifts greater than 31.
3492 Label high_shift_needed, high_shift_done;
3493 __ Branch(&high_shift_needed, lt, scratch, Operand(32),
3494 Label::Distance::kNear);
3495 __ Move(input_high, zero_reg);
3496 __ BranchShort(&high_shift_done);
3497 __ bind(&high_shift_needed);
3498
3499 // Set the implicit 1 before the mantissa part in input_high.
3500 __ Or(input_high, input_high,
3501 Operand(1 << HeapNumber::kMantissaBitsInTopWord));
3502 // Shift the mantissa bits to the correct position.
3503 // We don't need to clear non-mantissa bits as they will be shifted away.
3504 // If they weren't, it would mean that the answer is in the 32bit range.
3505 __ Sll32(input_high, input_high, scratch);
3506
3507 __ bind(&high_shift_done);
3508
3509 // Replace the shifted bits with bits from the lower mantissa word.
3510 Label pos_shift, shift_done, sign_negative;
3511 __ li(kScratchReg, 32);
3512 __ Sub32(scratch, kScratchReg, scratch);
3513 __ Branch(&pos_shift, ge, scratch, Operand(zero_reg), Label::Distance::kNear);
3514
3515 // Negate scratch.
3516 __ Sub32(scratch, zero_reg, scratch);
3517 __ Sll32(input_low, input_low, scratch);
3518 __ BranchShort(&shift_done);
3519
3520 __ bind(&pos_shift);
3521 __ Srl32(input_low, input_low, scratch);
3522
3523 __ bind(&shift_done);
3524 __ Or(input_high, input_high, Operand(input_low));
3525 // Restore sign if necessary.
3526 __ Move(scratch, sign);
3527 result_reg = sign;
3528 sign = no_reg;
3529 __ Sub32(result_reg, zero_reg, input_high);
3530 __ Branch(&sign_negative, ne, scratch, Operand(zero_reg),
3531 Label::Distance::kNear);
3532 __ Move(result_reg, input_high);
3533 __ bind(&sign_negative);
3534
3535 __ bind(&done);
3536
3537 __ StoreWord(result_reg, MemOperand(sp, kArgumentOffset));
3538 __ Pop(scratch, scratch2, scratch3);
3539 __ Pop(result_reg);
3540 __ Ret();
3541}
3542
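// Spills the wasm FP parameter registers and all GP parameter registers
// except the first one to the stack, reserves a slot for the signature, and
// tail-calls the generic kWasmToJsWrapperCSA builtin, which reads the values
// back from memory.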
3543void Builtins::Generate_WasmToJsWrapperAsm(MacroAssembler* masm) {
3544 int required_stack_space = arraysize(wasm::kFpParamRegisters) * kDoubleSize;
3545 __ SubWord(sp, sp, Operand(required_stack_space));
3546 for (int i = 0; i < static_cast<int>(arraysize(wasm::kFpParamRegisters));
3547 ++i) {
3548 __ StoreDouble(wasm::kFpParamRegisters[i], MemOperand(sp, i * kDoubleSize));
3549 }
3550
3551 constexpr int num_gp = arraysize(wasm::kGpParamRegisters) - 1;
3552 required_stack_space = num_gp * kSystemPointerSize;
3553 __ SubWord(sp, sp, Operand(required_stack_space));
3554 for (int i = 1; i < static_cast<int>(arraysize(wasm::kGpParamRegisters));
3555 ++i) {
3556 __ StoreWord(wasm::kGpParamRegisters[i],
3557 MemOperand(sp, (i - 1) * kSystemPointerSize));
3558 }
3559 // Reserve a slot for the signature.
3560 __ Push(zero_reg);
3561 __ TailCallBuiltin(Builtin::kWasmToJsWrapperCSA);
3562}
3563
3564void Builtins::Generate_WasmTrapHandlerLandingPad(MacroAssembler* masm) {
3565 // This builtin gets called from the WebAssembly trap handler when an
3566 // out-of-bounds memory access happened or when a null reference gets
3567 // dereferenced. This builtin then fakes a call from the instruction that
3568 // triggered the signal to the runtime. This is done by setting a return
3569 // address and then jumping to a builtin which will call further to the
3570 // runtime.
3571 // As the return address we use the fault address + 1. Using the fault address
3572 // itself would cause problems with safepoints and source positions.
3573 //
3574 // The problem with safepoints is that a safepoint has to be registered at the
3575 // return address, and that at most one safepoint should be registered at a
3576 // location. However, there could already be a safepoint registered at the
3577 // fault address if the fault address is the return address of a call.
3578 //
3579 // The problem with source positions is that the stack trace code looks for
3580 // the source position of a call before the return address. The source
3581 // position of the faulty memory access, however, is recorded at the fault
3582 // address. Therefore the stack trace code would not find the source position
3583 // if we used the fault address as the return address.
3585 __ TailCallBuiltin(Builtin::kWasmTrapHandlerThrowTrap);
3586}
3587
3588namespace {
3589// Check that the stack was in the old state (if generated code assertions are
3590// enabled), and switch to the new state.
3591 void SwitchStackState(MacroAssembler* masm, Register jmpbuf, Register tmp,
3592 wasm::JumpBuffer::StackState old_state,
3593 wasm::JumpBuffer::StackState new_state) {
3594 ASM_CODE_COMMENT(masm);
3595#if V8_ENABLE_SANDBOX
3596 __ Lw(tmp, MemOperand(jmpbuf, wasm::kJmpBufStateOffset));
3597 Label ok;
3598 // TODO: check whether a 32-bit compare (Branch32) could be used here.
3599 __ Branch(&ok, eq, tmp, Operand(old_state));
3600 __ Trap();
3601 __ bind(&ok);
3602#endif
3603 __ li(tmp, new_state);
3604 __ Sw(tmp, MemOperand(jmpbuf, wasm::kJmpBufStateOffset));
3605}
3606
3607// Switch the stack pointer. Also switch the simulator's stack limit when
3608// running on the simulator. This needs to be done as close as possible to
3609// changing the stack pointer, as a mismatch between the stack pointer and the
3610// simulator's stack limit can cause stack access check failures.
3611void SwitchStackPointerAndSimulatorStackLimit(MacroAssembler* masm,
3612 Register jmpbuf) {
3613 ASM_CODE_COMMENT(masm);
3614 if (masm->options().enable_simulator_code) {
3615 UseScratchRegisterScope temps(masm);
3616 temps.Exclude(kSimulatorBreakArgument);
3617 __ LoadWord(sp, MemOperand(jmpbuf, wasm::kJmpBufSpOffset));
3618 __ LoadWord(kSimulatorBreakArgument,
3619 MemOperand(jmpbuf, wasm::kJmpBufStackLimitOffset));
3621 } else {
3622 __ LoadWord(sp, MemOperand(jmpbuf, wasm::kJmpBufSpOffset));
3623 }
3624}
3625
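// Captures the current execution state (SP, FP, the real stack limit, and the
// resumption PC given by |pc|) into the jump buffer so this stack can be
// suspended and resumed later.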
3626void FillJumpBuffer(MacroAssembler* masm, Register jmpbuf, Label* pc,
3627 Register tmp) {
3628 ASM_CODE_COMMENT(masm);
3629 __ mv(tmp, sp);
3630 __ StoreWord(tmp, MemOperand(jmpbuf, wasm::kJmpBufSpOffset));
3631 __ StoreWord(fp, MemOperand(jmpbuf, wasm::kJmpBufFpOffset));
3632 __ LoadStackLimit(tmp, StackLimitKind::kRealStackLimit);
3633 __ StoreWord(tmp, MemOperand(jmpbuf, wasm::kJmpBufStackLimitOffset));
3634 __ LoadAddress(tmp, pc);
3635 __ StoreWord(tmp, MemOperand(jmpbuf, wasm::kJmpBufPcOffset));
3636}
3637
3638void LoadJumpBuffer(MacroAssembler* masm, Register jmpbuf, bool load_pc,
3639 Register tmp, wasm::JumpBuffer::StackState expected_state) {
3640 ASM_CODE_COMMENT(masm);
3641 SwitchStackPointerAndSimulatorStackLimit(masm, jmpbuf);
3642 __ LoadWord(fp, MemOperand(jmpbuf, wasm::kJmpBufFpOffset));
3643 SwitchStackState(masm, jmpbuf, tmp, expected_state, wasm::JumpBuffer::Active);
3644 if (load_pc) {
3645 __ LoadWord(tmp, MemOperand(jmpbuf, wasm::kJmpBufPcOffset));
3646 __ Jump(tmp);
3647 }
3648 // The stack limit in StackGuard is set separately under the ExecutionAccess
3649 // lock.
3650}
3651
3652// Updates the stack limit and central stack info, and validates the switch.
3653void SwitchStacks(MacroAssembler* masm, Register old_continuation,
3654 bool return_switch, Register tmp,
3655 const std::initializer_list<Register> keep) {
3656 ASM_CODE_COMMENT(masm);
3657 CHECK(!AreAliased(old_continuation, a0));
3658 using ER = ExternalReference;
3659 for (auto reg : keep) {
3660 __ Push(reg);
3661 }
3662 {
3663 FrameScope scope(masm, StackFrame::MANUAL);
3664 __ li(kCArgRegs[0], ExternalReference::isolate_address(masm->isolate()));
3665 __ mv(kCArgRegs[1], old_continuation);
3666 __ PrepareCallCFunction(2, tmp);
3667 __ CallCFunction(
3668 return_switch ? ER::wasm_return_switch() : ER::wasm_switch_stacks(), 2);
3669 }
3670 for (auto it = std::rbegin(keep); it != std::rend(keep); ++it) {
3671 __ Pop(*it);
3672 }
3673}
3674
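// Unwinds one continuation: marks the active continuation's stack as empty
// (null SP in its jump buffer), makes its parent the new active continuation
// root, and switches to the parent's stack. |return_reg|, |return_value| and
// |context| are preserved across the switch.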
3675void ReloadParentContinuation(MacroAssembler* masm, Register return_reg,
3676 Register return_value, Register context,
3677 Register tmp1, Register tmp2, Register tmp3) {
3678 ASM_CODE_COMMENT(masm);
3679 Register active_continuation = tmp1;
3680 __ LoadRoot(active_continuation, RootIndex::kActiveContinuation);
3681
3682 // Set a null pointer in the jump buffer's SP slot to indicate to the stack
3683 // frame iterator that this stack is empty.
3684 Register jmpbuf = tmp2;
3685 __ LoadExternalPointerField(
3686 jmpbuf,
3687 FieldMemOperand(active_continuation,
3688 WasmContinuationObject::kStackOffset),
3690 __ AddWord(jmpbuf, jmpbuf, wasm::StackMemory::jmpbuf_offset());
3691 __ StoreWord(zero_reg, MemOperand(jmpbuf, wasm::kJmpBufSpOffset));
3692 {
3693 UseScratchRegisterScope temps(masm);
3694 Register scratch = temps.Acquire();
3695 SwitchStackState(masm, jmpbuf, scratch, wasm::JumpBuffer::Active,
3696 wasm::JumpBuffer::Retired);
3697 }
3698 Register parent = tmp2;
3699 __ LoadTaggedField(parent,
3700 FieldMemOperand(active_continuation,
3701 WasmContinuationObject::kParentOffset));
3702
3703 // Update active continuation root.
3704 int32_t active_continuation_offset =
3705 IsolateData::root_slot_offset(
3706 RootIndex::kActiveContinuation);
3707 __ StoreWord(parent, MemOperand(kRootRegister, active_continuation_offset));
3708 jmpbuf = parent;
3709 __ LoadExternalPointerField(
3710 jmpbuf, FieldMemOperand(parent, WasmContinuationObject::kStackOffset),
3712 __ AddWord(jmpbuf, jmpbuf, wasm::StackMemory::jmpbuf_offset());
3713
3714 // Switch stack!
3715 SwitchStacks(masm, active_continuation, true, tmp3,
3716 {return_reg, return_value, context, jmpbuf});
3717 LoadJumpBuffer(masm, jmpbuf, false, tmp3, wasm::JumpBuffer::Inactive);
3718}
3719
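// Pops one level off the suspender chain by making the parent of the active
// suspender the new active suspender root.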
3720void RestoreParentSuspender(MacroAssembler* masm, Register tmp1,
3721 Register tmp2) {
3722 ASM_CODE_COMMENT(masm);
3723 Register suspender = tmp1;
3724 __ LoadRoot(suspender, RootIndex::kActiveSuspender);
3725 __ LoadTaggedField(
3726 suspender,
3727 FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset));
3728
3729 int32_t active_suspender_offset =
3730 IsolateData::root_slot_offset(
3731 RootIndex::kActiveSuspender);
3732 __ StoreWord(suspender, MemOperand(kRootRegister, active_suspender_offset));
3733}
3734
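// Small helper that hands out allocatable general-purpose registers for the
// stack-switching builtins below. Registers are requested with Ask/Pinned and
// returned with Free; ResetExcept releases everything except the given
// registers.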
3735class RegisterAllocator {
3736 public:
3737 class Scoped {
3738 public:
3739 Scoped(RegisterAllocator* allocator, Register* reg)
3740 : allocator_(allocator), reg_(reg) {}
3741 ~Scoped() { allocator_->Free(reg_); }
3742
3743 private:
3744 RegisterAllocator* allocator_;
3745 Register* reg_;
3746 };
3747
3748 explicit RegisterAllocator(const RegList& registers)
3749 : initial_(registers), available_(registers) {}
3750 void Ask(Register* reg) {
3751 DCHECK_EQ(*reg, no_reg);
3752 DCHECK(!available_.is_empty());
3753 *reg = available_.PopFirst();
3754 allocated_registers_.push_back(reg);
3755 }
3756
3757 bool registerIsAvailable(const Register& reg) { return available_.has(reg); }
3758
3759 void Pinned(const Register& requested, Register* reg) {
3760 if (!registerIsAvailable(requested)) {
3761 printf("%s register is occupied!", RegisterName(requested));
3762 }
3763 DCHECK(registerIsAvailable(requested));
3764 *reg = requested;
3765 Reserve(requested);
3766 allocated_registers_.push_back(reg);
3767 }
3768
3769 void Free(Register* reg) {
3770 DCHECK_NE(*reg, no_reg);
3771 available_.set(*reg);
3772 *reg = no_reg;
3773 allocated_registers_.erase(
3774 find(allocated_registers_.begin(), allocated_registers_.end(), reg));
3775 }
3776
3777 void Reserve(const Register& reg) {
3778 if (reg == no_reg) {
3779 return;
3780 }
3781 DCHECK(registerIsAvailable(reg));
3782 available_.clear(reg);
3783 }
3784
3785 void Reserve(const Register& reg1, const Register& reg2,
3786 const Register& reg3 = no_reg, const Register& reg4 = no_reg,
3787 const Register& reg5 = no_reg, const Register& reg6 = no_reg) {
3788 Reserve(reg1);
3789 Reserve(reg2);
3790 Reserve(reg3);
3791 Reserve(reg4);
3792 Reserve(reg5);
3793 Reserve(reg6);
3794 }
3795
3796 bool IsUsed(const Register& reg) {
3797 return initial_.has(reg) && !registerIsAvailable(reg);
3798 }
3799
3800 void ResetExcept(const Register& reg1 = no_reg, const Register& reg2 = no_reg,
3801 const Register& reg3 = no_reg, const Register& reg4 = no_reg,
3802 const Register& reg5 = no_reg,
3803 const Register& reg6 = no_reg) {
3804 available_ = initial_;
3805 available_.clear(reg1);
3806 available_.clear(reg2);
3807 available_.clear(reg3);
3808 available_.clear(reg4);
3809 available_.clear(reg5);
3810 available_.clear(reg6);
3811
3812 auto it = allocated_registers_.begin();
3813 while (it != allocated_registers_.end()) {
3814 if (registerIsAvailable(**it)) {
3815 **it = no_reg;
3816 it = allocated_registers_.erase(it);
3817 } else {
3818 it++;
3819 }
3820 }
3821 }
3822
3823 static RegisterAllocator WithAllocatableGeneralRegisters() {
3824 RegList list;
3825 const RegisterConfiguration* config(RegisterConfiguration::Default());
3826
3827 for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
3828 int code = config->GetAllocatableGeneralCode(i);
3829 Register candidate = Register::from_code(code);
3830 list.set(candidate);
3831 }
3832 return RegisterAllocator(list);
3833 }
3834
3835 private:
3836 std::vector<Register*> allocated_registers_;
3837 const RegList initial_;
3838 RegList available_;
3839};
3840
3841#define DEFINE_REG(Name) \
3842 Register Name = no_reg; \
3843 regs.Ask(&Name);
3844
3845#define ASSIGN_REG(Name) regs.Ask(&Name);
3846
3847#define DEFINE_PINNED(Name, Reg) \
3848 Register Name = no_reg; \
3849 regs.Pinned(Reg, &Name);
3850
3851#define ASSIGN_PINNED(Name, Reg) regs.Pinned(Reg, &Name);
3852
3853#define DEFINE_SCOPED(Name) \
3854 DEFINE_REG(Name) \
3855 RegisterAllocator::Scoped scope_##Name(&regs, &Name);
3856
3857#define FREE_REG(Name) regs.Free(&Name);
3858
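// Zeroes the result-array and implicit-arg spill slots of the stack-switch
// frame so the GC does not interpret stale values in them as live tagged
// pointers.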
3859void ResetStackSwitchFrameStackSlots(MacroAssembler* masm) {
3860 ASM_CODE_COMMENT(masm);
3861 __ StoreWord(zero_reg,
3862 MemOperand(fp, StackSwitchFrameConstants::kResultArrayOffset));
3863 __ StoreWord(zero_reg,
3864 MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset));
3865}
3866
3867void LoadTargetJumpBuffer(MacroAssembler* masm, Register target_continuation,
3868 Register tmp,
3869 wasm::JumpBuffer::StackState expected_state) {
3870 ASM_CODE_COMMENT(masm);
3871 Register target_jmpbuf = target_continuation;
3872 __ LoadExternalPointerField(
3873 target_jmpbuf,
3874 FieldMemOperand(target_continuation,
3875 WasmContinuationObject::kStackOffset),
3877 __ AddWord(target_jmpbuf, target_jmpbuf, wasm::StackMemory::jmpbuf_offset());
3878 __ StoreWord(
3879 zero_reg,
3880 MemOperand(fp, StackSwitchFrameConstants::kGCScanSlotCountOffset));
3881 // Switch stack!
3882 LoadJumpBuffer(masm, target_jmpbuf, false, tmp, expected_state);
3883}
3884} // namespace
3885
3886void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
3887 auto regs = RegisterAllocator::WithAllocatableGeneralRegisters();
3888 // Set up the stackframe.
3889 __ EnterFrame(StackFrame::STACK_SWITCH);
3890
3891 DEFINE_PINNED(suspender, a0);
3893
3894 __ SubWord(
3895 sp, sp,
3896 Operand(StackSwitchFrameConstants::kNumSpillSlots * kSystemPointerSize));
3897 // Set a sentinel value for the spill slots visited by the GC.
3898 ResetStackSwitchFrameStackSlots(masm);
3899
3900 // -------------------------------------------
3901 // Save current state in active jump buffer.
3902 // -------------------------------------------
3903 Label resume;
3904 DEFINE_REG(continuation);
3905 __ LoadRoot(continuation, RootIndex::kActiveContinuation);
3906 DEFINE_REG(jmpbuf);
3907 DEFINE_REG(scratch);
3908 __ LoadExternalPointerField(
3909 jmpbuf,
3910 FieldMemOperand(continuation, WasmContinuationObject::kStackOffset),
3912 __ AddWord(jmpbuf, jmpbuf, wasm::StackMemory::jmpbuf_offset());
3913 FillJumpBuffer(masm, jmpbuf, &resume, scratch);
3914 SwitchStackState(masm, jmpbuf, scratch, wasm::JumpBuffer::Active,
3915 wasm::JumpBuffer::Suspended);
3916 regs.ResetExcept(suspender, continuation);
3917
3918 DEFINE_REG(suspender_continuation);
3919 __ LoadTaggedField(
3920 suspender_continuation,
3921 FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset));
3922 if (v8_flags.debug_code) {
3923 // -------------------------------------------
3924 // Check that the suspender's continuation is the active continuation.
3925 // -------------------------------------------
3926 // TODO(thibaudm): Once we add core stack-switching instructions, this
3927 // check will not hold anymore: it's possible that the active continuation
3928 // changed (due to an internal switch), so we have to update the suspender.
3929 Label ok;
3930 __ Branch(&ok, eq, suspender_continuation, Operand(continuation));
3931 __ Trap();
3932 __ bind(&ok);
3933 }
3934 // -------------------------------------------
3935 // Update roots.
3936 // -------------------------------------------
3937 DEFINE_REG(caller);
3938 __ LoadTaggedField(caller,
3939 FieldMemOperand(suspender_continuation,
3940 WasmContinuationObject::kParentOffset));
3941 int32_t active_continuation_offset =
3942 IsolateData::root_slot_offset(
3943 RootIndex::kActiveContinuation);
3944 __ StoreWord(caller, MemOperand(kRootRegister, active_continuation_offset));
3945 DEFINE_REG(parent);
3946 __ LoadTaggedField(
3947 parent, FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset));
3948 int32_t active_suspender_offset =
3949 IsolateData::root_slot_offset(
3950 RootIndex::kActiveSuspender);
3951 __ StoreWord(parent, MemOperand(kRootRegister, active_suspender_offset));
3952 regs.ResetExcept(suspender, caller, continuation);
3953 // -------------------------------------------
3954 // Load jump buffer.
3955 // -------------------------------------------
3956 SwitchStacks(masm, continuation, false, caller, {caller, suspender});
3958 ASSIGN_REG(jmpbuf);
3959 __ LoadExternalPointerField(
3960 jmpbuf, FieldMemOperand(caller, WasmContinuationObject::kStackOffset),
3962 __ AddWord(jmpbuf, jmpbuf, wasm::StackMemory::jmpbuf_offset());
3963 __ LoadTaggedField(
3964 kReturnRegister0,
3965 FieldMemOperand(suspender, WasmSuspenderObject::kPromiseOffset));
3966 MemOperand GCScanSlotPlace =
3967 MemOperand(fp, StackSwitchFrameConstants::kGCScanSlotCountOffset);
3968 __ StoreWord(zero_reg, GCScanSlotPlace);
3969 ASSIGN_REG(scratch)
3970 LoadJumpBuffer(masm, jmpbuf, true, scratch, wasm::JumpBuffer::Inactive);
3971 __ Trap();
3972 __ bind(&resume);
3973 __ LeaveFrame(StackFrame::STACK_SWITCH);
3974 __ Ret();
3975}
3976
3977
3978namespace {
3979// Resume the suspender stored in the closure. We generate two variants of this
3980// builtin: the onFulfilled variant resumes execution at the saved PC and
3981 // forwards the value; the onRejected variant throws the value.
3982
3983void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
3984 auto regs = RegisterAllocator::WithAllocatableGeneralRegisters();
3985 UseScratchRegisterScope temps(masm);
3986 __ EnterFrame(StackFrame::STACK_SWITCH);
3987
3988 DEFINE_PINNED(closure, kJSFunctionRegister); // a1
3989
3990 __ SubWord(
3991 sp, sp,
3992 Operand(StackSwitchFrameConstants::kNumSpillSlots * kSystemPointerSize));
3993 // Set a sentinel value for the spill slots visited by the GC.
3994 ResetStackSwitchFrameStackSlots(masm);
3995
3996 regs.ResetExcept(closure);
3997
3998 // -------------------------------------------
3999 // Load suspender from closure.
4000 // -------------------------------------------
4001 DEFINE_REG(sfi);
4002 __ LoadTaggedField(
4003 sfi,
4004 MemOperand(
4005 closure,
4007 FREE_REG(closure);
4008 // The suspender has to be in the write-barrier ObjectRegister so it can be
4009 // used in RecordWriteField calls later.
4010 DEFINE_PINNED(suspender, WriteBarrierDescriptor::ObjectRegister());
4011 DEFINE_REG(resume_data);
4012 __ LoadTaggedField(
4013 resume_data,
4014 FieldMemOperand(sfi, SharedFunctionInfo::kUntrustedFunctionDataOffset));
4015 __ LoadTaggedField(
4016 suspender,
4017 FieldMemOperand(resume_data, WasmResumeData::kSuspenderOffset));
4018 regs.ResetExcept(suspender);
4019
4020 // -------------------------------------------
4021 // Save current state.
4022 // -------------------------------------------
4023 Label suspend;
4024 DEFINE_REG(active_continuation);
4025 __ LoadRoot(active_continuation, RootIndex::kActiveContinuation);
4026 DEFINE_REG(current_jmpbuf);
4027 DEFINE_REG(scratch);
4028 __ LoadExternalPointerField(
4029 current_jmpbuf,
4030 FieldMemOperand(active_continuation,
4031 WasmContinuationObject::kStackOffset),
4033 __ AddWord(current_jmpbuf, current_jmpbuf,
4034 wasm::StackMemory::jmpbuf_offset());
4035 FillJumpBuffer(masm, current_jmpbuf, &suspend, scratch);
4036 SwitchStackState(masm, current_jmpbuf, scratch, wasm::JumpBuffer::Active,
4037 wasm::JumpBuffer::Inactive);
4038 FREE_REG(current_jmpbuf);
4039 // -------------------------------------------
4040 // Set the suspender and continuation parents and update the roots
4041 // -------------------------------------------
4042 DEFINE_REG(active_suspender);
4043 __ LoadRoot(active_suspender, RootIndex::kActiveSuspender);
4044 __ StoreTaggedField(
4045 active_suspender,
4046 FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset));
4047 __ RecordWriteField(suspender, WasmSuspenderObject::kParentOffset,
4048 active_suspender, kRAHasBeenSaved,
4049 SaveFPRegsMode::kIgnore);
4050 int32_t active_suspender_offset =
4051 IsolateData::root_slot_offset(
4052 RootIndex::kActiveSuspender);
4053 __ StoreWord(suspender, MemOperand(kRootRegister, active_suspender_offset));
4054
4055 // The next load reads a field from the suspender, but target_continuation has
4056 // to live in the same pinned register to be usable in RecordWriteField, so
4057 // free the suspender here to reuse that register while still loading from it.
4058 FREE_REG(suspender);
4059 DEFINE_PINNED(target_continuation, WriteBarrierDescriptor::ObjectRegister());
4060 suspender = target_continuation;
4061 __ LoadTaggedField(
4062 target_continuation,
4063 FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset));
4064 suspender = no_reg;
4065
4066 __ StoreTaggedField(active_continuation,
4067 FieldMemOperand(target_continuation,
4068 WasmContinuationObject::kParentOffset));
4069 DEFINE_PINNED(old_continuation, s10);
4070 __ mv(old_continuation, active_continuation);
4071 __ RecordWriteField(
4072 target_continuation, WasmContinuationObject::kParentOffset,
4073 active_continuation, kRAHasBeenSaved, SaveFPRegsMode::kIgnore);
4074 int32_t active_continuation_offset =
4075 IsolateData::root_slot_offset(
4076 RootIndex::kActiveContinuation);
4077 __ StoreWord(target_continuation,
4078 MemOperand(kRootRegister, active_continuation_offset));
4079
4080 SwitchStacks(masm, old_continuation, false, scratch, {target_continuation});
4081 regs.ResetExcept(target_continuation);
4082
4083 // -------------------------------------------
4084 // Load state from target jmpbuf (longjmp).
4085 // -------------------------------------------
4086 regs.Reserve(kReturnRegister0);
4087 DEFINE_REG(target_jmpbuf);
4088 ASSIGN_REG(scratch);
4089
4090 __ LoadExternalPointerField(
4091 target_jmpbuf,
4092 FieldMemOperand(target_continuation,
4093 WasmContinuationObject::kStackOffset),
4095 __ AddWord(target_jmpbuf, target_jmpbuf, wasm::StackMemory::jmpbuf_offset());
4096
4097 // Move resolved value to return register.
4099 MemOperand GCScanSlotPlace =
4100 MemOperand(fp, StackSwitchFrameConstants::kGCScanSlotCountOffset);
4101 __ StoreWord(zero_reg, GCScanSlotPlace);
4102 if (on_resume == wasm::OnResume::kThrow) {
4103 // Switch to the continuation's stack without restoring the PC.
4104 LoadJumpBuffer(masm, target_jmpbuf, false, scratch,
4105 wasm::JumpBuffer::Suspended);
4106 // Pop this frame now. The unwinder expects that the first STACK_SWITCH
4107 // frame is the outermost one.
4108 __ LeaveFrame(StackFrame::STACK_SWITCH);
4109 // Forward the onRejected value to kThrow.
4110 __ Push(kReturnRegister0);
4111 __ CallRuntime(Runtime::kThrow);
4112 } else {
4113 // Resume the continuation normally.
4114 LoadJumpBuffer(masm, target_jmpbuf, true, scratch,
4115 wasm::JumpBuffer::Suspended);
4116 }
4117 __ Trap();
4118 __ bind(&suspend);
4119 __ LeaveFrame(StackFrame::STACK_SWITCH);
4120 // Pop receiver + parameter.
4121 // __ DropArguments(2);
4122 __ AddWord(sp, sp, Operand(2 * kSystemPointerSize));
4123 __ Ret();
4124}
4125} // namespace
4126
4127void Builtins::Generate_WasmResume(MacroAssembler* masm) {
4128 Generate_WasmResumeHelper(masm, wasm::OnResume::kContinue);
4129}
4130
4131void Builtins::Generate_WasmReject(MacroAssembler* masm) {
4132 Generate_WasmResumeHelper(masm, wasm::OnResume::kThrow);
4133}
4134
4135void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
4136 // Only needed on x64.
4137 __ Trap();
4138}
4139
4140namespace {
4141
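// Fills the jump buffer of |active_continuation| with the current SP, FP,
// stack limit, and the |suspend| label as the resumption PC.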
4142void SaveState(MacroAssembler* masm, Register active_continuation, Register tmp,
4143 Label* suspend) {
4144 ASM_CODE_COMMENT(masm);
4145 Register jmpbuf = tmp;
4146 __ LoadExternalPointerField(
4147 jmpbuf,
4148 FieldMemOperand(active_continuation,
4149 WasmContinuationObject::kStackOffset),
4151 __ AddWord(jmpbuf, jmpbuf, wasm::StackMemory::jmpbuf_offset());
4152 UseScratchRegisterScope temps(masm);
4153 Register scratch = temps.Acquire();
4154 FillJumpBuffer(masm, jmpbuf, suspend, scratch);
4155}
4156
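// Saves the parent continuation's state, switches SP to the active
// continuation's stack while keeping the old FP in t4 (original_fp), sets up
// a STACK_SWITCH frame there, and copies the return-handling fields of the
// wrapper buffer into a new buffer on the new stack.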
4157void SwitchToAllocatedStack(MacroAssembler* masm, RegisterAllocator& regs,
4158 Register wasm_instance, Register wrapper_buffer,
4159 Register& original_fp, Register& new_wrapper_buffer,
4160 Label* suspend) {
4161 ASM_CODE_COMMENT(masm);
4162 UseScratchRegisterScope temps(masm);
4163 ResetStackSwitchFrameStackSlots(masm);
4164 DEFINE_SCOPED(scratch)
4165 DEFINE_REG(target_continuation)
4166 __ LoadRoot(target_continuation, RootIndex::kActiveContinuation);
4167 DEFINE_PINNED(parent_continuation, a2)
4168 __ LoadTaggedField(parent_continuation,
4169 FieldMemOperand(target_continuation,
4170 WasmContinuationObject::kParentOffset));
4171 SaveState(masm, parent_continuation, scratch, suspend);
4172 SwitchStacks(masm, parent_continuation, false, scratch,
4173 {wasm_instance, wrapper_buffer});
4174 FREE_REG(parent_continuation);
4175 // Save the old stack's fp in t4, and use it to access the parameters in
4176 // the parent frame.
4177 regs.Pinned(t4, &original_fp);
4178 __ mv(original_fp, fp);
4179 __ LoadRoot(target_continuation, RootIndex::kActiveContinuation);
4180 LoadTargetJumpBuffer(masm, target_continuation, scratch,
4182 FREE_REG(target_continuation);
4183
4184 // Push the loaded fp. We know it is null, because there is no frame yet,
4185 // so we could also push 0 directly. In any case we need to push it,
4186 // because this marks the base of the stack segment for
4187 // the stack frame iterator.
4188 __ EnterFrame(StackFrame::STACK_SWITCH);
4189
4190 int stack_space =
4191 RoundUp(StackSwitchFrameConstants::kNumSpillSlots * kSystemPointerSize +
4192 JSToWasmWrapperFrameConstants::kWrapperBufferSize,
4193 16);
4194 __ SubWord(sp, sp, Operand(stack_space));
4195
4196 ASSIGN_REG(new_wrapper_buffer)
4197
4198 __ mv(new_wrapper_buffer, sp);
4199 // Copy data needed for return handling from old wrapper buffer to new one.
4200 // kWrapperBufferRefReturnCount will be copied too, because 8 bytes are copied
4201 // at the same time.
4202 static_assert(JSToWasmWrapperFrameConstants::kWrapperBufferRefReturnCount ==
4203 JSToWasmWrapperFrameConstants::kWrapperBufferReturnCount + 4);
4204 __ LoadWord(
4205 scratch,
4206 MemOperand(wrapper_buffer,
4207 JSToWasmWrapperFrameConstants::kWrapperBufferReturnCount));
4208 __ StoreWord(
4209 scratch,
4210 MemOperand(new_wrapper_buffer,
4211 JSToWasmWrapperFrameConstants::kWrapperBufferReturnCount));
4212 __ LoadWord(
4213 scratch,
4214 MemOperand(
4215 wrapper_buffer,
4216 JSToWasmWrapperFrameConstants::kWrapperBufferSigRepresentationArray));
4217 __ StoreWord(
4218 scratch,
4219 MemOperand(
4220 new_wrapper_buffer,
4221 JSToWasmWrapperFrameConstants::kWrapperBufferSigRepresentationArray));
4222}
4223
4224// Loads the context field of the WasmTrustedInstanceData or WasmImportData
4225// depending on the data's type, and places the result in the input register.
4226void GetContextFromImplicitArg(MacroAssembler* masm, Register data,
4227 Register scratch) {
4228 __ LoadTaggedField(scratch, FieldMemOperand(data, HeapObject::kMapOffset));
4229 Label instance;
4230 Label end;
4231 __ GetInstanceTypeRange(scratch, scratch, WASM_TRUSTED_INSTANCE_DATA_TYPE,
4232 scratch);
4233 // __ CompareInstanceType(scratch, scratch, WASM_TRUSTED_INSTANCE_DATA_TYPE);
4234 __ Branch(&instance, eq, scratch, Operand(zero_reg));
4235 __ LoadTaggedField(
4236 data, FieldMemOperand(data, WasmImportData::kNativeContextOffset));
4237 __ Branch(&end);
4238 __ bind(&instance);
4239 __ LoadTaggedField(
4240 data,
4241 FieldMemOperand(data, WasmTrustedInstanceData::kNativeContextOffset));
4242 __ bind(&end);
4243}
4244
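// Returns from a stack-switched wasm call: reloads the parent continuation
// and suspender, and in promise mode fulfills the returned promise with the
// wasm return value.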
4245void SwitchBackAndReturnPromise(MacroAssembler* masm, RegisterAllocator& regs,
4246 wasm::Promise mode, Label* return_promise) {
4247 ASM_CODE_COMMENT(masm);
4248 regs.ResetExcept();
4249 UseScratchRegisterScope temps(masm);
4250 // The return value of the wasm function becomes the parameter of the
4251 // FulfillPromise builtin, and the promise is the return value of this
4252 // wrapper.
4253 static const Builtin_FulfillPromise_InterfaceDescriptor desc;
4254 DEFINE_PINNED(promise, desc.GetRegisterParameter(0));
4255 DEFINE_PINNED(return_value, desc.GetRegisterParameter(1));
4256 DEFINE_SCOPED(tmp);
4257 DEFINE_SCOPED(tmp2);
4258 DEFINE_SCOPED(tmp3);
4259 if (mode == wasm::kPromise) {
4260 __ Move(return_value, kReturnRegister0);
4261 __ LoadRoot(promise, RootIndex::kActiveSuspender);
4262 __ LoadTaggedField(
4263 promise, FieldMemOperand(promise, WasmSuspenderObject::kPromiseOffset));
4264 }
4265 __ LoadWord(kContextRegister,
4266 MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset));
4267 GetContextFromImplicitArg(masm, kContextRegister, tmp);
4268
4269 ReloadParentContinuation(masm, promise, return_value, kContextRegister, tmp,
4270 tmp2, tmp3);
4271 RestoreParentSuspender(masm, tmp, tmp2);
4272
4273 if (mode == wasm::kPromise) {
4274 __ li(tmp, 1);
4275 __ StoreWord(
4276 tmp, MemOperand(fp, StackSwitchFrameConstants::kGCScanSlotCountOffset));
4277 __ Push(promise);
4278 __ CallBuiltin(Builtin::kFulfillPromise);
4279 __ Pop(promise);
4280 }
4281 FREE_REG(promise);
4282 FREE_REG(return_value);
4283 __ bind(return_promise);
4284}
4285
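// Catch handler for the promise-based wrapper: clears the thread-in-wasm
// flag, switches back to the parent continuation, and rejects the returned
// promise with the thrown exception.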
4286void GenerateExceptionHandlingLandingPad(MacroAssembler* masm,
4287 RegisterAllocator& regs,
4288 Label* return_promise) {
4289 ASM_CODE_COMMENT(masm);
4290 static const Builtin_RejectPromise_InterfaceDescriptor desc;
4291 DEFINE_PINNED(promise, desc.GetRegisterParameter(0));
4292 DEFINE_PINNED(reason, desc.GetRegisterParameter(1));
4293 DEFINE_PINNED(debug_event, desc.GetRegisterParameter(2));
4294 int catch_handler = __ pc_offset();
4295 {
4296 DEFINE_SCOPED(thread_in_wasm_flag_addr);
4297 // Unset thread_in_wasm_flag.
4298 __ LoadWord(thread_in_wasm_flag_addr,
4299 MemOperand(kRootRegister,
4300 Isolate::thread_in_wasm_flag_address_offset()));
4301 __ StoreWord(zero_reg, MemOperand(thread_in_wasm_flag_addr, 0));
4302 }
4303 // The exception becomes the parameter of the RejectPromise builtin, and the
4304 // promise is the return value of this wrapper.
4305 __ mv(reason, kReturnRegister0);
4306 __ LoadRoot(promise, RootIndex::kActiveSuspender);
4307 __ LoadTaggedField(
4308 promise, FieldMemOperand(promise, WasmSuspenderObject::kPromiseOffset));
4309
4310 __ LoadWord(kContextRegister,
4311 MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset));
4312 DEFINE_SCOPED(tmp);
4313 DEFINE_SCOPED(tmp2);
4314 DEFINE_SCOPED(tmp3);
4315 GetContextFromImplicitArg(masm, kContextRegister, tmp);
4316 ReloadParentContinuation(masm, promise, reason, kContextRegister, tmp, tmp2,
4317 tmp3);
4318 RestoreParentSuspender(masm, tmp, tmp2);
4319
4320 __ li(tmp, 1);
4321 __ StoreWord(
4322 tmp, MemOperand(fp, StackSwitchFrameConstants::kGCScanSlotCountOffset));
4323 __ Push(promise);
4324 __ LoadRoot(debug_event, RootIndex::kTrueValue);
4325 __ CallBuiltin(Builtin::kRejectPromise);
4326 __ Pop(promise);
4327
4328 // Run the rest of the wrapper normally (deconstruct the frame, ...).
4329 __ jmp(return_promise);
4330
4331 masm->isolate()->builtins()->SetJSPIPromptHandlerOffset(catch_handler);
4332}
4333
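// Shared implementation of the JS-to-wasm wrappers. For the promise and
// stress-switch modes the call runs on a newly allocated stack; for the
// regular mode it stays on the caller's stack. Parameters are copied from the
// wrapper buffer into registers and stack slots before the wasm call, and the
// results are written back to the buffer afterwards.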
4334void JSToWasmWrapperHelper(MacroAssembler* masm, wasm::Promise mode) {
4335 bool stack_switch = mode == wasm::kPromise || mode == wasm::kStressSwitch;
4336 auto regs = RegisterAllocator::WithAllocatableGeneralRegisters();
4337
4338 __ EnterFrame(stack_switch ? StackFrame::STACK_SWITCH
4339 : StackFrame::JS_TO_WASM);
4340
4341 __ SubWord(
4342 sp, sp,
4343 Operand(StackSwitchFrameConstants::kNumSpillSlots * kSystemPointerSize));
4344
4345 // Load the implicit argument (instance data or import data) from the frame.
4346 DEFINE_PINNED(implicit_arg, kWasmImplicitArgRegister);
4347 __ LoadWord(
4348 implicit_arg,
4349 MemOperand(fp, JSToWasmWrapperFrameConstants::kImplicitArgOffset));
4350
4351 DEFINE_PINNED(wrapper_buffer,
4352 WasmJSToWasmWrapperDescriptor::WrapperBufferRegister());
4353
4354 Label suspend;
4355 Register original_fp = no_reg;
4356 Register new_wrapper_buffer = no_reg;
4357 if (stack_switch) {
4358 SwitchToAllocatedStack(masm, regs, implicit_arg, wrapper_buffer,
4359 original_fp, new_wrapper_buffer, &suspend);
4360 } else {
4361 original_fp = fp;
4362 new_wrapper_buffer = wrapper_buffer;
4363 }
4364
4365 regs.ResetExcept(original_fp, wrapper_buffer, implicit_arg,
4366 new_wrapper_buffer);
4367
4368 {
4369 __ StoreWord(
4370 new_wrapper_buffer,
4371 MemOperand(fp, JSToWasmWrapperFrameConstants::kWrapperBufferOffset));
4372 if (stack_switch) {
4373 __ StoreWord(
4374 implicit_arg,
4375 MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset));
4376 UseScratchRegisterScope temps(masm);
4377 Register scratch = temps.Acquire();
4378 __ LoadWord(
4379 scratch,
4380 MemOperand(original_fp,
4381 JSToWasmWrapperFrameConstants::kResultArrayParamOffset));
4382 __ StoreWord(
4383 scratch,
4384 MemOperand(fp, StackSwitchFrameConstants::kResultArrayOffset));
4385 }
4386 }
4387 {
4388 DEFINE_SCOPED(result_size);
4389 __ LoadWord(
4390 result_size,
4391 MemOperand(wrapper_buffer, JSToWasmWrapperFrameConstants::
4392 kWrapperBufferStackReturnBufferSize));
4393 __ SllWord(result_size, result_size, kSystemPointerSizeLog2);
4394 __ SubWord(sp, sp, Operand(result_size));
4395 }
4396
4397 __ StoreWord(
4398 sp,
4399 MemOperand(
4400 new_wrapper_buffer,
4401 JSToWasmWrapperFrameConstants::kWrapperBufferStackReturnBufferStart));
4402 if (stack_switch) {
4403 FREE_REG(new_wrapper_buffer)
4404 }
4405 FREE_REG(implicit_arg)
4406 for (auto reg : wasm::kGpParamRegisters) {
4407 regs.Reserve(reg);
4408 }
4409
4410 // The first GP parameter holds the trusted instance data or the import data.
4411 // This is handled specially.
4412 int stack_params_offset =
4413 (arraysize(wasm::kGpParamRegisters) - 1) * kSystemPointerSize +
4414 arraysize(wasm::kFpParamRegisters) * kDoubleSize;
4415
4416 {
4417 DEFINE_SCOPED(params_start);
4418 __ LoadWord(
4419 params_start,
4420 MemOperand(wrapper_buffer,
4421 JSToWasmWrapperFrameConstants::kWrapperBufferParamStart));
4422 {
4423 // Push stack parameters on the stack.
4424 DEFINE_SCOPED(params_end);
4425 __ LoadWord(
4426 params_end,
4427 MemOperand(wrapper_buffer,
4428 JSToWasmWrapperFrameConstants::kWrapperBufferParamEnd));
4429 DEFINE_SCOPED(last_stack_param);
4430
4431 __ AddWord(last_stack_param, params_start, Operand(stack_params_offset));
4432 Label loop_start;
4433 __ bind(&loop_start);
4434
4435 Label finish_stack_params;
4436 __ Branch(&finish_stack_params, ge, last_stack_param,
4437 Operand(params_end));
4438
4439 // Push parameter
4440 {
4441 DEFINE_SCOPED(scratch);
4442 __ SubWord(params_end, params_end, Operand(kSystemPointerSize));
4443 __ LoadWord(scratch, MemOperand(params_end, 0));
4444 __ Push(scratch);
4445 }
4446 __ Branch(&loop_start);
4447
4448 __ bind(&finish_stack_params);
4449 }
4450 int32_t next_offset = 0;
4451 for (size_t i = 1; i < arraysize(wasm::kGpParamRegisters); ++i) {
4452 // Check that {params_start} does not overlap with any of the parameter
4453 // registers, so that we don't overwrite it by accident with the loads
4454 // below.
4455 DCHECK_NE(params_start, wasm::kGpParamRegisters[i]);
4456 __ LoadWord(wasm::kGpParamRegisters[i],
4457 MemOperand(params_start, next_offset));
4458 next_offset += kSystemPointerSize;
4459 }
4460
4461 for (size_t i = 0; i < arraysize(wasm::kFpParamRegisters); ++i) {
4462 __ LoadDouble(wasm::kFpParamRegisters[i],
4463 MemOperand(params_start, next_offset));
4464 next_offset += kDoubleSize;
4465 }
4466 DCHECK_EQ(next_offset, stack_params_offset);
4467 }
4468
4469 {
4470 DEFINE_SCOPED(thread_in_wasm_flag_addr);
4471 __ LoadWord(thread_in_wasm_flag_addr,
4472 MemOperand(kRootRegister,
4473 Isolate::thread_in_wasm_flag_address_offset()));
4474 DEFINE_SCOPED(scratch);
4475 __ li(scratch, 1);
4476 __ Sw(scratch, MemOperand(thread_in_wasm_flag_addr, 0));
4477 }
4478 __ StoreWord(
4479 zero_reg,
4480 MemOperand(fp, StackSwitchFrameConstants::kGCScanSlotCountOffset));
4481 {
4482 DEFINE_SCOPED(call_target);
4483 __ LoadWasmCodePointer(
4484 call_target,
4485 MemOperand(wrapper_buffer,
4486 JSToWasmWrapperFrameConstants::kWrapperBufferCallTarget));
4487 // We do the call without a signature check here, since the wrapper loaded
4488 // the signature from the same trusted object as the call target to set up
4489 // the stack layout. We could add a signature hash and pass it through to
4490 // verify it here, but an attacker that could corrupt the signature could
4491 // also corrupt that signature hash (which is outside of the sandbox).
4492 __ CallWasmCodePointerNoSignatureCheck(call_target);
4493 }
4494
4495 regs.ResetExcept();
4496 // The wrapper_buffer has to be in a2 as the correct parameter register.
4497 regs.Reserve(kReturnRegister0, kReturnRegister1);
4498 ASSIGN_PINNED(wrapper_buffer, a2);
4499 {
4500 DEFINE_SCOPED(thread_in_wasm_flag_addr);
4501 __ LoadWord(thread_in_wasm_flag_addr,
4502 MemOperand(kRootRegister,
4503 Isolate::thread_in_wasm_flag_address_offset()));
4504 __ Sw(zero_reg, MemOperand(thread_in_wasm_flag_addr, 0));
4505 }
4506
4507 __ LoadWord(
4508 wrapper_buffer,
4509 MemOperand(fp, JSToWasmWrapperFrameConstants::kWrapperBufferOffset));
4510
4511 __ StoreDouble(
4512 wasm::kFpReturnRegisters[0],
4513 MemOperand(
4514 wrapper_buffer,
4515 JSToWasmWrapperFrameConstants::kWrapperBufferFPReturnRegister1));
4516 __ StoreDouble(
4517 wasm::kFpReturnRegisters[1],
4518 MemOperand(
4519 wrapper_buffer,
4520 JSToWasmWrapperFrameConstants::kWrapperBufferFPReturnRegister2));
4521 __ StoreWord(
4522 wasm::kGpReturnRegisters[0],
4523 MemOperand(
4524 wrapper_buffer,
4525 JSToWasmWrapperFrameConstants::kWrapperBufferGPReturnRegister1));
4526 __ StoreWord(
4527 wasm::kGpReturnRegisters[1],
4528 MemOperand(
4529 wrapper_buffer,
4530 JSToWasmWrapperFrameConstants::kWrapperBufferGPReturnRegister2));
4531 // Call the return value builtin with
4532 // a0: wasm instance.
4533 // a1: the result JSArray for multi-return.
4534 // a2: pointer to the byte buffer which contains all parameters.
4535 if (stack_switch) {
4536 __ LoadWord(a1,
4537 MemOperand(fp, StackSwitchFrameConstants::kResultArrayOffset));
4538 __ LoadWord(a0,
4539 MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset));
4540 } else {
4541 __ LoadWord(
4542 a1,
4543 MemOperand(fp, JSToWasmWrapperFrameConstants::kResultArrayParamOffset));
4544 __ LoadWord(
4545 a0, MemOperand(fp, JSToWasmWrapperFrameConstants::kImplicitArgOffset));
4546 }
4547 {
4548 UseScratchRegisterScope temps(masm);
4549 GetContextFromImplicitArg(masm, a0, temps.Acquire());
4550 }
4551 __ CallBuiltin(Builtin::kJSToWasmHandleReturns);
4552
4553 Label return_promise;
4554 if (stack_switch) {
4555 SwitchBackAndReturnPromise(masm, regs, mode, &return_promise);
4556 }
4557 __ bind(&suspend);
4558
4559 __ LeaveFrame(stack_switch ? StackFrame::STACK_SWITCH
4560 : StackFrame::JS_TO_WASM);
4561 // Despite returning to a different location for the regular and the
4562 // stack-switching versions, the incoming argument count matches in both cases:
4563 // the instance and the result array without suspend, or
4564 // the promise resolve/reject parameters for the callback.
4565 constexpr int64_t stack_arguments_in = 2;
4566 // __ DropArguments(stack_arguments_in);
4567 __ AddWord(sp, sp, Operand(stack_arguments_in * kSystemPointerSize));
4568 __ Ret();
4569
4570 // Catch handler for the stack-switching wrapper: reject the promise with the
4571 // thrown exception.
4572 if (mode == wasm::kPromise) {
4573 GenerateExceptionHandlingLandingPad(masm, regs, &return_promise);
4574 }
4575}
4576} // namespace
4577
4578void Builtins::Generate_JSToWasmWrapperAsm(MacroAssembler* masm) {
4579 UseScratchRegisterScope temps(masm);
4581 kWasmImplicitArgRegister, t1, t2));
4582 JSToWasmWrapperHelper(masm, wasm::kNoPromise);
4583}
4584void Builtins::Generate_WasmReturnPromiseOnSuspendAsm(MacroAssembler* masm) {
4585 UseScratchRegisterScope temps(masm);
4587 kWasmImplicitArgRegister, t1, t2));
4588 JSToWasmWrapperHelper(masm, wasm::kPromise);
4589}
4590void Builtins::Generate_JSToWasmStressSwitchStacksAsm(MacroAssembler* masm) {
4591 UseScratchRegisterScope temps(masm);
4593 kWasmImplicitArgRegister, t1, t2));
4594 JSToWasmWrapperHelper(masm, wasm::kStressSwitch);
4595}
4596
4597 void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm,
4598 CallApiCallbackMode mode) {
4599 // ----------- S t a t e -------------
4600 // CallApiCallbackMode::kOptimizedNoProfiling/kOptimized modes:
4601 // -- a1 : api function address
4602 // Both modes:
4603 // -- a2 : arguments count
4604 // -- a3 : FunctionTemplateInfo
4605 // -- cp : context
4606 // -- sp[0] : receiver
4607 // -- sp[8] : first argument
4608 // -- ...
4609 // -- sp[(argc) * 8] : last argument
4610 // -----------------------------------
4611 Register function_callback_info_arg = kCArgRegs[0];
4612
4613 Register api_function_address = no_reg;
4614 Register argc = no_reg;
4615 Register func_templ = no_reg;
4616 Register topmost_script_having_context = no_reg;
4617 Register scratch = t0;
4618
4619 switch (mode) {
4620 case CallApiCallbackMode::kGeneric:
4621 topmost_script_having_context = CallApiCallbackGenericDescriptor::
4622 TopmostScriptHavingContextRegister();
4623 argc = CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister();
4624 func_templ =
4625 CallApiCallbackGenericDescriptor::FunctionTemplateInfoRegister();
4626 break;
4627
4628 case CallApiCallbackMode::kOptimizedNoProfiling:
4629 case CallApiCallbackMode::kOptimized:
4630 // Caller context is always equal to current context because we don't
4631 // inline Api calls cross-context.
4632 topmost_script_having_context = kContextRegister;
4633 api_function_address =
4634 CallApiCallbackOptimizedDescriptor::ApiFunctionAddressRegister();
4635 argc = CallApiCallbackOptimizedDescriptor::ActualArgumentsCountRegister();
4636 func_templ =
4637 CallApiCallbackOptimizedDescriptor::FunctionTemplateInfoRegister();
4638 break;
4639 }
4640 DCHECK(!AreAliased(api_function_address, topmost_script_having_context, argc,
4641 func_templ, scratch));
4642
4643 using FCA = FunctionCallbackArguments;
4644 using ER = ExternalReference;
4645 using FC = ApiCallbackExitFrameConstants;
4646
4647 static_assert(FCA::kArgsLength == 6);
4648 static_assert(FCA::kNewTargetIndex == 5);
4649 static_assert(FCA::kTargetIndex == 4);
4650 static_assert(FCA::kReturnValueIndex == 3);
4651 static_assert(FCA::kContextIndex == 2);
4652 static_assert(FCA::kIsolateIndex == 1);
4653 static_assert(FCA::kUnusedIndex == 0);
4654
4655 // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
4656 // Target state:
4657 // sp[0 * kSystemPointerSize]: kUnused <= FCA::implicit_args_
4658 // sp[1 * kSystemPointerSize]: kIsolate
4659 // sp[2 * kSystemPointerSize]: kContext
4660 // sp[3 * kSystemPointerSize]: undefined (kReturnValue)
4661 // sp[4 * kSystemPointerSize]: kTarget
4662 // sp[5 * kSystemPointerSize]: undefined (kNewTarget)
4663 // Existing state:
4664 // sp[6 * kSystemPointerSize]: <= FCA::values_
4665
4666 __ StoreRootRelative(IsolateData::topmost_script_having_context_offset(),
4667 topmost_script_having_context);
4668 if (mode == CallApiCallbackMode::kGeneric) {
4669 api_function_address = ReassignRegister(topmost_script_having_context);
4670 }
4671 // Reserve space on the stack.
4672 static constexpr int kStackSize = FCA::kArgsLength;
4673 static_assert(kStackSize % 2 == 0);
4674 __ SubWord(sp, sp, Operand(kStackSize * kSystemPointerSize));
4675
4676 // kIsolate.
4677 __ li(scratch, ER::isolate_address());
4678 __ StoreWord(scratch,
4679 MemOperand(sp, FCA::kIsolateIndex * kSystemPointerSize));
4680
4681 // kContext
4682 __ StoreWord(cp, MemOperand(sp, FCA::kContextIndex * kSystemPointerSize));
4683
4684 // kReturnValue
4685 __ LoadRoot(scratch, RootIndex::kUndefinedValue);
4686 __ StoreWord(scratch,
4687 MemOperand(sp, FCA::kReturnValueIndex * kSystemPointerSize));
4688
4689 // kTarget.
4690 __ StoreWord(func_templ,
4691 MemOperand(sp, FCA::kTargetIndex * kSystemPointerSize));
4692
4693 // kNewTarget.
4694 __ StoreWord(scratch,
4695 MemOperand(sp, FCA::kNewTargetIndex * kSystemPointerSize));
4696
4697 // kUnused.
4698 __ StoreWord(scratch, MemOperand(sp, FCA::kUnusedIndex * kSystemPointerSize));
4699
4700 FrameScope frame_scope(masm, StackFrame::MANUAL);
4701 if (mode == CallApiCallbackMode::kGeneric) {
4702 __ LoadExternalPointerField(
4703 api_function_address,
4704 FieldMemOperand(func_templ,
4705 FunctionTemplateInfo::kMaybeRedirectedCallbackOffset),
4707 }
4708
4709 __ EnterExitFrame(scratch, FC::getExtraSlotsCountFrom<ExitFrameConstants>(),
4710 StackFrame::API_CALLBACK_EXIT);
4711 MemOperand argc_operand = MemOperand(fp, FC::kFCIArgcOffset);
4712 {
4713 ASM_CODE_COMMENT_STRING(masm, "Initialize v8::FunctionCallbackInfo");
4714 // FunctionCallbackInfo::length_.
4715 // TODO(ishell): pass JSParameterCount(argc) to simplify things on the
4716 // caller end.
4717 __ StoreWord(argc, argc_operand);
4718 // FunctionCallbackInfo::implicit_args_.
4719 __ AddWord(scratch, fp, Operand(FC::kImplicitArgsArrayOffset));
4720 __ StoreWord(scratch, MemOperand(fp, FC::kFCIImplicitArgsOffset));
4721 // FunctionCallbackInfo::values_ (points at JS arguments on the stack).
4722 __ AddWord(scratch, fp, Operand(FC::kFirstArgumentOffset));
4723 __ StoreWord(scratch, MemOperand(fp, FC::kFCIValuesOffset));
4724 }
4725 __ RecordComment("v8::FunctionCallback's argument");
4726 __ AddWord(function_callback_info_arg, fp,
4727 Operand(FC::kFunctionCallbackInfoOffset));
4728 DCHECK(!AreAliased(api_function_address, function_callback_info_arg));
4729 ExternalReference thunk_ref = ER::invoke_function_callback(mode);
4730 Register no_thunk_arg = no_reg;
4731 MemOperand return_value_operand = MemOperand(fp, FC::kReturnValueOffset);
4732 static constexpr int kSlotsToDropOnReturn =
4733 FC::kFunctionCallbackInfoArgsLength + kJSArgcReceiverSlots;
4734 const bool with_profiling =
4735 mode != CallApiCallbackMode::kOptimizedNoProfiling;
4736 CallApiFunctionAndReturn(masm, with_profiling, api_function_address,
4737 thunk_ref, no_thunk_arg, kSlotsToDropOnReturn,
4738 &argc_operand, return_value_operand);
4739}
4740
4741void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
4742 // ----------- S t a t e -------------
4743 // -- cp : context
4744 // -- a1 : receiver
4745 // -- a3 : accessor info
4746 // -- a0 : holder
4747 // -----------------------------------
4748
4749 Register name_arg = kCArgRegs[0];
4750 Register property_callback_info_arg = kCArgRegs[1];
4751
4752 Register api_function_address = kCArgRegs[2];
4753
4754 Register receiver = ApiGetterDescriptor::ReceiverRegister();
4755 Register holder = ApiGetterDescriptor::HolderRegister();
4756 Register callback = ApiGetterDescriptor::CallbackRegister();
4757 Register scratch = a4;
4758 DCHECK(!AreAliased(receiver, holder, callback, scratch));
4759
4760 // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
4761 // name below the exit frame to make GC aware of them.
4762 using PCA = PropertyCallbackArguments;
4763 using ER = ExternalReference;
4764 using FC = ApiAccessorExitFrameConstants;
4765 static_assert(PCA::kPropertyKeyIndex == 0);
4766 static_assert(PCA::kShouldThrowOnErrorIndex == 1);
4767 static_assert(PCA::kHolderIndex == 2);
4768 static_assert(PCA::kIsolateIndex == 3);
4769 static_assert(PCA::kHolderV2Index == 4);
4770 static_assert(PCA::kReturnValueIndex == 5);
4771 static_assert(PCA::kDataIndex == 6);
4772 static_assert(PCA::kThisIndex == 7);
4773 static_assert(PCA::kArgsLength == 8);
4774 // Set up v8::PropertyCallbackInfo's (PCI) args_ on the stack as follows:
4775 // Target state:
4776 // sp[0 * kSystemPointerSize]: name <= PCI::args_
4777 // sp[1 * kSystemPointerSize]: kShouldThrowOnErrorIndex
4778 // sp[2 * kSystemPointerSize]: kHolderIndex
4779 // sp[3 * kSystemPointerSize]: kIsolateIndex
4780 // sp[4 * kSystemPointerSize]: kHolderV2Index
4781 // sp[5 * kSystemPointerSize]: kReturnValueIndex
4782 // sp[6 * kSystemPointerSize]: kDataIndex
4783 // sp[7 * kSystemPointerSize]: kThisIndex / receiver
4784 __ SubWord(sp, sp, (PCA::kArgsLength)*kSystemPointerSize);
4785 __ StoreWord(receiver, MemOperand(sp, (PCA::kThisIndex)*kSystemPointerSize));
4786 __ LoadTaggedField(scratch,
4787 FieldMemOperand(callback, AccessorInfo::kDataOffset));
4788 __ StoreWord(scratch, MemOperand(sp, (PCA::kDataIndex)*kSystemPointerSize));
4789 __ LoadRoot(scratch, RootIndex::kUndefinedValue);
4790 __ StoreWord(scratch,
4791 MemOperand(sp, (PCA::kReturnValueIndex)*kSystemPointerSize));
4792 __ StoreWord(zero_reg,
4793 MemOperand(sp, (PCA::kHolderV2Index)*kSystemPointerSize));
4794 __ li(scratch, ER::isolate_address());
4795 __ StoreWord(scratch,
4796 MemOperand(sp, (PCA::kIsolateIndex)*kSystemPointerSize));
4797 __ StoreWord(holder, MemOperand(sp, (PCA::kHolderIndex)*kSystemPointerSize));
4798 // should_throw_on_error -> false
4799 DCHECK_EQ(0, Smi::zero().ptr());
4800 __ StoreWord(
4801 zero_reg,
4802 MemOperand(sp, (PCA::kShouldThrowOnErrorIndex)*kSystemPointerSize));
4803 __ LoadTaggedField(scratch,
4804 FieldMemOperand(callback, AccessorInfo::kNameOffset));
4805 __ StoreWord(scratch, MemOperand(sp, 0 * kSystemPointerSize));
4806
4807 __ RecordComment("Load api_function_address");
4808 __ LoadExternalPointerField(
4809 api_function_address,
4810 FieldMemOperand(callback, AccessorInfo::kMaybeRedirectedGetterOffset),
4812
4813 FrameScope frame_scope(masm, StackFrame::MANUAL);
4814 __ EnterExitFrame(scratch, FC::getExtraSlotsCountFrom<ExitFrameConstants>(),
4815 StackFrame::API_ACCESSOR_EXIT);
4816 __ RecordComment("Create v8::PropertyCallbackInfo object on the stack.");
4817 // property_callback_info_arg = v8::PropertyCallbackInfo&
4818 __ AddWord(property_callback_info_arg, fp, Operand(FC::kArgsArrayOffset));
4819 DCHECK(!AreAliased(api_function_address, property_callback_info_arg, name_arg,
4820 callback, scratch));
4821#ifdef V8_ENABLE_DIRECT_HANDLE
4822 // name_arg = Local<Name>(name), name value was pushed to GC-ed stack space.
4823 // |name_arg| is already initialized above.
4824#else
4825 // name_arg = Local<Name>(&name), which is &args_array[kPropertyKeyIndex].
4826 static_assert(PCA::kPropertyKeyIndex == 0);
4827 __ mv(name_arg, property_callback_info_arg);
4828#endif
4829
4830 ExternalReference thunk_ref = ER::invoke_accessor_getter_callback();
4831 // Pass AccessorInfo to thunk wrapper in case profiler or side-effect
4832 // checking is enabled.
4833 Register thunk_arg = callback;
4834
4835 MemOperand return_value_operand = MemOperand(fp, FC::kReturnValueOffset);
4836 static constexpr int kSlotsToDropOnReturn =
4837 FC::kPropertyCallbackInfoArgsLength;
4838 MemOperand* const kUseStackSpaceConstant = nullptr;
4839
4840 const bool with_profiling = true;
4841 CallApiFunctionAndReturn(masm, with_profiling, api_function_address,
4842 thunk_ref, thunk_arg, kSlotsToDropOnReturn,
4843 kUseStackSpaceConstant, return_value_operand);
4844}
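// For context, the args_ array assembled above is what the embedder-visible
// v8::PropertyCallbackInfo wraps. A minimal sketch of an
// AccessorNameGetterCallback as an embedder would write it (public-API types
// from include/v8.h; the getter name and returned value are hypothetical, not
// taken from this file):
#if 0  // Illustrative sketch only; not compiled as part of this file.
#include <v8.h>
static void MyGetter(v8::Local<v8::Name> property,
                     const v8::PropertyCallbackInfo<v8::Value>& info) {
  // info.This() and info.Data() read the kThisIndex and kDataIndex slots
  // stored by Generate_CallApiGetter above; the value written through
  // GetReturnValue() ends up in the kReturnValueIndex slot.
  v8::Isolate* isolate = info.GetIsolate();
  info.GetReturnValue().Set(v8::Number::New(isolate, 42.0));
}
#endif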
4845
4846void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
4847 // The sole purpose of DirectCEntry is for movable callers (e.g. any general
4848 // purpose InstructionStream object) to be able to call into C functions that
4849 // may trigger GC and thus move the caller.
4850 //
4851 // DirectCEntry places the return address on the stack (updated by the GC),
4852 // making the call GC safe. The irregexp backend relies on this.
4853
4854 // Make room for arguments to fit the C calling convention. Callers use
4855 // EnterExitFrame/LeaveExitFrame, so they handle restoring the stack and we
4856 // don't have to do that here. Any caller must drop kCArgsSlotsSize stack
4857 // space after the call.
4858 __ AddWord(sp, sp, -kCArgsSlotsSize);
4859
4860 __ StoreWord(ra,
4861 MemOperand(sp, kCArgsSlotsSize)); // Store the return address.
4862 __ Call(t6); // Call the C++ function.
4863 __ LoadWord(t6, MemOperand(sp, kCArgsSlotsSize)); // Return to calling code.
4864
4865 if (v8_flags.debug_code && v8_flags.enable_slow_asserts) {
4866 // In case of an error the return address may point to a memory area
4867 // filled with kZapValue by the GC. Dereference the address and check for
4868 // this.
4869 __ LoadWord(a4, MemOperand(t6));
4870 __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, a4,
4871 Operand(kZapValue));
4872 }
4873
4874 __ Jump(t6);
4875}
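// A minimal sketch of the calling contract described above, assuming a caller
// that has already set up an exit frame (the target C function is a
// hypothetical placeholder; t6 matches the register used by the Call/Jump
// above, and the caller drops the reserved argument slots afterwards):
#if 0  // Illustrative sketch only; not compiled as part of this file.
  __ li(t6, Operand(reinterpret_cast<intptr_t>(&SomeCFunction)));  // hypothetical target
  __ Call(BUILTIN_CODE(masm->isolate(), DirectCEntry), RelocInfo::CODE_TARGET);
  __ AddWord(sp, sp, kCArgsSlotsSize);  // Drop the slots reserved by DirectCEntry.
#endif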
4876
4877namespace {
4878
4879// This code tries to be close to ia32 code so that any changes can be
4880// easily ported.
4881void Generate_DeoptimizationEntry(MacroAssembler* masm,
4882 DeoptimizeKind deopt_kind) {
4883 Isolate* isolate = masm->isolate();
4884
4885 // Unlike on ARM we don't save all the registers, just the useful ones.
4886 // For the rest, there are gaps on the stack, so the offsets remain the same.
4888
4889 RegList restored_regs = kJSCallerSaved | kCalleeSaved;
4890 RegList saved_regs = restored_regs | sp | ra;
4891
4892 const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
4893
4894 // Save all double FPU registers before messing with them.
4895 __ SubWord(sp, sp, Operand(kDoubleRegsSize));
4896 const RegisterConfiguration* config = RegisterConfiguration::Default();
4897 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
4898 int code = config->GetAllocatableDoubleCode(i);
4899 const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
4900 int offset = code * kDoubleSize;
4901 __ StoreDouble(fpu_reg, MemOperand(sp, offset));
4902 }
4903
4904 // Push saved_regs (needed to populate FrameDescription::registers_).
4905 // Leave gaps for other registers.
4906 __ SubWord(sp, sp, kNumberOfRegisters * kSystemPointerSize);
4907 for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
4908 if ((saved_regs.bits() & (1 << i)) != 0) {
4909 __ StoreWord(ToRegister(i), MemOperand(sp, kSystemPointerSize * i));
4910 }
4911 }
4912
4913 __ li(a2,
4914 ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate));
4915 __ StoreWord(fp, MemOperand(a2));
4916
4917 const int kSavedRegistersAreaSize =
4918 (kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
4919
4920 // Get the address of the return location in the code object (a2, the
4921 // return address used for lazy deoptimization) and compute the fp-to-sp
4922 // delta in register a3.
4923 __ Move(a2, ra);
4924 __ AddWord(a3, sp, Operand(kSavedRegistersAreaSize));
4925
4926 __ SubWord(a3, fp, a3);
4927
4928 // Allocate a new deoptimizer object.
4929 __ PrepareCallCFunction(5, a4);
4930 // Pass five arguments, according to the RISC-V C calling convention.
4931 __ Move(a0, zero_reg);
4932 Label context_check;
4933 __ LoadWord(a1,
4934 MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
4935 __ JumpIfSmi(a1, &context_check);
4936 __ LoadWord(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
4937 __ bind(&context_check);
4938 __ li(a1, Operand(static_cast<int64_t>(deopt_kind)));
4939 // a2: code object address
4940 // a3: fp-to-sp delta
4941 __ li(a4, ExternalReference::isolate_address());
4942
4943 // Call Deoptimizer::New().
4944 {
4945 AllowExternalCallThatCantCauseGC scope(masm);
4946 __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
4947 }
4948
4949 // Preserve "deoptimizer" object in register a0 and get the input
4950 // frame descriptor pointer to a1 (deoptimizer->input_);
4951 __ LoadWord(a1, MemOperand(a0, Deoptimizer::input_offset()));
4952
4953 // Copy core registers into FrameDescription::registers_[kNumRegisters].
4955 for (int i = 0; i < kNumberOfRegisters; i++) {
4956 int offset =
4957 (i * kSystemPointerSize) + FrameDescription::registers_offset();
4958 if ((saved_regs.bits() & (1 << i)) != 0) {
4959 __ LoadWord(a2, MemOperand(sp, i * kSystemPointerSize));
4960 __ StoreWord(a2, MemOperand(a1, offset));
4961 } else if (v8_flags.debug_code) {
4962 __ li(a2, kDebugZapValue);
4963 __ StoreWord(a2, MemOperand(a1, offset));
4964 }
4965 }
4966
4967 int double_regs_offset = FrameDescription::double_registers_offset();
4968 // int simd128_regs_offset = FrameDescription::simd128_registers_offset();
4969 // Copy FPU registers to
4970 // double_registers_[DoubleRegister::kNumAllocatableRegisters]
4971 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
4972 int code = config->GetAllocatableDoubleCode(i);
4973 int dst_offset = code * kDoubleSize + double_regs_offset;
4974 int src_offset =
4975 code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize;
4976 __ LoadDouble(ft0, MemOperand(sp, src_offset));
4977 __ StoreDouble(ft0, MemOperand(a1, dst_offset));
4978 }
4979 // TODO(riscv): Add Simd128 copy
4980
4981 // Remove the saved registers from the stack.
4982 __ AddWord(sp, sp, Operand(kSavedRegistersAreaSize));
4983
4984 // Compute a pointer to the unwinding limit in register a2; that is
4985 // the first stack slot not part of the input frame.
4986 __ LoadWord(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
4987 __ AddWord(a2, a2, sp);
4988
4989 // Unwind the stack down to - but not including - the unwinding
4990 // limit and copy the contents of the activation frame to the input
4991 // frame description.
4992 __ AddWord(a3, a1, Operand(FrameDescription::frame_content_offset()));
4993 Label pop_loop;
4994 Label pop_loop_header;
4995 __ BranchShort(&pop_loop_header);
4996 __ bind(&pop_loop);
4997 __ pop(a4);
4998 __ StoreWord(a4, MemOperand(a3, 0));
4999 __ AddWord(a3, a3, kSystemPointerSize);
5000 __ bind(&pop_loop_header);
5001 __ Branch(&pop_loop, ne, a2, Operand(sp), Label::Distance::kNear);
5002 // Compute the output frame in the deoptimizer.
5003 __ push(a0); // Preserve deoptimizer object across call.
5004 // a0: deoptimizer object; a1: scratch.
5005 __ PrepareCallCFunction(1, a1);
5006 // Call Deoptimizer::ComputeOutputFrames().
5007 {
5008 AllowExternalCallThatCantCauseGC scope(masm);
5009 __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
5010 }
5011 __ pop(a0); // Restore deoptimizer object (class Deoptimizer).
5012
5014
5015 // Replace the current (input) frame with the output frames.
5016 Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
5017 // Outer loop state: a4 = current "FrameDescription** output_",
5018 // a1 = one past the last FrameDescription**.
5020 __ LoadWord(a4,
5021 MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
5022 __ CalcScaledAddress(a1, a4, a1, kSystemPointerSizeLog2);
5023 __ BranchShort(&outer_loop_header);
5024 __ bind(&outer_push_loop);
5025 // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
5026 __ LoadWord(a2, MemOperand(a4, 0)); // output_[ix]
5027 __ LoadWord(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
5028 __ BranchShort(&inner_loop_header);
5029 __ bind(&inner_push_loop);
5030 __ SubWord(a3, a3, Operand(kSystemPointerSize));
5031 __ AddWord(a6, a2, Operand(a3));
5032 __ LoadWord(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
5033 __ push(a7);
5034 __ bind(&inner_loop_header);
5035 __ Branch(&inner_push_loop, ne, a3, Operand(zero_reg));
5036
5037 __ AddWord(a4, a4, Operand(kSystemPointerSize));
5038 __ bind(&outer_loop_header);
5039 __ Branch(&outer_push_loop, lt, a4, Operand(a1));
5040
5041 __ LoadWord(a1, MemOperand(a0, Deoptimizer::input_offset()));
5042 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
5043 int code = config->GetAllocatableDoubleCode(i);
5044 const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
5045 int src_offset = code * kDoubleSize + double_regs_offset;
5046 __ LoadDouble(fpu_reg, MemOperand(a1, src_offset));
5047 }
5048
5049 // Push pc and continuation from the last output frame.
5050 __ LoadWord(a6, MemOperand(a2, FrameDescription::pc_offset()));
5051 __ push(a6);
5052 __ LoadWord(a6, MemOperand(a2, FrameDescription::continuation_offset()));
5053 __ push(a6);
5054
5055 // t3 is used below as the base register while restoring the others, so it
5056 // must not itself be in restored_regs; assert that here.
5057 DCHECK(!(restored_regs.has(t3)));
5058 // Restore the registers from the last output frame.
5059 __ Move(t3, a2);
5060 for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
5061 int offset =
5062 (i * kSystemPointerSize) + FrameDescription::registers_offset();
5063 if ((restored_regs.bits() & (1 << i)) != 0) {
5064 __ LoadWord(ToRegister(i), MemOperand(t3, offset));
5065 }
5066 }
5067
5068 __ pop(t6); // Get continuation, leave pc on stack.
5069 __ pop(ra);
5070 Label end;
5071 __ Branch(&end, eq, t6, Operand(zero_reg));
5072 __ Jump(t6);
5073 __ bind(&end);
5074 __ Ret();
5075 __ stop();
5076}
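// For readability, the two copy loops above (core registers, then FPU
// registers, into the input FrameDescription) correspond roughly to the
// following C++. This is a hedged sketch: `input`, `stacked_registers` and
// `stacked_doubles` are hypothetical names for the deoptimizer's input frame
// and the values pushed on the stack above.
#if 0  // Illustrative sketch only; not compiled as part of this file.
  for (int i = 0; i < kNumberOfRegisters; ++i) {
    if ((saved_regs.bits() & (1 << i)) != 0) {
      input->SetRegister(i, stacked_registers[i]);
    } else if (v8_flags.debug_code) {
      input->SetRegister(i, kDebugZapValue);  // Mark unsaved slots when checking.
    }
  }
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    input->SetDoubleRegister(code, stacked_doubles[code]);
  }
#endif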
5077
5078} // namespace
5079
5080void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
5081 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
5082}
5083
5084void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
5085 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
5086}
5087
5088 // If there is baseline code on the shared function info, this converts an
5089 // interpreter frame into a baseline frame and continues execution in
5090 // baseline code. Otherwise, execution continues with bytecode.
5091void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
5092 MacroAssembler* masm) {
5093 Label start;
5094 __ bind(&start);
5095
5096 // Get function from the frame.
5097 Register closure = a1;
5098 __ LoadWord(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
5099
5100 // Get the InstructionStream object from the shared function info.
5101 Register code_obj = s1;
5102 __ LoadTaggedField(
5103 code_obj,
5104 FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
5105
5106 ResetSharedFunctionInfoAge(masm, code_obj);
5107
5108 __ LoadTrustedPointerField(
5109 code_obj,
5110 FieldMemOperand(code_obj, SharedFunctionInfo::kTrustedFunctionDataOffset),
5111 kUnknownIndirectPointerTag);
5112
5113 // For OSR entry it is safe to assume we always have baseline code.
5114 if (v8_flags.debug_code) {
5115 UseScratchRegisterScope temps(masm);
5116 Register scratch = temps.Acquire();
5117 __ GetObjectType(code_obj, scratch, scratch);
5118 __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
5119 Operand(CODE_TYPE));
5120 AssertCodeIsBaseline(masm, code_obj, scratch);
5121 }
5122
5123 // Load the feedback cell and vector.
5124 Register feedback_cell = a2;
5125 Register feedback_vector = t4;
5126 __ LoadTaggedField(feedback_cell,
5127 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
5128 __ LoadTaggedField(
5129 feedback_vector,
5130 FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset));
5131 Label install_baseline_code;
5132 // Check if feedback vector is valid. If not, call prepare for baseline to
5133 // allocate it.
5134 {
5135 UseScratchRegisterScope temps(masm);
5136 Register type = temps.Acquire();
5137 __ GetObjectType(feedback_vector, type, type);
5138 __ Branch(&install_baseline_code, ne, type, Operand(FEEDBACK_VECTOR_TYPE));
5139 }
5140 // Save BytecodeOffset from the stack frame.
5143 // Replace bytecode offset with feedback cell.
5146 __ StoreWord(feedback_cell,
5148 feedback_cell = no_reg;
5149 // Update feedback vector cache.
5152 __ StoreWord(
5153 feedback_vector,
5155 feedback_vector = no_reg;
5156
5157 // Compute baseline pc for bytecode offset.
5158 Register get_baseline_pc = a3;
5159 __ li(get_baseline_pc,
5160 ExternalReference::baseline_pc_for_next_executed_bytecode());
5161
5165
5166 // Get bytecode array from the stack frame.
5170 {
5171 __ Move(kCArgRegs[0], code_obj);
5174 FrameScope scope(masm, StackFrame::INTERNAL);
5175 __ PrepareCallCFunction(3, 0, a4);
5176 __ CallCFunction(get_baseline_pc, 3, 0);
5177 }
5178 __ LoadCodeInstructionStart(code_obj, code_obj, kJSEntrypointTag);
5179 __ AddWord(code_obj, code_obj, kReturnRegister0);
5181
5182 // Reset the OSR loop nesting depth to disarm back edges.
5183 // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
5184 // Sparkplug here.
5187 Generate_OSREntry(masm, code_obj);
5188 __ Trap(); // Unreachable.
5189
5190 __ bind(&install_baseline_code);
5191 {
5192 FrameScope scope(masm, StackFrame::INTERNAL);
5194 __ Push(closure);
5195 __ CallRuntime(Runtime::kInstallBaselineCode, 1);
5197 }
5198 // Retry from the start after installing baseline code.
5199 __ Branch(&start);
5200}
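// The C call above passes three arguments (the code object, the saved
// bytecode offset, and the bytecode array) and adds the returned offset to
// the code's instruction start to form the OSR entry pc. A hedged sketch of
// the assumed C-side signature behind
// ExternalReference::baseline_pc_for_next_executed_bytecode() (the name and
// parameter spelling here are assumptions for illustration only):
#if 0  // Illustrative sketch only; not compiled as part of this file.
Address BaselinePCForNextExecutedBytecode(Address raw_code_obj,
                                          Address raw_bytecode_offset,
                                          Address raw_bytecode_array);
// osr_pc = code_instruction_start + BaselinePCForNextExecutedBytecode(...);
#endif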
5201
5202void Builtins::Generate_RestartFrameTrampoline(MacroAssembler* masm) {
5203 // Frame is being dropped:
5204 // - Look up current function on the frame.
5205 // - Leave the frame.
5206 // - Restart the frame by calling the function.
5207
5208 __ LoadWord(a1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
5209 __ LoadWord(a0, MemOperand(fp, StandardFrameConstants::kArgCOffset));
5210
5211 // Pop return address and frame.
5212 __ LeaveFrame(StackFrame::INTERPRETED);
5213
5214#if defined(V8_ENABLE_LEAPTIERING) && defined(V8_TARGET_ARCH_RISCV64)
5215 __ InvokeFunction(a1, a0, InvokeType::kJump,
5217#else
5218 __ li(a2, Operand(kDontAdaptArgumentsSentinel));
5219 __ InvokeFunction(a1, a2, a0, InvokeType::kJump);
5220#endif
5221}
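// In short, the trampoline above recovers the function and the original
// argument count from the frame being dropped, pops that frame, and then
// tail-calls the function again (with kDontAdaptArgumentsSentinel as the
// expected count on the non-leaptiering path, so no argument adaptation
// happens), so the debugger observes the frame restarting from the beginning.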
5222
5223#undef __
5224
5225} // namespace internal
5226} // namespace v8