v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
memory-lowering.cc
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/memory-lowering.h"

#include "src/codegen/interface-descriptors-inl.h"
#include "src/common/globals.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
#include "src/roots/roots-inl.h"
#include "src/sandbox/external-pointer-inl.h"

#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
#endif

namespace v8 {
namespace internal {
namespace compiler {

// An allocation group represents a set of allocations that have been folded
// together.
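// Example (illustrative): if two consecutive AllocateRaw nodes request 16 and
// 24 bytes, they can land in one group backed by a single reservation whose
// size node is patched up to 40; the second object is then carved out of the
// already reserved space by bumping the recorded top, with no second limit
// check or runtime call.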
class MemoryLowering::AllocationGroup final : public ZoneObject {
 public:
  AllocationGroup(Node* node, AllocationType allocation, Zone* zone);
  AllocationGroup(Node* node, AllocationType allocation, Node* size,
                  Zone* zone);
  ~AllocationGroup() = default;

  void Add(Node* object);
  bool Contains(Node* object) const;
  bool IsYoungGenerationAllocation() const {
    return allocation() == AllocationType::kYoung;
  }

  AllocationType allocation() const { return allocation_; }
  Node* size() const { return size_; }

 private:
  ZoneSet<NodeId> node_ids_;
  AllocationType const allocation_;
  Node* const size_;

  static inline AllocationType CheckAllocationType(AllocationType allocation) {
    // For non-generational heap, all young allocations are redirected to old
    // space.
    if (v8_flags.single_generation && allocation == AllocationType::kYoung) {
      return AllocationType::kOld;
    }
    return allocation;
  }

  DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationGroup);
};

MemoryLowering::MemoryLowering(JSGraph* jsgraph, Zone* zone,
                               JSGraphAssembler* graph_assembler, bool is_wasm,
                               AllocationFolding allocation_folding,
                               WriteBarrierAssertFailedCallback callback,
                               const char* function_debug_name)
    : isolate_(jsgraph->isolate()),
      zone_(zone),
      graph_(jsgraph->graph()),
      common_(jsgraph->common()),
      machine_(jsgraph->machine()),
      graph_assembler_(graph_assembler),
      is_wasm_(is_wasm),
      allocation_folding_(allocation_folding),
      write_barrier_assert_failed_(callback),
      function_debug_name_(function_debug_name) {}

Zone* MemoryLowering::graph_zone() const { return graph()->zone(); }

Reduction MemoryLowering::Reduce(Node* node) {
  switch (node->opcode()) {
    case IrOpcode::kAllocate:
      // Allocate nodes were purged from the graph in effect-control
      // linearization.
      UNREACHABLE();
    case IrOpcode::kAllocateRaw:
      return ReduceAllocateRaw(node);
    case IrOpcode::kLoadFromObject:
    case IrOpcode::kLoadImmutableFromObject:
      return ReduceLoadFromObject(node);
    case IrOpcode::kLoadElement:
      return ReduceLoadElement(node);
    case IrOpcode::kLoadField:
      return ReduceLoadField(node);
    case IrOpcode::kStoreToObject:
    case IrOpcode::kInitializeImmutableInObject:
      return ReduceStoreToObject(node);
    case IrOpcode::kStoreElement:
      return ReduceStoreElement(node);
    case IrOpcode::kStoreField:
      return ReduceStoreField(node);
    case IrOpcode::kStore:
      return ReduceStore(node);
    default:
      return NoChange();
  }
}

void MemoryLowering::EnsureAllocateOperator() {
  if (allocate_operator_.is_set()) return;

  auto descriptor = AllocateDescriptor{};
  StubCallMode mode = isolate_ != nullptr ? StubCallMode::kCallCodeObject
                                          : StubCallMode::kCallBuiltinPointer;
  auto call_descriptor = Linkage::GetStubCallDescriptor(
      graph_zone(), descriptor, descriptor.GetStackParameterCount(),
      CallDescriptor::kCanUseRoots, Operator::kNoThrow, mode);
  allocate_operator_.set(common()->Call(call_descriptor));
}

#if V8_ENABLE_WEBASSEMBLY
Node* MemoryLowering::GetWasmInstanceNode() {
  if (wasm_instance_node_.is_set()) return wasm_instance_node_.get();
  for (Node* use : graph()->start()->uses()) {
    if (use->opcode() == IrOpcode::kParameter &&
        ParameterIndexOf(use->op()) == kWasmInstanceDataParameterIndex) {
      wasm_instance_node_.set(use);
      return use;
    }
  }
  UNREACHABLE();  // The instance node must have been created before.
}
#endif  // V8_ENABLE_WEBASSEMBLY

#define __ gasm()->

Node* MemoryLowering::AlignToAllocationAlignment(Node* value) {
  if (!V8_COMPRESS_POINTERS_8GB_BOOL) return value;

  auto already_aligned = __ MakeLabel(MachineRepresentation::kWord64);
  Node* alignment_check = __ WordEqual(
      __ WordAnd(value, __ UintPtrConstant(kObjectAlignment8GbHeapMask)),
      __ UintPtrConstant(0));

  __ GotoIf(alignment_check, &already_aligned, value);
  {
    Node* aligned_value;
    if (kObjectAlignment8GbHeap == 2 * kTaggedSize) {
      aligned_value = __ IntPtrAdd(value, __ IntPtrConstant(kTaggedSize));
    } else {
      aligned_value = __ WordAnd(
          __ IntAdd(value, __ IntPtrConstant(kObjectAlignment8GbHeapMask)),
          __ UintPtrConstant(~kObjectAlignment8GbHeapMask));
    }
    __ Goto(&already_aligned, aligned_value);
  }

  __ Bind(&already_aligned);

  return already_aligned.PhiAt(0);
}
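// Example (illustrative, assuming kObjectAlignment8GbHeap == 8 and
// kTaggedSize == 4): a tagged-aligned size of 0x100c is not 8-byte aligned,
// so AlignToAllocationAlignment() bumps it by kTaggedSize to 0x1010, while an
// already 8-byte-aligned value flows through unchanged via the GotoIf above.
// Without V8_COMPRESS_POINTERS_8GB_BOOL the function is a no-op.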

Reduction MemoryLowering::ReduceAllocateRaw(Node* node,
                                            AllocationType allocation_type,
                                            AllocationState const** state_ptr) {
  DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
  DCHECK_IMPLIES(allocation_folding_ == AllocationFolding::kDoAllocationFolding,
                 state_ptr != nullptr);
  if (v8_flags.single_generation && allocation_type == AllocationType::kYoung) {
    allocation_type = AllocationType::kOld;
  }
  // InstructionStream objects may have a maximum size smaller than
  // kMaxHeapObjectSize due to guard pages. If we need to support allocating
  // code here we would need to call
  // MemoryChunkLayout::MaxRegularCodeObjectSize() at runtime.
  DCHECK_NE(allocation_type, AllocationType::kCode);
  Node* value;
  Node* size = node->InputAt(0);
  Node* effect = node->InputAt(1);
  Node* control = node->InputAt(2);

  gasm()->InitializeEffectControl(effect, control);

  Node* allocate_builtin;
  if (!is_wasm_) {
    if (allocation_type == AllocationType::kYoung) {
      allocate_builtin = __ AllocateInYoungGenerationStubConstant();
    } else {
      allocate_builtin = __ AllocateInOldGenerationStubConstant();
    }
  } else {
#if V8_ENABLE_WEBASSEMBLY
    // This lowering is used by Wasm, where we compile isolate-independent
    // code. Builtin calls simply encode the target builtin ID, which will
    // be patched to the builtin's address later.
    if (isolate_ == nullptr) {
      Builtin builtin;
      if (allocation_type == AllocationType::kYoung) {
        builtin = Builtin::kWasmAllocateInYoungGeneration;
      } else {
        builtin = Builtin::kWasmAllocateInOldGeneration;
      }
      static_assert(std::is_same<Smi, BuiltinPtr>(), "BuiltinPtr must be Smi");
      allocate_builtin =
          graph()->NewNode(common()->NumberConstant(static_cast<int>(builtin)));
    } else {
      if (allocation_type == AllocationType::kYoung) {
        allocate_builtin = __ WasmAllocateInYoungGenerationStubConstant();
      } else {
        allocate_builtin = __ WasmAllocateInOldGenerationStubConstant();
      }
    }
#else
    UNREACHABLE();
#endif
  }

  // Determine the top/limit addresses.
  Node* top_address;
  Node* limit_address;
  if (isolate_ != nullptr) {
    top_address = __ ExternalConstant(
        allocation_type == AllocationType::kYoung
            ? ExternalReference::new_space_allocation_top_address(isolate())
            : ExternalReference::old_space_allocation_top_address(isolate()));
    limit_address = __ ExternalConstant(
        allocation_type == AllocationType::kYoung
            ? ExternalReference::new_space_allocation_limit_address(isolate())
            : ExternalReference::old_space_allocation_limit_address(isolate()));
  } else {
    // Wasm mode: producing isolate-independent code, loading the isolate
    // address at runtime.
#if V8_ENABLE_WEBASSEMBLY
    Node* instance_node = GetWasmInstanceNode();
    int top_address_offset =
        allocation_type == AllocationType::kYoung
            ? WasmTrustedInstanceData::kNewAllocationTopAddressOffset
            : WasmTrustedInstanceData::kOldAllocationTopAddressOffset;
    int limit_address_offset =
        allocation_type == AllocationType::kYoung
            ? WasmTrustedInstanceData::kNewAllocationLimitAddressOffset
            : WasmTrustedInstanceData::kOldAllocationLimitAddressOffset;
    top_address =
        __ Load(MachineType::Pointer(), instance_node,
                __ IntPtrConstant(top_address_offset - kHeapObjectTag));
    limit_address =
        __ Load(MachineType::Pointer(), instance_node,
                __ IntPtrConstant(limit_address_offset - kHeapObjectTag));
#else
    UNREACHABLE();
#endif  // V8_ENABLE_WEBASSEMBLY
  }

  // Check if we can fold this allocation into a previous allocation represented
  // by the incoming {state}.
  IntPtrMatcher m(size);
  if (m.IsInRange(0, kMaxRegularHeapObjectSize) && v8_flags.inline_new &&
      allocation_folding_ == AllocationFolding::kDoAllocationFolding) {
    intptr_t const object_size =
        ALIGN_TO_ALLOCATION_ALIGNMENT(m.ResolvedValue());
    AllocationState const* state = *state_ptr;
    if (state->size() <= kMaxRegularHeapObjectSize - object_size &&
        state->group()->allocation() == allocation_type) {
      // We can fold this Allocate {node} into the allocation {group}
      // represented by the given {state}. Compute the upper bound for
      // the new {state}.
      intptr_t const state_size = state->size() + object_size;

      // Update the reservation check to the actual maximum upper bound.
      AllocationGroup* const group = state->group();
      if (machine()->Is64()) {
        if (OpParameter<int64_t>(group->size()->op()) < state_size) {
          NodeProperties::ChangeOp(group->size(),
                                   common()->Int64Constant(state_size));
        }
      } else {
        if (OpParameter<int32_t>(group->size()->op()) < state_size) {
          NodeProperties::ChangeOp(
              group->size(),
              common()->Int32Constant(static_cast<int32_t>(state_size)));
        }
      }

      // Update the allocation top with the new object allocation.
      // TODO(bmeurer): Defer writing back top as much as possible.
      DCHECK_IMPLIES(V8_COMPRESS_POINTERS_8GB_BOOL,
                     IsAligned(object_size, kObjectAlignment8GbHeap));
      Node* top = __ IntAdd(state->top(), __ IntPtrConstant(object_size));
      __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                   kNoWriteBarrier),
               top_address, __ IntPtrConstant(0), top);

      // Compute the effective inner allocated address.
      value = __ BitcastWordToTagged(
          __ IntAdd(state->top(), __ IntPtrConstant(kHeapObjectTag)));
      effect = gasm()->effect();
      control = gasm()->control();

      // Extend the allocation {group}.
      group->Add(value);
      *state_ptr =
          AllocationState::Open(group, state_size, top, effect, zone());
    } else {
      auto call_runtime = __ MakeDeferredLabel();
      auto done = __ MakeLabel(MachineType::PointerRepresentation());

      // Setup a mutable reservation size node; will be patched as we fold
      // additional allocations into this new group.
      Node* reservation_size = __ UniqueIntPtrConstant(object_size);

      // Load allocation top and limit.
      Node* top =
          __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
      Node* limit =
          __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));

      // Check if we need to collect garbage before we can start bump pointer
      // allocation (always done for folded allocations).
      Node* check = __ UintLessThan(__ IntAdd(top, reservation_size), limit);

      __ GotoIfNot(check, &call_runtime);
      __ Goto(&done, top);

      __ Bind(&call_runtime);
      {
        EnsureAllocateOperator();
        Node* vfalse = __ BitcastTaggedToWord(__ Call(
            allocate_operator_.get(), allocate_builtin, reservation_size));
        vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
        __ Goto(&done, vfalse);
      }

      __ Bind(&done);

      // Compute the new top and write it back.
      top = __ IntAdd(done.PhiAt(0), __ IntPtrConstant(object_size));
      __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                   kNoWriteBarrier),
               top_address, __ IntPtrConstant(0), top);

      // Compute the initial object address.
      value = __ BitcastWordToTagged(
          __ IntAdd(done.PhiAt(0), __ IntPtrConstant(kHeapObjectTag)));
      effect = gasm()->effect();
      control = gasm()->control();

      // Start a new allocation group.
      AllocationGroup* group = zone()->New<AllocationGroup>(
          value, allocation_type, reservation_size, zone());
      *state_ptr =
          AllocationState::Open(group, object_size, top, effect, zone());
    }
  } else {
    auto call_runtime = __ MakeDeferredLabel();
    auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);

    // Load allocation top and limit.
    Node* top =
        __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
    Node* limit =
        __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));

    // Compute the new top.
    Node* new_top = __ IntAdd(top, AlignToAllocationAlignment(size));

    // Check if we can do bump pointer allocation here.
    Node* check = __ UintLessThan(new_top, limit);
    __ GotoIfNot(check, &call_runtime);
    __ GotoIfNot(
        __ UintLessThan(size, __ IntPtrConstant(kMaxRegularHeapObjectSize)),
        &call_runtime);
    __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                 kNoWriteBarrier),
             top_address, __ IntPtrConstant(0), new_top);
    __ Goto(&done, __ BitcastWordToTagged(
                       __ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));

    __ Bind(&call_runtime);
    EnsureAllocateOperator();
    __ Goto(&done, __ Call(allocate_operator_.get(), allocate_builtin, size));

    __ Bind(&done);
    value = done.PhiAt(0);
    effect = gasm()->effect();
    control = gasm()->control();

    if (state_ptr) {
      // Create an unfoldable allocation group.
      AllocationGroup* group =
          zone()->New<AllocationGroup>(value, allocation_type, zone());
      *state_ptr = AllocationState::Closed(group, effect, zone());
    }
  }

  return Replace(value);
}
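// Example (illustrative): on the non-folding path above, with top == 0x10000,
// limit == 0x20000 and an aligned request of 0x18 bytes, new_top == 0x10018 is
// below the limit, so the fast path writes 0x10018 back to the top address and
// produces the tagged pointer 0x10001 (top + kHeapObjectTag); if either check
// fails, the Allocate builtin is called instead and its result is used.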

Reduction MemoryLowering::ReduceLoadFromObject(Node* node) {
  DCHECK(node->opcode() == IrOpcode::kLoadFromObject ||
         node->opcode() == IrOpcode::kLoadImmutableFromObject);
  ObjectAccess const& access = ObjectAccessOf(node->op());

  MachineType machine_type = access.machine_type;

  if (machine_type.IsMapWord()) {
    CHECK_EQ(machine_type.semantic(), MachineSemantic::kAny);
    return ReduceLoadMap(node);
  }

  MachineRepresentation rep = machine_type.representation();
  const Operator* load_op =
      ElementSizeInBytes(rep) > kTaggedSize &&
              !machine()->UnalignedLoadSupported(rep)
          ? machine()->UnalignedLoad(machine_type)
          : machine()->Load(machine_type);
  NodeProperties::ChangeOp(node, load_op);
  return Changed(node);
}

Reduction MemoryLowering::ReduceLoadElement(Node* node) {
  DCHECK_EQ(IrOpcode::kLoadElement, node->opcode());
  ElementAccess const& access = ElementAccessOf(node->op());
  Node* index = node->InputAt(1);
  node->ReplaceInput(1, ComputeIndex(access, index));
  MachineType type = access.machine_type;
  DCHECK(!type.IsMapWord());
  NodeProperties::ChangeOp(node, machine()->Load(type));
  return Changed(node);
}

Reduction MemoryLowering::ReduceLoadExternalPointerField(Node* node) {
  DCHECK_EQ(node->opcode(), IrOpcode::kLoadField);
  FieldAccess const& access = FieldAccessOf(node->op());

#ifdef V8_ENABLE_SANDBOX
  ExternalPointerTag tag = access.external_pointer_tag;
  DCHECK_NE(tag, kExternalPointerNullTag);
  // Fields for sandboxed external pointer contain a 32-bit handle, not a
  // 64-bit raw pointer.
  NodeProperties::ChangeOp(node, machine()->Load(MachineType::Uint32()));

  Node* effect = NodeProperties::GetEffectInput(node);
  Node* control = NodeProperties::GetControlInput(node);
  __ InitializeEffectControl(effect, control);

  // Clone the load node and put it here.
  // TODO(turbofan): consider adding GraphAssembler::Clone() suitable for
  // cloning nodes from arbitrary locations in effect/control chains.
  static_assert(kExternalPointerIndexShift > kSystemPointerSizeLog2);
  Node* handle = __ AddNode(graph()->CloneNode(node));
  Node* shift_amount =
      __ Int32Constant(kExternalPointerIndexShift - kSystemPointerSizeLog2);
  Node* offset = __ Word32Shr(handle, shift_amount);

  // Uncomment this to generate a breakpoint for debugging purposes.
  // __ DebugBreak();

  // Decode loaded external pointer.
  //
  // Here we access the external pointer table through an ExternalReference.
  // Alternatively, we could also hardcode the address of the table since it
  // is never reallocated. However, in that case we must be able to guarantee
  // that the generated code is never executed under a different Isolate, as
  // that would allow access to external objects from different Isolates. It
  // also would break if the code is serialized/deserialized at some point.
  Node* table_address =
      IsSharedExternalPointerType(tag)
          ? __
            Load(MachineType::Pointer(),
                 __ ExternalConstant(
                     ExternalReference::
                         shared_external_pointer_table_address_address(
                             isolate())),
                 __ IntPtrConstant(0))
          : __ ExternalConstant(
                ExternalReference::external_pointer_table_address(isolate()));
  Node* table = __ Load(MachineType::Pointer(), table_address,
                        __ IntPtrConstant(
                            Internals::kExternalPointerTableBasePointerOffset));
  Node* pointer =
      __ Load(MachineType::Pointer(), table, __ ChangeUint32ToUint64(offset));
  Node* actual_tag =
      __ WordAnd(pointer, __ IntPtrConstant(kExternalPointerTagMask));
  actual_tag = __ TruncateInt64ToInt32(
      __ WordShr(actual_tag, __ IntPtrConstant(kExternalPointerTagShift)));
  Node* expected_tag = __ Int32Constant(tag);
  pointer =
      __ Word64And(pointer, __ IntPtrConstant(kExternalPointerPayloadMask));
  auto done = __ MakeLabel(MachineRepresentation::kWord64);
  __ GotoIf(__ WordEqual(actual_tag, expected_tag), &done, pointer);
  __ Goto(&done, __ IntPtrConstant(0));
  __ Bind(&done);
  return Replace(done.PhiAt(0));
#else
  NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
  return Changed(node);
#endif  // V8_ENABLE_SANDBOX
}
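// Example (illustrative, assuming kExternalPointerIndexShift -
// kSystemPointerSizeLog2 == 3): a 32-bit handle of 0x80 yields the byte offset
// 0x80 >> 3 == 0x10 into the external pointer table, i.e. the third 8-byte
// entry; the loaded entry is only used if its embedded tag matches
// access.external_pointer_tag, otherwise the load above yields 0.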

Reduction MemoryLowering::ReduceLoadBoundedSize(Node* node) {
#ifdef V8_ENABLE_SANDBOX
  const Operator* load_op =
      !machine()->UnalignedLoadSupported(MachineRepresentation::kWord64)
          ? machine()->UnalignedLoad(MachineType::Uint64())
          : machine()->Load(MachineType::Uint64());
  NodeProperties::ChangeOp(node, load_op);

  Node* effect = NodeProperties::GetEffectInput(node);
  Node* control = NodeProperties::GetControlInput(node);
  __ InitializeEffectControl(effect, control);

  Node* raw_value = __ AddNode(graph()->CloneNode(node));
  Node* shift_amount = __ IntPtrConstant(kBoundedSizeShift);
  Node* decoded_size = __ Word64Shr(raw_value, shift_amount);
  return Replace(decoded_size);
#else
  UNREACHABLE();
#endif
}
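// Example (illustrative): bounded size fields are stored shifted left by
// kBoundedSizeShift, so a raw field value of (24 << kBoundedSizeShift) decodes
// back to 24 above; the shift encoding also bounds the largest value such a
// field can decode to, which is what makes it safe under the sandbox.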

Reduction MemoryLowering::ReduceLoadMap(Node* node) {
#ifdef V8_MAP_PACKING
  NodeProperties::ChangeOp(node, machine()->Load(MachineType::Uint64()));

  Node* effect = NodeProperties::GetEffectInput(node);
  Node* control = NodeProperties::GetControlInput(node);
  __ InitializeEffectControl(effect, control);

  node = __ AddNode(graph()->CloneNode(node));
  return Replace(__ UnpackMapWord(node));
#else
  NodeProperties::ChangeOp(node,
                           machine()->Load(MachineType::TaggedPointer()));
  return Changed(node);
#endif
}

Reduction MemoryLowering::ReduceLoadField(Node* node) {
  DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
  FieldAccess const& access = FieldAccessOf(node->op());
  Node* offset = __ IntPtrConstant(access.offset - access.tag());
  node->InsertInput(graph_zone(), 1, offset);
  MachineType type = access.machine_type;

  if (type.IsMapWord()) {
    DCHECK(!access.type.Is(Type::ExternalPointer()));
    return ReduceLoadMap(node);
  }

  if (access.type.Is(Type::ExternalPointer())) {
    return ReduceLoadExternalPointerField(node);
  }

  if (access.is_bounded_size_access) {
    return ReduceLoadBoundedSize(node);
  }

  NodeProperties::ChangeOp(node, machine()->Load(type));

  return Changed(node);
}

Reduction MemoryLowering::ReduceStoreToObject(Node* node,
                                              AllocationState const* state) {
  DCHECK(node->opcode() == IrOpcode::kStoreToObject ||
         node->opcode() == IrOpcode::kInitializeImmutableInObject);
  ObjectAccess const& access = ObjectAccessOf(node->op());
  Node* object = node->InputAt(0);
  Node* value = node->InputAt(2);

  WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
      node, object, value, state, access.write_barrier_kind);
  DCHECK(!access.machine_type.IsMapWord());
  MachineRepresentation rep = access.machine_type.representation();
  StoreRepresentation store_rep(rep, write_barrier_kind);
  const Operator* store_op = ElementSizeInBytes(rep) > kTaggedSize &&
                                     !machine()->UnalignedStoreSupported(rep)
                                 ? machine()->UnalignedStore(rep)
                                 : machine()->Store(store_rep);
  NodeProperties::ChangeOp(node, store_op);
  return Changed(node);
}

Reduction MemoryLowering::ReduceStoreElement(Node* node,
                                             AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kStoreElement, node->opcode());
  ElementAccess const& access = ElementAccessOf(node->op());
  Node* object = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  node->ReplaceInput(1, ComputeIndex(access, index));
  WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
      node, object, value, state, access.write_barrier_kind);
  NodeProperties::ChangeOp(
      node, machine()->Store(StoreRepresentation(
                access.machine_type.representation(), write_barrier_kind)));
  return Changed(node);
}

Reduction MemoryLowering::ReduceStoreField(Node* node,
                                           AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
  FieldAccess const& access = FieldAccessOf(node->op());
  // External pointers must never be stored by optimized code when the sandbox
  // is turned on.
  DCHECK(!access.type.Is(Type::ExternalPointer()) || !V8_ENABLE_SANDBOX_BOOL);
  // SandboxedPointers are not currently stored by optimized code.
  DCHECK(!access.type.Is(Type::SandboxedPointer()));
  // Bounded size fields are not currently stored by optimized code.
  DCHECK(!access.is_bounded_size_access);
  MachineType machine_type = access.machine_type;
  Node* object = node->InputAt(0);
  Node* value = node->InputAt(1);

  Node* effect = NodeProperties::GetEffectInput(node);
  Node* control = NodeProperties::GetControlInput(node);
  __ InitializeEffectControl(effect, control);

  WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
      node, object, value, state, access.write_barrier_kind);
  Node* offset = __ IntPtrConstant(access.offset - access.tag());
  node->InsertInput(graph_zone(), 1, offset);

  if (machine_type.IsMapWord()) {
    machine_type = MachineType::TaggedPointer();
#ifdef V8_MAP_PACKING
    Node* mapword = __ PackMapWord(TNode<Map>::UncheckedCast(value));
    node->ReplaceInput(2, mapword);
#endif
  }
  if (machine_type.representation() ==
      MachineRepresentation::kIndirectPointer) {
    // Indirect pointer stores require knowledge of the indirect pointer tag of
    // the field. This is technically only required for stores that need a
    // write barrier, but currently we track the tag for all such stores.
    DCHECK_NE(access.indirect_pointer_tag, kIndirectPointerNullTag);
    Node* tag = __ IntPtrConstant(access.indirect_pointer_tag);
    node->InsertInput(graph_zone(), 3, tag);
    NodeProperties::ChangeOp(
        node, machine()->StoreIndirectPointer(write_barrier_kind));
  } else {
    NodeProperties::ChangeOp(
        node, machine()->Store(StoreRepresentation(
                  machine_type.representation(), write_barrier_kind)));
  }
  return Changed(node);
}

Reduction MemoryLowering::ReduceStore(Node* node,
                                      AllocationState const* state) {
  DCHECK_EQ(IrOpcode::kStore, node->opcode());
  StoreRepresentation representation = StoreRepresentationOf(node->op());
  Node* object = node->InputAt(0);
  Node* value = node->InputAt(2);
  WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
      node, object, value, state, representation.write_barrier_kind());
  if (write_barrier_kind != representation.write_barrier_kind()) {
    NodeProperties::ChangeOp(
        node, machine()->Store(StoreRepresentation(
                  representation.representation(), write_barrier_kind)));
    return Changed(node);
  }
  return NoChange();
}

Node* MemoryLowering::ComputeIndex(ElementAccess const& access, Node* index) {
  int const element_size_shift =
      ElementSizeLog2Of(access.machine_type.representation());
  if (element_size_shift) {
    index = __ WordShl(index, __ IntPtrConstant(element_size_shift));
  }
  int const fixed_offset = access.header_size - access.tag();
  if (fixed_offset) {
    index = __ IntAdd(index, __ IntPtrConstant(fixed_offset));
  }
  return index;
}
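// Example (illustrative, assuming 8-byte elements so element_size_shift == 3,
// header_size == 16 and a tagged base with access.tag() == kHeapObjectTag):
// ComputeIndex() turns index 3 into the byte offset (3 << 3) + (16 - 1) == 39
// relative to the tagged object pointer.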

#undef __

namespace {

bool ValueNeedsWriteBarrier(Node* value, Isolate* isolate) {
  switch (value->opcode()) {
    case IrOpcode::kBitcastWordToTaggedSigned:
      return false;
    case IrOpcode::kHeapConstant: {
      RootIndex root_index;
      if (isolate->roots_table().IsRootHandle(HeapConstantOf(value->op()),
                                              &root_index) &&
          RootsTable::IsImmortalImmovable(root_index)) {
        return false;
      }
      break;
    }
    default:
      break;
  }
  return true;
}

}  // namespace

Reduction MemoryLowering::ReduceAllocateRaw(Node* node) {
  DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
  const AllocateParameters& allocation = AllocateParametersOf(node->op());
  return ReduceAllocateRaw(node, allocation.allocation_type(), nullptr);
}

WriteBarrierKind MemoryLowering::ComputeWriteBarrierKind(
    Node* node, Node* object, Node* value, AllocationState const* state,
    WriteBarrierKind write_barrier_kind) {
  if (state && state->IsYoungGenerationAllocation() &&
      state->group()->Contains(object)) {
    write_barrier_kind = kNoWriteBarrier;
  }
  if (!ValueNeedsWriteBarrier(value, isolate())) {
    write_barrier_kind = kNoWriteBarrier;
  }
  if (v8_flags.disable_write_barriers) {
    write_barrier_kind = kNoWriteBarrier;
  }
  if (write_barrier_kind == WriteBarrierKind::kAssertNoWriteBarrier) {
    write_barrier_assert_failed_(node, object, function_debug_name_, zone());
  }
  return write_barrier_kind;
}
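// Example (illustrative): a store of a Smi (kBitcastWordToTaggedSigned), a
// store of an immortal immovable root constant, or a store into an object that
// was just allocated in the current young-generation allocation group is
// downgraded to kNoWriteBarrier by ComputeWriteBarrierKind() above.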

MemoryLowering::AllocationGroup::AllocationGroup(Node* node,
                                                 AllocationType allocation,
                                                 Zone* zone)
    : node_ids_(zone),
      allocation_(CheckAllocationType(allocation)),
      size_(nullptr) {
  node_ids_.insert(node->id());
}

MemoryLowering::AllocationGroup::AllocationGroup(Node* node,
                                                 AllocationType allocation,
                                                 Node* size, Zone* zone)
    : node_ids_(zone),
      allocation_(CheckAllocationType(allocation)),
      size_(size) {
  node_ids_.insert(node->id());
}

void MemoryLowering::AllocationGroup::Add(Node* node) {
  node_ids_.insert(node->id());
}

bool MemoryLowering::AllocationGroup::Contains(Node* node) const {
  // Additions should stay within the same allocated object, so it's safe to
  // ignore them.
  while (node_ids_.find(node->id()) == node_ids_.end()) {
    switch (node->opcode()) {
      case IrOpcode::kBitcastTaggedToWord:
      case IrOpcode::kBitcastWordToTagged:
      case IrOpcode::kInt32Add:
      case IrOpcode::kInt64Add:
        node = NodeProperties::GetValueInput(node, 0);
        break;
      default:
        return false;
    }
  }
  return true;
}

MemoryLowering::AllocationState::AllocationState()
    : group_(nullptr),
      size_(std::numeric_limits<int>::max()),
      top_(nullptr),
      effect_(nullptr) {}

MemoryLowering::AllocationState::AllocationState(AllocationGroup* group,
                                                 Node* effect)
    : group_(group),
      size_(std::numeric_limits<int>::max()),
      top_(nullptr),
      effect_(effect) {}

MemoryLowering::AllocationState::AllocationState(AllocationGroup* group,
                                                 intptr_t size, Node* top,
                                                 Node* effect)
    : group_(group), size_(size), top_(top), effect_(effect) {}

bool MemoryLowering::AllocationState::IsYoungGenerationAllocation() const {
  return group() && group()->IsYoungGenerationAllocation();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8