author     Allan Sandfeld Jensen <allan.jensen@qt.io>  2020-10-12 14:27:29 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>  2020-10-13 09:35:20 +0000
commit     c30a6232df03e1efbd9f3b226777b07e087a1122 (patch)
tree       e992f45784689f373bcc38d1b79a239ebe17ee23 /chromium/v8/src/heap/heap.cc
parent     7b5b123ac58f58ffde0f4f6e488bcd09aa4decd3 (diff)
BASELINE: Update Chromium to 85.0.4183.140 (85-based)
Change-Id: Iaa42f4680837c57725b1344f108c0196741f6057
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/v8/src/heap/heap.cc')
-rw-r--r--  chromium/v8/src/heap/heap.cc  274
1 file changed, 157 insertions(+), 117 deletions(-)
diff --git a/chromium/v8/src/heap/heap.cc b/chromium/v8/src/heap/heap.cc
index 606ba0fe65f..4d23e084b95 100644
--- a/chromium/v8/src/heap/heap.cc
+++ b/chromium/v8/src/heap/heap.cc
@@ -6,6 +6,7 @@
#include <cinttypes>
#include <iomanip>
+#include <memory>
#include <unordered_map>
#include <unordered_set>
@@ -30,8 +31,10 @@
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/barrier.h"
+#include "src/heap/code-object-registry.h"
#include "src/heap/code-stats.h"
#include "src/heap/combined-heap.h"
+#include "src/heap/concurrent-allocator.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/finalization-registry-cleanup-task.h"
@@ -51,8 +54,9 @@
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
+#include "src/heap/paged-spaces-inl.h"
#include "src/heap/read-only-heap.h"
-#include "src/heap/remembered-set-inl.h"
+#include "src/heap/remembered-set.h"
#include "src/heap/safepoint.h"
#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
@@ -174,6 +178,10 @@ void Heap::SetSerializedGlobalProxySizes(FixedArray sizes) {
set_serialized_global_proxy_sizes(sizes);
}
+void Heap::SetBasicBlockProfilingData(Handle<ArrayList> list) {
+ set_basic_block_profiling_data(*list);
+}
+
bool Heap::GCCallbackTuple::operator==(
const Heap::GCCallbackTuple& other) const {
return other.callback == callback && other.data == data;
@@ -415,7 +423,12 @@ bool Heap::CanExpandOldGeneration(size_t size) {
return memory_allocator()->Size() + size <= MaxReserved();
}
-bool Heap::HasBeenSetUp() {
+bool Heap::CanExpandOldGenerationBackground(size_t size) {
+ if (force_oom_) return false;
+ return memory_allocator()->Size() + size <= MaxReserved();
+}
+
+bool Heap::HasBeenSetUp() const {
// We will always have a new space when the heap is set up.
return new_space_ != nullptr;
}
@@ -470,8 +483,7 @@ void Heap::PrintShortHeapStatistics() {
"Read-only space, used: %6zu KB"
", available: %6zu KB"
", committed: %6zu KB\n",
- read_only_space_->Size() / KB,
- read_only_space_->Available() / KB,
+ read_only_space_->Size() / KB, size_t{0},
read_only_space_->CommittedMemory() / KB);
PrintIsolate(isolate_,
"New space, used: %6zu KB"
@@ -522,8 +534,8 @@ void Heap::PrintShortHeapStatistics() {
"All spaces, used: %6zu KB"
", available: %6zu KB"
", committed: %6zu KB\n",
- (this->SizeOfObjects() + ro_space->SizeOfObjects()) / KB,
- (this->Available() + ro_space->Available()) / KB,
+ (this->SizeOfObjects() + ro_space->Size()) / KB,
+ (this->Available()) / KB,
(this->CommittedMemory() + ro_space->CommittedMemory()) / KB);
PrintIsolate(isolate_,
"Unmapper buffering %zu chunks of committed: %6zu KB\n",
@@ -631,7 +643,8 @@ void Heap::DumpJSONHeapStatistics(std::stringstream& stream) {
std::stringstream stream;
stream << DICT(
MEMBER("name")
- << ESCAPE(GetSpaceName(static_cast<AllocationSpace>(space_index)))
+ << ESCAPE(BaseSpace::GetSpaceName(
+ static_cast<AllocationSpace>(space_index)))
<< ","
MEMBER("size") << space_stats.space_size() << ","
MEMBER("used_size") << space_stats.space_used_size() << ","
@@ -849,7 +862,6 @@ void Heap::GarbageCollectionPrologue() {
} else {
maximum_size_scavenges_ = 0;
}
- CheckNewSpaceExpansionCriteria();
UpdateNewSpaceAllocationCounter();
if (FLAG_track_retaining_path) {
retainer_.clear();
@@ -859,6 +871,10 @@ void Heap::GarbageCollectionPrologue() {
memory_allocator()->unmapper()->PrepareForGC();
}
+void Heap::GarbageCollectionPrologueInSafepoint() {
+ CheckNewSpaceExpansionCriteria();
+}
+
size_t Heap::SizeOfObjects() {
size_t total = 0;
@@ -876,29 +892,6 @@ size_t Heap::UsedGlobalHandlesSize() {
return isolate_->global_handles()->UsedSize();
}
-// static
-const char* Heap::GetSpaceName(AllocationSpace space) {
- switch (space) {
- case NEW_SPACE:
- return "new_space";
- case OLD_SPACE:
- return "old_space";
- case MAP_SPACE:
- return "map_space";
- case CODE_SPACE:
- return "code_space";
- case LO_SPACE:
- return "large_object_space";
- case NEW_LO_SPACE:
- return "new_large_object_space";
- case CODE_LO_SPACE:
- return "code_large_object_space";
- case RO_SPACE:
- return "read_only_space";
- }
- UNREACHABLE();
-}
-
void Heap::MergeAllocationSitePretenuringFeedback(
const PretenuringFeedbackMap& local_pretenuring_feedback) {
AllocationSite site;
@@ -1651,8 +1644,12 @@ bool Heap::CollectGarbage(AllocationSpace space,
}
if (collector == MARK_COMPACTOR) {
- size_t committed_memory_after = CommittedOldGenerationMemory();
+ // Calculate used memory first, then committed memory. Following code
+ // assumes that committed >= used, which might not hold when this is
+ // calculated in the wrong order and background threads allocate
+ // in-between.
size_t used_memory_after = OldGenerationSizeOfObjects();
+ size_t committed_memory_after = CommittedOldGenerationMemory();
MemoryReducer::Event event;
event.type = MemoryReducer::kMarkCompact;
event.time_ms = MonotonicallyIncreasingTimeInMs();
@@ -1681,7 +1678,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
isolate()->CountUsage(v8::Isolate::kForcedGC);
}
- collection_barrier_.Increment();
+ collection_barrier_.CollectionPerformed();
// Start incremental marking for the next cycle. We do this only for scavenger
// to avoid a loop where mark-compact causes another mark-compact.
@@ -1750,10 +1747,12 @@ void Heap::StartIncrementalMarkingIfAllocationLimitIsReachedBackground() {
}
const size_t old_generation_space_available = OldGenerationSpaceAvailable();
- const size_t global_memory_available = GlobalMemoryAvailable();
+ const base::Optional<size_t> global_memory_available =
+ GlobalMemoryAvailable();
if (old_generation_space_available < new_space_->Capacity() ||
- global_memory_available < new_space_->Capacity()) {
+ (global_memory_available &&
+ *global_memory_available < new_space_->Capacity())) {
incremental_marking()->incremental_marking_job()->ScheduleTask(this);
}
}
@@ -1955,6 +1954,9 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
#else
if (space == NEW_SPACE) {
allocation = new_space()->AllocateRawUnaligned(size);
+ } else if (space == RO_SPACE) {
+ allocation = read_only_space()->AllocateRaw(
+ size, AllocationAlignment::kWordAligned);
} else {
// The deserializer will update the skip list.
allocation = paged_space(space)->AllocateRawUnaligned(size);
@@ -2013,21 +2015,29 @@ void Heap::EnsureFromSpaceIsCommitted() {
FatalProcessOutOfMemory("Committing semi space failed.");
}
-void Heap::CollectionBarrier::Increment() {
+void Heap::CollectionBarrier::CollectionPerformed() {
base::MutexGuard guard(&mutex_);
- requested_ = false;
+ gc_requested_ = false;
+ cond_.NotifyAll();
+}
+
+void Heap::CollectionBarrier::ShutdownRequested() {
+ base::MutexGuard guard(&mutex_);
+ shutdown_requested_ = true;
cond_.NotifyAll();
}
void Heap::CollectionBarrier::Wait() {
base::MutexGuard guard(&mutex_);
- if (!requested_) {
+ if (shutdown_requested_) return;
+
+ if (!gc_requested_) {
heap_->MemoryPressureNotification(MemoryPressureLevel::kCritical, false);
- requested_ = true;
+ gc_requested_ = true;
}
- while (requested_) {
+ while (gc_requested_ && !shutdown_requested_) {
cond_.Wait(&mutex_);
}
}
@@ -2062,9 +2072,6 @@ size_t Heap::PerformGarbageCollection(
base::Optional<SafepointScope> optional_safepoint_scope;
if (FLAG_local_heaps) {
optional_safepoint_scope.emplace(this);
- // Fill and reset all LABs
- safepoint()->IterateLocalHeaps(
- [](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
@@ -2073,6 +2080,8 @@ size_t Heap::PerformGarbageCollection(
#endif
tracer()->StartInSafepoint();
+ GarbageCollectionPrologueInSafepoint();
+
EnsureFromSpaceIsCommitted();
size_t start_young_generation_size =
@@ -3000,10 +3009,12 @@ HeapObject CreateFillerObjectAtImpl(ReadOnlyRoots roots, Address addr, int size,
#ifdef DEBUG
void VerifyNoNeedToClearSlots(Address start, Address end) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(start);
+ BasicMemoryChunk* basic_chunk = BasicMemoryChunk::FromAddress(start);
+ if (basic_chunk->InReadOnlySpace()) return;
+ MemoryChunk* chunk = static_cast<MemoryChunk*>(basic_chunk);
// TODO(ulan): Support verification of large pages.
if (chunk->InYoungGeneration() || chunk->IsLargePage()) return;
- Space* space = chunk->owner();
+ BaseSpace* space = chunk->owner();
if (static_cast<PagedSpace*>(space)->is_off_thread_space()) return;
space->heap()->VerifySlotRangeHasNoRecordedSlots(start, end);
}
@@ -3024,6 +3035,13 @@ HeapObject Heap::CreateFillerObjectAt(ReadOnlyRoots roots, Address addr,
return filler;
}
+void Heap::CreateFillerObjectAtBackground(
+ Address addr, int size, ClearFreedMemoryMode clear_memory_mode) {
+ CreateFillerObjectAtImpl(ReadOnlyRoots(this), addr, size, clear_memory_mode);
+ // Do not verify whether slots are cleared here: the concurrent sweeper is not
+ // allowed to access the main thread's remembered set.
+}
+
HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
ClearRecordedSlots clear_slots_mode) {
if (size == 0) return HeapObject();
@@ -3059,7 +3077,7 @@ bool Heap::InOffThreadSpace(HeapObject heap_object) {
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
return false; // currently unsupported
#else
- Space* owner = MemoryChunk::FromHeapObject(heap_object)->owner();
+ BaseSpace* owner = BasicMemoryChunk::FromHeapObject(heap_object)->owner();
if (owner->identity() == OLD_SPACE) {
// TODO(leszeks): Should we exclude compaction spaces here?
return static_cast<PagedSpace*>(owner)->is_off_thread_space();
@@ -3078,12 +3096,12 @@ bool Heap::IsImmovable(HeapObject object) {
return true;
}
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
return chunk->NeverEvacuate() || IsLargeObject(object);
}
bool Heap::IsLargeObject(HeapObject object) {
- return MemoryChunk::FromHeapObject(object)->IsLargePage();
+ return BasicMemoryChunk::FromHeapObject(object)->IsLargePage();
}
#ifdef ENABLE_SLOW_DCHECKS
@@ -3112,8 +3130,9 @@ class LeftTrimmerVerifierRootVisitor : public RootVisitor {
namespace {
bool MayContainRecordedSlots(HeapObject object) {
// New space object do not have recorded slots.
- if (MemoryChunk::FromHeapObject(object)->InYoungGeneration()) return false;
- // Whitelist objects that definitely do not have pointers.
+ if (BasicMemoryChunk::FromHeapObject(object)->InYoungGeneration())
+ return false;
+ // Allowlist objects that definitely do not have pointers.
if (object.IsByteArray() || object.IsFixedDoubleArray()) return false;
// Conservatively return true for other objects.
return true;
@@ -3225,6 +3244,7 @@ FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
if (FLAG_enable_slow_asserts) {
// Make sure the stack or other roots (e.g., Handles) don't contain pointers
// to the original FixedArray (which is now the filler object).
+ SafepointScope scope(this);
LeftTrimmerVerifierRootVisitor root_visitor(object);
ReadOnlyRoots(this).Iterate(&root_visitor);
IterateRoots(&root_visitor, {});
@@ -3335,6 +3355,15 @@ void Heap::CreateFillerForArray(T object, int elements_to_trim,
void Heap::MakeHeapIterable() {
mark_compact_collector()->EnsureSweepingCompleted();
+
+ MakeLocalHeapLabsIterable();
+}
+
+void Heap::MakeLocalHeapLabsIterable() {
+ if (!FLAG_local_heaps) return;
+ safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
+ local_heap->MakeLinearAllocationAreaIterable();
+ });
}
namespace {
@@ -4079,7 +4108,7 @@ const char* Heap::GarbageCollectionReasonToString(
UNREACHABLE();
}
-bool Heap::Contains(HeapObject value) {
+bool Heap::Contains(HeapObject value) const {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
return true;
}
@@ -4096,7 +4125,7 @@ bool Heap::Contains(HeapObject value) {
new_lo_space_->Contains(value));
}
-bool Heap::InSpace(HeapObject value, AllocationSpace space) {
+bool Heap::InSpace(HeapObject value, AllocationSpace space) const {
if (memory_allocator()->IsOutsideAllocatedSpace(value.address())) {
return false;
}
@@ -4123,7 +4152,7 @@ bool Heap::InSpace(HeapObject value, AllocationSpace space) {
UNREACHABLE();
}
-bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
+bool Heap::InSpaceSlow(Address addr, AllocationSpace space) const {
if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
return false;
}
@@ -4167,32 +4196,13 @@ bool Heap::IsValidAllocationSpace(AllocationSpace space) {
}
#ifdef VERIFY_HEAP
-class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
- public:
- explicit VerifyReadOnlyPointersVisitor(Heap* heap)
- : VerifyPointersVisitor(heap) {}
-
- protected:
- void VerifyPointers(HeapObject host, MaybeObjectSlot start,
- MaybeObjectSlot end) override {
- if (!host.is_null()) {
- CHECK(ReadOnlyHeap::Contains(host.map()));
- }
- VerifyPointersVisitor::VerifyPointers(host, start, end);
-
- for (MaybeObjectSlot current = start; current < end; ++current) {
- HeapObject heap_object;
- if ((*current)->GetHeapObject(&heap_object)) {
- CHECK(ReadOnlyHeap::Contains(heap_object));
- }
- }
- }
-};
-
void Heap::Verify() {
CHECK(HasBeenSetUp());
+ SafepointScope safepoint_scope(this);
HandleScope scope(isolate());
+ MakeLocalHeapLabsIterable();
+
// We have to wait here for the sweeper threads to have an iterable heap.
mark_compact_collector()->EnsureSweepingCompleted();
array_buffer_sweeper()->EnsureFinished();
@@ -4225,8 +4235,7 @@ void Heap::Verify() {
void Heap::VerifyReadOnlyHeap() {
CHECK(!read_only_space_->writable());
- VerifyReadOnlyPointersVisitor read_only_visitor(this);
- read_only_space_->Verify(isolate(), &read_only_visitor);
+ read_only_space_->Verify(isolate());
}
class SlotVerifyingVisitor : public ObjectVisitor {
@@ -4311,21 +4320,20 @@ class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
void VisitEphemeron(HeapObject host, int index, ObjectSlot key,
ObjectSlot target) override {
VisitPointer(host, target);
- if (FLAG_minor_mc) {
- VisitPointer(host, target);
- } else {
- // Keys are handled separately and should never appear in this set.
- CHECK(!InUntypedSet(key));
- Object k = *key;
- if (!ObjectInYoungGeneration(host) && ObjectInYoungGeneration(k)) {
- EphemeronHashTable table = EphemeronHashTable::cast(host);
- auto it = ephemeron_remembered_set_->find(table);
- CHECK(it != ephemeron_remembered_set_->end());
- int slot_index =
- EphemeronHashTable::SlotToIndex(table.address(), key.address());
- InternalIndex entry = EphemeronHashTable::IndexToEntry(slot_index);
- CHECK(it->second.find(entry.as_int()) != it->second.end());
- }
+#ifdef ENABLE_MINOR_MC
+ if (FLAG_minor_mc) return VisitPointer(host, target);
+#endif
+ // Keys are handled separately and should never appear in this set.
+ CHECK(!InUntypedSet(key));
+ Object k = *key;
+ if (!ObjectInYoungGeneration(host) && ObjectInYoungGeneration(k)) {
+ EphemeronHashTable table = EphemeronHashTable::cast(host);
+ auto it = ephemeron_remembered_set_->find(table);
+ CHECK(it != ephemeron_remembered_set_->end());
+ int slot_index =
+ EphemeronHashTable::SlotToIndex(table.address(), key.address());
+ InternalIndex entry = EphemeronHashTable::IndexToEntry(slot_index);
+ CHECK(it->second.find(entry.as_int()) != it->second.end());
}
}
@@ -4390,12 +4398,7 @@ void Heap::VerifyRememberedSetFor(HeapObject object) {
#ifdef DEBUG
void Heap::VerifyCountersAfterSweeping() {
- if (FLAG_local_heaps) {
- // Ensure heap is iterable
- safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
- local_heap->MakeLinearAllocationAreaIterable();
- });
- }
+ MakeLocalHeapLabsIterable();
PagedSpaceIterator spaces(this);
for (PagedSpace* space = spaces.Next(); space != nullptr;
@@ -4768,6 +4771,10 @@ void Heap::ConfigureHeap(const v8::ResourceConstraints& constraints) {
RoundDown<Page::kPageSize>(initial_semispace_size_);
}
+ if (FLAG_lazy_new_space_shrinking) {
+ initial_semispace_size_ = max_semi_space_size_;
+ }
+
// Initialize initial_old_space_size_.
{
initial_old_generation_size_ = kMaxInitialOldGenerationSize;
@@ -4993,6 +5000,11 @@ bool Heap::IsRetryOfFailedAllocation(LocalHeap* local_heap) {
return local_heap->allocation_failed_;
}
+void Heap::AlwaysAllocateAfterTearDownStarted() {
+ always_allocate_scope_count_++;
+ collection_barrier_.ShutdownRequested();
+}
+
Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
if (ShouldReduceMemory() || FLAG_stress_compaction) {
return Heap::HeapGrowingMode::kMinimal;
@@ -5009,12 +5021,15 @@ Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
return Heap::HeapGrowingMode::kDefault;
}
-size_t Heap::GlobalMemoryAvailable() {
- return UseGlobalMemoryScheduling()
- ? GlobalSizeOfObjects() < global_allocation_limit_
- ? global_allocation_limit_ - GlobalSizeOfObjects()
- : 0
- : new_space_->Capacity() + 1;
+base::Optional<size_t> Heap::GlobalMemoryAvailable() {
+ if (!UseGlobalMemoryScheduling()) return {};
+
+ size_t global_size = GlobalSizeOfObjects();
+
+ if (global_size < global_allocation_limit_)
+ return global_allocation_limit_ - global_size;
+
+ return 0;
}
double Heap::PercentToOldGenerationLimit() {
@@ -5097,10 +5112,12 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
}
size_t old_generation_space_available = OldGenerationSpaceAvailable();
- const size_t global_memory_available = GlobalMemoryAvailable();
+ const base::Optional<size_t> global_memory_available =
+ GlobalMemoryAvailable();
if (old_generation_space_available > new_space_->Capacity() &&
- (global_memory_available > new_space_->Capacity())) {
+ (!global_memory_available ||
+ global_memory_available > new_space_->Capacity())) {
return IncrementalMarkingLimit::kNoLimit;
}
if (ShouldOptimizeForMemoryUsage()) {
@@ -5112,7 +5129,7 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
if (old_generation_space_available == 0) {
return IncrementalMarkingLimit::kHardLimit;
}
- if (global_memory_available == 0) {
+ if (global_memory_available && *global_memory_available == 0) {
return IncrementalMarkingLimit::kHardLimit;
}
return IncrementalMarkingLimit::kSoftLimit;
@@ -5155,7 +5172,7 @@ HeapObject Heap::EnsureImmovableCode(HeapObject heap_object, int object_size) {
if (!Heap::IsImmovable(heap_object)) {
if (isolate()->serializer_enabled() ||
code_space_->first_page()->Contains(heap_object.address())) {
- MemoryChunk::FromHeapObject(heap_object)->MarkNeverEvacuate();
+ BasicMemoryChunk::FromHeapObject(heap_object)->MarkNeverEvacuate();
} else {
// Discard the first code allocation, which was on a page where it could
// be moved.
@@ -5298,13 +5315,15 @@ void Heap::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
DCHECK_NOT_NULL(ro_heap);
DCHECK_IMPLIES(read_only_space_ != nullptr,
read_only_space_ == ro_heap->read_only_space());
- space_[RO_SPACE] = read_only_space_ = ro_heap->read_only_space();
+ space_[RO_SPACE] = nullptr;
+ read_only_space_ = ro_heap->read_only_space();
}
void Heap::ReplaceReadOnlySpace(SharedReadOnlySpace* space) {
CHECK(V8_SHARED_RO_HEAP_BOOL);
delete read_only_space_;
- space_[RO_SPACE] = read_only_space_ = space;
+
+ read_only_space_ = space;
}
void Heap::SetUpSpaces() {
@@ -5432,6 +5451,10 @@ void Heap::NotifyDeserializationComplete() {
#endif // DEBUG
}
+ if (FLAG_stress_concurrent_allocation) {
+ StressConcurrentAllocatorTask::Schedule(isolate());
+ }
+
deserialization_complete_ = true;
}
@@ -5443,7 +5466,15 @@ void Heap::NotifyBootstrapComplete() {
}
}
-void Heap::NotifyOldGenerationExpansion() {
+void Heap::NotifyOldGenerationExpansion(AllocationSpace space,
+ MemoryChunk* chunk) {
+ // Pages created during bootstrapping may contain immortal immovable objects.
+ if (!deserialization_complete()) {
+ chunk->MarkNeverEvacuate();
+ }
+ if (space == CODE_SPACE || space == CODE_LO_SPACE) {
+ isolate()->AddCodeMemoryChunk(chunk);
+ }
const size_t kMemoryReducerActivationThreshold = 1 * MB;
if (old_generation_capacity_after_bootstrap_ && ms_count_ == 0 &&
OldGenerationCapacity() >= old_generation_capacity_after_bootstrap_ +
@@ -5494,6 +5525,14 @@ void Heap::RegisterExternallyReferencedObject(Address* location) {
void Heap::StartTearDown() {
SetGCState(TEAR_DOWN);
+
+ // Background threads may allocate and block until GC is performed. However
+ // this might never happen when the main thread tries to quit and doesn't
+ // process the event queue anymore. Avoid this deadlock by allowing all
+ // allocations after tear down was requested to make sure all background
+ // threads finish.
+ AlwaysAllocateAfterTearDownStarted();
+
#ifdef VERIFY_HEAP
// {StartTearDown} is called fairly early during Isolate teardown, so it's
// a good time to run heap verification (if requested), before starting to
@@ -5585,7 +5624,7 @@ void Heap::TearDown() {
tracer_.reset();
isolate()->read_only_heap()->OnHeapTearDown();
- space_[RO_SPACE] = read_only_space_ = nullptr;
+ read_only_space_ = nullptr;
for (int i = FIRST_MUTABLE_SPACE; i <= LAST_MUTABLE_SPACE; i++) {
delete space_[i];
space_[i] = nullptr;
@@ -5919,14 +5958,14 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
bool SkipObject(HeapObject object) override {
if (object.IsFreeSpaceOrFiller()) return true;
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
if (reachable_.count(chunk) == 0) return true;
return reachable_[chunk]->count(object) == 0;
}
private:
bool MarkAsReachable(HeapObject object) {
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
if (reachable_.count(chunk) == 0) {
reachable_[chunk] = new std::unordered_set<HeapObject, Object::Hasher>();
}
@@ -6008,7 +6047,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
Heap* heap_;
DisallowHeapAllocation no_allocation_;
- std::unordered_map<MemoryChunk*,
+ std::unordered_map<BasicMemoryChunk*,
std::unordered_set<HeapObject, Object::Hasher>*>
reachable_;
};
@@ -6016,6 +6055,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
HeapObjectIterator::HeapObjectIterator(
Heap* heap, HeapObjectIterator::HeapObjectsFiltering filtering)
: heap_(heap),
+ safepoint_scope_(std::make_unique<SafepointScope>(heap)),
filtering_(filtering),
filter_(nullptr),
space_iterator_(nullptr),
@@ -6794,7 +6834,7 @@ bool Heap::PageFlagsAreConsistent(HeapObject object) {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
return true;
}
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
heap_internals::MemoryChunk* slim_chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
@@ -6803,7 +6843,7 @@ bool Heap::PageFlagsAreConsistent(HeapObject object) {
CHECK_EQ(chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING),
slim_chunk->IsMarking());
- AllocationSpace identity = chunk->owner_identity();
+ AllocationSpace identity = chunk->owner()->identity();
// Generation consistency.
CHECK_EQ(identity == NEW_SPACE || identity == NEW_LO_SPACE,