Diffstat (limited to 'chromium/v8/src/heap/cppgc/sweeper.cc')
-rw-r--r--  chromium/v8/src/heap/cppgc/sweeper.cc | 479
 1 file changed, 437 insertions(+), 42 deletions(-)
diff --git a/chromium/v8/src/heap/cppgc/sweeper.cc b/chromium/v8/src/heap/cppgc/sweeper.cc
index 77d2d3c33e7..98a3117a2d4 100644
--- a/chromium/v8/src/heap/cppgc/sweeper.cc
+++ b/chromium/v8/src/heap/cppgc/sweeper.cc
@@ -4,8 +4,13 @@
#include "src/heap/cppgc/sweeper.h"
+#include <atomic>
+#include <memory>
#include <vector>
+#include "include/cppgc/platform.h"
+#include "src/base/optional.h"
+#include "src/base/platform/mutex.h"
#include "src/heap/cppgc/free-list.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header-inl.h"
@@ -17,12 +22,16 @@
#include "src/heap/cppgc/object-start-bitmap.h"
#include "src/heap/cppgc/raw-heap.h"
#include "src/heap/cppgc/sanitizers.h"
+#include "src/heap/cppgc/stats-collector.h"
+#include "src/heap/cppgc/task-handle.h"
namespace cppgc {
namespace internal {
namespace {
+using v8::base::Optional;
+
class ObjectStartBitmapVerifier
: private HeapVisitor<ObjectStartBitmapVerifier> {
friend class HeapVisitor<ObjectStartBitmapVerifier>;
@@ -54,15 +63,126 @@ class ObjectStartBitmapVerifier
HeapObjectHeader* prev_ = nullptr;
};
+template <typename T>
+class ThreadSafeStack {
+ public:
+ ThreadSafeStack() = default;
+
+ void Push(T t) {
+ v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
+ vector_.push_back(std::move(t));
+ }
+
+ Optional<T> Pop() {
+ v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
+ if (vector_.empty()) return v8::base::nullopt;
+ T top = std::move(vector_.back());
+ vector_.pop_back();
+ // The std::move is redundant on newer compilers but works around a gcc-7
+ // bug (missed implicit move into the Optional<T> return type).
+ return std::move(top);
+ }
+
+ template <typename It>
+ void Insert(It begin, It end) {
+ v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
+ vector_.insert(vector_.end(), begin, end);
+ }
+
+ bool IsEmpty() const {
+ v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
+ return vector_.empty();
+ }
+
+ private:
+ std::vector<T> vector_;
+ mutable v8::base::Mutex mutex_;
+};
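
Aside: this is a coarse-grained locked stack rather than a lock-free structure; pages are large units of work, so contention on the single mutex is negligible. A minimal standalone sketch of the same shape, using std::mutex and std::optional in place of the v8::base equivalents (MiniThreadSafeStack and the standard-library substitution are illustrative, not part of the tree):

```cpp
#include <mutex>
#include <optional>
#include <utility>
#include <vector>

template <typename T>
class MiniThreadSafeStack {
 public:
  void Push(T t) {
    std::lock_guard<std::mutex> lock(mutex_);
    vector_.push_back(std::move(t));
  }

  std::optional<T> Pop() {
    std::lock_guard<std::mutex> lock(mutex_);
    if (vector_.empty()) return std::nullopt;
    T top = std::move(vector_.back());
    vector_.pop_back();
    return std::move(top);  // explicit move, as in the gcc-7 note above
  }

  template <typename It>
  void Insert(It begin, It end) {
    std::lock_guard<std::mutex> lock(mutex_);
    vector_.insert(vector_.end(), begin, end);
  }

  bool IsEmpty() const {
    std::lock_guard<std::mutex> lock(mutex_);
    return vector_.empty();
  }

 private:
  std::vector<T> vector_;
  mutable std::mutex mutex_;
};
```

PrepareForSweepVisitor (further down) bulk-loads pages with Insert(); the mutator and concurrent sweepers then compete to Pop() them, which is what makes it safe for either thread to steal remaining work.
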
+
struct SpaceState {
- BaseSpace::Pages unswept_pages;
+ struct SweptPageState {
+ BasePage* page = nullptr;
+ std::vector<HeapObjectHeader*> unfinalized_objects;
+ FreeList cached_free_list;
+ std::vector<FreeList::Block> unfinalized_free_list;
+ bool is_empty = false;
+ };
+
+ ThreadSafeStack<BasePage*> unswept_pages;
+ ThreadSafeStack<SweptPageState> swept_unfinalized_pages;
};
+
using SpaceStates = std::vector<SpaceState>;
-bool SweepNormalPage(NormalPage* page) {
+void StickyUnmark(HeapObjectHeader* header) {
+ // Young generation in Oilpan uses sticky mark bits.
+#if !defined(CPPGC_YOUNG_GENERATION)
+ header->Unmark<HeapObjectHeader::AccessMode::kAtomic>();
+#endif
+}
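
With CPPGC_YOUNG_GENERATION enabled, mark bits are "sticky": they survive sweeping so that objects which lived through a full GC are implicitly old for later minor GCs; only non-generational builds clear the bit here. A toy illustration of the compile-time switch (ToyHeader is a hypothetical stand-in for HeapObjectHeader):

```cpp
#include <atomic>

// Hypothetical one-bit header, for illustration only.
struct ToyHeader {
  std::atomic<bool> marked{false};
};

void ToyStickyUnmark(ToyHeader* header) {
#if !defined(CPPGC_YOUNG_GENERATION)
  // Non-generational builds: clear the bit so the next marking cycle
  // starts from a fully unmarked heap.
  header->marked.store(false, std::memory_order_relaxed);
#else
  // Generational builds: keep the bit set ("sticky"); a minor GC then
  // treats still-marked objects as old without re-tracing them.
  (void)header;
#endif
}
```
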
+
+// Builder that finalizes objects and adds freelist entries right away.
+class InlinedFinalizationBuilder final {
+ public:
+ using ResultType = bool;
+
+ explicit InlinedFinalizationBuilder(BasePage* page) : page_(page) {}
+
+ void AddFinalizer(HeapObjectHeader* header, size_t size) {
+ header->Finalize();
+ SET_MEMORY_INACCESSIBLE(header, size);
+ }
+
+ void AddFreeListEntry(Address start, size_t size) {
+ auto* space = NormalPageSpace::From(page_->space());
+ space->free_list().Add({start, size});
+ }
+
+ ResultType GetResult(bool is_empty) { return is_empty; }
+
+ private:
+ BasePage* page_;
+};
+
+// Builder that produces results for deferred processing.
+class DeferredFinalizationBuilder final {
+ public:
+ using ResultType = SpaceState::SweptPageState;
+
+ explicit DeferredFinalizationBuilder(BasePage* page) { result_.page = page; }
+
+ void AddFinalizer(HeapObjectHeader* header, size_t size) {
+ if (header->IsFinalizable()) {
+ result_.unfinalized_objects.push_back({header});
+ found_finalizer_ = true;
+ } else {
+ SET_MEMORY_INACCESSIBLE(header, size);
+ }
+ }
+
+ void AddFreeListEntry(Address start, size_t size) {
+ if (found_finalizer_) {
+ result_.unfinalized_free_list.push_back({start, size});
+ } else {
+ result_.cached_free_list.Add({start, size});
+ }
+ found_finalizer_ = false;
+ }
+
+ ResultType&& GetResult(bool is_empty) {
+ result_.is_empty = is_empty;
+ return std::move(result_);
+ }
+
+ private:
+ ResultType result_;
+ bool found_finalizer_ = false;
+};
+
+template <typename FinalizationBuilder>
+typename FinalizationBuilder::ResultType SweepNormalPage(NormalPage* page) {
constexpr auto kAtomicAccess = HeapObjectHeader::AccessMode::kAtomic;
+ FinalizationBuilder builder(page);
- auto* space = NormalPageSpace::From(page->space());
ObjectStartBitmap& bitmap = page->object_start_bitmap();
bitmap.Clear();
@@ -79,18 +199,18 @@ bool SweepNormalPage(NormalPage* page) {
}
// Check if object is not marked (not reachable).
if (!header->IsMarked<kAtomicAccess>()) {
- header->Finalize();
- SET_MEMORY_INACCESSIBLE(header, size);
+ builder.AddFinalizer(header, size);
begin += size;
continue;
}
// The object is alive.
const Address header_address = reinterpret_cast<Address>(header);
if (start_of_gap != header_address) {
- space->AddToFreeList(start_of_gap,
- static_cast<size_t>(header_address - start_of_gap));
+ builder.AddFreeListEntry(
+ start_of_gap, static_cast<size_t>(header_address - start_of_gap));
+ bitmap.SetBit(start_of_gap);
}
- header->Unmark<kAtomicAccess>();
+ StickyUnmark(header);
bitmap.SetBit(begin);
begin += size;
start_of_gap = begin;
@@ -98,56 +218,150 @@ bool SweepNormalPage(NormalPage* page) {
if (start_of_gap != page->PayloadStart() &&
start_of_gap != page->PayloadEnd()) {
- space->AddToFreeList(
+ builder.AddFreeListEntry(
start_of_gap, static_cast<size_t>(page->PayloadEnd() - start_of_gap));
+ bitmap.SetBit(start_of_gap);
}
const bool is_empty = (start_of_gap == page->PayloadStart());
- return is_empty;
+ return builder.GetResult(is_empty);
}
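
The two builders above are compile-time policies over one sweep loop: SweepNormalPage<InlinedFinalizationBuilder> runs on the mutator thread and finalizes dead objects and writes free-list entries immediately, while SweepNormalPage<DeferredFinalizationBuilder> runs concurrently and may only record that work, since finalizers are user callbacks that must execute on the mutator thread. A reduced sketch of the pattern with simplified stand-in types (all names here are illustrative):

```cpp
#include <cstddef>
#include <utility>
#include <vector>

// Inline policy: act immediately; the result is just "was the page empty".
struct ToyInlineBuilder {
  using ResultType = bool;
  void AddFinalizer(int /*obj*/, std::size_t /*size*/) { ++finalized; }
  void AddFreeListEntry(std::size_t /*size*/) { ++free_entries; }
  ResultType GetResult(bool is_empty) { return is_empty; }
  int finalized = 0;
  int free_entries = 0;
};

// Deferred policy: queue everything; the result carries the queued work.
struct ToyDeferredBuilder {
  struct Result {
    std::vector<int> unfinalized;
    std::vector<std::size_t> free_blocks;
    bool is_empty = false;
  };
  using ResultType = Result;
  void AddFinalizer(int obj, std::size_t /*size*/) {
    result_.unfinalized.push_back(obj);
  }
  void AddFreeListEntry(std::size_t size) {
    result_.free_blocks.push_back(size);
  }
  ResultType GetResult(bool is_empty) {
    result_.is_empty = is_empty;
    return std::move(result_);
  }
  Result result_;
};

// The sweep loop is written once; the builder decides inline vs. deferred.
template <typename Builder>
typename Builder::ResultType ToySweepPage(const std::vector<bool>& marked) {
  Builder builder;
  bool any_live = false;
  for (std::size_t i = 0; i < marked.size(); ++i) {
    if (!marked[i]) {
      builder.AddFinalizer(static_cast<int>(i), 1);  // dead slot
      builder.AddFreeListEntry(1);                   // becomes free space
    } else {
      any_live = true;
    }
  }
  return builder.GetResult(!any_live);
}
```
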
-// This visitor:
-// - resets linear allocation buffers and clears free lists for all spaces;
-// - moves all Heap pages to local Sweeper's state (SpaceStates).
-class PrepareForSweepVisitor final
- : public HeapVisitor<PrepareForSweepVisitor> {
+// SweepFinalizer is responsible for heap/space/page finalization. Finalization
+// is defined as a step following concurrent sweeping which:
+// - calls finalizers;
+// - returns (unmaps) empty pages;
+// - merges freelists into the space's freelist.
+class SweepFinalizer final {
public:
- explicit PrepareForSweepVisitor(SpaceStates* states) : states_(states) {}
+ explicit SweepFinalizer(cppgc::Platform* platform) : platform_(platform) {}
- bool VisitNormalPageSpace(NormalPageSpace* space) {
- space->ResetLinearAllocationBuffer();
- space->free_list().Clear();
- (*states_)[space->index()].unswept_pages = space->RemoveAllPages();
- return true;
+ void FinalizeHeap(SpaceStates* space_states) {
+ for (SpaceState& space_state : *space_states) {
+ FinalizeSpace(&space_state);
+ }
}
- bool VisitLargePageSpace(LargePageSpace* space) {
- (*states_)[space->index()].unswept_pages = space->RemoveAllPages();
+ void FinalizeSpace(SpaceState* space_state) {
+ while (auto page_state = space_state->swept_unfinalized_pages.Pop()) {
+ FinalizePage(&*page_state);
+ }
+ }
+
+ bool FinalizeSpaceWithDeadline(SpaceState* space_state,
+ double deadline_in_seconds) {
+ DCHECK(platform_);
+ static constexpr size_t kDeadlineCheckInterval = 8;
+ size_t page_count = 1;
+
+ while (auto page_state = space_state->swept_unfinalized_pages.Pop()) {
+ FinalizePage(&*page_state);
+
+ if (page_count % kDeadlineCheckInterval == 0 &&
+ deadline_in_seconds <= platform_->MonotonicallyIncreasingTime()) {
+ return false;
+ }
+
+ page_count++;
+ }
return true;
}
+ void FinalizePage(SpaceState::SweptPageState* page_state) {
+ DCHECK(page_state);
+ DCHECK(page_state->page);
+ BasePage* page = page_state->page;
+
+ // Call finalizers.
+ for (HeapObjectHeader* object : page_state->unfinalized_objects) {
+ object->Finalize();
+ }
+
+ // Unmap page if empty.
+ if (page_state->is_empty) {
+ BasePage::Destroy(page);
+ return;
+ }
+
+ DCHECK(!page->is_large());
+
+ // Merge free-list entries that were already written during concurrent
+ // sweeping.
+ FreeList& space_freelist =
+ NormalPageSpace::From(page->space())->free_list();
+ space_freelist.Append(std::move(page_state->cached_free_list));
+
+ // Merge entries that were only recorded because their memory overlapped
+ // unfinalized objects; it is safe to write them now.
+ for (auto entry : page_state->unfinalized_free_list) {
+ space_freelist.Add(std::move(entry));
+ }
+
+ // Add the page to the space.
+ page->space()->AddPage(page);
+ }
+
private:
- SpaceStates* states_;
+ cppgc::Platform* platform_;
};
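
The split between cached_free_list and unfinalized_free_list exists because FreeList::Add() writes an entry header into the block itself; a gap that still contains objects awaiting finalization must not be overwritten until their finalizers have run, so the concurrent sweeper only records such blocks. A reduced model of FinalizePage() under that reading (types simplified, names illustrative):

```cpp
#include <cstddef>
#include <functional>
#include <vector>

// Simplified stand-ins for SweptPageState and the space free list.
struct ToySweptPageState {
  std::vector<int> unfinalized_objects;            // finalizers still to run
  std::vector<std::size_t> cached_free_list;       // entries already written
  std::vector<std::size_t> unfinalized_free_list;  // recorded, not written
  bool is_empty = false;
};

// Returns false if the page was "unmapped" instead of reused.
bool ToyFinalizePage(ToySweptPageState* state,
                     const std::function<void(int)>& run_finalizer,
                     std::vector<std::size_t>* space_free_list) {
  // 1. Run finalizers first: they are user callbacks and must see the
  //    dead objects' memory intact.
  for (int obj : state->unfinalized_objects) run_finalizer(obj);

  // 2. Fully empty pages go back to the allocator.
  if (state->is_empty) return false;

  // 3. Merge entries the concurrent sweeper already wrote into page
  //    memory, then the ones it could only record: those blocks
  //    overlapped unfinalized objects and are safe to touch only now.
  space_free_list->insert(space_free_list->end(),
                          state->cached_free_list.begin(),
                          state->cached_free_list.end());
  space_free_list->insert(space_free_list->end(),
                          state->unfinalized_free_list.begin(),
                          state->unfinalized_free_list.end());
  return true;
}
```
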
-class MutatorThreadSweepVisitor final
- : private HeapVisitor<MutatorThreadSweepVisitor> {
- friend class HeapVisitor<MutatorThreadSweepVisitor>;
+class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
+ friend class HeapVisitor<MutatorThreadSweeper>;
public:
- explicit MutatorThreadSweepVisitor(SpaceStates* space_states) {
- for (SpaceState& state : *space_states) {
- for (BasePage* page : state.unswept_pages) {
- Traverse(page);
+ explicit MutatorThreadSweeper(SpaceStates* states, cppgc::Platform* platform)
+ : states_(states), platform_(platform) {}
+
+ void Sweep() {
+ for (SpaceState& state : *states_) {
+ while (auto page = state.unswept_pages.Pop()) {
+ Traverse(*page);
+ }
+ }
+ }
+
+ bool SweepWithDeadline(double deadline_in_seconds) {
+ DCHECK(platform_);
+ static constexpr double kSlackInSeconds = 0.001;
+ for (SpaceState& state : *states_) {
+ // FinalizeSpaceWithDeadline() and SweepSpaceWithDeadline() only check
+ // the deadline once every kDeadlineCheckInterval (8) pages, so leave a
+ // small slack for safety.
+ const double remaining_budget = deadline_in_seconds - kSlackInSeconds -
+ platform_->MonotonicallyIncreasingTime();
+ if (remaining_budget <= 0.) return false;
+
+ // First, prioritize finalization of pages that were swept concurrently.
+ SweepFinalizer finalizer(platform_);
+ if (!finalizer.FinalizeSpaceWithDeadline(&state, deadline_in_seconds)) {
+ return false;
+ }
+
+ // Help out the concurrent sweeper.
+ if (!SweepSpaceWithDeadline(&state, deadline_in_seconds)) {
+ return false;
}
- state.unswept_pages.clear();
}
+ return true;
}
private:
+ bool SweepSpaceWithDeadline(SpaceState* state, double deadline_in_seconds) {
+ static constexpr size_t kDeadlineCheckInterval = 8;
+ size_t page_count = 1;
+ while (auto page = state->unswept_pages.Pop()) {
+ Traverse(*page);
+ if (page_count % kDeadlineCheckInterval == 0 &&
+ deadline_in_seconds <= platform_->MonotonicallyIncreasingTime()) {
+ return false;
+ }
+ page_count++;
+ }
+
+ return true;
+ }
+
bool VisitNormalPage(NormalPage* page) {
- const bool is_empty = SweepNormalPage(page);
+ const bool is_empty = SweepNormalPage<InlinedFinalizationBuilder>(page);
if (is_empty) {
NormalPage::Destroy(page);
} else {
@@ -157,23 +371,119 @@ class MutatorThreadSweepVisitor final
}
bool VisitLargePage(LargePage* page) {
- if (page->ObjectHeader()->IsMarked()) {
+ HeapObjectHeader* header = page->ObjectHeader();
+ if (header->IsMarked()) {
+ StickyUnmark(header);
page->space()->AddPage(page);
} else {
- page->ObjectHeader()->Finalize();
+ header->Finalize();
LargePage::Destroy(page);
}
return true;
}
+
+ SpaceStates* states_;
+ cppgc::Platform* platform_;
+};
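
Both deadline-bounded loops share one shape: the clock is read only every kDeadlineCheckInterval items to keep the common path cheap, and SweepWithDeadline() budgets a 1 ms slack for the items processed between checks. A generic sketch of that shape (DrainWithDeadline and its parameters are illustrative, not in-tree names):

```cpp
#include <chrono>
#include <cstddef>

// Generic form of the amortized deadline check.
// WorkSource::Pop() is assumed to return an optional-like item.
template <typename WorkSource, typename ProcessFn>
bool DrainWithDeadline(WorkSource& source, ProcessFn process,
                       std::chrono::steady_clock::time_point deadline) {
  constexpr std::size_t kInterval = 8;  // mirrors kDeadlineCheckInterval
  std::size_t count = 1;
  while (auto item = source.Pop()) {
    process(*item);
    // The clock read is the expensive part, so amortize it.
    if (count % kInterval == 0 &&
        std::chrono::steady_clock::now() >= deadline) {
      return false;  // out of budget; the caller reschedules itself
    }
    ++count;
  }
  return true;  // drained everything within the deadline
}
```
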
+
+class ConcurrentSweepTask final : public v8::JobTask,
+ private HeapVisitor<ConcurrentSweepTask> {
+ friend class HeapVisitor<ConcurrentSweepTask>;
+
+ public:
+ explicit ConcurrentSweepTask(SpaceStates* states) : states_(states) {}
+
+ void Run(v8::JobDelegate* delegate) final {
+ for (SpaceState& state : *states_) {
+ while (auto page = state.unswept_pages.Pop()) {
+ Traverse(*page);
+ if (delegate->ShouldYield()) return;
+ }
+ }
+ is_completed_.store(true, std::memory_order_relaxed);
+ }
+
+ size_t GetMaxConcurrency() const final {
+ return is_completed_.load(std::memory_order_relaxed) ? 0 : 1;
+ }
+
+ private:
+ bool VisitNormalPage(NormalPage* page) {
+ SpaceState::SweptPageState sweep_result =
+ SweepNormalPage<DeferredFinalizationBuilder>(page);
+ const size_t space_index = page->space()->index();
+ DCHECK_GT(states_->size(), space_index);
+ SpaceState& space_state = (*states_)[space_index];
+ space_state.swept_unfinalized_pages.Push(std::move(sweep_result));
+ return true;
+ }
+
+ bool VisitLargePage(LargePage* page) {
+ HeapObjectHeader* header = page->ObjectHeader();
+ if (header->IsMarked()) {
+ StickyUnmark(header);
+ page->space()->AddPage(page);
+ return true;
+ }
+ if (!header->IsFinalizable()) {
+ LargePage::Destroy(page);
+ return true;
+ }
+ const size_t space_index = page->space()->index();
+ DCHECK_GT(states_->size(), space_index);
+ SpaceState& state = (*states_)[space_index];
+ state.swept_unfinalized_pages.Push(
+ {page, {page->ObjectHeader()}, {}, {}, true});
+ return true;
+ }
+
+ SpaceStates* states_;
+ std::atomic_bool is_completed_{false};
+};
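
ConcurrentSweepTask relies on two halves of the v8::JobTask contract: Run() must poll ShouldYield() and return promptly when asked (the scheduler re-invokes it later, and the shared ThreadSafeStacks preserve progress), and GetMaxConcurrency() dropping to 0 tells the scheduler the job is finished. A minimal model of that cooperation (the Toy* types are stand-ins, not the real v8 interfaces):

```cpp
#include <atomic>
#include <cstddef>

// Reduced model of the v8::JobTask contract, trimmed to the members
// that matter here.
class ToyJobDelegate {
 public:
  virtual ~ToyJobDelegate() = default;
  virtual bool ShouldYield() = 0;  // scheduler asks the worker to bail out
};

class ToySweepJob {
 public:
  void Run(ToyJobDelegate* delegate) {
    while (PopAndSweepOnePage()) {
      // Yielding is not cancellation: the scheduler calls Run() again
      // later and the loop resumes from the shared work stacks.
      if (delegate->ShouldYield()) return;
    }
    is_completed_.store(true, std::memory_order_relaxed);
  }

  // Returning 0 tells the scheduler no further workers are needed.
  std::size_t GetMaxConcurrency() const {
    return is_completed_.load(std::memory_order_relaxed) ? 0 : 1;
  }

 private:
  bool PopAndSweepOnePage() { return false; }  // stand-in for real work
  std::atomic<bool> is_completed_{false};
};
```
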
+
+// This visitor:
+// - resets linear allocation buffers and clears free lists for all spaces;
+// - moves all Heap pages to local Sweeper's state (SpaceStates).
+class PrepareForSweepVisitor final
+ : public HeapVisitor<PrepareForSweepVisitor> {
+ public:
+ explicit PrepareForSweepVisitor(SpaceStates* states) : states_(states) {}
+
+ bool VisitNormalPageSpace(NormalPageSpace* space) {
+ DCHECK(!space->linear_allocation_buffer().size());
+ space->free_list().Clear();
+ ExtractPages(space);
+ return true;
+ }
+
+ bool VisitLargePageSpace(LargePageSpace* space) {
+ ExtractPages(space);
+ return true;
+ }
+
+ private:
+ void ExtractPages(BaseSpace* space) {
+ BaseSpace::Pages space_pages = space->RemoveAllPages();
+ (*states_)[space->index()].unswept_pages.Insert(space_pages.begin(),
+ space_pages.end());
+ }
+
+ SpaceStates* states_;
};
} // namespace
class Sweeper::SweeperImpl final {
public:
- explicit SweeperImpl(RawHeap* heap) : heap_(heap) {
- space_states_.resize(heap_->size());
- }
+ SweeperImpl(RawHeap* heap, cppgc::Platform* platform,
+ StatsCollector* stats_collector)
+ : heap_(heap),
+ stats_collector_(stats_collector),
+ space_states_(heap->size()),
+ platform_(platform),
+ foreground_task_runner_(platform_->GetForegroundTaskRunner()) {}
+
+ ~SweeperImpl() { CancelSweepers(); }
void Start(Config config) {
is_in_progress_ = true;
@@ -181,29 +491,114 @@ class Sweeper::SweeperImpl final {
ObjectStartBitmapVerifier().Verify(heap_);
#endif
PrepareForSweepVisitor(&space_states_).Traverse(heap_);
+
if (config == Config::kAtomic) {
Finish();
} else {
DCHECK_EQ(Config::kIncrementalAndConcurrent, config);
- // TODO(chromium:1056170): Schedule concurrent sweeping.
+ ScheduleIncrementalSweeping();
+ ScheduleConcurrentSweeping();
}
}
void Finish() {
if (!is_in_progress_) return;
- MutatorThreadSweepVisitor s(&space_states_);
+ // First, call finalizers on the mutator thread.
+ SweepFinalizer finalizer(platform_);
+ finalizer.FinalizeHeap(&space_states_);
+
+ // Then, help out the concurrent thread.
+ MutatorThreadSweeper sweeper(&space_states_, platform_);
+ sweeper.Sweep();
+
+ // Synchronize with the concurrent sweeper and call remaining finalizers.
+ SynchronizeAndFinalizeConcurrentSweeping();
is_in_progress_ = false;
+
+ stats_collector_->NotifySweepingCompleted();
}
private:
- SpaceStates space_states_;
+ class IncrementalSweepTask : public v8::IdleTask {
+ public:
+ using Handle = SingleThreadedHandle;
+
+ explicit IncrementalSweepTask(SweeperImpl* sweeper)
+ : sweeper_(sweeper), handle_(Handle::NonEmptyTag{}) {}
+
+ static Handle Post(SweeperImpl* sweeper, v8::TaskRunner* runner) {
+ auto task = std::make_unique<IncrementalSweepTask>(sweeper);
+ auto handle = task->GetHandle();
+ runner->PostIdleTask(std::move(task));
+ return handle;
+ }
+
+ private:
+ void Run(double deadline_in_seconds) override {
+ if (handle_.IsCanceled() || !sweeper_->is_in_progress_) return;
+
+ MutatorThreadSweeper sweeper(&sweeper_->space_states_,
+ sweeper_->platform_);
+ const bool sweep_complete =
+ sweeper.SweepWithDeadline(deadline_in_seconds);
+
+ if (sweep_complete) {
+ sweeper_->SynchronizeAndFinalizeConcurrentSweeping();
+ } else {
+ sweeper_->ScheduleIncrementalSweeping();
+ }
+ }
+
+ Handle GetHandle() const { return handle_; }
+
+ SweeperImpl* sweeper_;
+ // TODO(chromium:1056170): Change to CancelableTask.
+ Handle handle_;
+ };
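
The SingleThreadedHandle lets SweeperImpl cancel an idle task that has been posted but not yet run; handle and task share one flag, and the task checks it first thing in Run(). A sketch of that pattern under the apparent design (ToyHandle is hypothetical; the real handle lives in task-handle.h):

```cpp
#include <memory>

// Hypothetical reduction of SingleThreadedHandle: handle copies share
// one flag; no atomics needed since everything runs on one thread.
class ToyHandle {
 public:
  struct NonEmptyTag {};
  ToyHandle() = default;  // empty handle; operator bool is false
  explicit ToyHandle(NonEmptyTag)
      : canceled_(std::make_shared<bool>(false)) {}

  void Cancel() {
    if (canceled_) *canceled_ = true;
  }
  bool IsCanceled() const { return canceled_ && *canceled_; }
  explicit operator bool() const { return canceled_ != nullptr; }

 private:
  std::shared_ptr<bool> canceled_;
};
```
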
+
+ void ScheduleIncrementalSweeping() {
+ if (!platform_ || !foreground_task_runner_) return;
+
+ incremental_sweeper_handle_ =
+ IncrementalSweepTask::Post(this, foreground_task_runner_.get());
+ }
+
+ void ScheduleConcurrentSweeping() {
+ if (!platform_) return;
+
+ concurrent_sweeper_handle_ = platform_->PostJob(
+ v8::TaskPriority::kUserVisible,
+ std::make_unique<ConcurrentSweepTask>(&space_states_));
+ }
+
+ void CancelSweepers() {
+ if (incremental_sweeper_handle_) incremental_sweeper_handle_.Cancel();
+ if (concurrent_sweeper_handle_) concurrent_sweeper_handle_->Cancel();
+ }
+
+ void SynchronizeAndFinalizeConcurrentSweeping() {
+ CancelSweepers();
+
+ SweepFinalizer finalizer(platform_);
+ finalizer.FinalizeHeap(&space_states_);
+ }
+
RawHeap* heap_;
+ StatsCollector* stats_collector_;
+ SpaceStates space_states_;
+ cppgc::Platform* platform_;
+ std::shared_ptr<v8::TaskRunner> foreground_task_runner_;
+ IncrementalSweepTask::Handle incremental_sweeper_handle_;
+ std::unique_ptr<v8::JobHandle> concurrent_sweeper_handle_;
bool is_in_progress_ = false;
};
-Sweeper::Sweeper(RawHeap* heap) : impl_(std::make_unique<SweeperImpl>(heap)) {}
+Sweeper::Sweeper(RawHeap* heap, cppgc::Platform* platform,
+ StatsCollector* stats_collector)
+ : impl_(std::make_unique<SweeperImpl>(heap, platform, stats_collector)) {}
+
Sweeper::~Sweeper() = default;
void Sweeper::Start(Config config) { impl_->Start(config); }
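
Taken together: Start(kIncrementalAndConcurrent) now fans work out to an idle task and a background job, and Finish() remains the synchronous barrier that drains whatever is left. A sketch of the intended lifecycle (call sites simplified; the real callers are the GC entry points, and the Finish() call assumes the public wrapper mirrors SweeperImpl::Finish() above):

```cpp
// Illustrative only; not an in-tree call site.
// Sweeper sweeper(&raw_heap, platform, &stats_collector);
// sweeper.Start(Sweeper::Config::kIncrementalAndConcurrent);
// ...mutator runs; the idle task and the concurrent job sweep pages...
// sweeper.Finish();  // e.g. before the next GC cycle: sweep + finalize all
```
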