path: root/chromium/v8/src/heap/basic-memory-chunk.h
author    Allan Sandfeld Jensen <allan.jensen@qt.io>  2020-10-12 14:27:29 +0200
committer Allan Sandfeld Jensen <allan.jensen@qt.io>  2020-10-13 09:35:20 +0000
commit    c30a6232df03e1efbd9f3b226777b07e087a1122 (patch)
tree      e992f45784689f373bcc38d1b79a239ebe17ee23 /chromium/v8/src/heap/basic-memory-chunk.h
parent    7b5b123ac58f58ffde0f4f6e488bcd09aa4decd3 (diff)
BASELINE: Update Chromium to 85.0.4183.140 (85-based)
Change-Id: Iaa42f4680837c57725b1344f108c0196741f6057
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/v8/src/heap/basic-memory-chunk.h')
-rw-r--r--  chromium/v8/src/heap/basic-memory-chunk.h  231
1 file changed, 205 insertions(+), 26 deletions(-)
diff --git a/chromium/v8/src/heap/basic-memory-chunk.h b/chromium/v8/src/heap/basic-memory-chunk.h
index 205d02ce247..8d8fff39fbe 100644
--- a/chromium/v8/src/heap/basic-memory-chunk.h
+++ b/chromium/v8/src/heap/basic-memory-chunk.h
@@ -6,25 +6,29 @@
#define V8_HEAP_BASIC_MEMORY_CHUNK_H_
#include <type_traits>
+#include <unordered_map>
#include "src/base/atomic-utils.h"
#include "src/common/globals.h"
+#include "src/flags/flags.h"
#include "src/heap/marking.h"
-#include "src/heap/slot-set.h"
+#include "src/objects/heap-object.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
-class MemoryChunk;
-
-enum RememberedSetType {
- OLD_TO_NEW,
- OLD_TO_OLD,
- NUMBER_OF_REMEMBERED_SET_TYPES
-};
+class BaseSpace;
class BasicMemoryChunk {
public:
+ // Use with std data structures.
+ struct Hasher {
+ size_t operator()(BasicMemoryChunk* const chunk) const {
+ return reinterpret_cast<size_t>(chunk) >> kPageSizeBits;
+ }
+ };
+
enum Flag {
NO_FLAGS = 0u,
IS_EXECUTABLE = 1u << 0,
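
Note: the Hasher added in this hunk (together with the new <unordered_map> include) lets chunk pointers key std containers. Chunks are page-aligned, so the low kPageSizeBits of every chunk address are zero; shifting them away yields a compact, well-distributed hash. A minimal standalone sketch of the same idea; the 256 KiB alignment and the container choice are assumptions for illustration, not taken from this diff:

#include <cstddef>
#include <unordered_set>

struct Chunk {};  // stand-in for BasicMemoryChunk

struct ChunkHasher {
  // Assumed: chunks are aligned to 2^18 bytes, so the low 18 bits of a
  // chunk pointer carry no information.
  static constexpr unsigned kPageSizeBits = 18;
  std::size_t operator()(Chunk* const chunk) const {
    return reinterpret_cast<std::size_t>(chunk) >> kPageSizeBits;
  }
};

// Usage: a set of chunks keyed by the page-granular hash.
std::unordered_set<Chunk*, ChunkHasher> visited_chunks;
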
@@ -109,11 +113,30 @@ class BasicMemoryChunk {
Address address() const { return reinterpret_cast<Address>(this); }
+ // Returns the offset of a given address to this page.
+ inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); }
+
+ // Returns the address for a given offset in this page.
+ Address OffsetToAddress(size_t offset) {
+ Address address_in_page = address() + offset;
+ DCHECK_GE(address_in_page, area_start());
+ DCHECK_LT(address_in_page, area_end());
+ return address_in_page;
+ }
+
+ // Some callers rely on the fact that this can operate on both
+ // tagged and aligned object addresses.
+ inline uint32_t AddressToMarkbitIndex(Address addr) const {
+ return static_cast<uint32_t>(addr - this->address()) >> kTaggedSizeLog2;
+ }
+
+ inline Address MarkbitIndexToAddress(uint32_t index) const {
+ return this->address() + (index << kTaggedSizeLog2);
+ }
+
size_t size() const { return size_; }
void set_size(size_t size) { size_ = size; }
- size_t buckets() const { return SlotSet::BucketsForSize(size()); }
-
Address area_start() const { return area_start_; }
Address area_end() const { return area_end_; }
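
The Offset/OffsetToAddress and markbit helpers added in this hunk are plain arithmetic relative to the chunk base, with one mark bit per tagged word. A standalone sketch of the same round trip; the chunk base and kTaggedSizeLog2 == 2 are assumed values for illustration only:

#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  const uintptr_t chunk_base = 0x40000;  // assumed page-aligned chunk start
  const unsigned kTaggedSizeLog2 = 2;    // assumed: 4-byte tagged values

  uintptr_t object_addr = chunk_base + 0x128;

  // Offset(): byte distance from the chunk start.
  std::size_t offset = static_cast<std::size_t>(object_addr - chunk_base);

  // AddressToMarkbitIndex(): one mark bit per tagged word.
  uint32_t markbit = static_cast<uint32_t>(offset) >> kTaggedSizeLog2;

  // MarkbitIndexToAddress(): the inverse mapping back to an address.
  uintptr_t round_trip =
      chunk_base + (static_cast<uintptr_t>(markbit) << kTaggedSizeLog2);

  assert(offset == 0x128);
  assert(markbit == (0x128 >> 2));
  assert(round_trip == object_addr);  // exact because object_addr is tagged-aligned
  return 0;
}
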
@@ -123,6 +146,16 @@ class BasicMemoryChunk {
return static_cast<size_t>(area_end() - area_start());
}
+ Heap* heap() const {
+ DCHECK_NOT_NULL(heap_);
+ return heap_;
+ }
+
+ // Gets the chunk's owner or null if the space has been detached.
+ BaseSpace* owner() const { return owner_; }
+
+ void set_owner(BaseSpace* space) { owner_ = space; }
+
template <AccessMode access_mode = AccessMode::NON_ATOMIC>
void SetFlag(Flag flag) {
if (access_mode == AccessMode::NON_ATOMIC) {
@@ -155,9 +188,69 @@ class BasicMemoryChunk {
}
}
+ using Flags = uintptr_t;
+
+ static const Flags kPointersToHereAreInterestingMask =
+ POINTERS_TO_HERE_ARE_INTERESTING;
+
+ static const Flags kPointersFromHereAreInterestingMask =
+ POINTERS_FROM_HERE_ARE_INTERESTING;
+
+ static const Flags kEvacuationCandidateMask = EVACUATION_CANDIDATE;
+
+ static const Flags kIsInYoungGenerationMask = FROM_PAGE | TO_PAGE;
+
+ static const Flags kIsLargePageMask = LARGE_PAGE;
+
+ static const Flags kSkipEvacuationSlotsRecordingMask =
+ kEvacuationCandidateMask | kIsInYoungGenerationMask;
+
bool InReadOnlySpace() const { return IsFlagSet(READ_ONLY_HEAP); }
- // TODO(v8:7464): Add methods for down casting to MemoryChunk.
+ bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
+
+ void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
+
+ bool CanAllocate() {
+ return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
+ }
+
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+ bool IsEvacuationCandidate() {
+ DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
+ IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
+ return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
+ }
+
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+ bool ShouldSkipEvacuationSlotRecording() {
+ uintptr_t flags = GetFlags<access_mode>();
+ return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
+ ((flags & COMPACTION_WAS_ABORTED) == 0);
+ }
+
+ Executability executable() {
+ return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
+ }
+
+ bool IsFromPage() const { return IsFlagSet(FROM_PAGE); }
+ bool IsToPage() const { return IsFlagSet(TO_PAGE); }
+ bool IsLargePage() const { return IsFlagSet(LARGE_PAGE); }
+ bool InYoungGeneration() const {
+ return (GetFlags() & kIsInYoungGenerationMask) != 0;
+ }
+ bool InNewSpace() const { return InYoungGeneration() && !IsLargePage(); }
+ bool InNewLargeObjectSpace() const {
+ return InYoungGeneration() && IsLargePage();
+ }
+ bool InOldSpace() const;
+ V8_EXPORT_PRIVATE bool InLargeObjectSpace() const;
+
+ bool IsWritable() const {
+ // If this is a read-only space chunk but heap_ is non-null, it has not yet
+ // been sealed and can be written to.
+ return !InReadOnlySpace() || heap_ != nullptr;
+ }
bool Contains(Address addr) const {
return addr >= area_start() && addr < area_end();
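
The mask constants and predicates added in this hunk encode page state as flag bits, so checks like InYoungGeneration() reduce to a single AND against a composite mask. A tiny standalone sketch of that pattern; the bit positions below are made up for illustration and do not match the real Flag enum:

#include <cstdint>

enum Flag : uintptr_t {
  FROM_PAGE = 1u << 3,  // assumed bit positions, illustrative only
  TO_PAGE = 1u << 4,
  LARGE_PAGE = 1u << 5,
};
using Flags = uintptr_t;

constexpr Flags kIsInYoungGenerationMask = FROM_PAGE | TO_PAGE;

// InYoungGeneration()-style check: true if either semispace bit is set.
bool InYoungGeneration(Flags flags) {
  return (flags & kIsInYoungGenerationMask) != 0;
}

// InNewSpace()-style check composes predicates rather than adding new bits.
bool InNewSpace(Flags flags) {
  return InYoungGeneration(flags) && (flags & LARGE_PAGE) == 0;
}
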
@@ -171,23 +264,92 @@ class BasicMemoryChunk {
void ReleaseMarkingBitmap();
+ static BasicMemoryChunk* Initialize(Heap* heap, Address base, size_t size,
+ Address area_start, Address area_end,
+ BaseSpace* owner,
+ VirtualMemory reservation);
+
+ size_t wasted_memory() { return wasted_memory_; }
+ void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
+ size_t allocated_bytes() { return allocated_bytes_; }
+
static const intptr_t kSizeOffset = 0;
static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
static const intptr_t kMarkBitmapOffset = kFlagsOffset + kUIntptrSize;
static const intptr_t kHeapOffset = kMarkBitmapOffset + kSystemPointerSize;
static const intptr_t kAreaStartOffset = kHeapOffset + kSystemPointerSize;
static const intptr_t kAreaEndOffset = kAreaStartOffset + kSystemPointerSize;
- static const intptr_t kOldToNewSlotSetOffset =
- kAreaEndOffset + kSystemPointerSize;
static const size_t kHeaderSize =
- kSizeOffset + kSizetSize // size_t size
- + kUIntptrSize // uintptr_t flags_
- + kSystemPointerSize // Bitmap* marking_bitmap_
- + kSystemPointerSize // Heap* heap_
- + kSystemPointerSize // Address area_start_
- + kSystemPointerSize // Address area_end_
- + kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES; // SlotSet* array
+ kSizeOffset + kSizetSize // size_t size
+ + kUIntptrSize // uintptr_t flags_
+ + kSystemPointerSize // Bitmap* marking_bitmap_
+ + kSystemPointerSize // Heap* heap_
+ + kSystemPointerSize // Address area_start_
+ + kSystemPointerSize // Address area_end_
+ + kSizetSize // size_t allocated_bytes_
+ + kSizetSize // size_t wasted_memory_
+ + kSystemPointerSize // std::atomic<intptr_t> high_water_mark_
+ + kSystemPointerSize // Address owner_
+ + 3 * kSystemPointerSize; // VirtualMemory reservation_
+
+ // Only works if the pointer is in the first kPageSize of the MemoryChunk.
+ static BasicMemoryChunk* FromAddress(Address a) {
+ DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
+ return reinterpret_cast<BasicMemoryChunk*>(BaseAddress(a));
+ }
+
+ // Only works if the object is in the first kPageSize of the MemoryChunk.
+ static BasicMemoryChunk* FromHeapObject(HeapObject o) {
+ DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
+ return reinterpret_cast<BasicMemoryChunk*>(BaseAddress(o.ptr()));
+ }
+
+ template <AccessMode mode>
+ ConcurrentBitmap<mode>* marking_bitmap() const {
+ return reinterpret_cast<ConcurrentBitmap<mode>*>(marking_bitmap_);
+ }
+
+ Address HighWaterMark() { return address() + high_water_mark_; }
+
+ static inline void UpdateHighWaterMark(Address mark) {
+ if (mark == kNullAddress) return;
+ // Need to subtract one from the mark because when a chunk is full the
+ // top points to the next address after the chunk, which effectively belongs
+ // to another chunk. See the comment to Page::FromAllocationAreaAddress.
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromAddress(mark - 1);
+ intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
+ intptr_t old_mark = chunk->high_water_mark_.load(std::memory_order_relaxed);
+ while ((new_mark > old_mark) &&
+ !chunk->high_water_mark_.compare_exchange_weak(
+ old_mark, new_mark, std::memory_order_acq_rel)) {
+ }
+ }
+
+ VirtualMemory* reserved_memory() { return &reservation_; }
+
+ void ResetAllocationStatistics() {
+ allocated_bytes_ = area_size();
+ wasted_memory_ = 0;
+ }
+
+ void IncreaseAllocatedBytes(size_t bytes) {
+ DCHECK_LE(bytes, area_size());
+ allocated_bytes_ += bytes;
+ }
+
+ void DecreaseAllocatedBytes(size_t bytes) {
+ DCHECK_LE(bytes, area_size());
+ DCHECK_GE(allocated_bytes(), bytes);
+ allocated_bytes_ -= bytes;
+ }
+
+#ifdef THREAD_SANITIZER
+ // Perform a dummy acquire load to tell TSAN that there is no data race in
+ // mark-bit initialization. See MemoryChunk::Initialize for the corresponding
+ // release store.
+ void SynchronizedHeapLoad();
+#endif
protected:
// Overall size of the chunk, including the header and guards.
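
UpdateHighWaterMark in this hunk raises the watermark with a relaxed load followed by a compare_exchange_weak loop, so racing allocators can only ever move the mark forward. A self-contained sketch of that monotonic-max pattern; the atomic type and memory orders mirror the diff, while the surrounding function is illustrative:

#include <atomic>
#include <cstdint>

// Raise `high_water_mark` to `new_mark` without ever lowering it, even when
// several threads race on the update.
void UpdateMax(std::atomic<intptr_t>& high_water_mark, intptr_t new_mark) {
  intptr_t old_mark = high_water_mark.load(std::memory_order_relaxed);
  // On failure compare_exchange_weak reloads `old_mark`, so the loop either
  // wins the exchange or observes a value at least as large as `new_mark`.
  while (new_mark > old_mark &&
         !high_water_mark.compare_exchange_weak(old_mark, new_mark,
                                                std::memory_order_acq_rel)) {
  }
}
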
@@ -207,12 +369,31 @@ class BasicMemoryChunk {
Address area_start_;
Address area_end_;
- // A single slot set for small pages (of size kPageSize) or an array of slot
- // set for large pages. In the latter case the number of entries in the array
- // is ceil(size() / kPageSize).
- SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
+ // Bytes allocated on the page, which includes all objects on the page and the
+ // linear allocation area.
+ size_t allocated_bytes_;
+ // Freed memory that was not added to the free list.
+ size_t wasted_memory_;
+
+ // Assuming the initial allocation on a page is sequential, count highest
+ // number of bytes ever allocated on the page.
+ std::atomic<intptr_t> high_water_mark_;
+
+ // The space owning this memory chunk.
+ std::atomic<BaseSpace*> owner_;
+
+ // If the chunk needs to remember its memory reservation, it is stored here.
+ VirtualMemory reservation_;
friend class BasicMemoryChunkValidator;
+ friend class ConcurrentMarkingState;
+ friend class MajorMarkingState;
+ friend class MajorAtomicMarkingState;
+ friend class MajorNonAtomicMarkingState;
+ friend class MemoryAllocator;
+ friend class MinorMarkingState;
+ friend class MinorNonAtomicMarkingState;
+ friend class PagedSpace;
};
STATIC_ASSERT(std::is_standard_layout<BasicMemoryChunk>::value);
@@ -227,8 +408,6 @@ class BasicMemoryChunkValidator {
offsetof(BasicMemoryChunk, marking_bitmap_));
STATIC_ASSERT(BasicMemoryChunk::kHeapOffset ==
offsetof(BasicMemoryChunk, heap_));
- STATIC_ASSERT(BasicMemoryChunk::kOldToNewSlotSetOffset ==
- offsetof(BasicMemoryChunk, slot_set_));
};
} // namespace internal
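
The FromAddress/FromHeapObject helpers added earlier in this diff recover the owning chunk by masking an interior address down to its aligned base via BaseAddress. BaseAddress itself is not shown in this diff; the sketch below assumes it simply clears the low alignment bits, with a 256 KiB alignment chosen only for illustration:

#include <cassert>
#include <cstdint>

constexpr uintptr_t kChunkAlignment = uintptr_t{1} << 18;  // assumed 256 KiB

// Clears the low bits so any interior pointer maps back to the chunk header.
uintptr_t BaseAddress(uintptr_t addr) { return addr & ~(kChunkAlignment - 1); }

int main() {
  uintptr_t chunk_base = 3 * kChunkAlignment;  // some aligned chunk start
  uintptr_t interior = chunk_base + 0x1234;    // an object address on that chunk
  assert(BaseAddress(interior) == chunk_base);
  return 0;
}
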