Index: src/spaces.h
===================================================================
--- src/spaces.h	(revision 10877)
+++ src/spaces.h	(working copy)
@@ -103,7 +103,7 @@
   ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)

 #define ASSERT_OBJECT_SIZE(size) \
-  ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))
+  ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))

 #define ASSERT_PAGE_OFFSET(offset) \
   ASSERT((Page::kObjectStartOffset <= offset) \
@@ -361,21 +361,15 @@
     store_buffer_counter_ = counter;
   }

-  Address body() { return address() + kObjectStartOffset; }
-
-  Address body_limit() { return address() + size(); }
-
-  int body_size() { return static_cast<int>(size() - kObjectStartOffset); }
-
   bool Contains(Address addr) {
-    return addr >= body() && addr < address() + size();
+    return addr >= area_start() && addr < area_end();
   }

   // Checks whether addr can be a limit of addresses in this page.
   // It's a limit if it's in the page, or if it's just after the
   // last byte of the page.
   bool ContainsLimit(Address addr) {
-    return addr >= body() && addr <= address() + size();
+    return addr >= area_start() && addr <= area_end();
   }

   enum MemoryChunkFlags {
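
Note: body(), body_limit(), and body_size() hard-coded the usable area as the
span from address() + kObjectStartOffset to address() + size(). Once code
pages carry guard regions (see the MemoryAllocator::CodePage* declarations
later in this patch), that span differs per chunk, so the bounds are stored on
the chunk and read through area_start()/area_end(). A minimal sketch of the
old-vs-new equivalence for a regular (non-code) chunk; 'chunk' here is a
hypothetical variable, not from the patch:

    // For a non-code chunk the stored bounds should match the values the
    // removed accessors used to compute.
    ASSERT(chunk->area_start() == chunk->address() + Page::kObjectStartOffset);
    ASSERT(chunk->area_end() == chunk->address() + chunk->size());
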
@@ -487,8 +481,9 @@
   static const intptr_t kSizeOffset = kPointerSize + kPointerSize;

   static const intptr_t kLiveBytesOffset =
-      kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
-      kPointerSize + kPointerSize + kPointerSize + kIntSize;
+      kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
+      kPointerSize + kPointerSize +
+      kPointerSize + kPointerSize + kPointerSize + kIntSize;

   static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
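
Note: the extra terms account for the two new Address fields (area_start_ and
area_end_) declared below, so the live-bytes offset grows by exactly
2 * kPointerSize. Spelled out:

    // Old: kSizeOffset + 6 * kPointerSize + kIntSize
    // New: kSizeOffset + 8 * kPointerSize + kIntSize
    // Difference: 2 * kPointerSize == sizeof(area_start_) + sizeof(area_end_)
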
@@ -594,12 +589,22 @@
     ClearFlag(EVACUATION_CANDIDATE);
   }

+  Address area_start() { return area_start_; }
+  Address area_end() { return area_end_; }
+  int area_size() {
+    return static_cast<int>(area_end() - area_start());
+  }

  protected:
   MemoryChunk* next_chunk_;
   MemoryChunk* prev_chunk_;
   size_t size_;
   intptr_t flags_;
+
+  // Start and end of allocatable memory on this chunk.
+  Address area_start_;
+  Address area_end_;
+
   // If the chunk needs to remember its memory reservation, it is stored here.
   VirtualMemory reservation_;
   // The identity of the owning space. This is tagged as a failure pointer, but
@@ -618,6 +623,8 @@
   static MemoryChunk* Initialize(Heap* heap,
                                  Address base,
                                  size_t size,
+                                 Address area_start,
+                                 Address area_end,
                                  Executability executable,
                                  Space* owner);

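
Note: Initialize() now receives the allocatable bounds from its caller, since
only the caller knows whether guard regions must be carved out of the chunk.
A hypothetical call for a regular chunk (illustration only, not from the
patch):

    // Non-code chunk: the area spans from the end of the header to the end
    // of the chunk.
    MemoryChunk* chunk =
        MemoryChunk::Initialize(heap, base, size,
                                base + Page::kObjectStartOffset,  // area_start
                                base + size,                      // area_end
                                NOT_EXECUTABLE, owner);
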
@@ -657,12 +664,6 @@
   inline void set_next_page(Page* page);
   inline void set_prev_page(Page* page);

-  // Returns the start address of the object area in this page.
-  Address ObjectAreaStart() { return address() + kObjectStartOffset; }
-
-  // Returns the end address (exclusive) of the object area in this page.
-  Address ObjectAreaEnd() { return address() + Page::kPageSize; }
-
   // Checks whether an address is page aligned.
   static bool IsAlignedToPageSize(Address a) {
     return 0 == (OffsetFrom(a) & kPageAlignmentMask);
@@ -685,22 +686,15 @@
   // Page size in bytes. This must be a multiple of the OS page size.
   static const int kPageSize = 1 << kPageSizeBits;

-  // Page size mask.
-  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
-
   // Object area size in bytes.
-  static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
+  static const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset;

   // Maximum object size that fits in a page.
-  static const int kMaxHeapObjectSize = kObjectAreaSize;
+  static const int kMaxNonCodeHeapObjectSize = kNonCodeObjectAreaSize;

-  static const int kFirstUsedCell =
-      (kObjectStartOffset/kPointerSize) >> Bitmap::kBitsPerCellLog2;
+  // Page size mask.
+  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

-  static const int kLastUsedCell =
-      ((kPageSize - kPointerSize)/kPointerSize) >>
-      Bitmap::kBitsPerCellLog2;
-
   inline void ClearGCFields();

   static inline Page* Initialize(Heap* heap,
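
Note: the renames make explicit that these compile-time constants describe
only non-code pages; for code pages the usable area is smaller (guard regions
are carved out) and is known only at runtime. The kFirstUsedCell/kLastUsedCell
bitmap constants drop out, presumably because a fixed, page-wide valid range
no longer exists once the area bounds are per chunk. A sketch of the
distinction the rename encodes:

    // Non-code pages: a compile-time constant.
    int data_area = Page::kNonCodeObjectAreaSize;
    // Code pages: computed at runtime (declared below in MemoryAllocator).
    int code_area = MemoryAllocator::CodePageAreaSize();
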
@@ -734,7 +728,7 @@
 class LargePage : public MemoryChunk {
  public:
   HeapObject* GetObject() {
-    return HeapObject::FromAddress(body());
+    return HeapObject::FromAddress(area_start());
   }

   inline LargePage* next_page() const {
@@ -975,7 +969,7 @@

   // Returns maximum available bytes that the old space can have.
   intptr_t MaxAvailable() {
-    return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
+    return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
   }

 #ifdef DEBUG
@@ -1028,6 +1022,20 @@
   bool MemoryAllocationCallbackRegistered(
       MemoryAllocationCallback callback);

+  static int CodePageGuardStartOffset();
+
+  static int CodePageGuardSize();
+
+  static int CodePageAreaStartOffset();
+
+  static int CodePageAreaEndOffset();
+
+  static int CodePageAreaSize() {
+    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
+  }
+
+  static bool CommitCodePage(VirtualMemory* vm, Address start, size_t size);
+
  private:
   Isolate* isolate_;

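
Note: spaces.cc is not part of this excerpt, so only the interface of the
guard-page scheme is visible here. The page layout implied by the names,
sketched as an assumption rather than as the committed implementation:

    // One code page, low address first:
    //   [0, CodePageGuardStartOffset())                       header
    //   [CodePageGuardStartOffset(), + CodePageGuardSize())   guard, no access
    //   [CodePageAreaStartOffset(), CodePageAreaEndOffset())  executable area
    //   [CodePageAreaEndOffset(), kPageSize)                  trailing guard
    // CommitCodePage() presumably commits the header and the executable area
    // while leaving the guard regions inaccessible.
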
@@ -1380,7 +1388,7 @@
  private:
   // The size range of blocks, in bytes.
   static const int kMinBlockSize = 3 * kPointerSize;
-  static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
+  static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;

   FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);

@@ -1572,12 +1580,12 @@

   void IncreaseUnsweptFreeBytes(Page* p) {
     ASSERT(ShouldBeSweptLazily(p));
-    unswept_free_bytes_ += (Page::kObjectAreaSize - p->LiveBytes());
+    unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
   }

   void DecreaseUnsweptFreeBytes(Page* p) {
     ASSERT(ShouldBeSweptLazily(p));
-    unswept_free_bytes_ -= (Page::kObjectAreaSize - p->LiveBytes());
+    unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
   }

   bool AdvanceSweeper(intptr_t bytes_to_sweep);
@@ -1600,11 +1608,11 @@
     intptr_t ratio_threshold;
     if (identity() == CODE_SPACE) {
       ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
-          Page::kObjectAreaSize;
+          AreaSize();
       ratio_threshold = 10;
     } else {
       ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
-          Page::kObjectAreaSize;
+          AreaSize();
       ratio_threshold = 15;
     }

@@ -1614,20 +1622,20 @@
             identity(),
             static_cast<int>(sizes.small_size_),
             static_cast<double>(sizes.small_size_ * 100) /
-                Page::kObjectAreaSize,
+                AreaSize(),
             static_cast<int>(sizes.medium_size_),
             static_cast<double>(sizes.medium_size_ * 100) /
-                Page::kObjectAreaSize,
+                AreaSize(),
             static_cast<int>(sizes.large_size_),
             static_cast<double>(sizes.large_size_ * 100) /
-                Page::kObjectAreaSize,
+                AreaSize(),
             static_cast<int>(sizes.huge_size_),
             static_cast<double>(sizes.huge_size_ * 100) /
-                Page::kObjectAreaSize,
+                AreaSize(),
             (ratio > ratio_threshold) ? "[fragmented]" : "");
   }

-  if (FLAG_always_compact && sizes.Total() != Page::kObjectAreaSize) {
+  if (FLAG_always_compact && sizes.Total() != AreaSize()) {
     return 1;
   }
   if (ratio <= ratio_threshold) return 0;  // Not fragmented.
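
Note: the fragmentation ratio is now computed against the owning space's
AreaSize() rather than the fixed Page::kObjectAreaSize, which keeps the
percentages meaningful for code pages with their smaller usable area. A worked
example with made-up numbers:

    // In CODE_SPACE, if medium free blocks cover 5% of the area and large
    // blocks cover 20%:
    //   ratio = (0.05 * Area * 10 + 0.20 * Area * 2) * 100 / Area = 90
    // which exceeds ratio_threshold (10), so the page counts as fragmented.
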
@@ -1642,7 +1650,14 @@
   // Returns the number of total pages in this space.
   int CountTotalPages();

+  // Return size of allocatable area on a page in this space.
+  inline int AreaSize() {
+    return area_size_;
+  }
+
  protected:
+  int area_size_;
+
   // Maximum capacity of this space.
   intptr_t max_capacity_;

@@ -1744,6 +1759,8 @@
       (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
       (1 << MemoryChunk::SCAN_ON_SCAVENGE);

+  static const int kAreaSize = Page::kNonCodeObjectAreaSize;
+
   inline NewSpacePage* next_page() const {
     return static_cast<NewSpacePage*>(next_chunk());
   }
@@ -1856,22 +1873,22 @@
   // Returns the start address of the first page of the space.
   Address space_start() {
     ASSERT(anchor_.next_page() != &anchor_);
-    return anchor_.next_page()->body();
+    return anchor_.next_page()->area_start();
   }

   // Returns the start address of the current page of the space.
   Address page_low() {
-    return current_page_->body();
+    return current_page_->area_start();
   }

   // Returns one past the end address of the space.
   Address space_end() {
-    return anchor_.prev_page()->body_limit();
+    return anchor_.prev_page()->area_end();
   }

   // Returns one past the end address of the current page of the space.
   Address page_high() {
-    return current_page_->body_limit();
+    return current_page_->area_end();
   }

   bool AdvancePage() {
@@ -2007,7 +2024,7 @@
       NewSpacePage* page = NewSpacePage::FromLimit(current_);
       page = page->next_page();
       ASSERT(!page->is_anchor());
-      current_ = page->body();
+      current_ = page->area_start();
       if (current_ == limit_) return NULL;
     }

@@ -2115,7 +2132,7 @@

   // Return the allocated bytes in the active semispace.
   virtual intptr_t Size() {
-    return pages_used_ * Page::kObjectAreaSize +
+    return pages_used_ * NewSpacePage::kAreaSize +
         static_cast<int>(top() - to_space_.page_low());
   }

@@ -2127,7 +2144,7 @@
   // Return the current capacity of a semispace.
   intptr_t EffectiveCapacity() {
     SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity());
-    return (to_space_.Capacity() / Page::kPageSize) * Page::kObjectAreaSize;
+    return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
   }

   // Return the current capacity of a semispace.
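
Note: new-space accounting switches from Page::kObjectAreaSize to
NewSpacePage::kAreaSize (the same value, sourced from the new constant). A
worked example with hypothetical constants, since the real ones are
build-dependent:

    // Suppose kPageSize = 1 MB and kObjectStartOffset = 2 KB:
    //   NewSpacePage::kAreaSize = 1048576 - 2048 = 1046528 bytes
    // For an 8 MB semispace:
    //   EffectiveCapacity() = (8388608 / 1048576) * 1046528 = 8372224 bytes
    // i.e. each page's header is excluded from the usable capacity.
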
@@ -2344,7 +2361,7 @@

   // The limit of allocation for a page in this space.
   virtual Address PageAllocationLimit(Page* page) {
-    return page->ObjectAreaEnd();
+    return page->area_end();
   }

  public:
@@ -2373,12 +2390,12 @@
       : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
         object_size_in_bytes_(object_size_in_bytes),
         name_(name) {
-    page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
+    page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
   }

   // The limit of allocation for a page in this space.
   virtual Address PageAllocationLimit(Page* page) {
-    return page->ObjectAreaEnd() - page_extra_;
+    return page->area_end() - page_extra_;
   }

   int object_size_in_bytes() { return object_size_in_bytes_; }
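
Note: page_extra_ is the tail of each page too small to hold one more
fixed-size object, so the per-page allocation limit stops short of area_end().
Illustration with hypothetical values (real sizes are build-dependent):

    // Suppose kNonCodeObjectAreaSize = 1046528 and object_size_in_bytes = 88:
    //   page_extra_ = 1046528 % 88 = 32
    //   PageAllocationLimit(page) = page->area_end() - 32
    // so exactly 11892 objects of 88 bytes fit on the page.
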
@@ -2432,7 +2449,7 @@
 #endif

  private:
-  static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
+  static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize;

   // Do map space compaction if there is a page gap.
   int CompactionThreshold() {