Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(2)

Side by Side Diff: src/spaces.h

Issue 9535013: Merge r10809 from the bleeding_edge to the 3.8 branch. (Closed) Base URL: http://v8.googlecode.com/svn/branches/3.8/
Patch Set: Created 8 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/serialize.cc ('k') | src/spaces.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 85 matching lines...) Expand 10 before | Expand all | Expand 10 after
96 #define ASSERT_PAGE_ALIGNED(address) \ 96 #define ASSERT_PAGE_ALIGNED(address) \
97 ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0) 97 ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
98 98
99 #define ASSERT_OBJECT_ALIGNED(address) \ 99 #define ASSERT_OBJECT_ALIGNED(address) \
100 ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0) 100 ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
101 101
102 #define ASSERT_MAP_ALIGNED(address) \ 102 #define ASSERT_MAP_ALIGNED(address) \
103 ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0) 103 ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)
104 104
105 #define ASSERT_OBJECT_SIZE(size) \ 105 #define ASSERT_OBJECT_SIZE(size) \
106 ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize)) 106 ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))
107 107
108 #define ASSERT_PAGE_OFFSET(offset) \ 108 #define ASSERT_PAGE_OFFSET(offset) \
109 ASSERT((Page::kObjectStartOffset <= offset) \ 109 ASSERT((Page::kObjectStartOffset <= offset) \
110 && (offset <= Page::kPageSize)) 110 && (offset <= Page::kPageSize))
111 111
112 #define ASSERT_MAP_PAGE_INDEX(index) \ 112 #define ASSERT_MAP_PAGE_INDEX(index) \
113 ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex)) 113 ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
114 114
115 115
116 class PagedSpace; 116 class PagedSpace;
(...skipping 237 matching lines...) Expand 10 before | Expand all | Expand 10 after
354 ClearFlag(SCAN_ON_SCAVENGE); 354 ClearFlag(SCAN_ON_SCAVENGE);
355 } 355 }
356 } 356 }
357 inline void set_scan_on_scavenge(bool scan); 357 inline void set_scan_on_scavenge(bool scan);
358 358
359 int store_buffer_counter() { return store_buffer_counter_; } 359 int store_buffer_counter() { return store_buffer_counter_; }
360 void set_store_buffer_counter(int counter) { 360 void set_store_buffer_counter(int counter) {
361 store_buffer_counter_ = counter; 361 store_buffer_counter_ = counter;
362 } 362 }
363 363
364 Address body() { return address() + kObjectStartOffset; }
365
366 Address body_limit() { return address() + size(); }
367
368 int body_size() { return static_cast<int>(size() - kObjectStartOffset); }
369
370 bool Contains(Address addr) { 364 bool Contains(Address addr) {
371 return addr >= body() && addr < address() + size(); 365 return addr >= area_start() && addr < area_end();
372 } 366 }
373 367
374 // Checks whether addr can be a limit of addresses in this page. 368 // Checks whether addr can be a limit of addresses in this page.
375 // It's a limit if it's in the page, or if it's just after the 369 // It's a limit if it's in the page, or if it's just after the
376 // last byte of the page. 370 // last byte of the page.
377 bool ContainsLimit(Address addr) { 371 bool ContainsLimit(Address addr) {
378 return addr >= body() && addr <= address() + size(); 372 return addr >= area_start() && addr <= area_end();
379 } 373 }
380 374
381 enum MemoryChunkFlags { 375 enum MemoryChunkFlags {
382 IS_EXECUTABLE, 376 IS_EXECUTABLE,
383 ABOUT_TO_BE_FREED, 377 ABOUT_TO_BE_FREED,
384 POINTERS_TO_HERE_ARE_INTERESTING, 378 POINTERS_TO_HERE_ARE_INTERESTING,
385 POINTERS_FROM_HERE_ARE_INTERESTING, 379 POINTERS_FROM_HERE_ARE_INTERESTING,
386 SCAN_ON_SCAVENGE, 380 SCAN_ON_SCAVENGE,
387 IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE. 381 IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE.
388 IN_TO_SPACE, // All pages in new space have one of these two set. 382 IN_TO_SPACE, // All pages in new space have one of these two set.
(...skipping 91 matching lines...) Expand 10 before | Expand all | Expand 10 after
480 static void IncrementLiveBytesFromMutator(Address address, int by); 474 static void IncrementLiveBytesFromMutator(Address address, int by);
481 475
482 static const intptr_t kAlignment = 476 static const intptr_t kAlignment =
483 (static_cast<uintptr_t>(1) << kPageSizeBits); 477 (static_cast<uintptr_t>(1) << kPageSizeBits);
484 478
485 static const intptr_t kAlignmentMask = kAlignment - 1; 479 static const intptr_t kAlignmentMask = kAlignment - 1;
486 480
487 static const intptr_t kSizeOffset = kPointerSize + kPointerSize; 481 static const intptr_t kSizeOffset = kPointerSize + kPointerSize;
488 482
489 static const intptr_t kLiveBytesOffset = 483 static const intptr_t kLiveBytesOffset =
490 kSizeOffset + kPointerSize + kPointerSize + kPointerSize + 484 kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
491 kPointerSize + kPointerSize + kPointerSize + kIntSize; 485 kPointerSize + kPointerSize +
486 kPointerSize + kPointerSize + kPointerSize + kIntSize;
492 487
493 static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize; 488 static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
494 489
495 static const size_t kHeaderSize = 490 static const size_t kHeaderSize =
496 kSlotsBufferOffset + kPointerSize + kPointerSize; 491 kSlotsBufferOffset + kPointerSize + kPointerSize;
497 492
498 static const int kBodyOffset = 493 static const int kBodyOffset =
499 CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize)); 494 CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize));
500 495
501 // The start offset of the object area in a page. Aligned to both maps and 496 // The start offset of the object area in a page. Aligned to both maps and
(...skipping 85 matching lines...) Expand 10 before | Expand all | Expand 10 after
587 void MarkEvacuationCandidate() { 582 void MarkEvacuationCandidate() {
588 ASSERT(slots_buffer_ == NULL); 583 ASSERT(slots_buffer_ == NULL);
589 SetFlag(EVACUATION_CANDIDATE); 584 SetFlag(EVACUATION_CANDIDATE);
590 } 585 }
591 586
592 void ClearEvacuationCandidate() { 587 void ClearEvacuationCandidate() {
593 ASSERT(slots_buffer_ == NULL); 588 ASSERT(slots_buffer_ == NULL);
594 ClearFlag(EVACUATION_CANDIDATE); 589 ClearFlag(EVACUATION_CANDIDATE);
595 } 590 }
596 591
592 Address area_start() { return area_start_; }
593 Address area_end() { return area_end_; }
594 int area_size() {
595 return static_cast<int>(area_end() - area_start());
596 }
597 597
598 protected: 598 protected:
599 MemoryChunk* next_chunk_; 599 MemoryChunk* next_chunk_;
600 MemoryChunk* prev_chunk_; 600 MemoryChunk* prev_chunk_;
601 size_t size_; 601 size_t size_;
602 intptr_t flags_; 602 intptr_t flags_;
603
604 // Start and end of allocatable memory on this chunk.
605 Address area_start_;
606 Address area_end_;
607
603 // If the chunk needs to remember its memory reservation, it is stored here. 608 // If the chunk needs to remember its memory reservation, it is stored here.
604 VirtualMemory reservation_; 609 VirtualMemory reservation_;
605 // The identity of the owning space. This is tagged as a failure pointer, but 610 // The identity of the owning space. This is tagged as a failure pointer, but
606 // no failure can be in an object, so this can be distinguished from any entry 611 // no failure can be in an object, so this can be distinguished from any entry
607 // in a fixed array. 612 // in a fixed array.
608 Address owner_; 613 Address owner_;
609 Heap* heap_; 614 Heap* heap_;
610 // Used by the store buffer to keep track of which pages to mark scan-on- 615 // Used by the store buffer to keep track of which pages to mark scan-on-
611 // scavenge. 616 // scavenge.
612 int store_buffer_counter_; 617 int store_buffer_counter_;
613 // Count of bytes marked black on page. 618 // Count of bytes marked black on page.
614 int live_byte_count_; 619 int live_byte_count_;
615 SlotsBuffer* slots_buffer_; 620 SlotsBuffer* slots_buffer_;
616 SkipList* skip_list_; 621 SkipList* skip_list_;
617 622
618 static MemoryChunk* Initialize(Heap* heap, 623 static MemoryChunk* Initialize(Heap* heap,
619 Address base, 624 Address base,
620 size_t size, 625 size_t size,
626 Address area_start,
627 Address area_end,
621 Executability executable, 628 Executability executable,
622 Space* owner); 629 Space* owner);
623 630
624 friend class MemoryAllocator; 631 friend class MemoryAllocator;
625 }; 632 };
626 633
627 STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize); 634 STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
628 635
629 // ----------------------------------------------------------------------------- 636 // -----------------------------------------------------------------------------
630 // A page is a memory chunk of a size 1MB. Large object pages may be larger. 637 // A page is a memory chunk of a size 1MB. Large object pages may be larger.
(...skipping 19 matching lines...) Expand all
650 Page* p = FromAddress(top - kPointerSize); 657 Page* p = FromAddress(top - kPointerSize);
651 return p; 658 return p;
652 } 659 }
653 660
654 // Returns the next page in the chain of pages owned by a space. 661 // Returns the next page in the chain of pages owned by a space.
655 inline Page* next_page(); 662 inline Page* next_page();
656 inline Page* prev_page(); 663 inline Page* prev_page();
657 inline void set_next_page(Page* page); 664 inline void set_next_page(Page* page);
658 inline void set_prev_page(Page* page); 665 inline void set_prev_page(Page* page);
659 666
660 // Returns the start address of the object area in this page.
661 Address ObjectAreaStart() { return address() + kObjectStartOffset; }
662
663 // Returns the end address (exclusive) of the object area in this page.
664 Address ObjectAreaEnd() { return address() + Page::kPageSize; }
665
666 // Checks whether an address is page aligned. 667 // Checks whether an address is page aligned.
667 static bool IsAlignedToPageSize(Address a) { 668 static bool IsAlignedToPageSize(Address a) {
668 return 0 == (OffsetFrom(a) & kPageAlignmentMask); 669 return 0 == (OffsetFrom(a) & kPageAlignmentMask);
669 } 670 }
670 671
671 // Returns the offset of a given address to this page. 672 // Returns the offset of a given address to this page.
672 INLINE(int Offset(Address a)) { 673 INLINE(int Offset(Address a)) {
673 int offset = static_cast<int>(a - address()); 674 int offset = static_cast<int>(a - address());
674 return offset; 675 return offset;
675 } 676 }
676 677
677 // Returns the address for a given offset to this page. 678 // Returns the address for a given offset to this page.
678 Address OffsetToAddress(int offset) { 679 Address OffsetToAddress(int offset) {
679 ASSERT_PAGE_OFFSET(offset); 680 ASSERT_PAGE_OFFSET(offset);
680 return address() + offset; 681 return address() + offset;
681 } 682 }
682 683
683 // --------------------------------------------------------------------- 684 // ---------------------------------------------------------------------
684 685
685 // Page size in bytes. This must be a multiple of the OS page size. 686 // Page size in bytes. This must be a multiple of the OS page size.
686 static const int kPageSize = 1 << kPageSizeBits; 687 static const int kPageSize = 1 << kPageSizeBits;
687 688
689 // Object area size in bytes.
690 static const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset;
691
692 // Maximum object size that fits in a page.
693 static const int kMaxNonCodeHeapObjectSize = kNonCodeObjectAreaSize;
694
688 // Page size mask. 695 // Page size mask.
689 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; 696 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
690 697
691 // Object area size in bytes.
692 static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
693
694 // Maximum object size that fits in a page.
695 static const int kMaxHeapObjectSize = kObjectAreaSize;
696
697 static const int kFirstUsedCell =
698 (kObjectStartOffset/kPointerSize) >> Bitmap::kBitsPerCellLog2;
699
700 static const int kLastUsedCell =
701 ((kPageSize - kPointerSize)/kPointerSize) >>
702 Bitmap::kBitsPerCellLog2;
703
704 inline void ClearGCFields(); 698 inline void ClearGCFields();
705 699
706 static inline Page* Initialize(Heap* heap, 700 static inline Page* Initialize(Heap* heap,
707 MemoryChunk* chunk, 701 MemoryChunk* chunk,
708 Executability executable, 702 Executability executable,
709 PagedSpace* owner); 703 PagedSpace* owner);
710 704
711 void InitializeAsAnchor(PagedSpace* owner); 705 void InitializeAsAnchor(PagedSpace* owner);
712 706
713 bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); } 707 bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
(...skipping 13 matching lines...) Expand all
727 friend class MemoryAllocator; 721 friend class MemoryAllocator;
728 }; 722 };
729 723
730 724
731 STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize); 725 STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);
732 726
733 727
734 class LargePage : public MemoryChunk { 728 class LargePage : public MemoryChunk {
735 public: 729 public:
736 HeapObject* GetObject() { 730 HeapObject* GetObject() {
737 return HeapObject::FromAddress(body()); 731 return HeapObject::FromAddress(area_start());
738 } 732 }
739 733
740 inline LargePage* next_page() const { 734 inline LargePage* next_page() const {
741 return static_cast<LargePage*>(next_chunk()); 735 return static_cast<LargePage*>(next_chunk());
742 } 736 }
743 737
744 inline void set_next_page(LargePage* page) { 738 inline void set_next_page(LargePage* page) {
745 set_next_chunk(page); 739 set_next_chunk(page);
746 } 740 }
747 private: 741 private:
(...skipping 220 matching lines...) Expand 10 before | Expand all | Expand 10 after
968 intptr_t AvailableExecutable() { 962 intptr_t AvailableExecutable() {
969 if (capacity_executable_ < size_executable_) return 0; 963 if (capacity_executable_ < size_executable_) return 0;
970 return capacity_executable_ - size_executable_; 964 return capacity_executable_ - size_executable_;
971 } 965 }
972 966
973 // Returns allocated executable spaces in bytes. 967 // Returns allocated executable spaces in bytes.
974 intptr_t SizeExecutable() { return size_executable_; } 968 intptr_t SizeExecutable() { return size_executable_; }
975 969
976 // Returns maximum available bytes that the old space can have. 970 // Returns maximum available bytes that the old space can have.
977 intptr_t MaxAvailable() { 971 intptr_t MaxAvailable() {
978 return (Available() / Page::kPageSize) * Page::kObjectAreaSize; 972 return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
979 } 973 }
980 974
981 #ifdef DEBUG 975 #ifdef DEBUG
982 // Reports statistic info of the space. 976 // Reports statistic info of the space.
983 void ReportStatistics(); 977 void ReportStatistics();
984 #endif 978 #endif
985 979
986 MemoryChunk* AllocateChunk(intptr_t body_size, 980 MemoryChunk* AllocateChunk(intptr_t body_size,
987 Executability executable, 981 Executability executable,
988 Space* space); 982 Space* space);
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after
1021 void AddMemoryAllocationCallback(MemoryAllocationCallback callback, 1015 void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
1022 ObjectSpace space, 1016 ObjectSpace space,
1023 AllocationAction action); 1017 AllocationAction action);
1024 1018
1025 void RemoveMemoryAllocationCallback( 1019 void RemoveMemoryAllocationCallback(
1026 MemoryAllocationCallback callback); 1020 MemoryAllocationCallback callback);
1027 1021
1028 bool MemoryAllocationCallbackRegistered( 1022 bool MemoryAllocationCallbackRegistered(
1029 MemoryAllocationCallback callback); 1023 MemoryAllocationCallback callback);
1030 1024
1025 static int CodePageGuardStartOffset();
1026
1027 static int CodePageGuardSize();
1028
1029 static int CodePageAreaStartOffset();
1030
1031 static int CodePageAreaEndOffset();
1032
1033 static int CodePageAreaSize() {
1034 return CodePageAreaEndOffset() - CodePageAreaStartOffset();
1035 }
1036
1037 static bool CommitCodePage(VirtualMemory* vm, Address start, size_t size);
1038
1031 private: 1039 private:
1032 Isolate* isolate_; 1040 Isolate* isolate_;
1033 1041
1034 // Maximum space size in bytes. 1042 // Maximum space size in bytes.
1035 size_t capacity_; 1043 size_t capacity_;
1036 // Maximum subset of capacity_ that can be executable 1044 // Maximum subset of capacity_ that can be executable
1037 size_t capacity_executable_; 1045 size_t capacity_executable_;
1038 1046
1039 // Allocated space size in bytes. 1047 // Allocated space size in bytes.
1040 size_t size_; 1048 size_t size_;
(...skipping 332 matching lines...) Expand 10 before | Expand all | Expand 10 after
1373 intptr_t huge_size_; 1381 intptr_t huge_size_;
1374 }; 1382 };
1375 1383
1376 void CountFreeListItems(Page* p, SizeStats* sizes); 1384 void CountFreeListItems(Page* p, SizeStats* sizes);
1377 1385
1378 intptr_t EvictFreeListItems(Page* p); 1386 intptr_t EvictFreeListItems(Page* p);
1379 1387
1380 private: 1388 private:
1381 // The size range of blocks, in bytes. 1389 // The size range of blocks, in bytes.
1382 static const int kMinBlockSize = 3 * kPointerSize; 1390 static const int kMinBlockSize = 3 * kPointerSize;
1383 static const int kMaxBlockSize = Page::kMaxHeapObjectSize; 1391 static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;
1384 1392
1385 FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size); 1393 FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);
1386 1394
1387 FreeListNode* FindNodeFor(int size_in_bytes, int* node_size); 1395 FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
1388 1396
1389 PagedSpace* owner_; 1397 PagedSpace* owner_;
1390 Heap* heap_; 1398 Heap* heap_;
1391 1399
1392 // Total available bytes in all blocks on this free list. 1400 // Total available bytes in all blocks on this free list.
1393 int available_; 1401 int available_;
(...skipping 171 matching lines...) Expand 10 before | Expand all | Expand 10 after
1565 if (first == &anchor_) first = NULL; 1573 if (first == &anchor_) first = NULL;
1566 first_unswept_page_ = first; 1574 first_unswept_page_ = first;
1567 } 1575 }
1568 1576
1569 void IncrementUnsweptFreeBytes(int by) { 1577 void IncrementUnsweptFreeBytes(int by) {
1570 unswept_free_bytes_ += by; 1578 unswept_free_bytes_ += by;
1571 } 1579 }
1572 1580
1573 void IncreaseUnsweptFreeBytes(Page* p) { 1581 void IncreaseUnsweptFreeBytes(Page* p) {
1574 ASSERT(ShouldBeSweptLazily(p)); 1582 ASSERT(ShouldBeSweptLazily(p));
1575 unswept_free_bytes_ += (Page::kObjectAreaSize - p->LiveBytes()); 1583 unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
1576 } 1584 }
1577 1585
1578 void DecreaseUnsweptFreeBytes(Page* p) { 1586 void DecreaseUnsweptFreeBytes(Page* p) {
1579 ASSERT(ShouldBeSweptLazily(p)); 1587 ASSERT(ShouldBeSweptLazily(p));
1580 unswept_free_bytes_ -= (Page::kObjectAreaSize - p->LiveBytes()); 1588 unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
1581 } 1589 }
1582 1590
1583 bool AdvanceSweeper(intptr_t bytes_to_sweep); 1591 bool AdvanceSweeper(intptr_t bytes_to_sweep);
1584 1592
1585 bool IsSweepingComplete() { 1593 bool IsSweepingComplete() {
1586 return !first_unswept_page_->is_valid(); 1594 return !first_unswept_page_->is_valid();
1587 } 1595 }
1588 1596
1589 Page* FirstPage() { return anchor_.next_page(); } 1597 Page* FirstPage() { return anchor_.next_page(); }
1590 Page* LastPage() { return anchor_.prev_page(); } 1598 Page* LastPage() { return anchor_.prev_page(); }
1591 1599
1592 // Returns zero for pages that have so little fragmentation that it is not 1600 // Returns zero for pages that have so little fragmentation that it is not
1593 // worth defragmenting them. Otherwise a positive integer that gives an 1601 // worth defragmenting them. Otherwise a positive integer that gives an
1594 // estimate of fragmentation on an arbitrary scale. 1602 // estimate of fragmentation on an arbitrary scale.
1595 int Fragmentation(Page* p) { 1603 int Fragmentation(Page* p) {
1596 FreeList::SizeStats sizes; 1604 FreeList::SizeStats sizes;
1597 free_list_.CountFreeListItems(p, &sizes); 1605 free_list_.CountFreeListItems(p, &sizes);
1598 1606
1599 intptr_t ratio; 1607 intptr_t ratio;
1600 intptr_t ratio_threshold; 1608 intptr_t ratio_threshold;
1601 if (identity() == CODE_SPACE) { 1609 if (identity() == CODE_SPACE) {
1602 ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 / 1610 ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
1603 Page::kObjectAreaSize; 1611 AreaSize();
1604 ratio_threshold = 10; 1612 ratio_threshold = 10;
1605 } else { 1613 } else {
1606 ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 / 1614 ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
1607 Page::kObjectAreaSize; 1615 AreaSize();
1608 ratio_threshold = 15; 1616 ratio_threshold = 15;
1609 } 1617 }
1610 1618
1611 if (FLAG_trace_fragmentation) { 1619 if (FLAG_trace_fragmentation) {
1612 PrintF("%p [%d]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n", 1620 PrintF("%p [%d]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
1613 reinterpret_cast<void*>(p), 1621 reinterpret_cast<void*>(p),
1614 identity(), 1622 identity(),
1615 static_cast<int>(sizes.small_size_), 1623 static_cast<int>(sizes.small_size_),
1616 static_cast<double>(sizes.small_size_ * 100) / 1624 static_cast<double>(sizes.small_size_ * 100) /
1617 Page::kObjectAreaSize, 1625 AreaSize(),
1618 static_cast<int>(sizes.medium_size_), 1626 static_cast<int>(sizes.medium_size_),
1619 static_cast<double>(sizes.medium_size_ * 100) / 1627 static_cast<double>(sizes.medium_size_ * 100) /
1620 Page::kObjectAreaSize, 1628 AreaSize(),
1621 static_cast<int>(sizes.large_size_), 1629 static_cast<int>(sizes.large_size_),
1622 static_cast<double>(sizes.large_size_ * 100) / 1630 static_cast<double>(sizes.large_size_ * 100) /
1623 Page::kObjectAreaSize, 1631 AreaSize(),
1624 static_cast<int>(sizes.huge_size_), 1632 static_cast<int>(sizes.huge_size_),
1625 static_cast<double>(sizes.huge_size_ * 100) / 1633 static_cast<double>(sizes.huge_size_ * 100) /
1626 Page::kObjectAreaSize, 1634 AreaSize(),
1627 (ratio > ratio_threshold) ? "[fragmented]" : ""); 1635 (ratio > ratio_threshold) ? "[fragmented]" : "");
1628 } 1636 }
1629 1637
1630 if (FLAG_always_compact && sizes.Total() != Page::kObjectAreaSize) { 1638 if (FLAG_always_compact && sizes.Total() != AreaSize()) {
1631 return 1; 1639 return 1;
1632 } 1640 }
1633 if (ratio <= ratio_threshold) return 0; // Not fragmented. 1641 if (ratio <= ratio_threshold) return 0; // Not fragmented.
1634 1642
1635 return static_cast<int>(ratio - ratio_threshold); 1643 return static_cast<int>(ratio - ratio_threshold);
1636 } 1644 }
1637 1645
1638 void EvictEvacuationCandidatesFromFreeLists(); 1646 void EvictEvacuationCandidatesFromFreeLists();
1639 1647
1640 bool CanExpand(); 1648 bool CanExpand();
1641 1649
1642 // Returns the number of total pages in this space. 1650 // Returns the number of total pages in this space.
1643 int CountTotalPages(); 1651 int CountTotalPages();
1644 1652
1653 // Return size of allocatable area on a page in this space.
1654 inline int AreaSize() {
1655 return area_size_;
1656 }
1657
1645 protected: 1658 protected:
1659 int area_size_;
1660
1646 // Maximum capacity of this space. 1661 // Maximum capacity of this space.
1647 intptr_t max_capacity_; 1662 intptr_t max_capacity_;
1648 1663
1649 // Accounting information for this space. 1664 // Accounting information for this space.
1650 AllocationStats accounting_stats_; 1665 AllocationStats accounting_stats_;
1651 1666
1652 // The dummy page that anchors the double linked list of pages. 1667 // The dummy page that anchors the double linked list of pages.
1653 Page anchor_; 1668 Page anchor_;
1654 1669
1655 // The space's free list. 1670 // The space's free list.
(...skipping 81 matching lines...) Expand 10 before | Expand all | Expand 10 after
1737 1752
1738 class NewSpacePage : public MemoryChunk { 1753 class NewSpacePage : public MemoryChunk {
1739 public: 1754 public:
1740 // GC related flags copied from from-space to to-space when 1755 // GC related flags copied from from-space to to-space when
1741 // flipping semispaces. 1756 // flipping semispaces.
1742 static const intptr_t kCopyOnFlipFlagsMask = 1757 static const intptr_t kCopyOnFlipFlagsMask =
1743 (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) | 1758 (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
1744 (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) | 1759 (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
1745 (1 << MemoryChunk::SCAN_ON_SCAVENGE); 1760 (1 << MemoryChunk::SCAN_ON_SCAVENGE);
1746 1761
1762 static const int kAreaSize = Page::kNonCodeObjectAreaSize;
1763
1747 inline NewSpacePage* next_page() const { 1764 inline NewSpacePage* next_page() const {
1748 return static_cast<NewSpacePage*>(next_chunk()); 1765 return static_cast<NewSpacePage*>(next_chunk());
1749 } 1766 }
1750 1767
1751 inline void set_next_page(NewSpacePage* page) { 1768 inline void set_next_page(NewSpacePage* page) {
1752 set_next_chunk(page); 1769 set_next_chunk(page);
1753 } 1770 }
1754 1771
1755 inline NewSpacePage* prev_page() const { 1772 inline NewSpacePage* prev_page() const {
1756 return static_cast<NewSpacePage*>(prev_chunk()); 1773 return static_cast<NewSpacePage*>(prev_chunk());
(...skipping 92 matching lines...) Expand 10 before | Expand all | Expand 10 after
1849 bool GrowTo(int new_capacity); 1866 bool GrowTo(int new_capacity);
1850 1867
1851 // Shrinks the semispace to the new capacity. The new capacity 1868 // Shrinks the semispace to the new capacity. The new capacity
1852 // requested must be more than the amount of used memory in the 1869 // requested must be more than the amount of used memory in the
1853 // semispace and less than the current capacity. 1870 // semispace and less than the current capacity.
1854 bool ShrinkTo(int new_capacity); 1871 bool ShrinkTo(int new_capacity);
1855 1872
1856 // Returns the start address of the first page of the space. 1873 // Returns the start address of the first page of the space.
1857 Address space_start() { 1874 Address space_start() {
1858 ASSERT(anchor_.next_page() != &anchor_); 1875 ASSERT(anchor_.next_page() != &anchor_);
1859 return anchor_.next_page()->body(); 1876 return anchor_.next_page()->area_start();
1860 } 1877 }
1861 1878
1862 // Returns the start address of the current page of the space. 1879 // Returns the start address of the current page of the space.
1863 Address page_low() { 1880 Address page_low() {
1864 return current_page_->body(); 1881 return current_page_->area_start();
1865 } 1882 }
1866 1883
1867 // Returns one past the end address of the space. 1884 // Returns one past the end address of the space.
1868 Address space_end() { 1885 Address space_end() {
1869 return anchor_.prev_page()->body_limit(); 1886 return anchor_.prev_page()->area_end();
1870 } 1887 }
1871 1888
1872 // Returns one past the end address of the current page of the space. 1889 // Returns one past the end address of the current page of the space.
1873 Address page_high() { 1890 Address page_high() {
1874 return current_page_->body_limit(); 1891 return current_page_->area_end();
1875 } 1892 }
1876 1893
1877 bool AdvancePage() { 1894 bool AdvancePage() {
1878 NewSpacePage* next_page = current_page_->next_page(); 1895 NewSpacePage* next_page = current_page_->next_page();
1879 if (next_page == anchor()) return false; 1896 if (next_page == anchor()) return false;
1880 current_page_ = next_page; 1897 current_page_ = next_page;
1881 return true; 1898 return true;
1882 } 1899 }
1883 1900
1884 // Resets the space to using the first page. 1901 // Resets the space to using the first page.
(...skipping 115 matching lines...) Expand 10 before | Expand all | Expand 10 after
2000 SemiSpaceIterator(NewSpace* space, Address start); 2017 SemiSpaceIterator(NewSpace* space, Address start);
2001 // Iterate from one address to another in the same semi-space. 2018 // Iterate from one address to another in the same semi-space.
2002 SemiSpaceIterator(Address from, Address to); 2019 SemiSpaceIterator(Address from, Address to);
2003 2020
2004 HeapObject* Next() { 2021 HeapObject* Next() {
2005 if (current_ == limit_) return NULL; 2022 if (current_ == limit_) return NULL;
2006 if (NewSpacePage::IsAtEnd(current_)) { 2023 if (NewSpacePage::IsAtEnd(current_)) {
2007 NewSpacePage* page = NewSpacePage::FromLimit(current_); 2024 NewSpacePage* page = NewSpacePage::FromLimit(current_);
2008 page = page->next_page(); 2025 page = page->next_page();
2009 ASSERT(!page->is_anchor()); 2026 ASSERT(!page->is_anchor());
2010 current_ = page->body(); 2027 current_ = page->area_start();
2011 if (current_ == limit_) return NULL; 2028 if (current_ == limit_) return NULL;
2012 } 2029 }
2013 2030
2014 HeapObject* object = HeapObject::FromAddress(current_); 2031 HeapObject* object = HeapObject::FromAddress(current_);
2015 int size = (size_func_ == NULL) ? object->Size() : size_func_(object); 2032 int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
2016 2033
2017 current_ += size; 2034 current_ += size;
2018 return object; 2035 return object;
2019 } 2036 }
2020 2037
(...skipping 87 matching lines...) Expand 10 before | Expand all | Expand 10 after
2108 == reinterpret_cast<uintptr_t>(start_); 2125 == reinterpret_cast<uintptr_t>(start_);
2109 } 2126 }
2110 2127
2111 bool Contains(Object* o) { 2128 bool Contains(Object* o) {
2112 Address a = reinterpret_cast<Address>(o); 2129 Address a = reinterpret_cast<Address>(o);
2113 return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_; 2130 return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
2114 } 2131 }
2115 2132
2116 // Return the allocated bytes in the active semispace. 2133 // Return the allocated bytes in the active semispace.
2117 virtual intptr_t Size() { 2134 virtual intptr_t Size() {
2118 return pages_used_ * Page::kObjectAreaSize + 2135 return pages_used_ * NewSpacePage::kAreaSize +
2119 static_cast<int>(top() - to_space_.page_low()); 2136 static_cast<int>(top() - to_space_.page_low());
2120 } 2137 }
2121 2138
2122 // The same, but returning an int. We have to have the one that returns 2139 // The same, but returning an int. We have to have the one that returns
2123 // intptr_t because it is inherited, but if we know we are dealing with the 2140 // intptr_t because it is inherited, but if we know we are dealing with the
2124 // new space, which can't get as big as the other spaces then this is useful: 2141 // new space, which can't get as big as the other spaces then this is useful:
2125 int SizeAsInt() { return static_cast<int>(Size()); } 2142 int SizeAsInt() { return static_cast<int>(Size()); }
2126 2143
2127 // Return the current capacity of a semispace. 2144 // Return the current capacity of a semispace.
2128 intptr_t EffectiveCapacity() { 2145 intptr_t EffectiveCapacity() {
2129 SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity()); 2146 SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity());
2130 return (to_space_.Capacity() / Page::kPageSize) * Page::kObjectAreaSize; 2147 return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
2131 } 2148 }
2132 2149
2133 // Return the current capacity of a semispace. 2150 // Return the current capacity of a semispace.
2134 intptr_t Capacity() { 2151 intptr_t Capacity() {
2135 ASSERT(to_space_.Capacity() == from_space_.Capacity()); 2152 ASSERT(to_space_.Capacity() == from_space_.Capacity());
2136 return to_space_.Capacity(); 2153 return to_space_.Capacity();
2137 } 2154 }
2138 2155
2139 // Return the total amount of memory committed for new space. 2156 // Return the total amount of memory committed for new space.
2140 intptr_t CommittedMemory() { 2157 intptr_t CommittedMemory() {
(...skipping 196 matching lines...) Expand 10 before | Expand all | Expand 10 after
2337 OldSpace(Heap* heap, 2354 OldSpace(Heap* heap,
2338 intptr_t max_capacity, 2355 intptr_t max_capacity,
2339 AllocationSpace id, 2356 AllocationSpace id,
2340 Executability executable) 2357 Executability executable)
2341 : PagedSpace(heap, max_capacity, id, executable) { 2358 : PagedSpace(heap, max_capacity, id, executable) {
2342 page_extra_ = 0; 2359 page_extra_ = 0;
2343 } 2360 }
2344 2361
2345 // The limit of allocation for a page in this space. 2362 // The limit of allocation for a page in this space.
2346 virtual Address PageAllocationLimit(Page* page) { 2363 virtual Address PageAllocationLimit(Page* page) {
2347 return page->ObjectAreaEnd(); 2364 return page->area_end();
2348 } 2365 }
2349 2366
2350 public: 2367 public:
2351 TRACK_MEMORY("OldSpace") 2368 TRACK_MEMORY("OldSpace")
2352 }; 2369 };
2353 2370
2354 2371
2355 // For contiguous spaces, top should be in the space (or at the end) and limit 2372 // For contiguous spaces, top should be in the space (or at the end) and limit
2356 // should be the end of the space. 2373 // should be the end of the space.
2357 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \ 2374 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
2358 SLOW_ASSERT((space).page_low() <= (info).top \ 2375 SLOW_ASSERT((space).page_low() <= (info).top \
2359 && (info).top <= (space).page_high() \ 2376 && (info).top <= (space).page_high() \
2360 && (info).limit <= (space).page_high()) 2377 && (info).limit <= (space).page_high())
2361 2378
2362 2379
2363 // ----------------------------------------------------------------------------- 2380 // -----------------------------------------------------------------------------
2364 // Old space for objects of a fixed size 2381 // Old space for objects of a fixed size
2365 2382
2366 class FixedSpace : public PagedSpace { 2383 class FixedSpace : public PagedSpace {
2367 public: 2384 public:
2368 FixedSpace(Heap* heap, 2385 FixedSpace(Heap* heap,
2369 intptr_t max_capacity, 2386 intptr_t max_capacity,
2370 AllocationSpace id, 2387 AllocationSpace id,
2371 int object_size_in_bytes, 2388 int object_size_in_bytes,
2372 const char* name) 2389 const char* name)
2373 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE), 2390 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
2374 object_size_in_bytes_(object_size_in_bytes), 2391 object_size_in_bytes_(object_size_in_bytes),
2375 name_(name) { 2392 name_(name) {
2376 page_extra_ = Page::kObjectAreaSize % object_size_in_bytes; 2393 page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
2377 } 2394 }
2378 2395
2379 // The limit of allocation for a page in this space. 2396 // The limit of allocation for a page in this space.
2380 virtual Address PageAllocationLimit(Page* page) { 2397 virtual Address PageAllocationLimit(Page* page) {
2381 return page->ObjectAreaEnd() - page_extra_; 2398 return page->area_end() - page_extra_;
2382 } 2399 }
2383 2400
2384 int object_size_in_bytes() { return object_size_in_bytes_; } 2401 int object_size_in_bytes() { return object_size_in_bytes_; }
2385 2402
2386 // Prepares for a mark-compact GC. 2403 // Prepares for a mark-compact GC.
2387 virtual void PrepareForMarkCompact(); 2404 virtual void PrepareForMarkCompact();
2388 2405
2389 protected: 2406 protected:
2390 void ResetFreeList() { 2407 void ResetFreeList() {
2391 free_list_.Reset(); 2408 free_list_.Reset();
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after
2425 return (size / Map::kSize) * Map::kSize; 2442 return (size / Map::kSize) * Map::kSize;
2426 } 2443 }
2427 } 2444 }
2428 2445
2429 protected: 2446 protected:
2430 #ifdef DEBUG 2447 #ifdef DEBUG
2431 virtual void VerifyObject(HeapObject* obj); 2448 virtual void VerifyObject(HeapObject* obj);
2432 #endif 2449 #endif
2433 2450
2434 private: 2451 private:
2435 static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize; 2452 static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize;
2436 2453
2437 // Do map space compaction if there is a page gap. 2454 // Do map space compaction if there is a page gap.
2438 int CompactionThreshold() { 2455 int CompactionThreshold() {
2439 return kMapsPerPage * (max_map_space_pages_ - 1); 2456 return kMapsPerPage * (max_map_space_pages_ - 1);
2440 } 2457 }
2441 2458
2442 const int max_map_space_pages_; 2459 const int max_map_space_pages_;
2443 2460
2444 public: 2461 public:
2445 TRACK_MEMORY("MapSpace") 2462 TRACK_MEMORY("MapSpace")
(...skipping 210 matching lines...) Expand 10 before | Expand all | Expand 10 after
2656 } 2673 }
2657 // Must be small, since an iteration is used for lookup. 2674 // Must be small, since an iteration is used for lookup.
2658 static const int kMaxComments = 64; 2675 static const int kMaxComments = 64;
2659 }; 2676 };
2660 #endif 2677 #endif
2661 2678
2662 2679
2663 } } // namespace v8::internal 2680 } } // namespace v8::internal
2664 2681
2665 #endif // V8_SPACES_H_ 2682 #endif // V8_SPACES_H_
OLDNEW
« no previous file with comments | « src/serialize.cc ('k') | src/spaces.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698