OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 487 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
498 static const int kBodyOffset = | 498 static const int kBodyOffset = |
499 CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize)); | 499 CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize)); |
500 | 500 |
501 // The start offset of the object area in a page. Aligned to both maps and | 501 // The start offset of the object area in a page. Aligned to both maps and |
502 // code alignment to be suitable for both. Also aligned to 32 words because | 502 // code alignment to be suitable for both. Also aligned to 32 words because |
503 // the marking bitmap is arranged in 32 bit chunks. | 503 // the marking bitmap is arranged in 32 bit chunks. |
504 static const int kObjectStartAlignment = 32 * kPointerSize; | 504 static const int kObjectStartAlignment = 32 * kPointerSize; |
505 static const int kObjectStartOffset = kBodyOffset - 1 + | 505 static const int kObjectStartOffset = kBodyOffset - 1 + |
506 (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment); | 506 (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment); |
507 | 507 |
508 size_t size() const { return size_; } | 508 intptr_t size() const { return size_; } |
509 | 509 |
510 void set_size(size_t size) { | 510 void set_size(size_t size) { size_ = size; } |
511 size_ = size; | |
512 } | |
513 | 511 |
514 Executability executable() { | 512 Executability executable() { |
515 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; | 513 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; |
516 } | 514 } |
517 | 515 |
518 bool ContainsOnlyData() { | 516 bool ContainsOnlyData() { |
519 return IsFlagSet(CONTAINS_ONLY_DATA); | 517 return IsFlagSet(CONTAINS_ONLY_DATA); |
520 } | 518 } |
521 | 519 |
522 bool InNewSpace() { | 520 bool InNewSpace() { |
(...skipping 131 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
654 // Returns the next page in the chain of pages owned by a space. | 652 // Returns the next page in the chain of pages owned by a space. |
655 inline Page* next_page(); | 653 inline Page* next_page(); |
656 inline Page* prev_page(); | 654 inline Page* prev_page(); |
657 inline void set_next_page(Page* page); | 655 inline void set_next_page(Page* page); |
658 inline void set_prev_page(Page* page); | 656 inline void set_prev_page(Page* page); |
659 | 657 |
660 // Returns the start address of the object area in this page. | 658 // Returns the start address of the object area in this page. |
661 Address ObjectAreaStart() { return address() + kObjectStartOffset; } | 659 Address ObjectAreaStart() { return address() + kObjectStartOffset; } |
662 | 660 |
663 // Returns the end address (exclusive) of the object area in this page. | 661 // Returns the end address (exclusive) of the object area in this page. |
664 Address ObjectAreaEnd() { return address() + Page::kPageSize; } | 662 Address ObjectAreaEnd() { return address() + size(); } |
665 | 663 |
666 // Checks whether an address is page aligned. | 664 // Checks whether an address is page aligned. |
667 static bool IsAlignedToPageSize(Address a) { | 665 static bool IsAlignedToPageSize(Address a) { |
668 return 0 == (OffsetFrom(a) & kPageAlignmentMask); | 666 return 0 == (OffsetFrom(a) & kPageAlignmentMask); |
669 } | 667 } |
670 | 668 |
671 // Returns the offset of a given address to this page. | 669 // Returns the offset of a given address to this page. |
672 INLINE(int Offset(Address a)) { | 670 INLINE(int Offset(Address a)) { |
673 int offset = static_cast<int>(a - address()); | 671 int offset = static_cast<int>(a - address()); |
674 return offset; | 672 return offset; |
675 } | 673 } |
676 | 674 |
677 // Returns the address for a given offset to this page. | 675 // Returns the address for a given offset to this page. |
678 Address OffsetToAddress(int offset) { | 676 Address OffsetToAddress(int offset) { |
679 ASSERT_PAGE_OFFSET(offset); | 677 ASSERT_PAGE_OFFSET(offset); |
680 return address() + offset; | 678 return address() + offset; |
681 } | 679 } |
682 | 680 |
| 681 // Expand the committed area for pages that are small. |
| 682 void CommitMore(intptr_t space_needed); |
| 683 |
683 // --------------------------------------------------------------------- | 684 // --------------------------------------------------------------------- |
684 | 685 |
685 // Page size in bytes. This must be a multiple of the OS page size. | 686 // Page size in bytes. This must be a multiple of the OS page size. |
686 static const int kPageSize = 1 << kPageSizeBits; | 687 static const int kPageSize = 1 << kPageSizeBits; |
687 | 688 |
| 689 // For a 1Mbyte page, grow 64k at a time. |
| 690 static const int kGrowthUnit = 1 << (kPageSizeBits - 4); |
| 691 |
688 // Page size mask. | 692 // Page size mask. |
689 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; | 693 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; |
690 | 694 |
691 // Object area size in bytes. | 695 // Object area size in bytes. |
692 static const int kObjectAreaSize = kPageSize - kObjectStartOffset; | 696 static const int kObjectAreaSize = kPageSize - kObjectStartOffset; |
693 | 697 |
694 // Maximum object size that fits in a page. | 698 // Maximum object size that fits in a page. |
695 static const int kMaxHeapObjectSize = kObjectAreaSize; | 699 static const int kMaxHeapObjectSize = kObjectAreaSize; |
696 | 700 |
697 static const int kFirstUsedCell = | 701 static const int kFirstUsedCell = |
(...skipping 15 matching lines...) Expand all Loading... |
713 bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); } | 717 bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); } |
714 bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); } | 718 bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); } |
715 bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); } | 719 bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); } |
716 | 720 |
717 void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); } | 721 void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); } |
718 void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); } | 722 void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); } |
719 | 723 |
720 void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); } | 724 void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); } |
721 void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); } | 725 void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); } |
722 | 726 |
| 727 Address RoundUpToObjectAlignment(Address a); |
| 728 |
723 #ifdef DEBUG | 729 #ifdef DEBUG |
724 void Print(); | 730 void Print(); |
725 #endif // DEBUG | 731 #endif // DEBUG |
726 | 732 |
727 friend class MemoryAllocator; | 733 friend class MemoryAllocator; |
728 }; | 734 }; |
729 | 735 |
730 | 736 |
731 STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize); | 737 STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize); |
732 | 738 |
(...skipping 109 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
842 Isolate* isolate_; | 848 Isolate* isolate_; |
843 | 849 |
844 // The reserved range of virtual memory that all code objects are put in. | 850 // The reserved range of virtual memory that all code objects are put in. |
845 VirtualMemory* code_range_; | 851 VirtualMemory* code_range_; |
846 // Plain old data class, just a struct plus a constructor. | 852 // Plain old data class, just a struct plus a constructor. |
847 class FreeBlock { | 853 class FreeBlock { |
848 public: | 854 public: |
849 FreeBlock(Address start_arg, size_t size_arg) | 855 FreeBlock(Address start_arg, size_t size_arg) |
850 : start(start_arg), size(size_arg) { | 856 : start(start_arg), size(size_arg) { |
851 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); | 857 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); |
852 ASSERT(size >= static_cast<size_t>(Page::kPageSize)); | |
853 } | 858 } |
854 FreeBlock(void* start_arg, size_t size_arg) | 859 FreeBlock(void* start_arg, size_t size_arg) |
855 : start(static_cast<Address>(start_arg)), size(size_arg) { | 860 : start(static_cast<Address>(start_arg)), size(size_arg) { |
856 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); | 861 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); |
857 ASSERT(size >= static_cast<size_t>(Page::kPageSize)); | |
858 } | 862 } |
859 | 863 |
860 Address start; | 864 Address start; |
861 size_t size; | 865 size_t size; |
862 }; | 866 }; |
863 | 867 |
864 // Freed blocks of memory are added to the free list. When the allocation | 868 // Freed blocks of memory are added to the free list. When the allocation |
865 // list is exhausted, the free list is sorted and merged to make the new | 869 // list is exhausted, the free list is sorted and merged to make the new |
866 // allocation list. | 870 // allocation list. |
867 List<FreeBlock> free_list_; | 871 List<FreeBlock> free_list_; |
(...skipping 75 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
943 class MemoryAllocator { | 947 class MemoryAllocator { |
944 public: | 948 public: |
945 explicit MemoryAllocator(Isolate* isolate); | 949 explicit MemoryAllocator(Isolate* isolate); |
946 | 950 |
947 // Initializes its internal bookkeeping structures. | 951 // Initializes its internal bookkeeping structures. |
948 // Max capacity of the total space and executable memory limit. | 952 // Max capacity of the total space and executable memory limit. |
949 bool SetUp(intptr_t max_capacity, intptr_t capacity_executable); | 953 bool SetUp(intptr_t max_capacity, intptr_t capacity_executable); |
950 | 954 |
951 void TearDown(); | 955 void TearDown(); |
952 | 956 |
953 Page* AllocatePage(PagedSpace* owner, Executability executable); | 957 Page* AllocatePage(intptr_t object_area_size, |
| 958 PagedSpace* owner, |
| 959 Executability executable); |
954 | 960 |
955 LargePage* AllocateLargePage(intptr_t object_size, | 961 LargePage* AllocateLargePage(intptr_t object_size, |
956 Executability executable, | 962 Executability executable, |
957 Space* owner); | 963 Space* owner); |
958 | 964 |
959 void Free(MemoryChunk* chunk); | 965 void Free(MemoryChunk* chunk); |
960 | 966 |
961 // Returns the maximum available bytes of heaps. | 967 // Returns the maximum available bytes of heaps. |
962 intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; } | 968 intptr_t Available() { |
| 969 return capacity_ < memory_allocator_reserved_ ? |
| 970 0 : |
| 971 capacity_ - memory_allocator_reserved_; |
| 972 } |
963 | 973 |
964 // Returns allocated spaces in bytes. | 974 // Returns allocated spaces in bytes. |
965 intptr_t Size() { return size_; } | 975 intptr_t Size() { return memory_allocator_reserved_; } |
966 | 976 |
967 // Returns the maximum available executable bytes of heaps. | 977 // Returns the maximum available executable bytes of heaps. |
968 intptr_t AvailableExecutable() { | 978 intptr_t AvailableExecutable() { |
969 if (capacity_executable_ < size_executable_) return 0; | 979 if (capacity_executable_ < size_executable_) return 0; |
970 return capacity_executable_ - size_executable_; | 980 return capacity_executable_ - size_executable_; |
971 } | 981 } |
972 | 982 |
973 // Returns allocated executable spaces in bytes. | 983 // Returns allocated executable spaces in bytes. |
974 intptr_t SizeExecutable() { return size_executable_; } | 984 intptr_t SizeExecutable() { return size_executable_; } |
975 | 985 |
976 // Returns maximum available bytes that the old space can have. | 986 // Returns maximum available bytes that the old space can have. |
977 intptr_t MaxAvailable() { | 987 intptr_t MaxAvailable() { |
978 return (Available() / Page::kPageSize) * Page::kObjectAreaSize; | 988 return (Available() / Page::kPageSize) * Page::kObjectAreaSize; |
979 } | 989 } |
980 | 990 |
981 #ifdef DEBUG | 991 #ifdef DEBUG |
982 // Reports statistic info of the space. | 992 // Reports statistic info of the space. |
983 void ReportStatistics(); | 993 void ReportStatistics(); |
984 #endif | 994 #endif |
985 | 995 |
986 MemoryChunk* AllocateChunk(intptr_t body_size, | 996 MemoryChunk* AllocateChunk(intptr_t body_size, |
| 997 intptr_t committed_body_size, |
987 Executability executable, | 998 Executability executable, |
988 Space* space); | 999 Space* space); |
989 | 1000 |
990 Address ReserveAlignedMemory(size_t requested, | 1001 Address ReserveAlignedMemory(size_t requested, |
991 size_t alignment, | 1002 size_t alignment, |
992 VirtualMemory* controller); | 1003 VirtualMemory* controller); |
993 Address AllocateAlignedMemory(size_t requested, | 1004 Address AllocateAlignedMemory(size_t requested, |
| 1005 size_t committed, |
994 size_t alignment, | 1006 size_t alignment, |
995 Executability executable, | 1007 Executability executable, |
996 VirtualMemory* controller); | 1008 VirtualMemory* controller); |
997 | 1009 |
998 void FreeMemory(VirtualMemory* reservation, Executability executable); | 1010 void FreeMemory(VirtualMemory* reservation, Executability executable); |
999 void FreeMemory(Address addr, size_t size, Executability executable); | 1011 void FreeMemory(Address addr, size_t size, Executability executable); |
1000 | 1012 |
1001 // Commit a contiguous block of memory from the initial chunk. Assumes that | 1013 // Commit a contiguous block of memory from the initial chunk. Assumes that |
1002 // the address is not NULL, the size is greater than zero, and that the | 1014 // the address is not NULL, the size is greater than zero, and that the |
1003 // block is contained in the initial chunk. Returns true if it succeeded | 1015 // block is contained in the initial chunk. Returns true if it succeeded |
1004 // and false otherwise. | 1016 // and false otherwise. |
1005 bool CommitBlock(Address start, size_t size, Executability executable); | 1017 bool CommitBlock(Address start, size_t size, Executability executable); |
1006 | 1018 |
1007 // Uncommit a contiguous block of memory [start..(start+size)[. | 1019 // Uncommit a contiguous block of memory [start..(start+size)[. |
1008 // start is not NULL, the size is greater than zero, and the | 1020 // start is not NULL, the size is greater than zero, and the |
1009 // block is contained in the initial chunk. Returns true if it succeeded | 1021 // block is contained in the initial chunk. Returns true if it succeeded |
1010 // and false otherwise. | 1022 // and false otherwise. |
1011 bool UncommitBlock(Address start, size_t size); | 1023 bool UncommitBlock(Address start, size_t size); |
1012 | 1024 |
| 1025 void AllocationBookkeeping(Space* owner, |
| 1026 Address base, |
| 1027 intptr_t reserved_size, |
| 1028 intptr_t committed_size, |
| 1029 Executability executable); |
| 1030 |
1013 // Zaps a contiguous block of memory [start..(start+size)[ thus | 1031 // Zaps a contiguous block of memory [start..(start+size)[ thus |
1014 // filling it up with a recognizable non-NULL bit pattern. | 1032 // filling it up with a recognizable non-NULL bit pattern. |
1015 void ZapBlock(Address start, size_t size); | 1033 void ZapBlock(Address start, size_t size); |
1016 | 1034 |
1017 void PerformAllocationCallback(ObjectSpace space, | 1035 void PerformAllocationCallback(ObjectSpace space, |
1018 AllocationAction action, | 1036 AllocationAction action, |
1019 size_t size); | 1037 size_t size); |
1020 | 1038 |
1021 void AddMemoryAllocationCallback(MemoryAllocationCallback callback, | 1039 void AddMemoryAllocationCallback(MemoryAllocationCallback callback, |
1022 ObjectSpace space, | 1040 ObjectSpace space, |
1023 AllocationAction action); | 1041 AllocationAction action); |
1024 | 1042 |
1025 void RemoveMemoryAllocationCallback( | 1043 void RemoveMemoryAllocationCallback( |
1026 MemoryAllocationCallback callback); | 1044 MemoryAllocationCallback callback); |
1027 | 1045 |
1028 bool MemoryAllocationCallbackRegistered( | 1046 bool MemoryAllocationCallbackRegistered( |
1029 MemoryAllocationCallback callback); | 1047 MemoryAllocationCallback callback); |
1030 | 1048 |
1031 private: | 1049 private: |
1032 Isolate* isolate_; | 1050 Isolate* isolate_; |
1033 | 1051 |
1034 // Maximum space size in bytes. | 1052 // Maximum space size in bytes. |
1035 size_t capacity_; | 1053 size_t capacity_; |
1036 // Maximum subset of capacity_ that can be executable | 1054 // Maximum subset of capacity_ that can be executable |
1037 size_t capacity_executable_; | 1055 size_t capacity_executable_; |
1038 | 1056 |
1039 // Allocated space size in bytes. | 1057 // Allocated space size in bytes. |
1040 size_t size_; | 1058 size_t memory_allocator_reserved_; |
1041 // Allocated executable space size in bytes. | 1059 // Allocated executable space size in bytes. |
1042 size_t size_executable_; | 1060 size_t size_executable_; |
1043 | 1061 |
1044 struct MemoryAllocationCallbackRegistration { | 1062 struct MemoryAllocationCallbackRegistration { |
1045 MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback, | 1063 MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback, |
1046 ObjectSpace space, | 1064 ObjectSpace space, |
1047 AllocationAction action) | 1065 AllocationAction action) |
1048 : callback(callback), space(space), action(action) { | 1066 : callback(callback), space(space), action(action) { |
1049 } | 1067 } |
1050 MemoryAllocationCallback callback; | 1068 MemoryAllocationCallback callback; |
(...skipping 324 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1375 | 1393 |
1376 void CountFreeListItems(Page* p, SizeStats* sizes); | 1394 void CountFreeListItems(Page* p, SizeStats* sizes); |
1377 | 1395 |
1378 intptr_t EvictFreeListItems(Page* p); | 1396 intptr_t EvictFreeListItems(Page* p); |
1379 | 1397 |
1380 private: | 1398 private: |
1381 // The size range of blocks, in bytes. | 1399 // The size range of blocks, in bytes. |
1382 static const int kMinBlockSize = 3 * kPointerSize; | 1400 static const int kMinBlockSize = 3 * kPointerSize; |
1383 static const int kMaxBlockSize = Page::kMaxHeapObjectSize; | 1401 static const int kMaxBlockSize = Page::kMaxHeapObjectSize; |
1384 | 1402 |
1385 FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size); | 1403 FreeListNode* PickNodeFromList(FreeListNode** list, |
| 1404 int* node_size, |
| 1405 int minimum_size); |
1386 | 1406 |
1387 FreeListNode* FindNodeFor(int size_in_bytes, int* node_size); | 1407 FreeListNode* FindNodeFor(int size_in_bytes, int* node_size, Address limit); |
| 1408 FreeListNode* FindAbuttingNode(int size_in_bytes, |
| 1409 int* node_size, |
| 1410 Address limit, |
| 1411 FreeListNode** list_head); |
1388 | 1412 |
1389 PagedSpace* owner_; | 1413 PagedSpace* owner_; |
1390 Heap* heap_; | 1414 Heap* heap_; |
1391 | 1415 |
1392 // Total available bytes in all blocks on this free list. | 1416 // Total available bytes in all blocks on this free list. |
1393 int available_; | 1417 int available_; |
1394 | 1418 |
1395 static const int kSmallListMin = 0x20 * kPointerSize; | 1419 static const int kSmallListMin = 0x20 * kPointerSize; |
1396 static const int kSmallListMax = 0xff * kPointerSize; | 1420 static const int kSmallListMax = 0xff * kPointerSize; |
1397 static const int kMediumListMax = 0x7ff * kPointerSize; | 1421 static const int kMediumListMax = 0x7ff * kPointerSize; |
(...skipping 79 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1477 virtual intptr_t SizeOfObjects() { | 1501 virtual intptr_t SizeOfObjects() { |
1478 ASSERT(!IsSweepingComplete() || (unswept_free_bytes_ == 0)); | 1502 ASSERT(!IsSweepingComplete() || (unswept_free_bytes_ == 0)); |
1479 return Size() - unswept_free_bytes_ - (limit() - top()); | 1503 return Size() - unswept_free_bytes_ - (limit() - top()); |
1480 } | 1504 } |
1481 | 1505 |
1482 // Wasted bytes in this space. These are just the bytes that were thrown away | 1506 // Wasted bytes in this space. These are just the bytes that were thrown away |
1483 // due to being too small to use for allocation. They do not include the | 1507 // due to being too small to use for allocation. They do not include the |
1484 // free bytes that were not found at all due to lazy sweeping. | 1508 // free bytes that were not found at all due to lazy sweeping. |
1485 virtual intptr_t Waste() { return accounting_stats_.Waste(); } | 1509 virtual intptr_t Waste() { return accounting_stats_.Waste(); } |
1486 | 1510 |
| 1511 virtual int ObjectAlignment() { return kObjectAlignment; } |
| 1512 |
1487 // Returns the allocation pointer in this space. | 1513 // Returns the allocation pointer in this space. |
1488 Address top() { return allocation_info_.top; } | 1514 Address top() { return allocation_info_.top; } |
1489 Address limit() { return allocation_info_.limit; } | 1515 Address limit() { return allocation_info_.limit; } |
1490 | 1516 |
1491 // Allocate the requested number of bytes in the space if possible, return a | 1517 // Allocate the requested number of bytes in the space if possible, return a |
1492 // failure object if not. | 1518 // failure object if not. |
1493 MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes); | 1519 MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes); |
1494 | 1520 |
1495 virtual bool ReserveSpace(int bytes); | 1521 virtual bool ReserveSpace(int bytes); |
1496 | 1522 |
1497 // Give a block of memory to the space's free list. It might be added to | 1523 // Give a block of memory to the space's free list. It might be added to |
1498 // the free list or accounted as waste. | 1524 // the free list or accounted as waste. |
1499 // If add_to_freelist is false then just accounting stats are updated and | 1525 // If add_to_freelist is false then just accounting stats are updated and |
1500 // no attempt to add area to free list is made. | 1526 // no attempt to add area to free list is made. |
1501 int Free(Address start, int size_in_bytes) { | 1527 int AddToFreeLists(Address start, int size_in_bytes) { |
1502 int wasted = free_list_.Free(start, size_in_bytes); | 1528 int wasted = free_list_.Free(start, size_in_bytes); |
1503 accounting_stats_.DeallocateBytes(size_in_bytes - wasted); | 1529 accounting_stats_.DeallocateBytes(size_in_bytes - wasted); |
1504 return size_in_bytes - wasted; | 1530 return size_in_bytes - wasted; |
1505 } | 1531 } |
1506 | 1532 |
1507 // Set space allocation info. | 1533 // Set space allocation info. |
1508 void SetTop(Address top, Address limit) { | 1534 void SetTop(Address top, Address limit) { |
| 1535 ASSERT(top == NULL || top >= Page::FromAddress(top - 1)->ObjectAreaStart()); |
1509 ASSERT(top == limit || | 1536 ASSERT(top == limit || |
1510 Page::FromAddress(top) == Page::FromAddress(limit - 1)); | 1537 Page::FromAddress(top) == Page::FromAddress(limit - 1)); |
1511 allocation_info_.top = top; | 1538 allocation_info_.top = top; |
1512 allocation_info_.limit = limit; | 1539 allocation_info_.limit = limit; |
1513 } | 1540 } |
1514 | 1541 |
1515 void Allocate(int bytes) { | 1542 void Allocate(int bytes) { |
1516 accounting_stats_.AllocateBytes(bytes); | 1543 accounting_stats_.AllocateBytes(bytes); |
1517 } | 1544 } |
1518 | 1545 |
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1565 if (first == &anchor_) first = NULL; | 1592 if (first == &anchor_) first = NULL; |
1566 first_unswept_page_ = first; | 1593 first_unswept_page_ = first; |
1567 } | 1594 } |
1568 | 1595 |
1569 void IncrementUnsweptFreeBytes(int by) { | 1596 void IncrementUnsweptFreeBytes(int by) { |
1570 unswept_free_bytes_ += by; | 1597 unswept_free_bytes_ += by; |
1571 } | 1598 } |
1572 | 1599 |
1573 void IncreaseUnsweptFreeBytes(Page* p) { | 1600 void IncreaseUnsweptFreeBytes(Page* p) { |
1574 ASSERT(ShouldBeSweptLazily(p)); | 1601 ASSERT(ShouldBeSweptLazily(p)); |
1575 unswept_free_bytes_ += (Page::kObjectAreaSize - p->LiveBytes()); | 1602 unswept_free_bytes_ += |
| 1603 (p->ObjectAreaEnd() - p->ObjectAreaStart()) - p->LiveBytes(); |
1576 } | 1604 } |
1577 | 1605 |
1578 void DecreaseUnsweptFreeBytes(Page* p) { | 1606 void DecreaseUnsweptFreeBytes(Page* p) { |
1579 ASSERT(ShouldBeSweptLazily(p)); | 1607 ASSERT(ShouldBeSweptLazily(p)); |
1580 unswept_free_bytes_ -= (Page::kObjectAreaSize - p->LiveBytes()); | 1608 unswept_free_bytes_ -= |
| 1609 (p->ObjectAreaEnd() - p->ObjectAreaStart() - p->LiveBytes()); |
1581 } | 1610 } |
1582 | 1611 |
1583 bool AdvanceSweeper(intptr_t bytes_to_sweep); | 1612 bool AdvanceSweeper(intptr_t bytes_to_sweep); |
1584 | 1613 |
1585 bool IsSweepingComplete() { | 1614 bool IsSweepingComplete() { |
1586 return !first_unswept_page_->is_valid(); | 1615 return !first_unswept_page_->is_valid(); |
1587 } | 1616 } |
1588 | 1617 |
| 1618 inline bool HasAPage() { return anchor_.next_page() != &anchor_; } |
1589 Page* FirstPage() { return anchor_.next_page(); } | 1619 Page* FirstPage() { return anchor_.next_page(); } |
1590 Page* LastPage() { return anchor_.prev_page(); } | 1620 Page* LastPage() { return anchor_.prev_page(); } |
1591 | 1621 |
1592 // Returns zero for pages that have so little fragmentation that it is not | 1622 // Returns zero for pages that have so little fragmentation that it is not |
1593 // worth defragmenting them. Otherwise a positive integer that gives an | 1623 // worth defragmenting them. Otherwise a positive integer that gives an |
1594 // estimate of fragmentation on an arbitrary scale. | 1624 // estimate of fragmentation on an arbitrary scale. |
1595 int Fragmentation(Page* p) { | 1625 int Fragmentation(Page* p) { |
1596 FreeList::SizeStats sizes; | 1626 FreeList::SizeStats sizes; |
1597 free_list_.CountFreeListItems(p, &sizes); | 1627 free_list_.CountFreeListItems(p, &sizes); |
1598 | 1628 |
| 1629 intptr_t object_area_size = p->ObjectAreaEnd() - p->ObjectAreaStart(); |
| 1630 |
1599 intptr_t ratio; | 1631 intptr_t ratio; |
1600 intptr_t ratio_threshold; | 1632 intptr_t ratio_threshold; |
1601 if (identity() == CODE_SPACE) { | 1633 if (identity() == CODE_SPACE) { |
1602 ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 / | 1634 ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 / |
1603 Page::kObjectAreaSize; | 1635 object_area_size; |
1604 ratio_threshold = 10; | 1636 ratio_threshold = 10; |
1605 } else { | 1637 } else { |
1606 ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 / | 1638 ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 / |
1607 Page::kObjectAreaSize; | 1639 object_area_size; |
1608 ratio_threshold = 15; | 1640 ratio_threshold = 15; |
1609 } | 1641 } |
1610 | 1642 |
1611 if (FLAG_trace_fragmentation) { | 1643 if (FLAG_trace_fragmentation) { |
1612 PrintF("%p [%d]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n", | 1644 PrintF("%p [%d]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n", |
1613 reinterpret_cast<void*>(p), | 1645 reinterpret_cast<void*>(p), |
1614 identity(), | 1646 identity(), |
1615 static_cast<int>(sizes.small_size_), | 1647 static_cast<int>(sizes.small_size_), |
1616 static_cast<double>(sizes.small_size_ * 100) / | 1648 static_cast<double>(sizes.small_size_ * 100) / |
1617 Page::kObjectAreaSize, | 1649 object_area_size, |
1618 static_cast<int>(sizes.medium_size_), | 1650 static_cast<int>(sizes.medium_size_), |
1619 static_cast<double>(sizes.medium_size_ * 100) / | 1651 static_cast<double>(sizes.medium_size_ * 100) / |
1620 Page::kObjectAreaSize, | 1652 object_area_size, |
1621 static_cast<int>(sizes.large_size_), | 1653 static_cast<int>(sizes.large_size_), |
1622 static_cast<double>(sizes.large_size_ * 100) / | 1654 static_cast<double>(sizes.large_size_ * 100) / |
1623 Page::kObjectAreaSize, | 1655 object_area_size, |
1624 static_cast<int>(sizes.huge_size_), | 1656 static_cast<int>(sizes.huge_size_), |
1625 static_cast<double>(sizes.huge_size_ * 100) / | 1657 static_cast<double>(sizes.huge_size_ * 100) / |
1626 Page::kObjectAreaSize, | 1658 object_area_size, |
1627 (ratio > ratio_threshold) ? "[fragmented]" : ""); | 1659 (ratio > ratio_threshold) ? "[fragmented]" : ""); |
1628 } | 1660 } |
1629 | 1661 |
1630 if (FLAG_always_compact && sizes.Total() != Page::kObjectAreaSize) { | 1662 if (FLAG_always_compact && sizes.Total() != object_area_size) { |
1631 return 1; | 1663 return 1; |
1632 } | 1664 } |
1633 if (ratio <= ratio_threshold) return 0; // Not fragmented. | 1665 if (ratio <= ratio_threshold) return 0; // Not fragmented. |
1634 | 1666 |
1635 return static_cast<int>(ratio - ratio_threshold); | 1667 return static_cast<int>(ratio - ratio_threshold); |
1636 } | 1668 } |
1637 | 1669 |
1638 void EvictEvacuationCandidatesFromFreeLists(); | 1670 void EvictEvacuationCandidatesFromFreeLists(); |
1639 | 1671 |
1640 bool CanExpand(); | 1672 bool CanExpand(); |
(...skipping 10 matching lines...) Expand all Loading... |
1651 | 1683 |
1652 // The dummy page that anchors the double linked list of pages. | 1684 // The dummy page that anchors the double linked list of pages. |
1653 Page anchor_; | 1685 Page anchor_; |
1654 | 1686 |
1655 // The space's free list. | 1687 // The space's free list. |
1656 FreeList free_list_; | 1688 FreeList free_list_; |
1657 | 1689 |
1658 // Normal allocation information. | 1690 // Normal allocation information. |
1659 AllocationInfo allocation_info_; | 1691 AllocationInfo allocation_info_; |
1660 | 1692 |
1661 // Bytes of each page that cannot be allocated. Possibly non-zero | |
1662 // for pages in spaces with only fixed-size objects. Always zero | |
1663 // for pages in spaces with variable sized objects (those pages are | |
1664 // padded with free-list nodes). | |
1665 int page_extra_; | |
1666 | |
1667 bool was_swept_conservatively_; | 1693 bool was_swept_conservatively_; |
1668 | 1694 |
1669 // The first page to be swept when the lazy sweeper advances. Is set | 1695 // The first page to be swept when the lazy sweeper advances. Is set |
1670 // to NULL when all pages have been swept. | 1696 // to NULL when all pages have been swept. |
1671 Page* first_unswept_page_; | 1697 Page* first_unswept_page_; |
1672 | 1698 |
1673 // The number of free bytes which could be reclaimed by advancing the | 1699 // The number of free bytes which could be reclaimed by advancing the |
1674 // lazy sweeper. This is only an estimation because lazy sweeping is | 1700 // lazy sweeper. This is only an estimation because lazy sweeping is |
1675 // done conservatively. | 1701 // done conservatively. |
1676 intptr_t unswept_free_bytes_; | 1702 intptr_t unswept_free_bytes_; |
1677 | 1703 |
1678 // Expands the space by allocating a fixed number of pages. Returns false if | 1704 // Expands the space by allocating a page. Returns false if it cannot |
1679 // it cannot allocate requested number of pages from OS, or if the hard heap | 1705 // allocate a page from OS, or if the hard heap size limit has been hit. The |
1680 // size limit has been hit. | 1706 // new page will have at least enough committed space to satisfy the object |
1681 bool Expand(); | 1707 // size indicated by the allocation_size argument. |
| 1708 bool Expand(intptr_t allocation_size); |
1682 | 1709 |
1683 // Generic fast case allocation function that tries linear allocation at the | 1710 // Generic fast case allocation function that tries linear allocation at the |
1684 // address denoted by top in allocation_info_. | 1711 // address denoted by top in allocation_info_. |
1685 inline HeapObject* AllocateLinearly(int size_in_bytes); | 1712 inline HeapObject* AllocateLinearly(int size_in_bytes); |
1686 | 1713 |
1687 // Slow path of AllocateRaw. This function is space-dependent. | 1714 // Slow path of AllocateRaw. This function is space-dependent. |
1688 MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes); | 1715 MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes); |
1689 | 1716 |
1690 friend class PageIterator; | 1717 friend class PageIterator; |
1691 }; | 1718 }; |
(...skipping 134 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1826 public: | 1853 public: |
1827 // Constructor. | 1854 // Constructor. |
1828 SemiSpace(Heap* heap, SemiSpaceId semispace) | 1855 SemiSpace(Heap* heap, SemiSpaceId semispace) |
1829 : Space(heap, NEW_SPACE, NOT_EXECUTABLE), | 1856 : Space(heap, NEW_SPACE, NOT_EXECUTABLE), |
1830 start_(NULL), | 1857 start_(NULL), |
1831 age_mark_(NULL), | 1858 age_mark_(NULL), |
1832 id_(semispace), | 1859 id_(semispace), |
1833 anchor_(this), | 1860 anchor_(this), |
1834 current_page_(NULL) { } | 1861 current_page_(NULL) { } |
1835 | 1862 |
1836 // Sets up the semispace using the given chunk. | 1863 // Sets up the semispace using the given chunk. After this, call Commit() |
| 1864 // to make the semispace usable. |
1837 void SetUp(Address start, int initial_capacity, int maximum_capacity); | 1865 void SetUp(Address start, int initial_capacity, int maximum_capacity); |
1838 | 1866 |
1839 // Tear down the space. Heap memory was not allocated by the space, so it | 1867 // Tear down the space. Heap memory was not allocated by the space, so it |
1840 // is not deallocated here. | 1868 // is not deallocated here. |
1841 void TearDown(); | 1869 void TearDown(); |
1842 | 1870 |
1843 // True if the space has been set up but not torn down. | 1871 // True if the space has been set up but not torn down. |
1844 bool HasBeenSetUp() { return start_ != NULL; } | 1872 bool HasBeenSetUp() { return start_ != NULL; } |
1845 | 1873 |
1846 // Grow the semispace to the new capacity. The new capacity | 1874 // Grow the semispace to the new capacity. The new capacity |
(...skipping 484 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2331 // Old object space (excluding map objects) | 2359 // Old object space (excluding map objects) |
2332 | 2360 |
2333 class OldSpace : public PagedSpace { | 2361 class OldSpace : public PagedSpace { |
2334 public: | 2362 public: |
2335 // Creates an old space object with a given maximum capacity. | 2363 // Creates an old space object with a given maximum capacity. |
2336 // The constructor does not allocate pages from OS. | 2364 // The constructor does not allocate pages from OS. |
2337 OldSpace(Heap* heap, | 2365 OldSpace(Heap* heap, |
2338 intptr_t max_capacity, | 2366 intptr_t max_capacity, |
2339 AllocationSpace id, | 2367 AllocationSpace id, |
2340 Executability executable) | 2368 Executability executable) |
2341 : PagedSpace(heap, max_capacity, id, executable) { | 2369 : PagedSpace(heap, max_capacity, id, executable) { } |
2342 page_extra_ = 0; | |
2343 } | |
2344 | |
2345 // The limit of allocation for a page in this space. | |
2346 virtual Address PageAllocationLimit(Page* page) { | |
2347 return page->ObjectAreaEnd(); | |
2348 } | |
2349 | 2370 |
2350 public: | 2371 public: |
2351 TRACK_MEMORY("OldSpace") | 2372 TRACK_MEMORY("OldSpace") |
2352 }; | 2373 }; |
2353 | 2374 |
2354 | 2375 |
2355 // For contiguous spaces, top should be in the space (or at the end) and limit | 2376 // For contiguous spaces, top should be in the space (or at the end) and limit |
2356 // should be the end of the space. | 2377 // should be the end of the space. |
2357 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \ | 2378 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \ |
2358 SLOW_ASSERT((space).page_low() <= (info).top \ | 2379 SLOW_ASSERT((space).page_low() <= (info).top \ |
2359 && (info).top <= (space).page_high() \ | 2380 && (info).top <= (space).page_high() \ |
2360 && (info).limit <= (space).page_high()) | 2381 && (info).limit <= (space).page_high()) |
2361 | 2382 |
2362 | 2383 |
2363 // ----------------------------------------------------------------------------- | 2384 // ----------------------------------------------------------------------------- |
2364 // Old space for objects of a fixed size | 2385 // Old space for objects of a fixed size |
2365 | 2386 |
2366 class FixedSpace : public PagedSpace { | 2387 class FixedSpace : public PagedSpace { |
2367 public: | 2388 public: |
2368 FixedSpace(Heap* heap, | 2389 FixedSpace(Heap* heap, |
2369 intptr_t max_capacity, | 2390 intptr_t max_capacity, |
2370 AllocationSpace id, | 2391 AllocationSpace id, |
2371 int object_size_in_bytes, | 2392 int object_size_in_bytes, |
2372 const char* name) | 2393 const char* name) |
2373 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE), | 2394 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE), |
2374 object_size_in_bytes_(object_size_in_bytes), | 2395 object_size_in_bytes_(object_size_in_bytes), |
2375 name_(name) { | 2396 name_(name) { } |
2376 page_extra_ = Page::kObjectAreaSize % object_size_in_bytes; | |
2377 } | |
2378 | |
2379 // The limit of allocation for a page in this space. | |
2380 virtual Address PageAllocationLimit(Page* page) { | |
2381 return page->ObjectAreaEnd() - page_extra_; | |
2382 } | |
2383 | 2397 |
2384 int object_size_in_bytes() { return object_size_in_bytes_; } | 2398 int object_size_in_bytes() { return object_size_in_bytes_; } |
2385 | 2399 |
| 2400 virtual int ObjectAlignment() { return object_size_in_bytes_; } |
| 2401 |
2386 // Prepares for a mark-compact GC. | 2402 // Prepares for a mark-compact GC. |
2387 virtual void PrepareForMarkCompact(); | 2403 virtual void PrepareForMarkCompact(); |
2388 | 2404 |
2389 protected: | 2405 protected: |
2390 void ResetFreeList() { | 2406 void ResetFreeList() { |
2391 free_list_.Reset(); | 2407 free_list_.Reset(); |
2392 } | 2408 } |
2393 | 2409 |
2394 private: | 2410 private: |
2395 // The size of objects in this space. | 2411 // The size of objects in this space. |
(...skipping 260 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2656 } | 2672 } |
2657 // Must be small, since an iteration is used for lookup. | 2673 // Must be small, since an iteration is used for lookup. |
2658 static const int kMaxComments = 64; | 2674 static const int kMaxComments = 64; |
2659 }; | 2675 }; |
2660 #endif | 2676 #endif |
2661 | 2677 |
2662 | 2678 |
2663 } } // namespace v8::internal | 2679 } } // namespace v8::internal |
2664 | 2680 |
2665 #endif // V8_SPACES_H_ | 2681 #endif // V8_SPACES_H_ |
OLD | NEW |