Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(992)

Side by Side Diff: src/spaces.h

Issue 9289047: Reduce boot-up memory use of V8. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 8 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« src/heap.cc ('K') | « src/snapshot.h ('k') | src/spaces.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 487 matching lines...) Expand 10 before | Expand all | Expand 10 after
498 static const int kBodyOffset = 498 static const int kBodyOffset =
499 CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize)); 499 CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize));
500 500
501 // The start offset of the object area in a page. Aligned to both maps and 501 // The start offset of the object area in a page. Aligned to both maps and
502 // code alignment to be suitable for both. Also aligned to 32 words because 502 // code alignment to be suitable for both. Also aligned to 32 words because
503 // the marking bitmap is arranged in 32 bit chunks. 503 // the marking bitmap is arranged in 32 bit chunks.
504 static const int kObjectStartAlignment = 32 * kPointerSize; 504 static const int kObjectStartAlignment = 32 * kPointerSize;
505 static const int kObjectStartOffset = kBodyOffset - 1 + 505 static const int kObjectStartOffset = kBodyOffset - 1 +
506 (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment); 506 (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
507 507
508 size_t size() const { return size_; } 508 intptr_t size() const { return size_; }
509 509
510 void set_size(size_t size) { 510 void set_size(size_t size) { size_ = size; }
511 size_ = size;
512 }
513 511
514 Executability executable() { 512 Executability executable() {
515 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; 513 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
516 } 514 }
517 515
518 bool ContainsOnlyData() { 516 bool ContainsOnlyData() {
519 return IsFlagSet(CONTAINS_ONLY_DATA); 517 return IsFlagSet(CONTAINS_ONLY_DATA);
520 } 518 }
521 519
522 bool InNewSpace() { 520 bool InNewSpace() {
(...skipping 131 matching lines...) Expand 10 before | Expand all | Expand 10 after
654 // Returns the next page in the chain of pages owned by a space. 652 // Returns the next page in the chain of pages owned by a space.
655 inline Page* next_page(); 653 inline Page* next_page();
656 inline Page* prev_page(); 654 inline Page* prev_page();
657 inline void set_next_page(Page* page); 655 inline void set_next_page(Page* page);
658 inline void set_prev_page(Page* page); 656 inline void set_prev_page(Page* page);
659 657
660 // Returns the start address of the object area in this page. 658 // Returns the start address of the object area in this page.
661 Address ObjectAreaStart() { return address() + kObjectStartOffset; } 659 Address ObjectAreaStart() { return address() + kObjectStartOffset; }
662 660
663 // Returns the end address (exclusive) of the object area in this page. 661 // Returns the end address (exclusive) of the object area in this page.
664 Address ObjectAreaEnd() { return address() + Page::kPageSize; } 662 Address ObjectAreaEnd() { return address() + size(); }
665 663
666 // Checks whether an address is page aligned. 664 // Checks whether an address is page aligned.
667 static bool IsAlignedToPageSize(Address a) { 665 static bool IsAlignedToPageSize(Address a) {
668 return 0 == (OffsetFrom(a) & kPageAlignmentMask); 666 return 0 == (OffsetFrom(a) & kPageAlignmentMask);
669 } 667 }
670 668
671 // Returns the offset of a given address to this page. 669 // Returns the offset of a given address to this page.
672 INLINE(int Offset(Address a)) { 670 INLINE(int Offset(Address a)) {
673 int offset = static_cast<int>(a - address()); 671 int offset = static_cast<int>(a - address());
674 return offset; 672 return offset;
675 } 673 }
676 674
677 // Returns the address for a given offset to this page. 675 // Returns the address for a given offset to this page.
678 Address OffsetToAddress(int offset) { 676 Address OffsetToAddress(int offset) {
679 ASSERT_PAGE_OFFSET(offset); 677 ASSERT_PAGE_OFFSET(offset);
680 return address() + offset; 678 return address() + offset;
681 } 679 }
682 680
681 // Expand the committed area for pages that are small.
682 void CommitMore(intptr_t space_needed);
683
683 // --------------------------------------------------------------------- 684 // ---------------------------------------------------------------------
684 685
685 // Page size in bytes. This must be a multiple of the OS page size. 686 // Page size in bytes. This must be a multiple of the OS page size.
686 static const int kPageSize = 1 << kPageSizeBits; 687 static const int kPageSize = 1 << kPageSizeBits;
687 688
689 // For a 1Mbyte page grow 64k at a time.
690 static const int kGrowthUnit = 1 << (kPageSizeBits - 4);
691
688 // Page size mask. 692 // Page size mask.
689 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; 693 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
690 694
691 // Object area size in bytes. 695 // Object area size in bytes.
692 static const int kObjectAreaSize = kPageSize - kObjectStartOffset; 696 static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
693 697
694 // Maximum object size that fits in a page. 698 // Maximum object size that fits in a page.
695 static const int kMaxHeapObjectSize = kObjectAreaSize; 699 static const int kMaxHeapObjectSize = kObjectAreaSize;
696 700
697 static const int kFirstUsedCell = 701 static const int kFirstUsedCell =
(...skipping 144 matching lines...) Expand 10 before | Expand all | Expand 10 after
842 Isolate* isolate_; 846 Isolate* isolate_;
843 847
844 // The reserved range of virtual memory that all code objects are put in. 848 // The reserved range of virtual memory that all code objects are put in.
845 VirtualMemory* code_range_; 849 VirtualMemory* code_range_;
846 // Plain old data class, just a struct plus a constructor. 850 // Plain old data class, just a struct plus a constructor.
847 class FreeBlock { 851 class FreeBlock {
848 public: 852 public:
849 FreeBlock(Address start_arg, size_t size_arg) 853 FreeBlock(Address start_arg, size_t size_arg)
850 : start(start_arg), size(size_arg) { 854 : start(start_arg), size(size_arg) {
851 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); 855 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
852 ASSERT(size >= static_cast<size_t>(Page::kPageSize));
853 } 856 }
854 FreeBlock(void* start_arg, size_t size_arg) 857 FreeBlock(void* start_arg, size_t size_arg)
855 : start(static_cast<Address>(start_arg)), size(size_arg) { 858 : start(static_cast<Address>(start_arg)), size(size_arg) {
856 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); 859 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
857 ASSERT(size >= static_cast<size_t>(Page::kPageSize));
858 } 860 }
859 861
860 Address start; 862 Address start;
861 size_t size; 863 size_t size;
862 }; 864 };
863 865
864 // Freed blocks of memory are added to the free list. When the allocation 866 // Freed blocks of memory are added to the free list. When the allocation
865 // list is exhausted, the free list is sorted and merged to make the new 867 // list is exhausted, the free list is sorted and merged to make the new
866 // allocation list. 868 // allocation list.
867 List<FreeBlock> free_list_; 869 List<FreeBlock> free_list_;
(...skipping 75 matching lines...) Expand 10 before | Expand all | Expand 10 after
943 class MemoryAllocator { 945 class MemoryAllocator {
944 public: 946 public:
945 explicit MemoryAllocator(Isolate* isolate); 947 explicit MemoryAllocator(Isolate* isolate);
946 948
947 // Initializes its internal bookkeeping structures. 949 // Initializes its internal bookkeeping structures.
948 // Max capacity of the total space and executable memory limit. 950 // Max capacity of the total space and executable memory limit.
949 bool SetUp(intptr_t max_capacity, intptr_t capacity_executable); 951 bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);
950 952
951 void TearDown(); 953 void TearDown();
952 954
953 Page* AllocatePage(PagedSpace* owner, Executability executable); 955 Page* AllocatePage(intptr_t object_area_size,
956 PagedSpace* owner,
957 Executability executable);
954 958
955 LargePage* AllocateLargePage(intptr_t object_size, 959 LargePage* AllocateLargePage(intptr_t object_size,
956 Executability executable, 960 Executability executable,
957 Space* owner); 961 Space* owner);
958 962
959 void Free(MemoryChunk* chunk); 963 void Free(MemoryChunk* chunk);
960 964
961 // Returns the maximum available bytes of heaps. 965 // Returns the maximum available bytes of heaps.
962 intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; } 966 intptr_t Available() {
967 return capacity_ < memory_allocator_reserved_ ?
968 0 :
969 capacity_ - memory_allocator_reserved_;
970 }
963 971
964 // Returns allocated spaces in bytes. 972 // Returns allocated spaces in bytes.
965 intptr_t Size() { return size_; } 973 intptr_t Size() { return memory_allocator_reserved_; }
966 974
967 // Returns the maximum available executable bytes of heaps. 975 // Returns the maximum available executable bytes of heaps.
968 intptr_t AvailableExecutable() { 976 intptr_t AvailableExecutable() {
969 if (capacity_executable_ < size_executable_) return 0; 977 if (capacity_executable_ < size_executable_) return 0;
970 return capacity_executable_ - size_executable_; 978 return capacity_executable_ - size_executable_;
971 } 979 }
972 980
973 // Returns allocated executable spaces in bytes. 981 // Returns allocated executable spaces in bytes.
974 intptr_t SizeExecutable() { return size_executable_; } 982 intptr_t SizeExecutable() { return size_executable_; }
975 983
976 // Returns maximum available bytes that the old space can have. 984 // Returns maximum available bytes that the old space can have.
977 intptr_t MaxAvailable() { 985 intptr_t MaxAvailable() {
978 return (Available() / Page::kPageSize) * Page::kObjectAreaSize; 986 return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
979 } 987 }
980 988
981 #ifdef DEBUG 989 #ifdef DEBUG
982 // Reports statistic info of the space. 990 // Reports statistic info of the space.
983 void ReportStatistics(); 991 void ReportStatistics();
984 #endif 992 #endif
985 993
986 MemoryChunk* AllocateChunk(intptr_t body_size, 994 MemoryChunk* AllocateChunk(intptr_t body_size,
995 intptr_t committed_body_size,
987 Executability executable, 996 Executability executable,
988 Space* space); 997 Space* space);
989 998
990 Address ReserveAlignedMemory(size_t requested, 999 Address ReserveAlignedMemory(size_t requested,
991 size_t alignment, 1000 size_t alignment,
992 VirtualMemory* controller); 1001 VirtualMemory* controller);
993 Address AllocateAlignedMemory(size_t requested, 1002 Address AllocateAlignedMemory(size_t requested,
1003 size_t committed,
994 size_t alignment, 1004 size_t alignment,
995 Executability executable, 1005 Executability executable,
996 VirtualMemory* controller); 1006 VirtualMemory* controller);
997 1007
998 void FreeMemory(VirtualMemory* reservation, Executability executable); 1008 void FreeMemory(VirtualMemory* reservation, Executability executable);
999 void FreeMemory(Address addr, size_t size, Executability executable); 1009 void FreeMemory(Address addr, size_t size, Executability executable);
1000 1010
1001 // Commit a contiguous block of memory from the initial chunk. Assumes that 1011 // Commit a contiguous block of memory from the initial chunk. Assumes that
1002 // the address is not NULL, the size is greater than zero, and that the 1012 // the address is not NULL, the size is greater than zero, and that the
1003 // block is contained in the initial chunk. Returns true if it succeeded 1013 // block is contained in the initial chunk. Returns true if it succeeded
1004 // and false otherwise. 1014 // and false otherwise.
1005 bool CommitBlock(Address start, size_t size, Executability executable); 1015 bool CommitBlock(Address start, size_t size, Executability executable);
1006 1016
1007 // Uncommit a contiguous block of memory [start..(start+size)[. 1017 // Uncommit a contiguous block of memory [start..(start+size)[.
1008 // start is not NULL, the size is greater than zero, and the 1018 // start is not NULL, the size is greater than zero, and the
1009 // block is contained in the initial chunk. Returns true if it succeeded 1019 // block is contained in the initial chunk. Returns true if it succeeded
1010 // and false otherwise. 1020 // and false otherwise.
1011 bool UncommitBlock(Address start, size_t size); 1021 bool UncommitBlock(Address start, size_t size);
1012 1022
1023 void AllocationBookkeeping(Space* owner,
1024 Address base,
1025 intptr_t reserved_size,
1026 intptr_t committed_size,
1027 Executability executable);
1028
1013 // Zaps a contiguous block of memory [start..(start+size)[ thus 1029 // Zaps a contiguous block of memory [start..(start+size)[ thus
1014 // filling it up with a recognizable non-NULL bit pattern. 1030 // filling it up with a recognizable non-NULL bit pattern.
1015 void ZapBlock(Address start, size_t size); 1031 void ZapBlock(Address start, size_t size);
1016 1032
1017 void PerformAllocationCallback(ObjectSpace space, 1033 void PerformAllocationCallback(ObjectSpace space,
1018 AllocationAction action, 1034 AllocationAction action,
1019 size_t size); 1035 size_t size);
1020 1036
1021 void AddMemoryAllocationCallback(MemoryAllocationCallback callback, 1037 void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
1022 ObjectSpace space, 1038 ObjectSpace space,
1023 AllocationAction action); 1039 AllocationAction action);
1024 1040
1025 void RemoveMemoryAllocationCallback( 1041 void RemoveMemoryAllocationCallback(
1026 MemoryAllocationCallback callback); 1042 MemoryAllocationCallback callback);
1027 1043
1028 bool MemoryAllocationCallbackRegistered( 1044 bool MemoryAllocationCallbackRegistered(
1029 MemoryAllocationCallback callback); 1045 MemoryAllocationCallback callback);
1030 1046
1031 private: 1047 private:
1032 Isolate* isolate_; 1048 Isolate* isolate_;
1033 1049
1034 // Maximum space size in bytes. 1050 // Maximum space size in bytes.
1035 size_t capacity_; 1051 size_t capacity_;
1036 // Maximum subset of capacity_ that can be executable 1052 // Maximum subset of capacity_ that can be executable
1037 size_t capacity_executable_; 1053 size_t capacity_executable_;
1038 1054
1039 // Allocated space size in bytes. 1055 // Allocated space size in bytes.
1040 size_t size_; 1056 size_t memory_allocator_reserved_;
1041 // Allocated executable space size in bytes. 1057 // Allocated executable space size in bytes.
1042 size_t size_executable_; 1058 size_t size_executable_;
1043 1059
1044 struct MemoryAllocationCallbackRegistration { 1060 struct MemoryAllocationCallbackRegistration {
1045 MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback, 1061 MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
1046 ObjectSpace space, 1062 ObjectSpace space,
1047 AllocationAction action) 1063 AllocationAction action)
1048 : callback(callback), space(space), action(action) { 1064 : callback(callback), space(space), action(action) {
1049 } 1065 }
1050 MemoryAllocationCallback callback; 1066 MemoryAllocationCallback callback;
(...skipping 324 matching lines...) Expand 10 before | Expand all | Expand 10 after
1375 1391
1376 void CountFreeListItems(Page* p, SizeStats* sizes); 1392 void CountFreeListItems(Page* p, SizeStats* sizes);
1377 1393
1378 intptr_t EvictFreeListItems(Page* p); 1394 intptr_t EvictFreeListItems(Page* p);
1379 1395
1380 private: 1396 private:
1381 // The size range of blocks, in bytes. 1397 // The size range of blocks, in bytes.
1382 static const int kMinBlockSize = 3 * kPointerSize; 1398 static const int kMinBlockSize = 3 * kPointerSize;
1383 static const int kMaxBlockSize = Page::kMaxHeapObjectSize; 1399 static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
1384 1400
1385 FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size); 1401 FreeListNode* PickNodeFromList(FreeListNode** list,
1402 int* node_size,
1403 int minimum_size);
1386 1404
1387 FreeListNode* FindNodeFor(int size_in_bytes, int* node_size); 1405 FreeListNode* FindNodeFor(int size_in_bytes, int* node_size, Address limit);
1406 FreeListNode* FindAbuttingNode(int size_in_bytes,
1407 int* node_size,
1408 Address limit,
1409 FreeListNode** list_head);
1388 1410
1389 PagedSpace* owner_; 1411 PagedSpace* owner_;
1390 Heap* heap_; 1412 Heap* heap_;
1391 1413
1392 // Total available bytes in all blocks on this free list. 1414 // Total available bytes in all blocks on this free list.
1393 int available_; 1415 int available_;
1394 1416
1395 static const int kSmallListMin = 0x20 * kPointerSize; 1417 static const int kSmallListMin = 0x20 * kPointerSize;
1396 static const int kSmallListMax = 0xff * kPointerSize; 1418 static const int kSmallListMax = 0xff * kPointerSize;
1397 static const int kMediumListMax = 0x7ff * kPointerSize; 1419 static const int kMediumListMax = 0x7ff * kPointerSize;
(...skipping 79 matching lines...) Expand 10 before | Expand all | Expand 10 after
1477 virtual intptr_t SizeOfObjects() { 1499 virtual intptr_t SizeOfObjects() {
1478 ASSERT(!IsSweepingComplete() || (unswept_free_bytes_ == 0)); 1500 ASSERT(!IsSweepingComplete() || (unswept_free_bytes_ == 0));
1479 return Size() - unswept_free_bytes_ - (limit() - top()); 1501 return Size() - unswept_free_bytes_ - (limit() - top());
1480 } 1502 }
1481 1503
1482 // Wasted bytes in this space. These are just the bytes that were thrown away 1504 // Wasted bytes in this space. These are just the bytes that were thrown away
1483 // due to being too small to use for allocation. They do not include the 1505 // due to being too small to use for allocation. They do not include the
1484 // free bytes that were not found at all due to lazy sweeping. 1506 // free bytes that were not found at all due to lazy sweeping.
1485 virtual intptr_t Waste() { return accounting_stats_.Waste(); } 1507 virtual intptr_t Waste() { return accounting_stats_.Waste(); }
1486 1508
1509 virtual int ObjectAlignment() { return kObjectAlignment; }
1510
1487 // Returns the allocation pointer in this space. 1511 // Returns the allocation pointer in this space.
1488 Address top() { return allocation_info_.top; } 1512 Address top() { return allocation_info_.top; }
1489 Address limit() { return allocation_info_.limit; } 1513 Address limit() { return allocation_info_.limit; }
1490 1514
1491 // Allocate the requested number of bytes in the space if possible, return a 1515 // Allocate the requested number of bytes in the space if possible, return a
1492 // failure object if not. 1516 // failure object if not.
1493 MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes); 1517 MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
1494 1518
1495 virtual bool ReserveSpace(int bytes); 1519 virtual bool ReserveSpace(int bytes);
1496 1520
1497 // Give a block of memory to the space's free list. It might be added to 1521 // Give a block of memory to the space's free list. It might be added to
1498 // the free list or accounted as waste. 1522 // the free list or accounted as waste.
1499 // If add_to_freelist is false then just accounting stats are updated and 1523 // If add_to_freelist is false then just accounting stats are updated and
1500 // no attempt to add area to free list is made. 1524 // no attempt to add area to free list is made.
1501 int Free(Address start, int size_in_bytes) { 1525 int AddToFreeLists(Address start, int size_in_bytes) {
1502 int wasted = free_list_.Free(start, size_in_bytes); 1526 int wasted = free_list_.Free(start, size_in_bytes);
1503 accounting_stats_.DeallocateBytes(size_in_bytes - wasted); 1527 accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
1504 return size_in_bytes - wasted; 1528 return size_in_bytes - wasted;
1505 } 1529 }
1506 1530
1507 // Set space allocation info. 1531 // Set space allocation info.
1508 void SetTop(Address top, Address limit) { 1532 void SetTop(Address top, Address limit) {
1533 ASSERT(top == NULL || top >= Page::FromAddress(top - 1)->ObjectAreaStart());
1509 ASSERT(top == limit || 1534 ASSERT(top == limit ||
1510 Page::FromAddress(top) == Page::FromAddress(limit - 1)); 1535 Page::FromAddress(top) == Page::FromAddress(limit - 1));
1511 allocation_info_.top = top; 1536 allocation_info_.top = top;
1512 allocation_info_.limit = limit; 1537 allocation_info_.limit = limit;
1513 } 1538 }
1514 1539
1515 void Allocate(int bytes) { 1540 void Allocate(int bytes) {
1516 accounting_stats_.AllocateBytes(bytes); 1541 accounting_stats_.AllocateBytes(bytes);
1517 } 1542 }
1518 1543
(...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after
1563 void SetPagesToSweep(Page* first) { 1588 void SetPagesToSweep(Page* first) {
1564 ASSERT(unswept_free_bytes_ == 0); 1589 ASSERT(unswept_free_bytes_ == 0);
1565 if (first == &anchor_) first = NULL; 1590 if (first == &anchor_) first = NULL;
1566 first_unswept_page_ = first; 1591 first_unswept_page_ = first;
1567 } 1592 }
1568 1593
1569 void IncrementUnsweptFreeBytes(int by) { 1594 void IncrementUnsweptFreeBytes(int by) {
1570 unswept_free_bytes_ += by; 1595 unswept_free_bytes_ += by;
1571 } 1596 }
1572 1597
1573 void IncreaseUnsweptFreeBytes(Page* p) { 1598 void IncreaseUnsweptFreeBytes(Page* p) {
Vyacheslav Egorov (Chromium) 2012/01/26 16:12:05 I wonder if this needs to be adjusted somehow to a
1574 ASSERT(ShouldBeSweptLazily(p)); 1599 ASSERT(ShouldBeSweptLazily(p));
1575 unswept_free_bytes_ += (Page::kObjectAreaSize - p->LiveBytes()); 1600 unswept_free_bytes_ += (Page::kObjectAreaSize - p->LiveBytes());
1576 } 1601 }
1577 1602
1578 void DecreaseUnsweptFreeBytes(Page* p) { 1603 void DecreaseUnsweptFreeBytes(Page* p) {
1579 ASSERT(ShouldBeSweptLazily(p)); 1604 ASSERT(ShouldBeSweptLazily(p));
1580 unswept_free_bytes_ -= (Page::kObjectAreaSize - p->LiveBytes()); 1605 unswept_free_bytes_ -= (Page::kObjectAreaSize - p->LiveBytes());
1581 } 1606 }
1582 1607
1583 bool AdvanceSweeper(intptr_t bytes_to_sweep); 1608 bool AdvanceSweeper(intptr_t bytes_to_sweep);
1584 1609
1585 bool IsSweepingComplete() { 1610 bool IsSweepingComplete() {
1586 return !first_unswept_page_->is_valid(); 1611 return !first_unswept_page_->is_valid();
1587 } 1612 }
1588 1613
1614 inline bool HasAPage() { return anchor_.next_page() != &anchor_; }
1589 Page* FirstPage() { return anchor_.next_page(); } 1615 Page* FirstPage() { return anchor_.next_page(); }
1590 Page* LastPage() { return anchor_.prev_page(); } 1616 Page* LastPage() { return anchor_.prev_page(); }
1591 1617
1592 // Returns zero for pages that have so little fragmentation that it is not 1618 // Returns zero for pages that have so little fragmentation that it is not
1593 // worth defragmenting them. Otherwise a positive integer that gives an 1619 // worth defragmenting them. Otherwise a positive integer that gives an
1594 // estimate of fragmentation on an arbitrary scale. 1620 // estimate of fragmentation on an arbitrary scale.
1595 int Fragmentation(Page* p) { 1621 int Fragmentation(Page* p) {
1596 FreeList::SizeStats sizes; 1622 FreeList::SizeStats sizes;
1597 free_list_.CountFreeListItems(p, &sizes); 1623 free_list_.CountFreeListItems(p, &sizes);
1598 1624
(...skipping 52 matching lines...) Expand 10 before | Expand all | Expand 10 after
1651 1677
1652 // The dummy page that anchors the double linked list of pages. 1678 // The dummy page that anchors the double linked list of pages.
1653 Page anchor_; 1679 Page anchor_;
1654 1680
1655 // The space's free list. 1681 // The space's free list.
1656 FreeList free_list_; 1682 FreeList free_list_;
1657 1683
1658 // Normal allocation information. 1684 // Normal allocation information.
1659 AllocationInfo allocation_info_; 1685 AllocationInfo allocation_info_;
1660 1686
1661 // Bytes of each page that cannot be allocated. Possibly non-zero
1662 // for pages in spaces with only fixed-size objects. Always zero
1663 // for pages in spaces with variable sized objects (those pages are
1664 // padded with free-list nodes).
1665 int page_extra_;
1666
1667 bool was_swept_conservatively_; 1687 bool was_swept_conservatively_;
1668 1688
1669 // The first page to be swept when the lazy sweeper advances. Is set 1689 // The first page to be swept when the lazy sweeper advances. Is set
1670 // to NULL when all pages have been swept. 1690 // to NULL when all pages have been swept.
1671 Page* first_unswept_page_; 1691 Page* first_unswept_page_;
1672 1692
1673 // The number of free bytes which could be reclaimed by advancing the 1693 // The number of free bytes which could be reclaimed by advancing the
1674 // lazy sweeper. This is only an estimation because lazy sweeping is 1694 // lazy sweeper. This is only an estimation because lazy sweeping is
1675 // done conservatively. 1695 // done conservatively.
1676 intptr_t unswept_free_bytes_; 1696 intptr_t unswept_free_bytes_;
1677 1697
1678 // Expands the space by allocating a fixed number of pages. Returns false if 1698 // Expands the space by allocating a page. Returns false if it cannot
1679 // it cannot allocate requested number of pages from OS, or if the hard heap 1699 // allocate a page from OS, or if the hard heap size limit has been hit. The
1680 // size limit has been hit. 1700 // new page will have at least enough committed space to satisfy the object
1681 bool Expand(); 1701 // size indicated by the allocation_size argument.
1702 bool Expand(intptr_t allocation_size);
1682 1703
1683 // Generic fast case allocation function that tries linear allocation at the 1704 // Generic fast case allocation function that tries linear allocation at the
1684 // address denoted by top in allocation_info_. 1705 // address denoted by top in allocation_info_.
1685 inline HeapObject* AllocateLinearly(int size_in_bytes); 1706 inline HeapObject* AllocateLinearly(int size_in_bytes);
1686 1707
1687 // Slow path of AllocateRaw. This function is space-dependent. 1708 // Slow path of AllocateRaw. This function is space-dependent.
1688 MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes); 1709 MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
1689 1710
1690 friend class PageIterator; 1711 friend class PageIterator;
1691 }; 1712 };
(...skipping 134 matching lines...) Expand 10 before | Expand all | Expand 10 after
1826 public: 1847 public:
1827 // Constructor. 1848 // Constructor.
1828 SemiSpace(Heap* heap, SemiSpaceId semispace) 1849 SemiSpace(Heap* heap, SemiSpaceId semispace)
1829 : Space(heap, NEW_SPACE, NOT_EXECUTABLE), 1850 : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
1830 start_(NULL), 1851 start_(NULL),
1831 age_mark_(NULL), 1852 age_mark_(NULL),
1832 id_(semispace), 1853 id_(semispace),
1833 anchor_(this), 1854 anchor_(this),
1834 current_page_(NULL) { } 1855 current_page_(NULL) { }
1835 1856
1836 // Sets up the semispace using the given chunk. 1857 // Sets up the semispace using the given chunk. After this, call Commit()
1858 // to make the semispace usable.
1837 void SetUp(Address start, int initial_capacity, int maximum_capacity); 1859 void SetUp(Address start, int initial_capacity, int maximum_capacity);
1838 1860
1839 // Tear down the space. Heap memory was not allocated by the space, so it 1861 // Tear down the space. Heap memory was not allocated by the space, so it
1840 // is not deallocated here. 1862 // is not deallocated here.
1841 void TearDown(); 1863 void TearDown();
1842 1864
1843 // True if the space has been set up but not torn down. 1865 // True if the space has been set up but not torn down.
1844 bool HasBeenSetUp() { return start_ != NULL; } 1866 bool HasBeenSetUp() { return start_ != NULL; }
1845 1867
1846 // Grow the semispace to the new capacity. The new capacity 1868 // Grow the semispace to the new capacity. The new capacity
(...skipping 484 matching lines...) Expand 10 before | Expand all | Expand 10 after
2331 // Old object space (excluding map objects) 2353 // Old object space (excluding map objects)
2332 2354
2333 class OldSpace : public PagedSpace { 2355 class OldSpace : public PagedSpace {
2334 public: 2356 public:
2335 // Creates an old space object with a given maximum capacity. 2357 // Creates an old space object with a given maximum capacity.
2336 // The constructor does not allocate pages from OS. 2358 // The constructor does not allocate pages from OS.
2337 OldSpace(Heap* heap, 2359 OldSpace(Heap* heap,
2338 intptr_t max_capacity, 2360 intptr_t max_capacity,
2339 AllocationSpace id, 2361 AllocationSpace id,
2340 Executability executable) 2362 Executability executable)
2341 : PagedSpace(heap, max_capacity, id, executable) { 2363 : PagedSpace(heap, max_capacity, id, executable) { }
2342 page_extra_ = 0;
2343 }
2344
2345 // The limit of allocation for a page in this space.
2346 virtual Address PageAllocationLimit(Page* page) {
2347 return page->ObjectAreaEnd();
2348 }
2349 2364
2350 public: 2365 public:
2351 TRACK_MEMORY("OldSpace") 2366 TRACK_MEMORY("OldSpace")
2352 }; 2367 };
2353 2368
2354 2369
2355 // For contiguous spaces, top should be in the space (or at the end) and limit 2370 // For contiguous spaces, top should be in the space (or at the end) and limit
2356 // should be the end of the space. 2371 // should be the end of the space.
2357 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \ 2372 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
2358 SLOW_ASSERT((space).page_low() <= (info).top \ 2373 SLOW_ASSERT((space).page_low() <= (info).top \
2359 && (info).top <= (space).page_high() \ 2374 && (info).top <= (space).page_high() \
2360 && (info).limit <= (space).page_high()) 2375 && (info).limit <= (space).page_high())
2361 2376
2362 2377
2363 // ----------------------------------------------------------------------------- 2378 // -----------------------------------------------------------------------------
2364 // Old space for objects of a fixed size 2379 // Old space for objects of a fixed size
2365 2380
2366 class FixedSpace : public PagedSpace { 2381 class FixedSpace : public PagedSpace {
2367 public: 2382 public:
2368 FixedSpace(Heap* heap, 2383 FixedSpace(Heap* heap,
2369 intptr_t max_capacity, 2384 intptr_t max_capacity,
2370 AllocationSpace id, 2385 AllocationSpace id,
2371 int object_size_in_bytes, 2386 int object_size_in_bytes,
2372 const char* name) 2387 const char* name)
2373 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE), 2388 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
2374 object_size_in_bytes_(object_size_in_bytes), 2389 object_size_in_bytes_(object_size_in_bytes),
2375 name_(name) { 2390 name_(name) { }
2376 page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
2377 }
2378
2379 // The limit of allocation for a page in this space.
2380 virtual Address PageAllocationLimit(Page* page) {
2381 return page->ObjectAreaEnd() - page_extra_;
2382 }
2383 2391
2384 int object_size_in_bytes() { return object_size_in_bytes_; } 2392 int object_size_in_bytes() { return object_size_in_bytes_; }
2385 2393
2394 virtual int ObjectAlignment() { return object_size_in_bytes_; }
2395
2386 // Prepares for a mark-compact GC. 2396 // Prepares for a mark-compact GC.
2387 virtual void PrepareForMarkCompact(); 2397 virtual void PrepareForMarkCompact();
2388 2398
2389 protected: 2399 protected:
2390 void ResetFreeList() { 2400 void ResetFreeList() {
2391 free_list_.Reset(); 2401 free_list_.Reset();
2392 } 2402 }
2393 2403
2394 private: 2404 private:
2395 // The size of objects in this space. 2405 // The size of objects in this space.
(...skipping 260 matching lines...) Expand 10 before | Expand all | Expand 10 after
2656 } 2666 }
2657 // Must be small, since an iteration is used for lookup. 2667 // Must be small, since an iteration is used for lookup.
2658 static const int kMaxComments = 64; 2668 static const int kMaxComments = 64;
2659 }; 2669 };
2660 #endif 2670 #endif
2661 2671
2662 2672
2663 } } // namespace v8::internal 2673 } } // namespace v8::internal
2664 2674
2665 #endif // V8_SPACES_H_ 2675 #endif // V8_SPACES_H_
OLDNEW
« src/heap.cc ('K') | « src/snapshot.h ('k') | src/spaces.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698