Chromium Code Reviews

Unified diff: src/spaces.h (lines prefixed with '-' are removed by this patch set; lines prefixed with '+' are added)

Issue 9295047: Revert 10542 (boot time memory reduction) due to map alignment (Closed)
Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 8 years, 10 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 487 matching lines...)
   static const int kBodyOffset =
       CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize));
 
   // The start offset of the object area in a page. Aligned to both maps and
   // code alignment to be suitable for both. Also aligned to 32 words because
   // the marking bitmap is arranged in 32 bit chunks.
   static const int kObjectStartAlignment = 32 * kPointerSize;
   static const int kObjectStartOffset = kBodyOffset - 1 +
       (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
 
-  intptr_t size() const { return size_; }
+  size_t size() const { return size_; }
 
-  void set_size(size_t size) { size_ = size; }
+  void set_size(size_t size) {
+    size_ = size;
+  }
 
   Executability executable() {
     return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
   }
 
   bool ContainsOnlyData() {
     return IsFlagSet(CONTAINS_ONLY_DATA);
   }
 
   bool InNewSpace() {
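
Note (reviewer sketch, not part of the patch): the kObjectStartOffset expression kept above rounds kBodyOffset up to the next multiple of kObjectStartAlignment. A minimal standalone check of that arithmetic, with illustrative inputs (the real kBodyOffset depends on kHeaderSize and Bitmap::kSize):

    #include <cassert>
    #include <cstdio>

    // Mirrors the kObjectStartOffset expression: round 'offset' (>= 1) up to
    // the next multiple of 'alignment'.
    static int RoundUpToAlignment(int offset, int alignment) {
      return offset - 1 + (alignment - (offset - 1) % alignment);
    }

    int main() {
      const int kPointerSize = 8;                           // 64-bit build
      const int kObjectStartAlignment = 32 * kPointerSize;  // 256 bytes
      const int body_offsets[] = {1, 100, 256, 257};        // illustrative values
      for (int body_offset : body_offsets) {
        int start = RoundUpToAlignment(body_offset, kObjectStartAlignment);
        assert(start % kObjectStartAlignment == 0 && start >= body_offset);
        std::printf("kBodyOffset=%d -> kObjectStartOffset=%d\n", body_offset, start);
      }
      return 0;
    }
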
(...skipping 131 matching lines...)
   // Returns the next page in the chain of pages owned by a space.
   inline Page* next_page();
   inline Page* prev_page();
   inline void set_next_page(Page* page);
   inline void set_prev_page(Page* page);
 
   // Returns the start address of the object area in this page.
   Address ObjectAreaStart() { return address() + kObjectStartOffset; }
 
   // Returns the end address (exclusive) of the object area in this page.
-  Address ObjectAreaEnd() { return address() + size(); }
+  Address ObjectAreaEnd() { return address() + Page::kPageSize; }
 
   // Checks whether an address is page aligned.
   static bool IsAlignedToPageSize(Address a) {
     return 0 == (OffsetFrom(a) & kPageAlignmentMask);
   }
 
   // Returns the offset of a given address to this page.
   INLINE(int Offset(Address a)) {
     int offset = static_cast<int>(a - address());
     return offset;
   }
 
   // Returns the address for a given offset to the this page.
   Address OffsetToAddress(int offset) {
     ASSERT_PAGE_OFFSET(offset);
     return address() + offset;
   }
 
-  // Expand the committed area for pages that are small.
-  void CommitMore(intptr_t space_needed);
-
   // ---------------------------------------------------------------------
 
   // Page size in bytes. This must be a multiple of the OS page size.
   static const int kPageSize = 1 << kPageSizeBits;
 
-  // For a 1Mbyte page grow 64k at a time.
-  static const int kGrowthUnit = 1 << (kPageSizeBits - 4);
-
   // Page size mask.
   static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
 
   // Object area size in bytes.
   static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
 
   // Maximum object size that fits in a page.
   static const int kMaxHeapObjectSize = kObjectAreaSize;
 
   static const int kFirstUsedCell =
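
Note (reviewer sketch, not part of the patch): with CommitMore() and kGrowthUnit gone, a page is again a fully committed 1 MB chunk and the object area is the fixed tail [kObjectStartOffset, kPageSize). A small sketch of that layout arithmetic; kPageSizeBits and the start offset below are illustrative stand-ins for the real constants:

    #include <cstdio>

    int main() {
      // Illustrative stand-ins; in spaces.h kPageSizeBits is defined elsewhere
      // and kObjectStartOffset is derived from kBodyOffset.
      const int kPageSizeBits = 20;             // 1 MB pages
      const int kPageSize = 1 << kPageSizeBits;
      const int kObjectStartOffset = 256;
      const int kObjectAreaSize = kPageSize - kObjectStartOffset;

      // ObjectAreaEnd() is now page start + kPageSize, so every page exposes
      // the same fixed-size object area instead of a growable committed slice.
      std::printf("object area: [%d, %d) = %d bytes\n",
                  kObjectStartOffset, kPageSize, kObjectAreaSize);
      return 0;
    }
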
(...skipping 144 matching lines...)
   Isolate* isolate_;
 
   // The reserved range of virtual memory that all code objects are put in.
   VirtualMemory* code_range_;
   // Plain old data class, just a struct plus a constructor.
   class FreeBlock {
    public:
     FreeBlock(Address start_arg, size_t size_arg)
         : start(start_arg), size(size_arg) {
       ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
+      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
     }
     FreeBlock(void* start_arg, size_t size_arg)
         : start(static_cast<Address>(start_arg)), size(size_arg) {
       ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
+      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
     }
 
     Address start;
     size_t size;
   };
 
   // Freed blocks of memory are added to the free list. When the allocation
   // list is exhausted, the free list is sorted and merged to make the new
   // allocation list.
   List<FreeBlock> free_list_;
(...skipping 75 matching lines...)
 class MemoryAllocator {
  public:
   explicit MemoryAllocator(Isolate* isolate);
 
   // Initializes its internal bookkeeping structures.
   // Max capacity of the total space and executable memory limit.
   bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);
 
   void TearDown();
 
-  Page* AllocatePage(intptr_t object_area_size,
-                     PagedSpace* owner,
-                     Executability executable);
+  Page* AllocatePage(PagedSpace* owner, Executability executable);
 
   LargePage* AllocateLargePage(intptr_t object_size,
                                Executability executable,
                                Space* owner);
 
   void Free(MemoryChunk* chunk);
 
   // Returns the maximum available bytes of heaps.
-  intptr_t Available() {
-    return capacity_ < memory_allocator_reserved_ ?
-        0 :
-        capacity_ - memory_allocator_reserved_;
-  }
+  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
 
   // Returns allocated spaces in bytes.
-  intptr_t Size() { return memory_allocator_reserved_; }
+  intptr_t Size() { return size_; }
 
   // Returns the maximum available executable bytes of heaps.
   intptr_t AvailableExecutable() {
     if (capacity_executable_ < size_executable_) return 0;
     return capacity_executable_ - size_executable_;
   }
 
   // Returns allocated executable spaces in bytes.
   intptr_t SizeExecutable() { return size_executable_; }
 
   // Returns maximum available bytes that the old space can have.
   intptr_t MaxAvailable() {
     return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
   }
 
 #ifdef DEBUG
   // Reports statistic info of the space.
   void ReportStatistics();
 #endif
 
   MemoryChunk* AllocateChunk(intptr_t body_size,
-                             intptr_t committed_body_size,
                              Executability executable,
                              Space* space);
 
   Address ReserveAlignedMemory(size_t requested,
                                size_t alignment,
                                VirtualMemory* controller);
   Address AllocateAlignedMemory(size_t requested,
-                                size_t committed,
                                 size_t alignment,
                                 Executability executable,
                                 VirtualMemory* controller);
 
   void FreeMemory(VirtualMemory* reservation, Executability executable);
   void FreeMemory(Address addr, size_t size, Executability executable);
 
   // Commit a contiguous block of memory from the initial chunk. Assumes that
   // the address is not NULL, the size is greater than zero, and that the
   // block is contained in the initial chunk. Returns true if it succeeded
   // and false otherwise.
   bool CommitBlock(Address start, size_t size, Executability executable);
 
   // Uncommit a contiguous block of memory [start..(start+size)[.
   // start is not NULL, the size is greater than zero, and the
   // block is contained in the initial chunk. Returns true if it succeeded
   // and false otherwise.
   bool UncommitBlock(Address start, size_t size);
 
-  void AllocationBookkeeping(Space* owner,
-                             Address base,
-                             intptr_t reserved_size,
-                             intptr_t committed_size,
-                             Executability executable);
-
   // Zaps a contiguous block of memory [start..(start+size)[ thus
   // filling it up with a recognizable non-NULL bit pattern.
   void ZapBlock(Address start, size_t size);
 
   void PerformAllocationCallback(ObjectSpace space,
                                  AllocationAction action,
                                  size_t size);
 
   void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
                                    ObjectSpace space,
                                    AllocationAction action);
 
   void RemoveMemoryAllocationCallback(
       MemoryAllocationCallback callback);
 
   bool MemoryAllocationCallbackRegistered(
       MemoryAllocationCallback callback);
 
  private:
   Isolate* isolate_;
 
   // Maximum space size in bytes.
   size_t capacity_;
   // Maximum subset of capacity_ that can be executable
   size_t capacity_executable_;
 
   // Allocated space size in bytes.
-  size_t memory_allocator_reserved_;
+  size_t size_;
   // Allocated executable space size in bytes.
   size_t size_executable_;
 
   struct MemoryAllocationCallbackRegistration {
     MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
                                          ObjectSpace space,
                                          AllocationAction action)
         : callback(callback), space(space), action(action) {
     }
     MemoryAllocationCallback callback;
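
Note (reviewer sketch, not part of the patch): with the counter renamed back to size_, the allocator's headroom is plain subtraction, and MaxAvailable() converts that headroom into whole-page object areas. A hedged sketch of the arithmetic with invented figures (capacity and usage below are illustrative):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const intptr_t kPageSize = 1 << 20;                // 1 MB, illustrative
      const intptr_t kObjectAreaSize = kPageSize - 256;  // illustrative
      intptr_t capacity = 64 * kPageSize;  // max_capacity passed to SetUp()
      intptr_t size = 10 * kPageSize;      // bytes currently allocated

      // Available(): remaining bytes, clamped at zero if over-committed.
      intptr_t available = capacity < size ? 0 : capacity - size;
      // MaxAvailable(): whole pages that still fit, counted by object area.
      intptr_t max_available = (available / kPageSize) * kObjectAreaSize;

      std::printf("Available()=%lld MaxAvailable()=%lld\n",
                  static_cast<long long>(available),
                  static_cast<long long>(max_available));
      return 0;
    }
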
(...skipping 324 matching lines...)
 
   void CountFreeListItems(Page* p, SizeStats* sizes);
 
   intptr_t EvictFreeListItems(Page* p);
 
  private:
   // The size range of blocks, in bytes.
   static const int kMinBlockSize = 3 * kPointerSize;
   static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
 
-  FreeListNode* PickNodeFromList(FreeListNode** list,
-                                 int* node_size,
-                                 int minimum_size);
+  FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);
 
-  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size, Address limit);
-  FreeListNode* FindAbuttingNode(int size_in_bytes,
-                                 int* node_size,
-                                 Address limit,
-                                 FreeListNode** list_head);
+  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
 
   PagedSpace* owner_;
   Heap* heap_;
 
   // Total available bytes in all blocks on this free list.
   int available_;
 
   static const int kSmallListMin = 0x20 * kPointerSize;
   static const int kSmallListMax = 0xff * kPointerSize;
   static const int kMediumListMax = 0x7ff * kPointerSize;
(...skipping 79 matching lines...)
   virtual intptr_t SizeOfObjects() {
     ASSERT(!IsSweepingComplete() || (unswept_free_bytes_ == 0));
     return Size() - unswept_free_bytes_ - (limit() - top());
   }
 
   // Wasted bytes in this space. These are just the bytes that were thrown away
   // due to being too small to use for allocation. They do not include the
   // free bytes that were not found at all due to lazy sweeping.
   virtual intptr_t Waste() { return accounting_stats_.Waste(); }
 
-  virtual int ObjectAlignment() { return kObjectAlignment; }
-
   // Returns the allocation pointer in this space.
   Address top() { return allocation_info_.top; }
   Address limit() { return allocation_info_.limit; }
 
   // Allocate the requested number of bytes in the space if possible, return a
   // failure object if not.
   MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
 
   virtual bool ReserveSpace(int bytes);
 
   // Give a block of memory to the space's free list. It might be added to
   // the free list or accounted as waste.
   // If add_to_freelist is false then just accounting stats are updated and
   // no attempt to add area to free list is made.
-  int AddToFreeLists(Address start, int size_in_bytes) {
+  int Free(Address start, int size_in_bytes) {
     int wasted = free_list_.Free(start, size_in_bytes);
     accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
     return size_in_bytes - wasted;
   }
 
   // Set space allocation info.
   void SetTop(Address top, Address limit) {
-    ASSERT(top == NULL || top >= Page::FromAddress(top - 1)->ObjectAreaStart());
     ASSERT(top == limit ||
            Page::FromAddress(top) == Page::FromAddress(limit - 1));
     allocation_info_.top = top;
     allocation_info_.limit = limit;
   }
 
   void Allocate(int bytes) {
     accounting_stats_.AllocateBytes(bytes);
   }
 
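
Note (reviewer sketch, not part of the patch): Free() (the name restored from AddToFreeLists) hands a dead region to the free list and books whatever the list refuses as waste. A standalone sketch of that split; the fake free list below simply rejects blocks under a minimum size, which is an assumption rather than the real FreeList policy:

    #include <cstdio>

    // Stand-in for FreeList::Free(): returns the wasted bytes, i.e. the part
    // of the block that could not become a free-list node. The threshold is
    // illustrative, not the real FreeList constant.
    static int FreeListFree(int size_in_bytes) {
      const int kMinBlockSize = 3 * 8;  // three pointers on a 64-bit build
      return size_in_bytes < kMinBlockSize ? size_in_bytes : 0;
    }

    int main() {
      int reclaimed = 0;     // what PagedSpace::Free() would report back
      int wasted_total = 0;  // what the accounting stats would record as waste
      const int blocks[] = {8, 64, 16, 1024};  // illustrative block sizes
      for (int block : blocks) {
        int wasted = FreeListFree(block);
        reclaimed += block - wasted;  // accounting_stats_.DeallocateBytes(...)
        wasted_total += wasted;
      }
      std::printf("reclaimed=%d wasted=%d\n", reclaimed, wasted_total);
      return 0;
    }
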
(...skipping 46 matching lines...)
     if (first == &anchor_) first = NULL;
     first_unswept_page_ = first;
   }
 
   void IncrementUnsweptFreeBytes(int by) {
     unswept_free_bytes_ += by;
   }
 
   void IncreaseUnsweptFreeBytes(Page* p) {
     ASSERT(ShouldBeSweptLazily(p));
-    unswept_free_bytes_ +=
-        (p->ObjectAreaEnd() - p->ObjectAreaStart()) - p->LiveBytes();
+    unswept_free_bytes_ += (Page::kObjectAreaSize - p->LiveBytes());
   }
 
   void DecreaseUnsweptFreeBytes(Page* p) {
     ASSERT(ShouldBeSweptLazily(p));
-    unswept_free_bytes_ -=
-        (p->ObjectAreaEnd() - p->ObjectAreaStart() - p->LiveBytes());
+    unswept_free_bytes_ -= (Page::kObjectAreaSize - p->LiveBytes());
   }
 
   bool AdvanceSweeper(intptr_t bytes_to_sweep);
 
   bool IsSweepingComplete() {
     return !first_unswept_page_->is_valid();
   }
 
-  inline bool HasAPage() { return anchor_.next_page() != &anchor_; }
   Page* FirstPage() { return anchor_.next_page(); }
   Page* LastPage() { return anchor_.prev_page(); }
 
   // Returns zero for pages that have so little fragmentation that it is not
   // worth defragmenting them. Otherwise a positive integer that gives an
   // estimate of fragmentation on an arbitrary scale.
   int Fragmentation(Page* p) {
     FreeList::SizeStats sizes;
     free_list_.CountFreeListItems(p, &sizes);
 
-    intptr_t object_area_size = p->ObjectAreaEnd() - p->ObjectAreaStart();
-
     intptr_t ratio;
     intptr_t ratio_threshold;
     if (identity() == CODE_SPACE) {
       ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
-          object_area_size;
+          Page::kObjectAreaSize;
       ratio_threshold = 10;
     } else {
       ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
-          object_area_size;
+          Page::kObjectAreaSize;
       ratio_threshold = 15;
     }
 
     if (FLAG_trace_fragmentation) {
       PrintF("%p [%d]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
              reinterpret_cast<void*>(p),
              identity(),
              static_cast<int>(sizes.small_size_),
              static_cast<double>(sizes.small_size_ * 100) /
-                 object_area_size,
+                 Page::kObjectAreaSize,
              static_cast<int>(sizes.medium_size_),
              static_cast<double>(sizes.medium_size_ * 100) /
-                 object_area_size,
+                 Page::kObjectAreaSize,
              static_cast<int>(sizes.large_size_),
              static_cast<double>(sizes.large_size_ * 100) /
-                 object_area_size,
+                 Page::kObjectAreaSize,
              static_cast<int>(sizes.huge_size_),
              static_cast<double>(sizes.huge_size_ * 100) /
-                 object_area_size,
+                 Page::kObjectAreaSize,
              (ratio > ratio_threshold) ? "[fragmented]" : "");
     }
 
-    if (FLAG_always_compact && sizes.Total() != object_area_size) {
+    if (FLAG_always_compact && sizes.Total() != Page::kObjectAreaSize) {
       return 1;
     }
     if (ratio <= ratio_threshold) return 0;  // Not fragmented.
 
     return static_cast<int>(ratio - ratio_threshold);
   }
 
   void EvictEvacuationCandidatesFromFreeLists();
 
   bool CanExpand();
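
Note (reviewer sketch, not part of the patch): with the per-page object_area_size local removed, Fragmentation() divides the weighted free-list buckets by the fixed Page::kObjectAreaSize again. A worked example for a non-code space with invented bucket totals (all numbers illustrative):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Illustrative free-list bytes on one page, per size class.
      intptr_t small_size = 40 * 1024;
      intptr_t medium_size = 120 * 1024;
      const intptr_t kObjectAreaSize = (1 << 20) - 256;  // stand-in constant

      // Non-code spaces: ratio = (small*5 + medium) * 100 / object area,
      // judged against a threshold of 15 (see the hunk above).
      intptr_t ratio = (small_size * 5 + medium_size) * 100 / kObjectAreaSize;
      const intptr_t ratio_threshold = 15;

      std::printf("ratio=%lld -> %s\n", static_cast<long long>(ratio),
                  ratio > ratio_threshold ? "[fragmented]" : "not fragmented");
      return 0;
    }
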
(...skipping 10 matching lines...)
 
   // The dummy page that anchors the double linked list of pages.
   Page anchor_;
 
   // The space's free list.
   FreeList free_list_;
 
   // Normal allocation information.
   AllocationInfo allocation_info_;
 
+  // Bytes of each page that cannot be allocated. Possibly non-zero
+  // for pages in spaces with only fixed-size objects. Always zero
+  // for pages in spaces with variable sized objects (those pages are
+  // padded with free-list nodes).
+  int page_extra_;
+
   bool was_swept_conservatively_;
 
   // The first page to be swept when the lazy sweeper advances. Is set
   // to NULL when all pages have been swept.
   Page* first_unswept_page_;
 
   // The number of free bytes which could be reclaimed by advancing the
   // lazy sweeper. This is only an estimation because lazy sweeping is
   // done conservatively.
   intptr_t unswept_free_bytes_;
 
-  // Expands the space by allocating a page. Returns false if it cannot
-  // allocate a page from OS, or if the hard heap size limit has been hit. The
-  // new page will have at least enough committed space to satisfy the object
-  // size indicated by the allocation_size argument;
-  bool Expand(intptr_t allocation_size);
+  // Expands the space by allocating a fixed number of pages. Returns false if
+  // it cannot allocate requested number of pages from OS, or if the hard heap
+  // size limit has been hit.
+  bool Expand();
 
   // Generic fast case allocation function that tries linear allocation at the
   // address denoted by top in allocation_info_.
   inline HeapObject* AllocateLinearly(int size_in_bytes);
 
   // Slow path of AllocateRaw. This function is space-dependent.
   MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
 
   friend class PageIterator;
 };
(...skipping 134 matching lines...)
  public:
   // Constructor.
   SemiSpace(Heap* heap, SemiSpaceId semispace)
     : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
       start_(NULL),
       age_mark_(NULL),
       id_(semispace),
       anchor_(this),
       current_page_(NULL) { }
 
-  // Sets up the semispace using the given chunk. After this, call Commit()
-  // to make the semispace usable.
+  // Sets up the semispace using the given chunk.
   void SetUp(Address start, int initial_capacity, int maximum_capacity);
 
   // Tear down the space. Heap memory was not allocated by the space, so it
   // is not deallocated here.
   void TearDown();
 
   // True if the space has been set up but not torn down.
   bool HasBeenSetUp() { return start_ != NULL; }
 
   // Grow the semispace to the new capacity. The new capacity
(...skipping 484 matching lines...)
 // Old object space (excluding map objects)
 
 class OldSpace : public PagedSpace {
  public:
   // Creates an old space object with a given maximum capacity.
   // The constructor does not allocate pages from OS.
   OldSpace(Heap* heap,
            intptr_t max_capacity,
            AllocationSpace id,
            Executability executable)
-      : PagedSpace(heap, max_capacity, id, executable) { }
+      : PagedSpace(heap, max_capacity, id, executable) {
+    page_extra_ = 0;
+  }
+
+  // The limit of allocation for a page in this space.
+  virtual Address PageAllocationLimit(Page* page) {
+    return page->ObjectAreaEnd();
+  }
 
  public:
   TRACK_MEMORY("OldSpace")
 };
 
 
 // For contiguous spaces, top should be in the space (or at the end) and limit
 // should be the end of the space.
 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
   SLOW_ASSERT((space).page_low() <= (info).top \
               && (info).top <= (space).page_high() \
               && (info).limit <= (space).page_high())
 
 
 // -----------------------------------------------------------------------------
 // Old space for objects of a fixed size
 
 class FixedSpace : public PagedSpace {
  public:
   FixedSpace(Heap* heap,
              intptr_t max_capacity,
              AllocationSpace id,
              int object_size_in_bytes,
              const char* name)
       : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
         object_size_in_bytes_(object_size_in_bytes),
-        name_(name) { }
+        name_(name) {
+    page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
+  }
+
+  // The limit of allocation for a page in this space.
+  virtual Address PageAllocationLimit(Page* page) {
+    return page->ObjectAreaEnd() - page_extra_;
+  }
 
   int object_size_in_bytes() { return object_size_in_bytes_; }
 
-  virtual int ObjectAlignment() { return object_size_in_bytes_; }
-
   // Prepares for a mark-compact GC.
   virtual void PrepareForMarkCompact();
 
  protected:
   void ResetFreeList() {
     free_list_.Reset();
   }
 
  private:
   // The size of objects in this space.
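
Note (reviewer sketch, not part of the patch): the restored page_extra_ is the tail of the object area that a fixed object size cannot fill, and PageAllocationLimit() trims it off the end of the page. A quick check of that arithmetic with illustrative object sizes (the area constant is a stand-in for Page::kObjectAreaSize):

    #include <cstdio>

    int main() {
      const int kObjectAreaSize = (1 << 20) - 256;  // stand-in constant

      // For a few illustrative fixed object sizes, the unusable tail and the
      // number of objects that fit per page.
      const int object_sizes[] = {88, 128, 4096};
      for (int object_size : object_sizes) {
        int page_extra = kObjectAreaSize % object_size;
        int usable = kObjectAreaSize - page_extra;
        std::printf("object_size=%d page_extra=%d objects_per_page=%d\n",
                    object_size, page_extra, usable / object_size);
      }
      return 0;
    }
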
(...skipping 260 matching lines...)
   }
   // Must be small, since an iteration is used for lookup.
   static const int kMaxComments = 64;
 };
 #endif
 
 
 } }  // namespace v8::internal
 
 #endif  // V8_SPACES_H_