Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 429 matching lines...) | |
| 440 | 440 |
| 441 chunk->heap_ = heap; | 441 chunk->heap_ = heap; |
| 442 chunk->size_ = size; | 442 chunk->size_ = size; |
| 443 chunk->area_start_ = area_start; | 443 chunk->area_start_ = area_start; |
| 444 chunk->area_end_ = area_end; | 444 chunk->area_end_ = area_end; |
| 445 chunk->flags_ = 0; | 445 chunk->flags_ = 0; |
| 446 chunk->set_owner(owner); | 446 chunk->set_owner(owner); |
| 447 chunk->InitializeReservedMemory(); | 447 chunk->InitializeReservedMemory(); |
| 448 chunk->slots_buffer_ = NULL; | 448 chunk->slots_buffer_ = NULL; |
| 449 chunk->skip_list_ = NULL; | 449 chunk->skip_list_ = NULL; |
| | 450 chunk->high_water_mark_ = area_start - base; |
| 450 chunk->ResetLiveBytes(); | 451 chunk->ResetLiveBytes(); |
| 451 Bitmap::Clear(chunk); | 452 Bitmap::Clear(chunk); |
| 452 chunk->initialize_scan_on_scavenge(false); | 453 chunk->initialize_scan_on_scavenge(false); |
| 453 chunk->SetFlag(WAS_SWEPT_PRECISELY); | 454 chunk->SetFlag(WAS_SWEPT_PRECISELY); |
| 454 | 455 |
| 455 ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset); | 456 ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset); |
| 456 ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset); | 457 ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset); |
| 457 | 458 |
| 458 if (executable == EXECUTABLE) { | 459 if (executable == EXECUTABLE) { |
| 459 chunk->SetFlag(IS_EXECUTABLE); | 460 chunk->SetFlag(IS_EXECUTABLE); |
| (...skipping 352 matching lines...) | |
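The new `high_water_mark_` field initialized in the hunk above records how far allocation has progressed into the chunk, as an offset from the chunk's base address, so pages the allocator has actually touched can be told apart from ones that are merely reserved. A minimal sketch of how such a watermark might be maintained — the field is from this CL, but the body below is assumed, not the CL's actual code:

```cpp
// Sketch only: assumed shape of the watermark update, relying on the
// MemoryChunk declarations in the surrounding V8 sources.
void MemoryChunk::UpdateHighWaterMark(Address mark) {
  if (mark == NULL) return;
  // 'mark - 1' maps an exclusive end address back into the chunk it ends in.
  MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
  intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
  // Keep the maximum offset ever reached; the watermark never moves down.
  if (new_mark > chunk->high_water_mark_) chunk->high_water_mark_ = new_mark;
}
```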
| 812 PageIterator iterator(this); | 813 PageIterator iterator(this); |
| 813 while (iterator.has_next()) { | 814 while (iterator.has_next()) { |
| 814 heap()->isolate()->memory_allocator()->Free(iterator.next()); | 815 heap()->isolate()->memory_allocator()->Free(iterator.next()); |
| 815 } | 816 } |
| 816 anchor_.set_next_page(&anchor_); | 817 anchor_.set_next_page(&anchor_); |
| 817 anchor_.set_prev_page(&anchor_); | 818 anchor_.set_prev_page(&anchor_); |
| 818 accounting_stats_.Clear(); | 819 accounting_stats_.Clear(); |
| 819 } | 820 } |
| 820 | 821 |
| 821 | 822 |
| | 823 size_t PagedSpace::CommittedPhysicalMemory() { |
| | 824 if (!VirtualMemory::HasLazyCommits()) return CommittedMemory(); |
| | 825 MemoryChunk::UpdateHighWaterMark(allocation_info_.top); |
| | 826 size_t size = 0; |
| | 827 PageIterator it(this); |
| | 828 while (it.has_next()) { |
| | 829 size += it.next()->CommittedPhysicalMemory(); |
| | 830 } |
| | 831 return size; |
| | 832 } |
| | 833 |
| | 834 |
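The per-page `CommittedPhysicalMemory()` summed by the loop above is not visible in this hunk. A plausible reading, assuming the high-water mark from this CL and `RoundUp`/`OS::CommitPageSize()`-style helpers from the surrounding codebase, is that each chunk reports only the portion the allocator has touched, rounded up to the OS commit granularity:

```cpp
// Sketch only; the real per-chunk accounting is not shown in this hunk.
size_t MemoryChunk::CommittedPhysicalMemory() {
  // Memory below the high-water mark has been handed out and thus touched;
  // the OS backs it with physical pages at commit-page granularity.
  return static_cast<size_t>(RoundUp(high_water_mark_, OS::CommitPageSize()));
}
```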
| 822 MaybeObject* PagedSpace::FindObject(Address addr) { | 835 MaybeObject* PagedSpace::FindObject(Address addr) { |
| 823 // Note: this function can only be called on precisely swept spaces. | 836 // Note: this function can only be called on precisely swept spaces. |
| 824 ASSERT(!heap()->mark_compact_collector()->in_use()); | 837 ASSERT(!heap()->mark_compact_collector()->in_use()); |
| 825 | 838 |
| 826 if (!Contains(addr)) return Failure::Exception(); | 839 if (!Contains(addr)) return Failure::Exception(); |
| 827 | 840 |
| 828 Page* p = Page::FromAddress(addr); | 841 Page* p = Page::FromAddress(addr); |
| 829 HeapObjectIterator it(p, NULL); | 842 HeapObjectIterator it(p, NULL); |
| 830 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { | 843 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
| 831 Address cur = obj->address(); | 844 Address cur = obj->address(); |
| (...skipping 27 matching lines...) | |
| 859 size = SizeOfFirstPage(); | 872 size = SizeOfFirstPage(); |
| 860 } | 873 } |
| 861 | 874 |
| 862 Page* p = heap()->isolate()->memory_allocator()->AllocatePage( | 875 Page* p = heap()->isolate()->memory_allocator()->AllocatePage( |
| 863 size, this, executable()); | 876 size, this, executable()); |
| 864 if (p == NULL) return false; | 877 if (p == NULL) return false; |
| 865 | 878 |
| 866 ASSERT(Capacity() <= max_capacity_); | 879 ASSERT(Capacity() <= max_capacity_); |
| 867 | 880 |
| 868 p->InsertAfter(anchor_.prev_page()); | 881 p->InsertAfter(anchor_.prev_page()); |
| | 882 MemoryChunk::UpdateHighWaterMark(allocation_info_.top); |
> **Michael Starzinger** 2012/10/16 13:01:06:
> This function is called by PagedSpace::SlowAllocat…
>
> **alph** 2012/10/16 13:44:41:
> Done.
| 869 | 883 |
| 870 return true; | 884 return true; |
| 871 } | 885 } |
| 872 | 886 |
| 873 | 887 |
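Per the review thread above, updating the watermark inside `Expand()` is redundant once the slow allocation path that calls it does the update itself. One way to bottleneck the update, sketched with illustrative names rather than the CL's final shape, is wherever the linear allocation top is reassigned:

```cpp
// Sketch of bottlenecking the watermark where the allocation top moves.
// 'SetTop' is an illustrative name, not necessarily what the CL landed.
void PagedSpace::SetTop(Address top, Address limit) {
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top);  // retire old top
  allocation_info_.top = top;
  allocation_info_.limit = limit;
}
```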
| 874 intptr_t PagedSpace::SizeOfFirstPage() { | 888 intptr_t PagedSpace::SizeOfFirstPage() { |
| 875 int size = 0; | 889 int size = 0; |
| 876 switch (identity()) { | 890 switch (identity()) { |
| 877 case OLD_POINTER_SPACE: | 891 case OLD_POINTER_SPACE: |
| 878 size = 64 * kPointerSize * KB; | 892 size = 64 * kPointerSize * KB; |
| (...skipping 239 matching lines...) | |
| 1118 | 1132 |
| 1119 ASSERT(reservation_.IsReserved()); | 1133 ASSERT(reservation_.IsReserved()); |
| 1120 heap()->isolate()->memory_allocator()->FreeMemory(&reservation_, | 1134 heap()->isolate()->memory_allocator()->FreeMemory(&reservation_, |
| 1121 NOT_EXECUTABLE); | 1135 NOT_EXECUTABLE); |
| 1122 chunk_base_ = NULL; | 1136 chunk_base_ = NULL; |
| 1123 chunk_size_ = 0; | 1137 chunk_size_ = 0; |
| 1124 } | 1138 } |
| 1125 | 1139 |
| 1126 | 1140 |
| 1127 void NewSpace::Flip() { | 1141 void NewSpace::Flip() { |
| | 1142 MemoryChunk::UpdateHighWaterMark(allocation_info_.top); |
> **Michael Starzinger** 2012/10/16 13:01:06:
> I think this one is better bottle-necked in NewSpa…
>
> **alph** 2012/10/16 13:44:41:
> Done.
| 1128 SemiSpace::Swap(&from_space_, &to_space_); | 1143 SemiSpace::Swap(&from_space_, &to_space_); |
| 1129 } | 1144 } |
| 1130 | 1145 |
| 1131 | 1146 |
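The reviewer's truncated suggestion above points at a single `NewSpace` helper that both retires the old top's watermark and resets the allocation window, so `Flip()` and `AddFreshPage()` inherit the update for free. An assumed sketch of that bottleneck:

```cpp
// Sketch of the suggested bottleneck; the body is assumed, not the CL's code.
void NewSpace::UpdateAllocationInfo() {
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top);  // retire old top
  // Point the allocation window at the current to-space page.
  allocation_info_.top = to_space_.page_low();
  allocation_info_.limit = to_space_.page_high();
}
```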
| 1132 void NewSpace::Grow() { | 1147 void NewSpace::Grow() { |
| 1133 // Double the semispace size but only up to maximum capacity. | 1148 // Double the semispace size but only up to maximum capacity. |
| 1134 ASSERT(Capacity() < MaximumCapacity()); | 1149 ASSERT(Capacity() < MaximumCapacity()); |
| 1135 int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity())); | 1150 int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity())); |
| 1136 if (to_space_.GrowTo(new_capacity)) { | 1151 if (to_space_.GrowTo(new_capacity)) { |
| 1137 // Only grow from space if we managed to grow to-space. | 1152 // Only grow from space if we managed to grow to-space. |
| (...skipping 64 matching lines...) | |
| 1202 Address top = allocation_info_.top; | 1217 Address top = allocation_info_.top; |
| 1203 if (NewSpacePage::IsAtStart(top)) { | 1218 if (NewSpacePage::IsAtStart(top)) { |
| 1204 // The current page is already empty. Don't try to make another. | 1219 // The current page is already empty. Don't try to make another. |
| 1205 | 1220 |
| 1206 // We should only get here if someone asks to allocate more | 1221 // We should only get here if someone asks to allocate more |
| 1207 // than what can be stored in a single page. | 1222 // than what can be stored in a single page. |
| 1208 // TODO(gc): Change the limit on new-space allocation to prevent this | 1223 // TODO(gc): Change the limit on new-space allocation to prevent this |
| 1209 // from happening (all such allocations should go directly to LOSpace). | 1224 // from happening (all such allocations should go directly to LOSpace). |
| 1210 return false; | 1225 return false; |
| 1211 } | 1226 } |
| | 1227 MemoryChunk::UpdateHighWaterMark(allocation_info_.top); |
> **Michael Starzinger** 2012/10/16 13:01:06:
> This one will also be covered by NewSpace::UpdateA…
>
> **alph** 2012/10/16 13:44:41:
> Done.
| 1212 if (!to_space_.AdvancePage()) { | 1228 if (!to_space_.AdvancePage()) { |
| 1213 // Failed to get a new page in to-space. | 1229 // Failed to get a new page in to-space. |
| 1214 return false; | 1230 return false; |
| 1215 } | 1231 } |
| 1216 | 1232 |
| 1217 // Clear remainder of current page. | 1233 // Clear remainder of current page. |
| 1218 Address limit = NewSpacePage::FromLimit(top)->area_end(); | 1234 Address limit = NewSpacePage::FromLimit(top)->area_end(); |
| 1219 if (heap()->gc_state() == Heap::SCAVENGE) { | 1235 if (heap()->gc_state() == Heap::SCAVENGE) { |
| 1220 heap()->promotion_queue()->SetNewLimit(limit); | 1236 heap()->promotion_queue()->SetNewLimit(limit); |
| 1221 heap()->promotion_queue()->ActivateGuardIfOnTheSamePage(); | 1237 heap()->promotion_queue()->ActivateGuardIfOnTheSamePage(); |
| (...skipping 155 matching lines...) | |
| 1377 return false; | 1393 return false; |
| 1378 } | 1394 } |
| 1379 anchor()->set_next_page(anchor()); | 1395 anchor()->set_next_page(anchor()); |
| 1380 anchor()->set_prev_page(anchor()); | 1396 anchor()->set_prev_page(anchor()); |
| 1381 | 1397 |
| 1382 committed_ = false; | 1398 committed_ = false; |
| 1383 return true; | 1399 return true; |
| 1384 } | 1400 } |
| 1385 | 1401 |
| 1386 | 1402 |
| | 1403 size_t SemiSpace::CommittedPhysicalMemory() { |
| | 1404 if (!is_committed()) return 0; |
| | 1405 size_t size = 0; |
| | 1406 NewSpacePageIterator it(this); |
| | 1407 while (it.has_next()) { |
| | 1408 size += it.next()->CommittedPhysicalMemory(); |
| | 1409 } |
| | 1410 return size; |
| | 1411 } |
| | 1412 |
| | 1413 |
| 1387 bool SemiSpace::GrowTo(int new_capacity) { | 1414 bool SemiSpace::GrowTo(int new_capacity) { |
| 1388 if (!is_committed()) { | 1415 if (!is_committed()) { |
| 1389 if (!Commit()) return false; | 1416 if (!Commit()) return false; |
| 1390 } | 1417 } |
| 1391 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0); | 1418 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0); |
| 1392 ASSERT(new_capacity <= maximum_capacity_); | 1419 ASSERT(new_capacity <= maximum_capacity_); |
| 1393 ASSERT(new_capacity > capacity_); | 1420 ASSERT(new_capacity > capacity_); |
| 1394 int pages_before = capacity_ / Page::kPageSize; | 1421 int pages_before = capacity_ / Page::kPageSize; |
| 1395 int pages_after = new_capacity / Page::kPageSize; | 1422 int pages_after = new_capacity / Page::kPageSize; |
| 1396 | 1423 |
| (...skipping 412 matching lines...) | |
| 1809 } | 1836 } |
| 1810 | 1837 |
| 1811 | 1838 |
| 1812 void NewSpace::RecordPromotion(HeapObject* obj) { | 1839 void NewSpace::RecordPromotion(HeapObject* obj) { |
| 1813 InstanceType type = obj->map()->instance_type(); | 1840 InstanceType type = obj->map()->instance_type(); |
| 1814 ASSERT(0 <= type && type <= LAST_TYPE); | 1841 ASSERT(0 <= type && type <= LAST_TYPE); |
| 1815 promoted_histogram_[type].increment_number(1); | 1842 promoted_histogram_[type].increment_number(1); |
| 1816 promoted_histogram_[type].increment_bytes(obj->Size()); | 1843 promoted_histogram_[type].increment_bytes(obj->Size()); |
| 1817 } | 1844 } |
| 1818 | 1845 |
| | 1846 |
| | 1847 size_t NewSpace::CommittedPhysicalMemory() { |
| | 1848 if (!VirtualMemory::HasLazyCommits()) return CommittedMemory(); |
| | 1849 MemoryChunk::UpdateHighWaterMark(allocation_info_.top); |
| | 1850 size_t size = to_space_.CommittedPhysicalMemory(); |
| | 1851 if (from_space_.is_committed()) { |
| | 1852 size += from_space_.CommittedPhysicalMemory(); |
| | 1853 } |
| | 1854 return size; |
| | 1855 } |
| | 1856 |
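With overrides for paged spaces, new space, and (below) large-object space in place, a caller can sum committed physical memory across the whole heap. A hypothetical caller-side sketch, assuming the usual V8 space accessors rather than anything shown in this CL:

```cpp
// Hypothetical usage; accessor names assumed from the surrounding codebase.
size_t CommittedPhysicalMemoryOfHeap(Heap* heap) {
  return heap->new_space()->CommittedPhysicalMemory() +
         heap->old_pointer_space()->CommittedPhysicalMemory() +
         heap->old_data_space()->CommittedPhysicalMemory() +
         heap->code_space()->CommittedPhysicalMemory() +
         heap->map_space()->CommittedPhysicalMemory() +
         heap->cell_space()->CommittedPhysicalMemory() +
         heap->lo_space()->CommittedPhysicalMemory();
}
```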
| 1819 // ----------------------------------------------------------------------------- | 1857 // ----------------------------------------------------------------------------- |
| 1820 // Free lists for old object spaces implementation | 1858 // Free lists for old object spaces implementation |
| 1821 | 1859 |
| 1822 void FreeListNode::set_size(Heap* heap, int size_in_bytes) { | 1860 void FreeListNode::set_size(Heap* heap, int size_in_bytes) { |
| 1823 ASSERT(size_in_bytes > 0); | 1861 ASSERT(size_in_bytes > 0); |
| 1824 ASSERT(IsAligned(size_in_bytes, kPointerSize)); | 1862 ASSERT(IsAligned(size_in_bytes, kPointerSize)); |
| 1825 | 1863 |
| 1826 // We write a map and possibly size information to the block. If the block | 1864 // We write a map and possibly size information to the block. If the block |
| 1827 // is big enough to be a FreeSpace with at least one extra word (the next | 1865 // is big enough to be a FreeSpace with at least one extra word (the next |
| 1828 // pointer), we set its map to be the free space map and its size to an | 1866 // pointer), we set its map to be the free space map and its size to an |
| (...skipping 854 matching lines...) | |
| 2683 reinterpret_cast<Object**>(object->address())[0] = | 2721 reinterpret_cast<Object**>(object->address())[0] = |
| 2684 heap()->fixed_array_map(); | 2722 heap()->fixed_array_map(); |
| 2685 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0); | 2723 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0); |
| 2686 #endif | 2724 #endif |
| 2687 | 2725 |
| 2688 heap()->incremental_marking()->OldSpaceStep(object_size); | 2726 heap()->incremental_marking()->OldSpaceStep(object_size); |
| 2689 return object; | 2727 return object; |
| 2690 } | 2728 } |
| 2691 | 2729 |
| 2692 | 2730 |
| | 2731 size_t LargeObjectSpace::CommittedPhysicalMemory() { |
| | 2732 if (!VirtualMemory::HasLazyCommits()) return CommittedMemory(); |
| | 2733 size_t size = 0; |
| | 2734 LargePage* current = first_page_; |
| | 2735 while (current != NULL) { |
| | 2736 size += current->CommittedPhysicalMemory(); |
| | 2737 current = current->next_page(); |
| | 2738 } |
| | 2739 return size; |
| | 2740 } |
| | 2741 |
| | 2742 |
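Each override is guarded by `VirtualMemory::HasLazyCommits()`: on platforms that commit eagerly, committed and physically backed memory coincide, so `CommittedMemory()` is already the right answer. A sketch of what the per-platform predicate might look like, purely for illustration — the real definitions live in the platform-specific files and may differ:

```cpp
// Illustrative only; not the actual platform implementations.
bool VirtualMemory::HasLazyCommits() {
#if defined(__linux__) || defined(__APPLE__)
  return true;   // mmap'd regions are physically backed lazily on first touch.
#else
  return false;  // e.g. Win32 commit accounting is eager.
#endif
}
```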
| 2693 // GC support | 2743 // GC support |
| 2694 MaybeObject* LargeObjectSpace::FindObject(Address a) { | 2744 MaybeObject* LargeObjectSpace::FindObject(Address a) { |
| 2695 LargePage* page = FindPage(a); | 2745 LargePage* page = FindPage(a); |
| 2696 if (page != NULL) { | 2746 if (page != NULL) { |
| 2697 return page->GetObject(); | 2747 return page->GetObject(); |
| 2698 } | 2748 } |
| 2699 return Failure::Exception(); | 2749 return Failure::Exception(); |
| 2700 } | 2750 } |
| 2701 | 2751 |
| 2702 | 2752 |
| (...skipping 184 matching lines...) | |
| 2887 object->ShortPrint(); | 2937 object->ShortPrint(); |
| 2888 PrintF("\n"); | 2938 PrintF("\n"); |
| 2889 } | 2939 } |
| 2890 printf(" --------------------------------------\n"); | 2940 printf(" --------------------------------------\n"); |
| 2891 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 2941 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 2892 } | 2942 } |
| 2893 | 2943 |
| 2894 #endif // DEBUG | 2944 #endif // DEBUG |
| 2895 | 2945 |
| 2896 } } // namespace v8::internal | 2946 } } // namespace v8::internal |