| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 942 matching lines...) |
| 953 if (id == CODE_SPACE) { | 953 if (id == CODE_SPACE) { |
| 954 area_size_ = heap->isolate()->memory_allocator()-> | 954 area_size_ = heap->isolate()->memory_allocator()-> |
| 955 CodePageAreaSize(); | 955 CodePageAreaSize(); |
| 956 } else { | 956 } else { |
| 957 area_size_ = Page::kPageSize - Page::kObjectStartOffset; | 957 area_size_ = Page::kPageSize - Page::kObjectStartOffset; |
| 958 } | 958 } |
| 959 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) | 959 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) |
| 960 * AreaSize(); | 960 * AreaSize(); |
| 961 accounting_stats_.Clear(); | 961 accounting_stats_.Clear(); |
| 962 | 962 |
| 963 allocation_info_.top = NULL; | 963 allocation_info_.set_top(NULL); |
| 964 allocation_info_.limit = NULL; | 964 allocation_info_.set_limit(NULL); |
| 965 | 965 |
| 966 anchor_.InitializeAsAnchor(this); | 966 anchor_.InitializeAsAnchor(this); |
| 967 } | 967 } |
| 968 | 968 |
| 969 | 969 |
| 970 bool PagedSpace::SetUp() { | 970 bool PagedSpace::SetUp() { |
| 971 return true; | 971 return true; |
| 972 } | 972 } |
| 973 | 973 |
| 974 | 974 |
| 975 bool PagedSpace::HasBeenSetUp() { | 975 bool PagedSpace::HasBeenSetUp() { |
| 976 return true; | 976 return true; |
| 977 } | 977 } |
| 978 | 978 |
| 979 | 979 |
| 980 void PagedSpace::TearDown() { | 980 void PagedSpace::TearDown() { |
| 981 PageIterator iterator(this); | 981 PageIterator iterator(this); |
| 982 while (iterator.has_next()) { | 982 while (iterator.has_next()) { |
| 983 heap()->isolate()->memory_allocator()->Free(iterator.next()); | 983 heap()->isolate()->memory_allocator()->Free(iterator.next()); |
| 984 } | 984 } |
| 985 anchor_.set_next_page(&anchor_); | 985 anchor_.set_next_page(&anchor_); |
| 986 anchor_.set_prev_page(&anchor_); | 986 anchor_.set_prev_page(&anchor_); |
| 987 accounting_stats_.Clear(); | 987 accounting_stats_.Clear(); |
| 988 } | 988 } |
| 989 | 989 |
| 990 | 990 |
| 991 size_t PagedSpace::CommittedPhysicalMemory() { | 991 size_t PagedSpace::CommittedPhysicalMemory() { |
| 992 if (!VirtualMemory::HasLazyCommits()) return CommittedMemory(); | 992 if (!VirtualMemory::HasLazyCommits()) return CommittedMemory(); |
| 993 MemoryChunk::UpdateHighWaterMark(allocation_info_.top); | 993 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); |
| 994 size_t size = 0; | 994 size_t size = 0; |
| 995 PageIterator it(this); | 995 PageIterator it(this); |
| 996 while (it.has_next()) { | 996 while (it.has_next()) { |
| 997 size += it.next()->CommittedPhysicalMemory(); | 997 size += it.next()->CommittedPhysicalMemory(); |
| 998 } | 998 } |
| 999 return size; | 999 return size; |
| 1000 } | 1000 } |
| 1001 | 1001 |
| 1002 | 1002 |
| 1003 MaybeObject* PagedSpace::FindObject(Address addr) { | 1003 MaybeObject* PagedSpace::FindObject(Address addr) { |
| (...skipping 111 matching lines...) |
| 1115 | 1115 |
| 1116 void PagedSpace::ResetFreeListStatistics() { | 1116 void PagedSpace::ResetFreeListStatistics() { |
| 1117 PageIterator page_iterator(this); | 1117 PageIterator page_iterator(this); |
| 1118 while (page_iterator.has_next()) { | 1118 while (page_iterator.has_next()) { |
| 1119 Page* page = page_iterator.next(); | 1119 Page* page = page_iterator.next(); |
| 1120 page->ResetFreeListStatistics(); | 1120 page->ResetFreeListStatistics(); |
| 1121 } | 1121 } |
| 1122 } | 1122 } |
| 1123 | 1123 |
| 1124 | 1124 |
| | 1125 void PagedSpace::IncreaseCapacity(int size) { |
| | 1126 accounting_stats_.ExpandSpace(size); |
| | 1127 } |
| | 1128 |
| | 1129 |
| 1125 void PagedSpace::ReleasePage(Page* page, bool unlink) { | 1130 void PagedSpace::ReleasePage(Page* page, bool unlink) { |
| 1126 ASSERT(page->LiveBytes() == 0); | 1131 ASSERT(page->LiveBytes() == 0); |
| 1127 ASSERT(AreaSize() == page->area_size()); | 1132 ASSERT(AreaSize() == page->area_size()); |
| 1128 | 1133 |
| 1129 // Adjust list of unswept pages if the page is the head of the list. | 1134 // Adjust list of unswept pages if the page is the head of the list. |
| 1130 if (first_unswept_page_ == page) { | 1135 if (first_unswept_page_ == page) { |
| 1131 first_unswept_page_ = page->next_page(); | 1136 first_unswept_page_ = page->next_page(); |
| 1132 if (first_unswept_page_ == anchor()) { | 1137 if (first_unswept_page_ == anchor()) { |
| 1133 first_unswept_page_ = Page::FromAddress(NULL); | 1138 first_unswept_page_ = Page::FromAddress(NULL); |
| 1134 } | 1139 } |
| 1135 } | 1140 } |
| 1136 | 1141 |
| 1137 if (page->WasSwept()) { | 1142 if (page->WasSwept()) { |
| 1138 intptr_t size = free_list_.EvictFreeListItems(page); | 1143 intptr_t size = free_list_.EvictFreeListItems(page); |
| 1139 accounting_stats_.AllocateBytes(size); | 1144 accounting_stats_.AllocateBytes(size); |
| 1140 ASSERT_EQ(AreaSize(), static_cast<int>(size)); | 1145 ASSERT_EQ(AreaSize(), static_cast<int>(size)); |
| 1141 } else { | 1146 } else { |
| 1142 DecreaseUnsweptFreeBytes(page); | 1147 DecreaseUnsweptFreeBytes(page); |
| 1143 } | 1148 } |
| 1144 | 1149 |
| 1145 if (Page::FromAllocationTop(allocation_info_.top) == page) { | 1150 if (Page::FromAllocationTop(allocation_info_.top()) == page) { |
| 1146 allocation_info_.top = allocation_info_.limit = NULL; | 1151 allocation_info_.set_top(NULL); |
| | 1152 allocation_info_.set_limit(NULL); |
| 1147 } | 1153 } |
| 1148 | 1154 |
| 1149 if (unlink) { | 1155 if (unlink) { |
| 1150 page->Unlink(); | 1156 page->Unlink(); |
| 1151 } | 1157 } |
| 1152 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) { | 1158 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) { |
| 1153 heap()->isolate()->memory_allocator()->Free(page); | 1159 heap()->isolate()->memory_allocator()->Free(page); |
| 1154 } else { | 1160 } else { |
| 1155 heap()->QueueMemoryChunkForFree(page); | 1161 heap()->QueueMemoryChunkForFree(page); |
| 1156 } | 1162 } |
| 1157 | 1163 |
| 1158 ASSERT(Capacity() > 0); | 1164 ASSERT(Capacity() > 0); |
| 1159 accounting_stats_.ShrinkSpace(AreaSize()); | 1165 accounting_stats_.ShrinkSpace(AreaSize()); |
| 1160 } | 1166 } |
| 1161 | 1167 |
| 1162 | 1168 |
| 1163 #ifdef DEBUG | 1169 #ifdef DEBUG |
| 1164 void PagedSpace::Print() { } | 1170 void PagedSpace::Print() { } |
| 1165 #endif | 1171 #endif |
| 1166 | 1172 |
| 1167 #ifdef VERIFY_HEAP | 1173 #ifdef VERIFY_HEAP |
| 1168 void PagedSpace::Verify(ObjectVisitor* visitor) { | 1174 void PagedSpace::Verify(ObjectVisitor* visitor) { |
| 1169 // We can only iterate over the pages if they were swept precisely. | 1175 // We can only iterate over the pages if they were swept precisely. |
| 1170 if (was_swept_conservatively_) return; | 1176 if (was_swept_conservatively_) return; |
| 1171 | 1177 |
| 1172 bool allocation_pointer_found_in_space = | 1178 bool allocation_pointer_found_in_space = |
| 1173 (allocation_info_.top == allocation_info_.limit); | 1179 (allocation_info_.top() == allocation_info_.limit()); |
| 1174 PageIterator page_iterator(this); | 1180 PageIterator page_iterator(this); |
| 1175 while (page_iterator.has_next()) { | 1181 while (page_iterator.has_next()) { |
| 1176 Page* page = page_iterator.next(); | 1182 Page* page = page_iterator.next(); |
| 1177 CHECK(page->owner() == this); | 1183 CHECK(page->owner() == this); |
| 1178 if (page == Page::FromAllocationTop(allocation_info_.top)) { | 1184 if (page == Page::FromAllocationTop(allocation_info_.top())) { |
| 1179 allocation_pointer_found_in_space = true; | 1185 allocation_pointer_found_in_space = true; |
| 1180 } | 1186 } |
| 1181 CHECK(page->WasSweptPrecisely()); | 1187 CHECK(page->WasSweptPrecisely()); |
| 1182 HeapObjectIterator it(page, NULL); | 1188 HeapObjectIterator it(page, NULL); |
| 1183 Address end_of_previous_object = page->area_start(); | 1189 Address end_of_previous_object = page->area_start(); |
| 1184 Address top = page->area_end(); | 1190 Address top = page->area_end(); |
| 1185 int black_size = 0; | 1191 int black_size = 0; |
| 1186 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { | 1192 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { |
| 1187 CHECK(end_of_previous_object <= object->address()); | 1193 CHECK(end_of_previous_object <= object->address()); |
| 1188 | 1194 |
| (...skipping 90 matching lines...) |
| 1279 if (allocated_histogram_) { | 1285 if (allocated_histogram_) { |
| 1280 DeleteArray(allocated_histogram_); | 1286 DeleteArray(allocated_histogram_); |
| 1281 allocated_histogram_ = NULL; | 1287 allocated_histogram_ = NULL; |
| 1282 } | 1288 } |
| 1283 if (promoted_histogram_) { | 1289 if (promoted_histogram_) { |
| 1284 DeleteArray(promoted_histogram_); | 1290 DeleteArray(promoted_histogram_); |
| 1285 promoted_histogram_ = NULL; | 1291 promoted_histogram_ = NULL; |
| 1286 } | 1292 } |
| 1287 | 1293 |
| 1288 start_ = NULL; | 1294 start_ = NULL; |
| 1289 allocation_info_.top = NULL; | 1295 allocation_info_.set_top(NULL); |
| 1290 allocation_info_.limit = NULL; | 1296 allocation_info_.set_limit(NULL); |
| 1291 | 1297 |
| 1292 to_space_.TearDown(); | 1298 to_space_.TearDown(); |
| 1293 from_space_.TearDown(); | 1299 from_space_.TearDown(); |
| 1294 | 1300 |
| 1295 LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_)); | 1301 LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_)); |
| 1296 | 1302 |
| 1297 ASSERT(reservation_.IsReserved()); | 1303 ASSERT(reservation_.IsReserved()); |
| 1298 heap()->isolate()->memory_allocator()->FreeMemory(&reservation_, | 1304 heap()->isolate()->memory_allocator()->FreeMemory(&reservation_, |
| 1299 NOT_EXECUTABLE); | 1305 NOT_EXECUTABLE); |
| 1300 chunk_base_ = NULL; | 1306 chunk_base_ = NULL; |
| (...skipping 36 matching lines...) |
| 1337 if (!from_space_.ShrinkTo(rounded_new_capacity)) { | 1343 if (!from_space_.ShrinkTo(rounded_new_capacity)) { |
| 1338 // If we managed to shrink to-space but couldn't shrink from | 1344 // If we managed to shrink to-space but couldn't shrink from |
| 1339 // space, attempt to grow to-space again. | 1345 // space, attempt to grow to-space again. |
| 1340 if (!to_space_.GrowTo(from_space_.Capacity())) { | 1346 if (!to_space_.GrowTo(from_space_.Capacity())) { |
| 1341 // We are in an inconsistent state because we could not | 1347 // We are in an inconsistent state because we could not |
| 1342 // commit/uncommit memory from new space. | 1348 // commit/uncommit memory from new space. |
| 1343 V8::FatalProcessOutOfMemory("Failed to shrink new space."); | 1349 V8::FatalProcessOutOfMemory("Failed to shrink new space."); |
| 1344 } | 1350 } |
| 1345 } | 1351 } |
| 1346 } | 1352 } |
| 1347 allocation_info_.limit = to_space_.page_high(); | 1353 allocation_info_.set_limit(to_space_.page_high()); |
| 1348 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); | 1354 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
| 1349 } | 1355 } |
| 1350 | 1356 |
| 1351 | 1357 |
| 1352 void NewSpace::UpdateAllocationInfo() { | 1358 void NewSpace::UpdateAllocationInfo() { |
| 1353 MemoryChunk::UpdateHighWaterMark(allocation_info_.top); | 1359 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); |
| 1354 allocation_info_.top = to_space_.page_low(); | 1360 allocation_info_.set_top(to_space_.page_low()); |
| 1355 allocation_info_.limit = to_space_.page_high(); | 1361 allocation_info_.set_limit(to_space_.page_high()); |
| 1356 | 1362 |
| 1357 // Lower limit during incremental marking. | 1363 // Lower limit during incremental marking. |
| 1358 if (heap()->incremental_marking()->IsMarking() && | 1364 if (heap()->incremental_marking()->IsMarking() && |
| 1359 inline_allocation_limit_step() != 0) { | 1365 inline_allocation_limit_step() != 0) { |
| 1360 Address new_limit = | 1366 Address new_limit = |
| 1361 allocation_info_.top + inline_allocation_limit_step(); | 1367 allocation_info_.top() + inline_allocation_limit_step(); |
| 1362 allocation_info_.limit = Min(new_limit, allocation_info_.limit); | 1368 allocation_info_.set_limit(Min(new_limit, allocation_info_.limit())); |
| 1363 } | 1369 } |
| 1364 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); | 1370 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
| 1365 } | 1371 } |
| 1366 | 1372 |
| 1367 | 1373 |
| 1368 void NewSpace::ResetAllocationInfo() { | 1374 void NewSpace::ResetAllocationInfo() { |
| 1369 to_space_.Reset(); | 1375 to_space_.Reset(); |
| 1370 UpdateAllocationInfo(); | 1376 UpdateAllocationInfo(); |
| 1371 pages_used_ = 0; | 1377 pages_used_ = 0; |
| 1372 // Clear all mark-bits in the to-space. | 1378 // Clear all mark-bits in the to-space. |
| 1373 NewSpacePageIterator it(&to_space_); | 1379 NewSpacePageIterator it(&to_space_); |
| 1374 while (it.has_next()) { | 1380 while (it.has_next()) { |
| 1375 Bitmap::Clear(it.next()); | 1381 Bitmap::Clear(it.next()); |
| 1376 } | 1382 } |
| 1377 } | 1383 } |
| 1378 | 1384 |
| 1379 | 1385 |
| 1380 bool NewSpace::AddFreshPage() { | 1386 bool NewSpace::AddFreshPage() { |
| 1381 Address top = allocation_info_.top; | 1387 Address top = allocation_info_.top(); |
| 1382 if (NewSpacePage::IsAtStart(top)) { | 1388 if (NewSpacePage::IsAtStart(top)) { |
| 1383 // The current page is already empty. Don't try to make another. | 1389 // The current page is already empty. Don't try to make another. |
| 1384 | 1390 |
| 1385 // We should only get here if someone asks to allocate more | 1391 // We should only get here if someone asks to allocate more |
| 1386 // than what can be stored in a single page. | 1392 // than what can be stored in a single page. |
| 1387 // TODO(gc): Change the limit on new-space allocation to prevent this | 1393 // TODO(gc): Change the limit on new-space allocation to prevent this |
| 1388 // from happening (all such allocations should go directly to LOSpace). | 1394 // from happening (all such allocations should go directly to LOSpace). |
| 1389 return false; | 1395 return false; |
| 1390 } | 1396 } |
| 1391 if (!to_space_.AdvancePage()) { | 1397 if (!to_space_.AdvancePage()) { |
| (...skipping 11 matching lines...) |
| 1403 int remaining_in_page = static_cast<int>(limit - top); | 1409 int remaining_in_page = static_cast<int>(limit - top); |
| 1404 heap()->CreateFillerObjectAt(top, remaining_in_page); | 1410 heap()->CreateFillerObjectAt(top, remaining_in_page); |
| 1405 pages_used_++; | 1411 pages_used_++; |
| 1406 UpdateAllocationInfo(); | 1412 UpdateAllocationInfo(); |
| 1407 | 1413 |
| 1408 return true; | 1414 return true; |
| 1409 } | 1415 } |
| 1410 | 1416 |
| 1411 | 1417 |
| 1412 MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) { | 1418 MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) { |
| 1413 Address old_top = allocation_info_.top; | 1419 Address old_top = allocation_info_.top(); |
| 1414 Address new_top = old_top + size_in_bytes; | 1420 Address new_top = old_top + size_in_bytes; |
| 1415 Address high = to_space_.page_high(); | 1421 Address high = to_space_.page_high(); |
| 1416 if (allocation_info_.limit < high) { | 1422 if (allocation_info_.limit() < high) { |
| 1417 // Incremental marking has lowered the limit to get a | 1423 // Incremental marking has lowered the limit to get a |
| 1418 // chance to do a step. | 1424 // chance to do a step. |
| 1419 allocation_info_.limit = Min( | 1425 Address new_limit = Min( |
| 1420 allocation_info_.limit + inline_allocation_limit_step_, | 1426 allocation_info_.limit() + inline_allocation_limit_step_, |
| 1421 high); | 1427 high); |
| | 1428 allocation_info_.set_limit(new_limit); |
| 1422 int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_); | 1429 int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_); |
| 1423 heap()->incremental_marking()->Step( | 1430 heap()->incremental_marking()->Step( |
| 1424 bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD); | 1431 bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD); |
| 1425 top_on_previous_step_ = new_top; | 1432 top_on_previous_step_ = new_top; |
| 1426 return AllocateRaw(size_in_bytes); | 1433 return AllocateRaw(size_in_bytes); |
| 1427 } else if (AddFreshPage()) { | 1434 } else if (AddFreshPage()) { |
| 1428 // Switched to new page. Try allocating again. | 1435 // Switched to new page. Try allocating again. |
| 1429 int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_); | 1436 int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_); |
| 1430 heap()->incremental_marking()->Step( | 1437 heap()->incremental_marking()->Step( |
| 1431 bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD); | 1438 bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD); |
| (...skipping 70 matching lines...) |
| 1502 // Creates a space in the young generation. The constructor does not | 1509 // Creates a space in the young generation. The constructor does not |
| 1503 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of | 1510 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of |
| 1504 // memory of size 'capacity' when set up, and does not grow or shrink | 1511 // memory of size 'capacity' when set up, and does not grow or shrink |
| 1505 // otherwise. In the mark-compact collector, the memory region of the from | 1512 // otherwise. In the mark-compact collector, the memory region of the from |
| 1506 // space is used as the marking stack. It requires contiguous memory | 1513 // space is used as the marking stack. It requires contiguous memory |
| 1507 // addresses. | 1514 // addresses. |
| 1508 ASSERT(maximum_capacity >= Page::kPageSize); | 1515 ASSERT(maximum_capacity >= Page::kPageSize); |
| 1509 initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize); | 1516 initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize); |
| 1510 capacity_ = initial_capacity; | 1517 capacity_ = initial_capacity; |
| 1511 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize); | 1518 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize); |
| | 1519 maximum_committed_ = 0; |
| 1512 committed_ = false; | 1520 committed_ = false; |
| 1513 start_ = start; | 1521 start_ = start; |
| 1514 address_mask_ = ~(maximum_capacity - 1); | 1522 address_mask_ = ~(maximum_capacity - 1); |
| 1515 object_mask_ = address_mask_ | kHeapObjectTagMask; | 1523 object_mask_ = address_mask_ | kHeapObjectTagMask; |
| 1516 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag; | 1524 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag; |
| 1517 age_mark_ = start_; | 1525 age_mark_ = start_; |
| 1518 } | 1526 } |
| 1519 | 1527 |
| 1520 | 1528 |
| 1521 void SemiSpace::TearDown() { | 1529 void SemiSpace::TearDown() { |
| (...skipping 12 matching lines...) |
| 1534 } | 1542 } |
| 1535 | 1543 |
| 1536 NewSpacePage* current = anchor(); | 1544 NewSpacePage* current = anchor(); |
| 1537 for (int i = 0; i < pages; i++) { | 1545 for (int i = 0; i < pages; i++) { |
| 1538 NewSpacePage* new_page = | 1546 NewSpacePage* new_page = |
| 1539 NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this); | 1547 NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this); |
| 1540 new_page->InsertAfter(current); | 1548 new_page->InsertAfter(current); |
| 1541 current = new_page; | 1549 current = new_page; |
| 1542 } | 1550 } |
| 1543 | 1551 |
| | 1552 SetCapacity(capacity_); |
| 1544 committed_ = true; | 1553 committed_ = true; |
| 1545 Reset(); | 1554 Reset(); |
| 1546 return true; | 1555 return true; |
| 1547 } | 1556 } |
| 1548 | 1557 |
| 1549 | 1558 |
| 1550 bool SemiSpace::Uncommit() { | 1559 bool SemiSpace::Uncommit() { |
| 1551 ASSERT(is_committed()); | 1560 ASSERT(is_committed()); |
| 1552 Address start = start_ + maximum_capacity_ - capacity_; | 1561 Address start = start_ + maximum_capacity_ - capacity_; |
| 1553 if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) { | 1562 if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) { |
| (...skipping 28 matching lines...) |
| 1582 int pages_before = capacity_ / Page::kPageSize; | 1591 int pages_before = capacity_ / Page::kPageSize; |
| 1583 int pages_after = new_capacity / Page::kPageSize; | 1592 int pages_after = new_capacity / Page::kPageSize; |
| 1584 | 1593 |
| 1585 size_t delta = new_capacity - capacity_; | 1594 size_t delta = new_capacity - capacity_; |
| 1586 | 1595 |
| 1587 ASSERT(IsAligned(delta, OS::AllocateAlignment())); | 1596 ASSERT(IsAligned(delta, OS::AllocateAlignment())); |
| 1588 if (!heap()->isolate()->memory_allocator()->CommitBlock( | 1597 if (!heap()->isolate()->memory_allocator()->CommitBlock( |
| 1589 start_ + capacity_, delta, executable())) { | 1598 start_ + capacity_, delta, executable())) { |
| 1590 return false; | 1599 return false; |
| 1591 } | 1600 } |
| 1592 capacity_ = new_capacity; | 1601 SetCapacity(new_capacity); |
| 1593 NewSpacePage* last_page = anchor()->prev_page(); | 1602 NewSpacePage* last_page = anchor()->prev_page(); |
| 1594 ASSERT(last_page != anchor()); | 1603 ASSERT(last_page != anchor()); |
| 1595 for (int i = pages_before; i < pages_after; i++) { | 1604 for (int i = pages_before; i < pages_after; i++) { |
| 1596 Address page_address = start_ + i * Page::kPageSize; | 1605 Address page_address = start_ + i * Page::kPageSize; |
| 1597 NewSpacePage* new_page = NewSpacePage::Initialize(heap(), | 1606 NewSpacePage* new_page = NewSpacePage::Initialize(heap(), |
| 1598 page_address, | 1607 page_address, |
| 1599 this); | 1608 this); |
| 1600 new_page->InsertAfter(last_page); | 1609 new_page->InsertAfter(last_page); |
| 1601 Bitmap::Clear(new_page); | 1610 Bitmap::Clear(new_page); |
| 1602 // Duplicate the flags that were set on the old page. | 1611 // Duplicate the flags that were set on the old page. |
| (...skipping 19 matching lines...) |
| 1622 } | 1631 } |
| 1623 | 1632 |
| 1624 int pages_after = new_capacity / Page::kPageSize; | 1633 int pages_after = new_capacity / Page::kPageSize; |
| 1625 NewSpacePage* new_last_page = | 1634 NewSpacePage* new_last_page = |
| 1626 NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize); | 1635 NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize); |
| 1627 new_last_page->set_next_page(anchor()); | 1636 new_last_page->set_next_page(anchor()); |
| 1628 anchor()->set_prev_page(new_last_page); | 1637 anchor()->set_prev_page(new_last_page); |
| 1629 ASSERT((current_page_ >= first_page()) && (current_page_ <= new_last_page)); | 1638 ASSERT((current_page_ >= first_page()) && (current_page_ <= new_last_page)); |
| 1630 } | 1639 } |
| 1631 | 1640 |
| 1632 capacity_ = new_capacity; | 1641 SetCapacity(new_capacity); |
| 1633 | 1642 |
| 1634 return true; | 1643 return true; |
| 1635 } | 1644 } |
| 1636 | 1645 |
| 1637 | 1646 |
| 1638 void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) { | 1647 void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) { |
| 1639 anchor_.set_owner(this); | 1648 anchor_.set_owner(this); |
| 1640 // Fixup back-pointers to anchor. Address of anchor changes | 1649 // Fixup back-pointers to anchor. Address of anchor changes |
| 1641 // when we swap. | 1650 // when we swap. |
| 1642 anchor_.prev_page()->set_next_page(&anchor_); | 1651 anchor_.prev_page()->set_next_page(&anchor_); |
| (...skipping 42 matching lines...) |
| 1685 // has changed. | 1694 // has changed. |
| 1686 // Swap to/from-space bits on pages. | 1695 // Swap to/from-space bits on pages. |
| 1687 // Copy GC flags from old active space (from-space) to new (to-space). | 1696 // Copy GC flags from old active space (from-space) to new (to-space). |
| 1688 intptr_t flags = from->current_page()->GetFlags(); | 1697 intptr_t flags = from->current_page()->GetFlags(); |
| 1689 to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask); | 1698 to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask); |
| 1690 | 1699 |
| 1691 from->FlipPages(0, 0); | 1700 from->FlipPages(0, 0); |
| 1692 } | 1701 } |
| 1693 | 1702 |
| 1694 | 1703 |
| | 1704 void SemiSpace::SetCapacity(int new_capacity) { |
| | 1705 capacity_ = new_capacity; |
| | 1706 if (capacity_ > maximum_committed_) { |
| | 1707 maximum_committed_ = capacity_; |
| | 1708 } |
| | 1709 } |
| | 1710 |
| | 1711 |
| 1695 void SemiSpace::set_age_mark(Address mark) { | 1712 void SemiSpace::set_age_mark(Address mark) { |
| 1696 ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this); | 1713 ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this); |
| 1697 age_mark_ = mark; | 1714 age_mark_ = mark; |
| 1698 // Mark all pages up to the one containing mark. | 1715 // Mark all pages up to the one containing mark. |
| 1699 NewSpacePageIterator it(space_start(), mark); | 1716 NewSpacePageIterator it(space_start(), mark); |
| 1700 while (it.has_next()) { | 1717 while (it.has_next()) { |
| 1701 it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK); | 1718 it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK); |
| 1702 } | 1719 } |
| 1703 } | 1720 } |
| 1704 | 1721 |
| (...skipping 261 matching lines...) |
| 1966 void NewSpace::RecordPromotion(HeapObject* obj) { | 1983 void NewSpace::RecordPromotion(HeapObject* obj) { |
| 1967 InstanceType type = obj->map()->instance_type(); | 1984 InstanceType type = obj->map()->instance_type(); |
| 1968 ASSERT(0 <= type && type <= LAST_TYPE); | 1985 ASSERT(0 <= type && type <= LAST_TYPE); |
| 1969 promoted_histogram_[type].increment_number(1); | 1986 promoted_histogram_[type].increment_number(1); |
| 1970 promoted_histogram_[type].increment_bytes(obj->Size()); | 1987 promoted_histogram_[type].increment_bytes(obj->Size()); |
| 1971 } | 1988 } |
| 1972 | 1989 |
| 1973 | 1990 |
| 1974 size_t NewSpace::CommittedPhysicalMemory() { | 1991 size_t NewSpace::CommittedPhysicalMemory() { |
| 1975 if (!VirtualMemory::HasLazyCommits()) return CommittedMemory(); | 1992 if (!VirtualMemory::HasLazyCommits()) return CommittedMemory(); |
| 1976 MemoryChunk::UpdateHighWaterMark(allocation_info_.top); | 1993 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); |
| 1977 size_t size = to_space_.CommittedPhysicalMemory(); | 1994 size_t size = to_space_.CommittedPhysicalMemory(); |
| 1978 if (from_space_.is_committed()) { | 1995 if (from_space_.is_committed()) { |
| 1979 size += from_space_.CommittedPhysicalMemory(); | 1996 size += from_space_.CommittedPhysicalMemory(); |
| 1980 } | 1997 } |
| 1981 return size; | 1998 return size; |
| 1982 } | 1999 } |
| 1983 | 2000 |
| 1984 | 2001 |
| 1985 // ----------------------------------------------------------------------------- | 2002 // ----------------------------------------------------------------------------- |
| 1986 // Free lists for old object spaces implementation | 2003 // Free lists for old object spaces implementation |
| (...skipping 493 matching lines...) |
| 2480 sum += large_list_.SumFreeList(); | 2497 sum += large_list_.SumFreeList(); |
| 2481 sum += huge_list_.SumFreeList(); | 2498 sum += huge_list_.SumFreeList(); |
| 2482 return sum; | 2499 return sum; |
| 2483 } | 2500 } |
| 2484 #endif | 2501 #endif |
| 2485 | 2502 |
| 2486 | 2503 |
| 2487 // ----------------------------------------------------------------------------- | 2504 // ----------------------------------------------------------------------------- |
| 2488 // OldSpace implementation | 2505 // OldSpace implementation |
| 2489 | 2506 |
| 2490 bool NewSpace::ReserveSpace(int bytes) { | |
| 2491 // We can't reliably unpack a partial snapshot that needs more new space | |
| 2492 // than the minimum NewSpace size. The limit can be set lower than | |
| 2493 // the end of new space either because there is more space on the next page | |
| 2494 // or because we have lowered the limit in order to get periodic incremental | |
| 2495 // marking. The most reliable way to ensure that there is linear space is | |
| 2496 // to do the allocation, then rewind the limit. | |
| 2497 ASSERT(bytes <= InitialCapacity()); | |
| 2498 MaybeObject* maybe = AllocateRaw(bytes); | |
| 2499 Object* object = NULL; | |
| 2500 if (!maybe->ToObject(&object)) return false; | |
| 2501 HeapObject* allocation = HeapObject::cast(object); | |
| 2502 Address top = allocation_info_.top; | |
| 2503 if ((top - bytes) == allocation->address()) { | |
| 2504 allocation_info_.top = allocation->address(); | |
| 2505 return true; | |
| 2506 } | |
| 2507 // There may be a borderline case here where the allocation succeeded, but | |
| 2508 // the limit and top have moved on to a new page. In that case we try again. | |
| 2509 return ReserveSpace(bytes); | |
| 2510 } | |
| 2511 | |
| 2512 | |
| 2513 void PagedSpace::PrepareForMarkCompact() { | 2507 void PagedSpace::PrepareForMarkCompact() { |
| 2514 // We don't have a linear allocation area while sweeping. It will be restored | 2508 // We don't have a linear allocation area while sweeping. It will be restored |
| 2515 // on the first allocation after the sweep. | 2509 // on the first allocation after the sweep. |
| 2516 // Mark the old linear allocation area with a free space map so it can be | 2510 // Mark the old linear allocation area with a free space map so it can be |
| 2517 // skipped when scanning the heap. | 2511 // skipped when scanning the heap. |
| 2518 int old_linear_size = static_cast<int>(limit() - top()); | 2512 int old_linear_size = static_cast<int>(limit() - top()); |
| 2519 Free(top(), old_linear_size); | 2513 Free(top(), old_linear_size); |
| 2520 SetTop(NULL, NULL); | 2514 SetTop(NULL, NULL); |
| 2521 | 2515 |
| 2522 // Stop lazy sweeping and clear marking bits for unswept pages. | 2516 // Stop lazy sweeping and clear marking bits for unswept pages. |
| (...skipping 14 matching lines...) |
| 2537 } while (p != anchor()); | 2531 } while (p != anchor()); |
| 2538 } | 2532 } |
| 2539 first_unswept_page_ = Page::FromAddress(NULL); | 2533 first_unswept_page_ = Page::FromAddress(NULL); |
| 2540 unswept_free_bytes_ = 0; | 2534 unswept_free_bytes_ = 0; |
| 2541 | 2535 |
| 2542 // Clear the free list before a full GC---it will be rebuilt afterward. | 2536 // Clear the free list before a full GC---it will be rebuilt afterward. |
| 2543 free_list_.Reset(); | 2537 free_list_.Reset(); |
| 2544 } | 2538 } |
| 2545 | 2539 |
| 2546 | 2540 |
| 2547 bool PagedSpace::ReserveSpace(int size_in_bytes) { | |
| 2548 ASSERT(size_in_bytes <= AreaSize()); | |
| 2549 ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes)); | |
| 2550 Address current_top = allocation_info_.top; | |
| 2551 Address new_top = current_top + size_in_bytes; | |
| 2552 if (new_top <= allocation_info_.limit) return true; | |
| 2553 | |
| 2554 HeapObject* new_area = free_list_.Allocate(size_in_bytes); | |
| 2555 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes); | |
| 2556 if (new_area == NULL) return false; | |
| 2557 | |
| 2558 int old_linear_size = static_cast<int>(limit() - top()); | |
| 2559 // Mark the old linear allocation area with a free space so it can be | |
| 2560 // skipped when scanning the heap. This also puts it back in the free list | |
| 2561 // if it is big enough. | |
| 2562 Free(top(), old_linear_size); | |
| 2563 | |
| 2564 SetTop(new_area->address(), new_area->address() + size_in_bytes); | |
| 2565 return true; | |
| 2566 } | |
| 2567 | |
| 2568 | |
| 2569 intptr_t PagedSpace::SizeOfObjects() { | 2541 intptr_t PagedSpace::SizeOfObjects() { |
| 2570 ASSERT(!heap()->IsSweepingComplete() || (unswept_free_bytes_ == 0)); | 2542 ASSERT(!heap()->IsSweepingComplete() || (unswept_free_bytes_ == 0)); |
| 2571 return Size() - unswept_free_bytes_ - (limit() - top()); | 2543 return Size() - unswept_free_bytes_ - (limit() - top()); |
| 2572 } | 2544 } |
| 2573 | 2545 |
| 2574 | 2546 |
| 2575 // After we have booted, we have created a map which represents free space | 2547 // After we have booted, we have created a map which represents free space |
| 2576 // on the heap. If there was already a free list then the elements on it | 2548 // on the heap. If there was already a free list then the elements on it |
| 2577 // were created with the wrong FreeSpaceMap (normally NULL), so we need to | 2549 // were created with the wrong FreeSpaceMap (normally NULL), so we need to |
| 2578 // fix them. | 2550 // fix them. |
| 2579 void PagedSpace::RepairFreeListsAfterBoot() { | 2551 void PagedSpace::RepairFreeListsAfterBoot() { |
| 2580 free_list_.RepairLists(heap()); | 2552 free_list_.RepairLists(heap()); |
| 2581 } | 2553 } |
| 2582 | 2554 |
| 2583 | 2555 |
| 2584 // You have to call this last, since the implementation from PagedSpace | |
| 2585 // doesn't know that memory was 'promised' to large object space. | |
| 2586 bool LargeObjectSpace::ReserveSpace(int bytes) { | |
| 2587 return heap()->OldGenerationCapacityAvailable() >= bytes && | |
| 2588 (!heap()->incremental_marking()->IsStopped() || | |
| 2589 heap()->OldGenerationSpaceAvailable() >= bytes); | |
| 2590 } | |
| 2591 | |
| 2592 | |
| 2593 bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) { | 2556 bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) { |
| 2594 if (IsLazySweepingComplete()) return true; | 2557 if (IsLazySweepingComplete()) return true; |
| 2595 | 2558 |
| 2596 intptr_t freed_bytes = 0; | 2559 intptr_t freed_bytes = 0; |
| 2597 Page* p = first_unswept_page_; | 2560 Page* p = first_unswept_page_; |
| 2598 do { | 2561 do { |
| 2599 Page* next_page = p->next_page(); | 2562 Page* next_page = p->next_page(); |
| 2600 if (ShouldBeSweptLazily(p)) { | 2563 if (ShouldBeSweptLazily(p)) { |
| 2601 if (FLAG_gc_verbose) { | 2564 if (FLAG_gc_verbose) { |
| 2602 PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n", | 2565 PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n", |
| (...skipping 14 matching lines...) |
| 2617 first_unswept_page_ = p; | 2580 first_unswept_page_ = p; |
| 2618 } | 2581 } |
| 2619 | 2582 |
| 2620 heap()->FreeQueuedChunks(); | 2583 heap()->FreeQueuedChunks(); |
| 2621 | 2584 |
| 2622 return IsLazySweepingComplete(); | 2585 return IsLazySweepingComplete(); |
| 2623 } | 2586 } |
| 2624 | 2587 |
| 2625 | 2588 |
| 2626 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() { | 2589 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() { |
| 2627 if (allocation_info_.top >= allocation_info_.limit) return; | 2590 if (allocation_info_.top() >= allocation_info_.limit()) return; |
| 2628 | 2591 |
| 2629 if (Page::FromAllocationTop(allocation_info_.top)->IsEvacuationCandidate()) { | 2592 if (Page::FromAllocationTop(allocation_info_.top())-> |
| | 2593 IsEvacuationCandidate()) { |
| 2630 // Create filler object to keep page iterable if it was iterable. | 2594 // Create filler object to keep page iterable if it was iterable. |
| 2631 int remaining = | 2595 int remaining = |
| 2632 static_cast<int>(allocation_info_.limit - allocation_info_.top); | 2596 static_cast<int>(allocation_info_.limit() - allocation_info_.top()); |
| 2633 heap()->CreateFillerObjectAt(allocation_info_.top, remaining); | 2597 heap()->CreateFillerObjectAt(allocation_info_.top(), remaining); |
| 2634 | 2598 |
| 2635 allocation_info_.top = NULL; | 2599 allocation_info_.set_top(NULL); |
| 2636 allocation_info_.limit = NULL; | 2600 allocation_info_.set_limit(NULL); |
| 2637 } | 2601 } |
| 2638 } | 2602 } |
| 2639 | 2603 |
| 2640 | 2604 |
| 2641 bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) { | 2605 bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) { |
| 2642 MarkCompactCollector* collector = heap()->mark_compact_collector(); | 2606 MarkCompactCollector* collector = heap()->mark_compact_collector(); |
| 2643 if (collector->AreSweeperThreadsActivated()) { | 2607 if (collector->AreSweeperThreadsActivated()) { |
| 2644 if (collector->IsConcurrentSweepingInProgress()) { | 2608 if (collector->IsConcurrentSweepingInProgress()) { |
| 2645 if (collector->StealMemoryFromSweeperThreads(this) < size_in_bytes) { | 2609 if (collector->StealMemoryFromSweeperThreads(this) < size_in_bytes) { |
| 2646 if (!collector->sequential_sweeping()) { | 2610 if (!collector->sequential_sweeping()) { |
| (...skipping 189 matching lines...) |
| 2836 | 2800 |
| 2837 if (was_swept_conservatively_) return; | 2801 if (was_swept_conservatively_) return; |
| 2838 ClearHistograms(heap()->isolate()); | 2802 ClearHistograms(heap()->isolate()); |
| 2839 HeapObjectIterator obj_it(this); | 2803 HeapObjectIterator obj_it(this); |
| 2840 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) | 2804 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) |
| 2841 CollectHistogramInfo(obj); | 2805 CollectHistogramInfo(obj); |
| 2842 ReportHistogram(heap()->isolate(), true); | 2806 ReportHistogram(heap()->isolate(), true); |
| 2843 } | 2807 } |
| 2844 #endif | 2808 #endif |
| 2845 | 2809 |
| 2846 // ----------------------------------------------------------------------------- | |
| 2847 // FixedSpace implementation | |
| 2848 | |
| 2849 void FixedSpace::PrepareForMarkCompact() { | |
| 2850 // Call prepare of the super class. | |
| 2851 PagedSpace::PrepareForMarkCompact(); | |
| 2852 | |
| 2853 // During a non-compacting collection, everything below the linear | |
| 2854 // allocation pointer except wasted top-of-page blocks is considered | |
| 2855 // allocated and we will rediscover available bytes during the | |
| 2856 // collection. | |
| 2857 accounting_stats_.AllocateBytes(free_list_.available()); | |
| 2858 | |
| 2859 // Clear the free list before a full GC---it will be rebuilt afterward. | |
| 2860 free_list_.Reset(); | |
| 2861 } | |
| 2862 | |
| 2863 | 2810 |
| 2864 // ----------------------------------------------------------------------------- | 2811 // ----------------------------------------------------------------------------- |
| 2865 // MapSpace implementation | 2812 // MapSpace implementation |
| 2866 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless | 2813 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless |
| 2867 // there is at least one non-inlined virtual function. I would prefer to hide | 2814 // there is at least one non-inlined virtual function. I would prefer to hide |
| 2868 // the VerifyObject definition behind VERIFY_HEAP. | 2815 // the VerifyObject definition behind VERIFY_HEAP. |
| 2869 | 2816 |
| 2870 void MapSpace::VerifyObject(HeapObject* object) { | 2817 void MapSpace::VerifyObject(HeapObject* object) { |
| 2871 CHECK(object->IsMap()); | 2818 CHECK(object->IsMap()); |
| 2872 } | 2819 } |
| (...skipping 55 matching lines...) |
| 2928 first_page_(NULL), | 2875 first_page_(NULL), |
| 2929 size_(0), | 2876 size_(0), |
| 2930 page_count_(0), | 2877 page_count_(0), |
| 2931 objects_size_(0), | 2878 objects_size_(0), |
| 2932 chunk_map_(ComparePointers, 1024) {} | 2879 chunk_map_(ComparePointers, 1024) {} |
| 2933 | 2880 |
| 2934 | 2881 |
| 2935 bool LargeObjectSpace::SetUp() { | 2882 bool LargeObjectSpace::SetUp() { |
| 2936 first_page_ = NULL; | 2883 first_page_ = NULL; |
| 2937 size_ = 0; | 2884 size_ = 0; |
| | 2885 maximum_committed_ = 0; |
| 2938 page_count_ = 0; | 2886 page_count_ = 0; |
| 2939 objects_size_ = 0; | 2887 objects_size_ = 0; |
| 2940 chunk_map_.Clear(); | 2888 chunk_map_.Clear(); |
| 2941 return true; | 2889 return true; |
| 2942 } | 2890 } |
| 2943 | 2891 |
| 2944 | 2892 |
| 2945 void LargeObjectSpace::TearDown() { | 2893 void LargeObjectSpace::TearDown() { |
| 2946 while (first_page_ != NULL) { | 2894 while (first_page_ != NULL) { |
| 2947 LargePage* page = first_page_; | 2895 LargePage* page = first_page_; |
| (...skipping 26 matching lines...) |
| 2974 AllocateLargePage(object_size, this, executable); | 2922 AllocateLargePage(object_size, this, executable); |
| 2975 if (page == NULL) return Failure::RetryAfterGC(identity()); | 2923 if (page == NULL) return Failure::RetryAfterGC(identity()); |
| 2976 ASSERT(page->area_size() >= object_size); | 2924 ASSERT(page->area_size() >= object_size); |
| 2977 | 2925 |
| 2978 size_ += static_cast<int>(page->size()); | 2926 size_ += static_cast<int>(page->size()); |
| 2979 objects_size_ += object_size; | 2927 objects_size_ += object_size; |
| 2980 page_count_++; | 2928 page_count_++; |
| 2981 page->set_next_page(first_page_); | 2929 page->set_next_page(first_page_); |
| 2982 first_page_ = page; | 2930 first_page_ = page; |
| 2983 | 2931 |
| | 2932 if (size_ > maximum_committed_) { |
| | 2933 maximum_committed_ = size_; |
| | 2934 } |
| | 2935 |
| 2984 // Register all MemoryChunk::kAlignment-aligned chunks covered by | 2936 // Register all MemoryChunk::kAlignment-aligned chunks covered by |
| 2985 // this large page in the chunk map. | 2937 // this large page in the chunk map. |
| 2986 uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment; | 2938 uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment; |
| 2987 uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment; | 2939 uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment; |
| 2988 for (uintptr_t key = base; key <= limit; key++) { | 2940 for (uintptr_t key = base; key <= limit; key++) { |
| 2989 HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key), | 2941 HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key), |
| 2990 static_cast<uint32_t>(key), | 2942 static_cast<uint32_t>(key), |
| 2991 true); | 2943 true); |
| 2992 ASSERT(entry != NULL); | 2944 ASSERT(entry != NULL); |
| 2993 entry->value = page; | 2945 entry->value = page; |
| (...skipping 226 matching lines...) |
| 3220 object->ShortPrint(); | 3172 object->ShortPrint(); |
| 3221 PrintF("\n"); | 3173 PrintF("\n"); |
| 3222 } | 3174 } |
| 3223 printf(" --------------------------------------\n"); | 3175 printf(" --------------------------------------\n"); |
| 3224 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3176 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 3225 } | 3177 } |
| 3226 | 3178 |
| 3227 #endif // DEBUG | 3179 #endif // DEBUG |
| 3228 | 3180 |
| 3229 } } // namespace v8::internal | 3181 } } // namespace v8::internal |
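The recurring change in this diff is mechanical: direct reads and writes of `allocation_info_.top` and `allocation_info_.limit` become calls to `top()`, `limit()`, `set_top()`, and `set_limit()`. Below is a rough, stand-alone sketch of what such an accessor wrapper looks like; it is not V8's actual class (the real `AllocationInfo` is declared in spaces.h and is not part of this diff), and the `Address` typedef is an assumption used only to make the sketch compile.

```cpp
#include <cstddef>
#include <cstdint>

// Assumption for this sketch: V8's Address is approximated as a raw byte pointer.
typedef uint8_t* Address;

// Minimal illustration of the getter/setter shape the diff switches to,
// replacing direct field writes such as `allocation_info_.top = NULL;`.
class AllocationInfo {
 public:
  AllocationInfo() : top_(NULL), limit_(NULL) {}

  void set_top(Address top) { top_ = top; }
  Address top() const { return top_; }

  void set_limit(Address limit) { limit_ = limit; }
  Address limit() const { return limit_; }

 private:
  Address top_;    // Next free byte in the linear allocation area.
  Address limit_;  // End of the usable linear allocation area.
};

// Usage mirroring the pattern in the diff:
//   AllocationInfo info;
//   info.set_top(NULL);
//   info.set_limit(NULL);
```

Funneling every update through setters gives a single place to add assertions or instrumentation later, which is presumably the motivation for threading the accessors through all call sites here, alongside the new `maximum_committed_` bookkeeping in SemiSpace and LargeObjectSpace.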